diff options
Diffstat (limited to 'sql')
255 files changed, 31932 insertions, 13793 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 18054252584..7b99e2bdb0b 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -65,7 +65,7 @@ ADD_CUSTOM_COMMAND( DEPENDS gen_lex_token ) -FIND_PACKAGE(BISON 2.0) +FIND_PACKAGE(BISON 2.4) ADD_CUSTOM_COMMAND( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/yy_mariadb.yy @@ -122,15 +122,17 @@ SET (SQL_SOURCE sql_list.cc sql_load.cc sql_manager.cc sql_parse.cc sql_bootstrap.cc sql_partition.cc sql_plugin.cc sql_prepare.cc sql_rename.cc - debug_sync.cc + debug_sync.cc debug.cc sql_repl.cc sql_select.cc sql_show.cc sql_state.c group_by_handler.cc derived_handler.cc select_handler.cc sql_statistics.cc sql_string.cc lex_string.h sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc + ddl_log.cc ddl_log.h sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc sql_time.cc tztime.cc unireg.cc item_xmlfunc.cc uniques.cc - rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_data_objects.cc + rpl_tblmap.cc sql_binlog.cc event_scheduler.cc + event_data_objects.cc event_queue.cc event_db_repository.cc sql_tablespace.cc events.cc ../sql-common/my_user.c partition_info.cc rpl_utility.cc rpl_utility_server.cc @@ -151,7 +153,6 @@ SET (SQL_SOURCE opt_index_cond_pushdown.cc opt_subselect.cc opt_table_elimination.cc sql_expression_cache.cc gcalc_slicescan.cc gcalc_tools.cc - ../sql-common/mysql_async.c my_apc.cc mf_iocache_encr.cc item_jsonfunc.cc my_json_writer.cc rpl_gtid.cc rpl_parallel.cc @@ -170,6 +171,7 @@ SET (SQL_SOURCE rowid_filter.cc rowid_filter.h opt_trace.cc table_cache.cc encryption.cc temporary_tables.cc + json_table.cc proxy_protocol.cc backup.cc xa.cc ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h ${CMAKE_CURRENT_BINARY_DIR}/lex_token.h @@ -191,7 +193,7 @@ IF ((CMAKE_SYSTEM_NAME MATCHES "Linux" OR AND (NOT DISABLE_THREADPOOL)) ADD_DEFINITIONS(-DHAVE_POOL_OF_THREADS) IF(WIN32) - SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc) + SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc threadpool_winsockets.cc 
threadpool_winsockets.h) ENDIF() SET(SQL_SOURCE ${SQL_SOURCE} threadpool_generic.cc) SET(SQL_SOURCE ${SQL_SOURCE} threadpool_common.cc) @@ -199,7 +201,7 @@ IF ((CMAKE_SYSTEM_NAME MATCHES "Linux" OR ENDIF() IF(WIN32) - SET(SQL_SOURCE ${SQL_SOURCE} handle_connections_win.cc nt_servc.cc) + SET(SQL_SOURCE ${SQL_SOURCE} handle_connections_win.cc winmain.cc) ENDIF() MYSQL_ADD_PLUGIN(partition ha_partition.cc STORAGE_ENGINE DEFAULT STATIC_ONLY @@ -271,7 +273,9 @@ IF(MSVC OR CMAKE_SYSTEM_NAME MATCHES AIX) sql_builtins ) IF(MSVC) - SET_TARGET_PROPERTIES(server PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE) + IF(NOT WITHOUT_DYNAMIC_PLUGINS) + SET_TARGET_PROPERTIES(server PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE) + ENDIF() ELSE() SET_TARGET_PROPERTIES(server PROPERTIES AIX_EXPORT_ALL_SYMBOLS TRUE) ENDIF() @@ -331,7 +335,6 @@ IF(WITH_MYSQLD_LDFLAGS) ENDIF() - # Handle out-of-source build from source package with possibly broken # bison. Copy bison output to from source to build directory, if not already # there @@ -381,7 +384,7 @@ ADD_CUSTOM_COMMAND( DEPENDS gen_lex_hash ) -MYSQL_ADD_EXECUTABLE(mariadb-tzinfo-to-sql tztime.cc COMPONENT Server) +MYSQL_ADD_EXECUTABLE(mariadb-tzinfo-to-sql tztime.cc) SET_TARGET_PROPERTIES(mariadb-tzinfo-to-sql PROPERTIES COMPILE_FLAGS "-DTZINFO2SQL") TARGET_LINK_LIBRARIES(mariadb-tzinfo-to-sql mysys mysys_ssl) @@ -430,7 +433,7 @@ IF(TARGET mariadbd AND (NOT CMAKE_CROSSCOMPILING OR DEFINED CMAKE_CROSSCOMPILING COMMAND ${CMAKE_COMMAND} -E make_directory data COMMAND ${CMAKE_COMMAND} -E chdir data ${CMAKE_COMMAND} ${CONFIG_PARAM} - -DTOP_SRCDIR="${CMAKE_SOURCE_DIR}" + -DTOP_SRCDIR="${CMAKE_BINARY_DIR}" -DBINDIR="${CMAKE_CURRENT_BINARY_DIR}" -DMYSQLD_EXECUTABLE="$<TARGET_FILE:mariadbd>" -DCMAKE_CFG_INTDIR="${CMAKE_CFG_INTDIR}" @@ -457,14 +460,15 @@ IF(WIN32) # Create bootstrapper SQL script ADD_CUSTOM_COMMAND(OUTPUT ${my_bootstrap_sql} - COMMAND ${CMAKE_COMMAND} -E chdir ${CMAKE_SOURCE_DIR}/scripts - cmd /c copy 
mysql_system_tables.sql+mysql_system_tables_data.sql+fill_help_tables.sql+mysql_performance_tables.sql+mysql_test_db.sql ${native_outfile} + COMMAND ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/scripts + cmd /c copy mysql_system_tables.sql+mysql_system_tables_data.sql+fill_help_tables.sql+mysql_performance_tables.sql+mysql_test_db.sql+mysql_sys_schema.sql ${native_outfile} DEPENDS ${CMAKE_SOURCE_DIR}/scripts/mysql_system_tables.sql ${CMAKE_SOURCE_DIR}/scripts/mysql_system_tables_data.sql ${CMAKE_SOURCE_DIR}/scripts/fill_help_tables.sql ${CMAKE_SOURCE_DIR}/scripts/mysql_performance_tables.sql ${CMAKE_SOURCE_DIR}/scripts/mysql_test_db.sql + ${CMAKE_BINARY_DIR}/scripts/mysql_sys_schema.sql ) ADD_CUSTOM_COMMAND( @@ -482,7 +486,10 @@ IF(WIN32) ${CMAKE_CURRENT_BINARY_DIR}/mysql_bootstrap_sql.c COMPONENT Server ) - SET_TARGET_PROPERTIES(mariadb-install-db PROPERTIES COMPILE_FLAGS -DINSTALL_PLUGINDIR=${INSTALL_PLUGINDIR}) + + SET_TARGET_PROPERTIES(mariadb-install-db PROPERTIES COMPILE_DEFINITIONS + "INSTALL_PLUGINDIR=${INSTALL_PLUGINDIR};INSTALL_SHAREDIR=${INSTALL_SHAREDIR}" + ) TARGET_LINK_LIBRARIES(mariadb-install-db mysys shlwapi) ADD_LIBRARY(winservice STATIC winservice.c) diff --git a/sql/backup.cc b/sql/backup.cc index b015b2de4a1..0a5cc97f431 100644 --- a/sql/backup.cc +++ b/sql/backup.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, 2020, MariaDB Corporation. +/* Copyright (c) 2018, 2022, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. 
@@ -34,6 +34,7 @@ #include "sql_insert.h" // kill_delayed_threads #include "sql_handler.h" // mysql_ha_cleanup_no_free #include <my_sys.h> +#include <strfunc.h> // strconvert() #ifdef WITH_WSREP #include "wsrep_server_state.h" #endif /* WITH_WSREP */ @@ -45,11 +46,15 @@ TYPELIB backup_stage_names= { array_elements(stage_names)-1, "", stage_names, 0 }; static MDL_ticket *backup_flush_ticket; +static File volatile backup_log= -1; +static int backup_log_error= 0; static bool backup_start(THD *thd); static bool backup_flush(THD *thd); static bool backup_block_ddl(THD *thd); static bool backup_block_commit(THD *thd); +static bool start_ddl_logging(); +static void stop_ddl_logging(); /** Run next stage of backup @@ -58,6 +63,8 @@ static bool backup_block_commit(THD *thd); void backup_init() { backup_flush_ticket= 0; + backup_log= -1; + backup_log_error= 0; } bool run_backup_stage(THD *thd, backup_stages stage) @@ -155,7 +162,6 @@ static bool backup_start(THD *thd) thd->current_backup_stage= BACKUP_FINISHED; // For next test if (thd->has_read_only_protection()) DBUG_RETURN(1); - thd->current_backup_stage= BACKUP_START; if (thd->locked_tables_mode) { @@ -163,14 +169,31 @@ static bool backup_start(THD *thd) DBUG_RETURN(1); } - MDL_REQUEST_INIT(&mdl_request, MDL_key::BACKUP, "", "", MDL_BACKUP_START, + /* this will be reset if this stage fails */ + thd->current_backup_stage= BACKUP_START; + + /* + Wait for old backup to finish and block ddl's so that we can start the + ddl logger + */ + MDL_REQUEST_INIT(&mdl_request, MDL_key::BACKUP, "", "", MDL_BACKUP_BLOCK_DDL, MDL_EXPLICIT); if (thd->mdl_context.acquire_lock(&mdl_request, thd->variables.lock_wait_timeout)) DBUG_RETURN(1); + if (start_ddl_logging()) + { + thd->mdl_context.release_lock(mdl_request.ticket); + DBUG_RETURN(1); + } + + DBUG_ASSERT(backup_flush_ticket == 0); backup_flush_ticket= mdl_request.ticket; + /* Downgrade lock to only block other backups */ + backup_flush_ticket->downgrade_lock(MDL_BACKUP_START); + 
ha_prepare_for_backup(); DBUG_RETURN(0); } @@ -219,7 +242,8 @@ static bool backup_flush(THD *thd) - Kill all insert delay handlers, to ensure that all non transactional tables are closed (can be improved in the future). - - Close handlers as other threads may wait for these, which can cause deadlocks. + - Close handlers as other threads may wait for these, which can cause + deadlocks. - Wait for all statements using write locked non-transactional tables to end. @@ -240,17 +264,20 @@ static bool backup_flush(THD *thd) static bool backup_block_ddl(THD *thd) { + PSI_stage_info org_stage; uint sleep_time; DBUG_ENTER("backup_block_ddl"); kill_delayed_threads(); mysql_ha_cleanup_no_free(thd); + thd->backup_stage(&org_stage); + THD_STAGE_INFO(thd, stage_waiting_for_flush); /* Wait until all non trans statements has ended */ if (thd->mdl_context.upgrade_shared_lock(backup_flush_ticket, MDL_BACKUP_WAIT_FLUSH, thd->variables.lock_wait_timeout)) - DBUG_RETURN(1); + goto err; /* Remove not used tables from the table share. Flush all changes to @@ -287,6 +314,7 @@ static bool backup_block_ddl(THD *thd) RENAME TABLE t1 TO t2, t3 TO t3 and the MDL happens in the middle of it. 
*/ + THD_STAGE_INFO(thd, stage_waiting_for_ddl); sleep_time= 100; // Start with 0.1 seconds for (uint i= 0 ; i <= MAX_RETRY_COUNT ; i++) { @@ -302,15 +330,23 @@ static bool backup_block_ddl(THD *thd) was called so that this function can be called again */ backup_flush_ticket->downgrade_lock(MDL_BACKUP_FLUSH); - DBUG_RETURN(1); + goto err; } thd->clear_error(); // Forget the DEADLOCK error my_sleep(sleep_time); sleep_time*= 5; // Wait a bit longer next time } + + /* There can't be anything more that needs to be logged to ddl log */ + THD_STAGE_INFO(thd, org_stage); + stop_ddl_logging(); DBUG_RETURN(0); +err: + THD_STAGE_INFO(thd, org_stage); + DBUG_RETURN(1); } + /** backup_block_commit() @@ -340,6 +376,7 @@ static bool backup_block_commit(THD *thd) DBUG_RETURN(0); } + /** backup_end() @@ -353,9 +390,14 @@ bool backup_end(THD *thd) if (thd->current_backup_stage != BACKUP_FINISHED) { + DBUG_ASSERT(backup_flush_ticket); + MDL_ticket *old_ticket= backup_flush_ticket; ha_end_backup(); + // This is needed as we may call backup_end without backup_block_commit + stop_ddl_logging(); + backup_flush_ticket= 0; thd->current_backup_stage= BACKUP_FINISHED; - thd->mdl_context.release_lock(backup_flush_ticket); + thd->mdl_context.release_lock(old_ticket); #ifdef WITH_WSREP if (WSREP_NNULL(thd) && thd->wsrep_desynced_backup_stage) { @@ -415,7 +457,7 @@ bool backup_reset_alter_copy_lock(THD *thd) /***************************************************************************** - Backup locks + Interfaces for BACKUP LOCK These functions are used by maria_backup to ensure that there are no active ddl's on the object the backup is going to copy *****************************************************************************/ @@ -447,3 +489,135 @@ void backup_unlock(THD *thd) thd->mdl_context.release_lock(thd->mdl_backup_lock); thd->mdl_backup_lock= 0; } + + +/***************************************************************************** + Logging of ddl statements to backup log 
+*****************************************************************************/ + +static bool start_ddl_logging() +{ + char name[FN_REFLEN]; + DBUG_ENTER("start_ddl_logging"); + + fn_format(name, "ddl", mysql_data_home, ".log", 0); + + backup_log_error= 0; + backup_log= mysql_file_create(key_file_log_ddl, name, CREATE_MODE, + O_TRUNC | O_WRONLY | O_APPEND | O_NOFOLLOW, + MYF(MY_WME)); + DBUG_RETURN(backup_log < 0); +} + +static void stop_ddl_logging() +{ + mysql_mutex_lock(&LOCK_backup_log); + if (backup_log >= 0) + { + mysql_file_close(backup_log, MYF(MY_WME)); + backup_log= -1; + } + backup_log_error= 0; + mysql_mutex_unlock(&LOCK_backup_log); +} + + +static inline char *add_str_to_buffer(char *ptr, const LEX_CSTRING *from) +{ + if (from->length) // If length == 0, str may be 0 + memcpy(ptr, from->str, from->length); + ptr[from->length]= '\t'; + return ptr+ from->length + 1; +} + +static char *add_name_to_buffer(char *ptr, const LEX_CSTRING *from) +{ + LEX_CSTRING tmp; + char buff[NAME_LEN*4]; + uint errors; + + tmp.str= buff; + tmp.length= strconvert(system_charset_info, from->str, from->length, + &my_charset_filename, buff, sizeof(buff), &errors); + return add_str_to_buffer(ptr, &tmp); +} + + +static char *add_id_to_buffer(char *ptr, const LEX_CUSTRING *from) +{ + LEX_CSTRING tmp; + char buff[MY_UUID_STRING_LENGTH]; + + if (!from->length) + return add_str_to_buffer(ptr, (LEX_CSTRING*) from); + + tmp.str= buff; + tmp.length= MY_UUID_STRING_LENGTH; + my_uuid2str(from->str, buff); + return add_str_to_buffer(ptr, &tmp); +} + + +static char *add_bool_to_buffer(char *ptr, bool value) { + *(ptr++) = value ? '1' : '0'; + *(ptr++) = '\t'; + return ptr; +} + +/* + Write to backup log + + Sets backup_log_error in case of error. 
The backup thread could check this + to ensure that all logging had succeded +*/ + +void backup_log_ddl(const backup_log_info *info) +{ + if (backup_log >= 0 && backup_log_error == 0) + { + mysql_mutex_lock(&LOCK_backup_log); + if (backup_log < 0) + { + mysql_mutex_unlock(&LOCK_backup_log); + return; + } + /* Enough place for db.table *2 + query + engine_name * 2 + tabs+ uuids */ + char buff[NAME_CHAR_LEN*4+20+40*2+10+MY_UUID_STRING_LENGTH*2], *ptr= buff; + char timebuff[20]; + struct tm current_time; + LEX_CSTRING tmp_lex; + time_t tmp_time= my_time(0); + + localtime_r(&tmp_time, ¤t_time); + tmp_lex.str= timebuff; + tmp_lex.length= snprintf(timebuff, sizeof(timebuff), + "%4d-%02d-%02d %2d:%02d:%02d", + current_time.tm_year + 1900, + current_time.tm_mon+1, + current_time.tm_mday, + current_time.tm_hour, + current_time.tm_min, + current_time.tm_sec); + ptr= add_str_to_buffer(ptr, &tmp_lex); + + ptr= add_str_to_buffer(ptr, &info->query); + ptr= add_str_to_buffer(ptr, &info->org_storage_engine_name); + ptr= add_bool_to_buffer(ptr, info->org_partitioned); + ptr= add_name_to_buffer(ptr, &info->org_database); + ptr= add_name_to_buffer(ptr, &info->org_table); + ptr= add_id_to_buffer(ptr, &info->org_table_id); + + /* The following fields are only set in case of rename */ + ptr= add_str_to_buffer(ptr, &info->new_storage_engine_name); + ptr= add_bool_to_buffer(ptr, info->new_partitioned); + ptr= add_name_to_buffer(ptr, &info->new_database); + ptr= add_name_to_buffer(ptr, &info->new_table); + ptr= add_id_to_buffer(ptr, &info->new_table_id); + + ptr[-1]= '\n'; // Replace last tab with nl + if (mysql_file_write(backup_log, (uchar*) buff, (size_t) (ptr-buff), + MYF(MY_FNABP))) + backup_log_error= my_errno; + mysql_mutex_unlock(&LOCK_backup_log); + } +} diff --git a/sql/backup.h b/sql/backup.h index 8d8a28b6082..2e5c3a58ba2 100644 --- a/sql/backup.h +++ b/sql/backup.h @@ -23,6 +23,18 @@ enum backup_stages extern TYPELIB backup_stage_names; +struct backup_log_info { + LEX_CSTRING 
query; + LEX_CUSTRING org_table_id; /* Unique id from frm */ + LEX_CSTRING org_database, org_table; + LEX_CSTRING org_storage_engine_name; + LEX_CSTRING new_database, new_table; + LEX_CSTRING new_storage_engine_name; + LEX_CUSTRING new_table_id; /* Unique id from frm */ + bool org_partitioned; + bool new_partitioned; +}; + void backup_init(); bool run_backup_stage(THD *thd, backup_stages stage); bool backup_end(THD *thd); @@ -31,4 +43,5 @@ bool backup_reset_alter_copy_lock(THD *thd); bool backup_lock(THD *thd, TABLE_LIST *table); void backup_unlock(THD *thd); +void backup_log_ddl(const backup_log_info *info); #endif /* BACKUP_INCLUDED */ diff --git a/sql/create_options.cc b/sql/create_options.cc index 60e9b733efc..5437de0f0c3 100644 --- a/sql/create_options.cc +++ b/sql/create_options.cc @@ -402,7 +402,7 @@ static bool resolve_sysvars(handlerton *hton, ha_create_table_option *rules) str.length(0); for (const char **s= optp.typelib->type_names; *s; s++) { - if (str.append(*s) || str.append(',')) + if (str.append(*s, strlen(*s)) || str.append(',')) return 1; } DBUG_ASSERT(str.length()); diff --git a/sql/create_tmp_table.h b/sql/create_tmp_table.h new file mode 100644 index 00000000000..ce86c9456e4 --- /dev/null +++ b/sql/create_tmp_table.h @@ -0,0 +1,80 @@ +#ifndef CREATE_TMP_TABLE_INCLUDED +#define CREATE_TMP_TABLE_INCLUDED + +/* Copyright (c) 2021, MariaDB Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + + +/* + Class for creating internal tempory tables in sql_select.cc +*/ + +class Create_tmp_table: public Data_type_statistics +{ +protected: + // The following members are initialized only in start() + Field **m_from_field, **m_default_field; + KEY_PART_INFO *m_key_part_info; + uchar *m_group_buff, *m_bitmaps; + // The following members are initialized in ctor + uint m_alloced_field_count; + bool m_using_unique_constraint; + uint m_temp_pool_slot; + ORDER *m_group; + bool m_distinct; + bool m_save_sum_fields; + bool m_with_cycle; + ulonglong m_select_options; + ha_rows m_rows_limit; + uint m_group_null_items; + + // counter for distinct/other fields + uint m_field_count[2]; + // counter for distinct/other fields which can be NULL + uint m_null_count[2]; + // counter for distinct/other blob fields + uint m_blobs_count[2]; + // counter for "tails" of bit fields which do not fit in a byte + uint m_uneven_bit[2]; + +public: + enum counter {distinct, other}; + /* + shows which field we are processing: distinct/other (set in processing + cycles) + */ + counter current_counter; + Create_tmp_table(ORDER *group, bool distinct, bool save_sum_fields, + ulonglong select_options, ha_rows rows_limit); + virtual ~Create_tmp_table() {} + virtual bool choose_engine(THD *thd, TABLE *table, TMP_TABLE_PARAM *param); + void add_field(TABLE *table, Field *field, uint fieldnr, + bool force_not_null_cols); + TABLE *start(THD *thd, + TMP_TABLE_PARAM *param, + const LEX_CSTRING *table_alias); + bool add_fields(THD *thd, TABLE *table, + TMP_TABLE_PARAM *param, List<Item> &fields); + + bool add_schema_fields(THD *thd, TABLE *table, + TMP_TABLE_PARAM *param, + const ST_SCHEMA_TABLE &schema_table); + + bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, + bool do_not_open, 
bool keep_row_order); + void cleanup_on_failure(THD *thd, TABLE *table); +}; + +#endif /* CREATE_TMP_TABLE_INCLUDED */ diff --git a/sql/custom_conf.h b/sql/custom_conf.h index 5847b939ca7..f0bb619a515 100644 --- a/sql/custom_conf.h +++ b/sql/custom_conf.h @@ -18,7 +18,7 @@ #define __MYSQL_CUSTOM_BUILD_CONFIG__ #define MYSQL_PORT 5002 -#ifdef __WIN__ +#ifdef _WIN32 #define MYSQL_NAMEDPIPE "SwSqlServer" #define MYSQL_SERVICENAME "SwSqlServer" #define KEY_SERVICE_PARAMETERS diff --git a/sql/datadict.cc b/sql/datadict.cc index 37f90d0309a..e85478a710c 100644 --- a/sql/datadict.cc +++ b/sql/datadict.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2017, 2022, MariaDB corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -55,10 +56,12 @@ static int read_string(File file, uchar**to, size_t length) @retval TABLE_TYPE_VIEW view */ -Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name) +Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name, + LEX_CSTRING *partition_engine_name, + LEX_CUSTRING *table_version) { File file; - uchar header[40]; //"TYPE=VIEW\n" it is 10 characters + uchar header[64+ MY_UUID_SIZE + 2]; // Header and uuid size_t error; Table_type type= TABLE_TYPE_UNKNOWN; uchar dbt; @@ -83,8 +86,18 @@ Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name) engine_name->length= 0; ((char*) (engine_name->str))[0]= 0; } - - if (unlikely((error= mysql_file_read(file, (uchar*) header, sizeof(header), MYF(MY_NABP))))) + if (partition_engine_name) + { + partition_engine_name->length= 0; + partition_engine_name->str= 0; + } + if (table_version) + { + table_version->length= 0; + table_version->str= 0; // Allocated if needed + } + if (unlikely((error= mysql_file_read(file, (uchar*) header, sizeof(header), + MYF(MY_NABP))))) goto err; if (unlikely((!strncmp((char*) 
header, "TYPE=VIEW\n", 10)))) @@ -93,35 +106,56 @@ Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name) goto err; } - /* engine_name is 0 if we only want to know if table is view or not */ - if (!engine_name) - goto err; - if (!is_binary_frm_header(header)) goto err; dbt= header[3]; - if (((header[39] >> 4) & 3) == HA_CHOICE_YES) + if ((header[39] & 0x30) == (HA_CHOICE_YES << 4)) { DBUG_PRINT("info", ("Sequence found")); type= TABLE_TYPE_SEQUENCE; } + if (table_version) + { + /* Read the table version (if it is a 'new' frm file) */ + if (header[64] == EXTRA2_TABLEDEF_VERSION && header[65] == MY_UUID_SIZE) + if ((table_version->str= (uchar*) thd->memdup(header + 66, MY_UUID_SIZE))) + table_version->length= MY_UUID_SIZE; + } + /* cannot use ha_resolve_by_legacy_type without a THD */ if (thd && dbt < DB_TYPE_FIRST_DYNAMIC) { - handlerton *ht= ha_resolve_by_legacy_type(thd, (enum legacy_db_type)dbt); + handlerton *ht= ha_resolve_by_legacy_type(thd, (legacy_db_type) dbt); if (ht) { - *engine_name= hton2plugin[ht->slot]->name; + if (engine_name) + *engine_name= hton2plugin[ht->slot]->name; +#ifdef WITH_PARTITION_STORAGE_ENGINE + if (partition_engine_name && dbt == DB_TYPE_PARTITION_DB) + { + handlerton *p_ht; + legacy_db_type new_dbt= (legacy_db_type) header[61]; + if (new_dbt >= DB_TYPE_FIRST_DYNAMIC) + goto cont; + if (!(p_ht= ha_resolve_by_legacy_type(thd, new_dbt))) + goto err; + *partition_engine_name= *hton_name(p_ht); + } +#endif // WITH_PARTITION_STORAGE_ENGINE goto err; } } +#ifdef WITH_PARTITION_STORAGE_ENGINE +cont: +#endif /* read the true engine name */ + if (engine_name) { - MY_STAT state; + MY_STAT state; uchar *frm_image= 0; uint n_length; @@ -136,7 +170,8 @@ Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name) if (read_string(file, &frm_image, (size_t)state.st_size)) goto err; - if ((n_length= uint4korr(frm_image+55))) + /* The test for !engine_name->length is only true for partition engine */ + if 
(!engine_name->length && (n_length= uint4korr(frm_image+55))) { uint record_offset= uint2korr(frm_image+6)+ ((uint2korr(frm_image+14) == 0xffff ? @@ -162,6 +197,43 @@ Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name) } } +#ifdef WITH_PARTITION_STORAGE_ENGINE + if (partition_engine_name && dbt == DB_TYPE_PARTITION_DB) + { + uint len; + const uchar *extra2; + /* Length of the MariaDB extra2 segment in the form file. */ + len = uint2korr(frm_image+4); + extra2= frm_image + 64; + if (*extra2 != '/') // old frm had '/' there + { + const uchar *e2end= extra2 + len; + while (extra2 + 3 <= e2end) + { + uchar type= *extra2++; + size_t length= *extra2++; + if (!length) + { + if (extra2 + 2 >= e2end) + break; + length= uint2korr(extra2); + extra2+= 2; + if (length < 256) + break; + } + if (extra2 + length > e2end) + break; + if (type == EXTRA2_DEFAULT_PART_ENGINE) + { + partition_engine_name->str= thd->strmake((char*)extra2, length); + partition_engine_name->length= length; + break; + } + extra2+= length; + } + } + } +#endif // WITH_PARTITION_STORAGE_ENGINE my_free(frm_image); } @@ -196,6 +268,6 @@ bool dd_recreate_table(THD *thd, const char *db, const char *table_name) build_table_filename(path_buf, sizeof(path_buf) - 1, db, table_name, "", 0); /* Attempt to reconstruct the table. */ - DBUG_RETURN(ha_create_table(thd, path_buf, db, table_name, &create_info, 0)); + DBUG_RETURN(ha_create_table(thd, path_buf, db, table_name, &create_info, 0, 0)); } diff --git a/sql/datadict.h b/sql/datadict.h index f4af592247a..bec093aa141 100644 --- a/sql/datadict.h +++ b/sql/datadict.h @@ -38,11 +38,13 @@ enum Table_type To check whether it's an frm of a view, use dd_frm_is_view(). 
*/ -enum Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name); +enum Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name, + LEX_CSTRING *partition_engine_name, + LEX_CUSTRING *table_version); static inline bool dd_frm_is_view(THD *thd, char *path) { - return dd_frm_type(thd, path, NULL) == TABLE_TYPE_VIEW; + return dd_frm_type(thd, path, NULL, NULL, NULL) == TABLE_TYPE_VIEW; } bool dd_recreate_table(THD *thd, const char *db, const char *table_name); diff --git a/sql/ddl_log.cc b/sql/ddl_log.cc new file mode 100644 index 00000000000..8722d88ba95 --- /dev/null +++ b/sql/ddl_log.cc @@ -0,0 +1,3528 @@ +/* + Copyright (c) 2000, 2019, Oracle and/or its affiliates. + Copyright (c) 2010, 2021, MariaDB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA +*/ + +#include "mariadb.h" +#include "mysqld.h" +#include "sql_class.h" // init_sql_alloc() +#include "log.h" // sql_print_error() +#include "ddl_log.h" +#include "ha_partition.h" // PAR_EXT +#include "sql_table.h" // build_table_filename +#include "sql_statistics.h" // rename_table_in_stats_tables +#include "sql_view.h" // mysql_rename_view() +#include "strfunc.h" // strconvert +#include "sql_show.h" // append_identifier() +#include "sql_db.h" // drop_database_objects() +#include <mysys_err.h> // EE_LINK + + +/*-------------------------------------------------------------------------- + + MODULE: DDL log + ----------------- + + This module is used to ensure that we can recover from crashes that + occur in the middle of a meta-data operation in MySQL. E.g. DROP + TABLE t1, t2; We need to ensure that both t1 and t2 are dropped and + not only t1 and also that each table drop is entirely done and not + "half-baked". + + To support this we create log entries for each meta-data statement + in the ddl log while we are executing. These entries are dropped + when the operation is completed. + + At recovery those entries that were not completed will be executed. + + There is only one ddl log in the system and it is protected by a mutex + and there is a global struct that contains information about its current + state. + + DDL recovery after a crash works the following way: + + - ddl_log_initialize() initializes the global global_ddl_log variable + and opens the binary log if it exists. If it doesn't exists a new one + is created. + - ddl_log_close_binlogged_events() loops over all log events and checks if + their xid (stored in the EXECUTE_CODE event) is in the binary log. If xid + exists in the binary log the entry is marked as finished in the ddl log. 
+ - After a new binary log is created and is open for new entries, + ddl_log_execute_recovery() is executed on remaining open events: + - Loop over all events + - For each entry with DDL_LOG_ENTRY_CODE execute the remaining phases + in ddl_log_execute_entry_no_lock() + + The ddl_log.log file is created at startup and deleted when server goes down. + After the final recovery phase is done, the file is truncated. + + History: + First version written in 2006 by Mikael Ronstrom + Second version in 2020 by Monty +--------------------------------------------------------------------------*/ + +#define DDL_LOG_MAGIC_LENGTH 4 +/* How many times to try to execute a ddl log entry that causes crashes */ +#define DDL_LOG_MAX_RETRY 3 + +uchar ddl_log_file_magic[]= +{ (uchar) 254, (uchar) 254, (uchar) 11, (uchar) 2 }; + +/* Action names for ddl_log_action_code */ + +const char *ddl_log_action_name[DDL_LOG_LAST_ACTION]= +{ + "Unknown", "partitioning delete", "partitioning rename", + "partitioning replace", "partitioning exchange", + "rename table", "rename view", + "initialize drop table", "drop table", + "drop view", "drop trigger", "drop db", "create table", "create view", + "delete tmp file", "create trigger", "alter table", "store query" +}; + +/* Number of phases per entry */ +const uchar ddl_log_entry_phases[DDL_LOG_LAST_ACTION]= +{ + 0, 1, 1, 2, + (uchar) EXCH_PHASE_END, (uchar) DDL_RENAME_PHASE_END, 1, 1, + (uchar) DDL_DROP_PHASE_END, 1, 1, + (uchar) DDL_DROP_DB_PHASE_END, (uchar) DDL_CREATE_TABLE_PHASE_END, + (uchar) DDL_CREATE_VIEW_PHASE_END, 0, (uchar) DDL_CREATE_TRIGGER_PHASE_END, + DDL_ALTER_TABLE_PHASE_END, 1 +}; + + +struct st_global_ddl_log +{ + uchar *file_entry_buf; + DDL_LOG_MEMORY_ENTRY *first_free; + DDL_LOG_MEMORY_ENTRY *first_used; + File file_id; + uint num_entries; + uint name_pos; + uint io_size; + bool initialized; + bool open, backup_done, created; +}; + +/* + The following structure is only used during startup recovery + for writing queries to the 
binary log. + */ + +class st_ddl_recovery { +public: + String drop_table; + String drop_view; + String query; + String db; + size_t drop_table_init_length, drop_view_init_length; + char current_db[NAME_LEN]; + uint execute_entry_pos; + ulonglong xid; +}; + +static st_global_ddl_log global_ddl_log; +static st_ddl_recovery recovery_state; + +mysql_mutex_t LOCK_gdl; + +/* Positions to different data in a ddl log block */ +#define DDL_LOG_ENTRY_TYPE_POS 0 +/* + Note that ACTION_TYPE and PHASE_POS must be after each other. + See update_phase() +*/ +#define DDL_LOG_ACTION_TYPE_POS 1 +#define DDL_LOG_PHASE_POS 2 +#define DDL_LOG_NEXT_ENTRY_POS 4 +/* Flags to remember something unique about the query, like if .frm was used */ +#define DDL_LOG_FLAG_POS 8 +/* Used to store XID entry that was written to binary log */ +#define DDL_LOG_XID_POS 10 +/* Used to store unique uuid from the .frm file */ +#define DDL_LOG_UUID_POS 18 +/* ID_POS can be used to store something unique, like file size (4 bytes) */ +#define DDL_LOG_ID_POS DDL_LOG_UUID_POS + MY_UUID_SIZE +#define DDL_LOG_END_POS DDL_LOG_ID_POS + 8 + +/* + Position to where names are stored in the ddl log blocks. The current + value is stored in the header and can thus be changed if we need more + space for constants in the header than what is between DDL_LOG_ID_POS and + DDL_LOG_TMP_NAME_POS. +*/ +#define DDL_LOG_TMP_NAME_POS 56 + +/* Definitions for the ddl log header, the first block in the file */ +/* IO_SIZE is stored in the header and can thus be changed */ +#define DDL_LOG_IO_SIZE IO_SIZE + +/* Header is stored in positions 0-3 */ +#define DDL_LOG_IO_SIZE_POS 4 +#define DDL_LOG_NAME_OFFSET_POS 6 +/* Marks if we have done a backup of the ddl log */ +#define DDL_LOG_BACKUP_OFFSET_POS 8 +/* Sum of the above variables */ +#define DDL_LOG_HEADER_SIZE 4+2+2+1 + +/** + Sync the ddl log file. 
+ + @return Operation status + @retval FALSE Success + @retval TRUE Error +*/ + +static bool ddl_log_sync_file() +{ + DBUG_ENTER("ddl_log_sync_file"); + DBUG_RETURN(mysql_file_sync(global_ddl_log.file_id, MYF(MY_WME))); +} + +/* Same as above, but ensure we have the LOCK_gdl locked */ + +static bool ddl_log_sync_no_lock() +{ + DBUG_ENTER("ddl_log_sync_no_lock"); + + mysql_mutex_assert_owner(&LOCK_gdl); + DBUG_RETURN(ddl_log_sync_file()); +} + + +/** + Create ddl log file name. + @param file_name Filename setup +*/ + +static inline void create_ddl_log_file_name(char *file_name, bool backup) +{ + fn_format(file_name, opt_ddl_recovery_file, mysql_data_home, + backup ? "-backup.log" : ".log", MYF(MY_REPLACE_EXT)); +} + + +/** + Write ddl log header. + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool write_ddl_log_header() +{ + uchar header[DDL_LOG_HEADER_SIZE]; + DBUG_ENTER("write_ddl_log_header"); + + memcpy(&header, ddl_log_file_magic, DDL_LOG_MAGIC_LENGTH); + int2store(&header[DDL_LOG_IO_SIZE_POS], global_ddl_log.io_size); + int2store(&header[DDL_LOG_NAME_OFFSET_POS], global_ddl_log.name_pos); + header[DDL_LOG_BACKUP_OFFSET_POS]= 0; + + if (mysql_file_pwrite(global_ddl_log.file_id, + header, sizeof(header), 0, + MYF(MY_WME | MY_NABP))) + DBUG_RETURN(TRUE); + DBUG_RETURN(ddl_log_sync_file()); +} + + +/* + Mark in the ddl log file that we have made a backup of it +*/ + +static void mark_ddl_log_header_backup_done() +{ + uchar marker[1]; + marker[0]= 1; + (void) mysql_file_pwrite(global_ddl_log.file_id, + marker, sizeof(marker), DDL_LOG_BACKUP_OFFSET_POS, + MYF(MY_WME | MY_NABP)); +} + + +void ddl_log_create_backup_file() +{ + char org_file_name[FN_REFLEN]; + char backup_file_name[FN_REFLEN]; + + create_ddl_log_file_name(org_file_name, 0); + create_ddl_log_file_name(backup_file_name, 1); + + my_copy(org_file_name, backup_file_name, MYF(MY_WME)); + mark_ddl_log_header_backup_done(); +} + + +/** + Read one entry from ddl log 
file. + + @param entry_pos Entry number to read + + @return Operation status + @retval true Error + @retval false Success +*/ + +static bool read_ddl_log_file_entry(uint entry_pos) +{ + uchar *file_entry_buf= global_ddl_log.file_entry_buf; + size_t io_size= global_ddl_log.io_size; + DBUG_ENTER("read_ddl_log_file_entry"); + + mysql_mutex_assert_owner(&LOCK_gdl); + DBUG_RETURN (mysql_file_pread(global_ddl_log.file_id, + file_entry_buf, io_size, + io_size * entry_pos, + MYF(MY_WME | MY_NABP))); +} + + +/** + Write one entry to ddl log file. + + @param entry_pos Entry number to write + + @return + @retval true Error + @retval false Success +*/ + +static bool write_ddl_log_file_entry(uint entry_pos) +{ + bool error= FALSE; + File file_id= global_ddl_log.file_id; + uchar *file_entry_buf= global_ddl_log.file_entry_buf; + DBUG_ENTER("write_ddl_log_file_entry"); + + mysql_mutex_assert_owner(&LOCK_gdl); // To be removed + DBUG_RETURN(mysql_file_pwrite(file_id, file_entry_buf, + global_ddl_log.io_size, + global_ddl_log.io_size * entry_pos, + MYF(MY_WME | MY_NABP))); + DBUG_RETURN(error); +} + + +/** + Update phase of ddl log entry + + @param entry_pos ddl_log entry to update + @param phase New phase + + @return + @retval 0 ok + @retval 1 Write error. Error given + + This is done without locks as it's guaranteed to be atomic +*/ + +static bool update_phase(uint entry_pos, uchar phase) +{ + DBUG_ENTER("update_phase"); + DBUG_PRINT("enter", ("phase: %d", (int) phase)); + + DBUG_RETURN(mysql_file_pwrite(global_ddl_log.file_id, &phase, 1, + global_ddl_log.io_size * entry_pos + + DDL_LOG_PHASE_POS, + MYF(MY_WME | MY_NABP)) || + ddl_log_sync_file()); +} + + +/* + Update flags in ddl log entry + + This is not synced as it usually followed by a phase change, which will sync. 
+*/ + +static bool update_flags(uint entry_pos, uint16 flags) +{ + uchar buff[2]; + DBUG_ENTER("update_flags"); + + int2store(buff, flags); + DBUG_RETURN(mysql_file_pwrite(global_ddl_log.file_id, buff, sizeof(buff), + global_ddl_log.io_size * entry_pos + + DDL_LOG_FLAG_POS, + MYF(MY_WME | MY_NABP))); +} + + +static bool update_next_entry_pos(uint entry_pos, uint next_entry) +{ + uchar buff[4]; + DBUG_ENTER("update_next_entry_pos"); + + int4store(buff, next_entry); + DBUG_RETURN(mysql_file_pwrite(global_ddl_log.file_id, buff, sizeof(buff), + global_ddl_log.io_size * entry_pos + + DDL_LOG_NEXT_ENTRY_POS, + MYF(MY_WME | MY_NABP))); +} + + +static bool update_xid(uint entry_pos, ulonglong xid) +{ + uchar buff[8]; + DBUG_ENTER("update_xid"); + + int8store(buff, xid); + DBUG_RETURN(mysql_file_pwrite(global_ddl_log.file_id, buff, sizeof(buff), + global_ddl_log.io_size * entry_pos + + DDL_LOG_XID_POS, + MYF(MY_WME | MY_NABP)) || + ddl_log_sync_file()); +} + + +static bool update_unique_id(uint entry_pos, ulonglong id) +{ + uchar buff[8]; + DBUG_ENTER("update_unique_xid"); + + int8store(buff, id); + DBUG_RETURN(mysql_file_pwrite(global_ddl_log.file_id, buff, sizeof(buff), + global_ddl_log.io_size * entry_pos + + DDL_LOG_ID_POS, + MYF(MY_WME | MY_NABP)) || + ddl_log_sync_file()); +} + + +/* + Disable an execute entry + + @param entry_pos ddl_log entry to update + + Notes: + We don't need sync here as this is mainly done during + recover phase to mark already done entries. We instead sync all entries + at the same time. 
+*/ + +static bool disable_execute_entry(uint entry_pos) +{ + uchar buff[1]; + DBUG_ENTER("disable_execute_entry"); + + buff[0]= DDL_LOG_IGNORE_ENTRY_CODE; + DBUG_RETURN(mysql_file_pwrite(global_ddl_log.file_id, buff, sizeof(buff), + global_ddl_log.io_size * entry_pos + + DDL_LOG_ENTRY_TYPE_POS, + MYF(MY_WME | MY_NABP))); +} + +/* + Disable an execute entry +*/ + +bool ddl_log_disable_execute_entry(DDL_LOG_MEMORY_ENTRY **active_entry) +{ + bool res= disable_execute_entry((*active_entry)->entry_pos); + ddl_log_sync_no_lock(); + return res; +} + + +/* + Check if an executive entry is active + + @return 0 Entry is active + @return 1 Entry is not active +*/ + +static bool is_execute_entry_active(uint entry_pos) +{ + uchar buff[1]; + DBUG_ENTER("disable_execute_entry"); + + if (mysql_file_pread(global_ddl_log.file_id, buff, sizeof(buff), + global_ddl_log.io_size * entry_pos + + DDL_LOG_ENTRY_TYPE_POS, + MYF(MY_WME | MY_NABP))) + DBUG_RETURN(1); + DBUG_RETURN(buff[0] == (uchar) DDL_LOG_EXECUTE_CODE); +} + + +/** + Read header of ddl log file. + + When we read the ddl log header we get information about maximum sizes + of names in the ddl log and we also get information about the number + of entries in the ddl log. + + This is read only once at server startup, so no mutex is needed. + + @return Last entry in ddl log (0 if no entries). 
+ @return -1 if log could not be opened or could not be read +*/ + +static int read_ddl_log_header(const char *file_name) +{ + uchar header[DDL_LOG_HEADER_SIZE]; + int max_entry; + int file_id; + uint io_size; + DBUG_ENTER("read_ddl_log_header"); + + if ((file_id= mysql_file_open(key_file_global_ddl_log, + file_name, + O_RDWR | O_BINARY, MYF(0))) < 0) + DBUG_RETURN(-1); + + if (mysql_file_read(file_id, + header, sizeof(header), MYF(MY_WME | MY_NABP))) + { + /* Write message into error log */ + sql_print_error("DDL_LOG: Failed to read ddl log file '%s' during " + "recovery", file_name); + goto err; + } + + if (memcmp(header, ddl_log_file_magic, 4)) + { + /* Probably upgrade from MySQL 10.5 or earlier */ + sql_print_warning("DDL_LOG: Wrong header in %s. Assuming it is an old " + "recovery file from MariaDB 10.5 or earlier. " + "Skipping DDL recovery", file_name); + goto err; + } + + io_size= uint2korr(&header[DDL_LOG_IO_SIZE_POS]); + global_ddl_log.name_pos= uint2korr(&header[DDL_LOG_NAME_OFFSET_POS]); + global_ddl_log.backup_done= header[DDL_LOG_BACKUP_OFFSET_POS]; + + max_entry= (uint) (mysql_file_seek(file_id, 0L, MY_SEEK_END, MYF(0)) / + io_size); + if (max_entry) + max_entry--; // Don't count first block + + if (!(global_ddl_log.file_entry_buf= (uchar*) + my_malloc(key_memory_DDL_LOG_MEMORY_ENTRY, io_size, + MYF(MY_WME | MY_ZEROFILL)))) + goto err; + + global_ddl_log.open= TRUE; + global_ddl_log.created= 0; + global_ddl_log.file_id= file_id; + global_ddl_log.num_entries= max_entry; + global_ddl_log.io_size= io_size; + DBUG_RETURN(max_entry); + +err: + if (file_id >= 0) + my_close(file_id, MYF(0)); + /* We return -1 to force the ddl log to be re-created */ + DBUG_RETURN(-1); +} + + +/* + Store and read strings in ddl log buffers + + Format is: + 2 byte: length (not counting end \0) + X byte: string value of length 'length' + 1 byte: \0 +*/ + +static uchar *store_string(uchar *pos, uchar *end, const LEX_CSTRING *str) +{ + uint32 length= (uint32) str->length; + if 
(unlikely(pos + 2 + length + 1 > end)) + { + DBUG_ASSERT(0); + return end; // Overflow + } + + int2store(pos, length); + if (likely(length)) + memcpy(pos+2, str->str, length); + pos[2+length]= 0; // Store end \0 + return pos + 2 + length +1; +} + + +static LEX_CSTRING get_string(uchar **pos, const uchar *end) +{ + LEX_CSTRING tmp; + uint32 length; + if (likely(*pos + 3 <= end)) + { + length= uint2korr(*pos); + if (likely(*pos + 2 + length + 1 <= end)) + { + char *str= (char*) *pos+2; + *pos= *pos + 2 + length + 1; + tmp.str= str; + tmp.length= length; + return tmp; + } + } + /* + Overflow on read, should never happen + Set *pos to end to ensure any future calls also returns empty string + */ + DBUG_ASSERT(0); + *pos= (uchar*) end; + tmp.str= ""; + tmp.length= 0; + return tmp; +} + + +/** + Convert from ddl_log_entry struct to file_entry_buf binary blob. + + @param ddl_log_entry filled in ddl_log_entry struct. +*/ + +static void set_global_from_ddl_log_entry(const DDL_LOG_ENTRY *ddl_log_entry) +{ + uchar *file_entry_buf= global_ddl_log.file_entry_buf, *pos, *end; + + mysql_mutex_assert_owner(&LOCK_gdl); + + file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (uchar) ddl_log_entry->entry_type; + file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= (uchar) ddl_log_entry->action_type; + file_entry_buf[DDL_LOG_PHASE_POS]= (uchar) ddl_log_entry->phase; + int4store(file_entry_buf+DDL_LOG_NEXT_ENTRY_POS, ddl_log_entry->next_entry); + int2store(file_entry_buf+DDL_LOG_FLAG_POS, ddl_log_entry->flags); + int8store(file_entry_buf+DDL_LOG_XID_POS, ddl_log_entry->xid); + memcpy(file_entry_buf+DDL_LOG_UUID_POS, ddl_log_entry->uuid, MY_UUID_SIZE); + int8store(file_entry_buf+DDL_LOG_ID_POS, ddl_log_entry->unique_id); + bzero(file_entry_buf+DDL_LOG_END_POS, + global_ddl_log.name_pos - DDL_LOG_END_POS); + + pos= file_entry_buf + global_ddl_log.name_pos; + end= file_entry_buf + global_ddl_log.io_size; + + pos= store_string(pos, end, &ddl_log_entry->handler_name); + pos= store_string(pos, end, 
&ddl_log_entry->db); + pos= store_string(pos, end, &ddl_log_entry->name); + pos= store_string(pos, end, &ddl_log_entry->from_handler_name); + pos= store_string(pos, end, &ddl_log_entry->from_db); + pos= store_string(pos, end, &ddl_log_entry->from_name); + pos= store_string(pos, end, &ddl_log_entry->tmp_name); + pos= store_string(pos, end, &ddl_log_entry->extra_name); + bzero(pos, global_ddl_log.io_size - (pos - file_entry_buf)); +} + + +/* + Calculate how much space we have left in the log entry for one string + + This can be used to check if we have space to store the query string + in the block. +*/ + +static size_t ddl_log_free_space_in_entry(const DDL_LOG_ENTRY *ddl_log_entry) +{ + size_t length= global_ddl_log.name_pos + 3*7; // 3 byte per string below + length+= ddl_log_entry->handler_name.length; + length+= ddl_log_entry->db.length; + length+= ddl_log_entry->name.length; + length+= ddl_log_entry->from_handler_name.length; + length+= ddl_log_entry->from_db.length; + length+= ddl_log_entry->from_name.length; + length+= ddl_log_entry->tmp_name.length; + length+= ddl_log_entry->extra_name.length; + return global_ddl_log.io_size - length - 3; // 3 is for storing next string +} + + +/** + Convert from file_entry_buf binary blob to ddl_log_entry struct. + + @param[out] ddl_log_entry struct to fill in. + + @note Strings (names) are pointing to the global_ddl_log structure, + so LOCK_gdl needs to be hold until they are read or copied. 
+*/ + +static void set_ddl_log_entry_from_global(DDL_LOG_ENTRY *ddl_log_entry, + const uint read_entry) +{ + uchar *file_entry_buf= global_ddl_log.file_entry_buf, *pos; + const uchar *end= file_entry_buf + global_ddl_log.io_size; + uchar single_char; + + mysql_mutex_assert_owner(&LOCK_gdl); + ddl_log_entry->entry_pos= read_entry; + single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]; + ddl_log_entry->entry_type= (enum ddl_log_entry_code) single_char; + single_char= file_entry_buf[DDL_LOG_ACTION_TYPE_POS]; + ddl_log_entry->action_type= (enum ddl_log_action_code) single_char; + ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS]; + ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]); + ddl_log_entry->flags= uint2korr(file_entry_buf + DDL_LOG_FLAG_POS); + ddl_log_entry->xid= uint8korr(file_entry_buf + DDL_LOG_XID_POS); + ddl_log_entry->unique_id= uint8korr(file_entry_buf + DDL_LOG_ID_POS); + memcpy(ddl_log_entry->uuid, file_entry_buf+ DDL_LOG_UUID_POS, MY_UUID_SIZE); + + pos= file_entry_buf + global_ddl_log.name_pos; + ddl_log_entry->handler_name= get_string(&pos, end); + ddl_log_entry->db= get_string(&pos, end); + ddl_log_entry->name= get_string(&pos, end); + ddl_log_entry->from_handler_name= get_string(&pos, end); + ddl_log_entry->from_db= get_string(&pos, end); + ddl_log_entry->from_name= get_string(&pos, end); + ddl_log_entry->tmp_name= get_string(&pos, end); + ddl_log_entry->extra_name= get_string(&pos, end); +} + + +/** + Read a ddl log entry. + + Read a specified entry in the ddl log. 
+ + @param read_entry Number of entry to read + @param[out] entry_info Information from entry + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry) +{ + DBUG_ENTER("read_ddl_log_entry"); + + if (read_ddl_log_file_entry(read_entry)) + { + sql_print_error("DDL_LOG: Failed to read entry %u", read_entry); + DBUG_RETURN(TRUE); + } + set_ddl_log_entry_from_global(ddl_log_entry, read_entry); + DBUG_RETURN(FALSE); +} + + +/** + Create the ddl log file + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool create_ddl_log() +{ + char file_name[FN_REFLEN]; + DBUG_ENTER("create_ddl_log"); + + global_ddl_log.open= 0; + global_ddl_log.created= 1; + global_ddl_log.num_entries= 0; + global_ddl_log.name_pos= DDL_LOG_TMP_NAME_POS; + global_ddl_log.num_entries= 0; + global_ddl_log.backup_done= 0; + + /* + Fix file_entry_buf if the old log had a different io_size or if open of old + log didn't succeed. 
+ */ + if (global_ddl_log.io_size != DDL_LOG_IO_SIZE) + { + uchar *ptr= (uchar*) + my_realloc(key_memory_DDL_LOG_MEMORY_ENTRY, + global_ddl_log.file_entry_buf, DDL_LOG_IO_SIZE, + MYF(MY_WME | MY_ALLOW_ZERO_PTR)); + if (ptr) // Resize succeded */ + { + global_ddl_log.file_entry_buf= ptr; + global_ddl_log.io_size= DDL_LOG_IO_SIZE; + } + if (!global_ddl_log.file_entry_buf) + DBUG_RETURN(TRUE); + } + DBUG_ASSERT(global_ddl_log.file_entry_buf); + bzero(global_ddl_log.file_entry_buf, global_ddl_log.io_size); + create_ddl_log_file_name(file_name, 0); + if ((global_ddl_log.file_id= + mysql_file_create(key_file_global_ddl_log, + file_name, CREATE_MODE, + O_RDWR | O_TRUNC | O_BINARY, + MYF(MY_WME | ME_ERROR_LOG))) < 0) + { + /* Couldn't create ddl log file, this is serious error */ + sql_print_error("DDL_LOG: Failed to create ddl log file: %s", file_name); + my_free(global_ddl_log.file_entry_buf); + global_ddl_log.file_entry_buf= 0; + DBUG_RETURN(TRUE); + } + if (write_ddl_log_header()) + { + (void) mysql_file_close(global_ddl_log.file_id, MYF(MY_WME)); + my_free(global_ddl_log.file_entry_buf); + global_ddl_log.file_entry_buf= 0; + DBUG_RETURN(TRUE); + } + global_ddl_log.open= TRUE; + DBUG_RETURN(FALSE); +} + + +/** + Open ddl log and initialise ddl log variables + Create a backuip of of +*/ + +bool ddl_log_initialize() +{ + char file_name[FN_REFLEN]; + DBUG_ENTER("ddl_log_initialize"); + + bzero(&global_ddl_log, sizeof(global_ddl_log)); + global_ddl_log.file_id= (File) -1; + global_ddl_log.initialized= 1; + + mysql_mutex_init(key_LOCK_gdl, &LOCK_gdl, MY_MUTEX_INIT_SLOW); + + create_ddl_log_file_name(file_name, 0); + if (unlikely(read_ddl_log_header(file_name) < 0)) + { + /* Fatal error, log not opened. Recreate it */ + if (create_ddl_log()) + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/** + @brief Deactivate an individual entry. + + @details For complex rename operations we need to deactivate individual + entries. 
+ + During replace operations where we start with an existing table called + t1 and a replacement table called t1#temp or something else and where + we want to delete t1 and rename t1#temp to t1 this is not possible to + do in a safe manner unless the ddl log is informed of the phases in + the change. + + Delete actions are 1-phase actions that can be ignored immediately after + being executed. + Rename actions from x to y is also a 1-phase action since there is no + interaction with any other handlers named x and y. + Replace action where drop y and x -> y happens needs to be a two-phase + action. Thus the first phase will drop y and the second phase will + rename x -> y. + + @param entry_pos Entry position of record to change + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool ddl_log_increment_phase_no_lock(uint entry_pos) +{ + uchar *file_entry_buf= global_ddl_log.file_entry_buf; + DBUG_ENTER("ddl_log_increment_phase_no_lock"); + + mysql_mutex_assert_owner(&LOCK_gdl); + if (!read_ddl_log_file_entry(entry_pos)) + { + ddl_log_entry_code code= ((ddl_log_entry_code) + file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]); + ddl_log_action_code action= ((ddl_log_action_code) + file_entry_buf[DDL_LOG_ACTION_TYPE_POS]); + + if (code == DDL_LOG_ENTRY_CODE && action < (uint) DDL_LOG_LAST_ACTION) + { + /* + Log entry: + Increase the phase by one. If complete mark it done (IGNORE). + */ + char phase= file_entry_buf[DDL_LOG_PHASE_POS]+ 1; + if (ddl_log_entry_phases[action] <= phase) + { + DBUG_ASSERT(phase == ddl_log_entry_phases[action]); + /* Same effect as setting DDL_LOG_IGNORE_ENTRY_CODE */ + phase= DDL_LOG_FINAL_PHASE; + } + file_entry_buf[DDL_LOG_PHASE_POS]= phase; + if (update_phase(entry_pos, phase)) + DBUG_RETURN(TRUE); + } + else + { + /* + Trying to deativate an execute entry or already deactive entry. 
+ This should not happen + */ + DBUG_ASSERT(0); + } + } + else + { + sql_print_error("DDL_LOG: Failed in reading entry before updating it"); + DBUG_RETURN(TRUE); + } + DBUG_RETURN(FALSE); +} + + +/* + Increment phase and sync ddl log. This expects LOCK_gdl to be locked +*/ + +static bool increment_phase(uint entry_pos) +{ + if (ddl_log_increment_phase_no_lock(entry_pos)) + return 1; + ddl_log_sync_no_lock(); + return 0; +} + + +/* + Ignore errors from the file system about: + - Non existing tables or file (from drop table or delete file) + - Error about tables files that already exists. + - Error from delete table (from Drop_table_error_handler) + - Wrong trigger definer (from Drop_table_error_handler) +*/ + +class ddl_log_error_handler : public Internal_error_handler +{ +public: + int handled_errors; + int unhandled_errors; + int first_error; + bool only_ignore_non_existing_errors; + + ddl_log_error_handler() : handled_errors(0), unhandled_errors(0), + first_error(0), only_ignore_non_existing_errors(0) + {} + + bool handle_condition(THD *thd, + uint sql_errno, + const char* sqlstate, + Sql_condition::enum_warning_level *level, + const char* msg, + Sql_condition ** cond_hdl) + { + *cond_hdl= NULL; + if (non_existing_table_error(sql_errno) || + (!only_ignore_non_existing_errors && + (sql_errno == EE_LINK || + sql_errno == EE_DELETE || sql_errno == ER_TRG_NO_DEFINER))) + { + handled_errors++; + return TRUE; + } + if (!first_error) + first_error= sql_errno; + + if (*level == Sql_condition::WARN_LEVEL_ERROR) + unhandled_errors++; + return FALSE; + } + bool safely_trapped_errors() + { + return (handled_errors > 0 && unhandled_errors == 0); + } +}; + + +/* + Build a filename for a table, trigger file or .frm + Delete also any temporary file suffixed with ~ + + @return 0 Temporary file deleted + @return 1 No temporary file found +*/ + +static bool build_filename_and_delete_tmp_file(char *path, size_t path_length, + const LEX_CSTRING *db, + const LEX_CSTRING *name, + const 
char *ext, + PSI_file_key psi_key) +{ + bool deleted; + uint length= build_table_filename(path, path_length-1, + db->str, name->str, ext, 0); + path[length]= '~'; + path[length+1]= 0; + deleted= mysql_file_delete(psi_key, path, MYF(0)) != 0; + path[length]= 0; + return deleted; +} + + +static LEX_CSTRING end_comment= +{ STRING_WITH_LEN(" /* generated by ddl recovery */")}; + + +/** + Log DROP query to binary log with comment + + This function is only run during recovery +*/ + +static void ddl_log_to_binary_log(THD *thd, String *query) +{ + LEX_CSTRING thd_db= thd->db; + + lex_string_set(&thd->db, recovery_state.current_db); + query->length(query->length()-1); // Removed end ',' + query->append(&end_comment); + mysql_mutex_unlock(&LOCK_gdl); + (void) thd->binlog_query(THD::STMT_QUERY_TYPE, + query->ptr(), query->length(), + TRUE, FALSE, FALSE, 0); + mysql_mutex_lock(&LOCK_gdl); + thd->db= thd_db; +} + + +/** + Log DROP TABLE/VIEW to binary log when needed + + @result 0 Nothing was done + @result 1 Query was logged to binary log & query was reset + + Logging happens in the following cases + - This is the last DROP entry + - The query could be longer than max_packet_length if we would add another + table name to the query + + When we log, we always log all found tables and views at the same time. This + is done to simply the exceute code as otherwise we would have to keep + information of what was logged. 
+*/ + +static bool ddl_log_drop_to_binary_log(THD *thd, DDL_LOG_ENTRY *ddl_log_entry, + String *query) +{ + DBUG_ENTER("ddl_log_drop_to_binary_log"); + if (mysql_bin_log.is_open()) + { + if (!ddl_log_entry->next_entry || + query->length() + end_comment.length + NAME_LEN + 100 > + thd->variables.max_allowed_packet) + { + if (recovery_state.drop_table.length() > + recovery_state.drop_table_init_length) + { + ddl_log_to_binary_log(thd, &recovery_state.drop_table); + recovery_state.drop_table.length(recovery_state.drop_table_init_length); + } + if (recovery_state.drop_view.length() > + recovery_state.drop_view_init_length) + { + ddl_log_to_binary_log(thd, &recovery_state.drop_view); + recovery_state.drop_view.length(recovery_state.drop_view_init_length); + } + DBUG_RETURN(1); + } + } + DBUG_RETURN(0); +} + +/* + Create a new handler based on handlerton name +*/ + +static handler *create_handler(THD *thd, MEM_ROOT *mem_root, + LEX_CSTRING *name) +{ + handlerton *hton; + handler *file; + plugin_ref plugin= my_plugin_lock_by_name(thd, name, + MYSQL_STORAGE_ENGINE_PLUGIN); + if (!plugin) + { + my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(ME_ERROR_LOG), name->str); + return 0; + } + hton= plugin_hton(plugin); + if (!ha_storage_engine_is_enabled(hton)) + { + my_error(ER_STORAGE_ENGINE_DISABLED, MYF(ME_ERROR_LOG), name->str); + return 0; + } + if ((file= hton->create(hton, (TABLE_SHARE*) 0, mem_root))) + file->init(); + return file; +} + + +/* + Rename a table and its .frm file for a ddl_log_entry + + We first rename the table and then the .frm file as some engines, + like connect, needs the .frm file to exists to be able to do an rename. 
+*/ + +static void execute_rename_table(DDL_LOG_ENTRY *ddl_log_entry, handler *file, + const LEX_CSTRING *from_db, + const LEX_CSTRING *from_table, + const LEX_CSTRING *to_db, + const LEX_CSTRING *to_table, + uint flags, + char *from_path, char *to_path) +{ + uint to_length=0, fr_length=0; + DBUG_ENTER("execute_rename_table"); + + if (file->needs_lower_case_filenames()) + { + build_lower_case_table_filename(from_path, FN_REFLEN, + from_db, from_table, + flags & FN_FROM_IS_TMP); + build_lower_case_table_filename(to_path, FN_REFLEN, + to_db, to_table, flags & FN_TO_IS_TMP); + } + else + { + fr_length= build_table_filename(from_path, FN_REFLEN, + from_db->str, from_table->str, "", + flags & FN_TO_IS_TMP); + to_length= build_table_filename(to_path, FN_REFLEN, + to_db->str, to_table->str, "", + flags & FN_TO_IS_TMP); + } + file->ha_rename_table(from_path, to_path); + if (file->needs_lower_case_filenames()) + { + /* + We have to rebuild the file names as the .frm file should be used + without lower case conversion + */ + fr_length= build_table_filename(from_path, FN_REFLEN, + from_db->str, from_table->str, reg_ext, + flags & FN_FROM_IS_TMP); + to_length= build_table_filename(to_path, FN_REFLEN, + to_db->str, to_table->str, reg_ext, + flags & FN_TO_IS_TMP); + } + else + { + strmov(from_path+fr_length, reg_ext); + strmov(to_path+to_length, reg_ext); + } + if (!access(from_path, F_OK)) + (void) mysql_file_rename(key_file_frm, from_path, to_path, MYF(MY_WME)); + DBUG_VOID_RETURN; +} + + +/* + Update triggers + + If swap_tables == 0 (Restoring the original in case of failed rename) + Convert triggers for db.name -> from_db.from_name + else (Doing the rename in case of ALTER TABLE ... 
RENAME) + Convert triggers for from_db.from_name -> db.extra_name +*/ + +static void rename_triggers(THD *thd, DDL_LOG_ENTRY *ddl_log_entry, + bool swap_tables) +{ + LEX_CSTRING to_table, from_table, to_db, from_db, from_converted_name; + char to_path[FN_REFLEN+1], from_path[FN_REFLEN+1], conv_path[FN_REFLEN+1]; + + if (!swap_tables) + { + from_db= ddl_log_entry->db; + from_table= ddl_log_entry->name; + to_db= ddl_log_entry->from_db; + to_table= ddl_log_entry->from_name; + } + else + { + from_db= ddl_log_entry->from_db; + from_table= ddl_log_entry->from_name; + to_db= ddl_log_entry->db; + to_table= ddl_log_entry->extra_name; + } + + build_filename_and_delete_tmp_file(from_path, sizeof(from_path), + &from_db, &from_table, + TRG_EXT, key_file_trg); + build_filename_and_delete_tmp_file(to_path, sizeof(to_path), + &to_db, &to_table, + TRG_EXT, key_file_trg); + if (lower_case_table_names) + { + uint errors; + from_converted_name.str= conv_path; + from_converted_name.length= + strconvert(system_charset_info, from_table.str, from_table.length, + files_charset_info, conv_path, FN_REFLEN, &errors); + } + else + from_converted_name= from_table; + + if (!access(to_path, F_OK)) + { + /* + The original file was never renamed or we crashed in recovery + just after renaming back the file. + In this case the current file is correct and we can remove any + left over copied files + */ + (void) mysql_file_delete(key_file_trg, from_path, MYF(0)); + } + else if (!access(from_path, F_OK)) + { + /* .TRG file was renamed. 
Rename it back */ + /* + We have to create a MDL lock as change_table_names() checks that we + have a mdl locks for the table + */ + MDL_request mdl_request; + TRIGGER_RENAME_PARAM trigger_param; + int error __attribute__((unused)); + MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, + from_db.str, + from_converted_name.str, + MDL_EXCLUSIVE, MDL_EXPLICIT); + error= thd->mdl_context.acquire_lock(&mdl_request, 1); + /* acquire_locks() should never fail during recovery */ + DBUG_ASSERT(error == 0); + + (void) Table_triggers_list::prepare_for_rename(thd, + &trigger_param, + &from_db, + &from_table, + &from_converted_name, + &to_db, + &to_table); + (void) Table_triggers_list::change_table_name(thd, + &trigger_param, + &from_db, + &from_table, + &from_converted_name, + &to_db, + &to_table); + thd->mdl_context.release_lock(mdl_request.ticket); + } +} + + +/* + Update stat tables + + If swap_tables == 0 + Convert stats for from_db.from_table -> db.name + else + Convert stats for db.name -> from_db.from_table +*/ + +static void rename_in_stat_tables(THD *thd, DDL_LOG_ENTRY *ddl_log_entry, + bool swap_tables) +{ + LEX_CSTRING from_table, to_table, from_db, to_db, from_converted_name; + char conv_path[FN_REFLEN+1]; + + if (!swap_tables) + { + from_db= ddl_log_entry->db; + from_table= ddl_log_entry->name; + to_db= ddl_log_entry->from_db; + to_table= ddl_log_entry->from_name; + } + else + { + from_db= ddl_log_entry->from_db; + from_table= ddl_log_entry->from_name; + to_db= ddl_log_entry->db; + to_table= ddl_log_entry->extra_name; + } + if (lower_case_table_names) + { + uint errors; + from_converted_name.str= conv_path; + from_converted_name.length= + strconvert(system_charset_info, from_table.str, from_table.length, + files_charset_info, conv_path, FN_REFLEN, &errors); + } + else + from_converted_name= from_table; + + (void) rename_table_in_stat_tables(thd, + &from_db, + &from_converted_name, + &to_db, + &to_table); +} + + +/** + Execute one action in a ddl log entry + + @param 
ddl_log_entry Information in action entry to execute + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static int ddl_log_execute_action(THD *thd, MEM_ROOT *mem_root, + DDL_LOG_ENTRY *ddl_log_entry) +{ + LEX_CSTRING handler_name; + handler *file= NULL; + char to_path[FN_REFLEN+1], from_path[FN_REFLEN+1]; + handlerton *hton= 0; + ddl_log_error_handler no_such_table_handler; + uint entry_pos= ddl_log_entry->entry_pos; + int error; + bool frm_action= FALSE; + DBUG_ENTER("ddl_log_execute_action"); + + mysql_mutex_assert_owner(&LOCK_gdl); + DBUG_PRINT("ddl_log", + ("entry type: %u action type: %u (%s) phase: %u next: %u " + "handler: '%s' name: '%s' from_name: '%s' tmp_name: '%s'", + (uint) ddl_log_entry->entry_type, + (uint) ddl_log_entry->action_type, + ddl_log_action_name[ddl_log_entry->action_type], + (uint) ddl_log_entry->phase, + ddl_log_entry->next_entry, + ddl_log_entry->handler_name.str, + ddl_log_entry->name.str, + ddl_log_entry->from_name.str, + ddl_log_entry->tmp_name.str)); + + if (ddl_log_entry->entry_type == DDL_LOG_IGNORE_ENTRY_CODE || + ddl_log_entry->phase == DDL_LOG_FINAL_PHASE) + DBUG_RETURN(FALSE); + + handler_name= ddl_log_entry->handler_name; + thd->push_internal_handler(&no_such_table_handler); + + if (!strcmp(ddl_log_entry->handler_name.str, reg_ext)) + frm_action= TRUE; + else if (ddl_log_entry->handler_name.length) + { + if (!(file= create_handler(thd, mem_root, &handler_name))) + goto end; + hton= file->ht; + } + + switch (ddl_log_entry->action_type) { + case DDL_LOG_REPLACE_ACTION: + case DDL_LOG_DELETE_ACTION: + { + if (ddl_log_entry->phase == 0) + { + if (frm_action) + { + strxmov(to_path, ddl_log_entry->name.str, reg_ext, NullS); + if (unlikely((error= mysql_file_delete(key_file_frm, to_path, + MYF(MY_WME | + MY_IGNORE_ENOENT))))) + break; +#ifdef WITH_PARTITION_STORAGE_ENGINE + strxmov(to_path, ddl_log_entry->name.str, PAR_EXT, NullS); + (void) mysql_file_delete(key_file_partition_ddl_log, to_path, + 
MYF(0)); +#endif + } + else + { + if (unlikely((error= hton->drop_table(hton, ddl_log_entry->name.str)))) + { + if (!non_existing_table_error(error)) + break; + } + } + if (increment_phase(entry_pos)) + break; + error= 0; + if (ddl_log_entry->action_type == DDL_LOG_DELETE_ACTION) + break; + } + } + DBUG_ASSERT(ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION); + /* + Fall through and perform the rename action of the replace + action. We have already indicated the success of the delete + action in the log entry by stepping up the phase. + */ + /* fall through */ + case DDL_LOG_RENAME_ACTION: + { + error= TRUE; + if (frm_action) + { + strxmov(to_path, ddl_log_entry->name.str, reg_ext, NullS); + strxmov(from_path, ddl_log_entry->from_name.str, reg_ext, NullS); + (void) mysql_file_rename(key_file_frm, from_path, to_path, MYF(MY_WME)); +#ifdef WITH_PARTITION_STORAGE_ENGINE + strxmov(to_path, ddl_log_entry->name.str, PAR_EXT, NullS); + strxmov(from_path, ddl_log_entry->from_name.str, PAR_EXT, NullS); + (void) mysql_file_rename(key_file_partition_ddl_log, from_path, to_path, + MYF(MY_WME)); +#endif + } + else + (void) file->ha_rename_table(ddl_log_entry->from_name.str, + ddl_log_entry->name.str); + if (increment_phase(entry_pos)) + break; + break; + } + case DDL_LOG_EXCHANGE_ACTION: + { + /* We hold LOCK_gdl, so we can alter global_ddl_log.file_entry_buf */ + uchar *file_entry_buf= global_ddl_log.file_entry_buf; + /* not yet implemented for frm */ + DBUG_ASSERT(!frm_action); + /* + Using a case-switch here to revert all currently done phases, + since it will fall through until the first phase is undone. 
+ */ + switch (ddl_log_entry->phase) { + case EXCH_PHASE_TEMP_TO_FROM: + /* tmp_name -> from_name possibly done */ + (void) file->ha_rename_table(ddl_log_entry->from_name.str, + ddl_log_entry->tmp_name.str); + /* decrease the phase and sync */ + file_entry_buf[DDL_LOG_PHASE_POS]--; + if (write_ddl_log_file_entry(entry_pos)) + break; + (void) ddl_log_sync_no_lock(); + /* fall through */ + case EXCH_PHASE_FROM_TO_NAME: + /* from_name -> name possibly done */ + (void) file->ha_rename_table(ddl_log_entry->name.str, + ddl_log_entry->from_name.str); + /* decrease the phase and sync */ + file_entry_buf[DDL_LOG_PHASE_POS]--; + if (write_ddl_log_file_entry(entry_pos)) + break; + (void) ddl_log_sync_no_lock(); + /* fall through */ + case EXCH_PHASE_NAME_TO_TEMP: + /* name -> tmp_name possibly done */ + (void) file->ha_rename_table(ddl_log_entry->tmp_name.str, + ddl_log_entry->name.str); + /* disable the entry and sync */ + file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_LOG_IGNORE_ENTRY_CODE; + (void) write_ddl_log_file_entry(entry_pos); + (void) ddl_log_sync_no_lock(); + break; + } + break; + } + case DDL_LOG_RENAME_TABLE_ACTION: + { + /* + We should restore things by renaming from + 'entry->name' to 'entry->from_name' + */ + switch (ddl_log_entry->phase) { + case DDL_RENAME_PHASE_TRIGGER: + rename_triggers(thd, ddl_log_entry, 0); + if (increment_phase(entry_pos)) + break; + /* fall through */ + case DDL_RENAME_PHASE_STAT: + /* + Stat tables must be updated last so that we can handle a rename of + a stat table. 
For now we just remember that we have to update it
+ */ + case DDL_LOG_DROP_INIT_ACTION: + { + LEX_CSTRING *comment= &ddl_log_entry->tmp_name; + recovery_state.drop_table.length(0); + recovery_state.drop_table.set_charset(system_charset_info); + recovery_state.drop_table.append(STRING_WITH_LEN("DROP TABLE IF EXISTS ")); + if (comment->length) + { + recovery_state.drop_table.append(comment); + recovery_state.drop_table.append(' '); + } + recovery_state.drop_table_init_length= recovery_state.drop_table.length(); + + recovery_state.drop_view.length(0); + recovery_state.drop_view.set_charset(system_charset_info); + recovery_state.drop_view.append(STRING_WITH_LEN("DROP VIEW IF EXISTS ")); + recovery_state.drop_view_init_length= recovery_state.drop_view.length(); + + strmake(recovery_state.current_db, + ddl_log_entry->from_db.str, sizeof(recovery_state.current_db)-1); + /* We don't increment phase as we want to retry this in case of crash */ + break; + } + case DDL_LOG_DROP_TABLE_ACTION: + { + LEX_CSTRING db, table, path; + db= ddl_log_entry->db; + table= ddl_log_entry->name; + /* Note that path is without .frm extension */ + path= ddl_log_entry->tmp_name; + + switch (ddl_log_entry->phase) { + case DDL_DROP_PHASE_TABLE: + if (hton) + { + no_such_table_handler.only_ignore_non_existing_errors= 1; + error= hton->drop_table(hton, path.str); + no_such_table_handler.only_ignore_non_existing_errors= 0; + if (error) + { + if (!non_existing_table_error(error)) + break; + error= -1; + } + } + else + error= ha_delete_table_force(thd, path.str, &db, &table); + if (error <= 0) + { + /* Not found or already deleted. 
Delete .frm if it exists */ + strxnmov(to_path, sizeof(to_path)-1, path.str, reg_ext, NullS); + mysql_file_delete(key_file_frm, to_path, MYF(MY_WME|MY_IGNORE_ENOENT)); + error= 0; + } + if (increment_phase(entry_pos)) + break; + /* Fall through */ + case DDL_DROP_PHASE_TRIGGER: + Table_triggers_list::drop_all_triggers(thd, &db, &table, + MYF(MY_WME | MY_IGNORE_ENOENT)); + if (increment_phase(entry_pos)) + break; + /* Fall through */ + case DDL_DROP_PHASE_BINLOG: + if (strcmp(recovery_state.current_db, db.str)) + { + append_identifier(thd, &recovery_state.drop_table, &db); + recovery_state.drop_table.append('.'); + } + append_identifier(thd, &recovery_state.drop_table, &table); + recovery_state.drop_table.append(','); + /* We don't increment phase as we want to retry this in case of crash */ + + if (ddl_log_drop_to_binary_log(thd, ddl_log_entry, + &recovery_state.drop_table)) + { + if (increment_phase(entry_pos)) + break; + } + break; + case DDL_DROP_PHASE_RESET: + /* We have already logged all previous drop's. 
Clear the query */ + recovery_state.drop_table.length(recovery_state.drop_table_init_length); + recovery_state.drop_view.length(recovery_state.drop_view_init_length); + break; + } + break; + } + case DDL_LOG_DROP_VIEW_ACTION: + { + LEX_CSTRING db, table, path; + db= ddl_log_entry->db; + table= ddl_log_entry->name; + /* Note that for views path is WITH .frm extension */ + path= ddl_log_entry->tmp_name; + + if (ddl_log_entry->phase == 0) + { + mysql_file_delete(key_file_frm, path.str, MYF(MY_WME|MY_IGNORE_ENOENT)); + if (strcmp(recovery_state.current_db, db.str)) + { + append_identifier(thd, &recovery_state.drop_view, &db); + recovery_state.drop_view.append('.'); + } + append_identifier(thd, &recovery_state.drop_view, &table); + recovery_state.drop_view.append(','); + + if (ddl_log_drop_to_binary_log(thd, ddl_log_entry, + &recovery_state.drop_view)) + { + if (increment_phase(entry_pos)) + break; + } + } + else + { + /* We have already logged all previous drop's. Clear the query */ + recovery_state.drop_table.length(recovery_state.drop_table_init_length); + recovery_state.drop_view.length(recovery_state.drop_table_init_length); + } + break; + } + case DDL_LOG_DROP_TRIGGER_ACTION: + { + MY_STAT stat_info; + off_t frm_length= 1; // Impossible length + LEX_CSTRING thd_db= thd->db; + + /* Delete trigger temporary file if it still exists */ + if (!build_filename_and_delete_tmp_file(to_path, sizeof(to_path) - 1, + &ddl_log_entry->db, + &ddl_log_entry->name, + TRG_EXT, + key_file_fileparser)) + { + /* Temporary file existed and was deleted, nothing left to do */ + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + /* + We can use length of TRG file as an indication if trigger was removed. + If there is no file, then it means that this was the last trigger + and the file was removed. 
+ */ + if (my_stat(to_path, &stat_info, MYF(0))) + frm_length= (off_t) stat_info.st_size; + if (frm_length != (off_t) ddl_log_entry->unique_id && + mysql_bin_log.is_open()) + { + /* + File size changed and it was not binlogged (as this entry was + executed) + */ + (void) rm_trigname_file(to_path, &ddl_log_entry->db, + &ddl_log_entry->from_name, + MYF(0)); + + recovery_state.drop_table.length(0); + recovery_state.drop_table.set_charset(system_charset_info); + if (ddl_log_entry->tmp_name.length) + { + /* We can use the original query */ + recovery_state.drop_table.append(&ddl_log_entry->tmp_name); + } + else + { + /* Generate new query */ + recovery_state.drop_table.append(STRING_WITH_LEN("DROP TRIGGER IF " + "EXISTS ")); + append_identifier(thd, &recovery_state.drop_table, + &ddl_log_entry->from_name); + recovery_state.drop_table.append(&end_comment); + } + if (mysql_bin_log.is_open()) + { + mysql_mutex_unlock(&LOCK_gdl); + thd->db= ddl_log_entry->db; + (void) thd->binlog_query(THD::STMT_QUERY_TYPE, + recovery_state.drop_table.ptr(), + recovery_state.drop_table.length(), TRUE, FALSE, + FALSE, 0); + thd->db= thd_db; + mysql_mutex_lock(&LOCK_gdl); + } + } + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + case DDL_LOG_DROP_DB_ACTION: + { + LEX_CSTRING db, path; + db= ddl_log_entry->db; + path= ddl_log_entry->tmp_name; + + switch (ddl_log_entry->phase) { + case DDL_DROP_DB_PHASE_INIT: + drop_database_objects(thd, &path, &db, + !my_strcasecmp(system_charset_info, + MYSQL_SCHEMA_NAME.str, db.str)); + + strxnmov(to_path, sizeof(to_path)-1, path.str, MY_DB_OPT_FILE, NullS); + mysql_file_delete_with_symlink(key_file_misc, to_path, "", MYF(0)); + + (void) rm_dir_w_symlink(path.str, 0); + if (increment_phase(entry_pos)) + break; + /* fall through */ + case DDL_DROP_DB_PHASE_LOG: + { + String *query= &recovery_state.drop_table; + + query->length(0); + query->append(STRING_WITH_LEN("DROP DATABASE IF EXISTS ")); + append_identifier(thd, query, &db); + 
query->append(&end_comment); + + if (mysql_bin_log.is_open()) + { + mysql_mutex_unlock(&LOCK_gdl); + (void) thd->binlog_query(THD::STMT_QUERY_TYPE, + query->ptr(), query->length(), + TRUE, FALSE, FALSE, 0); + mysql_mutex_lock(&LOCK_gdl); + } + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + } + break; + } + case DDL_LOG_CREATE_TABLE_ACTION: + { + LEX_CSTRING db, table, path; + db= ddl_log_entry->db; + table= ddl_log_entry->name; + path= ddl_log_entry->tmp_name; + + /* Don't delete the table if we didn't create it */ + if (ddl_log_entry->flags == 0) + { + if (hton) + { + if ((error= hton->drop_table(hton, path.str))) + { + if (!non_existing_table_error(error)) + break; + error= -1; + } + } + else + error= ha_delete_table_force(thd, path.str, &db, &table); + } + strxnmov(to_path, sizeof(to_path)-1, path.str, reg_ext, NullS); + mysql_file_delete(key_file_frm, to_path, MYF(MY_WME|MY_IGNORE_ENOENT)); + if (ddl_log_entry->phase == DDL_CREATE_TABLE_PHASE_LOG) + { + /* + The server logged CREATE TABLE ... SELECT into binary log + before crashing. As the commit failed and we have delete the + table above, we have now to log the DROP of the created table. 
+ */ + + String *query= &recovery_state.drop_table; + query->length(0); + query->append(STRING_WITH_LEN("DROP TABLE IF EXISTS ")); + append_identifier(thd, query, &db); + query->append('.'); + append_identifier(thd, query, &table); + query->append(&end_comment); + + if (mysql_bin_log.is_open()) + { + mysql_mutex_unlock(&LOCK_gdl); + (void) thd->binlog_query(THD::STMT_QUERY_TYPE, + query->ptr(), query->length(), + TRUE, FALSE, FALSE, 0); + mysql_mutex_lock(&LOCK_gdl); + } + } + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + error= 0; + break; + } + case DDL_LOG_CREATE_VIEW_ACTION: + { + char *path= to_path; + size_t path_length= ddl_log_entry->tmp_name.length; + memcpy(path, ddl_log_entry->tmp_name.str, path_length+1); + path[path_length+1]= 0; // Prepare for extending + + /* Remove temporary parser file */ + path[path_length]='~'; + mysql_file_delete(key_file_fileparser, path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + path[path_length]= 0; + + switch (ddl_log_entry->phase) { + case DDL_CREATE_VIEW_PHASE_NO_OLD_VIEW: + { + /* + No old view exists, so we can just delete the .frm and temporary files + */ + path[path_length]='-'; + mysql_file_delete(key_file_fileparser, path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + path[path_length]= 0; + mysql_file_delete(key_file_frm, path, MYF(MY_WME|MY_IGNORE_ENOENT)); + break; + } + case DDL_CREATE_VIEW_PHASE_DELETE_VIEW_COPY: + { + /* + Old view existed. We crashed before we had done a copy and change + state to DDL_CREATE_VIEW_PHASE_OLD_VIEW_COPIED + */ + path[path_length]='-'; + mysql_file_delete(key_file_fileparser, path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + path[path_length]= 0; + break; + } + case DDL_CREATE_VIEW_PHASE_OLD_VIEW_COPIED: + { + /* + Old view existed copied to '-' file. 
Restore it + */ + memcpy(from_path, path, path_length+2); + from_path[path_length]='-'; + if (!access(from_path, F_OK)) + mysql_file_rename(key_file_fileparser, from_path, path, MYF(MY_WME)); + break; + } + } + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + case DDL_LOG_DELETE_TMP_FILE_ACTION: + { + LEX_CSTRING path= ddl_log_entry->tmp_name; + DBUG_ASSERT(ddl_log_entry->unique_id <= UINT_MAX32); + if (!ddl_log_entry->unique_id || + !is_execute_entry_active((uint) ddl_log_entry->unique_id)) + mysql_file_delete(key_file_fileparser, path.str, + MYF(MY_WME|MY_IGNORE_ENOENT)); + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + case DDL_LOG_CREATE_TRIGGER_ACTION: + { + LEX_CSTRING db, table, trigger; + db= ddl_log_entry->db; + table= ddl_log_entry->name; + trigger= ddl_log_entry->tmp_name; + + /* Delete backup .TRG (trigger file) if it exists */ + (void) build_filename_and_delete_tmp_file(to_path, sizeof(to_path) - 1, + &db, &table, + TRG_EXT, + key_file_fileparser); + (void) build_filename_and_delete_tmp_file(to_path, sizeof(to_path) - 1, + &db, &trigger, + TRN_EXT, + key_file_fileparser); + switch (ddl_log_entry->phase) { + case DDL_CREATE_TRIGGER_PHASE_DELETE_COPY: + { + size_t length; + /* Delete copy of .TRN and .TRG files */ + length= build_table_filename(to_path, sizeof(to_path) - 1, + db.str, table.str, TRG_EXT, 0); + to_path[length]= '-'; + to_path[length+1]= 0; + mysql_file_delete(key_file_fileparser, to_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + + length= build_table_filename(to_path, sizeof(to_path) - 1, + db.str, trigger.str, TRN_EXT, 0); + to_path[length]= '-'; + to_path[length+1]= 0; + mysql_file_delete(key_file_fileparser, to_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + } + /* Nothing else to do */ + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + case DDL_CREATE_TRIGGER_PHASE_OLD_COPIED: + { + LEX_CSTRING path= {to_path, 0}; + size_t length; + /* Restore old version if the .TRN and .TRG files */ + length= 
build_table_filename(to_path, sizeof(to_path) - 1, + db.str, table.str, TRG_EXT, 0); + to_path[length]='-'; + to_path[length+1]= 0; + path.length= length+1; + /* an old TRN file only exist in the case if REPLACE was used */ + if (!access(to_path, F_OK)) + sql_restore_definition_file(&path); + + length= build_table_filename(to_path, sizeof(to_path) - 1, + db.str, trigger.str, TRN_EXT, 0); + to_path[length]='-'; + to_path[length+1]= 0; + path.length= length+1; + if (!access(to_path, F_OK)) + sql_restore_definition_file(&path); + else + { + /* + There was originally no .TRN for this trigger. + Delete the newly created one. + */ + to_path[length]= 0; + mysql_file_delete(key_file_fileparser, to_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + } + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + case DDL_CREATE_TRIGGER_PHASE_NO_OLD_TRIGGER: + { + /* No old trigger existed. We can just delete the .TRN and .TRG files */ + build_table_filename(to_path, sizeof(to_path) - 1, + db.str, table.str, TRG_EXT, 0); + mysql_file_delete(key_file_fileparser, to_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + build_table_filename(to_path, sizeof(to_path) - 1, + db.str, trigger.str, TRN_EXT, 0); + mysql_file_delete(key_file_fileparser, to_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + } + break; + } + case DDL_LOG_ALTER_TABLE_ACTION: + { + handlerton *org_hton, *partition_hton; + handler *org_file; + bool is_renamed= ddl_log_entry->flags & DDL_LOG_FLAG_ALTER_RENAME; + bool new_version_ready= 0, new_version_unusable= 0; + LEX_CSTRING db, table; + db= ddl_log_entry->db; + table= ddl_log_entry->name; + + if (!(org_file= create_handler(thd, mem_root, + &ddl_log_entry->from_handler_name))) + goto end; + /* Handlerton of the final table and any temporary tables */ + org_hton= org_file->ht; + /* + partition_hton is the hton for the new file, or + in case of ALTER of a partitioned table, the underlying + table + */ + 
partition_hton= hton; + + if (ddl_log_entry->flags & DDL_LOG_FLAG_ALTER_PARTITION) + { + /* + The from and to tables where both using the partition engine. + */ + hton= org_hton; + } + switch (ddl_log_entry->phase) { + case DDL_ALTER_TABLE_PHASE_RENAME_FAILED: + /* + We come here when the final rename of temporary table (#sql-alter) to + the original name failed. Now we have to delete the temporary table + and restore the backup. + */ + quick_rm_table(thd, hton, &db, &table, FN_IS_TMP); + if (!is_renamed) + { + execute_rename_table(ddl_log_entry, file, + &ddl_log_entry->from_db, + &ddl_log_entry->extra_name, // #sql-backup + &ddl_log_entry->from_db, + &ddl_log_entry->from_name, + FN_FROM_IS_TMP, + from_path, to_path); + } + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + case DDL_ALTER_TABLE_PHASE_PREPARE_INPLACE: + /* We crashed before ddl_log_update_unique_id() was called */ + new_version_unusable= 1; + /* fall through */ + case DDL_ALTER_TABLE_PHASE_INPLACE_COPIED: + /* The inplace alter table is committed and ready to be used */ + if (!new_version_unusable) + new_version_ready= 1; + /* fall through */ + case DDL_ALTER_TABLE_PHASE_INPLACE: + { + int fr_length, to_length; + /* + Inplace alter table was used. + On disk there are now a table with the original name, the + original .frm file and potentially a #sql-alter...frm file + with the new definition. + */ + fr_length= build_table_filename(from_path, sizeof(from_path) - 1, + ddl_log_entry->db.str, + ddl_log_entry->name.str, + reg_ext, 0); + to_length= build_table_filename(to_path, sizeof(to_path) - 1, + ddl_log_entry->from_db.str, + ddl_log_entry->from_name.str, + reg_ext, 0); + if (!access(from_path, F_OK)) // Does #sql-alter.. exists? + { + LEX_CUSTRING version= {ddl_log_entry->uuid, MY_UUID_SIZE}; + /* + Temporary .frm file exists. This means that that the table in + the storage engine can be of either old or new version. + If old version, delete the new .frm table and keep the old one. 
+ If new version, replace the old .frm with the new one. + */ + to_path[to_length - reg_ext_length]= 0; // Remove .frm + if (!new_version_unusable && + ( !partition_hton->check_version || new_version_ready || + !partition_hton->check_version(partition_hton, + to_path, &version, + ddl_log_entry->unique_id))) + { + /* Table is up to date */ + + /* + Update state so that if we crash and retry the ddl log entry, + we know that we can use the new table even if .frm is renamed. + */ + if (ddl_log_entry->phase != DDL_ALTER_TABLE_PHASE_INPLACE_COPIED) + (void) update_phase(entry_pos, + DDL_ALTER_TABLE_PHASE_INPLACE_COPIED); + /* Replace old .frm file with new one */ + to_path[to_length - reg_ext_length]= FN_EXTCHAR; + (void) mysql_file_rename(key_file_frm, from_path, to_path, + MYF(MY_WME)); + new_version_ready= 1; + } + else + { + DBUG_ASSERT(!new_version_ready); + /* + Use original version of the .frm file. + Remove temporary #sql-alter.frm file and the #sql-alter table. + We have also to remove the temporary table as some storage engines, + like InnoDB, may use it as an internal temporary table + during inplace alter table. 
+ */ + from_path[fr_length - reg_ext_length]= 0; + error= org_hton->drop_table(org_hton, from_path); + if (non_existing_table_error(error)) + error= 0; + from_path[fr_length - reg_ext_length]= FN_EXTCHAR; + mysql_file_delete(key_file_frm, from_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + } + if (is_renamed && new_version_ready) + { + /* After the renames above, the original table is now in from_name */ + ddl_log_entry->name= ddl_log_entry->from_name; + /* Rename db.name -> db.extra_name */ + execute_rename_table(ddl_log_entry, file, + &ddl_log_entry->db, &ddl_log_entry->name, + &ddl_log_entry->db, &ddl_log_entry->extra_name, + 0, + from_path, to_path); + } + (void) update_phase(entry_pos, DDL_ALTER_TABLE_PHASE_UPDATE_TRIGGERS); + goto update_triggers; + } + case DDL_ALTER_TABLE_PHASE_COPIED: + { + char *from_end; + /* + New table is created and we have the query for the binary log. + We should remove the original table and in the next stage replace + it with the new one. + */ + build_table_filename(from_path, sizeof(from_path) - 1, + ddl_log_entry->from_db.str, + ddl_log_entry->from_name.str, + "", 0); + build_table_filename(to_path, sizeof(to_path) - 1, + ddl_log_entry->db.str, + ddl_log_entry->name.str, + "", 0); + from_end= strend(from_path); + if (likely(org_hton)) + { + error= org_hton->drop_table(org_hton, from_path); + if (non_existing_table_error(error)) + error= 0; + } + strmov(from_end, reg_ext); + mysql_file_delete(key_file_frm, from_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + *from_end= 0; // Remove extension + + (void) update_phase(entry_pos, DDL_ALTER_TABLE_PHASE_OLD_RENAMED); + } + /* fall through */ + case DDL_ALTER_TABLE_PHASE_OLD_RENAMED: + { + /* + The new table (from_path) is up to date. + Original table is either renamed as backup table (normal case), + only frm is renamed (in case of engine change) or deleted above. 
+ */ + if (!is_renamed) + { + uint length; + /* Rename new "temporary" table to the original wanted name */ + execute_rename_table(ddl_log_entry, file, + &ddl_log_entry->db, + &ddl_log_entry->name, + &ddl_log_entry->from_db, + &ddl_log_entry->from_name, + FN_FROM_IS_TMP, + from_path, to_path); + + /* + Remove backup (only happens if alter table used without rename). + Backup name is always in lower case, so there is no need for + converting table names. + */ + length= build_table_filename(from_path, sizeof(from_path) - 1, + ddl_log_entry->from_db.str, + ddl_log_entry->extra_name.str, + "", FN_IS_TMP); + if (likely(org_hton)) + { + if (ddl_log_entry->flags & DDL_LOG_FLAG_ALTER_ENGINE_CHANGED) + { + /* Only frm is renamed, storage engine files have original name */ + build_table_filename(to_path, sizeof(from_path) - 1, + ddl_log_entry->from_db.str, + ddl_log_entry->from_name.str, + "", 0); + error= org_hton->drop_table(org_hton, to_path); + } + else + error= org_hton->drop_table(org_hton, from_path); + if (non_existing_table_error(error)) + error= 0; + } + strmov(from_path + length, reg_ext); + mysql_file_delete(key_file_frm, from_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + } + else + execute_rename_table(ddl_log_entry, file, + &ddl_log_entry->db, &ddl_log_entry->name, + &ddl_log_entry->db, &ddl_log_entry->extra_name, + FN_FROM_IS_TMP, + from_path, to_path); + (void) update_phase(entry_pos, DDL_ALTER_TABLE_PHASE_UPDATE_TRIGGERS); + } + /* fall through */ + case DDL_ALTER_TABLE_PHASE_UPDATE_TRIGGERS: + update_triggers: + { + if (is_renamed) + { + // rename_triggers will rename from: from_db.from_name -> db.extra_name + rename_triggers(thd, ddl_log_entry, 1); + (void) update_phase(entry_pos, DDL_ALTER_TABLE_PHASE_UPDATE_STATS); + } + } + /* fall through */ + case DDL_ALTER_TABLE_PHASE_UPDATE_STATS: + if (is_renamed) + { + ddl_log_entry->name= ddl_log_entry->from_name; + ddl_log_entry->from_name= ddl_log_entry->extra_name; + rename_in_stat_tables(thd, ddl_log_entry, 1); + 
(void) update_phase(entry_pos, DDL_ALTER_TABLE_PHASE_UPDATE_STATS); + } + /* fall through */ + case DDL_ALTER_TABLE_PHASE_UPDATE_BINARY_LOG: + { + /* Write ALTER TABLE query to binary log */ + if (recovery_state.query.length() && mysql_bin_log.is_open()) + { + LEX_CSTRING save_db; + /* Reuse old xid value if possible */ + if (!recovery_state.xid) + recovery_state.xid= server_uuid_value(); + thd->binlog_xid= recovery_state.xid; + update_xid(recovery_state.execute_entry_pos, thd->binlog_xid); + + mysql_mutex_unlock(&LOCK_gdl); + save_db= thd->db; + lex_string_set3(&thd->db, recovery_state.db.ptr(), + recovery_state.db.length()); + (void) thd->binlog_query(THD::STMT_QUERY_TYPE, + recovery_state.query.ptr(), + recovery_state.query.length(), + TRUE, FALSE, FALSE, 0); + thd->binlog_xid= 0; + thd->db= save_db; + mysql_mutex_lock(&LOCK_gdl); + } + recovery_state.query.length(0); + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + /* + The following cases are when alter table failed and we have to roll + back + */ + case DDL_ALTER_TABLE_PHASE_CREATED: + { + /* + Temporary table should have been created. Delete it. 
+ */ + if (likely(hton)) + { + error= hton->drop_table(hton, ddl_log_entry->tmp_name.str); + if (non_existing_table_error(error)) + error= 0; + } + (void) update_phase(entry_pos, DDL_ALTER_TABLE_PHASE_INIT); + } + /* fall through */ + case DDL_ALTER_TABLE_PHASE_INIT: + { + /* + A temporary .frm and possible a .par files should have been created + */ + strxmov(to_path, ddl_log_entry->tmp_name.str, reg_ext, NullS); + mysql_file_delete(key_file_frm, to_path, MYF(MY_WME|MY_IGNORE_ENOENT)); + strxmov(to_path, ddl_log_entry->tmp_name.str, PAR_EXT, NullS); + mysql_file_delete(key_file_partition_ddl_log, to_path, + MYF(MY_WME|MY_IGNORE_ENOENT)); + (void) update_phase(entry_pos, DDL_LOG_FINAL_PHASE); + break; + } + } + delete org_file; + break; + } + case DDL_LOG_STORE_QUERY_ACTION: + { + /* + Read query for next ddl command + */ + if (ddl_log_entry->flags) + { + /* + First QUERY event. Allocate query string. + Query length is stored in unique_id + */ + if (recovery_state.query.alloc((size_t) (ddl_log_entry->unique_id+1))) + goto end; + recovery_state.query.length(0); + recovery_state.db.copy(ddl_log_entry->db.str, ddl_log_entry->db.length, + system_charset_info); + } + if (unlikely(recovery_state.query.length() + + ddl_log_entry->extra_name.length > + recovery_state.query.alloced_length())) + { + /* Impossible length. 
Ignore query */ + recovery_state.query.length(0); + error= 1; + my_error(ER_INTERNAL_ERROR, MYF(0), + "DDL log: QUERY event has impossible length"); + break; + } + recovery_state.query.qs_append(&ddl_log_entry->extra_name); + break; + } + default: + DBUG_ASSERT(0); + break; + } + +end: + delete file; + /* We are only interested in errors that where not ignored */ + if ((error= (no_such_table_handler.unhandled_errors > 0))) + my_errno= no_such_table_handler.first_error; + thd->pop_internal_handler(); + DBUG_RETURN(error); +} + + +/** + Get a free entry in the ddl log + + @param[out] active_entry A ddl log memory entry returned + @param[out] write_header Set to 1 if ddl log was enlarged + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool ddl_log_get_free_entry(DDL_LOG_MEMORY_ENTRY **active_entry) +{ + DDL_LOG_MEMORY_ENTRY *used_entry; + DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used; + DBUG_ENTER("ddl_log_get_free_entry"); + + if (global_ddl_log.first_free == NULL) + { + if (!(used_entry= ((DDL_LOG_MEMORY_ENTRY*) + my_malloc(key_memory_DDL_LOG_MEMORY_ENTRY, + sizeof(DDL_LOG_MEMORY_ENTRY), MYF(MY_WME))))) + { + sql_print_error("DDL_LOG: Failed to allocate memory for ddl log free " + "list"); + *active_entry= 0; + DBUG_RETURN(TRUE); + } + global_ddl_log.num_entries++; + used_entry->entry_pos= global_ddl_log.num_entries; + } + else + { + used_entry= global_ddl_log.first_free; + global_ddl_log.first_free= used_entry->next_log_entry; + } + /* + Move from free list to used list + */ + used_entry->next_log_entry= first_used; + used_entry->prev_log_entry= NULL; + used_entry->next_active_log_entry= NULL; + global_ddl_log.first_used= used_entry; + if (first_used) + first_used->prev_log_entry= used_entry; + + *active_entry= used_entry; + DBUG_RETURN(FALSE); +} + + +/** + Release a log memory entry. 
+ @param log_memory_entry Log memory entry to release +*/ + +void ddl_log_release_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry) +{ + DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry; + DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry; + DBUG_ENTER("ddl_log_release_memory_entry"); + + mysql_mutex_assert_owner(&LOCK_gdl); + log_entry->next_log_entry= global_ddl_log.first_free; + global_ddl_log.first_free= log_entry; + + if (prev_log_entry) + prev_log_entry->next_log_entry= next_log_entry; + else + global_ddl_log.first_used= next_log_entry; + if (next_log_entry) + next_log_entry->prev_log_entry= prev_log_entry; + // Ensure we get a crash if we try to access this link again. + log_entry->next_active_log_entry= (DDL_LOG_MEMORY_ENTRY*) 0x1; + DBUG_VOID_RETURN; +} + + +/** + Execute one entry in the ddl log. + + Executing an entry means executing a linked list of actions. + + @param first_entry Reference to first action in entry + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool ddl_log_execute_entry_no_lock(THD *thd, uint first_entry) +{ + DDL_LOG_ENTRY ddl_log_entry; + uint read_entry= first_entry; + MEM_ROOT mem_root; + DBUG_ENTER("ddl_log_execute_entry_no_lock"); + + mysql_mutex_assert_owner(&LOCK_gdl); + init_sql_alloc(key_memory_gdl, &mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, + MYF(MY_THREAD_SPECIFIC)); + do + { + if (read_ddl_log_entry(read_entry, &ddl_log_entry)) + { + /* Error logged to error log. 
Continue with next log entry */ + break; + } + DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE || + ddl_log_entry.entry_type == DDL_LOG_IGNORE_ENTRY_CODE); + + if (ddl_log_execute_action(thd, &mem_root, &ddl_log_entry)) + { + uint action_type= ddl_log_entry.action_type; + if (action_type >= DDL_LOG_LAST_ACTION) + action_type= 0; + + /* Write to error log and continue with next log entry */ + sql_print_error("DDL_LOG: Got error %d when trying to execute action " + "for entry %u of type '%s'", + (int) my_errno, read_entry, + ddl_log_action_name[action_type]); + break; + } + read_entry= ddl_log_entry.next_entry; + } while (read_entry); + + free_root(&mem_root, MYF(0)); + DBUG_RETURN(FALSE); +} + + +/* + External interface methods for the DDL log Module + --------------------------------------------------- +*/ + +/** + Write a ddl log entry. + + A careful write of the ddl log is performed to ensure that we can + handle crashes occurring during CREATE and ALTER TABLE processing. + + @param ddl_log_entry Information about log entry + @param[out] entry_written Entry information written into + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +bool ddl_log_write_entry(DDL_LOG_ENTRY *ddl_log_entry, + DDL_LOG_MEMORY_ENTRY **active_entry) +{ + bool error; + DBUG_ENTER("ddl_log_write_entry"); + + *active_entry= 0; + mysql_mutex_assert_owner(&LOCK_gdl); + DBUG_ASSERT(global_ddl_log.open); + if (unlikely(!global_ddl_log.open)) + { + my_error(ER_INTERNAL_ERROR, MYF(0), "ddl log not initialized"); + DBUG_RETURN(TRUE); + } + + ddl_log_entry->entry_type= DDL_LOG_ENTRY_CODE; + set_global_from_ddl_log_entry(ddl_log_entry); + if (ddl_log_get_free_entry(active_entry)) + DBUG_RETURN(TRUE); + + error= FALSE; + DBUG_PRINT("ddl_log", + ("entry type: %u action type: %u (%s) phase: %u next: %u " + "handler: '%s' name: '%s' from_name: '%s' tmp_name: '%s'", + (uint) ddl_log_entry->entry_type, + (uint) ddl_log_entry->action_type, + 
ddl_log_action_name[ddl_log_entry->action_type], + (uint) ddl_log_entry->phase, + ddl_log_entry->next_entry, + ddl_log_entry->handler_name.str, + ddl_log_entry->name.str, + ddl_log_entry->from_name.str, + ddl_log_entry->tmp_name.str)); + + if (unlikely(write_ddl_log_file_entry((*active_entry)->entry_pos))) + { + sql_print_error("DDL_LOG: Failed to write entry %u", + (*active_entry)->entry_pos); + ddl_log_release_memory_entry(*active_entry); + *active_entry= 0; + error= TRUE; + } + DBUG_RETURN(error); +} + + +/** + @brief Write or update execute entry in the ddl log. + + @details An execute entry points to the first entry that should + be excuted during recovery. In some cases it's only written once, + in other cases it's updated for each log entry to point to the new + header for the list. + + When called, the previous log entries have already been written but not yet + synched to disk. We write a couple of log entries that describes + action to perform. This entries are set-up in a linked list, + however only when an execute entry is put as the first entry these will be + executed during recovery. + + @param first_entry First entry in linked list of entries + to execute. + @param[in,out] active_entry Entry to execute, 0 = NULL if the entry + is written first time and needs to be + returned. In this case the entry written + is returned in this parameter + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +bool ddl_log_write_execute_entry(uint first_entry, + DDL_LOG_MEMORY_ENTRY **active_entry) +{ + uchar *file_entry_buf= global_ddl_log.file_entry_buf; + bool got_free_entry= 0; + DBUG_ENTER("ddl_log_write_execute_entry"); + + mysql_mutex_assert_owner(&LOCK_gdl); + /* + We haven't synched the log entries yet, we sync them now before + writing the execute entry. 
+ */ + (void) ddl_log_sync_no_lock(); + bzero(file_entry_buf, global_ddl_log.io_size); + + file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (uchar)DDL_LOG_EXECUTE_CODE; + int4store(file_entry_buf + DDL_LOG_NEXT_ENTRY_POS, first_entry); + + if (!(*active_entry)) + { + if (ddl_log_get_free_entry(active_entry)) + DBUG_RETURN(TRUE); + got_free_entry= TRUE; + } + if (write_ddl_log_file_entry((*active_entry)->entry_pos)) + { + sql_print_error("DDL_LOG: Error writing execute entry %u", + (*active_entry)->entry_pos); + if (got_free_entry) + { + ddl_log_release_memory_entry(*active_entry); + *active_entry= 0; + } + DBUG_RETURN(TRUE); + } + (void) ddl_log_sync_no_lock(); + DBUG_RETURN(FALSE); +} + + +/** + Increment phase for entry. Will deactivate entry after all phases are done + + @details see ddl_log_increment_phase_no_lock. + + @param entry_pos Entry position of record to change + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +bool ddl_log_increment_phase(uint entry_pos) +{ + bool error; + DBUG_ENTER("ddl_log_increment_phase"); + + mysql_mutex_lock(&LOCK_gdl); + error= ddl_log_increment_phase_no_lock(entry_pos); + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(error); +} + + +/** + Sync ddl log file. + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +bool ddl_log_sync() +{ + bool error; + DBUG_ENTER("ddl_log_sync"); + + mysql_mutex_lock(&LOCK_gdl); + error= ddl_log_sync_no_lock(); + mysql_mutex_unlock(&LOCK_gdl); + + DBUG_RETURN(error); +} + + +/** + Execute one entry in the ddl log. + + Executing an entry means executing a linked list of actions. + + This function is called for recovering partitioning in case of error. 
+ + @param first_entry Reference to first action in entry + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +bool ddl_log_execute_entry(THD *thd, uint first_entry) +{ + bool error; + DBUG_ENTER("ddl_log_execute_entry"); + + mysql_mutex_lock(&LOCK_gdl); + error= ddl_log_execute_entry_no_lock(thd, first_entry); + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(error); +} + + +/** + Close the ddl log. +*/ + +static void close_ddl_log() +{ + DBUG_ENTER("close_ddl_log"); + if (global_ddl_log.file_id >= 0) + { + (void) mysql_file_close(global_ddl_log.file_id, MYF(MY_WME)); + global_ddl_log.file_id= (File) -1; + } + global_ddl_log.open= 0; + DBUG_VOID_RETURN; +} + + +/** + Loop over ddl log excute entries and mark those that are already stored + in the binary log as completed + + @return + @retval 0 ok + @return 1 fail (write error) + +*/ + +bool ddl_log_close_binlogged_events(HASH *xids) +{ + uint i; + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_close_binlogged_events"); + + if (global_ddl_log.num_entries == 0 || xids->records == 0) + DBUG_RETURN(0); + + mysql_mutex_lock(&LOCK_gdl); + for (i= 1; i <= global_ddl_log.num_entries; i++) + { + if (read_ddl_log_entry(i, &ddl_log_entry)) + break; // Read error. Stop reading + DBUG_PRINT("xid",("xid: %llu", ddl_log_entry.xid)); + if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE && + ddl_log_entry.xid != 0 && + my_hash_search(xids, (uchar*) &ddl_log_entry.xid, + sizeof(ddl_log_entry.xid))) + { + if (disable_execute_entry(i)) + { + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(1); // Write error. Fatal! + } + } + } + (void) ddl_log_sync_no_lock(); + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(0); +} + + +/** + Execute the ddl log at recovery of MySQL Server. + + @return + @retval 0 Ok. + @retval > 0 Fatal error. 
We have to abort (can't create ddl log) + @retval < 0 Recovery failed, but new log exists and is usable + +*/ + +int ddl_log_execute_recovery() +{ + uint i, count= 0; + int error= 0; + THD *thd, *original_thd; + DDL_LOG_ENTRY ddl_log_entry; + static char recover_query_string[]= "INTERNAL DDL LOG RECOVER IN PROGRESS"; + DBUG_ENTER("ddl_log_execute_recovery"); + + if (!global_ddl_log.backup_done && !global_ddl_log.created) + ddl_log_create_backup_file(); + + if (global_ddl_log.num_entries == 0) + DBUG_RETURN(0); + + /* + To be able to run this from boot, we allocate a temporary THD + */ + if (!(thd=new THD(0))) + { + DBUG_ASSERT(0); // Fatal error + DBUG_RETURN(1); + } + original_thd= current_thd; // Probably NULL + thd->thread_stack= (char*) &thd; + thd->store_globals(); + thd->init(); // Needed for error messages + + thd->log_all_errors= (global_system_variables.log_warnings >= 3); + recovery_state.drop_table.free(); + recovery_state.drop_view.free(); + recovery_state.query.free(); + recovery_state.db.free(); + + thd->set_query(recover_query_string, strlen(recover_query_string)); + + mysql_mutex_lock(&LOCK_gdl); + for (i= 1; i <= global_ddl_log.num_entries; i++) + { + if (read_ddl_log_entry(i, &ddl_log_entry)) + { + error= -1; + continue; + } + if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE) + { + /* + Remember information about the execute ddl log entry, + used for binary logging during recovery + */ + recovery_state.execute_entry_pos= i; + recovery_state.xid= ddl_log_entry.xid; + + /* purecov: begin tested */ + if (ddl_log_entry.unique_id > DDL_LOG_MAX_RETRY) + { + error= -1; + continue; + } + update_unique_id(i, ++ddl_log_entry.unique_id); + if (ddl_log_entry.unique_id > DDL_LOG_MAX_RETRY) + { + sql_print_error("DDL_LOG: Aborting executing entry %u after %llu " + "retries", i, ddl_log_entry.unique_id); + error= -1; + continue; + } + /* purecov: end tested */ + + if (ddl_log_execute_entry_no_lock(thd, ddl_log_entry.next_entry)) + { + /* Real unpleasant 
scenario but we have to continue anyway */ + error= -1; + continue; + } + count++; + } + } + recovery_state.drop_table.free(); + recovery_state.drop_view.free(); + recovery_state.query.free(); + recovery_state.db.free(); + close_ddl_log(); + mysql_mutex_unlock(&LOCK_gdl); + thd->reset_query(); + delete thd; + set_current_thd(original_thd); + + /* + Create a new ddl_log to get rid of old stuff and ensure that header matches + the current source version + */ + if (create_ddl_log()) + error= 1; + if (count > 0) + sql_print_information("DDL_LOG: Crash recovery executed %u entries", + count); + + set_current_thd(original_thd); + DBUG_RETURN(error); +} + + +/** + Release all memory allocated to the ddl log and delete the ddl log +*/ + +void ddl_log_release() +{ + char file_name[FN_REFLEN]; + DDL_LOG_MEMORY_ENTRY *free_list; + DDL_LOG_MEMORY_ENTRY *used_list; + DBUG_ENTER("ddl_log_release"); + + if (!global_ddl_log.initialized) + DBUG_VOID_RETURN; + + global_ddl_log.initialized= 0; + + free_list= global_ddl_log.first_free; + used_list= global_ddl_log.first_used; + while (used_list) + { + DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry; + my_free(used_list); + used_list= tmp; + } + while (free_list) + { + DDL_LOG_MEMORY_ENTRY *tmp= free_list->next_log_entry; + my_free(free_list); + free_list= tmp; + } + my_free(global_ddl_log.file_entry_buf); + global_ddl_log.file_entry_buf= 0; + close_ddl_log(); + + create_ddl_log_file_name(file_name, 0); + (void) mysql_file_delete(key_file_global_ddl_log, file_name, MYF(0)); + mysql_mutex_destroy(&LOCK_gdl); + DBUG_VOID_RETURN; +} + + +/** + Methods for DDL_LOG_STATE +*/ + +static void add_log_entry(DDL_LOG_STATE *state, + DDL_LOG_MEMORY_ENTRY *log_entry) +{ + log_entry->next_active_log_entry= state->list; + state->main_entry= state->list= log_entry; +} + + +void ddl_log_release_entries(DDL_LOG_STATE *ddl_log_state) +{ + DDL_LOG_MEMORY_ENTRY *next; + for (DDL_LOG_MEMORY_ENTRY *log_entry= ddl_log_state->list; + log_entry; + 
log_entry= next) + { + next= log_entry->next_active_log_entry; + ddl_log_release_memory_entry(log_entry); + } + ddl_log_state->list= 0; + + if (ddl_log_state->execute_entry) + { + ddl_log_release_memory_entry(ddl_log_state->execute_entry); + ddl_log_state->execute_entry= 0; // Not needed but future safe + } +} + + +/**************************************************************************** + Implementations of common ddl entries +*****************************************************************************/ + +/** + Complete ddl logging. This is done when all statements has completed + successfully and we can disable the execute log entry. +*/ + +void ddl_log_complete(DDL_LOG_STATE *state) +{ + DBUG_ENTER("ddl_log_complete"); + + if (unlikely(!state->list)) + DBUG_VOID_RETURN; // ddl log not used + + mysql_mutex_lock(&LOCK_gdl); + if (likely(state->execute_entry)) + ddl_log_disable_execute_entry(&state->execute_entry); + ddl_log_release_entries(state); + mysql_mutex_unlock(&LOCK_gdl); + state->list= 0; + DBUG_VOID_RETURN; +}; + + +/** + Revert (execute) all entries in the ddl log + + This is called for failed rename table, create trigger or drop trigger. +*/ + +bool ddl_log_revert(THD *thd, DDL_LOG_STATE *state) +{ + bool res= 0; + DBUG_ENTER("ddl_log_revert"); + + if (unlikely(!state->list)) + DBUG_RETURN(0); // ddl log not used + + mysql_mutex_lock(&LOCK_gdl); + if (likely(state->execute_entry)) + { + res= ddl_log_execute_entry_no_lock(thd, state->list->entry_pos); + ddl_log_disable_execute_entry(&state->execute_entry); + } + ddl_log_release_entries(state); + mysql_mutex_unlock(&LOCK_gdl); + state->list= 0; + DBUG_RETURN(res); +} + + +/* + Update phase of main ddl log entry (usually the last one created, + except in case of query events, the one before the query event). 
+*/ + +bool ddl_log_update_phase(DDL_LOG_STATE *state, uchar phase) +{ + DBUG_ENTER("ddl_log_update_phase"); + if (likely(state->list)) + DBUG_RETURN(update_phase(state->main_entry->entry_pos, phase)); + DBUG_RETURN(0); +} + + +/* + Update flag bits in main ddl log entry (usually last created, except in case + of query events, the one before the query event. +*/ + +bool ddl_log_add_flag(DDL_LOG_STATE *state, uint16 flags) +{ + DBUG_ENTER("ddl_log_update_phase"); + if (likely(state->list)) + { + state->flags|= flags; + DBUG_RETURN(update_flags(state->main_entry->entry_pos, state->flags)); + } + DBUG_RETURN(0); +} + + +/** + Update unique_id (used for inplace alter table) +*/ + +bool ddl_log_update_unique_id(DDL_LOG_STATE *state, ulonglong id) +{ + DBUG_ENTER("ddl_log_update_unique_id"); + DBUG_PRINT("enter", ("id: %llu", id)); + /* The following may not be true in case of temporary tables */ + if (likely(state->list)) + DBUG_RETURN(update_unique_id(state->main_entry->entry_pos, id)); + DBUG_RETURN(0); +} + + +/** + Disable last ddl entry +*/ + +bool ddl_log_disable_entry(DDL_LOG_STATE *state) +{ + DBUG_ENTER("ddl_log_disable_entry"); + /* The following may not be true in case of temporary tables */ + if (likely(state->list)) + DBUG_RETURN(update_phase(state->list->entry_pos, DDL_LOG_FINAL_PHASE)); + DBUG_RETURN(0); +} + + +/** + Update XID for execute event +*/ + +bool ddl_log_update_xid(DDL_LOG_STATE *state, ulonglong xid) +{ + DBUG_ENTER("ddl_log_update_xid"); + DBUG_PRINT("enter", ("xid: %llu", xid)); + /* The following may not be true in case of temporary tables */ + if (likely(state->execute_entry)) + DBUG_RETURN(update_xid(state->execute_entry->entry_pos, xid)); + DBUG_RETURN(0); +} + + +/* + Write ddl_log_entry and write or update ddl_execute_entry + + Will update DDL_LOG_STATE->flags +*/ + +static bool ddl_log_write(DDL_LOG_STATE *ddl_state, + DDL_LOG_ENTRY *ddl_log_entry) +{ + int error; + DDL_LOG_MEMORY_ENTRY *log_entry; + DBUG_ENTER("ddl_log_write"); + + 
mysql_mutex_lock(&LOCK_gdl); + error= ((ddl_log_write_entry(ddl_log_entry, &log_entry)) || + ddl_log_write_execute_entry(log_entry->entry_pos, + &ddl_state->execute_entry)); + mysql_mutex_unlock(&LOCK_gdl); + if (error) + { + if (log_entry) + ddl_log_release_memory_entry(log_entry); + DBUG_RETURN(1); + } + add_log_entry(ddl_state, log_entry); + ddl_state->flags|= ddl_log_entry->flags; // Update cache + DBUG_RETURN(0); +} + + +/** + Logging of rename table +*/ + +bool ddl_log_rename_table(THD *thd, DDL_LOG_STATE *ddl_state, + handlerton *hton, + const LEX_CSTRING *org_db, + const LEX_CSTRING *org_alias, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_alias) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_rename_file"); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + + ddl_log_entry.action_type= DDL_LOG_RENAME_TABLE_ACTION; + ddl_log_entry.next_entry= ddl_state->list ? ddl_state->list->entry_pos : 0; + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(hton)); + ddl_log_entry.db= *const_cast<LEX_CSTRING*>(new_db); + ddl_log_entry.name= *const_cast<LEX_CSTRING*>(new_alias); + ddl_log_entry.from_db= *const_cast<LEX_CSTRING*>(org_db); + ddl_log_entry.from_name= *const_cast<LEX_CSTRING*>(org_alias); + ddl_log_entry.phase= DDL_RENAME_PHASE_TABLE; + + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + +/* + Logging of rename view +*/ + +bool ddl_log_rename_view(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *org_db, + const LEX_CSTRING *org_alias, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_alias) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_rename_file"); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + + ddl_log_entry.action_type= DDL_LOG_RENAME_VIEW_ACTION; + ddl_log_entry.next_entry= ddl_state->list ? 
ddl_state->list->entry_pos : 0; + ddl_log_entry.db= *const_cast<LEX_CSTRING*>(new_db); + ddl_log_entry.name= *const_cast<LEX_CSTRING*>(new_alias); + ddl_log_entry.from_db= *const_cast<LEX_CSTRING*>(org_db); + ddl_log_entry.from_name= *const_cast<LEX_CSTRING*>(org_alias); + + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +/** + Logging of DROP TABLE and DROP VIEW + + Note that in contrast to rename, which are re-done in reverse order, + deletes are stored in a linked list according to delete order. This + is to ensure that the tables, for the query generated for binlog, + is in original delete order. +*/ + +static bool ddl_log_drop_init(THD *thd, DDL_LOG_STATE *ddl_state, + ddl_log_action_code action_code, + const LEX_CSTRING *db, + const LEX_CSTRING *comment) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_drop_file"); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + + ddl_log_entry.action_type= action_code; + ddl_log_entry.from_db= *const_cast<LEX_CSTRING*>(db); + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(comment); + + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +bool ddl_log_drop_table_init(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db, + const LEX_CSTRING *comment) +{ + return ddl_log_drop_init(thd, ddl_state, DDL_LOG_DROP_INIT_ACTION, + db, comment); +} + +bool ddl_log_drop_view_init(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db) +{ + return ddl_log_drop_init(thd, ddl_state, DDL_LOG_DROP_INIT_ACTION, + db, &empty_clex_str); +} + + +/** + Log DROP TABLE to the ddl log. + + This code does not call ddl_log_write() as we want the events to + be stored in call order instead of reverse order, which is the normal + case for all other events. + See also comment before ddl_log_drop_init(). 
+*/ + +static bool ddl_log_drop(THD *thd, DDL_LOG_STATE *ddl_state, + ddl_log_action_code action_code, + uint phase, + handlerton *hton, + const LEX_CSTRING *path, + const LEX_CSTRING *db, + const LEX_CSTRING *table) +{ + DDL_LOG_ENTRY ddl_log_entry; + DDL_LOG_MEMORY_ENTRY *log_entry; + DBUG_ENTER("ddl_log_drop"); + + DBUG_ASSERT(ddl_state->list); + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + + ddl_log_entry.action_type= action_code; + if (hton) + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(hton)); + ddl_log_entry.db= *const_cast<LEX_CSTRING*>(db); + ddl_log_entry.name= *const_cast<LEX_CSTRING*>(table); + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(path); + ddl_log_entry.phase= (uchar) phase; + + mysql_mutex_lock(&LOCK_gdl); + if (ddl_log_write_entry(&ddl_log_entry, &log_entry)) + goto error; + + (void) ddl_log_sync_no_lock(); + if (update_next_entry_pos(ddl_state->list->entry_pos, + log_entry->entry_pos)) + { + ddl_log_release_memory_entry(log_entry); + goto error; + } + + mysql_mutex_unlock(&LOCK_gdl); + add_log_entry(ddl_state, log_entry); + DBUG_RETURN(0); + +error: + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(1); +} + + +bool ddl_log_drop_table(THD *thd, DDL_LOG_STATE *ddl_state, + handlerton *hton, + const LEX_CSTRING *path, + const LEX_CSTRING *db, + const LEX_CSTRING *table) +{ + DBUG_ENTER("ddl_log_drop_table"); + DBUG_RETURN(ddl_log_drop(thd, ddl_state, + DDL_LOG_DROP_TABLE_ACTION, DDL_DROP_PHASE_TABLE, + hton, path, db, table)); +} + + +bool ddl_log_drop_view(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *path, + const LEX_CSTRING *db, + const LEX_CSTRING *table) +{ + DBUG_ENTER("ddl_log_drop_view"); + DBUG_RETURN(ddl_log_drop(thd, ddl_state, + DDL_LOG_DROP_VIEW_ACTION, 0, + (handlerton*) 0, path, db, table)); +} + + +bool ddl_log_drop_trigger(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db, + const LEX_CSTRING *table, + const LEX_CSTRING *trigger_name, + const LEX_CSTRING *query) +{ + 
DDL_LOG_ENTRY ddl_log_entry; + MY_STAT stat_info; + char path[FN_REFLEN+1]; + off_t frm_length= 0; + size_t max_query_length; + DBUG_ENTER("ddl_log_drop_trigger"); + + build_table_filename(path, sizeof(path)-1, db->str, table->str, TRG_EXT, 0); + + /* We can use length of frm file as an indication if trigger was removed */ + if (my_stat(path, &stat_info, MYF(MY_WME | ME_WARNING))) + frm_length= (off_t) stat_info.st_size; + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + + ddl_log_entry.action_type= DDL_LOG_DROP_TRIGGER_ACTION; + ddl_log_entry.unique_id= (ulonglong) frm_length; + ddl_log_entry.db= *const_cast<LEX_CSTRING*>(db); + ddl_log_entry.name= *const_cast<LEX_CSTRING*>(table); + ddl_log_entry.from_name= *const_cast<LEX_CSTRING*>(trigger_name); + + /* + If we can store query as is, we store it. Otherwise it will be + re-generated on recovery + */ + + max_query_length= ddl_log_free_space_in_entry(&ddl_log_entry); + if (max_query_length >= query->length) + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(query); + + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +/** + Log DROP DATABASE + + This is logged after all DROP TABLE's for the database. + As we now know we are going to log DROP DATABASE to the binary log, we want + to ignore all preceding DROP TABLE entries. 
We do that by + linking this entry directly after the execute entry and forgetting the + link to the previous entries (not setting ddl_log_entry.next_entry) +*/ + +bool ddl_log_drop_db(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db, const LEX_CSTRING *path) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_drop_db"); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + ddl_log_entry.action_type= DDL_LOG_DROP_DB_ACTION; + ddl_log_entry.db= *const_cast<LEX_CSTRING*>(db); + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(path); + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +/** + Log CREATE TABLE + + @param only_frm On recovery, only drop the .frm. This is needed for + example when deleting a table that was discovered. +*/ + +bool ddl_log_create_table(THD *thd, DDL_LOG_STATE *ddl_state, + handlerton *hton, + const LEX_CSTRING *path, + const LEX_CSTRING *db, + const LEX_CSTRING *table, + bool only_frm) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_create_table"); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + ddl_log_entry.action_type= DDL_LOG_CREATE_TABLE_ACTION; + if (hton) + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(hton)); + ddl_log_entry.db= *const_cast<LEX_CSTRING*>(db); + ddl_log_entry.name= *const_cast<LEX_CSTRING*>(table); + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(path); + ddl_log_entry.flags= only_frm ? 
DDL_LOG_FLAG_ONLY_FRM : 0; + + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +/** + Log CREATE VIEW +*/ + +bool ddl_log_create_view(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *path, + enum_ddl_log_create_view_phase phase) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_create_view"); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + ddl_log_entry.action_type= DDL_LOG_CREATE_VIEW_ACTION; + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(path); + ddl_log_entry.phase= (uchar) phase; + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +/** + Log creation of temporary file that should be deleted during recovery + + @param thd Thread handler + @param ddl_log_state ddl_state + @param path Path to file to be deleted + @param depending_state If not NULL, then do not delete the temp file if this + entry exists and is active. +*/ + +bool ddl_log_delete_tmp_file(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *path, + DDL_LOG_STATE *depending_state) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_delete_tmp_file"); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + ddl_log_entry.action_type= DDL_LOG_DELETE_TMP_FILE_ACTION; + ddl_log_entry.next_entry= ddl_state->list ? 
ddl_state->list->entry_pos : 0; + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(path); + if (depending_state) + ddl_log_entry.unique_id= depending_state->execute_entry->entry_pos; + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +/** + Log CREATE TRIGGER +*/ + +bool ddl_log_create_trigger(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db, const LEX_CSTRING *table, + const LEX_CSTRING *trigger_name, + enum_ddl_log_create_trigger_phase phase) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_create_view"); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + ddl_log_entry.action_type= DDL_LOG_CREATE_TRIGGER_ACTION; + ddl_log_entry.db= *const_cast<LEX_CSTRING*>(db); + ddl_log_entry.name= *const_cast<LEX_CSTRING*>(table); + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(trigger_name); + ddl_log_entry.phase= (uchar) phase; + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +/** + Log ALTER TABLE + + $param backup_name Name of backup table. In case of ALTER TABLE rename + this is the final table name +*/ + +bool ddl_log_alter_table(THD *thd, DDL_LOG_STATE *ddl_state, + handlerton *org_hton, + const LEX_CSTRING *db, const LEX_CSTRING *table, + handlerton *new_hton, + handlerton *partition_underlying_hton, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_table, + const LEX_CSTRING *frm_path, + const LEX_CSTRING *backup_name, + const LEX_CUSTRING *version, + ulonglong table_version, + bool is_renamed) +{ + DDL_LOG_ENTRY ddl_log_entry; + DBUG_ENTER("ddl_log_alter_table"); + DBUG_ASSERT(new_hton); + DBUG_ASSERT(org_hton); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + ddl_log_entry.action_type= DDL_LOG_ALTER_TABLE_ACTION; + if (new_hton) + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(new_hton)); + /* Store temporary table name */ + ddl_log_entry.db= *const_cast<LEX_CSTRING*>(new_db); + ddl_log_entry.name= *const_cast<LEX_CSTRING*>(new_table); + if (org_hton) + 
lex_string_set(&ddl_log_entry.from_handler_name, + ha_resolve_storage_engine_name(org_hton)); + ddl_log_entry.from_db= *const_cast<LEX_CSTRING*>(db); + ddl_log_entry.from_name= *const_cast<LEX_CSTRING*>(table); + ddl_log_entry.tmp_name= *const_cast<LEX_CSTRING*>(frm_path); + ddl_log_entry.extra_name= *const_cast<LEX_CSTRING*>(backup_name); + ddl_log_entry.flags= is_renamed ? DDL_LOG_FLAG_ALTER_RENAME : 0; + ddl_log_entry.unique_id= table_version; + + /* + If we are doing an inplace of a partition engine, we need to log the + underlying engine. We store this in ddl_log_entry.handler_name + */ + if (new_hton == org_hton && partition_underlying_hton != new_hton) + { + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(partition_underlying_hton)); + ddl_log_entry.flags|= DDL_LOG_FLAG_ALTER_PARTITION; + } + DBUG_ASSERT(version->length == MY_UUID_SIZE); + memcpy(ddl_log_entry.uuid, version->str, version->length); + DBUG_RETURN(ddl_log_write(ddl_state, &ddl_log_entry)); +} + + +/* + Store query that later should be logged to binary log + + The links of the query log event are + + execute_log_event -> first log_query_event [-> log_query_event...] -> + action_log_event (probably a LOG_ALTER_TABLE_ACTION event) + + This ensures that when we execute the log_query_event it can collect + the full query from the log_query_events and then execute the + action_log_event with the original query stored in 'recovery_state.query'. + + The query is stored in ddl_log_entry.extra_name as this is the last string + stored in the log block (makes it easier to check and debug). 
+*/ + +bool ddl_log_store_query(THD *thd, DDL_LOG_STATE *ddl_state, + const char *query, size_t length) +{ + DDL_LOG_ENTRY ddl_log_entry; + DDL_LOG_MEMORY_ENTRY *first_entry, *next_entry= 0; + DDL_LOG_MEMORY_ENTRY *original_entry= ddl_state->list; + size_t max_query_length; + uint entry_pos, next_entry_pos= 0, parent_entry_pos; + DBUG_ENTER("ddl_log_store_query"); + DBUG_ASSERT(length <= UINT_MAX32); + DBUG_ASSERT(length > 0); + DBUG_ASSERT(ddl_state->list); + + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); + ddl_log_entry.action_type= DDL_LOG_STORE_QUERY_ACTION; + ddl_log_entry.unique_id= length; + ddl_log_entry.flags= 1; // First entry + ddl_log_entry.db= thd->db; // Current database + + max_query_length= ddl_log_free_space_in_entry(&ddl_log_entry); + + mysql_mutex_lock(&LOCK_gdl); + ddl_log_entry.entry_type= DDL_LOG_ENTRY_CODE; + + if (ddl_log_get_free_entry(&first_entry)) + goto err; + parent_entry_pos= ddl_state->list->entry_pos; + entry_pos= first_entry->entry_pos; + add_log_entry(ddl_state, first_entry); + + while (length) + { + size_t write_length= MY_MIN(length, max_query_length); + ddl_log_entry.extra_name.str= query; + ddl_log_entry.extra_name.length= write_length; + + query+= write_length; + length-= write_length; + + if (length > 0) + { + if (ddl_log_get_free_entry(&next_entry)) + goto err; + ddl_log_entry.next_entry= next_entry_pos= next_entry->entry_pos; + add_log_entry(ddl_state, next_entry); + } + else + { + /* point next link of last query_action event to the original action */ + ddl_log_entry.next_entry= parent_entry_pos; + } + set_global_from_ddl_log_entry(&ddl_log_entry); + if (unlikely(write_ddl_log_file_entry(entry_pos))) + goto err; + entry_pos= next_entry_pos; + ddl_log_entry.flags= 0; // Only first entry has this set + ddl_log_entry.db.length= 0; // Don't need DB anymore + ddl_log_entry.extra_name.length= 0; + max_query_length= ddl_log_free_space_in_entry(&ddl_log_entry); + } + if (ddl_log_write_execute_entry(first_entry->entry_pos, + 
&ddl_state->execute_entry)) + goto err; + + /* Set the original entry to be used for future PHASE updates */ + ddl_state->main_entry= original_entry; + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(0); +err: + /* + Allocated ddl_log entries will be released by the + ddl_log_release_entries() call in dl_log_complete() + */ + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(1); +} diff --git a/sql/ddl_log.h b/sql/ddl_log.h new file mode 100644 index 00000000000..a2a6af76a77 --- /dev/null +++ b/sql/ddl_log.h @@ -0,0 +1,352 @@ +/* + Copyright (c) 2000, 2019, Oracle and/or its affiliates. + Copyright (c) 2010, 2021, MariaDB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA +*/ + +/* External interfaces to ddl log functions */ + +#ifndef DDL_LOG_INCLUDED +#define DDL_LOG_INCLUDED + +enum ddl_log_entry_code +{ + /* + DDL_LOG_UNKOWN + Here mainly to detect blocks that are all zero + + DDL_LOG_EXECUTE_CODE: + This is a code that indicates that this is a log entry to + be executed, from this entry a linked list of log entries + can be found and executed. + DDL_LOG_ENTRY_CODE: + An entry to be executed in a linked list from an execute log + entry. 
+ DDL_LOG_IGNORE_ENTRY_CODE: + An entry that is to be ignored + */ + DDL_LOG_UNKNOWN= 0, + DDL_LOG_EXECUTE_CODE= 1, + DDL_LOG_ENTRY_CODE= 2, + DDL_LOG_IGNORE_ENTRY_CODE= 3, + DDL_LOG_ENTRY_CODE_LAST= 4 +}; + + +/* + When adding things below, also add an entry to ddl_log_action_names and + ddl_log_entry_phases in ddl_log.cc +*/ + +enum ddl_log_action_code +{ + /* + The type of action that a DDL_LOG_ENTRY_CODE entry is to + perform. + */ + DDL_LOG_UNKNOWN_ACTION= 0, + + /* Delete a .frm file or a table in the partition engine */ + DDL_LOG_DELETE_ACTION= 1, + + /* Rename a .frm fire a table in the partition engine */ + DDL_LOG_RENAME_ACTION= 2, + + /* + Rename an entity after removing the previous entry with the + new name, that is replace this entry. + */ + DDL_LOG_REPLACE_ACTION= 3, + + /* Exchange two entities by renaming them a -> tmp, b -> a, tmp -> b */ + DDL_LOG_EXCHANGE_ACTION= 4, + /* + log do_rename(): Rename of .frm file, table, stat_tables and triggers + */ + DDL_LOG_RENAME_TABLE_ACTION= 5, + DDL_LOG_RENAME_VIEW_ACTION= 6, + DDL_LOG_DROP_INIT_ACTION= 7, + DDL_LOG_DROP_TABLE_ACTION= 8, + DDL_LOG_DROP_VIEW_ACTION= 9, + DDL_LOG_DROP_TRIGGER_ACTION= 10, + DDL_LOG_DROP_DB_ACTION=11, + DDL_LOG_CREATE_TABLE_ACTION=12, + DDL_LOG_CREATE_VIEW_ACTION=13, + DDL_LOG_DELETE_TMP_FILE_ACTION=14, + DDL_LOG_CREATE_TRIGGER_ACTION=15, + DDL_LOG_ALTER_TABLE_ACTION=16, + DDL_LOG_STORE_QUERY_ACTION=17, + DDL_LOG_LAST_ACTION /* End marker */ +}; + + +/* Number of phases for each ddl_log_action_code */ +extern const uchar ddl_log_entry_phases[DDL_LOG_LAST_ACTION]; + + +enum enum_ddl_log_exchange_phase { + EXCH_PHASE_NAME_TO_TEMP= 0, + EXCH_PHASE_FROM_TO_NAME= 1, + EXCH_PHASE_TEMP_TO_FROM= 2, + EXCH_PHASE_END +}; + +enum enum_ddl_log_rename_table_phase { + DDL_RENAME_PHASE_TRIGGER= 0, + DDL_RENAME_PHASE_STAT, + DDL_RENAME_PHASE_TABLE, + DDL_RENAME_PHASE_END +}; + +enum enum_ddl_log_drop_table_phase { + DDL_DROP_PHASE_TABLE=0, + DDL_DROP_PHASE_TRIGGER, + DDL_DROP_PHASE_BINLOG, + 
DDL_DROP_PHASE_RESET, /* Reset found list of dropped tables */ + DDL_DROP_PHASE_END +}; + +enum enum_ddl_log_drop_db_phase { + DDL_DROP_DB_PHASE_INIT=0, + DDL_DROP_DB_PHASE_LOG, + DDL_DROP_DB_PHASE_END +}; + +enum enum_ddl_log_create_table_phase { + DDL_CREATE_TABLE_PHASE_INIT=0, + DDL_CREATE_TABLE_PHASE_LOG, + DDL_CREATE_TABLE_PHASE_END +}; + +enum enum_ddl_log_create_view_phase { + DDL_CREATE_VIEW_PHASE_NO_OLD_VIEW, + DDL_CREATE_VIEW_PHASE_DELETE_VIEW_COPY, + DDL_CREATE_VIEW_PHASE_OLD_VIEW_COPIED, + DDL_CREATE_VIEW_PHASE_END +}; + +enum enum_ddl_log_create_trigger_phase { + DDL_CREATE_TRIGGER_PHASE_NO_OLD_TRIGGER, + DDL_CREATE_TRIGGER_PHASE_DELETE_COPY, + DDL_CREATE_TRIGGER_PHASE_OLD_COPIED, + DDL_CREATE_TRIGGER_PHASE_END +}; + +enum enum_ddl_log_alter_table_phase { + DDL_ALTER_TABLE_PHASE_INIT, + DDL_ALTER_TABLE_PHASE_RENAME_FAILED, + DDL_ALTER_TABLE_PHASE_INPLACE_COPIED, + DDL_ALTER_TABLE_PHASE_INPLACE, + DDL_ALTER_TABLE_PHASE_PREPARE_INPLACE, + DDL_ALTER_TABLE_PHASE_CREATED, + DDL_ALTER_TABLE_PHASE_COPIED, + DDL_ALTER_TABLE_PHASE_OLD_RENAMED, + DDL_ALTER_TABLE_PHASE_UPDATE_TRIGGERS, + DDL_ALTER_TABLE_PHASE_UPDATE_STATS, + DDL_ALTER_TABLE_PHASE_UPDATE_BINARY_LOG, + DDL_ALTER_TABLE_PHASE_END +}; + + +/* + Flags stored in DDL_LOG_ENTRY.flags + The flag values can be reused for different commands +*/ +#define DDL_LOG_FLAG_ALTER_RENAME (1 << 0) +#define DDL_LOG_FLAG_ALTER_ENGINE_CHANGED (1 << 1) +#define DDL_LOG_FLAG_ONLY_FRM (1 << 2) +#define DDL_LOG_FLAG_UPDATE_STAT (1 << 3) +/* + Set when using ALTER TABLE on a partitioned table and the table + engine is not changed +*/ +#define DDL_LOG_FLAG_ALTER_PARTITION (1 << 4) + +/* + Setting ddl_log_entry.phase to this has the same effect as setting + the phase to the maximum phase (..PHASE_END) for an entry. 
+*/ + +#define DDL_LOG_FINAL_PHASE ((uchar) 0xff) + +typedef struct st_ddl_log_entry +{ + LEX_CSTRING name; + LEX_CSTRING from_name; + LEX_CSTRING handler_name; + LEX_CSTRING db; + LEX_CSTRING from_db; + LEX_CSTRING from_handler_name; + LEX_CSTRING tmp_name; /* frm file or temporary file name */ + LEX_CSTRING extra_name; /* Backup table name */ + uchar uuid[MY_UUID_SIZE]; // UUID for new frm file + + ulonglong xid; // Xid stored in the binary log + /* + unique_id can be used to store a unique number to check current state. + Currently it is used to store new size of frm file, link to another ddl log + entry or store an a uniq version for a storage engine in alter table. + For execute entries this is reused as an execute counter to ensure we + don't repeat an entry too many times if executing the entry fails. + */ + ulonglong unique_id; + uint next_entry; + uint entry_pos; // Set by write_dll_log_entry() + uint16 flags; // Flags unique for each command + enum ddl_log_entry_code entry_type; // Set automatically + enum ddl_log_action_code action_type; + /* + Most actions have only one phase. REPLACE does however have two + phases. The first phase removes the file with the new name if + there was one there before and the second phase renames the + old name to the new name. + */ + uchar phase; // set automatically +} DDL_LOG_ENTRY; + +typedef struct st_ddl_log_memory_entry +{ + uint entry_pos; + struct st_ddl_log_memory_entry *next_log_entry; + struct st_ddl_log_memory_entry *prev_log_entry; + struct st_ddl_log_memory_entry *next_active_log_entry; +} DDL_LOG_MEMORY_ENTRY; + + +/* + State of the ddl log during execution of a DDL. + + A ddl log state has one execute entry (main entry pointing to the first + action entry) and many 'action entries' linked in a list in the order + they should be executed. + One recovery the log is parsed and all execute entries will be executed. + + All entries are stored as separate blocks in the ddl recovery file. 
+*/ + +typedef struct st_ddl_log_state +{ + /* List of ddl log entries */ + DDL_LOG_MEMORY_ENTRY *list; + /* One execute entry per list */ + DDL_LOG_MEMORY_ENTRY *execute_entry; + /* + Entry used for PHASE updates. Normally same as first in 'list', but in + case of a query log event, this points to the main event. + */ + DDL_LOG_MEMORY_ENTRY *main_entry; + uint16 flags; /* Cache for flags */ + bool is_active() { return list != 0; } +} DDL_LOG_STATE; + + +/* These functions are for recovery */ +bool ddl_log_initialize(); +void ddl_log_release(); +bool ddl_log_close_binlogged_events(HASH *xids); +int ddl_log_execute_recovery(); + +/* functions for updating the ddl log */ +bool ddl_log_write_entry(DDL_LOG_ENTRY *ddl_log_entry, + DDL_LOG_MEMORY_ENTRY **active_entry); + +bool ddl_log_write_execute_entry(uint first_entry, + DDL_LOG_MEMORY_ENTRY **active_entry); +bool ddl_log_disable_execute_entry(DDL_LOG_MEMORY_ENTRY **active_entry); + +void ddl_log_complete(DDL_LOG_STATE *ddl_log_state); +bool ddl_log_revert(THD *thd, DDL_LOG_STATE *ddl_log_state); + +bool ddl_log_update_phase(DDL_LOG_STATE *entry, uchar phase); +bool ddl_log_add_flag(DDL_LOG_STATE *entry, uint16 flag); +bool ddl_log_update_unique_id(DDL_LOG_STATE *state, ulonglong id); +bool ddl_log_update_xid(DDL_LOG_STATE *state, ulonglong xid); +bool ddl_log_disable_entry(DDL_LOG_STATE *state); +bool ddl_log_increment_phase(uint entry_pos); +void ddl_log_release_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry); +bool ddl_log_sync(); +bool ddl_log_execute_entry(THD *thd, uint first_entry); + +void ddl_log_release_entries(DDL_LOG_STATE *ddl_log_state); +bool ddl_log_rename_table(THD *thd, DDL_LOG_STATE *ddl_state, + handlerton *hton, + const LEX_CSTRING *org_db, + const LEX_CSTRING *org_alias, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_alias); +bool ddl_log_rename_view(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *org_db, + const LEX_CSTRING *org_alias, + const LEX_CSTRING *new_db, + const 
LEX_CSTRING *new_alias); +bool ddl_log_drop_table_init(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db, + const LEX_CSTRING *comment); +bool ddl_log_drop_view_init(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db); +bool ddl_log_drop_table(THD *thd, DDL_LOG_STATE *ddl_state, + handlerton *hton, + const LEX_CSTRING *path, + const LEX_CSTRING *db, + const LEX_CSTRING *table); +bool ddl_log_drop_view(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *path, + const LEX_CSTRING *db, + const LEX_CSTRING *table); +bool ddl_log_drop_trigger(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db, + const LEX_CSTRING *table, + const LEX_CSTRING *trigger_name, + const LEX_CSTRING *query); +bool ddl_log_drop_view(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *path, + const LEX_CSTRING *db, + const LEX_CSTRING *table); +bool ddl_log_drop_view(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db); +bool ddl_log_drop_db(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db, const LEX_CSTRING *path); +bool ddl_log_create_table(THD *thd, DDL_LOG_STATE *ddl_state, + handlerton *hton, + const LEX_CSTRING *path, + const LEX_CSTRING *db, + const LEX_CSTRING *table, + bool only_frm); +bool ddl_log_create_view(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *path, + enum_ddl_log_create_view_phase phase); +bool ddl_log_delete_tmp_file(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *path, + DDL_LOG_STATE *depending_state); +bool ddl_log_create_trigger(THD *thd, DDL_LOG_STATE *ddl_state, + const LEX_CSTRING *db, const LEX_CSTRING *table, + const LEX_CSTRING *trigger_name, + enum_ddl_log_create_trigger_phase phase); +bool ddl_log_alter_table(THD *thd, DDL_LOG_STATE *ddl_state, + handlerton *org_hton, + const LEX_CSTRING *db, const LEX_CSTRING *table, + handlerton *new_hton, + handlerton *partition_underlying_hton, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_table, + const LEX_CSTRING *frm_path, + const 
LEX_CSTRING *backup_table_name, + const LEX_CUSTRING *version, + ulonglong table_version, + bool is_renamed); +bool ddl_log_store_query(THD *thd, DDL_LOG_STATE *ddl_log_state, + const char *query, size_t length); +extern mysql_mutex_t LOCK_gdl; +#endif /* DDL_LOG_INCLUDED */ diff --git a/sql/debug.cc b/sql/debug.cc new file mode 100644 index 00000000000..a0e2340e254 --- /dev/null +++ b/sql/debug.cc @@ -0,0 +1,88 @@ +/* Copyright (c) 2021, MariaDB Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software Foundation, + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */ + +#include "mariadb.h" +#include "sql_class.h" +#include "debug.h" + +/** + Debug utility to do crash after a set number of executions + + The user variable, either @debug_crash_counter or @debug_error_counter, + is decremented each time debug_crash() or debug_simulate_error is called + if the keyword is set with @@debug_push, like + @@debug_push="d+frm_data_type_info_emulate" + + If the variable is not set or is not an integer it will be ignored. 
+*/ + +#ifndef DBUG_OFF + +static const LEX_CSTRING debug_crash_counter= +{ STRING_WITH_LEN("debug_crash_counter") }; +static const LEX_CSTRING debug_error_counter= +{ STRING_WITH_LEN("debug_error_counter") }; + +static bool debug_decrement_counter(const LEX_CSTRING *name) +{ + THD *thd= current_thd; + user_var_entry *entry= (user_var_entry*) + my_hash_search(&thd->user_vars, (uchar*) name->str, name->length); + if (!entry || entry->type != INT_RESULT || ! entry->value) + return 0; + (*(ulonglong*) entry->value)= (*(ulonglong*) entry->value)-1; + return !*(ulonglong*) entry->value; +} + +void debug_crash_here(const char *keyword) +{ + DBUG_ENTER("debug_crash_here"); + DBUG_PRINT("enter", ("keyword: %s", keyword)); + + DBUG_EXECUTE_IF(keyword, + if (debug_decrement_counter(&debug_crash_counter)) + { + my_printf_error(ER_INTERNAL_ERROR, + "Crashing at %s", + MYF(ME_ERROR_LOG | ME_NOTE), keyword); + DBUG_SUICIDE(); + }); + DBUG_VOID_RETURN; +} + +/* + This can be used as debug_counter to simulate an error at a specific + position. + + Typical usage would be + if (debug_simualte_error("keyword")) + error= 1; +*/ + +bool debug_simulate_error(const char *keyword, uint error) +{ + DBUG_ENTER("debug_crash_here"); + DBUG_PRINT("enter", ("keyword: %s", keyword)); + DBUG_EXECUTE_IF(keyword, + if (debug_decrement_counter(&debug_error_counter)) + { + my_printf_error(error, + "Simulating error for '%s'", + MYF(ME_ERROR_LOG), keyword); + DBUG_RETURN(1); + }); + DBUG_RETURN(0); +} +#endif /* DBUG_OFF */ diff --git a/sql/debug.h b/sql/debug.h new file mode 100644 index 00000000000..48bae774625 --- /dev/null +++ b/sql/debug.h @@ -0,0 +1,39 @@ +#ifndef DEBUG_INCLUDED +#define DEBUG_INCLUDED + +/* Copyright (c) 2021, MariaDB Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + +/** + @file + + Declarations for debug_crash_here and other future mariadb server debug + functionality. +*/ + +/* debug_crash_here() functionallity. + See mysql_test/suite/atomic/create_table.test for an example of how it + can be used +*/ + +#ifndef DBUG_OFF +void debug_crash_here(const char *keyword); +bool debug_simulate_error(const char *keyword, uint error); +#else +#define debug_crash_here(A) do { } while(0) +#define debug_simulate_error(A, B) 0 +#endif + +#endif /* DEBUG_INCLUDED */ diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index b5de53be000..55523a728f8 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -201,13 +201,12 @@ void debug_sync_end(void) /* Print statistics. */ { - char llbuff[22]; - sql_print_information("Debug sync points hit: %22s", - llstr(debug_sync_global.dsp_hits, llbuff)); - sql_print_information("Debug sync points executed: %22s", - llstr(debug_sync_global.dsp_executed, llbuff)); - sql_print_information("Debug sync points max active per thread: %22s", - llstr(debug_sync_global.dsp_max_active, llbuff)); + sql_print_information("Debug sync points hit: %lld", + debug_sync_global.dsp_hits); + sql_print_information("Debug sync points executed: %lld", + debug_sync_global.dsp_executed); + sql_print_information("Debug sync points max active per thread: %lld", + debug_sync_global.dsp_max_active); } } @@ -999,6 +998,15 @@ static char *debug_sync_number(ulong *number_p, char *actstrptr, The input string needs to be ASCII NUL ('\0') terminated. 
We split nul-terminated tokens in it without copy. + @note + The current implementation does not support two 'now SIGNAL xxx' commands + in a row for multiple threads as the first one can get lost while + the waiting threads are sleeping on mysql_cond_timedwait(). + One reason for this is that the signal name is stored in a global variable + that is overwritten. A better way would be to store all signals in + an array together with a 'time' when the signal was sent. This array + should be checked on broadcast. + @see the function comment of debug_sync_token() for more constraints for the string. */ diff --git a/sql/debug_sync.h b/sql/debug_sync.h index 3b8aa8815e1..831b86b688e 100644 --- a/sql/debug_sync.h +++ b/sql/debug_sync.h @@ -52,5 +52,4 @@ static inline void debug_sync_reset_thread(THD *thd) {} static inline bool debug_sync_set_action(THD *, const char *, size_t) { return false; } #endif /* defined(ENABLED_DEBUG_SYNC) */ - #endif /* DEBUG_SYNC_INCLUDED */ diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 1e406e9143f..3980f248327 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -117,7 +117,7 @@ Event_creation_ctx::load_from_db(THD *thd, bool invalid_creation_ctx= FALSE; - if (load_charset(event_mem_root, + if (load_charset(thd, event_mem_root, event_tbl->field[ET_FIELD_CHARACTER_SET_CLIENT], thd->variables.character_set_client, &client_cs)) @@ -130,7 +130,7 @@ Event_creation_ctx::load_from_db(THD *thd, invalid_creation_ctx= TRUE; } - if (load_collation(event_mem_root, + if (load_collation(thd, event_mem_root, event_tbl->field[ET_FIELD_COLLATION_CONNECTION], thd->variables.collation_connection, &connection_cl)) @@ -143,7 +143,7 @@ Event_creation_ctx::load_from_db(THD *thd, invalid_creation_ctx= TRUE; } - if (load_collation(event_mem_root, + if (load_collation(thd, event_mem_root, event_tbl->field[ET_FIELD_DB_COLLATION], NULL, &db_cl)) @@ -294,7 +294,7 @@ Event_basic::load_string_fields(Field **fields, ...) 
bool Event_basic::load_time_zone(THD *thd, const LEX_CSTRING *tz_name) { - String str(tz_name->str, &my_charset_latin1); + String str(tz_name->str, strlen(tz_name->str), &my_charset_latin1); time_zone= my_tz_find(thd, &str); return (time_zone == NULL); @@ -1546,7 +1546,9 @@ end: if (sql_command_set) { - WSREP_TO_ISOLATION_END; +#ifdef WITH_WSREP + wsrep_to_isolation_end(thd); +#endif thd->lex->sql_command = sql_command_save; } diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index f8573629dcf..cf27e9b7326 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -43,12 +43,12 @@ const TABLE_FIELD_TYPE event_table_fields[ET_FIELD_COUNT] = { { STRING_WITH_LEN("db") }, { STRING_WITH_LEN("char(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("name") }, { STRING_WITH_LEN("char(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("body") }, @@ -57,8 +57,8 @@ const TABLE_FIELD_TYPE event_table_fields[ET_FIELD_COUNT] = }, { { STRING_WITH_LEN("definer") }, - { STRING_WITH_LEN("char(") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("varchar(") }, + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("execute_at") }, @@ -131,7 +131,7 @@ const TABLE_FIELD_TYPE event_table_fields[ET_FIELD_COUNT] = { { STRING_WITH_LEN("comment") }, { STRING_WITH_LEN("char(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("originator") }, @@ -146,17 +146,17 @@ const TABLE_FIELD_TYPE event_table_fields[ET_FIELD_COUNT] = { { STRING_WITH_LEN("character_set_client") }, { STRING_WITH_LEN("char(32)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("collation_connection") }, { STRING_WITH_LEN("char(32)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("db_collation") }, { STRING_WITH_LEN("char(32)") }, - { STRING_WITH_LEN("utf8") } + { 
STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("body_utf8") }, @@ -341,31 +341,27 @@ mysql_event_fill_row(THD *thd, } fields[ET_FIELD_CHARACTER_SET_CLIENT]->set_notnull(); - rs|= fields[ET_FIELD_CHARACTER_SET_CLIENT]->store( - thd->variables.character_set_client->csname, - strlen(thd->variables.character_set_client->csname), - system_charset_info); + rs|= fields[ET_FIELD_CHARACTER_SET_CLIENT]-> + store(&thd->variables.character_set_client->cs_name, + system_charset_info); fields[ET_FIELD_COLLATION_CONNECTION]->set_notnull(); - rs|= fields[ET_FIELD_COLLATION_CONNECTION]->store( - thd->variables.collation_connection->name, - strlen(thd->variables.collation_connection->name), - system_charset_info); + rs|= fields[ET_FIELD_COLLATION_CONNECTION]-> + store(&thd->variables.collation_connection->coll_name, + system_charset_info); { CHARSET_INFO *db_cl= get_default_db_collation(thd, et->dbname.str); fields[ET_FIELD_DB_COLLATION]->set_notnull(); - rs|= fields[ET_FIELD_DB_COLLATION]->store(db_cl->name, - strlen(db_cl->name), + rs|= fields[ET_FIELD_DB_COLLATION]->store(&db_cl->coll_name, system_charset_info); } if (et->body_changed) { fields[ET_FIELD_BODY_UTF8]->set_notnull(); - rs|= fields[ET_FIELD_BODY_UTF8]->store(sp->m_body_utf8.str, - sp->m_body_utf8.length, + rs|= fields[ET_FIELD_BODY_UTF8]->store(&sp->m_body_utf8, system_charset_info); } diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc index d2a168e538e..80d2f9c9fe4 100644 --- a/sql/event_parse_data.cc +++ b/sql/event_parse_data.cc @@ -480,7 +480,7 @@ Event_parse_data::report_bad_value(const char *item_name, Item *bad_item) { char buff[120]; String str(buff,(uint32) sizeof(buff), system_charset_info); - String *str2= bad_item->is_fixed() ? bad_item->val_str(&str) : NULL; + String *str2= bad_item->fixed() ? bad_item->val_str(&str) : NULL; my_error(ER_WRONG_VALUE, MYF(0), item_name, str2? 
str2->c_ptr_safe():"NULL"); } diff --git a/sql/event_queue.cc b/sql/event_queue.cc index c4604b63084..ebd2dfeefd6 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -516,8 +516,10 @@ Event_queue::empty_queue() uint i; DBUG_ENTER("Event_queue::empty_queue"); DBUG_PRINT("enter", ("Purging the queue. %u element(s)", queue.elements)); - sql_print_information("Event Scheduler: Purging the queue. %u events", - queue.elements); + + if (queue.elements) + sql_print_information("Event Scheduler: Purging the queue. %u events", + queue.elements); /* empty the queue */ for (i= queue_first_element(&queue); i <= queue_last_element(&queue); diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index 9015c1b2655..97529bd9809 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -89,11 +89,11 @@ Event_worker_thread::print_warnings(THD *thd, Event_job_data *et) prefix.append(STRING_WITH_LEN("Event Scheduler: [")); prefix.append(et->definer.str, et->definer.length, system_charset_info); - prefix.append("][", 2); + prefix.append(STRING_WITH_LEN("][")); prefix.append(et->dbname.str, et->dbname.length, system_charset_info); prefix.append('.'); prefix.append(et->name.str, et->name.length, system_charset_info); - prefix.append("] ", 2); + prefix.append(STRING_WITH_LEN("] ")); Diagnostics_area::Sql_condition_iterator it= thd->get_stmt_da()->sql_conditions(); diff --git a/sql/events.cc b/sql/events.cc index 6fe0974430c..f06068e84e8 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -742,14 +742,11 @@ send_show_create_event(THD *thd, Event_timed *et, Protocol *protocol) protocol->store(tz_name->ptr(), tz_name->length(), system_charset_info); protocol->store(show_str.ptr(), show_str.length(), et->creation_ctx->get_client_cs()); - protocol->store(et->creation_ctx->get_client_cs()->csname, - strlen(et->creation_ctx->get_client_cs()->csname), + protocol->store(&et->creation_ctx->get_client_cs()->cs_name, system_charset_info); - 
protocol->store(et->creation_ctx->get_connection_cl()->name, - strlen(et->creation_ctx->get_connection_cl()->name), + protocol->store(&et->creation_ctx->get_connection_cl()->coll_name, system_charset_info); - protocol->store(et->creation_ctx->get_db_cl()->name, - strlen(et->creation_ctx->get_db_cl()->name), + protocol->store(&et->creation_ctx->get_db_cl()->coll_name, system_charset_info); if (protocol->write()) diff --git a/sql/field.cc b/sql/field.cc index 8afbb6fc421..d721bdefe52 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1515,7 +1515,7 @@ bool Field::make_empty_rec_store_default_value(THD *thd, Item *item) Field_num::Field_num(uchar *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg, bool zero_arg, bool unsigned_arg) + decimal_digits_t dec_arg, bool zero_arg, bool unsigned_arg) :Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg), dec(dec_arg),zerofill(zero_arg),unsigned_flag(unsigned_arg) @@ -1768,7 +1768,8 @@ bool Field_num::get_int(CHARSET_INFO *cs, const char *from, size_t len, if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION && check_int(cs, from, len, end, error)) return 1; - return 0; + + return error && get_thd()->count_cuted_fields == CHECK_FIELD_EXPRESSION; out_of_range: set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); @@ -1850,12 +1851,12 @@ String *Field::val_int_as_str(String *val_buffer, bool unsigned_val) Field::Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg, uchar null_bit_arg, utype unireg_check_arg, const LEX_CSTRING *field_name_arg) - :ptr(ptr_arg), invisible(VISIBLE), + :ptr(ptr_arg), null_ptr(null_ptr_arg), table(0), orig_table(0), table_name(0), field_name(*field_name_arg), option_list(0), option_struct(0), key_start(0), part_of_key(0), part_of_key_not_clustered(0), part_of_sortkey(0), - unireg_check(unireg_check_arg), field_length(length_arg), + unireg_check(unireg_check_arg), invisible(VISIBLE), 
field_length(length_arg), null_bit(null_bit_arg), is_created_from_null_item(FALSE), read_stats(NULL), collected_stats(0), vcol_info(0), check_constraint(0), default_value(0) @@ -1905,8 +1906,7 @@ void Field::copy_from_tmp(int row_offset) bool Field::send(Protocol *protocol) { - char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff),charset()); + StringBuffer<MAX_FIELD_WIDTH> tmp(charset()); val_str(&tmp); return protocol->store(tmp.ptr(), tmp.length(), tmp.charset()); } @@ -2345,6 +2345,17 @@ bool Field_str::can_be_substituted_to_equal_item(const Context &ctx, } +void Field_str::change_charset(const DTCollation &new_cs) +{ + if (!has_charset()) + return; + + field_length= (field_length * new_cs.collation->mbmaxlen) / + m_collation.collation->mbmaxlen; + m_collation= new_cs; +} + + void Field_num::make_send_field(Send_field *field) { Field::make_send_field(field); @@ -3301,10 +3312,11 @@ Field *Field_decimal::make_new_field(MEM_ROOT *root, TABLE *new_table, ** Field_new_decimal ****************************************************************************/ -static uint get_decimal_precision(uint len, uint8 dec, bool unsigned_val) +static decimal_digits_t get_decimal_precision(uint len, decimal_digits_t dec, + bool unsigned_val) { uint precision= my_decimal_length_to_precision(len, dec, unsigned_val); - return MY_MIN(precision, DECIMAL_MAX_PRECISION); + return (decimal_digits_t) MY_MIN(precision, DECIMAL_MAX_PRECISION); } Field_new_decimal::Field_new_decimal(uchar *ptr_arg, @@ -3312,7 +3324,7 @@ Field_new_decimal::Field_new_decimal(uchar *ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg,bool zero_arg, + decimal_digits_t dec_arg,bool zero_arg, bool unsigned_arg) :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, @@ -4842,7 +4854,7 @@ int Field_double::store(longlong nr, bool unsigned_val) 1 Value was truncated */ -int truncate_double(double *nr, uint field_length, 
uint dec, +int truncate_double(double *nr, uint field_length, decimal_digits_t dec, bool unsigned_flag, double max_value) { int error= 0; @@ -5468,7 +5480,7 @@ bool Field_timestamp0::send(Protocol *protocol) { MYSQL_TIME ltime; Field_timestamp0::get_date(<ime, date_mode_t(0)); - return protocol->store(<ime, 0); + return protocol->store_datetime(<ime, 0); } @@ -5628,7 +5640,7 @@ bool Field_timestamp_with_dec::send(Protocol *protocol) { MYSQL_TIME ltime; Field_timestamp::get_date(<ime, date_mode_t(0)); - return protocol->store(<ime, dec); + return protocol->store_datetime(<ime, dec); } @@ -6767,7 +6779,7 @@ String *Field_newdate::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { DBUG_ASSERT(marked_for_read()); - val_buffer->alloc(field_length); + val_buffer->alloc(field_length+1); val_buffer->length(field_length); uint32 tmp=(uint32) uint3korr(ptr); int part; @@ -6923,7 +6935,7 @@ bool Field_datetime0::send(Protocol *protocol) { MYSQL_TIME tm; Field_datetime0::get_date(&tm, date_mode_t(0)); - return protocol->store(&tm, 0); + return protocol->store_datetime(&tm, 0); } @@ -6939,7 +6951,7 @@ longlong Field_datetime0::val_int(void) String *Field_datetime0::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { - val_buffer->alloc(field_length); + val_buffer->alloc(field_length+1); val_buffer->length(field_length); DBUG_ASSERT(marked_for_read()); @@ -7051,7 +7063,7 @@ bool Field_datetime_with_dec::send(Protocol *protocol) { MYSQL_TIME ltime; get_date(<ime, date_mode_t(0)); - return protocol->store(<ime, dec); + return protocol->store_datetime(<ime, dec); } @@ -7617,7 +7629,7 @@ void Field_string::sql_rpl_type(String *res) const res->alloced_length(), "char(%u octets) character set %s", field_length, - charset()->csname); + charset()->cs_name.str); res->length(length); } else @@ -7899,6 +7911,19 @@ bool Field_varstring::send(Protocol *protocol) #ifdef HAVE_MEM_CHECK + +/* + Mark the unused part of the varstring as defined. 
+ + This function is only used be Unique when calculating statistics. + + The marking is needed as we write the whole tree to disk in case of + overflows. For using or comparing values the undefined value part + is never used. We could also use bzero() here, but it would be + slower in production environments. + This function is tested by main.stat_tables-enospc +*/ + void Field_varstring::mark_unused_memory_as_defined() { uint used_length __attribute__((unused)) = get_length(); @@ -8072,7 +8097,7 @@ void Field_varstring::sql_rpl_type(String *res) const res->alloced_length(), "varchar(%u octets) character set %s", field_length, - charset()->csname); + charset()->cs_name.str); res->length(length); } else @@ -9418,16 +9443,6 @@ String *Field_set::val_str(String *val_buffer, ulonglong tmp=(ulonglong) Field_enum::val_int(); uint bitnr=0; - /* - Some callers expect *val_buffer to contain the result, - so we assign to it, rather than doing 'return &empty_set_string. - */ - *val_buffer= empty_set_string; - if (tmp == 0) - { - return val_buffer; - } - val_buffer->set_charset(field_charset()); val_buffer->length(0); @@ -9437,8 +9452,7 @@ String *Field_set::val_str(String *val_buffer, { if (val_buffer->length()) val_buffer->append(&field_separator, 1, &my_charset_latin1); - String str(typelib->type_names[bitnr], - typelib->type_lengths[bitnr], + String str(typelib->type_names[bitnr], typelib->type_lengths[bitnr], field_charset()); val_buffer->append(str); } @@ -10430,7 +10444,8 @@ void Column_definition::create_length_to_internal_length_bit() void Column_definition::create_length_to_internal_length_newdecimal() { DBUG_ASSERT(length < UINT_MAX32); - uint prec= get_decimal_precision((uint)length, decimals, flags & UNSIGNED_FLAG); + decimal_digit_t prec= get_decimal_precision((uint)length, decimals, + flags & UNSIGNED_FLAG); pack_length= my_decimal_get_binary_size(prec, decimals); } @@ -10766,24 +10781,24 @@ bool Field_vers_trx_id::test_if_equality_guarantees_uniqueness(const 
Item* item) Column_definition_attributes::Column_definition_attributes(const Field *field) :length(field->character_octet_length() / field->charset()->mbmaxlen), - decimals(field->decimals()), - unireg_check(field->unireg_check), interval(NULL), charset(field->charset()), // May be NULL ptr srid(0), - pack_flag(0) + pack_flag(0), + decimals(field->decimals()), + unireg_check(field->unireg_check) {} Column_definition_attributes:: Column_definition_attributes(const Type_all_attributes &attr) :length(attr.max_length), - decimals(attr.decimals), - unireg_check(Field::NONE), interval(attr.get_typelib()), charset(attr.collation.collation), srid(0), - pack_flag(attr.unsigned_flag ? 0 : FIELDFLAG_DECIMAL) + pack_flag(attr.unsigned_flag ? 0 : FIELDFLAG_DECIMAL), + decimals(attr.decimals), + unireg_check(Field::NONE) {} @@ -11387,7 +11402,7 @@ Field::print_key_part_value(String *out, const uchar* key, uint32 length) */ if (*key) { - out->append(STRING_WITH_LEN("NULL")); + out->append(NULL_clex_str); return; } null_byte++; // Skip null byte diff --git a/sql/field.h b/sql/field.h index 941090ed846..62dbdc00176 100644 --- a/sql/field.h +++ b/sql/field.h @@ -472,7 +472,7 @@ struct ha_field_option_struct; struct st_cache_field; int field_conv(Field *to,Field *from); -int truncate_double(double *nr, uint field_length, uint dec, +int truncate_double(double *nr, uint field_length, decimal_digits_t dec, bool unsigned_flag, double max_value); inline uint get_enum_pack_length(int elements) @@ -662,76 +662,76 @@ public: SIGN_UNSIGNED, SIGN_NOT_APPLICABLE // for non-numeric types }; - uchar m_type_code; // according to Field::binlog_type() /** Retrieve the field metadata for fields. 
*/ - uint16 m_metadata; - uint8 m_metadata_size; - binlog_sign_t m_signedness; CHARSET_INFO *m_cs; // NULL if not relevant TYPELIB *m_enum_typelib; // NULL if not relevant TYPELIB *m_set_typelib; // NULL if not relevant + binlog_sign_t m_signedness; + uint16 m_metadata; + uint8 m_metadata_size; + uchar m_type_code; // according to Field::binlog_type() uchar m_geom_type; // Non-geometry fields can return 0 + Binlog_type_info(uchar type_code, uint16 metadata, uint8 metadata_size) - :m_type_code(type_code), - m_metadata(metadata), - m_metadata_size(metadata_size), - m_signedness(SIGN_NOT_APPLICABLE), - m_cs(NULL), + :m_cs(NULL), m_enum_typelib(NULL), m_set_typelib(NULL), + m_signedness(SIGN_NOT_APPLICABLE), + m_metadata(metadata), + m_metadata_size(metadata_size), + m_type_code(type_code), m_geom_type(0) {}; Binlog_type_info(uchar type_code, uint16 metadata, uint8 metadata_size, binlog_sign_t signedness) - :m_type_code(type_code), - m_metadata(metadata), - m_metadata_size(metadata_size), - m_signedness(signedness), - m_cs(NULL), + : m_cs(NULL), m_enum_typelib(NULL), m_set_typelib(NULL), + m_signedness(signedness), + m_metadata(metadata), + m_metadata_size(metadata_size), + m_type_code(type_code), m_geom_type(0) {}; Binlog_type_info(uchar type_code, uint16 metadata, - uint8 metadata_size, - CHARSET_INFO *cs) - :m_type_code(type_code), - m_metadata(metadata), - m_metadata_size(metadata_size), - m_signedness(SIGN_NOT_APPLICABLE), - m_cs(cs), + uint8 metadata_size, CHARSET_INFO *cs) + :m_cs(cs), m_enum_typelib(NULL), m_set_typelib(NULL), + m_signedness(SIGN_NOT_APPLICABLE), + m_metadata(metadata), + m_metadata_size(metadata_size), + m_type_code(type_code), m_geom_type(0) {}; Binlog_type_info(uchar type_code, uint16 metadata, uint8 metadata_size, CHARSET_INFO *cs, TYPELIB *t_enum, TYPELIB *t_set) - :m_type_code(type_code), - m_metadata(metadata), - m_metadata_size(metadata_size), - m_signedness(SIGN_NOT_APPLICABLE), - m_cs(cs), + :m_cs(cs), m_enum_typelib(t_enum), 
m_set_typelib(t_set), + m_signedness(SIGN_NOT_APPLICABLE), + m_metadata(metadata), + m_metadata_size(metadata_size), + m_type_code(type_code), m_geom_type(0) {}; Binlog_type_info(uchar type_code, uint16 metadata, uint8 metadata_size, CHARSET_INFO *cs, uchar geom_type) - :m_type_code(type_code), - m_metadata(metadata), - m_metadata_size(metadata_size), - m_signedness(SIGN_NOT_APPLICABLE), - m_cs(cs), + :m_cs(cs), m_enum_typelib(NULL), m_set_typelib(NULL), + m_signedness(SIGN_NOT_APPLICABLE), + m_metadata(metadata), + m_metadata_size(metadata_size), + m_type_code(type_code), m_geom_type(geom_type) {}; static void *operator new(size_t size, MEM_ROOT *mem_root) throw () @@ -795,7 +795,6 @@ public: uchar *ptr; // Position to field in record - field_visibility_t invisible; /** Byte where the @c NULL bit is stored inside a record. If this Field is a @c NOT @c NULL field, this member is @c NULL. @@ -830,7 +829,7 @@ public: in more clean way with transition to new text based .frm format. See also comment for Field_timestamp::Field_timestamp(). */ - enum utype { + enum __attribute__((packed)) utype { NONE=0, NEXT_NUMBER=15, // AUTO_INCREMENT TIMESTAMP_OLD_FIELD=18, // TIMESTAMP created before 4.1.3 @@ -841,11 +840,13 @@ public: }; enum imagetype { itRAW, itMBR}; - utype unireg_check; - uint32 field_length; // Length of field - uint32 flags; - uint16 field_index; // field number in fields array - uchar null_bit; // Bit used to test null bit + utype unireg_check; + field_visibility_t invisible; + uint32 field_length; // Length of field + uint32 flags; + field_index_t field_index; // field number in fields array + uchar null_bit; // Bit used to test null bit + /** If true, this field was created in create_tmp_field_from_item from a NULL value. 
This means that the type of the field is just a guess, and the type @@ -1211,7 +1212,7 @@ public: virtual uint16 key_part_flag() const { return 0; } virtual uint16 key_part_length_bytes() const { return 0; } virtual uint32 key_length() const { return pack_length(); } - virtual const Type_handler *type_handler() const= 0; + virtual const Type_handler *type_handler() const = 0; virtual enum_field_types type() const { return type_handler()->field_type(); @@ -1341,7 +1342,7 @@ public: { memcpy(ptr, val, len); } - virtual uint decimals() const { return 0; } + virtual decimal_digits_t decimals() const { return 0; } virtual Information_schema_numeric_attributes information_schema_numeric_attributes() const { @@ -1625,6 +1626,8 @@ public: virtual longlong val_time_packed(THD *thd); virtual const TYPELIB *get_typelib() const { return NULL; } virtual CHARSET_INFO *charset() const= 0; + /* returns TRUE if the new charset differs. */ + virtual void change_charset(const DTCollation &new_cs) {} virtual const DTCollation &dtcollation() const= 0; virtual CHARSET_INFO *charset_for_protocol(void) const { return binary() ? 
&my_charset_bin : charset(); } @@ -2023,12 +2026,12 @@ protected: protocol_send_type_t send_type); public: - const uint8 dec; + const decimal_digits_t dec; bool zerofill,unsigned_flag; // Purify cannot handle bit fields Field_num(uchar *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg, bool zero_arg, bool unsigned_arg); + decimal_digits_t dec_arg, bool zero_arg, bool unsigned_arg); CHARSET_INFO *charset() const override { return DTCollation_numeric::singleton().collation; @@ -2048,7 +2051,7 @@ public: void add_zerofill_and_unsigned(String &res) const; friend class Create_field; void make_send_field(Send_field *) override; - uint decimals() const override { return (uint) dec; } + decimal_digits_t decimals() const override { return dec; } uint size_of() const override { return sizeof(*this); } bool eq_def(const Field *field) const override; Copy_func *get_copy_func(const Field *from) const override @@ -2098,7 +2101,8 @@ public: uchar null_bit_arg, utype unireg_check_arg, const LEX_CSTRING *field_name_arg, const DTCollation &collation); - uint decimals() const override { return is_created_from_null_item ? 0 : NOT_FIXED_DEC; } + decimal_digits_t decimals() const override + { return is_created_from_null_item ? 
0 : NOT_FIXED_DEC; } int save_in_field(Field *to) override { return save_in_field_str(to); } bool memcpy_field_possible(const Field *from) const override { @@ -2119,6 +2123,7 @@ public: { return m_collation; } + void change_charset(const DTCollation &new_cs) override; bool binary() const override { return field_charset() == &my_charset_bin; } uint32 max_display_length() const override { return field_length; } uint32 character_octet_length() const override { return field_length; } @@ -2245,7 +2250,7 @@ public: Field_real(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg, bool zero_arg, bool unsigned_arg) + decimal_digits_t dec_arg, bool zero_arg, bool unsigned_arg) :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, dec_arg, zero_arg, unsigned_arg), not_fixed(dec_arg >= FLOATING_POINT_DECIMALS) @@ -2297,7 +2302,7 @@ public: Field_decimal(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg,bool zero_arg,bool unsigned_arg) + decimal_digits_t dec_arg, bool zero_arg,bool unsigned_arg) :Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, dec_arg, zero_arg, unsigned_arg) @@ -2341,8 +2346,8 @@ public: class Field_new_decimal final :public Field_num { public: /* The maximum number of decimal digits can be stored */ - uint precision; - uint bin_size; + decimal_digits_t precision; + uint32 bin_size; /* Constructors take max_length of the field as a parameter - not the precision as the number of decimal digits allowed. 
@@ -2353,7 +2358,7 @@ public: uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg, bool zero_arg, bool unsigned_arg); + decimal_digits_t dec_arg, bool zero_arg, bool unsigned_arg); const Type_handler *type_handler() const override { return &type_handler_newdecimal; } enum ha_base_keytype key_type() const override { return HA_KEYTYPE_BINARY; } @@ -2428,7 +2433,7 @@ public: return Information_schema_numeric_attributes(precision, dec); } uint size_of() const override { return sizeof *this; } - uint32 pack_length() const override { return (uint32) bin_size; } + uint32 pack_length() const override { return bin_size; } uint pack_length_from_metadata(uint field_metadata) const override; uint row_pack_length() const override { return pack_length(); } bool compatible_field_size(uint field_metadata, const Relay_log_info *rli, @@ -2835,7 +2840,7 @@ public: Field_float(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg,bool zero_arg,bool unsigned_arg) + decimal_digits_t dec_arg,bool zero_arg,bool unsigned_arg) :Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, dec_arg, zero_arg, unsigned_arg) @@ -2844,7 +2849,7 @@ public: dec_arg= NOT_FIXED_DEC; } Field_float(uint32 len_arg, bool maybe_null_arg, - const LEX_CSTRING *field_name_arg, uint8 dec_arg) + const LEX_CSTRING *field_name_arg, decimal_digits_t dec_arg) :Field_real((uchar*) 0, len_arg, maybe_null_arg ? 
(uchar*) "": 0, (uint) 0, NONE, field_name_arg, dec_arg, 0, 0) { @@ -2883,7 +2888,7 @@ public: Field_double(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg,bool zero_arg,bool unsigned_arg) + decimal_digits_t dec_arg,bool zero_arg,bool unsigned_arg) :Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, dec_arg, zero_arg, unsigned_arg) @@ -2892,7 +2897,7 @@ public: dec_arg= NOT_FIXED_DEC; } Field_double(uint32 len_arg, bool maybe_null_arg, - const LEX_CSTRING *field_name_arg, uint8 dec_arg) + const LEX_CSTRING *field_name_arg, decimal_digits_t dec_arg) :Field_real((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "" : 0, (uint) 0, NONE, field_name_arg, dec_arg, 0, 0) { @@ -2901,7 +2906,7 @@ public: } Field_double(uint32 len_arg, bool maybe_null_arg, const LEX_CSTRING *field_name_arg, - uint8 dec_arg, bool not_fixed_arg) + decimal_digits_t dec_arg, bool not_fixed_arg) :Field_real((uchar*) 0, len_arg, maybe_null_arg ? 
(uchar*) "" : 0, (uint) 0, NONE, field_name_arg, dec_arg, 0, 0) { @@ -3256,21 +3261,22 @@ public: */ class Field_timestamp_with_dec :public Field_timestamp { protected: - uint dec; + decimal_digits_t dec; public: Field_timestamp_with_dec(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - TABLE_SHARE *share, uint dec_arg) : + TABLE_SHARE *share, decimal_digits_t dec_arg) : Field_timestamp(ptr_arg, - MAX_DATETIME_WIDTH + dec_arg + MY_TEST(dec_arg), null_ptr_arg, + MAX_DATETIME_WIDTH + dec_arg + MY_TEST(dec_arg), + null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, share), dec(dec_arg) { DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS); } - uint decimals() const override { return dec; } + decimal_digits_t decimals() const override { return dec; } enum ha_base_keytype key_type() const override { return HA_KEYTYPE_BINARY; } uchar *pack(uchar *to, const uchar *from, uint max_length) override { return Field::pack(to, from, max_length); } @@ -3301,7 +3307,7 @@ public: uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - TABLE_SHARE *share, uint dec_arg) : + TABLE_SHARE *share, decimal_digits_t dec_arg) : Field_timestamp_with_dec(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, share, dec_arg) { @@ -3330,7 +3336,7 @@ public: uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - TABLE_SHARE *share, uint dec_arg) : + TABLE_SHARE *share, decimal_digits_t dec_arg) : Field_timestamp_with_dec(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, share, dec_arg) {} @@ -3626,19 +3632,19 @@ public: */ class Field_time_with_dec :public Field_time { protected: - uint dec; + decimal_digits_t dec; public: Field_time_with_dec(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint dec_arg) + 
decimal_digits_t dec_arg) :Field_time(ptr_arg, MIN_TIME_WIDTH + dec_arg + MY_TEST(dec_arg), null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg), dec(dec_arg) { DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS); } - uint decimals() const override { return dec; } + decimal_digits_t decimals() const override { return dec; } enum ha_base_keytype key_type() const override { return HA_KEYTYPE_BINARY; } longlong val_int() override; double val_real() override; @@ -3654,8 +3660,9 @@ class Field_time_hires final :public Field_time_with_dec { void store_TIME(const MYSQL_TIME *) override; public: Field_time_hires(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, - enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint dec_arg) + enum utype unireg_check_arg, + const LEX_CSTRING *field_name_arg, + decimal_digits_t dec_arg) :Field_time_with_dec(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, dec_arg) @@ -3686,8 +3693,8 @@ class Field_timef final :public Field_time_with_dec { void store_TIME(const MYSQL_TIME *ltime) override; public: Field_timef(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, - enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint dec_arg) + enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, + decimal_digits_t dec_arg) :Field_time_with_dec(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, dec_arg) @@ -3825,18 +3832,18 @@ public: */ class Field_datetime_with_dec :public Field_datetime { protected: - uint dec; + decimal_digits_t dec; public: Field_datetime_with_dec(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, - const LEX_CSTRING *field_name_arg, uint dec_arg) + const LEX_CSTRING *field_name_arg, decimal_digits_t dec_arg) :Field_datetime(ptr_arg, MAX_DATETIME_WIDTH + dec_arg + MY_TEST(dec_arg), null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg), dec(dec_arg) { DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS); } - 
uint decimals() const override final { return dec; } + decimal_digits_t decimals() const override final { return dec; } enum ha_base_keytype key_type() const override final { return HA_KEYTYPE_BINARY; } void make_send_field(Send_field *field) override final; bool send(Protocol *protocol) override final; @@ -3866,7 +3873,7 @@ class Field_datetime_hires final :public Field_datetime_with_dec { public: Field_datetime_hires(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, - const LEX_CSTRING *field_name_arg, uint dec_arg) + const LEX_CSTRING *field_name_arg, decimal_digits_t dec_arg) :Field_datetime_with_dec(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, dec_arg) { @@ -3897,7 +3904,7 @@ class Field_datetimef final :public Field_datetime_with_dec { public: Field_datetimef(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, - const LEX_CSTRING *field_name_arg, uint dec_arg) + const LEX_CSTRING *field_name_arg, decimal_digits_t dec_arg) :Field_datetime_with_dec(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, dec_arg) {} @@ -3941,7 +3948,7 @@ static inline Field_timestamp * new_Field_timestamp(MEM_ROOT *root,uchar *ptr, uchar *null_ptr, uchar null_bit, enum Field::utype unireg_check, const LEX_CSTRING *field_name, - TABLE_SHARE *share, uint dec) + TABLE_SHARE *share, decimal_digits_t dec) { if (dec==0) return new (root) @@ -3957,7 +3964,7 @@ new_Field_timestamp(MEM_ROOT *root,uchar *ptr, uchar *null_ptr, uchar null_bit, static inline Field_time * new_Field_time(MEM_ROOT *root, uchar *ptr, uchar *null_ptr, uchar null_bit, enum Field::utype unireg_check, const LEX_CSTRING *field_name, - uint dec) + decimal_digits_t dec) { if (dec == 0) return new (root) @@ -3972,7 +3979,7 @@ new_Field_time(MEM_ROOT *root, uchar *ptr, uchar *null_ptr, uchar null_bit, static inline Field_datetime * new_Field_datetime(MEM_ROOT *root, uchar *ptr, uchar *null_ptr, uchar null_bit, 
enum Field::utype unireg_check, - const LEX_CSTRING *field_name, uint dec) + const LEX_CSTRING *field_name, decimal_digits_t dec) { if (dec == 0) return new (root) @@ -4147,7 +4154,7 @@ public: { return (uint32) field_length + sort_suffix_length(); } - virtual uint32 sort_suffix_length() const override + uint32 sort_suffix_length() const override { return (field_charset() == &my_charset_bin ? length_bytes : 0); } @@ -4492,7 +4499,7 @@ public: uint32 sort_length() const override; uint32 sort_suffix_length() const override; uint32 value_length() override { return get_length(); } - virtual uint32 max_data_length() const override + uint32 max_data_length() const override { return (uint32) (((ulonglong) 1 << (packlength*8)) -1); } @@ -4789,7 +4796,7 @@ public: bool has_charset() const override { return true; } /* enum and set are sorted as integers */ CHARSET_INFO *sort_charset() const override { return &my_charset_bin; } - uint decimals() const override { return 0; } + decimal_digits_t decimals() const override { return 0; } const TYPELIB *get_typelib() const override { return typelib; } uchar *pack(uchar *to, const uchar *from, uint max_length) override; @@ -4828,15 +4835,11 @@ private: class Field_set final :public Field_enum { public: Field_set(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, - uchar null_bit_arg, - enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg, - uint32 packlength_arg, + uchar null_bit_arg, enum utype unireg_check_arg, + const LEX_CSTRING *field_name_arg, uint32 packlength_arg, const TYPELIB *typelib_arg, const DTCollation &collation) - :Field_enum(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, - packlength_arg, - typelib_arg, collation), - empty_set_string("", 0, collation.collation) + :Field_enum(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, + field_name_arg, packlength_arg, typelib_arg, collation) { flags=(flags & ~ENUM_FLAG) | SET_FLAG; } @@ -4859,8 +4862,6 @@ public: { 
return &type_handler_set; } bool has_charset() const override { return true; } Binlog_type_info binlog_type_info() const override; -private: - const String empty_set_string; }; @@ -5131,20 +5132,20 @@ public: max number of characters. */ ulonglong length; - uint decimals; - Field::utype unireg_check; - const TYPELIB *interval; // Which interval to use + const TYPELIB *interval; CHARSET_INFO *charset; uint32 srid; - uint pack_flag; + uint32 pack_flag; + decimal_digits_t decimals; + Field::utype unireg_check; Column_definition_attributes() :length(0), - decimals(0), - unireg_check(Field::NONE), interval(NULL), charset(&my_charset_bin), srid(0), - pack_flag(0) + pack_flag(0), + decimals(0), + unireg_check(Field::NONE) { } Column_definition_attributes(const Field *field); Column_definition_attributes(const Type_all_attributes &attr); @@ -5694,7 +5695,8 @@ public: LEX_CSTRING table_name, org_table_name; LEX_CSTRING col_name, org_col_name; ulong length; - uint flags, decimals; + uint flags; + decimal_digits_t decimals; Send_field(Field *field) { field->make_send_field(this); diff --git a/sql/field_conv.cc b/sql/field_conv.cc index ff6d60e7626..f7e303be6d3 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -369,7 +369,7 @@ void Field::do_field_string(Copy_field *copy) res.length(0U); copy->from_field->val_str(&res); - copy->to_field->store(res.c_ptr_quick(), res.length(), res.charset()); + copy->to_field->store(res.ptr(), res.length(), res.charset()); } @@ -385,14 +385,14 @@ void Field_enum::do_field_enum(Copy_field *copy) static void do_field_varbinary_pre50(Copy_field *copy) { char buff[MAX_FIELD_WIDTH]; - copy->tmp.set_quick(buff,sizeof(buff),copy->tmp.charset()); + copy->tmp.set_buffer_if_not_allocated(buff,sizeof(buff),copy->tmp.charset()); copy->from_field->val_str(©->tmp); /* Use the same function as in 4.1 to trim trailing spaces */ - size_t length= my_lengthsp_8bit(&my_charset_bin, copy->tmp.c_ptr_quick(), + size_t length= my_lengthsp_8bit(&my_charset_bin, 
copy->tmp.ptr(), copy->from_field->field_length); - copy->to_field->store(copy->tmp.c_ptr_quick(), length, + copy->to_field->store(copy->tmp.ptr(), length, copy->tmp.charset()); } diff --git a/sql/filesort.cc b/sql/filesort.cc index 4e5aeccb78e..bf5520955c9 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -93,14 +93,16 @@ static uint32 read_keypart_length(const uchar *from, uint bytes) // @param sortlen [Maximum] length of the sort key void Sort_param::init_for_filesort(uint sortlen, TABLE *table, - ha_rows maxrows, bool sort_positions) + ha_rows maxrows, Filesort *filesort) { DBUG_ASSERT(addon_fields == NULL); sort_length= sortlen; ref_length= table->file->ref_length; + accepted_rows= filesort->accepted_rows; + if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) && - !table->fulltext_searched && !sort_positions) + !table->fulltext_searched && !filesort->sort_positions) { /* Get the descriptors of all fields whose values are appended @@ -196,16 +198,15 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, size_t memory_available= (size_t)thd->variables.sortbuff_size; uint maxbuffer; Merge_chunk *buffpek; - ha_rows num_rows= HA_POS_ERROR; + ha_rows num_rows= HA_POS_ERROR, not_used=0; IO_CACHE tempfile, buffpek_pointers, *outfile; Sort_param param; bool allow_packing_for_sortkeys; Bounded_queue<uchar, uchar> pq; SQL_SELECT *const select= filesort->select; ha_rows max_rows= filesort->limit; - uint s_length= 0; + uint s_length= 0, sort_len; Sort_keys *sort_keys; - DBUG_ENTER("filesort"); if (!(sort_keys= filesort->make_sortorder(thd, join, first_table_bit))) @@ -247,9 +248,13 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, sort->found_rows= HA_POS_ERROR; param.sort_keys= sort_keys; - uint sort_len= sortlength(thd, sort_keys, &allow_packing_for_sortkeys); + sort_len= sortlength(thd, sort_keys, &allow_packing_for_sortkeys); + param.init_for_filesort(sort_len, table, max_rows, filesort); + if (!param.accepted_rows) + 
param.accepted_rows= ¬_used; - param.init_for_filesort(sort_len, table, max_rows, filesort->sort_positions); + param.set_all_read_bits= filesort->set_all_read_bits; + param.unpack= filesort->unpack; sort->addon_fields= param.addon_fields; sort->sort_keys= param.sort_keys; @@ -477,7 +482,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, thd->killed == ABORT_QUERY ? "" : thd->get_stmt_da()->message()); - if (global_system_variables.log_warnings > 1) + if ((thd->killed == ABORT_QUERY || kill_errno) && + global_system_variables.log_warnings > 1) { sql_print_warning("%s, host: %s, user: %s, thread: %lu, query: %-.4096s", ER_THD(thd, ER_FILSORT_ABORT), @@ -664,24 +670,25 @@ const char* dbug_print_table_row(TABLE *table) output.length(0); output.append(table->alias); - output.append("("); + output.append('('); bool first= true; for (pfield= table->field; *pfield ; pfield++) { + const LEX_CSTRING *name; if (table->read_set && !bitmap_is_set(table->read_set, (*pfield)->field_index)) continue; if (first) first= false; else - output.append(","); + output.append(','); - output.append((*pfield)->field_name.str ? - (*pfield)->field_name.str: "NULL"); + name= (*pfield)->field_name.str ? 
&(*pfield)->field_name: &NULL_clex_str; + output.append(name); } - output.append(")=("); + output.append(STRING_WITH_LEN(")=(")); first= true; for (pfield= table->field; *pfield ; pfield++) @@ -694,10 +701,10 @@ const char* dbug_print_table_row(TABLE *table) if (first) first= false; else - output.append(","); + output.append(','); if (field->is_null()) - output.append("NULL"); + output.append(&NULL_clex_str); else { if (field->type() == MYSQL_TYPE_BIT) @@ -707,7 +714,7 @@ const char* dbug_print_table_row(TABLE *table) output.append(tmp.ptr(), tmp.length()); } } - output.append(")"); + output.append(')'); return output.c_ptr_safe(); } @@ -893,6 +900,8 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, goto err; } + if (param->set_all_read_bits) + sort_form->column_bitmaps_set(save_read_set, save_write_set); DEBUG_SYNC(thd, "after_index_merge_phase1"); for (;;) @@ -900,7 +909,11 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, if (quick_select) error= select->quick->get_next(); else /* Not quick-select */ + { error= file->ha_rnd_next(sort_form->record[0]); + if (param->unpack) + param->unpack(sort_form); + } if (unlikely(error)) break; file->position(sort_form->record[0]); @@ -969,6 +982,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, idx++; } num_records++; + (*param->accepted_rows)++; } /* It does not make sense to read more keys in case of a fatal error */ @@ -1108,7 +1122,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, Sort_param *param) const { CHARSET_INFO *cs= item->collation.collation; - bool maybe_null= item->maybe_null; + bool maybe_null= item->maybe_null(); if (maybe_null) *to++= 1; @@ -1178,7 +1192,7 @@ Type_handler_int_result::make_sort_key_part(uchar *to, Item *item, Sort_param *param) const { longlong value= item->val_int_result(); - make_sort_key_longlong(to, item->maybe_null, item->null_value, + make_sort_key_longlong(to, 
item->maybe_null(), item->null_value, item->unsigned_flag, value); } @@ -1194,13 +1208,13 @@ Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item, static const Temporal::Options opt(TIME_INVALID_DATES, TIME_FRAC_NONE); if (item->get_date_result(current_thd, &buf, opt)) { - DBUG_ASSERT(item->maybe_null); + DBUG_ASSERT(item->maybe_null()); DBUG_ASSERT(item->null_value); - make_sort_key_longlong(to, item->maybe_null, true, + make_sort_key_longlong(to, item->maybe_null(), true, item->unsigned_flag, 0); } else - make_sort_key_longlong(to, item->maybe_null, false, + make_sort_key_longlong(to, item->maybe_null(), false, item->unsigned_flag, pack_time(&buf)); } @@ -1216,11 +1230,11 @@ Type_handler_timestamp_common::make_sort_key_part(uchar *to, Item *item, if (native.is_null() || native.is_zero_datetime()) { // NULL or '0000-00-00 00:00:00' - bzero(to, item->maybe_null ? binlen + 1 : binlen); + bzero(to, item->maybe_null() ? binlen + 1 : binlen); } else { - if (item->maybe_null) + if (item->maybe_null()) *to++= 1; if (native.length() != binlen) { @@ -1304,7 +1318,7 @@ Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item, Sort_param *param) const { my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); - if (item->maybe_null) + if (item->maybe_null()) { if (item->null_value) { @@ -1324,7 +1338,7 @@ Type_handler_real_result::make_sort_key_part(uchar *to, Item *item, Sort_param *param) const { double value= item->val_result(); - if (item->maybe_null) + if (item->maybe_null()) { if (item->null_value) { @@ -2266,7 +2280,7 @@ sortlength(THD *thd, Sort_keys *sort_keys, bool *allow_packing_for_sortkeys) thd->variables.max_sort_length)); } - if ((sortorder->maybe_null= sortorder->item->maybe_null)) + if ((sortorder->maybe_null= sortorder->item->maybe_null())) nullable_cols++; // Place for NULL marker } if (sortorder->is_variable_sized()) @@ -2569,7 +2583,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, 
Sort_param *param) const { CHARSET_INFO *cs= item->collation.collation; - bool maybe_null= item->maybe_null; + bool maybe_null= item->maybe_null(); if (maybe_null) *to++= 1; @@ -2608,7 +2622,7 @@ Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item, Sort_param *param) const { longlong value= item->val_int_result(); - return make_packed_sort_key_longlong(to, item->maybe_null, + return make_packed_sort_key_longlong(to, item->maybe_null(), item->null_value, item->unsigned_flag, value, sort_field); } @@ -2620,7 +2634,7 @@ Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item, Sort_param *param) const { my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); - if (item->maybe_null) + if (item->maybe_null()) { if (item->null_value) { @@ -2642,7 +2656,7 @@ Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item, Sort_param *param) const { double value= item->val_result(); - if (item->maybe_null) + if (item->maybe_null()) { if (item->null_value) { @@ -2668,12 +2682,12 @@ Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item, static const Temporal::Options opt(TIME_INVALID_DATES, TIME_FRAC_NONE); if (item->get_date_result(current_thd, &buf, opt)) { - DBUG_ASSERT(item->maybe_null); + DBUG_ASSERT(item->maybe_null()); DBUG_ASSERT(item->null_value); - return make_packed_sort_key_longlong(to, item->maybe_null, true, + return make_packed_sort_key_longlong(to, item->maybe_null(), true, item->unsigned_flag, 0, sort_field); } - return make_packed_sort_key_longlong(to, item->maybe_null, false, + return make_packed_sort_key_longlong(to, item->maybe_null(), false, item->unsigned_flag, pack_time(&buf), sort_field); } @@ -2690,7 +2704,7 @@ Type_handler_timestamp_common::make_packed_sort_key_part(uchar *to, Item *item, if (native.is_null() || native.is_zero_datetime()) { // NULL or '0000-00-00 00:00:00' - if (item->maybe_null) + if (item->maybe_null()) { *to++=0; return 0; @@ -2703,7 +2717,7 @@ 
Type_handler_timestamp_common::make_packed_sort_key_part(uchar *to, Item *item, } else { - if (item->maybe_null) + if (item->maybe_null()) *to++= 1; if (native.length() != binlen) { @@ -3027,7 +3041,7 @@ static uint make_sortkey(Sort_param *param, uchar *to) sort_field->item->type_handler()->make_sort_key_part(to, sort_field->item, sort_field, param); - if ((maybe_null= sort_field->item->maybe_null)) + if ((maybe_null= sort_field->item->maybe_null())) to++; } @@ -3080,7 +3094,7 @@ static uint make_packed_sortkey(Sort_param *param, uchar *to) length= item->type_handler()->make_packed_sort_key_part(to, item, sort_field, param); - if ((maybe_null= sort_field->item->maybe_null)) + if ((maybe_null= sort_field->item->maybe_null())) to++; } to+= length; diff --git a/sql/filesort.h b/sql/filesort.h index 9f71da02c96..ebb521e2adc 100644 --- a/sql/filesort.h +++ b/sql/filesort.h @@ -45,33 +45,47 @@ public: ha_rows limit; /** ORDER BY list with some precalculated info for filesort */ SORT_FIELD *sortorder; + /* Used with ROWNUM. Contains the number of rows filesort has found so far */ + ha_rows *accepted_rows; /** select to use for getting records */ SQL_SELECT *select; + /** TRUE <=> free select on destruction */ bool own_select; - /** true means we are using Priority Queue for order by with limit. */ + /** TRUE means we are using Priority Queue for order by with limit. */ bool using_pq; - /* TRUE means sort operation must produce table rowids. FALSE means that it halso has an option of producing {sort_key, addon_fields} pairs. */ bool sort_positions; + /* + TRUE means all the fields of table of whose bitmap read_set is set + need to be read while reading records in the sort buffer. 
+ FALSE otherwise + */ + bool set_all_read_bits; Filesort_tracker *tracker; Sort_keys *sort_keys; + /* Unpack temp table columns to base table columns*/ + void (*unpack)(TABLE *); + Filesort(ORDER *order_arg, ha_rows limit_arg, bool sort_positions_arg, SQL_SELECT *select_arg): order(order_arg), limit(limit_arg), sortorder(NULL), + accepted_rows(0), select(select_arg), own_select(false), using_pq(false), sort_positions(sort_positions_arg), - sort_keys(NULL) + set_all_read_bits(false), + sort_keys(NULL), + unpack(NULL) { DBUG_ASSERT(order); }; diff --git a/sql/group_by_handler.cc b/sql/group_by_handler.cc index 71703cf09b6..7b998494af9 100644 --- a/sql/group_by_handler.cc +++ b/sql/group_by_handler.cc @@ -58,7 +58,7 @@ int Pushdown_query::execute(JOIN *join) { max_limit= join->unit->lim.get_select_limit(); if (join->unit->fake_select_lex) - reset_item= &join->unit->fake_select_lex->select_limit; + reset_item= &join->unit->fake_select_lex->limit_params.select_limit; } while (!(err= handler->next_row())) diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 7298dcd15c6..688db6403b4 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -59,6 +59,7 @@ #include "sql_show.h" // append_identifier #include "sql_admin.h" // SQL_ADMIN_MSG_TEXT_SIZE #include "sql_select.h" +#include "ddl_log.h" #include "debug_sync.h" @@ -303,8 +304,10 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) void ha_partition::ha_partition_init() { + DBUG_ENTER("ha_partition::ha_partition_init"); init_alloc_root(PSI_INSTRUMENT_ME, &m_mem_root, 512, 512, MYF(0)); init_handler_variables(); + DBUG_VOID_RETURN; } /* @@ -456,14 +459,12 @@ void ha_partition::init_handler_variables() #endif } - -const char *ha_partition::table_type() const +const char *ha_partition::real_table_type() const { // we can do this since we only support a single engine type return m_file[0]->table_type(); } - /* Destructor method @@ -940,7 +941,7 @@ int ha_partition::drop_partitions(const char 
*path) DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff)); if (unlikely((ret_error= file->delete_table(part_name_buff)))) error= ret_error; - if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry-> + if (unlikely(ddl_log_increment_phase(sub_elem->log_entry-> entry_pos))) error= 1; } while (++j < num_subparts); @@ -957,7 +958,7 @@ int ha_partition::drop_partitions(const char *path) DBUG_PRINT("info", ("Drop partition %s", part_name_buff)); if (unlikely((ret_error= file->delete_table(part_name_buff)))) error= ret_error; - if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry-> + if (unlikely(ddl_log_increment_phase(part_elem->log_entry-> entry_pos))) error= 1; } @@ -968,7 +969,7 @@ int ha_partition::drop_partitions(const char *path) part_elem->part_state= PART_IS_DROPPED; } } while (++i < num_parts); - (void) sync_ddl_log(); + (void) ddl_log_sync(); DBUG_RETURN(error); } @@ -1049,7 +1050,7 @@ int ha_partition::rename_partitions(const char *path) DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff)); if (unlikely((ret_error= file->delete_table(norm_name_buff)))) error= ret_error; - else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry-> + else if (unlikely(ddl_log_increment_phase(sub_elem->log_entry-> entry_pos))) error= 1; else @@ -1070,7 +1071,7 @@ int ha_partition::rename_partitions(const char *path) DBUG_PRINT("info", ("Delete partition %s", norm_name_buff)); if (unlikely((ret_error= file->delete_table(norm_name_buff)))) error= ret_error; - else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry-> + else if (unlikely(ddl_log_increment_phase(part_elem->log_entry-> entry_pos))) error= 1; else @@ -1078,7 +1079,7 @@ int ha_partition::rename_partitions(const char *path) } } } while (++i < temp_partitions); - (void) sync_ddl_log(); + (void) ddl_log_sync(); } i= 0; do @@ -1131,10 +1132,10 @@ int ha_partition::rename_partitions(const char *path) DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff)); if 
(unlikely((ret_error= file->delete_table(norm_name_buff)))) error= ret_error; - else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry-> + else if (unlikely(ddl_log_increment_phase(sub_elem->log_entry-> entry_pos))) error= 1; - (void) sync_ddl_log(); + (void) ddl_log_sync(); } file= m_new_file[part]; if (unlikely((ret_error= @@ -1149,7 +1150,7 @@ int ha_partition::rename_partitions(const char *path) if (unlikely((ret_error= file->ha_rename_table(part_name_buff, norm_name_buff)))) error= ret_error; - else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry-> + else if (unlikely(ddl_log_increment_phase(sub_elem->log_entry-> entry_pos))) error= 1; else @@ -1178,10 +1179,10 @@ int ha_partition::rename_partitions(const char *path) DBUG_PRINT("info", ("Delete partition %s", norm_name_buff)); if (unlikely((ret_error= file->delete_table(norm_name_buff)))) error= ret_error; - else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry-> + else if (unlikely(ddl_log_increment_phase(part_elem->log_entry-> entry_pos))) error= 1; - (void) sync_ddl_log(); + (void) ddl_log_sync(); } file= m_new_file[i]; DBUG_PRINT("info", ("Rename partition from %s to %s", @@ -1189,7 +1190,7 @@ int ha_partition::rename_partitions(const char *path) if (unlikely((ret_error= file->ha_rename_table(part_name_buff, norm_name_buff)))) error= ret_error; - else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry-> + else if (unlikely(ddl_log_increment_phase(part_elem->log_entry-> entry_pos))) error= 1; else @@ -1198,7 +1199,7 @@ int ha_partition::rename_partitions(const char *path) } } } while (++i < num_parts); - (void) sync_ddl_log(); + (void) ddl_log_sync(); DBUG_RETURN(error); } @@ -1210,9 +1211,21 @@ int ha_partition::rename_partitions(const char *path) #define ASSIGN_KEYCACHE_PARTS 5 #define PRELOAD_KEYS_PARTS 6 -static const char *opt_op_name[]= {NULL, - "optimize", "analyze", "check", "repair", - "assign_to_keycache", "preload_keys"}; +static const LEX_CSTRING 
opt_op_name[]= +{ + { NULL, 0}, + { STRING_WITH_LEN("optimize") }, + { STRING_WITH_LEN("analyze") }, + { STRING_WITH_LEN("check") }, + { STRING_WITH_LEN("repair") }, + { STRING_WITH_LEN("assign_to_keycache") }, + { STRING_WITH_LEN("preload_keys") } +}; + + +static const LEX_CSTRING msg_warning= { STRING_WITH_LEN("warning") }; +#define msg_error error_clex_str + /* Optimize table @@ -1416,14 +1429,14 @@ int ha_partition::handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, TODO: move this into the handler, or rewrite mysql_admin_table. */ bool print_admin_msg(THD* thd, uint len, - const char* msg_type, - const char* db_name, String &table_name, - const char* op_name, const char *fmt, ...) + const LEX_CSTRING *msg_type, + const char* db_name, String &table_name, + const LEX_CSTRING *op_name, const char *fmt, ...) ATTRIBUTE_FORMAT(printf, 7, 8); bool print_admin_msg(THD* thd, uint len, - const char* msg_type, - const char* db_name, String &table_name, - const char* op_name, const char *fmt, ...) + const LEX_CSTRING *msg_type, + const char* db_name, String &table_name, + const LEX_CSTRING *op_name, const char *fmt, ...) 
{ va_list args; Protocol *protocol= thd->protocol; @@ -1532,9 +1545,9 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, error != HA_ADMIN_TRY_ALTER && error != HA_ERR_TABLE_READONLY) { - print_admin_msg(thd, MYSQL_ERRMSG_SIZE, "error", + print_admin_msg(thd, MYSQL_ERRMSG_SIZE, &msg_error, table_share->db.str, table->alias, - opt_op_name[flag], + &opt_op_name[flag], "Subpartition %s returned error", sub_elem->partition_name); } @@ -1559,9 +1572,9 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, error != HA_ADMIN_ALREADY_DONE && error != HA_ADMIN_TRY_ALTER) { - print_admin_msg(thd, MYSQL_ERRMSG_SIZE, "error", + print_admin_msg(thd, MYSQL_ERRMSG_SIZE, &msg_error, table_share->db.str, table->alias, - opt_op_name[flag], "Partition %s returned error", + &opt_op_name[flag], "Partition %s returned error", part_elem->partition_name); } /* reset part_state for the remaining partitions */ @@ -3848,6 +3861,15 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) */ clear_handler_file(); + DBUG_ASSERT(part_share); + lock_shared_ha_data(); + /* Protect against cloned file, for which we don't need engine name */ + if (m_file[0]) + part_share->partition_engine_name= real_table_type(); + else + part_share->partition_engine_name= 0; // Checked in ha_table_exists() + unlock_shared_ha_data(); + /* Some handlers update statistics as part of the open call. This will in some cases corrupt the statistics of the partition handler and thus @@ -4326,19 +4348,11 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type) /* Add partition to be called in reset(). 
*/ bitmap_set_bit(&m_partitions_to_reset, i); } - switch (lock_type) + if (lock_type >= TL_FIRST_WRITE) { - case TL_WRITE_ALLOW_WRITE: - case TL_WRITE_CONCURRENT_INSERT: - case TL_WRITE_DELAYED: - case TL_WRITE_DEFAULT: - case TL_WRITE_LOW_PRIORITY: - case TL_WRITE: - case TL_WRITE_ONLY: if (m_part_info->part_expr) m_part_info->part_expr->walk(&Item::register_field_in_read_map, 1, 0); error= m_part_info->vers_set_hist_part(thd); - default:; } DBUG_RETURN(error); } @@ -4498,7 +4512,7 @@ int ha_partition::write_row(const uchar * buf) bool have_auto_increment= table->next_number_field && buf == table->record[0]; MY_BITMAP *old_map; THD *thd= ha_thd(); - Sql_mode_save sms(thd); + sql_mode_t org_sql_mode= thd->variables.sql_mode; bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null; DBUG_ENTER("ha_partition::write_row"); DBUG_PRINT("enter", ("partition this: %p", this)); @@ -4564,6 +4578,7 @@ int ha_partition::write_row(const uchar * buf) exit: table->auto_increment_field_not_null= saved_auto_inc_field_not_null; + thd->variables.sql_mode= org_sql_mode; DBUG_RETURN(error); } @@ -7404,7 +7419,7 @@ bool ha_partition::check_parallel_search() DBUG_PRINT("info",("partition select_lex: %p", select_lex)); if (!select_lex) goto not_parallel; - if (!select_lex->explicit_limit) + if (!select_lex->limit_params.explicit_limit) { DBUG_PRINT("info",("partition not using explicit_limit")); goto parallel; @@ -9359,6 +9374,7 @@ int ha_partition::extra(enum ha_extra_function operation) case HA_EXTRA_STARTING_ORDERED_INDEX_SCAN: case HA_EXTRA_BEGIN_ALTER_COPY: case HA_EXTRA_END_ALTER_COPY: + case HA_EXTRA_IGNORE_INSERT: DBUG_RETURN(loop_partitions(extra_cb, &operation)); default: { @@ -10137,9 +10153,9 @@ void ha_partition::append_row_to_str(String &str) for (; key_part != key_part_end; key_part++) { Field *field= key_part->field; - str.append(" "); + str.append(' '); str.append(&field->field_name); - str.append(":"); + str.append(':'); field_unpack(&str, field, rec, 0, 
false); } if (!is_rec0) @@ -10157,9 +10173,9 @@ void ha_partition::append_row_to_str(String &str) field_ptr++) { Field *field= *field_ptr; - str.append(" "); + str.append(' '); str.append(&field->field_name); - str.append(":"); + str.append(':'); field_unpack(&str, field, rec, 0, false); } if (!is_rec0) @@ -10197,14 +10213,14 @@ void ha_partition::print_error(int error, myf errflag) String str(buf,sizeof(buf),system_charset_info); uint32 part_id; str.length(0); - str.append("("); + str.append('('); str.append_ulonglong(m_last_part); - str.append(" != "); + str.append(STRING_WITH_LEN(" != ")); if (get_part_for_buf(m_err_rec, m_rec0, m_part_info, &part_id)) - str.append("?"); + str.append('?'); else str.append_ulonglong(part_id); - str.append(")"); + str.append(')'); append_row_to_str(str); /* Log this error, so the DBA can notice it and fix it! */ @@ -11194,9 +11210,9 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) if (num_misplaced_rows > 0) { - print_admin_msg(ha_thd(), MYSQL_ERRMSG_SIZE, "warning", + print_admin_msg(ha_thd(), MYSQL_ERRMSG_SIZE, &msg_warning, table_share->db.str, table->alias, - opt_op_name[REPAIR_PARTS], + &opt_op_name[REPAIR_PARTS], "Moved %lld misplaced rows", num_misplaced_rows); } @@ -11216,9 +11232,9 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) if (!do_repair) { /* Check. */ - print_admin_msg(ha_thd(), MYSQL_ERRMSG_SIZE, "error", + print_admin_msg(ha_thd(), MYSQL_ERRMSG_SIZE, &msg_error, table_share->db.str, table->alias, - opt_op_name[CHECK_PARTS], + &opt_op_name[CHECK_PARTS], "Found a misplaced row"); /* Break on first misplaced row! 
*/ result= HA_ADMIN_NEEDS_UPGRADE; @@ -11243,8 +11259,9 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) str.length(0); if (result == HA_ERR_FOUND_DUPP_KEY) { - str.append("Duplicate key found, " - "please update or delete the record:\n"); + str.append(STRING_WITH_LEN("Duplicate key found, " + "please update or delete the " + "record:\n")); result= HA_ADMIN_CORRUPT; } m_err_rec= NULL; @@ -11264,9 +11281,9 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) (uint) correct_part_id, str.c_ptr_safe()); } - print_admin_msg(ha_thd(), MYSQL_ERRMSG_SIZE, "error", + print_admin_msg(ha_thd(), MYSQL_ERRMSG_SIZE, &msg_error, table_share->db.str, table->alias, - opt_op_name[REPAIR_PARTS], + &opt_op_name[REPAIR_PARTS], "Failed to move/insert a row" " from part %u into part %u:\n%s", (uint) read_part_id, @@ -11391,19 +11408,19 @@ int ha_partition::check_for_upgrade(HA_CHECK_OPT *check_opt) !(part_buf= generate_partition_syntax_for_frm(thd, m_part_info, &part_buf_len, NULL, NULL)) || - print_admin_msg(thd, SQL_ADMIN_MSG_TEXT_SIZE + 1, "error", + print_admin_msg(thd, SQL_ADMIN_MSG_TEXT_SIZE + 1, &msg_error, table_share->db.str, table->alias, - opt_op_name[CHECK_PARTS], + &opt_op_name[CHECK_PARTS], KEY_PARTITIONING_CHANGED_STR, db_name.c_ptr_safe(), table_name.c_ptr_safe(), part_buf)) { /* Error creating admin message (too long string?). 
*/ - print_admin_msg(thd, MYSQL_ERRMSG_SIZE, "error", + print_admin_msg(thd, MYSQL_ERRMSG_SIZE, &msg_error, table_share->db.str, table->alias, - opt_op_name[CHECK_PARTS], + &opt_op_name[CHECK_PARTS], KEY_PARTITIONING_CHANGED_STR, db_name.c_ptr_safe(), table_name.c_ptr_safe(), "<old partition clause>, but add ALGORITHM = 1" @@ -11740,13 +11757,13 @@ int ha_partition::direct_update_rows_init(List<Item> *update_fields) table_list= table_list->parent_l; st_select_lex *select_lex= table_list->select_lex; DBUG_PRINT("info", ("partition select_lex: %p", select_lex)); - if (select_lex && select_lex->explicit_limit) + if (select_lex && select_lex->limit_params.explicit_limit) { DBUG_PRINT("info", ("partition explicit_limit=TRUE")); DBUG_PRINT("info", ("partition offset_limit: %p", - select_lex->offset_limit)); + select_lex->limit_params.offset_limit)); DBUG_PRINT("info", ("partition select_limit: %p", - select_lex->select_limit)); + select_lex->limit_params.select_limit)); DBUG_PRINT("info", ("partition FALSE by select_lex")); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } @@ -11932,13 +11949,13 @@ int ha_partition::direct_delete_rows_init() table_list= table_list->parent_l; st_select_lex *select_lex= table_list->select_lex; DBUG_PRINT("info", ("partition select_lex: %p", select_lex)); - if (select_lex && select_lex->explicit_limit) + if (select_lex && select_lex->limit_params.explicit_limit) { DBUG_PRINT("info", ("partition explicit_limit: TRUE")); DBUG_PRINT("info", ("partition offset_limit: %p", - select_lex->offset_limit)); + select_lex->limit_params.offset_limit)); DBUG_PRINT("info", ("partition select_limit: %p", - select_lex->select_limit)); + select_lex->limit_params.select_limit)); DBUG_PRINT("info", ("partition FALSE by select_lex")); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 1faee3216c8..f50f59310ff 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -109,12 +109,14 @@ public: */ bool 
partition_name_hash_initialized; HASH partition_name_hash; + const char *partition_engine_name; /** Storage for each partitions Handler_share */ Parts_share_refs partitions_share_refs; Partition_share() : auto_inc_initialized(false), next_auto_inc_val(0), partition_name_hash_initialized(false), + partition_engine_name(NULL), partition_names(NULL) { mysql_mutex_init(key_partition_auto_inc_mutex, @@ -280,7 +282,7 @@ typedef struct st_partition_part_key_multi_range_hld extern "C" int cmp_key_part_id(void *key_p, uchar *ref1, uchar *ref2); extern "C" int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2); -class ha_partition :public handler +class ha_partition final :public handler { private: enum partition_index_scan_type @@ -478,7 +480,7 @@ public: } Partition_share *get_part_share() { return part_share; } handler *clone(const char *name, MEM_ROOT *mem_root) override; - virtual void set_part_info(partition_info *part_info) override + void set_part_info(partition_info *part_info) override { m_part_info= part_info; m_is_sub_partitioned= part_info->is_sub_partitioned(); @@ -1080,8 +1082,7 @@ public: const char *index_type(uint inx) override; /* The name of the table type that will be used for display purposes */ - const char *table_type() const; - + const char *real_table_type() const override; /* The name of the row type used for the underlying tables. 
*/ enum row_type get_row_type() const override; @@ -1606,6 +1607,7 @@ public: return h; } + bool partition_engine() override { return 1;} ha_rows part_records(partition_element *part_elem) { DBUG_ASSERT(m_part_info); diff --git a/sql/handle_connections_win.cc b/sql/handle_connections_win.cc index b61130dd6e9..ffacfcab88f 100644 --- a/sql/handle_connections_win.cc +++ b/sql/handle_connections_win.cc @@ -22,12 +22,12 @@ #include <mswsock.h> #include <mysql/psi/mysql_socket.h> #include <sddl.h> - +#include <vector> #include <handle_connections_win.h> /* From mysqld.cc */ extern HANDLE hEventShutdown; -extern MYSQL_SOCKET base_ip_sock, extra_ip_sock; +extern Dynamic_array<MYSQL_SOCKET> listen_sockets; #ifdef HAVE_POOL_OF_THREADS extern PTP_CALLBACK_ENVIRON get_threadpool_win_callback_environ(); extern void tp_win_callback_prolog(); @@ -129,6 +129,9 @@ struct Socket_Listener: public Listener /** Client socket passed to AcceptEx() call.*/ SOCKET m_client_socket; + /** Listening socket. */ + MYSQL_SOCKET m_listen_socket; + /** Buffer for sockaddrs passed to AcceptEx()/GetAcceptExSockaddrs() */ char m_buffer[2 * sizeof(sockaddr_storage) + 32]; @@ -163,7 +166,8 @@ struct Socket_Listener: public Listener */ Socket_Listener(MYSQL_SOCKET listen_socket, PTP_CALLBACK_ENVIRON callback_environ) : Listener((HANDLE)listen_socket.fd,0), - m_client_socket(INVALID_SOCKET) + m_client_socket(INVALID_SOCKET), + m_listen_socket(listen_socket) { if (callback_environ) { @@ -185,7 +189,8 @@ struct Socket_Listener: public Listener void begin_accept() { retry : - m_client_socket= socket(server_socket_ai_family, SOCK_STREAM, IPPROTO_TCP); + m_client_socket= socket(m_listen_socket.address_family, SOCK_STREAM, + IPPROTO_TCP); if (m_client_socket == INVALID_SOCKET) { sql_perror("socket() call failed."); @@ -234,7 +239,6 @@ retry : } MYSQL_SOCKET s_client{m_client_socket}; - MYSQL_SOCKET s_listen{(SOCKET)m_handle}; #ifdef HAVE_PSI_SOCKET_INTERFACE /* Parse socket addresses buffer filled by 
AcceptEx(), @@ -247,7 +251,8 @@ retry : &local_addr, &local_addr_len, &remote_addr, &remote_addr_len); s_client.m_psi= PSI_SOCKET_CALL(init_socket) - (key_socket_client_connection, (const my_socket*)&s_listen.fd, remote_addr, remote_addr_len); + (key_socket_client_connection, (const my_socket*)&m_listen_socket.fd, + remote_addr, remote_addr_len); #endif /* Start accepting new connection. After this point, do not use @@ -256,7 +261,7 @@ retry : /* Some chores post-AcceptEx() that we need to create a normal socket.*/ if (setsockopt(s_client.fd, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, - (char *)&s_listen.fd, sizeof(s_listen.fd))) + (char *)&m_listen_socket.fd, sizeof(m_listen_socket.fd))) { if (!abort_loop) { @@ -266,7 +271,7 @@ retry : } /* Create a new connection.*/ - handle_accepted_socket(s_client, s_listen); + handle_accepted_socket(s_client, m_listen_socket); } ~Socket_Listener() @@ -281,14 +286,12 @@ retry : */ static void init_winsock_extensions() { - SOCKET s= mysql_socket_getfd(base_ip_sock); - if (s == INVALID_SOCKET) - s= mysql_socket_getfd(extra_ip_sock); - if (s == INVALID_SOCKET) - { + if (listen_sockets.size() == 0) { /* --skip-networking was used*/ return; } + + SOCKET s= mysql_socket_getfd(listen_sockets.at(0)); GUID guid_AcceptEx= WSAID_ACCEPTEX; GUID guid_GetAcceptExSockaddrs= WSAID_GETACCEPTEXSOCKADDRS; @@ -357,7 +360,8 @@ static void init_pipe_security_descriptor() goto fail; snprintf(sddl_string, sizeof(sddl_string), SDDL_FMT, - EVERYONE_PIPE_ACCESS_MASK, current_user_string_sid); + (unsigned int)EVERYONE_PIPE_ACCESS_MASK, + current_user_string_sid); LocalFree(current_user_string_sid); if (ConvertStringSecurityDescriptorToSecurityDescriptor(sddl_string, @@ -510,6 +514,18 @@ struct Pipe_Listener : public Listener } }; + /* The shutdown event, which is set whenever*/ +static void create_shutdown_event() +{ + char shutdown_event_name[40]; + sprintf_s(shutdown_event_name, "MySQLShutdown%u", GetCurrentProcessId()); + if (!(hEventShutdown= 
CreateEvent(0, FALSE, FALSE, shutdown_event_name))) + { + sql_print_error("Can't create shutdown event, Windows error %u", GetLastError()); + unireg_abort(1); + } +} + /** Accept new client connections on Windows. @@ -529,22 +545,24 @@ struct Pipe_Listener : public Listener */ -#define MAX_WAIT_HANDLES 32 #define NUM_PIPE_LISTENERS 24 #define SHUTDOWN_IDX 0 #define LISTENER_START_IDX 1 -static Listener *all_listeners[MAX_WAIT_HANDLES]; -static HANDLE wait_events[MAX_WAIT_HANDLES]; -static int n_listeners; +static std::vector<Listener *> all_listeners; +static std::vector<HANDLE> wait_events; void network_init_win() { Socket_Listener::init_winsock_extensions(); /* Listen for TCP connections on "extra-port" (no threadpool).*/ - if (extra_ip_sock.fd != INVALID_SOCKET) - all_listeners[n_listeners++]= new Socket_Listener(extra_ip_sock, 0); + for (uint i= 0 ; i < listen_sockets.elements() ; i++) + { + MYSQL_SOCKET *sock= listen_sockets.get_pos(i); + if (sock->is_extra_port) + all_listeners.push_back(new Socket_Listener(*sock, 0)); + } /* Listen for named pipe connections */ if (mysqld_unix_port[0] && !opt_bootstrap && opt_enable_named_pipe) @@ -553,17 +571,22 @@ void network_init_win() Use several listeners for pipe, to reduce ERROR_PIPE_BUSY on client side. 
*/ for (int i= 0; i < NUM_PIPE_LISTENERS; i++) - all_listeners[n_listeners++]= new Pipe_Listener(); + all_listeners.push_back(new Pipe_Listener()); } - if (base_ip_sock.fd != INVALID_SOCKET) + for (uint i= 0 ; i < listen_sockets.elements() ; i++) { - /* Wait for TCP connections.*/ - SetFileCompletionNotificationModes((HANDLE)base_ip_sock.fd, FILE_SKIP_SET_EVENT_ON_HANDLE); - all_listeners[n_listeners++]= new Socket_Listener(base_ip_sock, get_threadpool_win_callback_environ()); + MYSQL_SOCKET *sock= listen_sockets.get_pos(i); + if (sock->is_extra_port) + continue; + /* Wait for TCP connections.*/ + SetFileCompletionNotificationModes((HANDLE) sock->fd, + FILE_SKIP_SET_EVENT_ON_HANDLE); + all_listeners.push_back( + new Socket_Listener(*sock, get_threadpool_win_callback_environ())); } - if (!n_listeners && !opt_bootstrap) + if (all_listeners.size() == 0 && !opt_bootstrap) { sql_print_error("Either TCP connections or named pipe connections must be enabled."); unireg_abort(1); @@ -572,27 +595,44 @@ void network_init_win() void handle_connections_win() { - DBUG_ASSERT(hEventShutdown); int n_waits; - wait_events[SHUTDOWN_IDX]= hEventShutdown; + create_shutdown_event(); + wait_events.push_back(hEventShutdown); n_waits= 1; - for (int i= 0; i < n_listeners; i++) + for (size_t i= 0; i < all_listeners.size(); i++) { HANDLE wait_handle= all_listeners[i]->wait_handle(); if (wait_handle) { DBUG_ASSERT((i == 0) || (all_listeners[i - 1]->wait_handle() != 0)); - wait_events[n_waits++]= wait_handle; + wait_events.push_back(wait_handle); } all_listeners[i]->begin_accept(); } + mysqld_win_set_startup_complete(); + + // WaitForMultipleObjects can't wait on more than MAXIMUM_WAIT_OBJECTS + // handles simultaneously. Since MAXIMUM_WAIT_OBJECTS is only 64, there is + // a theoretical possiblity of exceeding that limit on installations where + // host name resolves to a lot of addresses. + if (wait_events.size() > MAXIMUM_WAIT_OBJECTS) + { + sql_print_warning( + "Too many wait events (%lu). 
Some connection listeners won't be handled. " + "Try to switch \"thread-handling\" to \"pool-of-threads\" and/or disable " + "\"extra-port\".", static_cast<ulong>(wait_events.size())); + wait_events.resize(MAXIMUM_WAIT_OBJECTS); + } + for (;;) { - DWORD idx = WaitForMultipleObjects(n_waits ,wait_events, FALSE, INFINITE); - DBUG_ASSERT((int)idx >= 0 && (int)idx < n_waits); + DBUG_ASSERT(wait_events.size() <= MAXIMUM_WAIT_OBJECTS); + DWORD idx = WaitForMultipleObjects((DWORD)wait_events.size(), + wait_events.data(), FALSE, INFINITE); + DBUG_ASSERT((int)idx >= 0 && (int)idx < (int)wait_events.size()); if (idx == SHUTDOWN_IDX) break; @@ -600,8 +640,10 @@ void handle_connections_win() all_listeners[idx - LISTENER_START_IDX]->completion_callback(); } + mysqld_win_initiate_shutdown(); + /* Cleanup */ - for (int i= 0; i < n_listeners; i++) + for (size_t i= 0; i < all_listeners.size(); i++) { Listener *listener= all_listeners[i]; if (listener->wait_handle()) diff --git a/sql/handler.cc b/sql/handler.cc index db04677e65b..d88e5f06425 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -110,7 +110,7 @@ static handlerton *installed_htons[128]; #define BITMAP_STACKBUF_SIZE (128/8) KEY_CREATE_INFO default_key_create_info= -{ HA_KEY_ALG_UNDEF, 0, 0, {NullS, 0}, {NullS, 0}, true }; +{ HA_KEY_ALG_UNDEF, 0, 0, {NullS, 0}, {NullS, 0}, true, false }; /* number of entries in handlertons[] */ ulong total_ha= 0; @@ -136,11 +136,18 @@ static const LEX_CSTRING sys_table_aliases[]= {NullS, 0} }; -const char *ha_row_type[] = { - "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "PAGE" +const LEX_CSTRING ha_row_type[]= +{ + { STRING_WITH_LEN("") }, + { STRING_WITH_LEN("FIXED") }, + { STRING_WITH_LEN("DYNAMIC") }, + { STRING_WITH_LEN("COMPRESSED") }, + { STRING_WITH_LEN("REDUNDANT") }, + { STRING_WITH_LEN("COMPACT") }, + { STRING_WITH_LEN("PAGE") } }; -const char *tx_isolation_names[] = +const char *tx_isolation_names[]= { "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ", 
"SERIALIZABLE", NullS}; TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"", @@ -486,7 +493,6 @@ int ha_init_errors(void) SETMSG(HA_ERR_INDEX_COL_TOO_LONG, ER_DEFAULT(ER_INDEX_COLUMN_TOO_LONG)); SETMSG(HA_ERR_INDEX_CORRUPT, ER_DEFAULT(ER_INDEX_CORRUPT)); SETMSG(HA_FTS_INVALID_DOCID, "Invalid InnoDB FTS Doc ID"); - SETMSG(HA_ERR_TABLE_IN_FK_CHECK, ER_DEFAULT(ER_TABLE_IN_FK_CHECK)); SETMSG(HA_ERR_DISK_FULL, ER_DEFAULT(ER_DISK_FULL)); SETMSG(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE, "Too many words in a FTS phrase or proximity search"); SETMSG(HA_ERR_FK_DEPTH_EXCEEDED, "Foreign key cascade delete/update exceeds"); @@ -559,7 +565,13 @@ static int hton_drop_table(handlerton *hton, const char *path) char tmp_path[FN_REFLEN]; handler *file= get_new_handler(nullptr, current_thd->mem_root, hton); if (!file) - return ENOMEM; + { + /* + If file is not defined it means that the engine can't create a + handler if share is not set or we got an out of memory error + */ + return my_errno == ENOMEM ? 
ENOMEM : ENOENT; + } path= get_canonical_filename(file, path, tmp_path); int error= file->delete_table(path); delete file; @@ -619,10 +631,12 @@ int ha_finalize_handlerton(st_plugin_int *plugin) } +const char *hton_no_exts[]= { 0 }; + + int ha_initialize_handlerton(st_plugin_int *plugin) { handlerton *hton; - static const char *no_exts[]= { 0 }; DBUG_ENTER("ha_initialize_handlerton"); DBUG_PRINT("plugin", ("initialize plugin: '%s'", plugin->name.str)); @@ -635,7 +649,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin) goto err_no_hton_memory; } - hton->tablefile_extensions= no_exts; + hton->tablefile_extensions= hton_no_exts; hton->discover_table_names= hton_ext_based_table_discovery; hton->drop_table= hton_drop_table; @@ -824,9 +838,10 @@ static my_bool dropdb_handlerton(THD *unused1, plugin_ref plugin, } -void ha_drop_database(char* path) +void ha_drop_database(const char* path) { - plugin_foreach(NULL, dropdb_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, path); + plugin_foreach(NULL, dropdb_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, + (char*) path); } @@ -912,7 +927,7 @@ static my_bool kill_handlerton(THD *thd, plugin_ref plugin, { handlerton *hton= plugin_hton(plugin); - mysql_mutex_assert_owner(&thd->LOCK_thd_kill); + mysql_mutex_assert_owner(&thd->LOCK_thd_data); if (hton->kill_query && thd_get_ha_data(thd, hton)) hton->kill_query(hton, thd, *(enum thd_kill_levels *) level); return FALSE; @@ -942,6 +957,24 @@ void ha_disable_internal_writes(bool disable) } +static my_bool signal_ddl_recovery_done(THD *, plugin_ref plugin, void *) +{ + handlerton *hton= plugin_hton(plugin); + if (hton->signal_ddl_recovery_done) + (hton->signal_ddl_recovery_done)(hton); + return 0; +} + + +void ha_signal_ddl_recovery_done() +{ + DBUG_ENTER("ha_signal_ddl_recovery_done"); + plugin_foreach(NULL, signal_ddl_recovery_done, MYSQL_STORAGE_ENGINE_PLUGIN, + NULL); + DBUG_VOID_RETURN; +} + + /***************************************************************************** Backup functions 
******************************************************************************/ @@ -978,6 +1011,47 @@ void ha_end_backup() PLUGIN_IS_DELETED|PLUGIN_IS_READY, 0); } +/* + Take a lock to block MDL_BACKUP_DDL (used by maria-backup) until + the DDL operation is taking place +*/ + +bool handler::log_not_redoable_operation(const char *operation) +{ + DBUG_ENTER("log_not_redoable_operation"); + if (table->s->tmp_table == NO_TMP_TABLE) + { + /* + Take a lock to ensure that mariadb-backup will notice the + new log entry (and re-copy the table if needed). + */ + THD *thd= table->in_use; + MDL_request mdl_backup; + backup_log_info ddl_log; + + MDL_REQUEST_INIT(&mdl_backup, MDL_key::BACKUP, "", "", MDL_BACKUP_DDL, + MDL_STATEMENT); + if (thd->mdl_context.acquire_lock(&mdl_backup, + thd->variables.lock_wait_timeout)) + DBUG_RETURN(1); + + bzero(&ddl_log, sizeof(ddl_log)); + lex_string_set(&ddl_log.query, operation); + /* + We can't use partition_engine() here as this function is called + directly by the handler for the underlaying partition table + */ +#ifdef WITH_PARTITION_STORAGE_ENGINE + ddl_log.org_partitioned= table->s->partition_info_str != 0; +#endif + lex_string_set(&ddl_log.org_storage_engine_name, table_type()); + ddl_log.org_database= table->s->db; + ddl_log.org_table= table->s->table_name; + ddl_log.org_table_id= table->s->tabledef_version; + backup_log_ddl(&ddl_log); + } + DBUG_RETURN(0); +} /* Inform plugin of the server shutdown. @@ -1467,6 +1541,24 @@ uint ha_count_rw_all(THD *thd, Ha_trx_info **ptr_ha_info) return rw_ha_count; } +/* + Returns counted number of + read-write recoverable transaction participants. +*/ +uint ha_count_rw_2pc(THD *thd, bool all) +{ + unsigned rw_ha_count= 0; + THD_TRANS *trans=all ? 
&thd->transaction->all : &thd->transaction->stmt; + + for (Ha_trx_info * ha_info= trans->ha_list; ha_info; + ha_info= ha_info->next()) + { + if (ha_info->is_trx_read_write() && ha_info->ht()->recover) + ++rw_ha_count; + } + return rw_ha_count; +} + /** Check if we can skip the two-phase commit. @@ -1486,7 +1578,6 @@ uint ha_count_rw_all(THD *thd, Ha_trx_info **ptr_ha_info) engines with read-write changes. */ -static uint ha_check_and_coalesce_trx_read_only(THD *thd, Ha_trx_info *ha_list, bool all) @@ -1950,6 +2041,24 @@ int ha_commit_one_phase(THD *thd, bool all) DBUG_RETURN(res); } +static bool is_ro_1pc_trans(THD *thd, Ha_trx_info *ha_info, bool all, + bool is_real_trans) +{ + uint rw_ha_count= ha_check_and_coalesce_trx_read_only(thd, ha_info, all); + bool rw_trans= is_real_trans && + (rw_ha_count > (thd->is_current_stmt_binlog_disabled()?0U:1U)); + + return !rw_trans; +} + +static bool has_binlog_hton(Ha_trx_info *ha_info) +{ + bool rc; + for (rc= false; ha_info && !rc; ha_info= ha_info->next()) + rc= ha_info->ht() == binlog_hton; + + return rc; +} static int commit_one_phase_2(THD *thd, bool all, THD_TRANS *trans, bool is_real_trans) @@ -1963,9 +2072,17 @@ commit_one_phase_2(THD *thd, bool all, THD_TRANS *trans, bool is_real_trans) if (ha_info) { + int err; + + if (has_binlog_hton(ha_info) && + (err= binlog_commit(thd, all, + is_ro_1pc_trans(thd, ha_info, all, is_real_trans)))) + { + my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); + error= 1; + } for (; ha_info; ha_info= ha_info_next) { - int err; handlerton *ht= ha_info->ht(); if ((err= ht->commit(ht, thd, all))) { @@ -2192,6 +2309,15 @@ int ha_commit_or_rollback_by_xid(XID *xid, bool commit) xaop.xid= xid; xaop.result= 1; + /* + When the binlogging service is enabled complete the transaction + by it first. + */ + if (commit) + binlog_commit_by_xid(binlog_hton, xid); + else + binlog_rollback_by_xid(binlog_hton, xid); + plugin_foreach(NULL, commit ? 
xacommit_handlerton : xarollback_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &xaop); @@ -2287,7 +2413,7 @@ static my_xid wsrep_order_and_check_continuity(XID *list, int len) recover() step of xa. @note - there are three modes of operation: + there are four modes of operation: - automatic recover after a crash in this case commit_list != 0, tc_heuristic_recover==0 all xids from commit_list are committed, others are rolled back @@ -2298,6 +2424,9 @@ static my_xid wsrep_order_and_check_continuity(XID *list, int len) - no recovery (MySQL did not detect a crash) in this case commit_list==0, tc_heuristic_recover == 0 there should be no prepared transactions in this case. + - automatic recovery for the semisync slave server: uncommitted + transactions are rolled back and when they are in binlog it gets + truncated to the first uncommitted transaction start offset. */ struct xarecover_st { @@ -2305,8 +2434,194 @@ struct xarecover_st XID *list; HASH *commit_list; bool dry_run; + MEM_ROOT *mem_root; + bool error; +}; + +/** + Inserts a new hash member. + + returns a successfully created and inserted @c xid_recovery_member + into hash @c hash_arg, + or NULL. +*/ +static xid_recovery_member* +xid_member_insert(HASH *hash_arg, my_xid xid_arg, MEM_ROOT *ptr_mem_root, + XID *full_xid_arg, decltype(::server_id) server_id_arg) +{ + xid_recovery_member *member= (xid_recovery_member *) + alloc_root(ptr_mem_root, sizeof(xid_recovery_member)); + XID *xid_full= NULL; + + if (full_xid_arg) + xid_full= (XID*) alloc_root(ptr_mem_root, sizeof(XID)); + + if (!member || (full_xid_arg && !xid_full)) + return NULL; + + if (full_xid_arg) + *xid_full= *full_xid_arg; + *member= xid_recovery_member(xid_arg, 1, false, xid_full, server_id_arg); + + return + my_hash_insert(hash_arg, (uchar*) member) ? NULL : member; +} + +/* + Inserts a new or updates an existing hash member to increment + the member's prepare counter. + + returns false on success, + true otherwise. 
+*/ +static bool xid_member_replace(HASH *hash_arg, my_xid xid_arg, + MEM_ROOT *ptr_mem_root, + XID *full_xid_arg, + decltype(::server_id) server_id_arg) +{ + xid_recovery_member* member; + if ((member= (xid_recovery_member *) + my_hash_search(hash_arg, (uchar *)& xid_arg, sizeof(xid_arg)))) + member->in_engine_prepare++; + else + member= xid_member_insert(hash_arg, xid_arg, ptr_mem_root, full_xid_arg, server_id_arg); + + return member == NULL; +} + +/* + A "transport" type for recovery completion with ha_recover_complete() +*/ +struct xarecover_complete_arg +{ + xid_recovery_member* member; + Binlog_offset *binlog_coord; + uint count; }; +/* + Flagged to commit member confirms to get committed. + Otherwise when + A. ptr_commit_max is NULL (implies the normal recovery), or + B. it's not NULL (can only be so in the semisync slave case) + and the value referenced is not greater than the member's coordinate + the decision is to rollback. + When both A,B do not hold - which is the semisync slave recovery + case - the decision is to commit. + + Returns true as commmit decision + false as rollback one +*/ +static bool xarecover_decide_to_commit(xid_recovery_member* member, + Binlog_offset *ptr_commit_max) +{ + return + member->decided_to_commit ? true : + !ptr_commit_max ? false : + (member->binlog_coord < *ptr_commit_max ? // semisync slave recovery + true : false); +} + +/* + Helper function for xarecover_do_commit_or_rollback_handlerton. + For a given hton decides what to do with a xid passed in the 2nd arg + and carries out the decision. 
+*/ +static void xarecover_do_commit_or_rollback(handlerton *hton, + xarecover_complete_arg *arg) +{ + xid_t x; + my_bool rc; + xid_recovery_member *member= arg->member; + Binlog_offset *ptr_commit_max= arg->binlog_coord; + + if (!member->full_xid) + // Populate xid using the server_id from original transaction + x.set(member->xid, member->server_id); + else + x= *member->full_xid; + + rc= xarecover_decide_to_commit(member, ptr_commit_max) ? + hton->commit_by_xid(hton, &x) : hton->rollback_by_xid(hton, &x); + + /* + It's fine to have non-zero rc which would be from transaction + non-participant hton:s. + */ + DBUG_ASSERT(rc || member->in_engine_prepare > 0); + + if (!rc) + { + /* + This block relies on Engine to report XAER_NOTA at + "complete"_by_xid for unknown xid. + */ + member->in_engine_prepare--; + if (global_system_variables.log_warnings > 2) + sql_print_information("%s transaction with xid %llu", + member->decided_to_commit ? "Committed" : + "Rolled back", (ulonglong) member->xid); + } +} + +/* + Per hton recovery decider function. +*/ +static my_bool xarecover_do_commit_or_rollback_handlerton(THD *unused, + plugin_ref plugin, + void *arg) +{ + handlerton *hton= plugin_hton(plugin); + + if (hton->recover) + { + xarecover_do_commit_or_rollback(hton, (xarecover_complete_arg *) arg); + } + + return FALSE; +} + +/* + Completes binlog recovery for an input xid in the passed + member_arg to invoke decider functions for each handlerton. + + Returns always FALSE. 
+*/ +static my_bool xarecover_complete_and_count(void *member_arg, + void *param_arg) +{ + xid_recovery_member *member= (xid_recovery_member*) member_arg; + xarecover_complete_arg *complete_params= + (xarecover_complete_arg*) param_arg; + complete_params->member= member; + + (void) plugin_foreach(NULL, xarecover_do_commit_or_rollback_handlerton, + MYSQL_STORAGE_ENGINE_PLUGIN, complete_params); + + if (member->in_engine_prepare) + { + complete_params->count++; + if (global_system_variables.log_warnings > 2) + sql_print_warning("Found prepared transaction with xid %llu", + (ulonglong) member->xid); + } + + return false; +} + +/* + Completes binlog recovery to invoke decider functions for + each xid. + Returns the number of transactions remained doubtful. +*/ +uint ha_recover_complete(HASH *commit_list, Binlog_offset *coord) +{ + xarecover_complete_arg complete= { NULL, coord, 0 }; + (void) my_hash_iterate(commit_list, xarecover_complete_and_count, &complete); + + return complete.count; +} + static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin, void *arg) { @@ -2346,10 +2661,13 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin, for (int i=0; i < got; i ++) { - my_xid x= IF_WSREP(wsrep_is_wsrep_xid(&info->list[i]) ? - wsrep_xid_seqno(&info->list[i]) : - info->list[i].get_my_xid(), - info->list[i].get_my_xid()); + my_xid x= info->list[i].get_my_xid(); + bool is_server_xid= x > 0; + +#ifdef WITH_WSREP + if (!is_server_xid && wsrep_is_wsrep_xid(&info->list[i])) + x= wsrep_xid_seqno(&info->list[i]); +#endif if (!x) // not "mine" - that is generated by external TM { DBUG_EXECUTE("info",{ @@ -2368,13 +2686,29 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin, info->found_my_xids++; continue; } - // recovery mode + + /* + Regular and semisync slave server recovery only collects + xids to make decisions on them later by the caller. + */ + if (info->mem_root) + { + // remember "full" xid too when it's not in mysql format. 
+ // Also record the transaction's original server_id. It will be used for + // populating the input XID to be searched in hash. + if (xid_member_replace(info->commit_list, x, info->mem_root, + is_server_xid? NULL : &info->list[i], + is_server_xid? info->list[i].get_trx_server_id() : server_id)) + { + info->error= true; + sql_print_error("Error in memory allocation at xarecover_handlerton"); + break; + } + } if (IF_WSREP((wsrep_emulate_bin_log && wsrep_is_wsrep_xid(info->list + i) && x <= wsrep_limit), false) || - (info->commit_list ? - my_hash_search(info->commit_list, (uchar *)&x, sizeof(x)) != 0 : - tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT)) + tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT) { int rc= hton->commit_by_xid(hton, info->list+i); if (rc == 0) @@ -2385,7 +2719,7 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin, }); } } - else + else if (!info->mem_root) { int rc= hton->rollback_by_xid(hton, info->list+i); if (rc == 0) @@ -2404,7 +2738,7 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin, return FALSE; } -int ha_recover(HASH *commit_list) +int ha_recover(HASH *commit_list, MEM_ROOT *arg_mem_root) { struct xarecover_st info; DBUG_ENTER("ha_recover"); @@ -2412,6 +2746,8 @@ int ha_recover(HASH *commit_list) info.commit_list= commit_list; info.dry_run= (info.commit_list==0 && tc_heuristic_recover==0); info.list= NULL; + info.mem_root= arg_mem_root; + info.error= false; /* commit_list and tc_heuristic_recover cannot be set both */ DBUG_ASSERT(info.commit_list==0 || tc_heuristic_recover==0); @@ -2423,7 +2759,7 @@ int ha_recover(HASH *commit_list) DBUG_RETURN(0); if (info.commit_list) - sql_print_information("Starting crash recovery..."); + sql_print_information("Starting table crash recovery..."); for (info.len= MAX_XID_LIST_SIZE ; info.list==0 && info.len > MIN_XID_LIST_SIZE; info.len/=2) @@ -2447,17 +2783,20 @@ int ha_recover(HASH *commit_list) info.found_foreign_xids); if (info.dry_run && 
info.found_my_xids) { - sql_print_error("Found %d prepared transactions! It means that mysqld was " + sql_print_error("Found %d prepared transactions! It means that server was " "not shut down properly last time and critical recovery " "information (last binlog or %s file) was manually deleted " - "after a crash. You have to start mysqld with " + "after a crash. You have to start server with " "--tc-heuristic-recover switch to commit or rollback " "pending transactions.", info.found_my_xids, opt_tc_log_file); DBUG_RETURN(1); } + if (info.error) + DBUG_RETURN(1); + if (info.commit_list) - sql_print_information("Crash recovery finished."); + sql_print_information("Crash table recovery finished."); DBUG_RETURN(0); } @@ -2747,7 +3086,7 @@ const char *get_canonical_filename(handler *file, const char *path, char *tmp_path) { uint i; - if (lower_case_table_names != 2 || (file->ha_table_flags() & HA_FILE_BASED)) + if (!file->needs_lower_case_filenames()) return path; for (i= 0; i <= mysql_tmpdir_list.max; i++) @@ -4145,7 +4484,9 @@ void handler::print_error(int error, myf errflag) break; case HA_ERR_LOCK_DEADLOCK: { - String str, full_err_msg(ER_DEFAULT(ER_LOCK_DEADLOCK), system_charset_info); + String str, full_err_msg(ER_DEFAULT(ER_LOCK_DEADLOCK), + strlen(ER_DEFAULT(ER_LOCK_DEADLOCK)), + system_charset_info); get_error_message(error, &str); full_err_msg.append(str); @@ -4230,9 +4571,6 @@ void handler::print_error(int error, myf errflag) case HA_ERR_UNDO_REC_TOO_BIG: textno= ER_UNDO_RECORD_TOO_BIG; break; - case HA_ERR_TABLE_IN_FK_CHECK: - textno= ER_TABLE_IN_FK_CHECK; - break; case HA_ERR_COMMIT_ERROR: textno= ER_ERROR_DURING_COMMIT; break; @@ -4259,7 +4597,11 @@ void handler::print_error(int error, myf errflag) } } else - my_error(ER_GET_ERRNO, errflag, error, table_type()); + { + if (!temporary) + my_error(ER_GET_ERRNO, errflag, error, table_type()); + /* else no error message. 
*/ + } DBUG_VOID_RETURN; } } @@ -4577,6 +4919,7 @@ bool non_existing_table_error(int error) { return (error == ENOENT || (error == EE_DELETE && my_errno == ENOENT) || + error == EE_FILENOTFOUND || error == HA_ERR_NO_SUCH_TABLE || error == HA_ERR_UNSUPPORTED || error == ER_NO_SUCH_TABLE || @@ -4922,7 +5265,8 @@ handler::check_if_supported_inplace_alter(TABLE *altered_table, ALTER_PARTITIONED | ALTER_VIRTUAL_GCOL_EXPR | ALTER_RENAME | - ALTER_RENAME_INDEX; + ALTER_RENAME_INDEX | + ALTER_INDEX_IGNORABILITY; /* Is there at least one operation that requires copy algorithm? */ if (ha_alter_info->handler_flags & ~inplace_offline_operations) @@ -4966,18 +5310,9 @@ Alter_inplace_info::Alter_inplace_info(HA_CREATE_INFO *create_info_arg, alter_info(alter_info_arg), key_info_buffer(key_info_arg), key_count(key_count_arg), - index_drop_count(0), - index_drop_buffer(nullptr), - index_add_count(0), - index_add_buffer(nullptr), rename_keys(current_thd->mem_root), - handler_ctx(nullptr), - group_commit_ctx(nullptr), - handler_flags(0), modified_part_info(modified_part_info_arg), ignore(ignore_arg), - online(false), - unsupported_reason(nullptr), error_if_not_empty(error_non_empty) {} @@ -5547,9 +5882,9 @@ int handler::calculate_checksum() @retval 1 error */ -int ha_create_table(THD *thd, const char *path, - const char *db, const char *table_name, - HA_CREATE_INFO *create_info, LEX_CUSTRING *frm) +int ha_create_table(THD *thd, const char *path, const char *db, + const char *table_name, HA_CREATE_INFO *create_info, + LEX_CUSTRING *frm, bool skip_frm_file) { int error= 1; TABLE table; @@ -5565,8 +5900,8 @@ int ha_create_table(THD *thd, const char *path, if (frm) { - bool write_frm_now= !create_info->db_type->discover_table && - !create_info->tmp_table(); + bool write_frm_now= (!create_info->db_type->discover_table && + !create_info->tmp_table() && !skip_frm_file); share.frm_image= frm; @@ -5864,7 +6199,8 @@ static my_bool discover_existence(THD *thd, plugin_ref plugin, */ bool 
ha_table_exists(THD *thd, const LEX_CSTRING *db, - const LEX_CSTRING *table_name, + const LEX_CSTRING *table_name, LEX_CUSTRING *table_id, + LEX_CSTRING *partition_engine_name, handlerton **hton, bool *is_sequence) { handlerton *dummy; @@ -5878,17 +6214,46 @@ bool ha_table_exists(THD *thd, const LEX_CSTRING *db, if (!is_sequence) is_sequence= &dummy2; *is_sequence= 0; + if (table_id) + { + table_id->str= 0; + table_id->length= 0; + } TDC_element *element= tdc_lock_share(thd, db->str, table_name->str); if (element && element != MY_ERRPTR) { - if (hton) - *hton= element->share->db_type(); + if (!hton) + hton= &dummy; + *hton= element->share->db_type(); +#ifdef WITH_PARTITION_STORAGE_ENGINE + if (partition_engine_name && element->share->db_type() == partition_hton) + { + if (!static_cast<Partition_share *>(element->share->ha_share)-> + partition_engine_name) + { + /* Partition engine found, but table has never been opened */ + tdc_unlock_share(element); + goto retry_from_frm; + } + lex_string_set(partition_engine_name, + static_cast<Partition_share *>(element->share->ha_share)-> + partition_engine_name); + } +#endif *is_sequence= element->share->table_type == TABLE_TYPE_SEQUENCE; + if (*hton != view_pseudo_hton && element->share->tabledef_version.length && + table_id && + (table_id->str= (uchar*) + thd->memdup(element->share->tabledef_version.str, MY_UUID_SIZE))) + table_id->length= MY_UUID_SIZE; tdc_unlock_share(element); DBUG_RETURN(TRUE); } +#ifdef WITH_PARTITION_STORAGE_ENGINE +retry_from_frm: +#endif char path[FN_REFLEN + 1]; size_t path_len = build_table_filename(path, sizeof(path) - 1, db->str, table_name->str, "", 0); @@ -5901,7 +6266,9 @@ bool ha_table_exists(THD *thd, const LEX_CSTRING *db, { char engine_buf[NAME_CHAR_LEN + 1]; LEX_CSTRING engine= { engine_buf, 0 }; - Table_type type= dd_frm_type(thd, path, &engine); + Table_type type= dd_frm_type(thd, path, &engine, + partition_engine_name, + table_id); switch (type) { case TABLE_TYPE_UNKNOWN: @@ -5956,6 
+6323,10 @@ bool ha_table_exists(THD *thd, const LEX_CSTRING *db, if (hton && share) { *hton= share->db_type(); + if (table_id && share->tabledef_version.length && + (table_id->str= + (uchar*) thd->memdup(share->tabledef_version.str, MY_UUID_SIZE))) + table_id->length= MY_UUID_SIZE; tdc_release_share(share); } @@ -7599,8 +7970,8 @@ bool HA_CREATE_INFO::check_conflicting_charset_declarations(CHARSET_INFO *cs) { my_error(ER_CONFLICTING_DECLARATIONS, MYF(0), "CHARACTER SET ", default_table_charset ? - default_table_charset->csname : "DEFAULT", - "CHARACTER SET ", cs ? cs->csname : "DEFAULT"); + default_table_charset->cs_name.str : "DEFAULT", + "CHARACTER SET ", cs ? cs->cs_name.str : "DEFAULT"); return true; } return false; diff --git a/sql/handler.h b/sql/handler.h index 75cd88b8013..de08e2c2137 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -44,6 +44,7 @@ #include <mysql/psi/mysql_table.h> #include "sql_sequence.h" #include "mem_root_array.h" +#include <utility> // pair class Alter_info; class Virtual_column_info; @@ -362,7 +363,10 @@ enum chf_create_flags { */ #define HA_NON_COMPARABLE_ROWID (1ULL << 60) -#define HA_LAST_TABLE_FLAG HA_NON_COMPARABLE_ROWID +/* Implements SELECT ... FOR UPDATE SKIP LOCKED */ +#define HA_CAN_SKIP_LOCKED (1ULL << 61) + +#define HA_LAST_TABLE_FLAG HA_CAN_SKIP_LOCKED /* bits in index_flags(index_number) for what you can do with index */ @@ -549,7 +553,7 @@ enum legacy_db_type DB_TYPE_PERFORMANCE_SCHEMA=28, DB_TYPE_S3=41, DB_TYPE_ARIA=42, - DB_TYPE_TOKUDB=43, + DB_TYPE_TOKUDB=43, /* disabled in MariaDB Server 10.5, removed in 10.6 */ DB_TYPE_SEQUENCE=44, DB_TYPE_FIRST_DYNAMIC=45, DB_TYPE_DEFAULT=127 // Must be last @@ -801,6 +805,10 @@ typedef bool Log_func(THD*, TABLE*, bool, const uchar*, const uchar*); */ #define ALTER_INDEX_ORDER (1ULL << 61) +/** + Means that the ignorability of an index is changed. 
+*/ +#define ALTER_INDEX_IGNORABILITY (1ULL << 62) /* Flags set in partition_flags when altering partitions @@ -892,12 +900,13 @@ struct xid_t { if ((bqual_length= bl)) memcpy(data+gl, b, bl); } - void set(ulonglong xid) + // Populate server_id if it's specified, otherwise use the current server_id + void set(ulonglong xid, decltype(::server_id) trx_server_id= server_id) { my_xid tmp; formatID= 1; set(MYSQL_XID_PREFIX_LEN, 0, MYSQL_XID_PREFIX); - memcpy(data+MYSQL_XID_PREFIX_LEN, &server_id, sizeof(server_id)); + memcpy(data+MYSQL_XID_PREFIX_LEN, &trx_server_id, sizeof(trx_server_id)); tmp= xid; memcpy(data+MYSQL_XID_OFFSET, &tmp, sizeof(tmp)); gtrid_length=MYSQL_XID_GTRID_LEN; @@ -923,6 +932,12 @@ struct xid_t { !memcmp(data, MYSQL_XID_PREFIX, MYSQL_XID_PREFIX_LEN) ? quick_get_my_xid() : 0; } + decltype(::server_id) get_trx_server_id() + { + decltype(::server_id) trx_server_id; + memcpy(&trx_server_id, data+MYSQL_XID_PREFIX_LEN, sizeof(trx_server_id)); + return trx_server_id; + } uint length() { return static_cast<uint>(sizeof(formatID)) + key_length(); @@ -939,6 +954,48 @@ struct xid_t { }; typedef struct xid_t XID; +/* + Enumerates a sequence in the order of + their creation that is in the top-down order of the index file. + Ranges from zero through MAX_binlog_id. + Not confuse the value with the binlog file numerical suffix, + neither with the binlog file line in the binlog index file. +*/ +typedef uint Binlog_file_id; +const Binlog_file_id MAX_binlog_id= UINT_MAX; +const my_off_t MAX_off_t = (~(my_off_t) 0); +/* + Compound binlog-id and byte offset of transaction's first event + in a sequence (e.g the recovery sequence) of binlog files. + Binlog_offset(0,0) is the minimum value to mean + the first byte of the first binlog file. 
+*/ +typedef std::pair<Binlog_file_id, my_off_t> Binlog_offset; + +/* binlog-based recovery transaction descriptor */ +struct xid_recovery_member +{ + my_xid xid; + uint in_engine_prepare; // number of engines that have xid prepared + bool decided_to_commit; + /* + Semisync recovery binlog offset. It's initialized with the maximum + unreachable offset. The max value will remain for any transaction + not found in binlog to yield its rollback decision as it's guaranteed + to be within a truncated tail part of the binlog. + */ + Binlog_offset binlog_coord; + XID *full_xid; // needed by wsrep or past it recovery + decltype(::server_id) server_id; // server id of orginal server + + xid_recovery_member(my_xid xid_arg, uint prepare_arg, bool decided_arg, + XID *full_xid_arg, decltype(::server_id) server_id_arg) + : xid(xid_arg), in_engine_prepare(prepare_arg), + decided_to_commit(decided_arg), + binlog_coord(Binlog_offset(MAX_binlog_id, MAX_off_t)), + full_xid(full_xid_arg), server_id(server_id_arg) {}; +}; + /* for recover() handlerton call */ #define MIN_XID_LIST_SIZE 128 #define MAX_XID_LIST_SIZE (1024*128) @@ -1093,6 +1150,8 @@ typedef bool (stat_print_fn)(THD *thd, const char *type, size_t type_len, enum ha_stat_type { HA_ENGINE_STATUS, HA_ENGINE_LOGS, HA_ENGINE_MUTEX }; extern MYSQL_PLUGIN_IMPORT st_plugin_int *hton2plugin[MAX_HA]; +#define view_pseudo_hton ((handlerton *)1) + /* Definitions for engine-specific table/field/index options in the CREATE TABLE. @@ -1475,6 +1534,37 @@ struct handlerton THD *victim_thd, my_bool signal); int (*set_checkpoint)(handlerton *hton, const XID* xid); int (*get_checkpoint)(handlerton *hton, XID* xid); + /** + Check if the version of the table matches the version in the .frm + file. + + This is mainly used to verify in recovery to check if an inplace + ALTER TABLE succeded. + Storage engines that does not support inplace alter table does not + have to implement this function. 
+ + @param hton handlerton + @param path Path for table + @param version The unique id that is stored in the .frm file for + CREATE and updated for each ALTER TABLE (but not for + simple renames). + This is the ID used for the final table. + @param create_id The value returned from handler->table_version() for + the original table (before ALTER TABLE). + + @retval 0 If id matches or table is newer than create_id (depending + on what version check the engine supports. This means that + The (inplace) alter table did succeed. + @retval # > 0 Alter table did not succeed. + + Related to handler::discover_check_version(). + */ + int (*check_version)(handlerton *hton, const char *path, + const LEX_CUSTRING *version, ulonglong create_id); + + /* Called for all storage handlers after ddl recovery is done */ + void (*signal_ddl_recovery_done)(handlerton *hton); + /* Optional clauses in the CREATE/ALTER TABLE */ @@ -1656,6 +1746,8 @@ struct handlerton }; +extern const char *hton_no_exts[]; + static inline LEX_CSTRING *hton_name(const handlerton *hton) { return &(hton2plugin[hton->slot]->name); @@ -1750,6 +1842,14 @@ handlerton *ha_default_tmp_handlerton(THD *thd); */ #define HTON_REQUIRES_CLOSE_AFTER_TRUNCATE (1 << 18) +/* Truncate requires that all other handlers are closed */ +#define HTON_TRUNCATE_REQUIRES_EXCLUSIVE_USE (1 << 19) +/* + Used by mysql_inplace_alter_table() to decide if we should call + hton->notify_tabledef_changed() before commit (MyRocks) or after (InnoDB). 
+*/ +#define HTON_REQUIRES_NOTIFY_TABLEDEF_CHANGED_AFTER_COMMIT (1 << 20) + class Ha_trx_info; struct THD_TRANS @@ -2137,9 +2237,11 @@ struct Table_scope_and_contents_source_pod_st // For trivial members { CHARSET_INFO *alter_table_convert_to_charset; LEX_CUSTRING tabledef_version; + LEX_CUSTRING org_tabledef_version; /* version of dropped table */ LEX_CSTRING connect_string; LEX_CSTRING comment; LEX_CSTRING alias; + LEX_CSTRING org_storage_engine_name, new_storage_engine_name; const char *password, *tablespace; const char *data_file_name, *index_file_name; ulonglong max_rows,min_rows; @@ -2331,6 +2433,26 @@ struct Table_specification_st: public HA_CREATE_INFO, /** + Structure describing changes to an index to be caused by ALTER TABLE. +*/ + +struct KEY_PAIR +{ + /** + Pointer to KEY object describing old version of index in + TABLE::key_info array for TABLE instance representing old + version of table. + */ + KEY *old_key; + /** + Pointer to KEY object describing new version of index in + Alter_inplace_info::key_info_buffer array. + */ + KEY *new_key; +}; + + +/** In-place alter handler context. This is a superclass intended to be subclassed by individual handlers @@ -2412,22 +2534,27 @@ public: uint key_count; /** Size of index_drop_buffer array. */ - uint index_drop_count; + uint index_drop_count= 0; /** Array of pointers to KEYs to be dropped belonging to the TABLE instance for the old version of the table. */ - KEY **index_drop_buffer; + KEY **index_drop_buffer= nullptr; /** Size of index_add_buffer array. */ - uint index_add_count; + uint index_add_count= 0; /** Array of indexes into key_info_buffer for KEYs to be added, sorted in increasing order. */ - uint *index_add_buffer; + uint *index_add_buffer= nullptr; + + KEY_PAIR *index_altered_ignorability_buffer= nullptr; + + /** Size of index_altered_ignorability_buffer array. */ + uint index_altered_ignorability_count= 0; /** Old and new index names. Used for index rename. 
@@ -2458,7 +2585,7 @@ public: @see inplace_alter_handler_ctx for information about object lifecycle. */ - inplace_alter_handler_ctx *handler_ctx; + inplace_alter_handler_ctx *handler_ctx= nullptr; /** If the table uses several handlers, like ha_partition uses one handler @@ -2470,13 +2597,13 @@ public: @see inplace_alter_handler_ctx for information about object lifecycle. */ - inplace_alter_handler_ctx **group_commit_ctx; + inplace_alter_handler_ctx **group_commit_ctx= nullptr; /** Flags describing in detail which operations the storage engine is to execute. Flags are defined in sql_alter.h */ - alter_table_operations handler_flags; + alter_table_operations handler_flags= 0; /* Alter operations involving parititons are strored here */ ulong partition_flags; @@ -2487,13 +2614,24 @@ public: with partitions to be dropped or changed marked as such + all partitions to be added in the new version of table marked as such. */ - partition_info *modified_part_info; + partition_info * const modified_part_info; /** true for ALTER IGNORE TABLE ... */ const bool ignore; /** true for online operation (LOCK=NONE) */ - bool online; + bool online= false; + + /** + When ha_commit_inplace_alter_table() is called the the engine can + set this to a function to be called after the ddl log + is committed. + */ + typedef void (inplace_alter_table_commit_callback)(void *); + inplace_alter_table_commit_callback *inplace_alter_table_committed= nullptr; + + /* This will be used as the argument to the above function when called */ + void *inplace_alter_table_committed_argument= nullptr; /** which ALGORITHM and LOCK are supported by the storage engine */ enum_alter_inplace_result inplace_supported; @@ -2510,10 +2648,10 @@ public: Please set to a properly localized string, for example using my_get_err_msg(), so that the error message as a whole is localized. 
*/ - const char *unsupported_reason; + const char *unsupported_reason= nullptr; /** true when InnoDB should abort the alter when table is not empty */ - bool error_if_not_empty; + const bool error_if_not_empty; /** True when DDL should avoid downgrading the MDL */ bool mdl_exclusive_after_prepare= false; @@ -2540,6 +2678,18 @@ public: */ void report_unsupported_error(const char *not_supported, const char *try_instead) const; + void add_altered_index_ignorability(KEY *old_key, KEY *new_key) + { + KEY_PAIR *key_pair= index_altered_ignorability_buffer + + index_altered_ignorability_count++; + key_pair->old_key= old_key; + key_pair->new_key= new_key; + DBUG_PRINT("info", ("index had ignorability altered: %i to %i", + old_key->is_ignored, + new_key->is_ignored)); + } + + }; @@ -2556,6 +2706,7 @@ typedef struct st_key_create_information directly by the user (set by the parser). */ bool check_for_duplicate_indexes; + bool is_ignored; } KEY_CREATE_INFO; @@ -3959,6 +4110,15 @@ public: { return 0; } virtual int extra_opt(enum ha_extra_function operation, ulong arg) { return extra(operation); } + /* + Table version id for the the table. This should change for each + sucessfull ALTER TABLE. + This is used by the handlerton->check_version() to ask the engine + if the table definition has been updated. + Storage engines that does not support inplace alter table does not + have to support this call. 
+ */ + virtual ulonglong table_version() const { return 0; } /** In an UPDATE or DELETE, if the row under the cursor was locked by another @@ -4077,7 +4237,9 @@ public: { return; } /* prepare InnoDB for HANDLER */ virtual void free_foreign_key_create_info(char* str) {} /** The following can be called without an open handler */ - const char *table_type() const { return hton_name(ht)->str; } + virtual const char *table_type() const { return hton_name(ht)->str; } + /* The following is same as table_table(), except for partition engine */ + virtual const char *real_table_type() const { return hton_name(ht)->str; } const char **bas_ext() const { return ht->tablefile_extensions; } virtual int get_default_no_partitions(HA_CREATE_INFO *create_info) @@ -4981,6 +5143,7 @@ public: /* XXX to be removed, see ha_partition::partition_ht() */ virtual handlerton *partition_ht() const { return ht; } + virtual bool partition_engine() { return 0;} inline int ha_write_tmp_row(uchar *buf); inline int ha_delete_tmp_row(uchar *buf); inline int ha_update_tmp_row(const uchar * old_data, uchar * new_data); @@ -5029,6 +5192,18 @@ public: const KEY_PART_INFO &old_part, const KEY_PART_INFO &new_part) const; + +/* + If lower_case_table_names == 2 (case-preserving but case-insensitive + file system) and the storage is not HA_FILE_BASED, we need to provide + a lowercase file name for the engine. 
+*/ + inline bool needs_lower_case_filenames() + { + return (lower_case_table_names == 2 && !(ha_table_flags() & HA_FILE_BASED)); + } + + bool log_not_redoable_operation(const char *operation); protected: Handler_share *get_ha_share_ptr(); void set_ha_share_ptr(Handler_share *arg_ha_share); @@ -5043,7 +5218,7 @@ bool key_uses_partial_cols(TABLE_SHARE *table, uint keyno); /* Some extern variables used with handlers */ -extern const char *ha_row_type[]; +extern const LEX_CSTRING ha_row_type[]; extern MYSQL_PLUGIN_IMPORT const char *tx_isolation_names[]; extern MYSQL_PLUGIN_IMPORT const char *binlog_format_names[]; extern TYPELIB tx_isolation_typelib; @@ -5071,7 +5246,8 @@ static inline enum legacy_db_type ha_legacy_type(const handlerton *db_type) static inline const char *ha_resolve_storage_engine_name(const handlerton *db_type) { - return db_type == NULL ? "UNKNOWN" : hton_name(db_type)->str; + return (db_type == NULL ? "UNKNOWN" : + db_type == view_pseudo_hton ? "VIEW" : hton_name(db_type)->str); } static inline bool ha_check_storage_engine_flag(const handlerton *db_type, uint32 flag) @@ -5084,8 +5260,6 @@ static inline bool ha_storage_engine_is_enabled(const handlerton *db_type) return db_type && db_type->create; } -#define view_pseudo_hton ((handlerton *)1) - /* basic stuff */ int ha_init_errors(void); int ha_init(void); @@ -5097,13 +5271,14 @@ TYPELIB *ha_known_exts(void); int ha_panic(enum ha_panic_function flag); void ha_close_connection(THD* thd); void ha_kill_query(THD* thd, enum thd_kill_levels level); +void ha_signal_ddl_recovery_done(); bool ha_flush_logs(); -void ha_drop_database(char* path); +void ha_drop_database(const char* path); void ha_checkpoint_state(bool disable); void ha_commit_checkpoint_request(void *cookie, void (*pre_hook)(void *)); -int ha_create_table(THD *thd, const char *path, - const char *db, const char *table_name, - HA_CREATE_INFO *create_info, LEX_CUSTRING *frm); +int ha_create_table(THD *thd, const char *path, const char *db, + 
const char *table_name, HA_CREATE_INFO *create_info, + LEX_CUSTRING *frm, bool skip_frm_file); int ha_delete_table(THD *thd, handlerton *db_type, const char *path, const LEX_CSTRING *db, const LEX_CSTRING *alias, bool generate_warning); @@ -5154,6 +5329,8 @@ int ha_discover_table_names(THD *thd, LEX_CSTRING *db, MY_DIR *dirp, Discovered_table_list *result, bool reusable); bool ha_table_exists(THD *thd, const LEX_CSTRING *db, const LEX_CSTRING *table_name, + LEX_CUSTRING *table_version= 0, + LEX_CSTRING *partition_engine_name= 0, handlerton **hton= 0, bool *is_sequence= 0); bool ha_check_if_updates_are_ignored(THD *thd, handlerton *hton, const char *op); @@ -5173,7 +5350,8 @@ int ha_commit_one_phase(THD *thd, bool all); int ha_commit_trans(THD *thd, bool all); int ha_rollback_trans(THD *thd, bool all); int ha_prepare(THD *thd); -int ha_recover(HASH *commit_list); +int ha_recover(HASH *commit_list, MEM_ROOT *mem_root= NULL); +uint ha_recover_complete(HASH *commit_list, Binlog_offset *coord= NULL); /* transactions: these functions never call handlerton functions directly */ int ha_enable_transaction(THD *thd, bool on); @@ -5301,4 +5479,8 @@ int del_global_index_stat(THD *thd, TABLE* table, KEY* key_info); int del_global_table_stat(THD *thd, const LEX_CSTRING *db, const LEX_CSTRING *table); uint ha_count_rw_all(THD *thd, Ha_trx_info **ptr_ha_info); bool non_existing_table_error(int error); +uint ha_count_rw_2pc(THD *thd, bool all); +uint ha_check_and_coalesce_trx_read_only(THD *thd, Ha_trx_info *ha_list, + bool all); + #endif /* HANDLER_INCLUDED */ diff --git a/sql/hash_filo.cc b/sql/hash_filo.cc index b359bd95786..085c12f15da 100644 --- a/sql/hash_filo.cc +++ b/sql/hash_filo.cc @@ -27,7 +27,7 @@ #include "sql_priv.h" #include "hash_filo.h" -#ifdef __WIN__ +#ifdef _WIN32 // Remove linker warning 4221 about empty file namespace { char dummy; }; -#endif // __WIN__ +#endif // _WIN32 diff --git a/sql/hostname.cc b/sql/hostname.cc index edf31c11081..7b07ab620a6 100644 --- 
a/sql/hostname.cc +++ b/sql/hostname.cc @@ -28,7 +28,7 @@ #include "sql_priv.h" #include "unireg.h" // SPECIAL_NO_HOST_CACHE #include "hostname.h" -#ifndef __WIN__ +#ifndef _WIN32 #include <netdb.h> // getservbyname, servent #endif #include "hash_filo.h" @@ -40,12 +40,12 @@ #ifdef __cplusplus extern "C" { // Because of SCO 3.2V4.2 #endif -#if !defined( __WIN__) +#if !defined( _WIN32) #ifdef HAVE_SYS_UN_H #include <sys/un.h> #endif #include <sys/utsname.h> -#endif // __WIN__ +#endif // _WIN32 #ifdef __cplusplus } #endif @@ -554,6 +554,13 @@ int ip_to_hostname(struct sockaddr_storage *ip_storage, } ); + DBUG_EXECUTE_IF("getnameinfo_fake_long_host", + { + strcpy(hostname_buffer, "host5678901_345678902_345678903_345678904_345678905_345678906_345678907_345678908_345678909_345678910_345678911_345678912_345678913_345678914_345678915_345678916_345678917_345678918_345678919_345678920_345678921_345678922_345678923_345678924_345678925_345"); + err_code= 0; + } + ); + /* =========================================================================== DEBUG code only (end) diff --git a/sql/item.cc b/sql/item.cc index 1a55317754e..4f5b207c9e5 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -56,7 +56,8 @@ const char *item_empty_name=""; const char *item_used_name= "\0"; static int save_field_in_field(Field *, bool *, Field *, bool); - +const Item_bool_static Item_false("FALSE", 0); +const Item_bool_static Item_true("TRUE", 1); /** Compare two Items for List<Item>::add_unique() @@ -337,7 +338,7 @@ my_decimal *Item::val_decimal_from_real(my_decimal *decimal_value) my_decimal *Item::val_decimal_from_int(my_decimal *decimal_value) { - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); longlong nr= val_int(); if (null_value) return 0; @@ -410,14 +411,13 @@ int Item::save_str_value_in_field(Field *field, String *result) Item::Item(THD *thd): - is_expensive_cache(-1), rsize(0), name(null_clex_str), orig_name(0), - common_flags(IS_AUTO_GENERATED_NAME) + name(null_clex_str), orig_name(0), 
is_expensive_cache(-1) { DBUG_ASSERT(thd); - marker= 0; - maybe_null= null_value= with_window_func= with_field= false; - in_rollup= 0; - with_param= 0; + base_flags= item_base_t::FIXED; + with_flags= item_with_t::NONE; + null_value= 0; + marker= MARKER_UNUSED; /* Initially this item is not attached to any JOIN_TAB. */ join_tab_idx= MAX_TABLES; @@ -439,6 +439,21 @@ Item::Item(THD *thd): } } +/* + This is only used for static const items +*/ + +Item::Item(): + name(null_clex_str), orig_name(0), is_expensive_cache(-1) +{ + DBUG_ASSERT(my_progname == NULL); // before main() + base_flags= item_base_t::FIXED; + with_flags= item_with_t::NONE; + null_value= 0; + marker= MARKER_UNUSED; + join_tab_idx= MAX_TABLES; +} + const TABLE_SHARE *Item::field_table_or_null() { @@ -458,20 +473,15 @@ const TABLE_SHARE *Item::field_table_or_null() */ Item::Item(THD *thd, Item *item): Type_all_attributes(*item), - join_tab_idx(item->join_tab_idx), - is_expensive_cache(-1), - rsize(0), str_value(item->str_value), name(item->name), orig_name(item->orig_name), + base_flags(item->base_flags & ~item_base_t::FIXED), + with_flags(item->with_flags), marker(item->marker), - maybe_null(item->maybe_null), - in_rollup(item->in_rollup), null_value(item->null_value), - with_param(item->with_param), - with_window_func(item->with_window_func), - with_field(item->with_field), - common_flags(item->common_flags) + is_expensive_cache(-1), + join_tab_idx(item->join_tab_idx) { next= thd->free_list; // Put in free list thd->free_list= this; @@ -492,7 +502,7 @@ void Item::print_parenthesised(String *str, enum_query_type query_type, void Item::print(String *str, enum_query_type query_type) { - str->append(full_name()); + str->append(full_name_cstring()); } @@ -516,7 +526,7 @@ void Item::print_value(String *str) String *ptr, tmp(buff,sizeof(buff),str->charset()); ptr= val_str(&tmp); if (!ptr) - str->append("NULL"); + str->append(NULL_clex_str); else { switch (cmp_type()) { @@ -540,7 +550,7 @@ void Item::cleanup() 
{ DBUG_ENTER("Item::cleanup"); DBUG_PRINT("enter", ("this: %p", this)); - marker= 0; + marker= MARKER_UNUSED; join_tab_idx= MAX_TABLES; if (orig_name) { @@ -559,7 +569,7 @@ void Item::cleanup() bool Item::cleanup_processor(void *arg) { - if (is_fixed()) + if (fixed()) cleanup(); return FALSE; } @@ -640,8 +650,9 @@ Item_ident::Item_ident(THD *thd, Name_resolution_context *context_arg, orig_field_name(field_name_arg), context(context_arg), db_name(db_name_arg), table_name(table_name_arg), field_name(field_name_arg), - alias_name_used(FALSE), cached_field_index(NO_CACHED_FIELD_INDEX), - cached_table(0), depended_from(0), can_be_depended(TRUE) + cached_table(NULL), depended_from(NULL), + cached_field_index(NO_CACHED_FIELD_INDEX), + can_be_depended(TRUE), alias_name_used(FALSE) { name= field_name_arg; } @@ -656,8 +667,9 @@ Item_ident::Item_ident(THD *thd, TABLE_LIST *view_arg, context(&view_arg->view->first_select_lex()->context), db_name(null_clex_str), table_name(view_arg->alias), field_name(field_name_arg), - alias_name_used(FALSE), cached_field_index(NO_CACHED_FIELD_INDEX), - cached_table(NULL), depended_from(NULL), can_be_depended(TRUE) + cached_table(NULL), depended_from(NULL), + cached_field_index(NO_CACHED_FIELD_INDEX), + can_be_depended(TRUE), alias_name_used(FALSE) { name= field_name_arg; } @@ -676,17 +688,17 @@ Item_ident::Item_ident(THD *thd, Item_ident *item) db_name(item->db_name), table_name(item->table_name), field_name(item->field_name), - alias_name_used(item->alias_name_used), - cached_field_index(item->cached_field_index), cached_table(item->cached_table), depended_from(item->depended_from), - can_be_depended(item->can_be_depended) + cached_field_index(item->cached_field_index), + can_be_depended(item->can_be_depended), + alias_name_used(item->alias_name_used) {} void Item_ident::cleanup() { DBUG_ENTER("Item_ident::cleanup"); - bool was_fixed= fixed; + bool was_fixed= fixed(); Item_result_field::cleanup(); db_name= orig_db_name; table_name= 
orig_table_name; @@ -980,7 +992,7 @@ bool Item::check_cols(uint c) } -bool Item::check_type_or_binary(const char *opname, +bool Item::check_type_or_binary(const LEX_CSTRING &opname, const Type_handler *expect) const { const Type_handler *handler= type_handler(); @@ -989,111 +1001,111 @@ bool Item::check_type_or_binary(const char *opname, collation.collation == &my_charset_bin)) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_general_purpose_string(const char *opname) const +bool Item::check_type_general_purpose_string(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->is_general_purpose_string_type()) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_traditional_scalar(const char *opname) const +bool Item::check_type_traditional_scalar(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->is_traditional_scalar_type()) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_can_return_int(const char *opname) const +bool Item::check_type_can_return_int(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->can_return_int()) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_can_return_decimal(const char *opname) const +bool Item::check_type_can_return_decimal(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->can_return_decimal()) return false; 
my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_can_return_real(const char *opname) const +bool Item::check_type_can_return_real(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->can_return_real()) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_can_return_date(const char *opname) const +bool Item::check_type_can_return_date(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->can_return_date()) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_can_return_time(const char *opname) const +bool Item::check_type_can_return_time(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->can_return_time()) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_can_return_str(const char *opname) const +bool Item::check_type_can_return_str(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->can_return_str()) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Item::check_type_can_return_text(const char *opname) const +bool Item::check_type_can_return_text(const LEX_CSTRING &opname) const { const Type_handler *handler= type_handler(); if (handler->can_return_text()) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + 
handler->name().ptr(), opname.str); return true; } -bool Item::check_type_scalar(const char *opname) const +bool Item::check_type_scalar(const LEX_CSTRING &opname) const { /* fixed==true usually means than the Item has an initialized @@ -1104,7 +1116,7 @@ bool Item::check_type_scalar(const char *opname) const This hack in Item_outer_ref should probably be refactored eventually. Discuss with Sanja. */ - DBUG_ASSERT(is_fixed() || type() == REF_ITEM); + DBUG_ASSERT(fixed() || type() == REF_ITEM); const Type_handler *handler= type_handler(); if (handler->is_scalar_type()) return false; @@ -1220,7 +1232,7 @@ void Item::set_name(THD *thd, const char *str, size_t length, CHARSET_INFO *cs) str++; } } - if (str != str_start && !is_autogenerated_name()) + if (str != str_start && is_explicit_name()) { char buff[SAFE_NAME_LEN]; @@ -1358,7 +1370,7 @@ Item *Item::const_charset_converter(THD *thd, CHARSET_INFO *tocs, const char *func_name) { DBUG_ASSERT(const_item()); - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); StringBuffer<64>tmp; String *s= val_str(&tmp); MEM_ROOT *mem_root= thd->mem_root; @@ -1495,13 +1507,18 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions) { int res; TABLE *table= field->table; + THD *thd= table->in_use; + enum_check_fields org_count_cuted_fields= thd->count_cuted_fields; MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); Use_relaxed_field_copy urfc(table->in_use); res= save_in_field(field, no_conversions); + + thd->count_cuted_fields= org_count_cuted_fields; dbug_tmp_restore_column_map(&table->write_set, old_map); return res; } + #ifndef DBUG_OFF static inline void mark_unsupported_func(const char *where, const char *processor_name) @@ -1636,15 +1653,15 @@ bool Item_sp_variable::fix_fields_from_item(THD *thd, Item **, const Item *it) { m_thd= thd; /* NOTE: this must be set before any this_xxx() */ - DBUG_ASSERT(it->is_fixed()); + DBUG_ASSERT(it->fixed()); max_length= it->max_length; decimals= it->decimals; 
unsigned_flag= it->unsigned_flag; - with_param= 1; + base_flags|= item_base_t::FIXED; + with_flags|= item_with_t::SP_VAR; if (thd->lex->current_select && thd->lex->current_select->master_unit()->item) - thd->lex->current_select->master_unit()->item->with_param= 1; - fixed= 1; + thd->lex->current_select->master_unit()->item->with_flags|= item_with_t::SP_VAR; collation.set(it->collation.collation, it->collation.derivation); return FALSE; @@ -1653,7 +1670,7 @@ bool Item_sp_variable::fix_fields_from_item(THD *thd, Item **, const Item *it) double Item_sp_variable::val_real() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); Item *it= this_item(); double ret= it->val_real(); null_value= it->null_value; @@ -1663,7 +1680,7 @@ double Item_sp_variable::val_real() longlong Item_sp_variable::val_int() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); Item *it= this_item(); longlong ret= it->val_int(); null_value= it->null_value; @@ -1673,7 +1690,7 @@ longlong Item_sp_variable::val_int() String *Item_sp_variable::val_str(String *sp) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); Item *it= this_item(); String *res= it->val_str(sp); @@ -1715,7 +1732,7 @@ bool Item_sp_variable::val_native(THD *thd, Native *to) my_decimal *Item_sp_variable::val_decimal(my_decimal *decimal_value) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); Item *it= this_item(); my_decimal *val= it->val_decimal(decimal_value); null_value= it->null_value; @@ -1725,7 +1742,7 @@ my_decimal *Item_sp_variable::val_decimal(my_decimal *decimal_value) bool Item_sp_variable::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); Item *it= this_item(); bool val= it->get_date(thd, ltime, fuzzydate); null_value= it->null_value; @@ -1766,7 +1783,7 @@ Item_splocal::Item_splocal(THD *thd, m_var_idx(sp_var_idx), m_type(handler == &type_handler_row ? 
ROW_ITEM : CONST_ITEM) { - maybe_null= TRUE; + set_maybe_null(); } @@ -1784,7 +1801,7 @@ Item_field *Item_splocal::get_variable(sp_rcontext *ctx) const bool Item_splocal::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(!fixed); + DBUG_ASSERT(fixed() == 0); Item *item= get_variable(thd->spcont); set_handler(item->type_handler()); return fix_fields_from_item(thd, ref, item); @@ -1795,7 +1812,7 @@ Item * Item_splocal::this_item() { DBUG_ASSERT(m_sp == m_thd->spcont->m_sp); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return get_variable(m_thd->spcont); } @@ -1803,7 +1820,7 @@ const Item * Item_splocal::this_item() const { DBUG_ASSERT(m_sp == m_thd->spcont->m_sp); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return get_variable(m_thd->spcont); } @@ -1812,7 +1829,7 @@ Item ** Item_splocal::this_item_addr(THD *thd, Item **) { DBUG_ASSERT(m_sp == thd->spcont->m_sp); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return get_rcontext(thd->spcont)->get_variable_addr(m_var_idx); } @@ -1912,7 +1929,7 @@ bool Item_splocal::check_cols(uint n) bool Item_splocal_row_field::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(!fixed); + DBUG_ASSERT(fixed() == 0); Item *item= get_variable(thd->spcont)->element_index(m_field_idx); return fix_fields_from_item(thd, ref, item); } @@ -1922,7 +1939,7 @@ Item * Item_splocal_row_field::this_item() { DBUG_ASSERT(m_sp == m_thd->spcont->m_sp); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return get_variable(m_thd->spcont)->element_index(m_field_idx); } @@ -1931,7 +1948,7 @@ const Item * Item_splocal_row_field::this_item() const { DBUG_ASSERT(m_sp == m_thd->spcont->m_sp); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return get_variable(m_thd->spcont)->element_index(m_field_idx); } @@ -1940,7 +1957,7 @@ Item ** Item_splocal_row_field::this_item_addr(THD *thd, Item **) { DBUG_ASSERT(m_sp == thd->spcont->m_sp); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return get_variable(thd->spcont)->addr(m_field_idx); } @@ -1970,7 +1987,7 @@ bool 
Item_splocal_row_field::set_value(THD *thd, sp_rcontext *ctx, Item **it) bool Item_splocal_row_field_by_name::fix_fields(THD *thd, Item **it) { - DBUG_ASSERT(!fixed); + DBUG_ASSERT(fixed() == 0); m_thd= thd; if (get_rcontext(thd->spcont)->find_row_field_by_name_or_error(&m_field_idx, m_var_idx, @@ -2003,7 +2020,7 @@ void Item_splocal_row_field_by_name::print(String *str, enum_query_type) bool Item_splocal_row_field_by_name::set_value(THD *thd, sp_rcontext *ctx, Item **it) { - DBUG_ASSERT(fixed); // Make sure m_field_idx is already set + DBUG_ASSERT(fixed()); // Make sure m_field_idx is already set return Item_splocal_row_field::set_value(thd, ctx, it); } @@ -2071,7 +2088,7 @@ void Item_case_expr::print(String *str, enum_query_type) double Item_name_const::val_real() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); double ret= value_item->val_real(); null_value= value_item->null_value; return ret; @@ -2080,7 +2097,7 @@ double Item_name_const::val_real() longlong Item_name_const::val_int() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); longlong ret= value_item->val_int(); null_value= value_item->null_value; return ret; @@ -2089,7 +2106,7 @@ longlong Item_name_const::val_int() String *Item_name_const::val_str(String *sp) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); String *ret= value_item->val_str(sp); null_value= value_item->null_value; return ret; @@ -2098,7 +2115,7 @@ String *Item_name_const::val_str(String *sp) my_decimal *Item_name_const::val_decimal(my_decimal *decimal_value) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); my_decimal *val= value_item->val_decimal(decimal_value); null_value= value_item->null_value; return val; @@ -2106,7 +2123,7 @@ my_decimal *Item_name_const::val_decimal(my_decimal *decimal_value) bool Item_name_const::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); bool rc= value_item->get_date(thd, ltime, fuzzydate); null_value= value_item->null_value; return rc; @@ -2128,7 
+2145,8 @@ Item_name_const::Item_name_const(THD *thd, Item *name_arg, Item *val): { StringBuffer<128> name_buffer; String *name_str; - Item::maybe_null= TRUE; + + set_maybe_null(); if (name_item->basic_const_item() && (name_str= name_item->val_str(&name_buffer))) // Can't have a NULL name set_name(thd, name_str); @@ -2182,7 +2200,7 @@ bool Item_name_const::fix_fields(THD *thd, Item **ref) max_length= value_item->max_length; decimals= value_item->decimals; unsigned_flag= value_item->unsigned_flag; - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -2261,7 +2279,7 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array, ((Item_sum *) this)->ref_by) return; } - else if (type() == WINDOW_FUNC_ITEM || with_window_func) + else if (type() == WINDOW_FUNC_ITEM || with_window_func()) { /* Skip the else part, window functions are very special functions: @@ -2276,10 +2294,15 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array, return; } } + else if (type() == FUNC_ITEM && + ((Item_func*)this)->functype() == Item_func::ROWNUM_FUNC) + { + } else { /* Not a SUM() function */ - if (unlikely((!with_sum_func() && !(split_flags & SPLIT_SUM_SELECT)))) + if (!with_sum_func() && !with_rownum_func() && + !(split_flags & SPLIT_SUM_SELECT)) { /* This is not a SUM function and there are no SUM functions inside. @@ -2499,8 +2522,11 @@ bool DTCollation::aggregate(const DTCollation &dt, uint flags) set(dt); return 0; } - CHARSET_INFO *bin= get_charset_by_csname(collation->csname, - MY_CS_BINSORT,MYF(0)); + THD *thd = current_thd; + myf utf8_flag= thd ? 
thd->get_utf8_flag() + : global_system_variables.old_behavior & OLD_MODE_UTF8_IS_UTF8MB3; + CHARSET_INFO *bin= get_charset_by_csname(collation->cs_name.str, + MY_CS_BINSORT,MYF(utf8_flag)); set(bin, DERIVATION_NONE); } } @@ -2513,8 +2539,8 @@ static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname) { my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), - c1.collation->name,c1.derivation_name(), - c2.collation->name,c2.derivation_name(), + c1.collation->coll_name.str, c1.derivation_name(), + c2.collation->coll_name.str, c2.derivation_name(), fname); } @@ -2524,10 +2550,10 @@ void my_coll_agg_error(DTCollation &c1, DTCollation &c2, DTCollation &c3, const char *fname) { my_error(ER_CANT_AGGREGATE_3COLLATIONS,MYF(0), - c1.collation->name,c1.derivation_name(), - c2.collation->name,c2.derivation_name(), - c3.collation->name,c3.derivation_name(), - fname); + c1.collation->coll_name.str, c1.derivation_name(), + c2.collation->coll_name.str, c2.derivation_name(), + c3.collation->coll_name.str, c3.derivation_name(), + fname); } @@ -2545,7 +2571,8 @@ void my_coll_agg_error(Item** args, uint count, const char *fname, } -bool Type_std_attributes::agg_item_collations(DTCollation &c, const char *fname, +bool Type_std_attributes::agg_item_collations(DTCollation &c, + const LEX_CSTRING &fname, Item **av, uint count, uint flags, int item_sep) { @@ -2564,7 +2591,7 @@ bool Type_std_attributes::agg_item_collations(DTCollation &c, const char *fname, unknown_cs= 1; continue; } - my_coll_agg_error(av, count, fname, item_sep); + my_coll_agg_error(av, count, fname.str, item_sep); return TRUE; } } @@ -2572,14 +2599,14 @@ bool Type_std_attributes::agg_item_collations(DTCollation &c, const char *fname, if (unknown_cs && c.derivation != DERIVATION_EXPLICIT) { - my_coll_agg_error(av, count, fname, item_sep); + my_coll_agg_error(av, count, fname.str, item_sep); return TRUE; } if ((flags & MY_COLL_DISALLOW_NONE) && c.derivation == DERIVATION_NONE) { - my_coll_agg_error(av, 
count, fname, item_sep); + my_coll_agg_error(av, count, fname.str, item_sep); return TRUE; } @@ -2593,7 +2620,7 @@ bool Type_std_attributes::agg_item_collations(DTCollation &c, const char *fname, bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll, - const char *fname, + const LEX_CSTRING &fname, Item **args, uint nargs, uint flags, int item_sep) { @@ -2633,7 +2660,7 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll, args[0]= safe_args[0]; args[item_sep]= safe_args[1]; } - my_coll_agg_error(args, nargs, fname, item_sep); + my_coll_agg_error(args, nargs, fname.str, item_sep); return TRUE; } @@ -2732,8 +2759,8 @@ Item_sp::Item_sp(THD *thd, Item_sp *item): memset(&sp_mem_root, 0, sizeof(sp_mem_root)); } -const char * -Item_sp::func_name(THD *thd, bool is_package_function) const +LEX_CSTRING +Item_sp::func_name_cstring(THD *thd, bool is_package_function) const { /* Calculate length to avoid reallocation of string for sure */ size_t len= (((m_name->m_explicit_name ? 
m_name->m_db.length : 0) + @@ -2769,7 +2796,7 @@ Item_sp::func_name(THD *thd, bool is_package_function) const } else append_identifier(thd, &qname, &m_name->m_name); - return qname.c_ptr_safe(); + return { qname.c_ptr_safe(), qname.length() }; } void @@ -3025,7 +3052,7 @@ Item_field::Item_field(THD *thd, Field *f) */ orig_table_name= table_name; orig_field_name= field_name; - with_field= 1; + with_flags|= item_with_t::FIELD; } @@ -3075,7 +3102,7 @@ Item_field::Item_field(THD *thd, Name_resolution_context *context_arg, name= orig_field_name; } set_field(f); - with_field= 1; + with_flags|= item_with_t::FIELD; } @@ -3091,7 +3118,7 @@ Item_field::Item_field(THD *thd, Name_resolution_context *context_arg, collation.set(DERIVATION_IMPLICIT); if (select && select->parsing_place != IN_HAVING) select->select_n_where_fields++; - with_field= 1; + with_flags|= item_with_t::FIELD; } /** @@ -3106,21 +3133,21 @@ Item_field::Item_field(THD *thd, Item_field *item) any_privileges(item->any_privileges) { collation.set(DERIVATION_IMPLICIT); - with_field= 1; + with_flags|= item_with_t::FIELD; } void Item_field::set_field(Field *field_par) { field=result_field=field_par; // for easy coding with fields - maybe_null=field->maybe_null(); + set_maybe_null(field->maybe_null()); Type_std_attributes::set(field_par->type_std_attributes()); table_name= Lex_cstring_strlen(*field_par->table_name); field_name= field_par->field_name; db_name= field_par->table->s->db; alias_name_used= field_par->table->alias_name_used; - fixed= 1; + base_flags|= item_base_t::FIXED; if (field->table->s->tmp_table == SYSTEM_TMP_TABLE) any_privileges= 0; } @@ -3193,36 +3220,41 @@ bool Item_field::switch_to_nullable_fields_processor(void *arg) Field **new_fields= (Field **)arg; set_field_to_new_field(&field, new_fields); set_field_to_new_field(&result_field, new_fields); - maybe_null= field && field->maybe_null(); + set_maybe_null(field && field->maybe_null()); return 0; } -const char *Item_ident::full_name() const 
+LEX_CSTRING Item_ident::full_name_cstring() const { char *tmp; + size_t length; if (!table_name.str || !field_name.str) - return field_name.str ? field_name.str : name.str ? name.str : "tmp_field"; - + { + if (field_name.str) + return field_name; + if (name.str) + return name; + return { STRING_WITH_LEN("tmp_field") }; + } if (db_name.str && db_name.str[0]) { THD *thd= current_thd; tmp=(char*) thd->alloc((uint) db_name.length+ (uint) table_name.length + (uint) field_name.length+3); - strxmov(tmp,db_name.str,".",table_name.str,".",field_name.str,NullS); + length= (strxmov(tmp,db_name.str,".",table_name.str,".",field_name.str, + NullS) - tmp); } else { - if (table_name.str[0]) - { - THD *thd= current_thd; - tmp= (char*) thd->alloc((uint) table_name.length + - field_name.length + 2); - strxmov(tmp, table_name.str, ".", field_name.str, NullS); - } - else - return field_name.str; + if (!table_name.str[0]) + return field_name; + + THD *thd= current_thd; + tmp= (char*) thd->alloc((uint) table_name.length + + field_name.length + 2); + length= (strxmov(tmp, table_name.str, ".", field_name.str, NullS) - tmp); } - return tmp; + return {tmp, length}; } void Item_ident::print(String *str, enum_query_type query_type) @@ -3305,7 +3337,7 @@ void Item_ident::print(String *str, enum_query_type query_type) /* ARGSUSED */ String *Item_field::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value=field->is_null())) return 0; str->set_charset(str_value.charset()); @@ -3315,7 +3347,7 @@ String *Item_field::val_str(String *str) double Item_field::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value=field->is_null())) return 0.0; return field->val_real(); @@ -3324,7 +3356,7 @@ double Item_field::val_real() longlong Item_field::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value=field->is_null())) return 0; return field->val_int(); @@ -3383,7 +3415,7 @@ bool Item_field::val_native_result(THD *thd, 
Native *to) longlong Item_field::val_datetime_packed(THD *thd) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value= field->is_null())) return 0; return field->val_datetime_packed(thd); @@ -3392,7 +3424,7 @@ longlong Item_field::val_datetime_packed(THD *thd) longlong Item_field::val_time_packed(THD *thd) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value= field->is_null())) return 0; return field->val_time_packed(thd); @@ -3573,11 +3605,6 @@ void Item_field::fix_after_pullout(st_select_lex *new_parent, Item **ref, /* just pull to the upper context */ ctx->outer_context= context->outer_context->outer_context; } - else - { - /* No upper context (merging Derived/VIEW where context chain ends) */ - ctx->outer_context= NULL; - } ctx->table_list= context->first_name_resolution_table; ctx->select_lex= new_parent; if (context->select_lex == NULL) @@ -3695,6 +3722,7 @@ String *Item_int::val_str(String *str) return str; } + void Item_int::print(String *str, enum_query_type query_type) { StringBuffer<LONGLONG_BUFFER_SIZE+1> buf; @@ -3845,7 +3873,7 @@ void Item_string::print(String *str, enum_query_type query_type) if (print_introducer) { str->append('_'); - str->append(collation.collation->csname); + str->append(collation.collation->cs_name); } str->append('\''); @@ -3867,7 +3895,7 @@ void Item_string::print(String *str, enum_query_type query_type) changed. */ ErrConvString tmp(str_value.ptr(), str_value.length(), &my_charset_bin); - str->append(tmp.ptr()); + str->append(tmp.lex_cstring()); } else { @@ -4053,7 +4081,7 @@ Item_param::Item_param(THD *thd, const LEX_CSTRING *name_arg, before mysql_stmt_execute(), so we assuming that it can be NULL until value is set. 
*/ - maybe_null= 1; + set_maybe_null(); } @@ -4085,7 +4113,7 @@ void Item_param::sync_clones() { Item_param *c= *c_ptr; /* Scalar-type members: */ - c->maybe_null= maybe_null; + c->copy_flags(this, item_base_t::MAYBE_NULL); c->null_value= null_value; c->Type_std_attributes::operator=(*this); c->Type_handler_hybrid_field_type::operator=(*this); @@ -4137,7 +4165,7 @@ void Item_param::set_int(longlong i, uint32 max_length_arg) collation= DTCollation_numeric(); max_length= max_length_arg; decimals= 0; - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; DBUG_VOID_RETURN; } @@ -4151,7 +4179,7 @@ void Item_param::set_double(double d) collation= DTCollation_numeric(); max_length= DBL_DIG + 8; decimals= NOT_FIXED_DEC; - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; DBUG_VOID_RETURN; } @@ -4183,7 +4211,7 @@ void Item_param::set_decimal(const char *str, ulong length) max_length= my_decimal_precision_to_length_no_truncation(value.m_decimal.precision(), decimals, unsigned_flag); - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; DBUG_VOID_RETURN; } @@ -4200,7 +4228,7 @@ void Item_param::set_decimal(const my_decimal *dv, bool unsigned_arg) unsigned_flag= unsigned_arg; max_length= my_decimal_precision_to_length(value.m_decimal.intg + decimals, decimals, unsigned_flag); - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; } @@ -4211,7 +4239,7 @@ void Item_param::fix_temporal(uint32 max_length_arg, uint decimals_arg) collation= DTCollation_numeric(); max_length= max_length_arg; decimals= decimals_arg; - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; } @@ -4221,7 +4249,7 @@ void Item_param::set_time(const MYSQL_TIME *tm, { DBUG_ASSERT(value.type_handler()->cmp_type() == TIME_RESULT); value.time= *tm; - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; fix_temporal(max_length_arg, decimals_arg); } @@ -4256,7 +4284,7 @@ void 
Item_param::set_time(MYSQL_TIME *tm, timestamp_type time_type, &str, time_type, NULL, NULL, NULL); set_zero_time(&value.time, time_type); } - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; fix_temporal(max_length_arg, tm->second_part > 0 ? TIME_SECOND_PART_DIGITS : 0); @@ -4293,7 +4321,7 @@ bool Item_param::set_str(const char *str, ulong length, state= SHORT_DATA_VALUE; collation.set(tocs, DERIVATION_COERCIBLE); max_length= length; - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; /* max_length and decimals are set after charset conversion */ /* sic: str may be not null-terminated, don't add DBUG_PRINT here */ @@ -4328,7 +4356,7 @@ bool Item_param::set_longdata(const char *str, ulong length) if (value.m_string.append(str, length, &my_charset_bin)) DBUG_RETURN(TRUE); state= LONG_DATA_VALUE; - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; DBUG_RETURN(FALSE); @@ -4392,7 +4420,7 @@ bool Item_param::set_from_item(THD *thd, Item *item) DBUG_RETURN(set_limit_clause_param(val)); } } - struct st_value tmp; + st_value tmp; if (!item->save_in_value(thd, &tmp)) { const Type_handler *h= item->type_handler(); @@ -4429,7 +4457,7 @@ void Item_param::reset() value.m_string.set_charset(&my_charset_bin); collation.set(&my_charset_bin, DERIVATION_COERCIBLE); state= NO_VALUE; - maybe_null= 1; + set_maybe_null(); null_value= 0; DBUG_VOID_RETURN; } @@ -4688,7 +4716,7 @@ const String *Item_param::value_query_val_str(THD *thd, String *str) const break; } DBUG_ASSERT(str->length() <= typelen); - buf= str->c_ptr_quick(); + buf= (char*) str->ptr(); ptr= buf + str->length(); *ptr++= '\''; ptr+= (uint) my_TIME_to_str(&value.time, ptr, decimals); @@ -4794,7 +4822,7 @@ Item *Item_param::value_clone_item(THD *thd) return 0; // Should create Item_decimal. See MDEV-11361. 
case STRING_RESULT: return new (mem_root) Item_string(thd, name, - Lex_cstring(value.m_string.c_ptr_quick(), + Lex_cstring(value.m_string.ptr(), value.m_string.length()), value.m_string.charset(), collation.derivation, @@ -4846,11 +4874,11 @@ void Item_param::print(String *str, enum_query_type query_type) } else if (state == DEFAULT_VALUE) { - str->append("default"); + str->append(STRING_WITH_LEN("default")); } else if (state == IGNORE_VALUE) { - str->append("ignore"); + str->append(STRING_WITH_LEN("ignore")); } else { @@ -4890,7 +4918,7 @@ Item_param::set_param_type_and_swap_value(Item_param *src) Type_std_attributes::set(src); set_handler(src->type_handler()); - maybe_null= src->maybe_null; + copy_flags(src, item_base_t::MAYBE_NULL); null_value= src->null_value; state= src->state; @@ -4937,7 +4965,7 @@ bool Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it) { Item *arg= *it; - struct st_value tmp; + st_value tmp; /* The OUT parameter is bound to some data type. It's important not to touch m_type_handler, @@ -5085,7 +5113,7 @@ my_decimal *Item_copy_string::val_decimal(my_decimal *decimal_value) void Item_ref_null_helper::save_val(Field *to) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); (*ref)->save_val(to); owner->was_null|= null_value= (*ref)->null_value; } @@ -5093,7 +5121,7 @@ void Item_ref_null_helper::save_val(Field *to) double Item_ref_null_helper::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double tmp= (*ref)->val_result(); owner->was_null|= null_value= (*ref)->null_value; return tmp; @@ -5102,7 +5130,7 @@ double Item_ref_null_helper::val_real() longlong Item_ref_null_helper::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong tmp= (*ref)->val_int_result(); owner->was_null|= null_value= (*ref)->null_value; return tmp; @@ -5111,7 +5139,7 @@ longlong Item_ref_null_helper::val_int() my_decimal *Item_ref_null_helper::val_decimal(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); + 
DBUG_ASSERT(fixed()); my_decimal *val= (*ref)->val_decimal_result(decimal_value); owner->was_null|= null_value= (*ref)->null_value; return val; @@ -5120,7 +5148,7 @@ my_decimal *Item_ref_null_helper::val_decimal(my_decimal *decimal_value) bool Item_ref_null_helper::val_bool() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); bool val= (*ref)->val_bool_result(); owner->was_null|= null_value= (*ref)->null_value; return val; @@ -5129,7 +5157,7 @@ bool Item_ref_null_helper::val_bool() String* Item_ref_null_helper::val_str(String* s) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String* tmp= (*ref)->str_result(s); owner->was_null|= null_value= (*ref)->null_value; return tmp; @@ -5316,7 +5344,7 @@ static Item** find_field_in_group_list(Item *find_item, ORDER *group_list) /* SELECT list element with explicit alias */ if ((*(cur_group->item))->name.str && !table_name.str && - !(*(cur_group->item))->is_autogenerated_name() && + (*(cur_group->item))->is_explicit_name() && !lex_string_cmp(system_charset_info, &(*(cur_group->item))->name, &field_name)) { @@ -5488,7 +5516,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) ref->name.str, "forward reference in item list"); return NULL; } - DBUG_ASSERT((*select_ref)->is_fixed()); + DBUG_ASSERT((*select_ref)->fixed()); return &select->ref_pointer_array[counter]; } if (group_by_ref) @@ -5607,13 +5635,11 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) */ Name_resolution_context *last_checked_context= context; Item **ref= (Item **) not_found_item; - SELECT_LEX *current_sel= thd->lex->current_select; + SELECT_LEX *current_sel= context->select_lex; Name_resolution_context *outer_context= 0; SELECT_LEX *select= 0; - /* Currently derived tables cannot be correlated */ - if ((current_sel->master_unit()->first_select()->get_linkage() != - DERIVED_TABLE_TYPE) && - current_sel->master_unit()->outer_select()) + + if (current_sel->master_unit()->outer_select()) 
outer_context= context->outer_context; /* @@ -5652,6 +5678,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) first_name_resolution_table, outer_context-> last_name_resolution_table, + outer_context-> + ignored_tables, reference, IGNORE_EXCEPT_NON_UNIQUE, TRUE, TRUE)) != @@ -5724,7 +5752,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) set_if_bigger(thd->lex->in_sum_func->max_arg_level, select->nest_level); set_field(*from_field); - fixed= 1; + base_flags|= item_base_t::FIXED; mark_as_dependent(thd, last_checked_context->select_lex, context->select_lex, this, ((ref_type == REF_ITEM || @@ -5768,7 +5796,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) return -1; /* Some error occurred (e.g. ambiguous names). */ if (ref != not_found_item) { - DBUG_ASSERT(*ref && (*ref)->is_fixed()); + DBUG_ASSERT(*ref && (*ref)->fixed()); prev_subselect_item->used_tables_and_const_cache_join(*ref); break; } @@ -5799,6 +5827,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) find_field_in_tables(thd, this, context->first_name_resolution_table, context->last_name_resolution_table, + context->ignored_tables, reference, REPORT_ALL_ERRORS, !any_privileges, TRUE); } @@ -5810,7 +5839,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) Item_ref *rf; /* Should have been checked in resolve_ref_in_select_and_group(). */ - DBUG_ASSERT(*ref && (*ref)->is_fixed()); + DBUG_ASSERT(*ref && (*ref)->fixed()); /* Here, a subset of actions performed by Item_ref::set_properties is not enough. 
So we pass ptr to NULL into Item_[direct]_ref @@ -5845,7 +5874,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) rf is Item_ref => never substitute other items (in this case) during fix_fields() => we can use rf after fix_fields() */ - DBUG_ASSERT(!rf->fixed); // Assured by Item_ref() + DBUG_ASSERT(!rf->fixed()); // Assured by Item_ref() if (rf->fix_fields(thd, reference) || rf->check_cols(1)) return -1; @@ -5885,7 +5914,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) rf is Item_ref => never substitute other items (in this case) during fix_fields() => we can use rf after fix_fields() */ - DBUG_ASSERT(!rf->fixed); // Assured by Item_ref() + DBUG_ASSERT(!rf->fixed()); // Assured by Item_ref() if (rf->fix_fields(thd, reference) || rf->check_cols(1)) return -1; return 0; @@ -5942,10 +5971,21 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) bool Item_field::fix_fields(THD *thd, Item **reference) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); Field *from_field= (Field *)not_found_field; bool outer_fixed= false; - SELECT_LEX *select= thd->lex->current_select; + SELECT_LEX *select; + if (context) + { + select= context->select_lex; + } + else + { + // No real name resolution, used somewhere in SP + DBUG_ASSERT(field); + select= NULL; + } + if (select && select->in_tvc) { @@ -5965,6 +6005,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) if ((from_field= find_field_in_tables(thd, this, context->first_name_resolution_table, context->last_name_resolution_table, + context->ignored_tables, reference, thd->lex->use_only_table_context ? 
REPORT_ALL_ERRORS : @@ -6081,6 +6122,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) if (!thd->lex->current_select->no_wrap_view_item && thd->lex->in_sum_func && + select && thd->lex == select->parent_lex && thd->lex->in_sum_func->nest_level == select->nest_level) @@ -6146,7 +6188,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) } } #endif - fixed= 1; + base_flags|= item_base_t::FIXED; if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY && !outer_fixed && !thd->lex->in_sum_func && select && @@ -6161,7 +6203,7 @@ mark_non_agg_field: table->pos_in_table_list can be 0 when fixing partition functions or virtual fields. */ - if (fixed && (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) && + if (fixed() && (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) && field->table->pos_in_table_list) { /* @@ -6204,7 +6246,7 @@ error: bool Item_field::post_fix_fields_part_expr_processor(void *int_arg) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); if (field->vcol_info) field->vcol_info->mark_as_in_partitioning_expr(); /* @@ -6414,7 +6456,7 @@ void Item::init_make_send_field(Send_field *tmp_field, tmp_field->org_col_name= empty_clex_str; tmp_field->table_name= empty_clex_str; tmp_field->col_name= name; - tmp_field->flags= (maybe_null ? 0 : NOT_NULL_FLAG) | + tmp_field->flags= (maybe_null() ? 0 : NOT_NULL_FLAG) | (my_binary_compare(charset_for_protocol()) ? 
BINARY_FLAG : 0); tmp_field->set_handler(h); @@ -6467,7 +6509,7 @@ String *Item::check_well_formed_result(String *str, bool send_error) if (send_error) { my_error(ER_INVALID_CHARACTER_STRING, MYF(0), - cs->csname, hexbuf); + cs->cs_name.str, hexbuf); return 0; } if (thd->is_strict_mode()) @@ -6481,7 +6523,7 @@ String *Item::check_well_formed_result(String *str, bool send_error) } push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_INVALID_CHARACTER_STRING, - ER_THD(thd, ER_INVALID_CHARACTER_STRING), cs->csname, + ER_THD(thd, ER_INVALID_CHARACTER_STRING), cs->cs_name.str, hexbuf); } return str; @@ -6506,7 +6548,7 @@ String_copier_for_item::copy_with_warn(CHARSET_INFO *dstcs, String *dst, ER_INVALID_CHARACTER_STRING, ER_THD(m_thd, ER_INVALID_CHARACTER_STRING), srccs == &my_charset_bin ? - dstcs->csname : srccs->csname, + dstcs->cs_name.str : srccs->cs_name.str, err.ptr()); return false; } @@ -6519,7 +6561,7 @@ String_copier_for_item::copy_with_warn(CHARSET_INFO *dstcs, String *dst, push_warning_printf(m_thd, Sql_condition::WARN_LEVEL_WARN, ER_CANNOT_CONVERT_CHARACTER, ER_THD(m_thd, ER_CANNOT_CONVERT_CHARACTER), - srccs->csname, buf, dstcs->csname); + srccs->cs_name.str, buf, dstcs->cs_name.str); return false; } return false; @@ -6621,19 +6663,16 @@ static int save_field_in_field(Field *from, bool *null_value, DBUG_RETURN(set_field_to_null_with_conversions(to, no_conversions)); } to->set_notnull(); + (*null_value)= 0; /* If we're setting the same field as the one we're reading from there's nothing to do. This can happen in 'SET x = x' type of scenarios. 
*/ if (to == from) - { - (*null_value)= 0; DBUG_RETURN(0); - } res= field_conv(to, from); - (*null_value)= 0; DBUG_RETURN(res); } @@ -6737,11 +6776,11 @@ int Item::save_str_in_field(Field *field, bool no_conversions) String *result; CHARSET_INFO *cs= collation.collation; char buff[MAX_FIELD_WIDTH]; // Alloc buffer for small columns - str_value.set_quick(buff, sizeof(buff), cs); + str_value.set_buffer_if_not_allocated(buff, sizeof(buff), cs); result=val_str(&str_value); if (null_value) { - str_value.set_quick(0, 0, cs); + str_value.set_buffer_if_not_allocated(0, 0, cs); return set_field_to_null_with_conversions(field, no_conversions); } @@ -6749,7 +6788,7 @@ int Item::save_str_in_field(Field *field, bool no_conversions) field->set_notnull(); int error= field->store(result->ptr(),result->length(),cs); - str_value.set_quick(0, 0, cs); + str_value.set_buffer_if_not_allocated(0, 0, cs); return error; } @@ -7067,7 +7106,7 @@ void Item_float::print(String *str, enum_query_type query_type) { if (presentation) { - str->append(presentation); + str->append(presentation, strlen(presentation)); return; } char buffer[20]; @@ -7113,12 +7152,12 @@ void Item_hex_hybrid::print(String *str, enum_query_type query_type) { uint32 len= MY_MIN(str_value.length(), sizeof(longlong)); const char *ptr= str_value.ptr() + str_value.length() - len; - str->append("0x"); + str->append("0x",2); str->append_hex(ptr, len); } -uint Item_hex_hybrid::decimal_precision() const +decimal_digits_t Item_hex_hybrid::decimal_precision() const { switch (max_length) {// HEX DEC case 0: // ---- --- @@ -7136,9 +7175,9 @@ uint Item_hex_hybrid::decimal_precision() const void Item_hex_string::print(String *str, enum_query_type query_type) { - str->append("X'"); + str->append("X'",2); str->append_hex(str_value.ptr(), str_value.length()); - str->append("'"); + str->append('\''); } @@ -7207,10 +7246,10 @@ void Item_bin_string::print(String *str, enum_query_type query_type) void Item_date_literal::print(String *str, 
enum_query_type query_type) { - str->append("DATE'"); + str->append(STRING_WITH_LEN("DATE'")); char buf[MAX_DATE_STRING_REP_LENGTH]; - my_date_to_str(cached_time.get_mysql_time(), buf); - str->append(buf); + int length= my_date_to_str(cached_time.get_mysql_time(), buf); + str->append(buf, length); str->append('\''); } @@ -7232,10 +7271,10 @@ bool Item_date_literal::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzyd void Item_datetime_literal::print(String *str, enum_query_type query_type) { - str->append("TIMESTAMP'"); + str->append(STRING_WITH_LEN("TIMESTAMP'")); char buf[MAX_DATE_STRING_REP_LENGTH]; - my_datetime_to_str(cached_time.get_mysql_time(), buf, decimals); - str->append(buf); + int length= my_datetime_to_str(cached_time.get_mysql_time(), buf, decimals); + str->append(buf, length); str->append('\''); } @@ -7257,10 +7296,10 @@ bool Item_datetime_literal::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fu void Item_time_literal::print(String *str, enum_query_type query_type) { - str->append("TIME'"); + str->append(STRING_WITH_LEN("TIME'")); char buf[MAX_DATE_STRING_REP_LENGTH]; - my_time_to_str(cached_time.get_mysql_time(), buf, decimals); - str->append(buf); + int length= my_time_to_str(cached_time.get_mysql_time(), buf, decimals); + str->append(buf, length); str->append('\''); } @@ -7422,7 +7461,7 @@ void Item_field::update_null_value() Item *Item_field::update_value_transformer(THD *thd, uchar *select_arg) { SELECT_LEX *select= (SELECT_LEX*)select_arg; - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); if (field->table != select->context.table_list->table && type() != Item::TRIGGER_FIELD_ITEM) @@ -7457,10 +7496,10 @@ Item *Item_field::update_value_transformer(THD *thd, uchar *select_arg) extraction of a pushable condition. The criteria of pushability of a subformula is checked by the callback function 'checker' with one parameter arg. The subformulas that are not usable are marked with - the flag NO_EXTRACTION_FL. + the flag MARKER_NO_EXTRACTION. 
@note This method is called before any call of build_pushable_cond. - The flag NO_EXTRACTION_FL set in a subformula allows to avoid building + The flag MARKER_NO_EXTRACTION set in a subformula allows to avoid building clones for the subformulas that are not used in the pushable condition. @note This method is called for pushdown conditions into materialized @@ -7484,14 +7523,14 @@ void Item::check_pushable_cond(Pushdown_checker checker, uchar *arg) while ((item=li++)) { item->check_pushable_cond(checker, arg); - if (item->get_extraction_flag() != NO_EXTRACTION_FL) + if (item->get_extraction_flag() != MARKER_NO_EXTRACTION) count++; else if (!and_cond) break; } if ((and_cond && count == 0) || item) { - set_extraction_flag(NO_EXTRACTION_FL); + set_extraction_flag(MARKER_NO_EXTRACTION); if (and_cond) li.rewind(); while ((item= li++)) @@ -7499,7 +7538,7 @@ void Item::check_pushable_cond(Pushdown_checker checker, uchar *arg) } } else if (!((this->*checker) (arg))) - set_extraction_flag(NO_EXTRACTION_FL); + set_extraction_flag(MARKER_NO_EXTRACTION); } @@ -7516,7 +7555,7 @@ void Item::check_pushable_cond(Pushdown_checker checker, uchar *arg) @details This method finds out what condition that can be pushed down can be extracted from this condition. If such condition C exists the - method builds the item for it. The method uses the flag NO_EXTRACTION_FL + method builds the item for it. The method uses the flag MARKER_NO_EXTRACTION set by the preliminary call of the method check_pushable_cond() to figure out whether a subformula is pushable or not. 
In the case when this item is a multiple equality a checker method is @@ -7551,7 +7590,7 @@ Item *Item::build_pushable_cond(THD *thd, bool is_multiple_equality= type() == Item::FUNC_ITEM && ((Item_func*) this)->functype() == Item_func::MULT_EQUAL_FUNC; - if (get_extraction_flag() == NO_EXTRACTION_FL) + if (get_extraction_flag() == MARKER_NO_EXTRACTION) return 0; if (type() == Item::COND_ITEM) @@ -7573,7 +7612,7 @@ Item *Item::build_pushable_cond(THD *thd, while ((item=li++)) { - if (item->get_extraction_flag() == NO_EXTRACTION_FL) + if (item->get_extraction_flag() == MARKER_NO_EXTRACTION) { if (!cond_and) return 0; @@ -7629,7 +7668,7 @@ Item *Item::build_pushable_cond(THD *thd, return 0; return new_cond; } - else if (get_extraction_flag() != NO_EXTRACTION_FL) + else if (get_extraction_flag() != MARKER_NO_EXTRACTION) return build_clone(thd); return 0; } @@ -7680,7 +7719,7 @@ Item *Item_field::derived_field_transformer_for_having(THD *thd, uchar *arg) return this; Item *item= get_field_item_for_having(thd, this, sel); if (item) - item->marker|= SUBSTITUTION_FL; + item->marker|= MARKER_SUBSTITUTION; return item; } @@ -7688,12 +7727,13 @@ Item *Item_field::derived_field_transformer_for_having(THD *thd, uchar *arg) Item *Item_direct_view_ref::derived_field_transformer_for_having(THD *thd, uchar *arg) { - if ((*ref)->marker & SUBSTITUTION_FL) + st_select_lex *sel= (st_select_lex *)arg; + context= &sel->context; + if ((*ref)->marker & MARKER_SUBSTITUTION) { - this->marker|= SUBSTITUTION_FL; + this->marker|= MARKER_SUBSTITUTION; return this; } - st_select_lex *sel= (st_select_lex *)arg; table_map tab_map= sel->master_unit()->derived->table->map; if ((item_equal && !(item_equal->used_tables() & tab_map)) || !item_equal) @@ -7746,7 +7786,7 @@ Item *Item_field::derived_field_transformer_for_where(THD *thd, uchar *arg) { Item *producing_clone= producing_item->build_clone(thd); if (producing_clone) - producing_clone->marker|= SUBSTITUTION_FL; + producing_clone->marker|= 
MARKER_SUBSTITUTION; return producing_clone; } return this; @@ -7755,7 +7795,7 @@ Item *Item_field::derived_field_transformer_for_where(THD *thd, uchar *arg) Item *Item_direct_view_ref::derived_field_transformer_for_where(THD *thd, uchar *arg) { - if ((*ref)->marker & SUBSTITUTION_FL) + if ((*ref)->marker & MARKER_SUBSTITUTION) return (*ref); if (item_equal) { @@ -7777,7 +7817,7 @@ Item *Item_field::grouping_field_transformer_for_where(THD *thd, uchar *arg) Item *producing_clone= gr_field->corresponding_item->build_clone(thd); if (producing_clone) - producing_clone->marker|= SUBSTITUTION_FL; + producing_clone->marker|= MARKER_SUBSTITUTION; return producing_clone; } return this; @@ -7788,9 +7828,9 @@ Item * Item_direct_view_ref::grouping_field_transformer_for_where(THD *thd, uchar *arg) { - if ((*ref)->marker & SUBSTITUTION_FL) + if ((*ref)->marker & MARKER_SUBSTITUTION) { - this->marker|= SUBSTITUTION_FL; + this->marker|= MARKER_SUBSTITUTION; return this; } if (!item_equal) @@ -7834,7 +7874,7 @@ Item_ref::Item_ref(THD *thd, Name_resolution_context *context_arg, /* This constructor used to create some internals references over fixed items */ - if ((set_properties_only= (ref && *ref && (*ref)->is_fixed()))) + if ((set_properties_only= (ref && *ref && (*ref)->fixed()))) set_properties(); } @@ -7883,7 +7923,7 @@ Item_ref::Item_ref(THD *thd, TABLE_LIST *view_arg, Item **item, /* This constructor is used to create some internal references over fixed items */ - if ((set_properties_only= (ref && *ref && (*ref)->is_fixed()))) + if ((set_properties_only= (ref && *ref && (*ref)->fixed()))) set_properties(); } @@ -7955,8 +7995,8 @@ Item_ref::Item_ref(THD *thd, TABLE_LIST *view_arg, Item **item, bool Item_ref::fix_fields(THD *thd, Item **reference) { enum_parsing_place place= NO_MATTER; - DBUG_ASSERT(fixed == 0); - SELECT_LEX *current_sel= thd->lex->current_select; + DBUG_ASSERT(fixed() == 0); + SELECT_LEX *current_sel= context->select_lex; if (set_properties_only) { @@ -8007,7 
+8047,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) goto error; /* Some error occurred (e.g. ambiguous names). */ if (ref != not_found_item) { - DBUG_ASSERT(*ref && (*ref)->is_fixed()); + DBUG_ASSERT(*ref && (*ref)->fixed()); prev_subselect_item->used_tables_and_const_cache_join(*ref); break; } @@ -8044,6 +8084,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) first_name_resolution_table, outer_context-> last_name_resolution_table, + outer_context->ignored_tables, reference, IGNORE_EXCEPT_NON_UNIQUE, TRUE, TRUE); @@ -8131,7 +8172,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) goto error; } /* Should be checked in resolve_ref_in_select_and_group(). */ - DBUG_ASSERT(*ref && (*ref)->is_fixed()); + DBUG_ASSERT(*ref && (*ref)->fixed()); mark_as_dependent(thd, last_checked_context->select_lex, context->select_lex, this, this, false); /* @@ -8160,7 +8201,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) (((*ref)->with_sum_func() && name.str && !(current_sel->get_linkage() != GLOBAL_OPTIONS_TYPE && current_sel->having_fix_field)) || - !(*ref)->is_fixed())) + !(*ref)->fixed())) { my_error(ER_ILLEGAL_REFERENCE, MYF(0), name.str, ((*ref)->with_sum_func() ? @@ -8184,16 +8225,14 @@ error: void Item_ref::set_properties() { Type_std_attributes::set(*ref); - maybe_null= (*ref)->maybe_null; /* We have to remember if we refer to a sum function, to ensure that split_sum_func() doesn't try to change the reference. 
*/ - copy_with_sum_func(*ref); - with_param= (*ref)->with_param; - with_window_func= (*ref)->with_window_func; - with_field= (*ref)->with_field; - fixed= 1; + with_flags= (*ref)->with_flags; + base_flags|= (item_base_t::FIXED | + ((*ref)->base_flags & item_base_t::MAYBE_NULL)); + if (alias_name_used) return; if ((*ref)->type() == FIELD_ITEM) @@ -8428,7 +8467,7 @@ void Item_ref::save_val(Field *to) double Item_ref::val_real() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); double tmp=(*ref)->val_result(); null_value=(*ref)->null_value; return tmp; @@ -8437,7 +8476,7 @@ double Item_ref::val_real() longlong Item_ref::val_int() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); longlong tmp=(*ref)->val_int_result(); null_value=(*ref)->null_value; return tmp; @@ -8446,7 +8485,7 @@ longlong Item_ref::val_int() bool Item_ref::val_bool() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); bool tmp= (*ref)->val_bool_result(); null_value= (*ref)->null_value; return tmp; @@ -8455,7 +8494,7 @@ bool Item_ref::val_bool() String *Item_ref::val_str(String* tmp) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); tmp=(*ref)->str_result(tmp); null_value=(*ref)->null_value; return tmp; @@ -8464,7 +8503,7 @@ String *Item_ref::val_str(String* tmp) bool Item_ref::is_null() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); bool tmp=(*ref)->is_null_result(); null_value=(*ref)->null_value; return tmp; @@ -8485,7 +8524,7 @@ bool Item_ref::val_native(THD *thd, Native *to) longlong Item_ref::val_datetime_packed(THD *thd) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); longlong tmp= (*ref)->val_datetime_packed_result(thd); null_value= (*ref)->null_value; return tmp; @@ -8494,7 +8533,7 @@ longlong Item_ref::val_datetime_packed(THD *thd) longlong Item_ref::val_time_packed(THD *thd) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); longlong tmp= (*ref)->val_time_packed_result(thd); null_value= (*ref)->null_value; return tmp; @@ -8668,20 +8707,17 @@ Item_cache_wrapper::~Item_cache_wrapper() 
Item_cache_wrapper::Item_cache_wrapper(THD *thd, Item *item_arg): Item_result_field(thd), orig_item(item_arg), expr_cache(NULL), expr_value(NULL) { - DBUG_ASSERT(orig_item->is_fixed()); + DBUG_ASSERT(orig_item->fixed()); Type_std_attributes::set(orig_item); - maybe_null= orig_item->maybe_null; - copy_with_sum_func(orig_item); - with_param= orig_item->with_param; - with_field= orig_item->with_field; + + base_flags|= (item_base_t::FIXED | + (orig_item->base_flags & item_base_t::MAYBE_NULL)); + with_flags|= orig_item->with_flags; + name= item_arg->name; - m_with_subquery= orig_item->with_subquery(); - with_window_func= orig_item->with_window_func; if ((expr_value= orig_item->get_cache(thd))) expr_value->setup(thd, orig_item); - - fixed= 1; } @@ -8708,7 +8744,7 @@ void Item_cache_wrapper::print(String *str, enum_query_type query_type) return; } - str->append("<expr_cache>"); + str->append(STRING_WITH_LEN("<expr_cache>")); if (expr_cache) { init_on_demand(); @@ -8731,8 +8767,8 @@ void Item_cache_wrapper::print(String *str, enum_query_type query_type) bool Item_cache_wrapper::fix_fields(THD *thd __attribute__((unused)), Item **it __attribute__((unused))) { - DBUG_ASSERT(orig_item->is_fixed()); - DBUG_ASSERT(fixed); + DBUG_ASSERT(orig_item->fixed()); + DBUG_ASSERT(fixed()); return FALSE; } @@ -9114,7 +9150,7 @@ bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference) /* view fild reference must be defined */ DBUG_ASSERT(*ref); /* (*ref)->check_cols() will be made in Item_direct_ref::fix_fields */ - if ((*ref)->is_fixed()) + if ((*ref)->fixed()) { Item *ref_item= (*ref)->real_item(); if (ref_item->type() == Item::FIELD_ITEM) @@ -9136,7 +9172,7 @@ bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference) if (Item_direct_ref::fix_fields(thd, reference)) return TRUE; if (view->table && view->table->maybe_null) - maybe_null= TRUE; + set_maybe_null(); set_null_ref_table(); return FALSE; } @@ -9382,7 +9418,7 @@ 
Item_field::excl_dep_on_grouping_fields(st_select_lex *sel) bool Item_direct_view_ref::excl_dep_on_table(table_map tab_map) { table_map used= used_tables(); - if (used & OUTER_REF_TABLE_BIT) + if (used & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)) return false; if (!(used & ~tab_map)) return true; @@ -9480,7 +9516,7 @@ bool Item_default_value::fix_fields(THD *thd, Item **items) Item *real_arg; Item_field *field_arg; Field *def_field; - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); DBUG_ASSERT(arg); /* @@ -9737,9 +9773,9 @@ bool Item_insert_value::eq(const Item *item, bool binary_cmp) const bool Item_insert_value::fix_fields(THD *thd, Item **items) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); /* We should only check that arg is in first table */ - if (!arg->is_fixed()) + if (!arg->fixed()) { bool res; TABLE_LIST *orig_next_table= context->last_name_resolution_table; @@ -9893,11 +9929,11 @@ bool Item_trigger_field::fix_fields(THD *thd, Item **items) parsing! So we have little to do in fix_fields. :) */ - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); /* Set field. */ - if (likely(field_idx != (uint)-1)) + if (likely(field_idx != NO_CACHED_FIELD_INDEX)) { #ifndef NO_EMBEDDED_ACCESS_CHECKS /* @@ -9921,7 +9957,7 @@ bool Item_trigger_field::fix_fields(THD *thd, Item **items) field= (row_version == OLD_ROW) ? 
triggers->old_field[field_idx] : triggers->new_field[field_idx]; set_field(field); - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -10060,7 +10096,7 @@ void Item_cache::print(String *str, enum_query_type query_type) void Item_cache::set_null() { - if (maybe_null) + if (maybe_null()) { null_value= TRUE; value_cached= TRUE; @@ -10255,13 +10291,13 @@ bool Item_cache_timestamp::val_native(THD *thd, Native *to) null_value= true; return true; } - return null_value= to->copy(m_native); + return (null_value= to->copy(m_native)); } Datetime Item_cache_timestamp::to_datetime(THD *thd) { - DBUG_ASSERT(is_fixed() == 1); + DBUG_ASSERT(fixed() == 1); if (!has_value()) { null_value= true; @@ -10700,20 +10736,6 @@ void view_error_processor(THD *thd, void *data) ((TABLE_LIST *)data)->hide_view_error(thd); } -/** - Name resolution context with resolution in only one table -*/ - -Name_resolution_context::Name_resolution_context(TABLE_LIST *table): - outer_context(0), table_list(0), select_lex(0), - error_processor_data(0), - security_ctx(0) -{ - resolve_in_select_list= FALSE; - error_processor= &dummy_error_processor; - // resolve only in this table - first_name_resolution_table= last_name_resolution_table= table; -} st_select_lex *Item_ident::get_depended_from() const { @@ -10745,7 +10767,7 @@ void Item_direct_view_ref::update_used_tables() table_map Item_direct_view_ref::used_tables() const { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); if (get_depended_from()) return OUTER_REF_TABLE_BIT; @@ -10905,7 +10927,7 @@ Item *Item_direct_ref_to_item::safe_charset_converter(THD *thd, void Item_direct_ref_to_item::change_item(THD *thd, Item *i) { - DBUG_ASSERT(i->is_fixed()); + DBUG_ASSERT(i->fixed()); thd->change_item_tree(ref, i); set_properties(); } @@ -10913,7 +10935,7 @@ void Item_direct_ref_to_item::change_item(THD *thd, Item *i) bool Item::cleanup_excluding_immutables_processor (void *arg) { - if (!(get_extraction_flag() == IMMUTABLE_FL)) + if 
(!(get_extraction_flag() == MARKER_IMMUTABLE)) return cleanup_processor(arg); else { @@ -10921,3 +10943,18 @@ bool Item::cleanup_excluding_immutables_processor (void *arg) return false; } } + + +bool ignored_list_includes_table(ignored_tables_list_t list, TABLE_LIST *tbl) +{ + if (!list) + return false; + List_iterator<TABLE_LIST> it(*list); + TABLE_LIST *list_tbl; + while ((list_tbl = it++)) + { + if (list_tbl == tbl) + return true; + } + return false; +} diff --git a/sql/item.h b/sql/item.h index 1273de44edb..18a3c1340c0 100644 --- a/sql/item.h +++ b/sql/item.h @@ -42,8 +42,13 @@ C_MODE_START 3. change type of m_decmal to struct st_my_decimal and move inside the union 4. move the definition to some file in /include */ -struct st_value +class st_value { +public: + st_value() {} + st_value(char *buffer, size_t buffer_size) : + m_string(buffer, buffer_size, &my_charset_bin) + {} enum enum_dynamic_column_type m_type; union { @@ -61,6 +66,10 @@ C_MODE_END class Value: public st_value { public: + Value(char *buffer, size_t buffer_size) : st_value(buffer, buffer_size) + {} + Value() + {} bool is_null() const { return m_type == DYN_COL_NULL; } bool is_longlong() const { @@ -77,14 +86,12 @@ template<size_t buffer_size> class ValueBuffer: public Value { char buffer[buffer_size]; - void reset_buffer() - { - m_string.set(buffer, buffer_size, &my_charset_bin); - } public: - ValueBuffer() + ValueBuffer(): Value(buffer, buffer_size) + {} + void reset_buffer() { - reset_buffer(); + m_string.set_buffer_if_not_allocated(buffer, buffer_size, &my_charset_bin); } }; @@ -111,7 +118,6 @@ struct KEY_FIELD; struct SARGABLE_PARAM; class RANGE_OPT_PARAM; class SEL_TREE; -class With_sum_func_cache; enum precedence { LOWEST_PRECEDENCE, @@ -147,14 +153,36 @@ bool mark_unsupported_function(const char *w1, const char *w2, #define SPLIT_SUM_SKIP_REGISTERED 1 /* Skip registered funcs */ #define SPLIT_SUM_SELECT 2 /* SELECT item; Split all parts */ +/* + Values for item->marker for cond items in 
the WHERE clause as used + by the optimizer. -#define NO_EXTRACTION_FL (1 << 6) -#define FULL_EXTRACTION_FL (1 << 7) -#define DELETION_FL (1 << 8) -#define IMMUTABLE_FL (1 << 9) -#define SUBSTITUTION_FL (1 << 10) -#define EXTRACTION_MASK \ - (NO_EXTRACTION_FL | FULL_EXTRACTION_FL | DELETION_FL | IMMUTABLE_FL) + Note that for Item_fields, the marker contains + 'select->cur_pos_in_select_list +*/ +/* Used to check GROUP BY list in the MODE_ONLY_FULL_GROUP_BY mode */ +#define MARKER_UNDEF_POS -1 +#define MARKER_UNUSED 0 +#define MARKER_CHANGE_COND 1 +#define MARKER_PROCESSED 2 +#define MARKER_CHECK_ON_READ 3 +#define MARKER_NULL_KEY 4 +#define MARKER_FOUND_IN_ORDER 6 + +/* Used as bits in marker by Item::check_pushable_cond() */ +#define MARKER_NO_EXTRACTION (1 << 6) +#define MARKER_FULL_EXTRACTION (1 << 7) +#define MARKER_DELETION (1 << 8) +#define MARKER_IMMUTABLE (1 << 9) +#define MARKER_SUBSTITUTION (1 << 10) + +/* Used as bits in marker by window functions */ +#define MARKER_SORTORDER_CHANGE (1 << 11) +#define MARKER_PARTITION_CHANGE (1 << 12) +#define MARKER_FRAME_CHANGE (1 << 13) +#define MARKER_EXTRACTION_MASK \ + (MARKER_NO_EXTRACTION | MARKER_FULL_EXTRACTION | MARKER_DELETION | \ + MARKER_IMMUTABLE) extern const char *item_empty_name; @@ -162,6 +190,9 @@ void dummy_error_processor(THD *thd, void *data); void view_error_processor(THD *thd, void *data); +typedef List<TABLE_LIST>* ignored_tables_list_t; +bool ignored_list_includes_table(ignored_tables_list_t list, TABLE_LIST *tbl); + /* Instances of Name_resolution_context store the information necessary for name resolution of Items and other context analysis of a query made in @@ -183,7 +214,7 @@ struct Name_resolution_context: Sql_alloc The name resolution context to search in when an Item cannot be resolved in this context (the context of an outer select) */ - Name_resolution_context *outer_context; + Name_resolution_context *outer_context= nullptr; /* List of tables used to resolve the items of this 
context. Usually these @@ -193,7 +224,7 @@ struct Name_resolution_context: Sql_alloc statements we have to change this member dynamically to ensure correct name resolution of different parts of the statement. */ - TABLE_LIST *table_list; + TABLE_LIST *table_list= nullptr; /* In most cases the two table references below replace 'table_list' above for the purpose of name resolution. The first and last name resolution @@ -201,57 +232,65 @@ struct Name_resolution_context: Sql_alloc join tree in a FROM clause. This is needed for NATURAL JOIN, JOIN ... USING and JOIN ... ON. */ - TABLE_LIST *first_name_resolution_table; + TABLE_LIST *first_name_resolution_table= nullptr; /* Last table to search in the list of leaf table references that begins with first_name_resolution_table. */ - TABLE_LIST *last_name_resolution_table; + TABLE_LIST *last_name_resolution_table= nullptr; /* Cache first_name_resolution_table in setup_natural_join_row_types */ - TABLE_LIST *natural_join_first_table; + TABLE_LIST *natural_join_first_table= nullptr; /* SELECT_LEX item belong to, in case of merged VIEW it can differ from SELECT_LEX where item was created, so we can't use table_list/field_list from there */ - st_select_lex *select_lex; + st_select_lex *select_lex= nullptr; /* Processor of errors caused during Item name resolving, now used only to hide underlying tables in errors about views (i.e. it substitute some errors for views) */ - void (*error_processor)(THD *, void *); - void *error_processor_data; + void (*error_processor)(THD *, void *)= &dummy_error_processor; + void *error_processor_data= nullptr; /* When TRUE items are resolved in this context both against the SELECT list and this->table_list. If FALSE, items are resolved only against this->table_list. */ - bool resolve_in_select_list; + bool resolve_in_select_list= false; + + /* + Bitmap of tables that should be ignored when doing name resolution. + Normally it is {0}. Non-zero values are used by table functions. 
+ */ + ignored_tables_list_t ignored_tables= nullptr; /* Security context of this name resolution context. It's used for views and is non-zero only if the view is defined with SQL SECURITY DEFINER. */ - Security_context *security_ctx; + Security_context *security_ctx= nullptr; - Name_resolution_context() - :outer_context(0), table_list(0), select_lex(0), - error_processor_data(0), - security_ctx(0) - {} + Name_resolution_context() = default; - Name_resolution_context(TABLE_LIST *table); + /** + Name resolution context with resolution in only one table + */ + Name_resolution_context(TABLE_LIST *table) : + first_name_resolution_table(table), last_name_resolution_table(table) + {} void init() { resolve_in_select_list= FALSE; error_processor= &dummy_error_processor; - first_name_resolution_table= NULL; - last_name_resolution_table= NULL; + ignored_tables= nullptr; + first_name_resolution_table= nullptr; + last_name_resolution_table= nullptr; } void resolve_in_table_list_only(TABLE_LIST *tables) @@ -625,13 +664,6 @@ class st_select_lex_unit; class Item_func_not; class Item_splocal; -/* Item::common_flags */ -/* Indicates that name of this Item autogenerated or set by user */ -#define IS_AUTO_GENERATED_NAME 1 -/* Indicates that this item is in CYCLE clause of WITH */ -#define IS_IN_WITH_CYCLE 2 - - /** String_copier that sends Item specific warnings. */ @@ -726,20 +758,98 @@ public: #define STOP_PTR ((void *) 1) -class Item: public Value_source, - public Type_all_attributes +/* Base flags (including IN) for an item */ + +typedef uint8 item_flags_t; + +enum class item_base_t : item_flags_t { - /** - The index in the JOIN::join_tab array of the JOIN_TAB this Item is attached - to. Items are attached (or 'pushed') to JOIN_TABs during optimization by the - make_cond_for_table procedure. During query execution, this item is - evaluated when the join loop reaches the corresponding JOIN_TAB. 
+ NONE= 0, +#define ITEM_FLAGS_MAYBE_NULL_SHIFT 0 // Must match MAYBE_NULL + MAYBE_NULL= (1<<0), // May be NULL. + IN_ROLLUP= (1<<1), // Appears in GROUP BY list + // of a query with ROLLUP. + FIXED= (1<<2), // Was fixed with fix_fields(). + IS_EXPLICIT_NAME= (1<<3), // The name of this Item was set by the user + // (or was auto generated otherwise) + IS_IN_WITH_CYCLE= (1<<4) // This item is in CYCLE clause + // of WITH. +}; - If the value of join_tab_idx >= MAX_TABLES, this means that there is no - corresponding JOIN_TAB. - */ - uint join_tab_idx; +/* Flags that tells us what kind of items the item contains */ + +enum class item_with_t : item_flags_t +{ + NONE= 0, + SP_VAR= (1<<0), // If Item contains a stored procedure variable + WINDOW_FUNC= (1<<1), // If item contains a window func + FIELD= (1<<2), // If any item except Item_sum contains a field. + SUM_FUNC= (1<<3), // If item contains a sum func + SUBQUERY= (1<<4), // If item containts a sub query + ROWNUM_FUNC= (1<<5) +}; + + +/* Make operations in item_base_t and item_with_t work like 'int' */ +static inline item_base_t operator&(const item_base_t a, const item_base_t b) +{ + return (item_base_t) (((item_flags_t) a) & ((item_flags_t) b)); +} + +static inline item_base_t & operator&=(item_base_t &a, item_base_t b) +{ + a= (item_base_t) (((item_flags_t) a) & (item_flags_t) b); + return a; +} + +static inline item_base_t operator|(const item_base_t a, const item_base_t b) +{ + return (item_base_t) (((item_flags_t) a) | ((item_flags_t) b)); +} + +static inline item_base_t & operator|=(item_base_t &a, item_base_t b) +{ + a= (item_base_t) (((item_flags_t) a) | (item_flags_t) b); + return a; +} + +static inline item_base_t operator~(const item_base_t a) +{ + return (item_base_t) ~(item_flags_t) a; +} + +static inline item_with_t operator&(const item_with_t a, const item_with_t b) +{ + return (item_with_t) (((item_flags_t) a) & ((item_flags_t) b)); +} + +static inline item_with_t & operator&=(item_with_t &a, 
item_with_t b) +{ + a= (item_with_t) (((item_flags_t) a) & (item_flags_t) b); + return a; +} + +static inline item_with_t operator|(const item_with_t a, const item_with_t b) +{ + return (item_with_t) (((item_flags_t) a) | ((item_flags_t) b)); +} + +static inline item_with_t & operator|=(item_with_t &a, item_with_t b) +{ + a= (item_with_t) (((item_flags_t) a) | (item_flags_t) b); + return a; +} + +static inline item_with_t operator~(const item_with_t a) +{ + return (item_with_t) ~(item_flags_t) a; +} + + +class Item :public Value_source, + public Type_all_attributes +{ static void *operator new(size_t size); public: @@ -772,22 +882,9 @@ public: EXPR_CACHE_ITEM}; enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE }; - enum traverse_order { POSTFIX, PREFIX }; - - /* Cache of the result of is_expensive(). */ - int8 is_expensive_cache; - - /* Reuse size, only used by SP local variable assignment, otherwise 0 */ - uint rsize; protected: - /* - str_values's main purpose is to be used to cache the value in - save_in_field - */ - String str_value; - SEL_TREE *get_mm_tree_for_const(RANGE_OPT_PARAM *param); /** @@ -797,7 +894,7 @@ protected: { const Type_handler *h= type_handler(); return h->make_and_init_table_field(root, &name, - Record_addr(maybe_null), + Record_addr(maybe_null()), *this, table); } /** @@ -812,10 +909,10 @@ protected: */ Field *tmp_table_field_from_field_type(MEM_ROOT *root, TABLE *table) { - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); const Type_handler *h= type_handler()->type_handler_for_tmp_table(this); return h->make_and_init_table_field(root, &name, - Record_addr(maybe_null), + Record_addr(maybe_null()), *this, table); } /** @@ -852,21 +949,21 @@ protected: /* Helper methods, to get an Item value from another Item */ double val_real_from_item(Item *item) { - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); double value= item->val_real(); null_value= item->null_value; return value; } longlong val_int_from_item(Item *item) { - 
DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); longlong value= item->val_int(); null_value= item->null_value; return value; } String *val_str_from_item(Item *item, String *str) { - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); String *res= item->val_str(str); if (res) res->set_charset(collation.collation); @@ -876,7 +973,7 @@ protected: } bool val_native_from_item(THD *thd, Item *item, Native *to) { - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); null_value= item->val_native(thd, to); DBUG_ASSERT(null_value == item->null_value); return null_value; @@ -890,12 +987,12 @@ protected: bool val_native_with_conversion_from_item(THD *thd, Item *item, Native *to, const Type_handler *handler) { - DBUG_ASSERT(is_fixed()); - return null_value= item->val_native_with_conversion(thd, to, handler); + DBUG_ASSERT(fixed()); + return (null_value= item->val_native_with_conversion(thd, to, handler)); } my_decimal *val_decimal_from_item(Item *item, my_decimal *decimal_value) { - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); my_decimal *value= item->val_decimal(decimal_value); if ((null_value= item->null_value)) value= NULL; @@ -919,9 +1016,8 @@ public: const MY_LOCALE *locale_from_val_str(); - LEX_CSTRING name; /* Name of item */ - /* Original item name (if it was renamed)*/ - const char *orig_name; + /* All variables for the Item class */ + /** Intrusive list pointer for free list. If not null, points to the next Item on some Query_arena's free list. For instance, stored procedures @@ -930,20 +1026,82 @@ public: @see Query_arena::free_list */ Item *next; - int marker; - bool maybe_null; /* If item may be null */ - bool in_rollup; /* If used in GROUP BY list - of a query with ROLLUP */ - bool null_value; /* if item is null */ - bool with_param; /* True if contains an SP parameter */ - bool with_window_func; /* True if item contains a window func */ + + /* + str_values's main purpose is to be used to cache the value in + save_in_field. 
Calling full_name() for Item_field will also use str_value. + */ + String str_value; + + LEX_CSTRING name; /* Name of item */ + /* Original item name (if it was renamed)*/ + const char *orig_name; + + /* All common bool variables for an Item is stored here */ + item_base_t base_flags; + item_with_t with_flags; + + /* Marker is used in some functions to temporary mark an item */ + int16 marker; + + /* + Tells is the val() value of the item is/was null. + This should not be part of the bit flags as it's changed a lot and also + we use pointers to it + */ + bool null_value; + /* Cache of the result of is_expensive(). */ + int8 is_expensive_cache; /** - True if any item except Item_sum contains a field. Set during parsing. + The index in the JOIN::join_tab array of the JOIN_TAB this Item + is attached to. Items are attached (or 'pushed') to JOIN_TABs + during optimization by the make_cond_for_table procedure. During + query execution, this item is evaluated when the join loop reaches + the corresponding JOIN_TAB. + + If the value of join_tab_idx >= MAX_TABLES, this means that there is no + corresponding JOIN_TAB. 
*/ - bool with_field; - uint8 common_flags; - bool is_autogenerated_name() - { return (common_flags & IS_AUTO_GENERATED_NAME); } + uint8 join_tab_idx; + + inline bool maybe_null() const + { return (bool) (base_flags & item_base_t::MAYBE_NULL); } + inline bool in_rollup() const + { return (bool) (base_flags & item_base_t::IN_ROLLUP); } + inline bool fixed() const + { return (bool) (base_flags & item_base_t::FIXED); } + inline bool is_explicit_name() const + { return (bool) (base_flags & item_base_t::IS_EXPLICIT_NAME); } + inline bool is_in_with_cycle() const + { return (bool) (base_flags & item_base_t::IS_IN_WITH_CYCLE); } + + inline bool with_sp_var() const + { return (bool) (with_flags & item_with_t::SP_VAR); } + inline bool with_window_func() const + { return (bool) (with_flags & item_with_t::WINDOW_FUNC); } + inline bool with_field() const + { return (bool) (with_flags & item_with_t::FIELD); } + inline bool with_sum_func() const + { return (bool) (with_flags & item_with_t::SUM_FUNC); } + inline bool with_subquery() const + { return (bool) (with_flags & item_with_t::SUBQUERY); } + inline bool with_rownum_func() const + { return (bool) (with_flags & item_with_t::ROWNUM_FUNC); } + inline void copy_flags(const Item *org, item_base_t mask) + { + base_flags= (item_base_t) (((item_flags_t) base_flags & + ~(item_flags_t) mask) | + ((item_flags_t) org->base_flags & + (item_flags_t) mask)); + } + inline void copy_flags(const Item *org, item_with_t mask) + { + with_flags= (item_with_t) (((item_flags_t) with_flags & + ~(item_flags_t) mask) | + ((item_flags_t) org->with_flags & + (item_flags_t) mask)); + } + // alloc & destruct is done as start of select on THD::mem_root Item(THD *thd); /* @@ -955,6 +1113,7 @@ public: optimisation changes in prepared statements */ Item(THD *thd, Item *item); + Item(); /* For const item */ virtual ~Item() { #ifdef EXTRA_DEBUG @@ -978,17 +1137,20 @@ public: void share_name_with(const Item *item) { name= item->name; - common_flags= 
static_cast<uint8> - ((common_flags & ~IS_AUTO_GENERATED_NAME) | - (item->common_flags & IS_AUTO_GENERATED_NAME)); + copy_flags(item, item_base_t::IS_EXPLICIT_NAME); } virtual void cleanup(); virtual void make_send_field(THD *thd, Send_field *field); bool fix_fields_if_needed(THD *thd, Item **ref) { - return is_fixed() ? false : fix_fields(thd, ref); + return fixed() ? false : fix_fields(thd, ref); } + + /* + fix_fields_if_needed_for_scalar() is used where we need to filter items + that can't be scalars and want to return error for it. + */ bool fix_fields_if_needed_for_scalar(THD *thd, Item **ref) { return fix_fields_if_needed(thd, ref) || check_cols(1); @@ -1002,21 +1164,20 @@ public: return fix_fields_if_needed_for_scalar(thd, ref); } /* - By default we assume that an Item is fixed by the contstructor. + By default we assume that an Item is fixed by the constructor */ virtual bool fix_fields(THD *, Item **) { /* This should not normally be called, because usually before - fix_fields() we check is_fixed() to be false. + fix_fields() we check fixed() to be false. But historically we allow fix_fields() to be called for Items who return basic_const_item()==true. */ - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); DBUG_ASSERT(basic_const_item()); return false; } - virtual bool is_fixed() const { return true; } virtual void unfix_fields() { DBUG_ASSERT(0); @@ -1031,6 +1192,12 @@ public: {}; /* + This is for items that require a fixup after the JOIN::prepare() + is done. + */ + virtual void fix_after_optimize(THD *thd) + {} + /* This method should be used in case where we are sure that we do not need complete fix_fields() procedure. 
Usually this method is used by the optimizer when it has to create a new @@ -1046,7 +1213,7 @@ public: DBUG_ASSERT(0); } - bool save_in_value(THD *thd, struct st_value *value) + bool save_in_value(THD *thd, st_value *value) { return type_handler()->Item_save_in_value(thd, this, value); } @@ -1142,9 +1309,26 @@ public: { return type_handler()->max_display_length(this); } - const TYPELIB *get_typelib() const { return NULL; } - void set_maybe_null(bool maybe_null_arg) { maybe_null= maybe_null_arg; } - void set_typelib(const TYPELIB *typelib) + const TYPELIB *get_typelib() const override { return NULL; } + /* optimized setting of maybe_null without jumps. Minimizes code size */ + inline void set_maybe_null(bool maybe_null_arg) + { + base_flags= ((item_base_t) ((base_flags & ~item_base_t::MAYBE_NULL)) | + (item_base_t) (maybe_null_arg << + ITEM_FLAGS_MAYBE_NULL_SHIFT)); + } + /* This is used a lot, so make it simpler to use */ + void set_maybe_null() + { + base_flags|= item_base_t::MAYBE_NULL; + } + /* This is used when calling Type_all_attributes::set_type_maybe_null() */ + void set_type_maybe_null(bool maybe_null_arg) override + { + set_maybe_null(maybe_null_arg); + } + + void set_typelib(const TYPELIB *typelib) override { // Non-field Items (e.g. hybrid functions) never have ENUM/SET types yet. 
DBUG_ASSERT(0); @@ -1174,7 +1358,8 @@ public: { return NON_MONOTONIC; } /* - Convert "func_arg $CMP$ const" half-interval into "FUNC(func_arg) $CMP2$ const2" + Convert "func_arg $CMP$ const" half-interval into + "FUNC(func_arg) $CMP2$ const2" SYNOPSIS val_int_endpoint() @@ -1371,7 +1556,7 @@ public: * Item_func_rollup_const */ DBUG_ASSERT(0); - return null_value= true; + return (null_value= 1); } virtual bool val_native_result(THD *thd, Native *to) { @@ -1497,6 +1682,10 @@ public: DBUG_ASSERT(!is_expensive()); return val_bool(); } + bool can_eval_in_optimize() + { + return const_item() && !is_expensive(); + } /* save_val() is method of val_* family which stores value in the given @@ -1516,7 +1705,7 @@ public: my_decimal *val_decimal_from_string(my_decimal *decimal_value); longlong val_int_from_real() { - DBUG_ASSERT(is_fixed()); + DBUG_ASSERT(fixed()); return Converter_double_to_longlong_with_warn(val_real(), false).result(); } longlong val_int_from_str(int *error); @@ -1558,7 +1747,13 @@ public: virtual Field *get_tmp_table_field() { return 0; } virtual Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table); - virtual const char *full_name() const { return name.str ? name.str : "???"; } + inline const char *full_name() const { return full_name_cstring().str; } + virtual LEX_CSTRING full_name_cstring() const + { + if (name.str) + return name; + return { STRING_WITH_LEN("???") }; + } const char *field_name_or_null() { return real_item()->type() == Item::FIELD_ITEM ? name.str : NULL; } const TABLE_SHARE *field_table_or_null(); @@ -1631,6 +1826,14 @@ public: their method implementations typically have DBUG_ASSERT(0). */ virtual bool is_evaluable_expression() const { return true; } + + /** + * Check whether the item is a parameter ('?') of stored routine. + * Default implementation returns false. Method is overridden in the class + * Item_param where it returns true. 
+ */ + virtual bool is_stored_routine_parameter() const { return false; } + bool check_is_evaluable_expression_or_error() { if (is_evaluable_expression()) @@ -1646,19 +1849,19 @@ public: inline uint float_length(uint decimals_par) const { return decimals < FLOATING_POINT_DECIMALS ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;} /* Returns total number of decimal digits */ - virtual uint decimal_precision() const + decimal_digits_t decimal_precision() const override { return type_handler()->Item_decimal_precision(this); } /* Returns the number of integer part digits only */ - inline int decimal_int_part() const - { return my_decimal_int_part(decimal_precision(), decimals); } + inline decimal_digits_t decimal_int_part() const + { return (decimal_digits_t) my_decimal_int_part(decimal_precision(), decimals); } /* Returns the number of fractional digits only. NOT_FIXED_DEC is replaced to the maximum possible number of fractional digits, taking into account the data type. */ - uint decimal_scale() const + decimal_digits_t decimal_scale() const { return type_handler()->Item_decimal_scale(this); } @@ -1747,7 +1950,6 @@ public: LOWEST_PRECEDENCE); } virtual void print(String *str, enum_query_type query_type); - class Print: public String { public: @@ -1921,7 +2123,7 @@ public: The process of compilation is assumed to go as follows: compile() - { + { if (this->*some_analyzer(...)) { compile children if any; @@ -1986,7 +2188,7 @@ public: virtual bool limit_index_condition_pushdown_processor(void *arg) { return 0; } virtual bool exists2in_processor(void *arg) { return 0; } virtual bool find_selective_predicates_list_processor(void *arg) { return 0; } - bool cleanup_is_expensive_cache_processor(void *arg) + virtual bool cleanup_is_expensive_cache_processor(void *arg) { is_expensive_cache= (int8)(-1); return 0; @@ -1994,7 +2196,7 @@ public: virtual bool set_extraction_flag_processor(void *arg) { - set_extraction_flag(*(int*)arg); + set_extraction_flag(*(int16*)arg); return 0; } @@ 
-2083,7 +2285,7 @@ public: assumes that there are no multi-byte collations amongst the partition fields. */ - virtual bool check_partition_func_processor(void *arg) { return 1;} + virtual bool check_partition_func_processor(void *arg) { return true; } virtual bool post_fix_fields_part_expr_processor(void *arg) { return 0; } virtual bool rename_fields_processor(void *arg) { return 0; } /* @@ -2118,6 +2320,7 @@ public: { return mark_unsupported_function(full_name(), arg, VCOL_IMPOSSIBLE); } + virtual bool check_handler_func_processor(void *arg) { return 0; } virtual bool check_field_expression_processor(void *arg) { return 0; } virtual bool check_func_default_processor(void *arg) { return 0; } /* @@ -2233,17 +2436,18 @@ public: virtual Item* element_index(uint i) { return this; } virtual Item** addr(uint i) { return 0; } virtual bool check_cols(uint c); - bool check_type_traditional_scalar(const char *opname) const; - bool check_type_scalar(const char *opname) const; - bool check_type_or_binary(const char *opname, const Type_handler *handler) const; - bool check_type_general_purpose_string(const char *opname) const; - bool check_type_can_return_int(const char *opname) const; - bool check_type_can_return_decimal(const char *opname) const; - bool check_type_can_return_real(const char *opname) const; - bool check_type_can_return_str(const char *opname) const; - bool check_type_can_return_text(const char *opname) const; - bool check_type_can_return_date(const char *opname) const; - bool check_type_can_return_time(const char *opname) const; + bool check_type_traditional_scalar(const LEX_CSTRING &opname) const; + bool check_type_scalar(const LEX_CSTRING &opname) const; + bool check_type_or_binary(const LEX_CSTRING &opname, + const Type_handler *handler) const; + bool check_type_general_purpose_string(const LEX_CSTRING &opname) const; + bool check_type_can_return_int(const LEX_CSTRING &opname) const; + bool check_type_can_return_decimal(const LEX_CSTRING &opname) const; + 
bool check_type_can_return_real(const LEX_CSTRING &opname) const; + bool check_type_can_return_str(const LEX_CSTRING &opname) const; + bool check_type_can_return_text(const LEX_CSTRING &opname) const; + bool check_type_can_return_date(const LEX_CSTRING &opname) const; + bool check_type_can_return_time(const LEX_CSTRING &opname) const; // It is not row => null inside is impossible virtual bool null_inside() { return 0; } // used in row subselects to get value of elements @@ -2287,6 +2491,8 @@ public: { return this; } virtual Item *in_predicate_to_in_subs_transformer(THD *thd, uchar *arg) { return this; } + virtual Item *in_predicate_to_equality_transformer(THD *thd, uchar *arg) + { return this; } virtual Item *field_transformer_for_having_pushdown(THD *thd, uchar *arg) { return this; } virtual Item *multiple_equality_transformer(THD *thd, uchar *arg) @@ -2407,17 +2613,7 @@ public: /* Return TRUE if the item points to a column of an outer-joined table. */ - virtual bool is_outer_field() const { DBUG_ASSERT(is_fixed()); return FALSE; } - - /** - Checks if this item or any of its descendents contains a subquery. - This is a replacement of the former Item::has_subquery() and - Item::with_subselect. - */ - virtual bool with_subquery() const { DBUG_ASSERT(is_fixed()); return false; } - - virtual bool with_sum_func() const { return false; } - virtual With_sum_func_cache* get_with_sum_func_cache() { return NULL; } + virtual bool is_outer_field() const { DBUG_ASSERT(fixed()); return FALSE; } Item* set_expr_cache(THD *thd); @@ -2429,7 +2625,7 @@ public: Item is attached. The number is an index is depth_first_tab() traversal order. 
*/ - virtual void set_join_tab_idx(uint join_tab_idx_arg) + virtual void set_join_tab_idx(uint8 join_tab_idx_arg) { if (join_tab_idx_arg < join_tab_idx) join_tab_idx= join_tab_idx_arg; @@ -2474,17 +2670,17 @@ public: void register_in(THD *thd); bool depends_only_on(table_map view_map) - { return marker & FULL_EXTRACTION_FL; } + { return marker & MARKER_FULL_EXTRACTION; } int get_extraction_flag() - { return marker & EXTRACTION_MASK; } - void set_extraction_flag(int flags) - { - marker &= ~EXTRACTION_MASK; - marker|= flags; + { return marker & MARKER_EXTRACTION_MASK; } + void set_extraction_flag(int16 flags) + { + marker &= ~MARKER_EXTRACTION_MASK; + marker|= flags; } void clear_extraction_flag() { - marker &= ~EXTRACTION_MASK; + marker &= ~MARKER_EXTRACTION_MASK; } void check_pushable_cond(Pushdown_checker excl_dep_func, uchar *arg); bool pushable_cond_checker_for_derived(uchar *arg) @@ -2535,7 +2731,8 @@ public: DbugStringItemTypeValue(THD *thd, const Item *item) { append('('); - append(item->type_handler()->name().ptr()); + Name Item_name= item->type_handler()->name(); + append(Item_name.ptr(), Item_name.length()); append(')'); const_cast<Item*>(item)->print(this, QT_EXPLAIN); /* Append end \0 to allow usage of c_ptr() */ @@ -2543,63 +2740,7 @@ public: str_length--; } }; -#endif - -class With_sum_func_cache -{ -protected: - bool m_with_sum_func; // True if the owner item contains a sum func -public: - With_sum_func_cache() - :m_with_sum_func(false) - { } - With_sum_func_cache(const Item *a) - :m_with_sum_func(a->with_sum_func()) - { } - With_sum_func_cache(const Item *a, const Item *b) - :m_with_sum_func(a->with_sum_func() || b->with_sum_func()) - { } - With_sum_func_cache(const Item *a, const Item *b, const Item *c) - :m_with_sum_func(a->with_sum_func() || b->with_sum_func() || - c->with_sum_func()) - { } - With_sum_func_cache(const Item *a, const Item *b, const Item *c, - const Item *d) - :m_with_sum_func(a->with_sum_func() || b->with_sum_func() || - 
c->with_sum_func() || d->with_sum_func()) - { } - With_sum_func_cache(const Item *a, const Item *b, const Item *c, - const Item *d, const Item *e) - :m_with_sum_func(a->with_sum_func() || b->with_sum_func() || - c->with_sum_func() || d->with_sum_func() || - e->with_sum_func()) - { } - void set_with_sum_func() { m_with_sum_func= true; } - void reset_with_sum_func() { m_with_sum_func= false; } - void copy_with_sum_func(const Item *item) - { - m_with_sum_func= item->with_sum_func(); - } - void join_with_sum_func(const Item *item) - { - m_with_sum_func|= item->with_sum_func(); - } -}; - - -/* - This class is a replacement for the former member Item::with_subselect. - Determines if the descendant Item is a subselect or some of - its arguments is or contains a subselect. -*/ -class With_subquery_cache -{ -protected: - bool m_with_subquery; -public: - With_subquery_cache(): m_with_subquery(false) { } - void join(const Item *item) { m_with_subquery|= item->with_subquery(); } -}; +#endif /* DBUG_OFF */ /** @@ -2774,27 +2915,30 @@ class Item_string; class Item_fixed_hybrid: public Item { public: - bool fixed; // If item was fixed with fix_fields -public: - Item_fixed_hybrid(THD *thd): Item(thd), fixed(false) - { } + Item_fixed_hybrid(THD *thd): Item(thd) + { + base_flags&= ~item_base_t::FIXED; + } Item_fixed_hybrid(THD *thd, Item_fixed_hybrid *item) - :Item(thd, item), fixed(item->fixed) - { } - bool fix_fields(THD *thd, Item **ref) + :Item(thd, item) { - DBUG_ASSERT(!fixed); - fixed= true; + base_flags|= (item->base_flags & item_base_t::FIXED); + } + bool fix_fields(THD *thd, Item **ref) override + { + DBUG_ASSERT(!fixed()); + base_flags|= item_base_t::FIXED; return false; } - void cleanup() + void cleanup() override { Item::cleanup(); - fixed= false; + base_flags&= ~item_base_t::FIXED; } - void quick_fix_field() { fixed= true; } - void unfix_fields() { fixed= false; } - bool is_fixed() const { return fixed; } + void quick_fix_field() override + { base_flags|= 
item_base_t::FIXED; } + void unfix_fields() override + { base_flags&= ~item_base_t::FIXED; } }; @@ -2843,6 +2987,7 @@ protected: fix_charset_and_length(str.charset(), dv, Metadata(&str)); } Item_basic_value(THD *thd): Item(thd) {} + Item_basic_value(): Item() {} public: Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, @@ -2872,6 +3017,7 @@ class Item_basic_constant :public Item_basic_value { public: Item_basic_constant(THD *thd): Item_basic_value(thd) {}; + Item_basic_constant(): Item_basic_value() {}; bool check_vcol_func_processor(void *arg) { return false; } const Item_const *get_item_const() const { return this; } virtual Item_basic_constant *make_string_literal_concat(THD *thd, @@ -2917,7 +3063,6 @@ public: public: bool fix_fields(THD *thd, Item **) override= 0; - double val_real() override; longlong val_int() override; String *val_str(String *sp) override; @@ -2928,9 +3073,7 @@ public: public: void make_send_field(THD *thd, Send_field *field) override; - bool const_item() const override { return true; } - Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, const Tmp_field_param *param) override @@ -3232,7 +3375,7 @@ public: } int save_in_field(Field *field, bool no_conversions) override { - return value_item->save_in_field(field, no_conversions); + return value_item->save_in_field(field, no_conversions); } bool send(Protocol *protocol, st_value *buffer) override @@ -3253,10 +3396,14 @@ class Item_literal: public Item_basic_constant public: Item_literal(THD *thd): Item_basic_constant(thd) { } + Item_literal(): Item_basic_constant() + {} Type type() const override { return CONST_ITEM; } - bool check_partition_func_processor(void *) override { return false;} + bool check_partition_func_processor(void *int_arg) override { return false;} bool const_item() const override { return true; } bool basic_const_item() const override { return true; } + bool is_expensive() override { return false; } + bool 
cleanup_is_expensive_cache_processor(void *arg) override { return 0; } }; @@ -3264,6 +3411,7 @@ class Item_num: public Item_literal { public: Item_num(THD *thd): Item_literal(thd) { collation= DTCollation_numeric(); } + Item_num(): Item_literal() { collation= DTCollation_numeric(); } Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) override; bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { @@ -3271,7 +3419,7 @@ public: } }; -#define NO_CACHED_FIELD_INDEX ((uint)(-1)) +#define NO_CACHED_FIELD_INDEX ((field_index_t) ~0U) class st_select_lex; @@ -3295,7 +3443,7 @@ public: Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, const Tmp_field_param *param) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); const Type_handler *h= type_handler()->type_handler_for_tmp_table(this); return create_tmp_field_ex_from_handler(root, table, src, param, h); } @@ -3337,17 +3485,6 @@ public: LEX_CSTRING table_name; LEX_CSTRING field_name; /* - NOTE: came from TABLE::alias_name_used and this is only a hint! - See comment for TABLE::alias_name_used. - */ - bool alias_name_used; /* true if item was resolved against alias */ - /* - Cached value of index for this field in table->field array, used by prep. - stmts for speeding up their re-execution. Holds NO_CACHED_FIELD_INDEX - if index value is not known. - */ - uint cached_field_index; - /* Cached pointer to table which contains this field, used for the same reason by prep. stmt. too in case then we have not-fully qualified field. 0 - means no cached value. @@ -3355,6 +3492,12 @@ public: TABLE_LIST *cached_table; st_select_lex *depended_from; /* + Cached value of index for this field in table->field array, used by prepared + stmts for speeding up their re-execution. Holds NO_CACHED_FIELD_INDEX + if index value is not known. 
+ */ + field_index_t cached_field_index; + /* Some Items resolved in another select should not be marked as dependency of the subquery where they are. During normal name resolution, we check this. Stored procedures and prepared statements first try to resolve an @@ -3367,12 +3510,18 @@ public: this variable. */ bool can_be_depended; + /* + NOTE: came from TABLE::alias_name_used and this is only a hint! + See comment for TABLE::alias_name_used. + */ + bool alias_name_used; /* true if item was resolved against alias */ + Item_ident(THD *thd, Name_resolution_context *context_arg, const LEX_CSTRING &db_name_arg, const LEX_CSTRING &table_name_arg, const LEX_CSTRING &field_name_arg); Item_ident(THD *thd, Item_ident *item); Item_ident(THD *thd, TABLE_LIST *view_arg, const LEX_CSTRING &field_name_arg); - const char *full_name() const override; + LEX_CSTRING full_name_cstring() const override; void cleanup() override; st_select_lex *get_depended_from() const; bool remove_dependence_processor(void * arg) override; @@ -3625,7 +3774,7 @@ public: { return get_item_copy<Item_field>(thd, this); } bool is_outer_field() const override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return field->table->pos_in_table_list->outer_join; } bool check_index_dependence(void *arg) override; @@ -3713,7 +3862,8 @@ public: Item_null(THD *thd, const char *name_par=0, CHARSET_INFO *cs= &my_charset_bin): Item_basic_constant(thd) { - maybe_null= null_value= TRUE; + set_maybe_null(); + null_value= TRUE; max_length= 0; name.str= name_par ? 
name_par : "NULL"; name.length= strlen(name.str); @@ -3740,7 +3890,7 @@ public: void print(String *str, enum_query_type) override { - str->append(STRING_WITH_LEN("NULL")); + str->append(NULL_clex_str); } Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) override; @@ -3774,7 +3924,7 @@ public: { save_in_field(result_field, no_conversions); } - bool check_partition_func_processor(void *) override { return true; } + bool check_partition_func_processor(void *int_arg) override { return true; } bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(full_name(), arg, VCOL_IMPOSSIBLE); @@ -4166,6 +4316,7 @@ public: return state == SHORT_DATA_VALUE && value.type_handler()->cmp_type() == INT_RESULT; } + bool is_stored_routine_parameter() const override { return true; } /* This method is used to make a copy of a basic constant item when propagating constants in the optimizer. The reason to create a new @@ -4188,23 +4339,18 @@ public: bool append_for_log(THD *thd, String *str) override; bool check_vcol_func_processor(void *) override { return false; } Item *get_copy(THD *) override { return nullptr; } - bool add_as_clone(THD *thd); void sync_clones(); bool register_clone(Item_param *i) { return m_clones.push_back(i); } private: void invalid_default_param() const; - bool set_value(THD *thd, sp_rcontext *ctx, Item **it) override; - void set_out_param_info(Send_field *info) override; public: const Send_field *get_out_param_info() const override; - Item_param *get_item_param() override { return this; } - void make_send_field(THD *thd, Send_field *field) override; private: @@ -4245,6 +4391,13 @@ public: name.str= str_arg; name.length= safe_strlen(name.str); unsigned_flag= flag; } + Item_int(const char *str_arg,longlong i,size_t length): + Item_num(), value(i) + { + max_length=(uint32)length; + name.str= str_arg; name.length= safe_strlen(name.str); + unsigned_flag= 1; + } Item_int(THD *thd, const char *str_arg, size_t length=64); const 
Type_handler *type_handler() const override { return type_handler_long_or_longlong(); } @@ -4261,8 +4414,8 @@ public: Item *clone_item(THD *thd) override; void print(String *str, enum_query_type query_type) override; Item *neg(THD *thd) override; - uint decimal_precision() const override - { return (uint) (max_length - MY_TEST(value < 0)); } + decimal_digits_t decimal_precision() const override + { return (decimal_digits_t) (max_length - MY_TEST(value < 0)); } Item *get_copy(THD *thd) override { return get_item_copy<Item_int>(thd, this); } }; @@ -4279,6 +4432,8 @@ public: Item_bool(THD *thd, const char *str_arg, longlong i): Item_int(thd, str_arg, i, 1) {} Item_bool(THD *thd, bool i) :Item_int(thd, (longlong) i, 1) { } + Item_bool(const char *str_arg, longlong i): + Item_int(str_arg, i, 1) {} bool is_bool_literal() const override { return true; } Item *neg_transformer(THD *thd) override; const Type_handler *type_handler() const override @@ -4295,17 +4450,30 @@ public: }; +class Item_bool_static :public Item_bool +{ +public: + Item_bool_static(const char *str_arg, longlong i): + Item_bool(str_arg, i) {}; + + void set_join_tab_idx(uint8 join_tab_idx_arg) override + { DBUG_ASSERT(0); } +}; + +extern const Item_bool_static Item_false, Item_true; + class Item_uint :public Item_int { public: Item_uint(THD *thd, const char *str_arg, size_t length); Item_uint(THD *thd, ulonglong i): Item_int(thd, i, 10) {} Item_uint(THD *thd, const char *str_arg, longlong i, uint length); - double val_real() { return ulonglong2double((ulonglong)value); } - Item *clone_item(THD *thd); - Item *neg(THD *thd); - uint decimal_precision() const { return max_length; } - Item *get_copy(THD *thd) + double val_real() override { return ulonglong2double((ulonglong)value); } + Item *clone_item(THD *thd) override; + Item *neg(THD *thd) override; + decimal_digits_t decimal_precision() const override + { return decimal_digits_t(max_length); } + Item *get_copy(THD *thd) override { return 
get_item_copy<Item_uint>(thd, this); } }; @@ -4347,9 +4515,12 @@ public: { return &type_handler_newdecimal; } longlong val_int() override { return decimal_value.to_longlong(unsigned_flag); } - double val_real() override { return decimal_value.to_double(); } - String *val_str(String *to) override { return decimal_value.to_string(to); } - my_decimal *val_decimal(my_decimal *val) override { return &decimal_value; } + double val_real() override + { return decimal_value.to_double(); } + String *val_str(String *to) override + { return decimal_value.to_string(to); } + my_decimal *val_decimal(my_decimal *val) override + { return &decimal_value; } const my_decimal *const_ptr_my_decimal() const override { return &decimal_value; } int save_in_field(Field *field, bool no_conversions) override; @@ -4360,7 +4531,8 @@ public: str->append(str_value); } Item *neg(THD *thd) override; - uint decimal_precision() const override { return decimal_value.precision(); } + decimal_digits_t decimal_precision() const override + { return decimal_value.precision(); } void set_decimal_value(my_decimal *value_par); Item *get_copy(THD *thd) override { return get_item_copy<Item_decimal>(thd, this); } @@ -4421,12 +4593,10 @@ public: uint decimal_par, uint length): Item_float(thd, NullS, val_arg, decimal_par, length), func_name(str) {} - void print(String *str, enum_query_type) override { - str->append(func_name); + str->append(func_name, strlen(func_name)); } - Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) override { return const_charset_converter(thd, tocs, true, func_name); @@ -4567,8 +4737,7 @@ public: { return Item::check_well_formed_result(&str_value, send_error); } Item_basic_constant *make_string_literal_concat(THD *thd, - const LEX_CSTRING *) - override; + const LEX_CSTRING *) override; Item *make_odbc_literal(THD *thd, const LEX_CSTRING *typestr) override; Item *get_copy(THD *thd) override @@ -4706,7 +4875,7 @@ public: { unsigned_flag=1; } - const Type_handler *type_handler() 
const + const Type_handler *type_handler() const override { const Type_handler *h= Type_handler::get_handler_by_field_type(int_field_type); @@ -4732,14 +4901,15 @@ public: { hex_string_init(thd, str, str_length); } - const Type_handler *type_handler() const { return &type_handler_varchar; } - virtual Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) + const Type_handler *type_handler() const override + { return &type_handler_varchar; } + Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) override { return const_charset_converter(thd, tocs, true); } - const String *const_ptr_string() const { return &str_value; } - String *val_str(String*) { return &str_value; } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + const String *const_ptr_string() const override { return &str_value; } + String *val_str(String*) override { return &str_value; } + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); } @@ -4759,9 +4929,9 @@ public: Item_hex_constant(thd, str, str_length) {} const Type_handler *type_handler() const override { return &type_handler_hex_hybrid; } - uint decimal_precision() const override; + decimal_digits_t decimal_precision() const override; double val_real() override - { + { return (double) (ulonglong) Item_hex_hybrid::val_int(); } longlong val_int() override @@ -4890,7 +5060,7 @@ public: collation= DTCollation_numeric(); decimals= 0; } - Item_temporal_literal(THD *thd, uint dec_arg): + Item_temporal_literal(THD *thd, decimal_digits_t dec_arg): Item_literal(thd) { collation= DTCollation_numeric(); @@ -4911,8 +5081,8 @@ protected: Date cached_time; bool update_null() { - return maybe_null && - (null_value= cached_time.check_date_with_warn(current_thd)); + return (maybe_null() && + (null_value= cached_time.check_date_with_warn(current_thd))); } public: Item_date_literal(THD *thd, const Date *ltime) @@ -4934,7 +5104,8 @@ 
public: will be checked per row, according to the execution time sql_mode. The check_date() below call should cover all cases mentioned. */ - maybe_null= cached_time.check_date(TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE); + set_maybe_null(cached_time.check_date(TIME_NO_ZERO_DATE | + TIME_NO_ZERO_IN_DATE)); } const Type_handler *type_handler() const override { return &type_handler_newdate; } @@ -4973,12 +5144,12 @@ public: /** TIME'10:10:10' */ -class Item_time_literal: public Item_temporal_literal +class Item_time_literal final: public Item_temporal_literal { protected: Time cached_time; public: - Item_time_literal(THD *thd, const Time *ltime, uint dec_arg): + Item_time_literal(THD *thd, const Time *ltime, decimal_digits_t dec_arg): Item_temporal_literal(thd, dec_arg), cached_time(*ltime) { @@ -5016,24 +5187,27 @@ public: /** TIMESTAMP'2001-01-01 10:20:30' */ + class Item_datetime_literal: public Item_temporal_literal { protected: Datetime cached_time; bool update_null() { - return maybe_null && - (null_value= cached_time.check_date_with_warn(current_thd)); + return (maybe_null() && + (null_value= cached_time.check_date_with_warn(current_thd))); } public: - Item_datetime_literal(THD *thd, const Datetime *ltime, uint dec_arg): + Item_datetime_literal(THD *thd, const Datetime *ltime, + decimal_digits_t dec_arg): Item_temporal_literal(thd, dec_arg), cached_time(*ltime) { DBUG_ASSERT(cached_time.is_valid_datetime()); max_length= MAX_DATETIME_WIDTH + (decimals ? decimals + 1 : 0); // See the comment on maybe_null in Item_date_literal - maybe_null= cached_time.check_date(TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE); + set_maybe_null(cached_time.check_date(TIME_NO_ZERO_DATE | + TIME_NO_ZERO_IN_DATE)); } const Type_handler *type_handler() const override { return &type_handler_datetime2; } @@ -5080,7 +5254,7 @@ class Item_date_literal_for_invalid_dates: public Item_date_literal WHERE date_column='2001-01-01' ... -> WHERE date_column=DATE'2001-01-01' ... 
- This is done to make the eqial field propagation code handle mixtures of + This is done to make the equal field propagation code handle mixtures of different temporal types in the same expressions easier (MDEV-8706), e.g. WHERE LENGTH(date_column)=10 AND date_column=TIME'00:00:00' @@ -5101,7 +5275,7 @@ public: Item_date_literal_for_invalid_dates(THD *thd, const Date *ltime) :Item_date_literal(thd, ltime) { - maybe_null= false; + base_flags&= ~item_base_t::MAYBE_NULL; } bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { @@ -5115,14 +5289,15 @@ public: An error-safe counterpart for Item_datetime_literal (see Item_date_literal_for_invalid_dates for comments) */ -class Item_datetime_literal_for_invalid_dates: public Item_datetime_literal +class Item_datetime_literal_for_invalid_dates final: public Item_datetime_literal { public: Item_datetime_literal_for_invalid_dates(THD *thd, - const Datetime *ltime, uint dec_arg) + const Datetime *ltime, + decimal_digits_t dec_arg) :Item_datetime_literal(thd, ltime, dec_arg) { - maybe_null= false; + base_flags&= ~item_base_t::MAYBE_NULL; } bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { @@ -5199,14 +5374,13 @@ public: */ class Item_func_or_sum: public Item_result_field, public Item_args, - public Used_tables_and_const_cache, - public With_subquery_cache + public Used_tables_and_const_cache { protected: bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems, uint flags, int item_sep) { - return Type_std_attributes::agg_arg_charsets(c, func_name(), + return Type_std_attributes::agg_arg_charsets(c, func_name_cstring(), items, nitems, flags, item_sep); } @@ -5215,7 +5389,7 @@ protected: int item_sep= 1) { return Type_std_attributes:: - agg_arg_charsets_for_string_result(c, func_name(), + agg_arg_charsets_for_string_result(c, func_name_cstring(), items, nitems, item_sep); } bool agg_arg_charsets_for_string_result_with_comparison(DTCollation &c, @@ -5224,7 +5398,7 @@ protected: int item_sep= 
1) { return Type_std_attributes:: - agg_arg_charsets_for_string_result_with_comparison(c, func_name(), + agg_arg_charsets_for_string_result_with_comparison(c, func_name_cstring(), items, nitems, item_sep); } @@ -5239,7 +5413,7 @@ protected: int item_sep= 1) { return Type_std_attributes:: - agg_arg_charsets_for_comparison(c, func_name(), items, nitems, item_sep); + agg_arg_charsets_for_comparison(c, func_name_cstring(), items, nitems, item_sep); } public: @@ -5251,16 +5425,16 @@ public: tmp.derivation == DERIVATION_NONE) { my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), - (*a)->collation.collation->name, + (*a)->collation.collation->coll_name.str, (*a)->collation.derivation_name(), - (*b)->collation.collation->name, + (*b)->collation.collation->coll_name.str, (*b)->collation.derivation_name(), func_name()); return true; } - if (agg_item_set_converter(tmp, func_name(), + if (agg_item_set_converter(tmp, func_name_cstring(), a, 1, MY_COLL_CMP_CONV, 1) || - agg_item_set_converter(tmp, func_name(), + agg_item_set_converter(tmp, func_name_cstring(), b, 1, MY_COLL_CMP_CONV, 1)) return true; *cs= tmp.collation; @@ -5283,8 +5457,6 @@ public: Used_tables_and_const_cache(item) { } Item_func_or_sum(THD *thd, List<Item> &list): Item_result_field(thd), Item_args(thd, list) { } - bool with_subquery() const override - { DBUG_ASSERT(fixed); return m_with_subquery; } bool walk(Item_processor processor, bool walk_subquery, void *arg) override { if (walk_args(processor, walk_subquery, arg)) @@ -5302,12 +5474,15 @@ public: instead. Added here, to the parent class of both Item_func and Item_sum. - NOTE: for Items inherited from Item_sum, func_name() return part of - function name till first argument (including '(') to make difference in - names for functions with 'distinct' clause and without 'distinct' and - also to make printing of items inherited from Item_sum uniform. 
+ NOTE: for Items inherited from Item_sum, func_name() and + func_name_cstring() returns part of function name till first + argument (including '(') to make difference in names for functions + with 'distinct' clause and without 'distinct' and also to make + printing of items inherited from Item_sum uniform. */ - virtual const char *func_name() const= 0; + inline const char *func_name() const + { return (char*) func_name_cstring().str; } + virtual LEX_CSTRING func_name_cstring() const= 0; virtual bool fix_length_and_dec()= 0; bool const_item() const override { return const_item_cache; } table_map used_tables() const override { return used_tables_cache; } @@ -5342,7 +5517,7 @@ public: Field *sp_result_field; Item_sp(THD *thd, Name_resolution_context *context_arg, sp_name *name_arg); Item_sp(THD *thd, Item_sp *item); - const char *func_name(THD *thd, bool is_package_function) const; + LEX_CSTRING func_name_cstring(THD *thd, bool is_package_function) const; void cleanup(); bool sp_check_access(THD *thd); bool execute(THD *thd, bool *null_value, Item **args, uint arg_count); @@ -5356,8 +5531,7 @@ public: } }; -class Item_ref :public Item_ident, - protected With_sum_func_cache +class Item_ref :public Item_ident { protected: void set_properties(); @@ -5397,10 +5571,9 @@ public: /* Constructor need to process subselect with temporary tables (see Item) */ Item_ref(THD *thd, Item_ref *item) - :Item_ident(thd, item), With_sum_func_cache(*item), - set_properties_only(0), ref(item->ref) {} - Type type() const override { return REF_ITEM; } - Type real_type() const override + :Item_ident(thd, item), set_properties_only(0), ref(item->ref) {} + enum Type type() const override { return REF_ITEM; } + enum Type real_type() const override { return ref ? 
(*ref)->type() : REF_ITEM; } bool eq(const Item *item, bool binary_cmp) const override { @@ -5464,7 +5637,10 @@ public: return Item_ident::build_equal_items(thd, inherited, link_item_fields, cond_equal_ref); } - bool const_item() const override { return (*ref)->const_item(); } + bool const_item() const override + { + return (*ref)->const_item(); + } table_map not_null_tables() const override { return depended_from ? 0 : (*ref)->not_null_tables(); @@ -5477,7 +5653,10 @@ public: { (*ref)->save_in_field(result_field, no_conversions); } - Item *real_item() override { return ref ? (*ref)->real_item() : this; } + Item *real_item() override + { + return ref ? (*ref)->real_item() : this; + } const TYPELIB *get_typelib() const override { return ref ? (*ref)->get_typelib() : NULL; @@ -5511,7 +5690,7 @@ public: } void cleanup() override; Item_field *field_for_view_update() override - { return (*ref)->field_for_view_update(); } + { return (*ref)->field_for_view_update(); } Load_data_outvar *get_load_data_outvar() override { return (*ref)->get_load_data_outvar(); @@ -5553,17 +5732,14 @@ public: { return ref && (*ref)->basic_const_item(); } bool is_outer_field() const override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); DBUG_ASSERT(ref); return (*ref)->is_outer_field(); } - Item* build_clone(THD *thd) override; - /** Checks if the item tree that ref points to contains a subquery. 
*/ - bool with_subquery() const override { return (*ref)->with_subquery(); } Item *get_copy(THD *thd) override { return get_item_copy<Item_ref>(thd, this); } bool excl_dep_on_table(table_map tab_map) override @@ -5593,8 +5769,6 @@ public: return 0; return cleanup_processor(arg); } - bool with_sum_func() const override { return m_with_sum_func; } - With_sum_func_cache* get_with_sum_func_cache() override { return this; } Item *field_transformer_for_having_pushdown(THD *thd, uchar *arg) override { return (*ref)->field_transformer_for_having_pushdown(thd, arg); } Item *remove_item_direct_ref() override @@ -5651,6 +5825,12 @@ public: { return get_item_copy<Item_direct_ref>(thd, this); } Item *remove_item_direct_ref() override { return (*ref)->remove_item_direct_ref(); } + + /* Should be called if ref is changed */ + inline void ref_changed() + { + set_properties(); + } }; @@ -5682,7 +5862,6 @@ public: void print(String *str, enum_query_type query_type) override { ident->print(str, query_type); } - }; @@ -5694,9 +5873,7 @@ class Expression_cache_tracker; The objects of this class can store its values in an expression cache. 
*/ -class Item_cache_wrapper :public Item_result_field, - public With_subquery_cache, - protected With_sum_func_cache +class Item_cache_wrapper :public Item_result_field { private: /* Pointer on the cached expression */ @@ -5723,17 +5900,10 @@ public: Type type() const override { return EXPR_CACHE_ITEM; } Type real_type() const override { return orig_item->type(); } - bool with_subquery() const override - { DBUG_ASSERT(fixed); return m_with_subquery; } - bool with_sum_func() const override { return m_with_sum_func; } - With_sum_func_cache* get_with_sum_func_cache() override { return this; } - bool set_cache(THD *thd); Expression_cache_tracker* init_tracker(MEM_ROOT *mem_root); - bool fix_fields(THD *thd, Item **it) override; void cleanup() override; - Item *get_orig_item() const { return orig_item; } /* Methods of getting value which should be cached in the cache */ @@ -5757,7 +5927,8 @@ public: /* Following methods make this item transparent as much as possible */ void print(String *str, enum_query_type query_type) override; - const char *full_name() const override { return orig_item->full_name(); } + LEX_CSTRING full_name_cstring() const override + { return orig_item->full_name_cstring(); } void make_send_field(THD *thd, Send_field *field) override { orig_item->make_send_field(thd, field); } bool eq(const Item *item, bool binary_cmp) const override @@ -5773,8 +5944,12 @@ public: int save_in_field(Field *to, bool no_conversions) override; const Type_handler *type_handler() const override { return orig_item->type_handler(); } - table_map used_tables() const override { return orig_item->used_tables(); } - void update_used_tables() override { orig_item->update_used_tables(); } + table_map used_tables() const override + { return orig_item->used_tables(); } + void update_used_tables() override + { + orig_item->update_used_tables(); + } bool const_item() const override { return orig_item->const_item(); } table_map not_null_tables() const override { return 
orig_item->not_null_tables(); } @@ -5861,7 +6036,7 @@ public: item_equal(0), view(view_arg), null_ref_table(NULL) { - if (fixed) + if (fixed()) set_null_ref_table(); } @@ -6067,7 +6242,8 @@ public: { ref= &outer_ref; set_properties(); - fixed= 0; /* reset flag set in set_properties() */ + /* reset flag set in set_properties() */ + base_flags&= ~item_base_t::FIXED; } Item_outer_ref(THD *thd, Name_resolution_context *context_arg, Item **item, const LEX_CSTRING &table_name_arg, LEX_CSTRING &field_name_arg, @@ -6210,11 +6386,12 @@ protected: stores metadata information about the original class as well as a pointer to it. */ - Item_copy(THD *thd, Item *i): Item(thd) + Item_copy(THD *thd, Item *org): Item(thd) { - DBUG_ASSERT(i->is_fixed()); - item= i; - null_value=maybe_null=item->maybe_null; + DBUG_ASSERT(org->fixed()); + item= org; + null_value= item->maybe_null(); + copy_flags(item, item_base_t::MAYBE_NULL); Type_std_attributes::set(item); name= item->name; set_handler(item->type_handler()); @@ -6258,7 +6435,6 @@ public: Override the methods below as pure virtual to make sure all the sub-classes implement them. 
*/ - String *val_str(String*) override = 0; my_decimal *val_decimal(my_decimal *) override = 0; double val_real() override = 0; @@ -6534,13 +6710,11 @@ public: bool check_field_expression_processor(void *arg) override; bool check_func_default_processor(void *) override { return true; } bool register_field_in_read_map(void *arg) override; - bool walk(Item_processor processor, bool walk_subquery, void *args) override { return (arg && arg->walk(processor, walk_subquery, args)) || (this->*processor)(args); } - Item *transform(THD *thd, Item_transformer transformer, uchar *args) override; Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, @@ -6591,7 +6765,7 @@ public: bool get_date(THD *, MYSQL_TIME *, date_mode_t) override { DBUG_ASSERT(0); // never should be called - return null_value= true; + return (null_value= true); } bool send(Protocol *, st_value *) override { @@ -6731,25 +6905,50 @@ class Table_triggers_list; class Item_trigger_field : public Item_field, private Settable_routine_parameter { +private: + GRANT_INFO *table_grants; public: - /* Is this item represents row from NEW or OLD row ? */ - enum row_version_type {OLD_ROW, NEW_ROW}; - row_version_type row_version; /* Next in list of all Item_trigger_field's in trigger */ Item_trigger_field *next_trg_field; - /* Index of the field in the TABLE::field array */ - uint field_idx; /* Pointer to Table_trigger_list object for table of this trigger */ Table_triggers_list *triggers; + /* Is this item represents row from NEW or OLD row ? */ + enum __attribute__((packed)) row_version_type {OLD_ROW, NEW_ROW}; + row_version_type row_version; + /* Index of the field in the TABLE::field array */ + field_index_t field_idx; - Item_trigger_field(THD *thd, Name_resolution_context *context_arg, +private: + /* + Trigger field is read-only unless it belongs to the NEW row in a + BEFORE INSERT of BEFORE UPDATE trigger. 
+ */ + bool read_only; + + /* + 'want_privilege' holds privileges required to perform operation on + this trigger field (SELECT_ACL if we are going to read it and + UPDATE_ACL if we are going to update it). It is initialized at + parse time but can be updated later if this trigger field is used + as OUT or INOUT parameter of stored routine (in this case + set_required_privilege() is called to appropriately update + want_privilege and cleanup() is responsible for restoring of + original want_privilege once parameter's value is updated). + */ + privilege_t original_privilege; + privilege_t want_privilege; +public: + +Item_trigger_field(THD *thd, Name_resolution_context *context_arg, row_version_type row_ver_arg, const LEX_CSTRING &field_name_arg, privilege_t priv, const bool ro) :Item_field(thd, context_arg, field_name_arg), - row_version(row_ver_arg), field_idx((uint)-1), original_privilege(priv), - want_privilege(priv), table_grants(NULL), read_only (ro) - {} + table_grants(NULL), next_trg_field(NULL), triggers(NULL), + row_version(row_ver_arg), field_idx(NO_CACHED_FIELD_INDEX), + read_only (ro), original_privilege(priv), want_privilege(priv) + { + } void setup_field(THD *thd, TABLE *table, GRANT_INFO *table_grant_info); Type type() const override { return TRIGGER_FIELD_ITEM; } bool eq(const Item *item, bool binary_cmp) const override; @@ -6776,25 +6975,6 @@ public: return set_value(thd, NULL, it); } -private: - /* - 'want_privilege' holds privileges required to perform operation on - this trigger field (SELECT_ACL if we are going to read it and - UPDATE_ACL if we are going to update it). It is initialized at - parse time but can be updated later if this trigger field is used - as OUT or INOUT parameter of stored routine (in this case - set_required_privilege() is called to appropriately update - want_privilege and cleanup() is responsible for restoring of - original want_privilege once parameter's value is updated). 
- */ - privilege_t original_privilege; - privilege_t want_privilege; - GRANT_INFO *table_grants; - /* - Trigger field is read-only unless it belongs to the NEW row in a - BEFORE INSERT of BEFORE UPDATE trigger. - */ - bool read_only; public: bool unknown_splocal_processor(void *) override { return false; } bool check_vcol_func_processor(void *arg) override; @@ -6845,10 +7025,10 @@ public: value_cached(0), used_table_map(0) { - maybe_null= 1; + set_maybe_null(); null_value= 1; null_value_inside= true; - fixed= 1; + quick_fix_field(); } protected: Item_cache(THD *thd, const Type_handler *handler): @@ -6858,10 +7038,10 @@ protected: value_cached(0), used_table_map(0) { - maybe_null= 1; + set_maybe_null(); null_value= 1; null_value_inside= true; - fixed= 1; + quick_fix_field(); } public: @@ -6890,7 +7070,7 @@ public: virtual void keep_array() {} void print(String *str, enum_query_type query_type) override; bool eq_def(const Field *field) - { + { return cached_field ? cached_field->eq_def (field) : FALSE; } bool eq(const Item *item, bool binary_cmp) const override @@ -6901,7 +7081,8 @@ public: { if (example) { - Item::vcol_func_processor_result *res= (Item::vcol_func_processor_result*)arg; + Item::vcol_func_processor_result *res= + (Item::vcol_func_processor_result*) arg; example->check_vcol_func_processor(arg); /* Item_cache of a non-deterministic function requires re-fixing @@ -6915,8 +7096,8 @@ public: } bool fix_fields(THD *thd, Item **ref) override { - fixed= 1; - if (example && !example->is_fixed()) + quick_fix_field(); + if (example && !example->fixed()) return example->fix_fields(thd, ref); return 0; } @@ -7220,8 +7401,8 @@ public: Item_cache_double(THD *thd) :Item_cache_real(thd, &type_handler_double) { } - String* val_str(String *str); - Item *get_copy(THD *thd) + String *val_str(String *str) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_cache_double>(thd, this); } }; @@ -7232,8 +7413,8 @@ public: Item_cache_float(THD *thd) 
:Item_cache_real(thd, &type_handler_float) { } - String* val_str(String *str); - Item *get_copy(THD *thd) + String *val_str(String *str) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_cache_float>(thd, this); } }; @@ -7297,7 +7478,7 @@ public: Item_cache_str_for_nullif(THD *thd, const Item *item) :Item_cache_str(thd, item) { } - Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) + Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) override { /** Item_cache_str::safe_charset_converter() returns a new Item_cache @@ -7311,7 +7492,7 @@ public: */ return Item::safe_charset_converter(thd, tocs); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_cache_str_for_nullif>(thd, this); } }; @@ -7399,33 +7580,21 @@ public: Item_type_holder do not need cleanup() because its time of live limited by single SP/PS execution. */ -class Item_type_holder: public Item, - public Type_handler_hybrid_field_type +class Item_type_holder: public Item, public Type_handler_hybrid_field_type { protected: const TYPELIB *enum_set_typelib; public: - Item_type_holder(THD *thd, Item *item) - :Item(thd, item), - Type_handler_hybrid_field_type(item->real_type_handler()), - enum_set_typelib(0) - { - DBUG_ASSERT(item->is_fixed()); - maybe_null= item->maybe_null; - } - Item_type_holder(THD *thd, - Item *item, - const Type_handler *handler, - const Type_all_attributes *attr, - bool maybe_null_arg) - :Item(thd), - Type_handler_hybrid_field_type(handler), + Item_type_holder(THD *thd, Item *item, const Type_handler *handler, + const Type_all_attributes *attr, bool maybe_null_arg) + :Item(thd), Type_handler_hybrid_field_type(handler), enum_set_typelib(attr->get_typelib()) { name= item->name; Type_std_attributes::set(*attr); - maybe_null= maybe_null_arg; - common_flags= item->common_flags; + set_maybe_null(maybe_null_arg); + copy_flags(item, item_base_t::IS_EXPLICIT_NAME | + item_base_t::IS_IN_WITH_CYCLE); } const Type_handler 
*type_handler() const override @@ -7466,11 +7635,10 @@ public: const Tmp_field_param *param) override { return Item_type_holder::real_type_handler()-> - make_and_init_table_field(root, &name, Record_addr(maybe_null), - *this, table); + make_and_init_table_field(root, &name, Record_addr(maybe_null()), + *this, table); } Item* get_copy(THD *) override { return nullptr; } - }; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 63cf9c70e88..3e1f6d96803 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -35,6 +35,7 @@ #define PCRE2_STATIC 1 /* Important on Windows */ #include "pcre2.h" /* pcre2 header file */ +#include "my_json_writer.h" /* Compare row signature of two expressions @@ -90,11 +91,11 @@ static int cmp_row_type(Item* item1, Item* item2) @retval false otherwise */ -bool -Type_handler_hybrid_field_type::aggregate_for_comparison(const char *funcname, - Item **items, - uint nitems, - bool int_uint_as_dec) +bool Type_handler_hybrid_field_type:: +aggregate_for_comparison(const LEX_CSTRING &funcname, + Item **items, + uint nitems, + bool int_uint_as_dec) { uint unsigned_count= items[0]->unsigned_flag; /* @@ -120,7 +121,7 @@ Type_handler_hybrid_field_type::aggregate_for_comparison(const char *funcname, i == 1 ? items[0]->type_handler()->name().ptr() : type_handler()->name().ptr(), items[i]->type_handler()->name().ptr(), - funcname); + funcname.str); return true; } /* @@ -198,7 +199,7 @@ static uint collect_cmp_types(Item **items, uint nitems, bool skip_nulls= FALSE) longlong Item_func_not::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); bool value= args[0]->val_bool(); null_value=args[0]->null_value; return ((!null_value && value == 0) ? 
1 : 0); @@ -217,7 +218,7 @@ void Item_func_not::print(String *str, enum_query_type query_type) longlong Item_func_not_all::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); bool value= args[0]->val_bool(); /* @@ -258,7 +259,7 @@ void Item_func_not_all::print(String *str, enum_query_type query_type) longlong Item_func_nop_all::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong value= args[0]->val_int(); /* @@ -328,33 +329,29 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item, the optimizer might conclude that the query doesn't need to do grouping at all. */ - if ((*item)->const_item() && !(*item)->is_expensive() && + if ((*item)->can_eval_in_optimize() && !(*item)->with_sum_func()) { TABLE *table= field->table; - Sql_mode_save sql_mode(thd); - Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE); MY_BITMAP *old_maps[2] = { NULL, NULL }; ulonglong UNINIT_VAR(orig_field_val); /* original field value if valid */ + bool save_field_value; /* table->read_set may not be set if we come here from a CREATE TABLE */ if (table && table->read_set) dbug_tmp_use_all_columns(table, old_maps, &table->read_set, &table->write_set); - /* For comparison purposes allow invalid dates like 2000-01-32 */ - thd->variables.sql_mode= (thd->variables.sql_mode & ~MODE_NO_ZERO_DATE) | - MODE_INVALID_DATES; /* Store the value of the field/constant because the call to save_in_field below overrides that value. Don't save field value if no data has been read yet. */ - bool save_field_value= (field_item->const_item() || - !(field->table->status & STATUS_NO_RECORD)); + save_field_value= (field_item->const_item() || + !(field->table->status & STATUS_NO_RECORD)); if (save_field_value) orig_field_val= field->val_int(); - if (!(*item)->save_in_field(field, 1) && !field->is_null()) + if (!(*item)->save_in_field_no_warnings(field, 1) && !field->is_null()) { int field_cmp= 0; // If item is a decimal value, we must reject it if it was truncated. 
@@ -366,8 +363,9 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item, if (0 == field_cmp) { - Item *tmp= new (thd->mem_root) Item_int_with_ref(thd, field->val_int(), *item, - MY_TEST(field->flags & UNSIGNED_FLAG)); + Item *tmp= (new (thd->mem_root) + Item_int_with_ref(thd, field->val_int(), *item, + MY_TEST(field->flags & UNSIGNED_FLAG))); if (tmp) thd->change_item_tree(item, tmp); result= 1; // Item was replaced @@ -429,7 +427,7 @@ bool Item_func::setup_args_and_comparator(THD *thd, Arg_comparator *cmp) DBUG_ASSERT(functype() != LIKE_FUNC); convert_const_compared_to_int_field(thd); - return cmp->set_cmp_func(this, &args[0], &args[1], true); + return cmp->set_cmp_func(thd, this, &args[0], &args[1], true); } @@ -477,7 +475,7 @@ bool Item_bool_rowready_func2::fix_length_and_dec() items, holding the cached converted value of the original (constant) item. */ -int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg, +int Arg_comparator::set_cmp_func(THD *thd, Item_func_or_sum *owner_arg, Item **a1, Item **a2) { owner= owner_arg; @@ -486,17 +484,18 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg, b= a2; Item *tmp_args[2]= {*a1, *a2}; Type_handler_hybrid_field_type tmp; - if (tmp.aggregate_for_comparison(owner_arg->func_name(), tmp_args, 2, false)) + if (tmp.aggregate_for_comparison(owner_arg->func_name_cstring(), tmp_args, 2, + false)) { - DBUG_ASSERT(current_thd->is_error()); + DBUG_ASSERT(thd->is_error()); return 1; } m_compare_handler= tmp.type_handler(); - return m_compare_handler->set_comparator_func(this); + return m_compare_handler->set_comparator_func(thd, this); } -bool Arg_comparator::set_cmp_func_for_row_arguments() +bool Arg_comparator::set_cmp_func_for_row_arguments(THD *thd) { uint n= (*a)->cols(); if (n != (*b)->cols()) @@ -505,7 +504,7 @@ bool Arg_comparator::set_cmp_func_for_row_arguments() comparators= 0; return true; } - if (!(comparators= new Arg_comparator[n])) + if (!(comparators= new (thd->mem_root) 
Arg_comparator[n])) return true; for (uint i=0; i < n; i++) { @@ -514,25 +513,24 @@ bool Arg_comparator::set_cmp_func_for_row_arguments() my_error(ER_OPERAND_COLUMNS, MYF(0), (*a)->element_index(i)->cols()); return true; } - if (comparators[i].set_cmp_func(owner, (*a)->addr(i), - (*b)->addr(i), set_null)) + if (comparators[i].set_cmp_func(thd, owner, (*a)->addr(i), + (*b)->addr(i), set_null)) return true; } return false; } -bool Arg_comparator::set_cmp_func_row() +bool Arg_comparator::set_cmp_func_row(THD *thd) { func= is_owner_equal_func() ? &Arg_comparator::compare_e_row : &Arg_comparator::compare_row; - return set_cmp_func_for_row_arguments(); + return set_cmp_func_for_row_arguments(thd); } -bool Arg_comparator::set_cmp_func_string() +bool Arg_comparator::set_cmp_func_string(THD *thd) { - THD *thd= current_thd; func= is_owner_equal_func() ? &Arg_comparator::compare_e_string : &Arg_comparator::compare_string; if (compare_type() == STRING_RESULT && @@ -568,9 +566,8 @@ bool Arg_comparator::set_cmp_func_string() } -bool Arg_comparator::set_cmp_func_time() +bool Arg_comparator::set_cmp_func_time(THD *thd) { - THD *thd= current_thd; m_compare_collation= &my_charset_numeric; func= is_owner_equal_func() ? &Arg_comparator::compare_e_time : &Arg_comparator::compare_time; @@ -580,9 +577,8 @@ bool Arg_comparator::set_cmp_func_time() } -bool Arg_comparator::set_cmp_func_datetime() +bool Arg_comparator::set_cmp_func_datetime(THD *thd) { - THD *thd= current_thd; m_compare_collation= &my_charset_numeric; func= is_owner_equal_func() ? &Arg_comparator::compare_e_datetime : &Arg_comparator::compare_datetime; @@ -592,9 +588,8 @@ bool Arg_comparator::set_cmp_func_datetime() } -bool Arg_comparator::set_cmp_func_native() +bool Arg_comparator::set_cmp_func_native(THD *thd) { - THD *thd= current_thd; m_compare_collation= &my_charset_numeric; func= is_owner_equal_func() ? 
&Arg_comparator::compare_e_native : &Arg_comparator::compare_native; @@ -604,9 +599,8 @@ bool Arg_comparator::set_cmp_func_native() } -bool Arg_comparator::set_cmp_func_int() +bool Arg_comparator::set_cmp_func_int(THD *thd) { - THD *thd= current_thd; func= is_owner_equal_func() ? &Arg_comparator::compare_e_int : &Arg_comparator::compare_int_signed; if ((*a)->field_type() == MYSQL_TYPE_YEAR && @@ -635,7 +629,7 @@ bool Arg_comparator::set_cmp_func_int() } -bool Arg_comparator::set_cmp_func_real() +bool Arg_comparator::set_cmp_func_real(THD *thd) { if ((((*a)->result_type() == DECIMAL_RESULT && !(*a)->const_item() && (*b)->result_type() == STRING_RESULT && (*b)->const_item()) || @@ -650,10 +644,9 @@ bool Arg_comparator::set_cmp_func_real() Do comparison as decimal rather than float, in order not to lose precision. */ m_compare_handler= &type_handler_newdecimal; - return set_cmp_func_decimal(); + return set_cmp_func_decimal(thd); } - THD *thd= current_thd; func= is_owner_equal_func() ? &Arg_comparator::compare_e_real : &Arg_comparator::compare_real; if ((*a)->decimals < NOT_FIXED_DEC && (*b)->decimals < NOT_FIXED_DEC) @@ -669,9 +662,8 @@ bool Arg_comparator::set_cmp_func_real() return false; } -bool Arg_comparator::set_cmp_func_decimal() +bool Arg_comparator::set_cmp_func_decimal(THD *thd) { - THD *thd= current_thd; func= is_owner_equal_func() ? 
&Arg_comparator::compare_e_decimal : &Arg_comparator::compare_decimal; a= cache_converted_constant(thd, a, &a_cache, compare_type_handler()); @@ -1161,7 +1153,7 @@ int Arg_comparator::compare_e_str_json() bool Item_func_truth::fix_length_and_dec() { - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; null_value= 0; decimals= 0; max_length= 1; @@ -1220,7 +1212,7 @@ bool Item_in_optimizer::is_top_level_item() const void Item_in_optimizer::fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); /* This will re-calculate attributes of our Item_in_subselect: */ Item_bool_func::fix_after_pullout(new_parent, ref, merge); @@ -1355,19 +1347,18 @@ bool Item_in_optimizer::fix_left(THD *thd) used_tables_cache= args[0]->used_tables(); } eval_not_null_tables(NULL); - copy_with_sum_func(args[0]); - with_param= args[0]->with_param || args[1]->with_param; - with_field= args[0]->with_field; + with_flags|= (args[0]->with_flags | + (args[1]->with_flags & item_with_t::SP_VAR)); if ((const_item_cache= args[0]->const_item())) { cache->store(args[0]); cache->cache_value(); } - if (args[1]->is_fixed()) + if (args[1]->fixed()) { /* to avoid overriding is called to update left expression */ used_tables_and_const_cache_join(args[1]); - join_with_sum_func(args[1]); + with_flags|= args[1]->with_flags & item_with_t::SUM_FUNC; } DBUG_RETURN(0); } @@ -1375,7 +1366,7 @@ bool Item_in_optimizer::fix_left(THD *thd) bool Item_in_optimizer::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); Item_subselect *sub= 0; uint col; @@ -1388,8 +1379,8 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref) if (fix_left(thd)) return TRUE; - if (args[0]->maybe_null) - maybe_null=1; + if (args[0]->maybe_null()) + set_maybe_null(); if (args[1]->fix_fields_if_needed(thd, args + 1)) return TRUE; @@ -1400,17 +1391,16 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref) my_error(ER_OPERAND_COLUMNS, 
MYF(0), col); return TRUE; } - if (args[1]->maybe_null) - maybe_null=1; - m_with_subquery= true; - join_with_sum_func(args[1]); - with_window_func= args[0]->with_window_func; + + base_flags|= (item_base_t::FIXED | + (args[1]->base_flags & item_base_t::MAYBE_NULL)); + with_flags|= (item_with_t::SUBQUERY | + args[1]->with_flags | + (args[0]->with_flags & + (item_with_t::SP_VAR | item_with_t::WINDOW_FUNC))); // The subquery cannot have window functions aggregated in this select - DBUG_ASSERT(!args[1]->with_window_func); - with_field= with_field || args[1]->with_field; - with_param= args[0]->with_param || args[1]->with_param; + DBUG_ASSERT(!args[1]->with_window_func()); used_tables_and_const_cache_join(args[1]); - fixed= 1; return FALSE; } @@ -1458,7 +1448,7 @@ bool Item_in_optimizer::invisible_mode() Item *Item_in_optimizer::expr_cache_insert_transformer(THD *thd, uchar *unused) { DBUG_ENTER("Item_in_optimizer::expr_cache_insert_transformer"); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); if (invisible_mode()) DBUG_RETURN(this); @@ -1483,7 +1473,7 @@ Item *Item_in_optimizer::expr_cache_insert_transformer(THD *thd, uchar *unused) void Item_in_optimizer::get_cache_parameters(List<Item> ¶meters) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); /* Add left expression to the list of the parameters of the subquery */ if (!invisible_mode()) { @@ -1574,7 +1564,7 @@ void Item_in_optimizer::get_cache_parameters(List<Item> ¶meters) longlong Item_in_optimizer::val_int() { bool tmp; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); cache->store(args[0]); cache->cache_value(); DBUG_ENTER(" Item_in_optimizer::val_int"); @@ -1721,7 +1711,7 @@ Item *Item_in_optimizer::transform(THD *thd, Item_transformer transformer, { Item *new_item; - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare()); DBUG_ASSERT(arg_count == 2); @@ -1772,7 +1762,7 @@ Item *Item_in_optimizer::transform(THD *thd, Item_transformer transformer, bool 
Item_in_optimizer::is_expensive_processor(void *arg) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return args[0]->is_expensive_processor(arg) || args[1]->is_expensive_processor(arg); } @@ -1780,14 +1770,14 @@ bool Item_in_optimizer::is_expensive_processor(void *arg) bool Item_in_optimizer::is_expensive() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return args[0]->is_expensive() || args[1]->is_expensive(); } longlong Item_func_eq::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); int value= cmp.compare(); return value == 0 ? 1 : 0; } @@ -1798,19 +1788,20 @@ longlong Item_func_eq::val_int() bool Item_func_equal::fix_length_and_dec() { bool rc= Item_bool_rowready_func2::fix_length_and_dec(); - maybe_null=null_value=0; + base_flags&= ~item_base_t::MAYBE_NULL; + null_value=0; return rc; } longlong Item_func_equal::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); return cmp.compare(); } longlong Item_func_ne::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); int value= cmp.compare(); return value != 0 && !null_value ? 1 : 0; } @@ -1818,7 +1809,7 @@ longlong Item_func_ne::val_int() longlong Item_func_ge::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); int value= cmp.compare(); return value >= 0 ? 1 : 0; } @@ -1826,14 +1817,14 @@ longlong Item_func_ge::val_int() longlong Item_func_gt::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); int value= cmp.compare(); return value > 0 ? 1 : 0; } longlong Item_func_le::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); int value= cmp.compare(); return value <= 0 && !null_value ? 1 : 0; } @@ -1841,7 +1832,7 @@ longlong Item_func_le::val_int() longlong Item_func_lt::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); int value= cmp.compare(); return value < 0 && !null_value ? 
1 : 0; } @@ -1849,7 +1840,7 @@ longlong Item_func_lt::val_int() longlong Item_func_strcmp::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *a= args[0]->val_str(&value1); String *b= args[1]->val_str(&value2); if (!a || !b) @@ -1951,13 +1942,11 @@ bool Item_func_interval::fix_length_and_dec() } } } - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; max_length= 2; used_tables_and_const_cache_join(row); not_null_tables_cache= row->not_null_tables(); - join_with_sum_func(row); - with_param= with_param || row->with_param; - with_field= with_field || row->with_field; + with_flags|= row->with_flags; return FALSE; } @@ -1978,7 +1967,7 @@ bool Item_func_interval::fix_length_and_dec() longlong Item_func_interval::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value; my_decimal dec_buf, *dec= NULL; uint i; @@ -2139,7 +2128,8 @@ bool Item_func_between::fix_length_and_dec() */ if (!args[0] || !args[1] || !args[2]) return TRUE; - if (m_comparator.aggregate_for_comparison(Item_func_between::func_name(), + if (m_comparator.aggregate_for_comparison(Item_func_between:: + func_name_cstring(), args, 3, false)) { DBUG_ASSERT(current_thd->is_error()); @@ -2361,7 +2351,7 @@ void Item_func_between::print(String *str, enum_query_type query_type) double Item_func_ifnull::real_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if (!args[0]->null_value) { @@ -2377,7 +2367,7 @@ Item_func_ifnull::real_op() longlong Item_func_ifnull::int_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong value=args[0]->val_int(); if (!args[0]->null_value) { @@ -2393,7 +2383,7 @@ Item_func_ifnull::int_op() my_decimal *Item_func_ifnull::decimal_op(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); my_decimal *value= args[0]->val_decimal(decimal_value); if (!args[0]->null_value) { @@ -2410,7 +2400,7 @@ my_decimal *Item_func_ifnull::decimal_op(my_decimal *decimal_value) String * 
Item_func_ifnull::str_op(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res =args[0]->val_str(str); if (!args[0]->null_value) { @@ -2428,7 +2418,7 @@ Item_func_ifnull::str_op(String *str) bool Item_func_ifnull::native_op(THD *thd, Native *to) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!val_native_with_conversion_from_item(thd, args[0], to, type_handler())) return false; return val_native_with_conversion_from_item(thd, args[1], to, type_handler()); @@ -2437,7 +2427,7 @@ bool Item_func_ifnull::native_op(THD *thd, Native *to) bool Item_func_ifnull::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); for (uint i= 0; i < 2; i++) { Datetime_truncation_not_needed dt(thd, args[i], @@ -2451,7 +2441,7 @@ bool Item_func_ifnull::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydat bool Item_func_ifnull::time_op(THD *thd, MYSQL_TIME *ltime) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); for (uint i= 0; i < 2; i++) { if (!Time(thd, args[i]).copy_to_mysql_time(ltime)) @@ -2490,7 +2480,7 @@ bool Item_func_ifnull::time_op(THD *thd, MYSQL_TIME *ltime) bool Item_func_if::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); args[0]->top_level_item(); if (Item_func::fix_fields(thd, ref)) @@ -2742,7 +2732,7 @@ Item_func_nullif::fix_length_and_dec() decimals= args[2]->decimals; unsigned_flag= args[2]->unsigned_flag; fix_char_length(args[2]->max_char_length()); - maybe_null=1; + set_maybe_null(); m_arg0= args[0]; if (setup_args_and_comparator(thd, &cmp)) return TRUE; @@ -2826,7 +2816,7 @@ void Item_func_nullif::print(String *str, enum_query_type query_type) */ DBUG_ASSERT(arg_count == 2 || args[0] == args[2] || current_thd->lex->context_analysis_only); - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); if (arg_count == 2) args[0]->print(str, query_type); @@ -2876,7 +2866,7 @@ int Item_func_nullif::compare() 
double Item_func_nullif::real_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value; if (!compare()) { @@ -2891,7 +2881,7 @@ Item_func_nullif::real_op() longlong Item_func_nullif::int_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong value; if (!compare()) { @@ -2906,7 +2896,7 @@ Item_func_nullif::int_op() String * Item_func_nullif::str_op(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res; if (!compare()) { @@ -2922,7 +2912,7 @@ Item_func_nullif::str_op(String *str) my_decimal * Item_func_nullif::decimal_op(my_decimal * decimal_value) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); my_decimal *res; if (!compare()) { @@ -2938,7 +2928,7 @@ Item_func_nullif::decimal_op(my_decimal * decimal_value) bool Item_func_nullif::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!compare()) return (null_value= true); Datetime_truncation_not_needed dt(thd, args[2], fuzzydate); @@ -2949,7 +2939,7 @@ Item_func_nullif::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) bool Item_func_nullif::time_op(THD *thd, MYSQL_TIME *ltime) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!compare()) return (null_value= true); return (null_value= Time(thd, args[2]).copy_to_mysql_time(ltime)); @@ -2960,7 +2950,7 @@ Item_func_nullif::time_op(THD *thd, MYSQL_TIME *ltime) bool Item_func_nullif::native_op(THD *thd, Native *to) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!compare()) return (null_value= true); return val_native_with_conversion_from_item(thd, args[2], to, type_handler()); @@ -3053,7 +3043,7 @@ Item *Item_func_decode_oracle::find_item() String *Item_func_case::str_op(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res; Item *item= find_item(); @@ -3071,7 +3061,7 @@ String *Item_func_case::str_op(String *str) longlong Item_func_case::int_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Item 
*item= find_item(); longlong res; @@ -3087,7 +3077,7 @@ longlong Item_func_case::int_op() double Item_func_case::real_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Item *item= find_item(); double res; @@ -3104,7 +3094,7 @@ double Item_func_case::real_op() my_decimal *Item_func_case::decimal_op(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Item *item= find_item(); my_decimal *res; @@ -3122,7 +3112,7 @@ my_decimal *Item_func_case::decimal_op(my_decimal *decimal_value) bool Item_func_case::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Item *item= find_item(); if (!item) return (null_value= true); @@ -3133,7 +3123,7 @@ bool Item_func_case::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) bool Item_func_case::time_op(THD *thd, MYSQL_TIME *ltime) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Item *item= find_item(); if (!item) return (null_value= true); @@ -3143,7 +3133,7 @@ bool Item_func_case::time_op(THD *thd, MYSQL_TIME *ltime) bool Item_func_case::native_op(THD *thd, Native *to) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Item *item= find_item(); if (!item) return (null_value= true); @@ -3156,8 +3146,8 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref) bool res= Item_func::fix_fields(thd, ref); Item **pos= else_expr_addr(); - if (!pos || pos[0]->maybe_null) - maybe_null= 1; + if (!pos || pos[0]->maybe_null()) + set_maybe_null(); return res; } @@ -3188,9 +3178,10 @@ bool Item_func_case_simple::prepare_predicant_and_values(THD *thd, add_predicant(this, 0); for (uint i= 0 ; i < ncases; i++) { + static LEX_CSTRING case_when= { STRING_WITH_LEN("case..when") }; if (nulls_equal ? 
- add_value("case..when", this, i + 1) : - add_value_skip_null("case..when", this, i + 1, &have_null)) + add_value(case_when, this, i + 1) : + add_value_skip_null(case_when, this, i + 1, &have_null)) return true; } all_values_added(&tmp, &type_cnt, &m_found_types); @@ -3233,7 +3224,8 @@ bool Item_func_decode_oracle::fix_length_and_dec() */ bool Item_func_case::aggregate_then_and_else_arguments(THD *thd, uint start) { - if (aggregate_for_result(func_name(), args + start, arg_count - start, true)) + if (aggregate_for_result(func_name_cstring(), args + start, + arg_count - start, true)) return true; if (fix_attributes(args + start, arg_count - start)) @@ -3417,7 +3409,7 @@ void Item_func_case_simple::print(String *str, enum_query_type query_type) void Item_func_decode_oracle::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); args[0]->print(str, query_type); for (uint i= 1, count= when_count() ; i <= count; i++) @@ -3443,7 +3435,7 @@ void Item_func_decode_oracle::print(String *str, enum_query_type query_type) String *Item_func_coalesce::str_op(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); null_value=0; for (uint i=0 ; i < arg_count ; i++) { @@ -3457,7 +3449,7 @@ String *Item_func_coalesce::str_op(String *str) longlong Item_func_coalesce::int_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); null_value=0; for (uint i=0 ; i < arg_count ; i++) { @@ -3471,7 +3463,7 @@ longlong Item_func_coalesce::int_op() double Item_func_coalesce::real_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); null_value=0; for (uint i=0 ; i < arg_count ; i++) { @@ -3486,7 +3478,7 @@ double Item_func_coalesce::real_op() bool Item_func_coalesce::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); for (uint i= 0; i < arg_count; i++) { Datetime_truncation_not_needed dt(thd, args[i], @@ -3500,7 +3492,7 @@ bool 
Item_func_coalesce::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzyd bool Item_func_coalesce::time_op(THD *thd, MYSQL_TIME *ltime) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); for (uint i= 0; i < arg_count; i++) { if (!Time(thd, args[i]).copy_to_mysql_time(ltime)) @@ -3512,7 +3504,7 @@ bool Item_func_coalesce::time_op(THD *thd, MYSQL_TIME *ltime) bool Item_func_coalesce::native_op(THD *thd, Native *to) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); for (uint i= 0; i < arg_count; i++) { if (!val_native_with_conversion_from_item(thd, args[i], to, type_handler())) @@ -3524,7 +3516,7 @@ bool Item_func_coalesce::native_op(THD *thd, Native *to) my_decimal *Item_func_coalesce::decimal_op(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); null_value= 0; for (uint i= 0; i < arg_count; i++) { @@ -3951,7 +3943,7 @@ bool Predicant_to_list_comparator::alloc_comparators(THD *thd, uint nargs) } -bool Predicant_to_list_comparator::add_value(const char *funcname, +bool Predicant_to_list_comparator::add_value(const LEX_CSTRING &funcname, Item_args *args, uint value_index) { @@ -3973,10 +3965,11 @@ bool Predicant_to_list_comparator::add_value(const char *funcname, } -bool Predicant_to_list_comparator::add_value_skip_null(const char *funcname, - Item_args *args, - uint value_index, - bool *nulls_found) +bool Predicant_to_list_comparator:: +add_value_skip_null(const LEX_CSTRING &funcname, + Item_args *args, + uint value_index, + bool *nulls_found) { /* Skip explicit NULL constant items. 
@@ -4033,24 +4026,24 @@ bool Predicant_to_list_comparator::make_unique_cmp_items(THD *thd, } -cmp_item* cmp_item_sort_string::make_same() +cmp_item* cmp_item_sort_string::make_same(THD *thd) { - return new cmp_item_sort_string_in_static(cmp_charset); + return new (thd->mem_root) cmp_item_sort_string_in_static(cmp_charset); } -cmp_item* cmp_item_int::make_same() +cmp_item* cmp_item_int::make_same(THD *thd) { - return new cmp_item_int(); + return new (thd->mem_root) cmp_item_int(); } -cmp_item* cmp_item_real::make_same() +cmp_item* cmp_item_real::make_same(THD *thd) { - return new cmp_item_real(); + return new (thd->mem_root) cmp_item_real(); } -cmp_item* cmp_item_row::make_same() +cmp_item* cmp_item_row::make_same(THD *thd) { - return new cmp_item_row(); + return new (thd->mem_root) cmp_item_row(); } @@ -4114,7 +4107,7 @@ void cmp_item_row::store_value_by_template(THD *thd, cmp_item *t, Item *item) item->null_value= 0; for (uint i=0; i < n; i++) { - if (!(comparators[i]= tmpl->comparators[i]->make_same())) + if (!(comparators[i]= tmpl->comparators[i]->make_same(thd))) break; // new failed comparators[i]->store_value_by_template(thd, tmpl->comparators[i], item->element_index(i)); @@ -4198,9 +4191,9 @@ int cmp_item_decimal::compare(cmp_item *arg) } -cmp_item* cmp_item_decimal::make_same() +cmp_item* cmp_item_decimal::make_same(THD *thd) { - return new cmp_item_decimal(); + return new (thd->mem_root) cmp_item_decimal(); } @@ -4241,15 +4234,15 @@ int cmp_item_temporal::compare(cmp_item *ci) } -cmp_item *cmp_item_datetime::make_same() +cmp_item *cmp_item_datetime::make_same(THD *thd) { - return new cmp_item_datetime(); + return new (thd->mem_root) cmp_item_datetime(); } -cmp_item *cmp_item_time::make_same() +cmp_item *cmp_item_time::make_same(THD *thd) { - return new cmp_item_time(); + return new (thd->mem_root) cmp_item_time(); } @@ -4289,9 +4282,9 @@ int cmp_item_timestamp::compare(cmp_item *arg) } -cmp_item* cmp_item_timestamp::make_same() +cmp_item* 
cmp_item_timestamp::make_same(THD *thd) { - return new cmp_item_timestamp(); + return new (thd->mem_root) cmp_item_timestamp(); } @@ -4354,6 +4347,56 @@ Item_func_in::fix_fields(THD *thd, Item **ref) } +Item *Item_func_in::in_predicate_to_equality_transformer(THD *thd, uchar *arg) +{ + if (!array || have_null || !all_items_are_consts(args + 1, arg_count - 1)) + return this; /* Transformation is not applicable */ + + /* + If all elements in the array of constant values are equal and there are + no NULLs in the list then clause + - "a IN (e1,..,en)" can be converted to "a = e1" + - "a NOT IN (e1,..,en)" can be converted to "a != e1". + This means an object of Item_func_in can be replaced with an object of + Item_func_eq for IN (e1,..,en) clause or Item_func_ne for + NOT IN (e1,...,en). + */ + + /* + Since the array is sorted it's enough to compare the first and the last + elements to tell whether all elements are equal + */ + if (array->compare_elems(0, array->used_count - 1)) + { + /* Not all elements are equal, transformation is not possible */ + return this; + } + + Json_writer_object trace_wrapper(thd); + trace_wrapper.add("transformation", "in_predicate_to_equality") + .add("before", this); + + Item *new_item= nullptr; + if (negated) + new_item= new (thd->mem_root) Item_func_ne(thd, args[0], args[1]); + else + new_item= new (thd->mem_root) Item_func_eq(thd, args[0], args[1]); + if (new_item) + { + new_item->set_name(thd, name); + if (new_item->fix_fields(thd, &new_item)) + { + /* + If there are any problems during fixing fields, there is no need to + return an error, just discard the transformation + */ + new_item= this; + } + } + trace_wrapper.add("after", new_item); + return new_item; +} + bool Item_func_in::eval_not_null_tables(void *opt_arg) { @@ -4402,7 +4445,8 @@ bool Item_func_in::prepare_predicant_and_values(THD *thd, uint *found_types) add_predicant(this, 0); for (uint i= 1 ; i < arg_count; i++) { - if (add_value_skip_null(Item_func_in::func_name(), 
this, i, &have_null)) + if (add_value_skip_null(Item_func_in::func_name_cstring(), this, i, + &have_null)) return true; } all_values_added(&m_comparator, &type_cnt, found_types); @@ -4529,7 +4573,7 @@ bool cmp_item_row:: aggregate_row_elements_for_comparison(THD *thd, Type_handler_hybrid_field_type *cmp, Item_args *tmp, - const char *funcname, + const LEX_CSTRING &funcname, uint col, uint level) { @@ -4539,8 +4583,8 @@ bool cmp_item_row:: { Item *arg= tmp->arguments()[i]; push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, - ER_UNKNOWN_ERROR, "DBUG: %s[%d,%d] handler=%s", - String_space(level).c_ptr(), col, i, + ER_UNKNOWN_ERROR, "DBUG: %*s[%d,%d] handler=%s", + level, "", col, i, arg->type_handler()->name().ptr()); } } @@ -4551,8 +4595,8 @@ bool cmp_item_row:: { if (!err) push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, - ER_UNKNOWN_ERROR, "DBUG: %s=> handler=%s", - String_space(level).c_ptr(), + ER_UNKNOWN_ERROR, "DBUG: %*s=> handler=%s", + level,"", cmp->type_handler()->name().ptr()); } ); @@ -4560,13 +4604,13 @@ bool cmp_item_row:: } -bool cmp_item_row::prepare_comparators(THD *thd, const char *funcname, +bool cmp_item_row::prepare_comparators(THD *thd, const LEX_CSTRING &funcname, const Item_args *args, uint level) { DBUG_EXECUTE_IF("cmp_item", push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, - ER_UNKNOWN_ERROR, "DBUG: %sROW(%d args) level=%d", - String_space(level).c_ptr(), + ER_UNKNOWN_ERROR, "DBUG: %*sROW(%d args) level=%d", + level,"", args->argument_count(), level);); DBUG_ASSERT(args->argument_count() > 0); if (alloc_comparators(thd, args->arguments()[0]->cols())) @@ -4615,7 +4659,7 @@ bool Item_func_in::fix_for_row_comparison_using_bisection(THD *thd) if (unlikely(!(array= new (thd->mem_root) in_row(thd, arg_count-1, 0)))) return true; cmp_item_row *cmp= &((in_row*)array)->tmp; - if (cmp->prepare_comparators(thd, func_name(), this, 0)) + if (cmp->prepare_comparators(thd, func_name_cstring(), this, 0)) return true; fix_in_vector(); 
return false; @@ -4654,7 +4698,7 @@ bool Item_func_in::fix_for_row_comparison_using_cmp_items(THD *thd) DBUG_ASSERT(get_comparator_type_handler(0) == &type_handler_row); DBUG_ASSERT(get_comparator_cmp_item(0)); cmp_item_row *cmp_row= (cmp_item_row*) get_comparator_cmp_item(0); - return cmp_row->prepare_comparators(thd, func_name(), this, 0); + return cmp_row->prepare_comparators(thd, func_name_cstring(), this, 0); } @@ -4696,7 +4740,7 @@ void Item_func_in::print(String *str, enum_query_type query_type) longlong Item_func_in::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (array) { bool tmp=array->find(args[0]); @@ -4752,7 +4796,7 @@ class Func_handler_bit_or_int_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); Longlong_null a= item->arguments()[0]->to_longlong_null(); return a.is_null() ? a : a | item->arguments()[1]->to_longlong_null(); } @@ -4765,7 +4809,7 @@ class Func_handler_bit_or_dec_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); VDec a(item->arguments()[0]); return a.is_null() ? Longlong_null() : a.to_xlonglong_null() | VDec(item->arguments()[1]).to_xlonglong_null(); @@ -4787,7 +4831,7 @@ class Func_handler_bit_and_int_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); Longlong_null a= item->arguments()[0]->to_longlong_null(); return a.is_null() ? a : a & item->arguments()[1]->to_longlong_null(); } @@ -4800,7 +4844,7 @@ class Func_handler_bit_and_dec_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); VDec a(item->arguments()[0]); return a.is_null() ? 
Longlong_null() : a.to_xlonglong_null() & VDec(item->arguments()[1]).to_xlonglong_null(); @@ -4854,7 +4898,7 @@ void Item_cond::copy_andor_arguments(THD *thd, Item_cond *item) bool Item_cond::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); List_iterator<Item> li(list); Item *item; uchar buff[sizeof(char*)]; // Max local vars in function @@ -4919,8 +4963,8 @@ Item_cond::fix_fields(THD *thd, Item **ref) return TRUE; /* purecov: inspected */ item= *li.ref(); // item can be substituted in fix_fields used_tables_cache|= item->used_tables(); - if (item->const_item() && !item->with_param && - !item->is_expensive() && !cond_has_datetime_is_null(item)) + if (item->can_eval_in_optimize() && !item->with_sp_var() && + !cond_has_datetime_is_null(item)) { if (item->eval_const_cond() == is_and_cond && top_level()) { @@ -4955,17 +4999,12 @@ Item_cond::fix_fields(THD *thd, Item **ref) const_item_cache= FALSE; } - - join_with_sum_func(item); - with_param|= item->with_param; - with_field|= item->with_field; - m_with_subquery|= item->with_subquery(); - with_window_func|= item->with_window_func; - maybe_null|= item->maybe_null; + base_flags|= item->base_flags & item_base_t::MAYBE_NULL; + with_flags|= item->with_flags; } if (fix_length_and_dec()) return TRUE; - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -4981,8 +5020,8 @@ Item_cond::eval_not_null_tables(void *opt_arg) while ((item=li++)) { table_map tmp_table_map; - if (item->const_item() && !item->with_param && - !item->is_expensive() && !cond_has_datetime_is_null(item)) + if (item->can_eval_in_optimize() && !item->with_sp_var() && + !cond_has_datetime_is_null(item)) { if (item->eval_const_cond() == is_and_cond && top_level()) { @@ -5320,7 +5359,7 @@ void Item_cond::print(String *str, enum_query_type query_type) while ((item=li++)) { str->append(' '); - str->append(func_name()); + str->append(func_name_cstring()); str->append(' '); item->print_parenthesised(str, 
query_type, precedence()); } @@ -5383,7 +5422,7 @@ Item *Item_cond::build_clone(THD *thd) bool Item_cond::excl_dep_on_table(table_map tab_map) { - if (used_tables() & OUTER_REF_TABLE_BIT) + if (used_tables() & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)) return false; if (!(used_tables() & ~tab_map)) return true; @@ -5445,7 +5484,7 @@ void Item_cond_and::mark_as_condition_AND_part(TABLE_LIST *embedding) longlong Item_cond_and::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); List_iterator_fast<Item> li(list); Item *item; null_value= 0; @@ -5463,7 +5502,7 @@ longlong Item_cond_and::val_int() longlong Item_cond_or::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); List_iterator_fast<Item> li(list); Item *item; null_value=0; @@ -5540,8 +5579,8 @@ bool Item_func_null_predicate::count_sargable_conds(void *arg) longlong Item_func_isnull::val_int() { - DBUG_ASSERT(fixed == 1); - if (const_item() && !args[0]->maybe_null) + DBUG_ASSERT(fixed()); + if (const_item() && !args[0]->maybe_null()) return 0; return args[0]->is_null() ? 
1: 0; } @@ -5562,9 +5601,9 @@ bool Item_func_isnull::find_not_null_fields(table_map allowed) void Item_func_isnull::print(String *str, enum_query_type query_type) { - if (const_item() && !args[0]->maybe_null && + if (const_item() && !args[0]->maybe_null() && !(query_type & (QT_NO_DATA_EXPANSION | QT_VIEW_INTERNAL))) - str->append("/*always not null*/ 1"); + str->append(STRING_WITH_LEN("/*always not null*/ 1")); else args[0]->print_parenthesised(str, query_type, precedence()); str->append(STRING_WITH_LEN(" is null")); @@ -5573,9 +5612,9 @@ void Item_func_isnull::print(String *str, enum_query_type query_type) longlong Item_is_not_null_test::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_is_not_null_test::val_int"); - if (const_item() && !args[0]->maybe_null) + if (const_item() && !args[0]->maybe_null()) DBUG_RETURN(1); if (args[0]->is_null()) { @@ -5592,7 +5631,7 @@ longlong Item_is_not_null_test::val_int() */ void Item_is_not_null_test::update_used_tables() { - if (!args[0]->maybe_null) + if (!args[0]->maybe_null()) used_tables_cache= 0; /* is always true */ else args[0]->update_used_tables(); @@ -5601,7 +5640,7 @@ void Item_is_not_null_test::update_used_tables() longlong Item_func_isnotnull::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); return args[0]->is_null() ? 
0 : 1; } @@ -5625,7 +5664,7 @@ void Item_func_like::print(String *str, enum_query_type query_type) str->append(' '); if (negated) str->append(STRING_WITH_LEN(" not ")); - str->append(func_name()); + str->append(func_name_cstring()); str->append(' '); if (escape_used_in_parsing) { @@ -5640,7 +5679,7 @@ void Item_func_like::print(String *str, enum_query_type query_type) longlong Item_func_like::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ASSERT(escape != ESCAPE_NOT_INITIALIZED); String* res= args[0]->val_str(&cmp_value1); if (args[0]->null_value) @@ -5673,7 +5712,7 @@ bool Item_func_like::with_sargable_pattern() const if (negated) return false; - if (!args[1]->const_item() || args[1]->is_expensive()) + if (!args[1]->can_eval_in_optimize()) return false; String* res2= args[1]->val_str((String *) &cmp_value2); @@ -5805,7 +5844,7 @@ bool fix_escape_item(THD *thd, Item *escape_item, String *tmp_str, bool Item_func_like::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); if (Item_bool_func2::fix_fields(thd, ref) || escape_item->fix_fields_if_needed_for_scalar(thd, &escape_item) || fix_escape_item(thd, escape_item, &cmp_value1, escape_used_in_parsing, @@ -5818,8 +5857,7 @@ bool Item_func_like::fix_fields(THD *thd, Item **ref) We could also do boyer-more for non-const items, but as we would have to recompute the tables for each row it's not worth it. 
*/ - if (args[1]->const_item() && !use_strnxfrm(collation.collation) && - !args[1]->is_expensive()) + if (args[1]->can_eval_in_optimize() && !use_strnxfrm(collation.collation)) { String* res2= args[1]->val_str(&cmp_value2); if (!res2) @@ -6119,14 +6157,14 @@ void Regexp_processor_pcre::fix_owner(Item_func *owner, { if (compile(pattern_arg, true)) { - owner->maybe_null= 1; // Will always return NULL + owner->set_maybe_null(); // Will always return NULL return; } set_const(true); - owner->maybe_null= subject_arg->maybe_null; + owner->base_flags|= subject_arg->base_flags & item_base_t::MAYBE_NULL; } else - owner->maybe_null= 1; + owner->set_maybe_null(); } @@ -6145,7 +6183,7 @@ Item_func_regex::fix_length_and_dec() longlong Item_func_regex::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value= re.recompile(args[1]))) return 0; @@ -6171,7 +6209,7 @@ Item_func_regexp_instr::fix_length_and_dec() longlong Item_func_regexp_instr::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value= re.recompile(args[1]))) return 0; @@ -6420,7 +6458,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const longlong Item_func_xor::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); int result= 0; null_value= false; for (uint i= 0; i < arg_count; i++) @@ -7013,7 +7051,7 @@ void Item_equal::update_const(THD *thd) Item *item; while ((item= it++)) { - if (item->const_item() && !item->is_expensive() && + if (item->can_eval_in_optimize() && /* Don't propagate constant status of outer-joined column. 
Such a constant status here is a result of: @@ -7069,7 +7107,7 @@ void Item_equal::update_const(THD *thd) bool Item_equal::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); Item_equal_fields_iterator it(*this); Item *item; Field *first_equal_field= NULL; @@ -7084,8 +7122,8 @@ bool Item_equal::fix_fields(THD *thd, Item **ref) tmp_table_map= item->not_null_tables(); not_null_tables_cache|= tmp_table_map; DBUG_ASSERT(!item->with_sum_func() && !item->with_subquery()); - if (item->maybe_null) - maybe_null= 1; + if (item->maybe_null()) + set_maybe_null(); if (!item->get_item_equal()) item->set_item_equal(this); if (link_equal_fields && item->real_item()->type() == FIELD_ITEM) @@ -7102,7 +7140,7 @@ bool Item_equal::fix_fields(THD *thd, Item **ref) last_equal_field->next_equal_field= first_equal_field; if (fix_length_and_dec()) return TRUE; - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -7283,7 +7321,7 @@ void Item_equal::print(String *str, enum_query_type query_type) str->append('0'); return; } - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); List_iterator_fast<Item> it(equal_items); Item *item; @@ -7587,7 +7625,7 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) { if (item->excl_dep_on_grouping_fields(sel)) { - set_extraction_flag(FULL_EXTRACTION_FL); + set_extraction_flag(MARKER_FULL_EXTRACTION); return true; } } @@ -7708,7 +7746,7 @@ bool Item_equal::create_pushable_equalities(THD *thd, from peforming cleanup of the sub-items and so creating an item tree where a fixed item has non-fixed items inside it. 
*/ - int new_flag= IMMUTABLE_FL; + int16 new_flag= MARKER_IMMUTABLE; right_item->walk(&Item::set_extraction_flag_processor, false, (void*)&new_flag); } diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 25553f8d565..e12a527d8af 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1,7 +1,7 @@ #ifndef ITEM_CMPFUNC_INCLUDED #define ITEM_CMPFUNC_INCLUDED /* Copyright (c) 2000, 2015, Oracle and/or its affiliates. - Copyright (c) 2009, 2020, MariaDB + Copyright (c) 2009, 2022, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -56,7 +56,8 @@ class Arg_comparator: public Sql_alloc Item *a_cache, *b_cache; // Cached values of a and b items // when one of arguments is NULL. - int set_cmp_func(Item_func_or_sum *owner_arg, Item **a1, Item **a2); + int set_cmp_func(THD *thd, Item_func_or_sum *owner_arg, + Item **a1, Item **a2); int compare_not_null_values(longlong val1, longlong val2) { @@ -83,21 +84,21 @@ public: a_cache(0), b_cache(0) {}; public: - bool set_cmp_func_for_row_arguments(); - bool set_cmp_func_row(); - bool set_cmp_func_string(); - bool set_cmp_func_time(); - bool set_cmp_func_datetime(); - bool set_cmp_func_native(); - bool set_cmp_func_int(); - bool set_cmp_func_real(); - bool set_cmp_func_decimal(); + bool set_cmp_func_for_row_arguments(THD *thd); + bool set_cmp_func_row(THD *thd); + bool set_cmp_func_string(THD *thd); + bool set_cmp_func_time(THD *thd); + bool set_cmp_func_datetime(THD *thd); + bool set_cmp_func_native(THD *thd); + bool set_cmp_func_int(THD *thd); + bool set_cmp_func_real(THD *thd); + bool set_cmp_func_decimal(THD *thd); - inline int set_cmp_func(Item_func_or_sum *owner_arg, + inline int set_cmp_func(THD *thd, Item_func_or_sum *owner_arg, Item **a1, Item **a2, bool set_null_arg) { set_null= set_null_arg; - return set_cmp_func(owner_arg, a1, a2); + return set_cmp_func(thd, owner_arg, a1, a2); } inline int compare() { return 
(this->*func)(); } @@ -221,12 +222,14 @@ public: Item_bool_func(THD *thd, Item *a, Item *b, Item *c): Item_int_func(thd, a, b, c) {} Item_bool_func(THD *thd, List<Item> &list): Item_int_func(thd, list) { } Item_bool_func(THD *thd, Item_bool_func *item) :Item_int_func(thd, item) {} - const Type_handler *type_handler() const { return &type_handler_bool; } - const Type_handler *fixed_type_handler() const { return &type_handler_bool; } - CHARSET_INFO *compare_collation() const { return NULL; } - bool fix_length_and_dec() { decimals=0; max_length=1; return FALSE; } - uint decimal_precision() const { return 1; } - bool need_parentheses_in_default() { return true; } + const Type_handler *type_handler() const override + { return &type_handler_bool; } + const Type_handler *fixed_type_handler() const override + { return &type_handler_bool; } + CHARSET_INFO *compare_collation() const override { return NULL; } + bool fix_length_and_dec() override { decimals=0; max_length=1; return FALSE; } + decimal_digits_t decimal_precision() const override { return 1; } + bool need_parentheses_in_default() override { return true; } }; @@ -238,11 +241,11 @@ public: class Item_func_truth : public Item_bool_func { public: - virtual bool val_bool(); - virtual longlong val_int(); - virtual bool fix_length_and_dec(); - virtual void print(String *str, enum_query_type query_type); - enum precedence precedence() const { return CMP_PRECEDENCE; } + bool val_bool() override; + longlong val_int() override; + bool fix_length_and_dec() override; + void print(String *str, enum_query_type query_type) override; + enum precedence precedence() const override { return CMP_PRECEDENCE; } protected: Item_func_truth(THD *thd, Item *a, bool a_value, bool a_affirmative): @@ -273,8 +276,12 @@ class Item_func_istrue : public Item_func_truth public: Item_func_istrue(THD *thd, Item *a): Item_func_truth(thd, a, true, true) {} ~Item_func_istrue() {} - virtual const char* func_name() const { return "istrue"; } - Item 
*get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("istrue") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_istrue>(thd, this); } }; @@ -289,11 +296,16 @@ public: Item_func_isnottrue(THD *thd, Item *a): Item_func_truth(thd, a, true, false) {} ~Item_func_isnottrue() {} - virtual const char* func_name() const { return "isnottrue"; } - bool find_not_null_fields(table_map allowed) { return false; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("isnottrue") }; + return name; + } + bool find_not_null_fields(table_map allowed) override { return false; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_isnottrue>(thd, this); } - bool eval_not_null_tables(void *) { not_null_tables_cache= 0; return false; } + bool eval_not_null_tables(void *) override + { not_null_tables_cache= 0; return false; } }; @@ -306,8 +318,12 @@ class Item_func_isfalse : public Item_func_truth public: Item_func_isfalse(THD *thd, Item *a): Item_func_truth(thd, a, false, true) {} ~Item_func_isfalse() {} - virtual const char* func_name() const { return "isfalse"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("isfalse") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_isfalse>(thd, this); } }; @@ -322,11 +338,16 @@ public: Item_func_isnotfalse(THD *thd, Item *a): Item_func_truth(thd, a, false, false) {} ~Item_func_isnotfalse() {} - virtual const char* func_name() const { return "isnotfalse"; } - bool find_not_null_fields(table_map allowed) { return false; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("isnotfalse") }; + return name; + } + bool find_not_null_fields(table_map allowed) override { return 
false; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_isnotfalse>(thd, this); } - bool eval_not_null_tables(void *) { not_null_tables_cache= 0; return false; } + bool eval_not_null_tables(void *) override + { not_null_tables_cache= 0; return false; } }; @@ -366,7 +387,9 @@ public: Item_in_optimizer(THD *thd, Item *a, Item *b): Item_bool_func(thd, a, b), cache(0), expr_cache(0), save_cache(0), result_for_null_param(UNKNOWN) - { m_with_subquery= true; } + { + with_flags|= item_with_t::SUBQUERY; + } bool fix_fields(THD *, Item **) override; bool fix_left(THD *thd); table_map not_null_tables() const override { return 0; } @@ -374,14 +397,18 @@ public: longlong val_int() override; void cleanup() override; enum Functype functype() const override { return IN_OPTIMIZER_FUNC; } - const char *func_name() const override { return "<in_optimizer>"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<in_optimizer>") }; + return name; + } Item_cache **get_cache() { return &cache; } void keep_top_level_cache(); Item *transform(THD *thd, Item_transformer transformer, uchar *arg) override; Item *expr_cache_insert_transformer(THD *thd, uchar *unused) override; bool is_expensive_processor(void *arg) override; bool is_expensive() override; - void set_join_tab_idx(uint join_tab_idx_arg) override + void set_join_tab_idx(uint8 join_tab_idx_arg) override { args[1]->set_join_tab_idx(join_tab_idx_arg); } void get_cache_parameters(List<Item> ¶meters) override; bool is_top_level_item() const override; @@ -510,7 +537,7 @@ class Item_bool_rowready_func2 :public Item_bool_func2_with_rev { protected: Arg_comparator cmp; - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_like_args0(); } @@ -518,15 +545,16 @@ public: Item_bool_rowready_func2(THD *thd, Item *a, Item *b): Item_bool_func2_with_rev(thd, a, b), cmp(tmp_arg, tmp_arg + 1) { } - Sql_mode_dependency 
value_depends_on_sql_mode() const; - void print(String *str, enum_query_type query_type) + Sql_mode_dependency value_depends_on_sql_mode() const override; + void print(String *str, enum_query_type query_type) override { Item_func::print_op(str, query_type); } - enum precedence precedence() const { return CMP_PRECEDENCE; } - Item *neg_transformer(THD *thd); + enum precedence precedence() const override { return CMP_PRECEDENCE; } + Item *neg_transformer(THD *thd) override; virtual Item *negated_item(THD *thd); - Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + Item *propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { Item_args::propagate_equal_fields(thd, Context(ANY_SUBST, @@ -535,30 +563,31 @@ public: cond); return this; } - bool fix_length_and_dec(); - int set_cmp_func() + bool fix_length_and_dec() override; + int set_cmp_func(THD *thd) { - return cmp.set_cmp_func(this, tmp_arg, tmp_arg + 1, true); + return cmp.set_cmp_func(thd, this, tmp_arg, tmp_arg + 1, true); } - CHARSET_INFO *compare_collation() const { return cmp.compare_collation(); } - const Type_handler *compare_type_handler() const + CHARSET_INFO *compare_collation() const override + { return cmp.compare_collation(); } + const Type_handler *compare_type_handler() const override { return cmp.compare_type_handler(); } Arg_comparator *get_comparator() { return &cmp; } - void cleanup() + void cleanup() override { Item_bool_func2::cleanup(); cmp.cleanup(); } void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, - SARGABLE_PARAM **sargables) + SARGABLE_PARAM **sargables) override { return add_key_fields_optimize_op(join, key_fields, and_level, usable_tables, sargables, false); } - Item *build_clone(THD *thd) + Item *build_clone(THD *thd) override { Item_bool_rowready_func2 *clone= (Item_bool_rowready_func2 *) Item_func::build_clone(thd); @@ -579,20 +608,24 @@ class Item_func_xor :public Item_bool_func { 
public: Item_func_xor(THD *thd, Item *i1, Item *i2): Item_bool_func(thd, i1, i2) {} - enum Functype functype() const { return XOR_FUNC; } - const char *func_name() const { return "xor"; } - enum precedence precedence() const { return XOR_PRECEDENCE; } - void print(String *str, enum_query_type query_type) + enum Functype functype() const override { return XOR_FUNC; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("xor") }; + return name; + } + enum precedence precedence() const override { return XOR_PRECEDENCE; } + void print(String *str, enum_query_type query_type) override { Item_func::print_op(str, query_type); } - longlong val_int(); - bool find_not_null_fields(table_map allowed) { return false; } - Item *neg_transformer(THD *thd); - Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + longlong val_int() override; + bool find_not_null_fields(table_map allowed) override { return false; } + Item *neg_transformer(THD *thd) override; + Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) override { Item_args::propagate_equal_fields(thd, Context_boolean(), cond); return this; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_xor>(thd, this); } }; @@ -606,7 +639,11 @@ public: bool is_top_level_item() const override { return abort_on_null; } longlong val_int() override; enum Functype functype() const override { return NOT_FUNC; } - const char *func_name() const override { return "not"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("not") }; + return name; + } bool find_not_null_fields(table_map allowed) override { return false; } enum precedence precedence() const override { return NEG_PRECEDENCE; } Item *neg_transformer(THD *thd) override; @@ -653,15 +690,19 @@ class Item_func_trig_cond: public Item_bool_func public: Item_func_trig_cond(THD *thd, Item *a, bool *f): 
Item_bool_func(thd, a) { trig_var= f; } - longlong val_int() { return *trig_var ? args[0]->val_int() : 1; } - enum Functype functype() const { return TRIG_COND_FUNC; }; - const char *func_name() const { return "trigcond"; }; - bool const_item() const { return FALSE; } + longlong val_int() override { return *trig_var ? args[0]->val_int() : 1; } + enum Functype functype() const override { return TRIG_COND_FUNC; }; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("trigcond") }; + return name; + } + bool const_item() const override { return FALSE; } bool *get_trig_var() { return trig_var; } void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, - SARGABLE_PARAM **sargables); - Item *get_copy(THD *thd) + SARGABLE_PARAM **sargables) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_trig_cond>(thd, this); } }; @@ -677,19 +718,23 @@ public: Item_func_not_all(THD *thd, Item *a): Item_func_not(thd, a), test_sum_item(0), test_sub_item(0), show(0) {} - table_map not_null_tables() const { return 0; } - longlong val_int(); - enum Functype functype() const { return NOT_ALL_FUNC; } - const char *func_name() const { return "<not>"; } - enum precedence precedence() const + table_map not_null_tables() const override { return 0; } + longlong val_int() override; + enum Functype functype() const override { return NOT_ALL_FUNC; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<not>") }; + return name; + } + enum precedence precedence() const override { return show ? 
Item_func::precedence() : args[0]->precedence(); } - bool fix_fields(THD *thd, Item **ref) - {return Item_func::fix_fields(thd, ref);} - virtual void print(String *str, enum_query_type query_type); + bool fix_fields(THD *thd, Item **ref) override + { return Item_func::fix_fields(thd, ref);} + void print(String *str, enum_query_type query_type) override; void set_sum_test(Item_sum_min_max *item) { test_sum_item= item; test_sub_item= 0; }; void set_sub_test(Item_maxmin_subselect *item) { test_sub_item= item; test_sum_item= 0;}; bool empty_underlying_subquery(); - Item *neg_transformer(THD *thd); + Item *neg_transformer(THD *thd) override; }; @@ -698,10 +743,14 @@ class Item_func_nop_all :public Item_func_not_all public: Item_func_nop_all(THD *thd, Item *a): Item_func_not_all(thd, a) {} - longlong val_int(); - const char *func_name() const { return "<nop>"; } - Item *neg_transformer(THD *thd); - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<nop>") }; + return name; + } + Item *neg_transformer(THD *thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_nop_all>(thd, this); } }; @@ -714,24 +763,28 @@ public: Item_bool_rowready_func2(thd, a, b), abort_on_null(false), in_equality_no(UINT_MAX) {} - longlong val_int(); - enum Functype functype() const { return EQ_FUNC; } - enum Functype rev_functype() const { return EQ_FUNC; } - cond_result eq_cmp_result() const { return COND_TRUE; } - const char *func_name() const { return "="; } - void top_level_item() { abort_on_null= true; } - Item *negated_item(THD *thd); + longlong val_int() override; + enum Functype functype() const override { return EQ_FUNC; } + enum Functype rev_functype() const override { return EQ_FUNC; } + cond_result eq_cmp_result() const override { return COND_TRUE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("=") }; + 
return name; + } + void top_level_item() override { abort_on_null= true; } + Item *negated_item(THD *thd) override; COND *build_equal_items(THD *thd, COND_EQUAL *inherited, bool link_item_fields, - COND_EQUAL **cond_equal_ref); + COND_EQUAL **cond_equal_ref) override; void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, - SARGABLE_PARAM **sargables) + SARGABLE_PARAM **sargables) override { return add_key_fields_optimize_op(join, key_fields, and_level, usable_tables, sargables, true); } - bool check_equality(THD *thd, COND_EQUAL *cond, List<Item> *eq_list); + bool check_equality(THD *thd, COND_EQUAL *cond, List<Item> *eq_list) override; /* - If this equality is created from the subquery's IN-equality: number of the item it was created from, e.g. for @@ -740,9 +793,9 @@ public: - Otherwise, UINT_MAX */ uint in_equality_no; - virtual uint exists2in_reserved_items() { return 1; }; + uint exists2in_reserved_items() override { return 1; }; friend class Arg_comparator; - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_eq>(thd, this); } }; @@ -751,23 +804,27 @@ class Item_func_equal final :public Item_bool_rowready_func2 public: Item_func_equal(THD *thd, Item *a, Item *b): Item_bool_rowready_func2(thd, a, b) {} - longlong val_int(); - bool fix_length_and_dec(); - table_map not_null_tables() const { return 0; } - bool find_not_null_fields(table_map allowed) { return false; } - enum Functype functype() const { return EQUAL_FUNC; } - enum Functype rev_functype() const { return EQUAL_FUNC; } - cond_result eq_cmp_result() const { return COND_TRUE; } - const char *func_name() const { return "<=>"; } - Item *neg_transformer(THD *thd) { return 0; } + longlong val_int() override; + bool fix_length_and_dec() override; + table_map not_null_tables() const override { return 0; } + bool find_not_null_fields(table_map allowed) override { return false; } + enum Functype functype() const override { 
return EQUAL_FUNC; } + enum Functype rev_functype() const override { return EQUAL_FUNC; } + cond_result eq_cmp_result() const override { return COND_TRUE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<=>") }; + return name; + } + Item *neg_transformer(THD *thd) override { return 0; } void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, - SARGABLE_PARAM **sargables) + SARGABLE_PARAM **sargables) override { return add_key_fields_optimize_op(join, key_fields, and_level, usable_tables, sargables, true); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_equal>(thd, this); } }; @@ -777,13 +834,17 @@ class Item_func_ge :public Item_bool_rowready_func2 public: Item_func_ge(THD *thd, Item *a, Item *b): Item_bool_rowready_func2(thd, a, b) {}; - longlong val_int(); - enum Functype functype() const { return GE_FUNC; } - enum Functype rev_functype() const { return LE_FUNC; } - cond_result eq_cmp_result() const { return COND_TRUE; } - const char *func_name() const { return ">="; } - Item *negated_item(THD *thd); - Item *get_copy(THD *thd) + longlong val_int() override; + enum Functype functype() const override { return GE_FUNC; } + enum Functype rev_functype() const override { return LE_FUNC; } + cond_result eq_cmp_result() const override { return COND_TRUE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN(">=") }; + return name; + } + Item *negated_item(THD *thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ge>(thd, this); } }; @@ -793,13 +854,17 @@ class Item_func_gt :public Item_bool_rowready_func2 public: Item_func_gt(THD *thd, Item *a, Item *b): Item_bool_rowready_func2(thd, a, b) {}; - longlong val_int(); - enum Functype functype() const { return GT_FUNC; } - enum Functype rev_functype() const { return LT_FUNC; } - cond_result eq_cmp_result() 
const { return COND_FALSE; } - const char *func_name() const { return ">"; } - Item *negated_item(THD *thd); - Item *get_copy(THD *thd) + longlong val_int() override; + enum Functype functype() const override { return GT_FUNC; } + enum Functype rev_functype() const override { return LT_FUNC; } + cond_result eq_cmp_result() const override { return COND_FALSE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN(">") }; + return name; + } + Item *negated_item(THD *thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_gt>(thd, this); } }; @@ -809,13 +874,17 @@ class Item_func_le :public Item_bool_rowready_func2 public: Item_func_le(THD *thd, Item *a, Item *b): Item_bool_rowready_func2(thd, a, b) {}; - longlong val_int(); - enum Functype functype() const { return LE_FUNC; } - enum Functype rev_functype() const { return GE_FUNC; } - cond_result eq_cmp_result() const { return COND_TRUE; } - const char *func_name() const { return "<="; } - Item *negated_item(THD *thd); - Item *get_copy(THD *thd) + longlong val_int() override; + enum Functype functype() const override { return LE_FUNC; } + enum Functype rev_functype() const override { return GE_FUNC; } + cond_result eq_cmp_result() const override { return COND_TRUE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<=") }; + return name; + } + Item *negated_item(THD *thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_le>(thd, this); } }; @@ -825,13 +894,17 @@ class Item_func_lt :public Item_bool_rowready_func2 public: Item_func_lt(THD *thd, Item *a, Item *b): Item_bool_rowready_func2(thd, a, b) {} - longlong val_int(); - enum Functype functype() const { return LT_FUNC; } - enum Functype rev_functype() const { return GT_FUNC; } - cond_result eq_cmp_result() const { return COND_FALSE; } - const char *func_name() const { return "<"; } - Item *negated_item(THD *thd); - 
Item *get_copy(THD *thd) + longlong val_int() override; + enum Functype functype() const override { return LT_FUNC; } + enum Functype rev_functype() const override { return GT_FUNC; } + cond_result eq_cmp_result() const override { return COND_FALSE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<") }; + return name; + } + Item *negated_item(THD *thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_lt>(thd, this); } }; @@ -840,19 +913,23 @@ class Item_func_ne :public Item_bool_rowready_func2 { protected: SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, - Field *field, Item *value); + Field *field, Item *value) override; public: Item_func_ne(THD *thd, Item *a, Item *b): Item_bool_rowready_func2(thd, a, b) {} - longlong val_int(); - enum Functype functype() const { return NE_FUNC; } - enum Functype rev_functype() const { return NE_FUNC; } - cond_result eq_cmp_result() const { return COND_FALSE; } - const char *func_name() const { return "<>"; } - Item *negated_item(THD *thd); + longlong val_int() override; + enum Functype functype() const override { return NE_FUNC; } + enum Functype rev_functype() const override { return NE_FUNC; } + cond_result eq_cmp_result() const override { return COND_FALSE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<>") }; + return name; + } + Item *negated_item(THD *thd) override; void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, - table_map usable_tables, SARGABLE_PARAM **sargables); - Item *get_copy(THD *thd) + table_map usable_tables, SARGABLE_PARAM **sargables) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ne>(thd, this); } }; @@ -908,37 +985,43 @@ class Item_func_between :public Item_func_opt_neg { protected: SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, - Field *field, Item *value); + Field *field, Item *value) override; bool 
val_int_cmp_int_finalize(longlong value, longlong a, longlong b); public: String value0,value1,value2; Item_func_between(THD *thd, Item *a, Item *b, Item *c): Item_func_opt_neg(thd, a, b, c) { } - longlong val_int() + longlong val_int() override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return m_comparator.type_handler()->Item_func_between_val_int(this); } - enum Functype functype() const { return BETWEEN; } - const char *func_name() const { return "between"; } - enum precedence precedence() const { return BETWEEN_PRECEDENCE; } - bool fix_length_and_dec(); + enum Functype functype() const override { return BETWEEN; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("between") }; + return name; + } + enum precedence precedence() const override { return BETWEEN_PRECEDENCE; } + bool fix_length_and_dec() override; bool fix_length_and_dec_string(THD *) { return agg_arg_charsets_for_comparison(cmp_collation, args, 3); } bool fix_length_and_dec_temporal(THD *); bool fix_length_and_dec_numeric(THD *); - virtual void print(String *str, enum_query_type query_type); - bool eval_not_null_tables(void *opt_arg); - bool find_not_null_fields(table_map allowed); - void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge); - bool count_sargable_conds(void *arg); + void print(String *str, enum_query_type query_type) override; + bool eval_not_null_tables(void *opt_arg) override; + bool find_not_null_fields(table_map allowed) override; + void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge) + override; + bool count_sargable_conds(void *arg) override; void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, - SARGABLE_PARAM **sargables); - SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); + SARGABLE_PARAM **sargables) override; + SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override; Item* propagate_equal_fields(THD *thd, 
const Context &ctx, COND_EQUAL *cond) + override { Item_args::propagate_equal_fields(thd, Context(ANY_SUBST, @@ -947,7 +1030,7 @@ public: cond); return this; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_between>(thd, this); } longlong val_int_cmp_string(); @@ -962,24 +1045,28 @@ public: class Item_func_strcmp :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_str(0, 2); } String value1, value2; DTCollation cmp_collation; public: Item_func_strcmp(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} - longlong val_int(); - uint decimal_precision() const { return 1; } - const char *func_name() const { return "strcmp"; } - bool fix_length_and_dec() + longlong val_int() override; + decimal_digits_t decimal_precision() const override { return 1; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("strcmp") }; + return name; + } + bool fix_length_and_dec() override { if (agg_arg_charsets_for_comparison(cmp_collation, args, 2)) return TRUE; fix_char_length(2); // returns "1" or "0" or "-1" return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_strcmp>(thd, this); } }; @@ -996,7 +1083,7 @@ class Item_func_interval :public Item_long_func Item_row *row; bool use_decimal_comparison; interval_range *intervals; - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_like_args0(); } @@ -1004,17 +1091,21 @@ public: Item_func_interval(THD *thd, Item_row *a): Item_long_func(thd, a), row(a), intervals(0) { } - bool fix_fields(THD *, Item **); - longlong val_int(); - bool fix_length_and_dec(); - const char *func_name() const { return "interval"; } - uint decimal_precision() const { return 2; } - void print(String *str, enum_query_type query_type) + bool fix_fields(THD *, Item **) override; + 
longlong val_int() override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override { - str->append(func_name()); + static LEX_CSTRING name= {STRING_WITH_LEN("interval") }; + return name; + } + decimal_digits_t decimal_precision() const override { return 2; } + void print(String *str, enum_query_type query_type) override + { + str->append(func_name_cstring()); print_args(str, 0, query_type); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_interval>(thd, this); } }; @@ -1026,23 +1117,27 @@ public: Item_func_case_expression(thd, a, b) {} Item_func_coalesce(THD *thd, List<Item> &list): Item_func_case_expression(thd, list) {} - double real_op(); - longlong int_op(); - String *str_op(String *); - my_decimal *decimal_op(my_decimal *); - bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool time_op(THD *thd, MYSQL_TIME *ltime); - bool native_op(THD *thd, Native *to); - bool fix_length_and_dec() - { - if (aggregate_for_result(func_name(), args, arg_count, true)) + double real_op() override; + longlong int_op() override; + String *str_op(String *) override; + my_decimal *decimal_op(my_decimal *) override; + bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool time_op(THD *thd, MYSQL_TIME *ltime) override; + bool native_op(THD *thd, Native *to) override; + bool fix_length_and_dec() override + { + if (aggregate_for_result(func_name_cstring(), args, arg_count, true)) return TRUE; fix_attributes(args, arg_count); return FALSE; } - const char *func_name() const { return "coalesce"; } - table_map not_null_tables() const { return 0; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("coalesce") }; + return name; + } + table_map not_null_tables() const override { return 0; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_coalesce>(thd, this); } }; @@ -1058,7 
+1153,7 @@ class Item_func_case_abbreviation2 :public Item_func_case_expression protected: bool fix_length_and_dec2(Item **items) { - if (aggregate_for_result(func_name(), items, 2, true)) + if (aggregate_for_result(func_name_cstring(), items, 2, true)) return TRUE; fix_attributes(items, 2); return FALSE; @@ -1068,7 +1163,7 @@ protected: { Type_std_attributes::set(source); set_handler(source->type_handler()); - maybe_null= maybe_null_arg; + set_maybe_null(maybe_null_arg); } bool fix_length_and_dec2_eliminate_null(Item **items) @@ -1106,14 +1201,14 @@ class Item_func_ifnull :public Item_func_case_abbreviation2 public: Item_func_ifnull(THD *thd, Item *a, Item *b): Item_func_case_abbreviation2(thd, a, b) {} - double real_op(); - longlong int_op(); - String *str_op(String *str); - my_decimal *decimal_op(my_decimal *); - bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool time_op(THD *thd, MYSQL_TIME *ltime); - bool native_op(THD *thd, Native *to); - bool fix_length_and_dec() + double real_op() override; + longlong int_op() override; + String *str_op(String *str) override; + my_decimal *decimal_op(my_decimal *) override; + bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool time_op(THD *thd, MYSQL_TIME *ltime) override; + bool native_op(THD *thd, Native *to) override; + bool fix_length_and_dec() override { /* Set nullability from args[1] by default. 
@@ -1125,15 +1220,19 @@ public: IFNULL(inet6_not_null_expr, 'foo') -> INET6 NULL IFNULL(inet6_not_null_expr, '::1') -> INET6 NOT NULL */ - maybe_null= args[1]->maybe_null; + copy_flags(args[1], item_base_t::MAYBE_NULL); if (Item_func_case_abbreviation2::fix_length_and_dec2(args)) return TRUE; return FALSE; } - const char *func_name() const { return "ifnull"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("ifnull") }; + return name; + } - table_map not_null_tables() const { return 0; } - Item *get_copy(THD *thd) + table_map not_null_tables() const override { return 0; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ifnull>(thd, this); } }; @@ -1192,21 +1291,27 @@ public: class Item_func_if :public Item_func_case_abbreviation2_switch { protected: - Item *find_item() const { return args[0]->val_bool() ? args[1] : args[2]; } + Item *find_item() const override + { return args[0]->val_bool() ? args[1] : args[2]; } public: Item_func_if(THD *thd, Item *a, Item *b, Item *c): Item_func_case_abbreviation2_switch(thd, a, b, c) {} - bool fix_fields(THD *, Item **); - bool fix_length_and_dec() + bool fix_fields(THD *, Item **) override; + bool fix_length_and_dec() override { return fix_length_and_dec2_eliminate_null(args + 1); } - const char *func_name() const { return "if"; } - bool eval_not_null_tables(void *opt_arg); - void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("if") }; + return name; + } + bool eval_not_null_tables(void *opt_arg) override; + void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge) + override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_if>(thd, this); } private: void cache_type_info(Item *source); @@ -1216,18 +1321,23 @@ private: class Item_func_nvl2 :public 
Item_func_case_abbreviation2_switch { protected: - Item *find_item() const { return args[0]->is_null() ? args[2] : args[1]; } + Item *find_item() const override + { return args[0]->is_null() ? args[2] : args[1]; } public: Item_func_nvl2(THD *thd, Item *a, Item *b, Item *c): Item_func_case_abbreviation2_switch(thd, a, b, c) {} - const char *func_name() const { return "nvl2"; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("nvl2") }; + return name; + } + bool fix_length_and_dec() override { return fix_length_and_dec2_eliminate_null(args + 1); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_nvl2>(thd, this); } }; @@ -1274,28 +1384,33 @@ public: m_cache(NULL), m_arg0(NULL) { arg_count--; } - void cleanup() + void cleanup() override { Item_func_hybrid_field_type::cleanup(); arg_count= 2; // See the comment to the constructor } - bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool time_op(THD *thd, MYSQL_TIME *ltime); - double real_op(); - longlong int_op(); - String *str_op(String *str); - my_decimal *decimal_op(my_decimal *); - bool native_op(THD *thd, Native *to); - bool fix_length_and_dec(); - bool walk(Item_processor processor, bool walk_subquery, void *arg); - const char *func_name() const { return "nullif"; } - void print(String *str, enum_query_type query_type); + bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool time_op(THD *thd, MYSQL_TIME *ltime) override; + double real_op() override; + longlong int_op() override; + String *str_op(String *str) override; + my_decimal *decimal_op(my_decimal *) override; + bool native_op(THD *thd, Native *to) override; + bool fix_length_and_dec() override; + bool walk(Item_processor processor, bool walk_subquery, void *arg) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("nullif") }; + return 
name; + } + void print(String *str, enum_query_type query_type) override; void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, - List<Item> &fields, uint flags); - void update_used_tables(); - table_map not_null_tables() const { return 0; } - bool is_null(); + List<Item> &fields, uint flags) override; + void update_used_tables() override; + table_map not_null_tables() const override { return 0; } + bool is_null() override; Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { Context cmpctx(ANY_SUBST, cmp.compare_type_handler(), cmp.compare_collation()); @@ -1316,17 +1431,17 @@ public: cond, &args[2]); return this; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_nullif>(thd, this); } - Item *derived_field_transformer_for_having(THD *thd, uchar *arg) + Item *derived_field_transformer_for_having(THD *thd, uchar *arg) override { reset_first_arg_if_needed(); return this; } - Item *derived_field_transformer_for_where(THD *thd, uchar *arg) + Item *derived_field_transformer_for_where(THD *thd, uchar *arg) override { reset_first_arg_if_needed(); return this; } - Item *grouping_field_transformer_for_where(THD *thd, uchar *arg) + Item *grouping_field_transformer_for_where(THD *thd, uchar *arg) override { reset_first_arg_if_needed(); return this; } - Item *in_subq_field_transformer_for_where(THD *thd, uchar *arg) + Item *in_subq_field_transformer_for_where(THD *thd, uchar *arg) override { reset_first_arg_if_needed(); return this; } - Item *in_subq_field_transformer_for_having(THD *thd, uchar *arg) + Item *in_subq_field_transformer_for_having(THD *thd, uchar *arg) override { reset_first_arg_if_needed(); return this; } }; @@ -1407,16 +1522,17 @@ class in_string :public in_vector public: in_string(THD *thd, uint elements, qsort2_cmp cmp_func, CHARSET_INFO *cs); ~in_string(); - void set(uint pos,Item *item); - uchar *get_value(Item *item); - Item* create_item(THD *thd); - void 
value_to_item(uint pos, Item *item) + void set(uint pos,Item *item) override; + uchar *get_value(Item *item) override; + Item* create_item(THD *thd) override; + void value_to_item(uint pos, Item *item) override { String *str=((String*) base)+pos; Item_string_for_in_vector *to= (Item_string_for_in_vector*) item; to->set_value(str); } - const Type_handler *type_handler() const { return &type_handler_varchar; } + const Type_handler *type_handler() const override + { return &type_handler_varchar; } }; class in_longlong :public in_vector @@ -1434,16 +1550,17 @@ protected: } tmp; public: in_longlong(THD *thd, uint elements); - void set(uint pos,Item *item); - uchar *get_value(Item *item); - Item* create_item(THD *thd); - void value_to_item(uint pos, Item *item) + void set(uint pos,Item *item) override; + uchar *get_value(Item *item) override; + Item* create_item(THD *thd) override; + void value_to_item(uint pos, Item *item) override { ((Item_int*) item)->value= ((packed_longlong*) base)[pos].val; ((Item_int*) item)->unsigned_flag= (bool) ((packed_longlong*) base)[pos].unsigned_flag; } - const Type_handler *type_handler() const { return &type_handler_slonglong; } + const Type_handler *type_handler() const override + { return &type_handler_slonglong; } friend int cmp_longlong(void *cmp_arg, packed_longlong *a,packed_longlong *b); }; @@ -1454,11 +1571,12 @@ class in_timestamp :public in_vector Timestamp_or_zero_datetime tmp; public: in_timestamp(THD *thd, uint elements); - void set(uint pos,Item *item); - uchar *get_value(Item *item); - Item* create_item(THD *thd); - void value_to_item(uint pos, Item *item); - const Type_handler *type_handler() const { return &type_handler_timestamp2; } + void set(uint pos,Item *item) override; + uchar *get_value(Item *item) override; + Item* create_item(THD *thd) override; + void value_to_item(uint pos, Item *item) override; + const Type_handler *type_handler() const override + { return &type_handler_timestamp2; } }; @@ -1489,9 +1607,10 @@ 
public: in_datetime(THD *thd, uint elements) :in_temporal(thd, elements) {} - void set(uint pos,Item *item); - uchar *get_value(Item *item); - const Type_handler *type_handler() const { return &type_handler_datetime2; } + void set(uint pos,Item *item) override; + uchar *get_value(Item *item) override; + const Type_handler *type_handler() const override + { return &type_handler_datetime2; } }; @@ -1501,9 +1620,10 @@ public: in_time(THD *thd, uint elements) :in_temporal(thd, elements) {} - void set(uint pos,Item *item); - uchar *get_value(Item *item); - const Type_handler *type_handler() const { return &type_handler_time2; } + void set(uint pos,Item *item) override; + uchar *get_value(Item *item) override; + const Type_handler *type_handler() const override + { return &type_handler_time2; } }; @@ -1512,14 +1632,15 @@ class in_double :public in_vector double tmp; public: in_double(THD *thd, uint elements); - void set(uint pos,Item *item); - uchar *get_value(Item *item); - Item *create_item(THD *thd); - void value_to_item(uint pos, Item *item) + void set(uint pos,Item *item) override; + uchar *get_value(Item *item) override; + Item *create_item(THD *thd) override; + void value_to_item(uint pos, Item *item) override { ((Item_float*)item)->value= ((double*) base)[pos]; } - const Type_handler *type_handler() const { return &type_handler_double; } + const Type_handler *type_handler() const override + { return &type_handler_double; } }; @@ -1528,16 +1649,17 @@ class in_decimal :public in_vector my_decimal val; public: in_decimal(THD *thd, uint elements); - void set(uint pos, Item *item); - uchar *get_value(Item *item); - Item *create_item(THD *thd); - void value_to_item(uint pos, Item *item) + void set(uint pos, Item *item) override; + uchar *get_value(Item *item) override; + Item *create_item(THD *thd) override; + void value_to_item(uint pos, Item *item) override { my_decimal *dec= ((my_decimal *)base) + pos; Item_decimal *item_dec= (Item_decimal*)item; 
item_dec->set_decimal_value(dec); } - const Type_handler *type_handler() const { return &type_handler_newdecimal; } + const Type_handler *type_handler() const override + { return &type_handler_newdecimal; } }; @@ -1560,7 +1682,7 @@ public: virtual int cmp_not_null(const Value *value)= 0; // for optimized IN with row virtual int compare(cmp_item *item)= 0; - virtual cmp_item *make_same()= 0; + virtual cmp_item *make_same(THD *thd)= 0; virtual void store_value_by_template(THD *thd, cmp_item *tmpl, Item *item) { store_value(item); @@ -1633,11 +1755,11 @@ public: cmp_item_string *l_cmp= (cmp_item_string *) ci; return sortcmp(value_res, l_cmp->value_res, cmp_charset); } - cmp_item *make_same(); + cmp_item *make_same(THD *thd); void set_charset(CHARSET_INFO *cs) { cmp_charset= cs; - value.set_quick(value_buff, sizeof(value_buff), cs); + value.set_buffer_if_not_allocated(value_buff, sizeof(value_buff), cs); } }; @@ -1667,7 +1789,7 @@ public: cmp_item_int *l_cmp= (cmp_item_int *)ci; return (value < l_cmp->value) ? -1 : ((value == l_cmp->value) ? 0 : 1); } - cmp_item *make_same(); + cmp_item *make_same(THD *thd); }; /* @@ -1696,7 +1818,7 @@ public: } int cmp_not_null(const Value *val); int cmp(Item *arg); - cmp_item *make_same(); + cmp_item *make_same(THD *thd); }; @@ -1713,7 +1835,7 @@ public: } int cmp_not_null(const Value *val); int cmp(Item *arg); - cmp_item *make_same(); + cmp_item *make_same(THD *thd); }; @@ -1726,7 +1848,7 @@ public: int cmp_not_null(const Value *val); int cmp(Item *arg); int compare(cmp_item *ci); - cmp_item *make_same(); + cmp_item *make_same(THD *thd); }; @@ -1756,7 +1878,7 @@ public: cmp_item_real *l_cmp= (cmp_item_real *) ci; return (value < l_cmp->value)? -1 : ((value == l_cmp->value) ? 
0 : 1); } - cmp_item *make_same(); + cmp_item *make_same(THD *thd); }; @@ -1769,7 +1891,7 @@ public: int cmp(Item *arg); int cmp_not_null(const Value *val); int compare(cmp_item *c); - cmp_item *make_same(); + cmp_item *make_same(THD *thd); }; @@ -1806,7 +1928,7 @@ public: cmp_item_string *l_cmp= (cmp_item_string *) ci; return sortcmp(value_res, l_cmp->value_res, cmp_charset); } - cmp_item *make_same() + cmp_item *make_same(THD *thd) { return new cmp_item_sort_string_in_static(cmp_charset); } @@ -2063,13 +2185,14 @@ public: arguments (e.g. ROWs with size) @retval false - a new element was successfully added. */ - bool add_value(const char *funcname, Item_args *args, uint value_index); + bool add_value(const LEX_CSTRING &funcname, Item_args *args, + uint value_index); /** Add a new element into m_comparators[], ignoring explicit NULL values. If the value appeared to be an explicit NULL, nulls_found[0] is set to true. */ - bool add_value_skip_null(const char *funcname, + bool add_value_skip_null(const LEX_CSTRING &funcname, Item_args *args, uint value_index, bool *nulls_found); @@ -2192,18 +2315,22 @@ public: Item_func_case(THD *thd, List<Item> &list) :Item_func_case_expression(thd, list) { } - double real_op(); - longlong int_op(); - String *str_op(String *); - my_decimal *decimal_op(my_decimal *); - bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool time_op(THD *thd, MYSQL_TIME *ltime); - bool native_op(THD *thd, Native *to); - bool fix_fields(THD *thd, Item **ref); - table_map not_null_tables() const { return 0; } - const char *func_name() const { return "case"; } + double real_op() override; + longlong int_op() override; + String *str_op(String *) override; + my_decimal *decimal_op(my_decimal *) override; + bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool time_op(THD *thd, MYSQL_TIME *ltime) override; + bool native_op(THD *thd, Native *to) override; + bool fix_fields(THD *thd, Item **ref) override; + 
table_map not_null_tables() const override { return 0; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("case") }; + return name; + } CHARSET_INFO *compare_collation() const { return cmp_collation.collation; } - bool need_parentheses_in_default() { return true; } + bool need_parentheses_in_default() override { return true; } }; @@ -2218,7 +2345,8 @@ class Item_func_case_searched: public Item_func_case { uint when_count() const { return arg_count / 2; } bool with_else() const { return arg_count % 2; } - Item **else_expr_addr() const { return with_else() ? &args[arg_count - 1] : 0; } + Item **else_expr_addr() const override + { return with_else() ? &args[arg_count - 1] : 0; } public: Item_func_case_searched(THD *thd, List<Item> &list) :Item_func_case(thd, list) @@ -2226,17 +2354,18 @@ public: DBUG_ASSERT(arg_count >= 2); reorder_args(0); } - enum Functype functype() const { return CASE_SEARCHED_FUNC; } - void print(String *str, enum_query_type query_type); - bool fix_length_and_dec(); + enum Functype functype() const override { return CASE_SEARCHED_FUNC; } + void print(String *str, enum_query_type query_type) override; + bool fix_length_and_dec() override; Item *propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { // None of the arguments are in a comparison context Item_args::propagate_equal_fields(thd, Context_identity(), cond); return this; } - Item *find_item(); - Item *get_copy(THD *thd) + Item *find_item() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_case_searched>(thd, this); } }; @@ -2259,7 +2388,8 @@ protected: uint m_found_types; uint when_count() const { return (arg_count - 1) / 2; } bool with_else() const { return arg_count % 2 == 0; } - Item **else_expr_addr() const { return with_else() ? &args[arg_count - 1] : 0; } + Item **else_expr_addr() const override + { return with_else() ? 
&args[arg_count - 1] : 0; } bool aggregate_switch_and_when_arguments(THD *thd, bool nulls_equal); bool prepare_predicant_and_values(THD *thd, uint *found_types, bool nulls_equal); @@ -2272,19 +2402,20 @@ public: DBUG_ASSERT(arg_count >= 3); reorder_args(1); } - void cleanup() + void cleanup() override { DBUG_ENTER("Item_func_case_simple::cleanup"); Item_func::cleanup(); Predicant_to_list_comparator::cleanup(); DBUG_VOID_RETURN; } - enum Functype functype() const { return CASE_SIMPLE_FUNC; } - void print(String *str, enum_query_type query_type); - bool fix_length_and_dec(); - Item *propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond); - Item *find_item(); - Item *build_clone(THD *thd) + enum Functype functype() const override { return CASE_SIMPLE_FUNC; } + void print(String *str, enum_query_type query_type) override; + bool fix_length_and_dec() override; + Item *propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override; + Item *find_item() override; + Item *build_clone(THD *thd) override { Item_func_case_simple *clone= (Item_func_case_simple *) Item_func_case::build_clone(thd); @@ -2293,7 +2424,7 @@ public: return NULL; return clone; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_case_simple>(thd, this); } }; @@ -2304,11 +2435,15 @@ public: Item_func_decode_oracle(THD *thd, List<Item> &list) :Item_func_case_simple(thd, list) { } - const char *func_name() const { return "decode_oracle"; } - void print(String *str, enum_query_type query_type); - bool fix_length_and_dec(); - Item *find_item(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("decode_oracle") }; + return name; + } + void print(String *str, enum_query_type query_type) override; + bool fix_length_and_dec() override; + Item *find_item() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_decode_oracle>(thd, this); 
} }; @@ -2353,19 +2488,19 @@ class Item_func_in :public Item_func_opt_neg, { for (uint i= 0; i < nitems; i++) { - if (!items[i]->const_item() || items[i]->is_expensive()) + if (!items[i]->can_eval_in_optimize()) return false; } return true; } bool prepare_predicant_and_values(THD *thd, uint *found_types); - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_like_args0(); } protected: SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, - Field *field, Item *value); + Field *field, Item *value) override; bool transform_into_subq; bool transform_into_subq_checked; public: @@ -2394,9 +2529,9 @@ public: array(0), have_null(0), arg_types_compatible(FALSE), emb_on_expr_nest(0) { } - longlong val_int(); - bool fix_fields(THD *, Item **); - bool fix_length_and_dec(); + longlong val_int() override; + bool fix_fields(THD *, Item **) override; + bool fix_length_and_dec() override; bool compatible_types_scalar_bisection_possible() { DBUG_ASSERT(m_comparator.cmp_type() != ROW_RESULT); @@ -2407,7 +2542,7 @@ public: DBUG_ASSERT(m_comparator.cmp_type() == ROW_RESULT); return all_items_are_consts(args + 1, arg_count - 1) && // Bisection #2 ((is_top_level_item() && !negated) || // Bisection #3 - (!list_contains_null() && !args[0]->maybe_null)); // Bisection #4 + (!list_contains_null() && !args[0]->maybe_null())); // Bisection #4 } bool agg_all_arg_charsets_for_comparison() { @@ -2428,7 +2563,7 @@ public: bool fix_for_row_comparison_using_cmp_items(THD *thd); bool fix_for_row_comparison_using_bisection(THD *thd); - void cleanup() + void cleanup() override { DBUG_ENTER("Item_func_in::cleanup"); Item_int_func::cleanup(); @@ -2438,10 +2573,12 @@ public: DBUG_VOID_RETURN; } void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, - table_map usable_tables, SARGABLE_PARAM **sargables); - SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); + table_map usable_tables, SARGABLE_PARAM **sargables) + override; + SEL_TREE 
*get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override; SEL_TREE *get_func_row_mm_tree(RANGE_OPT_PARAM *param, Item_row *key_row); Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { /* Note, we pass ANY_SUBST, this makes sure that non of the args @@ -2465,17 +2602,22 @@ public: } return this; } - virtual void print(String *str, enum_query_type query_type); - enum Functype functype() const { return IN_FUNC; } - const char *func_name() const { return "in"; } - enum precedence precedence() const { return IN_PRECEDENCE; } - bool eval_not_null_tables(void *opt_arg); - bool find_not_null_fields(table_map allowed); - void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge); - bool count_sargable_conds(void *arg); - Item *get_copy(THD *thd) + void print(String *str, enum_query_type query_type) override; + enum Functype functype() const override { return IN_FUNC; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("in") }; + return name; + } + enum precedence precedence() const override { return IN_PRECEDENCE; } + bool eval_not_null_tables(void *opt_arg) override; + bool find_not_null_fields(table_map allowed) override; + void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge) + override; + bool count_sargable_conds(void *arg) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_in>(thd, this); } - Item *build_clone(THD *thd) + Item *build_clone(THD *thd) override { Item_func_in *clone= (Item_func_in *) Item_func::build_clone(thd); if (clone) @@ -2486,10 +2628,11 @@ public: } return clone; } - void mark_as_condition_AND_part(TABLE_LIST *embedding); + void mark_as_condition_AND_part(TABLE_LIST *embedding) override; bool to_be_transformed_into_in_subq(THD *thd); bool create_value_list_for_tvc(THD *thd, List< List<Item> > *values); - Item *in_predicate_to_in_subs_transformer(THD *thd, uchar *arg); + Item 
*in_predicate_to_in_subs_transformer(THD *thd, uchar *arg) override; + Item *in_predicate_to_equality_transformer(THD *thd, uchar *arg) override; uint32 max_length_of_left_expr(); }; @@ -2501,14 +2644,14 @@ class cmp_item_row :public cmp_item bool aggregate_row_elements_for_comparison(THD *thd, Type_handler_hybrid_field_type *cmp, Item_args *tmp, - const char *funcname, + const LEX_CSTRING &funcname, uint col, uint level); public: cmp_item_row(): comparators(0), n(0) {} ~cmp_item_row(); void store_value(Item *item); - bool prepare_comparators(THD *, const char *funcname, + bool prepare_comparators(THD *, const LEX_CSTRING &funcname, const Item_args *args, uint level); int cmp(Item *arg); int cmp_not_null(const Value *val) @@ -2517,7 +2660,7 @@ public: return TRUE; } int compare(cmp_item *arg); - cmp_item *make_same(); + cmp_item *make_same(THD *thd); void store_value_by_template(THD *thd, cmp_item *tmpl, Item *); friend class Item_func_in; cmp_item *get_comparator(uint i) { return comparators[i]; } @@ -2530,10 +2673,10 @@ class in_row :public in_vector public: in_row(THD *thd, uint elements, Item *); ~in_row(); - void set(uint pos,Item *item); - uchar *get_value(Item *item); + void set(uint pos,Item *item) override; + uchar *get_value(Item *item) override; friend class Item_func_in; - const Type_handler *type_handler() const { return &type_handler_row; } + const Type_handler *type_handler() const override { return &type_handler_row; } cmp_item *get_cmp_item() { return &tmp; } }; @@ -2542,19 +2685,20 @@ class Item_func_null_predicate :public Item_bool_func { protected: SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, - Field *field, Item *value) + Field *field, Item *value) override { DBUG_ENTER("Item_func_null_predicate::get_func_mm_tree"); DBUG_RETURN(get_mm_parts(param, field, functype(), value)); } SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, Field *field, KEY_PART *key_part, - Item_func::Functype type, Item *value); + Item_func::Functype type, Item *value) 
override; public: Item_func_null_predicate(THD *thd, Item *a): Item_bool_func(thd, a) { } void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, - table_map usable_tables, SARGABLE_PARAM **sargables); - SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) + table_map usable_tables, SARGABLE_PARAM **sargables) + override; + SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override { DBUG_ENTER("Item_func_null_predicate::get_mm_tree"); SEL_TREE *ftree= get_full_func_mm_tree_for_args(param, args[0], NULL); @@ -2562,14 +2706,16 @@ public: ftree= Item_func::get_mm_tree(param, cond_ptr); DBUG_RETURN(ftree); } - CHARSET_INFO *compare_collation() const + CHARSET_INFO *compare_collation() const override { return args[0]->collation.collation; } - bool fix_length_and_dec() + bool fix_length_and_dec() override { - decimals=0; max_length=1; maybe_null=0; + decimals=0; + max_length=1; + base_flags&= ~item_base_t::MAYBE_NULL; return FALSE; } - bool count_sargable_conds(void *arg); + bool count_sargable_conds(void *arg) override; }; @@ -2577,11 +2723,15 @@ class Item_func_isnull :public Item_func_null_predicate { public: Item_func_isnull(THD *thd, Item *a): Item_func_null_predicate(thd, a) {} - longlong val_int(); - enum Functype functype() const { return ISNULL_FUNC; } - const char *func_name() const { return "isnull"; } - void print(String *str, enum_query_type query_type); - enum precedence precedence() const { return CMP_PRECEDENCE; } + longlong val_int() override; + enum Functype functype() const override { return ISNULL_FUNC; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("isnull") }; + return name; + } + void print(String *str, enum_query_type query_type) override; + enum precedence precedence() const override { return CMP_PRECEDENCE; } bool arg_is_datetime_notnull_field() { @@ -2598,9 +2748,9 @@ public: } /* Optimize case of not_null_column IS NULL */ - virtual void 
update_used_tables() + void update_used_tables() override { - if (!args[0]->maybe_null && !arg_is_datetime_notnull_field()) + if (!args[0]->maybe_null() && !arg_is_datetime_notnull_field()) { used_tables_cache= 0; /* is always false */ const_item_cache= 1; @@ -2613,11 +2763,11 @@ public: } } COND *remove_eq_conds(THD *thd, Item::cond_result *cond_value, - bool top_level); - table_map not_null_tables() const { return 0; } - bool find_not_null_fields(table_map allowed); - Item *neg_transformer(THD *thd); - Item *get_copy(THD *thd) + bool top_level) override; + table_map not_null_tables() const override { return 0; } + bool find_not_null_fields(table_map allowed) override; + Item *neg_transformer(THD *thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_isnull>(thd, this); } }; @@ -2636,16 +2786,20 @@ public: Item_is_not_null_test(THD *thd, Item_in_subselect* ow, Item *a): Item_func_isnull(thd, a), owner(ow) {} - enum Functype functype() const { return ISNOTNULLTEST_FUNC; } - longlong val_int(); - const char *func_name() const { return "<is_not_null_test>"; } - void update_used_tables(); + enum Functype functype() const override { return ISNOTNULLTEST_FUNC; } + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<is_not_null_test>") }; + return name; + } + void update_used_tables() override; /* we add RAND_TABLE_BIT to prevent moving this item from HAVING to WHERE */ - table_map used_tables() const + table_map used_tables() const override { return used_tables_cache | RAND_TABLE_BIT; } - bool const_item() const { return FALSE; } + bool const_item() const override { return FALSE; } }; @@ -2656,16 +2810,20 @@ public: Item_func_isnotnull(THD *thd, Item *a): Item_func_null_predicate(thd, a), abort_on_null(0) { } - longlong val_int(); - enum Functype functype() const { return ISNOTNULL_FUNC; } - const char *func_name() const { return "isnotnull"; } - enum precedence 
precedence() const { return CMP_PRECEDENCE; } - table_map not_null_tables() const + longlong val_int() override; + enum Functype functype() const override { return ISNOTNULL_FUNC; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("isnotnull") }; + return name; + } + enum precedence precedence() const override { return CMP_PRECEDENCE; } + table_map not_null_tables() const override { return abort_on_null ? not_null_tables_cache : 0; } - Item *neg_transformer(THD *thd); - void print(String *str, enum_query_type query_type); - void top_level_item() { abort_on_null=1; } - Item *get_copy(THD *thd) + Item *neg_transformer(THD *thd) override; + void print(String *str, enum_query_type query_type) override; + void top_level_item() override { abort_on_null=1; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_isnotnull>(thd, this); } }; @@ -2697,14 +2855,14 @@ class Item_func_like :public Item_bool_func2 bool with_sargable_pattern() const; protected: SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, - Field *field, Item *value) + Field *field, Item *value) override { DBUG_ENTER("Item_func_like::get_func_mm_tree"); DBUG_RETURN(get_mm_parts(param, field, LIKE_FUNC, value)); } SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, Field *field, KEY_PART *key_part, - Item_func::Functype type, Item *value); + Item_func::Functype type, Item *value) override; public: int escape; bool negated; @@ -2716,13 +2874,13 @@ public: bool get_negated() const { return negated; } // Used by ColumnStore - Sql_mode_dependency value_depends_on_sql_mode() const; - longlong val_int(); - enum Functype functype() const { return LIKE_FUNC; } - void print(String *str, enum_query_type query_type); - CHARSET_INFO *compare_collation() const + Sql_mode_dependency value_depends_on_sql_mode() const override; + longlong val_int() override; + enum Functype functype() const override { return LIKE_FUNC; } + void print(String *str, enum_query_type 
query_type) override; + CHARSET_INFO *compare_collation() const override { return cmp_collation.collation; } - cond_result eq_cmp_result() const + cond_result eq_cmp_result() const override { /** We cannot always rewrite conditions as follows: @@ -2758,9 +2916,11 @@ public: return compare_collation() == &my_charset_bin ? COND_TRUE : COND_OK; } void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, - table_map usable_tables, SARGABLE_PARAM **sargables); - SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); + table_map usable_tables, SARGABLE_PARAM **sargables) + override; + SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override; Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { /* LIKE differs from the regular comparison operator ('=') in the following: @@ -2793,32 +2953,36 @@ public: cond); return this; } - const char *func_name() const { return "like"; } - enum precedence precedence() const { return IN_PRECEDENCE; } - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("like") }; + return name; + } + enum precedence precedence() const override { return IN_PRECEDENCE; } + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override { max_length= 1; return agg_arg_charsets_for_comparison(cmp_collation, args, 2); } - void cleanup(); + void cleanup() override; - Item *neg_transformer(THD *thd) + Item *neg_transformer(THD *thd) override { negated= !negated; return this; } - bool walk(Item_processor processor, bool walk_subquery, void *arg) + bool walk(Item_processor processor, bool walk_subquery, void *arg) override { - return walk_args(processor, walk_subquery, arg) - || escape_item->walk(processor, walk_subquery, arg) - || (this->*processor)(arg); + return (walk_args(processor, walk_subquery, arg) || + escape_item->walk(processor, walk_subquery, 
arg) || + (this->*processor)(arg)); } - bool find_selective_predicates_list_processor(void *arg); + bool find_selective_predicates_list_processor(void *arg) override; - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_like>(thd, this); } }; @@ -2900,24 +3064,29 @@ class Item_func_regex :public Item_bool_func public: Item_func_regex(THD *thd, Item *a, Item *b): Item_bool_func(thd, a, b) {} - void cleanup() + void cleanup() override { DBUG_ENTER("Item_func_regex::cleanup"); Item_bool_func::cleanup(); re.cleanup(); DBUG_VOID_RETURN; } - longlong val_int(); - bool fix_length_and_dec(); - const char *func_name() const { return "regexp"; } - enum precedence precedence() const { return IN_PRECEDENCE; } - Item *get_copy(THD *) { return 0; } - void print(String *str, enum_query_type query_type) + longlong val_int() override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("regexp") }; + return name; + } + enum precedence precedence() const override { return IN_PRECEDENCE; } + Item *get_copy(THD *) override { return 0; } + void print(String *str, enum_query_type query_type) override { print_op(str, query_type); } - CHARSET_INFO *compare_collation() const { return cmp_collation.collation; } + CHARSET_INFO *compare_collation() const override + { return cmp_collation.collation; } }; @@ -2929,10 +3098,10 @@ public: */ class Item_func_regexp_instr :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_can_return_str(func_name()) || - args[1]->check_type_can_return_text(func_name()); + return (args[0]->check_type_can_return_str(func_name_cstring()) || + args[1]->check_type_can_return_text(func_name_cstring())); } Regexp_processor_pcre re; DTCollation cmp_collation; @@ -2940,17 +3109,21 @@ public: Item_func_regexp_instr(THD *thd, Item *a, Item *b) :Item_long_func(thd, a, b) {} - 
void cleanup() + void cleanup() override { DBUG_ENTER("Item_func_regexp_instr::cleanup"); Item_int_func::cleanup(); re.cleanup(); DBUG_VOID_RETURN; } - longlong val_int(); - bool fix_length_and_dec(); - const char *func_name() const { return "regexp_instr"; } - Item *get_copy(THD *thd) { return 0; } + longlong val_int() override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("regexp_instr") }; + return name; + } + Item *get_copy(THD *thd) override { return 0; } }; @@ -2991,64 +3164,67 @@ public: DBUG_ASSERT(nlist->elements); list.append(nlist); } - bool fix_fields(THD *, Item **ref); - void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge); + bool fix_fields(THD *, Item **ref) override; + void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge) + override; - enum Type type() const { return COND_ITEM; } + enum Type type() const override { return COND_ITEM; } List<Item>* argument_list() { return &list; } - table_map used_tables() const; - void update_used_tables() + table_map used_tables() const override; + void update_used_tables() override { used_tables_and_const_cache_init(); used_tables_and_const_cache_update_and_join(list); } COND *build_equal_items(THD *thd, COND_EQUAL *inherited, bool link_item_fields, - COND_EQUAL **cond_equal_ref); + COND_EQUAL **cond_equal_ref) override; COND *remove_eq_conds(THD *thd, Item::cond_result *cond_value, - bool top_level); + bool top_level) override; void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, - SARGABLE_PARAM **sargables); - SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); - virtual void print(String *str, enum_query_type query_type); + SARGABLE_PARAM **sargables) override; + SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override; + void print(String *str, enum_query_type query_type) override; void split_sum_func(THD 
*thd, Ref_ptr_array ref_pointer_array, - List<Item> &fields, uint flags); + List<Item> &fields, uint flags) override; friend int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, COND **conds); - void top_level_item() { abort_on_null=1; } + void top_level_item() override { abort_on_null=1; } bool top_level() { return abort_on_null; } void copy_andor_arguments(THD *thd, Item_cond *item); - bool walk(Item_processor processor, bool walk_subquery, void *arg); - Item *do_transform(THD *thd, Item_transformer transformer, uchar *arg, bool toplevel); - Item *transform(THD *thd, Item_transformer transformer, uchar *arg) + bool walk(Item_processor processor, bool walk_subquery, void *arg) override; + Item *do_transform(THD *thd, Item_transformer transformer, uchar *arg, + bool toplevel); + Item *transform(THD *thd, Item_transformer transformer, uchar *arg) override { return do_transform(thd, transformer, arg, 0); } Item *top_level_transform(THD *thd, Item_transformer transformer, uchar *arg) + override { return do_transform(thd, transformer, arg, 1); } - void traverse_cond(Cond_traverser, void *arg, traverse_order order); + void traverse_cond(Cond_traverser, void *arg, traverse_order order) override; void neg_arguments(THD *thd); - Item* propagate_equal_fields(THD *, const Context &, COND_EQUAL *); + Item* propagate_equal_fields(THD *, const Context &, COND_EQUAL *) override; Item *do_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, Item_transformer transformer, uchar *arg_t, bool toplevel); Item *compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, - Item_transformer transformer, uchar *arg_t) + Item_transformer transformer, uchar *arg_t) override { return do_compile(thd, analyzer, arg_p, transformer, arg_t, 0); } Item* top_level_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, - Item_transformer transformer, uchar *arg_t) + Item_transformer transformer, uchar *arg_t) override { return do_compile(thd, analyzer, arg_p, transformer, arg_t, 1); 
} - bool eval_not_null_tables(void *opt_arg); - bool find_not_null_fields(table_map allowed); - Item *build_clone(THD *thd); - bool excl_dep_on_table(table_map tab_map); - bool excl_dep_on_grouping_fields(st_select_lex *sel); + bool eval_not_null_tables(void *opt_arg) override; + bool find_not_null_fields(table_map allowed) override; + Item *build_clone(THD *thd) override; + bool excl_dep_on_table(table_map tab_map) override; + bool excl_dep_on_grouping_fields(st_select_lex *sel) override; }; template <template<class> class LI, class T> class Item_equal_iterator; @@ -3199,53 +3375,58 @@ public: void merge_into_list(THD *thd, List<Item_equal> *list, bool save_merged, bool only_intersected); void update_const(THD *thd); - enum Functype functype() const { return MULT_EQUAL_FUNC; } - longlong val_int(); - const char *func_name() const { return "multiple equal"; } + enum Functype functype() const override { return MULT_EQUAL_FUNC; } + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("multiple equal") }; + return name; + } void sort(Item_field_cmpfunc compare, void *arg); - bool fix_length_and_dec(); - bool fix_fields(THD *thd, Item **ref); - void cleanup() + bool fix_length_and_dec() override; + bool fix_fields(THD *thd, Item **ref) override; + void cleanup() override { delete eval_item; eval_item= NULL; } - void update_used_tables(); - bool find_not_null_fields(table_map allowed); + void update_used_tables() override; + bool find_not_null_fields(table_map allowed) override; COND *build_equal_items(THD *thd, COND_EQUAL *inherited, bool link_item_fields, - COND_EQUAL **cond_equal_ref); + COND_EQUAL **cond_equal_ref) override; void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, - SARGABLE_PARAM **sargables); - SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); - bool walk(Item_processor processor, bool walk_subquery, void *arg); - Item 
*transform(THD *thd, Item_transformer transformer, uchar *arg); - virtual void print(String *str, enum_query_type query_type); + SARGABLE_PARAM **sargables) override; + SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override; + bool walk(Item_processor processor, bool walk_subquery, void *arg) override; + Item *transform(THD *thd, Item_transformer transformer, uchar *arg) override; + void print(String *str, enum_query_type query_type) override; const Type_handler *compare_type_handler() const { return m_compare_handler; } - CHARSET_INFO *compare_collation() const { return m_compare_collation; } + CHARSET_INFO *compare_collation() const override + { return m_compare_collation; } void set_context_field(Item_field *ctx_field) { context_field= ctx_field; } void set_link_equal_fields(bool flag) { link_equal_fields= flag; } - Item* get_copy(THD *thd) { return 0; } + Item* get_copy(THD *thd) override { return 0; } /* This does not comply with the specification of the virtual method, but Item_equal items are processed distinguishly anyway */ - bool excl_dep_on_table(table_map tab_map) + bool excl_dep_on_table(table_map tab_map) override { return used_tables() & tab_map; } - bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred); - bool excl_dep_on_grouping_fields(st_select_lex *sel); + bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred) override; + bool excl_dep_on_grouping_fields(st_select_lex *sel) override; bool create_pushable_equalities(THD *thd, List<Item> *equalities, Pushdown_checker checker, uchar *arg, bool clone_const); /* Return the number of elements in this multiple equality */ uint elements_count() { return equal_items.elements; } friend class Item_equal_fields_iterator; - bool count_sargable_conds(void *arg); - Item *multiple_equality_transformer(THD *thd, uchar *arg); + bool count_sargable_conds(void *arg) override; + Item *multiple_equality_transformer(THD *thd, uchar *arg) override; friend class 
Item_equal_iterator<List_iterator_fast,Item>; friend class Item_equal_iterator<List_iterator,Item>; friend Item *eliminate_item_equal(THD *thd, COND *cond, @@ -3377,24 +3558,30 @@ public: Item_cond_and(THD *thd, Item *i1,Item *i2): Item_cond(thd, i1, i2) {} Item_cond_and(THD *thd, Item_cond_and *item): Item_cond(thd, item) {} Item_cond_and(THD *thd, List<Item> &list_arg): Item_cond(thd, list_arg) {} - enum Functype functype() const { return COND_AND_FUNC; } - longlong val_int(); - const char *func_name() const { return "and"; } - enum precedence precedence() const { return AND_PRECEDENCE; } - table_map not_null_tables() const + enum Functype functype() const override { return COND_AND_FUNC; } + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("and") }; + return name; + } + enum precedence precedence() const override { return AND_PRECEDENCE; } + table_map not_null_tables() const override { return abort_on_null ? 
not_null_tables_cache: and_tables_cache; } - Item *copy_andor_structure(THD *thd); - Item *neg_transformer(THD *thd); - void mark_as_condition_AND_part(TABLE_LIST *embedding); - virtual uint exists2in_reserved_items() { return list.elements; }; + Item *copy_andor_structure(THD *thd) override; + Item *neg_transformer(THD *thd) override; + void mark_as_condition_AND_part(TABLE_LIST *embedding) override; + uint exists2in_reserved_items() override { return list.elements; }; COND *build_equal_items(THD *thd, COND_EQUAL *inherited, bool link_item_fields, - COND_EQUAL **cond_equal_ref); - bool set_format_by_check_constraint(Send_field_extended_metadata *to) const; + COND_EQUAL **cond_equal_ref) override; + bool set_format_by_check_constraint(Send_field_extended_metadata *to) const + override; void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, - table_map usable_tables, SARGABLE_PARAM **sargables); - SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); - Item *get_copy(THD *thd) + table_map usable_tables, SARGABLE_PARAM **sargables) + override; + SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_cond_and>(thd, this); } }; @@ -3411,14 +3598,18 @@ public: Item_cond_or(THD *thd, Item *i1,Item *i2): Item_cond(thd, i1, i2) {} Item_cond_or(THD *thd, Item_cond_or *item): Item_cond(thd, item) {} Item_cond_or(THD *thd, List<Item> &list_arg): Item_cond(thd, list_arg) {} - enum Functype functype() const { return COND_OR_FUNC; } - longlong val_int(); - const char *func_name() const { return "or"; } - enum precedence precedence() const { return OR_PRECEDENCE; } - table_map not_null_tables() const { return and_tables_cache; } - Item *copy_andor_structure(THD *thd); - Item *neg_transformer(THD *thd); - Item *get_copy(THD *thd) + enum Functype functype() const override { return COND_OR_FUNC; } + longlong val_int() override; + LEX_CSTRING func_name_cstring() const 
override + { + static LEX_CSTRING name= {STRING_WITH_LEN("or") }; + return name; + } + enum precedence precedence() const override { return OR_PRECEDENCE; } + table_map not_null_tables() const override { return and_tables_cache; } + Item *copy_andor_structure(THD *thd) override; + Item *neg_transformer(THD *thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_cond_or>(thd, this); } }; @@ -3426,10 +3617,14 @@ class Item_func_dyncol_check :public Item_bool_func { public: Item_func_dyncol_check(THD *thd, Item *str): Item_bool_func(thd, str) {} - longlong val_int(); - const char *func_name() const { return "column_check"; } - bool need_parentheses_in_default() { return false; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("column_check") }; + return name; + } + bool need_parentheses_in_default() override { return false; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dyncol_check>(thd, this); } }; @@ -3438,10 +3633,14 @@ class Item_func_dyncol_exists :public Item_bool_func public: Item_func_dyncol_exists(THD *thd, Item *str, Item *num): Item_bool_func(thd, str, num) {} - longlong val_int(); - const char *func_name() const { return "column_exists"; } - bool need_parentheses_in_default() { return false; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("column_exists") }; + return name; + } + bool need_parentheses_in_default() override { return false; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dyncol_exists>(thd, this); } }; @@ -3458,7 +3657,7 @@ public: } void print(String *str, enum_query_type query_type) { - Cursor_ref::print_func(str, func_name()); + Cursor_ref::print_func(str, func_name_cstring()); } }; @@ -3468,9 +3667,13 @@ class Item_func_cursor_isopen: public 
Item_func_cursor_bool_attr public: Item_func_cursor_isopen(THD *thd, const LEX_CSTRING *name, uint offset) :Item_func_cursor_bool_attr(thd, name, offset) { } - const char *func_name() const { return "%ISOPEN"; } - longlong val_int(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("%ISOPEN") }; + return name; + } + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_cursor_isopen>(thd, this); } }; @@ -3479,10 +3682,17 @@ class Item_func_cursor_found: public Item_func_cursor_bool_attr { public: Item_func_cursor_found(THD *thd, const LEX_CSTRING *name, uint offset) - :Item_func_cursor_bool_attr(thd, name, offset) { maybe_null= true; } - const char *func_name() const { return "%FOUND"; } - longlong val_int(); - Item *get_copy(THD *thd) + :Item_func_cursor_bool_attr(thd, name, offset) + { + set_maybe_null(); + } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("%FOUND") }; + return name; + } + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_cursor_found>(thd, this); } }; @@ -3491,10 +3701,17 @@ class Item_func_cursor_notfound: public Item_func_cursor_bool_attr { public: Item_func_cursor_notfound(THD *thd, const LEX_CSTRING *name, uint offset) - :Item_func_cursor_bool_attr(thd, name, offset) { maybe_null= true; } - const char *func_name() const { return "%NOTFOUND"; } - longlong val_int(); - Item *get_copy(THD *thd) + :Item_func_cursor_bool_attr(thd, name, offset) + { + set_maybe_null(); + } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("%NOTFOUND") }; + return name; + } + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_cursor_notfound>(thd, this); } }; diff --git a/sql/item_create.cc b/sql/item_create.cc index feae487ff0c..73e3befc9d6 100644 --- 
a/sql/item_create.cc +++ b/sql/item_create.cc @@ -30,6 +30,7 @@ will be resolved later. */ #include "sql_class.h" // set_var.h: THD +#include "sql_parse.h" // sql_command_flags #include "set_var.h" #include "sp_head.h" #include "sp.h" @@ -2179,6 +2180,20 @@ protected: }; +class Create_func_to_char : public Create_native_func +{ +public: + Item *create_native(THD *thd, const LEX_CSTRING *name, List<Item> *item_list) + override; + + static Create_func_to_char s_singleton; + +protected: + Create_func_to_char() {} + virtual ~Create_func_to_char() {} +}; + + class Create_func_to_days : public Create_func_arg1 { public: @@ -2282,6 +2297,17 @@ protected: virtual ~Create_func_uuid() {} }; +class Create_func_sys_guid : public Create_func_arg0 +{ +public: + virtual Item *create_builder(THD *thd); + + static Create_func_sys_guid s_singleton; + +protected: + Create_func_sys_guid() {} + virtual ~Create_func_sys_guid() {} +}; class Create_func_uuid_short : public Create_func_arg0 { @@ -2438,7 +2464,7 @@ static bool has_named_parameters(List<Item> *params) List_iterator<Item> it(*params); while ((param= it++)) { - if (! param->is_autogenerated_name()) + if (param->is_explicit_name()) return true; } } @@ -2691,7 +2717,7 @@ Create_func_arg1::create_func(THD *thd, const LEX_CSTRING *name, Item *param_1= item_list->pop(); - if (unlikely(! 
param_1->is_autogenerated_name())) + if (unlikely(param_1->is_explicit_name())) { my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -2719,8 +2745,8 @@ Create_func_arg2::create_func(THD *thd, const LEX_CSTRING *name, Item *param_1= item_list->pop(); Item *param_2= item_list->pop(); - if (unlikely(!param_1->is_autogenerated_name() || - !param_2->is_autogenerated_name())) + if (unlikely(param_1->is_explicit_name() || + param_2->is_explicit_name())) { my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -2749,9 +2775,9 @@ Create_func_arg3::create_func(THD *thd, const LEX_CSTRING *name, Item *param_2= item_list->pop(); Item *param_3= item_list->pop(); - if (unlikely(!param_1->is_autogenerated_name() || - !param_2->is_autogenerated_name() || - !param_3->is_autogenerated_name())) + if (unlikely(param_1->is_explicit_name() || + param_2->is_explicit_name() || + param_3->is_explicit_name())) { my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str); return NULL; @@ -3637,7 +3663,7 @@ Create_func_json_exists Create_func_json_exists::s_singleton; Item* Create_func_json_exists::create_2_arg(THD *thd, Item *arg1, Item *arg2) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_exists(thd, arg1, arg2); } @@ -3663,7 +3689,7 @@ Create_func_json_detailed::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_format(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -3673,7 +3699,7 @@ Create_func_json_loose Create_func_json_loose::s_singleton; Item* Create_func_json_loose::create_1_arg(THD *thd, Item *arg1) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_format(thd, 
arg1, Item_func_json_format::LOOSE); } @@ -3684,7 +3710,7 @@ Create_func_json_compact Create_func_json_compact::s_singleton; Item* Create_func_json_compact::create_1_arg(THD *thd, Item *arg1) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_format(thd, arg1, Item_func_json_format::COMPACT); } @@ -3695,7 +3721,7 @@ Create_func_json_valid Create_func_json_valid::s_singleton; Item* Create_func_json_valid::create_1_arg(THD *thd, Item *arg1) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_valid(thd, arg1); } @@ -3705,7 +3731,7 @@ Create_func_json_type Create_func_json_type::s_singleton; Item* Create_func_json_type::create_1_arg(THD *thd, Item *arg1) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_type(thd, arg1); } @@ -3715,7 +3741,7 @@ Create_func_json_depth Create_func_json_depth::s_singleton; Item* Create_func_json_depth::create_1_arg(THD *thd, Item *arg1) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_depth(thd, arg1); } @@ -3725,7 +3751,7 @@ Create_func_json_value Create_func_json_value::s_singleton; Item* Create_func_json_value::create_2_arg(THD *thd, Item *arg1, Item *arg2) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_value(thd, arg1, arg2); } @@ -3735,7 +3761,7 @@ Create_func_json_query Create_func_json_query::s_singleton; Item* Create_func_json_query::create_2_arg(THD *thd, Item *arg1, Item *arg2) { - status_var_increment(current_thd->status_var.feature_json); + 
status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_query(thd, arg1, arg2); } @@ -3745,7 +3771,7 @@ Create_func_json_quote Create_func_json_quote::s_singleton; Item* Create_func_json_quote::create_1_arg(THD *thd, Item *arg1) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_quote(thd, arg1); } @@ -3755,7 +3781,7 @@ Create_func_json_unquote Create_func_json_unquote::s_singleton; Item* Create_func_json_unquote::create_1_arg(THD *thd, Item *arg1) { - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return new (thd->mem_root) Item_func_json_unquote(thd, arg1); } @@ -3786,7 +3812,7 @@ Create_func_json_array::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_array(thd); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -3812,7 +3838,7 @@ Create_func_json_array_append::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_array_append(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -3838,7 +3864,7 @@ Create_func_json_array_insert::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_array_insert(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -3865,7 +3891,7 @@ Create_func_json_insert::create_native(THD *thd, const LEX_CSTRING *name, thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -3892,7 +3918,7 @@ Create_func_json_set::create_native(THD *thd, const 
LEX_CSTRING *name, thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -3919,7 +3945,7 @@ Create_func_json_replace::create_native(THD *thd, const LEX_CSTRING *name, thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -3945,7 +3971,7 @@ Create_func_json_remove::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_remove(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -3978,7 +4004,7 @@ Create_func_json_object::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_object(thd); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -4003,7 +4029,7 @@ Create_func_json_length::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_length(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -4028,7 +4054,7 @@ Create_func_json_merge::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_merge(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -4078,7 +4104,7 @@ Create_func_json_contains::create_native(THD *thd, const LEX_CSTRING *name, my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -4104,7 +4130,7 @@ Create_func_json_keys::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) 
Item_func_json_keys(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -4130,7 +4156,7 @@ Create_func_json_contains_path::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_contains_path(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -4156,7 +4182,7 @@ Create_func_json_extract::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_extract(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -4182,7 +4208,7 @@ Create_func_json_search::create_native(THD *thd, const LEX_CSTRING *name, func= new (thd->mem_root) Item_func_json_search(thd, *item_list); } - status_var_increment(current_thd->status_var.feature_json); + status_var_increment(thd->status_var.feature_json); return func; } @@ -5190,6 +5216,44 @@ Create_func_to_base64::create_1_arg(THD *thd, Item *arg1) } +Create_func_to_char Create_func_to_char::s_singleton; + +Item* +Create_func_to_char::create_native(THD *thd, const LEX_CSTRING *name, + List<Item> *item_list) +{ + Item *func= NULL; + int arg_count= 0; + + if (item_list != NULL) + arg_count= item_list->elements; + + switch (arg_count) { + case 1: + { + Item *param_1= item_list->pop(); + Item *i0= new (thd->mem_root) Item_string_sys(thd, "YYYY-MM-DD HH24:MI:SS", 21); + func= new (thd->mem_root) Item_func_tochar(thd, param_1, i0); + break; + } + case 2: + { + Item *param_1= item_list->pop(); + Item *param_2= item_list->pop(); + func= new (thd->mem_root) Item_func_tochar(thd, param_1, param_2); + break; + } + default: + { + my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str); + break; + } + } + + return func; +} + + Create_func_to_days Create_func_to_days::s_singleton; 
Item* @@ -5288,7 +5352,18 @@ Create_func_uuid::create_builder(THD *thd) DBUG_ENTER("Create_func_uuid::create"); thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); thd->lex->safe_to_cache_query= 0; - DBUG_RETURN(new (thd->mem_root) Item_func_uuid(thd)); + DBUG_RETURN(new (thd->mem_root) Item_func_uuid(thd, 0)); +} + +Create_func_sys_guid Create_func_sys_guid::s_singleton; + +Item* +Create_func_sys_guid::create_builder(THD *thd) +{ + DBUG_ENTER("Create_func_sys_guid::create"); + thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); + thd->lex->safe_to_cache_query= 0; + DBUG_RETURN(new (thd->mem_root) Item_func_uuid(thd, 1)); } @@ -5632,11 +5707,13 @@ Native_func_registry func_array[] = BUILDER(Create_func_substr_oracle)}, { { STRING_WITH_LEN("SUBSTRING_INDEX") }, BUILDER(Create_func_substr_index)}, { { STRING_WITH_LEN("SUBTIME") }, BUILDER(Create_func_subtime)}, + { { STRING_WITH_LEN("SYS_GUID") }, BUILDER(Create_func_sys_guid)}, { { STRING_WITH_LEN("TAN") }, BUILDER(Create_func_tan)}, { { STRING_WITH_LEN("TIMEDIFF") }, BUILDER(Create_func_timediff)}, { { STRING_WITH_LEN("TIME_FORMAT") }, BUILDER(Create_func_time_format)}, { { STRING_WITH_LEN("TIME_TO_SEC") }, BUILDER(Create_func_time_to_sec)}, { { STRING_WITH_LEN("TO_BASE64") }, BUILDER(Create_func_to_base64)}, + { { STRING_WITH_LEN("TO_CHAR") }, BUILDER(Create_func_to_char)}, { { STRING_WITH_LEN("TO_DAYS") }, BUILDER(Create_func_to_days)}, { { STRING_WITH_LEN("TO_SECONDS") }, BUILDER(Create_func_to_seconds)}, { { STRING_WITH_LEN("UCASE") }, BUILDER(Create_func_ucase)}, diff --git a/sql/item_func.cc b/sql/item_func.cc index 81a53a88dc6..eb499b429cd 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -136,12 +136,7 @@ void Item_func::sync_with_sum_func_and_with_field(List<Item> &list) List_iterator_fast<Item> li(list); Item *item; while ((item= li++)) - { - join_with_sum_func(item); - with_window_func|= item->with_window_func; - with_field|= item->with_field; - with_param|= 
item->with_param; - } + with_flags|= item->with_flags; } @@ -173,7 +168,7 @@ bool Item_func::check_argument_types_or_binary(const Type_handler *handler, for (uint i= start; i < end ; i++) { DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_or_binary(func_name(), handler)) + if (args[i]->check_type_or_binary(func_name_cstring(), handler)) return true; } return false; @@ -186,7 +181,7 @@ bool Item_func::check_argument_types_traditional_scalar(uint start, for (uint i= start; i < end ; i++) { DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_traditional_scalar(func_name())) + if (args[i]->check_type_traditional_scalar(func_name_cstring())) return true; } return false; @@ -199,7 +194,7 @@ bool Item_func::check_argument_types_can_return_int(uint start, for (uint i= start; i < end ; i++) { DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_can_return_int(func_name())) + if (args[i]->check_type_can_return_int(func_name_cstring())) return true; } return false; @@ -212,7 +207,7 @@ bool Item_func::check_argument_types_can_return_real(uint start, for (uint i= start; i < end ; i++) { DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_can_return_real(func_name())) + if (args[i]->check_type_can_return_real(func_name_cstring())) return true; } return false; @@ -225,7 +220,7 @@ bool Item_func::check_argument_types_can_return_text(uint start, for (uint i= start; i < end ; i++) { DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_can_return_text(func_name())) + if (args[i]->check_type_can_return_text(func_name_cstring())) return true; } return false; @@ -238,7 +233,7 @@ bool Item_func::check_argument_types_can_return_str(uint start, for (uint i= start; i < end ; i++) { DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_can_return_str(func_name())) + if (args[i]->check_type_can_return_str(func_name_cstring())) return true; } return false; @@ -251,7 +246,7 @@ bool Item_func::check_argument_types_can_return_date(uint start, for (uint i= start; i < end ; i++) { 
DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_can_return_date(func_name())) + if (args[i]->check_type_can_return_date(func_name_cstring())) return true; } return false; @@ -264,7 +259,7 @@ bool Item_func::check_argument_types_can_return_time(uint start, for (uint i= start; i < end ; i++) { DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_can_return_time(func_name())) + if (args[i]->check_type_can_return_time(func_name_cstring())) return true; } return false; @@ -276,7 +271,7 @@ bool Item_func::check_argument_types_scalar(uint start, uint end) const for (uint i= start; i < end; i++) { DBUG_ASSERT(i < arg_count); - if (args[i]->check_type_scalar(func_name())) + if (args[i]->check_type_scalar(func_name_cstring())) return true; } return false; @@ -300,7 +295,7 @@ bool Item_func::check_argument_types_scalar(uint start, uint end) const Sets as a side effect the following class variables: maybe_null Set if any argument may return NULL with_sum_func Set if any of the arguments contains a sum function - with_window_func Set if any of the arguments contain a window function + with_window_func() Set if any of the arguments contain a window function with_field Set if any of the arguments contains or is a field used_tables_cache Set to union of the tables used by arguments @@ -321,7 +316,7 @@ bool Item_func::check_argument_types_scalar(uint start, uint end) const bool Item_func::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); Item **arg,**arg_end; uchar buff[STACK_BUFF_ALLOC]; // Max argument in function @@ -356,23 +351,17 @@ Item_func::fix_fields(THD *thd, Item **ref) return TRUE; /* purecov: inspected */ item= *arg; - if (item->maybe_null) - maybe_null=1; - - join_with_sum_func(item); - with_param= with_param || item->with_param; - with_window_func= with_window_func || item->with_window_func; - with_field= with_field || item->with_field; + base_flags|= item->base_flags & item_base_t::MAYBE_NULL; + with_flags|= 
item->with_flags; used_tables_and_const_cache_join(item); not_null_tables_cache|= item->not_null_tables(); - m_with_subquery|= item->with_subquery(); } } if (check_arguments()) return true; if (fix_length_and_dec()) return TRUE; - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -384,11 +373,11 @@ Item_func::quick_fix_field() { for (arg=args, arg_end=args+arg_count; arg != arg_end ; arg++) { - if (!(*arg)->is_fixed()) + if (!(*arg)->fixed()) (*arg)->quick_fix_field(); } } - fixed= 1; + base_flags|= item_base_t::FIXED; } @@ -602,9 +591,12 @@ void Item_func::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, List<Item> &fields, uint flags) { Item **arg, **arg_end; + DBUG_ENTER("Item_func::split_sum_func"); + for (arg= args, arg_end= args+arg_count; arg != arg_end ; arg++) (*arg)->split_sum_func2(thd, ref_pointer_array, fields, arg, flags | SPLIT_SUM_SKIP_REGISTERED); + DBUG_VOID_RETURN; } @@ -616,7 +608,7 @@ table_map Item_func::not_null_tables() const void Item_func::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); print_args(str, 0, query_type); str->append(')'); @@ -640,7 +632,7 @@ void Item_func::print_op(String *str, enum_query_type query_type) { args[i]->print_parenthesised(str, query_type, precedence()); str->append(' '); - str->append(func_name()); + str->append(func_name_cstring()); str->append(' '); } args[arg_count-1]->print_parenthesised(str, query_type, higher_precedence()); @@ -683,7 +675,7 @@ bool Item_hybrid_func::fix_attributes(Item **items, uint nitems) { bool rc= Item_hybrid_func::type_handler()-> Item_hybrid_func_fix_attributes(current_thd, - func_name(), this, this, + func_name_cstring(), this, this, items, nitems); DBUG_ASSERT(!rc || current_thd->is_error()); return rc; @@ -692,7 +684,7 @@ bool Item_hybrid_func::fix_attributes(Item **items, uint nitems) String *Item_real_func::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + 
DBUG_ASSERT(fixed()); double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ @@ -703,7 +695,7 @@ String *Item_real_func::val_str(String *str) my_decimal *Item_real_func::val_decimal(my_decimal *decimal_value) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ @@ -744,14 +736,14 @@ void Item_func::signal_divide_by_null() Item *Item_func::get_tmp_table_item(THD *thd) { - if (!Item_func::with_sum_func() && !const_item()) + if (!with_sum_func() && !const_item()) return new (thd->mem_root) Item_temptable_field(thd, result_field); return copy_or_same(thd); } double Item_int_func::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); return unsigned_flag ? (double) ((ulonglong) val_int()) : (double) val_int(); } @@ -759,7 +751,7 @@ double Item_int_func::val_real() String *Item_int_func::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong nr=val_int(); if (null_value) return 0; @@ -1081,11 +1073,12 @@ void Item_real_typecast::print(String *str, enum_query_type query_type) { char len_buf[20*3 + 1]; char *end; + Name name= type_handler()->name(); str->append(STRING_WITH_LEN("cast(")); args[0]->print(str, query_type); str->append(STRING_WITH_LEN(" as ")); - str->append(type_handler()->name().ptr()); + str->append(name.ptr(), name.length()); if (decimals != NOT_FIXED_DEC) { str->append('('); @@ -1367,7 +1360,7 @@ my_decimal *Item_func_minus::decimal_op(my_decimal *decimal_value) double Item_func_mul::real_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real() * args[1]->val_real(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; @@ -1377,7 +1370,7 @@ double Item_func_mul::real_op() longlong Item_func_mul::int_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong a= args[0]->val_int(); longlong b= args[1]->val_int(); longlong res; @@ -1499,7 +1492,7 @@ bool 
Item_func_mul::fix_length_and_dec(void) double Item_func_div::real_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); double val2= args[1]->val_real(); if ((null_value= args[0]->null_value || args[1]->null_value)) @@ -1587,7 +1580,7 @@ bool Item_func_div::fix_length_and_dec() DBUG_ENTER("Item_func_div::fix_length_and_dec"); DBUG_PRINT("info", ("name %s", func_name())); prec_increment= current_thd->variables.div_precincrement; - maybe_null= 1; // division by zero + set_maybe_null(); // division by zero const Type_aggregator *aggregator= &type_handler_data->m_type_aggregator_for_div; DBUG_EXECUTE_IF("num_op", aggregator= &type_handler_data->m_type_aggregator_non_commutative_test;); @@ -1604,7 +1597,7 @@ bool Item_func_div::fix_length_and_dec() /* Integer division */ longlong Item_func_int_div::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); /* Perform division using DECIMAL math if either of the operands has a @@ -1665,7 +1658,7 @@ bool Item_func_int_div::fix_length_and_dec() uint32 prec= args[0]->decimal_int_part(); set_if_smaller(prec, MY_INT64_NUM_DECIMAL_DIGITS); fix_char_length(prec); - maybe_null=1; + set_maybe_null(); unsigned_flag=args[0]->unsigned_flag | args[1]->unsigned_flag; return false; } @@ -1673,7 +1666,7 @@ bool Item_func_int_div::fix_length_and_dec() longlong Item_func_mod::int_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Longlong_hybrid val0= args[0]->to_longlong_hybrid(); Longlong_hybrid val1= args[1]->to_longlong_hybrid(); @@ -1697,7 +1690,7 @@ longlong Item_func_mod::int_op() double Item_func_mod::real_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); double val2= args[1]->val_real(); if ((null_value= args[0]->null_value || args[1]->null_value)) @@ -1745,7 +1738,7 @@ bool Item_func_mod::fix_length_and_dec() { DBUG_ENTER("Item_func_mod::fix_length_and_dec"); DBUG_PRINT("info", ("name %s", func_name())); - maybe_null= true; // division 
by zero + set_maybe_null(); // division by zero const Type_aggregator *aggregator= &type_handler_data->m_type_aggregator_for_mod; DBUG_EXECUTE_IF("num_op", aggregator= &type_handler_data->m_type_aggregator_non_commutative_test;); DBUG_ASSERT(!aggregator->is_commutative()); @@ -1980,7 +1973,7 @@ bool Item_func_abs::fix_length_and_dec() /** Gateway to natural LOG function. */ double Item_func_ln::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value= args[0]->null_value)) return 0.0; @@ -2000,7 +1993,7 @@ double Item_func_ln::val_real() */ double Item_func_log::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value= args[0]->null_value)) return 0.0; @@ -2026,7 +2019,7 @@ double Item_func_log::val_real() double Item_func_log2::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) @@ -2041,7 +2034,7 @@ double Item_func_log2::val_real() double Item_func_log10::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value= args[0]->null_value)) return 0.0; @@ -2055,7 +2048,7 @@ double Item_func_log10::val_real() double Item_func_exp::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; /* purecov: inspected */ @@ -2064,7 +2057,7 @@ double Item_func_exp::val_real() double Item_func_sqrt::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || value < 0))) return 0.0; /* purecov: inspected */ @@ -2073,7 +2066,7 @@ double Item_func_sqrt::val_real() double Item_func_pow::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); double val2= args[1]->val_real(); if ((null_value=(args[0]->null_value || 
args[1]->null_value))) @@ -2085,7 +2078,7 @@ double Item_func_pow::val_real() double Item_func_acos::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); /* One can use this to defer SELECT processing. */ DEBUG_SYNC(current_thd, "before_acos_function"); // the volatile's for BUG #2338 to calm optimizer down (because of gcc's bug) @@ -2097,7 +2090,7 @@ double Item_func_acos::val_real() double Item_func_asin::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); // the volatile's for BUG #2338 to calm optimizer down (because of gcc's bug) volatile double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0)))) @@ -2107,7 +2100,7 @@ double Item_func_asin::val_real() double Item_func_atan::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; @@ -2123,7 +2116,7 @@ double Item_func_atan::val_real() double Item_func_cos::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; @@ -2132,7 +2125,7 @@ double Item_func_cos::val_real() double Item_func_sin::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; @@ -2141,7 +2134,7 @@ double Item_func_sin::val_real() double Item_func_tan::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; @@ -2151,7 +2144,7 @@ double Item_func_tan::val_real() double Item_func_cot::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; @@ -2168,7 +2161,7 @@ class Func_handler_shift_left_int_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + 
DBUG_ASSERT(item->fixed()); return item->arguments()[0]->to_longlong_null() << item->arguments()[1]->to_longlong_null(); } @@ -2181,7 +2174,7 @@ class Func_handler_shift_left_decimal_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); return VDec(item->arguments()[0]).to_xlonglong_null() << item->arguments()[1]->to_longlong_null(); } @@ -2202,7 +2195,7 @@ class Func_handler_shift_right_int_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->fixed == 1); + DBUG_ASSERT(item->fixed()); return item->arguments()[0]->to_longlong_null() >> item->arguments()[1]->to_longlong_null(); } @@ -2215,7 +2208,7 @@ class Func_handler_shift_right_decimal_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); return VDec(item->arguments()[0]).to_xlonglong_null() >> item->arguments()[1]->to_longlong_null(); } @@ -2236,7 +2229,7 @@ class Func_handler_bit_neg_int_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); return ~ item->arguments()[0]->to_longlong_null(); } }; @@ -2248,7 +2241,7 @@ class Func_handler_bit_neg_decimal_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); return ~ VDec(item->arguments()[0]).to_xlonglong_null(); } }; @@ -2378,13 +2371,14 @@ my_decimal *Item_func_ceiling::decimal_op(my_decimal *decimal_value) } -bool Item_func_ceiling::date_op(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate) +bool Item_func_ceiling::date_op(THD *thd, MYSQL_TIME *to, + date_mode_t fuzzydate) { Datetime::Options opt(thd, TIME_FRAC_TRUNCATE); Datetime *tm= new (to) Datetime(thd, args[0], opt); tm->ceiling(thd); null_value= !tm->is_valid_datetime(); - 
DBUG_ASSERT(maybe_null || !null_value); + DBUG_ASSERT(maybe_null() || !null_value); return null_value; } @@ -2395,7 +2389,7 @@ bool Item_func_ceiling::time_op(THD *thd, MYSQL_TIME *to) Time *tm= new (to) Time(thd, args[0], opt); tm->ceiling(); null_value= !tm->is_valid_time(); - DBUG_ASSERT(maybe_null || !null_value); + DBUG_ASSERT(maybe_null() || !null_value); return null_value; } @@ -2447,7 +2441,7 @@ bool Item_func_floor::date_op(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate) Datetime::Options opt(thd, TIME_FRAC_TRUNCATE); Datetime *tm= new (to) Datetime(thd, args[0], opt, 0); null_value= !tm->is_valid_datetime(); - DBUG_ASSERT(maybe_null || !null_value); + DBUG_ASSERT(maybe_null() || !null_value); return null_value; } @@ -2458,7 +2452,7 @@ bool Item_func_floor::time_op(THD *thd, MYSQL_TIME *to) Time *tm= new (to) Time(thd, args[0], opt); tm->floor(); null_value= !tm->is_valid_time(); - DBUG_ASSERT(maybe_null || !null_value); + DBUG_ASSERT(maybe_null() || !null_value); return null_value; } @@ -2526,7 +2520,7 @@ void Item_func_round::fix_arg_temporal(const Type_handler *h, uint int_part_length) { set_handler(h); - if (args[1]->const_item() && !args[1]->is_expensive()) + if (args[1]->can_eval_in_optimize()) { Longlong_hybrid_null dec= args[1]->to_longlong_hybrid_null(); fix_attributes_temporal(int_part_length, @@ -2553,7 +2547,7 @@ void Item_func_round::fix_arg_datetime() return NULL. */ if (!truncate) - maybe_null= true; + set_maybe_null(); fix_arg_temporal(&type_handler_datetime2, MAX_DATETIME_WIDTH); } @@ -2562,7 +2556,7 @@ bool Item_func_round::test_if_length_can_increase() { if (truncate) return false; - if (args[1]->const_item() && !args[1]->is_expensive()) + if (args[1]->can_eval_in_optimize()) { // Length can increase in some cases: e.g. ROUND(9,-1) -> 10. 
Longlong_hybrid val1= args[1]->to_longlong_hybrid(); @@ -2734,7 +2728,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value) dec= INT_MIN; if (!(null_value= (value.is_null() || args[1]->null_value || - value.round_to(decimal_value, (uint) dec, + value.round_to(decimal_value, (int) dec, truncate ? TRUNCATE : HALF_UP) > 1))) return decimal_value; return 0; @@ -2750,7 +2744,7 @@ bool Item_func_round::time_op(THD *thd, MYSQL_TIME *to) Time *tm= new (to) Time(thd, args[0], opt, dec.to_uint(TIME_SECOND_PART_DIGITS)); null_value= !tm->is_valid_time() || dec.is_null(); - DBUG_ASSERT(maybe_null || !null_value); + DBUG_ASSERT(maybe_null() || !null_value); return null_value; } @@ -2764,7 +2758,7 @@ bool Item_func_round::date_op(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate) Datetime *tm= new (to) Datetime(thd, args[0], opt, dec.to_uint(TIME_SECOND_PART_DIGITS)); null_value= !tm->is_valid_datetime() || dec.is_null(); - DBUG_ASSERT(maybe_null || !null_value); + DBUG_ASSERT(maybe_null() || !null_value); return null_value; } @@ -2841,7 +2835,7 @@ void Item_func_rand::update_used_tables() double Item_func_rand::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (arg_count) { if (!args[0]->const_item()) @@ -2862,7 +2856,7 @@ double Item_func_rand::val_real() longlong Item_func_sign::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); null_value=args[0]->null_value; return value < 0.0 ? -1 : (value > 0 ? 
1 : 0); @@ -2871,7 +2865,7 @@ longlong Item_func_sign::val_int() double Item_func_units::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0; @@ -2906,7 +2900,7 @@ bool Item_func_min_max::get_date_native(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { longlong UNINIT_VAR(min_max); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); for (uint i=0; i < arg_count ; i++) { @@ -2932,7 +2926,7 @@ bool Item_func_min_max::get_date_native(THD *thd, MYSQL_TIME *ltime, bool Item_func_min_max::get_time_native(THD *thd, MYSQL_TIME *ltime) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Time value(thd, args[0], Time::Options(thd), decimals); if (!value.is_valid_time()) @@ -3001,7 +2995,7 @@ double Item_func_min_max::val_real_native() longlong Item_func_min_max::val_int_native() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong value=0; for (uint i=0; i < arg_count ; i++) { @@ -3022,7 +3016,7 @@ longlong Item_func_min_max::val_int_native() my_decimal *Item_func_min_max::val_decimal_native(my_decimal *dec) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); my_decimal tmp_buf, *tmp, *UNINIT_VAR(res); for (uint i=0; i < arg_count ; i++) @@ -3056,7 +3050,7 @@ my_decimal *Item_func_min_max::val_decimal_native(my_decimal *dec) bool Item_func_min_max::val_native(THD *thd, Native *native) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); const Type_handler *handler= Item_hybrid_func::type_handler(); NativeBuffer<STRING_BUFFER_USUAL_SIZE> cur; for (uint i= 0; i < arg_count; i++) @@ -3078,7 +3072,7 @@ bool Item_func_min_max::val_native(THD *thd, Native *native) longlong Item_func_bit_length::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&value); return (null_value= !res) ? 
0 : (longlong) res->length() * 8; } @@ -3086,7 +3080,7 @@ longlong Item_func_bit_length::val_int() longlong Item_func_octet_length::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res=args[0]->val_str(&value); if (!res) { @@ -3100,7 +3094,7 @@ longlong Item_func_octet_length::val_int() longlong Item_func_char_length::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res=args[0]->val_str(&value); if (!res) { @@ -3114,7 +3108,7 @@ longlong Item_func_char_length::val_int() longlong Item_func_coercibility::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); null_value= 0; return (longlong) args[0]->collation.derivation; } @@ -3122,7 +3116,7 @@ longlong Item_func_coercibility::val_int() longlong Item_func_locate::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *a=args[0]->val_str(&value1); String *b=args[1]->val_str(&value2); if (!a || !b) @@ -3180,7 +3174,7 @@ void Item_func_locate::print(String *str, enum_query_type query_type) longlong Item_func_field::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (cmp_type == STRING_RESULT) { @@ -3235,7 +3229,8 @@ longlong Item_func_field::val_int() bool Item_func_field::fix_length_and_dec() { - maybe_null=0; max_length=3; + base_flags&= ~item_base_t::MAYBE_NULL; + max_length=3; cmp_type= args[0]->result_type(); for (uint i=1; i < arg_count ; i++) cmp_type= item_cmp_type(cmp_type, args[i]->result_type()); @@ -3247,7 +3242,7 @@ bool Item_func_field::fix_length_and_dec() longlong Item_func_ascii::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res=args[0]->val_str(&value); if (!res) { @@ -3260,7 +3255,7 @@ longlong Item_func_ascii::val_int() longlong Item_func_ord::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res=args[0]->val_str(&value); if (!res) { @@ -3317,7 +3312,7 @@ static const char separator=','; longlong Item_func_find_in_set::val_int() { - DBUG_ASSERT(fixed == 1); + 
DBUG_ASSERT(fixed()); if (enum_value) { // enum_value is set iff args[0]->const_item() in fix_length_and_dec(). @@ -3399,7 +3394,7 @@ class Func_handler_bit_count_int_to_slong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); return item->arguments()[0]->to_longlong_null().bit_count(); } }; @@ -3411,7 +3406,7 @@ class Func_handler_bit_count_decimal_to_slong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); return VDec(item->arguments()[0]).to_xlonglong_null().bit_count(); } }; @@ -3478,7 +3473,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, args=arguments; /* Fix all arguments */ - func->maybe_null=0; + func->base_flags&= ~item_base_t::MAYBE_NULL; func->used_tables_and_const_cache_init(); if ((f_args.arg_count=arg_count)) @@ -3493,7 +3488,6 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, } uint i; Item **arg,**arg_end; - With_sum_func_cache *with_sum_func_cache= func->get_with_sum_func_cache(); for (i=0, arg=arguments, arg_end=arguments+arg_count; arg != arg_end ; arg++,i++) @@ -3515,15 +3509,8 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, */ if (item->collation.collation->state & MY_CS_BINSORT) func->collation.set(&my_charset_bin); - if (item->maybe_null) - func->maybe_null=1; - if (with_sum_func_cache) - with_sum_func_cache->join_with_sum_func(item); - func->with_window_func= func->with_window_func || - item->with_window_func; - func->with_field= func->with_field || item->with_field; - func->with_param= func->with_param || item->with_param; - func->With_subquery_cache::join(item); + func->base_flags|= item->base_flags & item_base_t::MAYBE_NULL; + func->with_flags|= item->with_flags; func->used_tables_and_const_cache_join(item); f_args.arg_type[i]=item->result_type(); } @@ -3542,7 +3529,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, if 
(func->fix_length_and_dec()) DBUG_RETURN(TRUE); initid.max_length=func->max_length; - initid.maybe_null=func->maybe_null; + initid.maybe_null=func->maybe_null(); initid.const_item=func->const_item_cache; initid.decimals=func->decimals; initid.ptr=0; @@ -3562,7 +3549,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, f_args.args[i]= NULL; /* Non-const unless updated below. */ f_args.lengths[i]= arguments[i]->max_length; - f_args.maybe_null[i]= (char) arguments[i]->maybe_null; + f_args.maybe_null[i]= (char) arguments[i]->maybe_null(); f_args.attributes[i]= arguments[i]->name.str; f_args.attribute_lengths[i]= (ulong)arguments[i]->name.length; @@ -3608,7 +3595,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func, goto err_exit; } func->max_length=MY_MIN(initid.max_length,MAX_BLOB_WIDTH); - func->maybe_null=initid.maybe_null; + func->set_maybe_null(initid.maybe_null); /* The above call for init() can reset initid.const_item to "false", e.g. when the UDF function wants to be non-deterministic. 
@@ -3761,7 +3748,7 @@ void Item_udf_func::cleanup() void Item_udf_func::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); for (uint i=0 ; i < arg_count ; i++) { @@ -3777,7 +3764,7 @@ double Item_func_udf_float::val_real() { double res; my_bool tmp_null_value; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_func_udf_float::val"); DBUG_PRINT("info",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); @@ -3789,7 +3776,7 @@ double Item_func_udf_float::val_real() String *Item_func_udf_float::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ @@ -3802,7 +3789,7 @@ longlong Item_func_udf_int::val_int() { longlong res; my_bool tmp_null_value; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_func_udf_int::val_int"); res= udf.val_int(&tmp_null_value); null_value= tmp_null_value; @@ -3812,7 +3799,7 @@ longlong Item_func_udf_int::val_int() String *Item_func_udf_int::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong nr=val_int(); if (null_value) return 0; @@ -3825,7 +3812,7 @@ my_decimal *Item_func_udf_decimal::val_decimal(my_decimal *dec_buf) { my_decimal *res; my_bool tmp_null_value; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_func_udf_decimal::val_decimal"); DBUG_PRINT("info",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); @@ -3849,7 +3836,7 @@ bool Item_func_udf_str::fix_length_and_dec() String *Item_func_udf_str::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res=udf.val_str(str,&str_value); null_value = !res; return res; @@ -3875,7 +3862,7 @@ bool udf_handler::get_arguments() { return 0; } longlong Item_master_pos_wait::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD* thd = current_thd; String *log_name = 
args[0]->val_str(&value); int event_count= 0; @@ -3937,7 +3924,7 @@ err: longlong Item_master_gtid_wait::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong result= 0; String *gtid_pos __attribute__((unused)) = args[0]->val_str(&value); DBUG_ENTER("Item_master_gtid_wait::val_int"); @@ -4200,7 +4187,7 @@ static int ull_name_ok(String *name) longlong Item_func_get_lock::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&value); double timeout= args[1]->val_real(); THD *thd= current_thd; @@ -4307,7 +4294,7 @@ longlong Item_func_get_lock::val_int() */ longlong Item_func_release_all_locks::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; ulong num_unlocked= 0; DBUG_ENTER("Item_func_release_all_locks::val_int"); @@ -4333,7 +4320,7 @@ longlong Item_func_release_all_locks::val_int() longlong Item_func_release_lock::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&value); THD *thd= current_thd; DBUG_ENTER("Item_func_release_lock::val_int"); @@ -4382,7 +4369,7 @@ longlong Item_func_release_lock::val_int() longlong Item_func_is_free_lock::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&value); THD *thd= current_thd; null_value= 1; @@ -4400,7 +4387,7 @@ longlong Item_func_is_free_lock::val_int() longlong Item_func_is_used_lock::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&value); THD *thd= current_thd; null_value= 1; @@ -4422,7 +4409,7 @@ longlong Item_func_is_used_lock::val_int() longlong Item_func_last_insert_id::val_int() { THD *thd= current_thd; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (arg_count) { longlong value= args[0]->val_int(); @@ -4454,7 +4441,7 @@ bool Item_func_last_insert_id::fix_fields(THD *thd, Item **ref) longlong Item_func_benchmark::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char 
buff[MAX_FIELD_WIDTH]; String tmp(buff,sizeof(buff), &my_charset_bin); my_decimal tmp_decimal; @@ -4574,7 +4561,7 @@ longlong Item_func_sleep::val_int() double timeout; int error; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); timeout= args[0]->val_real(); /* @@ -4716,7 +4703,7 @@ end: bool Item_func_set_user_var::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); /* fix_fields will call Item_func_set_user_var::fix_length_and_dec */ if (Item_func::fix_fields(thd, ref) || set_entry(thd, TRUE)) return TRUE; @@ -4792,7 +4779,7 @@ bool Item_func_set_user_var::fix_fields(THD *thd, Item **ref) bool Item_func_set_user_var::fix_length_and_dec() { - maybe_null=args[0]->maybe_null; + base_flags|= (args[0]->base_flags & item_base_t::MAYBE_NULL); decimals=args[0]->decimals; if (args[0]->collation.derivation == DERIVATION_NUMERIC) { @@ -5239,7 +5226,7 @@ Item_func_set_user_var::update() double Item_func_set_user_var::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(0); update(); // Store expression return m_var_entry->val_real(&null_value); @@ -5247,7 +5234,7 @@ double Item_func_set_user_var::val_real() longlong Item_func_set_user_var::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(0); update(); // Store expression return m_var_entry->val_int(&null_value); @@ -5255,7 +5242,7 @@ longlong Item_func_set_user_var::val_int() String *Item_func_set_user_var::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(0); update(); // Store expression return m_var_entry->val_str(&null_value, str, decimals); @@ -5264,7 +5251,7 @@ String *Item_func_set_user_var::val_str(String *str) my_decimal *Item_func_set_user_var::val_decimal(my_decimal *val) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(0); update(); // Store expression return m_var_entry->val_decimal(&null_value, val); @@ -5273,7 +5260,7 @@ my_decimal *Item_func_set_user_var::val_decimal(my_decimal *val) double 
Item_func_set_user_var::val_result() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(TRUE); update(); // Store expression return m_var_entry->val_real(&null_value); @@ -5281,7 +5268,7 @@ double Item_func_set_user_var::val_result() longlong Item_func_set_user_var::val_int_result() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(TRUE); update(); // Store expression return m_var_entry->val_int(&null_value); @@ -5289,7 +5276,7 @@ longlong Item_func_set_user_var::val_int_result() bool Item_func_set_user_var::val_bool_result() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(TRUE); update(); // Store expression return m_var_entry->val_int(&null_value) != 0; @@ -5297,7 +5284,7 @@ bool Item_func_set_user_var::val_bool_result() String *Item_func_set_user_var::str_result(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(TRUE); update(); // Store expression return m_var_entry->val_str(&null_value, str, decimals); @@ -5306,7 +5293,7 @@ String *Item_func_set_user_var::str_result(String *str) my_decimal *Item_func_set_user_var::val_decimal_result(my_decimal *val) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(TRUE); update(); // Store expression return m_var_entry->val_decimal(&null_value, val); @@ -5315,7 +5302,7 @@ my_decimal *Item_func_set_user_var::val_decimal_result(my_decimal *val) bool Item_func_set_user_var::is_null_result() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); check(TRUE); update(); // Store expression return is_null(); @@ -5421,12 +5408,12 @@ int Item_func_set_user_var::save_in_field(Field *field, bool no_conversions, String *result; CHARSET_INFO *cs= collation.collation; char buff[MAX_FIELD_WIDTH]; // Alloc buffer for small columns - str_value.set_quick(buff, sizeof(buff), cs); + str_value.set_buffer_if_not_allocated(buff, sizeof(buff), cs); result= m_var_entry->val_str(&null_value, &str_value, decimals); if (null_value) { - str_value.set_quick(0, 0, cs); + 
str_value.set_buffer_if_not_allocated(0, 0, cs); return set_field_to_null_with_conversions(field, no_conversions); } @@ -5434,7 +5421,7 @@ int Item_func_set_user_var::save_in_field(Field *field, bool no_conversions, field->set_notnull(); error=field->store(result->ptr(),result->length(),cs); - str_value.set_quick(0, 0, cs); + str_value.set_buffer_if_not_allocated(0, 0, cs); } else if (result_type() == REAL_RESULT) { @@ -5468,7 +5455,7 @@ int Item_func_set_user_var::save_in_field(Field *field, bool no_conversions, String * Item_func_get_user_var::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_func_get_user_var::val_str"); if (!m_var_entry) DBUG_RETURN((String*) 0); // No such variable @@ -5478,7 +5465,7 @@ Item_func_get_user_var::val_str(String *str) double Item_func_get_user_var::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!m_var_entry) return 0.0; // No such variable return (m_var_entry->val_real(&null_value)); @@ -5487,7 +5474,7 @@ double Item_func_get_user_var::val_real() my_decimal *Item_func_get_user_var::val_decimal(my_decimal *dec) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!m_var_entry) return 0; return m_var_entry->val_decimal(&null_value, dec); @@ -5496,7 +5483,7 @@ my_decimal *Item_func_get_user_var::val_decimal(my_decimal *dec) longlong Item_func_get_user_var::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!m_var_entry) return 0; // No such variable return (m_var_entry->val_int(&null_value)); @@ -5649,7 +5636,7 @@ bool Item_func_get_user_var::fix_length_and_dec() { THD *thd=current_thd; int error; - maybe_null=1; + set_maybe_null(); decimals=NOT_FIXED_DEC; max_length=MAX_BLOB_WIDTH; @@ -5751,7 +5738,7 @@ bool Item_func_get_user_var::set_value(THD *thd, bool Item_user_var_as_out_param::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(!is_fixed()); + DBUG_ASSERT(!fixed()); DBUG_ASSERT(thd->lex->exchange); if (!(entry= get_variable(&thd->user_vars, 
&org_name, 1))) return TRUE; @@ -5859,7 +5846,7 @@ void Item_func_get_system_var::update_null_value() bool Item_func_get_system_var::fix_length_and_dec() { const char *cptr; - maybe_null= TRUE; + set_maybe_null(); max_length= 0; if (var->check_type(var_type)) @@ -6207,12 +6194,12 @@ bool Item_func_match::init_search(THD *thd, bool no_order) } if (join_key && !no_order) - flags|=FT_SORTED; + match_flags|=FT_SORTED; if (key != NO_SUCH_KEY) THD_STAGE_INFO(table->in_use, stage_fulltext_initialization); - ft_handler= table->file->ft_init_ext(flags, key, ft_tmp); + ft_handler= table->file->ft_init_ext(match_flags, key, ft_tmp); if (!ft_handler) DBUG_RETURN(1); @@ -6225,12 +6212,12 @@ bool Item_func_match::init_search(THD *thd, bool no_order) bool Item_func_match::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); Item *UNINIT_VAR(item); // Safe as arg_count is > 1 status_var_increment(thd->status_var.feature_fulltext); - maybe_null=1; + set_maybe_null(); join_key=0; /* @@ -6313,7 +6300,7 @@ bool Item_func_match::fix_index() We will skip execution if the item is not fixed with fix_field */ - if (!fixed) + if (!fixed()) return false; if (key == NO_SUCH_KEY) @@ -6325,9 +6312,9 @@ bool Item_func_match::fix_index() for (keynr=0 ; keynr < table->s->keys ; keynr++) { if ((table->key_info[keynr].flags & HA_FULLTEXT) && - (flags & FT_BOOL ? table->keys_in_use_for_query.is_set(keynr) : - table->s->keys_in_use.is_set(keynr))) - + (match_flags & FT_BOOL ? 
+ table->keys_in_use_for_query.is_set(keynr) : + table->s->usable_indexes(table->in_use).is_set(keynr))) { ft_to_key[fts]=keynr; ft_cnt[fts]=0; @@ -6402,7 +6389,7 @@ bool Item_func_match::eq(const Item *item, bool binary_cmp) const { if (item->type() != FUNC_ITEM || ((Item_func*)item)->functype() != FT_FUNC || - flags != ((Item_func_match*)item)->flags) + match_flags != ((Item_func_match*)item)->match_flags) return 0; Item_func_match *ifm=(Item_func_match*) item; @@ -6417,7 +6404,7 @@ bool Item_func_match::eq(const Item *item, bool binary_cmp) const double Item_func_match::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_func_match::val"); if (ft_handler == NULL) DBUG_RETURN(-1.0); @@ -6450,9 +6437,9 @@ void Item_func_match::print(String *str, enum_query_type query_type) print_args(str, 1, query_type); str->append(STRING_WITH_LEN(" against (")); args[0]->print(str, query_type); - if (flags & FT_BOOL) + if (match_flags & FT_BOOL) str->append(STRING_WITH_LEN(" in boolean mode")); - else if (flags & FT_EXPAND) + else if (match_flags & FT_EXPAND) str->append(STRING_WITH_LEN(" with query expansion")); str->append(STRING_WITH_LEN("))")); } @@ -6464,7 +6451,7 @@ class Func_handler_bit_xor_int_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); return item->arguments()[0]->to_longlong_null() ^ item->arguments()[1]->to_longlong_null(); } @@ -6477,7 +6464,7 @@ class Func_handler_bit_xor_dec_to_ulonglong: public: Longlong_null to_longlong_null(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); return VDec(item->arguments()[0]).to_xlonglong_null() ^ VDec(item->arguments()[1]).to_xlonglong_null(); } @@ -6553,7 +6540,7 @@ Item *get_system_var(THD *thd, enum_var_type var_type, longlong Item_func_row_count::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; return 
thd->get_row_count_func(); @@ -6566,7 +6553,7 @@ Item_func_sp::Item_func_sp(THD *thd, Name_resolution_context *context_arg, sp_name *name, const Sp_handler *sph): Item_func(thd), Item_sp(thd, context_arg, name), m_handler(sph) { - maybe_null= 1; + set_maybe_null(); } @@ -6575,7 +6562,7 @@ Item_func_sp::Item_func_sp(THD *thd, Name_resolution_context *context_arg, List<Item> &list): Item_func(thd, list), Item_sp(thd, context_arg, name_arg), m_handler(sph) { - maybe_null= 1; + set_maybe_null(); } @@ -6586,11 +6573,11 @@ Item_func_sp::cleanup() Item_func::cleanup(); } -const char * -Item_func_sp::func_name() const +LEX_CSTRING +Item_func_sp::func_name_cstring() const { - THD *thd= current_thd; - return Item_sp::func_name(thd, m_handler == &sp_handler_package_function); + return Item_sp::func_name_cstring(current_thd, + m_handler == &sp_handler_package_function); } @@ -6631,7 +6618,7 @@ bool Item_func_sp::fix_length_and_dec() Type_std_attributes::set(sp_result_field->type_std_attributes()); // There is a bug in the line below. See MDEV-11292 for details. 
collation.derivation= DERIVATION_COERCIBLE; - maybe_null= 1; + set_maybe_null(); DBUG_RETURN(FALSE); } @@ -6673,14 +6660,14 @@ const Type_handler *Item_func_sp::type_handler() const longlong Item_func_found_rows::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); return current_thd->found_rows(); } longlong Item_func_oracle_sql_rowcount::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; /* In case when a query like this: @@ -6695,7 +6682,7 @@ longlong Item_func_oracle_sql_rowcount::val_int() longlong Item_func_sqlcode::val_int() { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); DBUG_ASSERT(!null_value); Diagnostics_area::Sql_condition_iterator it= current_thd->get_stmt_da()->sql_conditions(); @@ -6711,7 +6698,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref) { bool res; DBUG_ENTER("Item_func_sp::fix_fields"); - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); sp_head *sp= m_handler->sp_find_routine(thd, m_name, true); /* @@ -6763,7 +6750,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref) to make m_sp and result_field members available to fix_length_and_dec(), which is called from Item_func::fix_fields(). 
*/ - res= init_result_field(thd, max_length, maybe_null, &null_value, &name); + res= init_result_field(thd, max_length, maybe_null(), &null_value, &name); if (res) DBUG_RETURN(TRUE); @@ -6879,14 +6866,18 @@ void uuid_short_init() (((ulonglong) server_start_time) << 24)); } - -longlong Item_func_uuid_short::val_int() +ulonglong server_uuid_value() { ulonglong val; mysql_mutex_lock(&LOCK_short_uuid_generator); val= uuid_value++; mysql_mutex_unlock(&LOCK_short_uuid_generator); - return (longlong) val; + return val; +} + +longlong Item_func_uuid_short::val_int() +{ + return (longlong) server_uuid_value(); } @@ -6896,7 +6887,7 @@ longlong Item_func_uuid_short::val_int() void Item_func_last_value::evaluate_sideeffects() { - DBUG_ASSERT(fixed == 1 && arg_count > 0); + DBUG_ASSERT(fixed() && arg_count > 0); for (uint i= 0; i < arg_count-1 ; i++) args[i]->val_int(); } @@ -6959,12 +6950,12 @@ bool Item_func_last_value::fix_length_and_dec() { last_value= args[arg_count -1]; Type_std_attributes::set(last_value); - maybe_null= last_value->maybe_null; + set_maybe_null(last_value->maybe_null()); return FALSE; } -void Cursor_ref::print_func(String *str, const char *func_name) +void Cursor_ref::print_func(String *str, const LEX_CSTRING &func_name) { append_identifier(current_thd, str, &m_cursor_name); str->append(func_name); @@ -7093,7 +7084,7 @@ void Item_func_nextval::print(String *str, enum_query_type query_type) bool use_db_name= d_name.str && d_name.str[0]; THD *thd= current_thd; // Don't trust 'table' - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); /* @@ -7217,7 +7208,7 @@ void Item_func_setval::print(String *str, enum_query_type query_type) bool use_db_name= d_name.str && d_name.str[0]; THD *thd= current_thd; // Don't trust 'table' - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); /* @@ -7252,3 +7243,77 @@ void Item_func_setval::print(String *str, enum_query_type query_type) str->append_ulonglong(round); 
str->append(')'); } + + +/* + Return how many row combinations has accepted so far + 1 + + The + 1 is to ensure that, for example, 'WHERE ROWNUM <=1' returns one row +*/ + +longlong Item_func_rownum::val_int() +{ + if (!accepted_rows) + { + /* + Rownum is not properly set up. Probably used in wrong context when + it should not be used. In this case returning 0 is probably the best + solution. + */ + return 0; + } + return (longlong) *accepted_rows+1; +} + + +Item_func_rownum::Item_func_rownum(THD *thd): + Item_longlong_func(thd),accepted_rows(0) +{ + /* + Remember the select context. + Add the function to the list fix_after_optimize in the select context + so that we can easily initializef all rownum functions with the pointers + to the row counters. + */ + select= thd->lex->current_select; + select->fix_after_optimize.push_back(this, thd->mem_root); + + /* + Mark that query is using rownum() and ensure that this select is + not merged with other selects + */ + select->with_rownum= 1; + thd->lex->with_rownum= 1; + thd->lex->uncacheable(UNCACHEABLE_RAND); + with_flags= with_flags | item_with_t::ROWNUM_FUNC; + + /* If this command changes data, mark it as unsafe for statement logging */ + if (sql_command_flags[thd->lex->sql_command] & + (CF_UPDATES_DATA | CF_DELETES_DATA)) + thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); +} + + +/* + Store a reference to the variable that contains number of accepted rows +*/ + +void Item_func_rownum::fix_after_optimize(THD *thd) +{ + accepted_rows= &select->join->accepted_rows; +} + +/* + Inform all ROWNUM() function where the number of rows are stored +*/ + +void fix_rownum_pointers(THD *thd, SELECT_LEX *select_lex, ha_rows *ptr) +{ + List_iterator<Item> li(select_lex->fix_after_optimize); + while (Item *item= li++) + { + if (item->type() == Item::FUNC_ITEM && + ((Item_func*) item)->functype() == Item_func::ROWNUM_FUNC) + ((Item_func_rownum*) item)->store_pointer_to_row_counter(ptr); + } +} diff --git 
a/sql/item_func.h b/sql/item_func.h index de515100146..3ab28443002 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1,7 +1,7 @@ #ifndef ITEM_FUNC_INCLUDED #define ITEM_FUNC_INCLUDED /* Copyright (c) 2000, 2016, Oracle and/or its affiliates. - Copyright (c) 2009, 2020, MariaDB + Copyright (c) 2009, 2021, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -35,8 +35,7 @@ extern "C" /* Bug in BSDI include file */ #include <cmath> -class Item_func :public Item_func_or_sum, - protected With_sum_func_cache +class Item_func :public Item_func_or_sum { void sync_with_sum_func_and_with_field(List<Item> &list); protected: @@ -76,9 +75,9 @@ public: SUSERVAR_FUNC, GUSERVAR_FUNC, COLLATE_FUNC, EXTRACT_FUNC, CHAR_TYPECAST_FUNC, FUNC_SP, UDF_FUNC, NEG_FUNC, GSYSVAR_FUNC, IN_OPTIMIZER_FUNC, DYNCOL_FUNC, - JSON_EXTRACT_FUNC, JSON_VALID_FUNC, + JSON_EXTRACT_FUNC, JSON_VALID_FUNC, ROWNUM_FUNC, CASE_SEARCHED_FUNC, // Used by ColumnStore/Spider - CASE_SIMPLE_FUNC // Used by ColumnStore/spider + CASE_SIMPLE_FUNC, // Used by ColumnStore/spider, }; static scalar_comparison_op functype_to_scalar_comparison_op(Functype type) { @@ -94,46 +93,37 @@ public: DBUG_ASSERT(0); return SCALAR_CMP_EQ; } - enum Type type() const { return FUNC_ITEM; } + enum Type type() const override { return FUNC_ITEM; } virtual enum Functype functype() const { return UNKNOWN_FUNC; } Item_func(THD *thd): Item_func_or_sum(thd) { - with_field= 0; - with_param= 0; + DBUG_ASSERT(with_flags == item_with_t::NONE); + with_flags= item_with_t::NONE; } - Item_func(THD *thd, Item *a) - :Item_func_or_sum(thd, a), With_sum_func_cache(a) + Item_func(THD *thd, Item *a): Item_func_or_sum(thd, a) { - with_param= a->with_param; - with_field= a->with_field; + with_flags= a->with_flags; } - Item_func(THD *thd, Item *a, Item *b) - :Item_func_or_sum(thd, a, b), With_sum_func_cache(a, b) + Item_func(THD *thd, Item *a, Item *b): + 
Item_func_or_sum(thd, a, b) { - with_param= a->with_param || b->with_param; - with_field= a->with_field || b->with_field; + with_flags= a->with_flags | b->with_flags; } - Item_func(THD *thd, Item *a, Item *b, Item *c) - :Item_func_or_sum(thd, a, b, c), With_sum_func_cache(a, b, c) + Item_func(THD *thd, Item *a, Item *b, Item *c): + Item_func_or_sum(thd, a, b, c) { - with_field= a->with_field || b->with_field || c->with_field; - with_param= a->with_param || b->with_param || c->with_param; + with_flags|= a->with_flags | b->with_flags | c->with_flags; } - Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d) - :Item_func_or_sum(thd, a, b, c, d), With_sum_func_cache(a, b, c, d) + Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d): + Item_func_or_sum(thd, a, b, c, d) { - with_field= a->with_field || b->with_field || - c->with_field || d->with_field; - with_param= a->with_param || b->with_param || - c->with_param || d->with_param; + with_flags= a->with_flags | b->with_flags | c->with_flags | d->with_flags; } - Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d, Item* e) - :Item_func_or_sum(thd, a, b, c, d, e), With_sum_func_cache(a, b, c, d, e) + Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d, Item* e): + Item_func_or_sum(thd, a, b, c, d, e) { - with_field= a->with_field || b->with_field || - c->with_field || d->with_field || e->with_field; - with_param= a->with_param || b->with_param || - c->with_param || d->with_param || e->with_param; + with_flags= (a->with_flags | b->with_flags | c->with_flags | d->with_flags | + e->with_flags); } Item_func(THD *thd, List<Item> &list): Item_func_or_sum(thd, list) @@ -141,33 +131,34 @@ public: set_arguments(thd, list); } // Constructor used for Item_cond_and/or (see Item comment) - Item_func(THD *thd, Item_func *item) - :Item_func_or_sum(thd, item), With_sum_func_cache(item), + Item_func(THD *thd, Item_func *item): + Item_func_or_sum(thd, item), not_null_tables_cache(item->not_null_tables_cache) { } - bool 
fix_fields(THD *, Item **ref); - void cleanup() + bool fix_fields(THD *, Item **ref) override; + void cleanup() override { Item_func_or_sum::cleanup(); used_tables_and_const_cache_init(); } - void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge); - void quick_fix_field(); - table_map not_null_tables() const; - void update_used_tables() + void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge) + override; + void quick_fix_field() override; + table_map not_null_tables() const override; + void update_used_tables() override { used_tables_and_const_cache_init(); used_tables_and_const_cache_update_and_join(arg_count, args); } COND *build_equal_items(THD *thd, COND_EQUAL *inherited, bool link_item_fields, - COND_EQUAL **cond_equal_ref); - SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) + COND_EQUAL **cond_equal_ref) override; + SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override { DBUG_ENTER("Item_func::get_mm_tree"); DBUG_RETURN(const_item() ? 
get_mm_tree_for_const(param) : NULL); } - bool eq(const Item *item, bool binary_cmp) const; + bool eq(const Item *item, bool binary_cmp) const override; virtual Item *key_item() const { return args[0]; } void set_arguments(THD *thd, List<Item> &list) { @@ -176,11 +167,12 @@ public: list.empty(); // Fields are used } void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, - List<Item> &fields, uint flags); - virtual void print(String *str, enum_query_type query_type); + List<Item> &fields, uint flags) override; + void print(String *str, enum_query_type query_type) override; void print_op(String *str, enum_query_type query_type); void print_args(String *str, uint from, enum_query_type query_type); - bool is_null() { + bool is_null() override + { update_null_value(); return null_value; } @@ -188,9 +180,9 @@ public: void signal_divide_by_null(); friend class udf_handler; - Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table) + Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table) override { return tmp_table_field_from_field_type(root, table); } - Item *get_tmp_table_item(THD *thd); + Item *get_tmp_table_item(THD *thd) override; void fix_char_length_ulonglong(ulonglong max_char_length_arg) { @@ -199,18 +191,18 @@ public: if (max_result_length >= MAX_BLOB_WIDTH) { max_length= MAX_BLOB_WIDTH; - maybe_null= 1; + set_maybe_null(); } else max_length= (uint32) max_result_length; } - Item *transform(THD *thd, Item_transformer transformer, uchar *arg); + Item *transform(THD *thd, Item_transformer transformer, uchar *arg) override; Item* compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, - Item_transformer transformer, uchar *arg_t); + Item_transformer transformer, uchar *arg_t) override; void traverse_cond(Cond_traverser traverser, - void * arg, traverse_order order); - bool eval_not_null_tables(void *opt_arg); - bool find_not_null_fields(table_map allowed); + void * arg, traverse_order order) override; + bool eval_not_null_tables(void 
*opt_arg) override; + bool find_not_null_fields(table_map allowed) override; // bool is_expensive_processor(void *arg); // virtual bool is_expensive() { return 0; } inline void raise_numeric_overflow(const char *type_name) @@ -267,7 +259,7 @@ public: bool has_timestamp_args() { - DBUG_ASSERT(fixed == TRUE); + DBUG_ASSERT(fixed()); for (uint i= 0; i < arg_count; i++) { if (args[i]->type() == Item::FIELD_ITEM && @@ -279,7 +271,7 @@ public: bool has_date_args() { - DBUG_ASSERT(fixed == TRUE); + DBUG_ASSERT(fixed()); for (uint i= 0; i < arg_count; i++) { if (args[i]->type() == Item::FIELD_ITEM && @@ -292,7 +284,7 @@ public: bool has_time_args() { - DBUG_ASSERT(fixed == TRUE); + DBUG_ASSERT(fixed()); for (uint i= 0; i < arg_count; i++) { if (args[i]->type() == Item::FIELD_ITEM && @@ -305,7 +297,7 @@ public: bool has_datetime_args() { - DBUG_ASSERT(fixed == TRUE); + DBUG_ASSERT(fixed()); for (uint i= 0; i < arg_count; i++) { if (args[i]->type() == Item::FIELD_ITEM && @@ -316,6 +308,7 @@ public: } Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { /* By default only substitution for a field whose two different values @@ -331,22 +324,22 @@ public: return used_tables() & RAND_TABLE_BIT; } - bool excl_dep_on_table(table_map tab_map) + bool excl_dep_on_table(table_map tab_map) override { - if (used_tables() & OUTER_REF_TABLE_BIT) + if (used_tables() & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)) return false; return !(used_tables() & ~tab_map) || Item_args::excl_dep_on_table(tab_map); } - bool excl_dep_on_grouping_fields(st_select_lex *sel) + bool excl_dep_on_grouping_fields(st_select_lex *sel) override { if (has_rand_bit() || with_subquery()) return false; return Item_args::excl_dep_on_grouping_fields(sel); } - bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred) + bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred) override { return Item_args::excl_dep_on_in_subq_left_part(subq_pred); } @@ -359,24 +352,24 @@ public: 
representation of a TIMESTAMP argument verbatim, and thus does not depend on the timezone. */ - virtual bool check_valid_arguments_processor(void *bool_arg) + bool check_valid_arguments_processor(void *bool_arg) override { return has_timestamp_args(); } - virtual bool find_function_processor (void *arg) + bool find_function_processor (void *arg) override { return functype() == *(Functype *) arg; } - void no_rows_in_result() + void no_rows_in_result() override { for (uint i= 0; i < arg_count; i++) { args[i]->no_rows_in_result(); } } - void restore_to_before_no_rows_in_result() + void restore_to_before_no_rows_in_result() override { for (uint i= 0; i < arg_count; i++) { @@ -393,11 +386,8 @@ public: - or replaced to an Item_int_with_ref */ bool setup_args_and_comparator(THD *thd, Arg_comparator *cmp); - - bool with_sum_func() const { return m_with_sum_func; } - With_sum_func_cache* get_with_sum_func_cache() { return this; } - Item_func *get_item_func() { return this; } - bool is_simplified_cond_processor(void *arg) + Item_func *get_item_func() override { return this; } + bool is_simplified_cond_processor(void *arg) override { return const_item() && !val_int(); } }; @@ -412,17 +402,18 @@ public: { collation= DTCollation_numeric(); } Item_real_func(THD *thd, List<Item> &list): Item_func(thd, list) { collation= DTCollation_numeric(); } - String *val_str(String*str); - my_decimal *val_decimal(my_decimal *decimal_value); - longlong val_int() + String *val_str(String*str) override; + my_decimal *val_decimal(my_decimal *decimal_value) override; + longlong val_int() override { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); return Converter_double_to_longlong(val_real(), unsigned_flag).result(); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return get_date_from_real(thd, ltime, fuzzydate); } - const Type_handler *type_handler() const { return &type_handler_double; } - 
bool fix_length_and_dec() + const Type_handler *type_handler() const override + { return &type_handler_double; } + bool fix_length_and_dec() override { decimals= NOT_FIXED_DEC; max_length= float_length(decimals); @@ -448,7 +439,7 @@ public: Item_hybrid_func(THD *thd, List<Item> &list): Item_func(thd, list) { } Item_hybrid_func(THD *thd, Item_hybrid_func *item) :Item_func(thd, item), Type_handler_hybrid_field_type(item) { } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return Type_handler_hybrid_field_type::type_handler(); } void fix_length_and_dec_long_or_longlong(uint char_length, bool unsigned_arg) { @@ -510,14 +501,14 @@ public: } double val_real(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); StringBuffer<64> tmp; String *res= item->val_str(&tmp); return res ? item->double_from_string_with_check(res) : 0.0; } longlong val_int(Item_handled_func *item) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); StringBuffer<22> tmp; String *res= item->val_str(&tmp); return res ? 
item->longlong_from_string_with_check(res) : 0; @@ -762,43 +753,43 @@ public: { m_func_handler= handler; } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return m_func_handler->return_type_handler(this); } - Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table) + Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); const Type_handler *h= m_func_handler->type_handler_for_create_select(this); return h->make_and_init_table_field(root, &name, - Record_addr(maybe_null), + Record_addr(maybe_null()), *this, table); } - String *val_str(String *to) + String *val_str(String *to) override { return m_func_handler->val_str(this, to); } - String *val_str_ascii(String *to) + String *val_str_ascii(String *to) override { return m_func_handler->val_str_ascii(this, to); } - double val_real() + double val_real() override { return m_func_handler->val_real(this); } - longlong val_int() + longlong val_int() override { return m_func_handler->val_int(this); } - my_decimal *val_decimal(my_decimal *to) + my_decimal *val_decimal(my_decimal *to) override { return m_func_handler->val_decimal(this, to); } - bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate) override { return m_func_handler->get_date(thd, this, to, fuzzydate); } - bool val_native(THD *thd, Native *to) + bool val_native(THD *thd, Native *to) override { return m_func_handler->val_native(thd, this, to); } @@ -901,42 +892,42 @@ public: Item_hybrid_func(thd, list) { collation= DTCollation_numeric(); } - double val_real() + double val_real() override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return Item_func_hybrid_field_type::type_handler()-> Item_func_hybrid_field_type_val_real(this); } - longlong val_int() + longlong val_int() override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return 
Item_func_hybrid_field_type::type_handler()-> Item_func_hybrid_field_type_val_int(this); } - my_decimal *val_decimal(my_decimal *dec) + my_decimal *val_decimal(my_decimal *dec) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return Item_func_hybrid_field_type::type_handler()-> Item_func_hybrid_field_type_val_decimal(this, dec); } - String *val_str(String*str) + String *val_str(String*str) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); String *res= Item_func_hybrid_field_type::type_handler()-> Item_func_hybrid_field_type_val_str(this, str); DBUG_ASSERT(null_value == (res == NULL)); return res; } - bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode) + bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return Item_func_hybrid_field_type::type_handler()-> Item_func_hybrid_field_type_get_date_with_warn(thd, this, to, mode); } - bool val_native(THD *thd, Native *to) + bool val_native(THD *thd, Native *to) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return native_op(thd, to); } @@ -1102,7 +1093,7 @@ public: class Item_num_op :public Item_func_numhybrid { protected: - bool check_arguments() const + bool check_arguments() const override { return false; // Checked by aggregate_for_num_op() } @@ -1110,7 +1101,7 @@ public: Item_num_op(THD *thd, Item *a, Item *b): Item_func_numhybrid(thd, a, b) {} virtual void result_precision()= 0; - virtual inline void print(String *str, enum_query_type query_type) + void print(String *str, enum_query_type query_type) override { print_op(str, query_type); } @@ -1140,7 +1131,7 @@ public: if (decimals == 0 && downcast_decimal_to_int) set_handler(type_handler_long_or_longlong()); } - bool need_parentheses_in_default() { return true; } + bool need_parentheses_in_default() override { return true; } }; @@ -1168,16 +1159,16 @@ public: { collation= DTCollation_numeric(); fix_char_length(21); } Item_int_func(THD *thd, Item_int_func *item) 
:Item_func(thd, item) { collation= DTCollation_numeric(); } - double val_real(); - String *val_str(String*str); - my_decimal *val_decimal(my_decimal *decimal_value) + double val_real() override; + String *val_str(String*str) override; + my_decimal *val_decimal(my_decimal *decimal_value) override { return val_decimal_from_int(decimal_value); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return get_date_from_int(thd, ltime, fuzzydate); } - const Type_handler *type_handler() const= 0; - bool fix_length_and_dec() { return FALSE; } + const Type_handler *type_handler() const override= 0; + bool fix_length_and_dec() override { return FALSE; } }; @@ -1190,13 +1181,13 @@ public: Item_long_func(THD *thd, Item *a, Item *b, Item *c): Item_int_func(thd, a, b, c) {} Item_long_func(THD *thd, List<Item> &list): Item_int_func(thd, list) { } Item_long_func(THD *thd, Item_long_func *item) :Item_int_func(thd, item) {} - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { if (unsigned_flag) return &type_handler_ulong; return &type_handler_slong; } - bool fix_length_and_dec() { max_length= 11; return FALSE; } + bool fix_length_and_dec() override { max_length= 11; return FALSE; } }; @@ -1205,12 +1196,17 @@ class Item_func_hash: public Item_int_func public: Item_func_hash(THD *thd, List<Item> &item): Item_int_func(thd, item) {} - longlong val_int(); - bool fix_length_and_dec(); - const Type_handler *type_handler() const { return &type_handler_slong; } - Item *get_copy(THD *thd) + longlong val_int() override; + bool fix_length_and_dec() override; + const Type_handler *type_handler() const override + { return &type_handler_slong; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_hash>(thd, this); } - const char *func_name() const { return "<hash>"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING 
name= {STRING_WITH_LEN("<hash>") }; + return name; + } }; class Item_longlong_func: public Item_int_func @@ -1224,7 +1220,7 @@ public: Item_int_func(thd, a, b, c, d) {} Item_longlong_func(THD *thd, List<Item> &list): Item_int_func(thd, list) { } Item_longlong_func(THD *thd, Item_longlong_func *item) :Item_int_func(thd, item) {} - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { if (unsigned_flag) return &type_handler_ulonglong; @@ -1242,7 +1238,7 @@ protected: Cursor_ref(const LEX_CSTRING *name, uint offset) :m_cursor_name(*name), m_cursor_offset(offset) { } - void print_func(String *str, const char *func_name); + void print_func(String *str, const LEX_CSTRING &func_name); }; @@ -1252,18 +1248,25 @@ class Item_func_cursor_rowcount: public Item_longlong_func, { public: Item_func_cursor_rowcount(THD *thd, const LEX_CSTRING *name, uint offset) - :Item_longlong_func(thd), Cursor_ref(name, offset) { maybe_null= true; } - const char *func_name() const { return "%ROWCOUNT"; } - longlong val_int(); - bool check_vcol_func_processor(void *arg) + :Item_longlong_func(thd), Cursor_ref(name, offset) + { + set_maybe_null(); + } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("%ROWCOUNT") }; + return name; + } + longlong val_int() override; + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), arg, VCOL_SESSION_FUNC); } - void print(String *str, enum_query_type query_type) + void print(String *str, enum_query_type query_type) override { - return Cursor_ref::print_func(str, func_name()); + return Cursor_ref::print_func(str, func_name_cstring()); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_cursor_rowcount>(thd, this); } }; @@ -1275,15 +1278,20 @@ class Item_func_connection_id :public Item_long_func public: Item_func_connection_id(THD *thd): Item_long_func(thd) { unsigned_flag=1; } - const 
char *func_name() const { return "connection_id"; } - bool fix_length_and_dec(); - bool fix_fields(THD *thd, Item **ref); - longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } - bool check_vcol_func_processor(void *arg) + LEX_CSTRING func_name_cstring() const override { - return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); + static LEX_CSTRING name= {STRING_WITH_LEN("connection_id") }; + return name; + } + bool fix_length_and_dec() override; + bool fix_fields(THD *thd, Item **ref) override; + longlong val_int() override { DBUG_ASSERT(fixed()); return value; } + bool check_vcol_func_processor(void *arg) override + { + return mark_unsupported_function(func_name(), "()", arg, + VCOL_SESSION_FUNC); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_connection_id>(thd, this); } }; @@ -1295,13 +1303,17 @@ public: { unsigned_flag= 0; } - const char *func_name() const { return "cast_as_signed"; } - const Type_handler *type_handler() const + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cast_as_signed") }; + return name; + } + const Type_handler *type_handler() const override { return Type_handler::type_handler_long_or_longlong(max_char_length(), false); } - longlong val_int() + longlong val_int() override { longlong value= args[0]->val_int_signed_typecast(); null_value= args[0]->null_value; @@ -1336,14 +1348,15 @@ public: set_if_bigger(char_length, 1U + (unsigned_flag ? 
0 : 1)); fix_char_length(char_length); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { return args[0]->type_handler()->Item_func_signed_fix_length_and_dec(this); } - virtual void print(String *str, enum_query_type query_type); - uint decimal_precision() const { return args[0]->decimal_precision(); } - bool need_parentheses_in_default() { return true; } - Item *get_copy(THD *thd) + void print(String *str, enum_query_type query_type) override; + decimal_digits_t decimal_precision() const override + { return args[0]->decimal_precision(); } + bool need_parentheses_in_default() override { return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_signed>(thd, this); } }; @@ -1355,26 +1368,30 @@ public: { unsigned_flag= 1; } - const char *func_name() const { return "cast_as_unsigned"; } - const Type_handler *type_handler() const + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cast_as_unsigned") }; + return name; + } + const Type_handler *type_handler() const override { if (max_char_length() <= MY_INT32_NUM_DECIMAL_DIGITS - 1) return &type_handler_ulong; return &type_handler_ulonglong; } - longlong val_int() + longlong val_int() override { longlong value= args[0]->val_int_unsigned_typecast(); null_value= args[0]->null_value; return value; } - bool fix_length_and_dec() + bool fix_length_and_dec() override { return args[0]->type_handler()->Item_func_unsigned_fix_length_and_dec(this); } - uint decimal_precision() const { return max_length; } - virtual void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + decimal_digits_t decimal_precision() const override { return max_length; } + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_unsigned>(thd, this); } }; @@ -1383,34 +1400,39 @@ class Item_decimal_typecast :public Item_func { my_decimal decimal_value; public: - 
Item_decimal_typecast(THD *thd, Item *a, uint len, uint dec) + Item_decimal_typecast(THD *thd, Item *a, uint len, decimal_digits_t dec) :Item_func(thd, a) { - decimals= (uint8) dec; + decimals= dec; collation= DTCollation_numeric(); fix_char_length(my_decimal_precision_to_length_no_truncation(len, dec, unsigned_flag)); } - String *val_str(String *str) { return VDec(this).to_string(str); } - double val_real() { return VDec(this).to_double(); } - longlong val_int() { return VDec(this).to_longlong(unsigned_flag); } - my_decimal *val_decimal(my_decimal*); - bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode) + String *val_str(String *str) override { return VDec(this).to_string(str); } + double val_real() override { return VDec(this).to_double(); } + longlong val_int() override { return VDec(this).to_longlong(unsigned_flag); } + my_decimal *val_decimal(my_decimal*) override; + bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode) override { return decimal_to_datetime_with_warn(thd, VDec(this).ptr(), to, mode, NULL, NULL); } - const Type_handler *type_handler() const { return &type_handler_newdecimal; } + const Type_handler *type_handler() const override + { return &type_handler_newdecimal; } void fix_length_and_dec_generic() {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { return args[0]->type_handler()->Item_decimal_typecast_fix_length_and_dec(this); } - const char *func_name() const { return "decimal_typecast"; } - virtual void print(String *str, enum_query_type query_type); - bool need_parentheses_in_default() { return true; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("decimal_typecast") }; + return name; + } + void print(String *str, enum_query_type query_type) override; + bool need_parentheses_in_default() override { return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_decimal_typecast>(thd, this); } }; @@ -1428,7 +1450,10 @@ 
public: } bool need_parentheses_in_default() { return true; } void print(String *str, enum_query_type query_type); - void fix_length_and_dec_generic() { maybe_null= 1; } + void fix_length_and_dec_generic() + { + set_maybe_null(); + } }; @@ -1438,18 +1463,23 @@ public: Item_float_typecast(THD *thd, Item *a) :Item_real_typecast(thd, a, MAX_FLOAT_STR_LENGTH, NOT_FIXED_DEC) { } - const Type_handler *type_handler() const { return &type_handler_float; } - bool fix_length_and_dec() + const Type_handler *type_handler() const override + { return &type_handler_float; } + bool fix_length_and_dec() override { return args[0]->type_handler()->Item_float_typecast_fix_length_and_dec(this); } - const char *func_name() const { return "float_typecast"; } - double val_real() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("float_typecast") }; + return name; + } + double val_real() override { return (double) (float) val_real_with_truncate(FLT_MAX); } - String *val_str(String*str) + String *val_str(String*str) override { Float nr(Item_float_typecast::val_real()); if (null_value) @@ -1457,7 +1487,7 @@ public: nr.to_string(str, decimals); return str; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_float_typecast>(thd, this); } }; @@ -1468,14 +1498,18 @@ public: Item_double_typecast(THD *thd, Item *a, uint len, uint dec): Item_real_typecast(thd, a, len, dec) { } - bool fix_length_and_dec() + bool fix_length_and_dec() override { return args[0]->type_handler()->Item_double_typecast_fix_length_and_dec(this); } - const char *func_name() const { return "double_typecast"; } - double val_real() { return val_real_with_truncate(DBL_MAX); } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("double_typecast") }; + return name; + } + double val_real() override { return val_real_with_truncate(DBL_MAX); } + Item *get_copy(THD *thd) 
override { return get_item_copy<Item_double_typecast>(thd, this); } }; @@ -1495,13 +1529,17 @@ class Item_func_plus :public Item_func_additive_op public: Item_func_plus(THD *thd, Item *a, Item *b): Item_func_additive_op(thd, a, b) {} - const char *func_name() const { return "+"; } - enum precedence precedence() const { return ADD_PRECEDENCE; } - bool fix_length_and_dec(); - longlong int_op(); - double real_op(); - my_decimal *decimal_op(my_decimal *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("+") }; + return name; + } + enum precedence precedence() const override { return ADD_PRECEDENCE; } + bool fix_length_and_dec() override; + longlong int_op() override; + double real_op() override; + my_decimal *decimal_op(my_decimal *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_plus>(thd, this); } }; @@ -1513,13 +1551,17 @@ public: Item_func_additive_op(thd, a, b), m_depends_on_sql_mode_no_unsigned_subtraction(false) { } - const char *func_name() const { return "-"; } - enum precedence precedence() const { return ADD_PRECEDENCE; } - Sql_mode_dependency value_depends_on_sql_mode() const; - longlong int_op(); - double real_op(); - my_decimal *decimal_op(my_decimal *); - bool fix_length_and_dec(); + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("-") }; + return name; + } + enum precedence precedence() const override { return ADD_PRECEDENCE; } + Sql_mode_dependency value_depends_on_sql_mode() const override; + longlong int_op() override; + double real_op() override; + my_decimal *decimal_op(my_decimal *) override; + bool fix_length_and_dec() override; void fix_unsigned_flag(); void fix_length_and_dec_double() { @@ -1536,7 +1578,7 @@ public: Item_func_additive_op::fix_length_and_dec_int(); fix_unsigned_flag(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_minus>(thd, 
this); } }; @@ -1546,16 +1588,20 @@ class Item_func_mul :public Item_num_op public: Item_func_mul(THD *thd, Item *a, Item *b): Item_num_op(thd, a, b) {} - const char *func_name() const { return "*"; } - enum precedence precedence() const { return MUL_PRECEDENCE; } - longlong int_op(); - double real_op(); - my_decimal *decimal_op(my_decimal *); - void result_precision(); - bool fix_length_and_dec(); - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("*") }; + return name; + } + enum precedence precedence() const override { return MUL_PRECEDENCE; } + longlong int_op() override; + double real_op() override; + my_decimal *decimal_op(my_decimal *) override; + void result_precision() override; + bool fix_length_and_dec() override; + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_mul>(thd, this); } }; @@ -1565,16 +1611,20 @@ class Item_func_div :public Item_num_op public: uint prec_increment; Item_func_div(THD *thd, Item *a, Item *b): Item_num_op(thd, a, b) {} - longlong int_op() { DBUG_ASSERT(0); return 0; } - double real_op(); - my_decimal *decimal_op(my_decimal *); - const char *func_name() const { return "/"; } - enum precedence precedence() const { return MUL_PRECEDENCE; } - bool fix_length_and_dec(); + longlong int_op() override { DBUG_ASSERT(0); return 0; } + double real_op() override; + my_decimal *decimal_op(my_decimal *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("/") }; + return name; + } + enum precedence precedence() const override { return MUL_PRECEDENCE; } + bool fix_length_and_dec() override; void fix_length_and_dec_double(); 
void fix_length_and_dec_int(); - void result_precision(); - Item *get_copy(THD *thd) + void result_precision() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_div>(thd, this); } }; @@ -1584,21 +1634,25 @@ class Item_func_int_div :public Item_int_func public: Item_func_int_div(THD *thd, Item *a, Item *b): Item_int_func(thd, a, b) {} - longlong val_int(); - const char *func_name() const { return "DIV"; } - enum precedence precedence() const { return MUL_PRECEDENCE; } - const Type_handler *type_handler() const + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("DIV") }; + return name; + } + enum precedence precedence() const override { return MUL_PRECEDENCE; } + const Type_handler *type_handler() const override { return type_handler_long_or_longlong(); } - bool fix_length_and_dec(); - void print(String *str, enum_query_type query_type) + bool fix_length_and_dec() override; + void print(String *str, enum_query_type query_type) override { print_op(str, query_type); } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool need_parentheses_in_default() { return true; } - Item *get_copy(THD *thd) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool need_parentheses_in_default() override { return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_int_div>(thd, this); } }; @@ -1607,13 +1661,17 @@ class Item_func_mod :public Item_num_op { public: Item_func_mod(THD *thd, Item *a, Item *b): Item_num_op(thd, a, b) {} - longlong int_op(); - double real_op(); - my_decimal *decimal_op(my_decimal *); - const char *func_name() const { return "MOD"; } - enum precedence precedence() const { return MUL_PRECEDENCE; } - void result_precision(); - bool fix_length_and_dec(); + 
longlong int_op() override; + double real_op() override; + my_decimal *decimal_op(my_decimal *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("MOD") }; + return name; + } + enum precedence precedence() const override { return MUL_PRECEDENCE; } + void result_precision() override; + bool fix_length_and_dec() override; void fix_length_and_dec_double() { Item_num_op::fix_length_and_dec_double(); @@ -1630,9 +1688,9 @@ public: DBUG_ASSERT(decimals == 0); set_handler(type_handler_long_or_longlong()); } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - Item *get_copy(THD *thd) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_mod>(thd, this); } }; @@ -1641,24 +1699,29 @@ class Item_func_neg :public Item_func_num1 { public: Item_func_neg(THD *thd, Item *a): Item_func_num1(thd, a) {} - double real_op(); - longlong int_op(); - my_decimal *decimal_op(my_decimal *); - const char *func_name() const { return "-"; } - enum Functype functype() const { return NEG_FUNC; } - enum precedence precedence() const { return NEG_PRECEDENCE; } - void print(String *str, enum_query_type query_type) - { - str->append(func_name()); + double real_op() override; + longlong int_op() override; + my_decimal *decimal_op(my_decimal *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("-") }; + return name; + } + enum Functype functype() const override { return NEG_FUNC; } + enum precedence precedence() const override { return NEG_PRECEDENCE; } + void print(String *str, enum_query_type query_type) override + { + str->append(func_name_cstring()); args[0]->print_parenthesised(str, query_type, precedence()); } void fix_length_and_dec_int(); void 
fix_length_and_dec_double(); void fix_length_and_dec_decimal(); - bool fix_length_and_dec(); - uint decimal_precision() const { return args[0]->decimal_precision(); } - bool need_parentheses_in_default() { return true; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + decimal_digits_t decimal_precision() const override + { return args[0]->decimal_precision(); } + bool need_parentheses_in_default() override { return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_neg>(thd, this); } }; @@ -1667,15 +1730,19 @@ class Item_func_abs :public Item_func_num1 { public: Item_func_abs(THD *thd, Item *a): Item_func_num1(thd, a) {} - double real_op(); - longlong int_op(); - my_decimal *decimal_op(my_decimal *); - const char *func_name() const { return "abs"; } + double real_op() override; + longlong int_op() override; + my_decimal *decimal_op(my_decimal *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("abs") }; + return name; + } void fix_length_and_dec_int(); void fix_length_and_dec_double(); void fix_length_and_dec_decimal(); - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_abs>(thd, this); } }; @@ -1683,15 +1750,16 @@ public: class Item_dec_func :public Item_real_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_real(0, arg_count); } public: Item_dec_func(THD *thd, Item *a): Item_real_func(thd, a) {} Item_dec_func(THD *thd, Item *a, Item *b): Item_real_func(thd, a, b) {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { - decimals=NOT_FIXED_DEC; max_length=float_length(decimals); - maybe_null=1; + decimals= NOT_FIXED_DEC; + max_length= float_length(decimals); + set_maybe_null(); return FALSE; } }; @@ -1700,9 +1768,13 @@ class Item_func_exp :public Item_dec_func { public: 
Item_func_exp(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "exp"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("exp") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_exp>(thd, this); } }; @@ -1711,9 +1783,13 @@ class Item_func_ln :public Item_dec_func { public: Item_func_ln(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "ln"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("ln") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ln>(thd, this); } }; @@ -1723,9 +1799,13 @@ class Item_func_log :public Item_dec_func public: Item_func_log(THD *thd, Item *a): Item_dec_func(thd, a) {} Item_func_log(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {} - double val_real(); - const char *func_name() const { return "log"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("log") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_log>(thd, this); } }; @@ -1734,9 +1814,13 @@ class Item_func_log2 :public Item_dec_func { public: Item_func_log2(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "log2"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("log2") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_log2>(thd, this); } }; @@ -1745,9 +1829,13 @@ class Item_func_log10 :public Item_dec_func { public: 
Item_func_log10(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "log10"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("log10") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_log10>(thd, this); } }; @@ -1756,9 +1844,13 @@ class Item_func_sqrt :public Item_dec_func { public: Item_func_sqrt(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "sqrt"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("sqrt") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sqrt>(thd, this); } }; @@ -1767,9 +1859,13 @@ class Item_func_pow :public Item_dec_func { public: Item_func_pow(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {} - double val_real(); - const char *func_name() const { return "pow"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("pow") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_pow>(thd, this); } }; @@ -1778,9 +1874,13 @@ class Item_func_acos :public Item_dec_func { public: Item_func_acos(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "acos"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("acos") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_acos>(thd, this); } }; @@ -1788,9 +1888,13 @@ class Item_func_asin :public Item_dec_func { public: Item_func_asin(THD *thd, Item *a): 
Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "asin"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("asin") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_asin>(thd, this); } }; @@ -1799,9 +1903,13 @@ class Item_func_atan :public Item_dec_func public: Item_func_atan(THD *thd, Item *a): Item_dec_func(thd, a) {} Item_func_atan(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {} - double val_real(); - const char *func_name() const { return "atan"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("atan") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_atan>(thd, this); } }; @@ -1809,9 +1917,13 @@ class Item_func_cos :public Item_dec_func { public: Item_func_cos(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "cos"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cos") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_cos>(thd, this); } }; @@ -1819,9 +1931,13 @@ class Item_func_sin :public Item_dec_func { public: Item_func_sin(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "sin"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("sin") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sin>(thd, this); } }; @@ -1829,9 +1945,13 @@ class Item_func_tan :public Item_dec_func { public: Item_func_tan(THD *thd, Item *a): 
Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "tan"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("tan") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_tan>(thd, this); } }; @@ -1839,9 +1959,13 @@ class Item_func_cot :public Item_dec_func { public: Item_func_cot(THD *thd, Item *a): Item_dec_func(thd, a) {} - double val_real(); - const char *func_name() const { return "cot"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cot") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_cot>(thd, this); } }; @@ -1850,8 +1974,8 @@ class Item_func_int_val :public Item_func_hybrid_field_type { public: Item_func_int_val(THD *thd, Item *a): Item_func_hybrid_field_type(thd, a) {} - bool check_partition_func_processor(void *int_arg) { return FALSE; } - bool check_vcol_func_processor(void *arg) { return FALSE; } + bool check_partition_func_processor(void *int_arg) override { return FALSE; } + bool check_vcol_func_processor(void *arg) override { return FALSE; } virtual decimal_round_mode round_mode() const= 0; void fix_length_and_dec_double(); void fix_length_and_dec_int_or_decimal(); @@ -1864,11 +1988,12 @@ public: { fix_attributes_datetime(0); set_handler(&type_handler_datetime2); - maybe_null= true; // E.g. 
CEILING(TIMESTAMP'0000-01-01 23:59:59.9') + // Thinks like CEILING(TIMESTAMP'0000-01-01 23:59:59.9') returns NULL + set_maybe_null(); } - bool fix_length_and_dec(); - String *str_op(String *str) { DBUG_ASSERT(0); return 0; } - bool native_op(THD *thd, Native *to) + bool fix_length_and_dec() override; + String *str_op(String *str) override { DBUG_ASSERT(0); return 0; } + bool native_op(THD *thd, Native *to) override { DBUG_ASSERT(0); return true; @@ -1880,14 +2005,18 @@ class Item_func_ceiling :public Item_func_int_val { public: Item_func_ceiling(THD *thd, Item *a): Item_func_int_val(thd, a) {} - const char *func_name() const { return "ceiling"; } - decimal_round_mode round_mode() const { return CEILING; } - longlong int_op(); - double real_op(); - my_decimal *decimal_op(my_decimal *); - bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool time_op(THD *thd, MYSQL_TIME *ltime); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("ceiling") }; + return name; + } + decimal_round_mode round_mode() const override { return CEILING; } + longlong int_op() override; + double real_op() override; + my_decimal *decimal_op(my_decimal *) override; + bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool time_op(THD *thd, MYSQL_TIME *ltime) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ceiling>(thd, this); } }; @@ -1896,14 +2025,18 @@ class Item_func_floor :public Item_func_int_val { public: Item_func_floor(THD *thd, Item *a): Item_func_int_val(thd, a) {} - const char *func_name() const { return "floor"; } - decimal_round_mode round_mode() const { return FLOOR; } - longlong int_op(); - double real_op(); - my_decimal *decimal_op(my_decimal *); - bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool time_op(THD *thd, MYSQL_TIME *ltime); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { 
+ static LEX_CSTRING name= {STRING_WITH_LEN("floor") }; + return name; + } + decimal_round_mode round_mode() const override { return FLOOR; } + longlong int_op() override; + double real_op() override; + my_decimal *decimal_op(my_decimal *) override; + bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool time_op(THD *thd, MYSQL_TIME *ltime) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_floor>(thd, this); } }; @@ -1918,18 +2051,23 @@ class Item_func_round :public Item_func_hybrid_field_type public: Item_func_round(THD *thd, Item *a, Item *b, bool trunc_arg) :Item_func_hybrid_field_type(thd, a, b), truncate(trunc_arg) {} - const char *func_name() const { return truncate ? "truncate" : "round"; } - double real_op(); - longlong int_op(); - my_decimal *decimal_op(my_decimal *); - bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool time_op(THD *thd, MYSQL_TIME *ltime); - bool native_op(THD *thd, Native *to) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING truncate_name= {STRING_WITH_LEN("truncate") }; + static LEX_CSTRING round_name= {STRING_WITH_LEN("round") }; + return truncate ? truncate_name : round_name; + } + double real_op() override; + longlong int_op() override; + my_decimal *decimal_op(my_decimal *) override; + bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool time_op(THD *thd, MYSQL_TIME *ltime) override; + bool native_op(THD *thd, Native *to) override { DBUG_ASSERT(0); return true; } - String *str_op(String *str) + String *str_op(String *str) override { DBUG_ASSERT(0); return NULL; @@ -1943,7 +2081,7 @@ public: void fix_arg_time(); void fix_arg_datetime(); void fix_arg_temporal(const Type_handler *h, uint int_part_length); - bool fix_length_and_dec() + bool fix_length_and_dec() override { /* We don't want to translate ENUM/SET to CHAR here. 
@@ -1951,7 +2089,7 @@ public: */ return args[0]->real_type_handler()->Item_func_round_fix_length_and_dec(this); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_round>(thd, this); } }; @@ -1960,62 +2098,129 @@ class Item_func_rand :public Item_real_func { struct my_rnd_struct *rand; bool first_eval; // TRUE if val_real() is called 1st time - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_int(0, arg_count); } void seed_random (Item * val); public: Item_func_rand(THD *thd, Item *a): Item_real_func(thd, a), rand(0), first_eval(TRUE) {} Item_func_rand(THD *thd): Item_real_func(thd) {} - double val_real(); - const char *func_name() const { return "rand"; } - bool const_item() const { return 0; } - void update_used_tables(); - bool fix_fields(THD *thd, Item **ref); - void cleanup() { first_eval= TRUE; Item_real_func::cleanup(); } - bool check_vcol_func_processor(void *arg) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("rand") }; + return name; + } + bool const_item() const override { return 0; } + void update_used_tables() override; + bool fix_fields(THD *thd, Item **ref) override; + void cleanup() override { first_eval= TRUE; Item_real_func::cleanup(); } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_rand>(thd, this); } }; +class Item_func_rownum final :public Item_longlong_func +{ + /* + This points to a variable that contains the number of rows + accpted so far in the result set + */ + ha_rows *accepted_rows; + SELECT_LEX *select; +public: + Item_func_rownum(THD *thd); + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= 
{STRING_WITH_LEN("rownum") }; + return name; + } + enum Functype functype() const override { return ROWNUM_FUNC; } + void update_used_tables() override {} + bool const_item() const override { return 0; } + void fix_after_optimize(THD *thd) override; + bool fix_length_and_dec() override + { + unsigned_flag= 1; + used_tables_cache= RAND_TABLE_BIT; + const_item_cache=0; + set_maybe_null(); + return FALSE; + } + void cleanup() override + { + Item_longlong_func::cleanup(); + /* Ensure we don't point to freed memory */ + accepted_rows= 0; + } + bool check_vcol_func_processor(void *arg) override + { + return mark_unsupported_function(func_name(), "()", arg, + VCOL_IMPOSSIBLE); + } + bool check_handler_func_processor(void *arg) override + { + return mark_unsupported_function(func_name(), "()", arg, + VCOL_IMPOSSIBLE); + } + Item *get_copy(THD *thd) override { return 0; } + /* This function is used in insert, update and delete */ + void store_pointer_to_row_counter(ha_rows *row_counter) + { + accepted_rows= row_counter; + } +}; + +void fix_rownum_pointers(THD *thd, SELECT_LEX *select_lex, ha_rows *ptr); + + class Item_func_sign :public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_can_return_real(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_real(func_name_cstring()); } public: Item_func_sign(THD *thd, Item *a): Item_long_func(thd, a) {} - const char *func_name() const { return "sign"; } - uint decimal_precision() const { return 1; } - bool fix_length_and_dec() { fix_char_length(2); return FALSE; } - longlong val_int(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("sign") }; + return name; + } + decimal_digits_t decimal_precision() const override { return 1; } + bool fix_length_and_dec() override { fix_char_length(2); return FALSE; } + longlong val_int() override; + Item *get_copy(THD *thd) override { return 
get_item_copy<Item_func_sign>(thd, this); } }; class Item_func_units :public Item_real_func { - char *name; + LEX_CSTRING name; double mul,add; - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_real(0, arg_count); } public: Item_func_units(THD *thd, char *name_arg, Item *a, double mul_arg, double add_arg): - Item_real_func(thd, a), name(name_arg), mul(mul_arg), add(add_arg) {} - double val_real(); - const char *func_name() const { return name; } - bool fix_length_and_dec() + Item_real_func(thd, a), mul(mul_arg), add(add_arg) + { + name.str= name_arg; + name.length= strlen(name_arg); + } + double val_real() override; + LEX_CSTRING func_name_cstring() const override { return name; } + bool fix_length_and_dec() override { decimals= NOT_FIXED_DEC; max_length= float_length(decimals); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_units>(thd, this); } }; @@ -2036,7 +2241,7 @@ class Item_func_min_max :public Item_hybrid_func String tmp_value; int cmp_sign; protected: - bool check_arguments() const + bool check_arguments() const override { return false; // Checked by aggregate_for_min_max() } @@ -2052,37 +2257,37 @@ public: bool get_date_native(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); bool get_time_native(THD *thd, MYSQL_TIME *res); - double val_real() + double val_real() override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return Item_func_min_max::type_handler()-> Item_func_min_max_val_real(this); } - longlong val_int() + longlong val_int() override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return Item_func_min_max::type_handler()-> Item_func_min_max_val_int(this); } - String *val_str(String *str) + String *val_str(String *str) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return Item_func_min_max::type_handler()-> Item_func_min_max_val_str(this, str); } - my_decimal *val_decimal(my_decimal *dec) + my_decimal 
*val_decimal(my_decimal *dec) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return Item_func_min_max::type_handler()-> Item_func_min_max_val_decimal(this, dec); } - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return Item_func_min_max::type_handler()-> Item_func_min_max_get_date(thd, this, res, fuzzydate); } - bool val_native(THD *thd, Native *to); + bool val_native(THD *thd, Native *to) override; void aggregate_attributes_real(Item **items, uint nitems) { /* @@ -2103,9 +2308,9 @@ public: Item_func::aggregate_attributes_real(items, nitems); max_length= float_length(decimals); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { - if (aggregate_for_min_max(func_name(), args, arg_count)) + if (aggregate_for_min_max(func_name_cstring(), args, arg_count)) return true; fix_attributes(args, arg_count); return false; @@ -2116,8 +2321,12 @@ class Item_func_min :public Item_func_min_max { public: Item_func_min(THD *thd, List<Item> &list): Item_func_min_max(thd, list, 1) {} - const char *func_name() const { return "least"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("least") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_min>(thd, this); } }; @@ -2125,8 +2334,12 @@ class Item_func_max :public Item_func_min_max { public: Item_func_max(THD *thd, List<Item> &list): Item_func_min_max(thd, list, -1) {} - const char *func_name() const { return "greatest"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("greatest") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_max>(thd, this); } }; @@ -2143,35 +2356,41 @@ public: { name= a->name; } - double val_real() { return 
val_real_from_item(args[0]); } - longlong val_int() { return val_int_from_item(args[0]); } - String *val_str(String *str) { return val_str_from_item(args[0], str); } - bool val_native(THD *thd, Native *to) - { return val_native_from_item(thd, args[0], to); } - my_decimal *val_decimal(my_decimal *dec) + double val_real() override { return val_real_from_item(args[0]); } + longlong val_int() override { return val_int_from_item(args[0]); } + String *val_str(String *str) override + { return val_str_from_item(args[0], str); } + bool val_native(THD *thd, Native *to) override + { return val_native_from_item(thd, args[0], to); } + my_decimal *val_decimal(my_decimal *dec) override { return val_decimal_from_item(args[0], dec); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return get_date_from_item(thd, args[0], ltime, fuzzydate); } - const char *func_name() const { return "rollup_const"; } - bool const_item() const { return 0; } - const Type_handler *type_handler() const { return args[0]->type_handler(); } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("rollup_const") }; + return name; + } + bool const_item() const override { return 0; } + const Type_handler *type_handler() const override + { return args[0]->type_handler(); } + bool fix_length_and_dec() override { Type_std_attributes::set(*args[0]); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_rollup_const>(thd, this); } }; class Item_long_func_length: public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_can_return_str(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_str(func_name_cstring()); } public: Item_long_func_length(THD *thd, Item *a): Item_long_func(thd, a) {} - bool fix_length_and_dec() { 
max_length=10; return FALSE; } + bool fix_length_and_dec() override { max_length=10; return FALSE; } }; @@ -2180,9 +2399,13 @@ class Item_func_octet_length :public Item_long_func_length String value; public: Item_func_octet_length(THD *thd, Item *a): Item_long_func_length(thd, a) {} - longlong val_int(); - const char *func_name() const { return "octet_length"; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("octet_length") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_octet_length>(thd, this); } }; @@ -2191,14 +2414,18 @@ class Item_func_bit_length :public Item_longlong_func String value; public: Item_func_bit_length(THD *thd, Item *a): Item_longlong_func(thd, a) {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { max_length= 11; // 0x100000000*8 = 34,359,738,368 return FALSE; } - longlong val_int(); - const char *func_name() const { return "bit_length"; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("bit_length") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_bit_length>(thd, this); } }; @@ -2207,34 +2434,48 @@ class Item_func_char_length :public Item_long_func_length String value; public: Item_func_char_length(THD *thd, Item *a): Item_long_func_length(thd, a) {} - longlong val_int(); - const char *func_name() const { return "char_length"; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("char_length") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_char_length>(thd, this); } }; class Item_func_coercibility :public Item_long_func { - bool check_arguments() const - { return 
args[0]->check_type_can_return_str(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_str(func_name_cstring()); } public: Item_func_coercibility(THD *thd, Item *a): Item_long_func(thd, a) {} - longlong val_int(); - const char *func_name() const { return "coercibility"; } - bool fix_length_and_dec() { max_length=10; maybe_null= 0; return FALSE; } - bool eval_not_null_tables(void *) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("coercibility") }; + return name; + } + bool fix_length_and_dec() override + { + max_length=10; + base_flags&= ~item_base_t::MAYBE_NULL; + return FALSE; + } + bool eval_not_null_tables(void *) override { not_null_tables_cache= 0; return false; } - bool find_not_null_fields(table_map allowed) + bool find_not_null_fields(table_map allowed) override { return false; } Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { return this; } - bool const_item() const { return true; } - Item *get_copy(THD *thd) + bool const_item() const override { return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_coercibility>(thd, this); } }; @@ -2247,10 +2488,10 @@ public: */ class Item_func_locate :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_str(0, 2) || - (arg_count > 2 && args[2]->check_type_can_return_int(func_name())); + (arg_count > 2 && args[2]->check_type_can_return_int(func_name_cstring())); } String value1,value2; DTCollation cmp_collation; @@ -2259,15 +2500,19 @@ public: :Item_long_func(thd, a, b) {} Item_func_locate(THD *thd, Item *a, Item *b, Item *c) :Item_long_func(thd, a, b, c) {} - const char *func_name() const { return "locate"; } - longlong val_int(); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= 
{STRING_WITH_LEN("locate") }; + return name; + } + longlong val_int() override; + bool fix_length_and_dec() override { max_length= MY_INT32_NUM_DECIMAL_DIGITS; return agg_arg_charsets_for_comparison(cmp_collation, args, 2); } - virtual void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_locate>(thd, this); } }; @@ -2279,45 +2524,57 @@ class Item_func_field :public Item_long_func DTCollation cmp_collation; public: Item_func_field(THD *thd, List<Item> &list): Item_long_func(thd, list) {} - longlong val_int(); - const char *func_name() const { return "field"; } - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("field") }; + return name; + } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_field>(thd, this); } }; class Item_func_ascii :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_str(0, arg_count); } String value; public: Item_func_ascii(THD *thd, Item *a): Item_long_func(thd, a) {} - longlong val_int(); - const char *func_name() const { return "ascii"; } - bool fix_length_and_dec() { max_length=3; return FALSE; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("ascii") }; + return name; + } + bool fix_length_and_dec() override { max_length=3; return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ascii>(thd, this); } }; class Item_func_ord :public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_can_return_str(func_name()); } + bool check_arguments() const override + { 
return args[0]->check_type_can_return_str(func_name_cstring()); } String value; public: Item_func_ord(THD *thd, Item *a): Item_long_func(thd, a) {} - bool fix_length_and_dec() { fix_char_length(7); return FALSE; } - longlong val_int(); - const char *func_name() const { return "ord"; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override { fix_char_length(7); return FALSE; } + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("ord") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ord>(thd, this); } }; class Item_func_find_in_set :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_str(0, 2); } String value,value2; uint enum_value; @@ -2326,10 +2583,14 @@ class Item_func_find_in_set :public Item_long_func public: Item_func_find_in_set(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b), enum_value(0) {} - longlong val_int(); - const char *func_name() const { return "find_in_set"; } - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("find_in_set") }; + return name; + } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_find_in_set>(thd, this); } }; @@ -2337,7 +2598,7 @@ public: class Item_func_bit_operator: public Item_handled_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_int(0, arg_count); } protected: bool fix_length_and_dec_op1_std(const Handler *ha_int, const Handler *ha_dec) @@ -2356,11 +2617,11 @@ public: :Item_handled_func(thd, a) {} Item_func_bit_operator(THD *thd, Item *a, Item *b) :Item_handled_func(thd, a, b) {} - void print(String *str, enum_query_type query_type) + void print(String 
*str, enum_query_type query_type) override { print_op(str, query_type); } - bool need_parentheses_in_default() { return true; } + bool need_parentheses_in_default() override { return true; } }; class Item_func_bit_or :public Item_func_bit_operator @@ -2368,10 +2629,14 @@ class Item_func_bit_or :public Item_func_bit_operator public: Item_func_bit_or(THD *thd, Item *a, Item *b) :Item_func_bit_operator(thd, a, b) {} - bool fix_length_and_dec(); - const char *func_name() const { return "|"; } - enum precedence precedence() const { return BITOR_PRECEDENCE; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("|") }; + return name; + } + enum precedence precedence() const override { return BITOR_PRECEDENCE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_bit_or>(thd, this); } }; @@ -2380,22 +2645,30 @@ class Item_func_bit_and :public Item_func_bit_operator public: Item_func_bit_and(THD *thd, Item *a, Item *b) :Item_func_bit_operator(thd, a, b) {} - bool fix_length_and_dec(); - const char *func_name() const { return "&"; } - enum precedence precedence() const { return BITAND_PRECEDENCE; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("&") }; + return name; + } + enum precedence precedence() const override { return BITAND_PRECEDENCE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_bit_and>(thd, this); } }; class Item_func_bit_count :public Item_handled_func { - bool check_arguments() const - { return args[0]->check_type_can_return_int(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_int(func_name_cstring()); } public: Item_func_bit_count(THD *thd, Item *a): Item_handled_func(thd, a) {} - const char *func_name() const { return "bit_count"; } - bool 
fix_length_and_dec(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("bit_count") }; + return name; + } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_bit_count>(thd, this); } }; @@ -2404,10 +2677,14 @@ class Item_func_shift_left :public Item_func_bit_operator public: Item_func_shift_left(THD *thd, Item *a, Item *b) :Item_func_bit_operator(thd, a, b) {} - bool fix_length_and_dec(); - const char *func_name() const { return "<<"; } - enum precedence precedence() const { return SHIFT_PRECEDENCE; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<<") }; + return name; + } + enum precedence precedence() const override { return SHIFT_PRECEDENCE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_shift_left>(thd, this); } }; @@ -2416,10 +2693,14 @@ class Item_func_shift_right :public Item_func_bit_operator public: Item_func_shift_right(THD *thd, Item *a, Item *b) :Item_func_bit_operator(thd, a, b) {} - bool fix_length_and_dec(); - const char *func_name() const { return ">>"; } - enum precedence precedence() const { return SHIFT_PRECEDENCE; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN(">>") }; + return name; + } + enum precedence precedence() const override { return SHIFT_PRECEDENCE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_shift_right>(thd, this); } }; @@ -2427,65 +2708,82 @@ class Item_func_bit_neg :public Item_func_bit_operator { public: Item_func_bit_neg(THD *thd, Item *a): Item_func_bit_operator(thd, a) {} - bool fix_length_and_dec(); - const char *func_name() const { return "~"; } - enum precedence precedence() const { return NEG_PRECEDENCE; } - void 
print(String *str, enum_query_type query_type) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("~") }; + return name; + } + enum precedence precedence() const override { return NEG_PRECEDENCE; } + void print(String *str, enum_query_type query_type) override { - str->append(func_name()); + str->append(func_name_cstring()); args[0]->print_parenthesised(str, query_type, precedence()); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_bit_neg>(thd, this); } }; class Item_func_last_insert_id :public Item_longlong_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_int(0, arg_count); } public: Item_func_last_insert_id(THD *thd): Item_longlong_func(thd) {} Item_func_last_insert_id(THD *thd, Item *a): Item_longlong_func(thd, a) {} - longlong val_int(); - const char *func_name() const { return "last_insert_id"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("last_insert_id") }; + return name; + } + bool fix_length_and_dec() override { unsigned_flag= true; if (arg_count) max_length= args[0]->max_length; return FALSE; } - bool fix_fields(THD *thd, Item **ref); - bool check_vcol_func_processor(void *arg) + bool fix_fields(THD *thd, Item **ref) override; + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_last_insert_id>(thd, this); } }; class Item_func_benchmark :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_can_return_int(func_name()) || - args[1]->check_type_scalar(func_name()); + return 
args[0]->check_type_can_return_int(func_name_cstring()) || + args[1]->check_type_scalar(func_name_cstring()); } public: Item_func_benchmark(THD *thd, Item *count_expr, Item *expr): Item_long_func(thd, count_expr, expr) {} - longlong val_int(); - const char *func_name() const { return "benchmark"; } - bool fix_length_and_dec() { max_length=1; maybe_null=0; return FALSE; } - virtual void print(String *str, enum_query_type query_type); - bool check_vcol_func_processor(void *arg) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("benchmark") }; + return name; + } + bool fix_length_and_dec() override + { + max_length=1; + base_flags&= ~item_base_t::MAYBE_NULL; + return FALSE; + } + void print(String *str, enum_query_type query_type) override; + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_benchmark>(thd, this); } }; @@ -2495,24 +2793,28 @@ void item_func_sleep_free(void); class Item_func_sleep :public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_can_return_real(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_real(func_name_cstring()); } public: Item_func_sleep(THD *thd, Item *a): Item_long_func(thd, a) {} - bool fix_length_and_dec() { fix_char_length(1); return FALSE; } - bool const_item() const { return 0; } - const char *func_name() const { return "sleep"; } - table_map used_tables() const + bool fix_length_and_dec() override { fix_char_length(1); return FALSE; } + bool const_item() const override { return 0; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("sleep") }; + return name; + } + table_map used_tables() const override { return used_tables_cache | RAND_TABLE_BIT; } - bool 
is_expensive() { return 1; } - longlong val_int(); - bool check_vcol_func_processor(void *arg) + bool is_expensive() override { return 1; } + longlong val_int() override; + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sleep>(thd, this); } }; @@ -2533,7 +2835,7 @@ class Item_udf_func :public Item_func } protected: udf_handler udf; - bool is_expensive_processor(void *arg) { return TRUE; } + bool is_expensive_processor(void *arg) override { return TRUE; } class VDec_udf: public Dec_ptr_and_buffer { @@ -2552,18 +2854,22 @@ public: Item_func(thd), udf(udf_arg) {} Item_udf_func(THD *thd, udf_func *udf_arg, List<Item> &list): Item_func(thd, list), udf(udf_arg) {} - const char *func_name() const { return udf.name(); } - enum Functype functype() const { return UDF_FUNC; } - bool fix_fields(THD *thd, Item **ref) + LEX_CSTRING func_name_cstring() const override + { + const char *tmp= udf.name(); + return { tmp, strlen(tmp) }; + } + enum Functype functype() const override { return UDF_FUNC; } + bool fix_fields(THD *thd, Item **ref) override { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); bool res= udf.fix_fields(thd, this, arg_count, args); set_non_deterministic_if_needed(); - fixed= 1; + base_flags|= item_base_t::FIXED; return res; } void fix_num_length_and_dec(); - void update_used_tables() + void update_used_tables() override { /* TODO: Make a member in UDF_INIT and return if a UDF is deterministic or @@ -2612,27 +2918,28 @@ public: set_non_deterministic_if_needed(); } } - void cleanup(); - bool eval_not_null_tables(void *opt_arg) + void cleanup() override; + bool eval_not_null_tables(void *opt_arg) override { not_null_tables_cache= 0; return 0; } - bool find_not_null_fields(table_map allowed) + bool find_not_null_fields(table_map allowed) override { return false; } - bool 
is_expensive() { return 1; } - virtual void print(String *str, enum_query_type query_type); - bool check_vcol_func_processor(void *arg) + bool is_expensive() override { return 1; } + void print(String *str, enum_query_type query_type) override; + bool check_vcol_func_processor(void *arg) override { - return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC); + return mark_unsupported_function(func_name(), "()", arg, + VCOL_NON_DETERMINISTIC); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); } - bool excl_dep_on_grouping_fields(st_select_lex *sel) + bool excl_dep_on_grouping_fields(st_select_lex *sel) override { return false; } }; @@ -2645,13 +2952,13 @@ class Item_func_udf_float :public Item_udf_func Item_func_udf_float(THD *thd, udf_func *udf_arg, List<Item> &list): Item_udf_func(thd, udf_arg, list) {} - longlong val_int() + longlong val_int() override { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); return Converter_double_to_longlong(Item_func_udf_float::val_real(), unsigned_flag).result(); } - my_decimal *val_decimal(my_decimal *dec_buf) + my_decimal *val_decimal(my_decimal *dec_buf) override { double res=val_real(); if (null_value) @@ -2659,11 +2966,12 @@ class Item_func_udf_float :public Item_udf_func double2my_decimal(E_DEC_FATAL_ERROR, res, dec_buf); return dec_buf; } - double val_real(); - String *val_str(String *str); - const Type_handler *type_handler() const { return &type_handler_double; } - bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; } - Item *get_copy(THD *thd) + double val_real() override; + String *val_str(String *str) override; + const Type_handler *type_handler() const override + { return &type_handler_double; } + bool fix_length_and_dec() override { fix_num_length_and_dec(); return FALSE; } + Item *get_copy(THD *thd) 
override { return get_item_copy<Item_func_udf_float>(thd, this); } }; @@ -2676,21 +2984,21 @@ public: Item_func_udf_int(THD *thd, udf_func *udf_arg, List<Item> &list): Item_udf_func(thd, udf_arg, list) {} - longlong val_int(); - double val_real() { return (double) Item_func_udf_int::val_int(); } - my_decimal *val_decimal(my_decimal *decimal_value) + longlong val_int() override; + double val_real() override { return (double) Item_func_udf_int::val_int(); } + my_decimal *val_decimal(my_decimal *decimal_value) override { return val_decimal_from_int(decimal_value); } - String *val_str(String *str); - const Type_handler *type_handler() const + String *val_str(String *str) override; + const Type_handler *type_handler() const override { if (unsigned_flag) return &type_handler_ulonglong; return &type_handler_slonglong; } - bool fix_length_and_dec() { decimals= 0; max_length= 21; return FALSE; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override { decimals= 0; max_length= 21; return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_udf_int>(thd, this); } }; @@ -2702,22 +3010,23 @@ public: Item_udf_func(thd, udf_arg) {} Item_func_udf_decimal(THD *thd, udf_func *udf_arg, List<Item> &list): Item_udf_func(thd, udf_arg, list) {} - longlong val_int() + longlong val_int() override { return VDec_udf(this, &udf).to_longlong(unsigned_flag); } - double val_real() + double val_real() override { return VDec_udf(this, &udf).to_double(); } - my_decimal *val_decimal(my_decimal *); - String *val_str(String *str) + my_decimal *val_decimal(my_decimal *) override; + String *val_str(String *str) override { return VDec_udf(this, &udf).to_string_round(str, decimals); } - const Type_handler *type_handler() const { return &type_handler_newdecimal; } - bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; } - Item *get_copy(THD *thd) + const Type_handler *type_handler() const override + { return &type_handler_newdecimal; } + bool 
fix_length_and_dec() override { fix_num_length_and_dec(); return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_udf_decimal>(thd, this); } }; @@ -2729,8 +3038,8 @@ public: Item_udf_func(thd, udf_arg) {} Item_func_udf_str(THD *thd, udf_func *udf_arg, List<Item> &list): Item_udf_func(thd, udf_arg, list) {} - String *val_str(String *); - double val_real() + String *val_str(String *) override; + double val_real() override { int err_not_used; char *end_not_used; @@ -2739,14 +3048,14 @@ public: return res ? res->charset()->strntod((char*) res->ptr(), res->length(), &end_not_used, &err_not_used) : 0.0; } - longlong val_int() + longlong val_int() override { int err_not_used; String *res; res=val_str(&str_value); return res ? res->charset()->strntoll(res->ptr(),res->length(),10, (char**) 0, &err_not_used) : (longlong) 0; } - my_decimal *val_decimal(my_decimal *dec_buf) + my_decimal *val_decimal(my_decimal *dec_buf) override { String *res=val_str(&str_value); if (!res) @@ -2754,9 +3063,10 @@ public: string2my_decimal(E_DEC_FATAL_ERROR, res, dec_buf); return dec_buf; } - const Type_handler *type_handler() const { return string_type_handler(); } - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + const Type_handler *type_handler() const override + { return string_type_handler(); } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_udf_str>(thd, this); } }; @@ -2769,7 +3079,7 @@ class Item_func_udf_float :public Item_real_func Item_real_func(thd) {} Item_func_udf_float(THD *thd, udf_func *udf_arg, List<Item> &list): Item_real_func(thd, list) {} - double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed()); return 0.0; } }; @@ -2780,8 +3090,9 @@ public: Item_int_func(thd) {} Item_func_udf_int(THD *thd, udf_func *udf_arg, List<Item> &list): Item_int_func(thd, list) {} - const Type_handler *type_handler() const { return &type_handler_slonglong; } - 
longlong val_int() { DBUG_ASSERT(fixed == 1); return 0; } + const Type_handler *type_handler() const override + { return &type_handler_slonglong; } + longlong val_int() { DBUG_ASSERT(fixed()); return 0; } }; @@ -2792,8 +3103,9 @@ public: Item_int_func(thd) {} Item_func_udf_decimal(THD *thd, udf_func *udf_arg, List<Item> &list): Item_int_func(thd, list) {} - const Type_handler *type_handler() const { return &type_handler_slonglong; } - my_decimal *val_decimal(my_decimal *) { DBUG_ASSERT(fixed == 1); return 0; } + const Type_handler *type_handler() const override + { return &type_handler_slonglong; } + my_decimal *val_decimal(my_decimal *) { DBUG_ASSERT(fixed()); return 0; } }; @@ -2805,10 +3117,11 @@ public: Item_func_udf_str(THD *thd, udf_func *udf_arg, List<Item> &list): Item_func(thd, list) {} String *val_str(String *) - { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } - double val_real() { DBUG_ASSERT(fixed == 1); null_value= 1; return 0.0; } - longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } - bool fix_length_and_dec() { maybe_null=1; max_length=0; return FALSE; } + { DBUG_ASSERT(fixed()); null_value=1; return 0; } + double val_real() { DBUG_ASSERT(fixed()); null_value= 1; return 0.0; } + longlong val_int() { DBUG_ASSERT(fixed()); null_value=1; return 0; } + bool fix_length_and_dec() override + { base_flags|= item_base_t::MAYBE_NULL; max_length=0; return FALSE; } }; #endif /* HAVE_DLOPEN */ @@ -2823,13 +3136,13 @@ class Item_func_lock :public Item_long_func Item_func_lock(THD *thd): Item_long_func(thd) { } Item_func_lock(THD *thd, Item *a): Item_long_func(thd, a) {} Item_func_lock(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} - table_map used_tables() const + table_map used_tables() const override { return used_tables_cache | RAND_TABLE_BIT; } - bool const_item() const { return 0; } - bool is_expensive() { return 1; } - bool check_vcol_func_processor(void *arg) + bool const_item() const override { return 0; } + bool 
is_expensive() override { return 1; } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } @@ -2838,17 +3151,26 @@ class Item_func_lock :public Item_long_func class Item_func_get_lock final :public Item_func_lock { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_general_purpose_string(func_name()) || - args[1]->check_type_can_return_real(func_name()); + return args[0]->check_type_general_purpose_string(func_name_cstring()) || + args[1]->check_type_can_return_real(func_name_cstring()); } String value; public: Item_func_get_lock(THD *thd, Item *a, Item *b) :Item_func_lock(thd, a, b) {} longlong val_int() final; - const char *func_name() const final { return "get_lock"; } - bool fix_length_and_dec() { max_length= 1; maybe_null= 1; return FALSE; } + LEX_CSTRING func_name_cstring() const override final + { + static LEX_CSTRING name= {STRING_WITH_LEN("get_lock") }; + return name; + } + bool fix_length_and_dec() override + { + max_length= 1; + set_maybe_null(); + return FALSE; + } Item *get_copy(THD *thd) final { return get_item_copy<Item_func_get_lock>(thd, this); } }; @@ -2860,7 +3182,11 @@ public: Item_func_release_all_locks(THD *thd): Item_func_lock(thd) { unsigned_flag= 1; } longlong val_int() final; - const char *func_name() const final { return "release_all_locks"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("release_all_locks") }; + return name; + } Item *get_copy(THD *thd) final { return get_item_copy<Item_func_release_all_locks>(thd, this); } }; @@ -2868,14 +3194,23 @@ public: class Item_func_release_lock final :public Item_func_lock { - bool check_arguments() const - { return args[0]->check_type_general_purpose_string(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_general_purpose_string(func_name_cstring()); } String value; public: 
Item_func_release_lock(THD *thd, Item *a): Item_func_lock(thd, a) {} longlong val_int() final; - const char *func_name() const { return "release_lock"; } - bool fix_length_and_dec() { max_length= 1; maybe_null= 1; return FALSE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("release_lock") }; + return name; + } + bool fix_length_and_dec() override + { + max_length= 1; + set_maybe_null(); + return FALSE; + } Item *get_copy(THD *thd) final { return get_item_copy<Item_func_release_lock>(thd, this); } }; @@ -2885,13 +3220,13 @@ public: class Item_master_pos_wait :public Item_longlong_func { - bool check_arguments() const + bool check_arguments() const override { return - args[0]->check_type_general_purpose_string(func_name()) || - args[1]->check_type_can_return_int(func_name()) || - (arg_count > 2 && args[2]->check_type_can_return_int(func_name())) || - (arg_count > 3 && args[3]->check_type_general_purpose_string(func_name())); + args[0]->check_type_general_purpose_string(func_name_cstring()) || + args[1]->check_type_can_return_int(func_name_cstring()) || + (arg_count > 2 && args[2]->check_type_can_return_int(func_name_cstring())) || + (arg_count > 3 && args[3]->check_type_general_purpose_string(func_name_cstring())); } String value; public: @@ -2901,24 +3236,33 @@ public: Item_longlong_func(thd, a, b, c) {} Item_master_pos_wait(THD *thd, Item *a, Item *b, Item *c, Item *d): Item_longlong_func(thd, a, b, c, d) {} - longlong val_int(); - const char *func_name() const { return "master_pos_wait"; } - bool fix_length_and_dec() { max_length=21; maybe_null=1; return FALSE; } - bool check_vcol_func_processor(void *arg) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("master_pos_wait") }; + return name; + } + bool fix_length_and_dec() override + { + max_length=21; + set_maybe_null(); + return FALSE; + } + bool check_vcol_func_processor(void *arg) 
override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_master_pos_wait>(thd, this); } }; class Item_master_gtid_wait :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_general_purpose_string(func_name()) || - (arg_count > 1 && args[1]->check_type_can_return_real(func_name())); + return args[0]->check_type_general_purpose_string(func_name_cstring()) || + (arg_count > 1 && args[1]->check_type_can_return_real(func_name_cstring())); } String value; public: @@ -2926,14 +3270,18 @@ public: :Item_long_func(thd, a) {} Item_master_gtid_wait(THD *thd, Item *a, Item *b) :Item_long_func(thd, a, b) {} - longlong val_int(); - const char *func_name() const { return "master_gtid_wait"; } - bool fix_length_and_dec() { max_length=2; return FALSE; } - bool check_vcol_func_processor(void *arg) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("master_gtid_wait") }; + return name; + } + bool fix_length_and_dec() override { max_length=2; return FALSE; } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_master_gtid_wait>(thd, this); } }; @@ -2962,7 +3310,7 @@ public: Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, const Tmp_field_param *param) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return create_tmp_field_ex_from_handler(root, table, src, param, type_handler()); } @@ -3012,46 +3360,51 @@ public: null_item(item->null_item), save_result(item->save_result) {} - enum Functype functype() const { return SUSERVAR_FUNC; } - double val_real(); - longlong val_int(); - String *val_str(String *str); - my_decimal 
*val_decimal(my_decimal *); - double val_result(); - longlong val_int_result(); - bool val_bool_result(); - String *str_result(String *str); - my_decimal *val_decimal_result(my_decimal *); - bool is_null_result(); + enum Functype functype() const override { return SUSERVAR_FUNC; } + double val_real() override; + longlong val_int() override; + String *val_str(String *str) override; + my_decimal *val_decimal(my_decimal *) override; + double val_result() override; + longlong val_int_result() override; + bool val_bool_result() override; + String *str_result(String *str) override; + my_decimal *val_decimal_result(my_decimal *) override; + bool is_null_result() override; bool update_hash(void *ptr, size_t length, enum Item_result type, CHARSET_INFO *cs, bool unsigned_arg); - bool send(Protocol *protocol, st_value *buffer); - void make_send_field(THD *thd, Send_field *tmp_field); + bool send(Protocol *protocol, st_value *buffer) override; + void make_send_field(THD *thd, Send_field *tmp_field) override; bool check(bool use_result_field); void save_item_result(Item *item); bool update(); - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec(); - void print(String *str, enum_query_type query_type); - enum precedence precedence() const { return ASSIGN_PRECEDENCE; } + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override; + void print(String *str, enum_query_type query_type) override; + enum precedence precedence() const override { return ASSIGN_PRECEDENCE; } void print_as_stmt(String *str, enum_query_type query_type); - const char *func_name() const { return "set_user_var"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("set_user_var") }; + return name; + } int save_in_field(Field *field, bool no_conversions, bool can_use_result_field); - int save_in_field(Field *field, bool no_conversions) + int save_in_field(Field *field, bool no_conversions) override { return 
save_in_field(field, no_conversions, 1); } void save_org_in_field(Field *field, fast_field_copier data __attribute__ ((__unused__))) - { (void)save_in_field(field, 1, 0); } - bool register_field_in_read_map(void *arg); - bool register_field_in_bitmap(void *arg); + override + { (void) save_in_field(field, 1, 0); } + bool register_field_in_read_map(void *arg) override; + bool register_field_in_bitmap(void *arg) override; bool set_entry(THD *thd, bool create_if_not_exists); - void cleanup(); - Item *get_copy(THD *thd) + void cleanup() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_set_user_var>(thd, this); } - bool excl_dep_on_table(table_map tab_map) { return false; } + bool excl_dep_on_table(table_map tab_map) override { return false; } }; @@ -3061,30 +3414,34 @@ class Item_func_get_user_var :public Item_func_user_var, public: Item_func_get_user_var(THD *thd, const LEX_CSTRING *a): Item_func_user_var(thd, a) {} - enum Functype functype() const { return GUSERVAR_FUNC; } + enum Functype functype() const override { return GUSERVAR_FUNC; } LEX_CSTRING get_name() { return name; } - double val_real(); - longlong val_int(); - my_decimal *val_decimal(my_decimal*); - String *val_str(String* str); - bool fix_length_and_dec(); - virtual void print(String *str, enum_query_type query_type); + double val_real() override; + longlong val_int() override; + my_decimal *val_decimal(my_decimal*) override; + String *val_str(String* str) override; + bool fix_length_and_dec() override; + void print(String *str, enum_query_type query_type) override; /* We must always return variables as strings to guard against selects of type select @t1:=1,@t1,@t:="hello",@t from foo where (@t1:= t2.b) */ - const char *func_name() const { return "get_user_var"; } - bool const_item() const; - table_map used_tables() const + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("get_user_var") }; + return name; + } + bool const_item() 
const override; + table_map used_tables() const override { return const_item() ? 0 : RAND_TABLE_BIT; } - bool eq(const Item *item, bool binary_cmp) const; - Item *get_copy(THD *thd) + bool eq(const Item *item, bool binary_cmp) const override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_get_user_var>(thd, this); } private: - bool set_value(THD *thd, sp_rcontext *ctx, Item **it); + bool set_value(THD *thd, sp_rcontext *ctx, Item **it) override; public: - Settable_routine_parameter *get_settable_routine_parameter() + Settable_routine_parameter *get_settable_routine_parameter() override { return this; } @@ -3113,54 +3470,55 @@ public: org_name= *a; set_name(thd, a->str, a->length, system_charset_info); } - Load_data_outvar *get_load_data_outvar() + Load_data_outvar *get_load_data_outvar() override { return this; } - bool load_data_set_null(THD *thd, const Load_data_param *param) + bool load_data_set_null(THD *thd, const Load_data_param *param) override { set_null_value(param->charset()); return false; } - bool load_data_set_no_data(THD *thd, const Load_data_param *param) + bool load_data_set_no_data(THD *thd, const Load_data_param *param) override { set_null_value(param->charset()); return false; } bool load_data_set_value(THD *thd, const char *pos, uint length, - const Load_data_param *param) + const Load_data_param *param) override { set_value(pos, length, param->charset()); return false; } - void load_data_print_for_log_event(THD *thd, String *to) const; - bool load_data_add_outvar(THD *thd, Load_data_param *param) const + void load_data_print_for_log_event(THD *thd, String *to) const override; + bool load_data_add_outvar(THD *thd, Load_data_param *param) const override { return param->add_outvar_user_var(thd); } - uint load_data_fixed_length() const + uint load_data_fixed_length() const override { return 0; } Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, - const Tmp_field_param *param) + const Tmp_field_param 
*param) override { DBUG_ASSERT(0); return NULL; } /* We should return something different from FIELD_ITEM here */ - enum Type type() const { return CONST_ITEM;} - double val_real(); - longlong val_int(); - String *val_str(String *str); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - my_decimal *val_decimal(my_decimal *decimal_buffer); + enum Type type() const override { return CONST_ITEM;} + double val_real() override; + longlong val_int() override; + String *val_str(String *str) override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + my_decimal *val_decimal(my_decimal *decimal_buffer) override; /* fix_fields() binds variable name with its entry structure */ - bool fix_fields(THD *thd, Item **ref); + bool fix_fields(THD *thd, Item **ref) override; void set_null_value(CHARSET_INFO* cs); void set_value(const char *str, uint length, CHARSET_INFO* cs); - const Type_handler *type_handler() const { return &type_handler_double; } - Item *get_copy(THD *thd) + const Type_handler *type_handler() const override + { return &type_handler_double; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_user_var_as_out_param>(thd, this); } }; @@ -3188,24 +3546,28 @@ public: enum_var_type var_type_arg, LEX_CSTRING *component_arg, const char *name_arg, size_t name_len_arg); - enum Functype functype() const { return GSYSVAR_FUNC; } - void update_null_value(); - bool fix_length_and_dec(); - void print(String *str, enum_query_type query_type); - bool const_item() const { return true; } - table_map used_tables() const { return 0; } - const Type_handler *type_handler() const; - double val_real(); - longlong val_int(); - String* val_str(String*); - my_decimal *val_decimal(my_decimal *dec_buf) + enum Functype functype() const override { return GSYSVAR_FUNC; } + void update_null_value() override; + bool fix_length_and_dec() override; + void print(String *str, enum_query_type query_type) override; + bool const_item() const 
override { return true; } + table_map used_tables() const override { return 0; } + const Type_handler *type_handler() const override; + double val_real() override; + longlong val_int() override; + String* val_str(String*) override; + my_decimal *val_decimal(my_decimal *dec_buf) override { return val_decimal_from_real(dec_buf); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); } /* TODO: fix to support views */ - const char *func_name() const { return "get_system_var"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("get_system_var") }; + return name; + } /** Indicates whether this system variable is written to the binlog or not. @@ -3215,11 +3577,11 @@ public: @return true if the variable is written to the binlog, false otherwise. */ bool is_written_to_binlog(); - bool eq(const Item *item, bool binary_cmp) const; + bool eq(const Item *item, bool binary_cmp) const override; - void cleanup(); - bool check_vcol_func_processor(void *arg); - Item *get_copy(THD *thd) + void cleanup() override; + bool check_vcol_func_processor(void *arg) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_get_system_var>(thd, this); } }; @@ -3229,7 +3591,7 @@ public: class Item_func_match :public Item_real_func { public: - uint key, flags; + uint key, match_flags; bool join_key; DTCollation cmp_collation; FT_INFO *ft_handler; @@ -3240,9 +3602,9 @@ public: String search_value; // key_item()'s value converted to cmp_collation Item_func_match(THD *thd, List<Item> &a, uint b): - Item_real_func(thd, a), key(0), flags(b), join_key(0), ft_handler(0), + Item_real_func(thd, a), key(0), match_flags(b), join_key(0), ft_handler(0), table(0), master(0), concat_ws(0) { } - void cleanup() + void cleanup() override { 
DBUG_ENTER("Item_func_match::cleanup"); Item_real_func::cleanup(); @@ -3253,34 +3615,38 @@ public: table= 0; // required by Item_func_match::eq() DBUG_VOID_RETURN; } - bool is_expensive_processor(void *arg) { return TRUE; } - enum Functype functype() const { return FT_FUNC; } - const char *func_name() const { return "match"; } - bool eval_not_null_tables(void *opt_arg) + bool is_expensive_processor(void *arg) override { return TRUE; } + enum Functype functype() const override { return FT_FUNC; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("match") }; + return name; + } + bool eval_not_null_tables(void *opt_arg) override { not_null_tables_cache= 0; return 0; } - bool find_not_null_fields(table_map allowed) + bool find_not_null_fields(table_map allowed) override { return false; } - bool fix_fields(THD *thd, Item **ref); - bool eq(const Item *, bool binary_cmp) const; + bool fix_fields(THD *thd, Item **ref) override; + bool eq(const Item *, bool binary_cmp) const override; /* The following should be safe, even if we compare doubles */ - longlong val_int() { DBUG_ASSERT(fixed == 1); return val_real() != 0.0; } - double val_real(); - virtual void print(String *str, enum_query_type query_type); + longlong val_int() override { DBUG_ASSERT(fixed()); return val_real() != 0.0; } + double val_real() override; + void print(String *str, enum_query_type query_type) override; bool fix_index(); bool init_search(THD *thd, bool no_order); - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function("match ... 
against()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_match>(thd, this); } - Item *build_clone(THD *thd) { return 0; } + Item *build_clone(THD *thd) override { return 0; } private: /** Check whether storage engine for given table, @@ -3303,7 +3669,7 @@ private: bool allows_search_on_non_indexed_columns(TABLE* table_arg) { // Only Boolean search may support non_indexed columns - if (!(flags & FT_BOOL)) + if (!(match_flags & FT_BOOL)) return false; DBUG_ASSERT(table_arg && table_arg->file); @@ -3323,54 +3689,69 @@ class Item_func_bit_xor : public Item_func_bit_operator public: Item_func_bit_xor(THD *thd, Item *a, Item *b) :Item_func_bit_operator(thd, a, b) {} - bool fix_length_and_dec(); - const char *func_name() const { return "^"; } - enum precedence precedence() const { return BITXOR_PRECEDENCE; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("^") }; + return name; + } + enum precedence precedence() const override { return BITXOR_PRECEDENCE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_bit_xor>(thd, this); } }; class Item_func_is_free_lock :public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_general_purpose_string(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_general_purpose_string(func_name_cstring()); } String value; public: Item_func_is_free_lock(THD *thd, Item *a): Item_long_func(thd, a) {} - longlong val_int(); - const char *func_name() const { return "is_free_lock"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override { - decimals=0; max_length=1; maybe_null=1; + static LEX_CSTRING name= {STRING_WITH_LEN("is_free_lock") }; + return name; + } + bool fix_length_and_dec() override + { + decimals=0; + max_length=1; 
+ set_maybe_null(); return FALSE; } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_is_free_lock>(thd, this); } }; class Item_func_is_used_lock :public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_general_purpose_string(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_general_purpose_string(func_name_cstring()); } String value; public: Item_func_is_used_lock(THD *thd, Item *a): Item_long_func(thd, a) {} - longlong val_int(); - const char *func_name() const { return "is_used_lock"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("is_used_lock") }; + return name; + } + bool fix_length_and_dec() override { - decimals=0; max_length=10; maybe_null=1; + decimals=0; max_length=10; + set_maybe_null(); return FALSE; } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_is_used_lock>(thd, this); } }; @@ -3415,14 +3796,23 @@ class Item_func_row_count :public Item_longlong_func { public: Item_func_row_count(THD *thd): Item_longlong_func(thd) {} - longlong val_int(); - const char *func_name() const { return "row_count"; } - bool fix_length_and_dec() { decimals= 0; maybe_null=0; return FALSE; } - bool check_vcol_func_processor(void *arg) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("row_count") }; + return name; + } + bool fix_length_and_dec() override + { + decimals= 0; + 
base_flags&= ~item_base_t::MAYBE_NULL; + return FALSE; + } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_row_count>(thd, this); } }; @@ -3442,10 +3832,10 @@ private: bool execute(); protected: - bool is_expensive_processor(void *arg) + bool is_expensive_processor(void *arg) override { return is_expensive(); } - bool check_arguments() const + bool check_arguments() const override { // sp_prepare_func_item() checks that the number of columns is correct return false; @@ -3461,53 +3851,53 @@ public: virtual ~Item_func_sp() {} - void update_used_tables(); + void update_used_tables() override; - void cleanup(); + void cleanup() override; - const char *func_name() const; + LEX_CSTRING func_name_cstring() const override; - const Type_handler *type_handler() const; + const Type_handler *type_handler() const override; Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, - const Tmp_field_param *param); - Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table) + const Tmp_field_param *param) override; + Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table) override { return result_type() != STRING_RESULT ? 
sp_result_field : create_table_field_from_handler(root, table); } - void make_send_field(THD *thd, Send_field *tmp_field); + void make_send_field(THD *thd, Send_field *tmp_field) override; - longlong val_int() + longlong val_int() override { if (execute()) return (longlong) 0; return sp_result_field->val_int(); } - double val_real() + double val_real() override { if (execute()) return 0.0; return sp_result_field->val_real(); } - my_decimal *val_decimal(my_decimal *dec_buf) + my_decimal *val_decimal(my_decimal *dec_buf) override { if (execute()) return NULL; return sp_result_field->val_decimal(dec_buf); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { if (execute()) return true; return sp_result_field->get_date(ltime, fuzzydate); } - String *val_str(String *str) + String *val_str(String *str) override { String buf; char buff[20]; @@ -3526,26 +3916,26 @@ public: return str; } - bool val_native(THD *thd, Native *to) + bool val_native(THD *thd, Native *to) override { if (execute()) return true; - return null_value= sp_result_field->val_native(to); + return (null_value= sp_result_field->val_native(to)); } - void update_null_value() + void update_null_value() override { execute(); } - virtual bool change_context_processor(void *cntx) - { context= (Name_resolution_context *)cntx; return FALSE; } + bool change_context_processor(void *cntx) override + { context= (Name_resolution_context *)cntx; return FALSE; } - virtual enum Functype functype() const { return FUNC_SP; } + enum Functype functype() const override { return FUNC_SP; } - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec(void); - bool is_expensive(); + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec(void) override; + bool is_expensive() override; inline Field *get_sp_result_field() { @@ -3556,20 +3946,20 @@ public: return m_name; } - bool check_vcol_func_processor(void 
*arg); - bool limit_index_condition_pushdown_processor(void *opt_arg) + bool check_vcol_func_processor(void *arg) override; + bool limit_index_condition_pushdown_processor(void *opt_arg) override { return TRUE; } - Item *get_copy(THD *) { return 0; } - bool eval_not_null_tables(void *opt_arg) + Item *get_copy(THD *) override { return 0; } + bool eval_not_null_tables(void *opt_arg) override { not_null_tables_cache= 0; return 0; } - bool excl_dep_on_grouping_fields(st_select_lex *sel) + bool excl_dep_on_grouping_fields(st_select_lex *sel) override { return false; } - bool find_not_null_fields(table_map allowed) + bool find_not_null_fields(table_map allowed) override { return false; } @@ -3580,14 +3970,23 @@ class Item_func_found_rows :public Item_longlong_func { public: Item_func_found_rows(THD *thd): Item_longlong_func(thd) {} - longlong val_int(); - const char *func_name() const { return "found_rows"; } - bool fix_length_and_dec() { decimals= 0; maybe_null=0; return FALSE; } - bool check_vcol_func_processor(void *arg) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("found_rows") }; + return name; + } + bool fix_length_and_dec() override + { + decimals= 0; + base_flags&= ~item_base_t::MAYBE_NULL; + return FALSE; + } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_found_rows>(thd, this); } }; @@ -3596,17 +3995,21 @@ class Item_func_oracle_sql_rowcount :public Item_longlong_func { public: Item_func_oracle_sql_rowcount(THD *thd): Item_longlong_func(thd) {} - longlong val_int(); - const char *func_name() const { return "SQL%ROWCOUNT"; } - void print(String *str, enum_query_type query_type) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= 
{STRING_WITH_LEN("SQL%ROWCOUNT") }; + return name; + } + void print(String *str, enum_query_type query_type) override { - str->append(func_name()); + str->append(func_name_cstring()); } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_oracle_sql_rowcount>(thd, this); } }; @@ -3615,44 +4018,55 @@ class Item_func_sqlcode: public Item_long_func { public: Item_func_sqlcode(THD *thd): Item_long_func(thd) { } - longlong val_int(); - const char *func_name() const { return "SQLCODE"; } - void print(String *str, enum_query_type query_type) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("SQLCODE") }; + return name; + } + void print(String *str, enum_query_type query_type) override { - str->append(func_name()); + str->append(func_name_cstring()); } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { - maybe_null= null_value= false; + base_flags&= ~item_base_t::MAYBE_NULL; + null_value= false; max_length= 11; return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sqlcode>(thd, this); } }; void uuid_short_init(); +ulonglong server_uuid_value(); class Item_func_uuid_short :public Item_longlong_func { public: Item_func_uuid_short(THD *thd): Item_longlong_func(thd) {} - const char *func_name() const { return "uuid_short"; } - longlong val_int(); - bool const_item() const { return false; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("uuid_short") }; + return 
name; + } + longlong val_int() override; + bool const_item() const override { return false; } + bool fix_length_and_dec() override { max_length= 21; unsigned_flag=1; return FALSE; } - table_map used_tables() const { return RAND_TABLE_BIT; } - bool check_vcol_func_processor(void *arg) + table_map used_tables() const override { return RAND_TABLE_BIT; } + bool check_vcol_func_processor(void *arg) override { - return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC); + return mark_unsupported_function(func_name(), "()", arg, + VCOL_NON_DETERMINISTIC); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_uuid_short>(thd, this); } }; @@ -3663,32 +4077,37 @@ protected: Item *last_value; public: Item_func_last_value(THD *thd, List<Item> &list): Item_func(thd, list) {} - double val_real(); - longlong val_int(); - String *val_str(String *); - my_decimal *val_decimal(my_decimal *); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool val_native(THD *thd, Native *); - bool fix_length_and_dec(); - const char *func_name() const { return "last_value"; } - const Type_handler *type_handler() const { return last_value->type_handler(); } - bool eval_not_null_tables(void *) + double val_real() override; + longlong val_int() override; + String *val_str(String *) override; + my_decimal *val_decimal(my_decimal *) override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool val_native(THD *thd, Native *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("last_value") }; + return name; + } + const Type_handler *type_handler() const override + { return last_value->type_handler(); } + bool eval_not_null_tables(void *) override { not_null_tables_cache= 0; return 0; } - bool find_not_null_fields(table_map allowed) + bool find_not_null_fields(table_map allowed) override { 
return false; } - bool const_item() const { return 0; } + bool const_item() const override { return 0; } void evaluate_sideeffects(); - void update_used_tables() + void update_used_tables() override { Item_func::update_used_tables(); - maybe_null= last_value->maybe_null; + copy_flags(last_value, item_base_t::MAYBE_NULL); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_last_value>(thd, this); } }; @@ -3703,13 +4122,17 @@ protected: public: Item_func_nextval(THD *thd, TABLE_LIST *table_list_arg): Item_longlong_func(thd), table_list(table_list_arg) {} - longlong val_int(); - const char *func_name() const { return "nextval"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("nextval") }; + return name; + } + bool fix_length_and_dec() override { unsigned_flag= 0; max_length= MAX_BIGINT_WIDTH; - maybe_null= 1; /* In case of errors */ + set_maybe_null(); /* In case of errors */ return FALSE; } /* @@ -3728,11 +4151,11 @@ public: table= table_list->next_local->table; } } - bool const_item() const { return 0; } - Item *get_copy(THD *thd) + bool const_item() const override { return 0; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_nextval>(thd, this); } - void print(String *str, enum_query_type query_type); - bool check_vcol_func_processor(void *arg) + void print(String *str, enum_query_type query_type) override; + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_NEXTVAL); } @@ -3746,9 +4169,13 @@ class Item_func_lastval :public Item_func_nextval public: Item_func_lastval(THD *thd, TABLE_LIST *table_list_arg): Item_func_nextval(thd, table_list_arg) {} - longlong val_int(); - const char *func_name() const { return "lastval"; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + 
{ + static LEX_CSTRING name= {STRING_WITH_LEN("lastval") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_lastval>(thd, this); } }; @@ -3766,10 +4193,14 @@ public: : Item_func_nextval(thd, table_list_arg), nextval(nextval_arg), round(round_arg), is_used(is_used_arg) {} - longlong val_int(); - const char *func_name() const { return "setval"; } - void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("setval") }; + return name; + } + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_setval>(thd, this); } }; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index cd0504b89ee..0010e86557d 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -47,14 +47,14 @@ bool Item_geometry_func::fix_length_and_dec() collation.set(&my_charset_bin); decimals=0; max_length= (uint32) UINT_MAX32; - maybe_null= 1; + set_maybe_null(); return FALSE; } String *Item_func_geometry_from_text::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Geometry_buffer buffer; String arg_val; String *wkt= args[0]->val_str_ascii(&arg_val); @@ -81,7 +81,7 @@ String *Item_func_geometry_from_text::val_str(String *str) String *Item_func_geometry_from_wkb::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *wkb; Geometry_buffer buffer; @@ -100,12 +100,12 @@ String *Item_func_geometry_from_wkb::val_str(String *str) srid= (uint32)args[1]->val_int(); str->set_charset(&my_charset_bin); + str->length(0); if (str->reserve(SRID_SIZE, 512)) { null_value= TRUE; /* purecov: inspected */ return 0; /* purecov: inspected */ } - str->length(0); str->q_append(srid); if ((null_value= (args[0]->null_value || @@ -115,13 +115,9 @@ String 
*Item_func_geometry_from_wkb::val_str(String *str) } -void report_json_error_ex(String *js, json_engine_t *je, - const char *fname, int n_param, - Sql_condition::enum_warning_level lv); - String *Item_func_geometry_from_json::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Geometry_buffer buffer; String *js= args[0]->val_str_ascii(&tmp_js); uint32 srid= 0; @@ -148,9 +144,9 @@ String *Item_func_geometry_from_json::val_str(String *str) srid= (uint32)args[2]->val_int(); str->set_charset(&my_charset_bin); + str->length(0); if (str->reserve(SRID_SIZE, 512)) return 0; - str->length(0); str->q_append(srid); json_scan_start(&je, js->charset(), (const uchar *) js->ptr(), @@ -178,7 +174,8 @@ String *Item_func_geometry_from_json::val_str(String *str) my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeomFromGeoJSON"); break; default: - report_json_error_ex(js, &je, func_name(), 0, Sql_condition::WARN_LEVEL_WARN); + report_json_error_ex(js->ptr(), &je, func_name(), 0, + Sql_condition::WARN_LEVEL_WARN); return NULL; } @@ -196,7 +193,7 @@ String *Item_func_geometry_from_json::val_str(String *str) String *Item_func_as_wkt::val_str_ascii(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *swkb= args[0]->val_str(&arg_val); Geometry_buffer buffer; @@ -221,14 +218,14 @@ bool Item_func_as_wkt::fix_length_and_dec() { collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); max_length= (uint32) UINT_MAX32; - maybe_null= 1; + set_maybe_null(); return FALSE; } String *Item_func_as_wkb::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *swkb= args[0]->val_str(&arg_val); Geometry_buffer buffer; @@ -248,14 +245,14 @@ bool Item_func_as_geojson::fix_length_and_dec() { collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); max_length=MAX_BLOB_WIDTH; - maybe_null= 1; + set_maybe_null(); return FALSE; } String *Item_func_as_geojson::val_str_ascii(String 
*str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *swkb= args[0]->val_str(&arg_val); uint max_dec= FLOATING_POINT_DECIMALS; @@ -296,7 +293,7 @@ String *Item_func_as_geojson::val_str_ascii(String *str) goto error; } - if ((geom->as_json(str, max_dec, &dummy) || str->append("}", 1))) + if ((geom->as_json(str, max_dec, &dummy) || str->append('}'))) goto error; return str; @@ -309,7 +306,7 @@ error: String *Item_func_geometry_type::val_str_ascii(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *swkb= args[0]->val_str(str); Geometry_buffer buffer; Geometry *geom= NULL; @@ -328,7 +325,7 @@ String *Item_func_geometry_type::val_str_ascii(String *str) String *Item_func_envelope::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *swkb= args[0]->val_str(&arg_val); Geometry_buffer buffer; @@ -440,7 +437,7 @@ int Item_func_boundary::Transporter::start_collection(int n_objects) String *Item_func_boundary::val_str(String *str_value) { DBUG_ENTER("Item_func_boundary::val_str"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *swkb= args[0]->val_str(&arg_val); @@ -459,9 +456,9 @@ String *Item_func_boundary::val_str(String *str_value) goto mem_error; str_value->set_charset(&my_charset_bin); + str_value->length(0); if (str_value->reserve(SRID_SIZE, 512)) goto mem_error; - str_value->length(0); str_value->q_append(srid); if (!Geometry::create_from_opresult(&buffer, str_value, res_receiver)) @@ -478,7 +475,7 @@ mem_error: String *Item_func_centroid::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *swkb= args[0]->val_str(&arg_val); Geometry_buffer buffer; @@ -490,9 +487,9 @@ String *Item_func_centroid::val_str(String *str) return 0; str->set_charset(&my_charset_bin); + str->length(0); if (str->reserve(SRID_SIZE, 512)) return 0; - str->length(0); srid= uint4korr(swkb->ptr()); str->q_append(srid); @@ -539,7 +536,7 
@@ String *Item_func_convexhull::val_str(String *str_value) Gcalc_heap::Info *cur_pi; DBUG_ENTER("Item_func_convexhull::val_str"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *swkb= args[0]->val_str(&tmp_value); if ((null_value= @@ -619,9 +616,9 @@ String *Item_func_convexhull::val_str(String *str_value) build_result: str_value->set_charset(&my_charset_bin); + str_value->length(0); if (str_value->reserve(SRID_SIZE, 512)) goto mem_error; - str_value->length(0); str_value->q_append(srid); if (!Geometry::create_from_opresult(&buffer, str_value, res_receiver)) @@ -648,7 +645,7 @@ String *Item_func_convexhull::val_str(String *str_value) ch_node *left_first, *left_cur, *right_first, *right_cur; DBUG_ENTER("Item_func_convexhull::val_str"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *swkb= args[0]->val_str(&tmp_value); if ((null_value= @@ -765,9 +762,9 @@ skip_point:; build_result: str_value->set_charset(&my_charset_bin); + str_value->length(0); if (str_value->reserve(SRID_SIZE, 512)) goto mem_error; - str_value->length(0); str_value->q_append(srid); if (!Geometry::create_from_opresult(&buffer, str_value, res_receiver)) @@ -789,7 +786,7 @@ mem_error: String *Item_func_spatial_decomp::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *swkb= args[0]->val_str(&arg_val); Geometry_buffer buffer; @@ -803,9 +800,9 @@ String *Item_func_spatial_decomp::val_str(String *str) srid= uint4korr(swkb->ptr()); str->set_charset(&my_charset_bin); + str->length(0); if (str->reserve(SRID_SIZE, 512)) goto err; - str->length(0); str->q_append(srid); switch (decomp_func) { case SP_STARTPOINT: @@ -836,7 +833,7 @@ err: String *Item_func_spatial_decomp_n::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_val; String *swkb= args[0]->val_str(&arg_val); long n= (long) args[1]->val_int(); @@ -850,10 +847,10 @@ String *Item_func_spatial_decomp_n::val_str(String *str) return 0; 
str->set_charset(&my_charset_bin); + str->length(0); if (str->reserve(SRID_SIZE, 512)) goto err; srid= uint4korr(swkb->ptr()); - str->length(0); str->q_append(srid); switch (decomp_func_n) { @@ -895,7 +892,7 @@ err: String *Item_func_point::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double x= args[0]->val_real(); double y= args[1]->val_real(); uint32 srid= 0; @@ -928,7 +925,7 @@ String *Item_func_point::val_str(String *str) String *Item_func_spatial_collection::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String arg_value; uint i; uint32 srid= 0; @@ -1130,35 +1127,35 @@ Item_func_spatial_rel::get_mm_leaf(RANGE_OPT_PARAM *param, } -const char *Item_func_spatial_mbr_rel::func_name() const +LEX_CSTRING Item_func_spatial_mbr_rel::func_name_cstring() const { switch (spatial_rel) { case SP_CONTAINS_FUNC: - return "mbrcontains"; + return { STRING_WITH_LEN("mbrcontains") }; case SP_WITHIN_FUNC: - return "mbrwithin"; + return { STRING_WITH_LEN("mbrwithin") } ; case SP_EQUALS_FUNC: - return "mbrequals"; + return { STRING_WITH_LEN("mbrequals") }; case SP_DISJOINT_FUNC: - return "mbrdisjoint"; + return { STRING_WITH_LEN("mbrdisjoint") }; case SP_INTERSECTS_FUNC: - return "mbrintersects"; + return { STRING_WITH_LEN("mbrintersects") }; case SP_TOUCHES_FUNC: - return "mbrtouches"; + return { STRING_WITH_LEN("mbrtouches") }; case SP_CROSSES_FUNC: - return "mbrcrosses"; + return { STRING_WITH_LEN("mbrcrosses") }; case SP_OVERLAPS_FUNC: - return "mbroverlaps"; + return { STRING_WITH_LEN("mbroverlaps") }; default: DBUG_ASSERT(0); // Should never happened - return "mbrsp_unknown"; + return { STRING_WITH_LEN("mbrsp_unknown") }; } } longlong Item_func_spatial_mbr_rel::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res1= args[0]->val_str(&tmp_value1); String *res2= args[1]->val_str(&tmp_value2); Geometry_buffer buffer1, buffer2; @@ -1201,28 +1198,28 @@ longlong Item_func_spatial_mbr_rel::val_int() } 
-const char *Item_func_spatial_precise_rel::func_name() const +LEX_CSTRING Item_func_spatial_precise_rel::func_name_cstring() const { switch (spatial_rel) { case SP_CONTAINS_FUNC: - return "st_contains"; + return { STRING_WITH_LEN("st_contains") }; case SP_WITHIN_FUNC: - return "st_within"; + return { STRING_WITH_LEN("st_within") }; case SP_EQUALS_FUNC: - return "st_equals"; + return { STRING_WITH_LEN("st_equals") }; case SP_DISJOINT_FUNC: - return "st_disjoint"; + return { STRING_WITH_LEN("st_disjoint") }; case SP_INTERSECTS_FUNC: - return "st_intersects"; + return { STRING_WITH_LEN("st_intersects") }; case SP_TOUCHES_FUNC: - return "st_touches"; + return { STRING_WITH_LEN("st_touches") }; case SP_CROSSES_FUNC: - return "st_crosses"; + return { STRING_WITH_LEN("st_crosses") }; case SP_OVERLAPS_FUNC: - return "st_overlaps"; + return { STRING_WITH_LEN("st_overlaps") } ; default: DBUG_ASSERT(0); // Should never happened - return "sp_unknown"; + return { STRING_WITH_LEN("sp_unknown") }; } } @@ -1367,7 +1364,7 @@ public: longlong Item_func_spatial_relate::val_int() { DBUG_ENTER("Item_func_spatial_relate::val_int"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Geometry_ptr_with_buffer_and_mbr g1, g2; int result= 0; @@ -1404,7 +1401,7 @@ exit: longlong Item_func_spatial_precise_rel::val_int() { DBUG_ENTER("Item_func_spatial_precise_rel::val_int"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Geometry_ptr_with_buffer_and_mbr g1, g2; int result= 0; uint shape_a, shape_b; @@ -1541,7 +1538,7 @@ Item_func_spatial_operation::~Item_func_spatial_operation() String *Item_func_spatial_operation::val_str(String *str_value) { DBUG_ENTER("Item_func_spatial_operation::val_str"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Geometry_ptr_with_buffer_and_mbr g1, g2; uint32 srid= 0; Gcalc_operation_transporter trn(&func, &collector); @@ -1578,9 +1575,9 @@ String *Item_func_spatial_operation::val_str(String *str_value) str_value->set_charset(&my_charset_bin); + 
str_value->length(0); if (str_value->reserve(SRID_SIZE, 512)) goto exit; - str_value->length(0); str_value->q_append(srid); if (!Geometry::create_from_opresult(&g1.buffer, str_value, res_receiver)) @@ -1594,20 +1591,20 @@ exit: } -const char *Item_func_spatial_operation::func_name() const +LEX_CSTRING Item_func_spatial_operation::func_name_cstring() const { switch (spatial_op) { case Gcalc_function::op_intersection: - return "st_intersection"; + return { STRING_WITH_LEN("st_intersection") }; case Gcalc_function::op_difference: - return "st_difference"; + return { STRING_WITH_LEN("st_difference") }; case Gcalc_function::op_union: - return "st_union"; + return { STRING_WITH_LEN("st_union") }; case Gcalc_function::op_symdifference: - return "st_symdifference"; + return { STRING_WITH_LEN("st_symdifference") }; default: DBUG_ASSERT(0); // Should never happen - return "sp_unknown"; + return { STRING_WITH_LEN("sp_unknown") }; } } @@ -1993,7 +1990,7 @@ int Item_func_buffer::Transporter::complete_ring() String *Item_func_buffer::val_str(String *str_value) { DBUG_ENTER("Item_func_buffer::val_str"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *obj= args[0]->val_str(str_value); double dist= args[1]->val_real(); Geometry_buffer buffer; @@ -2048,9 +2045,9 @@ String *Item_func_buffer::val_str(String *str_value) return_empty_result: str_value->set_charset(&my_charset_bin); + str_value->length(0); if (str_value->reserve(SRID_SIZE, 512)) goto mem_error; - str_value->length(0); str_value->q_append(srid); if (!Geometry::create_from_opresult(&buffer, str_value, res_receiver)) @@ -2068,7 +2065,7 @@ mem_error: longlong Item_func_isempty::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String tmp; String *swkb= args[0]->val_str(&tmp); Geometry_buffer buffer; @@ -2090,7 +2087,7 @@ longlong Item_func_issimple::val_int() const char *c_end; DBUG_ENTER("Item_func_issimple::val_int"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); null_value= 0; if 
((args[0]->null_value || @@ -2153,7 +2150,7 @@ mem_error: longlong Item_func_isclosed::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String tmp; String *swkb= args[0]->val_str(&tmp); Geometry_buffer buffer; @@ -2177,7 +2174,7 @@ longlong Item_func_isclosed::val_int() longlong Item_func_isring::val_int() { /* It's actually a combination of two functions - IsClosed and IsSimple */ - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String tmp; String *swkb= args[0]->val_str(&tmp); Geometry_buffer buffer; @@ -2208,7 +2205,7 @@ longlong Item_func_isring::val_int() longlong Item_func_dimension::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint32 dim= 0; // In case of error String *swkb= args[0]->val_str(&value); Geometry_buffer buffer; @@ -2225,7 +2222,7 @@ longlong Item_func_dimension::val_int() longlong Item_func_numinteriorring::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint32 num= 0; // In case of error String *swkb= args[0]->val_str(&value); Geometry_buffer buffer; @@ -2241,7 +2238,7 @@ longlong Item_func_numinteriorring::val_int() longlong Item_func_numgeometries::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint32 num= 0; // In case of errors String *swkb= args[0]->val_str(&value); Geometry_buffer buffer; @@ -2257,7 +2254,7 @@ longlong Item_func_numgeometries::val_int() longlong Item_func_numpoints::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint32 num= 0; // In case of errors String *swkb= args[0]->val_str(&value); Geometry_buffer buffer; @@ -2274,7 +2271,7 @@ longlong Item_func_numpoints::val_int() double Item_func_x::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double res= 0.0; // In case of errors String *swkb= args[0]->val_str(&value); Geometry_buffer buffer; @@ -2290,7 +2287,7 @@ double Item_func_x::val_real() double Item_func_y::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double res= 0; // In case of errors String *swkb= 
args[0]->val_str(&value); Geometry_buffer buffer; @@ -2306,7 +2303,7 @@ double Item_func_y::val_real() double Item_func_area::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double res= 0; // In case of errors String *swkb= args[0]->val_str(&value); Geometry_buffer buffer; @@ -2322,7 +2319,7 @@ double Item_func_area::val_real() double Item_func_glength::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double res= 0; // In case of errors String *swkb= args[0]->val_str(&value); Geometry_buffer buffer; @@ -2339,7 +2336,7 @@ double Item_func_glength::val_real() longlong Item_func_srid::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *swkb= args[0]->val_str(&value); Geometry_buffer buffer; @@ -2366,7 +2363,7 @@ double Item_func_distance::val_real() Gcalc_operation_transporter trn(&func, &collector); DBUG_ENTER("Item_func_distance::val_real"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res1= args[0]->val_str(&tmp_value1); String *res2= args[1]->val_str(&tmp_value2); Geometry_buffer buffer1, buffer2; @@ -2663,9 +2660,6 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, String *Item_func_pointonsurface::val_str(String *str) { Gcalc_operation_transporter trn(&func, &collector); - - DBUG_ENTER("Item_func_pointonsurface::val_str"); - DBUG_ASSERT(fixed == 1); String *res= args[0]->val_str(&tmp_value); Geometry_buffer buffer; Geometry *g; @@ -2675,6 +2669,8 @@ String *Item_func_pointonsurface::val_str(String *str) String *result= 0; const Gcalc_scan_iterator::point *pprev= NULL; uint32 srid; + DBUG_ENTER("Item_func_pointonsurface::val_str"); + DBUG_ASSERT(fixed()); null_value= 1; if ((args[0]->null_value || @@ -2741,10 +2737,10 @@ String *Item_func_pointonsurface::val_str(String *str) goto exit; str->set_charset(&my_charset_bin); + str->length(0); if (str->reserve(SRID_SIZE, 512)) goto mem_error; - str->length(0); srid= uint4korr(res->ptr()); str->q_append(srid); diff --git 
a/sql/item_geofunc.h b/sql/item_geofunc.h index 0ccb5edc9bb..e7f465170ab 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -42,8 +42,9 @@ public: Item_geometry_func(THD *thd, Item *a, Item *b, Item *c): Item_str_func(thd, a, b, c) {} Item_geometry_func(THD *thd, List<Item> &list): Item_str_func(thd, list) {} - bool fix_length_and_dec(); - const Type_handler *type_handler() const { return &type_handler_geometry; } + bool fix_length_and_dec() override; + const Type_handler *type_handler() const override + { return &type_handler_geometry; } }; @@ -54,10 +55,10 @@ class Item_real_func_args_geometry: public Item_real_func { protected: String value; - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count == 1); - return Type_handler_geometry::check_type_geom_or_binary(func_name(), + return Type_handler_geometry::check_type_geom_or_binary(func_name_cstring(), args[0]); } public: @@ -71,10 +72,10 @@ public: */ class Item_long_func_args_geometry: public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count == 1); - return Type_handler_geometry::check_type_geom_or_binary(func_name(), + return Type_handler_geometry::check_type_geom_or_binary(func_name_cstring(), args[0]); } protected: @@ -92,10 +93,10 @@ class Item_bool_func_args_geometry: public Item_bool_func { protected: String value; - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count == 1); - return Type_handler_geometry::check_type_geom_or_binary(func_name(), + return Type_handler_geometry::check_type_geom_or_binary(func_name_cstring(), args[0]); } public: @@ -110,10 +111,10 @@ public: class Item_str_ascii_func_args_geometry: public Item_str_ascii_func { protected: - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count >= 1); - return Type_handler_geometry::check_type_geom_or_binary(func_name(), + return 
Type_handler_geometry::check_type_geom_or_binary(func_name_cstring(), args[0]); } public: @@ -132,10 +133,10 @@ public: class Item_binary_func_args_geometry: public Item_str_func { protected: - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count >= 1); - return Type_handler_geometry::check_type_geom_or_binary(func_name(), + return Type_handler_geometry::check_type_geom_or_binary(func_name_cstring(), args[0]); } public: @@ -150,10 +151,10 @@ public: class Item_geometry_func_args_geometry: public Item_geometry_func { protected: - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count >= 1); - return Type_handler_geometry::check_type_geom_or_binary(func_name(), + return Type_handler_geometry::check_type_geom_or_binary(func_name_cstring(), args[0]); } public: @@ -170,10 +171,10 @@ public: class Item_real_func_args_geometry_geometry: public Item_real_func { protected: - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count >= 2); - return Type_handler_geometry::check_types_geom_or_binary(func_name(), + return Type_handler_geometry::check_types_geom_or_binary(func_name_cstring(), args, 0, 2); } public: @@ -189,10 +190,10 @@ class Item_bool_func_args_geometry_geometry: public Item_bool_func { protected: String value; - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count >= 2); - return Type_handler_geometry::check_types_geom_or_binary(func_name(), + return Type_handler_geometry::check_types_geom_or_binary(func_name_cstring(), args, 0, 2); } public: @@ -203,36 +204,44 @@ public: class Item_func_geometry_from_text: public Item_geometry_func { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_general_purpose_string(func_name()) || + return args[0]->check_type_general_purpose_string(func_name_cstring()) || check_argument_types_can_return_int(1, MY_MIN(2, arg_count)); } 
public: Item_func_geometry_from_text(THD *thd, Item *a): Item_geometry_func(thd, a) {} Item_func_geometry_from_text(THD *thd, Item *a, Item *srid): Item_geometry_func(thd, a, srid) {} - const char *func_name() const { return "st_geometryfromtext"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_geometryfromtext") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_geometry_from_text>(thd, this); } }; class Item_func_geometry_from_wkb: public Item_geometry_func { - bool check_arguments() const + bool check_arguments() const override { return - Type_handler_geometry::check_type_geom_or_binary(func_name(), args[0]) || + Type_handler_geometry::check_type_geom_or_binary(func_name_cstring(), args[0]) || check_argument_types_can_return_int(1, MY_MIN(2, arg_count)); } public: Item_func_geometry_from_wkb(THD *thd, Item *a): Item_geometry_func(thd, a) {} Item_func_geometry_from_wkb(THD *thd, Item *a, Item *srid): Item_geometry_func(thd, a, srid) {} - const char *func_name() const { return "st_geometryfromwkb"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_geometryfromwkb") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_geometry_from_wkb>(thd, this); } }; @@ -240,10 +249,10 @@ public: class Item_func_geometry_from_json: public Item_geometry_func { String tmp_js; - bool check_arguments() const + bool check_arguments() const override { // TODO: check with Alexey, for better args[1] and args[2] type control - return args[0]->check_type_general_purpose_string(func_name()) || + return args[0]->check_type_general_purpose_string(func_name_cstring()) || check_argument_types_traditional_scalar(1, MY_MIN(3, 
arg_count)); } public: @@ -252,9 +261,13 @@ public: Item_geometry_func(thd, js, opt) {} Item_func_geometry_from_json(THD *thd, Item *js, Item *opt, Item *srid): Item_geometry_func(thd, js, opt, srid) {} - const char *func_name() const { return "st_geomfromgeojson"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_geomfromgeojson") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_geometry_from_json>(thd, this); } }; @@ -264,10 +277,14 @@ class Item_func_as_wkt: public Item_str_ascii_func_args_geometry public: Item_func_as_wkt(THD *thd, Item *a) :Item_str_ascii_func_args_geometry(thd, a) {} - const char *func_name() const { return "st_astext"; } - String *val_str_ascii(String *); - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_astext") }; + return name; + } + String *val_str_ascii(String *) override; + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_as_wkt>(thd, this); } }; @@ -276,25 +293,30 @@ class Item_func_as_wkb: public Item_binary_func_args_geometry public: Item_func_as_wkb(THD *thd, Item *a) :Item_binary_func_args_geometry(thd, a) {} - const char *func_name() const { return "st_aswkb"; } - String *val_str(String *); - const Type_handler *type_handler() const { return &type_handler_long_blob; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_aswkb") }; + return name; + } + String *val_str(String *) override; + const Type_handler *type_handler() const override + { return &type_handler_long_blob; } + bool fix_length_and_dec() override { collation.set(&my_charset_bin); decimals=0; max_length= (uint32) UINT_MAX32; - maybe_null= 1; + 
set_maybe_null(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_as_wkb>(thd, this); } }; class Item_func_as_geojson: public Item_str_ascii_func_args_geometry { - bool check_arguments() const + bool check_arguments() const override { // TODO: check with Alexey, for better args[1] and args[2] type control return Item_str_ascii_func_args_geometry::check_arguments() || @@ -307,10 +329,14 @@ public: :Item_str_ascii_func_args_geometry(thd, js, max_dec_digits) {} Item_func_as_geojson(THD *thd, Item *js, Item *max_dec_digits, Item *opt) :Item_str_ascii_func_args_geometry(thd, js, max_dec_digits, opt) {} - const char *func_name() const { return "st_asgeojson"; } - bool fix_length_and_dec(); - String *val_str_ascii(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_asgeojson") }; + return name; + } + bool fix_length_and_dec() override; + String *val_str_ascii(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_as_geojson>(thd, this); } }; @@ -320,16 +346,20 @@ class Item_func_geometry_type: public Item_str_ascii_func_args_geometry public: Item_func_geometry_type(THD *thd, Item *a) :Item_str_ascii_func_args_geometry(thd, a) {} - String *val_str_ascii(String *); - const char *func_name() const { return "st_geometrytype"; } - bool fix_length_and_dec() + String *val_str_ascii(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_geometrytype") }; + return name; + } + bool fix_length_and_dec() override { // "GeometryCollection" is the longest fix_length_and_charset(20, default_charset()); - maybe_null= 1; + set_maybe_null(); return FALSE; }; - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_geometry_type>(thd, this); } }; @@ -362,9 +392,13 @@ public: 
:Item_geometry_func_args_geometry(thd, a), res_heap(8192, sizeof(ch_node)) {} - const char *func_name() const { return "st_convexhull"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_convexhull") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_convexhull>(thd, this); } }; @@ -374,13 +408,17 @@ class Item_func_centroid: public Item_geometry_func_args_geometry public: Item_func_centroid(THD *thd, Item *a) :Item_geometry_func_args_geometry(thd, a) {} - const char *func_name() const { return "st_centroid"; } - String *val_str(String *); - const Type_handler *type_handler() const + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_centroid") }; + return name; + } + String *val_str(String *) override; + const Type_handler *type_handler() const override { return &type_handler_point; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_centroid>(thd, this); } }; @@ -389,13 +427,17 @@ class Item_func_envelope: public Item_geometry_func_args_geometry public: Item_func_envelope(THD *thd, Item *a) :Item_geometry_func_args_geometry(thd, a) {} - const char *func_name() const { return "st_envelope"; } - String *val_str(String *); - const Type_handler *type_handler() const + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_envelope") }; + return name; + } + String *val_str(String *) override; + const Type_handler *type_handler() const override { return &type_handler_polygon; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_envelope>(thd, this); } }; @@ -427,28 +469,36 @@ class Item_func_boundary: public Item_geometry_func_args_geometry public: Item_func_boundary(THD *thd, Item *a) 
:Item_geometry_func_args_geometry(thd, a) {} - const char *func_name() const { return "st_boundary"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_boundary") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_boundary>(thd, this); } }; class Item_func_point: public Item_geometry_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_real(0, 2); } public: Item_func_point(THD *thd, Item *a, Item *b): Item_geometry_func(thd, a, b) {} Item_func_point(THD *thd, Item *a, Item *b, Item *srid): Item_geometry_func(thd, a, b, srid) {} - const char *func_name() const { return "point"; } - String *val_str(String *); - const Type_handler *type_handler() const + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("point") }; + return name; + } + String *val_str(String *) override; + const Type_handler *type_handler() const override { return &type_handler_point; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_point>(thd, this); } }; @@ -458,64 +508,71 @@ class Item_func_spatial_decomp: public Item_geometry_func_args_geometry public: Item_func_spatial_decomp(THD *thd, Item *a, Item_func::Functype ft): Item_geometry_func_args_geometry(thd, a) { decomp_func = ft; } - const char *func_name() const + LEX_CSTRING func_name_cstring() const override { - switch (decomp_func) - { + static LEX_CSTRING startpoint= {STRING_WITH_LEN("st_startpoint") }; + static LEX_CSTRING endpoint= {STRING_WITH_LEN("st_endpoint") }; + static LEX_CSTRING exteriorring= {STRING_WITH_LEN("st_exteriorring") }; + static LEX_CSTRING unknown= {STRING_WITH_LEN("spatial_decomp_unknown") }; + switch (decomp_func) { case SP_STARTPOINT: - return "st_startpoint"; + return 
startpoint; case SP_ENDPOINT: - return "st_endpoint"; + return endpoint; case SP_EXTERIORRING: - return "st_exteriorring"; + return exteriorring; default: DBUG_ASSERT(0); // Should never happened - return "spatial_decomp_unknown"; + return unknown; } } - String *val_str(String *); - Item *get_copy(THD *thd) + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_spatial_decomp>(thd, this); } }; class Item_func_spatial_decomp_n: public Item_geometry_func_args_geometry { enum Functype decomp_func_n; - bool check_arguments() const + bool check_arguments() const override { return Item_geometry_func_args_geometry::check_arguments() || - args[1]->check_type_can_return_int(func_name()); + args[1]->check_type_can_return_int(func_name_cstring()); } public: Item_func_spatial_decomp_n(THD *thd, Item *a, Item *b, Item_func::Functype ft) :Item_geometry_func_args_geometry(thd, a, b), decomp_func_n(ft) { } - const char *func_name() const + LEX_CSTRING func_name_cstring() const override { - switch (decomp_func_n) - { + static LEX_CSTRING pointn= {STRING_WITH_LEN("st_pointn") }; + static LEX_CSTRING geometryn= {STRING_WITH_LEN("st_geometryn") }; + static LEX_CSTRING interiorringn= {STRING_WITH_LEN("st_interiorringn") }; + static LEX_CSTRING unknown= {STRING_WITH_LEN("spatial_decomp_n_unknown") }; + + switch (decomp_func_n) { case SP_POINTN: - return "st_pointn"; + return pointn; case SP_GEOMETRYN: - return "st_geometryn"; + return geometryn; case SP_INTERIORRINGN: - return "st_interiorringn"; + return interiorringn; default: DBUG_ASSERT(0); // Should never happened - return "spatial_decomp_n_unknown"; + return unknown; } } - String *val_str(String *); - Item *get_copy(THD *thd) + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_spatial_decomp_n>(thd, this); } }; class Item_func_spatial_collection: public Item_geometry_func { - bool check_arguments() const + bool
check_arguments() const override { - return Type_handler_geometry::check_types_geom_or_binary(func_name(), args, + return Type_handler_geometry::check_types_geom_or_binary(func_name_cstring(), args, 0, arg_count); } enum Geometry::wkbType coll_type; @@ -528,14 +585,14 @@ public: coll_type=ct; item_type=it; } - String *val_str(String *); - bool fix_length_and_dec() + String *val_str(String *) override; + bool fix_length_and_dec() override { if (Item_geometry_func::fix_length_and_dec()) return TRUE; for (unsigned int i= 0; i < arg_count; ++i) { - if (args[i]->is_fixed() && args[i]->field_type() != MYSQL_TYPE_GEOMETRY) + if (args[i]->fixed() && args[i]->field_type() != MYSQL_TYPE_GEOMETRY) { String str; args[i]->print(&str, QT_NO_DATA_EXPANSION); @@ -558,12 +615,16 @@ public: Geometry::wkb_geometrycollection, Geometry::wkb_point) { } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return &type_handler_geometrycollection; } - const char *func_name() const { return "geometrycollection"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("geometrycollection") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_geometrycollection>(thd, this); } }; @@ -576,9 +637,14 @@ public: Geometry::wkb_linestring, Geometry::wkb_point) { } - const Type_handler *type_handler() const { return &type_handler_linestring; } - const char *func_name() const { return "linestring"; } - Item *get_copy(THD *thd) + const Type_handler *type_handler() const override + { return &type_handler_linestring; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("linestring") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_linestring>(thd, this); } }; @@ -591,9 +657,14 @@ public: Geometry::wkb_polygon, Geometry::wkb_linestring) { } - const Type_handler 
*type_handler() const { return &type_handler_polygon; } - const char *func_name() const { return "polygon"; } - Item *get_copy(THD *thd) + const Type_handler *type_handler() const override + { return &type_handler_polygon; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("polygon") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_polygon>(thd, this); } }; @@ -606,12 +677,16 @@ public: Geometry::wkb_multilinestring, Geometry::wkb_linestring) { } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return &type_handler_multilinestring; } - const char *func_name() const { return "multilinestring"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("multilinestring") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_multilinestring>(thd, this); } }; @@ -624,12 +699,16 @@ public: Geometry::wkb_multipoint, Geometry::wkb_point) { } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return &type_handler_multipoint; } - const char *func_name() const { return "multipoint"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("multipoint") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_multipoint>(thd, this); } }; @@ -642,12 +721,16 @@ public: Geometry::wkb_multipolygon, Geometry::wkb_polygon) { } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return &type_handler_multipolygon; } - const char *func_name() const { return "multipolygon"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("multipolygon") }; + return name; + } + Item *get_copy(THD 
*thd) override { return get_item_copy<Item_func_multipolygon>(thd, this); } }; @@ -664,21 +747,21 @@ protected: String tmp_value1, tmp_value2; SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, Field *field, KEY_PART *key_part, - Item_func::Functype type, Item *value); - bool check_arguments() const + Item_func::Functype type, Item *value) override; + bool check_arguments() const override { DBUG_ASSERT(arg_count >= 2); - return Type_handler_geometry::check_types_geom_or_binary(func_name(), + return Type_handler_geometry::check_types_geom_or_binary(func_name_cstring(), args, 0, 2); } public: Item_func_spatial_rel(THD *thd, Item *a, Item *b, enum Functype sp_rel): Item_bool_func2_with_rev(thd, a, b), spatial_rel(sp_rel) { - maybe_null= true; + set_maybe_null(); } - enum Functype functype() const { return spatial_rel; } - enum Functype rev_functype() const + enum Functype functype() const override { return spatial_rel; } + enum Functype rev_functype() const override { switch (spatial_rel) { @@ -690,16 +773,16 @@ public: return spatial_rel; } } - bool is_null() { (void) val_int(); return null_value; } + bool is_null() override { (void) val_int(); return null_value; } void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, - SARGABLE_PARAM **sargables) + SARGABLE_PARAM **sargables) override { return add_key_fields_optimize_op(join, key_fields, and_level, usable_tables, sargables, false); } - bool need_parentheses_in_default() { return false; } - Item *build_clone(THD *thd) { return 0; } + bool need_parentheses_in_default() override { return false; } + Item *build_clone(THD *thd) override { return 0; } }; @@ -709,9 +792,9 @@ public: Item_func_spatial_mbr_rel(THD *thd, Item *a, Item *b, enum Functype sp_rel): Item_func_spatial_rel(thd, a, b, sp_rel) { } - longlong val_int(); - const char *func_name() const; - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override; + Item *get_copy(THD 
*thd) override { return get_item_copy<Item_func_spatial_mbr_rel>(thd, this); } }; @@ -725,9 +808,9 @@ public: Item_func_spatial_precise_rel(THD *thd, Item *a, Item *b, enum Functype sp_rel): Item_func_spatial_rel(thd, a, b, sp_rel), collector() { } - longlong val_int(); - const char *func_name() const; - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_spatial_precise_rel>(thd, this); } }; @@ -738,19 +821,23 @@ class Item_func_spatial_relate: public Item_bool_func_args_geometry_geometry Gcalc_scan_iterator scan_it; Gcalc_function func; String tmp_value1, tmp_value2, tmp_matrix; - bool check_arguments() const + bool check_arguments() const override { return Item_bool_func_args_geometry_geometry::check_arguments() || - args[2]->check_type_general_purpose_string(func_name()); + args[2]->check_type_general_purpose_string(func_name_cstring()); } public: Item_func_spatial_relate(THD *thd, Item *a, Item *b, Item *matrix): Item_bool_func_args_geometry_geometry(thd, a, b, matrix) { } - longlong val_int(); - const char *func_name() const { return "st_relate"; } - bool need_parentheses_in_default() { return false; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_relate") }; + return name; + } + bool need_parentheses_in_default() override { return false; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_spatial_relate>(thd, this); } }; @@ -759,12 +846,12 @@ public: Spatial operations */ -class Item_func_spatial_operation: public Item_geometry_func +class Item_func_spatial_operation final: public Item_geometry_func { - bool check_arguments() const + bool check_arguments() const override { DBUG_ASSERT(arg_count >= 2); - return Type_handler_geometry::check_types_geom_or_binary(func_name(), + return 
Type_handler_geometry::check_types_geom_or_binary(func_name_cstring(), args, 0, 2); } public: @@ -781,23 +868,23 @@ public: Item_geometry_func(thd, a, b), spatial_op(sp_op) {} virtual ~Item_func_spatial_operation(); - String *val_str(String *); - const char *func_name() const; - virtual inline void print(String *str, enum_query_type query_type) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override; + void print(String *str, enum_query_type query_type) override { Item_func::print(str, query_type); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_spatial_operation>(thd, this); } }; -class Item_func_buffer: public Item_geometry_func_args_geometry +class Item_func_buffer final : public Item_geometry_func_args_geometry { - bool check_arguments() const + bool check_arguments() const override { return Item_geometry_func_args_geometry::check_arguments() || - args[1]->check_type_can_return_real(func_name()); + args[1]->check_type_can_return_real(func_name_cstring()); } protected: class Transporter : public Gcalc_operation_transporter @@ -842,9 +929,13 @@ protected: public: Item_func_buffer(THD *thd, Item *obj, Item *distance) :Item_geometry_func_args_geometry(thd, obj, distance) {} - const char *func_name() const { return "st_buffer"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_buffer") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_buffer>(thd, this); } }; @@ -854,11 +945,16 @@ class Item_func_isempty: public Item_bool_func_args_geometry public: Item_func_isempty(THD *thd, Item *a) :Item_bool_func_args_geometry(thd, a) {} - longlong val_int(); - const char *func_name() const { return "st_isempty"; } - bool fix_length_and_dec() { maybe_null= 1; return FALSE; } - bool need_parentheses_in_default() { 
return false; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_isempty") }; + return name; + } + bool fix_length_and_dec() override + { set_maybe_null(); return FALSE; } + bool need_parentheses_in_default() override { return false; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_isempty>(thd, this); } }; @@ -871,11 +967,15 @@ class Item_func_issimple: public Item_long_func_args_geometry public: Item_func_issimple(THD *thd, Item *a) :Item_long_func_args_geometry(thd, a) {} - longlong val_int(); - const char *func_name() const { return "st_issimple"; } - bool fix_length_and_dec() { decimals=0; max_length=2; return FALSE; } - uint decimal_precision() const { return 1; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_issimple") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=2; return FALSE; } + decimal_digits_t decimal_precision() const override { return 1; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_issimple>(thd, this); } }; @@ -884,11 +984,15 @@ class Item_func_isclosed: public Item_long_func_args_geometry public: Item_func_isclosed(THD *thd, Item *a) :Item_long_func_args_geometry(thd, a) {} - longlong val_int(); - const char *func_name() const { return "st_isclosed"; } - bool fix_length_and_dec() { decimals=0; max_length=2; return FALSE; } - uint decimal_precision() const { return 1; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_isclosed") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=2; return FALSE; } + decimal_digits_t decimal_precision() const override { return 1; } + Item *get_copy(THD *thd) override { 
return get_item_copy<Item_func_isclosed>(thd, this); } }; @@ -896,9 +1000,13 @@ class Item_func_isring: public Item_func_issimple { public: Item_func_isring(THD *thd, Item *a): Item_func_issimple(thd, a) {} - longlong val_int(); - const char *func_name() const { return "st_isring"; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_isring") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_isring>(thd, this); } }; @@ -907,10 +1015,15 @@ class Item_func_dimension: public Item_long_func_args_geometry public: Item_func_dimension(THD *thd, Item *a) :Item_long_func_args_geometry(thd, a) {} - longlong val_int(); - const char *func_name() const { return "st_dimension"; } - bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_dimension") }; + return name; + } + bool fix_length_and_dec() override + { max_length= 10; set_maybe_null(); return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dimension>(thd, this); } }; @@ -919,16 +1032,20 @@ class Item_func_x: public Item_real_func_args_geometry { public: Item_func_x(THD *thd, Item *a): Item_real_func_args_geometry(thd, a) {} - double val_real(); - const char *func_name() const { return "st_x"; } - bool fix_length_and_dec() + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_x") }; + return name; + } + bool fix_length_and_dec() override { if (Item_real_func::fix_length_and_dec()) return TRUE; - maybe_null= 1; + set_maybe_null(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_x>(thd, this); } }; @@ -937,16 +1054,20 @@ class 
Item_func_y: public Item_real_func_args_geometry { public: Item_func_y(THD *thd, Item *a): Item_real_func_args_geometry(thd, a) {} - double val_real(); - const char *func_name() const { return "st_y"; } - bool fix_length_and_dec() + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_y") }; + return name; + } + bool fix_length_and_dec() override { if (Item_real_func::fix_length_and_dec()) return TRUE; - maybe_null= 1; + set_maybe_null(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_y>(thd, this); } }; @@ -956,10 +1077,15 @@ class Item_func_numgeometries: public Item_long_func_args_geometry public: Item_func_numgeometries(THD *thd, Item *a) :Item_long_func_args_geometry(thd, a) {} - longlong val_int(); - const char *func_name() const { return "st_numgeometries"; } - bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_numgeometries") }; + return name; + } + bool fix_length_and_dec() override + { max_length= 10; set_maybe_null(); return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_numgeometries>(thd, this); } }; @@ -969,10 +1095,15 @@ class Item_func_numinteriorring: public Item_long_func_args_geometry public: Item_func_numinteriorring(THD *thd, Item *a) :Item_long_func_args_geometry(thd, a) {} - longlong val_int(); - const char *func_name() const { return "st_numinteriorrings"; } - bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_numinteriorrings") }; + return name; + } + bool fix_length_and_dec() override + { max_length= 10; 
set_maybe_null(); return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_numinteriorring>(thd, this); } }; @@ -982,10 +1113,15 @@ class Item_func_numpoints: public Item_long_func_args_geometry public: Item_func_numpoints(THD *thd, Item *a) :Item_long_func_args_geometry(thd, a) {} - longlong val_int(); - const char *func_name() const { return "st_numpoints"; } - bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_numpoints") }; + return name; + } + bool fix_length_and_dec() override + { max_length= 10; set_maybe_null(); return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_numpoints>(thd, this); } }; @@ -994,16 +1130,20 @@ class Item_func_area: public Item_real_func_args_geometry { public: Item_func_area(THD *thd, Item *a): Item_real_func_args_geometry(thd, a) {} - double val_real(); - const char *func_name() const { return "st_area"; } - bool fix_length_and_dec() + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_area") }; + return name; + } + bool fix_length_and_dec() override { if (Item_real_func::fix_length_and_dec()) return TRUE; - maybe_null= 1; + set_maybe_null(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_area>(thd, this); } }; @@ -1014,16 +1154,20 @@ class Item_func_glength: public Item_real_func_args_geometry public: Item_func_glength(THD *thd, Item *a) :Item_real_func_args_geometry(thd, a) {} - double val_real(); - const char *func_name() const { return "st_length"; } - bool fix_length_and_dec() + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_length") }; + return name; + } + bool 
fix_length_and_dec() override { if (Item_real_func::fix_length_and_dec()) return TRUE; - maybe_null= 1; + set_maybe_null(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_glength>(thd, this); } }; @@ -1033,10 +1177,15 @@ class Item_func_srid: public Item_long_func_args_geometry public: Item_func_srid(THD *thd, Item *a) :Item_long_func_args_geometry(thd, a) {} - longlong val_int(); - const char *func_name() const { return "srid"; } - bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; } - Item *get_copy(THD *thd) + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("srid") }; + return name; + } + bool fix_length_and_dec() override + { max_length= 10; set_maybe_null(); return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_srid>(thd, this); } }; @@ -1051,9 +1200,13 @@ class Item_func_distance: public Item_real_func_args_geometry_geometry public: Item_func_distance(THD *thd, Item *a, Item *b) :Item_real_func_args_geometry_geometry(thd, a, b) {} - double val_real(); - const char *func_name() const { return "st_distance"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_distance") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_distance>(thd, this); } }; @@ -1065,9 +1218,13 @@ class Item_func_sphere_distance: public Item_real_func public: Item_func_sphere_distance(THD *thd, List<Item> &list): Item_real_func(thd, list) {} - double val_real(); - const char *func_name() const { return "st_distance_sphere"; } - Item *get_copy(THD *thd) + double val_real() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_distance_sphere") }; + return name; + } + Item *get_copy(THD *thd) 
override { return get_item_copy<Item_func_sphere_distance>(thd, this); } }; @@ -1081,13 +1238,17 @@ class Item_func_pointonsurface: public Item_geometry_func_args_geometry public: Item_func_pointonsurface(THD *thd, Item *a) :Item_geometry_func_args_geometry(thd, a) {} - const char *func_name() const { return "st_pointonsurface"; } - String *val_str(String *); - const Type_handler *type_handler() const + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_pointonsurface") }; + return name; + } + String *val_str(String *) override; + const Type_handler *type_handler() const override { return &type_handler_point; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_pointonsurface>(thd, this); } }; @@ -1098,14 +1259,18 @@ class Item_func_gis_debug: public Item_long_func public: Item_func_gis_debug(THD *thd, Item *a): Item_long_func(thd, a) { null_value= false; } - bool fix_length_and_dec() { fix_char_length(10); return FALSE; } - const char *func_name() const { return "st_gis_debug"; } - longlong val_int(); - bool check_vcol_func_processor(void *arg) + bool fix_length_and_dec() override { fix_char_length(10); return FALSE; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("st_gis_debug") }; + return name; + } + longlong val_int() override; + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_gis_debug>(thd, this); } }; #endif diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc index 0f1fefac7b2..bbfd5f012eb 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -38,7 +38,7 @@ /* Compare ASCII string against the string with the specified character set. - Only compares the equality, case insencitive. + Only compares the equality, case insensitive. 
*/ static bool eq_ascii_string(const CHARSET_INFO *cs, const char *ascii, @@ -133,7 +133,7 @@ static const char tab_arr[TAB_SIZE_LIMIT+1]= " "; static int append_tab(String *js, int depth, int tab_size) { - if (js->append("\n", 1)) + if (js->append('\n')) return 1; for (int i=0; i<depth; i++) { @@ -264,12 +264,11 @@ static int json_nice(json_engine_t *je, String *nice_js, int curr_state= -1; int64_t value_len= 0; String curr_str{}; - + nice_js->length(0); nice_js->set_charset(je->s.cs); nice_js->alloc(je->s.str_end - je->s.c_str + 32); - - + DBUG_ASSERT(mode != Item_func_json_format::DETAILED || (tab_size >= 0 && tab_size <= TAB_SIZE_LIMIT)); @@ -392,7 +391,7 @@ handle_value: }; } while (json_scan_next(je) == 0); - return je->s.error; + return je->s.error || *je->killed_ptr; error: return 1; @@ -400,15 +399,15 @@ error: #define report_json_error(js, je, n_param) \ - report_json_error_ex(js, je, func_name(), n_param, \ + report_json_error_ex(js->ptr(), je, func_name(), n_param, \ Sql_condition::WARN_LEVEL_WARN) -void report_json_error_ex(String *js, json_engine_t *je, +void report_json_error_ex(const char *js, json_engine_t *je, const char *fname, int n_param, Sql_condition::enum_warning_level lv) { THD *thd= current_thd; - int position= (int)((const char *) je->s.c_str - js->ptr()); + int position= (int)((const char *) je->s.c_str - js); uint code; n_param++; @@ -438,16 +437,22 @@ void report_json_error_ex(String *js, json_engine_t *je, case JE_DEPTH: code= ER_JSON_DEPTH; - push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT, - n_param, fname, position); + if (lv == Sql_condition::WARN_LEVEL_ERROR) + my_error(code, MYF(0), JSON_DEPTH_LIMIT, n_param, fname, position); + else + push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT, + n_param, fname, position); return; default: return; } - push_warning_printf(thd, lv, code, ER_THD(thd, code), - n_param, fname, position); + if (lv == Sql_condition::WARN_LEVEL_ERROR) + my_error(code, 
MYF(0), n_param, fname, position); + else + push_warning_printf(thd, lv, code, ER_THD(thd, code), + n_param, fname, position); } @@ -457,15 +462,15 @@ void report_json_error_ex(String *js, json_engine_t *je, #define TRIVIAL_PATH_NOT_ALLOWED 3 #define report_path_error(js, je, n_param) \ - report_path_error_ex(js, je, func_name(), n_param,\ + report_path_error_ex(js->ptr(), je, func_name(), n_param,\ Sql_condition::WARN_LEVEL_WARN) -static void report_path_error_ex(String *ps, json_path_t *p, - const char *fname, int n_param, - Sql_condition::enum_warning_level lv) +void report_path_error_ex(const char *ps, json_path_t *p, + const char *fname, int n_param, + Sql_condition::enum_warning_level lv) { THD *thd= current_thd; - int position= (int)((const char *) p->s.c_str - ps->ptr() + 1); + int position= (int)((const char *) p->s.c_str - ps + 1); uint code; n_param++; @@ -484,8 +489,11 @@ static void report_path_error_ex(String *ps, json_path_t *p, case JE_DEPTH: code= ER_JSON_PATH_DEPTH; - push_warning_printf(thd, lv, code, ER_THD(thd, code), - JSON_DEPTH_LIMIT, n_param, fname, position); + if (lv == Sql_condition::WARN_LEVEL_ERROR) + my_error(code, MYF(0), JSON_DEPTH_LIMIT, n_param, fname, position); + else + push_warning_printf(thd, lv, code, ER_THD(thd, code), + JSON_DEPTH_LIMIT, n_param, fname, position); return; case NO_WILDCARD_ALLOWED: @@ -500,12 +508,14 @@ static void report_path_error_ex(String *ps, json_path_t *p, default: return; } - push_warning_printf(thd, lv, code, ER_THD(thd, code), - n_param, fname, position); + if (lv == Sql_condition::WARN_LEVEL_ERROR) + my_error(code, MYF(0), n_param, fname, position); + else + push_warning_printf(thd, lv, code, ER_THD(thd, code), + n_param, fname, position); } - /* Checks if the path has '.*' '[*]' or '**' constructions and sets the NO_WILDCARD_ALLOWED error if the case. 
@@ -539,7 +549,7 @@ bool Item_func_json_exists::fix_length_and_dec() { if (Item_bool_func::fix_length_and_dec()) return TRUE; - maybe_null= 1; + set_maybe_null(); path.set_constant_flag(args[1]->const_item()); return FALSE; } @@ -593,7 +603,7 @@ bool Item_func_json_value::fix_length_and_dec() collation.set(args[0]->collation); max_length= args[0]->max_length; set_constant_flag(args[1]->const_item()); - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -603,7 +613,7 @@ bool Item_func_json_query::fix_length_and_dec() collation.set(args[0]->collation); max_length= args[0]->max_length; set_constant_flag(args[1]->const_item()); - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -732,9 +742,9 @@ String *Item_func_json_quote::val_str(String *str) str->length(0); str->set_charset(&my_charset_utf8mb4_bin); - if (str->append("\"", 1) || + if (str->append('"') || st_append_escaped(str, s) || - str->append("\"", 1)) + str->append('"')) { /* Report an error. */ null_value= 1; @@ -750,7 +760,7 @@ bool Item_func_json_unquote::fix_length_and_dec() collation.set(&my_charset_utf8mb3_general_ci, DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); max_length= args[0]->max_length; - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -871,7 +881,7 @@ bool Item_func_json_extract::fix_length_and_dec() max_length= args[0]->max_length * (arg_count - 1); mark_constant_paths(paths, args+1, arg_count-1); - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -946,7 +956,7 @@ String *Item_func_json_extract::read_json(String *str, str->set_charset(js->charset()); str->length(0); - if (possible_multiple_values && str->append("[", 1)) + if (possible_multiple_values && str->append('[')) goto error; } @@ -1008,14 +1018,12 @@ String *Item_func_json_extract::read_json(String *str, goto return_null; } - if (possible_multiple_values && str->append("]", 1)) + if (possible_multiple_values && str->append(']')) goto error; /* Out of memory. 
*/ js= str; json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); - tmp_js.length(0); - tmp_js.set_charset(js->charset()); if (json_nice(&je, &tmp_js, Item_func_json_format::LOOSE)) goto error; @@ -1142,7 +1150,7 @@ bool Item_func_json_contains::fix_length_and_dec() { a2_constant= args[1]->const_item(); a2_parsed= FALSE; - maybe_null= 1; + set_maybe_null(); if (arg_count > 2) path.set_constant_flag(args[2]->const_item()); return Item_bool_func::fix_length_and_dec(); @@ -1275,7 +1283,7 @@ static int check_contains(json_engine_t *js, json_engine_t *value) if (value->value_type != JSON_VALUE_STRING) return FALSE; /* - TODO: make proper json-json comparison here that takes excapint + TODO: make proper json-json comparison here that takes excipient into account. */ return value->value_len == js->value_len && @@ -1401,7 +1409,7 @@ bool Item_func_json_contains_path::fix_length_and_dec() { ooa_constant= args[1]->const_item(); ooa_parsed= FALSE; - maybe_null= 1; + set_maybe_null(); mark_constant_paths(paths, args+2, arg_count-2); return Item_bool_func::fix_length_and_dec(); } @@ -1563,7 +1571,7 @@ longlong Item_func_json_contains_path::val_int() n_found= arg_count - 2; } else - n_found= 0; /* Jost to prevent 'uninitialized value' warnings */ + n_found= 0; /* Just to prevent 'uninitialized value' warnings */ result= 0; while (json_get_path_next(&je, &p) == 0) @@ -1613,7 +1621,7 @@ null_return: `CONVERT(arg USING charset)` is actually a general purpose string expression, not a JSON expression. 
*/ -static bool is_json_type(const Item *item) +bool is_json_type(const Item *item) { for ( ; ; ) { @@ -1661,15 +1669,15 @@ static int append_json_value(String *str, Item *item, String *tmp_val) if (item->result_type() == STRING_RESULT) { - return str->append("\"", 1) || + return str->append('"') || st_append_escaped(str, sv) || - str->append("\"", 1); + str->append('"'); } return st_append_escaped(str, sv); } append_null: - return str->append("null", 4); + return str->append(STRING_WITH_LEN("null")); } @@ -1707,15 +1715,15 @@ static int append_json_value_from_field(String *str, if (i->result_type() == STRING_RESULT) { - return str->append("\"", 1) || + return str->append('"') || st_append_escaped(str, sv) || - str->append("\"", 1); + str->append('"'); } return st_append_escaped(str, sv); } append_null: - return str->append("null", 4); + return str->append(STRING_WITH_LEN("null")); } @@ -1725,7 +1733,7 @@ static int append_json_keyname(String *str, Item *item, String *tmp_val) if (item->null_value) goto append_null; - return str->append("\"", 1) || + return str->append('"') || st_append_escaped(str, sv) || str->append("\": ", 3); @@ -1755,7 +1763,7 @@ bool Item_func_json_array::fix_length_and_dec() return TRUE; for (n_arg=0 ; n_arg < arg_count ; n_arg++) - char_length+= args[n_arg]->max_char_length() + 4; + char_length+= static_cast<ulonglong>(args[n_arg]->max_char_length()) + 4; fix_char_length_ulonglong(char_length); tmp_val.set_charset(collation.collation); @@ -1765,13 +1773,13 @@ bool Item_func_json_array::fix_length_and_dec() String *Item_func_json_array::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint n_arg; str->length(0); str->set_charset(collation.collation); - if (str->append("[", 1) || + if (str->append('[') || ((arg_count > 0) && append_json_value(str, args[0], &tmp_val))) goto err_return; @@ -1782,7 +1790,7 @@ String *Item_func_json_array::val_str(String *str) goto err_return; } - if (str->append("]", 1)) + if 
(str->append(']')) goto err_return; if (result_limit == 0) @@ -1814,11 +1822,12 @@ bool Item_func_json_array_append::fix_length_and_dec() for (n_arg= 1; n_arg < arg_count; n_arg+= 2) { paths[n_arg/2].set_constant_flag(args[n_arg]->const_item()); - char_length+= args[n_arg/2+1]->max_char_length() + 4; + char_length+= + static_cast<ulonglong>(args[n_arg+1]->max_char_length()) + 4; } fix_char_length_ulonglong(char_length); - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -1830,8 +1839,9 @@ String *Item_func_json_array_append::val_str(String *str) uint n_arg, n_path; size_t str_rest_len; const uchar *ar_end; + THD *thd= current_thd; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value= args[0]->null_value)) return 0; @@ -1857,6 +1867,7 @@ String *Item_func_json_array_append::val_str(String *str) json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); + je.killed_ptr= (uchar*)&thd->killed; c_path->cur_step= c_path->p.steps; @@ -1911,11 +1922,11 @@ String *Item_func_json_array_append::val_str(String *str) else c_to= je.value_end; - if (str->append("[", 1) || + if (str->append('[') || str->append((const char *) c_from, c_to - c_from) || str->append(", ", 2) || append_json_value(str, args[n_arg+1], &tmp_val) || - str->append("]", 1) || + str->append(']') || str->append((const char *) je.s.c_str, js->end() - (const char *) je.s.c_str)) goto return_null; /* Out of memory. 
*/ @@ -1937,8 +1948,7 @@ String *Item_func_json_array_append::val_str(String *str) json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); - str->length(0); - str->set_charset(js->charset()); + je.killed_ptr= (uchar*)&thd->killed; if (json_nice(&je, str, Item_func_json_format::LOOSE)) goto js_error; @@ -1948,6 +1958,7 @@ js_error: report_json_error(js, &je, 0); return_null: + thd->check_killed(); // to get the error message right null_value= 1; return 0; } @@ -1958,8 +1969,9 @@ String *Item_func_json_array_insert::val_str(String *str) json_engine_t je; String *js= args[0]->val_json(&tmp_js); uint n_arg, n_path; + THD *thd= current_thd; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value= args[0]->null_value)) return 0; @@ -1995,6 +2007,7 @@ String *Item_func_json_array_insert::val_str(String *str) json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); + je.killed_ptr= (uchar*)&thd->killed; c_path->cur_step= c_path->p.steps; @@ -2034,7 +2047,7 @@ String *Item_func_json_array_insert::val_str(String *str) goto js_error; } - if (unlikely(je.s.error)) + if (unlikely(je.s.error || *je.killed_ptr)) goto js_error; str->length(0); @@ -2078,8 +2091,7 @@ String *Item_func_json_array_insert::val_str(String *str) json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); - str->length(0); - str->set_charset(js->charset()); + je.killed_ptr= (uchar*)&thd->killed; if (json_nice(&je, str, Item_func_json_format::LOOSE)) goto js_error; @@ -2088,6 +2100,7 @@ String *Item_func_json_array_insert::val_str(String *str) js_error: report_json_error(js, &je, 0); return_null: + thd->check_killed(); // to get the error message right null_value= 1; return 0; } @@ -2095,13 +2108,13 @@ return_null: String *Item_func_json_object::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint n_arg; str->length(0); 
str->set_charset(collation.collation); - if (str->append("{", 1) || + if (str->append('{') || (arg_count > 0 && (append_json_keyname(str, args[0], &tmp_val) || append_json_value(str, args[1], &tmp_val)))) @@ -2115,7 +2128,7 @@ String *Item_func_json_object::val_str(String *str) goto err_return; } - if (str->append("}", 1)) + if (str->append('}')) goto err_return; if (result_limit == 0) @@ -2162,7 +2175,7 @@ static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2) json_string_set_cs(&key_name, je1->s.cs); - if (str->append("{", 1)) + if (str->append('{')) return 3; while (json_scan_next(je1) == 0 && je1->state != JST_OBJ_END) @@ -2188,7 +2201,7 @@ static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2) *je2= sav_je2; } - if (str->append("\"", 1) || + if (str->append('"') || append_simple(str, key_start, key_end - key_start) || str->append("\":", 2)) return 3; @@ -2256,8 +2269,7 @@ merged_j1: return 2; continue; } - if (json_skip_key(je2) || - json_skip_level(je1)) + if (json_skip_key(je2) || json_skip_level(je1)) return 1; goto continue_j2; } @@ -2273,7 +2285,7 @@ merged_j1: if (json_skip_key(je2)) return 1; - if (str->append("\"", 1) || + if (str->append('"') || append_simple(str, key_start, je2->s.c_str - key_start)) return 3; @@ -2281,7 +2293,7 @@ continue_j2: continue; } - if (str->append("}", 1)) + if (str->append('}')) return 3; } else @@ -2301,7 +2313,7 @@ continue_j2: } else { - if (str->append("[", 1)) + if (str->append('[')) return 3; if (je1->value_type == JSON_VALUE_OBJECT) { @@ -2343,7 +2355,7 @@ continue_j2: return 3; if (je2->value_type != JSON_VALUE_ARRAY && - str->append("]", 1)) + str->append(']')) return 3; } @@ -2353,10 +2365,11 @@ continue_j2: String *Item_func_json_merge::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); json_engine_t je1, je2; String *js1= args[0]->val_json(&tmp_js1), *js2=NULL; uint n_arg; + THD *thd= current_thd; LINT_INIT(js2); if (args[0]->null_value) @@ -2373,9 
+2386,11 @@ String *Item_func_json_merge::val_str(String *str) json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(), (const uchar *) js1->ptr() + js1->length()); + je1.killed_ptr= (uchar*)&thd->killed; json_scan_start(&je2, js2->charset(),(const uchar *) js2->ptr(), (const uchar *) js2->ptr() + js2->length()); + je2.killed_ptr= (uchar*)&thd->killed; if (do_merge(str, &je1, &je2)) goto error_return; @@ -2397,8 +2412,7 @@ String *Item_func_json_merge::val_str(String *str) json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(), (const uchar *) js1->ptr() + js1->length()); - str->length(0); - str->set_charset(js1->charset()); + je1.killed_ptr= (uchar*)&thd->killed; if (json_nice(&je1, str, Item_func_json_format::LOOSE)) goto error_return; @@ -2410,6 +2424,7 @@ error_return: report_json_error(js1, &je1, 0); if (je2.s.error) report_json_error(js2, &je2, n_arg); + thd->check_killed(); // to get the error message right null_return: null_value= 1; return NULL; @@ -2442,7 +2457,7 @@ static int copy_value_patch(String *str, json_engine_t *je) } /* JSON_VALUE_OBJECT */ - if (str->append("{", 1)) + if (str->append('{')) return 1; while (json_scan_next(je) == 0 && je->state != JST_OBJ_END) { @@ -2465,12 +2480,12 @@ static int copy_value_patch(String *str, json_engine_t *je) else first_key= 0; - if (str->append("\"", 1) || + if (str->append('"') || append_simple(str, key_start, je->value_begin - key_start) || copy_value_patch(str, je)) return 1; } - if (str->append("}", 1)) + if (str->append('}')) return 1; return 0; @@ -2506,7 +2521,7 @@ static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2, *empty_result= FALSE; json_string_set_cs(&key_name, je1->s.cs); - if (str->append("{", 1)) + if (str->append('{')) return 3; while (json_scan_next(je1) == 0 && je1->state != JST_OBJ_END) @@ -2532,7 +2547,7 @@ static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2, *je2= sav_je2; } - if (str->append("\"", 1) || + if 
(str->append('"') || append_simple(str, key_start, key_end - key_start) || str->append("\":", 2)) return 3; @@ -2623,7 +2638,7 @@ merged_j1: if (!first_key && str->append(", ", 2)) return 3; - if (str->append("\"", 1) || + if (str->append('"') || append_simple(str, key_start, key_end - key_start) || str->append("\":", 2)) return 3; @@ -2644,7 +2659,7 @@ continue_j2: continue; } - if (str->append("}", 1)) + if (str->append('}')) return 3; } else @@ -2663,11 +2678,12 @@ continue_j2: String *Item_func_json_merge_patch::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); json_engine_t je1, je2; String *js1= args[0]->val_json(&tmp_js1), *js2=NULL; uint n_arg; bool empty_result, merge_to_null; + THD *thd= current_thd; /* To report errors properly if some JSON is invalid. */ je1.s.error= je2.s.error= 0; @@ -2684,6 +2700,7 @@ String *Item_func_json_merge_patch::val_str(String *str) json_scan_start(&je2, js2->charset(),(const uchar *) js2->ptr(), (const uchar *) js2->ptr() + js2->length()); + je2.killed_ptr= (uchar*)&thd->killed; if (merge_to_null) { @@ -2703,12 +2720,13 @@ String *Item_func_json_merge_patch::val_str(String *str) json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(), (const uchar *) js1->ptr() + js1->length()); + je1.killed_ptr= (uchar*)&thd->killed; if (do_merge_patch(str, &je1, &je2, &empty_result)) goto error_return; if (empty_result) - str->append("null"); + str->append(STRING_WITH_LEN("null")); cont_point: { @@ -2731,8 +2749,7 @@ cont_point: json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(), (const uchar *) js1->ptr() + js1->length()); - str->length(0); - str->set_charset(js1->charset()); + je1.killed_ptr= (uchar*)&thd->killed; if (json_nice(&je1, str, Item_func_json_format::LOOSE)) goto error_return; @@ -2744,6 +2761,7 @@ error_return: report_json_error(js1, &je1, 0); if (je2.s.error) report_json_error(js2, &je2, n_arg); + thd->check_killed(); // to get the error message right null_return: null_value= 1; 
return NULL; @@ -2754,7 +2772,7 @@ bool Item_func_json_length::fix_length_and_dec() { if (arg_count > 1) path.set_constant_flag(args[1]->const_item()); - maybe_null= 1; + set_maybe_null(); max_length= 10; return FALSE; } @@ -2900,7 +2918,7 @@ bool Item_func_json_type::fix_length_and_dec() { collation.set(&my_charset_utf8mb3_general_ci); max_length= 12; - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -2965,11 +2983,12 @@ bool Item_func_json_insert::fix_length_and_dec() for (n_arg= 1; n_arg < arg_count; n_arg+= 2) { paths[n_arg/2].set_constant_flag(args[n_arg]->const_item()); - char_length+= args[n_arg/2+1]->max_char_length() + 4; + char_length+= + static_cast<ulonglong>(args[n_arg+1]->max_char_length()) + 4; } fix_char_length_ulonglong(char_length); - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -2980,8 +2999,9 @@ String *Item_func_json_insert::val_str(String *str) String *js= args[0]->val_json(&tmp_js); uint n_arg, n_path; json_string_t key_name; + THD *thd= current_thd; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if ((null_value= args[0]->null_value)) return 0; @@ -3020,6 +3040,7 @@ String *Item_func_json_insert::val_str(String *str) json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); + je.killed_ptr= (uchar*)&thd->killed; if (c_path->p.last_step < c_path->p.steps) goto v_found; @@ -3069,7 +3090,7 @@ String *Item_func_json_insert::val_str(String *str) str->length(0); /* Wrap the value as an array. */ if (append_simple(str, js->ptr(), (const char *) v_from - js->ptr()) || - (do_array_autowrap && str->append("[", 1))) + (do_array_autowrap && str->append('['))) goto js_error; /* Out of memory. 
*/ if (je.value_type == JSON_VALUE_OBJECT) @@ -3082,7 +3103,7 @@ String *Item_func_json_insert::val_str(String *str) (append_simple(str, v_from, je.s.c_str - v_from) || str->append(", ", 2))) || append_json_value(str, args[n_arg+1], &tmp_val) || - (do_array_autowrap && str->append("]", 1)) || + (do_array_autowrap && str->append(']')) || append_simple(str, je.s.c_str, js->end()-(const char *) je.s.c_str)) goto js_error; /* Out of memory. */ @@ -3153,7 +3174,7 @@ String *Item_func_json_insert::val_str(String *str) str->length(0); if (append_simple(str, js->ptr(), v_to - js->ptr()) || (n_key > 0 && str->append(", ", 2)) || - str->append("\"", 1) || + str->append('"') || append_simple(str, lp->key, lp->key_end - lp->key) || str->append("\":", 2) || append_json_value(str, args[n_arg+1], &tmp_val) || @@ -3201,7 +3222,7 @@ continue_point: json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); - str->length(0); + je.killed_ptr= (uchar*)&thd->killed; if (json_nice(&je, str, Item_func_json_format::LOOSE)) goto js_error; @@ -3209,6 +3230,7 @@ continue_point: js_error: report_json_error(js, &je, 0); + thd->check_killed(); // to get the error message right return_null: null_value= 1; return 0; @@ -3221,7 +3243,7 @@ bool Item_func_json_remove::fix_length_and_dec() max_length= args[0]->max_length; mark_constant_paths(paths, args+1, arg_count-1); - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -3232,8 +3254,9 @@ String *Item_func_json_remove::val_str(String *str) String *js= args[0]->val_json(&tmp_js); uint n_arg, n_path; json_string_t key_name; + THD *thd= current_thd; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (args[0]->null_value) goto null_return; @@ -3278,6 +3301,7 @@ String *Item_func_json_remove::val_str(String *str) json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); + je.killed_ptr= (uchar*)&thd->killed; c_path->cur_step= c_path->p.steps; @@ -3386,8 
+3410,7 @@ v_found: json_scan_start(&je, js->charset(),(const uchar *) js->ptr(), (const uchar *) js->ptr() + js->length()); - str->length(0); - str->set_charset(js->charset()); + je.killed_ptr= (uchar*)&thd->killed; if (json_nice(&je, str, Item_func_json_format::LOOSE)) goto js_error; @@ -3395,6 +3418,7 @@ v_found: return str; js_error: + thd->check_killed(); // to get the error message right report_json_error(js, &je, 0); null_return: null_value= 1; @@ -3406,7 +3430,7 @@ bool Item_func_json_keys::fix_length_and_dec() { collation.set(args[0]->collation); max_length= args[0]->max_length; - maybe_null= 1; + set_maybe_null(); if (arg_count > 1) path.set_constant_flag(args[1]->const_item()); return FALSE; @@ -3498,7 +3522,7 @@ skip_search: goto null_return; str->length(0); - if (str->append("[", 1)) + if (str->append('[')) goto err_return; /* Out of memory. */ /* Parse the OBJECT collecting the keys. */ while (json_scan_next(&je) == 0 && je.state != JST_OBJ_END) @@ -3521,9 +3545,9 @@ skip_search: if (!check_key_in_list(str, key_start, key_len)) { if ((n_keys > 0 && str->append(", ", 2)) || - str->append("\"", 1) || + str->append('"') || append_simple(str, key_start, key_len) || - str->append("\"", 1)) + str->append('"')) goto err_return; n_keys++; } @@ -3538,7 +3562,7 @@ skip_search: } } - if (unlikely(je.s.error || str->append("]", 1))) + if (unlikely(je.s.error || str->append(']'))) goto err_return; null_value= 0; @@ -3576,7 +3600,7 @@ bool Item_func_json_search::fix_length_and_dec() /* It's rather difficult to estimate the length of the result. - I belive arglen^2 is the reasonable upper limit. + I believe arglen^2 is the reasonable upper limit. 
*/ if (args[0]->max_length > SQR_MAX_BLOB_WIDTH) max_length= MAX_BLOB_WIDTH; @@ -3591,7 +3615,7 @@ bool Item_func_json_search::fix_length_and_dec() if (arg_count > 4) mark_constant_paths(paths, args+4, arg_count-4); - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -3642,14 +3666,14 @@ static int append_json_path(String *str, const json_path_t *p) else /*JSON_PATH_ARRAY*/ { - if (str->append("[", 1) || + if (str->append('[') || str->append_ulonglong(c->n_item) || - str->append("]", 1)) + str->append(']')) return TRUE; } } - return str->append("\"", 1); + return str->append('"'); } @@ -3710,7 +3734,7 @@ String *Item_func_json_search::val_str(String *str) { if (n_path_found == 2) { - if (str->append("[", 1) || + if (str->append('[') || append_json_path(str, &sav_path)) goto js_error; } @@ -3736,7 +3760,7 @@ end: } else { - if (str->append("]", 1)) + if (str->append(']')) goto js_error; } @@ -3752,21 +3776,21 @@ null_return: } -const char *Item_func_json_format::func_name() const +LEX_CSTRING Item_func_json_format::func_name_cstring() const { switch (fmt) { case COMPACT: - return "json_compact"; + return { STRING_WITH_LEN("json_compact") }; case LOOSE: - return "json_loose"; + return { STRING_WITH_LEN("json_loose") }; case DETAILED: - return "json_detailed"; + return { STRING_WITH_LEN("json_detailed") }; default: DBUG_ASSERT(0); }; - return ""; + return NULL_clex_str; } @@ -3775,7 +3799,7 @@ bool Item_func_json_format::fix_length_and_dec() decimals= 0; collation.set(args[0]->collation); max_length= args[0]->max_length; - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -3785,6 +3809,7 @@ String *Item_func_json_format::val_str(String *str) String *js= args[0]->val_json(&tmp_js); json_engine_t je; int tab_size= 4; + THD *thd= current_thd; if ((null_value= args[0]->null_value)) return 0; @@ -3808,13 +3833,13 @@ String *Item_func_json_format::val_str(String *str) json_scan_start(&je, js->charset(), (const uchar *) js->ptr(), (const uchar *) 
js->ptr()+js->length()); + je.killed_ptr= (uchar*)&thd->killed; - str->length(0); - str->set_charset(js->charset()); if (json_nice(&je, str, fmt, tab_size)) { null_value= 1; report_json_error(js, &je, 0); + thd->check_killed(); // to get the error message right return 0; } @@ -3974,7 +3999,7 @@ Item_func_json_objectagg(THD *thd, Item_func_json_objectagg *item) { quick_group= FALSE; result.set_charset(collation.collation); - result.append("{"); + result.append('{'); } @@ -3982,14 +4007,14 @@ bool Item_func_json_objectagg::fix_fields(THD *thd, Item **ref) { uint i; /* for loop variable */ - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); memcpy(orig_args, args, sizeof(Item*) * arg_count); if (init_sum_func_check(thd)) return TRUE; - maybe_null= 1; + set_maybe_null(); /* Fix fields for select list and ORDER clause @@ -3999,9 +4024,7 @@ Item_func_json_objectagg::fix_fields(THD *thd, Item **ref) { if (args[i]->fix_fields_if_needed_for_scalar(thd, &args[i])) return TRUE; - m_with_subquery|= args[i]->with_subquery(); - with_param|= args[i]->with_param; - with_window_func|= args[i]->with_window_func; + with_flags|= args[i]->with_flags; } /* skip charset aggregation for order columns */ @@ -4018,7 +4041,7 @@ Item_func_json_objectagg::fix_fields(THD *thd, Item **ref) if (check_sum_func(thd, ref)) return TRUE; - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -4057,11 +4080,11 @@ bool Item_func_json_objectagg::add() null_value= 0; if (result.length() > 1) - result.append(", "); + result.append(STRING_WITH_LEN(", ")); - result.append("\""); + result.append('"'); result.append(*key); - result.append("\":"); + result.append(STRING_WITH_LEN("\":")); buf.length(0); append_json_value(&result, args[1], &buf); @@ -4072,11 +4095,11 @@ bool Item_func_json_objectagg::add() String* Item_func_json_objectagg::val_str(String* str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return 0; - result.append("}"); + result.append('}'); return &result; } 
diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h index 9472e184124..6cdd8851663 100644 --- a/sql/item_jsonfunc.h +++ b/sql/item_jsonfunc.h @@ -41,6 +41,13 @@ public: }; +void report_path_error_ex(const char *ps, json_path_t *p, + const char *fname, int n_param, + Sql_condition::enum_warning_level lv); +void report_json_error_ex(const char *js, json_engine_t *je, + const char *fname, int n_param, + Sql_condition::enum_warning_level lv); + class Json_engine_scan: public json_engine_t { public: @@ -75,23 +82,28 @@ protected: public: Item_func_json_valid(THD *thd, Item *json) : Item_bool_func(thd, json) {} - longlong val_int(); - const char *func_name() const { return "json_valid"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_valid") }; + return name; + } + bool fix_length_and_dec() override { if (Item_bool_func::fix_length_and_dec()) return TRUE; - maybe_null= 1; + set_maybe_null(); return FALSE; } bool set_format_by_check_constraint(Send_field_extended_metadata *to) const + override { static const Lex_cstring fmt(STRING_WITH_LEN("json")); return to->set_format_name(fmt); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_valid>(thd, this); } - enum Functype functype() const { return JSON_VALID_FUNC; } + enum Functype functype() const override { return JSON_VALID_FUNC; } }; @@ -104,11 +116,15 @@ protected: public: Item_func_json_exists(THD *thd, Item *js, Item *i_path): Item_bool_func(thd, js, i_path) {} - const char *func_name() const { return "json_exists"; } - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_exists") }; + return name; + } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_exists>(thd, this); } - longlong 
val_int(); + longlong val_int() override; }; @@ -137,7 +153,11 @@ class Item_func_json_value: public Item_str_func, public: Item_func_json_value(THD *thd, Item *js, Item *i_path): Item_str_func(thd, js, i_path) {} - const char *func_name() const override { return "json_value"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_value") }; + return name; + } bool fix_length_and_dec() override ; String *val_str(String *to) override { @@ -161,7 +181,11 @@ class Item_func_json_query: public Item_json_func, public: Item_func_json_query(THD *thd, Item *js, Item *i_path): Item_json_func(thd, js, i_path) {} - const char *func_name() const override { return "json_query"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_query") }; + return name; + } bool fix_length_and_dec() override; String *val_str(String *to) override { @@ -186,10 +210,14 @@ protected: public: Item_func_json_quote(THD *thd, Item *s): Item_str_func(thd, s) {} - const char *func_name() const { return "json_quote"; } - bool fix_length_and_dec(); - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_quote") }; + return name; + } + bool fix_length_and_dec() override; + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_quote>(thd, this); } }; @@ -201,10 +229,14 @@ protected: String *read_json(json_engine_t *je); public: Item_func_json_unquote(THD *thd, Item *s): Item_str_func(thd, s) {} - const char *func_name() const { return "json_unquote"; } - bool fix_length_and_dec(); - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_unquote") }; + return name; + } + bool fix_length_and_dec() override; + String *val_str(String *) override; + Item 
*get_copy(THD *thd) override { return get_item_copy<Item_func_json_unquote>(thd, this); } }; @@ -232,15 +264,19 @@ public: char **out_val, int *value_len); Item_func_json_extract(THD *thd, List<Item> &list): Item_json_str_multipath(thd, list) {} - const char *func_name() const { return "json_extract"; } - enum Functype functype() const { return JSON_EXTRACT_FUNC; } - bool fix_length_and_dec(); - String *val_str(String *); - longlong val_int(); - double val_real(); - my_decimal *val_decimal(my_decimal *); - uint get_n_paths() const { return arg_count - 1; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_extract") }; + return name; + } + enum Functype functype() const override { return JSON_EXTRACT_FUNC; } + bool fix_length_and_dec() override; + String *val_str(String *) override; + longlong val_int() override; + double val_real() override; + my_decimal *val_decimal(my_decimal *) override; + uint get_n_paths() const override { return arg_count - 1; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_extract>(thd, this); } }; @@ -256,10 +292,14 @@ protected: public: Item_func_json_contains(THD *thd, List<Item> &list): Item_bool_func(thd, list) {} - const char *func_name() const { return "json_contains"; } - bool fix_length_and_dec(); - longlong val_int(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_contains") }; + return name; + } + bool fix_length_and_dec() override; + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_contains>(thd, this); } }; @@ -277,12 +317,16 @@ protected: public: Item_func_json_contains_path(THD *thd, List<Item> &list): Item_bool_func(thd, list), tmp_paths(0) {} - const char *func_name() const { return "json_contains_path"; } - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec(); - void 
cleanup(); - longlong val_int(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_contains_path") }; + return name; + } + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override; + void cleanup() override; + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_contains_path>(thd, this); } }; @@ -297,10 +341,14 @@ public: Item_json_func(thd) {} Item_func_json_array(THD *thd, List<Item> &list): Item_json_func(thd, list) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "json_array"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_array") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_array>(thd, this); } }; @@ -313,11 +361,15 @@ protected: public: Item_func_json_array_append(THD *thd, List<Item> &list): Item_json_str_multipath(thd, list) {} - bool fix_length_and_dec(); - String *val_str(String *); - uint get_n_paths() const { return arg_count/2; } - const char *func_name() const { return "json_array_append"; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + String *val_str(String *) override; + uint get_n_paths() const override { return arg_count/2; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_array_append") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_array_append>(thd, this); } }; @@ -327,9 +379,13 @@ class Item_func_json_array_insert: public Item_func_json_array_append public: Item_func_json_array_insert(THD *thd, List<Item> &list): Item_func_json_array_append(thd, list) {} - String *val_str(String *); - const char 
*func_name() const { return "json_array_insert"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_array_insert") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_array_insert>(thd, this); } }; @@ -341,9 +397,13 @@ public: Item_func_json_array(thd) {} Item_func_json_object(THD *thd, List<Item> &list): Item_func_json_array(thd, list) {} - String *val_str(String *); - const char *func_name() const { return "json_object"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_object") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_object>(thd, this); } }; @@ -355,9 +415,13 @@ protected: public: Item_func_json_merge(THD *thd, List<Item> &list): Item_func_json_array(thd, list) {} - String *val_str(String *); - const char *func_name() const { return "json_merge_preserve"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_merge_preserve") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_merge>(thd, this); } }; @@ -366,19 +430,23 @@ class Item_func_json_merge_patch: public Item_func_json_merge public: Item_func_json_merge_patch(THD *thd, List<Item> &list): Item_func_json_merge(thd, list) {} - const char *func_name() const { return "json_merge_patch"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_merge_patch") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_merge_patch>(thd, this); } }; 
class Item_func_json_length: public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_can_return_text(func_name()) || + return args[0]->check_type_can_return_text(func_name_cstring()) || (arg_count > 1 && - args[1]->check_type_general_purpose_string(func_name())); + args[1]->check_type_general_purpose_string(func_name_cstring())); } protected: json_path_with_flags path; @@ -387,26 +455,34 @@ protected: public: Item_func_json_length(THD *thd, List<Item> &list): Item_long_func(thd, list) {} - const char *func_name() const { return "json_length"; } - bool fix_length_and_dec(); - longlong val_int(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_length") }; + return name; + } + bool fix_length_and_dec() override; + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_length>(thd, this); } }; class Item_func_json_depth: public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_can_return_text(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_text(func_name_cstring()); } protected: String tmp_js; public: Item_func_json_depth(THD *thd, Item *js): Item_long_func(thd, js) {} - const char *func_name() const { return "json_depth"; } - bool fix_length_and_dec() { max_length= 10; return FALSE; } - longlong val_int(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_depth") }; + return name; + } + bool fix_length_and_dec() override { max_length= 10; return FALSE; } + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_depth>(thd, this); } }; @@ -417,10 +493,14 @@ protected: String tmp_js; public: Item_func_json_type(THD *thd, Item *js): Item_str_func(thd, js) {} - const char 
*func_name() const { return "json_type"; } - bool fix_length_and_dec(); - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_type") }; + return name; + } + bool fix_length_and_dec() override; + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_type>(thd, this); } }; @@ -435,15 +515,18 @@ public: Item_func_json_insert(bool i_mode, bool r_mode, THD *thd, List<Item> &list): Item_json_str_multipath(thd, list), mode_insert(i_mode), mode_replace(r_mode) {} - bool fix_length_and_dec(); - String *val_str(String *); - uint get_n_paths() const { return arg_count/2; } - const char *func_name() const + bool fix_length_and_dec() override; + String *val_str(String *) override; + uint get_n_paths() const override { return arg_count/2; } + LEX_CSTRING func_name_cstring() const override { - return mode_insert ? - (mode_replace ? "json_set" : "json_insert") : "json_replace"; + static LEX_CSTRING json_set= {STRING_WITH_LEN("json_set") }; + static LEX_CSTRING json_insert= {STRING_WITH_LEN("json_insert") }; + static LEX_CSTRING json_replace= {STRING_WITH_LEN("json_replace") }; + return (mode_insert ? + (mode_replace ? 
json_set : json_insert) : json_replace); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_insert>(thd, this); } }; @@ -455,11 +538,15 @@ protected: public: Item_func_json_remove(THD *thd, List<Item> &list): Item_json_str_multipath(thd, list) {} - bool fix_length_and_dec(); - String *val_str(String *); - uint get_n_paths() const { return arg_count - 1; } - const char *func_name() const { return "json_remove"; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + String *val_str(String *) override; + uint get_n_paths() const override { return arg_count - 1; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_remove") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_remove>(thd, this); } }; @@ -473,10 +560,14 @@ protected: public: Item_func_json_keys(THD *thd, List<Item> &list): Item_str_func(thd, list) {} - const char *func_name() const { return "json_keys"; } - bool fix_length_and_dec(); - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_keys") }; + return name; + } + bool fix_length_and_dec() override; + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_keys>(thd, this); } }; @@ -496,12 +587,16 @@ protected: public: Item_func_json_search(THD *thd, List<Item> &list): Item_json_str_multipath(thd, list) {} - const char *func_name() const { return "json_search"; } - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec(); - String *val_str(String *); - uint get_n_paths() const { return arg_count > 4 ? 
arg_count - 4 : 0; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_search") }; + return name; + } + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override; + String *val_str(String *) override; + uint get_n_paths() const override { return arg_count > 4 ? arg_count - 4 : 0; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_search>(thd, this); } }; @@ -525,11 +620,11 @@ public: Item_func_json_format(THD *thd, List<Item> &list): Item_json_func(thd, list), fmt(DETAILED) {} - const char *func_name() const; - bool fix_length_and_dec(); - String *val_str(String *str); - String *val_json(String *str); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override; + bool fix_length_and_dec() override; + String *val_str(String *str) override; + String *val_json(String *str) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_json_format>(thd, this); } }; @@ -564,7 +659,11 @@ public: return Type_handler_json_common::json_type_handler_sum(this); } - const char *func_name() const override { return "json_arrayagg("; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("json_arrayagg(") }; + return name; + } enum Sumfunctype sum_func() const override { return JSON_ARRAYAGG_FUNC; } String* val_str(String *str) override; @@ -583,14 +682,18 @@ public: Item_sum(thd, key, value) { quick_group= FALSE; - result.append("{"); + result.append('{'); } Item_func_json_objectagg(THD *thd, Item_func_json_objectagg *item); void cleanup() override; - enum Sumfunctype sum_func() const override {return JSON_OBJECTAGG_FUNC;} - const char *func_name() const override { return "json_objectagg"; } + enum Sumfunctype sum_func () const override { return JSON_OBJECTAGG_FUNC;} + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= 
{STRING_WITH_LEN("json_objectagg") }; + return name; + } const Type_handler *type_handler() const override { return Type_handler_json_common::json_type_handler_sum(this); @@ -619,5 +722,6 @@ public: { return get_item_copy<Item_func_json_objectagg>(thd, this); } }; +extern bool is_json_type(const Item *item); #endif /* ITEM_JSONFUNC_INCLUDED */ diff --git a/sql/item_row.cc b/sql/item_row.cc index 767787497ce..3981392b0ae 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -36,9 +36,10 @@ void Item_row::illegal_method_call(const char *method) bool Item_row::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); null_value= 0; - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; + Item **arg, **arg_end; for (arg= args, arg_end= args + arg_count; arg != arg_end ; arg++) { @@ -60,14 +61,10 @@ bool Item_row::fix_fields(THD *thd, Item **ref) with_null|= 1; } } - maybe_null|= item->maybe_null; - join_with_sum_func(item); - with_window_func = with_window_func || item->with_window_func; - with_field= with_field || item->with_field; - m_with_subquery|= item->with_subquery(); - with_param|= item->with_param; + base_flags|= (item->base_flags & item_base_t::MAYBE_NULL); + with_flags|= item->with_flags; } - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } diff --git a/sql/item_row.h b/sql/item_row.h index 2872a498d55..fbf632ba3b7 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -35,9 +35,7 @@ */ class Item_row: public Item_fixed_hybrid, private Item_args, - private Used_tables_and_const_cache, - private With_subquery_cache, - private With_sum_func_cache + private Used_tables_and_const_cache { table_map not_null_tables_cache; /** @@ -53,108 +51,106 @@ public: Item_row(THD *thd, Item_row *row) :Item_fixed_hybrid(thd), Item_args(thd, static_cast<Item_args*>(row)), Used_tables_and_const_cache(), - With_sum_func_cache(*row), not_null_tables_cache(0), with_null(0) { } - bool with_subquery() const { DBUG_ASSERT(fixed); return 
m_with_subquery; } - enum Type type() const { return ROW_ITEM; }; - const Type_handler *type_handler() const { return &type_handler_row; } + enum Type type() const override { return ROW_ITEM; }; + const Type_handler *type_handler() const override { return &type_handler_row; } Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, - const Tmp_field_param *param) + const Tmp_field_param *param) override { return NULL; // Check with Vicentiu why it's called for Item_row } void illegal_method_call(const char *); - bool is_null() { return null_value; } - void make_send_field(THD *thd, Send_field *) + bool is_null() override { return null_value; } + void make_send_field(THD *thd, Send_field *) override { illegal_method_call((const char*)"make_send_field"); }; - double val_real() + double val_real() override { illegal_method_call((const char*)"val"); return 0; }; - longlong val_int() + longlong val_int() override { illegal_method_call((const char*)"val_int"); return 0; }; - String *val_str(String *) + String *val_str(String *) override { illegal_method_call((const char*)"val_str"); return 0; }; - my_decimal *val_decimal(my_decimal *) + my_decimal *val_decimal(my_decimal *) override { illegal_method_call((const char*)"val_decimal"); return 0; }; - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { illegal_method_call((const char*)"get_date"); return true; } - bool fix_fields(THD *thd, Item **ref); - void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge); - void cleanup(); + bool fix_fields(THD *thd, Item **ref) override; + void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge) + override; + void cleanup() override; void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, - List<Item> &fields, uint flags); - bool with_sum_func() const { return m_with_sum_func; } - With_sum_func_cache* get_with_sum_func_cache() { 
return this; } - table_map used_tables() const { return used_tables_cache; }; - bool const_item() const { return const_item_cache; }; - void update_used_tables() + List<Item> &fields, uint flags) override; + table_map used_tables() const override { return used_tables_cache; }; + bool const_item() const override { return const_item_cache; }; + void update_used_tables() override { used_tables_and_const_cache_init(); used_tables_and_const_cache_update_and_join(arg_count, args); } - table_map not_null_tables() const { return not_null_tables_cache; } - virtual void print(String *str, enum_query_type query_type); + table_map not_null_tables() const override { return not_null_tables_cache; } + void print(String *str, enum_query_type query_type) override; - bool walk(Item_processor processor, bool walk_subquery, void *arg) + bool walk(Item_processor processor, bool walk_subquery, void *arg) override { if (walk_args(processor, walk_subquery, arg)) return true; return (this->*processor)(arg); } - Item *transform(THD *thd, Item_transformer transformer, uchar *arg); - bool eval_not_null_tables(void *opt_arg); - bool find_not_null_fields(table_map allowed); + Item *transform(THD *thd, Item_transformer transformer, uchar *arg) override; + bool eval_not_null_tables(void *opt_arg) override; + bool find_not_null_fields(table_map allowed) override; - uint cols() const { return arg_count; } - Item* element_index(uint i) { return args[i]; } - Item** addr(uint i) { return args + i; } - bool check_cols(uint c); - bool null_inside() { return with_null; }; - void bring_value(); + uint cols() const override { return arg_count; } + Item* element_index(uint i) override { return args[i]; } + Item** addr(uint i) override { return args + i; } + bool check_cols(uint c) override; + bool null_inside() override { return with_null; }; + void bring_value() override; Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { Item_args::propagate_equal_fields(thd, 
Context_identity(), cond); return this; } - bool excl_dep_on_table(table_map tab_map) + bool excl_dep_on_table(table_map tab_map) override { return Item_args::excl_dep_on_table(tab_map); } - bool excl_dep_on_grouping_fields(st_select_lex *sel) + bool excl_dep_on_grouping_fields(st_select_lex *sel) override { return Item_args::excl_dep_on_grouping_fields(sel); } - bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred) + bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred) override { return Item_args::excl_dep_on_in_subq_left_part(subq_pred); } - bool check_vcol_func_processor(void *arg) {return FALSE; } - Item *get_copy(THD *thd) + bool check_vcol_func_processor(void *arg) override {return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_row>(thd, this); } - Item *build_clone(THD *thd); + Item *build_clone(THD *thd) override; }; #endif /* ITEM_ROW_INCLUDED */ diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index cffeeb7256e..7eee96b3a19 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2017, Oracle and/or its affiliates. - Copyright (c) 2009, 2020, MariaDB Corporation. + Copyright (c) 2009, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -55,7 +55,7 @@ C_MODE_END #include <sql_repl.h> #include "sql_statistics.h" -size_t username_char_length= 80; +size_t username_char_length= USERNAME_CHAR_LENGTH; /* Calculate max length of string from length argument to LEFT and RIGHT @@ -91,7 +91,7 @@ static uint32 max_length_for_string(Item *item) */ String *Item_func::val_str_from_val_str_ascii(String *str, String *ascii_buffer) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!(collation.collation->state & MY_CS_NONASCII)) { @@ -124,14 +124,15 @@ bool Item_str_func::fix_fields(THD *thd, Item **ref) In Item_str_func::check_well_formed_result() we may set null_value flag on the same condition as in test() below. */ - maybe_null= maybe_null || thd->is_strict_mode(); + if (thd->is_strict_mode()) + set_maybe_null(); return res; } my_decimal *Item_str_func::val_decimal(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); StringBuffer<64> tmp; String *res= val_str(&tmp); return res ? decimal_from_string_with_check(decimal_value, res) : 0; @@ -140,7 +141,7 @@ my_decimal *Item_str_func::val_decimal(my_decimal *decimal_value) double Item_str_func::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); StringBuffer<64> tmp; String *res= val_str(&tmp); return res ? double_from_string_with_check(res) : 0.0; @@ -149,7 +150,7 @@ double Item_str_func::val_real() longlong Item_str_func::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); StringBuffer<22> tmp; String *res= val_str(&tmp); return res ? 
longlong_from_string_with_check(res) : 0; @@ -158,7 +159,7 @@ longlong Item_str_func::val_int() String *Item_func_md5::val_str_ascii(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String * sptr= args[0]->val_str(str); if (sptr) { @@ -183,7 +184,7 @@ String *Item_func_md5::val_str_ascii(String *str) String *Item_func_sha::val_str_ascii(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String * sptr= args[0]->val_str(str); if (sptr) /* If we got value different from NULL */ { @@ -213,7 +214,7 @@ bool Item_func_sha::fix_length_and_dec() String *Item_func_sha2::val_str_ascii(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); unsigned char digest_buf[512/8]; // enough for SHA512 String *input_string; const char *input_ptr; @@ -286,7 +287,7 @@ String *Item_func_sha2::val_str_ascii(String *str) bool Item_func_sha2::fix_length_and_dec() { - maybe_null= 1; + set_maybe_null(); max_length = 0; int sha_variant= (int)(args[1]->const_item() ? args[1]->val_int() : 512); @@ -333,7 +334,7 @@ void Item_aes_crypt::create_key(String *user_key, uchar *real_key) String *Item_aes_crypt::val_str(String *str2) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); StringBuffer<80> user_key_buf; String *sptr= args[0]->val_str(&tmp_value); String *user_key= args[1]->val_str(&user_key_buf); @@ -376,7 +377,7 @@ bool Item_func_aes_encrypt::fix_length_and_dec() bool Item_func_aes_decrypt::fix_length_and_dec() { max_length=args[0]->max_length; - maybe_null= 1; + set_maybe_null(); what= ENCRYPTION_FLAG_DECRYPT; return FALSE; } @@ -384,11 +385,11 @@ bool Item_func_aes_decrypt::fix_length_and_dec() bool Item_func_to_base64::fix_length_and_dec() { - maybe_null= args[0]->maybe_null; + base_flags|= args[0]->base_flags & item_base_t::MAYBE_NULL; collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); if (args[0]->max_length > (uint) my_base64_encode_max_arg_length()) { - maybe_null= 1; + set_maybe_null(); 
fix_char_length_ulonglong((ulonglong) my_base64_encode_max_arg_length()); } else @@ -444,7 +445,8 @@ bool Item_func_from_base64::fix_length_and_dec() int length= my_base64_needed_decoded_length((int) args[0]->max_length); fix_char_length_ulonglong((ulonglong) length); } - maybe_null= 1; // Can be NULL, e.g. in case of badly formed input string + // Can be NULL, e.g. in case of badly formed input string + set_maybe_null(); return FALSE; } @@ -505,7 +507,7 @@ const char *representation_by_type[]= {"%.3f", "%.5f"}; String *Item_func_decode_histogram::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char buff[STRING_BUFFER_USUAL_SIZE]; String *res, tmp(buff, sizeof(buff), &my_charset_bin); int type; @@ -554,7 +556,7 @@ String *Item_func_decode_histogram::val_str(String *str) size_t size= my_snprintf(numbuf, sizeof(numbuf), representation_by_type[type], val - prev); str->append(numbuf, size); - str->append(","); + str->append(','); prev= val; } /* show delta with max */ @@ -612,7 +614,7 @@ bool Item_func_concat::realloc_result(String *str, uint length) const String *Item_func_concat::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; String *res; @@ -641,7 +643,7 @@ null: String *Item_func_concat_operator_oracle::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; String *res= NULL; uint i; @@ -721,7 +723,7 @@ bool Item_func_concat::fix_length_and_dec() String *Item_func_des_encrypt::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); #if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) uint code= ER_WRONG_PARAMETERS_TO_PROCEDURE; DES_cblock ivec; @@ -820,7 +822,7 @@ error: String *Item_func_des_decrypt::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); #if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) uint code= ER_WRONG_PARAMETERS_TO_PROCEDURE; DES_cblock ivec; @@ -911,7 +913,7 @@ wrong_key: String 
*Item_func_concat_ws::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char tmp_str_buff[10]; String tmp_sep_str(tmp_str_buff, sizeof(tmp_str_buff),default_charset_info), *sep_str, *res, *res2,*use_as_buff; @@ -1079,7 +1081,7 @@ bool Item_func_concat_ws::fix_length_and_dec() String *Item_func_reverse::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&tmp_value); const char *ptr, *end; char *tmp; @@ -1147,7 +1149,7 @@ bool Item_func_reverse::fix_length_and_dec() String *Item_func_replace::val_str_internal(String *str, String *empty_string_for_null) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res,*res2,*res3; int offset; uint from_length,to_length; @@ -1375,7 +1377,7 @@ bool Item_func_regexp_replace::append_replacement(String *str, String *Item_func_regexp_replace::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char buff0[MAX_FIELD_WIDTH]; char buff2[MAX_FIELD_WIDTH]; String tmp0(buff0,sizeof(buff0),&my_charset_bin); @@ -1454,7 +1456,7 @@ bool Item_func_regexp_substr::fix_length_and_dec() String *Item_func_regexp_substr::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char buff0[MAX_FIELD_WIDTH]; String tmp0(buff0,sizeof(buff0),&my_charset_bin); String *source= args[0]->val_str(&tmp0); @@ -1491,7 +1493,7 @@ err: String *Item_func_insert::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res,*res2; longlong start, length; /* must be longlong to avoid truncation */ @@ -1572,7 +1574,7 @@ bool Item_func_insert::fix_length_and_dec() String *Item_str_conv::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res; size_t alloced_length, len; @@ -1614,7 +1616,7 @@ bool Item_func_ucase::fix_length_and_dec() String *Item_func_left::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(str); /* must be longlong to avoid 
truncation */ @@ -1639,7 +1641,7 @@ String *Item_func_left::val_str(String *str) void Item_str_func::left_right_max_length() { uint32 char_length= args[0]->max_char_length(); - if (args[1]->const_item() && !args[1]->is_expensive()) + if (args[1]->can_eval_in_optimize()) { uint32 length= max_length_for_string(args[1]); set_if_smaller(char_length, length); @@ -1660,7 +1662,7 @@ bool Item_func_left::fix_length_and_dec() String *Item_func_right::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(str); /* must be longlong to avoid truncation */ longlong length= args[1]->val_int(); @@ -1696,7 +1698,7 @@ bool Item_func_right::fix_length_and_dec() String *Item_func_substr::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res = args[0]->val_str(str); /* must be longlong to avoid truncation */ longlong start= get_position(); @@ -1782,7 +1784,7 @@ bool Item_func_substr_index::fix_length_and_dec() String *Item_func_substr_index::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char buff[MAX_FIELD_WIDTH]; String tmp(buff,sizeof(buff),system_charset_info); String *res= args[0]->val_str(&tmp_value); @@ -1932,7 +1934,7 @@ String *Item_func_substr_index::val_str(String *str) String *Item_func_ltrim::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char buff[MAX_FIELD_WIDTH], *ptr, *end; String tmp(buff,sizeof(buff),system_charset_info); String *res, *remove_str; @@ -1977,7 +1979,7 @@ String *Item_func_ltrim::val_str(String *str) String *Item_func_rtrim::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char buff[MAX_FIELD_WIDTH], *ptr, *end; String tmp(buff, sizeof(buff), system_charset_info); String *res, *remove_str; @@ -2056,7 +2058,7 @@ String *Item_func_rtrim::val_str(String *str) String *Item_func_trim::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); char buff[MAX_FIELD_WIDTH], *ptr, *end; 
const char *r_ptr; String tmp(buff, sizeof(buff), system_charset_info); @@ -2145,7 +2147,7 @@ void Item_func_trim::print(String *str, enum_query_type query_type) Item_func::print(str, query_type); return; } - str->append(Item_func_trim::func_name()); + str->append(Item_func_trim::func_name_cstring()); str->append(func_name_ext()); str->append('('); str->append(mode_name()); @@ -2164,7 +2166,7 @@ void Item_func_trim::print(String *str, enum_query_type query_type) */ Sql_mode_dependency Item_func_trim::value_depends_on_sql_mode() const { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); if (arg_count == 1) // RTRIM(expr) return (args[0]->value_depends_on_sql_mode() & Sql_mode_dependency(~0, ~MODE_PAD_CHAR_TO_FULL_LENGTH)). @@ -2203,7 +2205,7 @@ bool Item_func_password::fix_fields(THD *thd, Item **ref) String *Item_func_password::val_str_ascii(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(str); switch (alg){ case NEW: @@ -2254,7 +2256,7 @@ char *Item_func_password::alloc(THD *thd, const char *password, String *Item_func_encrypt::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); #ifdef HAVE_CRYPT String *res =args[0]->val_str(str); @@ -2315,7 +2317,8 @@ bool Item_func_encode::seed() bool Item_func_encode::fix_length_and_dec() { max_length=args[0]->max_length; - maybe_null=args[0]->maybe_null || args[1]->maybe_null; + base_flags|= ((args[0]->base_flags | args[1]->base_flags) & + item_base_t::MAYBE_NULL); collation.set(&my_charset_bin); /* Precompute the seed state if the item is constant. 
*/ seeded= args[1]->const_item() && @@ -2326,7 +2329,7 @@ bool Item_func_encode::fix_length_and_dec() String *Item_func_encode::val_str(String *str) { String *res; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!(res=args[0]->val_str(str))) { @@ -2362,7 +2365,7 @@ void Item_func_decode::crypto_transform(String *res) String *Item_func_database::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; if (thd->db.str == NULL) { @@ -2378,7 +2381,7 @@ String *Item_func_database::val_str(String *str) String *Item_func_sqlerrm::val_str(String *str) { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); DBUG_ASSERT(!null_value); Diagnostics_area::Sql_condition_iterator it= current_thd->get_stmt_da()->sql_conditions(); @@ -2402,7 +2405,7 @@ String *Item_func_sqlerrm::val_str(String *str) */ bool Item_func_user::init(const char *user, const char *host) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); // For system threads (e.g. replication SQL thread) user may be empty if (user) @@ -2478,10 +2481,12 @@ bool Item_func_current_role::fix_fields(THD *thd, Item **ref) system_charset_info)) return 1; str_value.mark_as_const(); - null_value= maybe_null= 0; + null_value= 0; + base_flags&= ~item_base_t::MAYBE_NULL; return 0; } - null_value= maybe_null= 1; + null_value= 1; + set_maybe_null(); return 0; } @@ -2537,7 +2542,7 @@ static bool my_uni_isalpha(int wc) String *Item_func_soundex::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&tmp_value); char last_ch,ch; CHARSET_INFO *cs= collation.collation; @@ -2668,7 +2673,7 @@ bool Item_func_format::fix_length_and_dec() the number of decimals and round to the next integer. 
*/ bool need_extra_digit_for_rounding= args[0]->decimals > 0; - if (args[1]->const_item() && !args[1]->is_expensive()) + if (args[1]->can_eval_in_optimize()) { Longlong_hybrid tmp= args[1]->to_longlong_hybrid(); if (!args[1]->null_value) @@ -2708,7 +2713,7 @@ String *Item_func_format::val_str_ascii(String *str) /* Number of characters used to represent the decimals, including '.' */ uint32 dec_length; const MY_LOCALE *lc; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); dec= (int) args[1]->val_int(); if (args[1]->null_value) @@ -2816,14 +2821,14 @@ bool Item_func_elt::fix_length_and_dec() set_if_bigger(decimals,args[i]->decimals); } fix_char_length(char_length); - maybe_null=1; // NULL if wrong first arg + set_maybe_null(); // NULL if wrong first arg return FALSE; } double Item_func_elt::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint tmp; null_value=1; if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count) @@ -2836,7 +2841,7 @@ double Item_func_elt::val_real() longlong Item_func_elt::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint tmp; null_value=1; if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count) @@ -2850,7 +2855,7 @@ longlong Item_func_elt::val_int() String *Item_func_elt::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint tmp; null_value=1; if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count) @@ -2880,7 +2885,7 @@ bool Item_func_make_set::fix_length_and_dec() String *Item_func_make_set::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); ulonglong bits; bool first_found=0; Item **ptr=args+1; @@ -2933,13 +2938,13 @@ String *Item_func_make_set::val_str(String *str) void Item_func_char::print(String *str, enum_query_type query_type) { - str->append(Item_func_char::func_name()); + str->append(Item_func_char::func_name_cstring()); str->append('('); print_args(str, 0, query_type); if (collation.collation != &my_charset_bin) { 
str->append(STRING_WITH_LEN(" using ")); - str->append(collation.collation->csname); + str->append(collation.collation->cs_name); } str->append(')'); } @@ -2947,7 +2952,7 @@ void Item_func_char::print(String *str, enum_query_type query_type) String *Item_func_char::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); str->length(0); str->set_charset(collation.collation); for (uint i=0 ; i < arg_count ; i++) @@ -2989,7 +2994,7 @@ void Item_func_char::append_char(String *str, int32 num) String *Item_func_chr::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); str->length(0); str->set_charset(collation.collation); int32 num=(int32) args[0]->val_int(); @@ -3032,7 +3037,7 @@ bool Item_func_repeat::fix_length_and_dec() if (agg_arg_charsets_for_string_result(collation, args, 1)) return TRUE; DBUG_ASSERT(collation.collation != NULL); - if (args[1]->const_item() && !args[1]->is_expensive()) + if (args[1]->can_eval_in_optimize()) { uint32 length= max_length_for_string(args[1]); ulonglong char_length= (ulonglong) args[0]->max_char_length() * length; @@ -3040,7 +3045,7 @@ bool Item_func_repeat::fix_length_and_dec() return false; } max_length= MAX_BLOB_WIDTH; - maybe_null= true; + set_maybe_null(); return false; } @@ -3051,7 +3056,7 @@ bool Item_func_repeat::fix_length_and_dec() String *Item_func_repeat::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint length,tot_length; char *to; /* must be longlong to avoid truncation */ @@ -3106,13 +3111,13 @@ err: bool Item_func_space::fix_length_and_dec() { collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); - if (args[0]->const_item() && !args[0]->is_expensive()) + if (args[0]->can_eval_in_optimize()) { fix_char_length_ulonglong(max_length_for_string(args[0])); return false; } max_length= MAX_BLOB_WIDTH; - maybe_null= true; + set_maybe_null(); return false; } @@ -3167,14 +3172,14 @@ bool Item_func_binlog_gtid_pos::fix_length_and_dec() { 
collation.set(system_charset_info); max_length= MAX_BLOB_WIDTH; - maybe_null= 1; + set_maybe_null(); return FALSE; } String *Item_func_binlog_gtid_pos::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); #ifndef HAVE_REPLICATION null_value= 0; str->copy("", 0, system_charset_info); @@ -3217,8 +3222,9 @@ bool Item_func_pad::fix_length_and_dec() if (arg_count == 3) { String *str; - if (!args[2]->basic_const_item() || !(str= args[2]->val_str(&pad_str)) || !str->length()) - maybe_null= true; + if (!args[2]->basic_const_item() || !(str= args[2]->val_str(&pad_str)) || + !str->length()) + set_maybe_null(); // Handle character set for args[0] and args[2]. if (agg_arg_charsets_for_string_result(collation, &args[0], 2, 2)) return TRUE; @@ -3231,13 +3237,13 @@ bool Item_func_pad::fix_length_and_dec() } DBUG_ASSERT(collation.collation->mbmaxlen > 0); - if (args[1]->const_item() && !args[1]->is_expensive()) + if (args[1]->can_eval_in_optimize()) { fix_char_length_ulonglong(max_length_for_string(args[1])); return false; } max_length= MAX_BLOB_WIDTH; - maybe_null= true; + set_maybe_null(); return false; } @@ -3249,7 +3255,7 @@ bool Item_func_pad::fix_length_and_dec() */ Sql_mode_dependency Item_func_rpad::value_depends_on_sql_mode() const { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); DBUG_ASSERT(arg_count >= 2); if (!args[1]->value_depends_on_sql_mode_const_item() || (arg_count == 3 && !args[2]->value_depends_on_sql_mode_const_item())) @@ -3277,7 +3283,7 @@ Sql_mode_dependency Item_func_rpad::value_depends_on_sql_mode() const String *Item_func_rpad::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint32 res_byte_length,res_char_length,pad_char_length,pad_byte_length; char *to; const char *ptr_pad; @@ -3371,7 +3377,7 @@ String *Item_func_rpad::val_str(String *str) String *Item_func_lpad::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint32 res_char_length,pad_char_length; /* must be longlong to avoid 
truncation */ longlong count= args[1]->val_int(); @@ -3463,7 +3469,7 @@ err: String *Item_func_conv::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(str); char *endptr,ans[65],*ptr; longlong dec; @@ -3512,9 +3518,30 @@ String *Item_func_conv::val_str(String *str) } +/* + This function is needed as Item_func_conc_charset stores cached values + in str_value. +*/ + +int Item_func_conv_charset::save_in_field(Field *field, bool no_conversions) +{ + String *result; + CHARSET_INFO *cs= collation.collation; + + result= val_str(&str_value); + if (null_value) + return set_field_to_null_with_conversions(field, no_conversions); + + /* NOTE: If null_value == FALSE, "result" must be not NULL. */ + field->set_notnull(); + int error= field->store(result->ptr(),result->length(),cs); + return error; +} + + String *Item_func_conv_charset::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (use_cached_value) return null_value ? 
0 : &str_value; String *arg= args[0]->val_str(&tmp_value); @@ -3538,13 +3565,13 @@ void Item_func_conv_charset::print(String *str, enum_query_type query_type) str->append(STRING_WITH_LEN("convert(")); args[0]->print(str, query_type); str->append(STRING_WITH_LEN(" using ")); - str->append(collation.collation->csname); + str->append(collation.collation->cs_name); str->append(')'); } String *Item_func_set_collation::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); str=args[0]->val_str(str); if ((null_value=args[0]->null_value)) return 0; @@ -3559,7 +3586,8 @@ bool Item_func_set_collation::fix_length_and_dec() if (!my_charset_same(collation.collation, m_set_collation)) { my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), - m_set_collation->name, collation.collation->csname); + m_set_collation->coll_name.str, + collation.collation->cs_name.str); return TRUE; } collation.set(m_set_collation, DERIVATION_EXPLICIT, @@ -3580,30 +3608,30 @@ void Item_func_set_collation::print(String *str, enum_query_type query_type) { args[0]->print_parenthesised(str, query_type, precedence()); str->append(STRING_WITH_LEN(" collate ")); - str->append(m_set_collation->name); + str->append(m_set_collation->coll_name); } String *Item_func_charset::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint dummy_errors; CHARSET_INFO *cs= args[0]->charset_for_protocol(); null_value= 0; - str->copy(cs->csname, (uint) strlen(cs->csname), + str->copy(cs->cs_name.str, cs->cs_name.length, &my_charset_latin1, collation.collation, &dummy_errors); return str; } String *Item_func_collation::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint dummy_errors; CHARSET_INFO *cs= args[0]->charset_for_protocol(); null_value= 0; - str->copy(cs->name, (uint) strlen(cs->name), - &my_charset_latin1, collation.collation, &dummy_errors); + str->copy(cs->coll_name.str, cs->coll_name.length, &my_charset_latin1, + collation.collation, &dummy_errors); 
return str; } @@ -3612,7 +3640,7 @@ bool Item_func_weight_string::fix_length_and_dec() { CHARSET_INFO *cs= args[0]->collation.collation; collation.set(&my_charset_bin, args[0]->collation.derivation); - flags= my_strxfrm_flag_normalize(flags, cs->levels_for_order); + weigth_flags= my_strxfrm_flag_normalize(weigth_flags, cs->levels_for_order); /* Use result_length if it was given explicitly in constructor, otherwise calculate max_length using argument's max_length @@ -3625,7 +3653,7 @@ bool Item_func_weight_string::fix_length_and_dec() args[0]->max_char_length() : nweights * cs->levels_for_order; max_length= (uint32) cs->strnxfrmlen(char_length * cs->mbmaxlen); } - maybe_null= 1; + set_maybe_null(); return FALSE; } @@ -3636,7 +3664,7 @@ String *Item_func_weight_string::val_str(String *str) String *res; CHARSET_INFO *cs= args[0]->collation.collation; size_t tmp_length, frm_length; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (args[0]->result_type() != STRING_RESULT || !(res= args[0]->val_str(&tmp_value))) @@ -3671,7 +3699,7 @@ String *Item_func_weight_string::val_str(String *str) to know the true number of characters. */ if (!(char_length= nweights)) - char_length= (flags & MY_STRXFRM_PAD_WITH_SPACE) ? + char_length= (weigth_flags & MY_STRXFRM_PAD_WITH_SPACE) ? res->numchars() : (res->length() / cs->mbminlen); } tmp_length= cs->strnxfrmlen(char_length * cs->mbmaxlen); @@ -3696,7 +3724,7 @@ String *Item_func_weight_string::val_str(String *str) frm_length= cs->strnxfrm((char*) str->ptr(), tmp_length, nweights ? 
nweights : (uint) tmp_length, res->ptr(), res->length(), - flags); + weigth_flags); DBUG_ASSERT(frm_length <= tmp_length); str->length(frm_length); @@ -3711,7 +3739,7 @@ nl: void Item_func_weight_string::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); args[0]->print(str, query_type); str->append(','); @@ -3719,7 +3747,7 @@ void Item_func_weight_string::print(String *str, enum_query_type query_type) str->append(','); str->append_ulonglong(nweights); str->append(','); - str->append_ulonglong(flags); + str->append_ulonglong(weigth_flags); str->append(')'); } @@ -3767,7 +3795,7 @@ String *Item_func_unhex::val_str(String *str) char *to; String *res; uint length; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); res= args[0]->val_str(&tmp_value); if (!res || str->alloc(length= (1+res->length())/2)) @@ -3803,7 +3831,7 @@ String *Item_func_unhex::val_str(String *str) #ifndef DBUG_OFF String *Item_func_like_range::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); longlong nbytes= args[1]->val_int(); String *res= args[0]->val_str(str); size_t min_len, max_len; @@ -3847,11 +3875,12 @@ void Item_func_binary::print(String *str, enum_query_type query_type) String *Item_load_file::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *file_name; File file; MY_STAT stat_info; char path[FN_REFLEN]; + ulonglong file_size; DBUG_ENTER("load_file"); if (!(file_name= args[0]->val_str(str)) @@ -3876,10 +3905,11 @@ String *Item_load_file::val_str(String *str) /* my_error(ER_TEXTFILE_NOT_READABLE, MYF(0), file_name->c_ptr()); */ goto err; } + file_size= stat_info.st_size; { THD *thd= current_thd; - if (stat_info.st_size > (long) thd->variables.max_allowed_packet) + if (file_size >= thd->variables.max_allowed_packet) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, @@ -3888,7 +3918,7 @@ String 
*Item_load_file::val_str(String *str) goto err; } } - if (tmp_value.alloc((size_t)stat_info.st_size)) + if (tmp_value.alloc((ulong)file_size)) goto err; if ((file= mysql_file_open(key_file_loadfile, file_name->ptr(), O_RDONLY, MYF(0))) < 0) @@ -3912,7 +3942,7 @@ err: String* Item_func_export_set::val_str(String* str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String yes_buf, no_buf, sep_buf; const ulonglong the_set = (ulonglong) args[0]->val_int(); const String *yes= args[1]->val_str(&yes_buf); @@ -4032,7 +4062,7 @@ bool Item_func_export_set::fix_length_and_dec() String *Item_func_quote::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); /* Bit mask that has 1 for set for the position of the following characters: 0, \, ' and ^Z @@ -4174,7 +4204,7 @@ null: longlong Item_func_uncompressed_length::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&value); if (!res) { @@ -4211,7 +4241,7 @@ longlong Item_func_uncompressed_length::val_int() longlong Item_func_crc32::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res=args[0]->val_str(&value); if (!res) { @@ -4232,7 +4262,7 @@ String *Item_func_compress::val_str(String *str) String *res; Byte *body; char *tmp, *last_char; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!(res= args[0]->val_str(&tmp_value))) { @@ -4294,7 +4324,7 @@ String *Item_func_compress::val_str(String *str) String *Item_func_uncompress::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res= args[0]->val_str(&tmp_value); ulong new_size; int err; @@ -4354,15 +4384,20 @@ err: String *Item_func_uuid::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uchar guid[MY_UUID_SIZE]; + size_t length= (without_separators ? 
+ MY_UUID_ORACLE_STRING_LENGTH : + MY_UUID_STRING_LENGTH); - str->alloc(MY_UUID_STRING_LENGTH+1); - str->length(MY_UUID_STRING_LENGTH); + str->alloc(length+1); + str->length(length); str->set_charset(system_charset_info); my_uuid(guid); - my_uuid2str(guid, (char *)str->ptr()); - + if (without_separators) + my_uuid2str_oracle(guid, (char *)str->ptr()); + else + my_uuid2str(guid, (char *)str->ptr()); return str; } @@ -4409,7 +4444,7 @@ bool Item_func_dyncol_create::fix_fields(THD *thd, Item **ref) bool Item_func_dyncol_create::fix_length_and_dec() { max_length= MAX_BLOB_WIDTH; - maybe_null= TRUE; + set_maybe_null(); collation.set(&my_charset_bin); decimals= 0; return FALSE; @@ -4639,7 +4674,7 @@ void Item_func_dyncol_create::print_arguments(String *str, if (defs[i].cs) { str->append(STRING_WITH_LEN(" charset ")); - str->append(defs[i].cs->csname); + str->append(defs[i].cs->cs_name); str->append(' '); } break; diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 0c4967a5247..d0bdbeabaa5 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -64,14 +64,15 @@ public: Item_func(thd, a, b, c, d, e) { decimals=NOT_FIXED_DEC; } Item_str_func(THD *thd, List<Item> &list): Item_func(thd, list) { decimals=NOT_FIXED_DEC; } - longlong val_int(); - double val_real(); - my_decimal *val_decimal(my_decimal *); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + longlong val_int() override; + double val_real() override; + my_decimal *val_decimal(my_decimal *) override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return get_date_from_string(thd, ltime, fuzzydate); } - const Type_handler *type_handler() const { return string_type_handler(); } + const Type_handler *type_handler() const override + { return string_type_handler(); } void left_right_max_length(); - bool fix_fields(THD *thd, Item **ref); + bool fix_fields(THD *thd, Item **ref) override; }; @@ -88,11 +89,11 @@ public: Item_str_ascii_func(THD *thd, Item *a, Item 
*b): Item_str_func(thd, a, b) {} Item_str_ascii_func(THD *thd, Item *a, Item *b, Item *c): Item_str_func(thd, a, b, c) {} - String *val_str(String *str) + String *val_str(String *str) override { return val_str_from_val_str_ascii(str, &ascii_buf); } - String *val_str_ascii(String *)= 0; + String *val_str_ascii(String *) override= 0; }; @@ -143,14 +144,18 @@ class Item_func_md5 :public Item_str_ascii_checksum_func { public: Item_func_md5(THD *thd, Item *a): Item_str_ascii_checksum_func(thd, a) {} - String *val_str_ascii(String *); - bool fix_length_and_dec() + String *val_str_ascii(String *) override; + bool fix_length_and_dec() override { fix_length_and_charset(32, default_charset()); return FALSE; } - const char *func_name() const { return "md5"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("md5") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_md5>(thd, this); } }; @@ -159,10 +164,14 @@ class Item_func_sha :public Item_str_ascii_checksum_func { public: Item_func_sha(THD *thd, Item *a): Item_str_ascii_checksum_func(thd, a) {} - String *val_str_ascii(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "sha"; } - Item *get_copy(THD *thd) + String *val_str_ascii(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("sha") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sha>(thd, this); } }; @@ -171,10 +180,14 @@ class Item_func_sha2 :public Item_str_ascii_checksum_func public: Item_func_sha2(THD *thd, Item *a, Item *b) :Item_str_ascii_checksum_func(thd, a, b) {} - String *val_str_ascii(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "sha2"; } - Item *get_copy(THD *thd) + String *val_str_ascii(String *) override; + bool fix_length_and_dec() 
override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("sha2") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sha2>(thd, this); } }; @@ -184,10 +197,14 @@ class Item_func_to_base64 :public Item_str_ascii_checksum_func public: Item_func_to_base64(THD *thd, Item *a) :Item_str_ascii_checksum_func(thd, a) {} - String *val_str_ascii(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "to_base64"; } - Item *get_copy(THD *thd) + String *val_str_ascii(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("to_base64") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_to_base64>(thd, this); } }; @@ -197,10 +214,14 @@ class Item_func_from_base64 :public Item_str_binary_checksum_func public: Item_func_from_base64(THD *thd, Item *a) :Item_str_binary_checksum_func(thd, a) { } - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "from_base64"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("from_base64") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_from_base64>(thd, this); } }; @@ -225,9 +246,13 @@ class Item_func_aes_encrypt :public Item_aes_crypt public: Item_func_aes_encrypt(THD *thd, Item *a, Item *b) :Item_aes_crypt(thd, a, b) {} - bool fix_length_and_dec(); - const char *func_name() const { return "aes_encrypt"; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("aes_encrypt") }; + return name; + } + Item *get_copy(THD *thd) override { return 
get_item_copy<Item_func_aes_encrypt>(thd, this); } }; @@ -236,9 +261,13 @@ class Item_func_aes_decrypt :public Item_aes_crypt public: Item_func_aes_decrypt(THD *thd, Item *a, Item *b): Item_aes_crypt(thd, a, b) {} - bool fix_length_and_dec(); - const char *func_name() const { return "aes_decrypt"; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("aes_decrypt") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_aes_decrypt>(thd, this); } }; @@ -259,10 +288,14 @@ protected: public: Item_func_concat(THD *thd, List<Item> &list): Item_str_func(thd, list) {} Item_func_concat(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "concat"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("concat") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_concat>(thd, this); } }; @@ -280,9 +313,13 @@ public: Item_func_concat_operator_oracle(THD *thd, Item *a, Item *b) :Item_func_concat(thd, a, b) { } - String *val_str(String *); - const char *func_name() const { return "concat_operator_oracle"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("concat_operator_oracle") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_concat_operator_oracle>(thd, this); } @@ -294,16 +331,20 @@ class Item_func_decode_histogram :public Item_str_func public: Item_func_decode_histogram(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {} - String *val_str(String *); - bool fix_length_and_dec() + String 
*val_str(String *) override; + bool fix_length_and_dec() override { collation.set(system_charset_info); max_length= MAX_BLOB_WIDTH; - maybe_null= 1; + set_maybe_null(); return FALSE; } - const char *func_name() const { return "decode_histogram"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("decode_histogram") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_decode_histogram>(thd, this); } }; @@ -312,11 +353,15 @@ class Item_func_concat_ws :public Item_str_func String tmp_value; public: Item_func_concat_ws(THD *thd, List<Item> &list): Item_str_func(thd, list) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "concat_ws"; } - table_map not_null_tables() const { return 0; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("concat_ws") }; + return name; + } + table_map not_null_tables() const override { return 0; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_concat_ws>(thd, this); } }; @@ -325,10 +370,14 @@ class Item_func_reverse :public Item_str_func String tmp_value; public: Item_func_reverse(THD *thd, Item *a): Item_str_func(thd, a) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "reverse"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("reverse") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_reverse>(thd, this); } }; @@ -339,11 +388,15 @@ class Item_func_replace :public Item_str_func public: Item_func_replace(THD *thd, Item *org, Item *find, Item *replace): 
Item_str_func(thd, org, find, replace) {} - String *val_str(String *to) { return val_str_internal(to, NULL); }; - bool fix_length_and_dec(); + String *val_str(String *to) override { return val_str_internal(to, NULL); }; + bool fix_length_and_dec() override; String *val_str_internal(String *str, String *empty_string_for_null); - const char *func_name() const { return "replace"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("replace") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_replace>(thd, this); } }; @@ -354,9 +407,14 @@ class Item_func_replace_oracle :public Item_func_replace public: Item_func_replace_oracle(THD *thd, Item *org, Item *find, Item *replace): Item_func_replace(thd, org, find, replace) {} - String *val_str(String *to) { return val_str_internal(to, &tmp_emtpystr); }; - const char *func_name() const { return "replace_oracle"; } - Item *get_copy(THD *thd) + String *val_str(String *to) override + { return val_str_internal(to, &tmp_emtpystr); }; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("replace_oracle") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_replace_oracle>(thd, this); } }; @@ -371,17 +429,21 @@ public: Item_func_regexp_replace(THD *thd, Item *a, Item *b, Item *c): Item_str_func(thd, a, b, c) {} - void cleanup() + void cleanup() override { DBUG_ENTER("Item_func_regexp_replace::cleanup"); Item_str_func::cleanup(); re.cleanup(); DBUG_VOID_RETURN; } - String *val_str(String *str); - bool fix_length_and_dec(); - const char *func_name() const { return "regexp_replace"; } - Item *get_copy(THD *thd) { return 0;} + String *val_str(String *str) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("regexp_replace") }; + return name; + } + Item 
*get_copy(THD *thd) override { return 0;} }; @@ -392,17 +454,21 @@ public: Item_func_regexp_substr(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {} - void cleanup() + void cleanup() override { DBUG_ENTER("Item_func_regexp_substr::cleanup"); Item_str_func::cleanup(); re.cleanup(); DBUG_VOID_RETURN; } - String *val_str(String *str); - bool fix_length_and_dec(); - const char *func_name() const { return "regexp_substr"; } - Item *get_copy(THD *thd) { return 0; } + String *val_str(String *str) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("regexp_substr") }; + return name; + } + Item *get_copy(THD *thd) override { return 0; } }; @@ -413,10 +479,14 @@ public: Item_func_insert(THD *thd, Item *org, Item *start, Item *length, Item *new_str): Item_str_func(thd, org, start, length, new_str) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "insert"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("insert") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_insert>(thd, this); } }; @@ -429,7 +499,7 @@ protected: String tmp_value; public: Item_str_conv(THD *thd, Item *item): Item_str_func(thd, item) {} - String *val_str(String *); + String *val_str(String *) override; }; @@ -437,9 +507,13 @@ class Item_func_lcase :public Item_str_conv { public: Item_func_lcase(THD *thd, Item *item): Item_str_conv(thd, item) {} - const char *func_name() const { return "lcase"; } - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("lcase") }; + return name; + } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return 
get_item_copy<Item_func_lcase>(thd, this); } }; @@ -447,9 +521,13 @@ class Item_func_ucase :public Item_str_conv { public: Item_func_ucase(THD *thd, Item *item): Item_str_conv(thd, item) {} - const char *func_name() const { return "ucase"; } - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("ucase") }; + return name; + } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ucase>(thd, this); } }; @@ -459,10 +537,14 @@ class Item_func_left :public Item_str_func String tmp_value; public: Item_func_left(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "left"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("left") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_left>(thd, this); } }; @@ -472,10 +554,14 @@ class Item_func_right :public Item_str_func String tmp_value; public: Item_func_right(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "right"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("right") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_right>(thd, this); } }; @@ -489,33 +575,41 @@ public: Item_func_substr(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {} Item_func_substr(THD *thd, Item *a, Item *b, Item *c): Item_str_func(thd, a, b, c) {} - String *val_str(String *); - bool fix_length_and_dec(); - 
const char *func_name() const { return "substr"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("substr") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_substr>(thd, this); } }; class Item_func_substr_oracle :public Item_func_substr { protected: - longlong get_position() + longlong get_position() override { longlong pos= args[1]->val_int(); return pos == 0 ? 1 : pos; } - String *make_empty_result(String *str) + String *make_empty_result(String *str) override { null_value= 1; return NULL; } public: Item_func_substr_oracle(THD *thd, Item *a, Item *b): Item_func_substr(thd, a, b) {} Item_func_substr_oracle(THD *thd, Item *a, Item *b, Item *c): Item_func_substr(thd, a, b, c) {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { bool res= Item_func_substr::fix_length_and_dec(); - maybe_null= true; + set_maybe_null(); return res; } - const char *func_name() const { return "substr_oracle"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("substr_oracle") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_substr_oracle>(thd, this); } }; @@ -525,10 +619,14 @@ class Item_func_substr_index :public Item_str_func public: Item_func_substr_index(THD *thd, Item *a,Item *b,Item *c): Item_str_func(thd, a, b, c) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "substring_index"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("substring_index") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_substr_index>(thd, 
this); } }; @@ -557,17 +655,25 @@ protected: { return trimmed_value(res, 0, res->length()); } - virtual const char *func_name_ext() const { return ""; } + virtual LEX_CSTRING func_name_ext() const + { + static LEX_CSTRING name_ext= {STRING_WITH_LEN("") }; + return name_ext; + } public: Item_func_trim(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {} Item_func_trim(THD *thd, Item *a): Item_str_func(thd, a) {} - Sql_mode_dependency value_depends_on_sql_mode() const; - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "trim"; } - void print(String *str, enum_query_type query_type); - virtual const char *mode_name() const { return "both"; } - Item *get_copy(THD *thd) + Sql_mode_dependency value_depends_on_sql_mode() const override; + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("trim") }; + return name; + } + void print(String *str, enum_query_type query_type) override; + virtual LEX_CSTRING mode_name() const { return { "both", 4}; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_trim>(thd, this); } }; @@ -575,21 +681,29 @@ public: class Item_func_trim_oracle :public Item_func_trim { protected: - String *make_empty_result(String *str) + String *make_empty_result(String *str) override { null_value= 1; return NULL; } - const char *func_name_ext() const { return "_oracle"; } + LEX_CSTRING func_name_ext() const override + { + static LEX_CSTRING name_ext= {STRING_WITH_LEN("_oracle") }; + return name_ext; + } public: Item_func_trim_oracle(THD *thd, Item *a, Item *b): Item_func_trim(thd, a, b) {} Item_func_trim_oracle(THD *thd, Item *a): Item_func_trim(thd, a) {} - const char *func_name() const { return "trim_oracle"; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("trim_oracle") }; + return name; + 
} + bool fix_length_and_dec() override { bool res= Item_func_trim::fix_length_and_dec(); - maybe_null= true; + set_maybe_null(); return res; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_trim_oracle>(thd, this); } }; @@ -599,14 +713,19 @@ class Item_func_ltrim :public Item_func_trim public: Item_func_ltrim(THD *thd, Item *a, Item *b): Item_func_trim(thd, a, b) {} Item_func_ltrim(THD *thd, Item *a): Item_func_trim(thd, a) {} - Sql_mode_dependency value_depends_on_sql_mode() const + Sql_mode_dependency value_depends_on_sql_mode() const override { return Item_func::value_depends_on_sql_mode(); } - String *val_str(String *); - const char *func_name() const { return "ltrim"; } - const char *mode_name() const { return "leading"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("ltrim") }; + return name; + } + LEX_CSTRING mode_name() const override + { return { STRING_WITH_LEN("leading") }; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ltrim>(thd, this); } }; @@ -614,21 +733,29 @@ public: class Item_func_ltrim_oracle :public Item_func_ltrim { protected: - String *make_empty_result(String *str) + String *make_empty_result(String *str) override { null_value= 1; return NULL; } - const char *func_name_ext() const { return "_oracle"; } + LEX_CSTRING func_name_ext() const override + { + static LEX_CSTRING name_ext= {STRING_WITH_LEN("_oracle") }; + return name_ext; + } public: Item_func_ltrim_oracle(THD *thd, Item *a, Item *b): Item_func_ltrim(thd, a, b) {} Item_func_ltrim_oracle(THD *thd, Item *a): Item_func_ltrim(thd, a) {} - const char *func_name() const { return "ltrim_oracle"; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("ltrim_oracle") }; + return name; + } + bool fix_length_and_dec() override { bool 
res= Item_func_ltrim::fix_length_and_dec(); - maybe_null= true; + set_maybe_null(); return res; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_ltrim_oracle>(thd, this); } }; @@ -638,10 +765,15 @@ class Item_func_rtrim :public Item_func_trim public: Item_func_rtrim(THD *thd, Item *a, Item *b): Item_func_trim(thd, a, b) {} Item_func_rtrim(THD *thd, Item *a): Item_func_trim(thd, a) {} - String *val_str(String *); - const char *func_name() const { return "rtrim"; } - const char *mode_name() const { return "trailing"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("rtrim") }; + return name; + } + LEX_CSTRING mode_name() const override + { return { STRING_WITH_LEN("trailing") }; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_rtrim>(thd, this); } }; @@ -649,21 +781,29 @@ public: class Item_func_rtrim_oracle :public Item_func_rtrim { protected: - String *make_empty_result(String *str) + String *make_empty_result(String *str) override { null_value= 1; return NULL; } - const char *func_name_ext() const { return "_oracle"; } + LEX_CSTRING func_name_ext() const override + { + static LEX_CSTRING name_ext= {STRING_WITH_LEN("_oracle") }; + return name_ext; + } public: Item_func_rtrim_oracle(THD *thd, Item *a, Item *b): Item_func_rtrim(thd, a, b) {} Item_func_rtrim_oracle(THD *thd, Item *a): Item_func_rtrim(thd, a) {} - const char *func_name() const { return "rtrim_oracle"; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("rtrim_oracle") }; + return name; + } + bool fix_length_and_dec() override { bool res= Item_func_rtrim::fix_length_and_dec(); - maybe_null= true; + set_maybe_null(); return res; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_rtrim_oracle>(thd, 
this); } }; @@ -688,9 +828,9 @@ public: Item_str_ascii_checksum_func(thd, a), alg(NEW), deflt(1) {} Item_func_password(THD *thd, Item *a, PW_Alg al): Item_str_ascii_checksum_func(thd, a), alg(al), deflt(0) {} - String *val_str_ascii(String *str); - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec() + String *val_str_ascii(String *str) override; + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override { fix_length_and_charset((alg == 1 ? SCRAMBLED_PASSWORD_CHAR_LENGTH : @@ -698,11 +838,15 @@ public: default_charset()); return FALSE; } - const char *func_name() const { return ((deflt || alg == 1) ? - "password" : "old_password"); } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING password_normal= {STRING_WITH_LEN("password") }; + static LEX_CSTRING password_old= {STRING_WITH_LEN("old_password") }; + return (deflt || alg == 1) ? password_normal : password_old; + } static char *alloc(THD *thd, const char *password, size_t pass_len, enum PW_Alg al); - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_password>(thd, this); } }; @@ -716,16 +860,20 @@ public: :Item_str_binary_checksum_func(thd, a) {} Item_func_des_encrypt(THD *thd, Item *a, Item *b) :Item_str_binary_checksum_func(thd, a, b) {} - String *val_str(String *); - bool fix_length_and_dec() + String *val_str(String *) override; + bool fix_length_and_dec() override { - maybe_null=1; + set_maybe_null(); /* 9 = MAX ((8- (arg_len % 8)) + 1) */ max_length = args[0]->max_length + 9; return FALSE; } - const char *func_name() const { return "des_encrypt"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("des_encrypt") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_des_encrypt>(thd, this); } }; @@ -737,18 +885,22 @@ public: :Item_str_binary_checksum_func(thd, a) {} Item_func_des_decrypt(THD 
*thd, Item *a, Item *b) :Item_str_binary_checksum_func(thd, a, b) {} - String *val_str(String *); - bool fix_length_and_dec() + String *val_str(String *) override; + bool fix_length_and_dec() override { - maybe_null=1; + set_maybe_null(); /* 9 = MAX ((8- (arg_len % 8)) + 1) */ max_length= args[0]->max_length; if (max_length >= 9U) max_length-= 9U; return FALSE; } - const char *func_name() const { return "des_decrypt"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("des_decrypt") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_des_decrypt>(thd, this); } }; @@ -777,14 +929,23 @@ public: { constructor_helper(); } - String *val_str(String *); - bool fix_length_and_dec() { maybe_null=1; max_length = 13; return FALSE; } - const char *func_name() const { return "encrypt"; } - bool check_vcol_func_processor(void *arg) + String *val_str(String *) override; + bool fix_length_and_dec() override + { + set_maybe_null(); + max_length = 13; + return FALSE; + } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("encrypt") }; + return name; + } + bool check_vcol_func_processor(void *arg) override { return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_encrypt>(thd, this); } }; @@ -801,10 +962,14 @@ protected: public: Item_func_encode(THD *thd, Item *a, Item *seed_arg): Item_str_binary_checksum_func(thd, a, seed_arg) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "encode"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("encode") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_encode>(thd, this); } protected: 
virtual void crypto_transform(String *); @@ -818,11 +983,15 @@ class Item_func_decode :public Item_func_encode { public: Item_func_decode(THD *thd, Item *a, Item *seed_arg): Item_func_encode(thd, a, seed_arg) {} - const char *func_name() const { return "decode"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("decode") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_decode>(thd, this); } protected: - void crypto_transform(String *); + void crypto_transform(String *) override; }; @@ -851,16 +1020,21 @@ class Item_func_database :public Item_func_sysconst { public: Item_func_database(THD *thd): Item_func_sysconst(thd) {} - String *val_str(String *); - bool fix_length_and_dec() + String *val_str(String *) override; + bool fix_length_and_dec() override { max_length= NAME_CHAR_LEN * system_charset_info->mbmaxlen; - maybe_null=1; + set_maybe_null(); return FALSE; } - const char *func_name() const { return "database"; } - const char *fully_qualified_func_name() const { return "database()"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("database") }; + return name; + } + const char *fully_qualified_func_name() const override + { return "database()"; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_database>(thd, this); } }; @@ -869,20 +1043,26 @@ class Item_func_sqlerrm :public Item_func_sysconst { public: Item_func_sqlerrm(THD *thd): Item_func_sysconst(thd) {} - String *val_str(String *); - const char *func_name() const { return "SQLERRM"; } - const char *fully_qualified_func_name() const { return "SQLERRM"; } - void print(String *str, enum_query_type query_type) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("SQLERRM") }; + return name; + } + const char 
*fully_qualified_func_name() const override + { return "SQLERRM"; } + void print(String *str, enum_query_type query_type) override { - str->append(func_name()); + str->append(func_name_cstring()); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { max_length= 512 * system_charset_info->mbmaxlen; - null_value= maybe_null= false; + null_value= false; + base_flags&= ~item_base_t::MAYBE_NULL; return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sqlerrm>(thd, this); } }; @@ -897,25 +1077,30 @@ public: { str_value.set("", 0, system_charset_info); } - String *val_str(String *) + String *val_str(String *) override { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); return (null_value ? 0 : &str_value); } - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec() + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override { max_length= (uint32) (username_char_length + HOSTNAME_LENGTH + 1) * SYSTEM_CHARSET_MBMAXLEN; return FALSE; } - const char *func_name() const { return "user"; } - const char *fully_qualified_func_name() const { return "user()"; } - int save_in_field(Field *field, bool no_conversions) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("user") }; + return name; + } + const char *fully_qualified_func_name() const override + { return "user()"; } + int save_in_field(Field *field, bool no_conversions) override { return save_str_value_in_field(field, &str_value); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_user>(thd, this); } }; @@ -927,10 +1112,15 @@ class Item_func_current_user :public Item_func_user public: Item_func_current_user(THD *thd, Name_resolution_context *context_arg): Item_func_user(thd), context(context_arg) {} - bool fix_fields(THD *thd, Item **ref); - const char *func_name() const { return "current_user"; } - const char 
*fully_qualified_func_name() const { return "current_user()"; } - bool check_vcol_func_processor(void *arg) + bool fix_fields(THD *thd, Item **ref) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("current_user") }; + return name; + } + const char *fully_qualified_func_name() const override + { return "current_user()"; } + bool check_vcol_func_processor(void *arg) override { context= 0; return mark_unsupported_function(fully_qualified_func_name(), arg, @@ -946,29 +1136,33 @@ class Item_func_current_role :public Item_func_sysconst public: Item_func_current_role(THD *thd, Name_resolution_context *context_arg): Item_func_sysconst(thd), context(context_arg) {} - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec() + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override { max_length= (uint32) username_char_length * SYSTEM_CHARSET_MBMAXLEN; return FALSE; } - int save_in_field(Field *field, bool no_conversions) + int save_in_field(Field *field, bool no_conversions) override { return save_str_value_in_field(field, &str_value); } - const char *func_name() const { return "current_role"; } - const char *fully_qualified_func_name() const { return "current_role()"; } - String *val_str(String *) + LEX_CSTRING func_name_cstring() const override { - DBUG_ASSERT(fixed == 1); + static LEX_CSTRING name= {STRING_WITH_LEN("current_role") }; + return name; + } + const char *fully_qualified_func_name() const override + { return "current_role()"; } + String *val_str(String *) override + { + DBUG_ASSERT(fixed()); return null_value ? 
NULL : &str_value; } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { - context= 0; return mark_unsupported_function(fully_qualified_func_name(), arg, VCOL_SESSION_FUNC); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_current_role>(thd, this); } }; @@ -978,10 +1172,14 @@ class Item_func_soundex :public Item_str_func String tmp_value; public: Item_func_soundex(THD *thd, Item *a): Item_str_func(thd, a) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "soundex"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("soundex") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_soundex>(thd, this); } }; @@ -990,12 +1188,16 @@ class Item_func_elt :public Item_str_func { public: Item_func_elt(THD *thd, List<Item> &list): Item_str_func(thd, list) {} - double val_real(); - longlong val_int(); - String *val_str(String *str); - bool fix_length_and_dec(); - const char *func_name() const { return "elt"; } - Item *get_copy(THD *thd) + double val_real() override; + longlong val_int() override; + String *val_str(String *str) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("elt") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_elt>(thd, this); } }; @@ -1006,10 +1208,14 @@ class Item_func_make_set :public Item_str_func public: Item_func_make_set(THD *thd, List<Item> &list): Item_str_func(thd, list) {} - String *val_str(String *str); - bool fix_length_and_dec(); - const char *func_name() const { return "make_set"; } - Item *get_copy(THD *thd) + String *val_str(String *str) override; + bool fix_length_and_dec() 
override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("make_set") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_make_set>(thd, this); } }; @@ -1023,10 +1229,14 @@ public: Item_func_format(THD *thd, Item *org, Item *dec, Item *lang): Item_str_ascii_func(thd, org, dec, lang) {} - String *val_str_ascii(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "format"; } - Item *get_copy(THD *thd) + String *val_str_ascii(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("format") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_format>(thd, this); } }; @@ -1042,16 +1252,20 @@ public: Item_func_char(THD *thd, Item *arg1, CHARSET_INFO *cs): Item_str_func(thd, arg1) { collation.set(cs); } - String *val_str(String *); + String *val_str(String *) override; void append_char(String * str, int32 num); - bool fix_length_and_dec() + bool fix_length_and_dec() override { max_length= arg_count * 4; return FALSE; } - const char *func_name() const { return "char"; } - void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("char") }; + return name; + } + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_char>(thd, this); } }; @@ -1060,14 +1274,18 @@ class Item_func_chr :public Item_func_char public: Item_func_chr(THD *thd, Item *arg1, CHARSET_INFO *cs): Item_func_char(thd, arg1, cs) {} - String *val_str(String *); - bool fix_length_and_dec() + String *val_str(String *) override; + bool fix_length_and_dec() override { max_length= 4; return FALSE; } - const char *func_name() const { return "chr"; } - Item *get_copy(THD 
*thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("chr") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_chr>(thd, this); } }; @@ -1077,10 +1295,14 @@ class Item_func_repeat :public Item_str_func public: Item_func_repeat(THD *thd, Item *arg1, Item *arg2): Item_str_func(thd, arg1, arg2) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "repeat"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("repeat") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_repeat>(thd, this); } }; @@ -1089,10 +1311,14 @@ class Item_func_space :public Item_str_func { public: Item_func_space(THD *thd, Item *arg1): Item_str_func(thd, arg1) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "space"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("space") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_space>(thd, this); } }; @@ -1102,14 +1328,18 @@ class Item_func_binlog_gtid_pos :public Item_str_func public: Item_func_binlog_gtid_pos(THD *thd, Item *arg1, Item *arg2): Item_str_func(thd, arg1, arg2) {} - String *val_str(String *); - bool fix_length_and_dec(); - const char *func_name() const { return "binlog_gtid_pos"; } - bool check_vcol_func_processor(void *arg) + String *val_str(String *) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("binlog_gtid_pos") }; + return name; + } + bool check_vcol_func_processor(void 
*arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_binlog_gtid_pos>(thd, this); } }; @@ -1123,7 +1353,7 @@ public: Item_str_func(thd, arg1, arg2, arg3) {} Item_func_pad(THD *thd, Item *arg1, Item *arg2): Item_str_func(thd, arg1, arg2) {} - bool fix_length_and_dec(); + bool fix_length_and_dec() override; }; @@ -1134,31 +1364,39 @@ public: Item_func_pad(thd, arg1, arg2, arg3) {} Item_func_rpad(THD *thd, Item *arg1, Item *arg2): Item_func_pad(thd, arg1, arg2) {} - String *val_str(String *); - const char *func_name() const { return "rpad"; } - Sql_mode_dependency value_depends_on_sql_mode() const; - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("rpad") }; + return name; + } + Sql_mode_dependency value_depends_on_sql_mode() const override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_rpad>(thd, this); } }; class Item_func_rpad_oracle :public Item_func_rpad { - String *make_empty_result(String *str) + String *make_empty_result(String *str) override { null_value= 1; return NULL; } public: Item_func_rpad_oracle(THD *thd, Item *arg1, Item *arg2, Item *arg3): Item_func_rpad(thd, arg1, arg2, arg3) {} Item_func_rpad_oracle(THD *thd, Item *arg1, Item *arg2): Item_func_rpad(thd, arg1, arg2) {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { bool res= Item_func_rpad::fix_length_and_dec(); - maybe_null= true; + set_maybe_null(); return res; } - const char *func_name() const { return "rpad_oracle"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("rpad_oracle") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_rpad_oracle>(thd, this); } }; @@ -1170,30 +1408,38 @@ public: 
Item_func_pad(thd, arg1, arg2, arg3) {} Item_func_lpad(THD *thd, Item *arg1, Item *arg2): Item_func_pad(thd, arg1, arg2) {} - String *val_str(String *); - const char *func_name() const { return "lpad"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("lpad") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_lpad>(thd, this); } }; class Item_func_lpad_oracle :public Item_func_lpad { - String *make_empty_result(String *str) + String *make_empty_result(String *str) override { null_value= 1; return NULL; } public: Item_func_lpad_oracle(THD *thd, Item *arg1, Item *arg2, Item *arg3): Item_func_lpad(thd, arg1, arg2, arg3) {} Item_func_lpad_oracle(THD *thd, Item *arg1, Item *arg2): Item_func_lpad(thd, arg1, arg2) {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { bool res= Item_func_lpad::fix_length_and_dec(); - maybe_null= true; + set_maybe_null(); return res; } - const char *func_name() const { return "lpad_oracle"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("lpad_oracle") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_lpad_oracle>(thd, this); } }; @@ -1203,16 +1449,20 @@ class Item_func_conv :public Item_str_func public: Item_func_conv(THD *thd, Item *a, Item *b, Item *c): Item_str_func(thd, a, b, c) {} - const char *func_name() const { return "conv"; } - String *val_str(String *); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("conv") }; + return name; + } + String *val_str(String *) override; + bool fix_length_and_dec() override { collation.set(default_charset()); fix_char_length(64); - maybe_null= 1; + set_maybe_null(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) 
override { return get_item_copy<Item_func_conv>(thd, this); } }; @@ -1231,16 +1481,20 @@ protected: public: Item_func_hex(THD *thd, Item *a): Item_str_ascii_checksum_func(thd, a), m_arg0_type_handler(NULL) {} - const char *func_name() const { return "hex"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("hex") }; + return name; + } String *val_str_ascii_from_val_int(String *str); String *val_str_ascii_from_val_real(String *str); String *val_str_ascii_from_val_str(String *str); - String *val_str_ascii(String *str) + String *val_str_ascii(String *str) override { - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); return m_arg0_type_handler->Item_func_hex_val_str_ascii(this, str); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); decimals=0; @@ -1248,7 +1502,7 @@ public: m_arg0_type_handler= args[0]->type_handler(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_hex>(thd, this); } }; @@ -1259,18 +1513,22 @@ public: Item_func_unhex(THD *thd, Item *a): Item_str_func(thd, a) { /* there can be bad hex strings */ - maybe_null= 1; + set_maybe_null(); } - const char *func_name() const { return "unhex"; } - String *val_str(String *); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("unhex") }; + return name; + } + String *val_str(String *) override; + bool fix_length_and_dec() override { collation.set(&my_charset_bin); decimals=0; max_length=(1+args[0]->max_length)/2; return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_unhex>(thd, this); } }; @@ -1285,9 +1543,11 @@ protected: public: Item_func_like_range(THD *thd, Item *a, Item *b, bool is_min_arg): Item_str_func(thd, a, b), is_min(is_min_arg) - { maybe_null= 1; } - String *val_str(String 
*); - bool fix_length_and_dec() + { + set_maybe_null(); + } + String *val_str(String *) override; + bool fix_length_and_dec() override { collation.set(args[0]->collation); decimals=0; @@ -1302,8 +1562,12 @@ class Item_func_like_range_min :public Item_func_like_range public: Item_func_like_range_min(THD *thd, Item *a, Item *b): Item_func_like_range(thd, a, b, true) { } - const char *func_name() const { return "like_range_min"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("like_range_min") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_like_range_min>(thd, this); } }; @@ -1313,8 +1577,12 @@ class Item_func_like_range_max :public Item_func_like_range public: Item_func_like_range_max(THD *thd, Item *a, Item *b): Item_func_like_range(thd, a, b, false) { } - const char *func_name() const { return "like_range_max"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("like_range_max") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_like_range_max>(thd, this); } }; #endif @@ -1324,25 +1592,29 @@ class Item_func_binary :public Item_str_func { public: Item_func_binary(THD *thd, Item *a): Item_str_func(thd, a) {} - String *val_str(String *a) + String *val_str(String *a) override { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *tmp=args[0]->val_str(a); null_value=args[0]->null_value; if (tmp) tmp->set_charset(&my_charset_bin); return tmp; } - bool fix_length_and_dec() + bool fix_length_and_dec() override { collation.set(&my_charset_bin); max_length=args[0]->max_length; return FALSE; } - void print(String *str, enum_query_type query_type); - const char *func_name() const { return "cast_as_binary"; } - bool need_parentheses_in_default() { return true; } - Item *get_copy(THD *thd) + void print(String *str, enum_query_type 
query_type) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cast_as_binary") }; + return name; + } + bool need_parentheses_in_default() override { return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_binary>(thd, this); } }; @@ -1352,20 +1624,24 @@ class Item_load_file :public Item_str_func String tmp_value; public: Item_load_file(THD *thd, Item *a): Item_str_func(thd, a) {} - String *val_str(String *); - const char *func_name() const { return "load_file"; } - bool fix_length_and_dec() + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("load_file") }; + return name; + } + bool fix_length_and_dec() override { collation.set(&my_charset_bin, DERIVATION_COERCIBLE); - maybe_null=1; + set_maybe_null(); max_length=MAX_BLOB_WIDTH; return FALSE; } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_load_file>(thd, this); } }; @@ -1379,10 +1655,14 @@ class Item_func_export_set: public Item_str_func Item_str_func(thd, a, b, c, d) {} Item_func_export_set(THD *thd, Item *a, Item *b, Item* c, Item* d, Item* e): Item_str_func(thd, a, b, c, d, e) {} - String *val_str(String *str); - bool fix_length_and_dec(); - const char *func_name() const { return "export_set"; } - Item *get_copy(THD *thd) + String *val_str(String *str) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("export_set") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_export_set>(thd, this); } }; @@ -1392,9 +1672,13 @@ class Item_func_quote :public Item_str_func String tmp_value; public: 
Item_func_quote(THD *thd, Item *a): Item_str_func(thd, a) {} - const char *func_name() const { return "quote"; } - String *val_str(String *); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("quote") }; + return name; + } + String *val_str(String *) override; + bool fix_length_and_dec() override { collation.set(args[0]->collation); ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + @@ -1402,7 +1686,7 @@ public: max_length= (uint32) MY_MIN(max_result_length, MAX_BLOB_WIDTH); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_quote>(thd, this); } }; @@ -1422,7 +1706,7 @@ public: Item_str_func(thd, a) { collation.set(cs, DERIVATION_IMPLICIT); - if (cache_if_const && args[0]->const_item() && !args[0]->is_expensive()) + if (cache_if_const && args[0]->can_eval_in_optimize()) { uint errors= 0; String tmp, *str= args[0]->val_str(&tmp); @@ -1454,8 +1738,8 @@ public: (cs->mbmaxlen > 1 || !(cs->state & MY_CS_NONASCII)))); } } - String *val_str(String *); - longlong val_int() + String *val_str(String *) override; + longlong val_int() override { if (args[0]->result_type() == STRING_RESULT) return Item_str_func::val_int(); @@ -1464,7 +1748,7 @@ public: return 0; return res; } - double val_real() + double val_real() override { if (args[0]->result_type() == STRING_RESULT) return Item_str_func::val_real(); @@ -1473,7 +1757,7 @@ public: return 0; return res; } - my_decimal *val_decimal(my_decimal *d) + my_decimal *val_decimal(my_decimal *d) override { if (args[0]->result_type() == STRING_RESULT) return Item_str_func::val_decimal(d); @@ -1482,7 +1766,7 @@ public: return NULL; return res; } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { if (args[0]->result_type() == STRING_RESULT) return Item_str_func::get_date(thd, ltime, fuzzydate); 
@@ -1491,11 +1775,16 @@ public: return 1; return res; } - bool fix_length_and_dec(); - const char *func_name() const { return "convert"; } - void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("convert") }; + return name; + } + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_conv_charset>(thd, this); } + int save_in_field(Field*, bool) override; }; class Item_func_set_collation :public Item_str_func @@ -1504,20 +1793,24 @@ class Item_func_set_collation :public Item_str_func public: Item_func_set_collation(THD *thd, Item *a, CHARSET_INFO *set_collation): Item_str_func(thd, a), m_set_collation(set_collation) {} - String *val_str(String *); - bool fix_length_and_dec(); - bool eq(const Item *item, bool binary_cmp) const; - const char *func_name() const { return "collate"; } - enum precedence precedence() const { return COLLATE_PRECEDENCE; } - enum Functype functype() const { return COLLATE_FUNC; } - void print(String *str, enum_query_type query_type); - Item_field *field_for_view_update() + String *val_str(String *) override; + bool fix_length_and_dec() override; + bool eq(const Item *item, bool binary_cmp) const override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("collate") }; + return name; + } + enum precedence precedence() const override { return COLLATE_PRECEDENCE; } + enum Functype functype() const override { return COLLATE_FUNC; } + void print(String *str, enum_query_type query_type) override; + Item_field *field_for_view_update() override { /* this function is transparent for view updating */ return args[0]->field_for_view_update(); } - bool need_parentheses_in_default() { return true; } - Item *get_copy(THD *thd) + bool need_parentheses_in_default() override { 
return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_set_collation>(thd, this); } }; @@ -1526,17 +1819,18 @@ class Item_func_expr_str_metadata :public Item_str_func { public: Item_func_expr_str_metadata(THD *thd, Item *a): Item_str_func(thd, a) { } - bool fix_length_and_dec() + bool fix_length_and_dec() override { collation.set(system_charset_info); max_length= 64 * collation.collation->mbmaxlen; // should be enough - maybe_null= 0; + base_flags&= ~item_base_t::MAYBE_NULL; return FALSE; }; - table_map not_null_tables() const { return 0; } + table_map not_null_tables() const override { return 0; } Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { return this; } - bool const_item() const { return true; } + bool const_item() const override { return true; } }; @@ -1545,9 +1839,13 @@ class Item_func_charset :public Item_func_expr_str_metadata public: Item_func_charset(THD *thd, Item *a) :Item_func_expr_str_metadata(thd, a) { } - String *val_str(String *); - const char *func_name() const { return "charset"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("charset") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_charset>(thd, this); } }; @@ -1557,9 +1855,13 @@ class Item_func_collation :public Item_func_expr_str_metadata public: Item_func_collation(THD *thd, Item *a) :Item_func_expr_str_metadata(thd, a) {} - String *val_str(String *); - const char *func_name() const { return "collation"; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("collation") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_collation>(thd, this); } }; @@ -1567,7 +1869,7 @@ public: class Item_func_weight_string :public 
Item_str_func { String tmp_value; - uint flags; + uint weigth_flags; uint nweights; uint result_length; public: @@ -1576,40 +1878,49 @@ public: Item_str_func(thd, a) { nweights= nweights_arg; - flags= flags_arg; + weigth_flags= flags_arg; result_length= result_length_arg; } - const char *func_name() const { return "weight_string"; } - String *val_str(String *); - bool fix_length_and_dec(); - bool eq(const Item *item, bool binary_cmp) const + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("weight_string") }; + return name; + } + String *val_str(String *) override; + bool fix_length_and_dec() override; + bool eq(const Item *item, bool binary_cmp) const override { if (!Item_str_func::eq(item, binary_cmp)) return false; Item_func_weight_string *that= (Item_func_weight_string *)item; - return this->flags == that->flags && + return this->weigth_flags == that->weigth_flags && this->nweights == that->nweights && this->result_length == that->result_length; } Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + override { return this; } - void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_weight_string>(thd, this); } }; class Item_func_crc32 :public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_can_return_str(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_str(func_name_cstring()); } String value; public: Item_func_crc32(THD *thd, Item *a): Item_long_func(thd, a) { unsigned_flag= 1; } - const char *func_name() const { return "crc32"; } - bool fix_length_and_dec() { max_length=10; return FALSE; } - longlong val_int(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("crc32") }; + return 
name; + } + bool fix_length_and_dec() override { max_length=10; return FALSE; } + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_crc32>(thd, this); } }; @@ -1619,10 +1930,18 @@ class Item_func_uncompressed_length : public Item_long_func_length public: Item_func_uncompressed_length(THD *thd, Item *a) :Item_long_func_length(thd, a) {} - const char *func_name() const{return "uncompressed_length";} - bool fix_length_and_dec() { max_length=10; maybe_null= true; return FALSE; } - longlong val_int(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("uncompressed_length") }; + return name; + } + bool fix_length_and_dec() override + { + max_length=10; + set_maybe_null(); + return FALSE; } + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_uncompressed_length>(thd, this); } }; @@ -1638,14 +1957,18 @@ class Item_func_compress: public Item_str_binary_checksum_func public: Item_func_compress(THD *thd, Item *a) :Item_str_binary_checksum_func(thd, a) {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { max_length= (args[0]->max_length * 120) / 100 + 12; return FALSE; } - const char *func_name() const{return "compress";} - String *val_str(String *) ZLIB_DEPENDED_FUNCTION - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("compress") }; + return name; + } + String *val_str(String *) override ZLIB_DEPENDED_FUNCTION + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_compress>(thd, this); } }; @@ -1655,37 +1978,53 @@ class Item_func_uncompress: public Item_str_binary_checksum_func public: Item_func_uncompress(THD *thd, Item *a) :Item_str_binary_checksum_func(thd, a) {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { - maybe_null= 1; max_length= MAX_BLOB_WIDTH; + set_maybe_null(); + 
max_length= MAX_BLOB_WIDTH; return FALSE; } - const char *func_name() const{return "uncompress";} - String *val_str(String *) ZLIB_DEPENDED_FUNCTION - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("uncompress") }; + return name; + } + String *val_str(String *) override ZLIB_DEPENDED_FUNCTION + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_uncompress>(thd, this); } }; class Item_func_uuid: public Item_str_func { + /* Set if uuid should be returned without separators (Oracle sys_guid) */ + bool without_separators; public: - Item_func_uuid(THD *thd): Item_str_func(thd) {} - bool fix_length_and_dec() +Item_func_uuid(THD *thd, bool without_separators_arg): Item_str_func(thd), + without_separators(without_separators_arg) + {} + bool fix_length_and_dec() override { collation.set(DTCollation_numeric()); - fix_char_length(MY_UUID_STRING_LENGTH); + fix_char_length(without_separators ? MY_UUID_ORACLE_STRING_LENGTH : + MY_UUID_STRING_LENGTH); return FALSE; } - bool const_item() const { return false; } - table_map used_tables() const { return RAND_TABLE_BIT; } - const char *func_name() const{ return "uuid"; } - String *val_str(String *); - bool check_vcol_func_processor(void *arg) + bool const_item() const override { return false; } + table_map used_tables() const override { return RAND_TABLE_BIT; } + LEX_CSTRING func_name_cstring() const override { - return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC); + static LEX_CSTRING mariadb_name= {STRING_WITH_LEN("uuid") }; + static LEX_CSTRING oracle_name= {STRING_WITH_LEN("sys_guid") }; + return without_separators ? 
oracle_name : mariadb_name; } - Item *get_copy(THD *thd) + String *val_str(String *) override; + bool check_vcol_func_processor(void *arg) override + { + return mark_unsupported_function(func_name(), "()", arg, + VCOL_NON_DETERMINISTIC); + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_uuid>(thd, this); } }; @@ -1702,13 +2041,17 @@ protected: void print_arguments(String *str, enum_query_type query_type); public: Item_func_dyncol_create(THD *thd, List<Item> &args, DYNCALL_CREATE_DEF *dfs); - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec(); - const char *func_name() const{ return "column_create"; } - String *val_str(String *); - void print(String *str, enum_query_type query_type); - enum Functype functype() const { return DYNCOL_FUNC; } - Item *get_copy(THD *thd) + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("column_create") }; + return name; + } + String *val_str(String *) override; + void print(String *str, enum_query_type query_type) override; + enum Functype functype() const override { return DYNCOL_FUNC; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dyncol_create>(thd, this); } }; @@ -1719,10 +2062,14 @@ public: Item_func_dyncol_add(THD *thd, List<Item> &args_arg, DYNCALL_CREATE_DEF *dfs): Item_func_dyncol_create(thd, args_arg, dfs) {} - const char *func_name() const{ return "column_add"; } - String *val_str(String *); - void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("column_add") }; + return name; + } + String *val_str(String *) override; + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dyncol_add>(thd, this); } }; @@ -1731,16 +2078,20 @@ class 
Item_func_dyncol_json: public Item_str_func public: Item_func_dyncol_json(THD *thd, Item *str): Item_str_func(thd, str) {collation.set(DYNCOL_UTF);} - const char *func_name() const{ return "column_json"; } - String *val_str(String *); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("column_json") }; + return name; + } + String *val_str(String *) override; + bool fix_length_and_dec() override { max_length= MAX_BLOB_WIDTH; - maybe_null= 1; + set_maybe_null(); decimals= 0; return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dyncol_json>(thd, this); } }; @@ -1753,15 +2104,23 @@ class Item_dyncol_get: public Item_str_func public: Item_dyncol_get(THD *thd, Item *str, Item *num): Item_str_func(thd, str, num) {} - bool fix_length_and_dec() - { maybe_null= 1;; max_length= MAX_BLOB_WIDTH; return FALSE; } + bool fix_length_and_dec() override + { + set_maybe_null(); + max_length= MAX_BLOB_WIDTH; + return FALSE; + } /* Mark that collation can change between calls */ - bool dynamic_result() { return 1; } + bool dynamic_result() override { return 1; } - const char *func_name() const { return "column_get"; } - String *val_str(String *); - longlong val_int(); - longlong val_int_signed_typecast() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("column_get") }; + return name; + } + String *val_str(String *) override; + longlong val_int() override; + longlong val_int_signed_typecast() override { unsigned_flag= false; // Mark that we want to have a signed value longlong value= val_int(); // val_int() can change unsigned_flag @@ -1769,7 +2128,7 @@ public: push_note_converted_to_negative_complement(current_thd); return value; } - longlong val_int_unsigned_typecast() + longlong val_int_unsigned_typecast() override { unsigned_flag= true; // Mark that we want to have an unsigned value longlong value= 
val_int(); // val_int() can change unsigned_flag @@ -1777,12 +2136,12 @@ public: push_note_converted_to_positive_complement(current_thd); return value; } - double val_real(); - my_decimal *val_decimal(my_decimal *); + double val_real() override; + my_decimal *val_decimal(my_decimal *) override; bool get_dyn_value(THD *thd, DYNAMIC_COLUMN_VALUE *val, String *tmp); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_dyncol_get>(thd, this); } }; @@ -1792,11 +2151,19 @@ class Item_func_dyncol_list: public Item_str_func public: Item_func_dyncol_list(THD *thd, Item *str): Item_str_func(thd, str) {collation.set(DYNCOL_UTF);} - bool fix_length_and_dec() - { maybe_null= 1; max_length= MAX_BLOB_WIDTH; return FALSE; }; - const char *func_name() const{ return "column_list"; } - String *val_str(String *); - Item *get_copy(THD *thd) + bool fix_length_and_dec() override + { + set_maybe_null(); + max_length= MAX_BLOB_WIDTH; + return FALSE; + } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("column_list") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dyncol_list>(thd, this); } }; @@ -1810,14 +2177,19 @@ class Item_temptable_rowid :public Item_str_func public: TABLE *table; Item_temptable_rowid(TABLE *table_arg); - const Type_handler *type_handler() const { return &type_handler_string; } + const Type_handler *type_handler() const override + { return &type_handler_string; } Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table) { return create_table_field_from_handler(root, table); } - String *val_str(String *str); - enum Functype functype() 
const { return TEMPTABLE_ROWID; } - const char *func_name() const { return "<rowid>"; } - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + String *val_str(String *str) override; + enum Functype functype() const override { return TEMPTABLE_ROWID; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("<rowid>") }; + return name; + } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_temptable_rowid>(thd, this); } }; #ifdef WITH_WSREP @@ -1829,15 +2201,19 @@ class Item_func_wsrep_last_written_gtid: public Item_str_ascii_func String gtid_str; public: Item_func_wsrep_last_written_gtid(THD *thd): Item_str_ascii_func(thd) {} - const char *func_name() const { return "wsrep_last_written_gtid"; } - String *val_str_ascii(String *); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("wsrep_last_written_gtid") }; + return name; + } + String *val_str_ascii(String *) override; + bool fix_length_and_dec() override { max_length= WSREP_GTID_STR_LEN; - maybe_null= true; + set_maybe_null(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_wsrep_last_written_gtid>(thd, this); } }; @@ -1846,15 +2222,19 @@ class Item_func_wsrep_last_seen_gtid: public Item_str_ascii_func String gtid_str; public: Item_func_wsrep_last_seen_gtid(THD *thd): Item_str_ascii_func(thd) {} - const char *func_name() const { return "wsrep_last_seen_gtid"; } - String *val_str_ascii(String *); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("wsrep_last_seen_gtid") }; + return name; + } + String *val_str_ascii(String *) override; + bool fix_length_and_dec() override { max_length= WSREP_GTID_STR_LEN; - maybe_null= true; + set_maybe_null(); return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) 
override { return get_item_copy<Item_func_wsrep_last_seen_gtid>(thd, this); } }; @@ -1864,10 +2244,15 @@ class Item_func_wsrep_sync_wait_upto: public Item_int_func public: Item_func_wsrep_sync_wait_upto(THD *thd, Item *a): Item_int_func(thd, a) {} Item_func_wsrep_sync_wait_upto(THD *thd, Item *a, Item* b): Item_int_func(thd, a, b) {} - const Type_handler *type_handler() const { return &type_handler_string; } - const char *func_name() const { return "wsrep_sync_wait_upto_gtid"; } - longlong val_int(); - Item *get_copy(THD *thd) + const Type_handler *type_handler() const override + { return &type_handler_string; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("wsrep_sync_wait_upto_gtid") }; + return name; + } + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_wsrep_sync_wait_upto>(thd, this); } }; #endif /* WITH_WSREP */ diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 76844ae92b1..860ba1dbf91 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2002, 2016, Oracle and/or its affiliates. 
- Copyright (c) 2010, 2021, MariaDB + Copyright (c) 2010, 2022, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -67,6 +67,7 @@ Item_subselect::Item_subselect(THD *thd_arg): #ifndef DBUG_OFF exec_counter= 0; #endif + with_flags|= item_with_t::SUBQUERY; reset(); /* Item value is NULL if select_result_interceptor didn't change this value @@ -117,6 +118,7 @@ void Item_subselect::init(st_select_lex *select_lex, else { SELECT_LEX *outer_select= unit->outer_select(); + THD *thd= unit->thd; /* do not take into account expression inside aggregate functions because they can access original table fields @@ -126,9 +128,11 @@ void Item_subselect::init(st_select_lex *select_lex, outer_select->parsing_place); if (unit->is_unit_op() && (unit->first_select()->next_select() || unit->fake_select_lex)) - engine= new subselect_union_engine(unit, result, this); + engine= new (thd->mem_root) + subselect_union_engine(unit, result, this); else - engine= new subselect_single_select_engine(select_lex, result, this); + engine= new (thd->mem_root) + subselect_single_select_engine(select_lex, result, this); } DBUG_PRINT("info", ("engine: %p", engine)); DBUG_VOID_RETURN; @@ -255,7 +259,7 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref) status_var_increment(thd_param->status_var.feature_subquery); - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); engine->set_thd((thd= thd_param)); if (!done_first_fix_fields) { @@ -344,7 +348,7 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref) if (uncacheable & UNCACHEABLE_RAND) used_tables_cache|= RAND_TABLE_BIT; } - fixed= 1; + base_flags|= item_base_t::FIXED; end: done_first_fix_fields= FALSE; @@ -644,6 +648,12 @@ bool Item_subselect::is_expensive() } +/* + @brief + Apply item processor for all scalar (i.e. Item*) expressions that + occur in the nested join. 
+*/ + static int walk_items_for_table_list(Item_processor processor, bool walk_subquery, void *argument, @@ -658,6 +668,14 @@ int walk_items_for_table_list(Item_processor processor, if ((res= table->on_expr->walk(processor, walk_subquery, argument))) return res; } + if (Table_function_json_table *tf= table->table_function) + { + if ((res= tf->walk_items(processor, walk_subquery, argument))) + { + return res; + } + } + if (table->nested_join) { if ((res= walk_items_for_table_list(processor, walk_subquery, argument, @@ -726,7 +744,6 @@ bool Item_subselect::walk(Item_processor processor, bool walk_subquery, for (SELECT_LEX *lex= unit->first_select(); lex; lex= lex->next_select()) { List_iterator<Item> li(lex->item_list); - Item *item; ORDER *order; if (lex->where && (lex->where)->walk(processor, walk_subquery, argument)) @@ -735,11 +752,11 @@ bool Item_subselect::walk(Item_processor processor, bool walk_subquery, argument)) return 1; - if (walk_items_for_table_list(processor, walk_subquery, argument, - *lex->join_list)) + if (walk_items_for_table_list(processor, walk_subquery, argument, + *lex->join_list)) return 1; - while ((item=li++)) + while (Item *item= li++) { if (item->walk(processor, walk_subquery, argument)) return 1; @@ -763,9 +780,9 @@ bool Item_subselect::walk(Item_processor processor, bool walk_subquery, bool Item_subselect::exec() { subselect_engine *org_engine= engine; - DBUG_ENTER("Item_subselect::exec"); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); + DBUG_ASSERT(thd); DBUG_ASSERT(!eliminated); DBUG_EXECUTE_IF("Item_subselect", @@ -824,7 +841,7 @@ int Item_in_subselect::optimize(double *out_rows, double *cost) { int res; DBUG_ENTER("Item_in_subselect::optimize"); - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); SELECT_LEX *save_select= thd->lex->current_select; JOIN *join= unit->first_select()->join; @@ -941,7 +958,9 @@ bool Item_in_subselect::expr_cache_is_needed(THD *thd) bool Item_in_subselect::exec() { DBUG_ENTER("Item_in_subselect::exec"); - 
DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); + DBUG_ASSERT(thd); + /* Initialize the cache of the left predicate operand. This has to be done as late as now, because Cached_item directly contains a resolved field (not @@ -1006,7 +1025,7 @@ bool Item_subselect::const_item() const Item *Item_subselect::get_tmp_table_item(THD *thd_arg) { - if (!Item_subselect::with_sum_func() && !const_item()) + if (!with_sum_func() && !const_item()) return new (thd->mem_root) Item_temptable_field(thd_arg, result_field); return copy_or_same(thd_arg); } @@ -1035,13 +1054,18 @@ void Item_subselect::print(String *str, enum_query_type query_type) if (unit && unit->first_select()) { char buf[64]; - ll2str(unit->first_select()->select_number, buf, 10, 0); - str->append(buf); + size_t length= (size_t) + (longlong10_to_str(unit->first_select()->select_number, buf, 10) - + buf); + str->append(buf, length); } else - str->append("NULL"); // TODO: what exactly does this mean? + { + // TODO: Explain what exactly does this mean? 
+ str->append(NULL_clex_str); + } - str->append(")"); + str->append(')'); return; } if (engine) @@ -1051,7 +1075,7 @@ void Item_subselect::print(String *str, enum_query_type query_type) str->append(')'); } else - str->append("(...)"); + str->append(STRING_WITH_LEN("(...)")); } @@ -1060,7 +1084,7 @@ Item_singlerow_subselect::Item_singlerow_subselect(THD *thd, st_select_lex *sele { DBUG_ENTER("Item_singlerow_subselect::Item_singlerow_subselect"); init(select_lex, new (thd->mem_root) select_singlerow_subselect(thd, this)); - maybe_null= 1; + set_maybe_null(); max_columns= UINT_MAX; DBUG_VOID_RETURN; } @@ -1097,7 +1121,7 @@ Item_maxmin_subselect::Item_maxmin_subselect(THD *thd, new (thd->mem_root) select_max_min_finder_subselect(thd, this, max_arg, parent->substype() == Item_subselect::ALL_SUBS)); max_columns= 1; - maybe_null= 1; + set_maybe_null(); max_columns= 1; /* @@ -1204,29 +1228,40 @@ Item_singlerow_subselect::select_transformer(JOIN *join) DBUG_ASSERT(join->thd == thd); SELECT_LEX *select_lex= join->select_lex; - Query_arena *arena= thd->stmt_arena; + Query_arena *arena, backup; + arena= thd->activate_stmt_arena_if_needed(&backup); - if (!select_lex->master_unit()->is_unit_op() && - !select_lex->table_list.elements && - select_lex->item_list.elements == 1 && - !select_lex->item_list.head()->with_sum_func() && + auto need_to_pull_out_item = [](enum_parsing_place context_analysis_place, + Item *item) { + return + !item->with_sum_func() && /* - We can't change name of Item_field or Item_ref, because it will - prevent its correct resolving, but we should save name of - removed item => we do not make optimization if top item of - list is field or reference. - TODO: solve above problem + We can't change name of Item_field or Item_ref, because it will + prevent its correct resolving, but we should save name of + removed item => we do not make optimization if top item of + list is field or reference. 
+ TODO: solve above problem */ - !(select_lex->item_list.head()->type() == FIELD_ITEM || - select_lex->item_list.head()->type() == REF_ITEM) && - !join->conds && !join->having && + item->type() != FIELD_ITEM && item->type() != REF_ITEM && /* - switch off this optimization for prepare statement, - because we do not rollback this changes - TODO: make rollback for it, or special name resolving mode in 5.0. + The item can be pulled out to upper level in case it doesn't represent + the constant in the clause 'ORDER/GROUP BY (constant)'. */ - !arena->is_stmt_prepare_or_first_sp_execute() - ) + !((item->is_order_clause_position() || + item->is_stored_routine_parameter()) && + (context_analysis_place == IN_ORDER_BY || + context_analysis_place == IN_GROUP_BY) + ); + }; + + if (!select_lex->master_unit()->is_unit_op() && + !select_lex->table_list.elements && + select_lex->item_list.elements == 1 && + !join->conds && !join->having && + need_to_pull_out_item( + join->select_lex->outer_select()->context_analysis_place, + select_lex->item_list.head()) && + thd->stmt_arena->state != Query_arena::STMT_INITIALIZED_FOR_SP) { have_to_be_excluded= 1; if (thd->lex->describe) @@ -1244,6 +1279,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join) substitution->fix_after_pullout(select_lex->outer_select(), &substitution, TRUE); } + if (arena) + thd->restore_active_arena(arena, &backup); DBUG_RETURN(false); } @@ -1288,11 +1325,11 @@ bool Item_singlerow_subselect::fix_length_and_dec() */ if (engine->no_tables() && engine->engine_type() != subselect_engine::UNION_ENGINE) - maybe_null= engine->may_be_null(); + set_maybe_null(engine->may_be_null()); else { for (uint i= 0; i < max_columns; i++) - row[i]->maybe_null= TRUE; + row[i]->set_maybe_null(); } return FALSE; } @@ -1391,7 +1428,7 @@ void Item_singlerow_subselect::bring_value() double Item_singlerow_subselect::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) return value->val_real(); if (!exec() && 
!value->null_value) @@ -1408,7 +1445,7 @@ double Item_singlerow_subselect::val_real() longlong Item_singlerow_subselect::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) { longlong val= value->val_int(); @@ -1430,7 +1467,7 @@ longlong Item_singlerow_subselect::val_int() String *Item_singlerow_subselect::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) { String *res= value->val_str(str); @@ -1453,7 +1490,7 @@ String *Item_singlerow_subselect::val_str(String *str) bool Item_singlerow_subselect::val_native(THD *thd, Native *to) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) return value->val_native(thd, to); if (!exec() && !value->null_value) @@ -1471,7 +1508,7 @@ bool Item_singlerow_subselect::val_native(THD *thd, Native *to) my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) { my_decimal *val= value->val_decimal(decimal_value); @@ -1494,7 +1531,7 @@ my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value) bool Item_singlerow_subselect::val_bool() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) { bool val= value->val_bool(); @@ -1517,7 +1554,7 @@ bool Item_singlerow_subselect::val_bool() bool Item_singlerow_subselect::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) { bool val= value->get_date(thd, ltime, fuzzydate); @@ -1549,7 +1586,7 @@ Item_exists_subselect::Item_exists_subselect(THD *thd, init(select_lex, new (thd->mem_root) select_exists_subselect(thd, this)); max_columns= UINT_MAX; null_value= FALSE; //can't be NULL - maybe_null= 0; //can't be NULL + base_flags&= ~item_base_t::MAYBE_NULL; //can't be NULL value= 0; DBUG_VOID_RETURN; } @@ -1598,7 +1635,7 @@ Item_in_subselect::Item_in_subselect(THD *thd, Item * left_exp, func= &eq_creator; 
init(select_lex, new (thd->mem_root) select_exists_subselect(thd, this)); max_columns= UINT_MAX; - maybe_null= 1; + set_maybe_null(); reset(); //if test_limit will fail then error will be reported to client test_limit(select_lex->master_unit()); @@ -1651,9 +1688,9 @@ bool Item_exists_subselect::fix_length_and_dec() DBUG_ENTER("Item_exists_subselect::fix_length_and_dec"); init_length_and_dec(); // If limit is not set or it is constant more than 1 - if (!unit->global_parameters()->select_limit || - (unit->global_parameters()->select_limit->basic_const_item() && - unit->global_parameters()->select_limit->val_int() > 1)) + if (!unit->global_parameters()->limit_params.select_limit || + (unit->global_parameters()->limit_params.select_limit->basic_const_item() && + unit->global_parameters()->limit_params.select_limit->val_int() > 1)) { /* We need only 1 row to determine existence (i.e. any EXISTS that is not @@ -1662,9 +1699,9 @@ bool Item_exists_subselect::fix_length_and_dec() Item *item= new (thd->mem_root) Item_int(thd, (int32) 1); if (!item) DBUG_RETURN(TRUE); - thd->change_item_tree(&unit->global_parameters()->select_limit, + thd->change_item_tree(&unit->global_parameters()->limit_params.select_limit, item); - unit->global_parameters()->explicit_limit= 1; // we set the limit + unit->global_parameters()->limit_params.explicit_limit= 1; // we set the limit DBUG_PRINT("info", ("Set limit to 1")); } DBUG_RETURN(FALSE); @@ -1738,7 +1775,7 @@ void Item_exists_subselect::no_rows_in_result() double Item_exists_subselect::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!forced_const && exec()) { reset(); @@ -1749,7 +1786,7 @@ double Item_exists_subselect::val_real() longlong Item_exists_subselect::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!forced_const && exec()) { reset(); @@ -1774,7 +1811,7 @@ longlong Item_exists_subselect::val_int() String *Item_exists_subselect::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + 
DBUG_ASSERT(fixed()); if (!forced_const && exec()) reset(); str->set((ulonglong)value,&my_charset_bin); @@ -1797,7 +1834,7 @@ String *Item_exists_subselect::val_str(String *str) my_decimal *Item_exists_subselect::val_decimal(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!forced_const && exec()) reset(); int2my_decimal(E_DEC_FATAL_ERROR, value, 0, decimal_value); @@ -1807,7 +1844,7 @@ my_decimal *Item_exists_subselect::val_decimal(my_decimal *decimal_value) bool Item_exists_subselect::val_bool() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (!forced_const && exec()) { reset(); @@ -1823,7 +1860,7 @@ double Item_in_subselect::val_real() As far as Item_in_subselect called only from Item_in_optimizer this method should not be used */ - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) return value; DBUG_ASSERT((engine->uncacheable() & ~UNCACHEABLE_EXPLAIN) || @@ -1847,7 +1884,7 @@ longlong Item_in_subselect::val_int() method should not be used */ DBUG_ASSERT(0); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) return value; DBUG_ASSERT((engine->uncacheable() & ~UNCACHEABLE_EXPLAIN) || @@ -1871,7 +1908,7 @@ String *Item_in_subselect::val_str(String *str) method should not be used */ DBUG_ASSERT(0); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) goto value_is_ready; DBUG_ASSERT((engine->uncacheable() & ~UNCACHEABLE_EXPLAIN) || @@ -1895,7 +1932,7 @@ value_is_ready: bool Item_in_subselect::val_bool() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (forced_const) return value; DBUG_ASSERT((engine->uncacheable() & ~UNCACHEABLE_EXPLAIN) || @@ -1923,7 +1960,7 @@ my_decimal *Item_in_subselect::val_decimal(my_decimal *decimal_value) DBUG_ASSERT((engine->uncacheable() & ~UNCACHEABLE_EXPLAIN) || ! 
engine->is_executed()); null_value= was_null= FALSE; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (exec()) { reset(); @@ -2089,13 +2126,13 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join) 3 There is tables 4 It is not ALL subquery with possible NULLs in the SELECT list */ - if (!select_lex->group_list.elements && /*1*/ - !select_lex->having && /*1*/ - !select_lex->with_sum_func && /*1*/ - !(select_lex->next_select()) && /*2*/ - select_lex->table_list.elements && /*3*/ - (!select_lex->ref_pointer_array[0]->maybe_null || /*4*/ - substype() != Item_subselect::ALL_SUBS)) /*4*/ + if (!select_lex->group_list.elements && /*1*/ + !select_lex->having && /*1*/ + !select_lex->with_sum_func && /*1*/ + !(select_lex->next_select()) && /*2*/ + select_lex->table_list.elements && /*3*/ + (!select_lex->ref_pointer_array[0]->maybe_null() || /*4*/ + substype() != Item_subselect::ALL_SUBS)) /*4*/ { Item_sum_min_max *item; nesting_map save_allow_sum_func; @@ -2185,7 +2222,7 @@ bool Item_in_subselect::fix_having(Item *having, SELECT_LEX *select_lex) { bool fix_res= 0; DBUG_ASSERT(thd); - if (!having->is_fixed()) + if (!having->fixed()) { select_lex->having_fix_field= 1; fix_res= having->fix_fields(thd, 0); @@ -2261,8 +2298,7 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join, if (join_having || select_lex->with_sum_func || select_lex->group_list.elements) { - const char *tmp= this->full_name(); - LEX_CSTRING field_name= {tmp, safe_strlen(tmp)}; + LEX_CSTRING field_name= this->full_name_cstring(); Item *item= func->create(thd, expr, new (thd->mem_root) Item_ref_null_helper( thd, @@ -2272,7 +2308,7 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join, ref_pointer_array[0], {STRING_WITH_LEN("<ref>")}, field_name)); - if (!abort_on_null && left_expr->maybe_null) + if (!abort_on_null && left_expr->maybe_null()) { /* We can encounter "NULL IN (SELECT ...)". 
Wrap the added condition @@ -2305,10 +2341,10 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join, Item *orig_item= item; item= func->create(thd, expr, item); - if (!abort_on_null && orig_item->maybe_null) + if (!abort_on_null && orig_item->maybe_null()) { having= new (thd->mem_root) Item_is_not_null_test(thd, this, having); - if (left_expr->maybe_null) + if (left_expr->maybe_null()) { disable_cond_guard_for_const_null_left_expr(0); if (!(having= new (thd->mem_root) Item_func_trig_cond(thd, having, @@ -2327,7 +2363,7 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join, If we may encounter NULL IN (SELECT ...) and care whether subquery result is NULL or FALSE, wrap condition in a trig_cond. */ - if (!abort_on_null && left_expr->maybe_null) + if (!abort_on_null && left_expr->maybe_null()) { disable_cond_guard_for_const_null_left_expr(0); if (!(item= new (thd->mem_root) Item_func_trig_cond(thd, item, @@ -2357,7 +2393,7 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join, &select_lex->ref_pointer_array[0], no_matter_name, field_name)); - if (!abort_on_null && left_expr->maybe_null) + if (!abort_on_null && left_expr->maybe_null()) { disable_cond_guard_for_const_null_left_expr(0); if (!(new_having= new (thd->mem_root) @@ -2368,6 +2404,7 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join, new_having->name= in_having_cond; if (fix_having(new_having, select_lex)) DBUG_RETURN(true); + *having_item= new_having; } } @@ -2522,9 +2559,9 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, Item *item_having_part2= 0; for (uint i= 0; i < cols_num; i++) { - DBUG_ASSERT((left_expr->is_fixed() && + DBUG_ASSERT((left_expr->fixed() && - select_lex->ref_pointer_array[i]->is_fixed()) || + select_lex->ref_pointer_array[i]->fixed()) || (select_lex->ref_pointer_array[i]->type() == REF_ITEM && ((Item_ref*)(select_lex->ref_pointer_array[i]))->ref_type() == Item_ref::OUTER_REF)); @@ -2555,7 +2592,7 @@ 
Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, list_ref)); Item *col_item= new (thd->mem_root) Item_cond_or(thd, item_eq, item_isnull); - if (!abort_on_null && left_expr->element_index(i)->maybe_null && + if (!abort_on_null && left_expr->element_index(i)->maybe_null() && get_cond_guard(i)) { disable_cond_guard_for_const_null_left_expr(i); @@ -2574,7 +2611,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, ref_pointer_array[i], no_matter_name, list_ref)); - if (!abort_on_null && left_expr->element_index(i)->maybe_null && + if (!abort_on_null && left_expr->element_index(i)->maybe_null() && get_cond_guard(i) ) { disable_cond_guard_for_const_null_left_expr(i); @@ -2593,8 +2630,8 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, for (uint i= 0; i < cols_num; i++) { Item *item, *item_isnull; - DBUG_ASSERT((left_expr->is_fixed() && - select_lex->ref_pointer_array[i]->is_fixed()) || + DBUG_ASSERT((left_expr->fixed() && + select_lex->ref_pointer_array[i]->fixed()) || (select_lex->ref_pointer_array[i]->type() == REF_ITEM && ((Item_ref*)(select_lex->ref_pointer_array[i]))->ref_type() == Item_ref::OUTER_REF)); @@ -2615,7 +2652,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, ref_pointer_array[i], no_matter_name, list_ref)); - if (!abort_on_null && select_lex->ref_pointer_array[i]->maybe_null) + if (!abort_on_null && select_lex->ref_pointer_array[i]->maybe_null()) { Item *having_col_item= new (thd->mem_root) @@ -2635,7 +2672,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, no_matter_name, list_ref)); item= new (thd->mem_root) Item_cond_or(thd, item, item_isnull); - if (left_expr->element_index(i)->maybe_null && get_cond_guard(i)) + if (left_expr->element_index(i)->maybe_null() && get_cond_guard(i)) { disable_cond_guard_for_const_null_left_expr(i); if (!(item= new (thd->mem_root) @@ -2647,7 +2684,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, } *having_item= and_items(thd, 
*having_item, having_col_item); } - if (!abort_on_null && left_expr->element_index(i)->maybe_null && + if (!abort_on_null && left_expr->element_index(i)->maybe_null() && get_cond_guard(i)) { if (!(item= new (thd->mem_root) @@ -2727,7 +2764,7 @@ bool Item_in_subselect::create_in_to_exists_cond(JOIN *join_arg) /* The IN=>EXISTS transformation makes non-correlated subqueries correlated. */ - if (!left_expr->const_item() || left_expr->is_expensive()) + if (!left_expr->can_eval_in_optimize()) { join_arg->select_lex->uncacheable|= UNCACHEABLE_DEPENDENT_INJECTED; join_arg->select_lex->master_unit()->uncacheable|= @@ -2841,9 +2878,9 @@ bool Item_in_subselect::inject_in_to_exists_cond(JOIN *join_arg) select_lex->having->top_level_item(); join_arg->having= select_lex->having; } - join_arg->thd->change_item_tree(&unit->global_parameters()->select_limit, - new (thd->mem_root) - Item_int(thd, (int32) 1)); + SELECT_LEX *global_parameters= unit->global_parameters(); + join_arg->thd->change_item_tree(&global_parameters->limit_params.select_limit, + new (thd->mem_root) Item_int(thd, (int32) 1)); unit->lim.set_single_row(); DBUG_RETURN(false); @@ -3025,7 +3062,7 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) Query_arena *arena= NULL, backup; int res= FALSE; List<Item> outer; - Dynamic_array<EQ_FIELD_OUTER> eqs(5, 5); + Dynamic_array<EQ_FIELD_OUTER> eqs(PSI_INSTRUMENT_MEM, 5, 5); bool will_be_correlated; DBUG_ENTER("Item_exists_subselect::exists2in_processor"); @@ -3056,10 +3093,10 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) (1a) or is a "LIMIT 0" (see MDEV-19429) (2). 
there is an OFFSET clause */ - if ((first_select->select_limit && // (1) - (!first_select->select_limit->basic_const_item() || // (1) - first_select->select_limit->val_uint() == 0)) || // (1a) - first_select->offset_limit) // (2) + if ((first_select->limit_params.select_limit && // (1) + (!first_select->limit_params.select_limit->basic_const_item() || // (1) + first_select->limit_params.select_limit->val_uint() == 0)) || // (1a) + first_select->limit_params.offset_limit) // (2) { DBUG_RETURN(FALSE); } @@ -3125,7 +3162,7 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) first_select->ref_pointer_array[i]= (Item *)local_field; /* remove the parts from condition */ - if (!upper_not || !local_field->maybe_null) + if (!upper_not || !local_field->maybe_null()) *eq_ref= new (thd->mem_root) Item_int(thd, 1); else { @@ -3166,7 +3203,7 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) /* make EXISTS->IN permanet (see Item_subselect::init()) */ set_exists_transformed(); - first_select->select_limit= NULL; + first_select->limit_params.select_limit= NULL; if (!(in_subs= new (thd->mem_root) Item_in_subselect(thd, left_exp, first_select))) { @@ -3263,7 +3300,7 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) Item *exp; if (eqs.elements() == 1) { - exp= (optimizer->arguments()[0]->maybe_null ? + exp= (optimizer->arguments()[0]->maybe_null() ? 
(Item*) new (thd->mem_root) Item_cond_and(thd, new (thd->mem_root) @@ -3279,7 +3316,7 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) } else { - List<Item> *and_list= new List<Item>; + List<Item> *and_list= new (thd->mem_root) List<Item>; if (!and_list) { res= TRUE; @@ -3287,7 +3324,7 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) } for (size_t i= 0; i < eqs.elements(); i++) { - if (optimizer->arguments()[0]->maybe_null) + if (optimizer->arguments()[0]->maybe_null()) { and_list-> push_front(new (thd->mem_root) @@ -3499,7 +3536,7 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref) else if (Item_subselect::fix_fields(thd_arg, ref)) goto err; - fixed= TRUE; + base_flags|= item_base_t::FIXED; thd->where= save_where; DBUG_RETURN(FALSE); @@ -3559,7 +3596,8 @@ bool Item_in_subselect::setup_mat_engine() select_engine= (subselect_single_select_engine*) engine; /* Create/initialize execution objects. */ - if (!(mat_engine= new subselect_hash_sj_engine(thd, this, select_engine))) + if (!(mat_engine= new (thd->mem_root) + subselect_hash_sj_engine(thd, this, select_engine))) DBUG_RETURN(TRUE); if (mat_engine->prepare(thd) || @@ -3597,7 +3635,7 @@ bool Item_in_subselect::init_left_expr_cache() if (!outer_join || !outer_join->table_count || !outer_join->tables_list) return TRUE; - if (!(left_expr_cache= new List<Cached_item>)) + if (!(left_expr_cache= new (thd->mem_root) List<Cached_item>)) return TRUE; for (uint i= 0; i < left_expr->cols(); i++) @@ -3618,7 +3656,7 @@ bool Item_in_subselect::init_cond_guards() DBUG_ASSERT(thd); uint cols_num= left_expr->cols(); if (!abort_on_null && !pushed_cond_guards && - (left_expr->maybe_null || cols_num > 1)) + (left_expr->maybe_null() || cols_num > 1)) { if (!(pushed_cond_guards= (bool*)thd->alloc(sizeof(bool) * cols_num))) return TRUE; @@ -3650,7 +3688,8 @@ void Item_allany_subselect::print(String *str, enum_query_type query_type) { left_expr->print(str, query_type); str->append(' '); - 
str->append(func->symbol(all)); + const char *name= func->symbol(all); + str->append(name, strlen(name)); str->append(all ? " all " : " any ", 5); } Item_subselect::print(str, query_type); @@ -3828,8 +3867,9 @@ int subselect_single_select_engine::prepare(THD *thd) { select_lex->cleanup(); } - join= new JOIN(thd, select_lex->item_list, - select_lex->options | SELECT_NO_UNLOCK, result); + join= (new (thd->mem_root) + JOIN(thd, select_lex->item_list, + select_lex->options | SELECT_NO_UNLOCK, result)); if (!join || !result) return 1; /* Fatal error is set already. */ prepared= 1; @@ -3900,7 +3940,7 @@ bool subselect_engine::set_row(List<Item> &item_list, Item_cache **row) set_handler(sel_item->type_handler()); item->decimals= sel_item->decimals; item->unsigned_flag= sel_item->unsigned_flag; - maybe_null= sel_item->maybe_null; + maybe_null= sel_item->maybe_null(); if (!(row[i]= sel_item->get_cache(thd))) return TRUE; row[i]->setup(thd, sel_item); @@ -3956,11 +3996,10 @@ int join_read_next_same_or_null(READ_RECORD *info); int subselect_single_select_engine::exec() { - DBUG_ENTER("subselect_single_select_engine::exec"); - char const *save_where= thd->where; SELECT_LEX *save_select= thd->lex->current_select; thd->lex->current_select= select_lex; + DBUG_ENTER("subselect_single_select_engine::exec"); if (join->optimization_state == JOIN::NOT_OPTIMIZED) { @@ -4174,7 +4213,7 @@ bool subselect_uniquesubquery_engine::copy_ref_key(bool skip_constants) enum store_key::store_key_result store_res; if (skip_constants && (*copy)->store_key_is_const()) continue; - store_res= (*copy)->copy(); + store_res= (*copy)->copy(thd); tab->ref.key_err= store_res; if (store_res == store_key::STORE_KEY_FATAL) @@ -4226,6 +4265,7 @@ int subselect_uniquesubquery_engine::exec() table->status= 0; Item_in_subselect *in_subs= item->get_IN_subquery(); DBUG_ASSERT(in_subs); + DBUG_ASSERT(thd); if (!tab->preread_init_done && tab->preread_init()) DBUG_RETURN(1); @@ -4386,6 +4426,7 @@ int 
subselect_indexsubquery_engine::exec() bool null_finding= 0; TABLE *table= tab->table; Item_in_subselect *in_subs= item->get_IN_subquery(); + DBUG_ASSERT(thd); in_subs->value= 0; empty_result_set= TRUE; @@ -4849,7 +4890,7 @@ subselect_hash_sj_engine::get_strategy_using_schema() outer_col= item_in->left_expr->element_index(i); inner_col= inner_col_it++; - if (!inner_col->maybe_null && !outer_col->maybe_null) + if (!inner_col->maybe_null() && !outer_col->maybe_null()) bitmap_set_bit(&non_null_key_parts, i); else { @@ -4903,7 +4944,7 @@ subselect_hash_sj_engine::get_strategy_using_data() If column 'i' doesn't contain NULLs, and the corresponding outer reference cannot have a NULL value, then 'i' is a non-nullable column. */ - if (result_sink->get_null_count_of_col(i) == 0 && !outer_col->maybe_null) + if (result_sink->get_null_count_of_col(i) == 0 && !outer_col->maybe_null()) { bitmap_clear_bit(&partial_match_key_parts, i); bitmap_set_bit(&non_null_key_parts, i); @@ -5245,12 +5286,9 @@ bool subselect_hash_sj_engine::make_semi_join_conds() tmp_table_ref->init_one_table(&empty_clex_str, &table_name, NULL, TL_READ); tmp_table_ref->table= tmp_table; - context= new Name_resolution_context; - context->init(); - context->first_name_resolution_table= - context->last_name_resolution_table= tmp_table_ref; + context= new Name_resolution_context(tmp_table_ref); semi_join_conds_context= context; - + for (uint i= 0; i < item_in->left_expr->cols(); i++) { /* New equi-join condition for the current column. 
*/ @@ -5313,8 +5351,9 @@ subselect_hash_sj_engine::make_unique_engine() tab->preread_init_done= FALSE; tab->ref.tmp_table_index_lookup_init(thd, tmp_key, it, FALSE); - DBUG_RETURN(new subselect_uniquesubquery_engine(thd, tab, item_in, - semi_join_conds)); + DBUG_RETURN(new (thd->mem_root) + subselect_uniquesubquery_engine(thd, tab, item_in, + semi_join_conds)); } @@ -5604,7 +5643,6 @@ int subselect_hash_sj_engine::exec() SELECT_LEX *save_select= thd->lex->current_select; subselect_partial_match_engine *pm_engine= NULL; int res= 0; - DBUG_ENTER("subselect_hash_sj_engine::exec"); /* @@ -5710,23 +5748,26 @@ int subselect_hash_sj_engine::exec() if (strategy == PARTIAL_MATCH_MERGE) { pm_engine= - new subselect_rowid_merge_engine((subselect_uniquesubquery_engine*) - lookup_engine, tmp_table, - count_pm_keys, - has_covering_null_row, - has_covering_null_columns, - count_columns_with_nulls, - item, result, - semi_join_conds->argument_list()); + (new (thd->mem_root) + subselect_rowid_merge_engine(thd, + (subselect_uniquesubquery_engine*) + lookup_engine, tmp_table, + count_pm_keys, + has_covering_null_row, + has_covering_null_columns, + count_columns_with_nulls, + item, result, + semi_join_conds->argument_list())); if (!pm_engine || pm_engine->prepare(thd) || ((subselect_rowid_merge_engine*) pm_engine)-> init(nn_key_parts, &partial_match_key_parts)) { /* - The call to init() would fail if there was not enough memory to allocate - all buffers for the rowid merge strategy. In this case revert to table - scanning which doesn't need any big buffers. + The call to init() would fail if there was not enough memory + to allocate all buffers for the rowid merge strategy. In + this case revert to table scanning which doesn't need any + big buffers. 
*/ delete pm_engine; pm_engine= NULL; @@ -5737,13 +5778,15 @@ int subselect_hash_sj_engine::exec() if (strategy == PARTIAL_MATCH_SCAN) { if (!(pm_engine= - new subselect_table_scan_engine((subselect_uniquesubquery_engine*) - lookup_engine, tmp_table, - item, result, - semi_join_conds->argument_list(), - has_covering_null_row, - has_covering_null_columns, - count_columns_with_nulls)) || + (new (thd->mem_root) + subselect_table_scan_engine(thd, + (subselect_uniquesubquery_engine*) + lookup_engine, tmp_table, + item, result, + semi_join_conds->argument_list(), + has_covering_null_row, + has_covering_null_columns, + count_columns_with_nulls))) || pm_engine->prepare(thd)) { /* This is an irrecoverable error. */ @@ -6173,32 +6216,41 @@ bool Ordered_key::next_same() void Ordered_key::print(String *str) { uint i; - str->append("{idx="); + + /* We have to pre-allocate string as we are using qs_append() */ + if (str->alloc(str->length() + + 5+10+4+ (NAME_LEN+2)*key_column_count+ + 20+11+21+10+FLOATING_POINT_BUFFER*3+50 + )) + return; + str->append(STRING_WITH_LEN("{idx=")); str->qs_append(keyid); - str->append(", ("); - for (i= 0; i < key_column_count - 1; i++) + str->append(STRING_WITH_LEN(", (")); + for (i= 0; i < key_column_count ; i++) { str->append(&key_columns[i]->field->field_name); - str->append(", "); + str->append(STRING_WITH_LEN(", ")); } - str->append(&key_columns[i]->field->field_name); - str->append("), "); + if (key_column_count) + str->length(str->length() - 2); + str->append(STRING_WITH_LEN("), ")); - str->append("null_bitmap: (bits="); + str->append(STRING_WITH_LEN("null_bitmap: (bits=")); str->qs_append(null_key.n_bits); - str->append(", nulls= "); + str->append(STRING_WITH_LEN(", nulls= ")); str->qs_append((double)null_count); - str->append(", min_null= "); + str->append(STRING_WITH_LEN(", min_null= ")); str->qs_append((double)min_null_row); - str->append(", max_null= "); + str->append(STRING_WITH_LEN(", max_null= ")); 
str->qs_append((double)max_null_row); - str->append("), "); + str->append(STRING_WITH_LEN("), ")); str->append('}'); } subselect_partial_match_engine::subselect_partial_match_engine( + THD *thd_arg, subselect_uniquesubquery_engine *engine_arg, TABLE *tmp_table_arg, Item_subselect *item_arg, select_result_interceptor *result_arg, @@ -6212,13 +6264,16 @@ subselect_partial_match_engine::subselect_partial_match_engine( has_covering_null_row(has_covering_null_row_arg), has_covering_null_columns(has_covering_null_columns_arg), count_columns_with_nulls(count_columns_with_nulls_arg) -{} +{ + thd= thd_arg; +} int subselect_partial_match_engine::exec() { Item_in_subselect *item_in= item->get_IN_subquery(); int lookup_res; + DBUG_ASSERT(thd); DBUG_ASSERT(!(item_in->left_expr_has_null() && item_in->is_top_level_item())); @@ -6374,8 +6429,9 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts, /* Create the only non-NULL key if there is any. */ if (non_null_key_parts) { - non_null_key= new Ordered_key(cur_keyid, tmp_table, left, - 0, 0, 0, row_num_to_rowid); + non_null_key= (new (thd->mem_root) + Ordered_key(cur_keyid, tmp_table, left, + 0, 0, 0, row_num_to_rowid)); if (non_null_key->init(non_null_key_parts)) return TRUE; merge_keys[cur_keyid]= non_null_key; @@ -6404,13 +6460,13 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts, result_sink->get_null_count_of_col(i) == row_count) continue; - merge_keys[cur_keyid]= new Ordered_key( - cur_keyid, tmp_table, - left->element_index(i), - result_sink->get_null_count_of_col(i), - result_sink->get_min_null_of_col(i), - result_sink->get_max_null_of_col(i), - row_num_to_rowid); + merge_keys[cur_keyid]= new (thd->mem_root) + Ordered_key(cur_keyid, tmp_table, + left->element_index(i), + result_sink->get_null_count_of_col(i), + result_sink->get_min_null_of_col(i), + result_sink->get_max_null_of_col(i), + row_num_to_rowid); if (merge_keys[cur_keyid]->init(i)) return TRUE; merge_keys[cur_keyid]->first(); @@ 
-6820,6 +6876,7 @@ end: subselect_table_scan_engine::subselect_table_scan_engine( + THD *thd, subselect_uniquesubquery_engine *engine_arg, TABLE *tmp_table_arg, Item_subselect *item_arg, @@ -6828,7 +6885,7 @@ subselect_table_scan_engine::subselect_table_scan_engine( bool has_covering_null_row_arg, bool has_covering_null_columns_arg, uint count_columns_with_nulls_arg) - :subselect_partial_match_engine(engine_arg, tmp_table_arg, item_arg, + :subselect_partial_match_engine(thd, engine_arg, tmp_table_arg, item_arg, result_arg, equi_join_conds_arg, has_covering_null_row_arg, has_covering_null_columns_arg, diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 1e53e552ed9..9cc02eb9c59 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -47,8 +47,7 @@ class Cached_item; /* base class for subselects */ class Item_subselect :public Item_result_field, - protected Used_tables_and_const_cache, - protected With_sum_func_cache + protected Used_tables_and_const_cache { /* Set to TRUE if the value is assigned for the subselect @@ -192,9 +191,6 @@ public: return null_value; } bool fix_fields(THD *thd, Item **ref) override; - bool with_subquery() const override { DBUG_ASSERT(fixed); return true; } - bool with_sum_func() const override { return m_with_sum_func; } - With_sum_func_cache* get_with_sum_func_cache() override { return this; } bool mark_as_dependent(THD *thd, st_select_lex *select, Item *item); void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge) override; @@ -307,29 +303,30 @@ public: Item_singlerow_subselect(THD *thd_arg): Item_subselect(thd_arg), value(0), row (0) {} - void cleanup(); - subs_type substype() { return SINGLEROW_SUBS; } + void cleanup() override; + subs_type substype() override { return SINGLEROW_SUBS; } - void reset(); - void no_rows_in_result(); - bool select_transformer(JOIN *join); + void reset() override; + void no_rows_in_result() override; + bool select_transformer(JOIN *join) override; void store(uint i, 
Item* item); - double val_real(); - longlong val_int (); - String *val_str (String *); - bool val_native(THD *thd, Native *); - my_decimal *val_decimal(my_decimal *); - bool val_bool(); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - const Type_handler *type_handler() const; - bool fix_length_and_dec(); + double val_real() override; + longlong val_int() override; + String *val_str(String *) override; + bool val_native(THD *thd, Native *) override; + my_decimal *val_decimal(my_decimal *) override; + bool val_bool() override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + const Type_handler *type_handler() const override; + bool fix_length_and_dec() override; - uint cols() const; - Item* element_index(uint i) { return reinterpret_cast<Item*>(row[i]); } - Item** addr(uint i) { return (Item**)row + i; } - bool check_cols(uint c); - bool null_inside(); - void bring_value(); + uint cols() const override; + Item* element_index(uint i) override + { return reinterpret_cast<Item*>(row[i]); } + Item** addr(uint i) override { return (Item**)row + i; } + bool check_cols(uint c) override; + bool null_inside() override; + void bring_value() override; /** This method is used to implement a special case of semantic tree @@ -345,7 +342,7 @@ public: */ st_select_lex* invalidate_and_restore_select_lex(); - Item* expr_cache_insert_transformer(THD *thd, uchar *unused); + Item* expr_cache_insert_transformer(THD *thd, uchar *unused) override; friend class select_singlerow_subselect; }; @@ -360,12 +357,12 @@ protected: public: Item_maxmin_subselect(THD *thd, Item_subselect *parent, st_select_lex *select_lex, bool max); - virtual void print(String *str, enum_query_type query_type); - void cleanup(); + void print(String *str, enum_query_type query_type) override; + void cleanup() override; bool any_value() { return was_values; } void register_value() { was_values= TRUE; } - void reset_value_registration() { was_values= FALSE; } - void 
no_rows_in_result(); + void reset_value_registration() override { was_values= FALSE; } + void no_rows_in_result() override; }; /* exists subselect */ @@ -670,7 +667,7 @@ public: void disable_cond_guard_for_const_null_left_expr(int i) { - if (left_expr->const_item() && !left_expr->is_expensive()) + if (left_expr->can_eval_in_optimize()) { if (left_expr->element_index(i)->is_null()) set_cond_guard_var(i,FALSE); @@ -994,7 +991,10 @@ public: subselect_uniquesubquery_engine(THD *thd_arg, st_join_table *tab_arg, Item_in_subselect *subs, Item *where) :subselect_engine(subs, 0), tab(tab_arg), cond(where) - { DBUG_ASSERT(subs); } + { + thd= thd_arg; + DBUG_ASSERT(subs); + } ~subselect_uniquesubquery_engine(); void cleanup(); int prepare(THD *); @@ -1411,7 +1411,8 @@ protected: protected: virtual bool partial_match()= 0; public: - subselect_partial_match_engine(subselect_uniquesubquery_engine *engine_arg, + subselect_partial_match_engine(THD *thd, + subselect_uniquesubquery_engine *engine_arg, TABLE *tmp_table_arg, Item_subselect *item_arg, select_result_interceptor *result_arg, List<Item> *equi_join_conds_arg, @@ -1505,7 +1506,8 @@ protected: bool exists_complementing_null_row(MY_BITMAP *keys_to_complement); bool partial_match(); public: - subselect_rowid_merge_engine(subselect_uniquesubquery_engine *engine_arg, + subselect_rowid_merge_engine(THD *thd, + subselect_uniquesubquery_engine *engine_arg, TABLE *tmp_table_arg, uint merge_keys_count_arg, bool has_covering_null_row_arg, bool has_covering_null_columns_arg, @@ -1513,7 +1515,7 @@ public: Item_subselect *item_arg, select_result_interceptor *result_arg, List<Item> *equi_join_conds_arg) - :subselect_partial_match_engine(engine_arg, tmp_table_arg, + :subselect_partial_match_engine(thd, engine_arg, tmp_table_arg, item_arg, result_arg, equi_join_conds_arg, has_covering_null_row_arg, has_covering_null_columns_arg, @@ -1532,7 +1534,8 @@ class subselect_table_scan_engine: public subselect_partial_match_engine protected: bool 
partial_match(); public: - subselect_table_scan_engine(subselect_uniquesubquery_engine *engine_arg, + subselect_table_scan_engine(THD *thd, + subselect_uniquesubquery_engine *engine_arg, TABLE *tmp_table_arg, Item_subselect *item_arg, select_result_interceptor *result_arg, List<Item> *equi_join_conds_arg, diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 9baf945644e..6fd33fe84b1 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2015, Oracle and/or its affiliates. - Copyright (c) 2008, 2020, MariaDB + Copyright (c) 2008, 2022, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -48,6 +48,25 @@ size_t Item_sum::ram_limitation(THD *thd) } +/* + Force create_tmp_table() to convert BIT columns to BIGINT. + This is needed because BIT fields store parts of their data in table's + null bits, and we don't have methods to compare two table records with + bit fields. +*/ + +static void store_bit_fields_as_bigint_in_tempory_table(List<Item> *list) +{ + List_iterator_fast<Item> li(*list); + Item *item; + while ((item= li++)) + { + if (item->type() == Item::FIELD_ITEM && + ((Item_field*) item)->field->type() == FIELD_TYPE_BIT) + item->marker= MARKER_NULL_KEY; + } +} + /** Prepare an aggregate function item for checking context conditions. @@ -168,7 +187,7 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) Aggregation happes before window function computation, so there are no values to aggregate over. 
*/ - if (with_window_func) + if (with_window_func()) { my_message(ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG, ER_THD(thd, ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG), @@ -407,7 +426,7 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) for (sl= thd->lex->current_select; sl && sl != aggr_sel && sl->master_unit()->item; sl= sl->master_unit()->outer_select() ) - sl->master_unit()->item->get_with_sum_func_cache()->set_with_sum_func(); + sl->master_unit()->item->with_flags|= item_with_t::SUM_FUNC; } thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL); @@ -478,7 +497,7 @@ Item_sum::Item_sum(THD *thd, Item_sum *item): init_aggregator(); with_distinct= item->with_distinct; if (item->aggr) - set_aggregator(item->aggr->Aggrtype()); + set_aggregator(thd, item->aggr->Aggrtype()); } @@ -488,7 +507,7 @@ void Item_sum::mark_as_sum_func() cur_select->n_sum_items++; cur_select->with_sum_func= 1; const_item_cache= false; - with_field= 0; + with_flags= (with_flags | item_with_t::SUM_FUNC) & ~item_with_t::FIELD; window_func_sum_expr_flag= false; } @@ -496,8 +515,8 @@ void Item_sum::mark_as_sum_func() void Item_sum::print(String *str, enum_query_type query_type) { /* orig_args is not filled with valid values until fix_fields() */ - Item **pargs= fixed ? orig_args : args; - str->append(func_name()); + Item **pargs= fixed() ? 
orig_args : args; + str->append(func_name_cstring()); /* TODO: The fact that func_name() may return a name with an extra '(' @@ -579,7 +598,7 @@ Item *Item_sum::set_arg(uint i, THD *thd, Item *new_val) } -int Item_sum::set_aggregator(Aggregator::Aggregator_type aggregator) +int Item_sum::set_aggregator(THD *thd, Aggregator::Aggregator_type aggregator) { /* Dependent subselects may be executed multiple times, making @@ -600,10 +619,10 @@ int Item_sum::set_aggregator(Aggregator::Aggregator_type aggregator) switch (aggregator) { case Aggregator::DISTINCT_AGGREGATOR: - aggr= new Aggregator_distinct(this); + aggr= new (thd->mem_root) Aggregator_distinct(this); break; case Aggregator::SIMPLE_AGGREGATOR: - aggr= new Aggregator_simple(this); + aggr= new (thd->mem_root) Aggregator_simple(this); break; }; return aggr ? FALSE : TRUE; @@ -763,7 +782,7 @@ bool Aggregator_distinct::setup(THD *thd) List<Item> list; SELECT_LEX *select_lex= thd->lex->current_select; - if (!(tmp_table_param= new TMP_TABLE_PARAM)) + if (!(tmp_table_param= new (thd->mem_root) TMP_TABLE_PARAM)) return TRUE; /* Create a table with an unique key over all parameters */ @@ -781,24 +800,15 @@ bool Aggregator_distinct::setup(THD *thd) tmp_table_param->force_copy_fields= item_sum->has_force_copy_fields(); DBUG_ASSERT(table == 0); /* - Make create_tmp_table() convert BIT columns to BIGINT. - This is needed because BIT fields store parts of their data in table's - null bits, and we don't have methods to compare two table records, which - is needed by Unique which is used when HEAP table is used. + Convert bit fields to bigint's in temporary table. + Needed by Unique which is used when HEAP table is used. 
*/ - { - List_iterator_fast<Item> li(list); - Item *item; - while ((item= li++)) - { - if (item->type() == Item::FIELD_ITEM && - ((Item_field*)item)->field->type() == FIELD_TYPE_BIT) - item->marker=4; - } - } + store_bit_fields_as_bigint_in_tempory_table(&list); + if (!(table= create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1, 0, - (select_lex->options | thd->variables.option_bits), + (select_lex->options | + thd->variables.option_bits), HA_POS_ERROR, &empty_clex_str))) return TRUE; table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows @@ -863,8 +873,9 @@ bool Aggregator_distinct::setup(THD *thd) } } DBUG_ASSERT(tree == 0); - tree= new Unique(compare_key, cmp_arg, tree_key_length, - item_sum->ram_limitation(thd)); + tree= (new (thd->mem_root) + Unique(compare_key, cmp_arg, tree_key_length, + item_sum->ram_limitation(thd))); /* The only time tree_key_length could be 0 is if someone does count(distinct) on a char(0) field - stupid thing to do, @@ -890,10 +901,11 @@ bool Aggregator_distinct::setup(THD *thd) mem_root. */ - item_sum->null_value= item_sum->maybe_null= 1; + item_sum->null_value= 1; + item_sum->set_maybe_null(); item_sum->quick_group= 0; - DBUG_ASSERT(item_sum->get_arg(0)->is_fixed()); + DBUG_ASSERT(item_sum->get_arg(0)->fixed()); arg= item_sum->get_arg(0); if (arg->const_item()) @@ -920,8 +932,9 @@ bool Aggregator_distinct::setup(THD *thd) simple_raw_key_cmp because the table contains numbers only; decimals are converted to binary representation as well. 
*/ - tree= new Unique(simple_raw_key_cmp, &tree_key_length, tree_key_length, - item_sum->ram_limitation(thd)); + tree= (new (thd->mem_root) + Unique(simple_raw_key_cmp, &tree_key_length, tree_key_length, + item_sum->ram_limitation(thd))); DBUG_RETURN(tree == 0); } @@ -1050,7 +1063,7 @@ void Aggregator_distinct::endup() if (item_sum->sum_func() == Item_sum::COUNT_FUNC || item_sum->sum_func() == Item_sum::COUNT_DISTINCT_FUNC) { - DBUG_ASSERT(item_sum->fixed == 1); + DBUG_ASSERT(item_sum->fixed()); Item_sum_count *sum= (Item_sum_count *)item_sum; if (tree && tree->elements == 0) { @@ -1110,21 +1123,20 @@ my_decimal *Item_sum_int::val_decimal(my_decimal *decimal_value) bool Item_sum_num::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); if (init_sum_func_check(thd)) return TRUE; decimals=0; - maybe_null= sum_func() != COUNT_FUNC; + set_maybe_null(sum_func() != COUNT_FUNC); for (uint i=0 ; i < arg_count ; i++) { if (args[i]->fix_fields_if_needed_for_scalar(thd, &args[i])) return TRUE; set_if_bigger(decimals, args[i]->decimals); - m_with_subquery|= args[i]->with_subquery(); - with_param|= args[i]->with_param; - with_window_func|= args[i]->with_window_func; + /* We should ignore FIELD's in arguments to sum functions */ + with_flags|= (args[i]->with_flags & ~item_with_t::FIELD); } result_field=0; max_length=float_length(decimals); @@ -1135,7 +1147,7 @@ Item_sum_num::fix_fields(THD *thd, Item **ref) if (arg_count) memcpy (orig_args, args, sizeof (Item *) * arg_count); - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -1144,7 +1156,7 @@ bool Item_sum_min_max::fix_fields(THD *thd, Item **ref) { DBUG_ENTER("Item_sum_min_max::fix_fields"); - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); if (init_sum_func_check(thd)) DBUG_RETURN(TRUE); @@ -1153,10 +1165,8 @@ Item_sum_min_max::fix_fields(THD *thd, Item **ref) if (args[0]->fix_fields_if_needed_for_scalar(thd, &args[0])) DBUG_RETURN(TRUE); - m_with_subquery= 
args[0]->with_subquery(); - with_param= args[0]->with_param; - with_window_func|= args[0]->with_window_func; - + /* We should ignore FIELD's in arguments to sum functions */ + with_flags|= (args[0]->with_flags & ~item_with_t::FIELD); if (fix_length_and_dec()) DBUG_RETURN(TRUE); @@ -1168,7 +1178,7 @@ Item_sum_min_max::fix_fields(THD *thd, Item **ref) DBUG_RETURN(TRUE); orig_args[0]= args[0]; - fixed= 1; + base_flags|= item_base_t::FIXED; DBUG_RETURN(FALSE); } @@ -1238,7 +1248,8 @@ bool Item_sum_min_max::fix_length_and_dec() DBUG_ASSERT(args[0]->field_type() == args[0]->real_item()->field_type()); DBUG_ASSERT(args[0]->result_type() == args[0]->real_item()->result_type()); /* MIN/MAX can return NULL for empty set indepedent of the used column */ - maybe_null= null_value= true; + set_maybe_null(); + null_value= true; return args[0]->type_handler()->Item_sum_hybrid_fix_length_and_dec(this); } @@ -1276,9 +1287,9 @@ void Item_sum_min_max::setup_hybrid(THD *thd, Item *item, Item *value_arg) /* Don't cache value, as it will change */ if (!item->const_item()) arg_cache->set_used_tables(RAND_TABLE_BIT); - cmp= new Arg_comparator(); + cmp= new (thd->mem_root) Arg_comparator(); if (cmp) - cmp->set_cmp_func(this, (Item**)&arg_cache, (Item**)&value, FALSE); + cmp->set_cmp_func(thd, this, (Item**)&arg_cache, (Item**)&value, FALSE); DBUG_VOID_RETURN; } @@ -1309,7 +1320,7 @@ Item_sum_sp::Item_sum_sp(THD *thd, Name_resolution_context *context_arg, sp_name *name_arg, sp_head *sp, List<Item> &list) :Item_sum(thd, list), Item_sp(thd, context_arg, name_arg) { - maybe_null= 1; + set_maybe_null(); quick_group= 0; m_sp= sp; } @@ -1318,7 +1329,7 @@ Item_sum_sp::Item_sum_sp(THD *thd, Name_resolution_context *context_arg, sp_name *name_arg, sp_head *sp) :Item_sum(thd), Item_sp(thd, context_arg, name_arg) { - maybe_null= 1; + set_maybe_null(); quick_group= 0; m_sp= sp; } @@ -1326,14 +1337,14 @@ Item_sum_sp::Item_sum_sp(THD *thd, Name_resolution_context *context_arg, Item_sum_sp::Item_sum_sp(THD 
*thd, Item_sum_sp *item): Item_sum(thd, item), Item_sp(thd, item) { - maybe_null= item->maybe_null; + base_flags|= (item->base_flags & item_base_t::MAYBE_NULL); quick_group= item->quick_group; } bool Item_sum_sp::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); if (init_sum_func_check(thd)) return TRUE; decimals= 0; @@ -1347,7 +1358,7 @@ Item_sum_sp::fix_fields(THD *thd, Item **ref) return TRUE; } - if (init_result_field(thd, max_length, maybe_null, &null_value, &name)) + if (init_result_field(thd, max_length, maybe_null(), &null_value, &name)) return TRUE; for (uint i= 0 ; i < arg_count ; i++) @@ -1355,8 +1366,8 @@ Item_sum_sp::fix_fields(THD *thd, Item **ref) if (args[i]->fix_fields_if_needed_for_scalar(thd, &args[i])) return TRUE; set_if_bigger(decimals, args[i]->decimals); - m_with_subquery|= args[i]->with_subquery(); - with_window_func|= args[i]->with_window_func; + /* We should ignore FIELD's in arguments to sum functions */ + with_flags|= (args[i]->with_flags & ~item_with_t::FIELD); } result_field= NULL; max_length= float_length(decimals); @@ -1369,7 +1380,7 @@ Item_sum_sp::fix_fields(THD *thd, Item **ref) if (arg_count) memcpy(orig_args, args, sizeof(Item *) * arg_count); - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -1457,17 +1468,15 @@ Item_sum_sp::fix_length_and_dec() DBUG_RETURN(res); } -const char * -Item_sum_sp::func_name() const +LEX_CSTRING Item_sum_sp::func_name_cstring() const { - THD *thd= current_thd; - return Item_sp::func_name(thd, false); + return Item_sp::func_name_cstring(current_thd, false); } Item* Item_sum_sp::copy_or_same(THD *thd) { Item_sum_sp *copy_item= new (thd->mem_root) Item_sum_sp(thd, this); - copy_item->init_result_field(thd, max_length, maybe_null, + copy_item->init_result_field(thd, max_length, maybe_null(), ©_item->null_value, ©_item->name); return copy_item; } @@ -1555,7 +1564,8 @@ void Item_sum_sum::fix_length_and_dec_decimal() bool 
Item_sum_sum::fix_length_and_dec() { DBUG_ENTER("Item_sum_sum::fix_length_and_dec"); - maybe_null=null_value=1; + set_maybe_null(); + null_value=1; if (args[0]->cast_to_int_type_handler()-> Item_sum_sum_fix_length_and_dec(this)) DBUG_RETURN(TRUE); @@ -1696,7 +1706,7 @@ void Item_sum_sum::add_helper(bool perform_removal) longlong Item_sum_sum::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (aggr) aggr->endup(); if (result_type() == DECIMAL_RESULT) @@ -1707,7 +1717,7 @@ longlong Item_sum_sum::val_int() double Item_sum_sum::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (aggr) aggr->endup(); if (result_type() == DECIMAL_RESULT) @@ -1831,7 +1841,7 @@ bool Aggregator_simple::arg_is_null(bool use_null_value) { for (uint i= 0; i < item_count; i++) { - if (item[i]->maybe_null && item[i]->is_null()) + if (item[i]->maybe_null() && item[i]->is_null()) return true; } } @@ -1863,7 +1873,7 @@ bool Aggregator_distinct::arg_is_null(bool use_null_value) } return use_null_value ? 
item_sum->args[0]->null_value : - (item_sum->args[0]->maybe_null && item_sum->args[0]->is_null()); + (item_sum->args[0]->maybe_null() && item_sum->args[0]->is_null()); } @@ -1928,7 +1938,7 @@ void Item_sum_count::remove() longlong Item_sum_count::val_int() { DBUG_ENTER("Item_sum_count::val_int"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (aggr) aggr->endup(); DBUG_RETURN((longlong)count); @@ -1977,7 +1987,8 @@ bool Item_sum_avg::fix_length_and_dec() { DBUG_ENTER("Item_sum_avg::fix_length_and_dec"); prec_increment= current_thd->variables.div_precincrement; - maybe_null=null_value=1; + set_maybe_null(); + null_value=1; if (args[0]->cast_to_int_type_handler()-> Item_sum_avg_fix_length_and_dec(this)) DBUG_RETURN(TRUE); @@ -2043,7 +2054,7 @@ void Item_sum_avg::remove() double Item_sum_avg::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (aggr) aggr->endup(); if (!count) @@ -2059,7 +2070,7 @@ my_decimal *Item_sum_avg::val_decimal(my_decimal *val) { my_decimal cnt; const my_decimal *sum_dec; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (aggr) aggr->endup(); if (!count) @@ -2098,7 +2109,7 @@ String *Item_sum_avg::val_str(String *str) double Item_sum_std::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); double nr= Item_sum_variance::val_real(); if (std::isnan(nr)) { @@ -2207,7 +2218,8 @@ void Item_sum_variance::fix_length_and_dec_decimal() bool Item_sum_variance::fix_length_and_dec() { DBUG_ENTER("Item_sum_variance::fix_length_and_dec"); - maybe_null= null_value= 1; + set_maybe_null(); + null_value= 1; prec_increment= current_thd->variables.div_precincrement; /* @@ -2250,7 +2262,7 @@ Field *Item_sum_variance::create_tmp_field(MEM_ROOT *root, &name, &my_charset_bin); } else - field= new (root) Field_double(max_length, maybe_null, &name, decimals, + field= new (root) Field_double(max_length, maybe_null(), &name, decimals, TRUE); if (field != NULL) @@ -2280,7 +2292,7 @@ bool Item_sum_variance::add() double 
Item_sum_variance::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); /* 'sample' is a 1/0 boolean value. If it is 1/true, id est this is a sample @@ -2377,7 +2389,7 @@ void Item_sum_min_max::clear() bool Item_sum_min_max::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return true; bool retval= value->get_date(thd, ltime, fuzzydate); @@ -2400,7 +2412,7 @@ void Item_sum_min_max::direct_add(Item *item) double Item_sum_min_max::val_real() { DBUG_ENTER("Item_sum_min_max::val_real"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) DBUG_RETURN(0.0); double retval= value->val_real(); @@ -2412,7 +2424,7 @@ double Item_sum_min_max::val_real() longlong Item_sum_min_max::val_int() { DBUG_ENTER("Item_sum_min_max::val_int"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) DBUG_RETURN(0); longlong retval= value->val_int(); @@ -2425,7 +2437,7 @@ longlong Item_sum_min_max::val_int() my_decimal *Item_sum_min_max::val_decimal(my_decimal *val) { DBUG_ENTER("Item_sum_min_max::val_decimal"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) DBUG_RETURN(0); my_decimal *retval= value->val_decimal(val); @@ -2439,7 +2451,7 @@ String * Item_sum_min_max::val_str(String *str) { DBUG_ENTER("Item_sum_min_max::val_str"); - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) DBUG_RETURN(0); String *retval= value->val_str(str); @@ -2451,7 +2463,7 @@ Item_sum_min_max::val_str(String *str) bool Item_sum_min_max::val_native(THD *thd, Native *to) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return true; return val_native_from_item(thd, value, to); @@ -2587,7 +2599,7 @@ bool Item_sum_max::add() longlong Item_sum_bit::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); return (longlong) bits; } @@ -2774,7 +2786,7 @@ void Item_sum_min_max::reset_field() { longlong nr= arg0->val_int(); - if (maybe_null) + if 
(maybe_null()) { if (arg0->null_value) { @@ -2792,7 +2804,7 @@ void Item_sum_min_max::reset_field() { double nr= arg0->val_real(); - if (maybe_null) + if (maybe_null()) { if (arg0->null_value) { @@ -2809,7 +2821,7 @@ void Item_sum_min_max::reset_field() { VDec arg_dec(arg0); - if (maybe_null) + if (maybe_null()) { if (arg_dec.is_null()) result_field->set_null(); @@ -2884,7 +2896,7 @@ void Item_sum_count::reset_field() direct_counted= FALSE; direct_reseted_field= TRUE; } - else if (!args[0]->maybe_null || !args[0]->is_null()) + else if (!args[0]->maybe_null() || !args[0]->is_null()) nr= 1; DBUG_PRINT("info", ("nr: %lld", nr)); int8store(res,nr); @@ -3019,7 +3031,7 @@ void Item_sum_count::update_field() direct_counted= direct_reseted_field= FALSE; nr+= direct_count; } - else if (!args[0]->maybe_null || !args[0]->is_null()) + else if (!args[0]->maybe_null() || !args[0]->is_null()) nr++; DBUG_PRINT("info", ("nr: %lld", nr)); int8store(res,nr); @@ -3375,7 +3387,7 @@ void Item_udf_sum::cleanup() void Item_udf_sum::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); for (uint i=0 ; i < arg_count ; i++) { @@ -3396,7 +3408,7 @@ double Item_sum_udf_float::val_real() { my_bool tmp_null_value; double res; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_sum_udf_float::val"); DBUG_PRINT("enter",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); @@ -3422,7 +3434,7 @@ my_decimal *Item_sum_udf_decimal::val_decimal(my_decimal *dec_buf) { my_decimal *res; my_bool tmp_null_value; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_func_udf_decimal::val_decimal"); DBUG_PRINT("enter",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); @@ -3448,7 +3460,7 @@ longlong Item_sum_udf_int::val_int() { my_bool tmp_null_value; longlong res; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_sum_udf_int::val_int"); 
DBUG_PRINT("enter",("result_type: %d arg_count: %d", args[0]->result_type(), arg_count)); @@ -3494,7 +3506,7 @@ my_decimal *Item_sum_udf_str::val_decimal(my_decimal *dec) String *Item_sum_udf_str::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ENTER("Item_sum_udf_str::str"); String *res=udf.val_str(str,&str_value); null_value = !res; @@ -4168,6 +4180,8 @@ bool Item_func_group_concat::add(bool exclude_nulls) if (field->is_null_in_record((const uchar*) table->record[0]) && exclude_nulls) return 0; // Skip row if it contains null + + buf.set_buffer_if_not_allocated(&my_charset_bin); if (tree && (res= field->val_str(&buf))) row_str_len+= res->length(); } @@ -4224,12 +4238,12 @@ bool Item_func_group_concat::fix_fields(THD *thd, Item **ref) { uint i; /* for loop variable */ - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); if (init_sum_func_check(thd)) return TRUE; - maybe_null= 1; + set_maybe_null(); /* Fix fields for select list and ORDER clause @@ -4239,9 +4253,8 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref) { if (args[i]->fix_fields_if_needed_for_scalar(thd, &args[i])) return TRUE; - m_with_subquery|= args[i]->with_subquery(); - with_param|= args[i]->with_param; - with_window_func|= args[i]->with_window_func; + /* We should ignore FIELD's in arguments to sum functions */ + with_flags|= (args[i]->with_flags & ~item_with_t::FIELD); } /* skip charset aggregation for order columns */ @@ -4280,7 +4293,7 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref) if (check_sum_func(thd, ref)) return TRUE; - fixed= 1; + base_flags|= item_base_t::FIXED; return FALSE; } @@ -4299,7 +4312,7 @@ bool Item_func_group_concat::setup(THD *thd) if (table || tree) DBUG_RETURN(FALSE); - if (!(tmp_table_param= new TMP_TABLE_PARAM)) + if (!(tmp_table_param= new (thd->mem_root) TMP_TABLE_PARAM)) DBUG_RETURN(TRUE); /* Push all not constant fields to the list and create a temp table */ @@ -4351,20 +4364,13 @@ bool 
Item_func_group_concat::setup(THD *thd) if (order_or_distinct) { /* - Force the create_tmp_table() to convert BIT columns to INT - as we cannot compare two table records containing BIT fields + Convert bit fields to bigint's in the temporary table. + Needed as we cannot compare two table records containing BIT fields stored in the the tree used for distinct/order by. Moreover we don't even save in the tree record null bits where BIT fields store parts of their data. */ - List_iterator_fast<Item> li(all_fields); - Item *item; - while ((item= li++)) - { - if (item->type() == Item::FIELD_ITEM && - ((Item_field*) item)->field->type() == FIELD_TYPE_BIT) - item->marker= 4; - } + store_bit_fields_as_bigint_in_tempory_table(&all_fields); } /* @@ -4388,7 +4394,7 @@ bool Item_func_group_concat::setup(THD *thd) with ORDER BY | DISTINCT and BLOB field count > 0. */ if (order_or_distinct && table->s->blob_fields) - table->blob_storage= new Blob_mem_storage(); + table->blob_storage= new (thd->mem_root) Blob_mem_storage(); /* Need sorting or uniqueness: init tree and choose a function to sort. 
@@ -4414,10 +4420,11 @@ bool Item_func_group_concat::setup(THD *thd) } if (distinct) - unique_filter= new Unique(get_comparator_function_for_distinct(), - (void*)this, - tree_key_length + get_null_bytes(), - ram_limitation(thd)); + unique_filter= (new (thd->mem_root) + Unique(get_comparator_function_for_distinct(), + (void*)this, + tree_key_length + get_null_bytes(), + ram_limitation(thd))); if ((row_limit && row_limit->cmp_type() != INT_RESULT) || (offset_limit && offset_limit->cmp_type() != INT_RESULT)) { @@ -4443,7 +4450,7 @@ void Item_func_group_concat::make_unique() String* Item_func_group_concat::val_str(String* str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return 0; @@ -4532,7 +4539,7 @@ uint Item_func_group_concat::get_null_bytes() void Item_func_group_concat::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); if (distinct) str->append(STRING_WITH_LEN("distinct ")); for (uint i= 0; i < arg_count_field; i++) diff --git a/sql/item_sum.h b/sql/item_sum.h index 96604e5cf1d..7e89b2bd1d1 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -411,7 +411,7 @@ public: Item_sum(THD *thd, List<Item> &list); //Copy constructor, need to perform subselects with temporary tables Item_sum(THD *thd, Item_sum *item); - enum Type type() const { return SUM_FUNC_ITEM; } + enum Type type() const override { return SUM_FUNC_ITEM; } virtual enum Sumfunctype sum_func () const=0; bool is_aggr_sum_func() { @@ -461,14 +461,18 @@ public: Updated value is then saved in the field. 
*/ virtual void update_field()=0; - virtual bool fix_length_and_dec() - { maybe_null=1; null_value=1; return FALSE; } + bool fix_length_and_dec() override + { + set_maybe_null(); + null_value=1; + return FALSE; + } virtual Item *result_item(THD *thd, Field *field); - void update_used_tables (); + void update_used_tables() override; COND *build_equal_items(THD *thd, COND_EQUAL *inherited, bool link_item_fields, - COND_EQUAL **cond_equal_ref) + COND_EQUAL **cond_equal_ref) override { /* Item_sum (and derivants) of the original WHERE/HAVING clauses @@ -479,7 +483,7 @@ public: return Item::build_equal_items(thd, inherited, link_item_fields, cond_equal_ref); } - bool is_null() { return null_value; } + bool is_null() override { return null_value; } /** make_const() Called if we've managed to calculate the value of this Item in @@ -492,8 +496,8 @@ public: const_item_cache= true; } void reset_forced_const() { const_item_cache= false; } - virtual bool const_during_execution() const { return false; } - virtual void print(String *str, enum_query_type query_type); + bool const_during_execution() const override { return false; } + void print(String *str, enum_query_type query_type) override; void fix_num_length_and_dec(); /** @@ -506,22 +510,22 @@ public: may be initialized to 0 by clear() and to NULL by no_rows_in_result(). */ - virtual void no_rows_in_result() + void no_rows_in_result() override { - set_aggregator(with_distinct ? + set_aggregator(current_thd, with_distinct ? 
Aggregator::DISTINCT_AGGREGATOR : Aggregator::SIMPLE_AGGREGATOR); aggregator_clear(); } virtual void make_unique() { force_copy_fields= TRUE; } - Item *get_tmp_table_item(THD *thd); + Item *get_tmp_table_item(THD *thd) override; virtual Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table); Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, - const Tmp_field_param *param) + const Tmp_field_param *param) override { return create_tmp_field(root, param->group(), table); } - virtual bool collect_outer_ref_processor(void *param); + bool collect_outer_ref_processor(void *param) override; bool init_sum_func_check(THD *thd); bool check_sum_func(THD *thd, Item **ref); bool register_sum_func(THD *thd, Item **ref); @@ -531,7 +535,7 @@ public: Item *get_arg(uint i) const { return args[i]; } Item *set_arg(uint i, THD *thd, Item *new_val); uint get_arg_count() const { return arg_count; } - virtual Item **get_args() { return fixed ? orig_args : args; } + virtual Item **get_args() { return fixed() ? orig_args : args; } /* Initialization of distinct related members */ void init_aggregator() @@ -572,7 +576,7 @@ public: May be called multiple times. 
*/ - int set_aggregator(Aggregator::Aggregator_type aggregator); + int set_aggregator(THD *thd, Aggregator::Aggregator_type aggregator); virtual void clear()= 0; virtual bool add()= 0; @@ -581,14 +585,12 @@ public: virtual bool supports_removal() const { return false; } virtual void remove() { DBUG_ASSERT(0); } - virtual void cleanup(); - bool check_vcol_func_processor(void *arg); + void cleanup() override; + bool check_vcol_func_processor(void *arg) override; virtual void setup_window_func(THD *thd, Window_spec *window_spec) {} void mark_as_window_func_sum_expr() { window_func_sum_expr_flag= true; } bool is_window_func_sum_expr() { return window_func_sum_expr_flag; } virtual void setup_caches(THD *thd) {}; - - bool with_sum_func() const { return true; } virtual void set_partition_row_count(ulonglong count) { DBUG_ASSERT(0); } }; @@ -746,23 +748,24 @@ public: Item_sum_double(THD *thd, Item *item_par): Item_sum_num(thd, item_par) {} Item_sum_double(THD *thd, List<Item> &list): Item_sum_num(thd, list) {} Item_sum_double(THD *thd, Item_sum_double *item) :Item_sum_num(thd, item) {} - longlong val_int() + longlong val_int() override { return val_int_from_real(); } - String *val_str(String*str) + String *val_str(String*str) override { return val_string_from_real(str); } - my_decimal *val_decimal(my_decimal *to) + my_decimal *val_decimal(my_decimal *to) override { return val_decimal_from_real(to); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return get_date_from_real(thd, ltime, fuzzydate); } - const Type_handler *type_handler() const { return &type_handler_double; } + const Type_handler *type_handler() const override + { return &type_handler_double; } }; @@ -773,15 +776,20 @@ public: Item_sum_int(THD *thd, Item *item_par): Item_sum_num(thd, item_par) {} Item_sum_int(THD *thd, List<Item> &list): Item_sum_num(thd, list) {} Item_sum_int(THD *thd, Item_sum_int *item) 
:Item_sum_num(thd, item) {} - double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } - String *val_str(String*str); - my_decimal *val_decimal(my_decimal *); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + double val_real() override { DBUG_ASSERT(fixed()); return (double) val_int(); } + String *val_str(String*str) override; + my_decimal *val_decimal(my_decimal *) override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return get_date_from_int(thd, ltime, fuzzydate); } - bool fix_length_and_dec() - { decimals=0; max_length=21; maybe_null=null_value=0; return FALSE; } + bool fix_length_and_dec() override + { + decimals=0; + max_length=21; + base_flags&= ~item_base_t::MAYBE_NULL; + null_value=0; + return FALSE; } }; @@ -797,7 +805,7 @@ protected: my_decimal direct_sum_decimal; my_decimal dec_buffs[2]; uint curr_dec_buff; - bool fix_length_and_dec(); + bool fix_length_and_dec() override; public: Item_sum_sum(THD *thd, Item *item_par, bool distinct): @@ -807,40 +815,42 @@ public: set_distinct(distinct); } Item_sum_sum(THD *thd, Item_sum_sum *item); - enum Sumfunctype sum_func () const + enum Sumfunctype sum_func() const override { return has_with_distinct() ? 
SUM_DISTINCT_FUNC : SUM_FUNC; } - void cleanup(); + void cleanup() override; void direct_add(my_decimal *add_sum_decimal); void direct_add(double add_sum_real, bool add_sum_is_null); - void clear(); - bool add(); - double val_real(); - longlong val_int(); - String *val_str(String*str); - my_decimal *val_decimal(my_decimal *); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + void clear() override; + bool add() override; + double val_real() override; + longlong val_int() override; + String *val_str(String*str) override; + my_decimal *val_decimal(my_decimal *) override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return Type_handler_hybrid_field_type::type_handler(); } void fix_length_and_dec_double(); void fix_length_and_dec_decimal(); - void reset_field(); - void update_field(); - void no_rows_in_result() {} - const char *func_name() const + void reset_field() override; + void update_field() override; + void no_rows_in_result() override {} + LEX_CSTRING func_name_cstring() const override { - return has_with_distinct() ? "sum(distinct " : "sum("; + static LEX_CSTRING name_distinct= { STRING_WITH_LEN("sum(distinct ")}; + static LEX_CSTRING name_normal= { STRING_WITH_LEN("sum(") }; + return has_with_distinct() ? 
name_distinct : name_normal; } - Item *copy_or_same(THD* thd); - void remove(); - Item *get_copy(THD *thd) + Item *copy_or_same(THD* thd) override; + void remove() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_sum>(thd, this); } - bool supports_removal() const + bool supports_removal() const override { return true; } @@ -860,10 +870,10 @@ class Item_sum_count :public Item_sum_int friend class Aggregator_distinct; - void clear(); - bool add(); - void cleanup(); - void remove(); + void clear() override; + bool add() override; + void cleanup() override; + void remove() override; public: Item_sum_count(THD *thd, Item *item_par): @@ -889,30 +899,33 @@ public: Item_sum_int(thd, item), direct_counted(FALSE), direct_reseted_field(FALSE), count(item->count) {} - enum Sumfunctype sum_func () const + enum Sumfunctype sum_func () const override { return has_with_distinct() ? COUNT_DISTINCT_FUNC : COUNT_FUNC; } - void no_rows_in_result() { count=0; } + void no_rows_in_result() override { count=0; } void make_const(longlong count_arg) { count=count_arg; Item_sum::make_const(); } - const Type_handler *type_handler() const { return &type_handler_slonglong; } - longlong val_int(); - void reset_field(); - void update_field(); + const Type_handler *type_handler() const override + { return &type_handler_slonglong; } + longlong val_int() override; + void reset_field() override; + void update_field() override; void direct_add(longlong add_count); - const char *func_name() const + LEX_CSTRING func_name_cstring() const override { - return has_with_distinct() ? "count(distinct " : "count("; + static LEX_CSTRING name_distinct= { STRING_WITH_LEN("count(distinct ")}; + static LEX_CSTRING name_normal= { STRING_WITH_LEN("count(") }; + return has_with_distinct() ? 
name_distinct : name_normal; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_count>(thd, this); } - bool supports_removal() const + bool supports_removal() const override { return true; } @@ -937,38 +950,40 @@ public: void fix_length_and_dec_double(); void fix_length_and_dec_decimal(); - bool fix_length_and_dec(); - enum Sumfunctype sum_func () const + bool fix_length_and_dec() override; + enum Sumfunctype sum_func () const override { return has_with_distinct() ? AVG_DISTINCT_FUNC : AVG_FUNC; } - void clear(); - bool add(); - void remove(); - double val_real(); + void clear() override; + bool add() override; + void remove() override; + double val_real() override; // In SPs we might force the "wrong" type with select into a declare variable - longlong val_int() { return val_int_from_real(); } - my_decimal *val_decimal(my_decimal *); - String *val_str(String *str); - void reset_field(); - void update_field(); - Item *result_item(THD *thd, Field *field); - void no_rows_in_result() {} - const char *func_name() const + longlong val_int() override { return val_int_from_real(); } + my_decimal *val_decimal(my_decimal *) override; + String *val_str(String *str) override; + void reset_field() override; + void update_field() override; + Item *result_item(THD *thd, Field *field) override; + void no_rows_in_result() override {} + LEX_CSTRING func_name_cstring() const override { - return has_with_distinct() ? "avg(distinct " : "avg("; + static LEX_CSTRING name_distinct= { STRING_WITH_LEN("avg(distinct ")}; + static LEX_CSTRING name_normal= { STRING_WITH_LEN("avg(") }; + return has_with_distinct() ? 
name_distinct : name_normal; } - Item *copy_or_same(THD* thd); - Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table); - void cleanup() + Item *copy_or_same(THD* thd) override; + Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table) override; + void cleanup() override { count= 0; Item_sum_sum::cleanup(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_avg>(thd, this); } - bool supports_removal() const + bool supports_removal() const override { return true; } @@ -1040,8 +1055,12 @@ public: void update_field() override final; Item *result_item(THD *thd, Field *field) override; void no_rows_in_result() override final {} - const char *func_name() const override - { return sample ? "var_samp(" : "variance("; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name_sample= { STRING_WITH_LEN("var_samp(")}; + static LEX_CSTRING name_normal= { STRING_WITH_LEN("variance(") }; + return sample ? name_sample : name_normal; + } Item *copy_or_same(THD* thd) override; Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table) override final; @@ -1069,8 +1088,12 @@ class Item_sum_std final :public Item_sum_variance enum Sumfunctype sum_func () const override final { return STD_FUNC; } double val_real() override final; Item *result_item(THD *thd, Field *field) override final; - const char *func_name() const override final - { return sample ? "stddev_samp(" : "std("; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING std_name= {STRING_WITH_LEN("std(") }; + static LEX_CSTRING stddev_samp_name= {STRING_WITH_LEN("stddev_samp(") }; + return sample ? 
stddev_samp_name : std_name; + } Item *copy_or_same(THD* thd) override final; Item *get_copy(THD *thd) override final { return get_item_copy<Item_sum_std>(thd, this); } @@ -1093,7 +1116,7 @@ public: :Item_sum(thd, item), Type_handler_hybrid_field_type(item) { } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return Type_handler_hybrid_field_type::type_handler(); } bool fix_length_and_dec_generic(); bool fix_length_and_dec_numeric(const Type_handler *h); @@ -1126,35 +1149,36 @@ public: direct_added(FALSE), value(item->value), arg_cache(0), cmp_sign(item->cmp_sign), was_values(item->was_values) { } - bool fix_fields(THD *, Item **); - bool fix_length_and_dec(); + bool fix_fields(THD *, Item **) override; + bool fix_length_and_dec() override; void setup_hybrid(THD *thd, Item *item, Item *value_arg); - void clear(); + void clear() override; void direct_add(Item *item); - double val_real(); - longlong val_int(); - my_decimal *val_decimal(my_decimal *); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - void reset_field(); - String *val_str(String *); - bool val_native(THD *thd, Native *); - const Type_handler *real_type_handler() const + double val_real() override; + longlong val_int() override; + my_decimal *val_decimal(my_decimal *) override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + void reset_field() override; + String *val_str(String *) override; + bool val_native(THD *thd, Native *) override; + const Type_handler *real_type_handler() const override { return get_arg(0)->real_type_handler(); } - const TYPELIB *get_typelib() const { return args[0]->get_typelib(); } - void update_field(); + const TYPELIB *get_typelib() const override { return args[0]->get_typelib(); } + void update_field() override; void min_max_update_str_field(); void min_max_update_real_field(); void min_max_update_int_field(); void min_max_update_decimal_field(); void 
min_max_update_native_field(); - void cleanup(); + void cleanup() override; bool any_value() { return was_values; } - void no_rows_in_result(); - void restore_to_before_no_rows_in_result(); - Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table); - void setup_caches(THD *thd) { setup_hybrid(thd, arguments()[0], NULL); } + void no_rows_in_result() override; + void restore_to_before_no_rows_in_result() override; + Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table) override; + void setup_caches(THD *thd) override + { setup_hybrid(thd, arguments()[0], NULL); } }; @@ -1163,12 +1187,16 @@ class Item_sum_min final :public Item_sum_min_max public: Item_sum_min(THD *thd, Item *item_par): Item_sum_min_max(thd, item_par, 1) {} Item_sum_min(THD *thd, Item_sum_min *item) :Item_sum_min_max(thd, item) {} - enum Sumfunctype sum_func () const {return MIN_FUNC;} + enum Sumfunctype sum_func () const override {return MIN_FUNC;} - bool add(); - const char *func_name() const { return "min("; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + bool add() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING sum_name= {STRING_WITH_LEN("min(") }; + return sum_name; + } + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_min>(thd, this); } }; @@ -1178,12 +1206,16 @@ class Item_sum_max final :public Item_sum_min_max public: Item_sum_max(THD *thd, Item *item_par): Item_sum_min_max(thd, item_par, -1) {} Item_sum_max(THD *thd, Item_sum_max *item) :Item_sum_min_max(thd, item) {} - enum Sumfunctype sum_func () const {return MAX_FUNC;} + enum Sumfunctype sum_func () const override {return MAX_FUNC;} - bool add(); - const char *func_name() const { return "max("; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + bool add() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING sum_name= {STRING_WITH_LEN("max(") }; + return sum_name; + 
} + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_max>(thd, this); } }; @@ -1202,20 +1234,23 @@ public: if (as_window_function) memcpy(bit_counters, item->bit_counters, sizeof(bit_counters)); } - enum Sumfunctype sum_func () const {return SUM_BIT_FUNC;} - void clear(); - longlong val_int(); - void reset_field(); - void update_field(); - const Type_handler *type_handler() const { return &type_handler_ulonglong; } - bool fix_length_and_dec() + enum Sumfunctype sum_func () const override { return SUM_BIT_FUNC;} + void clear() override; + longlong val_int() override; + void reset_field() override; + void update_field() override; + const Type_handler *type_handler() const override + { return &type_handler_ulonglong; } + bool fix_length_and_dec() override { - if (args[0]->check_type_can_return_int(func_name())) + if (args[0]->check_type_can_return_int(func_name_cstring())) return true; - decimals= 0; max_length=21; unsigned_flag= 1; maybe_null= null_value= 0; + decimals= 0; max_length=21; unsigned_flag= 1; + base_flags&= ~item_base_t::MAYBE_NULL; + null_value= 0; return FALSE; } - void cleanup() + void cleanup() override { bits= reset_bits; if (as_window_function) @@ -1224,11 +1259,12 @@ public: } void setup_window_func(THD *thd __attribute__((unused)), Window_spec *window_spec __attribute__((unused))) + override { as_window_function= TRUE; clear_as_window(); } - void remove() + void remove() override { if (as_window_function) { @@ -1239,7 +1275,7 @@ public: DBUG_ASSERT(0); } - bool supports_removal() const + bool supports_removal() const override { return true; } @@ -1267,14 +1303,18 @@ class Item_sum_or final :public Item_sum_bit public: Item_sum_or(THD *thd, Item *item_par): Item_sum_bit(thd, item_par, 0) {} Item_sum_or(THD *thd, Item_sum_or *item) :Item_sum_bit(thd, item) {} - bool add(); - const char *func_name() const { return "bit_or("; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + bool 
add() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING sum_name= {STRING_WITH_LEN("bit_or(") }; + return sum_name; + } + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_or>(thd, this); } private: - void set_bits_from_counters(); + void set_bits_from_counters() override; }; @@ -1284,14 +1324,18 @@ public: Item_sum_and(THD *thd, Item *item_par): Item_sum_bit(thd, item_par, ULONGLONG_MAX) {} Item_sum_and(THD *thd, Item_sum_and *item) :Item_sum_bit(thd, item) {} - bool add(); - const char *func_name() const { return "bit_and("; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + bool add() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING sum_min_name= {STRING_WITH_LEN("bit_and(") }; + return sum_min_name; + } + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_and>(thd, this); } private: - void set_bits_from_counters(); + void set_bits_from_counters() override; }; class Item_sum_xor final :public Item_sum_bit @@ -1299,14 +1343,18 @@ class Item_sum_xor final :public Item_sum_bit public: Item_sum_xor(THD *thd, Item *item_par): Item_sum_bit(thd, item_par, 0) {} Item_sum_xor(THD *thd, Item_sum_xor *item) :Item_sum_bit(thd, item) {} - bool add(); - const char *func_name() const { return "bit_xor("; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + bool add() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING sum_min_name= {STRING_WITH_LEN("bit_xor(") }; + return sum_min_name; + } + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_xor>(thd, this); } private: - void set_bits_from_counters(); + void set_bits_from_counters() override; }; class sp_head; @@ -1361,7 +1409,7 @@ struct st_sp_security_context; Example: DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN ret_val; */ -class 
Item_sum_sp final :public Item_sum, +class Item_sum_sp :public Item_sum, public Item_sp { private: @@ -1375,48 +1423,48 @@ public: sp_head *sp, List<Item> &list); Item_sum_sp(THD *thd, Item_sum_sp *item); - enum Sumfunctype sum_func () const + enum Sumfunctype sum_func () const override { return SP_AGGREGATE_FUNC; } - Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table) + Field *create_field_for_create_select(MEM_ROOT *root, TABLE *table) override { return create_table_field_from_handler(root, table); } - bool fix_length_and_dec(); - bool fix_fields(THD *thd, Item **ref); - const char *func_name() const; - const Type_handler *type_handler() const; - bool add(); + bool fix_length_and_dec() override; + bool fix_fields(THD *thd, Item **ref) override; + LEX_CSTRING func_name_cstring() const override; + const Type_handler *type_handler() const override; + bool add() override; /* val_xx functions */ - longlong val_int() + longlong val_int() override { if(execute()) return 0; return sp_result_field->val_int(); } - double val_real() + double val_real() override { if(execute()) return 0.0; return sp_result_field->val_real(); } - my_decimal *val_decimal(my_decimal *dec_buf) + my_decimal *val_decimal(my_decimal *dec_buf) override { if(execute()) return NULL; return sp_result_field->val_decimal(dec_buf); } - bool val_native(THD *thd, Native *to) + bool val_native(THD *thd, Native *to) override { - return null_value= execute() || sp_result_field->val_native(to); + return (null_value= execute()) || sp_result_field->val_native(to); } - String *val_str(String *str) + String *val_str(String *str) override { String buf; char buff[20]; @@ -1434,11 +1482,11 @@ public: str->copy(buf); return str; } - void reset_field(){DBUG_ASSERT(0);} - void update_field(){DBUG_ASSERT(0);} - void clear(); - void cleanup(); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + void reset_field() override{DBUG_ASSERT(0);} + void update_field() override{DBUG_ASSERT(0);} + 
void clear() override; + void cleanup() override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return execute() || sp_result_field->get_date(ltime, fuzzydate); } @@ -1446,9 +1494,9 @@ public: { return sp_result_field; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_sp>(thd, this); } - Item *copy_or_same(THD *thd); + Item *copy_or_same(THD *thd) override; }; /* Items to get the value of a stored sum function */ @@ -1462,23 +1510,23 @@ public: :Item(thd), field(item->result_field) { name= item->name; - maybe_null= true; + set_maybe_null(); decimals= item->decimals; max_length= item->max_length; unsigned_flag= item->unsigned_flag; } - table_map used_tables() const { return (table_map) 1L; } + table_map used_tables() const override { return (table_map) 1L; } Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, - const Tmp_field_param *param) + const Tmp_field_param *param) override { return create_tmp_field_ex_simple(root, table, src, param); } - void save_in_result_field(bool no_conversions) { DBUG_ASSERT(0); } - bool check_vcol_func_processor(void *arg) + void save_in_result_field(bool no_conversions) override { DBUG_ASSERT(0); } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(name.str, arg, VCOL_IMPOSSIBLE); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); } @@ -1493,8 +1541,8 @@ public: Item_avg_field(THD *thd, Item_sum_avg *item) :Item_sum_field(thd, item), prec_increment(item->prec_increment) { } - enum Type type() const { return FIELD_AVG_ITEM; } - bool is_null() { update_null_value(); return null_value; } + enum Type type() const override { return FIELD_AVG_ITEM; } + bool is_null() override { update_null_value(); return null_value; } }; @@ 
-1504,12 +1552,15 @@ public: Item_avg_field_double(THD *thd, Item_sum_avg *item) :Item_avg_field(thd, item) { } - const Type_handler *type_handler() const { return &type_handler_double; } - longlong val_int() { return val_int_from_real(); } - my_decimal *val_decimal(my_decimal *dec) { return val_decimal_from_real(dec); } - String *val_str(String *str) { return val_string_from_real(str); } - double val_real(); - Item *get_copy(THD *thd) + const Type_handler *type_handler() const override + { return &type_handler_double; } + longlong val_int() override { return val_int_from_real(); } + my_decimal *val_decimal(my_decimal *dec) override + { return val_decimal_from_real(dec); } + String *val_str(String *str) override + { return val_string_from_real(str); } + double val_real() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_avg_field_double>(thd, this); } }; @@ -1524,21 +1575,22 @@ public: f_scale(item->f_scale), dec_bin_size(item->dec_bin_size) { } - const Type_handler *type_handler() const { return &type_handler_newdecimal; } - double val_real() + const Type_handler *type_handler() const override + { return &type_handler_newdecimal; } + double val_real() override { return VDec(this).to_double(); } - longlong val_int() + longlong val_int() override { return VDec(this).to_longlong(unsigned_flag); } - String *val_str(String *str) + String *val_str(String *str) override { return VDec(this).to_string_round(str, decimals); } - my_decimal *val_decimal(my_decimal *); - Item *get_copy(THD *thd) + my_decimal *val_decimal(my_decimal *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_avg_field_decimal>(thd, this); } }; @@ -1550,16 +1602,17 @@ public: Item_variance_field(THD *thd, Item_sum_variance *item) :Item_sum_field(thd, item), sample(item->sample) { } - enum Type type() const {return FIELD_VARIANCE_ITEM; } - double val_real(); - longlong val_int() { return val_int_from_real(); } - String *val_str(String *str) + enum Type 
type() const override {return FIELD_VARIANCE_ITEM; } + double val_real() override; + longlong val_int() override { return val_int_from_real(); } + String *val_str(String *str) override { return val_string_from_real(str); } - my_decimal *val_decimal(my_decimal *dec_buf) + my_decimal *val_decimal(my_decimal *dec_buf) override { return val_decimal_from_real(dec_buf); } - bool is_null() { update_null_value(); return null_value; } - const Type_handler *type_handler() const { return &type_handler_double; } - Item *get_copy(THD *thd) + bool is_null() override { update_null_value(); return null_value; } + const Type_handler *type_handler() const override + { return &type_handler_double; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_variance_field>(thd, this); } }; @@ -1570,9 +1623,9 @@ public: Item_std_field(THD *thd, Item_sum_std *item) :Item_variance_field(thd, item) { } - enum Type type() const { return FIELD_STD_ITEM; } - double val_real(); - Item *get_copy(THD *thd) + enum Type type() const override { return FIELD_STD_ITEM; } + double val_real() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_std_field>(thd, this); } }; @@ -1598,15 +1651,19 @@ public: Item_udf_sum(THD *thd, Item_udf_sum *item) :Item_sum(thd, item), udf(item->udf) { udf.not_original= TRUE; } - const char *func_name() const { return udf.name(); } - bool fix_fields(THD *thd, Item **ref) + LEX_CSTRING func_name_cstring() const override { - DBUG_ASSERT(fixed == 0); + const char *tmp= udf.name(); + return {tmp, strlen(tmp) }; + } + bool fix_fields(THD *thd, Item **ref) override + { + DBUG_ASSERT(fixed() == 0); if (init_sum_func_check(thd)) return TRUE; - fixed= 1; + base_flags|= item_base_t::FIXED; /* We set const_item_cache to false in constructors. It can be later changed to "true", in a Item_sum::make_const() call. 
@@ -1631,18 +1688,18 @@ public: memcpy (orig_args, args, sizeof (Item *) * arg_count); return check_sum_func(thd, ref); } - enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } + enum Sumfunctype sum_func () const override { return UDF_SUM_FUNC; } virtual bool have_field_update(void) const { return 0; } - void clear(); - bool add(); - bool supports_removal() const; - void remove(); - void reset_field() {}; - void update_field() {}; - void cleanup(); - virtual void print(String *str, enum_query_type query_type); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + void clear() override; + bool add() override; + bool supports_removal() const override; + void remove() override; + void reset_field() override {}; + void update_field() override {} + void cleanup() override; + void print(String *str, enum_query_type query_type) override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); } @@ -1658,14 +1715,16 @@ class Item_sum_udf_float :public Item_udf_sum Item_udf_sum(thd, udf_arg, list) {} Item_sum_udf_float(THD *thd, Item_sum_udf_float *item) :Item_udf_sum(thd, item) {} - longlong val_int() { return val_int_from_real(); } - double val_real(); - String *val_str(String*str); - my_decimal *val_decimal(my_decimal *); - const Type_handler *type_handler() const { return &type_handler_double; } - bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + longlong val_int() override { return val_int_from_real(); } + double val_real() override; + String *val_str(String*str) override; + my_decimal *val_decimal(my_decimal *) override; + const Type_handler *type_handler() const override + { return &type_handler_double; } + bool fix_length_and_dec() override + { fix_num_length_and_dec(); return FALSE; } + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { 
return get_item_copy<Item_sum_udf_float>(thd, this); } }; @@ -1679,20 +1738,20 @@ public: Item_udf_sum(thd, udf_arg, list) {} Item_sum_udf_int(THD *thd, Item_sum_udf_int *item) :Item_udf_sum(thd, item) {} - longlong val_int(); - double val_real() - { DBUG_ASSERT(fixed == 1); return (double) Item_sum_udf_int::val_int(); } - String *val_str(String*str); - my_decimal *val_decimal(my_decimal *); - const Type_handler *type_handler() const + longlong val_int() override; + double val_real() override + { DBUG_ASSERT(fixed()); return (double) Item_sum_udf_int::val_int(); } + String *val_str(String*str) override; + my_decimal *val_decimal(my_decimal *) override; + const Type_handler *type_handler() const override { if (unsigned_flag) return &type_handler_ulonglong; return &type_handler_slonglong; } - bool fix_length_and_dec() { decimals=0; max_length=21; return FALSE; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + bool fix_length_and_dec() override { decimals=0; max_length=21; return FALSE; } + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_udf_int>(thd, this); } }; @@ -1706,8 +1765,8 @@ public: Item_udf_sum(thd, udf_arg, list) {} Item_sum_udf_str(THD *thd, Item_sum_udf_str *item) :Item_udf_sum(thd, item) {} - String *val_str(String *); - double val_real() + String *val_str(String *) override; + double val_real() override { int err_not_used; char *end_not_used; @@ -1716,7 +1775,7 @@ public: return res ? 
res->charset()->strntod((char*) res->ptr(),res->length(), &end_not_used, &err_not_used) : 0.0; } - longlong val_int() + longlong val_int() override { int err_not_used; char *end; @@ -1729,11 +1788,12 @@ public: end= (char*) res->ptr()+res->length(); return cs->strtoll10(res->ptr(), &end, &err_not_used); } - my_decimal *val_decimal(my_decimal *dec); - const Type_handler *type_handler() const { return string_type_handler(); } - bool fix_length_and_dec(); - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + my_decimal *val_decimal(my_decimal *dec) override; + const Type_handler *type_handler() const override + { return string_type_handler(); } + bool fix_length_and_dec() override; + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_udf_str>(thd, this); } }; @@ -1747,23 +1807,25 @@ public: Item_udf_sum(thd, udf_arg, list) {} Item_sum_udf_decimal(THD *thd, Item_sum_udf_decimal *item) :Item_udf_sum(thd, item) {} - String *val_str(String *str) + String *val_str(String *str) override { return VDec(this).to_string_round(str, decimals); } - double val_real() + double val_real() override { return VDec(this).to_double(); } - longlong val_int() + longlong val_int() override { return VDec(this).to_longlong(unsigned_flag); } - my_decimal *val_decimal(my_decimal *); - const Type_handler *type_handler() const { return &type_handler_newdecimal; } - bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; } - Item *copy_or_same(THD* thd); - Item *get_copy(THD *thd) + my_decimal *val_decimal(my_decimal *) override; + const Type_handler *type_handler() const override + { return &type_handler_newdecimal; } + bool fix_length_and_dec() override + { fix_num_length_and_dec(); return FALSE; } + Item *copy_or_same(THD* thd) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_udf_decimal>(thd, this); } }; @@ -1779,7 +1841,7 @@ class Item_sum_udf_float :public Item_sum_double 
Item_sum_udf_float(THD *thd, Item_sum_udf_float *item) :Item_sum_double(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } - double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed()); return 0.0; } void clear() {} bool add() { return 0; } void reset_field() { DBUG_ASSERT(0); }; @@ -1797,8 +1859,8 @@ public: Item_sum_udf_int(THD *thd, Item_sum_udf_int *item) :Item_sum_double(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } - longlong val_int() { DBUG_ASSERT(fixed == 1); return 0; } - double val_real() { DBUG_ASSERT(fixed == 1); return 0; } + longlong val_int() { DBUG_ASSERT(fixed()); return 0; } + double val_real() { DBUG_ASSERT(fixed()); return 0; } void clear() {} bool add() { return 0; } void reset_field() { DBUG_ASSERT(0); }; @@ -1816,8 +1878,8 @@ class Item_sum_udf_decimal :public Item_sum_double Item_sum_udf_decimal(THD *thd, Item_sum_udf_float *item) :Item_sum_double(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } - double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } - my_decimal *val_decimal(my_decimal *) { DBUG_ASSERT(fixed == 1); return 0; } + double val_real() { DBUG_ASSERT(fixed()); return 0.0; } + my_decimal *val_decimal(my_decimal *) { DBUG_ASSERT(fixed()); return 0; } void clear() {} bool add() { return 0; } void reset_field() { DBUG_ASSERT(0); }; @@ -1835,10 +1897,11 @@ public: Item_sum_udf_str(THD *thd, Item_sum_udf_str *item) :Item_sum_double(thd, item) {} String *val_str(String *) - { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } - double val_real() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } - longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } - bool fix_length_and_dec() { maybe_null=1; max_length=0; return FALSE; } + { DBUG_ASSERT(fixed()); null_value=1; return 0; } + double val_real() { DBUG_ASSERT(fixed()); null_value=1; return 0.0; } + longlong val_int() { DBUG_ASSERT(fixed()); 
null_value=1; return 0; } + bool fix_length_and_dec() override + { base_flags|= item_base_t::MAYBE_NULL; max_length=0; return FALSE; } enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } void clear() {} bool add() { return 0; } @@ -1961,27 +2024,31 @@ public: Item_func_group_concat(THD *thd, Item_func_group_concat *item); ~Item_func_group_concat(); - void cleanup(); + void cleanup() override; - enum Sumfunctype sum_func () const {return GROUP_CONCAT_FUNC;} - const char *func_name() const { return "group_concat("; } - const Type_handler *type_handler() const + enum Sumfunctype sum_func () const override {return GROUP_CONCAT_FUNC;} + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING sum_name= {STRING_WITH_LEN("group_concat(") }; + return sum_name; + } + const Type_handler *type_handler() const override { if (too_big_for_varchar()) return &type_handler_blob; return &type_handler_varchar; } - void clear(); - bool add() + void clear() override; + bool add() override { return add(skip_nulls()); } - void reset_field() { DBUG_ASSERT(0); } // not used - void update_field() { DBUG_ASSERT(0); } // not used - bool fix_fields(THD *,Item **); - bool setup(THD *thd); - void make_unique(); - double val_real() + void reset_field() override { DBUG_ASSERT(0); } // not used + void update_field() override { DBUG_ASSERT(0); } // not used + bool fix_fields(THD *,Item **) override; + bool setup(THD *thd) override; + void make_unique() override; + double val_real() override { int error; const char *end; @@ -1991,7 +2058,7 @@ public: end= res->ptr() + res->length(); return (my_strtod(res->ptr(), (char**) &end, &error)); } - longlong val_int() + longlong val_int() override { String *res; char *end_ptr; @@ -2001,21 +2068,21 @@ public: end_ptr= (char*) res->ptr()+ res->length(); return my_strtoll10(res->ptr(), &end_ptr, &error); } - my_decimal *val_decimal(my_decimal *decimal_value) + my_decimal *val_decimal(my_decimal *decimal_value) override { return 
val_decimal_from_string(decimal_value); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return get_date_from_string(thd, ltime, fuzzydate); } - String* val_str(String* str); - Item *copy_or_same(THD* thd); - void no_rows_in_result() {} - void print(String *str, enum_query_type query_type); - bool change_context_processor(void *cntx) + String *val_str(String *str) override; + Item *copy_or_same(THD* thd) override; + void no_rows_in_result() override {} + void print(String *str, enum_query_type query_type) override; + bool change_context_processor(void *cntx) override { context= (Name_resolution_context *)cntx; return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_group_concat>(thd, this); } qsort_cmp2 get_comparator_function_for_distinct(); qsort_cmp2 get_comparator_function_for_order_by(); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 1f86741e706..0a24578ce85 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -59,7 +59,6 @@ /** Day number for Dec 31st, 9999. 
*/ #define MAX_DAY_NUMBER 3652424L - Func_handler_date_add_interval_datetime_arg0_time func_handler_date_add_interval_datetime_arg0_time; @@ -788,7 +787,7 @@ static bool get_interval_info(const char *str, size_t length,CHARSET_INFO *cs, longlong Item_func_period_add::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); ulong period=(ulong) args[0]->val_int(); int months=(int) args[1]->val_int(); @@ -803,7 +802,7 @@ longlong Item_func_period_add::val_int() longlong Item_func_period_diff::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); ulong period1=(ulong) args[0]->val_int(); ulong period2=(ulong) args[1]->val_int(); @@ -817,7 +816,7 @@ longlong Item_func_period_diff::val_int() longlong Item_func_to_days::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd)); return (null_value= !d.is_valid_datetime()) ? 0 : d.daynr(); @@ -827,7 +826,7 @@ longlong Item_func_to_days::val_int() longlong Item_func_to_seconds::val_int_endpoint(bool left_endp, bool *incl_endp) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); // val_int_endpoint() is called only if args[0] is a temporal Item_field Datetime_from_temporal dt(current_thd, args[0], TIME_FUZZY_DATES); if ((null_value= !dt.is_valid_datetime())) @@ -845,7 +844,7 @@ longlong Item_func_to_seconds::val_int_endpoint(bool left_endp, longlong Item_func_to_seconds::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; /* Unlike val_int_endpoint(), we cannot use Datetime_from_temporal here. 
@@ -895,7 +894,7 @@ enum_monotonicity_info Item_func_to_seconds::get_monotonicity_info() const longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); // val_int_endpoint() is only called if args[0] is a temporal Item_field Datetime_from_temporal dt(current_thd, args[0], TIME_CONV_NONE); longlong res; @@ -949,7 +948,7 @@ longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp) longlong Item_func_dayofyear::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd)); return (null_value= !d.is_valid_datetime()) ? 0 : d.dayofyear(); @@ -957,7 +956,7 @@ longlong Item_func_dayofyear::val_int() longlong Item_func_dayofmonth::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd)); return (null_value= !d.is_valid_datetime()) ? 0 : d.get_mysql_time()->day; @@ -965,7 +964,7 @@ longlong Item_func_dayofmonth::val_int() longlong Item_func_month::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd)); return (null_value= !d.is_valid_datetime()) ? 
0 : d.get_mysql_time()->month; @@ -976,18 +975,18 @@ bool Item_func_monthname::fix_length_and_dec() { THD* thd= current_thd; CHARSET_INFO *cs= thd->variables.collation_connection; - locale= thd->variables.lc_time_names; + locale= thd->variables.lc_time_names; collation.set(cs, DERIVATION_COERCIBLE, locale->repertoire()); decimals=0; max_length= locale->max_month_name_length * collation.collation->mbmaxlen; - maybe_null=1; + set_maybe_null(); return FALSE; } String* Item_func_monthname::val_str(String* str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); const char *month_name; uint err; THD *thd= current_thd; @@ -1008,7 +1007,7 @@ String* Item_func_monthname::val_str(String* str) longlong Item_func_quarter::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd)); return (null_value= !d.is_valid_datetime()) ? 0 : d.quarter(); @@ -1016,7 +1015,7 @@ longlong Item_func_quarter::val_int() longlong Item_func_hour::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Time tm(thd, args[0], Time::Options_for_cast(thd)); return (null_value= !tm.is_valid_time()) ? 0 : tm.get_mysql_time()->hour; @@ -1024,7 +1023,7 @@ longlong Item_func_hour::val_int() longlong Item_func_minute::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Time tm(thd, args[0], Time::Options_for_cast(thd)); return (null_value= !tm.is_valid_time()) ? 0 : tm.get_mysql_time()->minute; @@ -1035,7 +1034,7 @@ longlong Item_func_minute::val_int() */ longlong Item_func_second::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Time tm(thd, args[0], Time::Options_for_cast(thd)); return (null_value= !tm.is_valid_time()) ? 
0 : tm.get_mysql_time()->second; @@ -1083,7 +1082,7 @@ uint week_mode(uint mode) longlong Item_func_week::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); uint week_format; THD *thd= current_thd; Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd)); @@ -1099,7 +1098,7 @@ longlong Item_func_week::val_int() longlong Item_func_yearweek::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd)); return (null_value= !d.is_valid_datetime()) ? 0 : @@ -1109,7 +1108,7 @@ longlong Item_func_yearweek::val_int() longlong Item_func_weekday::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Datetime dt(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd)); if ((null_value= !dt.is_valid_datetime())) @@ -1125,14 +1124,14 @@ bool Item_func_dayname::fix_length_and_dec() collation.set(cs, DERIVATION_COERCIBLE, locale->repertoire()); decimals=0; max_length= locale->max_day_name_length * collation.collation->mbmaxlen; - maybe_null=1; + set_maybe_null(); return FALSE; } String* Item_func_dayname::val_str(String* str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); const char *day_name; uint err; THD *thd= current_thd; @@ -1150,7 +1149,7 @@ String* Item_func_dayname::val_str(String* str) longlong Item_func_year::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd)); return (null_value= !d.is_valid_datetime()) ? 
0 : d.get_mysql_time()->year; @@ -1183,7 +1182,7 @@ enum_monotonicity_info Item_func_year::get_monotonicity_info() const longlong Item_func_year::val_int_endpoint(bool left_endp, bool *incl_endp) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); // val_int_endpoint() is cally only if args[0] is a temporal Item_field Datetime_from_temporal dt(current_thd, args[0], TIME_CONV_NONE); if ((null_value= !dt.is_valid_datetime())) @@ -1216,7 +1215,7 @@ longlong Item_func_year::val_int_endpoint(bool left_endp, bool *incl_endp) bool Item_func_unix_timestamp::get_timestamp_value(my_time_t *seconds, ulong *second_part) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (args[0]->type() == FIELD_ITEM) { // Optimize timestamp field Field *field=((Item_field*) args[0])->field; @@ -1276,7 +1275,7 @@ enum_monotonicity_info Item_func_unix_timestamp::get_monotonicity_info() const longlong Item_func_unix_timestamp::val_int_endpoint(bool left_endp, bool *incl_endp) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ASSERT(arg_count == 1 && args[0]->type() == Item::FIELD_ITEM && args[0]->field_type() == MYSQL_TYPE_TIMESTAMP); @@ -1291,7 +1290,7 @@ longlong Item_func_unix_timestamp::val_int_endpoint(bool left_endp, bool *incl_e longlong Item_func_time_to_sec::int_op() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Time tm(thd, args[0], Time::Options_for_cast(thd)); return ((null_value= !tm.is_valid_time())) ? 
0 : tm.to_seconds(); @@ -1300,7 +1299,7 @@ longlong Item_func_time_to_sec::int_op() my_decimal *Item_func_time_to_sec::decimal_op(my_decimal* buf) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Time tm(thd, args[0], Time::Options_for_cast(thd)); if ((null_value= !tm.is_valid_time())) @@ -1575,7 +1574,7 @@ bool Item_func_curtime::get_date(THD *thd, MYSQL_TIME *res, void Item_func_curtime::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); if (decimals) str->append_ulonglong(decimals); @@ -1637,7 +1636,7 @@ bool Item_func_now::fix_fields(THD *thd, Item **items) void Item_func_now::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); if (decimals) str->append_ulonglong(decimals); @@ -1726,7 +1725,7 @@ bool Item_func_sysdate_local::get_date(THD *thd, MYSQL_TIME *res, bool Item_func_sec_to_time::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); VSec9 sec(thd, args[0], "seconds", LONGLONG_MAX); if ((null_value= sec.is_null())) return true; @@ -1774,7 +1773,7 @@ bool Item_func_date_format::fix_length_and_dec() collation.collation->mbmaxlen; set_if_smaller(max_length,MAX_BLOB_WIDTH); } - maybe_null=1; // If wrong date + set_maybe_null(); // If wrong date return FALSE; } @@ -1888,7 +1887,7 @@ String *Item_func_date_format::val_str(String *str) MYSQL_TIME l_time; uint size; const MY_LOCALE *lc= 0; - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); date_conv_mode_t mode= is_time_format ? TIME_TIME_ONLY : TIME_CONV_NONE; THD *thd= current_thd; @@ -1927,6 +1926,812 @@ null_date: return 0; } +/* + Oracle has many formatting models, we list all but only part of them + are implemented, because some models depend on oracle functions + which mariadb is not supported. + + Models for datetime, used by TO_CHAR/TO_DATE. 
Normal format characters are + stored as short integer < 128, while format characters are stored as a + integer > 128 +*/ + +enum enum_tochar_formats +{ + FMT_BASE= 128, + FMT_AD, + FMT_AD_DOT, + FMT_AM, + FMT_AM_DOT, + FMT_BC, + FMT_BC_DOT, + FMT_CC, + FMT_SCC, + FMT_D, + FMT_DAY, + FMT_DD, + FMT_DDD, + FMT_DL, + FMT_DS, + FMT_DY, + FMT_E, + FMT_EE, + FMT_FF, + FMT_FM, + FMT_FX, + FMT_HH, + FMT_HH12, + FMT_HH24, + FMT_IW, + FMT_I, + FMT_IY, + FMT_IYY, + FMT_IYYY, + FMT_J, + FMT_MI, + FMT_MM, + FMT_MON, + FMT_MONTH, + FMT_PM, + FMT_PM_DOT, + FMT_RM, + FMT_RR, + FMT_RRRR, + FMT_SS, + FMT_SSSSSS, + FMT_TS, + FMT_TZD, + FMT_TZH, + FMT_TZM, + FMT_TZR, + FMT_W, + FMT_WW, + FMT_X, + FMT_Y, + FMT_YY, + FMT_YYY, + FMT_YYYY, + FMT_YYYY_COMMA, + FMT_YEAR, + FMT_SYYYY, + FMT_SYEAR +}; + +/** + Flip 'quotation_flag' if we found a quote (") character. + + @param cftm Character or FMT... format descriptor + @param quotation_flag Points to 'true' if we are inside a quoted string + + @return true If we are inside a quoted string or if we found a '"' character + @return false Otherwise +*/ + +static inline bool check_quotation(uint16 cfmt, bool *quotation_flag) +{ + if (cfmt == '"') + { + *quotation_flag= !*quotation_flag; + return true; + } + return *quotation_flag; +} + +#define INVALID_CHARACTER(x) (((x) >= 'A' && (x) <= 'Z') ||((x) >= '0' && (x) <= '9') || (x) >= 127 || ((x) < 32)) + + +/** + Special characters are directly output in the result + + @return 0 If found not acceptable character + @return # Number of copied characters +*/ + +static uint parse_special(char cfmt, const char *ptr, const char *end, + uint16 *array) +{ + int offset= 0; + char tmp1; + + /* Non-printable character and Multibyte encoded characters */ + if (INVALID_CHARACTER(cfmt)) + return 0; + + /* + * '&' with text is used for variable input, but '&' with other + * special charaters like '|'. 
'*' is used as separator + */ + if (cfmt == '&' && ptr + 1 < end) + { + tmp1= my_toupper(system_charset_info, *(ptr+1)); + if (tmp1 >= 'A' && tmp1 <= 'Z') + return 0; + } + + do { + /* + Continuously store the special characters in fmt_array until non-special + characters appear + */ + *array++= (uint16) (uchar) *ptr++; + offset++; + if (ptr == end) + break; + tmp1= my_toupper(system_charset_info, *ptr); + } while (!INVALID_CHARACTER(tmp1) && tmp1 != '"'); + return offset; +} + + +/** + Parse the format string, convert it to an compact array and calculate the + length of output string + + @param format Format string + @param fmt_len Function will store max length of formated date string here + + @return 0 ok. fmt_len is updated + @return 1 error. In this case 'warning_string' is set to error message +*/ + +bool Item_func_tochar::parse_format_string(const String *format, uint *fmt_len) +{ + const char *ptr, *end; + uint16 *tmp_fmt= fmt_array; + uint tmp_len= 0; + int offset= 0; + bool quotation_flag= false; + + ptr= format->ptr(); + end= ptr + format->length(); + + if (format->length() > MAX_DATETIME_FORMAT_MODEL_LEN) + { + warning_message.append(STRING_WITH_LEN("datetime format string is too " + "long")); + return 1; + } + + for (; ptr < end; ptr++, tmp_fmt++) + { + uint ulen; + char cfmt, next_char; + + cfmt= my_toupper(system_charset_info, *ptr); + + /* + Oracle datetime format support text in double quotation marks like + 'YYYY"abc"MM"xyz"DD', When this happens, store the text and quotation + marks, and use the text as a separator in make_date_time_oracle. + + NOTE: the quotation mark is not print in return value. for example: + select TO_CHAR(sysdate, 'YYYY"abc"MM"xyzDD"') will return 2021abc01xyz11 + */ + if (check_quotation(cfmt, "ation_flag)) + { + *tmp_fmt= *ptr; + tmp_len+= 1; + continue; + } + + switch (cfmt) { + case 'A': // AD/A.D./AM/A.M. 
+ if (ptr+1 >= end) + goto error; + next_char= my_toupper(system_charset_info, *(ptr+1)); + if (next_char == 'D') + { + *tmp_fmt= FMT_AD; + ptr+= 1; + tmp_len+= 2; + } + else if (next_char == 'M') + { + *tmp_fmt= FMT_AM; + ptr+= 1; + tmp_len+= 2; + } + else if (next_char == '.' && ptr+3 < end && *(ptr+3) == '.') + { + if (my_toupper(system_charset_info, *(ptr+2)) == 'D') + { + *tmp_fmt= FMT_AD_DOT; + ptr+= 3; + tmp_len+= 4; + } + else if (my_toupper(system_charset_info, *(ptr+2)) == 'M') + { + *tmp_fmt= FMT_AM_DOT; + ptr+= 3; + tmp_len+= 4; + } + else + goto error; + } + else + goto error; + break; + case 'B': // BC and B.C + if (ptr+1 >= end) + goto error; + next_char= my_toupper(system_charset_info, *(ptr+1)); + if (next_char == 'C') + { + *tmp_fmt= FMT_BC; + ptr+= 1; + tmp_len+= 2; + } + else if (next_char == '.' && ptr+3 < end && + my_toupper(system_charset_info, *(ptr+2)) == 'C' && + *(ptr+3) == '.') + { + *tmp_fmt= FMT_BC_DOT; + ptr+= 3; + tmp_len+= 4; + } + else + goto error; + break; + case 'P': // PM or P.M. + next_char= my_toupper(system_charset_info, *(ptr+1)); + if (next_char == 'M') + { + *tmp_fmt= FMT_PM; + ptr+= 1; + tmp_len+= 2; + } + else if (next_char == '.' 
&& + my_toupper(system_charset_info, *(ptr+2)) == 'M' && + my_toupper(system_charset_info, *(ptr+3)) == '.') + { + *tmp_fmt= FMT_PM_DOT; + ptr+= 3; + tmp_len+= 4; + } + else + goto error; + break; + case 'Y': // Y, YY, YYY o YYYYY + if (ptr + 1 == end || my_toupper(system_charset_info, *(ptr+1)) != 'Y') + { + *tmp_fmt= FMT_Y; + tmp_len+= 1; + break; + } + if (ptr + 2 == end || + my_toupper(system_charset_info, *(ptr+2)) != 'Y') /* YY */ + { + *tmp_fmt= FMT_YY; + ulen= 2; + } + else + { + if (ptr + 3 < end && my_toupper(system_charset_info, *(ptr+3)) == 'Y') + { + *tmp_fmt= FMT_YYYY; + ulen= 4; + } + else + { + *tmp_fmt= FMT_YYY; + ulen= 3; + } + } + ptr+= ulen-1; + tmp_len+= ulen; + break; + + case 'R': // RR or RRRR + if (ptr + 1 == end || my_toupper(system_charset_info, *(ptr+1)) != 'R') + goto error; + + if (ptr + 2 == end || my_toupper(system_charset_info, *(ptr+2)) != 'R') + { + *tmp_fmt= FMT_RR; + ulen= 2; + } + else + { + if (ptr + 3 >= end || my_toupper(system_charset_info, *(ptr+3)) != 'R') + goto error; + *tmp_fmt= FMT_RRRR; + ulen= 4; + } + ptr+= ulen-1; + tmp_len+= ulen; + break; + case 'M': + { + char tmp1; + if (ptr + 1 >= end) + goto error; + + tmp1= my_toupper(system_charset_info, *(ptr+1)); + if (tmp1 == 'M') + { + *tmp_fmt= FMT_MM; + tmp_len+= 2; + ptr+= 1; + } + else if (tmp1 == 'I') + { + *tmp_fmt= FMT_MI; + tmp_len+= 2; + ptr+= 1; + } + else if (tmp1 == 'O') + { + if (ptr + 2 >= end) + goto error; + char tmp2= my_toupper(system_charset_info, *(ptr+2)); + if (tmp2 != 'N') + goto error; + + if (ptr + 4 >= end || + my_toupper(system_charset_info, *(ptr+3)) != 'T' || + my_toupper(system_charset_info, *(ptr+4)) != 'H') + { + *tmp_fmt= FMT_MON; + tmp_len+= 3; + ptr+= 2; + } + else + { + *tmp_fmt= FMT_MONTH; + tmp_len+= (locale->max_month_name_length * + my_charset_utf8mb3_bin.mbmaxlen); + ptr+= 4; + } + } + else + goto error; + } + break; + case 'D': // DD, DY, or DAY + { + if (ptr + 1 >= end) + goto error; + char tmp1= 
my_toupper(system_charset_info, *(ptr+1)); + + if (tmp1 == 'D') + { + *tmp_fmt= FMT_DD; + tmp_len+= 2; + } + else if (tmp1 == 'Y') + { + *tmp_fmt= FMT_DY; + tmp_len+= 3; + } + else if (tmp1 == 'A') // DAY + { + if (ptr + 2 == end || my_toupper(system_charset_info, *(ptr+2)) != 'Y') + goto error; + *tmp_fmt= FMT_DAY; + tmp_len+= locale->max_day_name_length * my_charset_utf8mb3_bin.mbmaxlen; + ptr+= 1; + } + else + goto error; + ptr+= 1; + } + break; + case 'H': // HH, HH12 or HH23 + { + char tmp1, tmp2, tmp3; + if (ptr + 1 >= end) + goto error; + tmp1= my_toupper(system_charset_info, *(ptr+1)); + + if (tmp1 != 'H') + goto error; + + if (ptr+3 >= end) + { + *tmp_fmt= FMT_HH; + ptr+= 1; + } + else + { + tmp2= *(ptr+2); + tmp3= *(ptr+3); + + if (tmp2 == '1' && tmp3 == '2') + { + *tmp_fmt= FMT_HH12; + ptr+= 3; + } + else if (tmp2 == '2' && tmp3 == '4') + { + *tmp_fmt= FMT_HH24; + ptr+= 3; + } + else + { + *tmp_fmt= FMT_HH; + ptr+= 1; + } + } + tmp_len+= 2; + break; + } + case 'S': // SS + if (ptr + 1 == end || my_toupper(system_charset_info, *(ptr+1)) != 'S') + goto error; + + *tmp_fmt= FMT_SS; + tmp_len+= 2; + ptr+= 1; + break; + case '|': + /* + If only one '|' just ignore it, else append others, for example: + TO_CHAR('2000-11-05', 'YYYY|MM||||DD') --> 200011|||05 + */ + if (ptr + 1 == end || *(ptr+1) != '|') + { + tmp_fmt--; + break; + } + ptr++; // Skip first '|' + do + { + *tmp_fmt++= *ptr++; + tmp_len++; + } while ((ptr < end) && *ptr == '|'); + ptr--; // Fix ptr for above for loop + tmp_fmt--; + break; + + default: + offset= parse_special(cfmt, ptr, end, tmp_fmt); + if (!offset) + goto error; + /* ptr++ is in the for loop, so we must move ptr to offset-1 */ + ptr+= (offset-1); + tmp_fmt+= (offset-1); + tmp_len+= offset; + break; + } + } + *fmt_len= tmp_len; + *tmp_fmt= 0; + return 0; + +error: + warning_message.append(STRING_WITH_LEN("date format not recognized at ")); + warning_message.append(ptr, MY_MIN(8, end- ptr)); + return 1; +} + + +static inline bool 
append_val(int val, int size, String *str) +{ + ulong len= 0; + char intbuff[15]; + + len= (ulong) (int10_to_str(val, intbuff, 10) - intbuff); + return str->append_with_prefill(intbuff, len, size, '0'); +} + + +static bool make_date_time_oracle(const uint16 *fmt_array, + const MYSQL_TIME *l_time, + const MY_LOCALE *locale, + String *str) +{ + bool quotation_flag= false; + const uint16 *ptr= fmt_array; + uint hours_i; + uint weekday; + + str->length(0); + + while (*ptr) + { + if (check_quotation(*ptr, "ation_flag)) + { + /* don't display '"' in the result, so if it is '"', skip it */ + if (*ptr != '"') + { + DBUG_ASSERT(*ptr <= 255); + str->append((char) *ptr); + } + ptr++; + continue; + } + + switch (*ptr) { + + case FMT_AM: + case FMT_PM: + if (l_time->hour > 11) + str->append("PM", 2); + else + str->append("AM", 2); + break; + + case FMT_AM_DOT: + case FMT_PM_DOT: + if (l_time->hour > 11) + str->append(STRING_WITH_LEN("P.M.")); + else + str->append(STRING_WITH_LEN("A.M.")); + break; + + case FMT_AD: + case FMT_BC: + if (l_time->year > 0) + str->append(STRING_WITH_LEN("AD")); + else + str->append(STRING_WITH_LEN("BC")); + break; + + case FMT_AD_DOT: + case FMT_BC_DOT: + if (l_time->year > 0) + str->append(STRING_WITH_LEN("A.D.")); + else + str->append(STRING_WITH_LEN("B.C.")); + break; + + case FMT_Y: + if (append_val(l_time->year%10, 1, str)) + goto err_exit; + break; + + case FMT_YY: + case FMT_RR: + if (append_val(l_time->year%100, 2, str)) + goto err_exit; + break; + + case FMT_YYY: + if (append_val(l_time->year%1000, 3, str)) + goto err_exit; + break; + + case FMT_YYYY: + case FMT_RRRR: + if (append_val(l_time->year, 4, str)) + goto err_exit; + break; + + case FMT_MM: + if (append_val(l_time->month, 2, str)) + goto err_exit; + break; + + case FMT_MON: + { + if (l_time->month == 0) + { + str->append("00", 2); + } + else + { + const char *month_name= (locale->ab_month_names-> + type_names[l_time->month-1]); + size_t m_len= strlen(month_name); + 
str->append(month_name, m_len, system_charset_info); + } + } + break; + + case FMT_MONTH: + { + if (l_time->month == 0) + { + str->append("00", 2); + } + else + { + const char *month_name= (locale->month_names-> + type_names[l_time->month-1]); + size_t month_byte_len= strlen(month_name); + size_t month_char_len; + str->append(month_name, month_byte_len, system_charset_info); + month_char_len= my_numchars_mb(&my_charset_utf8mb3_general_ci, + month_name, month_name + + month_byte_len); + if (str->fill(str->length() + locale->max_month_name_length - + month_char_len, ' ')) + goto err_exit; + } + } + break; + + case FMT_DD: + if (append_val(l_time->day, 2, str)) + goto err_exit; + break; + + case FMT_DY: + { + if (l_time->day == 0) + str->append("00", 2); + else + { + weekday= calc_weekday(calc_daynr(l_time->year,l_time->month, + l_time->day), 0); + const char *day_name= locale->ab_day_names->type_names[weekday]; + str->append(day_name, strlen(day_name), system_charset_info); + } + } + break; + + case FMT_DAY: + { + if (l_time->day == 0) + str->append("00", 2, system_charset_info); + else + { + const char *day_name; + size_t day_byte_len, day_char_len; + weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, + l_time->day), 0); + day_name= locale->day_names->type_names[weekday]; + day_byte_len= strlen(day_name); + str->append(day_name, day_byte_len, system_charset_info); + day_char_len= my_numchars_mb(&my_charset_utf8mb3_general_ci, + day_name, day_name + day_byte_len); + if (str->fill(str->length() + locale->max_day_name_length - + day_char_len, ' ')) + goto err_exit; + } + } + break; + + case FMT_HH12: + case FMT_HH: + hours_i= (l_time->hour%24 + 11)%12+1; + if (append_val(hours_i, 2, str)) + goto err_exit; + break; + + case FMT_HH24: + if (append_val(l_time->hour, 2, str)) + goto err_exit; + break; + + case FMT_MI: + if (append_val(l_time->minute, 2, str)) + goto err_exit; + break; + + case FMT_SS: + if (append_val(l_time->second, 2, str)) + goto err_exit; + 
break; + + default: + str->append((char) *ptr); + } + + ptr++; + }; + return false; + +err_exit: + return true; +} + + +bool Item_func_tochar::fix_length_and_dec() +{ + thd= current_thd; + CHARSET_INFO *cs= thd->variables.collation_connection; + Item *arg1= args[1]->this_item(); + my_repertoire_t repertoire= arg1->collation.repertoire; + StringBuffer<STRING_BUFFER_USUAL_SIZE> buffer; + String *str; + + locale= thd->variables.lc_time_names; + if (!thd->variables.lc_time_names->is_ascii) + repertoire|= MY_REPERTOIRE_EXTENDED; + collation.set(cs, arg1->collation.derivation, repertoire); + + /* first argument must be datetime or string */ + enum_field_types arg0_mysql_type= args[0]->field_type(); + + max_length= 0; + switch (arg0_mysql_type) { + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_STRING: + break; + default: + { + my_printf_error(ER_STD_INVALID_ARGUMENT, + ER(ER_STD_INVALID_ARGUMENT), + MYF(0), + "data type of first argument must be type " + "date/datetime/time or string", + func_name()); + return TRUE; + } + } + if (args[1]->basic_const_item() && (str= args[1]->val_str(&buffer))) + { + uint ulen; + fixed_length= 1; + if (parse_format_string(str, &ulen)) + { + my_printf_error(ER_STD_INVALID_ARGUMENT, + ER(ER_STD_INVALID_ARGUMENT), + MYF(0), + warning_message.c_ptr(), + func_name()); + return TRUE; + } + max_length= (uint32) (ulen * collation.collation->mbmaxlen); + } + else + { + fixed_length= 0; + max_length= (uint32) MY_MIN(arg1->max_length * 10 * + collation.collation->mbmaxlen, + MAX_BLOB_WIDTH); + } + set_maybe_null(); + return FALSE; +} + + +String *Item_func_tochar::val_str(String* str) + { + StringBuffer<64> format_buffer; + String *format; + MYSQL_TIME l_time; + const MY_LOCALE *lc= locale; + date_conv_mode_t mode= TIME_CONV_NONE; + size_t max_result_length= max_length; + + if (warning_message.length()) + goto null_date; + + if ((null_value= 
args[0]->get_date(thd, &l_time, + Temporal::Options(mode, thd)))) + return 0; + + if (!fixed_length) + { + uint ulen; + if (!(format= args[1]->val_str(&format_buffer)) || !format->length() || + parse_format_string(format, &ulen)) + goto null_date; + max_result_length= ((size_t) ulen) * collation.collation->mbmaxlen; + } + + if (str->alloc(max_result_length)) + goto null_date; + + /* Create the result string */ + str->set_charset(collation.collation); + if (!make_date_time_oracle(fmt_array, &l_time, lc, str)) + return str; + +null_date: + + if (warning_message.length()) + { + push_warning_printf(thd, + Sql_condition::WARN_LEVEL_WARN, + ER_STD_INVALID_ARGUMENT, + ER_THD(thd, ER_STD_INVALID_ARGUMENT), + warning_message.c_ptr(), + func_name()); + if (!fixed_length) + warning_message.length(0); + } + + null_value= 1; + return 0; +} + bool Item_func_from_unixtime::fix_length_and_dec() { @@ -1937,7 +2742,7 @@ bool Item_func_from_unixtime::fix_length_and_dec() Type_temporal_attributes_not_fixed_dec(MAX_DATETIME_WIDTH, args[0]->decimals, false), DTCollation_numeric()); - maybe_null= true; + set_maybe_null(); return FALSE; } @@ -2064,7 +2869,7 @@ bool Item_date_add_interval::fix_length_and_dec() { set_func_handler(&func_handler_date_add_interval_string); } - maybe_null= true; + set_maybe_null(); return m_func_handler->fix_length_and_dec(this); } @@ -2109,16 +2914,19 @@ static const char *interval_names[]= void Item_date_add_interval::print(String *str, enum_query_type query_type) { args[0]->print_parenthesised(str, query_type, INTERVAL_PRECEDENCE); - str->append(date_sub_interval?" - interval ":" + interval "); + static LEX_CSTRING minus_interval= { STRING_WITH_LEN(" - interval ") }; + static LEX_CSTRING plus_interval= { STRING_WITH_LEN(" + interval ") }; + LEX_CSTRING *tmp= date_sub_interval ? 
&minus_interval : &plus_interval; + str->append(tmp); args[1]->print(str, query_type); str->append(' '); - str->append(interval_names[int_type]); + str->append(interval_names[int_type], strlen(interval_names[int_type])); } void Item_extract::print(String *str, enum_query_type query_type) { str->append(STRING_WITH_LEN("extract(")); - str->append(interval_names[int_type]); + str->append(interval_names[int_type], strlen(interval_names[int_type])); str->append(STRING_WITH_LEN(" from ")); args[0]->print(str, query_type); str->append(')'); @@ -2141,7 +2949,7 @@ bool Item_extract::check_arguments() const bool Item_extract::fix_length_and_dec() { - maybe_null=1; // If wrong date + set_maybe_null(); // If wrong date uint32 daylen= args[0]->cmp_type() == TIME_RESULT ? 2 : TIME_MAX_INTERVAL_DAY_CHAR_LENGTH; switch (int_type) { @@ -2182,7 +2990,7 @@ uint Extract_source::week(THD *thd) const longlong Item_extract::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Extract_source dt(thd, args[0], m_date_mode); if ((null_value= !dt.is_valid_extract_source())) @@ -2260,7 +3068,8 @@ void Item_func::print_cast_temporal(String *str, enum_query_type query_type) if (decimals && decimals != NOT_FIXED_DEC) { str->append('('); - str->append(llstr(decimals, buf)); + size_t length= (size_t) (longlong10_to_str(decimals, buf, -10) - buf); + str->append(buf, length); str->append(')'); } str->append(')'); @@ -2274,18 +3083,16 @@ void Item_char_typecast::print(String *str, enum_query_type query_type) str->append(STRING_WITH_LEN(" as char")); if (cast_length != ~0U) { + char buf[20]; + size_t length= (size_t) (longlong10_to_str(cast_length, buf, 10) - buf); str->append('('); - char buffer[20]; - // my_charset_bin is good enough for numbers - String st(buffer, sizeof(buffer), &my_charset_bin); - st.set(static_cast<ulonglong>(cast_length), &my_charset_bin); - str->append(st); + str->append(buf, length); str->append(')'); } if (cast_cs) { 
str->append(STRING_WITH_LEN(" charset ")); - str->append(cast_cs->csname); + str->append(cast_cs->cs_name); } str->append(')'); } @@ -2356,7 +3163,7 @@ uint Item_char_typecast::adjusted_length_with_warn(uint length) String *Item_char_typecast::val_str_generic(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); String *res; if (has_explicit_length()) @@ -2412,7 +3219,7 @@ end: String *Item_char_typecast::val_str_binary_from_native(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); DBUG_ASSERT(cast_cs == &my_charset_bin); NativeBuffer<STRING_BUFFER_USUAL_SIZE> native; @@ -2504,7 +3311,8 @@ Item_char_typecast::fix_length_and_dec_native_to_binary(uint32 octet_length) { collation.set(&my_charset_bin, DERIVATION_IMPLICIT); max_length= has_explicit_length() ? (uint32) cast_length : octet_length; - maybe_null|= current_thd->is_strict_mode(); + if (current_thd->is_strict_mode()) + set_maybe_null(); } @@ -2549,7 +3357,8 @@ void Item_char_typecast::fix_length_and_dec_internal(CHARSET_INFO *from_cs) args[0]->collation.collation->mbmaxlen)); max_length= char_length * cast_cs->mbmaxlen; // Add NULL-ability in strict mode. 
See Item_str_func::fix_fields() - maybe_null= maybe_null || current_thd->is_strict_mode(); + if (current_thd->is_strict_mode()) + set_maybe_null(); } @@ -2610,7 +3419,7 @@ Sql_mode_dependency Item_datetime_typecast::value_depends_on_sql_mode() const bool Item_func_makedate::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); long year, days, daynr= (long) args[1]->val_int(); VYear vyear(args[0]); @@ -2673,7 +3482,7 @@ bool Item_func_add_time::fix_length_and_dec() &func_handler_add_time_string_sub); } - maybe_null= true; + set_maybe_null(); return m_func_handler->fix_length_and_dec(this); } @@ -2688,7 +3497,7 @@ bool Item_func_add_time::fix_length_and_dec() bool Item_func_timediff::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); int l_sign= 1; MYSQL_TIME l_time1,l_time2,l_time3; @@ -2720,7 +3529,7 @@ bool Item_func_timediff::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzy bool Item_func_maketime::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); Longlong_hybrid hour(args[0]->val_int(), args[0]->unsigned_flag); longlong minute= args[1]->val_int(); VSec9 sec(thd, args[2], "seconds", 59); @@ -2761,7 +3570,7 @@ bool Item_func_maketime::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzy longlong Item_func_microsecond::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); THD *thd= current_thd; Time tm(thd, args[0], Time::Options_for_cast(thd)); return ((null_value= !tm.is_valid_time())) ? 
@@ -2879,7 +3688,7 @@ null_date: void Item_func_timestamp_diff::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); switch (int_type) { @@ -2925,7 +3734,7 @@ void Item_func_timestamp_diff::print(String *str, enum_query_type query_type) String *Item_func_get_format::val_str_ascii(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); const char *format_name; KNOWN_DATE_TIME_FORMAT *format; String *val= args[0]->val_str_ascii(str); @@ -2958,7 +3767,7 @@ String *Item_func_get_format::val_str_ascii(String *str) void Item_func_get_format::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); switch (type) { @@ -3058,7 +3867,7 @@ bool Item_func_str_to_date::fix_length_and_dec() if (collation.collation->mbminlen > 1) internal_charset= &my_charset_utf8mb4_general_ci; - maybe_null= true; + set_maybe_null(); set_func_handler(&func_handler_str_to_date_datetime_usec); if ((const_item= args[1]->const_item())) diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index a910b2cb723..9b78d6c159e 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -32,8 +32,8 @@ bool get_interval_value(THD *thd, Item *args, class Item_long_func_date_field: public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_can_return_date(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_date(func_name_cstring()); } public: Item_long_func_date_field(THD *thd, Item *a) :Item_long_func(thd, a) { } @@ -42,8 +42,8 @@ public: class Item_long_func_time_field: public Item_long_func { - bool check_arguments() const - { return args[0]->check_type_can_return_time(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_time(func_name_cstring()); } public: Item_long_func_time_field(THD *thd, Item *a) :Item_long_func(thd, a) { } 
@@ -52,37 +52,45 @@ public: class Item_func_period_add :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_int(0, 2); } public: Item_func_period_add(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} - longlong val_int(); - const char *func_name() const { return "period_add"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("period_add") }; + return name; + } + bool fix_length_and_dec() override { max_length=6*MY_CHARSET_BIN_MB_MAXLEN; return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_period_add>(thd, this); } }; class Item_func_period_diff :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_int(0, 2); } public: Item_func_period_diff(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} - longlong val_int(); - const char *func_name() const { return "period_diff"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("period_diff") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=6*MY_CHARSET_BIN_MB_MAXLEN; return FALSE; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_period_diff>(thd, this); } }; @@ -91,53 +99,61 @@ class Item_func_to_days :public Item_long_func_date_field { public: Item_func_to_days(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "to_days"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("to_days") }; + return name; + } + bool 
fix_length_and_dec() override { decimals=0; max_length=6*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - enum_monotonicity_info get_monotonicity_info() const; - longlong val_int_endpoint(bool left_endp, bool *incl_endp); - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + enum_monotonicity_info get_monotonicity_info() const override; + longlong val_int_endpoint(bool left_endp, bool *incl_endp) override; + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_to_days>(thd, this); } }; class Item_func_to_seconds :public Item_longlong_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_date(0, arg_count); } public: Item_func_to_seconds(THD *thd, Item *a): Item_longlong_func(thd, a) {} - longlong val_int(); - const char *func_name() const { return "to_seconds"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("to_seconds") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; fix_char_length(12); - maybe_null= 1; + set_maybe_null(); return FALSE; } - enum_monotonicity_info get_monotonicity_info() const; - longlong val_int_endpoint(bool left_endp, bool *incl_endp); - bool check_partition_func_processor(void *bool_arg) { return FALSE;} + enum_monotonicity_info get_monotonicity_info() const override; + longlong val_int_endpoint(bool left_endp, bool *incl_endp) override; + bool check_partition_func_processor(void *bool_arg) override { return 
FALSE;} /* Only meaningful with date part and optional time part */ - bool check_valid_arguments_processor(void *int_arg) + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_to_seconds>(thd, this); } }; @@ -146,22 +162,26 @@ class Item_func_dayofmonth :public Item_long_func_date_field { public: Item_func_dayofmonth(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "dayofmonth"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("dayofmonth") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=2*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dayofmonth>(thd, this); } }; @@ -171,22 +191,26 @@ class Item_func_month :public Item_long_func public: Item_func_month(THD *thd, Item *a): Item_long_func(thd, a) { } - longlong val_int(); - const char *func_name() const { return "month"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("month") }; + return name; + } + bool fix_length_and_dec() override { decimals= 0; fix_char_length(2); - maybe_null=1; + set_maybe_null(); return FALSE; } - 
bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_month>(thd, this); } }; @@ -196,19 +220,23 @@ class Item_func_monthname :public Item_str_func MY_LOCALE *locale; public: Item_func_monthname(THD *thd, Item *a): Item_str_func(thd, a) {} - const char *func_name() const { return "monthname"; } - String *val_str(String *str); - bool fix_length_and_dec(); - bool check_partition_func_processor(void *int_arg) {return TRUE;} - bool check_valid_arguments_processor(void *int_arg) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("monthname") }; + return name; + } + String *val_str(String *str) override; + bool fix_length_and_dec() override; + bool check_partition_func_processor(void *int_arg) override {return TRUE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_monthname>(thd, this); } }; @@ -217,22 +245,26 @@ class Item_func_dayofyear :public Item_long_func_date_field { public: Item_func_dayofyear(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "dayofyear"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static 
LEX_CSTRING name= {STRING_WITH_LEN("dayofyear") }; + return name; + } + bool fix_length_and_dec() override { decimals= 0; fix_char_length(3); - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dayofyear>(thd, this); } }; @@ -241,22 +273,26 @@ class Item_func_hour :public Item_long_func_time_field { public: Item_func_hour(THD *thd, Item *a): Item_long_func_time_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "hour"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("hour") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=2*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_time_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_hour>(thd, this); } }; @@ -265,22 +301,26 @@ class Item_func_minute :public Item_long_func_time_field { public: Item_func_minute(THD *thd, Item *a): 
Item_long_func_time_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "minute"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("minute") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=2*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_time_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_minute>(thd, this); } }; @@ -289,22 +329,26 @@ class Item_func_quarter :public Item_long_func_date_field { public: Item_func_quarter(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "quarter"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("quarter") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=1*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item 
*get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_quarter>(thd, this); } }; @@ -313,85 +357,97 @@ class Item_func_second :public Item_long_func_time_field { public: Item_func_second(THD *thd, Item *a): Item_long_func_time_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "second"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("second") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=2*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_time_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_second>(thd, this); } }; class Item_func_week :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_can_return_date(func_name()) || - (arg_count > 1 && args[1]->check_type_can_return_int(func_name())); + return args[0]->check_type_can_return_date(func_name_cstring()) || + (arg_count > 1 && args[1]->check_type_can_return_int(func_name_cstring())); } public: Item_func_week(THD *thd, Item *a): Item_long_func(thd, a) {} Item_func_week(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} - longlong val_int(); - const char *func_name() const { return "week"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= 
{STRING_WITH_LEN("week") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=2*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { if (arg_count == 2) return FALSE; return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } - bool check_valid_arguments_processor(void *int_arg) + bool check_valid_arguments_processor(void *int_arg) override { return arg_count == 2; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_week>(thd, this); } }; class Item_func_yearweek :public Item_long_func { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_can_return_date(func_name()) || - args[1]->check_type_can_return_int(func_name()); + return args[0]->check_type_can_return_date(func_name_cstring()) || + args[1]->check_type_can_return_int(func_name_cstring()); } public: Item_func_yearweek(THD *thd, Item *a, Item *b) :Item_long_func(thd, a, b) {} - longlong val_int(); - const char *func_name() const { return "yearweek"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("yearweek") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; max_length=6*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD 
*thd) override { return get_item_copy<Item_func_yearweek>(thd, this); } }; @@ -400,24 +456,28 @@ class Item_func_year :public Item_long_func_date_field { public: Item_func_year(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "year"; } - enum_monotonicity_info get_monotonicity_info() const; - longlong val_int_endpoint(bool left_endp, bool *incl_endp); - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("year") }; + return name; + } + enum_monotonicity_info get_monotonicity_info() const override; + longlong val_int_endpoint(bool left_endp, bool *incl_endp) override; + bool fix_length_and_dec() override { decimals=0; max_length=4*MY_CHARSET_BIN_MB_MAXLEN; - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_year>(thd, this); } }; @@ -428,29 +488,31 @@ class Item_func_weekday :public Item_long_func public: Item_func_weekday(THD *thd, Item *a, bool type_arg): Item_long_func(thd, a), odbc_type(type_arg) { } - longlong val_int(); - const char *func_name() const + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override { - return (odbc_type ? "dayofweek" : "weekday"); + static LEX_CSTRING dayofweek= {STRING_WITH_LEN("dayofweek") }; + static LEX_CSTRING weekday= {STRING_WITH_LEN("weekday") }; + return (odbc_type ? 
dayofweek : weekday); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { decimals= 0; fix_char_length(1); - maybe_null=1; + set_maybe_null(); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_weekday>(thd, this); } }; @@ -459,20 +521,25 @@ class Item_func_dayname :public Item_str_func MY_LOCALE *locale; public: Item_func_dayname(THD *thd, Item *a): Item_str_func(thd, a) {} - const char *func_name() const { return "dayname"; } - String *val_str(String *str); - const Type_handler *type_handler() const { return &type_handler_varchar; } - bool fix_length_and_dec(); - bool check_partition_func_processor(void *int_arg) {return TRUE;} - bool check_vcol_func_processor(void *arg) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("dayname") }; + return name; + } + String *val_str(String *str) override; + const Type_handler *type_handler() const override + { return &type_handler_varchar; } + bool fix_length_and_dec() override; + bool check_partition_func_processor(void *int_arg) override {return TRUE;} + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } - bool check_valid_arguments_processor(void *int_arg) + bool 
check_valid_arguments_processor(void *int_arg) override { return !has_date_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_dayname>(thd, this); } }; @@ -487,7 +554,7 @@ public: DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS); decimals= dec; max_length=17 + (decimals ? decimals + 1 : 0); - maybe_null= true; + set_maybe_null(); if (decimals) set_handler(&type_handler_newdecimal); else @@ -510,34 +577,38 @@ public: Item_func_unix_timestamp(THD *thd): Item_func_seconds_hybrid(thd) {} Item_func_unix_timestamp(THD *thd, Item *a): Item_func_seconds_hybrid(thd, a) {} - const char *func_name() const { return "unix_timestamp"; } - enum_monotonicity_info get_monotonicity_info() const; - longlong val_int_endpoint(bool left_endp, bool *incl_endp); - bool check_partition_func_processor(void *int_arg) {return FALSE;} + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("unix_timestamp") }; + return name; + } + enum_monotonicity_info get_monotonicity_info() const override; + longlong val_int_endpoint(bool left_endp, bool *incl_endp) override; + bool check_partition_func_processor(void *int_arg) override {return FALSE;} /* UNIX_TIMESTAMP() depends on the current timezone (and thus may not be used as a partitioning function) when its argument is NOT of the TIMESTAMP type. */ - bool check_valid_arguments_processor(void *int_arg) + bool check_valid_arguments_processor(void *int_arg) override { return !has_timestamp_args(); } - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { if (arg_count) return FALSE; return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { fix_length_and_dec_generic(arg_count ? 
args[0]->datetime_precision(current_thd) : 0); return FALSE; } - longlong int_op(); - my_decimal *decimal_op(my_decimal* buf); - Item *get_copy(THD *thd) + longlong int_op() override; + my_decimal *decimal_op(my_decimal* buf) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_unix_timestamp>(thd, this); } }; @@ -547,21 +618,25 @@ class Item_func_time_to_sec :public Item_func_seconds_hybrid public: Item_func_time_to_sec(THD *thd, Item *item): Item_func_seconds_hybrid(thd, item) {} - const char *func_name() const { return "time_to_sec"; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("time_to_sec") }; + return name; + } + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_time_args(); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { fix_length_and_dec_generic(args[0]->time_precision(current_thd)); return FALSE; } - longlong int_op(); - my_decimal *decimal_op(my_decimal* buf); - Item *get_copy(THD *thd) + longlong int_op() override; + my_decimal *decimal_op(my_decimal* buf) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_time_to_sec>(thd, this); } }; @@ -572,15 +647,20 @@ public: Item_datefunc(THD *thd): Item_func(thd) { } Item_datefunc(THD *thd, Item *a): Item_func(thd, a) { } Item_datefunc(THD *thd, Item *a, Item *b): Item_func(thd, a, b) { } - const Type_handler *type_handler() const { return &type_handler_newdate; } - longlong val_int() { return Date(this).to_longlong(); } - double val_real() { return Date(this).to_double(); } - String *val_str(String *to) { return 
Date(this).to_string(to); } - my_decimal *val_decimal(my_decimal *to) { return Date(this).to_decimal(to); } - bool fix_length_and_dec() + const Type_handler *type_handler() const override + { return &type_handler_newdate; } + longlong val_int() override + { return Date(this).to_longlong(); } + double val_real() override + { return Date(this).to_double(); } + String *val_str(String *to) override + { return Date(this).to_string(to); } + my_decimal *val_decimal(my_decimal *to) override + { return Date(this).to_decimal(to); } + bool fix_length_and_dec() override { fix_attributes_date(); - maybe_null= (arg_count > 0); + set_maybe_null(arg_count > 0); return FALSE; } }; @@ -593,15 +673,18 @@ public: Item_timefunc(THD *thd, Item *a): Item_func(thd, a) {} Item_timefunc(THD *thd, Item *a, Item *b): Item_func(thd, a, b) {} Item_timefunc(THD *thd, Item *a, Item *b, Item *c): Item_func(thd, a, b ,c) {} - const Type_handler *type_handler() const { return &type_handler_time2; } - longlong val_int() { return Time(this).to_longlong(); } - double val_real() { return Time(this).to_double(); } - String *val_str(String *to) { return Time(this).to_string(to, decimals); } - my_decimal *val_decimal(my_decimal *to) { return Time(this).to_decimal(to); } - bool val_native(THD *thd, Native *to) - { - return Time(thd, this).to_native(to, decimals); - } + const Type_handler *type_handler() const override + { return &type_handler_time2; } + longlong val_int() override + { return Time(this).to_longlong(); } + double val_real() override + { return Time(this).to_double(); } + String *val_str(String *to) override + { return Time(this).to_string(to, decimals); } + my_decimal *val_decimal(my_decimal *to) override + { return Time(this).to_decimal(to); } + bool val_native(THD *thd, Native *to) override + { return Time(thd, this).to_native(to, decimals); } }; @@ -613,11 +696,14 @@ public: Item_datetimefunc(THD *thd, Item *a, Item *b): Item_func(thd, a, b) {} Item_datetimefunc(THD *thd, Item *a, Item *b, 
Item *c): Item_func(thd, a, b ,c) {} - const Type_handler *type_handler() const { return &type_handler_datetime2; } - longlong val_int() { return Datetime(this).to_longlong(); } - double val_real() { return Datetime(this).to_double(); } - String *val_str(String *to) { return Datetime(this).to_string(to, decimals); } - my_decimal *val_decimal(my_decimal *to) { return Datetime(this).to_decimal(to); } + const Type_handler *type_handler() const override + { return &type_handler_datetime2; } + longlong val_int() override { return Datetime(this).to_longlong(); } + double val_real() override { return Datetime(this).to_double(); } + String *val_str(String *to) override + { return Datetime(this).to_string(to, decimals); } + my_decimal *val_decimal(my_decimal *to) override + { return Datetime(this).to_decimal(to); } }; @@ -630,20 +716,21 @@ class Item_func_curtime :public Item_timefunc public: Item_func_curtime(THD *thd, uint dec): Item_timefunc(thd), last_query_id(0) { decimals= dec; } - bool fix_fields(THD *, Item **); - bool fix_length_and_dec() { fix_attributes_time(decimals); return FALSE; } - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); + bool fix_fields(THD *, Item **) override; + bool fix_length_and_dec() override + { fix_attributes_time(decimals); return FALSE; } + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; /* Abstract method that defines which time zone is used for conversion. Converts time current time in my_time_t representation to broken-down MYSQL_TIME representation using UTC-SYSTEM or per-thread time zone. 
*/ virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0; - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC); } - void print(String *str, enum_query_type query_type); + void print(String *str, enum_query_type query_type) override; }; @@ -651,9 +738,13 @@ class Item_func_curtime_local :public Item_func_curtime { public: Item_func_curtime_local(THD *thd, uint dec): Item_func_curtime(thd, dec) {} - const char *func_name() const { return "curtime"; } - virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("curtime") }; + return name; + } + void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_curtime_local>(thd, this); } }; @@ -662,9 +753,13 @@ class Item_func_curtime_utc :public Item_func_curtime { public: Item_func_curtime_utc(THD *thd, uint dec): Item_func_curtime(thd, dec) {} - const char *func_name() const { return "utc_time"; } - virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("utc_time") }; + return name; + } + void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_curtime_utc>(thd, this); } }; @@ -677,9 +772,9 @@ class Item_func_curdate :public Item_datefunc MYSQL_TIME ltime; public: Item_func_curdate(THD *thd): Item_datefunc(thd), last_query_id(0) {} - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0; - bool check_vcol_func_processor(void *arg) + bool 
check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC); } @@ -690,9 +785,13 @@ class Item_func_curdate_local :public Item_func_curdate { public: Item_func_curdate_local(THD *thd): Item_func_curdate(thd) {} - const char *func_name() const { return "curdate"; } - void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("curdate") }; + return name; + } + void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_curdate_local>(thd, this); } }; @@ -701,9 +800,13 @@ class Item_func_curdate_utc :public Item_func_curdate { public: Item_func_curdate_utc(THD *thd): Item_func_curdate(thd) {} - const char *func_name() const { return "utc_date"; } - void store_now_in_TIME(THD* thd, MYSQL_TIME *now_time); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("utc_date") }; + return name; + } + void store_now_in_TIME(THD* thd, MYSQL_TIME *now_time) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_curdate_utc>(thd, this); } }; @@ -717,12 +820,12 @@ class Item_func_now :public Item_datetimefunc public: Item_func_now(THD *thd, uint dec): Item_datetimefunc(thd), last_query_id(0) { decimals= dec; } - bool fix_fields(THD *, Item **); - bool fix_length_and_dec() + bool fix_fields(THD *, Item **) override; + bool fix_length_and_dec() override { fix_attributes_datetime(decimals); return FALSE;} - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0; - bool check_vcol_func_processor(void *arg) + bool check_vcol_func_processor(void *arg) override { /* NOW is safe for replication as 
slaves will run with same time as @@ -730,7 +833,7 @@ public: */ return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC); } - void print(String *str, enum_query_type query_type); + void print(String *str, enum_query_type query_type) override; }; @@ -738,11 +841,15 @@ class Item_func_now_local :public Item_func_now { public: Item_func_now_local(THD *thd, uint dec): Item_func_now(thd, dec) {} - const char *func_name() const { return "current_timestamp"; } - int save_in_field(Field *field, bool no_conversions); - virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time); - virtual enum Functype functype() const { return NOW_FUNC; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("current_timestamp") }; + return name; + } + int save_in_field(Field *field, bool no_conversions) override; + void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) override; + enum Functype functype() const override { return NOW_FUNC; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_now_local>(thd, this); } }; @@ -751,15 +858,19 @@ class Item_func_now_utc :public Item_func_now { public: Item_func_now_utc(THD *thd, uint dec): Item_func_now(thd, dec) {} - const char *func_name() const { return "utc_timestamp"; } - virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time); - virtual enum Functype functype() const { return NOW_UTC_FUNC; } - virtual bool check_vcol_func_processor(void *arg) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("utc_timestamp") }; + return name; + } + void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) override; + enum Functype functype() const override { return NOW_UTC_FUNC; } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC | VCOL_NON_DETERMINISTIC); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { 
return get_item_copy<Item_func_now_utc>(thd, this); } }; @@ -772,46 +883,54 @@ class Item_func_sysdate_local :public Item_func_now { public: Item_func_sysdate_local(THD *thd, uint dec): Item_func_now(thd, dec) {} - bool const_item() const { return 0; } - const char *func_name() const { return "sysdate"; } - void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time); - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); - table_map used_tables() const { return RAND_TABLE_BIT; } - bool check_vcol_func_processor(void *arg) + bool const_item() const override { return 0; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("sysdate") }; + return name; + } + void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time) override; + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; + table_map used_tables() const override { return RAND_TABLE_BIT; } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC | VCOL_NON_DETERMINISTIC); } - virtual enum Functype functype() const { return SYSDATE_FUNC; } - Item *get_copy(THD *thd) + enum Functype functype() const override { return SYSDATE_FUNC; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sysdate_local>(thd, this); } }; class Item_func_from_days :public Item_datefunc { - bool check_arguments() const - { return args[0]->check_type_can_return_int(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_int(func_name_cstring()); } public: Item_func_from_days(THD *thd, Item *a): Item_datefunc(thd, a) {} - const char *func_name() const { return "from_days"; } - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + LEX_CSTRING 
func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("from_days") }; + return name; + } + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return has_date_args() || has_time_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_from_days>(thd, this); } }; class Item_func_date_format :public Item_str_func { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_can_return_date(func_name()) || + return args[0]->check_type_can_return_date(func_name_cstring()) || check_argument_types_can_return_text(1, arg_count); } const MY_LOCALE *locale; @@ -824,18 +943,22 @@ public: Item_str_func(thd, a, b), locale(0), is_time_format(false) {} Item_func_date_format(THD *thd, Item *a, Item *b, Item *c): Item_str_func(thd, a, b, c), locale(0), is_time_format(false) {} - String *val_str(String *str); - const char *func_name() const { return "date_format"; } - bool fix_length_and_dec(); + String *val_str(String *str) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("date_format") }; + return name; + } + bool fix_length_and_dec() override; uint format_length(const String *format); - bool eq(const Item *item, bool binary_cmp) const; - bool check_vcol_func_processor(void *arg) + bool eq(const Item *item, bool binary_cmp) const override; + bool check_vcol_func_processor(void *arg) override { if (arg_count > 2) return false; return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_date_format>(thd, this); } }; @@ -844,28 +967,87 @@ class 
Item_func_time_format: public Item_func_date_format public: Item_func_time_format(THD *thd, Item *a, Item *b): Item_func_date_format(thd, a, b) { is_time_format= true; } - const char *func_name() const { return "time_format"; } - bool check_vcol_func_processor(void *arg) { return false; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("time_format") }; + return name; + } + bool check_vcol_func_processor(void *arg) override { return false; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_time_format>(thd, this); } }; +/* the max length of datetime format models string in Oracle is 144 */ +#define MAX_DATETIME_FORMAT_MODEL_LEN 144 + +class Item_func_tochar :public Item_str_func +{ + const MY_LOCALE *locale; + THD *thd; + String warning_message; + bool fixed_length; + + /* + When datetime format models is parsed, use uint16 integers to + represent the format models and store in fmt_array. + */ + uint16 fmt_array[MAX_DATETIME_FORMAT_MODEL_LEN+1]; + + bool check_arguments() const override + { + return check_argument_types_can_return_text(1, arg_count); + } + +public: + Item_func_tochar(THD *thd, Item *a, Item *b): + Item_str_func(thd, a, b), locale(0) + { + /* NOTE: max length of warning message is 64 */ + warning_message.alloc(64); + warning_message.length(0); + } + ~Item_func_tochar() { warning_message.free(); } + String *val_str(String *str) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("to_char") }; + return name; + } + bool fix_length_and_dec() override; + bool parse_format_string(const String *format, uint *fmt_len); + + bool check_vcol_func_processor(void *arg) override + { + if (arg_count > 2) + return false; + return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); + } + + Item *get_copy(THD *thd) override + { return get_item_copy<Item_func_tochar>(thd, this); } +}; + + class 
Item_func_from_unixtime :public Item_datetimefunc { - bool check_arguments() const - { return args[0]->check_type_can_return_decimal(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_decimal(func_name_cstring()); } Time_zone *tz; public: Item_func_from_unixtime(THD *thd, Item *a): Item_datetimefunc(thd, a) {} - const char *func_name() const { return "from_unixtime"; } - bool fix_length_and_dec(); - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); - bool check_vcol_func_processor(void *arg) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("from_unixtime") }; + return name; + } + bool fix_length_and_dec() override; + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_from_unixtime>(thd, this); } }; @@ -886,9 +1068,9 @@ class Time_zone; */ class Item_func_convert_tz :public Item_datetimefunc { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_can_return_date(func_name()) || + return args[0]->check_type_can_return_date(func_name_cstring()) || check_argument_types_can_return_text(1, arg_count); } /* @@ -902,35 +1084,43 @@ class Item_func_convert_tz :public Item_datetimefunc public: Item_func_convert_tz(THD *thd, Item *a, Item *b, Item *c): Item_datetimefunc(thd, a, b, c), from_tz_cached(0), to_tz_cached(0) {} - const char *func_name() const { return "convert_tz"; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("convert_tz") }; + return name; + } + bool fix_length_and_dec() override { fix_attributes_datetime(args[0]->datetime_precision(current_thd)); - maybe_null= true; + 
set_maybe_null(); return FALSE; } - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); - void cleanup(); - Item *get_copy(THD *thd) + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; + void cleanup() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_convert_tz>(thd, this); } }; class Item_func_sec_to_time :public Item_timefunc { - bool check_arguments() const - { return args[0]->check_type_can_return_decimal(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_decimal(func_name_cstring()); } public: Item_func_sec_to_time(THD *thd, Item *item): Item_timefunc(thd, item) {} - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); - bool fix_length_and_dec() + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; + bool fix_length_and_dec() override { fix_attributes_time(args[0]->decimals); - maybe_null= true; + set_maybe_null(); return FALSE; } - const char *func_name() const { return "sec_to_time"; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("sec_to_time") }; + return name; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_sec_to_time>(thd, this); } }; @@ -944,13 +1134,17 @@ public: bool neg_arg): Item_handled_func(thd, a, b), int_type(type_arg), date_sub_interval(neg_arg) {} - const char *func_name() const { return "date_add_interval"; } - bool fix_length_and_dec(); - bool eq(const Item *item, bool binary_cmp) const; - void print(String *str, enum_query_type query_type); - enum precedence precedence() const { return INTERVAL_PRECEDENCE; } - bool need_parentheses_in_default() { return true; } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("date_add_interval") }; + return name; + } + bool fix_length_and_dec() override; + bool eq(const Item *item, 
bool binary_cmp) const override; + void print(String *str, enum_query_type query_type) override; + enum precedence precedence() const override { return INTERVAL_PRECEDENCE; } + bool need_parentheses_in_default() override { return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_date_add_interval>(thd, this); } }; @@ -999,25 +1193,29 @@ class Item_extract :public Item_int_func, m_date_mode(date_mode_t(0)), int_type(type_arg) { } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return Type_handler_hybrid_field_type::type_handler(); } - longlong val_int(); - enum Functype functype() const { return EXTRACT_FUNC; } - const char *func_name() const { return "extract"; } - bool check_arguments() const; - bool fix_length_and_dec(); - bool eq(const Item *item, bool binary_cmp) const; - void print(String *str, enum_query_type query_type); - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) + longlong val_int() override; + enum Functype functype() const override { return EXTRACT_FUNC; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("extract") }; + return name; + } + bool check_arguments() const override; + bool fix_length_and_dec() override; + bool eq(const Item *item, bool binary_cmp) const override; + void print(String *str, enum_query_type query_type) override; + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { if (int_type != INTERVAL_WEEK) return FALSE; return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } - bool check_valid_arguments_processor(void *int_arg) + bool check_valid_arguments_processor(void *int_arg) override { switch (int_type) { case INTERVAL_YEAR: @@ -1053,7 +1251,7 @@ class Item_extract :public Item_int_func, } return true; } - Item *get_copy(THD *thd) + Item 
*get_copy(THD *thd) override { return get_item_copy<Item_extract>(thd, this); } }; @@ -1080,9 +1278,13 @@ public: Item_char_typecast(THD *thd, Item *a, uint length_arg, CHARSET_INFO *cs_arg): Item_handled_func(thd, a), cast_length(length_arg), cast_cs(cs_arg), m_suppress_warning_to_error_escalation(false) {} - enum Functype functype() const { return CHAR_TYPECAST_FUNC; } - bool eq(const Item *item, bool binary_cmp) const; - const char *func_name() const { return "cast_as_char"; } + enum Functype functype() const override { return CHAR_TYPECAST_FUNC; } + bool eq(const Item *item, bool binary_cmp) const override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cast_as_char") }; + return name; + } CHARSET_INFO *cast_charset() const { return cast_cs; } String *val_str_generic(String *a); String *val_str_binary_from_native(String *a); @@ -1090,13 +1292,13 @@ public: void fix_length_and_dec_numeric(); void fix_length_and_dec_str(); void fix_length_and_dec_native_to_binary(uint32 octet_length); - bool fix_length_and_dec() + bool fix_length_and_dec() override { return args[0]->type_handler()->Item_char_typecast_fix_length_and_dec(this); } - void print(String *str, enum_query_type query_type); - bool need_parentheses_in_default() { return true; } - Item *get_copy(THD *thd) + void print(String *str, enum_query_type query_type) override; + bool need_parentheses_in_default() override { return true; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_char_typecast>(thd, this); } }; @@ -1123,17 +1325,21 @@ class Item_date_typecast :public Item_datefunc { public: Item_date_typecast(THD *thd, Item *a): Item_datefunc(thd, a) {} - const char *func_name() const { return "cast_as_date"; } - void print(String *str, enum_query_type query_type) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cast_as_date") }; + return name; + } + void print(String *str, enum_query_type 
query_type) override { print_cast_temporal(str, query_type); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool fix_length_and_dec() + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool fix_length_and_dec() override { return args[0]->type_handler()->Item_date_typecast_fix_length_and_dec(this); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_date_typecast>(thd, this); } }; @@ -1143,19 +1349,23 @@ class Item_time_typecast :public Item_timefunc public: Item_time_typecast(THD *thd, Item *a, uint dec_arg): Item_timefunc(thd, a) { decimals= dec_arg; } - const char *func_name() const { return "cast_as_time"; } - void print(String *str, enum_query_type query_type) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cast_as_time") }; + return name; + } + void print(String *str, enum_query_type query_type) override { print_cast_temporal(str, query_type); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool fix_length_and_dec() + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool fix_length_and_dec() override { return args[0]->type_handler()-> Item_time_typecast_fix_length_and_dec(this); } - Sql_mode_dependency value_depends_on_sql_mode() const; - Item *get_copy(THD *thd) + Sql_mode_dependency value_depends_on_sql_mode() const override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_time_typecast>(thd, this); } }; @@ -1165,70 +1375,83 @@ class Item_datetime_typecast :public Item_datetimefunc public: Item_datetime_typecast(THD *thd, Item *a, uint dec_arg): Item_datetimefunc(thd, a) { decimals= dec_arg; } - const char *func_name() const { return "cast_as_datetime"; } - void print(String *str, enum_query_type query_type) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("cast_as_datetime") }; + return name; + } + 
void print(String *str, enum_query_type query_type) override { print_cast_temporal(str, query_type); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - bool fix_length_and_dec() + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + bool fix_length_and_dec() override { return args[0]->type_handler()-> Item_datetime_typecast_fix_length_and_dec(this); } - Sql_mode_dependency value_depends_on_sql_mode() const; - Item *get_copy(THD *thd) + Sql_mode_dependency value_depends_on_sql_mode() const override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_datetime_typecast>(thd, this); } }; class Item_func_makedate :public Item_datefunc { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_int(0, arg_count); } public: Item_func_makedate(THD *thd, Item *a, Item *b): Item_datefunc(thd, a, b) {} - const char *func_name() const { return "makedate"; } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("makedate") }; + return name; + } + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_makedate>(thd, this); } }; class Item_func_timestamp :public Item_datetimefunc { - bool check_arguments() const + bool check_arguments() const override { - return args[0]->check_type_can_return_date(func_name()) || - args[1]->check_type_can_return_time(func_name()); + return args[0]->check_type_can_return_date(func_name_cstring()) || + args[1]->check_type_can_return_time(func_name_cstring()); } public: Item_func_timestamp(THD *thd, Item *a, Item *b) :Item_datetimefunc(thd, a, b) { } - const char *func_name() const { return "timestamp"; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= 
{STRING_WITH_LEN("timestamp") }; + return name; + } + bool fix_length_and_dec() override { THD *thd= current_thd; uint dec0= args[0]->datetime_precision(thd); uint dec1= Interval_DDhhmmssff::fsp(thd, args[1]); fix_attributes_datetime(MY_MAX(dec0, dec1)); - maybe_null= true; + set_maybe_null(); return false; } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { Datetime dt(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd)); if (!dt.is_valid_datetime()) - return null_value= true; + return (null_value= 1); + Interval_DDhhmmssff it(thd, args[1]); if (!it.is_valid_interval_DDhhmmssff()) - return null_value= true; + return (null_value= true); return (null_value= Sec6_add(dt.get_mysql_time(), it.get_mysql_time(), 1). to_datetime(ltime)); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_timestamp>(thd, this); } }; @@ -1253,54 +1476,67 @@ public: Item_func_add_time(THD *thd, Item *a, Item *b, bool neg_arg) :Item_handled_func(thd, a, b), sign(neg_arg ? -1 : 1) { } - bool fix_length_and_dec(); - const char *func_name() const { return sign > 0 ? "addtime" : "subtime"; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING addtime= { STRING_WITH_LEN("addtime") }; + static LEX_CSTRING subtime= { STRING_WITH_LEN("subtime") }; + return sign > 0 ? 
addtime : subtime; + } + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_add_time>(thd, this); } }; class Item_func_timediff :public Item_timefunc { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_time(0, arg_count); } public: Item_func_timediff(THD *thd, Item *a, Item *b): Item_timefunc(thd, a, b) {} - const char *func_name() const { return "timediff"; } - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("timediff") }; + return name; + } + bool fix_length_and_dec() override { THD *thd= current_thd; uint dec= MY_MAX(args[0]->time_precision(thd), args[1]->time_precision(thd)); fix_attributes_time(dec); - maybe_null= true; + set_maybe_null(); return FALSE; } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - Item *get_copy(THD *thd) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_timediff>(thd, this); } }; class Item_func_maketime :public Item_timefunc { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_int(0, 2) || - args[2]->check_type_can_return_decimal(func_name()); + args[2]->check_type_can_return_decimal(func_name_cstring()); } public: Item_func_maketime(THD *thd, Item *a, Item *b, Item *c): Item_timefunc(thd, a, b, c) {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { fix_attributes_time(args[2]->decimals); - maybe_null= true; + set_maybe_null(); return FALSE; } - const char *func_name() const { return "maketime"; } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("maketime") }; + return name; + } + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) 
override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_maketime>(thd, this); } }; @@ -1309,29 +1545,33 @@ class Item_func_microsecond :public Item_long_func_time_field { public: Item_func_microsecond(THD *thd, Item *a): Item_long_func_time_field(thd, a) {} - longlong val_int(); - const char *func_name() const { return "microsecond"; } - bool fix_length_and_dec() + longlong val_int() override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("microsecond") }; + return name; + } + bool fix_length_and_dec() override { decimals=0; - maybe_null=1; + set_maybe_null(); fix_char_length(6); return FALSE; } - bool check_partition_func_processor(void *int_arg) {return FALSE;} - bool check_vcol_func_processor(void *arg) { return FALSE;} - bool check_valid_arguments_processor(void *int_arg) + bool check_partition_func_processor(void *int_arg) override {return FALSE;} + bool check_vcol_func_processor(void *arg) override { return FALSE;} + bool check_valid_arguments_processor(void *int_arg) override { return !has_time_args(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_microsecond>(thd, this); } }; class Item_func_timestamp_diff :public Item_longlong_func { - bool check_arguments() const + bool check_arguments() const override { return check_argument_types_can_return_date(0, arg_count); } const interval_type int_type; public: @@ -1340,16 +1580,20 @@ public: public: Item_func_timestamp_diff(THD *thd, Item *a, Item *b, interval_type type_arg): Item_longlong_func(thd, a, b), int_type(type_arg) {} - const char *func_name() const { return "timestampdiff"; } - longlong val_int(); - bool fix_length_and_dec() + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("timestampdiff") }; + return name; + } + longlong val_int() override; + bool fix_length_and_dec() override { decimals=0; - maybe_null=1; + set_maybe_null(); 
return FALSE; } - virtual void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_timestamp_diff>(thd, this); } }; @@ -1366,17 +1610,21 @@ public: Item_func_get_format(THD *thd, timestamp_type type_arg, Item *a): Item_str_ascii_func(thd, a), type(type_arg) {} - String *val_str_ascii(String *str); - const char *func_name() const { return "get_format"; } - bool fix_length_and_dec() + String *val_str_ascii(String *str) override; + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("get_format") }; + return name; + } + bool fix_length_and_dec() override { - maybe_null= 1; + set_maybe_null(); decimals=0; fix_length_and_charset(17, default_charset()); return FALSE; } - virtual void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_get_format>(thd, this); } }; @@ -1394,22 +1642,30 @@ public: {} bool get_date_common(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate, timestamp_type); - const char *func_name() const { return "str_to_date"; } - bool fix_length_and_dec(); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("str_to_date") }; + return name; + } + bool fix_length_and_dec() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_str_to_date>(thd, this); } }; class Item_func_last_day :public Item_datefunc { - bool check_arguments() const - { return args[0]->check_type_can_return_date(func_name()); } + bool check_arguments() const override + { return args[0]->check_type_can_return_date(func_name_cstring()); } public: Item_func_last_day(THD *thd, Item *a): Item_datefunc(thd, a) {} - const char *func_name() const { return 
"last_day"; } - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("last_day") }; + return name; + } + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_last_day>(thd, this); } }; @@ -1590,17 +1846,17 @@ public: bool get_date(THD *thd, Item_handled_func *item, MYSQL_TIME *to, date_mode_t fuzzy) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); Datetime::Options opt(TIME_CONV_NONE, thd); Datetime dt(thd, item->arguments()[0], opt); if (!dt.is_valid_datetime()) - return item->null_value= true; + return (item->null_value= true); Interval_DDhhmmssff it(thd, item->arguments()[1]); if (!it.is_valid_interval_DDhhmmssff()) - return item->null_value= true; + return (item->null_value= true); return (item->null_value= (Sec6_add(dt.get_mysql_time(), it.get_mysql_time(), m_sign). - to_datetime(to))); + to_datetime(to))); } }; @@ -1624,13 +1880,13 @@ public: bool get_date(THD *thd, Item_handled_func *item, MYSQL_TIME *to, date_mode_t fuzzy) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); Time t(thd, item->arguments()[0]); if (!t.is_valid_time()) - return item->null_value= true; + return (item->null_value= true); Interval_DDhhmmssff i(thd, item->arguments()[1]); if (!i.is_valid_interval_DDhhmmssff()) - return item->null_value= true; + return (item->null_value= true); return (item->null_value= (Sec6_add(t.get_mysql_time(), i.get_mysql_time(), m_sign). 
to_time(thd, to, item->decimals))); @@ -1661,7 +1917,7 @@ public: bool get_date(THD *thd, Item_handled_func *item, MYSQL_TIME *to, date_mode_t fuzzy) const { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); // Detect a proper timestamp type based on the argument values Temporal_hybrid l_time1(thd, item->arguments()[0], Temporal::Options(TIME_TIME_ONLY, thd)); diff --git a/sql/item_vers.cc b/sql/item_vers.cc index 9a594533628..3f648cde890 100644 --- a/sql/item_vers.cc +++ b/sql/item_vers.cc @@ -29,14 +29,14 @@ bool Item_func_history::val_bool() { Item_field *f= static_cast<Item_field *>(args[0]); - DBUG_ASSERT(f->fixed); + DBUG_ASSERT(f->fixed()); DBUG_ASSERT(f->field->flags & VERS_ROW_END); return !f->field->is_max(); } void Item_func_history::print(String *str, enum_query_type query_type) { - str->append(func_name()); + str->append(func_name_cstring()); str->append('('); args[0]->print(str, query_type); str->append(')'); diff --git a/sql/item_vers.h b/sql/item_vers.h index 0799d04a0bc..7cd5d847b15 100644 --- a/sql/item_vers.h +++ b/sql/item_vers.h @@ -33,22 +33,23 @@ public: DBUG_ASSERT(a->type() == Item::FIELD_ITEM); } - virtual bool val_bool(); - virtual longlong val_int() + bool val_bool() override; + longlong val_int() override { return val_bool(); } + bool fix_length_and_dec() override { - return (val_bool() ? 
1 : 0); - } - bool fix_length_and_dec() - { - maybe_null= 0; + set_maybe_null(); null_value= 0; decimals= 0; max_length= 1; return FALSE; } - virtual const char* func_name() const { return "is_history"; } - virtual void print(String *str, enum_query_type query_type); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("is_history") }; + return name; + } + void print(String *str, enum_query_type query_type) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_history>(thd, this); } }; @@ -57,18 +58,16 @@ class Item_func_trt_ts: public Item_datetimefunc TR_table::field_id_t trt_field; public: Item_func_trt_ts(THD *thd, Item* a, TR_table::field_id_t _trt_field); - const char *func_name() const + LEX_CSTRING func_name_cstring() const override { - if (trt_field == TR_table::FLD_BEGIN_TS) - { - return "trt_begin_ts"; - } - return "trt_commit_ts"; + static LEX_CSTRING begin_name= {STRING_WITH_LEN("trt_begin_ts") }; + static LEX_CSTRING commit_name= {STRING_WITH_LEN("trt_commit_ts") }; + return (trt_field == TR_table::FLD_BEGIN_TS) ? 
begin_name : commit_name; } - bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); - Item *get_copy(THD *thd) + bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_trt_ts>(thd, this); } - bool fix_length_and_dec() + bool fix_length_and_dec() override { fix_attributes_datetime(decimals); return FALSE; } }; @@ -84,31 +83,34 @@ public: Item_func_trt_id(THD *thd, Item* a, TR_table::field_id_t _trt_field, bool _backwards= false); Item_func_trt_id(THD *thd, Item* a, Item* b, TR_table::field_id_t _trt_field); - const char *func_name() const + LEX_CSTRING func_name_cstring() const override { - switch (trt_field) - { + static LEX_CSTRING trx_name= {STRING_WITH_LEN("trt_trx_id") }; + static LEX_CSTRING commit_name= {STRING_WITH_LEN("trt_commit_id") }; + static LEX_CSTRING iso_name= {STRING_WITH_LEN("trt_iso_level") }; + + switch (trt_field) { case TR_table::FLD_TRX_ID: - return "trt_trx_id"; + return trx_name; case TR_table::FLD_COMMIT_ID: - return "trt_commit_id"; + return commit_name; case TR_table::FLD_ISO_LEVEL: - return "trt_iso_level"; + return iso_name; default: DBUG_ASSERT(0); } - return NULL; + return NULL_clex_str; } - bool fix_length_and_dec() + bool fix_length_and_dec() override { bool res= Item_int_func::fix_length_and_dec(); max_length= 20; return res; } - longlong val_int(); - Item *get_copy(THD *thd) + longlong val_int() override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_trt_id>(thd, this); } }; @@ -119,12 +121,13 @@ protected: public: Item_func_trt_trx_sees(THD *thd, Item* a, Item* b); - const char *func_name() const + LEX_CSTRING func_name_cstring() const override { - return "trt_trx_sees"; + static LEX_CSTRING name= {STRING_WITH_LEN("trt_trx_sees") }; + return name; } - longlong val_int(); - Item *get_copy(THD *thd) + longlong val_int() override; + Item *get_copy(THD *thd) override { return 
get_item_copy<Item_func_trt_trx_sees>(thd, this); } }; @@ -137,9 +140,10 @@ public: { accept_eq= true; } - const char *func_name() const + LEX_CSTRING func_name_cstring() const override { - return "trt_trx_sees_eq"; + static LEX_CSTRING name= {STRING_WITH_LEN("trt_trx_sees_eq") }; + return name; } }; diff --git a/sql/item_windowfunc.cc b/sql/item_windowfunc.cc index 63f5499b857..4fef4fa10b2 100644 --- a/sql/item_windowfunc.cc +++ b/sql/item_windowfunc.cc @@ -84,7 +84,7 @@ Item_window_func::update_used_tables() bool Item_window_func::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); if (!thd->lex->current_select || (thd->lex->current_select->context_analysis_place != SELECT_LIST && @@ -99,13 +99,15 @@ Item_window_func::fix_fields(THD *thd, Item **ref) if (window_spec->window_frame && is_frame_prohibited()) { - my_error(ER_NOT_ALLOWED_WINDOW_FRAME, MYF(0), window_func()->func_name()); + my_error(ER_NOT_ALLOWED_WINDOW_FRAME, MYF(0), + window_func()->func_name()); return true; } if (window_spec->order_list->elements == 0 && is_order_list_mandatory()) { - my_error(ER_NO_ORDER_LIST_IN_WINDOW_SPEC, MYF(0), window_func()->func_name()); + my_error(ER_NO_ORDER_LIST_IN_WINDOW_SPEC, MYF(0), + window_func()->func_name()); return true; } @@ -121,15 +123,16 @@ Item_window_func::fix_fields(THD *thd, Item **ref) return true; const_item_cache= false; - with_window_func= true; + + with_flags= (with_flags & ~item_with_t::SUM_FUNC) | item_with_t::WINDOW_FUNC; if (fix_length_and_dec()) return TRUE; max_length= window_func()->max_length; - maybe_null= window_func()->maybe_null; + set_maybe_null(window_func()->maybe_null()); - fixed= 1; + base_flags|= item_base_t::FIXED; set_phase_to_initial(); return false; } @@ -181,7 +184,8 @@ bool Item_window_func::check_result_type_of_order_item() if (rtype != REAL_RESULT && rtype != INT_RESULT && rtype != DECIMAL_RESULT && rtype != TIME_RESULT) { - my_error(ER_WRONG_TYPE_FOR_PERCENTILE_FUNC, MYF(0), 
window_func()->func_name()); + my_error(ER_WRONG_TYPE_FOR_PERCENTILE_FUNC, MYF(0), + window_func()->func_name()); return true; } return false; @@ -336,7 +340,7 @@ void Item_sum_percent_rank::setup_window_func(THD *thd, Window_spec *window_spec bool Item_sum_hybrid_simple::fix_fields(THD *thd, Item **ref) { - DBUG_ASSERT(fixed == 0); + DBUG_ASSERT(fixed() == 0); if (init_sum_func_check(thd)) return TRUE; @@ -345,14 +349,11 @@ bool Item_sum_hybrid_simple::fix_fields(THD *thd, Item **ref) { if (args[i]->fix_fields_if_needed_for_scalar(thd, &args[i])) return TRUE; - with_window_func|= args[i]->with_window_func; + with_flags|= args[i]->with_flags; } - for (uint i= 0; i < arg_count && !m_with_subquery; i++) - m_with_subquery|= args[i]->with_subquery(); - if (fix_length_and_dec()) - return true; + return TRUE; setup_hybrid(thd, args[0]); result_field=0; @@ -360,17 +361,17 @@ bool Item_sum_hybrid_simple::fix_fields(THD *thd, Item **ref) if (check_sum_func(thd, ref)) return TRUE; for (uint i= 0; i < arg_count; i++) - { orig_args[i]= args[i]; - } - fixed= 1; + + base_flags|= item_base_t::FIXED; return FALSE; } bool Item_sum_hybrid_simple::fix_length_and_dec() { - maybe_null= null_value= true; + set_maybe_null(); + null_value= true; return args[0]->type_handler()->Item_sum_hybrid_fix_length_and_dec(this); } @@ -396,7 +397,7 @@ void Item_sum_hybrid_simple::setup_hybrid(THD *thd, Item *item) double Item_sum_hybrid_simple::val_real() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return 0.0; double retval= value->val_real(); @@ -407,7 +408,7 @@ double Item_sum_hybrid_simple::val_real() longlong Item_sum_hybrid_simple::val_int() { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return 0; longlong retval= value->val_int(); @@ -418,7 +419,7 @@ longlong Item_sum_hybrid_simple::val_int() my_decimal *Item_sum_hybrid_simple::val_decimal(my_decimal *val) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return 0; my_decimal 
*retval= value->val_decimal(val); @@ -430,7 +431,7 @@ my_decimal *Item_sum_hybrid_simple::val_decimal(my_decimal *val) String * Item_sum_hybrid_simple::val_str(String *str) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return 0; String *retval= value->val_str(str); @@ -441,7 +442,7 @@ Item_sum_hybrid_simple::val_str(String *str) bool Item_sum_hybrid_simple::val_native(THD *thd, Native *to) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return true; return val_native_from_item(thd, value, to); @@ -449,7 +450,7 @@ bool Item_sum_hybrid_simple::val_native(THD *thd, Native *to) bool Item_sum_hybrid_simple::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - DBUG_ASSERT(fixed == 1); + DBUG_ASSERT(fixed()); if (null_value) return true; bool retval= value->get_date(thd, ltime, fuzzydate); @@ -490,7 +491,7 @@ void Item_sum_hybrid_simple::reset_field() { longlong nr=args[0]->val_int(); - if (maybe_null) + if (maybe_null()) { if (args[0]->null_value) { @@ -507,7 +508,7 @@ void Item_sum_hybrid_simple::reset_field() { double nr= args[0]->val_real(); - if (maybe_null) + if (maybe_null()) { if (args[0]->null_value) { @@ -524,7 +525,7 @@ void Item_sum_hybrid_simple::reset_field() { VDec arg_dec(args[0]); - if (maybe_null) + if (maybe_null()) { if (arg_dec.is_null()) result_field->set_null(); @@ -557,7 +558,7 @@ void Item_window_func::print(String *str, enum_query_type query_type) return; } window_func()->print(str, query_type); - str->append(" over "); + str->append(STRING_WITH_LEN(" over ")); if (!window_spec) str->append(window_name); else @@ -566,11 +567,11 @@ void Item_window_func::print(String *str, enum_query_type query_type) void Item_window_func::print_for_percentile_functions(String *str, enum_query_type query_type) { window_func()->print(str, query_type); - str->append(" within group "); + str->append(STRING_WITH_LEN(" within group ")); str->append('('); window_spec->print_order(str,query_type); str->append(')'); 
- str->append(" over "); + str->append(STRING_WITH_LEN(" over ")); str->append('('); window_spec->print_partition(str,query_type); str->append(')'); diff --git a/sql/item_windowfunc.h b/sql/item_windowfunc.h index 99ef738ac69..ce9d89e62dd 100644 --- a/sql/item_windowfunc.h +++ b/sql/item_windowfunc.h @@ -118,37 +118,39 @@ public: Item_sum_row_number(THD *thd) : Item_sum_int(thd), count(0) {} - const Type_handler *type_handler() const { return &type_handler_slonglong; } + const Type_handler *type_handler() const override + { return &type_handler_slonglong; } - void clear() + void clear() override { count= 0; } - bool add() + bool add() override { count++; return false; } - void reset_field() { DBUG_ASSERT(0); } - void update_field() {} + void reset_field() override { DBUG_ASSERT(0); } + void update_field() override {} - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return ROW_NUMBER_FUNC; } - longlong val_int() + longlong val_int() override { return count; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "row_number"; + static LEX_CSTRING name= {STRING_WITH_LEN("row_number") }; + return name; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_row_number>(thd, this); } }; @@ -181,38 +183,40 @@ public: Item_sum_rank(THD *thd) : Item_sum_int(thd), peer_tracker(NULL) {} - const Type_handler *type_handler() const { return &type_handler_slonglong; } + const Type_handler *type_handler() const override + { return &type_handler_slonglong; } - void clear() + void clear() override { /* This is called on partition start */ cur_rank= 1; row_number= 0; } - bool add(); + bool add() override; - longlong val_int() + longlong val_int() override { return cur_rank; } - void reset_field() { DBUG_ASSERT(0); } - void update_field() {} + void reset_field() override { DBUG_ASSERT(0); } + void update_field() override {} - enum Sumfunctype sum_func () const + enum 
Sumfunctype sum_func () const override { return RANK_FUNC; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "rank"; + static LEX_CSTRING name= {STRING_WITH_LEN("rank") }; + return name; } - void setup_window_func(THD *thd, Window_spec *window_spec); + void setup_window_func(THD *thd, Window_spec *window_spec) override; - void cleanup() + void cleanup() override { if (peer_tracker) { @@ -221,7 +225,7 @@ public: } Item_sum_int::cleanup(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_rank>(thd, this); } }; @@ -255,35 +259,37 @@ class Item_sum_dense_rank: public Item_sum_int XXX(cvicentiu) This class could potentially be implemented in the rank class, with a switch for the DENSE case. */ - void clear() + void clear() override { dense_rank= 0; first_add= true; } - bool add(); - void reset_field() { DBUG_ASSERT(0); } - void update_field() {} - longlong val_int() + bool add() override; + void reset_field() override { DBUG_ASSERT(0); } + void update_field() override {} + longlong val_int() override { return dense_rank; } Item_sum_dense_rank(THD *thd) : Item_sum_int(thd), dense_rank(0), first_add(true), peer_tracker(NULL) {} - const Type_handler *type_handler() const { return &type_handler_slonglong; } - enum Sumfunctype sum_func () const + const Type_handler *type_handler() const override + { return &type_handler_slonglong; } + enum Sumfunctype sum_func () const override { return DENSE_RANK_FUNC; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "dense_rank"; + static LEX_CSTRING name= {STRING_WITH_LEN("dense_rank") }; + return name; } - void setup_window_func(THD *thd, Window_spec *window_spec); + void setup_window_func(THD *thd, Window_spec *window_spec) override; - void cleanup() + void cleanup() override { if (peer_tracker) { @@ -292,7 +298,7 @@ class Item_sum_dense_rank: public Item_sum_int } Item_sum_int::cleanup(); } - Item 
*get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_dense_rank>(thd, this); } }; @@ -309,22 +315,22 @@ class Item_sum_hybrid_simple : public Item_sum_hybrid value(NULL) { } - bool add(); - bool fix_fields(THD *, Item **); - bool fix_length_and_dec(); + bool add() override; + bool fix_fields(THD *, Item **) override; + bool fix_length_and_dec() override; void setup_hybrid(THD *thd, Item *item); - double val_real(); - longlong val_int(); - my_decimal *val_decimal(my_decimal *); - void reset_field(); - String *val_str(String *); - bool val_native(THD *thd, Native *to); - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate); - const Type_handler *type_handler() const + double val_real() override; + longlong val_int() override; + my_decimal *val_decimal(my_decimal *) override; + void reset_field() override; + String *val_str(String *) override; + bool val_native(THD *thd, Native *to) override; + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; + const Type_handler *type_handler() const override { return Type_handler_hybrid_field_type::type_handler(); } - void update_field(); - Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table); - void clear() + void update_field() override; + Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table) override; + void clear() override { value->clear(); null_value= 1; @@ -345,17 +351,18 @@ class Item_sum_first_value : public Item_sum_hybrid_simple Item_sum_hybrid_simple(thd, arg_expr) {} - enum Sumfunctype sum_func () const + enum Sumfunctype sum_func () const override { return FIRST_VALUE_FUNC; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "first_value"; + static LEX_CSTRING name= {STRING_WITH_LEN("first_value") }; + return name; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_first_value>(thd, this); } }; @@ -371,77 +378,84 @@ class 
Item_sum_last_value : public Item_sum_hybrid_simple Item_sum_last_value(THD* thd, Item* arg_expr) : Item_sum_hybrid_simple(thd, arg_expr) {} - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return LAST_VALUE_FUNC; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "last_value"; + static LEX_CSTRING name= {STRING_WITH_LEN("last_value") }; + return name; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_last_value>(thd, this); } }; + class Item_sum_nth_value : public Item_sum_hybrid_simple { public: Item_sum_nth_value(THD *thd, Item *arg_expr, Item* offset_expr) : Item_sum_hybrid_simple(thd, arg_expr, offset_expr) {} - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return NTH_VALUE_FUNC; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "nth_value"; + static LEX_CSTRING name= {STRING_WITH_LEN("nth_value") }; + return name; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_nth_value>(thd, this); } }; + class Item_sum_lead : public Item_sum_hybrid_simple { public: Item_sum_lead(THD *thd, Item *arg_expr, Item* offset_expr) : Item_sum_hybrid_simple(thd, arg_expr, offset_expr) {} - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return LEAD_FUNC; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "lead"; + static LEX_CSTRING name= {STRING_WITH_LEN("lead") }; + return name; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_lead>(thd, this); } }; + class Item_sum_lag : public Item_sum_hybrid_simple { public: Item_sum_lag(THD *thd, Item *arg_expr, Item* offset_expr) : Item_sum_hybrid_simple(thd, arg_expr, offset_expr) {} - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return 
LAG_FUNC; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "lag"; + static LEX_CSTRING name= {STRING_WITH_LEN("lag") }; + return name; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_lag>(thd, this); } }; @@ -495,7 +509,7 @@ class Item_sum_percent_rank: public Item_sum_double, Item_sum_percent_rank(THD *thd) : Item_sum_double(thd), cur_rank(1), peer_tracker(NULL) {} - longlong val_int() + longlong val_int() override { /* Percent rank is a real value so calling the integer value should never @@ -505,7 +519,7 @@ class Item_sum_percent_rank: public Item_sum_double, return 0; } - double val_real() + double val_real() override { /* We can not get the real value without knowing the number of rows @@ -518,43 +532,45 @@ class Item_sum_percent_rank: public Item_sum_double, static_cast<double>(cur_rank - 1) / (partition_rows - 1) : 0; } - enum Sumfunctype sum_func () const + enum Sumfunctype sum_func () const override { return PERCENT_RANK_FUNC; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "percent_rank"; + static LEX_CSTRING name= {STRING_WITH_LEN("percent_rank") }; + return name; } - void update_field() {} + void update_field() override {} - void clear() + void clear() override { cur_rank= 1; row_number= 0; } - bool add(); - const Type_handler *type_handler() const { return &type_handler_double; } + bool add() override; + const Type_handler *type_handler() const override + { return &type_handler_double; } - bool fix_length_and_dec() + bool fix_length_and_dec() override { decimals = 10; // TODO-cvicentiu find out how many decimals the standard // requires. 
return FALSE; } - void setup_window_func(THD *thd, Window_spec *window_spec); + void setup_window_func(THD *thd, Window_spec *window_spec) override; - void reset_field() { DBUG_ASSERT(0); } + void reset_field() override { DBUG_ASSERT(0); } - void set_partition_row_count(ulonglong count) + void set_partition_row_count(ulonglong count) override { Partition_row_count::set_partition_row_count(count); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_percent_rank>(thd, this); } private: @@ -563,7 +579,7 @@ class Item_sum_percent_rank: public Item_sum_double, Group_bound_tracker *peer_tracker; - void cleanup() + void cleanup() override { if (peer_tracker) { @@ -596,51 +612,53 @@ class Item_sum_cume_dist: public Item_sum_double, Item_sum_cume_dist(THD *thd) :Item_sum_double(thd) { } Item_sum_cume_dist(THD *thd, Item *arg) :Item_sum_double(thd, arg) { } - double val_real() + double val_real() override { return calc_val_real(&null_value, current_row_count_); } - bool add() + bool add() override { current_row_count_++; return false; } - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return CUME_DIST_FUNC; } - void clear() + void clear() override { current_row_count_= 0; partition_row_count_= 0; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "cume_dist"; + static LEX_CSTRING name= {STRING_WITH_LEN("cume_dist") }; + return name; } - void update_field() {} - const Type_handler *type_handler() const { return &type_handler_double; } + void update_field() override {} + const Type_handler *type_handler() const override + { return &type_handler_double; } - bool fix_length_and_dec() + bool fix_length_and_dec() override { decimals = 10; // TODO-cvicentiu find out how many decimals the standard // requires. 
return FALSE; } - void reset_field() { DBUG_ASSERT(0); } + void reset_field() override { DBUG_ASSERT(0); } - void set_partition_row_count(ulonglong count) + void set_partition_row_count(ulonglong count) override { Partition_row_count::set_partition_row_count(count); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_cume_dist>(thd, this); } }; @@ -654,7 +672,7 @@ class Item_sum_ntile : public Item_sum_int, Item_sum_int(thd, num_quantiles_expr), n_old_val_(0) { } - longlong val_int() + longlong val_int() override { if (get_row_count() == 0) { @@ -681,41 +699,43 @@ class Item_sum_ntile : public Item_sum_int, return (current_row_count_ - 1 - extra_rows) / quantile_size + 1; } - bool add() + bool add() override { current_row_count_++; return false; } - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return NTILE_FUNC; } - void clear() + void clear() override { current_row_count_= 0; partition_row_count_= 0; n_old_val_= 0; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "ntile"; + static LEX_CSTRING name= {STRING_WITH_LEN("ntile") }; + return name; } - void update_field() {} + void update_field() override {} - const Type_handler *type_handler() const { return &type_handler_slonglong; } + const Type_handler *type_handler() const override + { return &type_handler_slonglong; } - void reset_field() { DBUG_ASSERT(0); } + void reset_field() override { DBUG_ASSERT(0); } - void set_partition_row_count(ulonglong count) + void set_partition_row_count(ulonglong count) override { Partition_row_count::set_partition_row_count(count); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_ntile>(thd, this); } private: @@ -734,7 +754,7 @@ public: value(NULL), val_calculated(FALSE), first_call(TRUE), prev_value(0), order_item(NULL){} - double val_real() + double val_real() override { if (get_row_count() == 0 || 
get_arg(0)->is_null()) { @@ -745,7 +765,7 @@ public: return value->val_real(); } - longlong val_int() + longlong val_int() override { if (get_row_count() == 0 || get_arg(0)->is_null()) { @@ -756,7 +776,7 @@ public: return value->val_int(); } - my_decimal* val_decimal(my_decimal* dec) + my_decimal* val_decimal(my_decimal* dec) override { if (get_row_count() == 0 || get_arg(0)->is_null()) { @@ -767,7 +787,7 @@ public: return value->val_decimal(dec); } - String* val_str(String *str) + String* val_str(String *str) override { if (get_row_count() == 0 || get_arg(0)->is_null()) { @@ -778,7 +798,7 @@ public: return value->val_str(str); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { if (get_row_count() == 0 || get_arg(0)->is_null()) { @@ -789,7 +809,7 @@ public: return value->get_date(thd, ltime, fuzzydate); } - bool val_native(THD *thd, Native *to) + bool val_native(THD *thd, Native *to) override { if (get_row_count() == 0 || get_arg(0)->is_null()) { @@ -800,7 +820,7 @@ public: return value->val_native(thd, to); } - bool add() + bool add() override { Item *arg= get_arg(0); if (arg->is_null()) @@ -841,12 +861,12 @@ public: return false; } - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return PERCENTILE_DISC_FUNC; } - void clear() + void clear() override { val_calculated= false; first_call= true; @@ -855,34 +875,35 @@ public: current_row_count_= 0; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "percentile_disc"; + static LEX_CSTRING name= {STRING_WITH_LEN("percentile_disc") }; + return name; } - void update_field() {} - const Type_handler *type_handler() const + void update_field() override {} + const Type_handler *type_handler() const override {return Type_handler_hybrid_field_type::type_handler();} - bool fix_length_and_dec() + bool fix_length_and_dec() override { decimals = 10; // 
TODO-cvicentiu find out how many decimals the standard // requires. return FALSE; } - void reset_field() { DBUG_ASSERT(0); } + void reset_field() override { DBUG_ASSERT(0); } - void set_partition_row_count(ulonglong count) + void set_partition_row_count(ulonglong count) override { Partition_row_count::set_partition_row_count(count); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_percentile_disc>(thd, this); } - void setup_window_func(THD *thd, Window_spec *window_spec); + void setup_window_func(THD *thd, Window_spec *window_spec) override; void setup_hybrid(THD *thd, Item *item); - bool fix_fields(THD *thd, Item **ref); + bool fix_fields(THD *thd, Item **ref) override; private: Item_cache *value; @@ -901,7 +922,7 @@ public: floor_value(NULL), ceil_value(NULL), first_call(TRUE),prev_value(0), ceil_val_calculated(FALSE), floor_val_calculated(FALSE), order_item(NULL){} - double val_real() + double val_real() override { if (get_row_count() == 0 || get_arg(0)->is_null()) { @@ -928,7 +949,7 @@ public: return ret_val; } - bool add() + bool add() override { Item *arg= get_arg(0); if (arg->is_null()) @@ -978,12 +999,12 @@ public: return false; } - enum Sumfunctype sum_func() const + enum Sumfunctype sum_func() const override { return PERCENTILE_CONT_FUNC; } - void clear() + void clear() override { first_call= true; floor_value->clear(); @@ -994,31 +1015,32 @@ public: current_row_count_= 0; } - const char*func_name() const + LEX_CSTRING func_name_cstring() const override { - return "percentile_cont"; + static LEX_CSTRING name= {STRING_WITH_LEN("percentile_cont") }; + return name; } - void update_field() {} + void update_field() override {} - bool fix_length_and_dec() + bool fix_length_and_dec() override { decimals = 10; // TODO-cvicentiu find out how many decimals the standard // requires. 
return FALSE; } - void reset_field() { DBUG_ASSERT(0); } + void reset_field() override { DBUG_ASSERT(0); } - void set_partition_row_count(ulonglong count) + void set_partition_row_count(ulonglong count) override { Partition_row_count::set_partition_row_count(count); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_sum_percentile_cont>(thd, this); } - void setup_window_func(THD *thd, Window_spec *window_spec); + void setup_window_func(THD *thd, Window_spec *window_spec) override; void setup_hybrid(THD *thd, Item *item); - bool fix_fields(THD *thd, Item **ref); + bool fix_fields(THD *thd, Item **ref) override; private: Item_cache *floor_value; @@ -1056,7 +1078,7 @@ public: Item_sum *window_func() const { return (Item_sum *) args[0]; } - void update_used_tables(); + void update_used_tables() override; /* This is used by filesort to mark the columns it needs to read (because they @@ -1067,7 +1089,7 @@ public: have been computed. In that case, window function will need to read its temp.table field. In order to allow that, mark that field in the read_set. 
*/ - bool register_field_in_read_map(void *arg) + bool register_field_in_read_map(void *arg) override { TABLE *table= (TABLE*) arg; if (result_field && (result_field->table == table || !table)) @@ -1170,11 +1192,11 @@ public: */ void setup_partition_border_check(THD *thd); - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return ((Item_sum *) args[0])->type_handler(); } - enum Item::Type type() const { return Item::WINDOW_FUNC_ITEM; } + enum Item::Type type() const override { return Item::WINDOW_FUNC_ITEM; } private: /* @@ -1217,7 +1239,7 @@ public: read_value_from_result_field= true; } - bool is_null() + bool is_null() override { if (force_return_blank) return true; @@ -1228,7 +1250,7 @@ public: return window_func()->is_null(); } - double val_real() + double val_real() override { double res; if (force_return_blank) @@ -1249,7 +1271,7 @@ public: return res; } - longlong val_int() + longlong val_int() override { longlong res; if (force_return_blank) @@ -1270,7 +1292,7 @@ public: return res; } - String* val_str(String* str) + String* val_str(String* str) override { String *res; if (force_return_blank) @@ -1293,7 +1315,7 @@ public: return res; } - bool val_native(THD *thd, Native *to) + bool val_native(THD *thd, Native *to) override { if (force_return_blank) return null_value= true; @@ -1302,7 +1324,7 @@ public: return val_native_from_item(thd, window_func(), to); } - my_decimal* val_decimal(my_decimal* dec) + my_decimal* val_decimal(my_decimal* dec) override { my_decimal *res; if (force_return_blank) @@ -1325,7 +1347,7 @@ public: return res; } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { bool res; if (force_return_blank) @@ -1349,23 +1371,27 @@ public: } void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, - List<Item> &fields, uint flags); + List<Item> &fields, uint flags) override; - bool fix_length_and_dec() 
+ bool fix_length_and_dec() override { Type_std_attributes::set(window_func()); return FALSE; } - const char* func_name() const { return "WF"; } + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("WF") }; + return name; + } - bool fix_fields(THD *thd, Item **ref); + bool fix_fields(THD *thd, Item **ref) override; bool resolve_window_name(THD *thd); - void print(String *str, enum_query_type query_type); + void print(String *str, enum_query_type query_type) override; - Item *get_copy(THD *thd) { return 0; } + Item *get_copy(THD *thd) override { return 0; } }; diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 6e863b1ffef..2f4d34afc6d 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2005, 2019, Oracle and/or its affiliates. - Copyright (c) 2009, 2020, MariaDB + Copyright (c) 2009, 2021, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -141,21 +141,21 @@ public: fltend= (MY_XPATH_FLT*) tmp_native_value.end(); nodeset->length(0); } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { return &type_handler_xpath_nodeset; } - const Type_handler *fixed_type_handler() const + const Type_handler *fixed_type_handler() const override { return &type_handler_xpath_nodeset; } Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, - const Tmp_field_param *param) + const Tmp_field_param *param) override { DBUG_ASSERT(0); return NULL; } - String *val_str(String *str) + String *val_str(String *str) override { prepare_nodes(); val_native(current_thd, &tmp2_native_value); @@ -189,7 +189,7 @@ public: } return str; } - bool fix_length_and_dec() + bool fix_length_and_dec() override { max_length= MAX_BLOB_WIDTH; collation.collation= pxml->charset(); @@ -198,8 +198,11 @@ public: const_item_cache= false; return FALSE; } - const char 
*func_name() const { return "nodeset"; } - bool check_vcol_func_processor(void *arg) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("nodeset") }; + } + bool check_vcol_func_processor(void *arg) override { return mark_unsupported_function(func_name(), arg, VCOL_IMPOSSIBLE); } @@ -213,9 +216,12 @@ class Item_nodeset_func_rootelement :public Item_nodeset_func public: Item_nodeset_func_rootelement(THD *thd, String *pxml): Item_nodeset_func(thd, pxml) {} - const char *func_name() const { return "xpath_rootelement"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_rootelement") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_rootelement>(thd, this); } }; @@ -226,9 +232,12 @@ class Item_nodeset_func_union :public Item_nodeset_func public: Item_nodeset_func_union(THD *thd, Item *a, Item *b, String *pxml): Item_nodeset_func(thd, a, b, pxml) {} - const char *func_name() const { return "xpath_union"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_union") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_union>(thd, this); } }; @@ -242,7 +251,10 @@ public: Item_nodeset_func_axisbyname(THD *thd, Item *a, const char *n_arg, uint l_arg, String *pxml): Item_nodeset_func(thd, a, pxml), node_name(n_arg), node_namelen(l_arg) { } - const char *func_name() const { return "xpath_axisbyname"; } + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_axisbyname") }; + } bool validname(MY_XML_NODE *n) { if (node_name[0] == '*') @@ -260,9 +272,12 @@ public: Item_nodeset_func_selfbyname(THD *thd, Item *a, const char *n_arg, uint 
l_arg, String *pxml): Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {} - const char *func_name() const { return "xpath_selfbyname"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_selfbyname") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_selfbyname>(thd, this); } }; @@ -274,9 +289,12 @@ public: Item_nodeset_func_childbyname(THD *thd, Item *a, const char *n_arg, uint l_arg, String *pxml): Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {} - const char *func_name() const { return "xpath_childbyname"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_childbyname") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_childbyname>(thd, this); } }; @@ -290,9 +308,12 @@ public: String *pxml, bool need_self_arg): Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml), need_self(need_self_arg) {} - const char *func_name() const { return "xpath_descendantbyname"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_descendantbyname") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_descendantbyname>(thd, this); } }; @@ -306,9 +327,12 @@ public: String *pxml, bool need_self_arg): Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml), need_self(need_self_arg) {} - const char *func_name() const { return "xpath_ancestorbyname"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + 
return { STRING_WITH_LEN("xpath_ancestorbyname") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_ancestorbyname>(thd, this); } }; @@ -320,9 +344,13 @@ public: Item_nodeset_func_parentbyname(THD *thd, Item *a, const char *n_arg, uint l_arg, String *pxml): Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {} - const char *func_name() const { return "xpath_parentbyname"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_parentbyname") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_parentbyname>(thd, this); } }; @@ -334,9 +362,12 @@ public: Item_nodeset_func_attributebyname(THD *thd, Item *a, const char *n_arg, uint l_arg, String *pxml): Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {} - const char *func_name() const { return "xpath_attributebyname"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_attributebyname") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_attributebyname>(thd, this); } }; @@ -351,9 +382,12 @@ class Item_nodeset_func_predicate :public Item_nodeset_func public: Item_nodeset_func_predicate(THD *thd, Item *a, Item *b, String *pxml): Item_nodeset_func(thd, a, b, pxml) {} - const char *func_name() const { return "xpath_predicate"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_predicate") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return 
get_item_copy<Item_nodeset_func_predicate>(thd, this); } }; @@ -364,9 +398,12 @@ class Item_nodeset_func_elementbyindex :public Item_nodeset_func public: Item_nodeset_func_elementbyindex(THD *thd, Item *a, Item *b, String *pxml): Item_nodeset_func(thd, a, b, pxml) { } - const char *func_name() const { return "xpath_elementbyindex"; } - bool val_native(THD *thd, Native *nodeset); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_elementbyindex") }; + } + bool val_native(THD *thd, Native *nodeset) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_func_elementbyindex>(thd, this); } }; @@ -384,8 +421,11 @@ class Item_xpath_cast_bool :public Item_bool_func public: Item_xpath_cast_bool(THD *thd, Item *a, String *pxml_arg): Item_bool_func(thd, a), pxml(pxml_arg) {} - const char *func_name() const { return "xpath_cast_bool"; } - longlong val_int() + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_cast_bool") }; + } + longlong val_int() override { if (args[0]->fixed_type_handler() == &type_handler_xpath_nodeset) { @@ -394,7 +434,7 @@ public: } return args[0]->val_real() ? 
1 : 0; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_xpath_cast_bool>(thd, this); } }; @@ -406,9 +446,12 @@ class Item_xpath_cast_number :public Item_real_func { public: Item_xpath_cast_number(THD *thd, Item *a): Item_real_func(thd, a) {} - const char *func_name() const { return "xpath_cast_number"; } - virtual double val_real() { return args[0]->val_real(); } - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_cast_number") }; + } + double val_real() override { return args[0]->val_real(); } + Item *get_copy(THD *thd) override { return get_item_copy<Item_xpath_cast_number>(thd, this); } }; @@ -422,12 +465,13 @@ public: Native *native_cache; Item_nodeset_context_cache(THD *thd, Native *native_arg, String *pxml): Item_nodeset_func(thd, pxml), native_cache(native_arg) { } - bool val_native(THD *thd, Native *nodeset) + bool val_native(THD *, Native *nodeset) override { return nodeset->copy(*native_cache); } - bool fix_length_and_dec() { max_length= MAX_BLOB_WIDTH;; return FALSE; } - Item *get_copy(THD *thd) + bool fix_length_and_dec() override + { max_length= MAX_BLOB_WIDTH; return FALSE; } + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_context_cache>(thd, this); } }; @@ -439,16 +483,19 @@ class Item_func_xpath_position :public Item_long_func public: Item_func_xpath_position(THD *thd, Item *a, String *p): Item_long_func(thd, a), pxml(p) {} - const char *func_name() const { return "xpath_position"; } - bool fix_length_and_dec() { max_length=10; return FALSE; } - longlong val_int() + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_position") }; + } + bool fix_length_and_dec() override { max_length=10; return FALSE; } + longlong val_int() override { args[0]->val_native(current_thd, &tmp_native_value); if (tmp_native_value.elements() == 1) return tmp_native_value.element(0).pos + 1; return 0; } - Item 
*get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_xpath_position>(thd, this); } }; @@ -460,9 +507,12 @@ class Item_func_xpath_count :public Item_long_func public: Item_func_xpath_count(THD *thd, Item *a, String *p): Item_long_func(thd, a), pxml(p) {} - const char *func_name() const { return "xpath_count"; } - bool fix_length_and_dec() { max_length=10; return FALSE; } - longlong val_int() + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_count") }; + } + bool fix_length_and_dec() override { max_length=10; return FALSE; } + longlong val_int() override { uint predicate_supplied_context_size; args[0]->val_native(current_thd, &tmp_native_value); @@ -471,7 +521,7 @@ public: return predicate_supplied_context_size; return tmp_native_value.elements(); } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_xpath_count>(thd, this); } }; @@ -484,8 +534,11 @@ public: Item_func_xpath_sum(THD *thd, Item *a, String *p): Item_real_func(thd, a), pxml(p) {} - const char *func_name() const { return "xpath_sum"; } - double val_real() + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_sum") }; + } + double val_real() override { double sum= 0; args[0]->val_native(current_thd, &tmp_native_value); @@ -516,7 +569,7 @@ public: } return sum; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_xpath_sum>(thd, this); } }; @@ -556,18 +609,21 @@ public: Item_nodeset_to_const_comparator(THD *thd, Item *nodeset, Item *cmpfunc, String *p): Item_bool_func(thd, nodeset, cmpfunc), pxml(p) {} - const char *func_name() const { return "xpath_nodeset_to_const_comparator"; } - bool check_vcol_func_processor(void *arg) + LEX_CSTRING func_name_cstring() const override + { + return { STRING_WITH_LEN("xpath_nodeset_to_const_comparator") }; + } + bool check_vcol_func_processor(void *arg) override { return 
mark_unsupported_function(func_name(), arg, VCOL_IMPOSSIBLE); } Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, - const Tmp_field_param *param) + const Tmp_field_param *param) override { DBUG_ASSERT(0); return NULL; } - longlong val_int() + longlong val_int() override { Item_func *comp= (Item_func*)args[1]; Item_string_xml_non_const *fake= @@ -598,7 +654,7 @@ public: } return 0; } - Item *get_copy(THD *thd) + Item *get_copy(THD *thd) override { return get_item_copy<Item_nodeset_to_const_comparator>(thd, this); } }; @@ -1176,13 +1232,13 @@ my_xpath_keyword(MY_XPATH *x, static Item *create_func_true(MY_XPATH *xpath, Item **args, uint nargs) { - return new (xpath->thd->mem_root) Item_bool(xpath->thd, "xpath_bool", 1); + return (Item*) &Item_true; } static Item *create_func_false(MY_XPATH *xpath, Item **args, uint nargs) { - return new (xpath->thd->mem_root) Item_bool(xpath->thd, "xpath_bool", 0); + return (Item*) &Item_false; } @@ -2736,7 +2792,7 @@ bool Item_xml_str_func::fix_fields(THD *thd, Item **ref) /* UCS2 is not supported */ my_printf_error(ER_UNKNOWN_ERROR, "Character set '%s' is not supported by XPATH", - MYF(0), collation.collation->csname); + MYF(0), collation.collation->cs_name.str); return true; } diff --git a/sql/item_xmlfunc.h b/sql/item_xmlfunc.h index 806739d1139..e2ffe2fa630 100644 --- a/sql/item_xmlfunc.h +++ b/sql/item_xmlfunc.h @@ -110,16 +110,16 @@ protected: public: Item_xml_str_func(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) { - maybe_null= TRUE; + set_maybe_null(); } Item_xml_str_func(THD *thd, Item *a, Item *b, Item *c): Item_str_func(thd, a, b, c) { - maybe_null= TRUE; + set_maybe_null(); } - bool fix_fields(THD *thd, Item **ref); - bool fix_length_and_dec(); - bool const_item() const + bool fix_fields(THD *thd, Item **ref) override; + bool fix_length_and_dec() override; + bool const_item() const override { return const_item_cache && (!nodeset_func || nodeset_func->const_item()); } @@ -131,9 +131,13 
@@ class Item_func_xml_extractvalue: public Item_xml_str_func public: Item_func_xml_extractvalue(THD *thd, Item *a, Item *b): Item_xml_str_func(thd, a, b) {} - const char *func_name() const { return "extractvalue"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("extractvalue") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_xml_extractvalue>(thd, this); } }; @@ -148,9 +152,13 @@ class Item_func_xml_update: public Item_xml_str_func public: Item_func_xml_update(THD *thd, Item *a, Item *b, Item *c): Item_xml_str_func(thd, a, b, c) {} - const char *func_name() const { return "updatexml"; } - String *val_str(String *); - Item *get_copy(THD *thd) + LEX_CSTRING func_name_cstring() const override + { + static LEX_CSTRING name= {STRING_WITH_LEN("updatexml") }; + return name; + } + String *val_str(String *) override; + Item *get_copy(THD *thd) override { return get_item_copy<Item_func_xml_update>(thd, this); } }; diff --git a/sql/json_table.cc b/sql/json_table.cc new file mode 100644 index 00000000000..65fe3c9a659 --- /dev/null +++ b/sql/json_table.cc @@ -0,0 +1,1463 @@ +/* + Copyright (c) 2020, MariaDB Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA +*/ + +#include "mariadb.h" +#include "sql_priv.h" +#include "sql_class.h" /* TMP_TABLE_PARAM */ +#include "table.h" +#include "sql_type_json.h" +#include "item_jsonfunc.h" +#include "json_table.h" +#include "sql_show.h" +#include "sql_select.h" +#include "create_tmp_table.h" +#include "sql_parse.h" + +#define HA_ERR_JSON_TABLE (HA_ERR_LAST+1) + +/* + Allocating memory and *also* using it (reading and + writing from it) because some build instructions cause + compiler to optimize out stack_used_up. Since alloca() + here depends on stack_used_up, it doesnt get executed + correctly and causes json_debug_nonembedded to fail + ( --error ER_STACK_OVERRUN_NEED_MORE does not occur). +*/ +#define ALLOCATE_MEM_ON_STACK(A) do \ + { \ + uchar *array= (uchar*)alloca(A); \ + array[0]= 1; \ + array[0]++; \ + array[0] ? array[0]++ : array[0]--; \ + } while(0) + +class table_function_handlerton +{ +public: + handlerton m_hton; + table_function_handlerton() + { + bzero(&m_hton, sizeof(m_hton)); + m_hton.tablefile_extensions= hton_no_exts; + m_hton.slot= HA_SLOT_UNDEF; + } +}; + + +static table_function_handlerton table_function_hton; + +/* + @brief + Collect a set of tables that a given table function cannot have + references to. + + @param + table_func The table function we are connecting info for + join_list The nested join to be processed + disallowed_tables Collect the tables here. + + @detail + According to the SQL standard, a table function can refer to any table + that's "preceding" it in the FROM clause. + + The other limitation we would like to enforce is that the inner side of + an outer join cannot refer to the outer side. An example: + + SELECT * from JSON_TABLE(t1.col, ...) left join t1 on ... + + This function implements both of the above restrictions. 
+ + Basic idea: the "join_list" contains the tables in the order that's a + reverse of the order they were specified in the query. + If we walk the join_list, we will encounter: + 1. First, the tables that table function cannot refer to (collect them in a + bitmap) + 2. Then, the table function itself (put it in the bitmap, too, as self- + references are not allowed, and stop the walk) + 3. Tables that the table function CAN refer to (we don't walk these as + we've stopped on step #2). + + The above can be applied recursively for nested joins (this covers NATURAL + JOIN, and JOIN ... USING constructs). + + Enforcing the "refer to only preceding tables" rule means that outer side + of LEFT JOIN cannot refer to the inner side. + + Handing RIGHT JOINs: There are no RIGHT JOINs in the join_list data + structures. They were converted to LEFT JOINs (see calls to st_select_lex:: + convert_right_join). This conversion changes the order of tables, but + we are ok with operating on the tables "in the left join order". 
+ + @return + 0 - Continue + 1 - Finish the process, success + -1 - Finish the process, failure +*/ + +static +int get_disallowed_table_deps_for_list(MEM_ROOT *mem_root, + TABLE_LIST *table_func, + List<TABLE_LIST> *join_list, + List<TABLE_LIST> *disallowed_tables) +{ + TABLE_LIST *table; + NESTED_JOIN *nested_join; + List_iterator<TABLE_LIST> li(*join_list); + + DBUG_EXECUTE_IF("json_check_min_stack_requirement", + { + long arbitrary_var; + long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); + ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); + }); + if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) + return 1; + + while ((table= li++)) + { + if ((nested_join= table->nested_join)) + { + int res; + if ((res= get_disallowed_table_deps_for_list(mem_root, table_func, + &nested_join->join_list, + disallowed_tables))) + return res; + } + else + { + if (disallowed_tables->push_back(table, mem_root)) + return -1; + if (table == table_func) + { + // This is the JSON_TABLE(...) that are we're computing dependencies + // for. + return 1; // Finish the processing + } + } + } + return 0; // Continue +} + + +/* + @brief + Given a join and a table function in it (specified by its table_func_bit), + produce a bitmap of tables that the table function can NOT have references + to. + + @detail + See get_disallowed_table_deps_for_list + + @return + NULL - Out of memory + Other - A list of tables that the function cannot have references to. May + be empty. 
+*/ + +static +List<TABLE_LIST>* get_disallowed_table_deps(MEM_ROOT *mem_root, + SELECT_LEX *select, + TABLE_LIST *table_func) +{ + List<TABLE_LIST> *disallowed_tables; + + if (!(disallowed_tables = new (mem_root) List<TABLE_LIST>)) + return NULL; + + int res= get_disallowed_table_deps_for_list(mem_root, table_func, + select->join_list, + disallowed_tables); + + // The collection process must have finished + DBUG_ASSERT(res != 0); + + if (res == -1) + return NULL; // Out of memory + + return disallowed_tables; +} + + +/* + A table that produces output rows for JSON_TABLE(). +*/ + +class ha_json_table: public handler +{ + Table_function_json_table *m_jt; + + String *m_js; // The JSON document we're reading + String m_tmps; // Buffer for the above + + int fill_column_values(THD *thd, uchar * buf, uchar *pos); + +public: + ha_json_table(TABLE_SHARE *share_arg, Table_function_json_table *jt): + handler(&table_function_hton.m_hton, share_arg), m_jt(jt) + { + /* + set the mark_trx_read_write_done to avoid the + handler::mark_trx_read_write_internal() call. + It relies on &ha_thd()->ha_data[ht->slot].ha_info[0] to be set. + But we don't set the ha_data for the ha_json_table, and + that call makes no sence for ha_json_table. 
+ */ + mark_trx_read_write_done= 1; + + /* See ha_json_table::position for format definition */ + ref_length= m_jt->m_columns.elements * 4; + } + ~ha_json_table() {} + handler *clone(const char *name, MEM_ROOT *mem_root) override { return NULL; } + /* Rows also use a fixed-size format */ + enum row_type get_row_type() const override { return ROW_TYPE_FIXED; } + const char *table_type() const override + { + return "JSON_TABLE function"; + } + ulonglong table_flags() const override + { + return (HA_FAST_KEY_READ | /*HA_NO_BLOBS |*/ HA_NULL_IN_KEY | + HA_CAN_SQL_HANDLER | + HA_REC_NOT_IN_SEQ | HA_NO_TRANSACTIONS | + HA_HAS_RECORDS); + } + ulong index_flags(uint inx, uint part, bool all_parts) const override + { + return HA_ONLY_WHOLE_INDEX | HA_KEY_SCAN_NOT_ROR; + } + ha_rows records() override { return HA_POS_ERROR; } + + int open(const char *name, int mode, uint test_if_locked) override + { return 0; } + int close(void) override { return 0; } + int rnd_init(bool scan) override; + int rnd_next(uchar *buf) override; + int rnd_pos(uchar * buf, uchar *pos) override; + void position(const uchar *record) override; + int info(uint) override; + int extra(enum ha_extra_function operation) override { return 0; } + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type) override + { return NULL; } + int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) + override { return 1; } + /* Give no message. */ + bool get_error_message(int error, String *buf) override + { + buf->length(0); + return TRUE; + } +}; + + +/* + Helper class that creates the temporary table that + represents the table function in the query. 
+*/ + +class Create_json_table final: public Create_tmp_table +{ +public: + Create_json_table() : + Create_tmp_table((ORDER*) 0, 0, 0, 0, 0) + {} + virtual ~Create_json_table() {}; + TABLE *start(THD *thd, + TMP_TABLE_PARAM *param, + Table_function_json_table *jt, + const LEX_CSTRING *table_alias); + bool choose_engine(THD *thd, TABLE *table, TMP_TABLE_PARAM *param) override + { + return 0; // Engine already choosen + } + bool add_json_table_fields(THD *thd, TABLE *table, + Table_function_json_table *jt); + bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, + Table_function_json_table *jt); +}; + + +/* + @brief + Start scanning the JSON document in [str ... end] + + @detail + Note: non-root nested paths are set to scan one JSON node (that is, a + "subdocument"). +*/ + +void Json_table_nested_path::scan_start(CHARSET_INFO *i_cs, + const uchar *str, const uchar *end) +{ + json_get_path_start(&m_engine, i_cs, str, end, &m_cur_path); + m_cur_nested= NULL; + m_null= false; + m_ordinality_counter= 0; +} + + +/* + @brief + Find the next JSON element that matches the search path. +*/ + +int Json_table_nested_path::scan_next() +{ + bool no_records_found= false; + if (m_cur_nested) + { + for (;;) + { + if (m_cur_nested->scan_next() == 0) + return 0; + if (!(m_cur_nested= m_cur_nested->m_next_nested)) + break; +handle_new_nested: + m_cur_nested->scan_start(m_engine.s.cs, m_engine.value_begin, + m_engine.s.str_end); + } + if (no_records_found) + return 0; + } + + DBUG_ASSERT(!m_cur_nested); + + while (!json_get_path_next(&m_engine, &m_cur_path)) + { + if (json_path_compare(&m_path, &m_cur_path, m_engine.value_type)) + continue; + /* path found. 
*/ + ++m_ordinality_counter; + + if (!m_nested) + return 0; + + m_cur_nested= m_nested; + no_records_found= true; + goto handle_new_nested; + } + + m_null= true; + return 1; +} + + +int ha_json_table::rnd_init(bool scan) +{ + Json_table_nested_path &p= m_jt->m_nested_path; + DBUG_ENTER("ha_json_table::rnd_init"); + + if ((m_js= m_jt->m_json->val_str(&m_tmps))) + { + p.scan_start(m_js->charset(), + (const uchar *) m_js->ptr(), (const uchar *) m_js->end()); + } + + DBUG_RETURN(0); +} + + +/* + @brief + Store JSON value in an SQL field, doing necessary special conversions + for JSON's null, true, and false. +*/ + +static void store_json_in_field(Field *f, const json_engine_t *je) +{ + switch (je->value_type) + { + case JSON_VALUE_NULL: + f->set_null(); + return; + + case JSON_VALUE_TRUE: + case JSON_VALUE_FALSE: + { + Item_result rt= f->result_type(); + if (rt == INT_RESULT || rt == DECIMAL_RESULT || rt == REAL_RESULT) + { + f->store(je->value_type == JSON_VALUE_TRUE, false); + return; + } + break; + } + default: + break; + }; + f->store((const char *) je->value, (uint32) je->value_len, je->s.cs); +} + + +static int store_json_in_json(Field *f, json_engine_t *je) +{ + const uchar *from= je->value_begin; + const uchar *to; + + if (json_value_scalar(je)) + to= je->value_end; + else + { + int error; + if ((error= json_skip_level(je))) + return error; + to= je->s.c_str; + } + f->store((const char *) from, (uint32) (to - from), je->s.cs); + return 0; +} + + +bool Json_table_nested_path::check_error(const char *str) +{ + if (m_engine.s.error) + { + report_json_error_ex(str, &m_engine, "JSON_TABLE", 0, + Sql_condition::WARN_LEVEL_ERROR); + return true; // Error + } + return false; // Ok +} + + +int ha_json_table::rnd_next(uchar *buf) +{ + if (!m_js) + return HA_ERR_END_OF_FILE; + + /* + Step 1: Move the root nested path to the next record (this implies moving + its child nested paths accordingly) + */ + if (m_jt->m_nested_path.scan_next()) + { + if 
(m_jt->m_nested_path.check_error(m_js->ptr())) + { + /* + We already reported an error, so returning an + error code that just doesn't produce extra + messages. + */ + return HA_ERR_JSON_TABLE; + } + return HA_ERR_END_OF_FILE; + } + + /* + Step 2: Read values for all columns (the columns refer to nested paths + they are in). + */ + return fill_column_values(table->in_use, buf, NULL) ? HA_ERR_JSON_TABLE : 0; +} + + +/* + @brief + Fill values of table columns, taking data either from Json_nested_path + objects, or from the rowid value + + @param pos NULL means the data should be read from Json_nested_path + objects. + Non-null value is a pointer to previously saved rowid (see + ha_json_table::position() for description) +*/ + +int ha_json_table::fill_column_values(THD *thd, uchar * buf, uchar *pos) +{ + MY_BITMAP *orig_map= dbug_tmp_use_all_columns(table, &table->write_set); + int error= 0; + Counting_error_handler er_handler; + Field **f= table->field; + Json_table_column *jc; + List_iterator_fast<Json_table_column> jc_i(m_jt->m_columns); + my_ptrdiff_t ptrdiff= buf - table->record[0]; + Abort_on_warning_instant_set ao_set(table->in_use, FALSE); + enum_check_fields cf_orig= table->in_use->count_cuted_fields; + + table->in_use->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL; + + thd->push_internal_handler(&er_handler); + + while (!error && (jc= jc_i++)) + { + bool is_null_value; + uint int_pos= 0; /* just to make compilers happy. */ + + if (!bitmap_is_set(table->read_set, (*f)->field_index)) + { + /* + If the RESPONSE_ERROR is set for the column, we have + to unpack it even if it's not in the read_set - to check + for possible errors. + */ + if (jc->m_on_empty.m_response != Json_table_column::RESPONSE_ERROR && + jc->m_on_error.m_response != Json_table_column::RESPONSE_ERROR) + goto cont_loop; + } + + (*f)->move_field_offset(ptrdiff); + + /* + Read the NULL flag: + - if we are reading from a rowid value, 0 means SQL NULL. 
+ - if scanning json document, read it from the nested path + */ + if (pos) + is_null_value= !(int_pos= uint4korr(pos)); + else + is_null_value= jc->m_nest->m_null; + + if (is_null_value) + { + (*f)->set_null(); + } + else + { + (*f)->set_notnull(); + switch (jc->m_column_type) + { + case Json_table_column::FOR_ORDINALITY: + { + /* + Read the cardinality counter: + - read it from nested path when scanning the json document + - or, read it from rowid when in rnd_pos() call + */ + longlong counter= pos? int_pos: jc->m_nest->m_ordinality_counter; + (*f)->store(counter, TRUE); + break; + } + case Json_table_column::PATH: + case Json_table_column::EXISTS_PATH: + { + json_engine_t je; + json_path_step_t *cur_step; + uint array_counters[JSON_DEPTH_LIMIT]; + int not_found; + const uchar* node_start; + const uchar* node_end; + + /* + Get the JSON context node that we will need to evaluate PATH or + EXISTS against: + - when scanning the json document, read it from nested path + - when in rnd_pos call, the rowid has the start offset. 
+ */ + if (pos) + { + node_start= (const uchar *) (m_js->ptr() + (int_pos-1)); + node_end= (const uchar *) m_js->end(); + } + else + { + node_start= jc->m_nest->get_value(); + node_end= jc->m_nest->get_value_end(); + } + + json_scan_start(&je, m_js->charset(), node_start, node_end); + + cur_step= jc->m_path.steps; + not_found= json_find_path(&je, &jc->m_path, &cur_step, array_counters) || + json_read_value(&je); + + if (jc->m_column_type == Json_table_column::EXISTS_PATH) + { + (*f)->store(!not_found); + } + else /*PATH*/ + { + if (not_found) + { + error= jc->m_on_empty.respond(jc, *f, ER_JSON_TABLE_ERROR_ON_FIELD); + } + else + { + if (jc->m_format_json) + { + if (!(error= store_json_in_json(*f, &je))) + error= er_handler.errors; + } + else if (!(error= !json_value_scalar(&je))) + { + store_json_in_field(*f, &je); + error= er_handler.errors; + } + + if (error) + { + error= jc->m_on_error.respond(jc, *f, + ER_JSON_TABLE_SCALAR_EXPECTED); + er_handler.errors= 0; + } + else + { + /* + If the path contains wildcards, check if there are + more matches for it in json and report an error if so. + */ + if (jc->m_path.types_used & + (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD) && + (json_scan_next(&je) || + !json_find_path(&je, &jc->m_path, &cur_step, + array_counters))) + { + error= jc->m_on_error.respond(jc, *f, + ER_JSON_TABLE_MULTIPLE_MATCHES); + } + } + } + } + break; + } + }; + } + (*f)->move_field_offset(-ptrdiff); + +cont_loop: + f++; + if (pos) + pos+= 4; + } + + dbug_tmp_restore_column_map(&table->write_set, orig_map); + thd->pop_internal_handler(); + thd->count_cuted_fields= cf_orig; + return error; +} + + +int ha_json_table::rnd_pos(uchar * buf, uchar *pos) +{ + return fill_column_values(table->in_use, buf, pos) ? HA_ERR_JSON_TABLE : 0; +} + + +/* + The reference has 4 bytes for every column of the JSON_TABLE. 
+ There it keeps 0 for the NULL values, ordinality index for + the ORDINALITY columns and the offset of the field's data in + the JSON for other column types. +*/ +void ha_json_table::position(const uchar *record) +{ + uchar *c_ref= ref; + Json_table_column *jc; + List_iterator_fast<Json_table_column> jc_i(m_jt->m_columns); + + while ((jc= jc_i++)) + { + if (jc->m_nest->m_null) + { + int4store(c_ref, 0); + } + else + { + switch (jc->m_column_type) + { + case Json_table_column::FOR_ORDINALITY: + int4store(c_ref, jc->m_nest->m_ordinality_counter); + break; + case Json_table_column::PATH: + case Json_table_column::EXISTS_PATH: + { + size_t pos= jc->m_nest->get_value() - + (const uchar *) m_js->ptr() + 1; + int4store(c_ref, pos); + break; + } + }; + } + c_ref+= 4; + } +} + + +int ha_json_table::info(uint) +{ + /* + We don't want 0 or 1 in stats.records. + Though this value shouldn't matter as the optimizer + supposed to use Table_function_json_table::get_estimates + to obtain this data. + */ + stats.records= 4; + return 0; +} + + +/** + Create a json table according to a field list. 
+ + @param thd thread handle + @param param a description used as input to create the table + @param jt json_table specificaion + @param table_alias alias +*/ + +TABLE *Create_json_table::start(THD *thd, + TMP_TABLE_PARAM *param, + Table_function_json_table *jt, + const LEX_CSTRING *table_alias) +{ + TABLE *table; + TABLE_SHARE *share; + DBUG_ENTER("Create_json_table::start"); + + param->tmp_name= "json"; + if (!(table= Create_tmp_table::start(thd, param, table_alias))) + DBUG_RETURN(0); + share= table->s; + share->not_usable_by_query_cache= FALSE; + share->db_plugin= NULL; + if (!(table->file= new (&table->mem_root) ha_json_table(share, jt))) + DBUG_RETURN(NULL); + table->file->init(); + DBUG_RETURN(table); +} + + +bool Create_json_table::finalize(THD *thd, TABLE *table, + TMP_TABLE_PARAM *param, + Table_function_json_table *jt) +{ + DBUG_ENTER("Create_json_table::finalize"); + DBUG_ASSERT(table); + + if (Create_tmp_table::finalize(thd, table, param, 1, 0)) + DBUG_RETURN(true); + + table->db_stat= HA_OPEN_KEYFILE; + if (unlikely(table->file->ha_open(table, table->s->path.str, O_RDWR, + HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE))) + DBUG_RETURN(true); + + table->set_created(); + table->s->max_rows= ~(ha_rows) 0; + param->end_write_records= HA_POS_ERROR; + DBUG_RETURN(0); +} + + +/* + @brief + Read the JSON_TABLE's field definitions from @jt and add the fields to + table @table. 
+*/ + +bool Create_json_table::add_json_table_fields(THD *thd, TABLE *table, + Table_function_json_table *jt) +{ + TABLE_SHARE *share= table->s; + Json_table_column *jc; + uint fieldnr= 0; + MEM_ROOT *mem_root_save= thd->mem_root; + List_iterator_fast<Json_table_column> jc_i(jt->m_columns); + Column_derived_attributes da(NULL); + DBUG_ENTER("add_json_table_fields"); + + thd->mem_root= &table->mem_root; + current_counter= other; + + while ((jc= jc_i++)) + { + Create_field *sql_f= jc->m_field; + List_iterator_fast<Json_table_column> it2(jt->m_columns); + Json_table_column *jc2; + /* + Initialize length from its original value (number of characters), + which was set in the parser. This is necessary if we're + executing a prepared statement for the second time. + */ + sql_f->length= sql_f->char_length; + if (!sql_f->charset) + sql_f->charset= &my_charset_utf8mb4_general_ci; + + if (sql_f->prepare_stage1(thd, thd->mem_root, table->file, + table->file->ha_table_flags(), &da)) + goto err_exit; + + while ((jc2= it2++) != jc) + { + if (lex_string_cmp(system_charset_info, + &sql_f->field_name, &jc2->m_field->field_name) == 0) + { + my_error(ER_DUP_FIELDNAME, MYF(0), sql_f->field_name.str); + goto err_exit; + } + } + it2.rewind(); + } + + jc_i.rewind(); + + while ((jc= jc_i++)) + { + Create_field *sql_f= jc->m_field; + Record_addr addr(!(sql_f->flags & NOT_NULL_FLAG)); + Bit_addr bit(addr.null()); + uint uneven_delta; + + sql_f->prepare_stage2(table->file, table->file->ha_table_flags()); + + if (!sql_f->charset) + sql_f->charset= &my_charset_utf8mb4_bin; + + Field *f= sql_f->type_handler()->make_table_field_from_def(share, + thd->mem_root, &sql_f->field_name, addr, bit, sql_f, sql_f->flags); + if (!f) + goto err_exit; + f->init(table); + uneven_delta= m_uneven_bit_length; + add_field(table, f, fieldnr++, 0); + m_uneven_bit[current_counter]+= (m_uneven_bit_length - uneven_delta); + } + + share->fields= fieldnr; + share->blob_fields= m_blob_count; + table->field[fieldnr]= 0; // 
End marker + share->blob_field[m_blob_count]= 0; // End marker + share->column_bitmap_size= bitmap_buffer_size(share->fields); + + thd->mem_root= mem_root_save; + + DBUG_RETURN(FALSE); +err_exit: + thd->mem_root= mem_root_save; + DBUG_RETURN(TRUE); +} + + +/* + @brief + Given a TABLE_LIST representing JSON_TABLE(...) syntax, create a temporary + table for it. + + @detail + The temporary table will have: + - fields whose names/datatypes are specified in JSON_TABLE(...) syntax + - a ha_json_table as the storage engine. + + The uses of the temporary table are: + - name resolution: the query may have references to the columns of + JSON_TABLE(...). A TABLE object will allow to resolve them. + - query execution: ha_json_table will produce JSON_TABLE's rows. +*/ + +TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table) +{ + TMP_TABLE_PARAM tp; + TABLE *table; + uint field_count= sql_table->table_function->m_columns.elements+1; + + DBUG_ENTER("create_table_for_function"); + + tp.init(); + tp.table_charset= system_charset_info; + tp.field_count= field_count; + { + Create_json_table maker; + + if (!(table= maker.start(thd, &tp, + sql_table->table_function, &sql_table->alias)) || + maker.add_json_table_fields(thd, table, sql_table->table_function) || + maker.finalize(thd, table, &tp, sql_table->table_function)) + { + if (table) + free_tmp_table(thd, table); + DBUG_RETURN(NULL); + } + } + sql_table->schema_table_name.length= 0; + + my_bitmap_map* bitmaps= + (my_bitmap_map*) thd->alloc(bitmap_buffer_size(field_count)); + my_bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count, + FALSE); + table->read_set= &table->def_read_set; + bitmap_clear_all(table->read_set); + table->alias_name_used= true; + table->next= thd->derived_tables; + thd->derived_tables= table; + table->s->tmp_table= INTERNAL_TMP_TABLE; + table->grant.privilege= SELECT_ACL; + + sql_table->table= table; + + DBUG_RETURN(table); +} + + +int Json_table_column::set(THD *thd, enum_type 
ctype, const LEX_CSTRING &path, + CHARSET_INFO *cs) +{ + set(ctype); + m_explicit_cs= cs; + if (json_path_setup(&m_path, thd->variables.collation_connection, + (const uchar *) path.str, (const uchar *)(path.str + path.length))) + { + report_path_error_ex(path.str, &m_path, "JSON_TABLE", 1, + Sql_condition::WARN_LEVEL_ERROR); + return 1; + } + + /* + This is done so the ::print function can just print the path string. + Can be removed if we redo that function to print the path using it's + anctual content. Not sure though if we should. + */ + m_path.s.c_str= (const uchar *) path.str; + + if (ctype == PATH) + m_format_json= m_field->type_handler() == &type_handler_long_blob_json; + + return 0; +} + + +static int print_path(String *str, const json_path_t *p) +{ + return str->append('\'') || + str->append_for_single_quote((const char *) p->s.c_str, + p->s.str_end - p->s.c_str) || + str->append('\''); +} + + +/* + Print the string representation of the Json_table_column. + + @param thd - the thread + @param f - the remaining array of Field-s from the table + if the Json_table_column + @param str - the string where to print +*/ +int Json_table_column::print(THD *thd, Field **f, String *str) +{ + StringBuffer<MAX_FIELD_WIDTH> column_type(str->charset()); + + if (append_identifier(thd, str, &m_field->field_name) || + str->append(' ')) + return 1; + + switch (m_column_type) + { + case FOR_ORDINALITY: + if (str->append(STRING_WITH_LEN("FOR ORDINALITY"))) + return 1; + break; + case EXISTS_PATH: + case PATH: + { + static const LEX_CSTRING path= { STRING_WITH_LEN(" PATH ") }; + static const LEX_CSTRING exists_path= { STRING_WITH_LEN(" EXISTS PATH ") }; + + (*f)->sql_type(column_type); + + if (str->append(column_type) || + ((*f)->has_charset() && m_explicit_cs && + (str->append(STRING_WITH_LEN(" CHARSET ")) || + str->append(&m_explicit_cs->cs_name))) || + str->append(m_column_type == PATH ? 
&path : &exists_path) || + print_path(str, &m_path)) + return 1; + break; + } + }; + + if (m_on_empty.print("EMPTY", str) || + m_on_error.print("ERROR", str)) + return 1; + + return 0; +} + + +int Json_table_nested_path::set_path(THD *thd, const LEX_CSTRING &path) +{ + if (json_path_setup(&m_path, thd->variables.collation_connection, + (const uchar *) path.str, (const uchar *)(path.str + path.length))) + { + report_path_error_ex(path.str, &m_path, "JSON_TABLE", 1, + Sql_condition::WARN_LEVEL_ERROR); + return 1; + } + + /* + This is done so the ::print function can just print the path string. + Can be removed if we redo that function to print the path using its + actual content. Not sure though if we should. + */ + m_path.s.c_str= (const uchar *) path.str; + return 0; +} + + +/* + @brief + Perform the action of this response on field @f (emit an error, or set @f + to NULL, or set it to default value). + error_num supposed to have the error message with field_name and table_name + arguments. 
+*/ + +int Json_table_column::On_response::respond(Json_table_column *jc, Field *f, + uint error_num) +{ + switch (m_response) + { + case Json_table_column::RESPONSE_NOT_SPECIFIED: + case Json_table_column::RESPONSE_NULL: + f->set_null(); + break; + case Json_table_column::RESPONSE_ERROR: + f->set_null(); + my_error(error_num, MYF(0), f->field_name.str, f->table->alias.ptr()); + return 1; + case Json_table_column::RESPONSE_DEFAULT: + f->set_notnull(); + f->store(m_default.str, + m_default.length, jc->m_defaults_cs); + break; + } + return 0; +} + + +int Json_table_column::On_response::print(const char *name, String *str) const +{ + LEX_CSTRING resp; + const LEX_CSTRING *ds= NULL; + if (m_response == Json_table_column::RESPONSE_NOT_SPECIFIED) + return 0; + + switch (m_response) + { + case Json_table_column::RESPONSE_NULL: + lex_string_set3(&resp, STRING_WITH_LEN("NULL")); + break; + case Json_table_column::RESPONSE_ERROR: + lex_string_set3(&resp, STRING_WITH_LEN("ERROR")); + break; + case Json_table_column::RESPONSE_DEFAULT: + { + lex_string_set3(&resp, STRING_WITH_LEN("DEFAULT")); + ds= &m_default; + break; + } + default: + lex_string_set3(&resp, "", 0); + DBUG_ASSERT(FALSE); /* should never happen. */ + } + + return (str->append(' ') || str->append(resp) || + (ds && (str->append(STRING_WITH_LEN(" '")) || + str->append_for_single_quote(ds->str, ds->length) || + str->append('\''))) || + str->append(STRING_WITH_LEN(" ON ")) || + str->append(name, strlen(name))); +} + + +void Table_function_json_table::start_nested_path(Json_table_nested_path *np) +{ + np->m_parent= cur_parent; + *last_sibling_hook= np; + + // Make the newly added path the parent + cur_parent= np; + last_sibling_hook= &np->m_nested; +} + + +void Table_function_json_table::end_nested_path() +{ + last_sibling_hook= &cur_parent->m_next_nested; + cur_parent= cur_parent->m_parent; +} + + +/* + @brief Create a name resolution context for doing name resolution in table + function argument. 
+ + @seealso + push_new_name_resolution_context +*/ + +bool push_table_function_arg_context(LEX *lex, MEM_ROOT *alloc) +{ + // Walk the context stack until we find a context that is used for resolving + // the SELECT's WHERE clause. + List_iterator<Name_resolution_context> it(lex->context_stack); + Name_resolution_context *ctx; + while ((ctx= it++)) + { + if (ctx->select_lex && ctx == &ctx->select_lex->context) + break; + } + DBUG_ASSERT(ctx); + + // Then, create a copy of it and return it. + Name_resolution_context *new_ctx= new (alloc) Name_resolution_context; + + // Note: not all fields of *ctx are initialized yet at this point. + // We will get all of the fields filled in Table_function_json_table::setup + // (search for the "Prepare the name resolution context" comment). + *new_ctx= *ctx; + return lex->push_context(new_ctx); +} + + +/* + @brief + Perform name-resolution phase tasks + + @detail + The only argument that needs name resolution is the first parameter which + has the JSON text: + + JSON_TABLE(json_doc, ... ) + + The argument may refer to other tables and uses special name resolution + rules (see get_disallowed_table_deps_for_list for details). This function + sets up Name_resolution_context object appropriately before calling + fix_fields for the argument. + + @return + false OK + true Fatal error +*/ + +bool Table_function_json_table::setup(THD *thd, TABLE_LIST *sql_table, + SELECT_LEX *s_lex) +{ + thd->where= "JSON_TABLE argument"; + + if (!m_context_setup_done) + { + m_context_setup_done= true; + // Prepare the name resolution context. 
First, copy the context that is + // used for name resolution of the WHERE clause + *m_context= s_lex->context; + + // Then, restrict it to only allow to refer to tables that come before the + // table function reference + if (!(m_context->ignored_tables= + get_disallowed_table_deps(thd->stmt_arena->mem_root, s_lex, + sql_table))) + return TRUE; // Error + } + + bool save_is_item_list_lookup; + save_is_item_list_lookup= s_lex->is_item_list_lookup; + s_lex->is_item_list_lookup= 0; + + // Do the same what setup_without_group() does: do not count the referred + // fields in non_agg_field_used: + const bool saved_non_agg_field_used= s_lex->non_agg_field_used(); + + bool res= m_json->fix_fields_if_needed_for_scalar(thd, &m_json); + + s_lex->is_item_list_lookup= save_is_item_list_lookup; + s_lex->set_non_agg_field_used(saved_non_agg_field_used); + + if (res) + return TRUE; // Error + + return FALSE; +} + +int Table_function_json_table::walk_items(Item_processor processor, + bool walk_subquery, void *argument) +{ + return m_json->walk(processor, walk_subquery, argument); +} + +void Table_function_json_table::get_estimates(ha_rows *out_rows, + double *scan_time, + double *startup_cost) +{ + *out_rows= 40; + *scan_time= 0.0; + *startup_cost= 0.0; +} + + +/* + Check if a column belongs to the nested path + or a path that nested into it. + It only supposed to be used in the Json_table_nested_path::print, and + since the nested path should have at least one field we + don't have to loop through the m_next_nested. +*/ +bool Json_table_nested_path::column_in_this_or_nested( + const Json_table_nested_path *p, const Json_table_column *jc) +{ + for (; p; p= p->m_nested) + { + if (jc->m_nest == p) + return TRUE; + } + return FALSE; +} + + +/* + Print the string representation of the Json_nested_path object. + Which is the COLUMNS(...) part of the JSON_TABLE definition. + + @param thd - the thread + @param f - the remaining part of the array of Field* objects + taken from the TABLE. 
+ It's needed as Json_table_column objects + don't have links to the related Field-s. + @param str - the string where to print + @param it - the remaining part of the Json_table_column list + @param last_column - the last column taken from the list. +*/ + +int Json_table_nested_path::print(THD *thd, Field ***f, String *str, + List_iterator_fast<Json_table_column> &it, + Json_table_column **last_column) +{ + Json_table_nested_path *c_path= this; + Json_table_nested_path *c_nested= m_nested; + Json_table_column *jc= *last_column; + bool first_column= TRUE; + + if (str->append(STRING_WITH_LEN("COLUMNS ("))) + return 1; + + /* loop while jc belongs to the current or nested paths. */ + while(jc && + (jc->m_nest == c_path || column_in_this_or_nested(c_nested, jc))) + { + if (first_column) + first_column= FALSE; + else if (str->append(STRING_WITH_LEN(", "))) + return 1; + + if (jc->m_nest == c_path) + { + if (jc->print(thd, *f, str)) + return 1; + if ((jc= it++)) + ++(*f); + } + else + { + DBUG_ASSERT(column_in_this_or_nested(c_nested, jc)); + if (str->append(STRING_WITH_LEN("NESTED PATH ")) || + print_path(str, &jc->m_nest->m_path) || + str->append(' ') || + c_nested->print(thd, f, str, it, &jc)) + return 1; + c_nested= c_nested->m_next_nested; + } + } + + if (str->append(STRING_WITH_LEN(")"))) + return 1; + + *last_column= jc; + return 0; +} + + +/* + Print the SQL definition of the JSON_TABLE. + Used mostly as a part of the CREATE VIEW statement. 
+ + @param thd - the thread + @param sql_table - the corresponding TABLE_LIST object + @param str - the string where to print + @param query_type - the query type +*/ +int Table_function_json_table::print(THD *thd, TABLE_LIST *sql_table, + String *str, enum_query_type query_type) +{ + List_iterator_fast<Json_table_column> jc_i(m_columns); + Json_table_column *jc= jc_i++; + Field **f_list= sql_table->table->field; + + DBUG_ENTER("Table_function_json_table::print"); + + if (str->append(STRING_WITH_LEN("JSON_TABLE("))) + DBUG_RETURN(TRUE); + + m_json->print(str, query_type); + + if (str->append(STRING_WITH_LEN(", ")) || + print_path(str, &m_nested_path.m_path) || + str->append(' ') || + m_nested_path.print(thd, &f_list, str, jc_i, &jc) || + str->append(')')) + DBUG_RETURN(TRUE); + + DBUG_RETURN(0); +} + + +void Table_function_json_table::fix_after_pullout(TABLE_LIST *sql_table, + st_select_lex *new_parent, bool merge) +{ + m_json->fix_after_pullout(new_parent, &m_json, merge); + sql_table->dep_tables= used_tables(); +} + + +/* + @brief + Recursively make all tables in the join_list also depend on deps. 
+*/ + +static void add_extra_deps(List<TABLE_LIST> *join_list, table_map deps) +{ + TABLE_LIST *table; + List_iterator<TABLE_LIST> li(*join_list); + + DBUG_EXECUTE_IF("json_check_min_stack_requirement", + { + long arbitrary_var; + long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); + ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); + }); + if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) + return; + + while ((table= li++)) + { + table->dep_tables |= deps; + NESTED_JOIN *nested_join; + if ((nested_join= table->nested_join)) + { + // set the deps inside, too + add_extra_deps(&nested_join->join_list, deps); + } + } +} + + +/* + @brief + Add table dependencies that are directly caused by table functions, also + add extra dependencies so that the join optimizer does not construct + "dead-end" join prefixes. + + @detail + There are two kinds of limitations on join order: + 1A. Outer joins require that inner tables follow outer. + 1B. Tables within a join nest must be present in the join order + "without interleaving". See check_interleaving_with_nj for details. + + 2. Table function argument may refer to *any* table that precedes the + current table in the query text. The table maybe outside of the current + nested join and/or inside another nested join. + + One may think that adding dependency according to #2 would be sufficient, + but this is not the case. + + @example + + select ... + from + t20 left join t21 on t20.a=t21.a + join + (t31 left join (t32 join + JSON_TABLE(t21.js, + '$' COLUMNS (ab INT PATH '$.a')) AS jt + ) on t31.a<3 + ) + + Here, jt's argument refers to t21. 
+ + Table dependencies are: + t21 -> t20 + t32 -> t31 + jt -> t21 t31 (also indirectly depends on t20 through t21) + + This allows to construct a "dead-end" join prefix, like: + + t31, t32 + + Here, "no interleaving" rule requires the next table to be jt, but we + can't add it, because it depends on t21 which is not in the join prefix. + + @end example + + Dead-end join prefixes do not work with join prefix pruning done for + @@optimizer_prune_level: it is possible that all non-dead-end prefixes are + pruned away. + + The solution is as follows: if there is an outer join that contains + (directly on indirectly) a table function JT which has a reference JREF + outside of the outer join: + + left join ( T_I ... json_table(JREF, ...) as JT ...) + + then make *all* tables T_I also dependent on outside references in JREF. + This way, the optimizer will put table T_I into the join prefix only when + JT can be put there as well, and "dead-end" prefixes will not be built. + + @param join_list List of tables to process. Initial invocation should + supply the JOIN's top-level table list. + @param nest_tables Bitmap of all tables in the join list. 
+ + @return Bitmap of all outside references that tables in join_list have +*/ + +table_map add_table_function_dependencies(List<TABLE_LIST> *join_list, + table_map nest_tables) +{ + TABLE_LIST *table; + table_map res= 0; + List_iterator<TABLE_LIST> li(*join_list); + + DBUG_EXECUTE_IF("json_check_min_stack_requirement", + { + long arbitrary_var; + long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); + ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); + }); + if ((res=check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))) + return res; + + // Recursively compute extra dependencies + while ((table= li++)) + { + NESTED_JOIN *nested_join; + if ((nested_join= table->nested_join)) + { + res |= add_table_function_dependencies(&nested_join->join_list, + nested_join->used_tables); + } + else if (table->table_function) + { + table->dep_tables |= table->table_function->used_tables(); + res |= table->dep_tables; + } + } + res= res & ~nest_tables & ~PSEUDO_TABLE_BITS; + // Then, make all "peers" have them: + if (res) + add_extra_deps(join_list, res); + + return res; +} + + diff --git a/sql/json_table.h b/sql/json_table.h new file mode 100644 index 00000000000..7316edd4ee6 --- /dev/null +++ b/sql/json_table.h @@ -0,0 +1,292 @@ +#ifndef JSON_TABLE_INCLUDED +#define JSON_TABLE_INCLUDED + +/* Copyright (c) 2020, MariaDB Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + + +#include <json_lib.h> + +class Json_table_column; + +/* + The Json_table_nested_path represents the 'current nesting' level + for a set of JSON_TABLE columns. + Each column (Json_table_column instance) is linked with corresponding + 'nested path' object and gets its piece of JSON to parse during the computation + phase. + The root 'nested_path' is always present as a part of Table_function_json_table, + then other 'nested_paths' can be created and linked into a tree structure when new + 'NESTED PATH' is met. The nested 'nested_paths' are linked with 'm_nested', the same-level + 'nested_paths' are linked with 'm_next_nested'. + So for instance + JSON_TABLE( '...', '$[*]' + COLUMNS( a INT PATH '$.a' , + NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$', + NESTED PATH '$.c[*]' COLUMNS(x INT PATH '$')), + NESTED PATH '$.n[*]' COLUMNS (z INT PATH '$')) + results in 4 'nested_path' created: + root nested_b nested_c nested_n + m_path '$[*]' '$.b[*]' '$.c[*]' '$.n[*] + m_nested &nested_b &nested_c NULL NULL + n_next_nested NULL &nested_n NULL NULL + + and 4 columns created: + a b x z + m_nest &root &nested_b &nested_c &nested_n +*/ + +class Json_table_nested_path : public Sql_alloc +{ +public: + json_path_t m_path; /* The JSON Path to get the rows from */ + bool m_null; // TRUE <=> producing a NULL-complemented row. 
+ + /*** Construction interface ***/ + Json_table_nested_path(): + m_null(TRUE), m_nested(NULL), m_next_nested(NULL) + {} + + int set_path(THD *thd, const LEX_CSTRING &path); + + /*** Methods for performing a scan ***/ + void scan_start(CHARSET_INFO *i_cs, const uchar *str, const uchar *end); + int scan_next(); + bool check_error(const char *str); + + /*** Members for getting the values we've scanned to ***/ + const uchar *get_value() { return m_engine.value_begin; } + const uchar *get_value_end() { return m_engine.s.str_end; } + + /* Counts the rows produced. Used by FOR ORDINALITY columns */ + longlong m_ordinality_counter; + + int print(THD *thd, Field ***f, String *str, + List_iterator_fast<Json_table_column> &it, + Json_table_column **last_column); +private: + /* The head of the list of nested NESTED PATH statements. */ + Json_table_nested_path *m_nested; + + /* in the above list items are linked with the */ + Json_table_nested_path *m_next_nested; + + /*** Members describing NESTED PATH structure ***/ + /* Parent nested path. The "root" path has this NULL */ + Json_table_nested_path *m_parent; + + /*** Members describing current JSON Path scan state ***/ + /* The JSON Parser and JSON Path evaluator */ + json_engine_t m_engine; + + /* The path the parser is currently pointing to */ + json_path_t m_cur_path; + + /* The child NESTED PATH we're currently scanning */ + Json_table_nested_path *m_cur_nested; + + static bool column_in_this_or_nested(const Json_table_nested_path *p, + const Json_table_column *jc); + friend class Table_function_json_table; +}; + + +/* + @brief + Describes the column definition in JSON_TABLE(...) syntax. + + @detail + Has methods for printing/handling errors but otherwise it's a static + object. 
+*/ + +class Json_table_column : public Sql_alloc +{ +public: + enum enum_type + { + FOR_ORDINALITY, + PATH, + EXISTS_PATH + }; + + enum enum_on_type + { + ON_EMPTY, + ON_ERROR + }; + + enum enum_on_response + { + RESPONSE_NOT_SPECIFIED, + RESPONSE_ERROR, + RESPONSE_NULL, + RESPONSE_DEFAULT + }; + + struct On_response + { + public: + Json_table_column::enum_on_response m_response; + LEX_CSTRING m_default; + int respond(Json_table_column *jc, Field *f, uint error_num); + int print(const char *name, String *str) const; + bool specified() const { return m_response != RESPONSE_NOT_SPECIFIED; } + }; + + enum_type m_column_type; + bool m_format_json; + json_path_t m_path; + On_response m_on_error; + On_response m_on_empty; + Create_field *m_field; + Json_table_nested_path *m_nest; + CHARSET_INFO *m_explicit_cs; + CHARSET_INFO *m_defaults_cs; + + void set(enum_type ctype) + { + m_column_type= ctype; + } + int set(THD *thd, enum_type ctype, const LEX_CSTRING &path, CHARSET_INFO *cs); + Json_table_column(Create_field *f, Json_table_nested_path *nest) : + m_field(f), m_nest(nest), m_explicit_cs(NULL) + { + m_on_error.m_response= RESPONSE_NOT_SPECIFIED; + m_on_empty.m_response= RESPONSE_NOT_SPECIFIED; + } + int print(THD *tnd, Field **f, String *str); +}; + + +/* + Class represents the table function, the function + that returns the table as a result so supposed to appear + in the FROM list of the SELECT statement. + At the moment there is only one such function JSON_TABLE, + so the class named after it, but should be refactored + into the hierarchy root if we create more of that functions. + + As the parser finds the table function in the list it + creates an instance of Table_function_json_table storing it + into the TABLE_LIST::table_function. + Then the ha_json_table instance is created based on it in + the create_table_for_function(). 
+ + == Replication: whether JSON_TABLE is deterministic == + + In sql_yacc.yy, we set BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION whenever + JSON_TABLE is used. The reasoning behind this is as follows: + + In the current MariaDB code, evaluation of JSON_TABLE is deterministic, + that is, for a given input string JSON_TABLE will always produce the same + set of rows in the same order. However one can think of JSON documents + that one can consider indentical which will produce different output. + In order to be feature-proof and withstand changes like: + - sorting JSON object members by name (like MySQL does) + - changing the way duplicate object members are handled + we mark the function as SBR-unsafe. + (If there is ever an issue with this, marking the function as SBR-safe + is a non-intrusive change we will always be able to make) +*/ + +class Table_function_json_table : public Sql_alloc +{ +public: + /*** Basic properties of the original JSON_TABLE(...) ***/ + Item *m_json; /* The JSON value to be parsed. */ + + /* The COLUMNS(...) part representation. */ + Json_table_nested_path m_nested_path; + + /* The list of table column definitions. */ + List<Json_table_column> m_columns; + + /*** Name resolution functions ***/ + bool setup(THD *thd, TABLE_LIST *sql_table, SELECT_LEX *s_lex); + + int walk_items(Item_processor processor, bool walk_subquery, + void *argument); + + /*** Functions for interaction with the Query Optimizer ***/ + void fix_after_pullout(TABLE_LIST *sql_table, + st_select_lex *new_parent, bool merge); + void update_used_tables() { m_json->update_used_tables(); } + + table_map used_tables() const { return m_json->used_tables(); } + bool join_cache_allowed() const + { + /* + Can use join cache when we have an outside reference. + If there's dependency on any other table or randomness, + cannot use it. 
+ */ + return !(used_tables() & ~OUTER_REF_TABLE_BIT); + } + void get_estimates(ha_rows *out_rows, + double *scan_time, double *startup_cost); + + int print(THD *thd, TABLE_LIST *sql_table, + String *str, enum_query_type query_type); + + /*** Construction interface to be used from the parser ***/ + Table_function_json_table(Item *json): + m_json(json), + m_context_setup_done(false) + { + cur_parent= &m_nested_path; + last_sibling_hook= &m_nested_path.m_nested; + } + + void start_nested_path(Json_table_nested_path *np); + void end_nested_path(); + Json_table_nested_path *get_cur_nested_path() { return cur_parent; } + void set_name_resolution_context(Name_resolution_context *arg) + { + m_context= arg; + } + + /* SQL Parser: current column in JSON_TABLE (...) syntax */ + Json_table_column *m_cur_json_table_column; + + /* SQL Parser: charset of the current text literal */ + CHARSET_INFO *m_text_literal_cs; + +private: + /* Context to be used for resolving the first argument. */ + Name_resolution_context *m_context; + + bool m_context_setup_done; + + /* Current NESTED PATH level being parsed */ + Json_table_nested_path *cur_parent; + + /* + Pointer to the list tail where we add the next NESTED PATH. + It points to the cur_parnt->m_nested for the first nested + and prev_nested->m_next_nested for the coesequent ones. 
+ */ + Json_table_nested_path **last_sibling_hook; +}; + +bool push_table_function_arg_context(LEX *lex, MEM_ROOT *alloc); + +TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table); + +table_map add_table_function_dependencies(List<TABLE_LIST> *join_list, + table_map nest_tables); + +#endif /* JSON_TABLE_INCLUDED */ + diff --git a/sql/key.cc b/sql/key.cc index 79f1c55b61c..f2cebfe6d82 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -360,7 +360,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length, { if (field->is_null()) { - to->append(STRING_WITH_LEN("NULL")); + to->append(NULL_clex_str); DBUG_VOID_RETURN; } CHARSET_INFO *cs= field->charset(); @@ -393,7 +393,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length, if (max_length < field->pack_length()) tmp.length(MY_MIN(tmp.length(),max_length)); ErrConvString err(&tmp); - to->append(err.ptr()); + to->append(err.lex_cstring()); } else to->append(STRING_WITH_LEN("???")); @@ -434,7 +434,7 @@ void key_unpack(String *to, TABLE *table, KEY *key) { if (table->record[0][key_part->null_offset] & key_part->null_bit) { - to->append(STRING_WITH_LEN("NULL")); + to->append(NULL_clex_str); continue; } } diff --git a/sql/lex.h b/sql/lex.h index e344b32ae83..cbf9d9d51b2 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -211,6 +211,7 @@ SYMBOL symbols[] = { { "ELSE", SYM(ELSE)}, { "ELSEIF", SYM(ELSEIF_MARIADB_SYM)}, { "ELSIF", SYM(ELSIF_MARIADB_SYM)}, + { "EMPTY", SYM(EMPTY_SYM)}, { "ENABLE", SYM(ENABLE_SYM)}, { "ENCLOSED", SYM(ENCLOSED)}, { "END", SYM(END)}, @@ -289,6 +290,7 @@ SYMBOL symbols[] = { { "IDENTIFIED", SYM(IDENTIFIED_SYM)}, { "IF", SYM(IF_SYM)}, { "IGNORE", SYM(IGNORE_SYM)}, + { "IGNORED", SYM(IGNORED_SYM)}, { "IGNORE_DOMAIN_IDS", SYM(IGNORE_DOMAIN_IDS_SYM)}, { "IGNORE_SERVER_IDS", SYM(IGNORE_SERVER_IDS_SYM)}, { "IMMEDIATE", SYM(IMMEDIATE_SYM)}, @@ -327,6 +329,7 @@ SYMBOL symbols[] = { { "INVOKER", SYM(INVOKER_SYM)}, { "JOIN", SYM(JOIN_SYM)}, { "JSON", 
SYM(JSON_SYM)}, + { "JSON_TABLE", SYM(JSON_TABLE_SYM)}, { "KEY", SYM(KEY_SYM)}, { "KEYS", SYM(KEYS)}, { "KEY_BLOCK_SIZE", SYM(KEY_BLOCK_SIZE)}, @@ -351,6 +354,7 @@ SYMBOL symbols[] = { { "LOCALTIME", SYM(NOW_SYM)}, { "LOCALTIMESTAMP", SYM(NOW_SYM)}, { "LOCK", SYM(LOCK_SYM)}, + { "LOCKED", SYM(LOCKED_SYM)}, { "LOCKS", SYM(LOCKS_SYM)}, { "LOGFILE", SYM(LOGFILE_SYM)}, { "LOGS", SYM(LOGS_SYM)}, @@ -400,6 +404,7 @@ SYMBOL symbols[] = { { "MICROSECOND", SYM(MICROSECOND_SYM)}, { "MIDDLEINT", SYM(MEDIUMINT)}, /* For powerbuilder */ { "MIGRATE", SYM(MIGRATE_SYM)}, + { "MINUS", SYM(MINUS_ORACLE_SYM)}, { "MINUTE", SYM(MINUTE_SYM)}, { "MINUTE_MICROSECOND", SYM(MINUTE_MICROSECOND_SYM)}, { "MINUTE_SECOND", SYM(MINUTE_SECOND_SYM)}, @@ -419,6 +424,7 @@ SYMBOL symbols[] = { { "NATIONAL", SYM(NATIONAL_SYM)}, { "NATURAL", SYM(NATURAL)}, { "NCHAR", SYM(NCHAR_SYM)}, + { "NESTED", SYM(NESTED_SYM)}, { "NEVER", SYM(NEVER_SYM)}, { "NEW", SYM(NEW_SYM)}, { "NEXT", SYM(NEXT_SYM)}, @@ -453,6 +459,7 @@ SYMBOL symbols[] = { { "OPTIONALLY", SYM(OPTIONALLY)}, { "OR", SYM(OR_SYM)}, { "ORDER", SYM(ORDER_SYM)}, + { "ORDINALITY", SYM(ORDINALITY_SYM)}, { "OTHERS", SYM(OTHERS_MARIADB_SYM)}, { "OUT", SYM(OUT_SYM)}, { "OUTER", SYM(OUTER)}, @@ -466,6 +473,7 @@ SYMBOL symbols[] = { { "PAGE_CHECKSUM", SYM(PAGE_CHECKSUM_SYM)}, { "PARSER", SYM(PARSER_SYM)}, { "PARSE_VCOL_EXPR", SYM(PARSE_VCOL_EXPR_SYM)}, + { "PATH", SYM(PATH_SYM)}, { "PERIOD", SYM(PERIOD_SYM)}, { "PARTIAL", SYM(PARTIAL)}, { "PARTITION", SYM(PARTITION_SYM)}, @@ -554,6 +562,7 @@ SYMBOL symbols[] = { { "ROUTINE", SYM(ROUTINE_SYM)}, { "ROW", SYM(ROW_SYM)}, { "ROWCOUNT", SYM(ROWCOUNT_SYM)}, /* Oracle-N */ + { "ROWNUM", SYM(ROWNUM_SYM)}, /* Oracle-R */ { "ROWS", SYM(ROWS_SYM)}, { "ROWTYPE", SYM(ROWTYPE_MARIADB_SYM)}, { "ROW_COUNT", SYM(ROW_COUNT_SYM)}, @@ -583,6 +592,7 @@ SYMBOL symbols[] = { { "SIGNAL", SYM(SIGNAL_SYM)}, { "SIGNED", SYM(SIGNED_SYM)}, { "SIMPLE", SYM(SIMPLE_SYM)}, + { "SKIP", SYM(SKIP_SYM)}, { "SLAVE", SYM(SLAVE)}, { "SLAVES", 
SYM(SLAVES)}, { "SLAVE_POS", SYM(SLAVE_POS_SYM)}, @@ -640,6 +650,7 @@ SYMBOL symbols[] = { { "SUSPEND", SYM(SUSPEND_SYM)}, { "SWAPS", SYM(SWAPS_SYM)}, { "SWITCHES", SYM(SWITCHES_SYM)}, + { "SYSDATE", SYM(SYSDATE)}, { "SYSTEM", SYM(SYSTEM)}, { "SYSTEM_TIME", SYM(SYSTEM_TIME_SYM)}, { "TABLE", SYM(TABLE_SYM)}, @@ -737,6 +748,7 @@ SYMBOL symbols[] = { SYMBOL sql_functions[] = { { "ADDDATE", SYM(ADDDATE_SYM)}, + { "ADD_MONTHS", SYM(ADD_MONTHS_SYM)}, { "BIT_AND", SYM(BIT_AND)}, { "BIT_OR", SYM(BIT_OR)}, { "BIT_XOR", SYM(BIT_XOR)}, @@ -779,7 +791,6 @@ SYMBOL sql_functions[] = { { "SUBSTR", SYM(SUBSTRING)}, { "SUBSTRING", SYM(SUBSTRING)}, { "SUM", SYM(SUM_SYM)}, - { "SYSDATE", SYM(SYSDATE)}, { "SYSTEM_USER", SYM(USER_SYM)}, { "TRIM", SYM(TRIM)}, { "TRIM_ORACLE", SYM(TRIM_ORACLE)}, diff --git a/sql/lock.cc b/sql/lock.cc index 70b04e53583..1099a5c2fb1 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -143,7 +143,7 @@ lock_tables_check(THD *thd, TABLE **tables, uint count, uint flags) or hold any type of lock in a session, since this would be a DOS attack. */ - if ((t->reginfo.lock_type >= TL_READ_NO_INSERT) + if ((t->reginfo.lock_type >= TL_FIRST_WRITE) || (thd->lex->sql_command == SQLCOM_LOCK_TABLES)) { my_error(ER_CANT_LOCK_LOG_TABLE, MYF(0)); @@ -151,7 +151,7 @@ lock_tables_check(THD *thd, TABLE **tables, uint count, uint flags) } } - if (t->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE) + if (t->reginfo.lock_type >= TL_FIRST_WRITE) { if (t->s->table_category == TABLE_CATEGORY_SYSTEM) system_count++; @@ -173,7 +173,7 @@ lock_tables_check(THD *thd, TABLE **tables, uint count, uint flags) DBUG_ASSERT(t->s->tmp_table || thd->mdl_context.is_lock_owner(MDL_key::TABLE, t->s->db.str, t->s->table_name.str, - t->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE ? + t->reginfo.lock_type >= TL_FIRST_WRITE ? 
MDL_SHARED_WRITE : MDL_SHARED_READ)); /* @@ -182,7 +182,7 @@ lock_tables_check(THD *thd, TABLE **tables, uint count, uint flags) */ if (!(flags & MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY) && !t->s->tmp_table) { - if (t->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE && + if (t->reginfo.lock_type >= TL_FIRST_WRITE && !ignore_read_only && opt_readonly && !thd->slave_thread) { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only"); @@ -390,7 +390,7 @@ static int lock_external(THD *thd, TABLE **tables, uint count) lock_type=F_WRLCK; /* Lock exclusive */ if ((*tables)->db_stat & HA_READ_ONLY || ((*tables)->reginfo.lock_type >= TL_READ && - (*tables)->reginfo.lock_type <= TL_READ_NO_INSERT)) + (*tables)->reginfo.lock_type < TL_FIRST_WRITE)) lock_type=F_RDLCK; if (unlikely((error=(*tables)->file->ha_external_lock(thd,lock_type)))) @@ -484,7 +484,7 @@ int mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock) for (i=found=0 ; i < sql_lock->table_count ; i++) { DBUG_ASSERT(sql_lock->table[i]->lock_position == i); - if ((uint) sql_lock->table[i]->reginfo.lock_type > TL_WRITE_ALLOW_WRITE) + if ((uint) sql_lock->table[i]->reginfo.lock_type >= TL_FIRST_WRITE) { swap_variables(TABLE *, *table, sql_lock->table[i]); table++; @@ -504,7 +504,7 @@ int mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock) THR_LOCK_DATA **lock=sql_lock->locks; for (i=found=0 ; i < sql_lock->lock_count ; i++) { - if (sql_lock->locks[i]->type >= TL_WRITE_ALLOW_WRITE) + if (sql_lock->locks[i]->type >= TL_FIRST_WRITE) { swap_variables(THR_LOCK_DATA *, *lock, sql_lock->locks[i]); lock++; diff --git a/sql/log.cc b/sql/log.cc index 58e644bca4b..4fff1e3d3aa 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -39,6 +39,7 @@ #include "rpl_rli.h" #include "sql_audit.h" #include "mysqld.h" +#include "ddl_log.h" #include <my_dir.h> #include <m_ctype.h> // For test_if_number @@ -62,6 +63,12 @@ #include "wsrep_trans_observer.h" #endif /* WITH_WSREP */ +#ifdef HAVE_REPLICATION +#include "semisync_master.h" +#include 
"semisync_slave.h" +#include <utility> // pair +#endif + /* max size of the log message */ #define MAX_LOG_BUFFER_SIZE 1024 #define MAX_TIME_SIZE 32 @@ -87,16 +94,12 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv); static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv); static bool binlog_savepoint_rollback_can_release_mdl(handlerton *hton, THD *thd); -static int binlog_commit(handlerton *hton, THD *thd, bool all); static int binlog_rollback(handlerton *hton, THD *thd, bool all); static int binlog_prepare(handlerton *hton, THD *thd, bool all); -static int binlog_xa_recover_dummy(handlerton *hton, XID *xid_list, uint len); -static int binlog_commit_by_xid(handlerton *hton, XID *xid); -static int binlog_rollback_by_xid(handlerton *hton, XID *xid); static int binlog_start_consistent_snapshot(handlerton *hton, THD *thd); static int binlog_flush_cache(THD *thd, binlog_cache_mngr *cache_mngr, Log_event *end_ev, bool all, bool using_stmt, - bool using_trx); + bool using_trx, bool is_ro_1pc); static const LEX_CSTRING write_error_msg= { STRING_WITH_LEN("error writing to the binary log") }; @@ -1670,17 +1673,13 @@ int binlog_init(void *p) binlog_hton->savepoint_rollback= binlog_savepoint_rollback; binlog_hton->savepoint_rollback_can_release_mdl= binlog_savepoint_rollback_can_release_mdl; - binlog_hton->commit= binlog_commit; + binlog_hton->commit= [](handlerton *, THD *thd, bool all) { return 0; }; binlog_hton->rollback= binlog_rollback; binlog_hton->drop_table= [](handlerton *, const char*) { return -1; }; if (WSREP_ON || opt_bin_log) { binlog_hton->prepare= binlog_prepare; binlog_hton->start_consistent_snapshot= binlog_start_consistent_snapshot; - binlog_hton->commit_by_xid= binlog_commit_by_xid; - binlog_hton->rollback_by_xid= binlog_rollback_by_xid; - // recover needs to be set to make xa{commit,rollback}_handlerton effective - binlog_hton->recover= binlog_xa_recover_dummy; } binlog_hton->flags= HTON_NOT_USER_SELECTABLE | 
HTON_HIDDEN | HTON_NO_ROLLBACK; return 0; @@ -1747,7 +1746,7 @@ static int binlog_close_connection(handlerton *hton, THD *thd) static int binlog_flush_cache(THD *thd, binlog_cache_mngr *cache_mngr, Log_event *end_ev, bool all, bool using_stmt, - bool using_trx) + bool using_trx, bool is_ro_1pc= false) { int error= 0; DBUG_ENTER("binlog_flush_cache"); @@ -1774,7 +1773,8 @@ binlog_flush_cache(THD *thd, binlog_cache_mngr *cache_mngr, */ error= mysql_bin_log.write_transaction_to_binlog(thd, cache_mngr, end_ev, all, - using_stmt, using_trx); + using_stmt, using_trx, + is_ro_1pc); } else { @@ -1848,7 +1848,8 @@ inline size_t serialize_with_xid(XID *xid, char *buf, nonzero if an error pops up when flushing the cache. */ static inline int -binlog_commit_flush_trx_cache(THD *thd, bool all, binlog_cache_mngr *cache_mngr) +binlog_commit_flush_trx_cache(THD *thd, bool all, binlog_cache_mngr *cache_mngr, + bool ro_1pc) { DBUG_ENTER("binlog_commit_flush_trx_cache"); @@ -1869,7 +1870,7 @@ binlog_commit_flush_trx_cache(THD *thd, bool all, binlog_cache_mngr *cache_mngr) } Query_log_event end_evt(thd, buf, buflen, TRUE, TRUE, TRUE, 0); - DBUG_RETURN(binlog_flush_cache(thd, cache_mngr, &end_evt, all, FALSE, TRUE)); + DBUG_RETURN(binlog_flush_cache(thd, cache_mngr, &end_evt, all, FALSE, TRUE, ro_1pc)); } @@ -1988,24 +1989,18 @@ inline bool is_preparing_xa(THD *thd) static int binlog_prepare(handlerton *hton, THD *thd, bool all) { /* Do nothing unless the transaction is a user XA. */ - return is_preparing_xa(thd) ? binlog_commit(NULL, thd, all) : 0; -} - - -static int binlog_xa_recover_dummy(handlerton *hton __attribute__((unused)), - XID *xid_list __attribute__((unused)), - uint len __attribute__((unused))) -{ - /* Does nothing. */ - return 0; + return is_preparing_xa(thd) ? 
binlog_commit(thd, all, FALSE) : 0; } -static int binlog_commit_by_xid(handlerton *hton, XID *xid) +int binlog_commit_by_xid(handlerton *hton, XID *xid) { int rc= 0; THD *thd= current_thd; + if (thd->is_current_stmt_binlog_disabled()) + return 0; + /* the asserted state can't be reachable with xa commit */ DBUG_ASSERT(!thd->get_stmt_da()->is_error() || thd->get_stmt_da()->sql_errno() != ER_XA_RBROLLBACK); @@ -2023,18 +2018,21 @@ static int binlog_commit_by_xid(handlerton *hton, XID *xid) DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_COMMIT); - rc= binlog_commit(hton, thd, TRUE); + rc= binlog_commit(thd, TRUE, FALSE); thd->ha_data[binlog_hton->slot].ha_info[1].reset(); return rc; } -static int binlog_rollback_by_xid(handlerton *hton, XID *xid) +int binlog_rollback_by_xid(handlerton *hton, XID *xid) { int rc= 0; THD *thd= current_thd; + if (thd->is_current_stmt_binlog_disabled()) + return 0; + if (thd->get_stmt_da()->is_error() && thd->get_stmt_da()->sql_errno() == ER_XA_RBROLLBACK) return rc; @@ -2130,20 +2128,17 @@ static int binlog_commit_flush_xa_prepare(THD *thd, bool all, return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE)); } - /** This function is called once after each statement. It has the responsibility to flush the caches to the binary log on commits. - @param hton The binlog handlerton. @param thd The client thread that executes the transaction. @param all This is @c true if this is a real transaction commit, and @false otherwise. 
- - @see handlerton::commit + @param ro_1pc read-only one-phase commit transaction */ -static int binlog_commit(handlerton *hton, THD *thd, bool all) +int binlog_commit(THD *thd, bool all, bool ro_1pc) { int error= 0; PSI_stage_info org_stage; @@ -2175,7 +2170,6 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) YESNO(thd->transaction->all.modified_non_trans_table), YESNO(thd->transaction->stmt.modified_non_trans_table))); - thd->backup_stage(&org_stage); THD_STAGE_INFO(thd, stage_binlog_write); #ifdef WITH_WSREP @@ -2218,15 +2212,15 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) error= is_xa_prepare ? binlog_commit_flush_xa_prepare(thd, all, cache_mngr) : - binlog_commit_flush_trx_cache (thd, all, cache_mngr); - // the user xa is unlogged on common exec path with the "empty" xa case - if (cache_mngr->need_unlog && !is_xa_prepare) - { - error= - mysql_bin_log.unlog(BINLOG_COOKIE_MAKE(cache_mngr->binlog_id, - cache_mngr->delayed_error), 1); - cache_mngr->need_unlog= false; - } + binlog_commit_flush_trx_cache (thd, all, cache_mngr, ro_1pc); + // the user xa is unlogged on common exec path with the "empty" xa case + if (cache_mngr->need_unlog && !is_xa_prepare) + { + error= + mysql_bin_log.unlog(BINLOG_COOKIE_MAKE(cache_mngr->binlog_id, + cache_mngr->delayed_error), 1); + cache_mngr->need_unlog= false; + } } /* This is part of the stmt rollback. 
@@ -2566,7 +2560,7 @@ int check_binlog_magic(IO_CACHE* log, const char** errmsg) } if (bcmp(magic, BINLOG_MAGIC, sizeof(magic))) { - *errmsg = "Binlog has bad magic number; It's not a binary log file that can be used by this version of MySQL"; + *errmsg = "Binlog has bad magic number; It's not a binary log file that can be used by this version of MariaDB"; return 1; } return 0; @@ -3092,7 +3086,8 @@ void MYSQL_QUERY_LOG::reopen_file() TRUE - error occurred */ -bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host, size_t user_host_len, my_thread_id thread_id_arg, +bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host, + size_t user_host_len, my_thread_id thread_id_arg, const char *command_type, size_t command_type_len, const char *sql_text, size_t sql_text_len) { @@ -3193,7 +3188,8 @@ err: */ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, - const char *user_host, size_t user_host_len, ulonglong query_utime, + const char *user_host, size_t user_host_len, + ulonglong query_utime, ulonglong lock_utime, bool is_command, const char *sql_text, size_t sql_text_len) { @@ -3232,7 +3228,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, my_b_write(&log_file, (uchar*) user_host, user_host_len) || my_b_write(&log_file, (uchar*) "\n", 1)) goto err; - + /* For slow query log */ sprintf(query_time_buff, "%.6f", ulonglong2double(query_utime)/1000000.0); sprintf(lock_time_buff, "%.6f", ulonglong2double(lock_utime)/1000000.0); @@ -6281,7 +6277,8 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd, bool MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone, - bool is_transactional, uint64 commit_id) + bool is_transactional, uint64 commit_id, + bool has_xid, bool is_ro_1pc) { rpl_gtid gtid; uint32 domain_id; @@ -6334,7 +6331,7 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone, Gtid_log_event gtid_event(thd, seq_no, domain_id, standalone, LOG_EVENT_SUPPRESS_USE_F, is_transactional, - commit_id); + commit_id, 
has_xid, is_ro_1pc); /* Write the event to the binary log. */ DBUG_ASSERT(this == &mysql_bin_log); @@ -6525,11 +6522,13 @@ MYSQL_BIN_LOG::bump_seq_no_counter_if_needed(uint32 domain_id, uint64 seq_no) bool MYSQL_BIN_LOG::check_strict_gtid_sequence(uint32 domain_id, uint32 server_id_arg, - uint64 seq_no) + uint64 seq_no, + bool no_error) { return rpl_global_gtid_binlog_state.check_strict_sequence(domain_id, server_id_arg, - seq_no); + seq_no, + no_error); } @@ -7088,10 +7087,11 @@ void MYSQL_BIN_LOG::purge() { mysql_mutex_assert_not_owner(&LOCK_log); #ifdef HAVE_REPLICATION - if (expire_logs_days) + if (binlog_expire_logs_seconds) { DEBUG_SYNC(current_thd, "at_purge_logs_before_date"); - time_t purge_time= my_time(0) - expire_logs_days*24*60*60; + time_t purge_time= my_time(0) - binlog_expire_logs_seconds; + DBUG_EXECUTE_IF("expire_logs_always", { purge_time = my_time(0); }); if (purge_time >= 0) { purge_logs_before_date(purge_time); @@ -7608,7 +7608,8 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd) } void -MYSQL_BIN_LOG::write_binlog_checkpoint_event_already_locked(const char *name_arg, uint len) +MYSQL_BIN_LOG:: +write_binlog_checkpoint_event_already_locked(const char *name_arg, uint len) { my_off_t offset; Binlog_checkpoint_log_event ev(name_arg, len); @@ -7677,7 +7678,8 @@ MYSQL_BIN_LOG::write_transaction_to_binlog(THD *thd, binlog_cache_mngr *cache_mngr, Log_event *end_ev, bool all, bool using_stmt_cache, - bool using_trx_cache) + bool using_trx_cache, + bool is_ro_1pc) { group_commit_entry entry; Ha_trx_info *ha_info; @@ -7706,6 +7708,7 @@ MYSQL_BIN_LOG::write_transaction_to_binlog(THD *thd, entry.using_trx_cache= using_trx_cache; entry.need_unlog= is_preparing_xa(thd); ha_info= all ? 
thd->transaction->all.ha_list : thd->transaction->stmt.ha_list; + entry.ro_1pc= is_ro_1pc; entry.end_event= end_ev; auto has_xid= entry.end_event->get_type_code() == XID_EVENT; @@ -8074,8 +8077,6 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) if (orig_queue == NULL) result= -3; } - else - DBUG_ASSERT(result != -2 && result != -3); #endif /* WITH_WSREP */ if (opt_binlog_commit_wait_count > 0 && orig_queue != NULL) @@ -8383,6 +8384,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) } else { + DEBUG_SYNC(leader->thd, "commit_before_update_binlog_end_pos"); bool any_error= false; mysql_mutex_assert_not_owner(&LOCK_prepare_ordered); @@ -8588,10 +8590,13 @@ MYSQL_BIN_LOG::write_transaction_or_stmt(group_commit_entry *entry, uint64 commit_id) { binlog_cache_mngr *mngr= entry->cache_mngr; + bool has_xid= entry->end_event->get_type_code() == XID_EVENT; + DBUG_ENTER("MYSQL_BIN_LOG::write_transaction_or_stmt"); if (write_gtid_event(entry->thd, is_prepared_xa(entry->thd), - entry->using_trx_cache, commit_id)) + entry->using_trx_cache, commit_id, + has_xid, entry->ro_1pc)) DBUG_RETURN(ER_ERROR_ON_WRITE); if (entry->using_stmt_cache && !mngr->stmt_cache.empty() && @@ -9322,6 +9327,11 @@ TC_LOG::run_commit_ordered(THD *thd, bool all) if (!ht->commit_ordered) continue; ht->commit_ordered(ht, thd, all); + DBUG_EXECUTE_IF("enable_log_write_upto_crash", + { + DBUG_SET_INITIAL("+d,crash_after_log_write_upto"); + sleep(1000); + }); DEBUG_SYNC(thd, "commit_after_run_commit_ordered"); } } @@ -10016,7 +10026,7 @@ err2: err1: sql_print_error("Crash recovery failed. 
Either correct the problem " "(if it's, for example, out of memory error) and restart, " - "or delete tc log and start mysqld with " + "or delete tc log and start server with " "--tc-heuristic-recover={commit|rollback}"); return 1; } @@ -10047,16 +10057,162 @@ int TC_LOG::using_heuristic_recover() sql_print_information("Heuristic crash recovery mode"); if (ha_recover(0)) sql_print_error("Heuristic crash recovery failed"); - sql_print_information("Please restart mysqld without --tc-heuristic-recover"); + sql_print_information("Please restart without --tc-heuristic-recover"); return 1; } /****** transaction coordinator log for 2pc - binlog() based solution ******/ #define TC_LOG_BINLOG MYSQL_BIN_LOG +/** + Truncates the current binlog to specified position. Removes the rest of binlogs + which are present after this binlog file. + + @param truncate_file Holds the binlog name to be truncated + @param truncate_pos Position within binlog from where it needs to + truncated. + + @retval true ok + @retval false error + +*/ +bool MYSQL_BIN_LOG::truncate_and_remove_binlogs(const char *file_name, + my_off_t pos, + rpl_gtid *ptr_gtid) +{ + int error= 0; +#ifdef HAVE_REPLICATION + LOG_INFO log_info; + THD *thd= current_thd; + my_off_t index_file_offset= 0; + File file= -1; + MY_STAT s; + my_off_t old_size; + + if ((error= find_log_pos(&log_info, file_name, 1))) + { + sql_print_error("Failed to locate binary log file:%s." + "Error:%d", file_name, error); + goto end; + } + + while (!(error= find_next_log(&log_info, 1))) + { + if (!index_file_offset) + { + index_file_offset= log_info.index_file_start_offset; + if ((error= open_purge_index_file(TRUE))) + { + sql_print_error("Failed to open purge index " + "file:%s. Error:%d", purge_index_file_name, error); + goto end; + } + } + if ((error= register_purge_index_entry(log_info.log_file_name))) + { + sql_print_error("Failed to copy %s to purge index" + " file. 
Error:%d", log_info.log_file_name, error); + goto end; + } + } + + if (error != LOG_INFO_EOF) + { + sql_print_error("Failed to find the next binlog to " + "add to purge index register. Error:%d", error); + goto end; + } + + if (is_inited_purge_index_file()) + { + if (!index_file_offset) + index_file_offset= log_info.index_file_start_offset; + + if ((error= sync_purge_index_file())) + { + sql_print_error("Failed to flush purge index " + "file. Error:%d", error); + goto end; + } + + // Trim index file + error= mysql_file_chsize(index_file.file, index_file_offset, '\n', + MYF(MY_WME)); + if (!error) + error= mysql_file_sync(index_file.file, MYF(MY_WME|MY_SYNC_FILESIZE)); + if (error) + { + sql_print_error("Failed to truncate binlog index " + "file:%s to offset:%llu. Error:%d", index_file_name, + index_file_offset, error); + goto end; + } + + /* Reset data in old index cache */ + if ((error= reinit_io_cache(&index_file, READ_CACHE, (my_off_t) 0, 0, 1))) + { + sql_print_error("Failed to reinit binlog index " + "file. Error:%d", error); + goto end; + } + + /* Read each entry from purge_index_file and delete the file. */ + if ((error= purge_index_entry(thd, NULL, TRUE))) + { + sql_print_error("Failed to process registered " + "files that would be purged."); + goto end; + } + } + + DBUG_ASSERT(pos); + + if ((file= mysql_file_open(key_file_binlog, file_name, + O_RDWR | O_BINARY, MYF(MY_WME))) < 0) + { + error= 1; + sql_print_error("Failed to open binlog file:%s for " + "truncation.", file_name); + goto end; + } + my_stat(file_name, &s, MYF(0)); + old_size= s.st_size; + clear_inuse_flag_when_closing(file); + /* Change binlog file size to truncate_pos */ + error= mysql_file_chsize(file, pos, 0, MYF(MY_WME)); + if (!error) + error= mysql_file_sync(file, MYF(MY_WME|MY_SYNC_FILESIZE)); + if (error) + { + sql_print_error("Failed to truncate the " + "binlog file:%s to size:%llu. 
Error:%d", + file_name, pos, error); + goto end; + } + else + { + char buf[21]; + longlong10_to_str(ptr_gtid->seq_no, buf, 10); + sql_print_information("Successfully truncated binlog file:%s " + "from previous file size %llu " + "to pos:%llu to remove transactions starting from " + "GTID %u-%u-%s", + file_name, old_size, pos, + ptr_gtid->domain_id, ptr_gtid->server_id, buf); + } + +end: + if (file >= 0) + mysql_file_close(file, MYF(MY_WME)); + + error= error || close_purge_index_file(); +#endif + return error > 0; +} int TC_LOG_BINLOG::open(const char *opt_name) { int error= 1; + DBUG_ENTER("TC_LOG_BINLOG::open"); DBUG_ASSERT(total_ha_2pc > 1); DBUG_ASSERT(opt_name); @@ -10066,7 +10222,7 @@ int TC_LOG_BINLOG::open(const char *opt_name) { /* There was a failure to open the index file, can't open the binlog */ cleanup(); - return 1; + DBUG_RETURN(1); } if (using_heuristic_recover()) @@ -10076,12 +10232,12 @@ int TC_LOG_BINLOG::open(const char *opt_name) open(opt_name, 0, 0, WRITE_CACHE, max_binlog_size, 0, TRUE); mysql_mutex_unlock(&LOCK_log); cleanup(); - return 1; + DBUG_RETURN(1); } error= do_binlog_recovery(opt_name, true); binlog_state_recover_done= true; - return error; + DBUG_RETURN(error); } /** This is called on shutdown, after ha_panic. */ @@ -10535,38 +10691,579 @@ start_binlog_background_thread() return 0; } +#ifdef HAVE_REPLICATION +class Recovery_context +{ +public: + my_off_t prev_event_pos; + rpl_gtid last_gtid; + bool last_gtid_standalone; + bool last_gtid_valid; + bool last_gtid_no2pc; // true when the group does not end with Xid event + uint last_gtid_engines; + Binlog_offset last_gtid_coord; // <binlog id, binlog offset> + /* + When true, it's semisync slave recovery mode + rolls back transactions in doubt and wipes them off from binlog. + The rest of declarations deal with this type of recovery. + */ + bool do_truncate; + /* + transaction-in-doubt's gtid:s. 
`truncate_gtid` is the ultimate value, + if it's non-zero truncation is taking place to start from it. + Its value gets refined throughout binlog scanning conducted with at most + 2 rounds. + When an estimate is done in the 1st round of 2-round recovery its value + gets memorized for possible adoption as the ultimate `truncate_gtid`. + */ + rpl_gtid truncate_gtid, truncate_gtid_1st_round; + /* + the last non-transactional group that is located in binlog + behind truncate_gtid. + */ + rpl_gtid binlog_unsafe_gtid; + char binlog_truncate_file_name[FN_REFLEN] ; + char binlog_unsafe_file_name[FN_REFLEN] ; + /* + When do_truncate is true, the truncate position may not be + found in one round when recovered transactions are multi-engine + or just on different engines. + In the single recoverable engine case `truncate_reset_done` and + therefore `truncate_validated` remains `false` when the last + binlog is the binlog-checkpoint one. + The meaning of `truncate_reset_done` is according to the following example: + Let round = 1, Binlog contains the sequence of replication event groups: + [g1, G2, g3] + where `G` (in capital) stands for committed, `g` for prepared. + g1 is first set as truncation candidate, then G2 reset it to indicate + the actual truncation is behind (to the right of) it. + `truncate_validated` is set to true when `binlog_truncate_pos` (as of `g3`) + won't change. + Observe last_gtid_valid is affected, so in the above example `g1` that + was initially ignored for the gtid binlog state now seeing `G2` + would have to be added to it. See gtid_maybe_to_truncate. + */ + bool truncate_validated; // trued when the truncate position settled + bool truncate_reset_done; // trued when the position is to reevaluate + /* Flags the fact of truncate position estimation is done the 1st round */ + bool truncate_set_in_1st; + /* + Monotonically indexes binlog files in the recovery list. + When the list is "likely" singleton the value is UINT_MAX. 
+ Otherwise enumeration starts with zero for the first file, increments + by one for any next file except for the last file in the list, which + is also the initial binlog file for recovery, + that is enumberated with UINT_MAX. + */ + Binlog_file_id id_binlog; + enum_binlog_checksum_alg checksum_alg; + Binlog_offset binlog_truncate_coord, + binlog_truncate_coord_1st_round; // pair is similar to truncate_gtid + Binlog_offset binlog_unsafe_coord; + /* + Populated at decide_or_assess() with gtid-in-doubt whose + binlog offset greater of equal by that of the current gtid truncate + candidate. + Gets empited by reset_truncate_coord into gtid binlog state. + */ + Dynamic_array<rpl_gtid> *gtid_maybe_to_truncate; + Recovery_context(); + ~Recovery_context() { delete gtid_maybe_to_truncate; } + /* + Completes the recovery procedure. + In the normal case prepared xids gets committed when they also found + in binlog, otherwise they are rolled back. + In the semisync slave case the xids that are located in binlog in + a truncated tail get rolled back, otherwise they are committed. + Both decisions are contingent on safety to truncate. + */ + bool complete(MYSQL_BIN_LOG *log, HASH &xids); + + /* + decides on commit of xid passed through member argument. + In the semisync slave case it assigns binlog coordinate to + any xid that remains in-doubt. Decision on them will be + done after binlog scan rounds. + */ + bool decide_or_assess(xid_recovery_member *member, int round, + Format_description_log_event *fdle, + LOG_INFO *linfo, my_off_t pos); + + /* + Assigns last_gtid and assesses the maximum (in the binlog offset term) + unsafe gtid (group of events). + */ + void process_gtid(int round, Gtid_log_event *gev, LOG_INFO *linfo); + + /* + Compute next action at the end of processing of the current binlog file. + It may increment the round. + When the round turns in the semisync-slave recovery + binlog_id, truncate_validated, truncate_reset_done + gets reset/set for the next round. 
+ Within the 2nd round id_binlog keeps incrementing. + + Passed arguments: + round the current round that *may* be increment here + last_log_name the recovery starting binlog file + binlog_checkpoint_name + binlog checkpoint file + linfo binlog file list struct for next file + log pointer to mysql_bin_log instance + + Returns: 0 when rounds continue, maybe the current one remains + 1 when all rounds are done + */ + int next_binlog_or_round(int& round, + const char *last_log_name, + const char *binlog_checkpoint_name, + LOG_INFO *linfo, MYSQL_BIN_LOG *log); + /* + Relates to the semisync recovery. + Returns true when truncated tail does not contain non-transactional + group of events. + Otherwise returns false. + */ + bool is_safe_to_truncate() + { + return !do_truncate ? true : + (truncate_gtid.seq_no == 0 || // no truncate + binlog_unsafe_coord < binlog_truncate_coord); // or unsafe is earlier + } + + /* + Relates to the semisync recovery. + Is invoked when a standalone or non-2pc group is detected. + Both are unsafe to truncate in the semisync-slave recovery so + the maximum unsafe coordinate may be updated. + In the non-2pc group case though, *exeptionally*, + the no-engine group is considered safe, to be invalidated + to not contribute to binlog state. + */ + void update_binlog_unsafe_coord_if_needed(LOG_INFO *linfo); + + /* + Relates to the semisync recovery. + Is called when a committed or decided to-commit transaction is detected. + Actions: + truncate_gtid then is set to "nil" as indicated by rpl_gtid::seq_no := 0. + truncate_reset_done takes a note of that fact. + binlog_truncate_coord gets reset to the current gtid offset merely to + "suggest" any potential future truncate gtid must have a greater offset. + gtid_maybe_to_truncate gets emptied into gtid binlog state. 
+ + Returns: + false on success, otherwise + true when OOM at rpl_global_gtid_binlog_state insert + */ + bool reset_truncate_coord(my_off_t pos); + + /* + Sets binlog_truncate_pos to the value of the current transaction's gtid. + In multi-engine case that might be just an assessment to be refined + in the current round and confirmed in a next one. + gtid_maybe_to_truncate receives the current gtid as a new element. + Returns + false on success, otherwise + true when OOM at gtid_maybe_to_truncate append + + */ + bool set_truncate_coord(LOG_INFO *linfo, int round, + enum_binlog_checksum_alg fd_checksum_alg); +}; + +bool Recovery_context::complete(MYSQL_BIN_LOG *log, HASH &xids) +{ + if (!do_truncate || is_safe_to_truncate()) + { + uint count_in_prepare= + ha_recover_complete(&xids, + !do_truncate ? NULL : + (truncate_gtid.seq_no > 0 ? + &binlog_truncate_coord : &last_gtid_coord)); + + if (count_in_prepare > 0 && global_system_variables.log_warnings > 2) + { + sql_print_warning("Could not complete %u number of transactions.", + count_in_prepare); + return false; // there's later dry run ha_recover() to error out + } + } + + /* Truncation is not done when there's no transaction to roll back */ + if (do_truncate && truncate_gtid.seq_no > 0) + { + if (is_safe_to_truncate()) + { + if (log->truncate_and_remove_binlogs(binlog_truncate_file_name, + binlog_truncate_coord.second, + &truncate_gtid)) + { + sql_print_error("Failed to truncate the binary log to " + "file:%s pos:%llu.", binlog_truncate_file_name, + binlog_truncate_coord.second); + return true; + } + } + else + { + sql_print_error("Cannot truncate the binary log to file:%s " + "pos:%llu as unsafe statement " + "is found at file:%s pos:%llu which is " + "beyond the truncation position;" + "all transactions in doubt are left intact. 
", + binlog_truncate_file_name, binlog_truncate_coord.second, + binlog_unsafe_file_name, binlog_unsafe_coord.second); + return true; + } + } + + return false; +} + +Recovery_context::Recovery_context() : + prev_event_pos(0), + last_gtid_standalone(false), last_gtid_valid(false), last_gtid_no2pc(false), + last_gtid_engines(0), + do_truncate(rpl_semi_sync_slave_enabled), + truncate_validated(false), truncate_reset_done(false), + truncate_set_in_1st(false), id_binlog(MAX_binlog_id), + checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF), gtid_maybe_to_truncate(NULL) +{ + last_gtid_coord= Binlog_offset(0,0); + binlog_truncate_coord= binlog_truncate_coord_1st_round= Binlog_offset(0,0); + binlog_unsafe_coord= Binlog_offset(0,0); + binlog_truncate_file_name[0]= 0; + binlog_unsafe_file_name [0]= 0; + binlog_unsafe_gtid= truncate_gtid= truncate_gtid_1st_round= rpl_gtid(); + if (do_truncate) + gtid_maybe_to_truncate= new Dynamic_array<rpl_gtid>(16, 16); +} + +bool Recovery_context::reset_truncate_coord(my_off_t pos) +{ + DBUG_ASSERT(binlog_truncate_coord.second == 0 || + last_gtid_coord >= binlog_truncate_coord || + truncate_set_in_1st); + // save as backup to restore at next_binlog_or_round when necessary + if (truncate_set_in_1st && truncate_gtid_1st_round.seq_no == 0) + { + truncate_gtid_1st_round= truncate_gtid; + binlog_truncate_coord_1st_round= binlog_truncate_coord; + } + binlog_truncate_coord= Binlog_offset(id_binlog, pos); + truncate_gtid= rpl_gtid(); + truncate_reset_done= true; + for (uint i= 0; i < gtid_maybe_to_truncate->elements(); i++) + { + rpl_gtid gtid= gtid_maybe_to_truncate->at(i); + if (rpl_global_gtid_binlog_state.update_nolock(>id, false)) + return true; + } + gtid_maybe_to_truncate->clear(); + + return false; +} + +bool Recovery_context::set_truncate_coord(LOG_INFO *linfo, int round, + enum_binlog_checksum_alg fd_checksum) +{ + binlog_truncate_coord= last_gtid_coord; + strmake_buf(binlog_truncate_file_name, linfo->log_file_name); + + truncate_gtid= last_gtid; + 
checksum_alg= fd_checksum; + truncate_set_in_1st= (round == 1); + + return gtid_maybe_to_truncate->append(last_gtid); +} + +bool Recovery_context::decide_or_assess(xid_recovery_member *member, int round, + Format_description_log_event *fdle, + LOG_INFO *linfo, my_off_t pos) +{ + if (member) + { + /* + xid in doubt are resolved as follows: + in_engine_prepare is compared agaist binlogged info to + yield the commit-or-rollback decision in the normal case. + In the semisync-slave recovery the decision is done later + after the binlog scanning has determined the truncation offset. + */ + if (member->in_engine_prepare > last_gtid_engines) + { + char buf[21]; + longlong10_to_str(last_gtid.seq_no, buf, 10); + sql_print_error("Error to recovery multi-engine transaction: " + "the number of engines prepared %u exceeds the " + "respective number %u in its GTID %u-%u-%s " + "located at file:%s pos:%llu", + member->in_engine_prepare, last_gtid_engines, + last_gtid.domain_id, last_gtid.server_id, buf, + linfo->log_file_name, last_gtid_coord.second); + return true; + } + else if (member->in_engine_prepare < last_gtid_engines) + { + DBUG_ASSERT(member->in_engine_prepare > 0); + /* + This is an "unlikely" branch of two or more engines in transaction + that is partially committed, so to complete. + */ + member->decided_to_commit= true; + if (do_truncate) + { + /* Validated truncate at this point can be only in the 2nd round. */ + DBUG_ASSERT(!truncate_validated || + (round == 2 && truncate_set_in_1st && + last_gtid_coord < binlog_truncate_coord)); + /* + Estimated truncate must not be greater than the current one's + offset, unless the turn of the rounds. 
+ */ + DBUG_ASSERT(truncate_validated || + (last_gtid_coord >= binlog_truncate_coord || + (round == 2 && truncate_set_in_1st))); + + if (!truncate_validated && reset_truncate_coord(pos)) + return true; + } + } + else // member->in_engine_prepare == last_gtid_engines + { + if (!do_truncate) // "normal" recovery + { + member->decided_to_commit= true; + } + else + { + member->binlog_coord= last_gtid_coord; + last_gtid_valid= false; + /* + First time truncate position estimate before its validation. + An estimate may change to involve reset_truncate_coord call. + */ + if (!truncate_validated) + { + if (truncate_gtid.seq_no == 0 /* was reset or never set */ || + (truncate_set_in_1st && round == 2 /* reevaluted at round turn */)) + { + if (set_truncate_coord(linfo, round, fdle->checksum_alg)) + return true; + } + else + { + /* Truncate estimate was done ago, this gtid can't improve it. */ + DBUG_ASSERT(last_gtid_coord >= binlog_truncate_coord); + + gtid_maybe_to_truncate->append(last_gtid); + } + + DBUG_ASSERT(member->decided_to_commit == false); // may redecided + } + else + { + /* + binlog truncate was determined, possibly to none, otherwise + its offset greater than that of the current gtid. + */ + DBUG_ASSERT(truncate_gtid.seq_no == 0 || + last_gtid_coord < binlog_truncate_coord); + member->decided_to_commit= true; + } + } + } + } + else if (do_truncate) // "0" < last_gtid_engines + { + /* + Similar to the partial commit branch above. 
+ */ + DBUG_ASSERT(!truncate_validated || last_gtid_coord < binlog_truncate_coord); + DBUG_ASSERT(truncate_validated || + (last_gtid_coord >= binlog_truncate_coord || + (round == 2 && truncate_set_in_1st))); + + if (!truncate_validated && reset_truncate_coord(pos)) + return true; + } + + return false; +} + +void Recovery_context::update_binlog_unsafe_coord_if_needed(LOG_INFO *linfo) +{ + if (!do_truncate) + return; + + if (truncate_gtid.seq_no > 0 && // g1,U2, *not* G1,U2 + last_gtid_coord > binlog_truncate_coord) + { + DBUG_ASSERT(binlog_truncate_coord.second > 0); + /* + Potentially unsafe when the truncate coordinate is not determined, + just detected as unsafe when behind the latter. + */ + if (last_gtid_engines == 0) + { + last_gtid_valid= false; + } + else + { + binlog_unsafe_gtid= last_gtid; + binlog_unsafe_coord= last_gtid_coord; + strmake_buf(binlog_unsafe_file_name, linfo->log_file_name); + } + } +} + +void Recovery_context::process_gtid(int round, Gtid_log_event *gev, + LOG_INFO *linfo) +{ + last_gtid.domain_id= gev->domain_id; + last_gtid.server_id= gev->server_id; + last_gtid.seq_no= gev->seq_no; + last_gtid_engines= gev->extra_engines != UCHAR_MAX ? + gev->extra_engines + 1 : 0; + last_gtid_coord= Binlog_offset(id_binlog, prev_event_pos); + + DBUG_ASSERT(!last_gtid_valid); + DBUG_ASSERT(last_gtid.seq_no != 0); + + if (round == 1 || (do_truncate && !truncate_validated)) + { + DBUG_ASSERT(!last_gtid_valid); + + last_gtid_no2pc= false; + last_gtid_standalone= + (gev->flags2 & Gtid_log_event::FL_STANDALONE) ? true : false; + if (do_truncate && last_gtid_standalone) + update_binlog_unsafe_coord_if_needed(linfo); + /* Update the binlog state with any 'valid' GTID logged after Gtid_list. 
*/ + last_gtid_valid= true; // may flip at Xid when falls to truncate + } +} + +int Recovery_context::next_binlog_or_round(int& round, + const char *last_log_name, + const char *binlog_checkpoint_name, + LOG_INFO *linfo, + MYSQL_BIN_LOG *log) +{ + if (!strcmp(linfo->log_file_name, last_log_name)) + { + /* Exit the loop now at the end of the current round. */ + DBUG_ASSERT(round <= 2); + + if (do_truncate) + { + truncate_validated= truncate_reset_done; + truncate_reset_done= false; + /* + Restore the 1st round saved estimate if it was not refined in the 2nd. + That can only occur in multiple log files context when the inital file + has a truncation candidate (a `g`) and does not have any commited `G`, + *and* other files (binlog-checkpoint one and so on) do not have any + transaction-in-doubt. + */ + if (truncate_gtid.seq_no == 0 && truncate_set_in_1st) + { + DBUG_ASSERT(truncate_gtid_1st_round.seq_no > 0); + + truncate_gtid= truncate_gtid_1st_round; + binlog_truncate_coord= binlog_truncate_coord_1st_round; + } + } + return 1; + } + else if (round == 1) + { + if (do_truncate) + { + truncate_validated= truncate_reset_done; + if (!truncate_validated) + { + rpl_global_gtid_binlog_state.reset_nolock(); + gtid_maybe_to_truncate->clear(); + } + truncate_reset_done= false; + id_binlog= 0; + } + round++; + } + else if (do_truncate) // binlog looping within round 2 + { + id_binlog++; + + DBUG_ASSERT(id_binlog <= MAX_binlog_id); // the assert is "practical" + } + + DBUG_ASSERT(!do_truncate || id_binlog != MAX_binlog_id || + !strcmp(linfo->log_file_name, binlog_checkpoint_name)); + + return 0; +} +#endif +/* + Execute recovery of the binary log + + @param do_xa + if true: Collect all Xid events and call ha_recover(). + if false: Collect only Xid events from Query events. This is + used to disable entries in the ddl recovery log that + are found in the binary log (and thus already executed and + logged and thus don't have to be redone). 
+*/ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, IO_CACHE *first_log, Format_description_log_event *fdle, bool do_xa) { Log_event *ev= NULL; - HASH xids; + HASH xids, ddl_log_ids; MEM_ROOT mem_root; char binlog_checkpoint_name[FN_REFLEN]; bool binlog_checkpoint_found; - bool first_round; IO_CACHE log; File file= -1; const char *errmsg; #ifdef HAVE_REPLICATION - rpl_gtid last_gtid; - bool last_gtid_standalone= false; - bool last_gtid_valid= false; + Recovery_context ctx; #endif + DBUG_ENTER("TC_LOG_BINLOG::recover"); + /* + The for-loop variable is updated by the following rule set: + Initially set to 1. + After the initial binlog file is processed to identify + the Binlog-checkpoint file it is incremented when the latter file + is different from the initial one. Otherwise the only log has been + fully parsed so the for loop exits. + The 2nd round parses all earlier in binlog index order files + starting from the Binlog-checkpoint file. It ends when the initial + binlog file is reached. + */ + int round; if (! fdle->is_valid() || - (do_xa && my_hash_init(key_memory_binlog_recover_exec, &xids, &my_charset_bin, TC_LOG_PAGE_SIZE/3, 0, - sizeof(my_xid), 0, 0, MYF(0)))) + (my_hash_init(key_memory_binlog_recover_exec, &xids, + &my_charset_bin, TC_LOG_PAGE_SIZE/3, 0, + sizeof(my_xid), 0, 0, MYF(0))) || + (my_hash_init(key_memory_binlog_recover_exec, &ddl_log_ids, + &my_charset_bin, 64, 0, + sizeof(my_xid), 0, 0, MYF(0)))) goto err1; - if (do_xa) - init_alloc_root(key_memory_binlog_recover_exec, &mem_root, - TC_LOG_PAGE_SIZE, TC_LOG_PAGE_SIZE, MYF(0)); + init_alloc_root(key_memory_binlog_recover_exec, &mem_root, + TC_LOG_PAGE_SIZE, TC_LOG_PAGE_SIZE, MYF(0)); fdle->flags&= ~LOG_EVENT_BINLOG_IN_USE_F; // abort on the first error + /* finds xids when root is not NULL */ + if (do_xa && ha_recover(&xids, &mem_root)) + goto err1; + /* Scan the binlog for XIDs that need to be committed if still in the prepared stage. 
@@ -10576,10 +11273,9 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, */ binlog_checkpoint_found= false; - first_round= true; - for (;;) + for (round= 1;;) { - while ((ev= Log_event::read_log_event(first_round ? first_log : &log, + while ((ev= Log_event::read_log_event(round == 1 ? first_log : &log, fdle, opt_master_verify_checksum)) && ev->is_valid()) { @@ -10587,19 +11283,48 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, switch (typ) { case XID_EVENT: + if (do_xa) + { + xid_recovery_member *member= + (xid_recovery_member*) + my_hash_search(&xids, (uchar*) &static_cast<Xid_log_event*>(ev)->xid, + sizeof(my_xid)); +#ifndef HAVE_REPLICATION + { + if (member) + member->decided_to_commit= true; + } +#else + if (ctx.decide_or_assess(member, round, fdle, linfo, ev->log_pos)) + goto err2; +#endif + } + break; + case QUERY_EVENT: { - if (do_xa) + Query_log_event *query_ev= (Query_log_event*) ev; + if (query_ev->xid) { - Xid_log_event *xev=(Xid_log_event *)ev; - uchar *x= (uchar *) memdup_root(&mem_root, (uchar*) &xev->xid, - sizeof(xev->xid)); - if (!x || my_hash_insert(&xids, x)) + DBUG_PRINT("QQ", ("xid: %llu", (ulonglong) query_ev->xid)); + DBUG_ASSERT(sizeof(query_ev->xid) == sizeof(my_xid)); + uchar *x= (uchar *) memdup_root(&mem_root, + (uchar*) &query_ev->xid, + sizeof(query_ev->xid)); + if (!x || my_hash_insert(&ddl_log_ids, x)) goto err2; } +#ifdef HAVE_REPLICATION + if (((Query_log_event *)ev)->is_commit() || + ((Query_log_event *)ev)->is_rollback()) + { + ctx.last_gtid_no2pc= true; + ctx.update_binlog_unsafe_coord_if_needed(linfo); + } +#endif break; } case BINLOG_CHECKPOINT_EVENT: - if (first_round && do_xa) + if (round == 1 && do_xa) { size_t dir_len; Binlog_checkpoint_log_event *cev= (Binlog_checkpoint_log_event *)ev; @@ -10619,8 +11344,9 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, } } break; +#ifdef HAVE_REPLICATION case GTID_LIST_EVENT: - if (first_round) + if (round == 1 || (ctx.do_truncate && 
ctx.id_binlog == 0)) { Gtid_list_log_event *glev= (Gtid_list_log_event *)ev; @@ -10630,20 +11356,13 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, } break; -#ifdef HAVE_REPLICATION case GTID_EVENT: - if (first_round) - { - Gtid_log_event *gev= (Gtid_log_event *)ev; - - /* Update the binlog state with any GTID logged after Gtid_list. */ - last_gtid.domain_id= gev->domain_id; - last_gtid.server_id= gev->server_id; - last_gtid.seq_no= gev->seq_no; - last_gtid_standalone= - ((gev->flags2 & Gtid_log_event::FL_STANDALONE) ? true : false); - last_gtid_valid= true; - } + ctx.process_gtid(round, (Gtid_log_event *)ev, linfo); + break; + + case XA_PREPARE_LOG_EVENT: + ctx.last_gtid_no2pc= true; // TODO: complete MDEV-21469 that removes this block + ctx.update_binlog_unsafe_coord_if_needed(linfo); break; #endif @@ -10657,30 +11376,32 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, default: /* Nothing. */ break; - } + } // end of switch #ifdef HAVE_REPLICATION - if (last_gtid_valid && - ((last_gtid_standalone && !ev->is_part_of_group(typ)) || - (!last_gtid_standalone && - (typ == XID_EVENT || - typ == XA_PREPARE_LOG_EVENT || - (LOG_EVENT_IS_QUERY(typ) && - (((Query_log_event *)ev)->is_commit() || - ((Query_log_event *)ev)->is_rollback())))))) + if (ctx.last_gtid_valid && + ((ctx.last_gtid_standalone && !ev->is_part_of_group(typ)) || + (!ctx.last_gtid_standalone && + (typ == XID_EVENT || ctx.last_gtid_no2pc)))) { - if (rpl_global_gtid_binlog_state.update_nolock(&last_gtid, false)) + DBUG_ASSERT(round == 1 || (ctx.do_truncate && !ctx.truncate_validated)); + DBUG_ASSERT(!ctx.last_gtid_no2pc || + (ctx.last_gtid_standalone || + typ == XA_PREPARE_LOG_EVENT || + (LOG_EVENT_IS_QUERY(typ) && + (((Query_log_event *)ev)->is_commit() || + ((Query_log_event *)ev)->is_rollback())))); + + if (rpl_global_gtid_binlog_state.update_nolock(&ctx.last_gtid, false)) goto err2; - last_gtid_valid= false; + ctx.last_gtid_valid= false; } + 
ctx.prev_event_pos= ev->log_pos; #endif - delete ev; ev= NULL; - } + } // end of while - if (!do_xa) - break; /* If the last binlog checkpoint event points to an older log, we have to scan all logs from there also, to get all possible XIDs to recover. @@ -10689,11 +11410,10 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, written by an older version of MariaDB (or MySQL) - these always have an (implicit) binlog checkpoint event at the start of the last binlog file. */ - if (first_round) + if (round == 1) { if (!binlog_checkpoint_found) break; - first_round= false; DBUG_EXECUTE_IF("xa_recover_expect_master_bin_000004", if (0 != strcmp("./master-bin.000004", binlog_checkpoint_name) && 0 != strcmp(".\\master-bin.000004", binlog_checkpoint_name)) @@ -10711,38 +11431,57 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, end_io_cache(&log); mysql_file_close(file, MYF(MY_WME)); file= -1; + /* + NOTE: reading other binlog's FD is necessary for finding out + the checksum status of the respective binlog file. + */ + if (find_next_log(linfo, 1)) + { + sql_print_error("Error reading binlog files during recovery. " + "Aborting."); + goto err2; + } } +#ifdef HAVE_REPLICATION + int rc= ctx.next_binlog_or_round(round, last_log_name, + binlog_checkpoint_name, linfo, this); + if (rc == -1) + goto err2; + else if (rc == 1) + break; // all rounds done +#else if (!strcmp(linfo->log_file_name, last_log_name)) break; // No more files to do + round++; +#endif + if ((file= open_binlog(&log, linfo->log_file_name, &errmsg)) < 0) { sql_print_error("%s", errmsg); goto err2; } - /* - We do not need to read the Format_description_log_event of other binlog - files. It is not possible for a binlog checkpoint to span multiple - binlog files written by different versions of the server. So we can use - the first one read for reading from all binlog files. 
- */ - if (find_next_log(linfo, 1)) - { - sql_print_error("Error reading binlog files during recovery. Aborting."); - goto err2; - } fdle->reset_crypto(); - } + } // end of for if (do_xa) { - if (ha_recover(&xids)) - goto err2; - - free_root(&mem_root, MYF(0)); - my_hash_free(&xids); + if (binlog_checkpoint_found) + { +#ifndef HAVE_REPLICATION + if (ha_recover_complete(&xids)) +#else + if (ctx.complete(this, xids)) +#endif + goto err2; + } } - return 0; + if (ddl_log_close_binlogged_events(&ddl_log_ids)) + goto err2; + free_root(&mem_root, MYF(0)); + my_hash_free(&xids); + my_hash_free(&ddl_log_ids); + DBUG_RETURN(0); err2: delete ev; @@ -10751,20 +11490,20 @@ err2: end_io_cache(&log); mysql_file_close(file, MYF(MY_WME)); } - if (do_xa) - { - free_root(&mem_root, MYF(0)); - my_hash_free(&xids); - } + free_root(&mem_root, MYF(0)); + my_hash_free(&xids); + my_hash_free(&ddl_log_ids); + err1: sql_print_error("Crash recovery failed. Either correct the problem " "(if it's, for example, out of memory error) and restart, " - "or delete (or rename) binary log and start mysqld with " + "or delete (or rename) binary log and start server with " "--tc-heuristic-recover={commit|rollback}"); - return 1; + DBUG_RETURN(1); } + int MYSQL_BIN_LOG::do_binlog_recovery(const char *opt_name, bool do_xa_recovery) { diff --git a/sql/log.h b/sql/log.h index 02c696357b8..13819b73d9d 100644 --- a/sql/log.h +++ b/sql/log.h @@ -476,6 +476,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG /* Flag used to optimise around wait_for_prior_commit. 
*/ bool queued_by_other; ulong binlog_id; + bool ro_1pc; // passes the binlog_cache_mngr::ro_1pc value to Gtid ctor }; /* @@ -814,7 +815,8 @@ public: my_bool *with_annotate= 0); // binary log write bool write_transaction_to_binlog(THD *thd, binlog_cache_mngr *cache_mngr, Log_event *end_ev, bool all, - bool using_stmt_cache, bool using_trx_cache); + bool using_stmt_cache, bool using_trx_cache, + bool is_ro_1pc); bool write_incident_already_locked(THD *thd); bool write_incident(THD *thd); @@ -864,6 +866,9 @@ public: int purge_first_log(Relay_log_info* rli, bool included); int set_purge_index_file_name(const char *base_file_name); int open_purge_index_file(bool destroy); + bool truncate_and_remove_binlogs(const char *truncate_file, + my_off_t truncate_pos, + rpl_gtid *gtid); bool is_inited_purge_index_file(); int close_purge_index_file(); int clean_purge_index_file(); @@ -901,7 +906,8 @@ public: void set_status_variables(THD *thd); bool is_xidlist_idle(); bool write_gtid_event(THD *thd, bool standalone, bool is_transactional, - uint64 commit_id); + uint64 commit_id, + bool has_xid= false, bool ro_1pc= false); int read_state_from_file(); int write_state_to_file(); int get_most_recent_gtid_list(rpl_gtid **list, uint32 *size); @@ -913,7 +919,7 @@ public: bool lookup_domain_in_binlog_state(uint32 domain_id, rpl_gtid *out_gtid); int bump_seq_no_counter_if_needed(uint32 domain_id, uint64 seq_no); bool check_strict_gtid_sequence(uint32 domain_id, uint32 server_id, - uint64 seq_no); + uint64 seq_no, bool no_error= false); /** * used when opening new file, and binlog_end_pos moves backwards @@ -1255,4 +1261,8 @@ class Gtid_list_log_event; const char * get_gtid_list_event(IO_CACHE *cache, Gtid_list_log_event **out_gtid_list); +int binlog_commit(THD *thd, bool all, bool is_ro_1pc= false); +int binlog_commit_by_xid(handlerton *hton, XID *xid); +int binlog_rollback_by_xid(handlerton *hton, XID *xid); + #endif /* LOG_H */ diff --git a/sql/log_event.cc b/sql/log_event.cc index 
8bcdb3d6bae..53785a89f1e 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -55,6 +55,8 @@ #include "rpl_constants.h" #include "sql_digest.h" #include "zlib.h" +#include "myisampack.h" +#include <algorithm> #define my_b_write_string(A, B) my_b_write((A), (uchar*)(B), (uint) (sizeof(B) - 1)) @@ -146,7 +148,7 @@ public: constructor, but it would be possible to create a subclass holding the IO_CACHE itself. */ - Write_on_release_cache(IO_CACHE *cache, FILE *file, flag_set flags = 0, Log_event *ev = NULL) + Write_on_release_cache(IO_CACHE *cache, FILE *file, flag_set flags= 0, Log_event *ev= NULL) : m_cache(cache), m_file(file), m_flags(flags), m_ev(ev) { reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE); @@ -241,13 +243,13 @@ private: read_str() */ -static inline int read_str(const char **buf, const char *buf_end, - const char **str, uint8 *len) +static inline bool read_str(const uchar **buf, const uchar *buf_end, + const char **str, uint8 *len) { - if (*buf + ((uint) (uchar) **buf) >= buf_end) + if (*buf + ((uint) **buf) >= buf_end) return 1; *len= (uint8) **buf; - *str= (*buf)+1; + *str= (char*) (*buf)+1; (*buf)+= (uint) *len+1; return 0; } @@ -307,44 +309,44 @@ uint32 binlog_get_compress_len(uint32 len) return zero if successful, others otherwise. 
*/ -int binlog_buf_compress(const char *src, char *dst, uint32 len, uint32 *comlen) +int binlog_buf_compress(const uchar *src, uchar *dst, uint32 len, uint32 *comlen) { uchar lenlen; if (len & 0xFF000000) { - dst[1] = uchar(len >> 24); - dst[2] = uchar(len >> 16); - dst[3] = uchar(len >> 8); - dst[4] = uchar(len); - lenlen = 4; + dst[1]= uchar(len >> 24); + dst[2]= uchar(len >> 16); + dst[3]= uchar(len >> 8); + dst[4]= uchar(len); + lenlen= 4; } else if (len & 0x00FF0000) { - dst[1] = uchar(len >> 16); - dst[2] = uchar(len >> 8); - dst[3] = uchar(len); - lenlen = 3; + dst[1]= uchar(len >> 16); + dst[2]= uchar(len >> 8); + dst[3]= uchar(len); + lenlen= 3; } else if (len & 0x0000FF00) { - dst[1] = uchar(len >> 8); - dst[2] = uchar(len); - lenlen = 2; + dst[1]= uchar(len >> 8); + dst[2]= uchar(len); + lenlen= 2; } else { - dst[1] = uchar(len); - lenlen = 1; + dst[1]= uchar(len); + lenlen= 1; } - dst[0] = 0x80 | (lenlen & 0x07); + dst[0]= 0x80 | (lenlen & 0x07); - uLongf tmplen = (uLongf)*comlen - BINLOG_COMPRESSED_HEADER_LEN - lenlen - 1; + uLongf tmplen= (uLongf)*comlen - BINLOG_COMPRESSED_HEADER_LEN - lenlen - 1; if (compress((Bytef *)dst + BINLOG_COMPRESSED_HEADER_LEN + lenlen, &tmplen, (const Bytef *)src, (uLongf)len) != Z_OK) { return 1; } - *comlen = (uint32)tmplen + BINLOG_COMPRESSED_HEADER_LEN + lenlen; + *comlen= (uint32)tmplen + BINLOG_COMPRESSED_HEADER_LEN + lenlen; return 0; } @@ -363,16 +365,17 @@ int binlog_buf_compress(const char *src, char *dst, uint32 len, uint32 *comlen) int query_event_uncompress(const Format_description_log_event *description_event, - bool contain_checksum, const char *src, ulong src_len, - char* buf, ulong buf_size, bool* is_malloc, char **dst, + bool contain_checksum, const uchar *src, ulong src_len, + uchar* buf, ulong buf_size, bool* is_malloc, uchar **dst, ulong *newlen) { - ulong len = uint4korr(src + EVENT_LEN_OFFSET); - const char *tmp = src; - const char *end = src + len; + ulong len= uint4korr(src + EVENT_LEN_OFFSET); + 
const uchar *tmp= src; + const uchar *end= src + len; + uchar *new_dst; // bad event - if (src_len < len ) + if (src_len < len) return 1; DBUG_ASSERT((uchar)src[EVENT_TYPE_OFFSET] == QUERY_COMPRESSED_EVENT); @@ -381,97 +384,90 @@ query_event_uncompress(const Format_description_log_event *description_event, uint8 post_header_len= description_event->post_header_len[QUERY_COMPRESSED_EVENT-1]; - *is_malloc = false; + *is_malloc= false; - tmp += common_header_len; + tmp+= common_header_len; // bad event if (end <= tmp) return 1; - uint db_len = (uint)tmp[Q_DB_LEN_OFFSET]; + uint db_len= (uint)tmp[Q_DB_LEN_OFFSET]; uint16 status_vars_len= uint2korr(tmp + Q_STATUS_VARS_LEN_OFFSET); - tmp += post_header_len + status_vars_len + db_len + 1; + tmp+= post_header_len + status_vars_len + db_len + 1; // bad event if (end <= tmp) return 1; - int32 comp_len = (int32)(len - (tmp - src) - - (contain_checksum ? BINLOG_CHECKSUM_LEN : 0)); - uint32 un_len = binlog_get_uncompress_len(tmp); + int32 comp_len= (int32)(len - (tmp - src) - + (contain_checksum ? 
BINLOG_CHECKSUM_LEN : 0)); + uint32 un_len= binlog_get_uncompress_len(tmp); // bad event if (comp_len < 0 || un_len == 0) return 1; - *newlen = (ulong)(tmp - src) + un_len; - if(contain_checksum) - *newlen += BINLOG_CHECKSUM_LEN; + *newlen= (ulong)(tmp - src) + un_len; + if (contain_checksum) + *newlen+= BINLOG_CHECKSUM_LEN; - uint32 alloc_size = (uint32)ALIGN_SIZE(*newlen); - char *new_dst = NULL; - + uint32 alloc_size= (uint32)ALIGN_SIZE(*newlen); if (alloc_size <= buf_size) - { - new_dst = buf; - } + new_dst= buf; else { - new_dst = (char *)my_malloc(PSI_INSTRUMENT_ME, alloc_size, MYF(MY_WME)); + new_dst= (uchar *) my_malloc(PSI_INSTRUMENT_ME, alloc_size, MYF(MY_WME)); if (!new_dst) return 1; - - *is_malloc = true; + *is_malloc= true; } /* copy the head*/ memcpy(new_dst, src , tmp - src); - if (binlog_buf_uncompress(tmp, new_dst + (tmp - src), - comp_len, &un_len)) + if (binlog_buf_uncompress(tmp, new_dst + (tmp - src), comp_len, &un_len)) { if (*is_malloc) + { + *is_malloc= false; my_free(new_dst); - - *is_malloc = false; - + } return 1; } - new_dst[EVENT_TYPE_OFFSET] = QUERY_EVENT; + new_dst[EVENT_TYPE_OFFSET]= QUERY_EVENT; int4store(new_dst + EVENT_LEN_OFFSET, *newlen); - if(contain_checksum) + if (contain_checksum) { - ulong clear_len = *newlen - BINLOG_CHECKSUM_LEN; + ulong clear_len= *newlen - BINLOG_CHECKSUM_LEN; int4store(new_dst + clear_len, my_checksum(0L, (uchar *)new_dst, clear_len)); } - *dst = new_dst; + *dst= new_dst; return 0; } int row_log_event_uncompress(const Format_description_log_event *description_event, - bool contain_checksum, const char *src, ulong src_len, - char* buf, ulong buf_size, bool* is_malloc, char **dst, - ulong *newlen) + bool contain_checksum, const uchar *src, ulong src_len, + uchar* buf, ulong buf_size, bool* is_malloc, + uchar **dst, ulong *newlen) { - Log_event_type type = (Log_event_type)(uchar)src[EVENT_TYPE_OFFSET]; - ulong len = uint4korr(src + EVENT_LEN_OFFSET); - const char *tmp = src; - char *new_dst = NULL; - 
const char *end = tmp + len; + Log_event_type type= (Log_event_type)(uchar)src[EVENT_TYPE_OFFSET]; + ulong len= uint4korr(src + EVENT_LEN_OFFSET); + const uchar *tmp= src; + uchar *new_dst= NULL; + const uchar *end= tmp + len; - // bad event if (src_len < len) - return 1; + return 1; // bad event DBUG_ASSERT(LOG_EVENT_IS_ROW_COMPRESSED(type)); uint8 common_header_len= description_event->common_header_len; uint8 post_header_len= description_event->post_header_len[type-1]; - tmp += common_header_len + ROWS_HEADER_LEN_V1; + tmp+= common_header_len + ROWS_HEADER_LEN_V1; if (post_header_len == ROWS_HEADER_LEN_V2) { /* @@ -479,15 +475,14 @@ row_log_event_uncompress(const Format_description_log_event *description_event, which includes length bytes */ - // bad event if (end - tmp <= 2) - return 1; + return 1; // bad event uint16 var_header_len= uint2korr(tmp); DBUG_ASSERT(var_header_len >= 2); /* skip over var-len header, extracting 'chunks' */ - tmp += var_header_len; + tmp+= var_header_len; /* get the uncompressed event type */ type= @@ -500,51 +495,46 @@ row_log_event_uncompress(const Format_description_log_event *description_event, (type - WRITE_ROWS_COMPRESSED_EVENT_V1 + WRITE_ROWS_EVENT_V1); } - //bad event if (end <= tmp) - return 1; + return 1; //bad event - ulong m_width = net_field_length((uchar **)&tmp); - tmp += (m_width + 7) / 8; + ulong m_width= net_field_length((uchar **)&tmp); + tmp+= (m_width + 7) / 8; if (type == UPDATE_ROWS_EVENT_V1 || type == UPDATE_ROWS_EVENT) { - tmp += (m_width + 7) / 8; + tmp+= (m_width + 7) / 8; } - //bad event if (end <= tmp) - return 1; + return 1; //bad event - uint32 un_len = binlog_get_uncompress_len(tmp); - //bad event + uint32 un_len= binlog_get_uncompress_len(tmp); if (un_len == 0) - return 1; + return 1; //bad event - int32 comp_len = (int32)(len - (tmp - src) - - (contain_checksum ? BINLOG_CHECKSUM_LEN : 0)); - //bad event + int32 comp_len= (int32)(len - (tmp - src) - + (contain_checksum ? 
BINLOG_CHECKSUM_LEN : 0)); if (comp_len <=0) - return 1; + return 1; //bad event - *newlen = ulong(tmp - src) + un_len; - if(contain_checksum) - *newlen += BINLOG_CHECKSUM_LEN; + *newlen= ulong(tmp - src) + un_len; + if (contain_checksum) + *newlen+= BINLOG_CHECKSUM_LEN; - size_t alloc_size = ALIGN_SIZE(*newlen); + size_t alloc_size= ALIGN_SIZE(*newlen); - *is_malloc = false; + *is_malloc= false; if (alloc_size <= buf_size) { - new_dst = buf; + new_dst= buf; } else { - new_dst = (char *)my_malloc(PSI_INSTRUMENT_ME, alloc_size, MYF(MY_WME)); + new_dst= (uchar*) my_malloc(PSI_INSTRUMENT_ME, alloc_size, MYF(MY_WME)); if (!new_dst) return 1; - - *is_malloc = true; + *is_malloc= true; } /* Copy the head. */ @@ -555,18 +545,18 @@ row_log_event_uncompress(const Format_description_log_event *description_event, { if (*is_malloc) my_free(new_dst); - return 1; } - new_dst[EVENT_TYPE_OFFSET] = type; + new_dst[EVENT_TYPE_OFFSET]= type; int4store(new_dst + EVENT_LEN_OFFSET, *newlen); - if(contain_checksum){ - ulong clear_len = *newlen - BINLOG_CHECKSUM_LEN; + if (contain_checksum) + { + ulong clear_len= *newlen - BINLOG_CHECKSUM_LEN; int4store(new_dst + clear_len, my_checksum(0L, (uchar *)new_dst, clear_len)); } - *dst = new_dst; + *dst= new_dst; return 0; } @@ -575,33 +565,33 @@ row_log_event_uncompress(const Format_description_log_event *description_event, return 0 means error. 
*/ -uint32 binlog_get_uncompress_len(const char *buf) +uint32 binlog_get_uncompress_len(const uchar *buf) { - uint32 len = 0; - uint32 lenlen = 0; + uint32 len, lenlen; if ((buf == NULL) || ((buf[0] & 0xe0) != 0x80)) - return len; + return 0; - lenlen = buf[0] & 0x07; + lenlen= buf[0] & 0x07; - switch(lenlen) - { + buf++; + /* Length is stored in high byte first order, like myisam keys */ + switch(lenlen) { case 1: - len = uchar(buf[1]); + len= buf[0]; break; case 2: - len = uchar(buf[1]) << 8 | uchar(buf[2]); + len= mi_uint2korr(buf); break; case 3: - len = uchar(buf[1]) << 16 | uchar(buf[2]) << 8 | uchar(buf[3]); + len= mi_uint3korr(buf); break; case 4: - len = uchar(buf[1]) << 24 | uchar(buf[2]) << 16 | - uchar(buf[3]) << 8 | uchar(buf[4]); + len= mi_uint4korr(buf); break; default: DBUG_ASSERT(lenlen >= 1 && lenlen <= 4); + len= 0; break; } return len; @@ -618,27 +608,22 @@ uint32 binlog_get_uncompress_len(const char *buf) return zero if successful, others otherwise. */ -int binlog_buf_uncompress(const char *src, char *dst, uint32 len, +int binlog_buf_uncompress(const uchar *src, uchar *dst, uint32 len, uint32 *newlen) { - if((src[0] & 0x80) == 0) - { + if ((src[0] & 0x80) == 0) return 1; - } uint32 lenlen= src[0] & 0x07; - uLongf buflen= *newlen; + uLongf buflen= *newlen; // zlib type - uint32 alg = (src[0] & 0x70) >> 4; - switch(alg) - { + uint32 alg= (src[0] & 0x70) >> 4; + switch(alg) { case 0: // zlib - if(uncompress((Bytef *)dst, &buflen, + if (uncompress((Bytef *)dst, &buflen, (const Bytef*)src + 1 + lenlen, len - 1 - lenlen) != Z_OK) - { return 1; - } break; default: //TODO @@ -647,7 +632,7 @@ int binlog_buf_uncompress(const char *src, char *dst, uint32 len, } DBUG_ASSERT(*newlen == (uint32)buflen); - *newlen = (uint32)buflen; + *newlen= (uint32)buflen; return 0; } @@ -731,17 +716,17 @@ const char* Log_event::get_type_str() Log_event::Log_event() */ -Log_event::Log_event(const char* buf, +Log_event::Log_event(const uchar *buf, const 
Format_description_log_event* description_event) :temp_buf(0), exec_time(0), cache_type(Log_event::EVENT_INVALID_CACHE), checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF) { #ifndef MYSQL_CLIENT - thd = 0; + thd= 0; #endif - when = uint4korr(buf); + when= uint4korr(buf); when_sec_part= ~0UL; - server_id = uint4korr(buf + SERVER_ID_OFFSET); + server_id= uint4korr(buf + SERVER_ID_OFFSET); data_written= uint4korr(buf + EVENT_LEN_OFFSET); if (description_event->binlog_version==1) { @@ -879,7 +864,7 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, WolfSSL reads memory out of bounds with decryption/NOPAD) We allocate a little more memory therefore. */ - sz += MY_AES_BLOCK_SIZE; + sz+= MY_AES_BLOCK_SIZE; #endif char *newpkt= (char*)my_malloc(PSI_INSTRUMENT_ME, sz, MYF(MY_WME)); if (!newpkt) @@ -912,10 +897,10 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, { /* Corrupt the event for Dump thread*/ DBUG_EXECUTE_IF("corrupt_read_log_event2", - uchar *debug_event_buf_c = (uchar*) packet->ptr() + ev_offset; + uchar *debug_event_buf_c= (uchar*) packet->ptr() + ev_offset; if (debug_event_buf_c[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT) { - int debug_cor_pos = rand() % (data_len - BINLOG_CHECKSUM_LEN); + int debug_cor_pos= rand() % (data_len - BINLOG_CHECKSUM_LEN); debug_event_buf_c[debug_cor_pos] =~ debug_event_buf_c[debug_cor_pos]; DBUG_PRINT("info", ("Corrupt the event at Log_event::read_log_event: byte on position %d", debug_cor_pos)); DBUG_SET("-d,corrupt_read_log_event2"); @@ -969,9 +954,9 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, goto err; } - if ((res= read_log_event(event.ptr(), event.length(), + if ((res= read_log_event((uchar*) event.ptr(), event.length(), &error, fdle, crc_check))) - res->register_temp_buf(event.release(), true); + res->register_temp_buf((uchar*) event.release(), true); err: if (unlikely(error)) @@ -1007,8 +992,8 @@ err: constructors. 
*/ -Log_event* Log_event::read_log_event(const char* buf, uint event_len, - const char **error, +Log_event* Log_event::read_log_event(const uchar *buf, uint event_len, + const char **error, const Format_description_log_event *fdle, my_bool crc_check) { @@ -1029,7 +1014,7 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, DBUG_RETURN(NULL); // general sanity check - will fail on a partial read } - uint event_type= (uchar)buf[EVENT_TYPE_OFFSET]; + uint event_type= buf[EVENT_TYPE_OFFSET]; // all following START events in the current file are without checksum if (event_type == START_EVENT_V3) (const_cast< Format_description_log_event *>(fdle))->checksum_alg= BINLOG_CHECKSUM_ALG_OFF; @@ -1058,15 +1043,14 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, DBUG_EXECUTE_IF("corrupt_read_log_event_char", if (event_type != FORMAT_DESCRIPTION_EVENT) { - char *debug_event_buf_c = (char *)buf; - int debug_cor_pos = rand() % (event_len - BINLOG_CHECKSUM_LEN); - debug_event_buf_c[debug_cor_pos] =~ debug_event_buf_c[debug_cor_pos]; + uchar *debug_event_buf_c= const_cast<uchar*>(buf); + int debug_cor_pos= rand() % (event_len - BINLOG_CHECKSUM_LEN); + debug_event_buf_c[debug_cor_pos]=~ debug_event_buf_c[debug_cor_pos]; DBUG_PRINT("info", ("Corrupt the event at Log_event::read_log_event(char*,...): byte on position %d", debug_cor_pos)); DBUG_SET("-d,corrupt_read_log_event_char"); } ); - if (crc_check && - event_checksum_test((uchar *) buf, event_len, alg)) + if (crc_check && event_checksum_test(const_cast<uchar*>(buf), event_len, alg)) { #ifdef MYSQL_CLIENT *error= "Event crc check failed! 
Most likely there is event corruption."; @@ -1138,100 +1122,100 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, } switch(event_type) { case QUERY_EVENT: - ev = new Query_log_event(buf, event_len, fdle, QUERY_EVENT); + ev= new Query_log_event(buf, event_len, fdle, QUERY_EVENT); break; case QUERY_COMPRESSED_EVENT: - ev = new Query_compressed_log_event(buf, event_len, fdle, + ev= new Query_compressed_log_event(buf, event_len, fdle, QUERY_COMPRESSED_EVENT); break; case LOAD_EVENT: - ev = new Load_log_event(buf, event_len, fdle); + ev= new Load_log_event(buf, event_len, fdle); break; case NEW_LOAD_EVENT: - ev = new Load_log_event(buf, event_len, fdle); + ev= new Load_log_event(buf, event_len, fdle); break; case ROTATE_EVENT: - ev = new Rotate_log_event(buf, event_len, fdle); + ev= new Rotate_log_event(buf, event_len, fdle); break; case BINLOG_CHECKPOINT_EVENT: - ev = new Binlog_checkpoint_log_event(buf, event_len, fdle); + ev= new Binlog_checkpoint_log_event(buf, event_len, fdle); break; case GTID_EVENT: - ev = new Gtid_log_event(buf, event_len, fdle); + ev= new Gtid_log_event(buf, event_len, fdle); break; case GTID_LIST_EVENT: - ev = new Gtid_list_log_event(buf, event_len, fdle); + ev= new Gtid_list_log_event(buf, event_len, fdle); break; case CREATE_FILE_EVENT: - ev = new Create_file_log_event(buf, event_len, fdle); + ev= new Create_file_log_event(buf, event_len, fdle); break; case APPEND_BLOCK_EVENT: - ev = new Append_block_log_event(buf, event_len, fdle); + ev= new Append_block_log_event(buf, event_len, fdle); break; case DELETE_FILE_EVENT: - ev = new Delete_file_log_event(buf, event_len, fdle); + ev= new Delete_file_log_event(buf, event_len, fdle); break; case EXEC_LOAD_EVENT: - ev = new Execute_load_log_event(buf, event_len, fdle); + ev= new Execute_load_log_event(buf, event_len, fdle); break; case START_EVENT_V3: /* this is sent only by MySQL <=4.x */ - ev = new Start_log_event_v3(buf, event_len, fdle); + ev= new Start_log_event_v3(buf, 
event_len, fdle); break; case STOP_EVENT: - ev = new Stop_log_event(buf, fdle); + ev= new Stop_log_event(buf, fdle); break; case INTVAR_EVENT: - ev = new Intvar_log_event(buf, fdle); + ev= new Intvar_log_event(buf, fdle); break; case XID_EVENT: - ev = new Xid_log_event(buf, fdle); + ev= new Xid_log_event(buf, fdle); break; case XA_PREPARE_LOG_EVENT: - ev = new XA_prepare_log_event(buf, fdle); + ev= new XA_prepare_log_event(buf, fdle); break; case RAND_EVENT: - ev = new Rand_log_event(buf, fdle); + ev= new Rand_log_event(buf, fdle); break; case USER_VAR_EVENT: - ev = new User_var_log_event(buf, event_len, fdle); + ev= new User_var_log_event(buf, event_len, fdle); break; case FORMAT_DESCRIPTION_EVENT: - ev = new Format_description_log_event(buf, event_len, fdle); + ev= new Format_description_log_event(buf, event_len, fdle); break; #if defined(HAVE_REPLICATION) case PRE_GA_WRITE_ROWS_EVENT: - ev = new Write_rows_log_event_old(buf, event_len, fdle); + ev= new Write_rows_log_event_old(buf, event_len, fdle); break; case PRE_GA_UPDATE_ROWS_EVENT: - ev = new Update_rows_log_event_old(buf, event_len, fdle); + ev= new Update_rows_log_event_old(buf, event_len, fdle); break; case PRE_GA_DELETE_ROWS_EVENT: - ev = new Delete_rows_log_event_old(buf, event_len, fdle); + ev= new Delete_rows_log_event_old(buf, event_len, fdle); break; case WRITE_ROWS_EVENT_V1: case WRITE_ROWS_EVENT: - ev = new Write_rows_log_event(buf, event_len, fdle); + ev= new Write_rows_log_event(buf, event_len, fdle); break; case UPDATE_ROWS_EVENT_V1: case UPDATE_ROWS_EVENT: - ev = new Update_rows_log_event(buf, event_len, fdle); + ev= new Update_rows_log_event(buf, event_len, fdle); break; case DELETE_ROWS_EVENT_V1: case DELETE_ROWS_EVENT: - ev = new Delete_rows_log_event(buf, event_len, fdle); + ev= new Delete_rows_log_event(buf, event_len, fdle); break; case WRITE_ROWS_COMPRESSED_EVENT: case WRITE_ROWS_COMPRESSED_EVENT_V1: - ev = new Write_rows_compressed_log_event(buf, event_len, fdle); + ev= new 
Write_rows_compressed_log_event(buf, event_len, fdle); break; case UPDATE_ROWS_COMPRESSED_EVENT: case UPDATE_ROWS_COMPRESSED_EVENT_V1: - ev = new Update_rows_compressed_log_event(buf, event_len, fdle); + ev= new Update_rows_compressed_log_event(buf, event_len, fdle); break; case DELETE_ROWS_COMPRESSED_EVENT: case DELETE_ROWS_COMPRESSED_EVENT_V1: - ev = new Delete_rows_compressed_log_event(buf, event_len, fdle); + ev= new Delete_rows_compressed_log_event(buf, event_len, fdle); break; /* MySQL GTID events are ignored */ @@ -1245,23 +1229,23 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, break; case TABLE_MAP_EVENT: - ev = new Table_map_log_event(buf, event_len, fdle); + ev= new Table_map_log_event(buf, event_len, fdle); break; #endif case BEGIN_LOAD_QUERY_EVENT: - ev = new Begin_load_query_log_event(buf, event_len, fdle); + ev= new Begin_load_query_log_event(buf, event_len, fdle); break; case EXECUTE_LOAD_QUERY_EVENT: ev= new Execute_load_query_log_event(buf, event_len, fdle); break; case INCIDENT_EVENT: - ev = new Incident_log_event(buf, event_len, fdle); + ev= new Incident_log_event(buf, event_len, fdle); break; case ANNOTATE_ROWS_EVENT: - ev = new Annotate_rows_log_event(buf, event_len, fdle); + ev= new Annotate_rows_log_event(buf, event_len, fdle); break; case START_ENCRYPTION_EVENT: - ev = new Start_encryption_log_event(buf, event_len, fdle); + ev= new Start_encryption_log_event(buf, event_len, fdle); break; default: DBUG_PRINT("error",("Unknown event code: %d", @@ -1365,8 +1349,7 @@ get_str_len_and_pointer(const Log_event::Byte **src, return 0; } -static void copy_str_and_move(const char **src, - Log_event::Byte **dst, +static void copy_str_and_move(const char **src, Log_event::Byte **dst, size_t len) { memcpy(*dst, *src, len); @@ -1394,6 +1377,7 @@ code_name(int code) case Q_TABLE_MAP_FOR_UPDATE_CODE: return "Q_TABLE_MAP_FOR_UPDATE_CODE"; case Q_MASTER_DATA_WRITTEN_CODE: return "Q_MASTER_DATA_WRITTEN_CODE"; case Q_HRNOW: return 
"Q_HRNOW"; + case Q_XID: return "XID"; } sprintf(buf, "CODE#%d", code); return buf; @@ -1433,7 +1417,7 @@ code_name(int code) /** This is used by the SQL slave thread to prepare the event before execution. */ -Query_log_event::Query_log_event(const char* buf, uint event_len, +Query_log_event::Query_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event, Log_event_type event_type) @@ -1442,7 +1426,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, flags2_inited(0), sql_mode_inited(0), charset_inited(0), flags2(0), auto_increment_increment(1), auto_increment_offset(1), time_zone_len(0), lc_time_names_number(0), charset_database_number(0), - table_map_for_update(0), master_data_written(0) + table_map_for_update(0), xid(0), master_data_written(0) { ulong data_len; uint32 tmp; @@ -1466,13 +1450,13 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, */ if (event_len < (uint)(common_header_len + post_header_len)) DBUG_VOID_RETURN; - data_len = event_len - (common_header_len + post_header_len); + data_len= event_len - (common_header_len + post_header_len); buf+= common_header_len; - thread_id = slave_proxy_id = uint4korr(buf + Q_THREAD_ID_OFFSET); - exec_time = uint4korr(buf + Q_EXEC_TIME_OFFSET); - db_len = (uchar)buf[Q_DB_LEN_OFFSET]; // TODO: add a check of all *_len vars - error_code = uint2korr(buf + Q_ERR_CODE_OFFSET); + thread_id= slave_proxy_id= uint4korr(buf + Q_THREAD_ID_OFFSET); + exec_time= uint4korr(buf + Q_EXEC_TIME_OFFSET); + db_len= buf[Q_DB_LEN_OFFSET]; // TODO: add a check of all *_len vars + error_code= uint2korr(buf + Q_ERR_CODE_OFFSET); /* 5.0 format starts here. 
@@ -1626,6 +1610,13 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, pos+= 3; break; } + case Q_XID: + { + CHECK_SPACE(pos, end, 8); + xid= uint8korr(pos); + pos+= 8; + break; + } default: /* That's why you must write status vars in growing order of code */ DBUG_PRINT("info",("Query_log_event has unknown status vars (first has\ @@ -1675,7 +1666,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, */ #if !defined(MYSQL_CLIENT) && defined(HAVE_QUERY_CACHE) - if (!(start= data_buf = (Log_event::Byte*) my_malloc(PSI_INSTRUMENT_ME, + if (!(start= data_buf= (Log_event::Byte*) my_malloc(PSI_INSTRUMENT_ME, catalog_len + 1 + time_zone_len + 1 + user.length + 1 @@ -1687,7 +1678,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, + QUERY_CACHE_FLAGS_SIZE, MYF(MY_WME)))) #else - if (!(start= data_buf = (Log_event::Byte*) my_malloc(PSI_INSTRUMENT_ME, + if (!(start= data_buf= (Log_event::Byte*) my_malloc(PSI_INSTRUMENT_ME, catalog_len + 1 + time_zone_len + 1 + user.length + 1 @@ -1756,11 +1747,10 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, DBUG_VOID_RETURN; } - uint32 max_length= uint32(event_len - ((const char*)(end + db_len + 1) - + uint32 max_length= uint32(event_len - ((end + db_len + 1) - (buf - common_header_len))); if (q_len != max_length || - (event_len < uint((const char*)(end + db_len + 1) - - (buf - common_header_len)))) + (event_len < uint((end + db_len + 1) - (buf - common_header_len)))) { q_len= 0; query= NULL; @@ -1777,7 +1767,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, DBUG_VOID_RETURN; } -Query_compressed_log_event::Query_compressed_log_event(const char *buf, +Query_compressed_log_event::Query_compressed_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event, @@ -1785,24 +1775,24 @@ Query_compressed_log_event::Query_compressed_log_event(const char *buf, :Query_log_event(buf, event_len, description_event, 
event_type), query_buf(NULL) { - if(query) + if (query) { - uint32 un_len=binlog_get_uncompress_len(query); + uint32 un_len= binlog_get_uncompress_len((uchar*) query); if (!un_len) { - query = 0; + query= 0; return; } /* Reserve one byte for '\0' */ - query_buf = (Log_event::Byte*)my_malloc(PSI_INSTRUMENT_ME, + query_buf= (Log_event::Byte*) my_malloc(PSI_INSTRUMENT_ME, ALIGN_SIZE(un_len + 1), MYF(MY_WME)); - if(query_buf && - !binlog_buf_uncompress(query, (char *)query_buf, q_len, &un_len)) + if (query_buf && !binlog_buf_uncompress((uchar*) query, (uchar *) query_buf, + q_len, &un_len)) { - query_buf[un_len] = 0; - query = (const char *)query_buf; - q_len = un_len; + query_buf[un_len]= 0; + query= (char*) query_buf; + q_len= un_len; } else { @@ -1811,6 +1801,7 @@ Query_compressed_log_event::Query_compressed_log_event(const char *buf, } } + /* Replace a binlog event read into a packet with a dummy event. Either a Query_log_event that has just a comment, or if that will not fit in the @@ -1990,7 +1981,7 @@ Query_log_event::begin_event(String *packet, ulong ev_offset, **************************************************************************/ -Start_log_event_v3::Start_log_event_v3(const char* buf, uint event_len, +Start_log_event_v3::Start_log_event_v3(const uchar *buf, uint event_len, const Format_description_log_event *description_event) :Log_event(buf, description_event), binlog_version(BINLOG_VERSION) @@ -2088,9 +2079,9 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver) Hence, we need to be assign some value here, to avoid reading uninitialized memory when the array is written to disk. 
*/ - post_header_len[PRE_GA_WRITE_ROWS_EVENT-1] = 0; - post_header_len[PRE_GA_UPDATE_ROWS_EVENT-1] = 0; - post_header_len[PRE_GA_DELETE_ROWS_EVENT-1] = 0; + post_header_len[PRE_GA_WRITE_ROWS_EVENT-1]= 0; + post_header_len[PRE_GA_UPDATE_ROWS_EVENT-1]= 0; + post_header_len[PRE_GA_DELETE_ROWS_EVENT-1]= 0; post_header_len[TABLE_MAP_EVENT-1]= TABLE_MAP_HEADER_LEN; post_header_len[WRITE_ROWS_EVENT_V1-1]= ROWS_HEADER_LEN_V1; @@ -2223,10 +2214,8 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver) */ Format_description_log_event:: -Format_description_log_event(const char* buf, - uint event_len, - const - Format_description_log_event* +Format_description_log_event(const uchar *buf, uint event_len, + const Format_description_log_event* description_event) :Start_log_event_v3(buf, event_len, description_event), common_header_len(0), post_header_len(NULL), event_type_permutation(0) @@ -2379,7 +2368,7 @@ Format_description_log_event::is_version_before_checksum(const master_version_sp checksum-unaware (effectively no checksum) and the actuall [1-254] range alg descriptor. 
*/ -enum enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len) +enum enum_binlog_checksum_alg get_checksum_alg(const uchar *buf, ulong len) { enum enum_binlog_checksum_alg ret; char version[ST_SERVER_VER_LEN]; @@ -2402,17 +2391,17 @@ enum enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len) DBUG_RETURN(ret); } -Start_encryption_log_event::Start_encryption_log_event( - const char* buf, uint event_len, - const Format_description_log_event* description_event) +Start_encryption_log_event:: +Start_encryption_log_event(const uchar *buf, uint event_len, + const Format_description_log_event* description_event) :Log_event(buf, description_event) { if ((int)event_len == LOG_EVENT_MINIMAL_HEADER_LEN + Start_encryption_log_event::get_data_size()) { - buf += LOG_EVENT_MINIMAL_HEADER_LEN; - crypto_scheme = *(uchar*)buf; - key_version = uint4korr(buf + BINLOG_CRYPTO_SCHEME_LENGTH); + buf+= LOG_EVENT_MINIMAL_HEADER_LEN; + crypto_scheme= *buf; + key_version= uint4korr(buf + BINLOG_CRYPTO_SCHEME_LENGTH); memcpy(nonce, buf + BINLOG_CRYPTO_SCHEME_LENGTH + BINLOG_KEY_VERSION_LENGTH, BINLOG_NONCE_LENGTH); @@ -2422,7 +2411,7 @@ Start_encryption_log_event::Start_encryption_log_event( } - /************************************************************************** +/************************************************************************** Load_log_event methods General note about Load_log_event: the binlogging of LOAD DATA INFILE is going to be changed in 5.0 (or maybe in 5.1; not decided yet). @@ -2437,16 +2426,18 @@ Start_encryption_log_event::Start_encryption_log_event( Note that I (Guilhem) manually tested replication of a big LOAD DATA INFILE between 3.23 and 5.0, and between 4.0 and 5.0, and it works fine (and the positions displayed in SHOW SLAVE STATUS then are fine too). 
- **************************************************************************/ +**************************************************************************/ /** @note - The caller must do buf[event_len] = 0 before he starts using the + The caller must do buf[event_len]= 0 before he starts using the constructed event. */ -Load_log_event::Load_log_event(const char *buf, uint event_len, - const Format_description_log_event *description_event) + +Load_log_event::Load_log_event(const uchar *buf, uint event_len, + const Format_description_log_event + *description_event) :Log_event(buf, description_event), num_fields(0), fields(0), field_lens(0),field_block_len(0), table_name(0), db(0), fname(0), local_fname(FALSE), @@ -2478,52 +2469,51 @@ Load_log_event::Load_log_event(const char *buf, uint event_len, Load_log_event::copy_log_event() */ -int Load_log_event::copy_log_event(const char *buf, ulong event_len, +int Load_log_event::copy_log_event(const uchar *buf, ulong event_len, int body_offset, - const Format_description_log_event *description_event) + const Format_description_log_event + *description_event) { DBUG_ENTER("Load_log_event::copy_log_event"); uint data_len; if ((int) event_len <= body_offset) DBUG_RETURN(1); - char* buf_end = (char*)buf + event_len; + const uchar *buf_end= buf + event_len; /* this is the beginning of the post-header */ - const char* data_head = buf + description_event->common_header_len; + const uchar *data_head= buf + description_event->common_header_len; thread_id= slave_proxy_id= uint4korr(data_head + L_THREAD_ID_OFFSET); - exec_time = uint4korr(data_head + L_EXEC_TIME_OFFSET); - skip_lines = uint4korr(data_head + L_SKIP_LINES_OFFSET); - table_name_len = (uint)data_head[L_TBL_LEN_OFFSET]; - db_len = (uint)data_head[L_DB_LEN_OFFSET]; - num_fields = uint4korr(data_head + L_NUM_FIELDS_OFFSET); + exec_time= uint4korr(data_head + L_EXEC_TIME_OFFSET); + skip_lines= uint4korr(data_head + L_SKIP_LINES_OFFSET); + table_name_len= 
(uint)data_head[L_TBL_LEN_OFFSET]; + db_len= (uint)data_head[L_DB_LEN_OFFSET]; + num_fields= uint4korr(data_head + L_NUM_FIELDS_OFFSET); /* Sql_ex.init() on success returns the pointer to the first byte after the sql_ex structure, which is the start of field lengths array. */ - if (!(field_lens= (uchar*)sql_ex.init((char*)buf + body_offset, - buf_end, - (uchar)buf[EVENT_TYPE_OFFSET] != LOAD_EVENT))) + if (!(field_lens= (uchar*) sql_ex.init(buf + body_offset, buf_end, + buf[EVENT_TYPE_OFFSET] != LOAD_EVENT))) DBUG_RETURN(1); - data_len = event_len - body_offset; + data_len= event_len - body_offset; if (num_fields > data_len) // simple sanity check against corruption DBUG_RETURN(1); - for (uint i = 0; i < num_fields; i++) - field_block_len += (uint)field_lens[i] + 1; + for (uint i= 0; i < num_fields; i++) + field_block_len+= (uint)field_lens[i] + 1; - fields = (char*)field_lens + num_fields; - table_name = fields + field_block_len; + fields= (char*) field_lens + num_fields; + table_name= fields + field_block_len; if (strlen(table_name) > NAME_LEN) goto err; - db = table_name + table_name_len + 1; - DBUG_EXECUTE_IF ("simulate_invalid_address", - db_len = data_len;); - fname = db + db_len + 1; - if ((db_len > data_len) || (fname > buf_end)) + db= table_name + table_name_len + 1; + DBUG_EXECUTE_IF("simulate_invalid_address", db_len= data_len;); + fname= db + db_len + 1; + if ((db_len > data_len) || (fname > (char*) buf_end)) goto err; - fname_len = (uint) strlen(fname); - if ((fname_len > data_len) || (fname + fname_len > buf_end)) + fname_len= (uint) strlen(fname); + if ((fname_len > data_len) || (fname + fname_len > (char*) buf_end)) goto err; // null termination is accomplished by the caller doing buf[event_len]=0 @@ -2531,7 +2521,7 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, err: // Invalid event. 
- table_name = 0; + table_name= 0; DBUG_RETURN(1); } @@ -2540,8 +2530,9 @@ err: Rotate_log_event methods **************************************************************************/ -Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, - const Format_description_log_event* description_event) +Rotate_log_event::Rotate_log_event(const uchar *buf, uint event_len, + const Format_description_log_event* + description_event) :Log_event(buf, description_event) ,new_log_ident(0), flags(DUP_NAME) { DBUG_ENTER("Rotate_log_event::Rotate_log_event(char*,...)"); @@ -2555,7 +2546,8 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, ident_len= (uint)(event_len - (LOG_EVENT_MINIMAL_HEADER_LEN + post_header_len)); ident_offset= post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); - new_log_ident= my_strndup(PSI_INSTRUMENT_ME, buf + ident_offset, (uint) ident_len, MYF(MY_WME)); + new_log_ident= my_strndup(PSI_INSTRUMENT_ME, (char*) buf + ident_offset, + (uint) ident_len, MYF(MY_WME)); DBUG_PRINT("debug", ("new_log_ident: '%s'", new_log_ident)); DBUG_VOID_RETURN; } @@ -2566,7 +2558,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, **************************************************************************/ Binlog_checkpoint_log_event::Binlog_checkpoint_log_event( - const char *buf, uint event_len, + const uchar *buf, uint event_len, const Format_description_log_event *description_event) :Log_event(buf, description_event), binlog_file_name(0) { @@ -2582,8 +2574,8 @@ Binlog_checkpoint_log_event::Binlog_checkpoint_log_event( binlog_file_len= uint4korr(buf); if (event_len - (header_size + post_header_len) < binlog_file_len) return; - binlog_file_name= my_strndup(PSI_INSTRUMENT_ME, buf + post_header_len, binlog_file_len, - MYF(MY_WME)); + binlog_file_name= my_strndup(PSI_INSTRUMENT_ME, (char*) buf + post_header_len, + binlog_file_len, MYF(MY_WME)); return; } @@ -2592,12 +2584,15 @@ 
Binlog_checkpoint_log_event::Binlog_checkpoint_log_event( Global transaction ID stuff **************************************************************************/ -Gtid_log_event::Gtid_log_event(const char *buf, uint event_len, - const Format_description_log_event *description_event) - : Log_event(buf, description_event), seq_no(0), commit_id(0) +Gtid_log_event::Gtid_log_event(const uchar *buf, uint event_len, + const Format_description_log_event + *description_event) + : Log_event(buf, description_event), seq_no(0), commit_id(0), + flags_extra(0), extra_engines(0) { uint8 header_size= description_event->common_header_len; uint8 post_header_len= description_event->post_header_len[GTID_EVENT-1]; + const uchar *buf_0= buf; if (event_len < (uint) header_size + (uint) post_header_len || post_header_len < GTID_HEADER_LEN) return; @@ -2631,13 +2626,45 @@ Gtid_log_event::Gtid_log_event(const char *buf, uint event_len, memcpy(xid.data, buf, data_length); buf+= data_length; } + + /* the extra flags check and actions */ + if (static_cast<uint>(buf - buf_0) < event_len) + { + flags_extra= *buf++; + /* + extra engines flags presence is identifed by non-zero byte value + at this point + */ + if (flags_extra & FL_EXTRA_MULTI_ENGINE) + { + DBUG_ASSERT(static_cast<uint>(buf - buf_0) < event_len); + + extra_engines= *buf++; + + DBUG_ASSERT(extra_engines > 0); + } + } + /* + the strict '<' part of the assert corresponds to extra zero-padded + trailing bytes, + */ + DBUG_ASSERT(static_cast<uint>(buf - buf_0) <= event_len); + /* and the last of them is tested. */ +#ifdef MYSQL_SERVER +#ifdef WITH_WSREP + if (!WSREP_ON) +#endif +#endif + DBUG_ASSERT(static_cast<uint>(buf - buf_0) == event_len || + buf_0[event_len - 1] == 0); } /* GTID list. 
*/ -Gtid_list_log_event::Gtid_list_log_event(const char *buf, uint event_len, - const Format_description_log_event *description_event) +Gtid_list_log_event::Gtid_list_log_event(const uchar *buf, uint event_len, + const Format_description_log_event + *description_event) : Log_event(buf, description_event), count(0), list(0), sub_id_list(0) { uint32 i; @@ -2728,7 +2755,7 @@ Gtid_list_log_event::peek(const char *event_start, size_t event_len, p+= 4; count= count_field & ((1<<28)-1); if (event_len < (uint32)fdev->common_header_len + GTID_LIST_HEADER_LEN + - 16 * count) + element_size * count) return true; if (!(gtid_list= (rpl_gtid *)my_malloc(PSI_INSTRUMENT_ME, sizeof(rpl_gtid)*count + (count == 0), MYF(MY_WME)))) @@ -2758,7 +2785,7 @@ Gtid_list_log_event::peek(const char *event_start, size_t event_len, Intvar_log_event::Intvar_log_event() */ -Intvar_log_event::Intvar_log_event(const char* buf, +Intvar_log_event::Intvar_log_event(const uchar *buf, const Format_description_log_event* description_event) :Log_event(buf, description_event) { @@ -2788,7 +2815,7 @@ const char* Intvar_log_event::get_var_type_name() Rand_log_event methods **************************************************************************/ -Rand_log_event::Rand_log_event(const char* buf, +Rand_log_event::Rand_log_event(const uchar *buf, const Format_description_log_event* description_event) :Log_event(buf, description_event) { @@ -2814,7 +2841,7 @@ Rand_log_event::Rand_log_event(const char* buf, */ Xid_log_event:: -Xid_log_event(const char* buf, +Xid_log_event(const uchar *buf, const Format_description_log_event *description_event) :Xid_apply_log_event(buf, description_event) { @@ -2828,7 +2855,7 @@ Xid_log_event(const char* buf, XA_prepare_log_event methods **************************************************************************/ XA_prepare_log_event:: -XA_prepare_log_event(const char* buf, +XA_prepare_log_event(const uchar *buf, const Format_description_log_event *description_event) 
:Xid_apply_log_event(buf, description_event) { @@ -2867,7 +2894,7 @@ XA_prepare_log_event(const char* buf, **************************************************************************/ User_var_log_event:: -User_var_log_event(const char* buf, uint event_len, +User_var_log_event(const uchar *buf, uint event_len, const Format_description_log_event* description_event) :Log_event(buf, description_event) #ifndef MYSQL_CLIENT @@ -2875,7 +2902,7 @@ User_var_log_event(const char* buf, uint event_len, #endif { bool error= false; - const char* buf_start= buf, *buf_end= buf + event_len; + const uchar *buf_start= buf, *buf_end= buf + event_len; /* The Post-Header is empty. The Variable Data part begins immediately. */ buf+= description_event->common_header_len + @@ -2895,7 +2922,7 @@ User_var_log_event(const char* buf, uint event_len, may have the bigger value possible, is_null= True and there is no payload for val, or even that name_len is 0. */ - if (name + name_len + UV_VAL_IS_NULL > buf_end) + if (name + name_len + UV_VAL_IS_NULL > (char*) buf_end) { error= true; goto err; @@ -2916,7 +2943,7 @@ User_var_log_event(const char* buf, uint event_len, val= (char *) (buf + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE); - if (val > buf_end) + if (val > (char*) buf_end) { error= true; goto err; @@ -2938,7 +2965,7 @@ User_var_log_event(const char* buf, uint event_len, Old events will not have this extra byte, thence, we keep the flags set to UNDEF_F. 
*/ - size_t bytes_read= (val + val_len) - buf_start; + size_t bytes_read= (val + val_len) - (char*) buf_start; if (bytes_read > event_len) { error= true; @@ -2966,16 +2993,19 @@ err: Create_file_log_event ctor */ -Create_file_log_event::Create_file_log_event(const char* buf, uint len, - const Format_description_log_event* description_event) - :Load_log_event(buf,0,description_event),fake_base(0),block(0),inited_from_old(0) +Create_file_log_event:: +Create_file_log_event(const uchar *buf, uint len, + const Format_description_log_event* description_event) + :Load_log_event(buf,0,description_event),fake_base(0),block(0), + inited_from_old(0) { DBUG_ENTER("Create_file_log_event::Create_file_log_event(char*,...)"); uint block_offset; uint header_len= description_event->common_header_len; uint8 load_header_len= description_event->post_header_len[LOAD_EVENT-1]; uint8 create_file_header_len= description_event->post_header_len[CREATE_FILE_EVENT-1]; - if (!(event_buf= (char*) my_memdup(PSI_INSTRUMENT_ME, buf, len, MYF(MY_WME))) || + if (!(event_buf= (uchar*) my_memdup(PSI_INSTRUMENT_ME, buf, len, + MYF(MY_WME))) || copy_log_event(event_buf,len, (((uchar)buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ? 
load_header_len + header_len : @@ -3005,13 +3035,13 @@ Create_file_log_event::Create_file_log_event(const char* buf, uint len, create_file_header_len + 1); if (len < block_offset) DBUG_VOID_RETURN; - block = (uchar*)buf + block_offset; - block_len = len - block_offset; + block= const_cast<uchar*>(buf) + block_offset; + block_len= len - block_offset; } else { sql_ex.force_new_format(); - inited_from_old = 1; + inited_from_old= 1; } DBUG_VOID_RETURN; } @@ -3025,8 +3055,9 @@ Create_file_log_event::Create_file_log_event(const char* buf, uint len, Append_block_log_event ctor */ -Append_block_log_event::Append_block_log_event(const char* buf, uint len, - const Format_description_log_event* description_event) +Append_block_log_event:: +Append_block_log_event(const uchar *buf, uint len, + const Format_description_log_event* description_event) :Log_event(buf, description_event),block(0) { DBUG_ENTER("Append_block_log_event::Append_block_log_event(char*,...)"); @@ -3037,7 +3068,7 @@ Append_block_log_event::Append_block_log_event(const char* buf, uint len, if (len < total_header_len) DBUG_VOID_RETURN; file_id= uint4korr(buf + common_header_len + AB_FILE_ID_OFFSET); - block= (uchar*)buf + total_header_len; + block= const_cast<uchar*>(buf) + total_header_len; block_len= len - total_header_len; DBUG_VOID_RETURN; } @@ -3051,8 +3082,9 @@ Append_block_log_event::Append_block_log_event(const char* buf, uint len, Delete_file_log_event ctor */ -Delete_file_log_event::Delete_file_log_event(const char* buf, uint len, - const Format_description_log_event* description_event) +Delete_file_log_event:: +Delete_file_log_event(const uchar *buf, uint len, + const Format_description_log_event* description_event) :Log_event(buf, description_event),file_id(0) { uint8 common_header_len= description_event->common_header_len; @@ -3071,8 +3103,9 @@ Delete_file_log_event::Delete_file_log_event(const char* buf, uint len, Execute_load_log_event ctor */ 
-Execute_load_log_event::Execute_load_log_event(const char* buf, uint len, - const Format_description_log_event* description_event) +Execute_load_log_event:: +Execute_load_log_event(const uchar *buf, uint len, + const Format_description_log_event* description_event) :Log_event(buf, description_event), file_id(0) { uint8 common_header_len= description_event->common_header_len; @@ -3088,7 +3121,7 @@ Execute_load_log_event::Execute_load_log_event(const char* buf, uint len, **************************************************************************/ Begin_load_query_log_event:: -Begin_load_query_log_event(const char* buf, uint len, +Begin_load_query_log_event(const uchar *buf, uint len, const Format_description_log_event* desc_event) :Append_block_log_event(buf, len, desc_event) { @@ -3101,7 +3134,7 @@ Begin_load_query_log_event(const char* buf, uint len, Execute_load_query_log_event:: -Execute_load_query_log_event(const char* buf, uint event_len, +Execute_load_query_log_event(const uchar *buf, uint event_len, const Format_description_log_event* desc_event): Query_log_event(buf, event_len, desc_event, EXECUTE_LOAD_QUERY_EVENT), file_id(0), fn_pos_start(0), fn_pos_end(0) @@ -3137,10 +3170,10 @@ ulong Execute_load_query_log_event::get_post_header_size_for_derived() sql_ex_info::init() */ -const char *sql_ex_info::init(const char *buf, const char *buf_end, +const uchar *sql_ex_info::init(const uchar *buf, const uchar *buf_end, bool use_new_format) { - cached_new_format = use_new_format; + cached_new_format= use_new_format; if (use_new_format) { empty_flags=0; @@ -3157,19 +3190,19 @@ const char *sql_ex_info::init(const char *buf, const char *buf_end, read_str(&buf, buf_end, &line_start, &line_start_len) || read_str(&buf, buf_end, &escaped, &escaped_len)) return 0; - opt_flags = *buf++; + opt_flags= *buf++; } else { if (buf_end - buf < 7) return 0; // Wrong data field_term_len= enclosed_len= line_term_len= line_start_len= escaped_len=1; - field_term = buf++; // Use first 
byte in string - enclosed= buf++; - line_term= buf++; - line_start= buf++; - escaped= buf++; - opt_flags = *buf++; + field_term= (char*) buf++; // Use first byte in string + enclosed= (char*) buf++; + line_term= (char*) buf++; + line_start= (char*) buf++; + escaped= (char*) buf++; + opt_flags= *buf++; empty_flags= *buf++; if (empty_flags & FIELD_TERM_EMPTY) field_term_len=0; @@ -3192,7 +3225,7 @@ const char *sql_ex_info::init(const char *buf, const char *buf_end, **************************************************************************/ -Rows_log_event::Rows_log_event(const char *buf, uint event_len, +Rows_log_event::Rows_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event) : Log_event(buf, description_event), @@ -3227,7 +3260,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, event_len, common_header_len, post_header_len)); - const char *post_start= buf + common_header_len; + const uchar *post_start= buf + common_header_len; post_start+= RW_MAPID_OFFSET; if (post_header_len == 6) { @@ -3264,9 +3297,9 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, var_header_len-= 2; /* Iterate over var-len header, extracting 'chunks' */ - const char* start= post_start + 2; - const char* end= start + var_header_len; - for (const char* pos= start; pos < end;) + const uchar *start= post_start + 2; + const uchar *end= start + var_header_len; + for (const uchar* pos= start; pos < end;) { switch(*pos++) { @@ -3301,7 +3334,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, uchar const *const ptr_width= var_start; uchar *ptr_after_width= (uchar*) ptr_width; DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); - m_width = net_field_length(&ptr_after_width); + m_width= net_field_length(&ptr_after_width); DBUG_PRINT("debug", ("m_width=%lu", m_width)); /* Avoid reading out of buffer */ @@ -3387,18 +3420,19 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, void 
Rows_log_event::uncompress_buf() { - uint32 un_len = binlog_get_uncompress_len((char *)m_rows_buf); + uint32 un_len= binlog_get_uncompress_len(m_rows_buf); if (!un_len) return; - uchar *new_buf= (uchar*) my_malloc(PSI_INSTRUMENT_ME, ALIGN_SIZE(un_len), MYF(MY_WME)); + uchar *new_buf= (uchar*) my_malloc(PSI_INSTRUMENT_ME, ALIGN_SIZE(un_len), + MYF(MY_WME)); if (new_buf) { - if(!binlog_buf_uncompress((char *)m_rows_buf, (char *)new_buf, + if (!binlog_buf_uncompress(m_rows_buf, new_buf, (uint32)(m_rows_cur - m_rows_buf), &un_len)) { my_free(m_rows_buf); - m_rows_buf = new_buf; + m_rows_buf= new_buf; #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) m_curr_row= m_rows_buf; #endif @@ -3435,7 +3469,7 @@ int Rows_log_event::get_data_size() (general_type_code == UPDATE_ROWS_EVENT ? no_bytes_in_map(&m_cols_ai) : 0) + m_rows_cur - m_rows_buf);); int data_size= 0; - Log_event_type type = get_type_code(); + Log_event_type type= get_type_code(); bool is_v2_event= LOG_EVENT_IS_ROW_V2(type); if (is_v2_event) { @@ -3463,9 +3497,10 @@ int Rows_log_event::get_data_size() Annotate_rows_log_event member functions **************************************************************************/ -Annotate_rows_log_event::Annotate_rows_log_event(const char *buf, - uint event_len, - const Format_description_log_event *desc) +Annotate_rows_log_event:: +Annotate_rows_log_event(const uchar *buf, + uint event_len, + const Format_description_log_event *desc) : Log_event(buf, desc), m_save_thd_query_txt(0), m_save_thd_query_len(0), @@ -3544,7 +3579,7 @@ bool Annotate_rows_log_event::is_valid() const Constructor used by slave to read the event from the binary log. 
*/ #if defined(HAVE_REPLICATION) -Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, +Table_map_log_event::Table_map_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event) @@ -3579,7 +3614,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, DBUG_VOID_RETURN; /* Read the post-header */ - const char *post_start= buf + common_header_len; + const uchar *post_start= buf + common_header_len; post_start+= TM_MAPID_OFFSET; VALIDATE_BYTES_READ(post_start, buf, event_len); @@ -3601,7 +3636,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, m_flags= uint2korr(post_start); /* Read the variable part of the event */ - const char *const vpart= buf + common_header_len + post_header_len; + const uchar *const vpart= buf + common_header_len + post_header_len; /* Extract the length of the various parts from the buffer */ uchar const *const ptr_dblen= (uchar const*)vpart + 0; @@ -3620,9 +3655,9 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, m_colcnt= net_field_length(&ptr_after_colcnt); DBUG_PRINT("info",("m_dblen: %lu off: %ld m_tbllen: %lu off: %ld m_colcnt: %lu off: %ld", - (ulong) m_dblen, (long) (ptr_dblen-(const uchar*)vpart), - (ulong) m_tbllen, (long) (ptr_tbllen-(const uchar*)vpart), - m_colcnt, (long) (ptr_colcnt-(const uchar*)vpart))); + (ulong) m_dblen, (long) (ptr_dblen - vpart), + (ulong) m_tbllen, (long) (ptr_tbllen - vpart), + m_colcnt, (long) (ptr_colcnt - vpart))); /* Allocate mem for all fields in one go. If fails, caught in is_valid() */ m_memory= (uchar*) my_multi_malloc(PSI_INSTRUMENT_ME, MYF(MY_WME), @@ -3932,7 +3967,7 @@ Optional_metadata_fields(unsigned char* optional_metadata, Constructor used by slave to read the event from the binary log. 
*/ #ifdef HAVE_REPLICATION -Write_rows_log_event::Write_rows_log_event(const char *buf, uint event_len, +Write_rows_log_event::Write_rows_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event) : Rows_log_event(buf, event_len, description_event) @@ -3940,7 +3975,7 @@ Write_rows_log_event::Write_rows_log_event(const char *buf, uint event_len, } Write_rows_compressed_log_event::Write_rows_compressed_log_event( - const char *buf, uint event_len, + const uchar *buf, uint event_len, const Format_description_log_event *description_event) : Write_rows_log_event(buf, event_len, description_event) @@ -3958,7 +3993,7 @@ Write_rows_compressed_log_event::Write_rows_compressed_log_event( Constructor used by slave to read the event from the binary log. */ #ifdef HAVE_REPLICATION -Delete_rows_log_event::Delete_rows_log_event(const char *buf, uint event_len, +Delete_rows_log_event::Delete_rows_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event) : Rows_log_event(buf, event_len, description_event) @@ -3966,7 +4001,7 @@ Delete_rows_log_event::Delete_rows_log_event(const char *buf, uint event_len, } Delete_rows_compressed_log_event::Delete_rows_compressed_log_event( - const char *buf, uint event_len, + const uchar *buf, uint event_len, const Format_description_log_event *description_event) : Delete_rows_log_event(buf, event_len, description_event) @@ -3994,7 +4029,7 @@ Update_rows_log_event::~Update_rows_log_event() Constructor used by slave to read the event from the binary log. 
*/ #ifdef HAVE_REPLICATION -Update_rows_log_event::Update_rows_log_event(const char *buf, uint event_len, +Update_rows_log_event::Update_rows_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event) @@ -4003,7 +4038,7 @@ Update_rows_log_event::Update_rows_log_event(const char *buf, uint event_len, } Update_rows_compressed_log_event::Update_rows_compressed_log_event( - const char *buf, uint event_len, + const uchar *buf, uint event_len, const Format_description_log_event *description_event) : Update_rows_log_event(buf, event_len, description_event) @@ -4012,7 +4047,7 @@ Update_rows_compressed_log_event::Update_rows_compressed_log_event( } #endif -Incident_log_event::Incident_log_event(const char *buf, uint event_len, +Incident_log_event::Incident_log_event(const uchar *buf, uint event_len, const Format_description_log_event *descr_event) : Log_event(buf, descr_event) { @@ -4038,8 +4073,8 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len, DBUG_VOID_RETURN; } m_incident= static_cast<Incident>(incident_number); - char const *ptr= buf + common_header_len + post_header_len; - char const *const str_end= buf + event_len; + uchar const *ptr= buf + common_header_len + post_header_len; + uchar const *const str_end= buf + event_len; uint8 len= 0; // Assignment to keep compiler happy const char *str= NULL; // Assignment to keep compiler happy if (read_str(&ptr, str_end, &str, &len)) @@ -4081,7 +4116,7 @@ Incident_log_event::description() const } -Ignorable_log_event::Ignorable_log_event(const char *buf, +Ignorable_log_event::Ignorable_log_event(const uchar *buf, const Format_description_log_event *descr_event, const char *event_name) diff --git a/sql/log_event.h b/sql/log_event.h index 096d8587848..b031fec665d 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -319,6 +319,7 @@ class String; #define Q_INVOKER 11 #define Q_HRNOW 128 +#define Q_XID 129 /* Intvar event post-header */ @@ -489,6 +490,16 @@ class 
String; #define LOG_EVENT_IGNORABLE_F 0x80 /** + @def LOG_EVENT_ACCEPT_OWN_F + + Flag sets by the semisync slave for accepting + the same server_id ("own") events which the slave must not have + in its state. Typically such events were never committed by + their originator (this server) and discared at its semisync-slave recovery. +*/ +#define LOG_EVENT_ACCEPT_OWN_F 0x4000 + +/** @def LOG_EVENT_SKIP_REPLICATION_F Flag set by application creating the event (with @@skip_replication); the @@ -1188,7 +1199,7 @@ public: A temp buffer for read_log_event; it is later analysed according to the event's type, and its content is distributed in the event-specific fields. */ - char *temp_buf; + uchar *temp_buf; /* TRUE <=> this event 'owns' temp_buf and should call my_free() when done @@ -1436,10 +1447,10 @@ public: { return (cache_type == Log_event::EVENT_NO_CACHE); } - Log_event(const char* buf, const Format_description_log_event + Log_event(const uchar *buf, const Format_description_log_event *description_event); virtual ~Log_event() { free_temp_buf();} - void register_temp_buf(char* buf, bool must_free) + void register_temp_buf(uchar* buf, bool must_free) { temp_buf= buf; event_owns_temp_buf= must_free; @@ -1458,7 +1469,7 @@ public: is calculated during write() */ virtual int get_data_size() { return 0;} - static Log_event* read_log_event(const char* buf, uint event_len, + static Log_event* read_log_event(const uchar *buf, uint event_len, const char **error, const Format_description_log_event *description_event, my_bool crc_check); @@ -2105,6 +2116,8 @@ public: statement, for other query statements, this will be zero. */ ulonglong table_map_for_update; + /* Xid for the event, if such exists */ + ulonglong xid; /* Holds the original length of a Query_log_event that comes from a master of version < 5.0 (i.e., binlog_version < 4). 
When the IO @@ -2130,7 +2143,7 @@ public: #endif Query_log_event(); - Query_log_event(const char* buf, uint event_len, + Query_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event, Log_event_type event_type); ~Query_log_event() @@ -2162,8 +2175,10 @@ public: /* !!! Public in this patch to allow old usage */ int do_apply_event(rpl_group_info *rgi, const char *query_arg, uint32 q_len_arg); - static bool peek_is_commit_rollback(const char *event_start, - size_t event_len, enum enum_binlog_checksum_alg checksum_alg); + static bool peek_is_commit_rollback(const uchar *event_start, + size_t event_len, + enum enum_binlog_checksum_alg + checksum_alg); #endif /* HAVE_REPLICATION */ /* If true, the event always be applied by slave SQL thread or be printed by @@ -2196,7 +2211,7 @@ class Query_compressed_log_event:public Query_log_event{ protected: Log_event::Byte* query_buf; // point to the uncompressed query public: - Query_compressed_log_event(const char* buf, uint event_len, + Query_compressed_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event, Log_event_type event_type); ~Query_compressed_log_event() @@ -2207,7 +2222,7 @@ public: Log_event_type get_type_code() { return QUERY_COMPRESSED_EVENT; } /* - the min length of log_bin_compress_min_len is 10, + the min length of log_bin_compress_min_len is 10, means that Begin/Commit/Rollback would never be compressed! */ virtual bool is_begin() { return false; } @@ -2248,7 +2263,7 @@ struct sql_ex_info line_start_len + escaped_len + 6 : 7); } bool write_data(Log_event_writer *writer); - const char* init(const char* buf, const char* buf_end, bool use_new_format); + const uchar *init(const uchar *buf, const uchar* buf_end, bool use_new_format); bool new_format() { return ((cached_new_format != -1) ? 
cached_new_format : @@ -2462,7 +2477,7 @@ class Load_log_event: public Log_event { private: protected: - int copy_log_event(const char *buf, ulong event_len, + int copy_log_event(const uchar *buf, ulong event_len, int body_offset, const Format_description_log_event* description_event); @@ -2543,7 +2558,7 @@ public: logging of LOAD DATA is going to be changed in 4.1 or 5.0, this is only used for the common_header_len (post_header_len will not be changed). */ - Load_log_event(const char* buf, uint event_len, + Load_log_event(const uchar *buf, uint event_len, const Format_description_log_event* description_event); ~Load_log_event() {} @@ -2634,7 +2649,7 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Start_log_event_v3(const char* buf, uint event_len, + Start_log_event_v3(const uchar *buf, uint event_len, const Format_description_log_event* description_event); ~Start_log_event_v3() {} Log_event_type get_type_code() { return START_EVENT_V3;} @@ -2703,9 +2718,9 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Start_encryption_log_event( - const char* buf, uint event_len, - const Format_description_log_event* description_event); + Start_encryption_log_event(const uchar *buf, uint event_len, + const Format_description_log_event + *description_event); bool is_valid() const { return crypto_scheme == 1; } @@ -2809,7 +2824,7 @@ public: uint32 options_written_to_bin_log; Format_description_log_event(uint8 binlog_ver, const char* server_ver=0); - Format_description_log_event(const char* buf, uint event_len, + Format_description_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); ~Format_description_log_event() @@ -2924,7 +2939,7 @@ Intvar_log_event(THD* thd_arg,uchar type_arg, ulonglong val_arg, bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Intvar_log_event(const char* buf, + Intvar_log_event(const uchar *buf, const Format_description_log_event 
*description_event); ~Intvar_log_event() {} Log_event_type get_type_code() { return INTVAR_EVENT;} @@ -3005,7 +3020,7 @@ class Rand_log_event: public Log_event bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Rand_log_event(const char* buf, + Rand_log_event(const uchar *buf, const Format_description_log_event *description_event); ~Rand_log_event() {} Log_event_type get_type_code() { return RAND_EVENT;} @@ -3032,9 +3047,9 @@ public: Xid_apply_log_event(THD* thd_arg): Log_event(thd_arg, 0, TRUE) {} #endif - Xid_apply_log_event(const char* buf, + Xid_apply_log_event(const uchar *buf, const Format_description_log_event *description_event): - Log_event(buf, description_event) {} + Log_event(buf, description_event) {} ~Xid_apply_log_event() {} bool is_valid() const { return 1; } @@ -3085,7 +3100,7 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Xid_log_event(const char* buf, + Xid_log_event(const uchar *buf, const Format_description_log_event *description_event); ~Xid_log_event() {} Log_event_type get_type_code() { return XID_EVENT;} @@ -3230,7 +3245,7 @@ public: #else bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - XA_prepare_log_event(const char* buf, + XA_prepare_log_event(const uchar *buf, const Format_description_log_event *description_event); ~XA_prepare_log_event() {} Log_event_type get_type_code() { return XA_PREPARE_LOG_EVENT; } @@ -3287,7 +3302,8 @@ public: bool deferred; query_id_t query_id; User_var_log_event(THD* thd_arg, const char *name_arg, size_t name_len_arg, - const char *val_arg, size_t val_len_arg, Item_result type_arg, + const char *val_arg, size_t val_len_arg, + Item_result type_arg, uint charset_number_arg, uchar flags_arg, bool using_trans, bool direct) :Log_event(thd_arg, 0, using_trans), @@ -3304,7 +3320,7 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - User_var_log_event(const char* buf, uint event_len, + User_var_log_event(const uchar 
*buf, uint event_len, const Format_description_log_event *description_event); ~User_var_log_event() {} Log_event_type get_type_code() { return USER_VAR_EVENT;} @@ -3352,7 +3368,7 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Stop_log_event(const char* buf, + Stop_log_event(const uchar *buf, const Format_description_log_event *description_event): Log_event(buf, description_event) {} @@ -3433,7 +3449,7 @@ public: DUP_NAME= 2, // if constructor should dup the string argument RELAY_LOG=4 // rotate event for relay log }; - const char* new_log_ident; + const char *new_log_ident; ulonglong pos; uint ident_len; uint flags; @@ -3448,7 +3464,7 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Rotate_log_event(const char* buf, uint event_len, + Rotate_log_event(const uchar *buf, uint event_len, const Format_description_log_event* description_event); ~Rotate_log_event() { @@ -3487,11 +3503,12 @@ public: #else bool print(FILE *file, PRINT_EVENT_INFO *print_event_info); #endif - Binlog_checkpoint_log_event(const char *buf, uint event_len, - const Format_description_log_event *description_event); + Binlog_checkpoint_log_event(const uchar *buf, uint event_len, + const Format_description_log_event + *description_event); ~Binlog_checkpoint_log_event() { my_free(binlog_file_name); } Log_event_type get_type_code() { return BINLOG_CHECKPOINT_EVENT;} - int get_data_size() { return binlog_file_len + BINLOG_CHECKPOINT_HEADER_LEN;} + int get_data_size() { return binlog_file_len + BINLOG_CHECKPOINT_HEADER_LEN;} bool is_valid() const { return binlog_file_name != 0; } #ifdef MYSQL_SERVER bool write(); @@ -3577,6 +3594,13 @@ public: event_mysql_xid_t xid; #endif uchar flags2; + uint flags_extra; // more flags area placed after the regular flags2's one + /* + Number of engine participants in transaction minus 1. + When zero the event does not contain that information. + */ + uint8 extra_engines; + /* Flags2. 
*/ /* FL_STANDALONE is set when there is no terminating COMMIT event. */ @@ -3608,9 +3632,19 @@ public: /* FL_"COMMITTED or ROLLED-BACK"_XA is set for XA transaction. */ static const uchar FL_COMPLETED_XA= 128; + /* Flags_extra. */ + + /* + FL_EXTRA_MULTI_ENGINE is set for event group comprising a transaction + involving multiple storage engines. No flag and extra data are added + to the event when the transaction involves only one engine. + */ + static const uchar FL_EXTRA_MULTI_ENGINE= 1; + #ifdef MYSQL_SERVER Gtid_log_event(THD *thd_arg, uint64 seq_no, uint32 domain_id, bool standalone, - uint16 flags, bool is_transactional, uint64 commit_id); + uint16 flags, bool is_transactional, uint64 commit_id, + bool has_xid= false, bool is_ro_1pc= false); #ifdef HAVE_REPLICATION void pack_info(Protocol *protocol); virtual int do_apply_event(rpl_group_info *rgi); @@ -3620,7 +3654,7 @@ public: #else bool print(FILE *file, PRINT_EVENT_INFO *print_event_info); #endif - Gtid_log_event(const char *buf, uint event_len, + Gtid_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); ~Gtid_log_event() { } Log_event_type get_type_code() { return GTID_EVENT; } @@ -3634,7 +3668,7 @@ public: bool write(); static int make_compatible_event(String *packet, bool *need_dummy_event, ulong ev_offset, enum enum_binlog_checksum_alg checksum_alg); - static bool peek(const char *event_start, size_t event_len, + static bool peek(const uchar *event_start, size_t event_len, enum enum_binlog_checksum_alg checksum_alg, uint32 *domain_id, uint32 *server_id, uint64 *seq_no, uchar *flags2, const Format_description_log_event *fdev); @@ -3734,7 +3768,7 @@ public: #else bool print(FILE *file, PRINT_EVENT_INFO *print_event_info); #endif - Gtid_list_log_event(const char *buf, uint event_len, + Gtid_list_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); ~Gtid_list_log_event() { my_free(list); my_free(sub_id_list); } 
Log_event_type get_type_code() { return GTID_LIST_EVENT; } @@ -3778,8 +3812,8 @@ protected: */ bool fake_base; public: - uchar* block; - const char *event_buf; + uchar *block; + const uchar *event_buf; uint block_len; uint file_id; bool inited_from_old; @@ -3801,7 +3835,7 @@ public: bool enable_local); #endif - Create_file_log_event(const char* buf, uint event_len, + Create_file_log_event(const uchar *buf, uint event_len, const Format_description_log_event* description_event); ~Create_file_log_event() { @@ -3853,7 +3887,7 @@ public: event needs to have a 'db' member to be well filtered by binlog-*-db rules). 'db' is not written to the binlog (it's not used by Append_block_log_event::write()), so it can't be read in - the Append_block_log_event(const char* buf, int event_len) + the Append_block_log_event(const uchar *buf, int event_len) constructor. In other words, 'db' is used only for filtering by binlog-*-db rules. Create_file_log_event is different: it's 'db' (which is inherited from Load_log_event) is written to the binlog @@ -3872,7 +3906,7 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Append_block_log_event(const char* buf, uint event_len, + Append_block_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); ~Append_block_log_event() {} @@ -3914,7 +3948,7 @@ public: bool enable_local); #endif - Delete_file_log_event(const char* buf, uint event_len, + Delete_file_log_event(const uchar *buf, uint event_len, const Format_description_log_event* description_event); ~Delete_file_log_event() {} Log_event_type get_type_code() { return DELETE_FILE_EVENT;} @@ -3953,7 +3987,7 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Execute_load_log_event(const char* buf, uint event_len, + Execute_load_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); ~Execute_load_log_event() {} @@ -3993,7 +4027,7 @@ public: int 
get_create_or_append() const; #endif /* HAVE_REPLICATION */ #endif - Begin_load_query_log_event(const char* buf, uint event_len, + Begin_load_query_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); ~Begin_load_query_log_event() {} @@ -4051,7 +4085,7 @@ public: bool print(FILE* file, PRINT_EVENT_INFO* print_event_info, const char *local_fname); #endif - Execute_load_query_log_event(const char* buf, uint event_len, + Execute_load_query_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); ~Execute_load_query_log_event() {} @@ -4086,7 +4120,7 @@ public: Log_event's ctor, this way we can extract maximum information from the event's header (the unique ID for example). */ - Unknown_log_event(const char* buf, + Unknown_log_event(const uchar *buf, const Format_description_log_event *description_event): Log_event(buf, description_event), what(UNKNOWN) {} @@ -4118,7 +4152,7 @@ public: #ifndef MYSQL_CLIENT Annotate_rows_log_event(THD*, bool using_trans, bool direct); #endif - Annotate_rows_log_event(const char *buf, uint event_len, + Annotate_rows_log_event(const uchar *buf, uint event_len, const Format_description_log_event*); ~Annotate_rows_log_event(); @@ -4768,7 +4802,7 @@ public: Table_map_log_event(THD *thd, TABLE *tbl, ulong tid, bool is_transactional); #endif #ifdef HAVE_REPLICATION - Table_map_log_event(const char *buf, uint event_len, + Table_map_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif @@ -5091,11 +5125,11 @@ protected: this class, not create instances of this class. 
*/ #ifdef MYSQL_SERVER - Rows_log_event(THD*, TABLE*, ulong table_id, + Rows_log_event(THD*, TABLE*, ulong table_id, MY_BITMAP const *cols, bool is_transactional, Log_event_type event_type); #endif - Rows_log_event(const char *row_data, uint event_len, + Rows_log_event(const uchar *row_data, uint event_len, const Format_description_log_event *description_event); void uncompress_buf(); @@ -5247,7 +5281,7 @@ private: DESCRIPTION The member function will do the actual execution needed to handle a row. - The row is located at m_curr_row. When the function returns, + The row is located at m_curr_row. When the function returns, m_curr_row_end should point at the next row (one byte after the end of the current row). @@ -5284,7 +5318,7 @@ public: bool is_transactional); #endif #ifdef HAVE_REPLICATION - Write_rows_log_event(const char *buf, uint event_len, + Write_rows_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif #if defined(MYSQL_SERVER) @@ -5326,7 +5360,7 @@ public: virtual bool write(); #endif #ifdef HAVE_REPLICATION - Write_rows_compressed_log_event(const char *buf, uint event_len, + Write_rows_compressed_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif private: @@ -5366,7 +5400,7 @@ public: virtual ~Update_rows_log_event(); #ifdef HAVE_REPLICATION - Update_rows_log_event(const char *buf, uint event_len, + Update_rows_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif @@ -5414,7 +5448,7 @@ public: virtual bool write(); #endif #ifdef HAVE_REPLICATION - Update_rows_compressed_log_event(const char *buf, uint event_len, + Update_rows_compressed_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif private: @@ -5456,7 +5490,7 @@ public: Delete_rows_log_event(THD*, TABLE*, ulong, bool is_transactional); #endif #ifdef HAVE_REPLICATION - 
Delete_rows_log_event(const char *buf, uint event_len, + Delete_rows_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif #ifdef MYSQL_SERVER @@ -5498,7 +5532,7 @@ public: virtual bool write(); #endif #ifdef HAVE_REPLICATION - Delete_rows_compressed_log_event(const char *buf, uint event_len, + Delete_rows_compressed_log_event(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif private: @@ -5592,7 +5626,7 @@ public: virtual bool write_data_body(); #endif - Incident_log_event(const char *buf, uint event_len, + Incident_log_event(const uchar *buf, uint event_len, const Format_description_log_event *descr_event); virtual ~Incident_log_event(); @@ -5655,7 +5689,7 @@ public: } #endif - Ignorable_log_event(const char *buf, + Ignorable_log_event(const uchar *buf, const Format_description_log_event *descr_event, const char *event_name); virtual ~Ignorable_log_event(); @@ -5708,7 +5742,7 @@ class Heartbeat_log_event: public Log_event { public: uint8 hb_flags; - Heartbeat_log_event(const char* buf, ulong event_len, + Heartbeat_log_event(const uchar *buf, uint event_len, const Format_description_log_event* description_event); Log_event_type get_type_code() { return HEARTBEAT_LOG_EVENT; } bool is_valid() const @@ -5716,12 +5750,12 @@ public: return (log_ident != NULL && ident_len <= FN_REFLEN-1 && log_pos >= BIN_LOG_HEADER_SIZE); } - const char * get_log_ident() { return log_ident; } + const uchar * get_log_ident() { return log_ident; } uint get_ident_len() { return ident_len; } private: - const char* log_ident; uint ident_len; + const uchar *log_ident; }; inline int Log_event_writer::write(Log_event *ev) @@ -5742,9 +5776,10 @@ inline int Log_event_writer::write(Log_event *ev) bool slave_execute_deferred_events(THD *thd); #endif -bool event_that_should_be_ignored(const char *buf); -bool event_checksum_test(uchar *buf, ulong event_len, enum_binlog_checksum_alg alg); -enum 
enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len); +bool event_that_should_be_ignored(const uchar *buf); +bool event_checksum_test(uchar *buf, ulong event_len, + enum_binlog_checksum_alg alg); +enum enum_binlog_checksum_alg get_checksum_alg(const uchar *buf, ulong len); extern TYPELIB binlog_checksum_typelib; #ifdef WITH_WSREP enum Log_event_type wsrep_peak_event(rpl_group_info *rgi, ulonglong* event_size); @@ -5755,17 +5790,23 @@ enum Log_event_type wsrep_peak_event(rpl_group_info *rgi, ulonglong* event_size) */ -int binlog_buf_compress(const char *src, char *dst, uint32 len, uint32 *comlen); -int binlog_buf_uncompress(const char *src, char *dst, uint32 len, uint32 *newlen); +int binlog_buf_compress(const uchar *src, uchar *dst, uint32 len, + uint32 *comlen); +int binlog_buf_uncompress(const uchar *src, uchar *dst, uint32 len, + uint32 *newlen); uint32 binlog_get_compress_len(uint32 len); -uint32 binlog_get_uncompress_len(const char *buf); - -int query_event_uncompress(const Format_description_log_event *description_event, bool contain_checksum, - const char *src, ulong src_len, char* buf, ulong buf_size, bool* is_malloc, - char **dst, ulong *newlen); - -int row_log_event_uncompress(const Format_description_log_event *description_event, bool contain_checksum, - const char *src, ulong src_len, char* buf, ulong buf_size, bool* is_malloc, - char **dst, ulong *newlen); +uint32 binlog_get_uncompress_len(const uchar *buf); + +int query_event_uncompress(const Format_description_log_event *description_event, + bool contain_checksum, + const uchar *src, ulong src_len, uchar *buf, + ulong buf_size, bool* is_malloc, + uchar **dst, ulong *newlen); +int row_log_event_uncompress(const Format_description_log_event + *description_event, + bool contain_checksum, + const uchar *src, ulong src_len, + uchar* buf, ulong buf_size, bool *is_malloc, + uchar **dst, ulong *newlen); #endif /* _log_event_h */ diff --git a/sql/log_event_client.cc b/sql/log_event_client.cc 
index 9a8ee6b0239..621fd7ceaa1 100644 --- a/sql/log_event_client.cc +++ b/sql/log_event_client.cc @@ -1594,7 +1594,7 @@ bool Log_event::print_base64(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info, bool do_print_encoded) { - uchar *ptr= (uchar *)temp_buf; + uchar *ptr= temp_buf; uint32 size= uint4korr(ptr + EVENT_LEN_OFFSET); DBUG_ENTER("Log_event::print_base64"); @@ -1609,31 +1609,31 @@ bool Log_event::print_base64(IO_CACHE* file, switch (ev_type) { case WRITE_ROWS_EVENT: ptr[EVENT_TYPE_OFFSET]= DELETE_ROWS_EVENT; - ev= new Delete_rows_log_event((const char*) ptr, tmp_size, + ev= new Delete_rows_log_event(ptr, tmp_size, glob_description_event); ev->change_to_flashback_event(print_event_info, ptr, ev_type); break; case WRITE_ROWS_EVENT_V1: ptr[EVENT_TYPE_OFFSET]= DELETE_ROWS_EVENT_V1; - ev= new Delete_rows_log_event((const char*) ptr, tmp_size, + ev= new Delete_rows_log_event(ptr, tmp_size, glob_description_event); ev->change_to_flashback_event(print_event_info, ptr, ev_type); break; case DELETE_ROWS_EVENT: ptr[EVENT_TYPE_OFFSET]= WRITE_ROWS_EVENT; - ev= new Write_rows_log_event((const char*) ptr, tmp_size, + ev= new Write_rows_log_event(ptr, tmp_size, glob_description_event); ev->change_to_flashback_event(print_event_info, ptr, ev_type); break; case DELETE_ROWS_EVENT_V1: ptr[EVENT_TYPE_OFFSET]= WRITE_ROWS_EVENT_V1; - ev= new Write_rows_log_event((const char*) ptr, tmp_size, + ev= new Write_rows_log_event(ptr, tmp_size, glob_description_event); ev->change_to_flashback_event(print_event_info, ptr, ev_type); break; case UPDATE_ROWS_EVENT: case UPDATE_ROWS_EVENT_V1: - ev= new Update_rows_log_event((const char*) ptr, tmp_size, + ev= new Update_rows_log_event(ptr, tmp_size, glob_description_event); ev->change_to_flashback_event(print_event_info, ptr, ev_type); break; @@ -1680,7 +1680,7 @@ bool Log_event::print_base64(IO_CACHE* file, case TABLE_MAP_EVENT: { Table_map_log_event *map; - map= new Table_map_log_event((const char*) ptr, size, + map= new 
Table_map_log_event(ptr, size, glob_description_event); #ifdef WHEN_FLASHBACK_REVIEW_READY if (need_flashback_review) @@ -1695,42 +1695,42 @@ bool Log_event::print_base64(IO_CACHE* file, case WRITE_ROWS_EVENT: case WRITE_ROWS_EVENT_V1: { - ev= new Write_rows_log_event((const char*) ptr, size, + ev= new Write_rows_log_event(ptr, size, glob_description_event); break; } case DELETE_ROWS_EVENT: case DELETE_ROWS_EVENT_V1: { - ev= new Delete_rows_log_event((const char*) ptr, size, + ev= new Delete_rows_log_event(ptr, size, glob_description_event); break; } case UPDATE_ROWS_EVENT: case UPDATE_ROWS_EVENT_V1: { - ev= new Update_rows_log_event((const char*) ptr, size, + ev= new Update_rows_log_event(ptr, size, glob_description_event); break; } case WRITE_ROWS_COMPRESSED_EVENT: case WRITE_ROWS_COMPRESSED_EVENT_V1: { - ev= new Write_rows_compressed_log_event((const char*) ptr, size, + ev= new Write_rows_compressed_log_event(ptr, size, glob_description_event); break; } case UPDATE_ROWS_COMPRESSED_EVENT: case UPDATE_ROWS_COMPRESSED_EVENT_V1: { - ev= new Update_rows_compressed_log_event((const char*) ptr, size, + ev= new Update_rows_compressed_log_event(ptr, size, glob_description_event); break; } case DELETE_ROWS_COMPRESSED_EVENT: case DELETE_ROWS_COMPRESSED_EVENT_V1: { - ev= new Delete_rows_compressed_log_event((const char*) ptr, size, + ev= new Delete_rows_compressed_log_event(ptr, size, glob_description_event); break; } @@ -1827,9 +1827,10 @@ bool Query_log_event::print_query_header(IO_CACHE* file, { if (print_header(file, print_event_info, FALSE) || my_b_printf(file, - "\t%s\tthread_id=%lu\texec_time=%lu\terror_code=%d\n", + "\t%s\tthread_id=%lu\texec_time=%lu\terror_code=%d" + "\txid=%lu\n", get_type_str(), (ulong) thread_id, (ulong) exec_time, - error_code)) + error_code, (ulong) xid)) goto err; } @@ -1961,7 +1962,7 @@ bool Query_log_event::print_query_header(IO_CACHE* file, { /* for mysql client */ if (my_b_printf(file, "/*!\\C %s */%s\n", - cs_info->csname, 
print_event_info->delimiter)) + cs_info->cs_name.str, print_event_info->delimiter)) goto err; } if (my_b_printf(file,"SET " @@ -2512,7 +2513,7 @@ bool User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) } else error= my_b_printf(&cache, ":=_%s %s COLLATE `%s`%s\n", - cs->csname, hex_str, cs->name, + cs->cs_name.str, hex_str, cs->coll_name.str, print_event_info->delimiter); my_free(hex_str); if (unlikely(error)) @@ -3113,7 +3114,8 @@ int Table_map_log_event::rewrite_db(const char* new_db, size_t new_len, // Create new temp_buf ulong event_cur_len= uint4korr(temp_buf + EVENT_LEN_OFFSET); ulong event_new_len= event_cur_len + len_diff; - char* new_temp_buf= (char*) my_malloc(PSI_NOT_INSTRUMENTED, event_new_len, MYF(MY_WME)); + uchar* new_temp_buf= (uchar*) my_malloc(PSI_NOT_INSTRUMENTED, event_new_len, + MYF(MY_WME)); if (!new_temp_buf) { @@ -3124,7 +3126,7 @@ int Table_map_log_event::rewrite_db(const char* new_db, size_t new_len, } // Rewrite temp_buf - char* ptr= new_temp_buf; + uchar *ptr= new_temp_buf; size_t cnt= 0; // Copy header and change event length @@ -3573,7 +3575,8 @@ void Table_map_log_event::print_columns(IO_CACHE *file, // Print column character set, except in text columns with binary collation if (cs != NULL && (is_enum_or_set_type(real_type) || cs->number != my_charset_bin.number)) - my_b_printf(file, " CHARSET %s COLLATE %s", cs->csname, cs->name); + my_b_printf(file, " CHARSET %s COLLATE %s", cs->cs_name.str, + cs->coll_name.str); if (i != m_colcnt - 1) my_b_printf(file, ",\n# "); } my_b_printf(file, ")"); @@ -3621,12 +3624,13 @@ bool Write_rows_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info) bool Write_rows_compressed_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info) { - char *new_buf; + uchar *new_buf; ulong len; bool is_malloc = false; if(!row_log_event_uncompress(glob_description_event, checksum_alg == BINLOG_CHECKSUM_ALG_CRC32, - temp_buf, UINT_MAX32, NULL, 0, &is_malloc, &new_buf, &len)) + 
temp_buf, UINT_MAX32, NULL, 0, &is_malloc, + &new_buf, &len)) { free_temp_buf(); register_temp_buf(new_buf, true); @@ -3657,12 +3661,13 @@ bool Delete_rows_log_event::print(FILE *file, bool Delete_rows_compressed_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info) { - char *new_buf; + uchar *new_buf; ulong len; bool is_malloc = false; if(!row_log_event_uncompress(glob_description_event, checksum_alg == BINLOG_CHECKSUM_ALG_CRC32, - temp_buf, UINT_MAX32, NULL, 0, &is_malloc, &new_buf, &len)) + temp_buf, UINT_MAX32, NULL, 0, &is_malloc, + &new_buf, &len)) { free_temp_buf(); register_temp_buf(new_buf, true); @@ -3693,12 +3698,13 @@ bool Update_rows_compressed_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info) { - char *new_buf; + uchar *new_buf; ulong len; bool is_malloc= false; if(!row_log_event_uncompress(glob_description_event, checksum_alg == BINLOG_CHECKSUM_ALG_CRC32, - temp_buf, UINT_MAX32, NULL, 0, &is_malloc, &new_buf, &len)) + temp_buf, UINT_MAX32, NULL, 0, &is_malloc, + &new_buf, &len)) { free_temp_buf(); register_temp_buf(new_buf, true); diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 10d1df4f3e7..4e6b9e3f1c8 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1175,7 +1175,7 @@ Old_rows_log_event::Old_rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid, #endif -Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len, +Old_rows_log_event::Old_rows_log_event(const uchar *buf, uint event_len, Log_event_type event_type, const Format_description_log_event *description_event) @@ -1198,8 +1198,8 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len, event_len, common_header_len, post_header_len)); - const char *post_start= buf + common_header_len; - DBUG_DUMP("post_header", (uchar*) post_start, post_header_len); + const uchar *post_start= buf + common_header_len; + DBUG_DUMP("post_header", post_start, post_header_len); post_start+= RW_MAPID_OFFSET; if (post_header_len == 6) 
{ @@ -2417,7 +2417,7 @@ Write_rows_log_event_old::Write_rows_log_event_old(THD *thd_arg, Constructor used by slave to read the event from the binary log. */ #ifdef HAVE_REPLICATION -Write_rows_log_event_old::Write_rows_log_event_old(const char *buf, +Write_rows_log_event_old::Write_rows_log_event_old(const uchar *buf, uint event_len, const Format_description_log_event *description_event) @@ -2530,12 +2530,13 @@ Delete_rows_log_event_old::Delete_rows_log_event_old(THD *thd_arg, Constructor used by slave to read the event from the binary log. */ #ifdef HAVE_REPLICATION -Delete_rows_log_event_old::Delete_rows_log_event_old(const char *buf, - uint event_len, - const Format_description_log_event - *description_event) - : Old_rows_log_event(buf, event_len, PRE_GA_DELETE_ROWS_EVENT, - description_event), +Delete_rows_log_event_old:: +Delete_rows_log_event_old(const uchar *buf, + uint event_len, + const Format_description_log_event + *description_event) + :Old_rows_log_event(buf, event_len, PRE_GA_DELETE_ROWS_EVENT, + description_event), m_after_image(NULL), m_memory(NULL) { } @@ -2544,8 +2545,8 @@ Delete_rows_log_event_old::Delete_rows_log_event_old(const char *buf, #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) -int -Delete_rows_log_event_old::do_before_row_operations(const Slave_reporting_capability *const) +int Delete_rows_log_event_old:: +do_before_row_operations(const Slave_reporting_capability *const) { if ((m_table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && m_table->s->primary_key < MAX_KEY) @@ -2636,7 +2637,7 @@ Update_rows_log_event_old::Update_rows_log_event_old(THD *thd_arg, Constructor used by slave to read the event from the binary log. 
*/ #ifdef HAVE_REPLICATION -Update_rows_log_event_old::Update_rows_log_event_old(const char *buf, +Update_rows_log_event_old::Update_rows_log_event_old(const uchar *buf, uint event_len, const Format_description_log_event @@ -2652,12 +2653,14 @@ Update_rows_log_event_old::Update_rows_log_event_old(const char *buf, #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) int -Update_rows_log_event_old::do_before_row_operations(const Slave_reporting_capability *const) +Update_rows_log_event_old:: +do_before_row_operations(const Slave_reporting_capability *const) { if (m_table->s->keys > 0) { // Allocate buffer for key searches - m_key= (uchar*)my_malloc(key_memory_log_event_old, m_table->key_info->key_length, MYF(MY_WME)); + m_key= (uchar*)my_malloc(key_memory_log_event_old, + m_table->key_info->key_length, MYF(MY_WME)); if (!m_key) return HA_ERR_OUT_OF_MEM; } @@ -2667,8 +2670,8 @@ Update_rows_log_event_old::do_before_row_operations(const Slave_reporting_capabi int -Update_rows_log_event_old::do_after_row_operations(const Slave_reporting_capability *const, - int error) +Update_rows_log_event_old:: +do_after_row_operations(const Slave_reporting_capability *const, int error) { /*error= ToDo:find out what this should really be, this triggers close_scan in nbd, returning error?*/ m_table->file->ha_index_or_rnd_end(); diff --git a/sql/log_event_old.h b/sql/log_event_old.h index 3a11313a31f..e5aaacec209 100644 --- a/sql/log_event_old.h +++ b/sql/log_event_old.h @@ -161,7 +161,7 @@ protected: Old_rows_log_event(THD*, TABLE*, ulong table_id, MY_BITMAP const *cols, bool is_transactional); #endif - Old_rows_log_event(const char *row_data, uint event_len, + Old_rows_log_event(const uchar *row_data, uint event_len, Log_event_type event_type, const Format_description_log_event *description_event); @@ -363,7 +363,7 @@ public: MY_BITMAP const *cols, bool is_transactional); #endif #ifdef HAVE_REPLICATION - Write_rows_log_event_old(const char *buf, uint event_len, + 
Write_rows_log_event_old(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif #if !defined(MYSQL_CLIENT) @@ -436,7 +436,7 @@ public: #endif #ifdef HAVE_REPLICATION - Update_rows_log_event_old(const char *buf, uint event_len, + Update_rows_log_event_old(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif @@ -511,7 +511,7 @@ public: MY_BITMAP const *cols, bool is_transactional); #endif #ifdef HAVE_REPLICATION - Delete_rows_log_event_old(const char *buf, uint event_len, + Delete_rows_log_event_old(const uchar *buf, uint event_len, const Format_description_log_event *description_event); #endif #if !defined(MYSQL_CLIENT) diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc index 56d1c978ac4..66a39cc9e66 100644 --- a/sql/log_event_server.cc +++ b/sql/log_event_server.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2019, Oracle and/or its affiliates. - Copyright (c) 2009, 2021, MariaDB + Copyright (c) 2009, 2022, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -518,6 +518,7 @@ int append_query_string(CHARSET_INFO *csinfo, String *to, const char *str, size_t len, bool no_backslash) { char *beg, *ptr; + my_bool overflow; uint32 const orig_len= to->length(); if (to->reserve(orig_len + len * 2 + 4)) return 1; @@ -531,7 +532,7 @@ int append_query_string(CHARSET_INFO *csinfo, String *to, *ptr++= '\''; if (!no_backslash) { - ptr+= escape_string_for_mysql(csinfo, ptr, 0, str, len); + ptr+= escape_string_for_mysql(csinfo, ptr, 0, str, len, &overflow); } else { @@ -644,7 +645,7 @@ Log_event::do_shall_skip(rpl_group_info *rgi) rli->replicate_same_server_id, rli->slave_skip_counter)); if ((server_id == global_system_variables.server_id && - !rli->replicate_same_server_id) || + !(rli->replicate_same_server_id || (flags & LOG_EVENT_ACCEPT_OWN_F))) || (rli->slave_skip_counter == 1 && 
rli->is_in_group()) || (flags & LOG_EVENT_SKIP_REPLICATION_F && opt_replicate_events_marked_for_skip != RPL_SKIP_REPLICATE)) @@ -661,7 +662,7 @@ Log_event::do_shall_skip(rpl_group_info *rgi) void Log_event::pack_info(Protocol *protocol) { - protocol->store("", &my_charset_bin); + protocol->store("", 0, &my_charset_bin); } @@ -676,7 +677,7 @@ int Log_event::net_send(Protocol *protocol, const char* log_name, my_off_t pos) log_name = p + 1; protocol->prepare_for_resend(); - protocol->store(log_name, &my_charset_bin); + protocol->store(log_name, strlen(log_name), &my_charset_bin); protocol->store((ulonglong) pos); event_type = get_type_str(); protocol->store(event_type, strlen(event_type), &my_charset_bin); @@ -1294,6 +1295,15 @@ bool Query_log_event::write() int3store(start, when_sec_part); start+= 3; } + + /* xid's is used with ddl_log handling */ + if (thd && thd->binlog_xid) + { + *start++= Q_XID; + int8store(start, thd->binlog_xid); + start+= 8; + } + /* NOTE: When adding new status vars, please don't forget to update the MAX_SIZE_LOG_EVENT_STATUS in log_event.h and update the function @@ -1331,14 +1341,14 @@ bool Query_log_event::write() bool Query_compressed_log_event::write() { - char *buffer; + uchar *buffer; uint32 alloc_size, compressed_size; bool ret= true; compressed_size= alloc_size= binlog_get_compress_len(q_len); - buffer= (char*) my_safe_alloca(alloc_size); + buffer= (uchar*) my_safe_alloca(alloc_size); if (buffer && - !binlog_buf_compress(query, buffer, q_len, &compressed_size)) + !binlog_buf_compress((uchar*) query, buffer, q_len, &compressed_size)) { /* Write the compressed event. 
We have to temporarily store the event @@ -1346,7 +1356,7 @@ bool Query_compressed_log_event::write() */ const char *query_tmp= query; uint32 q_len_tmp= q_len; - query= buffer; + query= (char*) buffer; q_len= compressed_size; ret= Query_log_event::write(); query= query_tmp; @@ -1901,8 +1911,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, thd->variables.sql_log_slow= !MY_TEST(global_system_variables.log_slow_disabled_statements & LOG_SLOW_DISABLE_SLAVE); } - mysql_parse(thd, thd->query(), thd->query_length(), &parser_state, - FALSE, FALSE); + mysql_parse(thd, thd->query(), thd->query_length(), &parser_state); /* Finalize server status flags after executing a statement. */ thd->update_server_status(); log_slow_statement(thd); @@ -2166,9 +2175,10 @@ Query_log_event::do_shall_skip(rpl_group_info *rgi) bool -Query_log_event::peek_is_commit_rollback(const char *event_start, +Query_log_event::peek_is_commit_rollback(const uchar *event_start, size_t event_len, - enum enum_binlog_checksum_alg checksum_alg) + enum enum_binlog_checksum_alg + checksum_alg) { if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32) { @@ -3257,10 +3267,13 @@ bool Binlog_checkpoint_log_event::write() Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg, uint32 domain_id_arg, bool standalone, uint16 flags_arg, bool is_transactional, - uint64 commit_id_arg) + uint64 commit_id_arg, bool has_xid, + bool ro_1pc) : Log_event(thd_arg, flags_arg, is_transactional), seq_no(seq_no_arg), commit_id(commit_id_arg), domain_id(domain_id_arg), - flags2((standalone ? FL_STANDALONE : 0) | (commit_id_arg ? FL_GROUP_COMMIT_ID : 0)) + flags2((standalone ? FL_STANDALONE : 0) | + (commit_id_arg ? 
FL_GROUP_COMMIT_ID : 0)), + flags_extra(0), extra_engines(0) { cache_type= Log_event::EVENT_NO_CACHE; bool is_tmp_table= thd_arg->lex->stmt_accessed_temp_table(); @@ -3283,16 +3296,41 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg, flags2|= (thd_arg->rgi_slave->gtid_ev_flags2 & (FL_DDL|FL_WAITED)); XID_STATE &xid_state= thd->transaction->xid_state; - if (is_transactional && xid_state.is_explicit_XA() && - (thd->lex->sql_command == SQLCOM_XA_PREPARE || - xid_state.get_state_code() == XA_PREPARED)) + if (is_transactional) { - DBUG_ASSERT(!(thd->lex->sql_command == SQLCOM_XA_COMMIT && - thd->lex->xa_opt == XA_ONE_PHASE)); + if (xid_state.is_explicit_XA() && + (thd->lex->sql_command == SQLCOM_XA_PREPARE || + xid_state.get_state_code() == XA_PREPARED)) + { + DBUG_ASSERT(!(thd->lex->sql_command == SQLCOM_XA_COMMIT && + thd->lex->xa_opt == XA_ONE_PHASE)); + + flags2|= thd->lex->sql_command == SQLCOM_XA_PREPARE ? + FL_PREPARED_XA : FL_COMPLETED_XA; + xid.set(xid_state.get_xid()); + } + /* count non-zero extra recoverable engines; total = extra + 1 */ + if (has_xid) + { + DBUG_ASSERT(ha_count_rw_2pc(thd_arg, + thd_arg->in_multi_stmt_transaction_mode())); + + extra_engines= + ha_count_rw_2pc(thd_arg, thd_arg->in_multi_stmt_transaction_mode()) - 1; + } + else if (ro_1pc) + { + extra_engines= UCHAR_MAX; + } + else if (thd->lex->sql_command == SQLCOM_XA_PREPARE) + { + DBUG_ASSERT(thd_arg->in_multi_stmt_transaction_mode()); - flags2|= thd->lex->sql_command == SQLCOM_XA_PREPARE ? - FL_PREPARED_XA : FL_COMPLETED_XA; - xid.set(xid_state.get_xid()); + uint8 count= ha_count_rw_2pc(thd_arg, true); + extra_engines= count > 1 ? 0 : UCHAR_MAX; + } + if (extra_engines > 0) + flags_extra|= FL_EXTRA_MULTI_ENGINE; } } @@ -3302,12 +3340,12 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg, fully contruct every Gtid_log_event() needlessly. 
*/ bool -Gtid_log_event::peek(const char *event_start, size_t event_len, +Gtid_log_event::peek(const uchar *event_start, size_t event_len, enum enum_binlog_checksum_alg checksum_alg, uint32 *domain_id, uint32 *server_id, uint64 *seq_no, uchar *flags2, const Format_description_log_event *fdev) { - const char *p; + const uchar *p; if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32) { @@ -3328,7 +3366,7 @@ Gtid_log_event::peek(const char *event_start, size_t event_len, p+= 8; *domain_id= uint4korr(p); p+= 4; - *flags2= (uchar)*p; + *flags2= *p; return false; } @@ -3336,19 +3374,19 @@ Gtid_log_event::peek(const char *event_start, size_t event_len, bool Gtid_log_event::write() { - uchar buf[GTID_HEADER_LEN+2+sizeof(XID)]; - size_t write_len; + uchar buf[GTID_HEADER_LEN+2+sizeof(XID) + /* flags_extra: */ 1+4]; + size_t write_len= 13; int8store(buf, seq_no); int4store(buf+8, domain_id); buf[12]= flags2; if (flags2 & FL_GROUP_COMMIT_ID) { - int8store(buf+13, commit_id); + DBUG_ASSERT(write_len + 8 == GTID_HEADER_LEN + 2); + + int8store(buf+write_len, commit_id); write_len= GTID_HEADER_LEN + 2; } - else - write_len= 13; if (flags2 & (FL_PREPARED_XA | FL_COMPLETED_XA)) { @@ -3360,6 +3398,16 @@ Gtid_log_event::write() memcpy(buf+write_len, xid.data, data_length); write_len+= data_length; } + if (flags_extra > 0) + { + buf[write_len]= flags_extra; + write_len++; + } + if (flags_extra & FL_EXTRA_MULTI_ENGINE) + { + buf[write_len]= extra_engines; + write_len++; + } if (write_len < GTID_HEADER_LEN) { @@ -4140,9 +4188,9 @@ static bool user_var_append_name_part(THD *thd, String *buf, const char *name, size_t name_len) { - return buf->append("@") || + return buf->append('@') || append_identifier(thd, buf, name, name_len) || - buf->append("="); + buf->append('='); } void User_var_log_event::pack_info(Protocol* protocol) @@ -4153,7 +4201,7 @@ void User_var_log_event::pack_info(Protocol* protocol) String buf(buf_mem, sizeof(buf_mem), system_charset_info); buf.length(0); if 
(user_var_append_name_part(protocol->thd, &buf, name, name_len) || - buf.append("NULL")) + buf.append(NULL_clex_str)) return; protocol->store(buf.ptr(), buf.length(), &my_charset_bin); } @@ -4198,9 +4246,10 @@ void User_var_log_event::pack_info(Protocol* protocol) buf.length(0); my_decimal((const uchar *) (val + 2), val[0], val[1]).to_string(&str); if (user_var_append_name_part(protocol->thd, &buf, name, name_len) || - buf.append(buf2)) + buf.append(str)) return; protocol->store(buf.ptr(), buf.length(), &my_charset_bin); + break; } case STRING_RESULT: @@ -4212,7 +4261,7 @@ void User_var_log_event::pack_info(Protocol* protocol) buf.length(0); if (!(cs= get_charset(charset_number, MYF(0)))) { - if (buf.append("???")) + if (buf.append(STRING_WITH_LEN("???"))) return; } else @@ -4220,9 +4269,9 @@ void User_var_log_event::pack_info(Protocol* protocol) size_t old_len; char *beg, *end; if (user_var_append_name_part(protocol->thd, &buf, name, name_len) || - buf.append("_") || - buf.append(cs->csname) || - buf.append(" ")) + buf.append('_') || + buf.append(cs->cs_name) || + buf.append(' ')) return; old_len= buf.length(); if (buf.reserve(old_len + val_len * 2 + 3 + sizeof(" COLLATE ") + @@ -4231,8 +4280,8 @@ void User_var_log_event::pack_info(Protocol* protocol) beg= const_cast<char *>(buf.ptr()) + old_len; end= str_to_hex(beg, val, val_len); buf.length(old_len + (end - beg)); - if (buf.append(" COLLATE ") || - buf.append(cs->name)) + if (buf.append(STRING_WITH_LEN(" COLLATE ")) || + buf.append(cs->coll_name)) return; } protocol->store(buf.ptr(), buf.length(), &my_charset_bin); @@ -5036,7 +5085,7 @@ void Execute_load_query_log_event::pack_info(Protocol *protocol) } if (query && q_len && buf.append(query, q_len)) return; - if (buf.append(" ;file_id=") || + if (buf.append(STRING_WITH_LEN(" ;file_id=")) || buf.append_ulonglong(file_id)) return; protocol->store(buf.ptr(), buf.length(), &my_charset_bin); @@ -6009,14 +6058,15 @@ bool Rows_log_event::write_data_body() bool 
Rows_log_event::write_compressed() { - uchar *m_rows_buf_tmp = m_rows_buf; - uchar *m_rows_cur_tmp = m_rows_cur; - bool ret = true; + uchar *m_rows_buf_tmp= m_rows_buf; + uchar *m_rows_cur_tmp= m_rows_cur; + bool ret= true; uint32 comlen, alloc_size; - comlen= alloc_size= binlog_get_compress_len((uint32)(m_rows_cur_tmp - m_rows_buf_tmp)); - m_rows_buf = (uchar *)my_safe_alloca(alloc_size); + comlen= alloc_size= binlog_get_compress_len((uint32)(m_rows_cur_tmp - + m_rows_buf_tmp)); + m_rows_buf= (uchar*) my_safe_alloca(alloc_size); if(m_rows_buf && - !binlog_buf_compress((const char *)m_rows_buf_tmp, (char *)m_rows_buf, + !binlog_buf_compress(m_rows_buf_tmp, m_rows_buf, (uint32)(m_rows_cur_tmp - m_rows_buf_tmp), &comlen)) { m_rows_cur= comlen + m_rows_buf; @@ -7499,13 +7549,21 @@ Write_rows_log_event::do_exec_row(rpl_group_info *rgi) { DBUG_ASSERT(m_table != NULL); const char *tmp= thd->get_proc_info(); - const char *message= "Write_rows_log_event::write_row()"; + LEX_CSTRING tmp_db= thd->db; + char *message, msg[128]; + const char *table_name= m_table->s->table_name.str; + char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name)); + my_snprintf(msg, sizeof(msg),"Write_rows_log_event::write_row() on table %c%s%c", + quote_char, table_name, quote_char); + thd->reset_db(&m_table->s->db); + message= msg; int error; #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Write_rows_log_event::write_row(%lld)", - (long long) wsrep_thd_trx_seqno(thd)); + "Write_rows_log_event::write_row(%lld) on table %c%s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, + quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ @@ -7519,6 +7577,7 @@ Write_rows_log_event::do_exec_row(rpl_group_info *rgi) my_error(ER_UNKNOWN_ERROR, MYF(0)); } + thd->reset_db(&tmp_db); return error; } @@ -8115,14 +8174,22 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) { int error; const char *tmp= 
thd->get_proc_info(); - const char *message= "Delete_rows_log_event::find_row()"; + LEX_CSTRING tmp_db= thd->db; + char *message, msg[128]; + const char *table_name= m_table->s->table_name.str; + char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name)); + my_snprintf(msg, sizeof(msg),"Delete_rows_log_event::find_row() on table %c%s%c", + quote_char, table_name, quote_char); + thd->reset_db(&m_table->s->db); + message= msg; const bool invoke_triggers= (m_table->triggers && do_invoke_trigger()); DBUG_ASSERT(m_table != NULL); #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Delete_rows_log_event::find_row(%lld)", - (long long) wsrep_thd_trx_seqno(thd)); + "Delete_rows_log_event::find_row(%lld) on table %c%s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, + quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ @@ -8132,11 +8199,14 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) /* Delete the record found, located in record[0] */ - message= "Delete_rows_log_event::ha_delete_row()"; + my_snprintf(msg, sizeof(msg),"Delete_rows_log_event::ha_delete_row() on table %c%s%c", + quote_char, table_name, quote_char); + message= msg; #ifdef WSREP_PROC_INFO snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Delete_rows_log_event::ha_delete_row(%lld)", - (long long) wsrep_thd_trx_seqno(thd)); + "Delete_rows_log_event::ha_delete_row(%lld) on table %c%s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, + quote_char); message= thd->wsrep_info; #endif thd_proc_info(thd, message); @@ -8167,6 +8237,7 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi) error= HA_ERR_GENERIC; // in case if error is not set yet m_table->file->ha_index_or_rnd_end(); } + thd->reset_db(&tmp_db); thd_proc_info(thd, tmp); return error; } @@ -8266,13 +8337,21 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) { const bool invoke_triggers= (m_table->triggers && 
do_invoke_trigger()); const char *tmp= thd->get_proc_info(); - const char *message= "Update_rows_log_event::find_row()"; DBUG_ASSERT(m_table != NULL); + LEX_CSTRING tmp_db= thd->db; + char *message, msg[128]; + const char *table_name= m_table->s->table_name.str; + char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name)); + my_snprintf(msg, sizeof(msg),"Update_rows_log_event::find_row() on table %c%s%c", + quote_char, table_name, quote_char); + thd->reset_db(&m_table->s->db); + message= msg; #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::find_row(%lld)", - (long long) wsrep_thd_trx_seqno(thd)); + "Update_rows_log_event::find_row(%lld) on table %c%s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, + quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ @@ -8293,6 +8372,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) if ((m_curr_row= m_curr_row_end)) unpack_current_row(rgi, &m_cols_ai); thd_proc_info(thd, tmp); + thd->reset_db(&tmp_db); return error; } @@ -8310,11 +8390,14 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) store_record(m_table,record[1]); m_curr_row= m_curr_row_end; - message= "Update_rows_log_event::unpack_current_row()"; + my_snprintf(msg, sizeof(msg),"Update_rows_log_event::unpack_current_row() on table %c%s%c", + quote_char, table_name, quote_char); + message= msg; #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::unpack_current_row(%lld)", - (long long) wsrep_thd_trx_seqno(thd)); + "Update_rows_log_event::unpack_current_row(%lld) on table %c%s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, + quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ @@ -8337,11 +8420,13 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) DBUG_DUMP("new values", m_table->record[0], m_table->s->reclength); #endif - message= 
"Update_rows_log_event::ha_update_row()"; + my_snprintf(msg, sizeof(msg),"Update_rows_log_event::ha_update_row() on table %c%s%c", + quote_char, table_name, quote_char); + message= msg; #ifdef WSREP_PROC_INFO my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1, - "Update_rows_log_event::ha_update_row(%lld)", - (long long) wsrep_thd_trx_seqno(thd)); + "Update_rows_log_event::ha_update_row(%lld) on table %c%s%c", + (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, quote_char); message= thd->wsrep_info; #endif /* WSREP_PROC_INFO */ @@ -8370,9 +8455,10 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) unlikely(process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE))) error= HA_ERR_GENERIC; // in case if error is not set yet - thd_proc_info(thd, tmp); err: + thd_proc_info(thd, tmp); + thd->reset_db(&tmp_db); m_table->file->ha_index_or_rnd_end(); return error; } @@ -8414,9 +8500,8 @@ Log_event* wsrep_read_log_event( char **arg_buf, size_t *arg_buf_len, const Format_description_log_event *description_event) { - char *head= (*arg_buf); + uchar *head= (uchar*) (*arg_buf); uint data_len = uint4korr(head + EVENT_LEN_OFFSET); - char *buf= (*arg_buf); const char *error= 0; Log_event *res= 0; DBUG_ENTER("wsrep_read_log_event"); @@ -8427,15 +8512,16 @@ Log_event* wsrep_read_log_event( goto err; } - res= Log_event::read_log_event(buf, data_len, &error, description_event, false); + res= Log_event::read_log_event(head, data_len, &error, description_event, + false); err: if (!res) { DBUG_ASSERT(error != 0); sql_print_error("Error in Log_event::read_log_event(): " - "'%s', data_len: %d, event_type: %d", - error,data_len,(uchar)head[EVENT_TYPE_OFFSET]); + "'%s', data_len: %u, event_type: %d", + error, data_len, (int) head[EVENT_TYPE_OFFSET]); } (*arg_buf)+= data_len; (*arg_buf_len)-= data_len; @@ -8445,8 +8531,7 @@ err: #if defined(HAVE_REPLICATION) -int -Incident_log_event::do_apply_event(rpl_group_info *rgi) +int 
Incident_log_event::do_apply_event(rpl_group_info *rgi) { Relay_log_info const *rli= rgi->rli; DBUG_ENTER("Incident_log_event::do_apply_event"); @@ -8499,7 +8584,7 @@ void Ignorable_log_event::pack_info(Protocol *protocol) #if defined(HAVE_REPLICATION) -Heartbeat_log_event::Heartbeat_log_event(const char* buf, ulong event_len, +Heartbeat_log_event::Heartbeat_log_event(const uchar *buf, uint event_len, const Format_description_log_event* description_event) :Log_event(buf, description_event) { @@ -8529,9 +8614,9 @@ Heartbeat_log_event::Heartbeat_log_event(const char* buf, ulong event_len, 1 Don't write event */ -bool event_that_should_be_ignored(const char *buf) +bool event_that_should_be_ignored(const uchar *buf) { - uint event_type= (uchar)buf[EVENT_TYPE_OFFSET]; + uint event_type= buf[EVENT_TYPE_OFFSET]; if (event_type == GTID_LOG_EVENT || event_type == ANONYMOUS_GTID_LOG_EVENT || event_type == PREVIOUS_GTIDS_LOG_EVENT || diff --git a/sql/main.cc b/sql/main.cc index 957efb8fa2e..357c72b815d 100644 --- a/sql/main.cc +++ b/sql/main.cc @@ -17,10 +17,20 @@ /* main() for mysqld. Calls mysqld_main() entry point exported by sql library. + On Windows, might do some service handling. 
*/ +#ifdef _WIN32 +/* Windows main function, service handling, calls mysqld_main */ +extern int mysqld_win_main(int argc, char **argv); +#else extern int mysqld_main(int argc, char **argv); +#endif int main(int argc, char **argv) { +#ifdef _WIN32 + return mysqld_win_main(argc, argv); +#else return mysqld_main(argc, argv); +#endif } diff --git a/sql/mdl.cc b/sql/mdl.cc index 863f063a774..f1c9c6e0444 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -1625,7 +1625,7 @@ MDL_lock::MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END]= TD | + + + + + - - + + + + + + + | SD | + + + + - - - + + + + + + + | DDL | + + + - - - - + + + + - + + | - BLOCK_DDL | + + + + + + + + + + - + + + | + BLOCK_DDL | - + + + + + + + + + - + + + | ALTER_COP | + + + + + - - + + + + + + + | COMMIT | + + + + - + - + + + + + + + | @@ -1664,7 +1664,7 @@ const MDL_lock::bitmap_t MDL_lock::MDL_backup_lock::m_granted_incompatible[MDL_BACKUP_END]= { /* MDL_BACKUP_START */ - MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT), + MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_BLOCK_DDL), MDL_BIT(MDL_BACKUP_START), MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_DML), MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_DDL), @@ -1680,7 +1680,7 @@ MDL_lock::MDL_backup_lock::m_granted_incompatible[MDL_BACKUP_END]= /* MDL_BACKUP_DDL */ MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2) | MDL_BIT(MDL_BACKUP_BLOCK_DDL), /* MDL_BACKUP_BLOCK_DDL */ - MDL_BIT(MDL_BACKUP_DDL), + MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_BLOCK_DDL) | MDL_BIT(MDL_BACKUP_DDL), 
MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2), /* MDL_BACKUP_COMMIT */ MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL2) @@ -1691,7 +1691,7 @@ const MDL_lock::bitmap_t MDL_lock::MDL_backup_lock::m_waiting_incompatible[MDL_BACKUP_END]= { /* MDL_BACKUP_START */ - MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT), + MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_BLOCK_DDL), 0, 0, 0, @@ -1707,7 +1707,7 @@ MDL_lock::MDL_backup_lock::m_waiting_incompatible[MDL_BACKUP_END]= /* MDL_BACKUP_DDL */ MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2) | MDL_BIT(MDL_BACKUP_BLOCK_DDL), /* MDL_BACKUP_BLOCK_DDL */ - 0, + MDL_BIT(MDL_BACKUP_START), MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2), /* MDL_BACKUP_COMMIT */ MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL2) @@ -2974,6 +2974,10 @@ void MDL_context::release_all_locks_for_name(MDL_ticket *name) void MDL_ticket::downgrade_lock(enum_mdl_type type) { + DBUG_ENTER("MDL_ticket::downgrade_lock"); + DBUG_PRINT("enter",("old_type: %s new_type: %s", + get_type_name()->str, + get_type_name(type)->str)); /* Do nothing if already downgraded. Used when we FLUSH TABLE under LOCK TABLES and a table is listed twice in LOCK TABLES list. @@ -2982,7 +2986,10 @@ void MDL_ticket::downgrade_lock(enum_mdl_type type) here that target lock is weaker than existing lock. 
*/ if (m_type == type || !has_stronger_or_equal_type(type)) - return; + { + DBUG_PRINT("info", ("Nothing to downgrade")); + DBUG_VOID_RETURN; + } /* Only allow downgrade in some specific known cases */ DBUG_ASSERT((get_key()->mdl_namespace() != MDL_key::BACKUP && @@ -2990,6 +2997,7 @@ void MDL_ticket::downgrade_lock(enum_mdl_type type) m_type == MDL_SHARED_NO_WRITE)) || (get_key()->mdl_namespace() == MDL_key::BACKUP && (m_type == MDL_BACKUP_DDL || + m_type == MDL_BACKUP_BLOCK_DDL || m_type == MDL_BACKUP_WAIT_FLUSH))); mysql_prlock_wrlock(&m_lock->m_rwlock); @@ -3002,6 +3010,7 @@ void MDL_ticket::downgrade_lock(enum_mdl_type type) m_lock->m_granted.add_ticket(this); m_lock->reschedule_waiters(); mysql_prlock_unlock(&m_lock->m_rwlock); + DBUG_VOID_RETURN; } diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc index 877a49edbec..a8087ed5fc5 100644 --- a/sql/mf_iocache.cc +++ b/sql/mf_iocache.cc @@ -87,7 +87,7 @@ int _my_b_net_read(IO_CACHE *info, uchar *Buffer, size_t) } /* extern "C" */ -#elif defined(__WIN__) +#elif defined(_WIN32) // Remove linker warning 4221 about empty file namespace { char dummy; }; diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index c7c3079f28f..2701dac56c4 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -179,6 +179,8 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, { /* Can't scan one range => can't do MRR scan at all */ total_rows= HA_POS_ERROR; + if (thd->is_error()) + DBUG_RETURN(HA_POS_ERROR); break; } if (pages.first_page == UNUSED_PAGE_NO) diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc index edf3e82de40..54b038ccb2d 100644 --- a/sql/my_decimal.cc +++ b/sql/my_decimal.cc @@ -115,7 +115,7 @@ int my_decimal::to_string_native(String *str, uint fixed_prec, uint fixed_dec, ? (fixed_prec + ((fixed_prec == fixed_dec) ? 
1 : 0) + 1) : my_decimal_string_length(this)); int result; - if (str->alloc(length)) + if (str->alloc(length+1)) // Alloc also space for \0 return check_result(mask, E_DEC_OOM); result= decimal2string(this, (char*) str->ptr(), &length, (int)fixed_prec, fixed_dec, @@ -198,7 +198,8 @@ str_set_decimal(uint mask, const my_decimal *val, E_DEC_OVERFLOW */ -int my_decimal::to_binary(uchar *bin, int prec, int scale, uint mask) const +int my_decimal::to_binary(uchar *bin, int prec, decimal_digits_t scale, + uint mask) const { int err1= E_DEC_OK, err2; my_decimal rounded; @@ -329,7 +330,7 @@ my_decimal *date2my_decimal(const MYSQL_TIME *ltime, my_decimal *dec) } -void my_decimal_trim(ulonglong *precision, uint *scale) +void my_decimal_trim(ulonglong *precision, decimal_digits_t *scale) { if (!(*precision) && !(*scale)) { diff --git a/sql/my_decimal.h b/sql/my_decimal.h index 4c1f41463d5..a0e3be2fbd9 100644 --- a/sql/my_decimal.h +++ b/sql/my_decimal.h @@ -50,7 +50,8 @@ typedef struct st_mysql_time MYSQL_TIME; #define DECIMAL_MAX_FIELD_SIZE DECIMAL_MAX_PRECISION -inline uint my_decimal_size(uint precision, uint scale) +inline uint my_decimal_size(decimal_digits_t precision, + decimal_digits_t scale) { /* Always allocate more space to allow library to put decimal point @@ -60,9 +61,12 @@ inline uint my_decimal_size(uint precision, uint scale) } -inline int my_decimal_int_part(uint precision, uint decimals) +inline decimal_digits_t my_decimal_int_part(decimal_digits_t precision, + decimal_digits_t decimals) { - return precision - ((decimals == DECIMAL_NOT_SPECIFIED) ? 0 : decimals); + return (decimal_digits_t) (precision - + ((decimals == DECIMAL_NOT_SPECIFIED) ? 
0 : + decimals)); } @@ -147,7 +151,7 @@ public: { init(); } - my_decimal(const uchar *bin, int prec, int scale) + my_decimal(const uchar *bin, decimal_digits_t prec, decimal_digits_t scale) { init(); check_result(E_DEC_FATAL_ERROR, bin2decimal(bin, this, prec, scale)); @@ -168,7 +172,7 @@ public: bool sign() const { return decimal_t::sign; } void sign(bool s) { decimal_t::sign= s; } - uint precision() const { return intg + frac; } + decimal_digits_t precision() const { return (decimal_digits_t) (intg + frac); } void set_zero() { /* @@ -217,17 +221,19 @@ public: { return to_string(to, 0, 0, 0); } - String *to_string_round(String *to, int scale, my_decimal *round_buff) const + String *to_string_round(String *to, decimal_digits_t scale, + my_decimal *round_buff) const { (void) round_to(round_buff, scale, HALF_UP); // QQ: check result? return round_buff->to_string(to); } + /* Scale can be negative here when called from truncate() */ int round_to(my_decimal *to, int scale, decimal_round_mode mode, int mask= E_DEC_FATAL_ERROR) const { return check_result(mask, decimal_round(this, to, scale, mode)); } - int to_binary(uchar *bin, int prec, int scale, + int to_binary(uchar *bin, int prec, decimal_digits_t scale, uint mask= E_DEC_FATAL_ERROR) const; #endif /** Swap two my_decimal values */ @@ -253,7 +259,8 @@ bool str_set_decimal(uint mask, const my_decimal *val, uint fixed_prec, extern my_decimal decimal_zero; inline -void max_my_decimal(my_decimal *to, int precision, int frac) +void max_my_decimal(my_decimal *to, decimal_digits_t precision, + decimal_digits_t frac) { DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION)&& (frac <= DECIMAL_MAX_SCALE)); @@ -277,30 +284,34 @@ inline int check_result_and_overflow(uint mask, int result, my_decimal *val) return result; } -inline uint my_decimal_length_to_precision(uint length, uint scale, - bool unsigned_flag) +inline decimal_digits_t my_decimal_length_to_precision(decimal_digits_t length, + decimal_digits_t scale, + bool 
unsigned_flag) { /* Precision can't be negative thus ignore unsigned_flag when length is 0. */ DBUG_ASSERT(length || !scale); - return (uint) (length - (scale>0 ? 1:0) - - (unsigned_flag || !length ? 0:1)); + return (decimal_digits_t) (length - (scale>0 ? 1:0) - + (unsigned_flag || !length ? 0:1)); } -inline uint32 my_decimal_precision_to_length_no_truncation(uint precision, - uint8 scale, - bool unsigned_flag) +inline decimal_digits_t +my_decimal_precision_to_length_no_truncation(decimal_digits_t precision, + decimal_digits_t scale, + bool unsigned_flag) { /* When precision is 0 it means that original length was also 0. Thus unsigned_flag is ignored in this case. */ DBUG_ASSERT(precision || !scale); - return (uint32)(precision + (scale > 0 ? 1 : 0) + - (unsigned_flag || !precision ? 0 : 1)); + return (decimal_digits_t)(precision + (scale > 0 ? 1 : 0) + + (unsigned_flag || !precision ? 0 : 1)); } -inline uint32 my_decimal_precision_to_length(uint precision, uint8 scale, - bool unsigned_flag) +inline decimal_digits_t +my_decimal_precision_to_length(decimal_digits_t precision, + decimal_digits_t scale, + bool unsigned_flag) { /* When precision is 0 it means that original length was also 0. 
Thus @@ -313,7 +324,7 @@ inline uint32 my_decimal_precision_to_length(uint precision, uint8 scale, } inline -int my_decimal_string_length(const my_decimal *d) +uint my_decimal_string_length(const my_decimal *d) { /* length of string representation including terminating '\0' */ return decimal_string_size(d); @@ -321,7 +332,7 @@ int my_decimal_string_length(const my_decimal *d) inline -int my_decimal_max_length(const my_decimal *d) +uint my_decimal_max_length(const my_decimal *d) { /* -1 because we do not count \0 */ return decimal_string_size(d) - 1; @@ -329,9 +340,10 @@ int my_decimal_max_length(const my_decimal *d) inline -int my_decimal_get_binary_size(uint precision, uint scale) +uint my_decimal_get_binary_size(decimal_digits_t precision, + decimal_digits_t scale) { - return decimal_bin_size((int)precision, (int)scale); + return decimal_bin_size(precision, scale); } @@ -343,8 +355,8 @@ void my_decimal2decimal(const my_decimal *from, my_decimal *to) inline -int binary2my_decimal(uint mask, const uchar *bin, my_decimal *d, int prec, - int scale) +int binary2my_decimal(uint mask, const uchar *bin, my_decimal *d, + decimal_digits_t prec, decimal_digits_t scale) { return check_result(mask, bin2decimal(bin, d, prec, scale)); } @@ -531,7 +543,7 @@ int my_decimal_intg(const my_decimal *a) } -void my_decimal_trim(ulonglong *precision, uint *scale); +void my_decimal_trim(ulonglong *precision, decimal_digits_t *scale); #endif /*my_decimal_h*/ diff --git a/sql/my_json_writer.cc b/sql/my_json_writer.cc index 0397f87dd77..9470ba57855 100644 --- a/sql/my_json_writer.cc +++ b/sql/my_json_writer.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2014, 2020, MariaDB Corporation. +/* Copyright (C) 2014, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -53,7 +53,7 @@ void Json_writer::start_object() if (!element_started) start_element(); - output.append("{"); + output.append('{'); indent_level+=INDENT_SIZE; first_child=true; element_started= false; @@ -80,7 +80,7 @@ void Json_writer::start_array() if (!element_started) start_element(); - output.append("["); + output.append('['); indent_level+=INDENT_SIZE; first_child=true; element_started= false; @@ -100,7 +100,7 @@ void Json_writer::end_object() if (!first_child) append_indent(); first_child= false; - output.append("}"); + output.append('}'); } @@ -116,7 +116,7 @@ void Json_writer::end_array() indent_level-=INDENT_SIZE; if (!first_child) append_indent(); - output.append("]"); + output.append(']'); } @@ -136,7 +136,7 @@ Json_writer& Json_writer::add_member(const char *name, size_t len) output.append('"'); output.append(name, len); - output.append("\": ", 3); + output.append(STRING_WITH_LEN("\": ")); } #if !defined(NDEBUG) || defined(JSON_WRITER_UNIT_TEST) if (!fmt_helper.is_making_writer_calls()) @@ -414,13 +414,13 @@ void Single_line_formatting_helper::flush_on_one_line() { owner->output.append('"'); owner->output.append(str); - owner->output.append("\": "); + owner->output.append(STRING_WITH_LEN("\": ")); owner->output.append('['); } else { if (nr != 1) - owner->output.append(", "); + owner->output.append(STRING_WITH_LEN(", ")); owner->output.append('"'); owner->output.append(str); owner->output.append('"'); diff --git a/sql/my_json_writer.h b/sql/my_json_writer.h index bf654db5759..089abd0ad48 100644 --- a/sql/my_json_writer.h +++ b/sql/my_json_writer.h @@ -27,6 +27,7 @@ #ifdef JSON_WRITER_UNIT_TEST #include "sql_string.h" +constexpr uint FAKE_SELECT_LEX_ID= UINT_MAX; // Also, mock objects are defined in my_json_writer-t.cc #define VALIDITY_ASSERT(x) if (!(x)) this->invalid_json= true; #else @@ -575,7 +576,7 @@ public: if 
(my_writer) { add_member("select_id"); - if (unlikely(select_number >= INT_MAX)) + if (unlikely(select_number == FAKE_SELECT_LEX_ID)) context.add_str("fake"); else context.add_ll(static_cast<longlong>(select_number)); diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc index 35e24a521e4..1e79e6444ff 100644 --- a/sql/mysql_install_db.cc +++ b/sql/mysql_install_db.cc @@ -26,9 +26,13 @@ #include <shellapi.h> #include <accctrl.h> #include <aclapi.h> +#include <ntsecapi.h> +#include <sddl.h> struct IUnknown; #include <shlwapi.h> +#include <string> + #define USAGETEXT \ "mysql_install_db.exe Ver 1.00 for Windows\n" \ "Copyright (C) 2010-2011 Monty Program Ab & Vladislav Vaintroub\n" \ @@ -39,9 +43,8 @@ struct IUnknown; extern "C" const char* mysql_bootstrap_sql[]; -static char default_os_user[]= "NT AUTHORITY\\NetworkService"; static char default_datadir[MAX_PATH]; -static int create_db_instance(); +static int create_db_instance(const char *datadir); static uint opt_silent; static char datadir_buffer[FN_REFLEN]; static char mysqld_path[FN_REFLEN]; @@ -51,13 +54,13 @@ static char *opt_password; static int opt_port; static int opt_innodb_page_size; static char *opt_socket; -static char *opt_os_user; -static char *opt_os_password; static my_bool opt_default_user; static my_bool opt_allow_remote_root_access; static my_bool opt_skip_networking; static my_bool opt_verbose_bootstrap; static my_bool verbose_errors; +static my_bool opt_large_pages; +static char *opt_config; #define DEFAULT_INNODB_PAGE_SIZE 16*1024 @@ -73,14 +76,14 @@ static struct my_option my_long_options[]= &opt_password, &opt_password, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "mysql port", &opt_port, &opt_port, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"socket", 'W', + {"socket", 'W', "named pipe name (if missing, it will be set the same as service)", &opt_socket, &opt_socket, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-user", 'D', "Create default user", 
&opt_default_user, &opt_default_user, 0 , GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"allow-remote-root-access", 'R', + {"allow-remote-root-access", 'R', "Allows remote access from network for user root", - &opt_allow_remote_root_access, &opt_allow_remote_root_access, 0 , GET_BOOL, + &opt_allow_remote_root_access, &opt_allow_remote_root_access, 0 , GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"skip-networking", 'N', "Do not use TCP connections, use pipe instead", &opt_skip_networking, &opt_skip_networking, 0 , GET_BOOL, OPT_ARG, 0, 0, 0, 0, @@ -91,6 +94,10 @@ static struct my_option my_long_options[]= &opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"verbose-bootstrap", 'o', "Include mysqld bootstrap output",&opt_verbose_bootstrap, &opt_verbose_bootstrap, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + { "large-pages",'l', "Use large pages", &opt_large_pages, + &opt_large_pages, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"config",'c', "my.ini config template file", &opt_config, + &opt_config, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -120,22 +127,13 @@ ATTRIBUTE_NORETURN static void die(const char *fmt, ...) fprintf(stderr, "FATAL ERROR: "); vfprintf(stderr, fmt, args); fputc('\n', stderr); - if (verbose_errors) - { - fprintf(stderr, - "https://mariadb.com/kb/en/installation-issues-on-windows contains some help\n" - "for solving the most common problems. If this doesn't help you, please\n" - "leave a comment in the Knowledge Base or file a bug report at\n" - "https://jira.mariadb.org"); - } - fflush(stderr); va_end(args); my_end(0); exit(1); } -static void verbose(const char *fmt, ...) +static void verbose( const char *fmt, ...) { va_list args; @@ -150,15 +148,16 @@ static void verbose(const char *fmt, ...) 
va_end(args); } +static char full_config_path[MAX_PATH]; int main(int argc, char **argv) { int error; - char self_name[FN_REFLEN]; + char self_name[MAX_PATH]; char *p; - + char *datadir = NULL; MY_INIT(argv[0]); - GetModuleFileName(NULL, self_name, FN_REFLEN); + GetModuleFileName(NULL, self_name, MAX_PATH); strcpy(mysqld_path,self_name); p= strrchr(mysqld_path, FN_LIBCHAR); if (p) @@ -168,7 +167,56 @@ int main(int argc, char **argv) if ((error= handle_options(&argc, &argv, my_long_options, get_one_option))) exit(error); - if (!opt_datadir) + + if (opt_config != 0 && _access(opt_config, 04) != 0) + { + int err= errno; + switch(err) + { + case EACCES: + die("File %s can't be read", opt_config); + break; + case ENOENT: + die("File %s does not exist", opt_config); + break; + default: + die("Can't access file %s, errno %d",opt_config, err); + break; + } + } + if (opt_config) + { + DWORD dwret = GetFullPathName(opt_config, sizeof(full_config_path), full_config_path, NULL); + if (dwret == 0) + { + die("GetFullPathName failed, last error %u", GetLastError()); + } + else if (dwret > sizeof(full_config_path)) + { + die("Can't resolve the config file name, path too large"); + } + opt_config= full_config_path; + } + + if(opt_datadir) + datadir = opt_datadir; + + if (!datadir && opt_config) + { + for(auto section : {"server","mysqld"}) + { + auto ret = GetPrivateProfileStringA(section,"datadir", NULL, default_datadir, + sizeof(default_datadir)-1, opt_config); + if (ret) + { + datadir= default_datadir; + printf("Data directory (from config file) is %s\n",datadir); + break; + } + } + } + + if (!datadir) { /* Figure out default data directory. 
It "data" directory, next to "bin" directory, where @@ -189,31 +237,30 @@ int main(int argc, char **argv) my_print_help(my_long_options); } strcat_s(default_datadir, "\\data"); - opt_datadir= default_datadir; - printf("Default data directory is %s\n",opt_datadir); + datadir= default_datadir; + printf("Default data directory is %s\n",datadir); } - /* Print some help on errors */ - verbose_errors= TRUE; + DBUG_ASSERT(datadir); + - if (!opt_os_user) - { - opt_os_user= default_os_user; - opt_os_password= NULL; - } /* Workaround WiX bug (strip possible quote character at the end of path) */ - size_t len= strlen(opt_datadir); + size_t len= strlen(datadir); if (len > 0) { - if (opt_datadir[len-1] == '"') + if (datadir[len-1] == '"') { - opt_datadir[len-1]= 0; + datadir[len-1]= 0; + } + if (datadir[0] == '"') + { + datadir++; } } - GetFullPathName(opt_datadir, FN_REFLEN, datadir_buffer, NULL); - opt_datadir= datadir_buffer; + GetFullPathName(datadir, FN_REFLEN, datadir_buffer, NULL); + datadir= datadir_buffer; - if (create_db_instance()) + if (create_db_instance(datadir)) { die("database creation failed"); } @@ -228,11 +275,11 @@ int main(int argc, char **argv) Convert slashes in paths into MySQL-compatible form */ -static void convert_slashes(char *s) +static void convert_slashes(char *s, char replacement) { - for (; *s ; s++) - if (*s == '\\') - *s= '/'; + for (; *s; s++) + if (*s == '\\' || *s == '/') + *s= replacement; } @@ -242,15 +289,16 @@ static void convert_slashes(char *s) E.g basedir for C:\my\bin\mysqld.exe would be C:\my */ -static void get_basedir(char *basedir, int size, const char *mysqld_path) +static void get_basedir(char *basedir, int size, const char *mysqld_path, + char slash) { strcpy_s(basedir, size, mysqld_path); - convert_slashes(basedir); - char *p= strrchr(basedir,'/'); + convert_slashes(basedir, '\\'); + char *p= strrchr(basedir, '\\'); if (p) { *p = 0; - p= strrchr(basedir, '/'); + p= strrchr(basedir, '\\'); if (p) *p= 0; } @@ -262,7 +310,7 @@ 
static void get_basedir(char *basedir, int size, const char *mysqld_path) static char *get_plugindir() { static char plugin_dir[2*MAX_PATH]; - get_basedir(plugin_dir, sizeof(plugin_dir), mysqld_path); + get_basedir(plugin_dir, sizeof(plugin_dir), mysqld_path, '/'); strcat(plugin_dir, "/" STR(INSTALL_PLUGINDIR)); if (access(plugin_dir, 0) == 0) @@ -279,19 +327,37 @@ static char *get_plugindir() static char *init_bootstrap_command_line(char *cmdline, size_t size) { - char basedir[MAX_PATH]; - get_basedir(basedir, sizeof(basedir), mysqld_path); - - my_snprintf(cmdline, size - 1, - "\"\"%s\" --no-defaults %s --innodb-page-size=%d --bootstrap" - " \"--lc-messages-dir=%s/share\"" - " --basedir=. --datadir=. --default-storage-engine=myisam" - " --max_allowed_packet=9M " - " --net-buffer-length=16k\"", mysqld_path, - opt_verbose_bootstrap ? "--console" : "", opt_innodb_page_size, basedir); + snprintf(cmdline, size - 1, + "\"\"%s\"" + " --defaults-file=my.ini" + " %s" + " --bootstrap" + " --datadir=." + " --loose-innodb-buffer-pool-size=20M" + "\"" + , mysqld_path, opt_verbose_bootstrap ? "--console" : ""); return cmdline; } +static char my_ini_path[MAX_PATH]; + +static void write_myini_str(const char *key, const char* val, const char *section="mysqld") +{ + DBUG_ASSERT(my_ini_path[0]); + if (!WritePrivateProfileString(section, key, val, my_ini_path)) + { + die("Can't write to ini file key=%s, val=%s, section=%s, Windows error %u",key,val,section, + GetLastError()); + } +} + + +static void write_myini_int(const char* key, int val, const char* section = "mysqld") +{ + char buf[10]; + itoa(val, buf, 10); + write_myini_str(key, buf, section); +} /** Create my.ini in current directory (this is assumed to be @@ -305,59 +371,63 @@ static int create_myini() char path_buf[MAX_PATH]; GetCurrentDirectory(MAX_PATH, path_buf); - - /* Create ini file. 
*/ - FILE *myini= fopen("my.ini","wt"); - if (!myini) + snprintf(my_ini_path,sizeof(my_ini_path), "%s\\my.ini", path_buf); + if (opt_config) { - die("Can't create my.ini in data directory"); + if (!CopyFile(opt_config, my_ini_path,TRUE)) + { + die("Can't copy %s to my.ini , last error %lu", opt_config, GetLastError()); + } } /* Write out server settings. */ - fprintf(myini, "[mysqld]\n"); - convert_slashes(path_buf); - fprintf(myini, "datadir=%s\n", path_buf); + convert_slashes(path_buf,'/'); + write_myini_str("datadir",path_buf); + if (opt_skip_networking) { - fprintf(myini,"skip-networking\n"); + write_myini_str("skip-networking","ON"); if (!opt_socket) opt_socket= opt_service; } - enable_named_pipe= (my_bool) + enable_named_pipe= (my_bool) ((opt_socket && opt_socket[0]) || opt_skip_networking); if (enable_named_pipe) { - fprintf(myini,"named-pipe=ON\n"); + write_myini_str("named-pipe","ON"); } if (opt_socket && opt_socket[0]) { - fprintf(myini, "socket=%s\n", opt_socket); + write_myini_str("socket", opt_socket); } if (opt_port) { - fprintf(myini,"port=%d\n", opt_port); + write_myini_int("port", opt_port); } if (opt_innodb_page_size != DEFAULT_INNODB_PAGE_SIZE) { - fprintf(myini, "innodb-page-size=%d\n", opt_innodb_page_size); + write_myini_int("innodb-page-size", opt_innodb_page_size); + } + if (opt_large_pages) + { + write_myini_str("large-pages","ON"); } + /* Write out client settings. 
*/ - fprintf(myini, "[client]\n"); /* Used for named pipes */ if (opt_socket && opt_socket[0]) - fprintf(myini,"socket=%s\n",opt_socket); + write_myini_str("socket",opt_socket,"client"); if (opt_skip_networking) - fprintf(myini,"protocol=pipe\n"); + write_myini_str("protocol", "pipe", "client"); else if (opt_port) - fprintf(myini,"port=%d\n",opt_port); + write_myini_int("port",opt_port,"client"); char *plugin_dir = get_plugindir(); if (plugin_dir) - fprintf(myini, "plugin-dir=%s\n", plugin_dir); - fclose(myini); + write_myini_str("plugin-dir", plugin_dir, "client"); return 0; } @@ -380,22 +450,92 @@ static const char allow_remote_root_access_cmd[]= "DROP TABLE tmp_user;\n"; static const char end_of_script[]="-- end."; +/* +Add or remove privilege for a user +@param[in] account_name - user name, Windows style, e.g "NT SERVICE\mariadb", or ".\joe" +@param[in] privilege name - standard Windows privilege name, e.g "SeLockMemoryPrivilege" +@param[in] add - when true, add privilege, otherwise remove it + +In special case where privilege name is NULL, and add is false +all privileges for the user are removed. 
+*/ +static int handle_user_privileges(const char *account_name, const wchar_t *privilege_name, bool add) +{ + LSA_OBJECT_ATTRIBUTES attr{}; + LSA_HANDLE lsa_handle; + auto status= LsaOpenPolicy( + 0, &attr, POLICY_LOOKUP_NAMES | POLICY_CREATE_ACCOUNT, &lsa_handle); + if (status) + { + verbose("LsaOpenPolicy returned %lu", LsaNtStatusToWinError(status)); + return 1; + } + BYTE sidbuf[SECURITY_MAX_SID_SIZE]; + PSID sid= (PSID) sidbuf; + SID_NAME_USE name_use; + char domain_name[256]; + DWORD cbSid= sizeof(sidbuf); + DWORD cbDomain= sizeof(domain_name); + BOOL ok= LookupAccountNameA(0, account_name, sid, &cbSid, domain_name, + &cbDomain, &name_use); + if (!ok) + { + verbose("LsaOpenPolicy returned %lu", LsaNtStatusToWinError(status)); + return 1; + } + + if (privilege_name) + { + LSA_UNICODE_STRING priv{}; + priv.Buffer= (PWSTR) privilege_name; + priv.Length= (USHORT) wcslen(privilege_name) * sizeof(wchar_t); + priv.MaximumLength= priv.Length; + if (add) + { + status= LsaAddAccountRights(lsa_handle, sid, &priv, 1); + if (status) + { + verbose("LsaAddAccountRights returned %lu/%lu", status, + LsaNtStatusToWinError(status)); + return 1; + } + } + else + { + status= LsaRemoveAccountRights(lsa_handle, sid, FALSE, &priv, 1); + if (status) + { + verbose("LsaRemoveRights returned %lu/%lu", + LsaNtStatusToWinError(status)); + return 1; + } + } + } + else + { + DBUG_ASSERT(!add); + status= LsaRemoveAccountRights(lsa_handle, sid, TRUE, 0, 0); + } + LsaClose(lsa_handle); + return 0; +} + /* Register service. 
Assume my.ini is in datadir */ -static int register_service() +static int register_service(const char *datadir, const char *user, const char *passwd) { char buf[3*MAX_PATH +32]; /* path to mysqld.exe, to my.ini, service name */ SC_HANDLE sc_manager, sc_service; - size_t datadir_len= strlen(opt_datadir); + size_t datadir_len= strlen(datadir); const char *backslash_after_datadir= "\\"; - if (datadir_len && opt_datadir[datadir_len-1] == '\\') + if (datadir_len && datadir[datadir_len-1] == '\\') backslash_after_datadir= ""; verbose("Registering service '%s'", opt_service); my_snprintf(buf, sizeof(buf)-1, - "\"%s\" \"--defaults-file=%s%smy.ini\" \"%s\"" , mysqld_path, opt_datadir, + "\"%s\" \"--defaults-file=%s%smy.ini\" \"%s\"" , mysqld_path, datadir, backslash_after_datadir, opt_service); /* Get a handle to the SCM database. */ @@ -408,7 +548,7 @@ static int register_service() /* Create the service. */ sc_service= CreateService(sc_manager, opt_service, opt_service, SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS, SERVICE_AUTO_START, - SERVICE_ERROR_NORMAL, buf, NULL, NULL, NULL, opt_os_user, opt_os_password); + SERVICE_ERROR_NORMAL, buf, NULL, NULL, NULL, user, passwd); if (!sc_service) { @@ -450,7 +590,8 @@ static void clean_directory(const char *dir) (defined as username or group string or as SID) */ -static int set_directory_permissions(const char *dir, const char *os_user) +static int set_directory_permissions(const char *dir, const char *os_user, + DWORD permission) { struct{ @@ -526,12 +667,19 @@ static int set_directory_permissions(const char *dir, const char *os_user) ea.Trustee.TrusteeForm= TRUSTEE_IS_SID; ea.Trustee.ptstrName= (LPTSTR)pSid; } + ea.Trustee.TrusteeType= TRUSTEE_IS_UNKNOWN; ea.grfAccessMode= GRANT_ACCESS; - ea.grfAccessPermissions= GENERIC_ALL; - ea.grfInheritance= CONTAINER_INHERIT_ACE|OBJECT_INHERIT_ACE; - ea.Trustee.TrusteeType= TRUSTEE_IS_UNKNOWN; - ACL* pNewDACL= 0; - SetEntriesInAcl(1,&ea,pOldDACL,&pNewDACL); + ea.grfAccessPermissions= 
permission; + ea.grfInheritance= CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE; + ACL *pNewDACL= 0; + + ACCESS_MASK access_mask; + if (GetEffectiveRightsFromAcl(pOldDACL, &ea.Trustee, &access_mask) != ERROR_SUCCESS + || (access_mask & permission) != permission) + { + SetEntriesInAcl(1, &ea, pOldDACL, &pNewDACL); + } + if (pNewDACL) { SetSecurityInfo(hDir,SE_FILE_OBJECT,DACL_SECURITY_INFORMATION,NULL, NULL, @@ -545,11 +693,69 @@ static int set_directory_permissions(const char *dir, const char *os_user) return 0; } +static void set_permissions(const char *datadir, const char *service_user) +{ + /* + Set data directory permissions for both current user and + the one who who runs services. + */ + set_directory_permissions(datadir, NULL, + FILE_GENERIC_READ | FILE_GENERIC_WRITE); + if (!service_user) + return; + + /* Datadir permission for the service. */ + set_directory_permissions(datadir, service_user, FILE_ALL_ACCESS); + char basedir[MAX_PATH]; + char path[MAX_PATH]; + + struct + { + const char *subdir; + DWORD perm; + } all_subdirs[]= { + {STR(INSTALL_PLUGINDIR), FILE_GENERIC_READ | FILE_GENERIC_EXECUTE}, + {STR(INSTALL_SHAREDIR), FILE_GENERIC_READ}, + }; + + + if (strncmp(service_user,"NT SERVICE\\",sizeof("NT SERVICE\\")-1) == 0) + { + /* + Read and execute permission for executables can/should be given + to any service account, rather than specific one. + */ + service_user="NT SERVICE\\ALL SERVICES"; + } + + get_basedir(basedir, sizeof(basedir), mysqld_path, '\\'); + for (int i= 0; i < array_elements(all_subdirs); i++) + { + auto subdir= + snprintf(path, sizeof(path), "%s\\%s", basedir, all_subdirs[i].subdir); + if (access(path, 0) == 0) + { + set_directory_permissions(path, service_user, all_subdirs[i].perm); + } + } + /* Bindir, the directory where mysqld_path is located. 
*/ + strcpy_s(path, mysqld_path); + char *end= strrchr(path, '/'); + if (!end) + end= strrchr(path, '\\'); + if (end) + *end= 0; + if (access(path, 0) == 0) + { + set_directory_permissions(path, service_user, + FILE_GENERIC_READ | FILE_GENERIC_EXECUTE); + } +} /* Create database instance (including registering as service etc) .*/ -static int create_db_instance() +static int create_db_instance(const char *datadir) { int ret= 0; char cwd[MAX_PATH]; @@ -558,6 +764,8 @@ static int create_db_instance() FILE *in; bool created_datadir= false; DWORD last_error; + bool service_created= false; + std::string mysql_db_dir; verbose("Running bootstrap"); @@ -565,7 +773,7 @@ static int create_db_instance() /* Create datadir and datadir/mysql, if they do not already exist. */ - if (CreateDirectory(opt_datadir, NULL)) + if (CreateDirectory(datadir, NULL)) { created_datadir= true; } @@ -576,71 +784,85 @@ static int create_db_instance() { case ERROR_ACCESS_DENIED: die("Can't create data directory '%s' (access denied)\n", - opt_datadir); + datadir); break; case ERROR_PATH_NOT_FOUND: die("Can't create data directory '%s' " "(one or more intermediate directories do not exist)\n", - opt_datadir); + datadir); break; default: die("Can't create data directory '%s', last error %u\n", - opt_datadir, last_error); + datadir, last_error); break; } } - if (!SetCurrentDirectory(opt_datadir)) + if (!SetCurrentDirectory(datadir)) { last_error = GetLastError(); switch (last_error) { case ERROR_DIRECTORY: die("Can't set current directory to '%s', the path is not a valid directory \n", - opt_datadir); + datadir); break; default: die("Can' set current directory to '%s', last error %u\n", - opt_datadir, last_error); + datadir, last_error); break; } } - if (!PathIsDirectoryEmpty(opt_datadir)) + if (!PathIsDirectoryEmpty(datadir)) { - fprintf(stderr,"ERROR : Data directory %s is not empty." 
- " Only new or empty existing directories are accepted for --datadir\n",opt_datadir); + fprintf(stderr, "ERROR : Data directory %s is not empty." + " Only new or empty existing directories are accepted for --datadir\n", datadir); exit(1); } - if (!CreateDirectory("mysql",NULL)) + std::string service_user; + /* Register service if requested. */ + if (opt_service && opt_service[0]) { - last_error = GetLastError(); - DWORD attributes; - switch(last_error) - { - case ERROR_ACCESS_DENIED: - die("Can't create subdirectory 'mysql' in '%s' (access denied)\n",opt_datadir); - break; - case ERROR_ALREADY_EXISTS: - attributes = GetFileAttributes("mysql"); + /* Run service under virtual account NT SERVICE\service_name.*/ + service_user.append("NT SERVICE\\").append(opt_service); + ret = register_service(datadir, service_user.c_str(), NULL); + if (ret) + goto end; + service_created = true; + } - if (attributes == INVALID_FILE_ATTRIBUTES) - die("GetFileAttributes() failed for existing file '%s\\mysql', last error %u", - opt_datadir, GetLastError()); - else if (!(attributes & FILE_ATTRIBUTE_DIRECTORY)) - die("File '%s\\mysql' exists, but it is not a directory", opt_datadir); + set_permissions(datadir, service_user.c_str()); - break; - } + if (opt_large_pages) + { + handle_user_privileges(service_user.c_str(), L"SeLockMemoryPrivilege", true); } /* - Set data directory permissions for both current user and - default_os_user (the one who runs services). + Get security descriptor for the data directory. + It will be passed, as SDDL text, to the mysqld bootstrap subprocess, + to allow for correct subdirectory permissions. 
*/ - set_directory_permissions(opt_datadir, NULL); - set_directory_permissions(opt_datadir, default_os_user); + PSECURITY_DESCRIPTOR pSD; + if (GetNamedSecurityInfoA(datadir, SE_FILE_OBJECT, DACL_SECURITY_INFORMATION, + 0, 0, 0, 0, &pSD) == ERROR_SUCCESS) + { + char* string_sd = NULL; + if (ConvertSecurityDescriptorToStringSecurityDescriptor(pSD, SDDL_REVISION_1, + DACL_SECURITY_INFORMATION, &string_sd, 0)) + { + _putenv_s("MARIADB_NEW_DIRECTORY_SDDL", string_sd); + LocalFree(string_sd); + } + LocalFree(pSD); + } + + /* Create my.ini file in data directory.*/ + ret = create_myini(); + if (ret) + goto end; /* Do mysqld --bootstrap. */ init_bootstrap_command_line(cmdline, sizeof(cmdline)); @@ -656,18 +878,23 @@ static int create_db_instance() { verbose("WARNING: Can't disable buffering on mysqld's stdin"); } - if (fwrite("use mysql;\n",11,1, in) != 1) + static const char *pre_bootstrap_sql[] = { "create database mysql;\n","use mysql;\n"}; + for (auto cmd : pre_bootstrap_sql) { - verbose("ERROR: Can't write to mysqld's stdin"); - ret= 1; - goto end; + /* Write the bootstrap script to stdin. */ + if (fwrite(cmd, strlen(cmd), 1, in) != 1) + { + verbose("ERROR: Can't write to mysqld's stdin"); + ret= 1; + goto end; + } } - int i; - for (i=0; mysql_bootstrap_sql[i]; i++) + for (int i= 0; mysql_bootstrap_sql[i]; i++) { + auto cmd = mysql_bootstrap_sql[i]; /* Write the bootstrap script to stdin. */ - if (fwrite(mysql_bootstrap_sql[i], strlen(mysql_bootstrap_sql[i]), 1, in) != 1) + if (fwrite(cmd, strlen(cmd), 1, in) != 1) { verbose("ERROR: Can't write to mysqld's stdin"); ret= 1; @@ -709,7 +936,7 @@ static int create_db_instance() } /* - On some reason, bootstrap chokes if last command sent via stdin ends with + On some reason, bootstrap chokes if last command sent via stdin ends with newline, so we supply a dummy comment, that does not end with newline. 
*/ fputs(end_of_script, in); @@ -723,25 +950,37 @@ static int create_db_instance() goto end; } +end: + if (!ret) + return ret; - /* Create my.ini file in data directory.*/ - ret= create_myini(); - if (ret) - goto end; - - /* Register service if requested. */ - if (opt_service && opt_service[0]) + /* Cleanup after error.*/ + if (created_datadir) { - ret= register_service(); - if (ret) - goto end; + SetCurrentDirectory(cwd); + clean_directory(datadir); } -end: - if (ret) + if (service_created) { - SetCurrentDirectory(cwd); - clean_directory(opt_datadir); + auto sc_manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); + if (sc_manager) + { + auto sc_handle= OpenServiceA(sc_manager,opt_service, DELETE); + if (sc_handle) + { + DeleteService(sc_handle); + CloseServiceHandle(sc_handle); + } + CloseServiceHandle(sc_manager); + } + + /*Remove all service user privileges for the user.*/ + if(strncmp(service_user.c_str(), "NT SERVICE\\", + sizeof("NT SERVICE\\")-1)) + { + handle_user_privileges(service_user.c_str(), 0, false); + } if (created_datadir) RemoveDirectory(opt_datadir); } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index b65cd6d81bb..f4da993673d 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -18,7 +18,7 @@ #include "sql_priv.h" #include "unireg.h" #include <signal.h> -#ifndef __WIN__ +#ifndef _WIN32 #include <netdb.h> // getservbyname, servent #endif #include "sql_parse.h" // path_starts_from_data_home_dir @@ -30,7 +30,7 @@ #include "parse_file.h" // File_parser_dummy_hook #include "sql_db.h" // my_dboptions_cache_free // my_dboptions_cache_init -#include "sql_table.h" // release_ddl_log, execute_ddl_log_recovery +#include "sql_table.h" // ddl_log_release, ddl_log_execute_recovery #include "sql_connect.h" // free_max_user_conn, init_max_user_conn, // handle_one_connection #include "thread_cache.h" @@ -51,6 +51,7 @@ #include "sql_manager.h" // stop_handle_manager, start_handle_manager #include "sql_expression_cache.h" // subquery_cache_miss, 
subquery_cache_hit #include "sys_vars_shared.h" +#include "ddl_log.h" #include <m_ctype.h> #include <my_dir.h> @@ -118,12 +119,15 @@ #include "sql_reload.h" // reload_acl_and_cache #include "sp_head.h" // init_sp_psi_keys +#include <mysqld_default_groups.h> + #ifdef HAVE_POLL_H #include <poll.h> #endif #ifdef _WIN32 #include <handle_connections_win.h> +#include <sddl.h> #endif #include <my_service_manager.h> @@ -132,11 +136,6 @@ #define mysqld_charset &my_charset_latin1 -/* We have HAVE_valgrind below as this speeds up the shutdown of MySQL */ - -#if defined(HAVE_valgrind) && defined(__linux__) -#define HAVE_CLOSE_SERVER_SOCK 1 -#endif extern "C" { // Because of SCO 3.2V4.2 #include <sys/stat.h> @@ -152,7 +151,7 @@ extern "C" { // Because of SCO 3.2V4.2 #endif #include <my_net.h> -#if !defined(__WIN__) +#if !defined(_WIN32) #include <sys/resource.h> #ifdef HAVE_SYS_UN_H #include <sys/un.h> @@ -164,11 +163,11 @@ extern "C" { // Because of SCO 3.2V4.2 #include <sys/select.h> #endif #include <sys/utsname.h> -#endif /* __WIN__ */ +#endif /* _WIN32 */ #include <my_libwrap.h> -#ifdef __WIN__ +#ifdef _WIN32 #include <crtdbg.h> #endif @@ -301,7 +300,6 @@ const char *my_localhost= "localhost", *delayed_user= "DELAYED"; bool opt_large_files= sizeof(my_off_t) > 4; static my_bool opt_autocommit; ///< for --autocommit command-line option - /* Used with --help for detailed option */ @@ -338,7 +336,6 @@ static char *character_set_filesystem_name; static char *lc_messages; static char *lc_time_names_name; char *my_bind_addr_str; -int server_socket_ai_family; static char *default_collation_name; char *default_storage_engine, *default_tmp_storage_engine; char *enforced_storage_engine=NULL; @@ -371,7 +368,7 @@ my_bool locked_in_memory; bool opt_using_transactions; bool volatile abort_loop; uint volatile global_disable_checkpoint; -#if defined(_WIN32) && !defined(EMBEDDED_LIBRARY) +#if defined(_WIN32) ulong slow_start_timeout; #endif static MEM_ROOT startup_root; @@ -475,6 +472,7 @@ 
ulong aborted_threads, aborted_connects, aborted_connects_preauth; ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size; ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use; ulong delayed_insert_errors,flush_time; +ulong malloc_calls; ulong specialflag=0; ulong binlog_cache_use= 0, binlog_cache_disk_use= 0; ulong binlog_stmt_cache_use= 0, binlog_stmt_cache_disk_use= 0; @@ -516,7 +514,9 @@ ulong current_pid; ulong slow_launch_threads = 0; uint sync_binlog_period= 0, sync_relaylog_period= 0, sync_relayloginfo_period= 0, sync_masterinfo_period= 0; -ulong expire_logs_days = 0; +double expire_logs_days = 0; +ulong binlog_expire_logs_seconds = 0; + /** Soft upper limit for number of sp_head objects that can be stored in the sp_cache for one connection. @@ -574,7 +574,7 @@ char log_error_file[FN_REFLEN], glob_hostname[FN_REFLEN], *opt_log_basename; char mysql_real_data_home[FN_REFLEN], lc_messages_dir[FN_REFLEN], reg_ext[FN_EXTLEN], mysql_charsets_dir[FN_REFLEN], - *opt_init_file, *opt_tc_log_file; + *opt_init_file, *opt_tc_log_file, *opt_ddl_recovery_file; char *lc_messages_dir_ptr= lc_messages_dir, *log_error_file_ptr; char mysql_unpacked_real_data_home[FN_REFLEN]; size_t mysql_unpacked_real_data_home_len; @@ -662,11 +662,18 @@ SHOW_COMP_OPTION have_openssl; #ifndef EMBEDDED_LIBRARY static std::atomic<char*> shutdown_user; #endif //EMBEDDED_LIBRARY +std::atomic<my_thread_id> shutdown_thread_id; /* Thread specific variables */ static thread_local THD *THR_THD; +/** + Get current THD object from thread local data + + @retval The THD object for the thread, NULL if not connection thread +*/ + MYSQL_THD _current_thd() { return THR_THD; } void set_current_thd(THD *thd) { THR_THD= thd; } @@ -708,6 +715,7 @@ mysql_mutex_t LOCK_prepared_stmt_count; #ifdef HAVE_OPENSSL mysql_mutex_t LOCK_des_key_file; #endif +mysql_mutex_t LOCK_backup_log; mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave; mysql_rwlock_t LOCK_ssl_refresh; 
mysql_rwlock_t LOCK_all_status_vars; @@ -730,6 +738,8 @@ char *opt_relay_logname = 0, *opt_relaylog_index_name=0; char *opt_logname, *opt_slow_logname, *opt_bin_logname; char *opt_binlog_index_name=0; + + /* Static variables */ my_bool opt_stack_trace; @@ -851,6 +861,7 @@ PSI_file_key key_file_binlog, key_file_binlog_cache, key_file_binlog_index, key_file_dbopt, key_file_des_key_file, key_file_ERRMSG, key_select_to_file, key_file_fileparser, key_file_frm, key_file_global_ddl_log, key_file_load, key_file_loadfile, key_file_log_event_data, key_file_log_event_info, + key_file_log_ddl, key_file_master_info, key_file_misc, key_file_partition_ddl_log, key_file_pid, key_file_relay_log_info, key_file_send_file, key_file_tclog, key_file_trg, key_file_trn, key_file_init; @@ -876,7 +887,7 @@ PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list, key_LOCK_crypt, key_LOCK_delayed_create, key_LOCK_delayed_insert, key_LOCK_delayed_status, key_LOCK_error_log, key_LOCK_gdl, key_LOCK_global_system_variables, - key_LOCK_manager, + key_LOCK_manager, key_LOCK_backup_log, key_LOCK_prepared_stmt_count, key_LOCK_rpl_status, key_LOCK_server_started, key_LOCK_status, @@ -936,6 +947,7 @@ static PSI_mutex_info all_server_mutexes[]= { &key_delayed_insert_mutex, "Delayed_insert::mutex", 0}, { &key_hash_filo_lock, "hash_filo::lock", 0}, { &key_LOCK_active_mi, "LOCK_active_mi", PSI_FLAG_GLOBAL}, + { &key_LOCK_backup_log, "LOCK_backup_log", PSI_FLAG_GLOBAL}, { &key_LOCK_thread_id, "LOCK_thread_id", PSI_FLAG_GLOBAL}, { &key_LOCK_crypt, "LOCK_crypt", PSI_FLAG_GLOBAL}, { &key_LOCK_delayed_create, "LOCK_delayed_create", PSI_FLAG_GLOBAL}, @@ -1340,7 +1352,12 @@ static Buffered_logs buffered_logs; struct my_rnd_struct sql_rand; ///< used by sql_class.cc:THD::THD() #ifndef EMBEDDED_LIBRARY -MYSQL_SOCKET unix_sock, base_ip_sock, extra_ip_sock; + +Dynamic_array<MYSQL_SOCKET> listen_sockets(PSI_INSTRUMENT_MEM, 0); +bool unix_sock_is_online= false; +static int systemd_sock_activation; /* systemd 
socket activation */ + + C_MODE_START #ifdef WITH_PERFSCHEMA_STORAGE_ENGINE /** @@ -1394,21 +1411,10 @@ static pthread_t select_thread; /* OS specific variables */ -#ifdef __WIN__ -#undef getpid -#include <process.h> - -static bool start_mode=0, use_opt_args; -static int opt_argc; -static char **opt_argv; - -#if !defined(EMBEDDED_LIBRARY) +#ifdef _WIN32 HANDLE hEventShutdown; -static char shutdown_event_name[40]; -#include "nt_servc.h" -static NTService Service; ///< Service object for WinNT -#endif /* EMBEDDED_LIBRARY */ -#endif /* __WIN__ */ +#endif + #ifndef EMBEDDED_LIBRARY bool mysqld_embedded=0; @@ -1465,7 +1471,7 @@ struct st_VioSSLFd *ssl_acceptor_fd; /** Number of currently active user connections. */ -Atomic_counter<uint> connection_count; +static Atomic_counter<uint> connection_count; static Atomic_counter<uint> extra_connection_count; my_bool opt_gtid_strict_mode= FALSE; @@ -1496,7 +1502,6 @@ static int test_if_case_insensitive(const char *dir_name); static bool pid_file_created= false; static void usage(void); static void start_signal_handler(void); -static void close_server_sock(); static void clean_up_mutexes(void); static void wait_for_signal_thread_to_end(void); static void create_pid_file(); @@ -1536,12 +1541,11 @@ static my_bool kill_thread_phase_1(THD *thd, int *n_threads_awaiting_ack) if (DBUG_EVALUATE_IF("only_kill_system_threads", !thd->system_thread, 0)) return 0; - if (DBUG_EVALUATE_IF("only_kill_system_threads_no_loop", !thd->system_thread, 0)) + if (DBUG_EVALUATE_IF("only_kill_system_threads_no_loop", + !thd->system_thread, 0)) return 0; - thd->set_killed(KILL_SERVER_HARD); - MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (thd)); - kill_thread(thd); + thd->awake(KILL_SERVER_HARD); return 0; } @@ -1610,9 +1614,8 @@ static void break_connect_loop() abort_loop= 1; -#if defined(__WIN__) - if (!SetEvent(hEventShutdown)) - DBUG_PRINT("error", ("Got error: %ld from SetEvent", GetLastError())); +#if defined(_WIN32) + 
mysqld_win_initiate_shutdown(); #else /* Avoid waiting for ourselves when thread-handling=no-threads. */ if (pthread_equal(pthread_self(), select_thread)) @@ -1643,10 +1646,9 @@ static void break_connect_loop() if (error != 0 && error != ETIMEDOUT && !count++) sql_print_error("Got error %d from mysql_cond_timedwait", error); #endif - close_server_sock(); } mysql_mutex_unlock(&LOCK_start_thread); -#endif /* __WIN__ */ +#endif /* _WIN32 */ } @@ -1674,6 +1676,7 @@ void kill_mysql(THD *thd) my_free(user); } + shutdown_thread_id= thd->thread_id; DBUG_EXECUTE_IF("mysql_admin_shutdown_wait_for_slaves", thd->lex->is_shutdown_wait_for_slaves= true;); #ifdef ENABLED_DEBUG_SYNC @@ -1703,28 +1706,20 @@ static void close_connections(void) /* Abort listening to new connections */ DBUG_PRINT("quit",("Closing sockets")); - if (!opt_disable_networking ) + /* Protect against pthread_kill() calling close_server_sock(*) */ + mysql_mutex_lock(&LOCK_start_thread); + for (uint i= 0 ; i < listen_sockets.elements() ; i++) { - if (mysql_socket_getfd(base_ip_sock) != INVALID_SOCKET) + MYSQL_SOCKET *sock= listen_sockets.get_pos(i); + (void) mysql_socket_close(*sock); + if (sock->is_unix_domain_socket && !systemd_sock_activation) { - (void) mysql_socket_close(base_ip_sock); - base_ip_sock= MYSQL_INVALID_SOCKET; - } - if (mysql_socket_getfd(extra_ip_sock) != INVALID_SOCKET) - { - (void) mysql_socket_close(extra_ip_sock); - extra_ip_sock= MYSQL_INVALID_SOCKET; + (void) unlink(mysqld_unix_port); } } + listen_sockets.free_memory(); + mysql_mutex_unlock(&LOCK_start_thread); -#ifdef HAVE_SYS_UN_H - if (mysql_socket_getfd(unix_sock) != INVALID_SOCKET) - { - (void) mysql_socket_close(unix_sock); - (void) unlink(mysqld_unix_port); - unix_sock= MYSQL_INVALID_SOCKET; - } -#endif end_thr_alarm(0); // Abort old alarms. 
while (CONNECT::count) @@ -1826,39 +1821,6 @@ static void close_connections(void) DBUG_VOID_RETURN; } - -#ifdef HAVE_CLOSE_SERVER_SOCK -static void close_socket(MYSQL_SOCKET sock, const char *info) -{ - DBUG_ENTER("close_socket"); - - if (mysql_socket_getfd(sock) != INVALID_SOCKET) - { - DBUG_PRINT("info", ("calling shutdown on %s socket", info)); - (void) mysql_socket_shutdown(sock, SHUT_RDWR); - } - DBUG_VOID_RETURN; -} -#endif - - -static void close_server_sock() -{ -#ifdef HAVE_CLOSE_SERVER_SOCK - DBUG_ENTER("close_server_sock"); - - close_socket(base_ip_sock, "TCP/IP"); - close_socket(extra_ip_sock, "TCP/IP"); - close_socket(unix_sock, "unix/IP"); - - if (mysql_socket_getfd(unix_sock) != INVALID_SOCKET) - (void) unlink(mysqld_unix_port); - base_ip_sock= extra_ip_sock= unix_sock= MYSQL_INVALID_SOCKET; - - DBUG_VOID_RETURN; -#endif -} - #endif /*EMBEDDED_LIBRARY*/ @@ -1870,12 +1832,19 @@ extern "C" sig_handler print_signal_warning(int sig) #ifdef SIGNAL_HANDLER_RESET_ON_DELIVERY my_sigset(sig,print_signal_warning); /* int. 
thread system calls */ #endif -#if !defined(__WIN__) +#if !defined(_WIN32) if (sig == SIGALRM) alarm(2); /* reschedule alarm */ #endif } +#ifdef _WIN32 +typedef void (*report_svc_status_t)(DWORD current_state, DWORD win32_exit_code, + DWORD wait_hint); +static void dummy_svc_status(DWORD, DWORD, DWORD) {} +static report_svc_status_t my_report_svc_status= dummy_svc_status; +#endif + #ifndef EMBEDDED_LIBRARY extern "C" void unireg_abort(int exit_code) { @@ -1921,7 +1890,6 @@ extern "C" void unireg_abort(int exit_code) mysqld_exit(exit_code); } - static void mysqld_exit(int exit_code) { DBUG_ENTER("mysqld_exit"); @@ -1948,10 +1916,13 @@ static void mysqld_exit(int exit_code) { fprintf(stderr, "Warning: Memory not freed: %lld\n", (longlong) global_status_var.global_memory_used); - if (exit_code == 0) + if (exit_code == 0 || opt_endinfo) SAFEMALLOC_REPORT_MEMORY(0); } DBUG_LEAVE; +#ifdef _WIN32 + my_report_svc_status(SERVICE_STOPPED, exit_code, 0); +#endif sd_notify(0, "STATUS=MariaDB server is down"); exit(exit_code); /* purecov: inspected */ } @@ -1971,7 +1942,7 @@ static void clean_up(bool print_message) my_bitmap_free(&slave_error_mask); #endif stop_handle_manager(); - release_ddl_log(); + ddl_log_release(); logger.cleanup_base(); @@ -2124,6 +2095,7 @@ static void clean_up_mutexes() #endif /* HAVE_REPLICATION */ mysql_mutex_destroy(&LOCK_active_mi); mysql_rwlock_destroy(&LOCK_ssl_refresh); + mysql_mutex_destroy(&LOCK_backup_log); mysql_rwlock_destroy(&LOCK_sys_init_connect); mysql_rwlock_destroy(&LOCK_sys_init_slave); mysql_mutex_destroy(&LOCK_global_system_variables); @@ -2186,7 +2158,7 @@ static void set_ports() } if (!mysqld_unix_port) { -#ifdef __WIN__ +#ifdef _WIN32 mysqld_unix_port= (char*) MYSQL_NAMEDPIPE; #else mysqld_unix_port= (char*) MYSQL_UNIX_ADDR; @@ -2245,7 +2217,7 @@ static void set_user(const char *user, struct passwd *user_info_arg) allow_coredumps(); } -#if !defined(__WIN__) +#if !defined(_WIN32) static void set_effective_user(struct passwd 
*user_info_arg) { DBUG_ASSERT(user_info_arg != 0); @@ -2266,7 +2238,7 @@ static void set_effective_user(struct passwd *user_info_arg) /** Change root user if started with @c --chroot . */ static void set_root(const char *path) { -#if !defined(__WIN__) +#if !defined(_WIN32) if (chroot(path) == -1) { sql_perror("chroot"); @@ -2280,7 +2252,9 @@ static void set_root(const char *path) Activate usage of a tcp port */ -static MYSQL_SOCKET activate_tcp_port(uint port) +static void activate_tcp_port(uint port, + Dynamic_array<MYSQL_SOCKET> *sockets, + bool is_extra_port= false) { struct addrinfo *ai, *a; struct addrinfo hints; @@ -2312,20 +2286,6 @@ static MYSQL_SOCKET activate_tcp_port(uint port) unireg_abort(1); /* purecov: tested */ } - /* - special case: for wildcard addresses prefer ipv6 over ipv4, - because we later switch off IPV6_V6ONLY, so ipv6 wildcard - addresses will work for ipv4 too - */ - if (!real_bind_addr_str && ai->ai_family == AF_INET && ai->ai_next - && ai->ai_next->ai_family == AF_INET6) - { - a= ai; - ai= ai->ai_next; - a->ai_next= ai->ai_next; - ai->ai_next= a; - } - for (a= ai; a != NULL; a= a->ai_next) { ip_sock= mysql_socket_socket(key_socket_tcpip, a->ai_family, @@ -2348,101 +2308,246 @@ static MYSQL_SOCKET activate_tcp_port(uint port) } else { - server_socket_ai_family= a->ai_family; + ip_sock.address_family= a->ai_family; sql_print_information("Server socket created on IP: '%s'.", (const char *) ip_addr); - break; - } - } - if (mysql_socket_getfd(ip_sock) == INVALID_SOCKET) - { - DBUG_PRINT("error",("Got error: %d from socket()",socket_errno)); - sql_perror(ER_DEFAULT(ER_IPSOCK_ERROR)); /* purecov: tested */ - unireg_abort(1); /* purecov: tested */ - } + if (mysql_socket_getfd(ip_sock) == INVALID_SOCKET) + { + DBUG_PRINT("error",("Got error: %d from socket()",socket_errno)); + sql_perror(ER_DEFAULT(ER_IPSOCK_ERROR)); /* purecov: tested */ + unireg_abort(1); /* purecov: tested */ + } - mysql_socket_set_thread_owner(ip_sock); + 
mysql_socket_set_thread_owner(ip_sock); -#ifndef __WIN__ - /* - We should not use SO_REUSEADDR on windows as this would enable a - user to open two mysqld servers with the same TCP/IP port. - */ - arg= 1; - (void) mysql_socket_setsockopt(ip_sock,SOL_SOCKET,SO_REUSEADDR,(char*)&arg, - sizeof(arg)); -#endif /* __WIN__ */ +#ifndef _WIN32 + /* + We should not use SO_REUSEADDR on windows as this would enable a + user to open two mysqld servers with the same TCP/IP port. + */ + arg= 1; + (void) mysql_socket_setsockopt(ip_sock, SOL_SOCKET, SO_REUSEADDR, + (char*)&arg, sizeof(arg)); +#endif /* _WIN32 */ #ifdef IPV6_V6ONLY - /* - For interoperability with older clients, IPv6 socket should - listen on both IPv6 and IPv4 wildcard addresses. - Turn off IPV6_V6ONLY option. - - NOTE: this will work starting from Windows Vista only. - On Windows XP dual stack is not available, so it will not - listen on the corresponding IPv4-address. - */ - if (a->ai_family == AF_INET6) - { - arg= 0; - (void) mysql_socket_setsockopt(ip_sock, IPPROTO_IPV6, IPV6_V6ONLY, - (char*)&arg, sizeof(arg)); - } + /* + If an address name resolves to both IPv4 and IPv6 addresses, the server + will listen on them both. With IPV6_V6ONLY unset, listening on an IPv6 + wildcard address may cause listening on an IPv4 wildcard address + to fail. That's why IPV6_V6ONLY needs to be forcefully turned on. + */ + if (a->ai_family == AF_INET6) + { + arg= 1; + (void) mysql_socket_setsockopt(ip_sock, IPPROTO_IPV6, IPV6_V6ONLY, + (char*)&arg, sizeof(arg)); + } #endif #ifdef IP_FREEBIND - arg= 1; - (void) mysql_socket_setsockopt(ip_sock, IPPROTO_IP, IP_FREEBIND, (char*) &arg, - sizeof(arg)); + arg= 1; + (void) mysql_socket_setsockopt(ip_sock, IPPROTO_IP, IP_FREEBIND, + (char*) &arg, sizeof(arg)); #endif - /* - Sometimes the port is not released fast enough when stopping and - restarting the server. This happens quite often with the test suite - on busy Linux systems. 
Retry to bind the address at these intervals: - Sleep intervals: 1, 2, 4, 6, 9, 13, 17, 22, ... - Retry at second: 1, 3, 7, 13, 22, 35, 52, 74, ... - Limit the sequence by mysqld_port_timeout (set --port-open-timeout=#). - */ - int ret; - uint waited, retry, this_wait; - for (waited= 0, retry= 1; ; retry++, waited+= this_wait) - { - if (((ret= mysql_socket_bind(ip_sock, a->ai_addr, a->ai_addrlen)) >= 0 ) || - (socket_errno != SOCKET_EADDRINUSE) || - (waited >= mysqld_port_timeout)) - break; - sql_print_information("Retrying bind on TCP/IP port %u", port); - this_wait= retry * retry / 3 + 1; - sleep(this_wait); + /* + Sometimes the port is not released fast enough when stopping and + restarting the server. This happens quite often with the test suite + on busy Linux systems. Retry to bind the address at these intervals: + Sleep intervals: 1, 2, 4, 6, 9, 13, 17, 22, ... + Retry at second: 1, 3, 7, 13, 22, 35, 52, 74, ... + Limit the sequence by mysqld_port_timeout (set --port-open-timeout=#). + */ + int ret; + uint waited, retry, this_wait; + for (waited= 0, retry= 1; ; retry++, waited+= this_wait) + { + if (((ret= mysql_socket_bind(ip_sock, a->ai_addr, a->ai_addrlen)) >= 0 ) + || (socket_errno != SOCKET_EADDRINUSE) + || (waited >= mysqld_port_timeout)) + break; + sql_print_information("Retrying bind on TCP/IP port %u", port); + this_wait= retry * retry / 3 + 1; + sleep(this_wait); + } + + if (ret < 0) + { + char buff[100]; + sprintf(buff, "Can't start server: Bind on TCP/IP port. 
Got error: %d", + (int) socket_errno); + sql_perror(buff); + sql_print_error("Do you already have another server running on " + "port: %u ?", port); + unireg_abort(1); + } + if (mysql_socket_listen(ip_sock,(int) back_log) < 0) + { + sql_perror("Can't start server: listen() on TCP/IP port"); + sql_print_error("listen() on TCP/IP failed with error %d", + socket_errno); + unireg_abort(1); + } + +#ifdef FD_CLOEXEC + (void) fcntl(mysql_socket_getfd(ip_sock), F_SETFD, FD_CLOEXEC); +#endif + ip_sock.is_extra_port= is_extra_port; + sockets->push(ip_sock); + } } + freeaddrinfo(ai); - if (ret < 0) - { - char buff[100]; - sprintf(buff, "Can't start server: Bind on TCP/IP port. Got error: %d", - (int) socket_errno); - sql_perror(buff); - sql_print_error("Do you already have another mysqld server running on " - "port: %u ?", port); - unireg_abort(1); - } - if (mysql_socket_listen(ip_sock,(int) back_log) < 0) + DBUG_VOID_RETURN; +} + + +/** + Activate usage of a systemd activated sockets + i.e started by mariadb.socket +*/ + +static void use_systemd_activated_sockets() +{ +#ifndef __linux__ + return; +#else + char **names = NULL; + int sd_sockets; + DBUG_ENTER("use_systemd_activated_sockets"); + + sd_sockets= sd_listen_fds_with_names(0, &names); + + if (!sd_sockets) + DBUG_VOID_RETURN; + + DBUG_PRINT("general",("Systemd listen_fds is %d", sd_sockets)); + while (sd_sockets--) { - sql_perror("Can't start server: listen() on TCP/IP port"); - sql_print_error("listen() on TCP/IP failed with error %d", - socket_errno); - unireg_abort(1); + MYSQL_SOCKET sock; + int stype= 0, accepting= 0, getnameinfo_err; + socklen_t l; + union + { + struct sockaddr sa; + struct sockaddr_storage storage; + struct sockaddr_in in; + struct sockaddr_in6 in6; + struct sockaddr_un un; + } addr; + SOCKET_SIZE_TYPE addrlen= sizeof(addr); + char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV]; + + int fd= SD_LISTEN_FDS_START + sd_sockets; + + if (getsockname(fd, &addr.sa, &addrlen)) + { + sql_print_error("Unable to 
getsockname on systemd socket activation socket %d," + " errno %d", fd, errno); + goto err; + } + + l= sizeof(stype); + if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &stype, &l) < 0) + { + sql_print_error("Unable to getsockopt(SOL_SOCKET, SO_TYPE) on" + " systemd socket activation socket %d," + " errno %d", fd, errno); + goto err; + } + + if (stype != SOCK_STREAM) + { + sql_print_error("Unknown systemd socket activation socket %d," + " not of type SOCK_STREAM - type %d", fd, stype); + goto err; + } + + l= sizeof(accepting); + if (getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &accepting, &l) < 0) + { + sql_print_error("Unable to getsockopt(SOL_SOCKET, SO_ACCEPTCONN) on" + " systemd socket activation socket %d," + " errno %d", fd, errno); + goto err; + } + + if (!accepting) + { + sql_print_error("Unknown systemd socket activation socket %d," + " is not listening", fd); + goto err; + } + + switch (addr.sa.sa_family) + { + case AF_INET: + sock= mysql_socket_fd(key_socket_tcpip, fd); + sock.is_unix_domain_socket= 0; + mysqld_port= ntohs(addr.in.sin_port); + break; + case AF_INET6: + sock= mysql_socket_fd(key_socket_tcpip, fd); + sock.is_unix_domain_socket= 0; + mysqld_port= ntohs(addr.in6.sin6_port); + break; + case AF_UNIX: + sock= mysql_socket_fd(key_socket_unix, fd); + sock.is_unix_domain_socket= 1; + break; + default: + sql_print_error("Unknown systemd socket activation socket %d," + " not UNIX or INET socket", fd); + goto err; + } + + /* + We check names!=NULL here because sd_listen_fds_with_names maybe + just sd_listen_fds on older pre v227 systemd + */ + sock.is_extra_port= names && strcmp(names[sd_sockets], "extra") == 0; + + if (addr.sa.sa_family == AF_UNIX) + { + /* + Handle abstract sockets and present them in @ form. + */ + if (addr.un.sun_path[0] == '\0') + addr.un.sun_path[0] = '@'; + sql_print_information("Using systemd activated unix socket %s%s", + addr.un.sun_path, sock.is_extra_port ? 
" (extra)" : ""); + memset(addr.un.sun_path, 0, sizeof(addr.un.sun_path)); + } + else + { + getnameinfo_err= getnameinfo(&addr.sa, addrlen, hbuf, sizeof(hbuf), sbuf, + sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV); + if (getnameinfo_err) + sql_print_warning("getnameinfo() on systemd socket activation socket %d" + " failed with error %s(%d)", fd, + gai_strerror(getnameinfo_err), getnameinfo_err); + else + sql_print_information("Using systemd activated socket host %s port %s%s", hbuf, sbuf, + sock.is_extra_port ? " (extra)" : ""); + } + + mysql_socket_set_thread_owner(sock); + listen_sockets.push(sock); } + systemd_sock_activation= 1; + free(names); -#ifdef FD_CLOEXEC - (void) fcntl(mysql_socket_getfd(ip_sock), F_SETFD, FD_CLOEXEC); -#endif + DBUG_VOID_RETURN; - DBUG_RETURN(ip_sock); +err: + free(names); + unireg_abort(1); + DBUG_VOID_RETURN; +#endif /* __linux__ */ } + static void network_init(void) { #ifdef HAVE_SYS_UN_H @@ -2451,6 +2556,8 @@ static void network_init(void) #endif DBUG_ENTER("network_init"); + use_systemd_activated_sockets(); + if (MYSQL_CALLBACK_ELSE(thread_scheduler, init, (), 0)) unireg_abort(1); /* purecov: inspected */ @@ -2467,20 +2574,23 @@ static void network_init(void) if (!opt_disable_networking) DBUG_ASSERT(report_port != 0); #endif - if (!opt_disable_networking && !opt_bootstrap) + if (!opt_disable_networking && !opt_bootstrap && !systemd_sock_activation) { if (mysqld_port) - base_ip_sock= activate_tcp_port(mysqld_port); + activate_tcp_port(mysqld_port, &listen_sockets, + /* is_extra_port= */ false); if (mysqld_extra_port) - extra_ip_sock= activate_tcp_port(mysqld_extra_port); + activate_tcp_port(mysqld_extra_port, &listen_sockets, + /* is_extra_port= */ true); } #if defined(HAVE_SYS_UN_H) /* ** Create the UNIX socket */ - if (mysqld_unix_port[0] && !opt_bootstrap) + if (mysqld_unix_port[0] && !opt_bootstrap && systemd_sock_activation==0) { + MYSQL_SOCKET unix_sock= MYSQL_INVALID_SOCKET; size_t port_len; DBUG_PRINT("general",("UNIX 
Socket is %s",mysqld_unix_port)); @@ -2497,6 +2607,9 @@ static void network_init(void) unireg_abort(1); /* purecov: inspected */ } + unix_sock.is_unix_domain_socket= true; + listen_sockets.push(unix_sock); + unix_sock_is_online= true; mysql_socket_set_thread_owner(unix_sock); bzero((char*) &UNIXaddr, sizeof(UNIXaddr)); @@ -2524,7 +2637,7 @@ static void network_init(void) port_len) < 0) { sql_perror("Can't start server : Bind on unix socket"); /* purecov: tested */ - sql_print_error("Do you already have another mysqld server running on socket: %s ?",mysqld_unix_port); + sql_print_error("Do you already have another server running on socket: %s ?",mysqld_unix_port); unireg_abort(1); /* purecov: tested */ } umask(((~my_umask) & 0666)); @@ -2633,14 +2746,66 @@ void unlink_thd(THD *thd) } -/****************************************************************************** - Setup a signal thread with handles all signals. - Because Linux doesn't support schemas use a mutex to check that - the signal thread is ready before continuing -******************************************************************************/ +#if defined(_WIN32) +/* + If server is started as service, the service routine will set + the callback function. +*/ +void mysqld_set_service_status_callback(void (*r)(DWORD, DWORD, DWORD)) +{ + my_report_svc_status= r; +} -#if defined(__WIN__) +static bool startup_complete() +{ + return hEventShutdown != NULL; +} +/** + Initiates shutdown on Windows by setting shutdown event. + Reports windows service status. 
+ + If startup was not finished, terminates process (no good + cleanup possible) +*/ +void mysqld_win_initiate_shutdown() +{ + if (startup_complete()) + { + my_report_svc_status(SERVICE_STOP_PENDING, 0, 0); + abort_loop= 1; + if (!SetEvent(hEventShutdown)) + /* This should never fail.*/ + abort(); + } + else + { + my_report_svc_status(SERVICE_STOPPED, 1, 0); + TerminateProcess(GetCurrentProcess(), 1); + } +} + +/* + Signal when server has started and can accept connections. +*/ +void mysqld_win_set_startup_complete() +{ + my_report_svc_status(SERVICE_RUNNING, 0, 0); + DBUG_ASSERT(startup_complete()); +} + + +void mysqld_win_extend_service_timeout(DWORD sec) +{ + my_report_svc_status((DWORD)-1, 0, 2*1000*sec); +} + + +void mysqld_win_set_service_name(const char *name) +{ + if (stricmp(name, "mysql")) + load_default_groups[array_elements(load_default_groups) - 2]= name; +} /* On Windows, we use native SetConsoleCtrlHandler for handle events like Ctrl-C @@ -2651,33 +2816,30 @@ void unlink_thd(THD *thd) callstack. */ -static BOOL WINAPI console_event_handler( DWORD type ) +static BOOL WINAPI console_event_handler( DWORD type ) { - DBUG_ENTER("console_event_handler"); -#ifndef EMBEDDED_LIBRARY - if(type == CTRL_C_EVENT) + static const char *names[]= { + "CTRL_C_EVENT","CTRL_BREAK_EVENT", "CTRL_CLOSE_EVENT", "", "", + "CTRL_LOGOFF_EVENT", "CTRL_SHUTDOWN_EVENT"}; + + switch (type) { - /* - Do not shutdown before startup is finished and shutdown - thread is initialized. Otherwise there is a race condition - between main thread doing initialization and CTRL-C thread doing - cleanup, which can result into crash. 
- */ -#ifndef EMBEDDED_LIBRARY - if(hEventShutdown) - break_connect_loop(); - else -#endif - sql_print_warning("CTRL-C ignored during startup"); - DBUG_RETURN(TRUE); + case CTRL_C_EVENT: + case CTRL_BREAK_EVENT: + sql_print_information("console_event_handler: received %s event, shutting down", + names[type]); + mysqld_win_initiate_shutdown(); + return TRUE; + case CTRL_CLOSE_EVENT: + sql_print_information("console_event_handler: received CTRL_CLOSE_EVENT event, terminating"); + TerminateProcess(GetCurrentProcess(), 1); + return TRUE; + default: + return FALSE; } -#endif - DBUG_RETURN(FALSE); } - - #ifdef DEBUG_UNHANDLED_EXCEPTION_FILTER #define DEBUGGER_ATTACH_TIMEOUT 120 /* @@ -2708,7 +2870,7 @@ static void wait_for_debugger(int timeout_sec) } #endif /* DEBUG_UNHANDLED_EXCEPTION_FILTER */ -LONG WINAPI my_unhandler_exception_filter(EXCEPTION_POINTERS *ex_pointers) +static LONG WINAPI my_unhandler_exception_filter(EXCEPTION_POINTERS *ex_pointers) { static BOOL first_time= TRUE; if(!first_time) @@ -2755,10 +2917,9 @@ LONG WINAPI my_unhandler_exception_filter(EXCEPTION_POINTERS *ex_pointers) void init_signals(void) { - if(opt_console) - SetConsoleCtrlHandler(console_event_handler,TRUE); + SetConsoleCtrlHandler(console_event_handler,TRUE); - /* Avoid MessageBox()es*/ + /* Avoid MessageBox()es*/ _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE); @@ -2775,7 +2936,8 @@ void init_signals(void) */ SetErrorMode(SetErrorMode(0) | SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX); - SetUnhandledExceptionFilter(my_unhandler_exception_filter); + if(!opt_debugging) + SetUnhandledExceptionFilter(my_unhandler_exception_filter); } @@ -2792,7 +2954,7 @@ static void start_signal_handler(void) static void check_data_home(const char *path) {} -#endif /* __WIN__ */ +#endif /* _WIN32 */ #if BACKTRACE_DEMANGLE @@ -2819,7 +2981,7 @@ mariadb_dbug_assert_failed(const char *assert_expr, const char 
*file, } #endif /* DBUG_ASSERT_AS_PRINT */ -#if !defined(__WIN__) +#if !defined(_WIN32) #ifndef SA_RESETHAND #define SA_RESETHAND 0 #endif /* SA_RESETHAND */ @@ -3016,7 +3178,7 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) case SIGQUIT: case SIGKILL: #ifdef EXTRA_DEBUG - sql_print_information("Got signal %d to shutdown mysqld",sig); + sql_print_information("Got signal %d to shutdown server",sig); #endif /* switch to the old log message processing */ logger.set_handlers(global_system_variables.sql_log_slow ? LOG_FILE:LOG_NONE, @@ -3085,7 +3247,7 @@ static void check_data_home(const char *path) {} #endif /*!EMBEDDED_LIBRARY*/ -#endif /* __WIN__*/ +#endif /* _WIN32*/ /** @@ -3159,12 +3321,7 @@ void *my_str_realloc_mysqld(void *ptr, size_t size) } #endif -#include <mysqld_default_groups.h> -#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY) -static const int load_default_groups_sz= -sizeof(load_default_groups)/sizeof(load_default_groups[0]); -#endif /** @@ -3315,7 +3472,6 @@ SHOW_VAR com_status_vars[]= { {"kill", STMT_STATUS(SQLCOM_KILL)}, {"load", STMT_STATUS(SQLCOM_LOAD)}, {"lock_tables", STMT_STATUS(SQLCOM_LOCK_TABLES)}, - {"multi", COM_STATUS(com_multi)}, {"optimize", STMT_STATUS(SQLCOM_OPTIMIZE)}, {"preload_keys", STMT_STATUS(SQLCOM_PRELOAD_KEYS)}, {"prepare_sql", STMT_STATUS(SQLCOM_PREPARE)}, @@ -3506,6 +3662,10 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific) { THD *thd= current_thd; +#ifndef DBUG_OFF + statistic_increment(malloc_calls, &LOCK_status); +#endif + /* When thread specific is set, both mysqld_server_initialized and thd must be set, and we check that with DBUG_ASSERT. 
@@ -3766,7 +3926,7 @@ static int init_common_variables() /* TODO: remove this when my_time_t is 64 bit compatible */ if (!IS_TIME_T_VALID_FOR_TIMESTAMP(server_start_time)) { - sql_print_error("This MySQL server doesn't support dates later than 2038"); + sql_print_error("This server doesn't support dates later than 2038"); exit(1); } @@ -3844,7 +4004,7 @@ static int init_common_variables() of SQLCOM_ constants. */ compile_time_assert(sizeof(com_status_vars)/sizeof(com_status_vars[0]) - 1 == - SQLCOM_END + 11); + SQLCOM_END + 10); #endif if (get_options(&remaining_argc, &remaining_argv)) @@ -3891,7 +4051,7 @@ static int init_common_variables() /* MyISAM requires two file handles per table. */ wanted_files= (extra_files + max_connections + extra_max_connections + tc_size * 2 * tc_instances); -#if defined(HAVE_POOL_OF_THREADS) && !defined(__WIN__) +#if defined(HAVE_POOL_OF_THREADS) && !defined(_WIN32) // add epoll or kevent fd for each threadpool group, in case pool of threads is used wanted_files+= (thread_handling > SCHEDULER_NO_THREADS) ? 0 : threadpool_size; #endif @@ -4013,6 +4173,8 @@ static int init_common_variables() test purposes, to be able to start "mysqld" even if the requested character set is not available (see bug#18743). */ + myf utf8_flag= global_system_variables.old_behavior & + OLD_MODE_UTF8_IS_UTF8MB3 ? 
MY_UTF8_IS_UTF8MB3 : 0; for (;;) { char *next_character_set_name= strchr(default_character_set_name, ','); @@ -4020,7 +4182,7 @@ static int init_common_variables() *next_character_set_name++= '\0'; if (!(default_charset_info= get_charset_by_csname(default_character_set_name, - MY_CS_PRIMARY, MYF(MY_WME)))) + MY_CS_PRIMARY, MYF(utf8_flag | MY_WME)))) { if (next_character_set_name) { @@ -4037,7 +4199,7 @@ static int init_common_variables() if (default_collation_name) { CHARSET_INFO *default_collation; - default_collation= get_charset_by_name(default_collation_name, MYF(0)); + default_collation= get_charset_by_name(default_collation_name, MYF(utf8_flag)); if (!default_collation) { #ifdef WITH_PERFSCHEMA_STORAGE_ENGINE @@ -4051,7 +4213,7 @@ static int init_common_variables() { sql_print_error(ER_DEFAULT(ER_COLLATION_CHARSET_MISMATCH), default_collation_name, - default_charset_info->csname); + default_charset_info->cs_name.str); return 1; } default_charset_info= default_collation; @@ -4069,8 +4231,8 @@ static int init_common_variables() { sql_print_warning("'%s' can not be used as client character set. 
" "'%s' will be used as default client character set.", - default_charset_info->csname, - my_charset_latin1.csname); + default_charset_info->cs_name.str, + my_charset_latin1.cs_name.str); global_system_variables.collation_connection= &my_charset_latin1; global_system_variables.character_set_results= &my_charset_latin1; global_system_variables.character_set_client= &my_charset_latin1; @@ -4078,7 +4240,7 @@ static int init_common_variables() if (!(character_set_filesystem= get_charset_by_csname(character_set_filesystem_name, - MY_CS_PRIMARY, MYF(MY_WME)))) + MY_CS_PRIMARY, MYF(utf8_flag | MY_WME)))) return 1; global_system_variables.character_set_filesystem= character_set_filesystem; @@ -4242,6 +4404,7 @@ static int init_thread_environment() MY_MUTEX_INIT_SLOW); mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered, MY_MUTEX_INIT_SLOW); + mysql_mutex_init(key_LOCK_backup_log, &LOCK_backup_log, MY_MUTEX_INIT_FAST); #ifdef HAVE_OPENSSL mysql_mutex_init(key_LOCK_des_key_file, @@ -4407,6 +4570,7 @@ void ssl_acceptor_stats_update(int sslaccept_ret) static void init_ssl() { +#if !defined(EMBEDDED_LIBRARY) /* Not need to check require_secure_transport on the Linux, because it always has Unix domain sockets that are secure: @@ -4422,7 +4586,7 @@ static void init_ssl() unireg_abort(1); } #endif -#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) +#if defined(HAVE_OPENSSL) if (opt_use_ssl) { enum enum_ssl_init_error error= SSL_INITERR_NOERROR; @@ -4462,7 +4626,8 @@ static void init_ssl() } if (des_key_file) load_des_key_file(des_key_file); -#endif /* HAVE_OPENSSL && ! 
EMBEDDED_LIBRARY */ +#endif /* HAVE_OPENSSL */ +#endif /* !EMBEDDED_LIBRARY */ } /* Reinitialize SSL (FLUSH SSL) */ @@ -5002,6 +5167,9 @@ static int init_server_components() MYSQL_COMPATIBILITY_OPTION("new"), MYSQL_COMPATIBILITY_OPTION("show_compatibility_56"), + /* The following options were removed in 10.6 */ + MARIADB_REMOVED_OPTION("innodb-force-load-corrupted"), + /* The following options were removed in 10.5 */ #if defined(__linux__) MARIADB_REMOVED_OPTION("super-large-pages"), @@ -5019,6 +5187,31 @@ static int init_server_components() /* The following options were added after 5.6.10 */ MYSQL_TO_BE_IMPLEMENTED_OPTION("rpl-stop-slave-timeout"), MYSQL_TO_BE_IMPLEMENTED_OPTION("validate-user-plugins"), // NO_EMBEDDED_ACCESS_CHECKS + + /* The following options were deprecated in 10.5 or earlier */ + MARIADB_REMOVED_OPTION("innodb-adaptive-max-sleep-delay"), + MARIADB_REMOVED_OPTION("innodb-background-scrub-data-check-interval"), + MARIADB_REMOVED_OPTION("innodb-background-scrub-data-compressed"), + MARIADB_REMOVED_OPTION("innodb-background-scrub-data-interval"), + MARIADB_REMOVED_OPTION("innodb-background-scrub-data-uncompressed"), + MARIADB_REMOVED_OPTION("innodb-buffer-pool-instances"), + MARIADB_REMOVED_OPTION("innodb-commit-concurrency"), + MARIADB_REMOVED_OPTION("innodb-concurrency-tickets"), + MARIADB_REMOVED_OPTION("innodb-file-format"), + MARIADB_REMOVED_OPTION("innodb-large-prefix"), + MARIADB_REMOVED_OPTION("innodb-lock-schedule-algorithm"), + MARIADB_REMOVED_OPTION("innodb-log-checksums"), + MARIADB_REMOVED_OPTION("innodb-log-compressed-pages"), + MARIADB_REMOVED_OPTION("innodb-log-files-in-group"), + MARIADB_REMOVED_OPTION("innodb-log-optimize-ddl"), + MARIADB_REMOVED_OPTION("innodb-page-cleaners"), + MARIADB_REMOVED_OPTION("innodb-replication-delay"), + MARIADB_REMOVED_OPTION("innodb-scrub-log"), + MARIADB_REMOVED_OPTION("innodb-scrub-log-speed"), + MARIADB_REMOVED_OPTION("innodb-sync-array-size"), + 
MARIADB_REMOVED_OPTION("innodb-thread-concurrency"), + MARIADB_REMOVED_OPTION("innodb-thread-sleep-delay"), + MARIADB_REMOVED_OPTION("innodb-undo-logs"), {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; /* @@ -5079,7 +5272,7 @@ static int init_server_components() if (log_output_options & LOG_NONE) { /* - Issue a warining if there were specified additional options to the + Issue a warning if there were specified additional options to the log-output along with NONE. Probably this wasn't what user wanted. */ if ((log_output_options & LOG_NONE) && (log_output_options & ~LOG_NONE)) @@ -5128,7 +5321,7 @@ static int init_server_components() #ifdef USE_ARIA_FOR_TMP_TABLES if (!ha_storage_engine_is_enabled(maria_hton) && !opt_bootstrap) { - sql_print_error("Aria engine is not enabled or did not start. The Aria engine must be enabled to continue as mysqld was configured with --with-aria-tmp-tables"); + sql_print_error("Aria engine is not enabled or did not start. The Aria engine must be enabled to continue as server was configured with --with-aria-tmp-tables"); unireg_abort(1); } #endif @@ -5152,6 +5345,9 @@ static int init_server_components() } #endif + if (ddl_log_initialize()) + unireg_abort(1); + tc_log= get_tc_log_implementation(); if (tc_log->open(opt_bin_log ? 
opt_bin_logname : opt_tc_log_file)) @@ -5161,9 +5357,7 @@ static int init_server_components() } if (ha_recover(0)) - { unireg_abort(1); - } #ifndef EMBEDDED_LIBRARY start_handle_manager(); @@ -5181,14 +5375,27 @@ static int init_server_components() } #ifdef HAVE_REPLICATION - if (opt_bin_log && expire_logs_days) + if (opt_bin_log) + { + if (binlog_expire_logs_seconds) + { + time_t purge_time= server_start_time - binlog_expire_logs_seconds; + if (purge_time >= 0) + mysql_bin_log.purge_logs_before_date(purge_time); + } + } + else { - time_t purge_time= server_start_time - expire_logs_days*24*60*60; - if (purge_time >= 0) - mysql_bin_log.purge_logs_before_date(purge_time); + if (binlog_expire_logs_seconds) + sql_print_warning("You need to use --log-bin to make --expire-logs-days " + "or --binlog-expire-logs-seconds work."); } #endif + if (ddl_log_execute_recovery() > 0) + unireg_abort(1); + ha_signal_ddl_recovery_done(); + if (opt_myisam_log) (void) mi_log(1); @@ -5235,19 +5442,6 @@ static int init_server_components() #ifndef EMBEDDED_LIBRARY -#ifdef _WIN32 -static void create_shutdown_event() -{ - hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name); - // On "Stop Service" we have to do regular shutdown - Service.SetShutdownEvent(hEventShutdown); -} -#else /*_WIN32*/ -#define create_shutdown_event() -#endif -#endif /* EMBEDDED_LIBRARY */ - -#ifndef EMBEDDED_LIBRARY #ifndef DBUG_OFF /* @@ -5287,11 +5481,7 @@ static void test_lc_time_sz() #endif//DBUG_OFF -#ifdef __WIN__ -int win_main(int argc, char **argv) -#else int mysqld_main(int argc, char **argv) -#endif { #ifndef _WIN32 /* We can't close stdin just now, because it may be booststrap mode. 
*/ @@ -5309,17 +5499,15 @@ int mysqld_main(int argc, char **argv) if (init_early_variables()) exit(1); -#ifndef _WIN32 #ifdef WITH_PERFSCHEMA_STORAGE_ENGINE pre_initialize_performance_schema(); #endif /*WITH_PERFSCHEMA_STORAGE_ENGINE */ - // For windows, my_init() is called from the win specific mysqld_main + if (my_init()) // init my_sys library & pthreads { fprintf(stderr, "my_init() failed."); return 1; } -#endif orig_argc= argc; orig_argv= argv; @@ -5519,16 +5707,16 @@ int mysqld_main(int argc, char **argv) } #ifdef WITH_WSREP - wsrep_set_wsrep_on(); + wsrep_set_wsrep_on(nullptr); if (WSREP_ON && wsrep_check_opts()) unireg_abort(1); #endif +#ifdef _WIN32 /* The subsequent calls may take a long time : e.g. innodb log read. Thus set the long running service control manager timeout */ -#if defined(_WIN32) && !defined(EMBEDDED_LIBRARY) - Service.SetSlowStarting(slow_start_timeout); + my_report_svc_status(SERVICE_START_PENDING, NO_ERROR, slow_start_timeout); #endif if (init_server_components()) @@ -5537,13 +5725,6 @@ int mysqld_main(int argc, char **argv) init_ssl(); network_init(); -#ifdef _WIN32 - if (!opt_console) - { - FreeConsole(); // Remove window - } -#endif - #ifdef WITH_WSREP // Recover and exit. if (wsrep_recovery) @@ -5583,8 +5764,6 @@ int mysqld_main(int argc, char **argv) initialize_information_schema_acl(); - execute_ddl_log_recovery(); - /* Change EVENTS_ORIGINAL to EVENTS_OFF (the default value) as there is no point in using ORIGINAL during startup @@ -5634,8 +5813,6 @@ int mysqld_main(int argc, char **argv) } } - create_shutdown_event(); - /* Copy default global rpl_filter to global_rpl_filter */ copy_filter_setting(global_rpl_filter, get_or_create_rpl_filter("", 0)); @@ -5660,8 +5837,8 @@ int mysqld_main(int argc, char **argv) if (IS_SYSVAR_AUTOSIZE(&server_version_ptr)) sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname, server_version, - ((mysql_socket_getfd(unix_sock) == INVALID_SOCKET) ? 
- (char*) "" : mysqld_unix_port), + (systemd_sock_activation ? "Systemd socket activated ports" : + (unix_sock_is_online ? mysqld_unix_port : (char*) "")), mysqld_port, MYSQL_COMPILATION_COMMENT); else { @@ -5673,8 +5850,8 @@ int mysqld_main(int argc, char **argv) sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname, real_server_version, - ((mysql_socket_getfd(unix_sock) == INVALID_SOCKET) ? - (char*) "" : mysqld_unix_port), + (systemd_sock_activation ? "Systemd socket activated ports" : + (unix_sock_is_online ? mysqld_unix_port : (char*) "")), mysqld_port, MYSQL_COMPILATION_COMMENT); } @@ -5687,9 +5864,6 @@ int mysqld_main(int argc, char **argv) } #endif -#if defined(_WIN32) && !defined(EMBEDDED_LIBRARY) - Service.SetRunning(); -#endif /* Signal threads waiting for server to be started */ mysql_mutex_lock(&LOCK_server_started); @@ -5745,16 +5919,6 @@ int mysqld_main(int argc, char **argv) */ PSI_CALL_delete_current_thread(); -#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY) - if (start_mode) - Service.Stop(); - else - { - Service.SetShutdownEvent(0); - if (hEventShutdown) - CloseHandle(hEventShutdown); - } -#endif #if (defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)) ERR_remove_state(0); #endif @@ -5765,245 +5929,6 @@ int mysqld_main(int argc, char **argv) #endif /* !EMBEDDED_LIBRARY */ -/**************************************************************************** - Main and thread entry function for Win32 - (all this is needed only to run mysqld as a service on WinNT) -****************************************************************************/ - -#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY) -void mysql_service(void *p) -{ - if (my_thread_init()) - abort(); - - if (use_opt_args) - win_main(opt_argc, opt_argv); - else - win_main(Service.my_argc, Service.my_argv); - - my_thread_end(); -} - - -/* Quote string if it contains space, else copy */ - -static char *add_quoted_string(char *to, const char *from, char *to_end) -{ - uint length= (uint) 
(to_end-to); - - if (!strchr(from, ' ')) - return strmake(to, from, length-1); - return strxnmov(to, length-1, "\"", from, "\"", NullS); -} - - -/** - Handle basic handling of services, like installation and removal. - - @param argv Pointer to argument list - @param servicename Internal name of service - @param displayname Display name of service (in taskbar ?) - @param file_path Path to this program - @param startup_option Startup option to mysqld - - @retval 0 option handled - @retval 1 Could not handle option -*/ - -static bool -default_service_handling(char **argv, - const char *servicename, - const char *displayname, - const char *file_path, - const char *extra_opt, - const char *account_name) -{ - char path_and_service[FN_REFLEN+FN_REFLEN+32], *pos, *end; - const char *opt_delim; - end= path_and_service + sizeof(path_and_service)-3; - - /* We have to quote filename if it contains spaces */ - pos= add_quoted_string(path_and_service, file_path, end); - if (extra_opt && *extra_opt) - { - /* - Add option after file_path. There will be zero or one extra option. It's - assumed to be --defaults-file=file but isn't checked. The variable (not - the option name) should be quoted if it contains a string. 
- */ - *pos++= ' '; - if ((opt_delim= strchr(extra_opt, '='))) - { - size_t length= ++opt_delim - extra_opt; - pos= strnmov(pos, extra_opt, length); - } - else - opt_delim= extra_opt; - - pos= add_quoted_string(pos, opt_delim, end); - } - /* We must have servicename last */ - *pos++= ' '; - (void) add_quoted_string(pos, servicename, end); - - if (Service.got_service_option(argv, "install")) - { - Service.Install(1, servicename, displayname, path_and_service, - account_name); - return 0; - } - if (Service.got_service_option(argv, "install-manual")) - { - Service.Install(0, servicename, displayname, path_and_service, - account_name); - return 0; - } - if (Service.got_service_option(argv, "remove")) - { - Service.Remove(servicename); - return 0; - } - return 1; -} - -/* Remove service name from the command line arguments, and pass -resulting command line to the service via opt_args.*/ -#include <vector> -static void service_init_cmdline_args(int argc, char **argv) -{ - start_mode= 1; - use_opt_args= 1; - - if(argc == 1) - { - opt_argc= argc; - opt_argv= argv; - } - else - { - static std::vector<char *> argv_no_service; - for (int i= 0; argv[i]; i++) - argv_no_service.push_back(argv[i]); - // Remove the last argument, service name - argv_no_service[argv_no_service.size() - 1]= 0; - opt_argc= (int)argv_no_service.size() - 1; - opt_argv= &argv_no_service[0]; - } - DBUG_ASSERT(!opt_argv[opt_argc]); -} - -int mysqld_main(int argc, char **argv) -{ - my_progname= argv[0]; - - /* - When several instances are running on the same machine, we - need to have an unique named hEventShudown through the - application PID e.g.: MySQLShutdown1890; MySQLShutdown2342 - */ - int10_to_str((int) GetCurrentProcessId(),strmov(shutdown_event_name, - "MySQLShutdown"), 10); - - /* Must be initialized early for comparison of service name */ - system_charset_info= &my_charset_utf8mb3_general_ci; - -#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE - pre_initialize_performance_schema(); -#endif 
/*WITH_PERFSCHEMA_STORAGE_ENGINE */ - - if (my_init()) - { - fprintf(stderr, "my_init() failed."); - return 1; - } - - - char file_path[FN_REFLEN]; - my_path(file_path, argv[0], ""); /* Find name in path */ - fn_format(file_path,argv[0],file_path,"", MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_RESOLVE_SYMLINKS); - - - if (argc == 2) - { - if (!default_service_handling(argv, MYSQL_SERVICENAME, MYSQL_SERVICENAME, - file_path, "", NULL)) - return 0; - - if (Service.IsService(argv[1])) /* Start an optional service */ - { - /* - Only add the service name to the groups read from the config file - if it's not "MySQL". (The default service name should be 'mysqld' - but we started a bad tradition by calling it MySQL from the start - and we are now stuck with it. - */ - if (my_strcasecmp(system_charset_info, argv[1],"mysql")) - load_default_groups[load_default_groups_sz-2]= argv[1]; - service_init_cmdline_args(argc, argv); - Service.Init(argv[1], mysql_service); - return 0; - } - } - else if (argc == 3) /* install or remove any optional service */ - { - if (!default_service_handling(argv, argv[2], argv[2], file_path, "", - NULL)) - return 0; - if (Service.IsService(argv[2])) - { - /* - mysqld was started as - mysqld --defaults-file=my_path\my.ini service-name - */ - if (my_strcasecmp(system_charset_info, argv[2],"mysql")) - load_default_groups[load_default_groups_sz-2]= argv[2]; - service_init_cmdline_args(argc, argv); - Service.Init(argv[2], mysql_service); - return 0; - } - } - else if (argc == 4 || argc == 5) - { - /* - This may seem strange, because we handle --local-service while - preserving 4.1's behavior of allowing any one other argument that is - passed to the service on startup. (The assumption is that this is - --defaults-file=file, but that was not enforced in 4.1, so we don't - enforce it here.) 
- */ - const char *extra_opt= NullS; - const char *account_name = NullS; - int index; - for (index = 3; index < argc; index++) - { - if (!strcmp(argv[index], "--local-service")) - account_name= "NT AUTHORITY\\LocalService"; - else - extra_opt= argv[index]; - } - - if (argc == 4 || account_name) - if (!default_service_handling(argv, argv[2], argv[2], file_path, - extra_opt, account_name)) - return 0; - } - else if (argc == 1 && Service.IsService(MYSQL_SERVICENAME)) - { - /* start the default service */ - service_init_cmdline_args(argc, argv); - Service.Init(MYSQL_SERVICENAME, mysql_service); - return 0; - } - - /* Start as standalone server */ - Service.my_argc=argc; - Service.my_argv=argv; - mysql_service(NULL); - return 0; -} -#endif - - static bool read_init_file(char *file_name) { MYSQL_FILE *file; @@ -6133,8 +6058,7 @@ void handle_accepted_socket(MYSQL_SOCKET new_sock, MYSQL_SOCKET sock) { #ifdef HAVE_LIBWRAP { - if (mysql_socket_getfd(sock) == mysql_socket_getfd(base_ip_sock) || - mysql_socket_getfd(sock) == mysql_socket_getfd(extra_ip_sock)) + if (!sock.is_unix_domain_socket) { struct request_info req; signal(SIGCHLD, SIG_DFL); @@ -6177,11 +6101,9 @@ void handle_accepted_socket(MYSQL_SOCKET new_sock, MYSQL_SOCKET sock) DBUG_PRINT("info", ("Creating CONNECT for new connection")); if (auto connect= new CONNECT(new_sock, - mysql_socket_getfd(sock) == - mysql_socket_getfd(unix_sock) ? + sock.is_unix_domain_socket ? VIO_TYPE_SOCKET : VIO_TYPE_TCPIP, - mysql_socket_getfd(sock) == - mysql_socket_getfd(extra_ip_sock) ? + sock.is_extra_port ? 
extra_thread_scheduler : thread_scheduler)) create_new_thread(connect); else @@ -6217,36 +6139,32 @@ void handle_connections_sockets() struct sockaddr_storage cAddr; int retval; #ifdef HAVE_POLL - int socket_count= 0; - struct pollfd fds[3]; // for ip_sock, unix_sock and extra_ip_sock - MYSQL_SOCKET pfs_fds[3]; // for performance schema -#define setup_fds(X) \ - mysql_socket_set_thread_owner(X); \ - pfs_fds[socket_count]= (X); \ - fds[socket_count].fd= mysql_socket_getfd(X); \ - fds[socket_count].events= POLLIN; \ - socket_count++ + // for ip_sock, unix_sock and extra_ip_sock + Dynamic_array<struct pollfd> fds(PSI_INSTRUMENT_MEM); #else -#define setup_fds(X) FD_SET(mysql_socket_getfd(X),&clientFDs) fd_set readFDs,clientFDs; - FD_ZERO(&clientFDs); #endif DBUG_ENTER("handle_connections_sockets"); - if (mysql_socket_getfd(base_ip_sock) != INVALID_SOCKET) +#ifdef HAVE_POLL + for (size_t i= 0; i < listen_sockets.size(); i++) { - setup_fds(base_ip_sock); - set_non_blocking_if_supported(base_ip_sock); + struct pollfd local_fds; + mysql_socket_set_thread_owner(listen_sockets.at(i)); + local_fds.fd= mysql_socket_getfd(listen_sockets.at(i)); + local_fds.events= POLLIN; + fds.push(local_fds); + set_non_blocking_if_supported(listen_sockets.at(i)); } - if (mysql_socket_getfd(extra_ip_sock) != INVALID_SOCKET) +#else + FD_ZERO(&clientFDs); + for (size_t i= 0; i < listen_sockets.size(); i++) { - setup_fds(extra_ip_sock); - set_non_blocking_if_supported(extra_ip_sock); + int fd= mysql_socket_getfd(listen_sockets.at(i)); + FD_SET(fd, &clientFDs); + set_non_blocking_if_supported(listen_sockets.at(i)); } -#ifdef HAVE_SYS_UN_H - setup_fds(unix_sock); - set_non_blocking_if_supported(unix_sock); #endif sd_notify(0, "READY=1\n" @@ -6256,10 +6174,10 @@ void handle_connections_sockets() while (!abort_loop) { #ifdef HAVE_POLL - retval= poll(fds, socket_count, -1); + retval= poll(fds.get_pos(0), fds.size(), -1); #else readFDs=clientFDs; - retval= select((int) 0,&readFDs,0,0,0); + retval= 
select(FD_SETSIZE, &readFDs, NULL, NULL, NULL); #endif if (retval < 0) @@ -6273,7 +6191,7 @@ void handle_connections_sockets() */ statistic_increment(connection_errors_accept, &LOCK_status); if (!select_errors++ && !abort_loop) /* purecov: inspected */ - sql_print_error("mysqld: Got error %d from select",socket_errno); /* purecov: inspected */ + sql_print_error("Server: Got error %d from select",socket_errno); /* purecov: inspected */ } continue; } @@ -6283,22 +6201,23 @@ void handle_connections_sockets() /* Is this a new connection request ? */ #ifdef HAVE_POLL - for (int i= 0; i < socket_count; ++i) + for (size_t i= 0; i < fds.size(); ++i) { - if (fds[i].revents & POLLIN) + if (fds.at(i).revents & POLLIN) { - sock= pfs_fds[i]; + sock= listen_sockets.at(i); break; } } #else // HAVE_POLL - if (FD_ISSET(mysql_socket_getfd(base_ip_sock),&readFDs)) - sock= base_ip_sock; - else - if (FD_ISSET(mysql_socket_getfd(extra_ip_sock),&readFDs)) - sock= extra_ip_sock; - else - sock = unix_sock; + for (size_t i=0; i < listen_sockets.size(); i++) + { + if (FD_ISSET(mysql_socket_getfd(listen_sockets.at(i)), &readFDs)) + { + sock= listen_sockets.at(i); + break; + } + } #endif // HAVE_POLL for (uint retry=0; retry < MAX_ACCEPT_RETRY && !abort_loop; retry++) @@ -6618,6 +6537,10 @@ struct my_option my_long_options[]= "relay logs", &opt_relaylog_index_name, &opt_relaylog_index_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"log-ddl-recovery", 0, + "Path to file used for recovery of DDL statements after a crash", + &opt_ddl_recovery_file, &opt_ddl_recovery_file, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"log-isam", OPT_ISAM_LOG, "Log all MyISAM changes to file.", &myisam_log_filename, &myisam_log_filename, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, @@ -6639,7 +6562,7 @@ struct my_option my_long_options[]= {"master-retry-count", 0, "The number of tries the slave will make to connect to the master before giving up.", &master_retry_count, &master_retry_count, 0, GET_ULONG, - 
REQUIRED_ARG, 3600*24, 0, 0, 0, 0, 0}, + REQUIRED_ARG, 100000, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"init-rpl-role", 0, "Set the replication role", &rpl_status, &rpl_status, &rpl_role_typelib, @@ -6760,7 +6683,7 @@ struct my_option my_long_options[]= &opt_use_ssl, &opt_use_ssl, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif -#ifdef __WIN__ +#ifdef _WIN32 {"standalone", 0, "Dummy option to start as a standalone program (NT).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -7425,6 +7348,9 @@ SHOW_VAR status_vars[]= { {"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS}, SHOW_FUNC_ENTRY("Key", &show_default_keycache), {"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS}, +#ifndef DBUG_OFF + {"malloc_calls", (char*) &malloc_calls, SHOW_LONG}, +#endif {"Max_statement_time_exceeded", (char*) offsetof(STATUS_VAR, max_statement_time_exceeded), SHOW_LONG_STATUS}, {"Master_gtid_wait_count", (char*) offsetof(STATUS_VAR, master_gtid_wait_count), SHOW_LONG_STATUS}, {"Master_gtid_wait_timeouts", (char*) offsetof(STATUS_VAR, master_gtid_wait_timeouts), SHOW_LONG_STATUS}, @@ -7432,6 +7358,7 @@ SHOW_VAR status_vars[]= { {"Max_used_connections", (char*) &max_used_connections, SHOW_LONG}, {"Memory_used", (char*) &show_memory_used, SHOW_SIMPLE_FUNC}, {"Memory_used_initial", (char*) &start_memory_used, SHOW_LONGLONG}, + {"Resultset_metadata_skipped", (char *) offsetof(STATUS_VAR, skip_metadata_count),SHOW_LONG_STATUS}, {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_NOFLUSH}, {"Open_files", (char*) &my_file_opened, SHOW_SINT}, {"Open_streams", (char*) &my_stream_opened, SHOW_LONG_NOFLUSH}, @@ -7668,12 +7595,14 @@ static void print_help() static void usage(void) { DBUG_ENTER("usage"); + myf utf8_flag= global_system_variables.old_behavior & + OLD_MODE_UTF8_IS_UTF8MB3 ? 
MY_UTF8_IS_UTF8MB3 : 0; if (!(default_charset_info= get_charset_by_csname(default_character_set_name, MY_CS_PRIMARY, - MYF(MY_WME)))) + MYF(utf8_flag | MY_WME)))) exit(1); if (!default_collation_name) - default_collation_name= (char*) default_charset_info->name; + default_collation_name= (char*) default_charset_info->coll_name.str; print_version(); puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("Starts the MariaDB database server.\n"); @@ -7682,7 +7611,7 @@ static void usage(void) puts("\nFor more help options (several pages), use mysqld --verbose --help."); else { -#ifdef __WIN__ +#ifdef _WIN32 puts("NT and Win32 specific options:\n" " --install Install the default service (NT).\n" " --install-manual Install the default service started manually (NT).\n" @@ -7707,8 +7636,8 @@ static void usage(void) "\nbecause execution stopped before plugins were initialized."); } - puts("\nTo see what variables a running MySQL server is using, type" - "\n'mysqladmin variables' instead of 'mysqld --verbose --help'."); + puts("\nTo see what variables a running server is using, type" + "\n'SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES' instead of 'mysqld --verbose --help' or 'mariadbd --verbose --help'."); } DBUG_VOID_RETURN; } @@ -7744,6 +7673,7 @@ static int mysql_init_variables(void) opt_logname= opt_binlog_index_name= opt_slow_logname= 0; opt_log_basename= 0; opt_tc_log_file= (char *)"tc.log"; // no hostname in tc_log file name ! 
+ opt_ddl_recovery_file= (char *) "ddl_recovery.log"; opt_secure_auth= 0; opt_bootstrap= opt_myisam_log= 0; disable_log_notes= 0; @@ -7757,6 +7687,7 @@ static int mysql_init_variables(void) abort_loop= select_thread_in_use= signal_thread_in_use= 0; grant_option= 0; aborted_threads= aborted_connects= aborted_connects_preauth= 0; + malloc_calls= 0; subquery_cache_miss= subquery_cache_hit= 0; delayed_insert_threads= delayed_insert_writes= delayed_rows_in_use= 0; delayed_insert_errors= thread_created= 0; @@ -7785,9 +7716,6 @@ static int mysql_init_variables(void) character_set_filesystem= &my_charset_bin; opt_specialflag= SPECIAL_ENGLISH; -#ifndef EMBEDDED_LIBRARY - unix_sock= base_ip_sock= extra_ip_sock= MYSQL_INVALID_SOCKET; -#endif mysql_home_ptr= mysql_home; log_error_file_ptr= log_error_file; protocol_version= PROTOCOL_VERSION; @@ -7901,7 +7829,7 @@ static int mysql_init_variables(void) #endif /* ! EMBEDDED_LIBRARY */ #endif /* HAVE_OPENSSL */ -#if defined(__WIN__) +#if defined(_WIN32) /* Allow Win32 users to move MySQL anywhere */ { char prg_dev[LIBLEN]; @@ -8098,6 +8026,17 @@ mysqld_get_one_option(const struct my_option *opt, const char *argument, } break; } + case (int)OPT_EXPIRE_LOGS_DAYS: + { + binlog_expire_logs_seconds= (ulong)(expire_logs_days*24*60*60); + break; + } + case (int)OPT_BINLOG_EXPIRE_LOGS_SECONDS: + { + expire_logs_days= (binlog_expire_logs_seconds/double (24*60*60)); + break; + } + #ifdef HAVE_REPLICATION case (int)OPT_REPLICATE_IGNORE_DB: { @@ -8231,6 +8170,23 @@ mysqld_get_one_option(const struct my_option *opt, const char *argument, break; case OPT_BOOTSTRAP: opt_noacl=opt_bootstrap=1; +#ifdef _WIN32 + { + /* + Check if security descriptor is passed from + mysql_install_db.exe. + Used by Windows installer to correctly setup + privileges on the new directories. 
+ */ + char* dir_sddl = getenv("MARIADB_NEW_DIRECTORY_SDDL"); + if (dir_sddl) + { + ConvertStringSecurityDescriptorToSecurityDescriptor( + dir_sddl, SDDL_REVISION_1, &my_dir_security_attributes.lpSecurityDescriptor, NULL); + DBUG_ASSERT(my_dir_security_attributes.lpSecurityDescriptor); + } + } +#endif break; case OPT_SERVER_ID: ::server_id= global_system_variables.server_id; @@ -8680,15 +8636,20 @@ static int get_options(int *argc_ptr, char ***argv_ptr) return 1; #ifdef EMBEDDED_LIBRARY - one_thread_scheduler(thread_scheduler); - one_thread_scheduler(extra_thread_scheduler); + one_thread_scheduler(thread_scheduler, &connection_count); + /* + It looks like extra_connection_count should be passed here but + its been using connection_count for the last 10+ years and + no-one was requested a change so lets not suprise anyone. + */ + one_thread_scheduler(extra_thread_scheduler, &connection_count); #else if (thread_handling <= SCHEDULER_ONE_THREAD_PER_CONNECTION) one_thread_per_connection_scheduler(thread_scheduler, &max_connections, &connection_count); else if (thread_handling == SCHEDULER_NO_THREADS) - one_thread_scheduler(thread_scheduler); + one_thread_scheduler(thread_scheduler, &connection_count); else pool_of_threads_scheduler(thread_scheduler, &max_connections, &connection_count); @@ -9100,6 +9061,7 @@ static PSI_file_info all_server_files[]= { &key_file_global_ddl_log, "global_ddl_log", 0}, { &key_file_load, "load", 0}, { &key_file_loadfile, "LOAD_FILE", 0}, + { &key_file_log_ddl, "log_ddl", 0}, { &key_file_log_event_data, "log_event_data", 0}, { &key_file_log_event_info, "log_event_info", 0}, { &key_file_master_info, "master_info", 0}, @@ -9256,6 +9218,8 @@ PSI_stage_info stage_slave_background_process_request= { 0, "Processing requests PSI_stage_info stage_slave_background_wait_request= { 0, "Waiting for requests", 0}; PSI_stage_info stage_waiting_for_deadlock_kill= { 0, "Waiting for parallel replication deadlock handling to complete", 0}; PSI_stage_info 
stage_starting= { 0, "starting", 0}; +PSI_stage_info stage_waiting_for_flush= { 0, "Waiting for non trans tables to be flushed", 0}; +PSI_stage_info stage_waiting_for_ddl= { 0, "Waiting for DDLs", 0}; PSI_memory_key key_memory_DATE_TIME_FORMAT; PSI_memory_key key_memory_DDL_LOG_MEMORY_ENTRY; @@ -9305,6 +9269,7 @@ PSI_memory_key key_memory_binlog_ver_1_event; PSI_memory_key key_memory_bison_stack; PSI_memory_key key_memory_blob_mem_storage; PSI_memory_key key_memory_dboptions_hash; +PSI_memory_key key_memory_dbnames_cache; PSI_memory_key key_memory_errmsgs; PSI_memory_key key_memory_frm_string; PSI_memory_key key_memory_gdl; @@ -9605,6 +9570,7 @@ static PSI_memory_info all_server_memory[]= { &key_memory_THD_handler_tables_hash, "THD::handler_tables_hash", 0}, { &key_memory_hash_index_key_buffer, "hash_index_key_buffer", 0}, { &key_memory_dboptions_hash, "dboptions_hash", 0}, + { &key_memory_dbnames_cache, "dbnames_cache", 0}, { &key_memory_user_conn, "user_conn", 0}, // { &key_memory_LOG_POS_COORD, "LOG_POS_COORD", 0}, // { &key_memory_XID_STATE, "XID_STATE", 0}, diff --git a/sql/mysqld.h b/sql/mysqld.h index d31f0159eb6..6756cbc425a 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -116,7 +116,6 @@ extern bool opt_ignore_builtin_innodb; extern my_bool opt_character_set_client_handshake; extern my_bool debug_assert_on_not_freed_memory; extern MYSQL_PLUGIN_IMPORT bool volatile abort_loop; -extern Atomic_counter<uint> connection_count; extern my_bool opt_safe_user_create; extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap; extern my_bool opt_slave_compressed_protocol, use_temp_pool; @@ -147,7 +146,6 @@ extern ulong opt_replicate_events_marked_for_skip; extern char *default_tz_name; extern Time_zone *default_tz; extern char *my_bind_addr_str; -extern int server_socket_ai_family; extern char *default_storage_engine, *default_tmp_storage_engine; extern char *enforced_storage_engine; extern char *gtid_pos_auto_engines; @@ -156,7 +154,8 @@ extern bool 
opt_endinfo, using_udf_functions; extern my_bool locked_in_memory; extern bool opt_using_transactions; extern ulong current_pid; -extern ulong expire_logs_days; +extern double expire_logs_days; +extern ulong binlog_expire_logs_seconds; extern my_bool relay_log_recovery; extern uint sync_binlog_period, sync_relaylog_period, sync_relayloginfo_period, sync_masterinfo_period; @@ -208,7 +207,7 @@ extern MYSQL_PLUGIN_IMPORT char glob_hostname[FN_REFLEN]; extern char mysql_home[FN_REFLEN]; extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file; extern char default_logfile_name[FN_REFLEN]; -extern char log_error_file[FN_REFLEN], *opt_tc_log_file; +extern char log_error_file[FN_REFLEN], *opt_tc_log_file, *opt_ddl_recovery_file; extern const double log_10[309]; extern ulonglong keybuff_size; extern ulonglong thd_startup_options; @@ -269,6 +268,8 @@ extern MYSQL_PLUGIN_IMPORT const char *my_localhost; extern MYSQL_PLUGIN_IMPORT const char **errmesg; /* Error messages */ extern const char *myisam_recover_options_str; extern const LEX_CSTRING in_left_expr_name, in_additional_cond, in_having_cond; +extern const LEX_CSTRING NULL_clex_str; +extern const LEX_CSTRING error_clex_str; extern SHOW_VAR status_vars[]; extern struct system_variables max_system_variables; extern struct system_status_var global_status_var; @@ -395,7 +396,7 @@ extern PSI_file_key key_file_binlog, key_file_binlog_cache, key_file_loadfile, key_file_log_event_data, key_file_log_event_info, key_file_master_info, key_file_misc, key_file_partition_ddl_log, key_file_pid, key_file_relay_log_info, key_file_send_file, key_file_tclog, - key_file_trg, key_file_trn, key_file_init; + key_file_trg, key_file_trn, key_file_init, key_file_log_ddl; extern PSI_file_key key_file_query_log, key_file_slow_log; extern PSI_file_key key_file_relaylog, key_file_relaylog_index, key_file_relaylog_cache, key_file_relaylog_index_cache; @@ -503,6 +504,7 @@ extern PSI_memory_key key_memory_TABLE; extern PSI_memory_key 
key_memory_binlog_statement_buffer; extern PSI_memory_key key_memory_user_conn; extern PSI_memory_key key_memory_dboptions_hash; +extern PSI_memory_key key_memory_dbnames_cache; extern PSI_memory_key key_memory_hash_index_key_buffer; extern PSI_memory_key key_memory_THD_handler_tables_hash; extern PSI_memory_key key_memory_JOIN_CACHE; @@ -638,7 +640,9 @@ extern PSI_stage_info stage_upgrading_lock; extern PSI_stage_info stage_user_lock; extern PSI_stage_info stage_user_sleep; extern PSI_stage_info stage_verifying_table; +extern PSI_stage_info stage_waiting_for_ddl; extern PSI_stage_info stage_waiting_for_delay_list; +extern PSI_stage_info stage_waiting_for_flush; extern PSI_stage_info stage_waiting_for_gtid_to_be_written_to_binary_log; extern PSI_stage_info stage_waiting_for_handler_insert; extern PSI_stage_info stage_waiting_for_handler_lock; @@ -696,7 +700,7 @@ void init_sql_statement_info(); void init_com_statement_info(); #endif /* HAVE_PSI_STATEMENT_INTERFACE */ -#ifndef __WIN__ +#ifndef _WIN32 extern pthread_t signal_thread; #endif @@ -742,7 +746,7 @@ extern mysql_mutex_t LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator, LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone, LOCK_active_mi, LOCK_manager, LOCK_user_conn, - LOCK_prepared_stmt_count, LOCK_error_messages; + LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_backup_log; extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_global_system_variables; extern mysql_rwlock_t LOCK_all_status_vars; extern mysql_mutex_t LOCK_start_thread; @@ -777,6 +781,8 @@ enum options_mysqld OPT_BINLOG_IGNORE_DB, OPT_BIN_LOG, OPT_BOOTSTRAP, + OPT_EXPIRE_LOGS_DAYS, + OPT_BINLOG_EXPIRE_LOGS_SECONDS, OPT_CONSOLE, OPT_DEBUG_SYNC_TIMEOUT, OPT_REMOVED_OPTION, @@ -954,4 +960,15 @@ extern ulong opt_binlog_dbug_fsync_sleep; extern uint volatile global_disable_checkpoint; extern my_bool opt_help; +extern int mysqld_main(int argc, char **argv); + +#ifdef _WIN32 +extern HANDLE hEventShutdown; +extern void 
mysqld_win_initiate_shutdown(); +extern void mysqld_win_set_startup_complete(); +extern void mysqld_win_extend_service_timeout(DWORD sec); +extern void mysqld_set_service_status_callback(void (*)(DWORD, DWORD, DWORD)); +extern void mysqld_win_set_service_name(const char *name); +#endif + #endif /* MYSQLD_INCLUDED */ diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 2024172ec39..70e71d9a21b 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -92,7 +92,7 @@ static void inline MYSQL_SERVER_my_error(...) {} the client should have a bigger max_allowed_packet. */ -#if defined(__WIN__) || !defined(MYSQL_SERVER) +#if defined(_WIN32) || !defined(MYSQL_SERVER) /* The following is because alarms doesn't work on windows. */ #ifndef NO_ALARM #define NO_ALARM @@ -167,7 +167,7 @@ my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags) { /* For perl DBI/DBD. */ net->fd= vio_fd(vio); -#if defined(MYSQL_SERVER) && !defined(__WIN__) +#if defined(MYSQL_SERVER) && !defined(_WIN32) if (!(test_flags & TEST_BLOCKING)) { my_bool old_mode; @@ -291,7 +291,7 @@ static int net_data_is_ready(my_socket sd) struct timeval tv; int res; -#ifndef __WIN__ +#ifndef _WIN32 /* Windows uses an _array_ of 64 fd's as default, so it's safe */ if (sd >= FD_SETSIZE) return -1; @@ -654,8 +654,20 @@ net_real_write(NET *net,const uchar *packet, size_t len) my_bool net_blocking = vio_is_blocking(net->vio); DBUG_ENTER("net_real_write"); -#if defined(MYSQL_SERVER) && defined(USE_QUERY_CACHE) - query_cache_insert(net->thd, (char*) packet, len, net->pkt_nr); +#if defined(MYSQL_SERVER) + THD *thd= (THD *)net->thd; +#if defined(USE_QUERY_CACHE) + query_cache_insert(thd, (char*) packet, len, net->pkt_nr); +#endif + if (likely(thd)) + { + /* + Wait until pending operations (currently it is engine + asynchronous group commit) are finished before replying + to the client, to keep durability promise. 
+ */ + thd->async_state.wait_for_pending_ops(); + } #endif if (unlikely(net->error == 2)) @@ -712,7 +724,7 @@ net_real_write(NET *net,const uchar *packet, size_t len) if ((long) (length= vio_write(net->vio,pos,(size_t) (end-pos))) <= 0) { my_bool interrupted = vio_should_retry(net->vio); -#if !defined(__WIN__) +#if !defined(_WIN32) if ((interrupted || length == 0) && !thr_alarm_in_use(&alarmed)) { if (!thr_alarm(&alarmed, net->write_timeout, &alarm_buff)) @@ -735,7 +747,7 @@ net_real_write(NET *net,const uchar *packet, size_t len) } } else -#endif /* !defined(__WIN__) */ +#endif /* !defined(_WIN32) */ if (thr_alarm_in_use(&alarmed) && !thr_got_alarm(&alarmed) && interrupted) { @@ -760,7 +772,7 @@ net_real_write(NET *net,const uchar *packet, size_t len) pos+=length; update_statistics(thd_increment_bytes_sent(net->thd, length)); } -#ifndef __WIN__ +#ifndef _WIN32 end: #endif #ifdef HAVE_COMPRESS @@ -1007,7 +1019,7 @@ retry: goto end; } -#if !defined(__WIN__) && defined(MYSQL_SERVER) +#if !defined(_WIN32) && defined(MYSQL_SERVER) /* We got an error that there was no data on the socket. We now set up an alarm to not 'read forever', change the socket to the blocking @@ -1039,7 +1051,7 @@ retry: continue; } } -#endif /* (!defined(__WIN__) && defined(MYSQL_SERVER) */ +#endif /* (!defined(_WIN32) && defined(MYSQL_SERVER) */ if (thr_alarm_in_use(&alarmed) && !thr_got_alarm(&alarmed) && interrupted) { /* Probably in MIT threads */ diff --git a/sql/nt_servc.cc b/sql/nt_servc.cc deleted file mode 100644 index 9c754763aab..00000000000 --- a/sql/nt_servc.cc +++ /dev/null @@ -1,555 +0,0 @@ -/** - @file - - @brief - Windows NT Service class library. 
- - Copyright Abandoned 1998 Irena Pancirov - Irnet Snc - This file is public domain and comes with NO WARRANTY of any kind -*/ -#include <windows.h> -#include <process.h> -#include <stdio.h> -#include <stdlib.h> -#include "nt_servc.h" - - -static NTService *pService; - -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ -NTService::NTService() -{ - - bOsNT = FALSE; - //service variables - ServiceName = NULL; - hExitEvent = 0; - bPause = FALSE; - bRunning = FALSE; - hThreadHandle = 0; - fpServiceThread = NULL; - - //time-out variables - nStartTimeOut = 15000; - nStopTimeOut = 86400000; - nPauseTimeOut = 5000; - nResumeTimeOut = 5000; - - //install variables - dwDesiredAccess = SERVICE_ALL_ACCESS; - dwServiceType = SERVICE_WIN32_OWN_PROCESS; - dwStartType = SERVICE_AUTO_START; - dwErrorControl = SERVICE_ERROR_NORMAL; - szLoadOrderGroup = NULL; - lpdwTagID = NULL; - szDependencies = NULL; - - my_argc = 0; - my_argv = NULL; - hShutdownEvent = 0; - nError = 0; - dwState = 0; -} - -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ -NTService::~NTService() -{ - if (ServiceName != NULL) delete[] ServiceName; -} -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ - - -/** - Registers the main service thread with the service manager. 
- - @param ServiceThread pointer to the main programs entry function - when the service is started -*/ - - -long NTService::Init(LPCSTR szInternName, THREAD_FC ServiceThread) -{ - - pService = this; - - fpServiceThread = ServiceThread; - ServiceName = new char[lstrlen(szInternName)+1]; - lstrcpy(ServiceName,szInternName); - - SERVICE_TABLE_ENTRY stb[] = - { - { (char *)szInternName, ServiceMain} , - { NULL, NULL } - }; - - return StartServiceCtrlDispatcher(stb); //register with the Service Manager -} - - -/** - Installs the service with Service manager. - - nError values: - - 0 success - - 1 Can't open the Service manager - - 2 Failed to create service. -*/ - - -BOOL NTService::Install(int startType, LPCSTR szInternName, - LPCSTR szDisplayName, - LPCSTR szFullPath, LPCSTR szAccountName, - LPCSTR szPassword) -{ - BOOL ret_val=FALSE; - SC_HANDLE newService, scm; - - if (!SeekStatus(szInternName,1)) - return FALSE; - - char szFilePath[_MAX_PATH]; - GetModuleFileName(NULL, szFilePath, sizeof(szFilePath)); - - // open a connection to the SCM - if (!(scm = OpenSCManager(0, 0,SC_MANAGER_CREATE_SERVICE))) - printf("Failed to install the service (Couldn't open the SCM)\n"); - else // Install the new service - { - if (!(newService= - CreateService(scm, - szInternName, - szDisplayName, - dwDesiredAccess,//default: SERVICE_ALL_ACCESS - dwServiceType, //default: SERVICE_WIN32_OWN_PROCESS - //default: SERVICE_AUTOSTART - (startType == 1 ? 
SERVICE_AUTO_START : - SERVICE_DEMAND_START), - dwErrorControl, //default: SERVICE_ERROR_NORMAL - szFullPath, //exec full path - szLoadOrderGroup, //default: NULL - lpdwTagID, //default: NULL - szDependencies, //default: NULL - szAccountName, //default: NULL - szPassword))) //default: NULL - printf("Failed to install the service (Couldn't create service)\n"); - else - { - printf("Service successfully installed.\n"); - CloseServiceHandle(newService); - ret_val=TRUE; // Everything went ok - } - CloseServiceHandle(scm); - } - return ret_val; -} - - -/** - Removes the service. - - nError values: - - 0 success - - 1 Can't open the Service manager - - 2 Failed to locate service - - 3 Failed to delete service. -*/ - - -BOOL NTService::Remove(LPCSTR szInternName) -{ - BOOL ret_value=FALSE; - SC_HANDLE service, scm; - - if (!SeekStatus(szInternName,0)) - return FALSE; - - nError=0; - - // open a connection to the SCM - if (!(scm = OpenSCManager(0, 0,SC_MANAGER_CREATE_SERVICE))) - { - printf("Failed to remove the service (Couldn't open the SCM)\n"); - } - else - { - if ((service = OpenService(scm,szInternName, DELETE))) - { - if (!DeleteService(service)) - printf("Failed to remove the service\n"); - else - { - printf("Service successfully removed.\n"); - ret_value=TRUE; // everything went ok - } - CloseServiceHandle(service); - } - else - printf("Failed to remove the service (Couldn't open the service)\n"); - CloseServiceHandle(scm); - } - return ret_value; -} - -/** - this function should be called before the app. exits to stop - the service -*/ -void NTService::Stop(void) -{ - SetStatus(SERVICE_STOP_PENDING,NO_ERROR, 0, 1, 60000); - StopService(); - SetStatus(SERVICE_STOPPED, NO_ERROR, 0, 1, 1000); -} - -/** - This is the function that is called from the - service manager to start the service. 
-*/ - - -void NTService::ServiceMain(DWORD argc, LPTSTR *argv) -{ - - // registration function - if (!(pService->hServiceStatusHandle = - RegisterServiceCtrlHandler(pService->ServiceName, - NTService::ServiceCtrlHandler))) - goto error; - - // notify SCM of progress - if (!pService->SetStatus(SERVICE_START_PENDING,NO_ERROR, 0, 1, 8000)) - goto error; - - // create the exit event - if (!(pService->hExitEvent = CreateEvent (0, TRUE, FALSE,0))) - goto error; - - if (!pService->SetStatus(SERVICE_START_PENDING,NO_ERROR, 0, 3, - pService->nStartTimeOut)) - goto error; - - // save start arguments - pService->my_argc=argc; - pService->my_argv=argv; - - // start the service - if (!pService->StartService()) - goto error; - - // wait for exit event - WaitForSingleObject (pService->hExitEvent, INFINITE); - - // wait for thread to exit - if (WaitForSingleObject (pService->hThreadHandle, INFINITE) == WAIT_TIMEOUT) - CloseHandle(pService->hThreadHandle); - - pService->Exit(0); - return; - -error: - pService->Exit(GetLastError()); - return; -} - - - -void NTService::SetRunning() -{ - if (pService) - pService->SetStatus(SERVICE_RUNNING, NO_ERROR, 0, 0, 0); -} - -void NTService::SetSlowStarting(unsigned long timeout) -{ - if (pService) - pService->SetStatus(SERVICE_START_PENDING,NO_ERROR, 0, 0, timeout); -} - - -/* ------------------------------------------------------------------------ - StartService() - starts the application thread - -------------------------------------------------------------------------- */ - -BOOL NTService::StartService() -{ - // Start the real service's thread (application) - if (!(hThreadHandle = (HANDLE) _beginthread(fpServiceThread,0, - (void *) this))) - return FALSE; - bRunning = TRUE; - return TRUE; -} -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ -void NTService::StopService() -{ - bRunning=FALSE; - - // Set the event for application - if 
(hShutdownEvent) - SetEvent(hShutdownEvent); - - // Set the event for ServiceMain - SetEvent(hExitEvent); -} -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ -void NTService::PauseService() -{ - bPause = TRUE; - SuspendThread(hThreadHandle); -} -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ -void NTService::ResumeService() -{ - bPause=FALSE; - ResumeThread(hThreadHandle); -} -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ -BOOL NTService::SetStatus (DWORD dwCurrentState,DWORD dwWin32ExitCode, - DWORD dwServiceSpecificExitCode, DWORD dwCheckPoint, - DWORD dwWaitHint) -{ - BOOL bRet; - SERVICE_STATUS serviceStatus; - - dwState=dwCurrentState; - - serviceStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS; - serviceStatus.dwCurrentState = dwCurrentState; - - if (dwCurrentState == SERVICE_START_PENDING) - serviceStatus.dwControlsAccepted = 0; //don't accept control events - else - serviceStatus.dwControlsAccepted = (SERVICE_ACCEPT_STOP | - SERVICE_ACCEPT_PAUSE_CONTINUE | - SERVICE_ACCEPT_SHUTDOWN); - - // if a specific exit code is defined,set up the win32 exit code properly - if (dwServiceSpecificExitCode == 0) - serviceStatus.dwWin32ExitCode = dwWin32ExitCode; - else - serviceStatus.dwWin32ExitCode = ERROR_SERVICE_SPECIFIC_ERROR; - - serviceStatus.dwServiceSpecificExitCode = dwServiceSpecificExitCode; - - serviceStatus.dwCheckPoint = dwCheckPoint; - serviceStatus.dwWaitHint = dwWaitHint; - - // Pass the status to the Service Manager - if (!(bRet=SetServiceStatus (hServiceStatusHandle, &serviceStatus))) - StopService(); - - return bRet; -} -/* ------------------------------------------------------------------------ - - 
-------------------------------------------------------------------------- */ -void NTService::ServiceCtrlHandler(DWORD ctrlCode) -{ - DWORD dwState; - - if (!pService) - return; - - dwState=pService->dwState; // get current state - - switch(ctrlCode) { - case SERVICE_CONTROL_SHUTDOWN: - case SERVICE_CONTROL_STOP: - dwState = SERVICE_STOP_PENDING; - pService->SetStatus(SERVICE_STOP_PENDING,NO_ERROR, 0, 1, - pService->nStopTimeOut); - pService->StopService(); - break; - - default: - pService->SetStatus(dwState, NO_ERROR,0, 0, 0); - break; - } - //pService->SetStatus(dwState, NO_ERROR,0, 0, 0); -} - -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ - -void NTService::Exit(DWORD error) -{ - if (hExitEvent) - CloseHandle(hExitEvent); - - // Send a message to the scm to tell that we stop - if (hServiceStatusHandle) - SetStatus(SERVICE_STOPPED, error,0, 0, 0); - - // If the thread has started kill it ??? 
- // if (hThreadHandle) CloseHandle(hThreadHandle); - -} - -/* ------------------------------------------------------------------------ - - -------------------------------------------------------------------------- */ - -BOOL NTService::SeekStatus(LPCSTR szInternName, int OperationType) -{ - BOOL ret_value=FALSE; - SC_HANDLE service, scm; - - // open a connection to the SCM - if (!(scm = OpenSCManager(0, 0,SC_MANAGER_CREATE_SERVICE))) - { - DWORD ret_error=GetLastError(); - if (ret_error == ERROR_ACCESS_DENIED) - { - printf("Install/Remove of the Service Denied!\n"); - if (!is_super_user()) - printf("That operation should be made by an user with Administrator privileges!\n"); - } - else - printf("There is a problem for to open the Service Control Manager!\n"); - } - else - { - if (OperationType == 1) - { - /* an install operation */ - if ((service = OpenService(scm,szInternName, SERVICE_ALL_ACCESS ))) - { - LPQUERY_SERVICE_CONFIG ConfigBuf; - DWORD dwSize; - - ConfigBuf = (LPQUERY_SERVICE_CONFIG) LocalAlloc(LPTR, 4096); - printf("The service already exists!\n"); - if (QueryServiceConfig(service,ConfigBuf,4096,&dwSize)) - printf("The current server installed: %s\n", - ConfigBuf->lpBinaryPathName); - LocalFree(ConfigBuf); - CloseServiceHandle(service); - } - else - ret_value=TRUE; - } - else - { - /* a remove operation */ - if (!(service = OpenService(scm,szInternName, SERVICE_ALL_ACCESS ))) - printf("The service doesn't exist!\n"); - else - { - SERVICE_STATUS ss; - - memset(&ss, 0, sizeof(ss)); - if (QueryServiceStatus(service,&ss)) - { - DWORD dwState = ss.dwCurrentState; - if (dwState == SERVICE_RUNNING) - printf("Failed to remove the service because the service is running\nStop the service and try again\n"); - else if (dwState == SERVICE_STOP_PENDING) - printf("\ -Failed to remove the service because the service is in stop pending state!\n\ -Wait 30 seconds and try again.\n\ -If this condition persist, reboot the machine and try again\n"); - else - ret_value= 
TRUE; - } - CloseServiceHandle(service); - } - } - CloseServiceHandle(scm); - } - - return ret_value; -} -/* ------------------------------------------------------------------------ - -------------------------------------------------------------------------- */ -BOOL NTService::IsService(LPCSTR ServiceName) -{ - BOOL ret_value=FALSE; - SC_HANDLE service, scm; - - if ((scm= OpenSCManager(0, 0,SC_MANAGER_ENUMERATE_SERVICE))) - { - if ((service = OpenService(scm,ServiceName, SERVICE_QUERY_STATUS))) - { - ret_value=TRUE; - CloseServiceHandle(service); - } - CloseServiceHandle(scm); - } - return ret_value; -} -/* ------------------------------------------------------------------------ - -------------------------------------------------------------------------- */ -BOOL NTService::got_service_option(char **argv, const char *service_option) -{ - char *option; - for (option= argv[1]; *option; option++) - if (!strcmp(option, service_option)) - return TRUE; - return FALSE; -} -/* ------------------------------------------------------------------------ - -------------------------------------------------------------------------- */ -BOOL NTService::is_super_user() -{ - HANDLE hAccessToken; - UCHAR InfoBuffer[1024]; - PTOKEN_GROUPS ptgGroups=(PTOKEN_GROUPS)InfoBuffer; - DWORD dwInfoBufferSize; - PSID psidAdministrators; - SID_IDENTIFIER_AUTHORITY siaNtAuthority = SECURITY_NT_AUTHORITY; - UINT x; - BOOL ret_value=FALSE; - - if (!OpenThreadToken(GetCurrentThread(), TOKEN_QUERY, TRUE,&hAccessToken )) - { - if (GetLastError() != ERROR_NO_TOKEN) - return FALSE; - - if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hAccessToken)) - return FALSE; - } - - ret_value= GetTokenInformation(hAccessToken,TokenGroups,InfoBuffer, - 1024, &dwInfoBufferSize); - - CloseHandle(hAccessToken); - - if (!ret_value ) - return FALSE; - - if (!AllocateAndInitializeSid(&siaNtAuthority, 2, - SECURITY_BUILTIN_DOMAIN_RID, - DOMAIN_ALIAS_RID_ADMINS, - 0, 0, 0, 0, 0, 0, - &psidAdministrators)) - return 
FALSE; - - ret_value = FALSE; - - for (x=0;x<ptgGroups->GroupCount;x++) - { - if ( EqualSid(psidAdministrators, ptgGroups->Groups[x].Sid) ) - { - ret_value = TRUE; - break; - } - - } - FreeSid(psidAdministrators); - return ret_value; -} diff --git a/sql/nt_servc.h b/sql/nt_servc.h deleted file mode 100644 index 8ba29519c8f..00000000000 --- a/sql/nt_servc.h +++ /dev/null @@ -1,113 +0,0 @@ -#ifndef NT_SERVC_INCLUDED -#define NT_SERVC_INCLUDED - -/** - @file - - @brief - Windows NT Service class library - - Copyright Abandoned 1998 Irena Pancirov - Irnet Snc - This file is public domain and comes with NO WARRANTY of any kind -*/ - -// main application thread -typedef void (*THREAD_FC)(void *); - -class NTService -{ - public: - NTService(); - ~NTService(); - - BOOL bOsNT; ///< true if OS is NT, false for Win95 - //install optinos - DWORD dwDesiredAccess; - DWORD dwServiceType; - DWORD dwStartType; - DWORD dwErrorControl; - - LPSTR szLoadOrderGroup; - LPDWORD lpdwTagID; - LPSTR szDependencies; - OSVERSIONINFO osVer; - - // time-out (in milisec) - int nStartTimeOut; - int nStopTimeOut; - int nPauseTimeOut; - int nResumeTimeOut; - - // - DWORD my_argc; - LPTSTR *my_argv; - HANDLE hShutdownEvent; - int nError; - DWORD dwState; - - //init service entry point - long Init(LPCSTR szInternName,THREAD_FC ServiceThread); - - //application shutdown event - void SetShutdownEvent(HANDLE hEvent){ hShutdownEvent=hEvent; } - - - //service install / un-install - BOOL Install(int startType,LPCSTR szInternName,LPCSTR szDisplayName, - LPCSTR szFullPath, LPCSTR szAccountName=NULL, - LPCSTR szPassword=NULL); - BOOL SeekStatus(LPCSTR szInternName, int OperationType); - BOOL Remove(LPCSTR szInternName); - BOOL IsService(LPCSTR ServiceName); - BOOL got_service_option(char **argv, const char *service_option); - BOOL is_super_user(); - - /* - SetRunning() is to be called by the application - when initialization completes and it can accept - stop request - */ - void SetRunning(void); - - /** - 
Sets a timeout after which SCM will abort service startup if SetRunning() - was not called or the timeout was not extended with another call to - SetSlowStarting(). Should be called when static initialization completes, - and the variable initialization part begins - - @arg timeout the timeout to pass to the SCM (in milliseconds) - */ - void SetSlowStarting(unsigned long timeout); - - /* - Stop() is to be called by the application to stop - the service - */ - void Stop(void); - - protected: - LPSTR ServiceName; - HANDLE hExitEvent; - SERVICE_STATUS_HANDLE hServiceStatusHandle; - BOOL bPause; - BOOL bRunning; - HANDLE hThreadHandle; - THREAD_FC fpServiceThread; - - void PauseService(); - void ResumeService(); - void StopService(); - BOOL StartService(); - - static void WINAPI ServiceMain(DWORD argc, LPTSTR *argv); - static void WINAPI ServiceCtrlHandler (DWORD ctrlCode); - - void Exit(DWORD error); - BOOL SetStatus (DWORD dwCurrentState,DWORD dwWin32ExitCode, - DWORD dwServiceSpecificExitCode, - DWORD dwCheckPoint,DWORD dwWaitHint); - -}; -/* ------------------------- the end -------------------------------------- */ - -#endif /* NT_SERVC_INCLUDED */ diff --git a/sql/opt_index_cond_pushdown.cc b/sql/opt_index_cond_pushdown.cc index 15bc2074e1f..6a24fa95b68 100644 --- a/sql/opt_index_cond_pushdown.cc +++ b/sql/opt_index_cond_pushdown.cc @@ -152,7 +152,6 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno, } } -#define ICP_COND_USES_INDEX_ONLY 10 /* Get a part of the condition that can be checked using only index fields @@ -161,8 +160,8 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno, make_cond_for_index() cond The source condition table The table that is partially available - keyno The index in the above table. Only fields covered by the index - are available + keyno The index in the above table. 
Only fields covered by the + index are available other_tbls_ok TRUE <=> Fields of other non-const tables are allowed DESCRIPTION @@ -173,8 +172,8 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno, Example: make_cond_for_index( - "cond(t1.field) AND cond(t2.key1) AND cond(t2.non_key) AND cond(t2.key2)", - t2, keyno(t2.key1)) + "cond(t1.field) AND cond(t2.key1) AND cond(t2.non_key) AND cond(t2.key2)", + t2, keyno(t2.key1)) will return "cond(t1.field) AND cond(t2.key2)" @@ -185,11 +184,10 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno, static Item *make_cond_for_index(THD *thd, Item *cond, TABLE *table, uint keyno, bool other_tbls_ok) { - if (!cond) - return NULL; + if (!cond || cond->basic_const_item()) + return cond; if (cond->type() == Item::COND_ITEM) { - uint n_marked= 0; if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) { table_map used_tables= 0; @@ -206,19 +204,12 @@ static Item *make_cond_for_index(THD *thd, Item *cond, TABLE *table, uint keyno, new_cond->argument_list()->push_back(fix, thd->mem_root); used_tables|= fix->used_tables(); } - if (item->marker == ICP_COND_USES_INDEX_ONLY) - { - n_marked++; - item->marker= 0; - } } - if (n_marked ==((Item_cond*)cond)->argument_list()->elements) - cond->marker= ICP_COND_USES_INDEX_ONLY; switch (new_cond->argument_list()->elements) { case 0: return (COND*) 0; case 1: - new_cond->used_tables_cache= used_tables; + /* remove AND level if there is only one argument */ return new_cond->argument_list()->head(); default: new_cond->quick_fix_field(); @@ -239,14 +230,7 @@ static Item *make_cond_for_index(THD *thd, Item *cond, TABLE *table, uint keyno, if (!fix) return (COND*) 0; new_cond->argument_list()->push_back(fix, thd->mem_root); - if (item->marker == ICP_COND_USES_INDEX_ONLY) - { - n_marked++; - item->marker= 0; - } } - if (n_marked ==((Item_cond*)cond)->argument_list()->elements) - cond->marker= ICP_COND_USES_INDEX_ONLY; new_cond->quick_fix_field(); 
new_cond->used_tables_cache= ((Item_cond_or*) cond)->used_tables_cache; new_cond->top_level_item(); @@ -256,7 +240,6 @@ static Item *make_cond_for_index(THD *thd, Item *cond, TABLE *table, uint keyno, if (!uses_index_fields_only(cond, table, keyno, other_tbls_ok)) return (COND*) 0; - cond->marker= ICP_COND_USES_INDEX_ONLY; return cond; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 2cc6c9ad37f..e3c80a300a1 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -8053,7 +8053,7 @@ SEL_TREE *Item_func_in::get_func_row_mm_tree(RANGE_OPT_PARAM *param, table_map param_comp= ~(param->prev_tables | param->read_tables | param->current_table); uint row_cols= key_row->cols(); - Dynamic_array <Key_col_info> key_cols_info(row_cols); + Dynamic_array <Key_col_info> key_cols_info(PSI_INSTRUMENT_MEM,row_cols); cmp_item_row *row_cmp_item; if (array) @@ -11689,8 +11689,8 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts) for (KEY_PART_INFO *kp= table_key->key_part; kp < key_part; kp++) { - uint16 fieldnr= param->table->key_info[keynr]. - key_part[kp - table_key->key_part].fieldnr - 1; + field_index_t fieldnr= (param->table->key_info[keynr]. 
+ key_part[kp - table_key->key_part].fieldnr - 1); if (param->table->field[fieldnr]->key_length() != kp->length) return FALSE; } @@ -14387,7 +14387,7 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, &has_min_max, &has_other)) DBUG_RETURN(FALSE); } - else if (cur_arg->const_item() && !cur_arg->is_expensive()) + else if (cur_arg->can_eval_in_optimize()) { /* For predicates of the form "const OP expr" we also have to check 'expr' @@ -16071,7 +16071,7 @@ print_sel_arg_key(Field *field, const uchar *key, String *out) { if (*key) { - out->append("NULL"); + out->append(STRING_WITH_LEN("NULL")); goto end; } key++; // Skip null byte @@ -16104,15 +16104,16 @@ const char *dbug_print_sel_arg(SEL_ARG *sel_arg) { StringBuffer<64> buf; String &out= dbug_print_sel_arg_buf; + LEX_CSTRING tmp; out.length(0); if (!sel_arg) { - out.append("NULL"); + out.append(STRING_WITH_LEN("NULL")); goto end; } - out.append("SEL_ARG("); + out.append(STRING_WITH_LEN("SEL_ARG(")); const char *stype; switch(sel_arg->type) { @@ -16132,27 +16133,35 @@ const char *dbug_print_sel_arg(SEL_ARG *sel_arg) if (stype) { - out.append("type="); - out.append(stype); + out.append(STRING_WITH_LEN("type=")); + out.append(stype, strlen(stype)); goto end; } if (sel_arg->min_flag & NO_MIN_RANGE) - out.append("-inf"); + out.append(STRING_WITH_LEN("-inf")); else { print_sel_arg_key(sel_arg->field, sel_arg->min_value, &buf); out.append(buf); } - out.append((sel_arg->min_flag & NEAR_MIN)? "<" : "<="); + if (sel_arg->min_flag & NEAR_MIN) + lex_string_set3(&tmp, "<", 1); + else + lex_string_set3(&tmp, "<=", 2); + out.append(&tmp); out.append(sel_arg->field->field_name); - out.append((sel_arg->max_flag & NEAR_MAX)? 
"<" : "<="); + if (sel_arg->min_flag & NEAR_MAX) + lex_string_set3(&tmp, "<", 1); + else + lex_string_set3(&tmp, "<=", 2); + out.append(&tmp); if (sel_arg->max_flag & NO_MAX_RANGE) - out.append("+inf"); + out.append(STRING_WITH_LEN("+inf")); else { buf.length(0); @@ -16160,7 +16169,7 @@ const char *dbug_print_sel_arg(SEL_ARG *sel_arg) out.append(buf); } - out.append(")"); + out.append(')'); end: return dbug_print_sel_arg_buf.c_ptr_safe(); diff --git a/sql/opt_split.cc b/sql/opt_split.cc index e148dd42d1e..18710e85624 100644 --- a/sql/opt_split.cc +++ b/sql/opt_split.cc @@ -187,6 +187,7 @@ #include "mariadb.h" #include "sql_select.h" +#include "opt_trace.h" /* Info on a splitting field */ struct SplM_field_info @@ -960,6 +961,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, The key for splitting was chosen, look for the plan for this key in the cache */ + Json_writer_array spl_trace(thd, "choose_best_splitting"); spl_plan= spl_opt_info->find_plan(best_table, best_key, best_key_parts); if (!spl_plan) { @@ -1008,6 +1010,16 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, spl_plan->cost= join->best_positions[join->table_count-1].read_time + + oper_cost; + if (unlikely(thd->trace_started())) + { + Json_writer_object wrapper(thd); + Json_writer_object find_trace(thd, "best_splitting"); + find_trace.add("table", best_table->alias.c_ptr()); + find_trace.add("key", best_table->key_info[best_key].name); + find_trace.add("record_count", record_count); + find_trace.add("cost", spl_plan->cost); + find_trace.add("unsplit_cost", spl_opt_info->unsplit_cost); + } memcpy((char *) spl_plan->best_positions, (char *) join->best_positions, sizeof(POSITION) * join->table_count); @@ -1034,6 +1046,11 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, { startup_cost= record_count * spl_plan->cost; records= (ha_rows) (records * spl_plan->split_sel); + + Json_writer_object trace(thd, "lateral_derived"); + 
trace.add("startup_cost", startup_cost); + trace.add("splitting_cost", spl_plan->cost); + trace.add("records", records); } else startup_cost= spl_opt_info->unsplit_cost; diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 3b3f9e56606..b971c96cda2 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -615,9 +615,9 @@ int check_and_do_in_subquery_rewrites(JOIN *join) // (1) - ORDER BY without LIMIT can be removed from IN/EXISTS subqueries // (2) - for EXISTS, can also remove "ORDER BY ... LIMIT n", // but cannot remove "ORDER BY ... LIMIT n OFFSET m" - if (!select_lex->select_limit || // (1) + if (!select_lex->limit_params.select_limit || // (1) (substype == Item_subselect::EXISTS_SUBS && // (2) - !select_lex->offset_limit)) // (2) + !select_lex->limit_params.offset_limit)) // (2) { select_lex->join->order= 0; select_lex->join->skip_sort_order= 1; @@ -861,7 +861,7 @@ bool subquery_types_allow_materialization(THD* thd, Item_in_subselect *in_subs) Item *left_exp= in_subs->left_exp(); DBUG_ENTER("subquery_types_allow_materialization"); - DBUG_ASSERT(left_exp->is_fixed()); + DBUG_ASSERT(left_exp->fixed()); List_iterator<Item> it(in_subs->unit->first_select()->item_list); uint elements= in_subs->unit->first_select()->item_list.elements; @@ -964,7 +964,7 @@ bool make_in_exists_conversion(THD *thd, JOIN *join, Item_in_subselect *item) /* We're going to finalize IN->EXISTS conversion. Normally, IN->EXISTS conversion takes place inside the - Item_subselect::fix_fields() call, where item_subselect->is_fixed()==FALSE (as + Item_subselect::fix_fields() call, where item_subselect->fixed()==FALSE (as fix_fields() haven't finished yet) and item_subselect->changed==FALSE (as the conversion haven't been finalized) @@ -975,7 +975,7 @@ bool make_in_exists_conversion(THD *thd, JOIN *join, Item_in_subselect *item) call. 
*/ item->changed= 0; - item->fixed= 0; + item->base_flags|= item_base_t::FIXED; SELECT_LEX *save_select_lex= thd->lex->current_select; thd->lex->current_select= item->unit->first_select(); @@ -988,10 +988,10 @@ bool make_in_exists_conversion(THD *thd, JOIN *join, Item_in_subselect *item) DBUG_RETURN(TRUE); item->changed= 1; - item->fixed= 1; + DBUG_ASSERT(item->fixed()); Item *substitute= item->substitution; - bool do_fix_fields= !item->substitution->is_fixed(); + bool do_fix_fields= !item->substitution->fixed(); /* The Item_subselect has already been wrapped with Item_in_optimizer, so we should search for item->optimizer, not 'item'. @@ -1327,7 +1327,7 @@ bool convert_join_subqueries_to_semijoins(JOIN *join) { JOIN *child_join= in_subq->unit->first_select()->join; in_subq->changed= 0; - in_subq->fixed= 0; + in_subq->base_flags|= item_base_t::FIXED; SELECT_LEX *save_select_lex= thd->lex->current_select; thd->lex->current_select= in_subq->unit->first_select(); @@ -1340,10 +1340,10 @@ bool convert_join_subqueries_to_semijoins(JOIN *join) DBUG_RETURN(TRUE); in_subq->changed= 1; - in_subq->fixed= 1; + DBUG_ASSERT(in_subq->fixed()); Item *substitute= in_subq->substitution; - bool do_fix_fields= !in_subq->substitution->is_fixed(); + bool do_fix_fields= !in_subq->substitution->fixed(); Item **tree= (in_subq->emb_on_expr_nest == NO_JOIN_NEST)? 
&join->conds : &(in_subq->emb_on_expr_nest->on_expr); Item *replace_me= in_subq->original_item(); @@ -1423,6 +1423,14 @@ void get_delayed_table_estimates(TABLE *table, double *startup_cost) { Item_in_subselect *item= table->pos_in_table_list->jtbm_subselect; + Table_function_json_table *table_function= + table->pos_in_table_list->table_function; + + if (table_function) + { + table_function->get_estimates(out_rows, scan_time, startup_cost); + return; + } DBUG_ASSERT(item->engine->engine_type() == subselect_engine::HASH_SJ_ENGINE); @@ -1800,6 +1808,10 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred) tl->jtbm_subselect->fix_after_pullout(parent_lex, &dummy, true); DBUG_ASSERT(dummy == tl->jtbm_subselect); } + else if (tl->table_function) + { + tl->table_function->fix_after_pullout(tl, parent_lex, true); + } SELECT_LEX *old_sl= tl->select_lex; tl->select_lex= parent_join->select_lex; for (TABLE_LIST *emb= tl->embedding; @@ -1891,7 +1903,7 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred) subq_lex->ref_pointer_array[i]); if (!item_eq) goto restore_tl_and_exit; - DBUG_ASSERT(left_exp->element_index(i)->is_fixed()); + DBUG_ASSERT(left_exp->element_index(i)->fixed()); if (left_exp_orig->element_index(i) != left_exp->element_index(i)) thd->change_item_tree(item_eq->arguments(), @@ -2722,7 +2734,7 @@ bool find_eq_ref_candidate(TABLE *table, table_map sj_inner_tables) */ if (!(keyuse->used_tables & sj_inner_tables) && !(keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL) && - (keyuse->null_rejecting || !keyuse->val->maybe_null)) + (keyuse->null_rejecting || !keyuse->val->maybe_null())) { bound_parts |= 1 << keyuse->keypart; } @@ -4379,11 +4391,11 @@ bool setup_sj_materialization_part2(JOIN_TAB *sjm_tab) sjm_tab->type= JT_ALL; /* Initialize full scan */ - sjm_tab->read_first_record= join_read_record_no_init; + sjm_tab->read_first_record= join_init_read_record; sjm_tab->read_record.copy_field= sjm->copy_field; 
sjm_tab->read_record.copy_field_end= sjm->copy_field + sjm->sjm_table_cols.elements; - sjm_tab->read_record.read_record_func= rr_sequential_and_unpack; + sjm_tab->read_record.read_record_func= read_record_func_for_rr_and_unpack; } sjm_tab->bush_children->end[-1].next_select= end_sj_materialize; @@ -4688,7 +4700,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd) table->record[1]= table->record[0]+alloc_length; share->default_values= table->record[1]+alloc_length; } - setup_tmp_table_column_bitmaps(table, bitmaps); + setup_tmp_table_column_bitmaps(table, bitmaps, table->s->fields); recinfo= start_recinfo; null_flags=(uchar*) table->record[0]; @@ -6516,8 +6528,8 @@ bool JOIN::choose_subquery_plan(table_map join_tables) /* A strategy must be chosen earlier. */ DBUG_ASSERT(in_subs->has_strategy()); DBUG_ASSERT(in_to_exists_where || in_to_exists_having); - DBUG_ASSERT(!in_to_exists_where || in_to_exists_where->is_fixed()); - DBUG_ASSERT(!in_to_exists_having || in_to_exists_having->is_fixed()); + DBUG_ASSERT(!in_to_exists_where || in_to_exists_where->fixed()); + DBUG_ASSERT(!in_to_exists_having || in_to_exists_having->fixed()); /* The original QEP of the subquery. */ Join_plan_state save_qep(table_count); @@ -6693,7 +6705,7 @@ bool JOIN::choose_subquery_plan(table_map join_tables) Item_in_subselect::test_limit). However, once we allow this, here we should set the correct limit if given in the query. 
*/ - in_subs->unit->global_parameters()->select_limit= NULL; + in_subs->unit->global_parameters()->limit_params.select_limit= NULL; in_subs->unit->set_limit(unit->global_parameters()); /* Set the limit of this JOIN object as well, because normally its being @@ -7233,3 +7245,16 @@ exit: thd->lex->current_select= save_curr_select; DBUG_RETURN(FALSE); } + +/* + @brief + Check if a table is a SJM Scan table + + @retval + TRUE SJM scan table + FALSE Otherwise +*/ +bool TABLE_LIST::is_sjm_scan_table() +{ + return is_active_sjm() && sj_mat_info->is_sj_scan; +} diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 8664ccae53c..627ddc86abd 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -342,7 +342,7 @@ int opt_sum_query(THD *thd, to the number of rows in the tables if this number is exact and there are no outer joins. */ - if (!conds && !((Item_sum_count*) item)->get_arg(0)->maybe_null && + if (!conds && !((Item_sum_count*) item)->get_arg(0)->maybe_null() && !outer_tables && maybe_exact_count && ((item->used_tables() & OUTER_REF_TABLE_BIT) == 0)) { @@ -448,7 +448,8 @@ int opt_sum_query(THD *thd, const_result= 0; break; } - item_sum->set_aggregator(item_sum->has_with_distinct() ? + item_sum->set_aggregator(thd, + item_sum->has_with_distinct() ? Aggregator::DISTINCT_AGGREGATOR : Aggregator::SIMPLE_AGGREGATOR); /* diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc index 3958797ec44..a6f0ac24719 100644 --- a/sql/opt_table_elimination.cc +++ b/sql/opt_table_elimination.cc @@ -628,7 +628,7 @@ void eliminate_tables(JOIN *join) List_iterator<Item> val_it(thd->lex->value_list); while ((item= val_it++)) { - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); used_tables |= item->used_tables(); } } @@ -637,6 +637,22 @@ void eliminate_tables(JOIN *join) List_iterator<Item> it(join->fields_list); while ((item= it++)) used_tables |= item->used_tables(); + + { + /* + Table function JSON_TABLE() can have references to other tables. 
Do not + eliminate the tables that JSON_TABLE() refers to. + Note: the JSON_TABLE itself cannot be eliminated as it doesn't + have unique keys. + */ + List_iterator<TABLE_LIST> it(join->select_lex->leaf_tables); + TABLE_LIST *tbl; + while ((tbl= it++)) + { + if (tbl->table_function) + used_tables|= tbl->table_function->used_tables(); + } + } /* Add tables referred to from ORDER BY and GROUP BY lists */ ORDER *all_lists[]= { join->order, join->group_list}; diff --git a/sql/opt_trace.cc b/sql/opt_trace.cc index ddec6d5ed2d..4bc493940fb 100644 --- a/sql/opt_trace.cc +++ b/sql/opt_trace.cc @@ -247,9 +247,11 @@ void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl) { /* Anonymous derived tables (as in - "SELECT ... FROM (SELECT ...)") don't have their grant.privilege set. + "SELECT ... FROM (SELECT ...)") and table functions + don't have their grant.privilege set. */ - if (!t->is_anonymous_derived_table()) + if (!t->is_anonymous_derived_table() && + !t->table_function) { const GRANT_INFO backup_grant_info= t->grant; Security_context *const backup_table_sctx= t->security_ctx; @@ -469,12 +471,14 @@ void Opt_trace_context::end() current_trace= NULL; } -Opt_trace_start::Opt_trace_start(THD *thd, TABLE_LIST *tbl, - enum enum_sql_command sql_command, - List<set_var_base> *set_vars, - const char *query, - size_t query_length, - const CHARSET_INFO *query_charset):ctx(&thd->opt_trace) + +void Opt_trace_start::init(THD *thd, + TABLE_LIST *tbl, + enum enum_sql_command sql_command, + List<set_var_base> *set_vars, + const char *query, + size_t query_length, + const CHARSET_INFO *query_charset) { /* if optimizer trace is enabled and the statment we have is traceable, @@ -494,6 +498,9 @@ Opt_trace_start::Opt_trace_start(THD *thd, TABLE_LIST *tbl, ctx->set_query(query, query_length, query_charset); traceable= TRUE; opt_trace_disable_if_no_tables_access(thd, tbl); + Json_writer *w= ctx->get_current_json(); + w->start_object(); + w->add_member("steps").start_array(); } } 
@@ -501,6 +508,9 @@ Opt_trace_start::~Opt_trace_start() { if (traceable) { + Json_writer *w= ctx->get_current_json(); + w->end_array(); + w->end_object(); ctx->end(); traceable= FALSE; } @@ -595,6 +605,18 @@ void Json_writer::add_table_name(const TABLE *table) } +void trace_condition(THD * thd, const char *name, const char *transform_type, + Item *item, const char *table_name) +{ + Json_writer_object trace_wrapper(thd); + Json_writer_object trace_cond(thd, transform_type); + trace_cond.add("condition", name); + if (table_name) + trace_cond.add("attached_to", table_name); + trace_cond.add("resulting_condition", item); +} + + void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab) { DBUG_ASSERT(thd->trace_started()); diff --git a/sql/opt_trace.h b/sql/opt_trace.h index 550f18c0797..1ee23a33591 100644 --- a/sql/opt_trace.h +++ b/sql/opt_trace.h @@ -72,14 +72,18 @@ struct Opt_trace_info */ -class Opt_trace_start { +class Opt_trace_start +{ public: - Opt_trace_start(THD *thd_arg, TABLE_LIST *tbl, - enum enum_sql_command sql_command, - List<set_var_base> *set_vars, - const char *query, - size_t query_length, - const CHARSET_INFO *query_charset); + Opt_trace_start(THD *thd_arg): ctx(&thd_arg->opt_trace), traceable(false) {} + + void init(THD *thd, TABLE_LIST *tbl, + enum enum_sql_command sql_command, + List<set_var_base> *set_vars, + const char *query, + size_t query_length, + const CHARSET_INFO *query_charset); + ~Opt_trace_start(); private: @@ -108,6 +112,10 @@ void print_final_join_order(JOIN *join); void print_best_access_for_table(THD *thd, POSITION *pos, enum join_type type); +void trace_condition(THD * thd, const char *name, const char *transform_type, + Item *item, const char *table_name= nullptr); + + /* Security related (need to add a proper comment here) */ diff --git a/sql/parse_file.cc b/sql/parse_file.cc index 0d81c121f36..f4aae1300e2 100644 --- a/sql/parse_file.cc +++ b/sql/parse_file.cc @@ -24,7 +24,9 @@ #include "sql_priv.h" #include "parse_file.h" 
#include "unireg.h" // CREATE_MODE -#include "sql_table.h" // build_table_filename +#include "sql_table.h" // build_table_filename +#include "debug.h" +#include <mysys_err.h> // EE_WRITE #include <m_ctype.h> #include <my_dir.h> @@ -246,7 +248,6 @@ write_parameter(IO_CACHE *file, const uchar* base, File_option *parameter) TRUE error */ - my_bool sql_create_definition_file(const LEX_CSTRING *dir, const LEX_CSTRING *file_name, @@ -288,6 +289,8 @@ sql_create_definition_file(const LEX_CSTRING *dir, DBUG_RETURN(TRUE); } + debug_crash_here("definition_file_after_create"); + if (init_io_cache(&file, handler, 0, WRITE_CACHE, 0L, 0, MYF(MY_WME))) goto err_w_file; @@ -297,6 +300,9 @@ sql_create_definition_file(const LEX_CSTRING *dir, my_b_write(&file, (const uchar *)STRING_WITH_LEN("\n"))) goto err_w_cache; + if (debug_simulate_error("definition_file_simulate_write_error", EE_WRITE)) + goto err_w_cache; + // write parameters to temporary file for (param= parameters; param->name.str; param++) { @@ -338,9 +344,52 @@ err_w_cache: end_io_cache(&file); err_w_file: mysql_file_close(handler, MYF(MY_WME)); + mysql_file_delete(key_file_fileparser, path, MYF(MY_WME)); DBUG_RETURN(TRUE); } + +/* + Make a copy of a definition file with '-' added to the name + + @param org_name Original file name + @param new_name Pointer to a buff of FN_REFLEN. 
Will be updated to name of + backup file + @return 0 ok + @return 1 error +*/ + +int sql_backup_definition_file(const LEX_CSTRING *org_name, + LEX_CSTRING *new_name) +{ + char *new_name_buff= (char*) new_name->str; + new_name->length= org_name->length+1; + + memcpy(new_name_buff, org_name->str, org_name->length+1); + new_name_buff[org_name->length]= '-'; + new_name_buff[org_name->length+1]= 0; + return my_copy(org_name->str, new_name->str, MYF(MY_WME)); +} + +/* + Restore copy of a definition file + + @param org_name Name of backup file (ending with '-' or '~') + + @return 0 ok + @return 1 error +*/ + +int sql_restore_definition_file(const LEX_CSTRING *name) +{ + char new_name[FN_REFLEN+1]; + memcpy(new_name, name->str, name->length-1); + new_name[name->length-1]= 0; + return mysql_file_rename(key_file_fileparser, name->str, new_name, + MYF(MY_WME)); +} + + /** Renames a frm file (including backups) in same schema. diff --git a/sql/parse_file.h b/sql/parse_file.h index cbd41d16cbc..cd26ffec91a 100644 --- a/sql/parse_file.h +++ b/sql/parse_file.h @@ -96,6 +96,10 @@ my_bool rename_in_schema_file(THD *thd, const char *schema, const char *old_name, const char *new_db, const char *new_name); +int sql_backup_definition_file(const LEX_CSTRING *org_name, + LEX_CSTRING *new_name); +int sql_restore_definition_file(const LEX_CSTRING *name); + class File_parser: public Sql_alloc { char *start, *end; diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 3ac0f144536..3af7e97db2b 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -127,7 +127,7 @@ partition_info *partition_info::get_clone(THD *thd, bool empty_data_and_index_fi /** Mark named [sub]partition to be used/locked. - @param part_name Partition name to match. + @param part_name Partition name to match. Must be \0 terminated! @param length Partition name length. 
@return Success if partition found @@ -141,7 +141,10 @@ bool partition_info::add_named_partition(const char *part_name, size_t length) PART_NAME_DEF *part_def; Partition_share *part_share; DBUG_ENTER("partition_info::add_named_partition"); - DBUG_ASSERT(table && table->s && table->s->ha_share); + DBUG_ASSERT(part_name[length] == 0); + DBUG_ASSERT(table); + DBUG_ASSERT(table->s); + DBUG_ASSERT(table->s->ha_share); part_share= static_cast<Partition_share*>((table->s->ha_share)); DBUG_ASSERT(part_share->partition_name_hash_initialized); part_name_hash= &part_share->partition_name_hash; @@ -173,9 +176,9 @@ bool partition_info::add_named_partition(const char *part_name, size_t length) else bitmap_set_bit(&read_partitions, part_def->part_id); } - DBUG_PRINT("info", ("Found partition %u is_subpart %d for name %s", + DBUG_PRINT("info", ("Found partition %u is_subpart %d for name %.*s", part_def->part_id, part_def->is_subpart, - part_name)); + length, part_name)); DBUG_RETURN(false); } @@ -866,7 +869,7 @@ void partition_info::vers_check_limit(THD *thd) #ifndef DBUG_OFF const uint32 sub_factor= num_subparts ? 
num_subparts : 1; uint32 part_id= vers_info->hist_part->id * sub_factor; - const uint32 part_id_end= part_id + sub_factor; + const uint32 part_id_end __attribute__((unused)) = part_id + sub_factor; DBUG_ASSERT(part_id_end <= num_parts * sub_factor); #endif diff --git a/sql/privilege.h b/sql/privilege.h index 98168c0e62e..82173912e2a 100644 --- a/sql/privilege.h +++ b/sql/privilege.h @@ -296,6 +296,7 @@ constexpr privilege_t TMP_TABLE_ACLS= COL_DML_ACLS | ALL_TABLE_DDL_ACLS | REFERENCES_ACL; +constexpr privilege_t PRIV_LOCK_TABLES= SELECT_ACL | LOCK_TABLES_ACL; /* Allow to set an object definer: diff --git a/sql/procedure.cc b/sql/procedure.cc index 92df43bb0e3..21afef274bc 100644 --- a/sql/procedure.cc +++ b/sql/procedure.cc @@ -46,7 +46,7 @@ my_decimal *Item_proc_string::val_decimal(my_decimal *decimal_value) { if (null_value) return 0; - string2my_decimal(E_DEC_FATAL_ERROR, &str_value, decimal_value); + string2my_decimal(E_DEC_FATAL_ERROR, &value, decimal_value); return (decimal_value); } diff --git a/sql/procedure.h b/sql/procedure.h index 769eac5f217..c59b766d2b9 100644 --- a/sql/procedure.h +++ b/sql/procedure.h @@ -44,9 +44,9 @@ public: this->name.str= name_par; this->name.length= strlen(name_par); } - enum Type type() const { return Item::PROC_ITEM; } + enum Type type() const override { return Item::PROC_ITEM; } Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src, - const Tmp_field_param *param) + const Tmp_field_param *param) override { /* We can get to here when using a CURSOR for a query with PROCEDURE: @@ -58,19 +58,19 @@ public: virtual void set(double nr)=0; virtual void set(const char *str,uint length,CHARSET_INFO *cs)=0; virtual void set(longlong nr)=0; - const Type_handler *type_handler() const=0; + const Type_handler *type_handler() const override=0; void set(const char *str) { set(str,(uint) strlen(str), default_charset()); } unsigned int size_of() { return sizeof(*this);} - bool check_vcol_func_processor(void *arg) + bool 
check_vcol_func_processor(void *arg) override { DBUG_ASSERT(0); // impossible return mark_unsupported_function("proc", arg, VCOL_IMPOSSIBLE); } - bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override { return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate); } - Item* get_copy(THD *thd) { return 0; } + Item* get_copy(THD *thd) override { return 0; } }; class Item_proc_real :public Item_proc @@ -82,23 +82,24 @@ public: { decimals=dec; max_length=float_length(dec); } - const Type_handler *type_handler() const { return &type_handler_double; } - void set(double nr) { value=nr; } - void set(longlong nr) { value=(double) nr; } - void set(const char *str,uint length,CHARSET_INFO *cs) + const Type_handler *type_handler() const override + { return &type_handler_double; } + void set(double nr) override { value=nr; } + void set(longlong nr) override { value=(double) nr; } + void set(const char *str,uint length,CHARSET_INFO *cs) override { int err_not_used; char *end_not_used; value= cs->strntod((char*) str,length, &end_not_used, &err_not_used); } - double val_real() { return value; } - longlong val_int() { return (longlong) value; } - String *val_str(String *s) + double val_real() override { return value; } + longlong val_int() override { return (longlong) value; } + String *val_str(String *s) override { s->set_real(value,decimals,default_charset()); return s; } - my_decimal *val_decimal(my_decimal *); + my_decimal *val_decimal(my_decimal *) override; unsigned int size_of() { return sizeof(*this);} }; @@ -108,53 +109,61 @@ class Item_proc_int :public Item_proc public: Item_proc_int(THD *thd, const char *name_par): Item_proc(thd, name_par) { max_length=11; } - const Type_handler *type_handler() const + const Type_handler *type_handler() const override { if (unsigned_flag) return &type_handler_ulonglong; return &type_handler_slonglong; } - void set(double nr) { 
value=(longlong) nr; } - void set(longlong nr) { value=nr; } - void set(const char *str,uint length, CHARSET_INFO *cs) + void set(double nr) override { value=(longlong) nr; } + void set(longlong nr) override { value=nr; } + void set(const char *str,uint length, CHARSET_INFO *cs) override { int err; value= cs->strntoll(str,length,10,NULL,&err); } - double val_real() { return (double) value; } - longlong val_int() { return value; } - String *val_str(String *s) { s->set(value, default_charset()); return s; } - my_decimal *val_decimal(my_decimal *); + double val_real() override { return (double) value; } + longlong val_int() override { return value; } + String *val_str(String *s) override + { s->set(value, default_charset()); return s; } + my_decimal *val_decimal(my_decimal *) override; unsigned int size_of() { return sizeof(*this);} }; class Item_proc_string :public Item_proc { + String value; public: Item_proc_string(THD *thd, const char *name_par, uint length): - Item_proc(thd, name_par) { this->max_length=length; } - const Type_handler *type_handler() const { return &type_handler_varchar; } - void set(double nr) { str_value.set_real(nr, 2, default_charset()); } - void set(longlong nr) { str_value.set(nr, default_charset()); } - void set(const char *str, uint length, CHARSET_INFO *cs) - { str_value.copy(str,length,cs); } - double val_real() + Item_proc(thd, name_par) + { + this->max_length=length; + value.set_thread_specific(); + } + const Type_handler *type_handler() const override + { return &type_handler_varchar; } + void set(double nr) override { value.set_real(nr, 2, default_charset()); } + void set(longlong nr) override { value.set(nr, default_charset()); } + void set(const char *str, uint length, CHARSET_INFO *cs) override + { value.copy(str,length,cs); } + double val_real() override { int err_not_used; char *end_not_used; - CHARSET_INFO *cs= str_value.charset(); - return cs->strntod((char*) str_value.ptr(), str_value.length(), + CHARSET_INFO *cs= 
value.charset(); + return cs->strntod((char*) value.ptr(), value.length(), &end_not_used, &err_not_used); } - longlong val_int() + longlong val_int() override { int err; - CHARSET_INFO *cs=str_value.charset(); - return cs->strntoll(str_value.ptr(),str_value.length(),10,NULL,&err); + CHARSET_INFO *cs=value.charset(); + return cs->strntoll(value.ptr(), value.length(), 10, NULL, &err); } - String *val_str(String*) + String *val_str(String*) override { - return null_value ? (String*) 0 : (String*) &str_value; + return null_value ? (String*) 0 : &value; } - my_decimal *val_decimal(my_decimal *); + my_decimal *val_decimal(my_decimal *) override; + void cleanup() override { value.free(); } unsigned int size_of() { return sizeof(*this);} }; diff --git a/sql/protocol.cc b/sql/protocol.cc index 008ae13d9f0..33d865a3f9f 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -211,8 +211,7 @@ bool Protocol::net_send_ok(THD *thd, uint server_status, uint statement_warn_count, ulonglong affected_rows, ulonglong id, - const char *message, bool is_eof, - bool skip_flush) + const char *message, bool is_eof) { NET *net= &thd->net; StringBuffer<MYSQL_ERRMSG_SIZE + 10> store; @@ -284,7 +283,7 @@ Protocol::net_send_ok(THD *thd, DBUG_ASSERT(store.length() <= MAX_PACKET_LENGTH); error= my_net_write(net, (const unsigned char*)store.ptr(), store.length()); - if (likely(!error) && (!skip_flush || is_eof)) + if (likely(!error)) error= net_flush(net); thd->get_stmt_da()->set_overwrite_status(false); @@ -337,7 +336,7 @@ Protocol::net_send_eof(THD *thd, uint server_status, uint statement_warn_count) (thd->get_command() != COM_BINLOG_DUMP )) { error= net_send_ok(thd, server_status, statement_warn_count, 0, 0, NULL, - true, false); + true); DBUG_RETURN(error); } @@ -609,16 +608,14 @@ void Protocol::end_statement() thd->get_stmt_da()->statement_warn_count(), thd->get_stmt_da()->affected_rows(), thd->get_stmt_da()->last_insert_id(), - thd->get_stmt_da()->message(), - 
thd->get_stmt_da()->skip_flush()); + thd->get_stmt_da()->message()); break; case Diagnostics_area::DA_DISABLED: break; case Diagnostics_area::DA_EMPTY: default: DBUG_ASSERT(0); - error= send_ok(thd->server_status, 0, 0, 0, NULL, - thd->get_stmt_da()->skip_flush()); + error= send_ok(thd->server_status, 0, 0, 0, NULL); break; } if (likely(!error)) @@ -637,12 +634,12 @@ void Protocol::end_statement() bool Protocol::send_ok(uint server_status, uint statement_warn_count, ulonglong affected_rows, ulonglong last_insert_id, - const char *message, bool skip_flush) + const char *message) { DBUG_ENTER("Protocol::send_ok"); const bool retval= net_send_ok(thd, server_status, statement_warn_count, - affected_rows, last_insert_id, message, false, skip_flush); + affected_rows, last_insert_id, message, false); DBUG_RETURN(retval); } @@ -918,6 +915,242 @@ bool Protocol_text::store_field_metadata(const THD * thd, } +/* + MARIADB_CLIENT_CACHE_METADATA support. + + Bulk of the code below is dedicated to detecting whether column metadata has + changed after prepare, or between executions of a prepared statement. + + For some prepared statements, metadata can't change without going through + Prepared_Statement::reprepare(), which makes detecting changes easy. + + Others, "SELECT ?" & Co, are more fragile, and sensitive to input parameters, + or user variables. Detecting metadata change for this class of PS is harder, + we calculate signature (hash value), and check whether this changes between + executions. This is a more expensive method. +*/ + + +/** + Detect whether column info can be changed without + PS repreparing. + + Such colum info is called fragile. The opposite of + fragile is. + + + @param it - Item representing column info + @return true, if columninfo is "fragile", false if it is stable + + + @todo does not work due to MDEV-23913. Currently, + everything about prepared statements is fragile. 
+*/ + +static bool is_fragile_columnifo(Item *it) +{ +#define MDEV_23913_FIXED 0 +#if MDEV_23913_FIXED + if (dynamic_cast<Item_param *>(it)) + return true; + + if (dynamic_cast<Item_func_user_var *>(it)) + return true; + + if (dynamic_cast <Item_sp_variable*>(it)) + return true; + + /* Check arguments of functions.*/ + auto item_args= dynamic_cast<Item_args *>(it); + if (!item_args) + return false; + auto args= item_args->arguments(); + auto arg_count= item_args->argument_count(); + for (uint i= 0; i < arg_count; i++) + { + if (is_fragile_columnifo(args[i])) + return true; + } + return false; +#else /* MDEV-23913 fixed*/ + return true; +#endif +} + + +#define INVALID_METADATA_CHECKSUM 0 + + +/** + Calculate signature for column info sent to the client as CRC32 over data, + that goes into the column info packet. + We assume that if checksum does not change, then column info was not + modified. + + @param thd THD + @param list column info + + @return CRC32 of the metadata +*/ + +static uint32 calc_metadata_hash(THD *thd, List<Item> *list) +{ + List_iterator_fast<Item> it(*list); + Item *item; + uint32 crc32_c= 0; + while ((item= it++)) + { + Send_field field(thd, item); + auto field_type= item->type_handler()->field_type(); + auto charset= item->charset_for_protocol(); + /* + The data below should contain everything that influences + content of the column info packet. 
+ */ + LEX_CSTRING data[]= + { + field.table_name, + field.org_table_name, + field.col_name, + field.org_col_name, + field.db_name, + field.attr(MARIADB_FIELD_ATTR_DATA_TYPE_NAME), + field.attr(MARIADB_FIELD_ATTR_FORMAT_NAME), + {(const char *) &field.length, sizeof(field.length)}, + {(const char *) &field.flags, sizeof(field.flags)}, + {(const char *) &field.decimals, sizeof(field.decimals)}, + {(const char *) &charset, sizeof(charset)}, + {(const char *) &field_type, sizeof(field_type)}, + }; + for (const auto &chunk : data) + crc32_c= my_crc32c(crc32_c, chunk.str, chunk.length); + } + + if (crc32_c == INVALID_METADATA_CHECKSUM) + return 1; + return crc32_c; +} + + + +/** + Check if metadata columns have changed since last call to this + function. + + @param send_column_info_state saved state, changed if the function + return true. + @param thd THD + @param list columninfo Items + @return true,if metadata columns have changed since last call, + false otherwise +*/ + +static bool metadata_columns_changed(send_column_info_state &state, THD *thd, + List<Item> &list) +{ + if (!state.initialized) + { + state.initialized= true; + state.immutable= true; + Item *item; + List_iterator_fast<Item> it(list); + while ((item= it++)) + { + if (is_fragile_columnifo(item)) + { + state.immutable= false; + state.checksum= calc_metadata_hash(thd, &list); + break; + } + } + state.last_charset= thd->variables.character_set_client; + return true; + } + + /* + Since column info can change under our feet, we use more expensive + checksumming to check if column metadata has not changed since last time. + */ + if (!state.immutable) + { + uint32 checksum= calc_metadata_hash(thd, &list); + if (checksum != state.checksum) + { + state.checksum= checksum; + state.last_charset= thd->variables.character_set_client; + return true; + } + } + + /* + Character_set_client influences result set metadata, thus resend metadata + whenever it changes. 
+ */ + if (state.last_charset != thd->variables.character_set_client) + { + state.last_charset= thd->variables.character_set_client; + return true; + } + + return false; +} + + +/** + Determine whether column info must be sent to the client. + Skip column info, if client supports caching, and (prepared) statement + output fields have not changed. + + @param thd THD + @param list column info + @param flags send flags. If Protocol::SEND_FORCE_COLUMN_INFO is set, + this function will return true + @return true, if column info must be sent to the client. + false otherwise +*/ + +static bool should_send_column_info(THD* thd, List<Item>* list, uint flags) +{ + if (!(thd->client_capabilities & MARIADB_CLIENT_CACHE_METADATA)) + { + /* Client does not support abbreviated metadata.*/ + return true; + } + + if (!thd->cur_stmt) + { + /* Neither COM_PREPARE nor COM_EXECUTE run.*/ + return true; + } + + if (thd->spcont) + { + /* Always sent full metadata from inside the stored procedure.*/ + return true; + } + + if (flags & Protocol::SEND_FORCE_COLUMN_INFO) + return true; + + auto &column_info_state= thd->cur_stmt->column_info_state; +#ifndef DBUG_OFF + auto cmd= thd->get_command(); +#endif + + DBUG_ASSERT(cmd == COM_STMT_EXECUTE || cmd == COM_STMT_PREPARE + || cmd == COM_STMT_BULK_EXECUTE); + DBUG_ASSERT(cmd != COM_STMT_PREPARE || !column_info_state.initialized); + + bool ret= metadata_columns_changed(column_info_state, thd, *list); + + DBUG_ASSERT(cmd != COM_STMT_PREPARE || ret); + if (!ret) + thd->status_var.skip_metadata_count++; + + return ret; +} + + /** Send name and type of result to client. 
@@ -938,35 +1171,49 @@ bool Protocol_text::store_field_metadata(const THD * thd, */ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags) { - List_iterator_fast<Item> it(*list); - Item *item; - Protocol_text prot(thd, thd->variables.net_buffer_length); DBUG_ENTER("Protocol::send_result_set_metadata"); + bool send_column_info= should_send_column_info(thd, list, flags); + if (flags & SEND_NUM_ROWS) - { // Packet with number of elements - uchar buff[MAX_INT_WIDTH]; + { + /* + Packet with number of columns. + + Will also have a 1 byte column info indicator, in case + MARIADB_CLIENT_CACHE_METADATA client capability is set. + */ + uchar buff[MAX_INT_WIDTH+1]; uchar *pos= net_store_length(buff, list->elements); + if (thd->client_capabilities & MARIADB_CLIENT_CACHE_METADATA) + *pos++= (uchar)send_column_info; + DBUG_ASSERT(pos <= buff + sizeof(buff)); if (my_net_write(&thd->net, buff, (size_t) (pos-buff))) DBUG_RETURN(1); } + if (send_column_info) + { + List_iterator_fast<Item> it(*list); + Item *item; + Protocol_text prot(thd, thd->variables.net_buffer_length); #ifndef DBUG_OFF - field_handlers= (const Type_handler**) thd->alloc(sizeof(field_handlers[0]) * - list->elements); + field_handlers= (const Type_handler **) thd->alloc( + sizeof(field_handlers[0]) * list->elements); #endif - for (uint pos= 0; (item=it++); pos++) - { - prot.prepare_for_resend(); - if (prot.store_item_metadata(thd, item, pos)) - goto err; - if (prot.write()) - DBUG_RETURN(1); + for (uint pos= 0; (item= it++); pos++) + { + prot.prepare_for_resend(); + if (prot.store_item_metadata(thd, item, pos)) + goto err; + if (prot.write()) + DBUG_RETURN(1); #ifndef DBUG_OFF - field_handlers[pos]= item->type_handler(); + field_handlers[pos]= item->type_handler(); #endif + } } if (flags & SEND_EOF) @@ -1072,18 +1319,12 @@ bool Protocol_text::store_field_metadata_for_list_fields(const THD *thd, bool Protocol::send_result_set_row(List<Item> *row_items) { List_iterator_fast<Item> it(*row_items); - + 
ValueBuffer<MAX_FIELD_WIDTH> value_buffer; DBUG_ENTER("Protocol::send_result_set_row"); for (Item *item= it++; item; item= it++) { - /* - ValueBuffer::m_string can be altered during Item::send(). - It's important to declare value_buffer inside the loop, - to have ValueBuffer::m_string point to ValueBuffer::buffer - on every iteration. - */ - ValueBuffer<MAX_FIELD_WIDTH> value_buffer; + value_buffer.reset_buffer(); if (item->send(this, &value_buffer)) { // If we're out of memory, reclaim some, to help us recover. @@ -1100,9 +1341,9 @@ bool Protocol::send_result_set_row(List<Item> *row_items) /** - Send \\0 end terminated string. + Send \\0 end terminated string or NULL - @param from NullS or \\0 terminated string + @param from NullS or \\0 terminated string @note In most cases one should use store(from, length) instead of this function @@ -1113,12 +1354,11 @@ bool Protocol::send_result_set_row(List<Item> *row_items) 1 error */ -bool Protocol::store(const char *from, CHARSET_INFO *cs) +bool Protocol::store_string_or_null(const char *from, CHARSET_INFO *cs) { if (!from) return store_null(); - size_t length= strlen(from); - return store(from, length, cs); + return store(from, strlen(from), cs); } @@ -1137,7 +1377,7 @@ bool Protocol::store(I_List<i_string>* str_list) tmp.length(0); while ((s=it++)) { - tmp.append(s->ptr); + tmp.append(s->ptr, strlen(s->ptr)); tmp.append(','); } if ((len= tmp.length())) @@ -1352,7 +1592,7 @@ bool Protocol_text::store(Field *field) } -bool Protocol_text::store(MYSQL_TIME *tm, int decimals) +bool Protocol_text::store_datetime(MYSQL_TIME *tm, int decimals) { #ifndef DBUG_OFF DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_DATETIME)); @@ -1570,7 +1810,7 @@ bool Protocol_binary::store(Field *field) } -bool Protocol_binary::store(MYSQL_TIME *tm, int decimals) +bool Protocol_binary::store_datetime(MYSQL_TIME *tm, int decimals) { char buff[12],*pos; uint length; @@ -1604,7 +1844,7 @@ bool Protocol_binary::store_date(MYSQL_TIME *tm) { 
tm->hour= tm->minute= tm->second=0; tm->second_part= 0; - return Protocol_binary::store(tm, 0); + return Protocol_binary::store_datetime(tm, 0); } @@ -1690,7 +1930,8 @@ bool Protocol_binary::send_out_parameters(List<Item_param> *sp_params) thd->server_status|= SERVER_PS_OUT_PARAMS | SERVER_MORE_RESULTS_EXISTS; /* Send meta-data. */ - if (send_result_set_metadata(&out_param_lst, SEND_NUM_ROWS | SEND_EOF)) + if (send_result_set_metadata(&out_param_lst, + SEND_NUM_ROWS | SEND_EOF | SEND_FORCE_COLUMN_INFO)) return TRUE; /* Send data. */ diff --git a/sql/protocol.h b/sql/protocol.h index f98b4cabfed..f095ad68a34 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -54,7 +54,7 @@ protected: virtual bool net_store_data_cs(const uchar *from, size_t length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs); virtual bool net_send_ok(THD *, uint, uint, ulonglong, ulonglong, const char *, - bool, bool); + bool); virtual bool net_send_error_packet(THD *, uint, const char *, const char *); #ifdef EMBEDDED_LIBRARY char **next_field; @@ -78,7 +78,7 @@ protected: virtual bool send_ok(uint server_status, uint statement_warn_count, ulonglong affected_rows, ulonglong last_insert_id, - const char *message, bool skip_flush); + const char *message); virtual bool send_eof(uint server_status, uint statement_warn_count); @@ -93,13 +93,13 @@ public: virtual ~Protocol() {} void init(THD* thd_arg); - enum { SEND_NUM_ROWS= 1, SEND_EOF= 2 }; + enum { SEND_NUM_ROWS= 1, SEND_EOF= 2, SEND_FORCE_COLUMN_INFO= 4 }; virtual bool send_result_set_metadata(List<Item> *list, uint flags); bool send_list_fields(List<Field> *list, const TABLE_LIST *table_list); bool send_result_set_row(List<Item> *row_items); bool store(I_List<i_string> *str_list); - bool store(const char *from, CHARSET_INFO *cs); + bool store_string_or_null(const char *from, CHARSET_INFO *cs); bool store_warning(const char *from, size_t length); String *storage_packet() { return packet; } inline void free() { packet->free(); } @@ -138,7 +138,7 @@ 
public: CHARSET_INFO *fromcs, CHARSET_INFO *tocs)=0; virtual bool store_float(float from, uint32 decimals)=0; virtual bool store_double(double from, uint32 decimals)=0; - virtual bool store(MYSQL_TIME *time, int decimals)=0; + virtual bool store_datetime(MYSQL_TIME *time, int decimals)=0; virtual bool store_date(MYSQL_TIME *time)=0; virtual bool store_time(MYSQL_TIME *time, int decimals)=0; virtual bool store(Field *field)=0; @@ -188,10 +188,6 @@ public: bool net_send_error(THD *thd, uint sql_errno, const char *err, const char* sqlstate); void end_statement(); - - friend int send_answer_1(Protocol *protocol, String *s1, String *s2, - String *s3); - friend int send_header_2(Protocol *protocol, bool for_category); }; @@ -217,7 +213,7 @@ public: bool store_decimal(const my_decimal *) override; bool store_str(const char *from, size_t length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs) override; - bool store(MYSQL_TIME *time, int decimals) override; + bool store_datetime(MYSQL_TIME *time, int decimals) override; bool store_date(MYSQL_TIME *time) override; bool store_time(MYSQL_TIME *time, int decimals) override; bool store_float(float nr, uint32 decimals) override; @@ -265,7 +261,7 @@ public: bool store_decimal(const my_decimal *) override; bool store_str(const char *from, size_t length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs) override; - bool store(MYSQL_TIME *time, int decimals) override; + bool store_datetime(MYSQL_TIME *time, int decimals) override; bool store_date(MYSQL_TIME *time) override; bool store_time(MYSQL_TIME *time, int decimals) override; bool store_float(float nr, uint32 decimals) override; @@ -316,7 +312,7 @@ public: { return false; } - bool store(MYSQL_TIME *, int) override { return false; } + bool store_datetime(MYSQL_TIME *, int) override { return false; } bool store_date(MYSQL_TIME *) override { return false; } bool store_time(MYSQL_TIME *, int) override { return false; } bool store_float(float, uint32) override { return false; } diff --git 
a/sql/records.cc b/sql/records.cc index 8da605072a6..3aad36ca862 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -255,7 +255,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, thd->variables.read_rnd_buff_size && !(table->file->ha_table_flags() & HA_FAST_KEY_READ) && (table->db_stat & HA_READ_ONLY || - table->reginfo.lock_type <= TL_READ_NO_INSERT) && + table->reginfo.lock_type < TL_FIRST_WRITE) && (ulonglong) table->s->reclength* (table->file->stats.records+ table->file->stats.deleted) > (ulonglong) MIN_FILE_LENGTH_TO_USE_ROW_CACHE && @@ -830,3 +830,32 @@ inline void SORT_INFO::unpack_addon_fields(uchar *buff) field->unpack(field->ptr, buff + addonf->offset, buff_end, 0); } } + + +/* + @brief + Read and unpack next record from a table + + @details + The function first reads the next record from the table. + If a success then it unpacks the values to the base table fields. + This is used by SJM scan table to unpack the values of the materialized + table to the base table fields + + @retval + 0 Record successfully read. + @retval + -1 There is no record to be read anymore. 
+ >0 Error +*/ +int read_record_func_for_rr_and_unpack(READ_RECORD *info) +{ + int error; + if ((error= info->read_record_func_and_unpack_calls(info))) + return error; + + for (Copy_field *cp= info->copy_field; cp != info->copy_field_end; cp++) + (*cp->do_copy)(cp); + + return error; +} diff --git a/sql/records.h b/sql/records.h index 272bbd0d9b5..9bc1b98fde4 100644 --- a/sql/records.h +++ b/sql/records.h @@ -56,6 +56,7 @@ struct READ_RECORD TABLE *table; /* Head-form */ Unlock_row_func unlock_row; Read_func read_record_func; + Read_func read_record_func_and_unpack_calls; THD *thd; SQL_SELECT *select; uint ref_length, reclength, rec_cache_size, error_offset; diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 1df85759a9c..d0285b54928 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -122,7 +122,7 @@ int THD::register_slave(uchar *packet, size_t packet_length) uchar *p= packet, *p_end= packet + packet_length; const char *errmsg= "Wrong parameters to function register_slave"; - if (check_access(this, PRIV_COM_REGISTER_SLAVE, any_db, NULL, NULL, 0, 0)) + if (check_access(this, PRIV_COM_REGISTER_SLAVE, any_db.str, NULL,NULL,0,0)) return 1; if (!(si= (Slave_info*)my_malloc(key_memory_SLAVE_INFO, sizeof(Slave_info), MYF(MY_WME)))) @@ -183,11 +183,11 @@ static my_bool show_slave_hosts_callback(THD *thd, Protocol *protocol) { protocol->prepare_for_resend(); protocol->store(si->server_id); - protocol->store(si->host, &my_charset_bin); + protocol->store(si->host, strlen(si->host), &my_charset_bin); if (opt_show_slave_auth_info) { - protocol->store(si->user, &my_charset_bin); - protocol->store(si->password, &my_charset_bin); + protocol->store(si->user, safe_strlen(si->user), &my_charset_bin); + protocol->store(si->password, safe_strlen(si->password), &my_charset_bin); } protocol->store((uint32) si->port); protocol->store(si->master_id); diff --git a/sql/rowid_filter.h b/sql/rowid_filter.h index b76b8b1e635..cb1615c5925 100644 --- a/sql/rowid_filter.h 
+++ b/sql/rowid_filter.h @@ -318,7 +318,8 @@ public: bool alloc() { - array= new Dynamic_array<char> (elem_size * max_elements, + array= new Dynamic_array<char> (PSI_INSTRUMENT_MEM, + elem_size * max_elements, elem_size * max_elements/sizeof(char) + 1); return array == NULL; } diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc index 5c4a4d9f58a..1e1f98c1e3e 100644 --- a/sql/rpl_filter.cc +++ b/sql/rpl_filter.cc @@ -805,7 +805,7 @@ Rpl_filter::db_rule_ent_list_to_str(String* str, I_List<i_string>* list) while ((s= it++)) { - str->append(s->ptr); + str->append(s->ptr, strlen(s->ptr)); str->append(','); } diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index e4514e28e64..fce5f260639 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -1037,13 +1037,13 @@ rpl_slave_state_tostring_helper(String *dest, const rpl_gtid *gtid, bool *first) if (*first) *first= false; else - if (dest->append(",",1)) + if (dest->append(',')) return true; return dest->append_ulonglong(gtid->domain_id) || - dest->append("-",1) || + dest->append('-') || dest->append_ulonglong(gtid->server_id) || - dest->append("-",1) || + dest->append('-') || dest->append_ulonglong(gtid->seq_no); } @@ -1734,7 +1734,7 @@ rpl_binlog_state::alloc_element_nolock(const rpl_gtid *gtid) */ bool rpl_binlog_state::check_strict_sequence(uint32 domain_id, uint32 server_id, - uint64 seq_no) + uint64 seq_no, bool no_error) { element *elem; bool res= 0; @@ -1744,9 +1744,10 @@ rpl_binlog_state::check_strict_sequence(uint32 domain_id, uint32 server_id, (const uchar *)(&domain_id), 0)) && elem->last_gtid && elem->last_gtid->seq_no >= seq_no) { - my_error(ER_GTID_STRICT_OUT_OF_ORDER, MYF(0), domain_id, server_id, seq_no, - elem->last_gtid->domain_id, elem->last_gtid->server_id, - elem->last_gtid->seq_no); + if (!no_error) + my_error(ER_GTID_STRICT_OUT_OF_ORDER, MYF(0), domain_id, server_id, seq_no, + elem->last_gtid->domain_id, elem->last_gtid->server_id, + elem->last_gtid->seq_no); res= 1; } 
mysql_mutex_unlock(&LOCK_binlog_state); diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h index 531d746763b..c8decff8fe8 100644 --- a/sql/rpl_gtid.h +++ b/sql/rpl_gtid.h @@ -317,7 +317,8 @@ struct rpl_binlog_state int update_with_next_gtid(uint32 domain_id, uint32 server_id, rpl_gtid *gtid); int alloc_element_nolock(const rpl_gtid *gtid); - bool check_strict_sequence(uint32 domain_id, uint32 server_id, uint64 seq_no); + bool check_strict_sequence(uint32 domain_id, uint32 server_id, uint64 seq_no, + bool no_error= false); int bump_seq_no_if_needed(uint32 domain_id, uint64 seq_no); int write_to_iocache(IO_CACHE *dest); int read_from_iocache(IO_CACHE *src); diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 17c9029e1a5..8322bcd3042 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -43,7 +43,8 @@ Master_info::Master_info(LEX_CSTRING *connection_name_arg, gtid_reconnect_event_skip_count(0), gtid_event_seen(false), in_start_all_slaves(0), in_stop_all_slaves(0), in_flush_all_relay_logs(0), users(0), killed(0), - total_ddl_groups(0), total_non_trans_groups(0), total_trans_groups(0) + total_ddl_groups(0), total_non_trans_groups(0), total_trans_groups(0), + do_accept_own_server_id(false) { char *tmp; host[0] = 0; user[0] = 0; password[0] = 0; @@ -1226,17 +1227,15 @@ bool Master_info_index::init_all_master_info() if (!err_num) // No Error on read Master_info { - if (global_system_variables.log_warnings > 1) + if (global_system_variables.log_warnings > 2) sql_print_information("Reading of all Master_info entries succeeded"); DBUG_RETURN(0); } + if (succ_num) // Have some Error and some Success - { sql_print_warning("Reading of some Master_info entries failed"); - DBUG_RETURN(1); - } - - sql_print_error("Reading of all Master_info entries failed!"); + else + sql_print_error("Reading of all Master_info entries failed!"); DBUG_RETURN(1); error: @@ -1433,7 +1432,7 @@ bool Master_info_index::add_master_info(Master_info *mi, bool write_to_file) if (unlikely(abort_loop) || 
!my_hash_insert(&master_info_hash, (uchar*) mi)) { - if (global_system_variables.log_warnings > 1) + if (global_system_variables.log_warnings > 2) sql_print_information("Added new Master_info '%.*s' to hash table", (int) mi->connection_name.length, mi->connection_name.str); @@ -1979,15 +1978,16 @@ void prot_store_ids(THD *thd, DYNAMIC_ARRAY *ids) break the loop whenever remained space could not fit ellipses on the next cycle */ - sprintf(dbuff + cur_len, "..."); + cur_len+= sprintf(dbuff + cur_len, "..."); break; } - cur_len += sprintf(buff + cur_len, "%s", dbuff); + cur_len+= sprintf(buff + cur_len, "%s", dbuff); } - thd->protocol->store(buff, &my_charset_bin); + thd->protocol->store(buff, cur_len, &my_charset_bin); return; } + bool Master_info_index::flush_all_relay_logs() { DBUG_ENTER("flush_all_relay_logs"); diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h index b6ff69d1f64..1377a816d48 100644 --- a/sql/rpl_mi.h +++ b/sql/rpl_mi.h @@ -40,6 +40,13 @@ private: */ bool m_filter; +public: + /* domain id list types */ + enum enum_list_type { + DO_DOMAIN_IDS= 0, + IGNORE_DOMAIN_IDS + }; + /* DO_DOMAIN_IDS (0): Ignore all the events which do not belong to any of the domain ids in the @@ -50,13 +57,6 @@ private: */ DYNAMIC_ARRAY m_domain_ids[2]; -public: - /* domain id list types */ - enum enum_list_type { - DO_DOMAIN_IDS= 0, - IGNORE_DOMAIN_IDS - }; - Domain_id_filter(); ~Domain_id_filter(); @@ -144,8 +144,8 @@ typedef struct st_rows_event_tracker my_off_t first_seen; my_off_t last_seen; bool stmt_end_seen; - void update(const char* file_name, my_off_t pos, - const char* buf, + void update(const char *file_name, my_off_t pos, + const uchar *buf, const Format_description_log_event *fdle); void reset(); bool check_and_report(const char* file_name, my_off_t pos); @@ -352,6 +352,20 @@ class Master_info : public Slave_reporting_capability ACK from slave, or if delay_master is enabled. 
*/ int semi_ack; + /* + The flag has replicate_same_server_id semantics and is raised to accept + a same-server-id event group by the gtid strict mode semisync slave. + Own server-id events can normally appear as result of EITHER + A. this server semisync (failover to) slave crash-recovery: + the transaction was created on this server then being master, + got replicated elsewhere right before the crash before commit, + and finally at recovery the transaction gets evicted from the + server's binlog and its gtid (slave) state; OR + B. in a general circular configuration and then when a recieved (returned + to slave) gtid exists in the server's binlog. Then, in gtid strict mode, + it must be ignored similarly to the replicate-same-server-id rule. + */ + bool do_accept_own_server_id; }; int init_master_info(Master_info* mi, const char* master_info_fname, diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 8409a9cd89c..5e32565cca7 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -803,6 +803,7 @@ do_retry: mysql_mutex_lock(&rli->data_lock); ++rli->retried_trans; + ++rpt->last_trans_retry_count; statistic_increment(slave_retried_transactions, LOCK_status); mysql_mutex_unlock(&rli->data_lock); @@ -1103,6 +1104,11 @@ handle_rpl_parallel_thread(void *arg) mysql_mutex_lock(&rpt->LOCK_rpl_thread); rpt->thd= thd; + PSI_thread *psi= PSI_CALL_get_thread(); + PSI_CALL_set_thread_os_id(psi); + PSI_CALL_set_thread_THD(psi, thd); + PSI_CALL_set_thread_id(psi, thd->thread_id); + rpt->thd->set_psi(psi); while (rpt->delay_start) mysql_cond_wait(&rpt->COND_rpl_thread, &rpt->LOCK_rpl_thread); @@ -1124,6 +1130,7 @@ handle_rpl_parallel_thread(void *arg) uint wait_count= 0; rpl_parallel_thread::queued_event *qev, *next_qev; + rpt->start_time_tracker(); thd->ENTER_COND(&rpt->COND_rpl_thread, &rpt->LOCK_rpl_thread, &stage_waiting_for_work_from_sql_thread, &old_stage); /* @@ -1147,6 +1154,7 @@ handle_rpl_parallel_thread(void *arg) } rpt->dequeue1(events); 
thd->EXIT_COND(&old_stage); + rpt->add_to_worker_idle_time_and_reset(); more_events: for (qev= events; qev; qev= next_qev) @@ -1192,6 +1200,13 @@ handle_rpl_parallel_thread(void *arg) /* Handle a new event group, which will be initiated by a GTID event. */ if ((event_type= qev->ev->get_type_code()) == GTID_EVENT) { + rpt->last_trans_retry_count= 0; + rpt->last_seen_gtid= rgi->current_gtid; + rpt->channel_name_length= (uint)rgi->rli->mi->connection_name.length; + if (rpt->channel_name_length) + memcpy(rpt->channel_name, rgi->rli->mi->connection_name.str, + rgi->rli->mi->connection_name.length); + bool did_enter_cond= false; PSI_stage_info old_stage; @@ -1743,6 +1758,7 @@ int rpl_parallel_activate_pool(rpl_parallel_thread_pool *pool) { int rc= 0; + struct pool_bkp_for_pfs* bkp= &pool->pfs_bkp; if ((rc= pool_mark_busy(pool, current_thd))) return rc; // killed @@ -1752,6 +1768,23 @@ rpl_parallel_activate_pool(rpl_parallel_thread_pool *pool) pool_mark_not_busy(pool); rc= rpl_parallel_change_thread_count(pool, opt_slave_parallel_threads, 0); + if (!rc) + { + if (pool->count) + { + if (bkp->inited) + { + if (bkp->count != pool->count) + { + bkp->destroy(); + bkp->init(pool->count); + } + } + else + bkp->init(pool->count); + } + } + } else { @@ -2009,8 +2042,16 @@ rpl_parallel_thread::loc_free_gco(group_commit_orderer *gco) } +rpl_parallel_thread::rpl_parallel_thread() + : channel_name_length(0), last_error_number(0), last_error_timestamp(0), + worker_idle_time(0), last_trans_retry_count(0), start_time(0) +{ +} + + rpl_parallel_thread_pool::rpl_parallel_thread_pool() - : threads(0), free_list(0), count(0), inited(false), busy(false) + : threads(0), free_list(0), count(0), inited(false), busy(false), + pfs_bkp{0, false, NULL} { } @@ -2041,6 +2082,7 @@ void rpl_parallel_thread_pool::destroy() { deactivate(); + pfs_bkp.destroy(); destroy_cond_mutex(); } @@ -2109,6 +2151,37 @@ rpl_parallel_thread_pool::release_thread(rpl_parallel_thread *rpt) 
mysql_mutex_unlock(&LOCK_rpl_thread_pool); } +void +rpl_parallel_thread_pool::copy_pool_for_pfs(Relay_log_info *rli) +{ + if (pfs_bkp.inited) + { + for(uint i=0; i<count;i++) + { + rpl_parallel_thread *rpt, *pfs_rpt; + rpt= threads[i]; + pfs_rpt= pfs_bkp.rpl_thread_arr[i]; + if (rpt->channel_name_length) + { + pfs_rpt->channel_name_length= rpt->channel_name_length; + strmake(pfs_rpt->channel_name, rpt->channel_name, + rpt->channel_name_length); + } + pfs_rpt->thd= rpt->thd; + pfs_rpt->last_seen_gtid= rpt->last_seen_gtid; + if (rli->err_thread_id && rpt->thd->thread_id == rli->err_thread_id) + { + pfs_rpt->last_error_number= rli->last_error().number; + strmake(pfs_rpt->last_error_message, + rli->last_error().message, sizeof(rli->last_error().message)); + pfs_rpt->last_error_timestamp= rli->last_error().skr*1000000; + } + pfs_rpt->running= false; + pfs_rpt->worker_idle_time= rpt->get_worker_idle_time(); + pfs_rpt->last_trans_retry_count= rpt->last_trans_retry_count; + } + } +} /* Obtain a worker thread that we can queue an event to. 
@@ -2383,6 +2456,7 @@ rpl_parallel::wait_for_done(THD *thd, Relay_log_info *rli) };); #endif + global_rpl_thread_pool.copy_pool_for_pfs(rli); for (i= 0; i < domain_hash.records; ++i) { e= (struct rpl_parallel_entry *)my_hash_element(&domain_hash, i); diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h index b88e77d5427..d3c46301ff8 100644 --- a/sql/rpl_parallel.h +++ b/sql/rpl_parallel.h @@ -7,6 +7,7 @@ struct rpl_parallel; struct rpl_parallel_entry; struct rpl_parallel_thread_pool; +extern struct rpl_parallel_thread_pool pool_bkp_for_pfs; class Relay_log_info; struct inuse_relaylog; @@ -161,6 +162,35 @@ struct rpl_parallel_thread { inuse_relaylog *accumulated_ir_last; uint64 accumulated_ir_count; + char channel_name[MAX_CONNECTION_NAME]; + uint channel_name_length; + rpl_gtid last_seen_gtid; + int last_error_number; + char last_error_message[MAX_SLAVE_ERRMSG]; + ulonglong last_error_timestamp; + ulonglong worker_idle_time; + ulong last_trans_retry_count; + ulonglong start_time; + void start_time_tracker() + { + start_time= microsecond_interval_timer(); + } + ulonglong compute_time_lapsed() + { + return (ulonglong)((microsecond_interval_timer() - start_time) / 1000000.0); + } + void add_to_worker_idle_time_and_reset() + { + worker_idle_time+= compute_time_lapsed(); + start_time=0; + } + ulonglong get_worker_idle_time() + { + if (start_time) + return (worker_idle_time + compute_time_lapsed()); + else + return worker_idle_time; + } void enqueue(queued_event *qev) { if (last_in_queue) @@ -224,9 +254,42 @@ struct rpl_parallel_thread { void batch_free(); /* Update inuse_relaylog refcounts with what we have accumulated so far. 
*/ void inuse_relaylog_refcount_update(); + rpl_parallel_thread(); }; +struct pool_bkp_for_pfs{ + uint32 count; + bool inited; + struct rpl_parallel_thread **rpl_thread_arr; + void init(uint32 thd_count) + { + DBUG_ASSERT(thd_count); + rpl_thread_arr= (rpl_parallel_thread **) + my_malloc(PSI_INSTRUMENT_ME, + thd_count * sizeof(rpl_parallel_thread*), + MYF(MY_WME | MY_ZEROFILL)); + for (uint i=0; i<thd_count; i++) + rpl_thread_arr[i]= (rpl_parallel_thread *) + my_malloc(PSI_INSTRUMENT_ME, sizeof(rpl_parallel_thread), + MYF(MY_WME | MY_ZEROFILL)); + count= thd_count; + inited= true; + } + + void destroy() + { + if (inited) + { + for (uint i=0; i<count; i++) + my_free(rpl_thread_arr[i]); + + my_free(rpl_thread_arr); + rpl_thread_arr= NULL; + } + } +}; + struct rpl_parallel_thread_pool { struct rpl_parallel_thread **threads; struct rpl_parallel_thread *free_list; @@ -240,8 +303,10 @@ struct rpl_parallel_thread_pool { is in progress. */ bool busy; + struct pool_bkp_for_pfs pfs_bkp; rpl_parallel_thread_pool(); + void copy_pool_for_pfs(Relay_log_info *rli); int init(uint32 size); void destroy(); void deactivate(); diff --git a/sql/rpl_reporting.cc b/sql/rpl_reporting.cc index aa69168d44c..d04f18c9c44 100644 --- a/sql/rpl_reporting.cc +++ b/sql/rpl_reporting.cc @@ -22,7 +22,7 @@ #include "sql_class.h" Slave_reporting_capability::Slave_reporting_capability(char const *thread_name) - : m_thread_name(thread_name) + : err_thread_id(0), m_thread_name(thread_name) { mysql_mutex_init(key_mutex_slave_reporting_capability_err_lock, &err_lock, MY_MUTEX_INIT_FAST); @@ -51,6 +51,7 @@ Slave_reporting_capability::report(loglevel level, int err_code, pbuff= m_last_error.message; pbuffsize= sizeof(m_last_error.message); m_last_error.number = err_code; + m_last_error.update_timestamp(); report_function= sql_print_error; break; case WARNING_LEVEL: @@ -69,6 +70,7 @@ Slave_reporting_capability::report(loglevel level, int err_code, mysql_mutex_unlock(&err_lock); va_end(args); + err_thread_id= 
current_thd->thread_id; /* If the msg string ends with '.', do not add a ',' it would be ugly */ report_function("%s %s: %s%s %s%sInternal MariaDB error code: %d", diff --git a/sql/rpl_reporting.h b/sql/rpl_reporting.h index 62b934c1527..46a71ff5ad6 100644 --- a/sql/rpl_reporting.h +++ b/sql/rpl_reporting.h @@ -41,6 +41,7 @@ public: @param thread_name Printable name of the slave thread that is reporting. */ Slave_reporting_capability(char const *thread_name); + mutable my_thread_id err_thread_id; /** Writes a message and, if it's an error message, to Last_Error @@ -81,12 +82,35 @@ public: { number= 0; message[0]= '\0'; + timestamp[0]= '\0'; + } + void update_timestamp() + { + struct tm tm_tmp; + struct tm *start; + + skr= my_time(0); + localtime_r(&skr, &tm_tmp); + start=&tm_tmp; + + sprintf(timestamp, "%02d%02d%02d %02d:%02d:%02d", + start->tm_year % 100, + start->tm_mon+1, + start->tm_mday, + start->tm_hour, + start->tm_min, + start->tm_sec); + timestamp[15]= '\0'; } /** Error code */ uint32 number; /** Error message */ char message[MAX_SLAVE_ERRMSG]; + /** Error timestamp as string */ + char timestamp[64]; + /** Error timestamp as time_t variable. 
Used in performance_schema */ + time_t skr; }; Error const& last_error() const { return m_last_error; } diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 35d84792fcb..064299c6822 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -61,7 +61,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery, const char* thread_name) gtid_skip_flag(GTID_SKIP_NOT), inited(0), abort_slave(0), stop_for_until(0), slave_running(MYSQL_SLAVE_NOT_RUN), until_condition(UNTIL_NONE), until_log_pos(0), retried_trans(0), executed_entries(0), - sql_delay(0), sql_delay_end(0), + last_trans_retry_count(0), sql_delay(0), sql_delay_end(0), until_relay_log_names_defer(false), m_flags(0) { @@ -87,6 +87,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery, const char* thread_name) max_relay_log_size= global_system_variables.max_relay_log_size; bzero((char*) &info_file, sizeof(info_file)); bzero((char*) &cache_buf, sizeof(cache_buf)); + bzero(&last_seen_gtid, sizeof(last_seen_gtid)); mysql_mutex_init(key_relay_log_info_run_lock, &run_lock, MY_MUTEX_INIT_FAST); mysql_mutex_init(key_relay_log_info_data_lock, &data_lock, MY_MUTEX_INIT_FAST); @@ -1710,7 +1711,8 @@ scan_all_gtid_slave_pos_table(THD *thd, int (*cb)(THD *, LEX_CSTRING *, void *), else { size_t i; - Dynamic_array<LEX_CSTRING*> files(dirp->number_of_files); + Dynamic_array<LEX_CSTRING*> files(PSI_INSTRUMENT_MEM, + dirp->number_of_files); Discovered_table_list tl(thd, &files); int err; diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index 85e31ef8187..cc807852bf2 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -539,7 +539,8 @@ public: int32 get_sql_delay() { return sql_delay; } void set_sql_delay(int32 _sql_delay) { sql_delay= _sql_delay; } time_t get_sql_delay_end() { return sql_delay_end; } - + rpl_gtid last_seen_gtid; + ulong last_trans_retry_count; private: diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 7c347eba51f..9ea8bb3b822 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -288,8 +288,15 @@ 
table_def::~table_def() @return TRUE if test fails FALSE as success + + @notes + event_buf will have same values on return. However during the process of + caluclating the checksum, it's temporary changed. Because of this the + event_buf argument is not a pointer to const. + */ -bool event_checksum_test(uchar *event_buf, ulong event_len, enum enum_binlog_checksum_alg alg) +bool event_checksum_test(uchar *event_buf, ulong event_len, + enum enum_binlog_checksum_alg alg) { bool res= FALSE; uint16 flags= 0; // to store in FD's buffer flags orig value diff --git a/sql/scheduler.cc b/sql/scheduler.cc index 7380b134f13..7261c5f39d7 100644 --- a/sql/scheduler.cc +++ b/sql/scheduler.cc @@ -131,11 +131,12 @@ void handle_connection_in_main_thread(CONNECT *connect) Initialize scheduler for --thread-handling=no-threads */ -void one_thread_scheduler(scheduler_functions *func) +void one_thread_scheduler(scheduler_functions *func, + Atomic_counter<uint> *arg_connection_count) { scheduler_init(); func->max_threads= 1; func->max_connections= &max_connections; - func->connection_count= &connection_count; + func->connection_count= arg_connection_count; func->add_connection= handle_connection_in_main_thread; } diff --git a/sql/scheduler.h b/sql/scheduler.h index ebf8d6e9e64..c2686aad21c 100644 --- a/sql/scheduler.h +++ b/sql/scheduler.h @@ -40,6 +40,8 @@ struct scheduler_functions void (*thd_wait_end)(THD *thd); void (*post_kill_notification)(THD *thd); void (*end)(void); + /** resume previous unfinished command (threadpool only)*/ + void (*thd_resume)(THD* thd); }; @@ -72,7 +74,7 @@ enum scheduler_types void one_thread_per_connection_scheduler(scheduler_functions *func, ulong *arg_max_connections, Atomic_counter<uint> *arg_connection_count); -void one_thread_scheduler(scheduler_functions *func); +void one_thread_scheduler(scheduler_functions *func, Atomic_counter<uint> *arg_connection_count); extern void scheduler_init(); extern void post_kill_notification(THD *); diff --git 
a/sql/semisync_master_ack_receiver.h b/sql/semisync_master_ack_receiver.h index 138f7b5aeed..b75cb7b76cb 100644 --- a/sql/semisync_master_ack_receiver.h +++ b/sql/semisync_master_ack_receiver.h @@ -183,7 +183,7 @@ public: bool listen_on_sockets() { - /* Reinitialze the fds with active fds before calling select */ + /* Reinitialize the fds with active fds before calling select */ m_fds= m_init_fds; struct timeval tv= {1,0}; /* select requires max fd + 1 for the first argument */ diff --git a/sql/semisync_slave.cc b/sql/semisync_slave.cc index 8fe3ba3657d..3e7578b6a53 100644 --- a/sql/semisync_slave.cc +++ b/sql/semisync_slave.cc @@ -52,10 +52,10 @@ int Repl_semi_sync_slave::init_object() return result; } -int Repl_semi_sync_slave::slave_read_sync_header(const char *header, +int Repl_semi_sync_slave::slave_read_sync_header(const uchar *header, unsigned long total_len, int *semi_flags, - const char **payload, + const uchar **payload, unsigned long *payload_len) { int read_res = 0; @@ -64,7 +64,7 @@ int Repl_semi_sync_slave::slave_read_sync_header(const char *header, if (rpl_semi_sync_slave_status) { if (DBUG_EVALUATE_IF("semislave_corrupt_log", 0, 1) - && (unsigned char)(header[0]) == k_packet_magic_num) + && header[0] == k_packet_magic_num) { semi_sync_need_reply = (header[1] & k_packet_flag_sync); *payload_len = total_len - 2; diff --git a/sql/semisync_slave.h b/sql/semisync_slave.h index 35f93476792..f0b8eceeebf 100644 --- a/sql/semisync_slave.h +++ b/sql/semisync_slave.h @@ -79,9 +79,9 @@ public: * Return: * 0: success; non-zero: error */ - int slave_read_sync_header(const char *header, unsigned long total_len, + int slave_read_sync_header(const uchar *header, unsigned long total_len, int *semi_flags, - const char **payload, unsigned long *payload_len); + const uchar **payload, unsigned long *payload_len); /* A slave replies to the master indicating its replication process. 
It * indicates that the slave has received all events before the specified diff --git a/sql/service_wsrep.cc b/sql/service_wsrep.cc index 1731eeec667..43183ff7595 100644 --- a/sql/service_wsrep.cc +++ b/sql/service_wsrep.cc @@ -1,4 +1,4 @@ -/* Copyright 2018-2021 Codership Oy <info@codership.com> +/* Copyright 2018 Codership Oy <info@codership.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -29,14 +29,12 @@ extern "C" my_bool wsrep_on(const THD *thd) extern "C" void wsrep_thd_LOCK(const THD *thd) { - mysql_mutex_lock(&thd->LOCK_thd_kill); mysql_mutex_lock(&thd->LOCK_thd_data); } extern "C" void wsrep_thd_UNLOCK(const THD *thd) { mysql_mutex_unlock(&thd->LOCK_thd_data); - mysql_mutex_unlock(&thd->LOCK_thd_kill); } extern "C" void wsrep_thd_kill_LOCK(const THD *thd) @@ -190,8 +188,6 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, DBUG_ASSERT(wsrep_thd_is_SR(victim_thd)); if (!victim_thd || !wsrep_on(bf_thd)) return; - wsrep_thd_LOCK(victim_thd); - WSREP_DEBUG("handle rollback, for deadlock: thd %llu trx_id %" PRIu64 " frags %zu conf %s", victim_thd->thread_id, victim_thd->wsrep_trx_id(), @@ -212,9 +208,6 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, { wsrep_thd_self_abort(victim_thd); } - - wsrep_thd_UNLOCK(victim_thd); - if (bf_thd) { wsrep_store_threadvars(bf_thd); @@ -225,44 +218,31 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, my_bool signal) { mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); -#ifdef ENABLED_DEBUG_SYNC - DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", - { - const char act[]= - "now " - "SIGNAL sync.before_wsrep_thd_abort_reached " - "WAIT_FOR signal.before_wsrep_thd_abort"; - DBUG_ASSERT(!debug_sync_set_action(bf_thd, - STRING_WITH_LEN(act))); - };); -#endif + mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_data); my_bool ret= 
wsrep_bf_abort(bf_thd, victim_thd); /* Send awake signal if victim was BF aborted or does not have wsrep on. Note that this should never interrupt RSU as RSU has paused the provider. */ - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); - if ((ret || !wsrep_on(victim_thd)) && signal) { + mysql_mutex_lock(&victim_thd->LOCK_thd_data); + if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id) { WSREP_DEBUG("victim is killed already by %llu, skipping awake", victim_thd->wsrep_aborter); - wsrep_thd_UNLOCK(victim_thd); + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); return false; } victim_thd->wsrep_aborter= bf_thd->thread_id; victim_thd->awake_no_mutex(KILL_QUERY); + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); + } else { + WSREP_DEBUG("wsrep_thd_bf_abort skipped awake"); } - else - WSREP_DEBUG("wsrep_thd_bf_abort skipped awake for %llu", thd_get_thread_id(victim_thd)); - - wsrep_thd_UNLOCK(victim_thd); return ret; } @@ -287,6 +267,8 @@ extern "C" my_bool wsrep_thd_order_before(const THD *left, const THD *right) extern "C" my_bool wsrep_thd_is_aborting(const MYSQL_THD thd) { + mysql_mutex_assert_owner(&thd->LOCK_thd_data); + const wsrep::client_state& cs(thd->wsrep_cs()); const enum wsrep::transaction::state tx_state(cs.transaction().state()); switch (tx_state) @@ -300,6 +282,8 @@ extern "C" my_bool wsrep_thd_is_aborting(const MYSQL_THD thd) default: return false; } + + return false; } static inline enum wsrep::key::type diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc index de82d8be90c..3eacdc03b50 100644 --- a/sql/session_tracker.cc +++ b/sql/session_tracker.cc @@ -996,7 +996,7 @@ enum_tx_state Transaction_state_tracker::calc_trx_state(THD *thd, bool has_trx) { enum_tx_state s; - bool read= (l <= TL_READ_NO_INSERT); + bool read= (l < TL_FIRST_WRITE); if (read) s= has_trx ? 
TX_READ_TRX : TX_READ_UNSAFE; diff --git a/sql/set_var.cc b/sql/set_var.cc index 861db2c1b8d..3dd97527433 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -533,7 +533,6 @@ static my_old_conv old_conv[]= CHARSET_INFO *get_old_charset_by_name(const char *name) { my_old_conv *conv; - for (conv= old_conv; conv->old_name; conv++) { if (!my_strcasecmp(&my_charset_latin1, name, conv->old_name)) @@ -826,15 +825,20 @@ int set_var::check(THD *thd) */ int set_var::light_check(THD *thd) { + if (var->is_readonly()) + { + my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), var->name.str, "read only"); + return -1; + } if (var->check_type(type)) { int err= type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE; my_error(err, MYF(0), var->name.str); return -1; } - if (type == OPT_GLOBAL && - check_global_access(thd, PRIV_SET_GLOBAL_SYSTEM_VARIABLE)) - return 1; + + if (type == OPT_GLOBAL && var->on_check_access_global(thd)) + return 1; if (value && value->fix_fields_if_needed_for_scalar(thd, &value)) return -1; @@ -1034,7 +1038,7 @@ int set_var_collation_client::check(THD *thd) if (!is_supported_parser_charset(character_set_client)) { my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client", - character_set_client->csname); + character_set_client->cs_name.str); return 1; } return 0; @@ -1089,7 +1093,7 @@ int fill_sysvars(THD *thd, TABLE_LIST *tables, COND *cond) StringBuffer<STRING_BUFFER_USUAL_SIZE> strbuf(scs); const char *wild= thd->lex->wild ? 
thd->lex->wild->ptr() : 0; Field **fields=tables->table->field; - bool has_file_acl= !check_access(thd, FILE_ACL, any_db, NULL, NULL, 0, 1); + bool has_file_acl= !check_access(thd, FILE_ACL, any_db.str, NULL,NULL,0,1); DBUG_ASSERT(tables->table->in_use == thd); @@ -1219,12 +1223,14 @@ int fill_sysvars(THD *thd, TABLE_LIST *tables, COND *cond) { uint i; strbuf.length(0); - for (i=0; i + 1 < tl->count; i++) + for (i=0; i < tl->count; i++) { - strbuf.append(tl->type_names[i]); + const char *name= tl->type_names[i]; + strbuf.append(name, strlen(name)); strbuf.append(','); } - strbuf.append(tl->type_names[i]); + if (!strbuf.is_empty()) + strbuf.chop(); fields[11]->set_notnull(); fields[11]->store(strbuf.ptr(), strbuf.length(), scs); } diff --git a/sql/share/charsets/Index.xml b/sql/share/charsets/Index.xml index 058f48ace15..cec3bfcf7be 100644 --- a/sql/share/charsets/Index.xml +++ b/sql/share/charsets/Index.xml @@ -528,15 +528,15 @@ To make maintaining easier please: <collation name="armscii8_nopad_bin" id="1088" flag="binary" flag="nopad"/> </charset> -<charset name="utf8"> +<charset name="utf8mb3"> <family>Unicode</family> <description>UTF-8 Unicode</description> <alias>utf-8</alias> - <collation name="utf8_general_ci" id="33"> + <collation name="utf8mb3_general_ci" id="33"> <flag>primary</flag> <flag>compiled</flag> </collation> - <collation name="utf8_bin" id="83"> + <collation name="utf8mb3_bin" id="83"> <flag>binary</flag> <flag>compiled</flag> </collation> diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 984c48c30db..53814b16578 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -1,4 +1,4 @@ -languages bulgarian=bgn cp1251, chinese=chi gbk, czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hindi=hindi utf8, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny 
latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u; +languages bulgarian=bgn cp1251, chinese=chi gbk, czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hindi=hindi utf8mb3, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u; default-language eng @@ -52,7 +52,7 @@ ER_YES rus "ДА" serbian "DA" slo "Áno" - spa "SI" + spa "SÍ" ukr "ТАК" ER_CANT_CREATE_FILE chi "无法创建文件'%-.200s'(错误号码:%M)" @@ -77,7 +77,7 @@ ER_CANT_CREATE_FILE rus "Невозможно создать файл '%-.200s' (ошибка: %M)" serbian "Ne mogu da kreiram file '%-.200s' (errno: %M)" slo "Nemôžem vytvoriť súbor '%-.200s' (chybový kód: %M)" - spa "No puedo crear archivo '%-.200s' (Error: %M)" + spa "No puedo crear el fichero/archivo '%-.200s' (Error: %M)" swe "Kan inte skapa filen '%-.200s' (Felkod: %M)" ukr "Не можу створити файл '%-.200s' (помилка: %M)" ER_CANT_CREATE_TABLE @@ -103,7 +103,7 @@ ER_CANT_CREATE_TABLE rus "Невозможно создать таблицу %`s.%`s (ошибка: %M)" serbian "Ne mogu da kreiram tabelu %`s.%`s (errno: %M)" slo "Nemôžem vytvoriť tabuľku %`s.%`s (chybový kód: %M)" - spa "No puedo crear tabla %`s.%`s (Error: %M)" + spa "No puedo crear la tabla %`s.%`s (Error: %M)" swe "Kan inte skapa tabellen %`s.%`s (Felkod: %M)" ukr "Не можу створити таблицю %`s.%`s (помилка: %M)" ER_CANT_CREATE_DB @@ -129,7 +129,7 @@ ER_CANT_CREATE_DB rus "Невозможно создать базу данных '%-.192s' (ошибка: %M)" serbian "Ne mogu da kreiram bazu '%-.192s' (errno: %M)" slo "Nemôžem vytvoriť databázu '%-.192s' 
(chybový kód: %M)" - spa "No puedo crear base de datos '%-.192s' (Error: %M)" + spa "No puedo crear la base de datos '%-.192s' (Error: %M)" swe "Kan inte skapa databasen '%-.192s' (Felkod: %M)" ukr "Не можу створити базу данних '%-.192s' (помилка: %M)" ER_DB_CREATE_EXISTS @@ -155,7 +155,7 @@ ER_DB_CREATE_EXISTS rus "Невозможно создать базу данных '%-.192s'. База данных уже существует" serbian "Ne mogu da kreiram bazu '%-.192s'; baza već postoji" slo "Nemôžem vytvoriť databázu '%-.192s'; databáza existuje" - spa "No puedo crear base de datos '%-.192s'; la base de datos ya existe" + spa "No puedo crear la base de datos '%-.192s'; la base de datos ya existe" swe "Databasen '%-.192s' existerar redan" ukr "Не можу створити базу данних '%-.192s'. База данних існує" ER_DB_DROP_EXISTS @@ -181,7 +181,7 @@ ER_DB_DROP_EXISTS rus "Невозможно удалить базу данных '%-.192s'. Такой базы данных нет" serbian "Ne mogu da izbrišem bazu '%-.192s'; baza ne postoji" slo "Nemôžem zmazať databázu '%-.192s'; databáza neexistuje" - spa "No puedo eliminar base de datos '%-.192s'; la base de datos no existe" + spa "No puedo eliminar la base de datos '%-.192s'; la base de datos no existe" swe "Kan inte radera databasen '%-.192s'; databasen finns inte" ukr "Не можу видалити базу данних '%-.192s'. База данних не існує" ER_DB_DROP_DELETE @@ -207,7 +207,7 @@ ER_DB_DROP_DELETE rus "Ошибка при удалении базы данных (невозможно удалить '%-.192s', ошибка: %M)" serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.192s', errno: %M)" slo "Chyba pri mazaní databázy (nemôžem zmazať '%-.192s', chybový kód: %M)" - spa "Error eliminando la base de datos(no puedo borrar '%-.192s', error %M)" + spa "Error eliminando la base de datos (no puedo borrar '%-.192s', error %M)" swe "Fel vid radering av databasen (Kan inte radera '%-.192s'. 
Felkod: %M)" ukr "Не можу видалити базу данних (Не можу видалити '%-.192s', помилка: %M)" ER_DB_DROP_RMDIR @@ -233,7 +233,7 @@ ER_DB_DROP_RMDIR rus "Невозможно удалить базу данных (невозможно удалить каталог '%-.192s', ошибка: %M)" serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.192s', errno: %M)" slo "Chyba pri mazaní databázy (nemôžem vymazať adresár '%-.192s', chybový kód: %M)" - spa "Error eliminando la base de datos (No puedo borrar directorio '%-.192s', error %M)" + spa "Error eliminando la base de datos (No puedo borrar el directorio '%-.192s', error %M)" swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.192s'. Felkod: %M)" ukr "Не можу видалити базу данних (Не можу видалити теку '%-.192s', помилка: %M)" ER_CANT_DELETE_FILE @@ -337,7 +337,7 @@ ER_CANT_GET_WD rus "Невозможно определить рабочий каталог (ошибка: %M)" serbian "Ne mogu da dobijem trenutni direktorijum (errno: %M)" slo "Nemôžem zistiť pracovný adresár (chybový kód: %M)" - spa "No puedo acceder al directorio (Error: %M)" + spa "No puedo obtener directorio de trabajo (Error: %M)" swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %M)" ukr "Не можу визначити робочу теку (помилка: %M)" ER_CANT_LOCK @@ -363,7 +363,7 @@ ER_CANT_LOCK rus "Невозможно поставить блокировку на файле (ошибка: %M)" serbian "Ne mogu da zaključam file (errno: %M)" slo "Nemôžem zamknúť súbor (chybový kód: %M)" - spa "No puedo bloquear archivo: (Error: %M)" + spa "No puedo bloquear fichero/archivo: (Error: %M)" swe "Kan inte låsa filen. 
(Felkod: %M)" ukr "Не можу заблокувати файл (помилка: %M)" ER_CANT_OPEN_FILE @@ -389,7 +389,7 @@ ER_CANT_OPEN_FILE rus "Невозможно открыть файл: '%-.200s' (ошибка: %M)" serbian "Ne mogu da otvorim file: '%-.200s' (errno: %M)" slo "Nemôžem otvoriť súbor: '%-.200s' (chybový kód: %M)" - spa "No puedo abrir archivo: '%-.200s' (Error: %M)" + spa "No puedo abrir el fichero/archivo: '%-.200s' (Error: %M)" swe "Kan inte använda '%-.200s' (Felkod: %M)" ukr "Не можу відкрити файл: '%-.200s' (помилка: %M)" ER_FILE_NOT_FOUND @@ -415,7 +415,7 @@ ER_FILE_NOT_FOUND rus "Невозможно найти файл: '%-.200s' (ошибка: %M)" serbian "Ne mogu da pronađem file: '%-.200s' (errno: %M)" slo "Nemôžem nájsť súbor: '%-.200s' (chybový kód: %M)" - spa "No puedo encontrar archivo: '%-.200s' (Error: %M)" + spa "No puedo hallar el fichero/archivo: '%-.200s' (Error: %M)" swe "Hittar inte filen '%-.200s' (Felkod: %M)" ukr "Не можу знайти файл: '%-.200s' (помилка: %M)" ER_CANT_READ_DIR @@ -467,7 +467,7 @@ ER_CANT_SET_WD rus "Невозможно перейти в каталог '%-.192s' (ошибка: %M)" serbian "Ne mogu da promenim direktorijum na '%-.192s' (errno: %M)" slo "Nemôžem vojsť do adresára '%-.192s' (chybový kód: %M)" - spa "No puedo cambiar al directorio de '%-.192s' (Error: %M)" + spa "No puedo cambiar al directorio a '%-.192s' (Error: %M)" swe "Kan inte byta till '%-.192s' (Felkod: %M)" ukr "Не можу перейти у теку '%-.192s' (помилка: %M)" ER_CHECKREAD @@ -519,7 +519,7 @@ ER_DISK_FULL rus "Диск заполнен. (%s). Ожидаем, пока кто-то не уберет после себя мусор... (ошибка: %M)" serbian "Disk je pun (%s). Čekam nekoga da dođe i oslobodi nešto mesta... (errno: %M)" slo "Disk je plný (%s), čakám na uvoľnenie miesta... (chybový kód: %M)" - spa "Disco lleno (%s). Esperando para que se libere algo de espacio... (Error: %M)" + spa "Disco lleno (%s). Esperando a que alguien libere algo de espacio... (Error: %M)" swe "Disken är full (%s). Väntar tills det finns ledigt utrymme... (Felkod: %M)" ukr "Диск заповнений (%s). 
Вичикую, доки звільниться трохи місця... (помилка: %M)" ER_DUP_KEY 23000 @@ -597,7 +597,7 @@ ER_ERROR_ON_READ rus "Ошибка чтения файла '%-.200s' (ошибка: %M)" serbian "Greška pri čitanju file-a '%-.200s' (errno: %M)" slo "Chyba pri čítaní súboru '%-.200s' (chybový kód: %M)" - spa "Error leyendo el fichero '%-.200s' (Error: %M)" + spa "Error leyendo el fichero/archivo '%-.200s' (Error: %M)" swe "Fick fel vid läsning av '%-.200s' (Felkod %M)" ukr "Не можу прочитати файл '%-.200s' (помилка: %M)" ER_ERROR_ON_RENAME @@ -649,7 +649,7 @@ ER_ERROR_ON_WRITE rus "Ошибка записи в файл '%-.200s' (ошибка: %M)" serbian "Greška pri upisu '%-.200s' (errno: %M)" slo "Chyba pri zápise do súboru '%-.200s' (chybový kód: %M)" - spa "Error escribiendo el archivo '%-.200s' (Error: %M)" + spa "Error escribiendo el fichero/archivo '%-.200s' (Error: %M)" swe "Fick fel vid skrivning till '%-.200s' (Felkod %M)" ukr "Не можу записати файл '%-.200s' (помилка: %M)" ER_FILE_USED @@ -701,7 +701,7 @@ ER_FILSORT_ABORT rus "Сортировка прервана" serbian "Sortiranje je prekinuto" slo "Triedenie prerušené" - spa "Ordeancion cancelada" + spa "Ordenación cancelada" swe "Sorteringen avbruten" ukr "Сортування перервано" ER_FORM_NOT_FOUND @@ -745,7 +745,7 @@ ER_GET_ERRNO por "Obteve erro %M no manipulador de tabelas %s" rum "Eroarea %M obtinuta din handlerul tabelei %s" rus "Получена ошибка %M от обработчика таблиц %s" - spa "Error %M desde el manejador de la tabla %s" + spa "Obtenido error %M desde el motor de almacenaje %s" swe "Fick felkod %M från databashanteraren %s" ukr "Отримано помилку %M від дескриптора таблиці %s" ER_ILLEGAL_HA @@ -754,6 +754,7 @@ ER_ILLEGAL_HA ger "Diese Option gibt es nicht in Speicher-Engine %s für %`s.%`s" hindi "स्टोरेज इंजन %s में यह विकल्प उपलब्ध नहीं है (टेबल: %`s.%`s)" rus "Обработчик %s таблицы %`s.%`s не поддерживает эту возможность" + spa "El motor de almacenaje %s de la tabla %`s.%`s no contiene esta opción" ukr "Дескриптор %s таблиці %`s.%`s не має цієї властивості" 
ER_KEY_NOT_FOUND chi "无法在'%-.192s'中找到记录" @@ -804,7 +805,7 @@ ER_NOT_FORM_FILE rus "Некорректная информация в файле '%-.200s'" serbian "Pogrešna informacija u file-u: '%-.200s'" slo "Nesprávna informácia v súbore: '%-.200s'" - spa "Informacion erronea en el archivo: '%-.200s'" + spa "Información incorrecta en el fichero/archivo: '%-.200s'" swe "Felaktig fil: '%-.200s'" ukr "Хибна інформація у файлі: '%-.200s'" ER_NOT_KEYFILE @@ -830,7 +831,7 @@ ER_NOT_KEYFILE rus "Некорректный индексный файл для таблицы: '%-.200s'. Попробуйте восстановить его" serbian "Pogrešan key file za tabelu: '%-.200s'; probajte da ga ispravite" slo "Nesprávny kľúč pre tabuľku '%-.200s'; pokúste sa ho opraviť" - spa "Clave de archivo erronea para la tabla: '%-.200s'; intente repararlo" + spa "El índice para la tabla: '%-.200s' está corrupto; intente repararlo" swe "Fatalt fel vid hantering av register '%-.200s'; kör en reparation" ukr "Хибний файл ключей для таблиці: '%-.200s'; Спробуйте його відновити" ER_OLD_KEYFILE @@ -856,7 +857,7 @@ ER_OLD_KEYFILE rus "Старый индексный файл для таблицы '%-.192s'; отремонтируйте его!" serbian "Zastareo key file za tabelu '%-.192s'; ispravite ga" slo "Starý kľúčový súbor pre '%-.192s'; opravte ho!" - spa "Clave de archivo antigua para la tabla '%-.192s'; reparelo!" + spa "Clave antigua de fichero/archivo para la tabla '%-.192s'; ¡repárela!" swe "Gammal nyckelfil '%-.192s'; reparera registret" ukr "Старий файл ключей для таблиці '%-.192s'; Відновіть його!" ER_OPEN_AS_READONLY @@ -882,7 +883,7 @@ ER_OPEN_AS_READONLY rus "Таблица '%-.192s' предназначена только для чтения" serbian "Tabelu '%-.192s' je dozvoljeno samo čitati" slo "'%-.192s' is čítať only" - spa "'%-.192s' es de solo lectura" + spa "La tabla '%-.192s' es de sólo lectura" swe "'%-.192s' är skyddad mot förändring" ukr "Таблиця '%-.192s' тільки для читання" ER_OUTOFMEMORY HY001 S1001 @@ -907,7 +908,7 @@ ER_OUTOFMEMORY HY001 S1001 rus "Недостаточно памяти. 
Перезапустите сервер и попробуйте еще раз (нужно %d байт)" serbian "Nema memorije. Restartujte MariaDB server i probajte ponovo (potrebno je %d byte-ova)" slo "Málo pamäti. Reštartujte daemona a skúste znova (je potrebných %d bytov)" - spa "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)" + spa "Memoria insuficiente. Reinicie el servidor e inténtelo otra vez (necesita %d bytes)" swe "Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)" ukr "Брак пам'яті. Рестартуйте сервер та спробуйте знову (потрібно %d байтів)" ER_OUT_OF_SORTMEMORY HY001 S1001 @@ -932,7 +933,7 @@ ER_OUT_OF_SORTMEMORY HY001 S1001 rus "Недостаточно памяти для сортировки. Увеличьте размер буфера сортировки на сервере" serbian "Nema memorije za sortiranje. Povećajte veličinu sort buffer-a MariaDB server-u" slo "Málo pamäti pre triedenie, zvýšte veľkosť triediaceho bufferu" - spa "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion" + spa "Memoria de ordenación insuficiente. Considere el incrementar el tamaño del búfer de ordenación del servidor" swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna" ukr "Брак пам'яті для сортування. Треба збільшити розмір буфера сортування у сервера" ER_UNEXPECTED_EOF @@ -957,7 +958,7 @@ ER_UNEXPECTED_EOF rus "Неожиданный конец файла '%-.192s' (ошибка: %M)" serbian "Neočekivani kraj pri čitanju file-a '%-.192s' (errno: %M)" slo "Neočakávaný koniec súboru pri čítaní '%-.192s' (chybový kód: %M)" - spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.192s' (Error: %M)" + spa "Inesperado fin de fichero/archivo mientras leíamos el fichero/archivo '%-.192s' (Error: %M)" swe "Oväntat filslut vid läsning från '%-.192s' (Felkod: %M)" ukr "Хибний кінець файлу '%-.192s' (помилка: %M)" ER_CON_COUNT_ERROR 08004 @@ -1008,7 +1009,7 @@ ER_OUT_OF_RESOURCES rus "Недостаточно памяти." serbian "Nema memorije." 
slo "Málo miesta-pamäti pre vlákno" - spa "Memoria/espacio de tranpaso insuficiente" + spa "Memoria agotada" swe "Fick slut på minnet." ukr "Брак пам'яті." ER_BAD_HOST_ERROR 08S01 @@ -1034,7 +1035,7 @@ ER_BAD_HOST_ERROR 08S01 rus "Невозможно получить имя хоста для вашего адреса" serbian "Ne mogu da dobijem ime host-a za vašu IP adresu" slo "Nemôžem zistiť meno hostiteľa pre vašu adresu" - spa "No puedo obtener el nombre de maquina de tu direccion" + spa "No puedo obtener el nombre de equipo de la dirección de vd." swe "Kan inte hitta 'hostname' för din adress" ukr "Не можу визначити ім'я хосту для вашої адреси" ER_HANDSHAKE_ERROR 08S01 @@ -1059,7 +1060,7 @@ ER_HANDSHAKE_ERROR 08S01 rus "Некорректное приветствие" serbian "Loš početak komunikacije (handshake)" slo "Chyba pri nadväzovaní spojenia" - spa "Protocolo erroneo" + spa "Mal apretón de manos (handshake)" swe "Fel vid initiering av kommunikationen med klienten" ukr "Невірна установка зв'язку" ER_DBACCESS_DENIED_ERROR 42000 @@ -1084,7 +1085,7 @@ ER_DBACCESS_DENIED_ERROR 42000 rus "Для пользователя '%s'@'%s' доступ к базе данных '%-.192s' закрыт" serbian "Pristup je zabranjen korisniku '%s'@'%s' za bazu '%-.192s'" slo "Zakázaný prístup pre užívateľa: '%s'@'%s' k databázi '%-.192s'" - spa "Acceso negado para usuario: '%s'@'%s' para la base de datos '%-.192s'" + spa "Acceso denegado para usuario: '%s'@'%s' a la base de datos '%-.192s'" swe "Användare '%s'@'%s' är ej berättigad att använda databasen %-.192s" ukr "Доступ заборонено для користувача: '%s'@'%s' до бази данних '%-.192s'" ER_ACCESS_DENIED_ERROR 28000 @@ -1109,7 +1110,7 @@ ER_ACCESS_DENIED_ERROR 28000 rus "Доступ закрыт для пользователя '%s'@'%s' (был использован пароль: %s)" serbian "Pristup je zabranjen korisniku '%s'@'%s' (koristi lozinku: '%s')" slo "Zakázaný prístup pre užívateľa: '%s'@'%s' (použitie hesla: %s)" - spa "Acceso negado para usuario: '%s'@'%s' (Usando clave: %s)" + spa "Acceso denegado para usuario: '%s'@'%s' (Usando contraseña: %s)" swe 
"Användare '%s'@'%s' är ej berättigad att logga in (Använder lösen: %s)" ukr "Доступ заборонено для користувача: '%s'@'%s' (Використано пароль: %s)" ER_NO_DB_ERROR 3D000 @@ -1213,7 +1214,7 @@ ER_BAD_DB_ERROR 42000 rus "Неизвестная база данных '%-.192s'" serbian "Nepoznata baza '%-.192s'" slo "Neznáma databáza '%-.192s'" - spa "Base de datos desconocida '%-.192s'" + spa "Base de datos '%-.192s' desconocida" swe "Okänd databas: '%-.192s'" ukr "Невідома база данних '%-.192s'" ER_TABLE_EXISTS_ERROR 42S01 @@ -1239,7 +1240,7 @@ ER_TABLE_EXISTS_ERROR 42S01 rus "Таблица '%-.192s' уже существует" serbian "Tabela '%-.192s' već postoji" slo "Tabuľka '%-.192s' už existuje" - spa "La tabla '%-.192s' ya existe" + spa "La tabla '%-.192s' ya existe" swe "Tabellen '%-.192s' finns redan" ukr "Таблиця '%-.192s' вже існує" ER_BAD_TABLE_ERROR 42S02 @@ -1265,7 +1266,7 @@ ER_BAD_TABLE_ERROR 42S02 rus "Неизвестная таблица '%-.100T'" serbian "Nepoznata tabela '%-.100T'" slo "Neznáma tabuľka '%-.100T'" - spa "Tabla '%-.100T' desconocida" + spa "Tabla '%-.100T' no reconocida" swe "Okänd tabell '%-.100T'" ukr "Невідома таблиця '%-.100T'" ER_NON_UNIQ_ERROR 23000 @@ -1317,7 +1318,7 @@ ER_SERVER_SHUTDOWN 08S01 rus "Сервер находится в процессе остановки" serbian "Gašenje servera je u toku" slo "Prebieha ukončovanie práce servera" - spa "Desconexion de servidor en proceso" + spa "Desconexión de servidor en proceso" swe "Servern går nu ned" ukr "Завершується работа сервера" ER_BAD_FIELD_ERROR 42S22 S0022 @@ -1343,7 +1344,7 @@ ER_BAD_FIELD_ERROR 42S22 S0022 rus "Неизвестный столбец '%-.192s' в '%-.192s'" serbian "Nepoznata kolona '%-.192s' u '%-.192s'" slo "Neznáme pole '%-.192s' v '%-.192s'" - spa "La columna '%-.192s' en %-.192s es desconocida" + spa "No se reconoce la columna '%-.192s' en %-.192s" swe "Okänd kolumn '%-.192s' i %-.192s" ukr "Невідомий стовбець '%-.192s' у '%-.192s'" ER_WRONG_FIELD_WITH_GROUP 42000 S1009 @@ -1369,7 +1370,7 @@ ER_WRONG_FIELD_WITH_GROUP 42000 S1009 rus "'%-.192s' не 
присутствует в GROUP BY" serbian "Entitet '%-.192s' nije naveden u komandi 'GROUP BY'" slo "Použité '%-.192s' nebolo v 'group by'" - spa "Usado '%-.192s' el cual no esta group by" + spa "'%-.192s' no se encuentra en GROUP BY" swe "'%-.192s' finns inte i GROUP BY" ukr "'%-.192s' не є у GROUP BY" ER_WRONG_GROUP_FIELD 42000 S1009 @@ -1419,7 +1420,7 @@ ER_WRONG_SUM_SELECT 42000 S1009 rus "Выражение содержит групповые функции и столбцы, но не включает GROUP BY. А как вы умудрились получить это сообщение об ошибке?" serbian "Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme" slo "Príkaz obsahuje zároveň funkciu 'sum' a poľa" - spa "El estamento tiene funciones de suma y columnas en el mismo estamento" + spa "La sentencia tiene funciones de suma y columnas en la misma sentencia" swe "Kommandot har både sum functions och enkla funktioner" ukr "У виразі використано підсумовуючі функції поряд з іменами стовбців" ER_WRONG_VALUE_COUNT 21S01 @@ -1444,7 +1445,7 @@ ER_WRONG_VALUE_COUNT 21S01 rus "Количество столбцов не совпадает с количеством значений" serbian "Broj kolona ne odgovara broju vrednosti" slo "Počet polí nezodpovedá zadanej hodnote" - spa "La columna con count no tiene valores para contar" + spa "El contador de columnas no coincide con el contador de valores" swe "Antalet kolumner motsvarar inte antalet värden" ukr "Кількість стовбців не співпадає з кількістю значень" ER_TOO_LONG_IDENT 42000 S1009 @@ -1496,7 +1497,7 @@ ER_DUP_FIELDNAME 42S21 S1009 rus "Дублирующееся имя столбца '%-.192s'" serbian "Duplirano ime kolone '%-.192s'" slo "Opakované meno poľa '%-.192s'" - spa "Nombre de columna duplicado '%-.192s'" + spa "Nombre duplicado de columna '%-.192s'" swe "Kolumnnamn '%-.192s finns flera gånger" ukr "Дублююче ім'я стовбця '%-.192s'" ER_DUP_KEYNAME 42000 S1009 @@ -1522,13 +1523,13 @@ ER_DUP_KEYNAME 42000 S1009 rus "Дублирующееся имя ключа '%-.192s'" serbian "Duplirano ime ključa '%-.192s'" slo "Opakované meno kľúča '%-.192s'" - spa "Nombre de clave duplicado 
'%-.192s'" + spa "Nombre duplicado de clave '%-.192s'" swe "Nyckelnamn '%-.192s' finns flera gånger" ukr "Дублююче ім'я ключа '%-.192s'" # When using this error code, please use ER(ER_DUP_ENTRY_WITH_KEY_NAME) # for the message string. See, for example, code in handler.cc. ER_DUP_ENTRY 23000 S1009 - chi "重复条目'%-.192T'在索引%d" + chi "重复条目'%-.192T'在索引%d" cze "Zdvojený klíč '%-.192T' (číslo klíče %d)" dan "Ens værdier '%-.192T' for indeks %d" eng "Duplicate entry '%-.192T' for key %d" @@ -1576,7 +1577,7 @@ ER_WRONG_FIELD_SPEC 42000 S1009 rus "Некорректный определитель столбца для столбца '%-.192s'" serbian "Pogrešan naziv kolone za kolonu '%-.192s'" slo "Chyba v špecifikácii poľa '%-.192s'" - spa "Especificador de columna erroneo para la columna '%-.192s'" + spa "Especificador de columna incorrecto para la columna '%-.192s'" swe "Felaktigt kolumntyp för kolumn '%-.192s'" ukr "Невірний специфікатор стовбця '%-.192s'" ER_PARSE_ERROR 42000 s1009 @@ -1602,7 +1603,7 @@ ER_PARSE_ERROR 42000 s1009 rus "%s около '%-.80T' на строке %d" serbian "'%s' u iskazu '%-.80T' na liniji %d" slo "%s blízko '%-.80T' na riadku %d" - spa "%s cerca '%-.80T' en la linea %d" + spa "%s cerca de '%-.80T' en la línea %d" swe "%s nära '%-.80T' på rad %d" ukr "%s біля '%-.80T' в строці %d" ER_EMPTY_QUERY 42000 @@ -1628,7 +1629,7 @@ ER_EMPTY_QUERY 42000 rus "Запрос оказался пустым" serbian "Upit je bio prazan" slo "Výsledok požiadavky bol prázdny" - spa "La query estaba vacia" + spa "La consulta (query) estaba vacia" swe "Frågan var tom" ukr "Пустий запит" ER_NONUNIQ_TABLE 42000 S1009 @@ -1654,7 +1655,7 @@ ER_NONUNIQ_TABLE 42000 S1009 rus "Повторяющаяся таблица/псевдоним '%-.192s'" serbian "Tabela ili alias nisu bili jedinstveni: '%-.192s'" slo "Nie jednoznačná tabuľka/alias: '%-.192s'" - spa "Tabla/alias: '%-.192s' es no unica" + spa "La tabla/alias: '%-.192s' no es única" swe "Icke unikt tabell/alias: '%-.192s'" ukr "Неунікальна таблиця/псевдонім: '%-.192s'" ER_INVALID_DEFAULT 42000 S1009 @@ -1680,7 
+1681,7 @@ ER_INVALID_DEFAULT 42000 S1009 rus "Некорректное значение по умолчанию для '%-.192s'" serbian "Loša default vrednost za '%-.192s'" slo "Chybná implicitná hodnota pre '%-.192s'" - spa "Valor por defecto invalido para '%-.192s'" + spa "Valor por defecto inválido para '%-.192s'" swe "Ogiltigt DEFAULT värde för '%-.192s'" ukr "Невірне значення по замовчуванню для '%-.192s'" ER_MULTIPLE_PRI_KEY 42000 S1009 @@ -1706,7 +1707,7 @@ ER_MULTIPLE_PRI_KEY 42000 S1009 rus "Указано несколько первичных ключей" serbian "Definisani višestruki primarni ključevi" slo "Zadefinovaných viac primárnych kľúčov" - spa "Multiples claves primarias definidas" + spa "Múltiples claves primarias definidas" swe "Flera PRIMARY KEY använda" ukr "Первинного ключа визначено неодноразово" ER_TOO_MANY_KEYS 42000 S1009 @@ -1732,7 +1733,7 @@ ER_TOO_MANY_KEYS 42000 S1009 rus "Указано слишком много ключей. Разрешается указывать не более %d ключей" serbian "Navedeno je previše ključeva. Maksimum %d ključeva je dozvoljeno" slo "Zadaných ríliš veľa kľúčov. Najviac %d kľúčov je povolených" - spa "Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas" + spa "Demasiadas claves primarias declaradas. Se permite un máximo de %d claves" swe "För många nycklar använda. Man får ha högst %d nycklar" ukr "Забагато ключів зазначено. Дозволено не більше %d ключів" ER_TOO_MANY_KEY_PARTS 42000 S1009 @@ -1758,7 +1759,7 @@ ER_TOO_MANY_KEY_PARTS 42000 S1009 rus "Указано слишком много частей составного ключа. Разрешается указывать не более %d частей" serbian "Navedeno je previše delova ključa. Maksimum %d delova je dozvoljeno" slo "Zadaných ríliš veľa častí kľúčov. Je povolených najviac %d častí" - spa "Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas" + spa "Demasiadas partes de clave declaradas. Se permite un máximo de %d partes" swe "För många nyckeldelar använda. Man får ha högst %d nyckeldelar" ukr "Забагато частин ключа зазначено. 
Дозволено не більше %d частин" ER_TOO_LONG_KEY 42000 S1009 @@ -1784,7 +1785,7 @@ ER_TOO_LONG_KEY 42000 S1009 rus "Указан слишком длинный ключ. Максимальная длина ключа составляет %d байт" serbian "Navedeni ključ je predug. Maksimalna dužina ključa je %d" slo "Zadaný kľúč je príliš dlhý, najväčšia dĺžka kľúča je %d" - spa "Declaracion de clave demasiado larga. La maxima longitud de clave es %d" + spa "Declaración de clave demasiado larga. La máxima longitud de clave es de %d" swe "För lång nyckel. Högsta tillåtna nyckellängd är %d" ukr "Зазначений ключ задовгий. Найбільша довжина ключа %d байтів" ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 @@ -1810,7 +1811,7 @@ ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 rus "Ключевой столбец '%-.192s' в таблице не существует" serbian "Ključna kolona '%-.192s' ne postoji u tabeli" slo "Kľúčový stĺpec '%-.192s' v tabuľke neexistuje" - spa "La columna clave '%-.192s' no existe en la tabla" + spa "La columna de clave '%-.192s' no existe en la tabla" swe "Nyckelkolumn '%-.192s' finns inte" ukr "Ключовий стовбець '%-.192s' не існує у таблиці" ER_BLOB_USED_AS_KEY 42000 S1009 @@ -1819,6 +1820,7 @@ ER_BLOB_USED_AS_KEY 42000 S1009 ger "BLOB-Feld %`s kann beim %s Tabellen nicht als Schlüssel verwendet werden" hindi "BLOB कॉलम %`s टेबल %s में KEY विनिर्देश में इस्तेमाल नहीं किया जा सकता" rus "Столбец типа BLOB %`s не может быть использован как значение ключа в %s таблице" + spa "La columna BLOB %`s no se puede usar en la especificación de clave en la tabla %s" ukr "BLOB стовбець %`s не може бути використаний у визначенні ключа в %s таблиці" ER_TOO_BIG_FIELDLENGTH 42000 S1009 chi "数据太长超过列容量 '%-.192s' (最长 = %lu); 用 BLOB 或 TEXT 替代" @@ -1843,7 +1845,7 @@ ER_TOO_BIG_FIELDLENGTH 42000 S1009 rus "Слишком большая длина столбца '%-.192s' (максимум = %lu). Используйте тип BLOB или TEXT вместо текущего" serbian "Previše podataka za kolonu '%-.192s' (maksimum je %lu). Upotrebite BLOB polje" slo "Príliš veľká dĺžka pre pole '%-.192s' (maximum = %lu). 
Použite BLOB" - spa "Longitud de columna demasiado grande para la columna '%-.192s' (maximo = %lu).Usar BLOB en su lugar" + spa "Longitud de columna demasiado grande para la columna '%-.192s' (máximo = %lu). Use BLOB o TEXT en su lugar" swe "För stor kolumnlängd angiven för '%-.192s' (max= %lu). Använd en BLOB instället" ukr "Задовга довжина стовбця '%-.192s' (max = %lu). Використайте тип BLOB" ER_WRONG_AUTO_KEY 42000 S1009 @@ -1869,12 +1871,13 @@ ER_WRONG_AUTO_KEY 42000 S1009 rus "Некорректное определение таблицы: может существовать только один автоинкрементный столбец, и он должен быть определен как ключ" serbian "Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona ključa" slo "Môžete mať iba jedno AUTO pole a to musí byť definované ako kľúč" - spa "Puede ser solamente un campo automatico y este debe ser definido como una clave" + spa "Definición incorrecta de tabla; sólo puede haber una columna automática y ésta debe de ser definida como una clave" swe "Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel" ukr "Хибне визначення таблиці; Може бути лише один автоматичний стовбець, що повинен бути визначений як ключ" ER_BINLOG_CANT_DELETE_GTID_DOMAIN chi "无法删除gtid域. 原因: %s." eng "Could not delete gtid domain. Reason: %s." + spa "No pude borrar el dominio gtid. Motivo: %s." ukr "Не можу видалити домен gtid. Причина: %s." ER_NORMAL_SHUTDOWN chi "%s(%s):正常关闭" @@ -1899,7 +1902,7 @@ ER_NORMAL_SHUTDOWN rus "%s (инициирована пользователем: %s): Корректная остановка" serbian "%s (%s): Normalno gašenje" slo "%s (%s): normálne ukončenie" - spa "%s (%s): Apagado normal" + spa "%s (iniciado por: %s): Apagado normal" swe "%s (%s): Normal avslutning" ukr "%s (%s): Нормальне завершення" ER_GOT_SIGNAL @@ -1925,7 +1928,7 @@ ER_GOT_SIGNAL rus "%s: Получен сигнал %d. Прекращаем!\n" serbian "%s: Dobio signal %d. 
Prekidam!\n" slo "%s: prijatý signál %d, ukončenie (Abort)!\n" - spa "%s: Recibiendo signal %d. Abortando!\n" + spa "%s: Obtenida señal %d. ¡Abortando!\n" swe "%s: Fick signal %d. Avslutar!\n" ukr "%s: Отримано сигнал %d. Перериваюсь!\n" ER_SHUTDOWN_COMPLETE @@ -1977,7 +1980,7 @@ ER_FORCING_CLOSE 08S01 rus "%s: Принудительно закрываем поток %ld пользователя: '%-.48s'\n" serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.48s'\n" slo "%s: násilné ukončenie vlákna %ld užívateľa '%-.48s'\n" - spa "%s: Forzando a cerrar el thread %ld usuario: '%-.48s'\n" + spa "%s: Forzando a cerrar el hilo (thread) %ld usuario: '%-.48s'\n" swe "%s: Stänger av tråd %ld; användare: '%-.48s'\n" ukr "%s: Прискорюю закриття гілки %ld користувача: '%-.48s'\n" ER_IPSOCK_ERROR 08S01 @@ -2029,7 +2032,7 @@ ER_NO_SUCH_INDEX 42S12 S1009 rus "В таблице '%-.192s' нет такого индекса, как в CREATE INDEX. Создайте таблицу заново" serbian "Tabela '%-.192s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo" slo "Tabuľka '%-.192s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova" - spa "La tabla '%-.192s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla" + spa "La tabla '%-.192s' no tiene un índice como el usado en CREATE INDEX. Crea de nuevo la tabla" swe "Tabellen '%-.192s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen" ukr "Таблиця '%-.192s' має індекс, що не співпадає з вказанним у CREATE INDEX. Створіть таблицю знову" ER_WRONG_FIELD_TERMINATORS 42000 S1009 @@ -2055,7 +2058,7 @@ ER_WRONG_FIELD_TERMINATORS 42000 S1009 rus "Аргумент разделителя полей - не тот, который ожидался. Обращайтесь к документации" serbian "Argument separatora polja nije ono što se očekivalo. Proverite uputstvo MariaDB server-a" slo "Argument oddeľovač polí nezodpovedá požiadavkám. Skontrolujte v manuáli" - spa "Los separadores de argumentos del campo no son los especificados. 
Comprueba el manual" + spa "Los separadores de argumentos del campo no son los especificados. Compruebe el manual" swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen" ukr "Хибний розділювач полів. Почитайте документацію" ER_BLOBS_AND_NO_TERMINATED 42000 S1009 @@ -2081,7 +2084,7 @@ ER_BLOBS_AND_NO_TERMINATED 42000 S1009 rus "Фиксированный размер записи с полями типа BLOB использовать нельзя, применяйте 'fields terminated by'" serbian "Ne možete koristiti fiksnu veličinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju" slo "Nie je možné použiť fixnú dĺžku s BLOBom. Použite 'fields terminated by'" - spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '" + spa "No se pueden usar longitudes fijas de filas con BLOBs. Por favor, use 'campos terminados por'" swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'" ukr "Не можна використовувати сталу довжину строки з BLOB. Зкористайтеся 'fields terminated by'" ER_TEXTFILE_NOT_READABLE @@ -2107,7 +2110,7 @@ ER_TEXTFILE_NOT_READABLE rus "Файл '%-.128s' должен находиться в том же каталоге, что и база данных, или быть общедоступным для чтения" serbian "File '%-.128s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuća prava pristupa" slo "Súbor '%-.128s' musí byť v adresári databázy, alebo čitateľný pre všetkých" - spa "El archivo '%-.128s' debe estar en el directorio de la base de datos o ser de lectura por todos" + spa "El fichero/archivo '%-.128s' debe de estar en el directorio de la base de datos o debe de ser legible por todos" swe "Textfilen '%-.128s' måste finnas i databasbiblioteket eller vara läsbar för alla" ukr "Файл '%-.128s' повинен бути у теці бази данних або мати встановлене право на читання для усіх" ER_FILE_EXISTS_ERROR @@ -2133,7 +2136,7 @@ ER_FILE_EXISTS_ERROR rus "Файл '%-.200s' уже существует" serbian "File '%-.200s' već postoji" slo "Súbor '%-.200s' už existuje" - spa "El 
archivo '%-.200s' ya existe" + spa "El fichero/archivo '%-.200s' ya existe" swe "Filen '%-.200s' existerar redan" ukr "Файл '%-.200s' вже існує" ER_LOAD_INFO @@ -2159,7 +2162,7 @@ ER_LOAD_INFO rus "Записей: %ld Удалено: %ld Пропущено: %ld Предупреждений: %ld" serbian "Slogova: %ld Izbrisano: %ld Preskočeno: %ld Upozorenja: %ld" slo "Záznamov: %ld Zmazaných: %ld Preskočených: %ld Varovania: %ld" - spa "Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld" + spa "Registros: %ld Borrados: %ld Saltados: %ld Avisos: %ld" swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld" ukr "Записів: %ld Видалено: %ld Пропущено: %ld Застережень: %ld" ER_ALTER_INFO @@ -2210,7 +2213,7 @@ ER_WRONG_SUB_KEY rus "Некорректная часть ключа. Используемая часть ключа не является строкой, указанная длина больше, чем длина части ключа, или обработчик таблицы не поддерживает уникальные части ключа" serbian "Pogrešan pod-ključ dela ključa. Upotrebljeni deo ključa nije string, upotrebljena dužina je veća od dela ključa ili handler tabela ne podržava jedinstvene pod-ključeve" slo "Incorrect prefix key; the used key part isn't a string or the used length is longer than the key part" - spa "Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave" + spa "Prefijo incorrecto de clave; la parte utilizada de la clave no es una cadena, la longitud usada es mayor que la parte de la clave o el motor de almacenaje no soporta claves con prefijo único" swe "Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden" ukr "Невірна частина ключа. Використана частина ключа не є строкою, задовга або вказівник таблиці не підтримує унікальних частин ключей" ER_CANT_REMOVE_ALL_FIELDS 42000 @@ -2236,7 +2239,7 @@ ER_CANT_REMOVE_ALL_FIELDS 42000 rus "Нельзя удалить все столбцы с помощью ALTER TABLE. Используйте DROP TABLE" serbian "Ne možete da izbrišete sve kolone pomoću komande 'ALTER TABLE'. 
Upotrebite komandu 'DROP TABLE' ako želite to da uradite" slo "One nemôžem zmazať all fields with ALTER TABLE; use DROP TABLE instead" - spa "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo" + spa "No se pueden borrar todas las columnas con ALTER TABLE. Use DROP TABLE para hacerlo" swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället" ukr "Не можливо видалити всі стовбці за допомогою ALTER TABLE. Для цього скористайтеся DROP TABLE" ER_CANT_DROP_FIELD_OR_KEY 42000 @@ -2260,7 +2263,7 @@ ER_CANT_DROP_FIELD_OR_KEY 42000 rus "Невозможно удалить (DROP %s) %`-.192s. Убедитесь что он действительно существует" serbian "Ne mogu da izvršim komandu drop 'DROP %s' na %`-.192s. Proverite da li ta kolona (odnosno ključ) postoji" slo "Nemôžem zrušiť (DROP %s) %`-.192s. Skontrolujte, či neexistujú záznamy/kľúče" - spa "No puedo eliminar (DROP %s) %`-.192s. compuebe que el campo/clave existe" + spa "No puedo eliminar %s %`-.192s; compruebe que ya existe" swe "Kan inte ta bort (DROP %s) %`-.192s. Kontrollera att begränsningen/fältet/nyckel finns" ukr "Не можу DROP %s %`-.192s. 
Перевірте, чи він існує" ER_INSERT_INFO @@ -2286,7 +2289,7 @@ ER_INSERT_INFO rus "Записей: %ld Дубликатов: %ld Предупреждений: %ld" serbian "Slogova: %ld Duplikata: %ld Upozorenja: %ld" slo "Záznamov: %ld Opakovaných: %ld Varovania: %ld" - spa "Registros: %ld Duplicados: %ld Peligros: %ld" + spa "Registros: %ld Duplicados: %ld Avisos: %ld" swe "Rader: %ld Dubletter: %ld Varningar: %ld" ukr "Записів: %ld Дублікатів: %ld Застережень: %ld" ER_UPDATE_TABLE_USED @@ -2317,7 +2320,7 @@ ER_NO_SUCH_THREAD rus "Неизвестный номер потока: %lu" serbian "Nepoznat thread identifikator: %lu" slo "Neznáma identifikácia vlákna: %lu" - spa "Identificador del thread: %lu desconocido" + spa "Identificador del hilo (thread): %lu desconocido" swe "Finns ingen tråd med id %lu" ukr "Невідомий ідентифікатор гілки: %lu" ER_KILL_DENIED_ERROR @@ -2343,7 +2346,7 @@ ER_KILL_DENIED_ERROR rus "Вы не являетесь владельцем потока %lld" serbian "Vi niste vlasnik thread-a %lld" slo "Nie ste vlastníkom vlákna %lld" - spa "Tu no eres el propietario del thread%lld" + spa "No eres el propietario del hilo (thread) %lld" swe "Du är inte ägare till tråd %lld" ukr "Ви не володар гілки %lld" ER_NO_TABLES_USED @@ -2369,7 +2372,7 @@ ER_NO_TABLES_USED rus "Никакие таблицы не использованы" serbian "Nema upotrebljenih tabela" slo "Nie je použitá žiadna tabuľka" - spa "No ha tablas usadas" + spa "No se están usando tablas" swe "Inga tabeller angivna" ukr "Не використано таблиць" ER_TOO_BIG_SET @@ -2394,7 +2397,7 @@ ER_TOO_BIG_SET rus "Слишком много значений для столбца %-.192s в SET" serbian "Previše string-ova za kolonu '%-.192s' i komandu 'SET'" slo "Príliš mnoho reťazcov pre pole %-.192s a SET" - spa "Muchas strings para columna %-.192s y SET" + spa "Demasiadas cadenas para la columna %-.192s y SET" swe "För många alternativ till kolumn %-.192s för SET" ukr "Забагато строк для стовбця %-.192s та SET" ER_NO_UNIQUE_LOGFILE @@ -2420,7 +2423,7 @@ ER_NO_UNIQUE_LOGFILE rus "Невозможно создать уникальное имя файла 
журнала %-.200s.(1-999)\n" serbian "Ne mogu da generišem jedinstveno ime log-file-a: '%-.200s.(1-999)'\n" slo "Nemôžem vytvoriť unikátne meno log-súboru %-.200s.(1-999)\n" - spa "No puede crear un unico archivo log %-.200s.(1-999)\n" + spa "No puedo generar un único fichero/archivo de historial (log) llamado %-.200s.(1-999)\n" swe "Kan inte generera ett unikt filnamn %-.200s.(1-999)\n" ukr "Не можу згенерувати унікальне ім'я log-файлу %-.200s.(1-999)\n" ER_TABLE_NOT_LOCKED_FOR_WRITE @@ -2446,7 +2449,7 @@ ER_TABLE_NOT_LOCKED_FOR_WRITE rus "Таблица '%-.192s' заблокирована уровнем READ lock и не может быть изменена" serbian "Tabela '%-.192s' je zaključana READ lock-om; iz nje se može samo čitati ali u nju se ne može pisati" slo "Tabuľka '%-.192s' bola zamknutá s READ a nemôže byť zmenená" - spa "Tabla '%-.192s' fue trabada con un READ lock y no puede ser actualizada" + spa "La tabla '%-.192s' ha sido bloqueada con un READ lock y no puede ser actualizada" swe "Tabell '%-.192s' kan inte uppdateras emedan den är låst för läsning" ukr "Таблицю '%-.192s' заблоковано тільки для читання, тому її не можна оновити" ER_TABLE_NOT_LOCKED @@ -2472,12 +2475,13 @@ ER_TABLE_NOT_LOCKED rus "Таблица '%-.192s' не была заблокирована с помощью LOCK TABLES" serbian "Tabela '%-.192s' nije bila zaključana komandom 'LOCK TABLES'" slo "Tabuľka '%-.192s' nebola zamknutá s LOCK TABLES" - spa "Tabla '%-.192s' no fue trabada con LOCK TABLES" + spa "La tabla '%-.192s' no fue bloqueada con LOCK TABLES" swe "Tabell '%-.192s' är inte låst med LOCK TABLES" ukr "Таблицю '%-.192s' не було блоковано з LOCK TABLES" ER_UNUSED_17 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_WRONG_DB_NAME 42000 chi "数据库名称不正确'%-.100T'" cze "Nepřípustné jméno databáze '%-.100T'" @@ -2501,7 +2505,7 @@ ER_WRONG_DB_NAME 42000 rus "Некорректное имя базы данных '%-.100T'" serbian "Pogrešno ime baze '%-.100T'" slo "Neprípustné meno databázy '%-.100T'" - spa "Nombre de base de datos ilegal 
'%-.100T'" + spa "Nombre incorrecto de base de datos '%-.100T'" swe "Felaktigt databasnamn '%-.100T'" ukr "Невірне ім'я бази данних '%-.100T'" ER_WRONG_TABLE_NAME 42000 @@ -2527,7 +2531,7 @@ ER_WRONG_TABLE_NAME 42000 rus "Некорректное имя таблицы '%-.100s'" serbian "Pogrešno ime tabele '%-.100s'" slo "Neprípustné meno tabuľky '%-.100s'" - spa "Nombre de tabla ilegal '%-.100s'" + spa "Nombre incorrecto de tabla '%-.100s'" swe "Felaktigt tabellnamn '%-.100s'" ukr "Невірне ім'я таблиці '%-.100s'" ER_TOO_BIG_SELECT 42000 @@ -2553,7 +2557,7 @@ ER_TOO_BIG_SELECT 42000 rus "Для такой выборки SELECT должен будет просмотреть слишком много записей и, видимо, это займет очень много времени. Проверьте ваше указание WHERE, и, если в нем все в порядке, укажите SET SQL_BIG_SELECTS=1" serbian "Komanda 'SELECT' će ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu" slo "Zadaná požiadavka SELECT by prechádzala príliš mnoho záznamov a trvala by príliš dlho. Skontrolujte tvar WHERE a ak je v poriadku, použite SET SQL_BIG_SELECTS=1" - spa "El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto" + spa "El SELECT debería de examinar más de MAX_JOIN_SIZE filas; revise su WHERE y utilice SET SQL_BIG_SELECTS=1 o SET MAX_JOIN_SIZE=# si el SELECT está ok" swe "Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins" ukr "Запиту SELECT потрібно обробити багато записів, що, певне, займе дуже багато часу. 
Перевірте ваше WHERE та використовуйте SET SQL_BIG_SELECTS=1, якщо цей запит SELECT є вірним" ER_UNKNOWN_ERROR @@ -2630,7 +2634,7 @@ ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 rus "Некорректное количество параметров для процедуры '%-.192s'" serbian "Pogrešan broj parametara za proceduru '%-.192s'" slo "Chybný počet parametrov procedúry '%-.192s'" - spa "Equivocado parametro count para procedimiento %-.192s" + spa "Contador de parámetros incorrecto para procedimiento %-.192s" swe "Felaktigt antal parametrar till procedur %-.192s" ukr "Хибна кількість параметрів процедури '%-.192s'" ER_WRONG_PARAMETERS_TO_PROCEDURE @@ -2656,7 +2660,7 @@ ER_WRONG_PARAMETERS_TO_PROCEDURE rus "Некорректные параметры для процедуры '%-.192s'" serbian "Pogrešni parametri prosleđeni proceduri '%-.192s'" slo "Chybné parametre procedúry '%-.192s'" - spa "Equivocados parametros para procedimiento %-.192s" + spa "Parámetros incorrectos para procedimiento %-.192s" swe "Felaktiga parametrar till procedur %-.192s" ukr "Хибний параметер процедури '%-.192s'" ER_UNKNOWN_TABLE 42S02 @@ -2682,7 +2686,7 @@ ER_UNKNOWN_TABLE 42S02 rus "Неизвестная таблица '%-.192s' в %-.32s" serbian "Nepoznata tabela '%-.192s' u '%-.32s'" slo "Neznáma tabuľka '%-.192s' v %-.32s" - spa "Tabla desconocida '%-.192s' in %-.32s" + spa "Tabla desconocida '%-.192s' en %-.32s" swe "Okänd tabell '%-.192s' i '%-.32s'" ukr "Невідома таблиця '%-.192s' у %-.32s" ER_FIELD_SPECIFIED_TWICE 42000 @@ -2708,7 +2712,7 @@ ER_FIELD_SPECIFIED_TWICE 42000 rus "Столбец '%-.192s' указан дважды" serbian "Kolona '%-.192s' je navedena dva puta" slo "Pole '%-.192s' je zadané dvakrát" - spa "Campo '%-.192s' especificado dos veces" + spa "Columna '%-.192s' especificada dos veces" swe "Fält '%-.192s' är redan använt" ukr "Стовбець '%-.192s' зазначено двічі" ER_INVALID_GROUP_FUNC_USE @@ -2731,7 +2735,7 @@ ER_INVALID_GROUP_FUNC_USE rus "Неправильное использование групповых функций" serbian "Pogrešna upotreba 'GROUP' funkcije" slo "Nesprávne použitie funkcie 
GROUP" - spa "Invalido uso de función en grupo" + spa "Inválido uso de función de grupo" swe "Felaktig användning av SQL grupp function" ukr "Хибне використання функції групування" ER_UNSUPPORTED_EXTENSION 42000 @@ -2757,7 +2761,7 @@ ER_UNSUPPORTED_EXTENSION 42000 rus "В таблице '%-.192s' используются возможности, не поддерживаемые в этой версии MariaDB" serbian "Tabela '%-.192s' koristi ekstenziju koje ne postoji u ovoj verziji MariaDB-a" slo "Tabuľka '%-.192s' používa rozšírenie, ktoré v tejto verzii MariaDB nie je" - spa "Tabla '%-.192s' usa una extensión que no existe en esta MariaDB versión" + spa "La tabla '%-.192s' usa una extensión que no existe en esta versión de MariaDB" swe "Tabell '%-.192s' har en extension som inte finns i denna version av MariaDB" ukr "Таблиця '%-.192s' використовує розширення, що не існує у цій версії MariaDB" ER_TABLE_MUST_HAVE_COLUMNS 42000 @@ -2780,7 +2784,7 @@ ER_TABLE_MUST_HAVE_COLUMNS 42000 rus "В таблице должен быть как минимум один столбец" serbian "Tabela mora imati najmanje jednu kolonu" slo "Tabuľka musí mať aspoň 1 pole" - spa "Una tabla debe tener al menos 1 columna" + spa "Una tabla debe de tener al menos 1 columna" swe "Tabeller måste ha minst 1 kolumn" ukr "Таблиця повинна мати хочаб один стовбець" ER_RECORD_FILE_FULL @@ -2826,7 +2830,7 @@ ER_UNKNOWN_CHARACTER_SET 42000 rus "Неизвестная кодировка '%-.64s'" serbian "Nepoznati karakter-set: '%-.64s'" slo "Neznáma znaková sada: '%-.64s'" - spa "Juego de caracteres desconocido: '%-.64s'" + spa "Juego desconocido de caracteres: '%-.64s'" swe "Okänd teckenuppsättning: '%-.64s'" ukr "Невідома кодова таблиця: '%-.64s'" ER_TOO_MANY_TABLES @@ -2849,7 +2853,7 @@ ER_TOO_MANY_TABLES rus "Слишком много таблиц. MariaDB может использовать только %d таблиц в соединении" serbian "Previše tabela. MariaDB može upotrebiti maksimum %d tabela pri 'JOIN' operaciji" slo "Príliš mnoho tabuliek. MariaDB môže použiť len %d v JOIN-e" - spa "Muchas tablas. 
MariaDB solamente puede usar %d tablas en un join" + spa "Demasiadas tablas. MariaDB solamente puede usar %d tablas en un join" swe "För många tabeller. MariaDB can ha högst %d tabeller i en och samma join" ukr "Забагато таблиць. MariaDB може використовувати лише %d таблиць у об'єднанні" ER_TOO_MANY_FIELDS @@ -2872,7 +2876,7 @@ ER_TOO_MANY_FIELDS rus "Слишком много столбцов" serbian "Previše kolona" slo "Príliš mnoho polí" - spa "Muchos campos" + spa "Demasiadas columnas" swe "För många fält" ukr "Забагато стовбців" ER_TOO_BIG_ROWSIZE 42000 @@ -2894,7 +2898,7 @@ ER_TOO_BIG_ROWSIZE 42000 rus "Слишком большой размер записи. Максимальный размер строки, исключая поля BLOB, - %ld. Возможно, вам следует изменить тип некоторых полей на BLOB" serbian "Prevelik slog. Maksimalna veličina sloga, ne računajući BLOB polja, je %ld. Trebali bi da promenite tip nekih polja u BLOB" slo "Riadok je príliš veľký. Maximálna veľkosť riadku, okrem 'BLOB', je %ld. Musíte zmeniť niektoré položky na BLOB" - spa "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %ld. Tu tienes que cambiar algunos campos para blob" + spa "Tamaño de fila muy grande. El máximo tamaño de fila para el tipo de tabla usada, sin contar BLOBs, es de %ld. Esto incluye sobrecarga de almacenaje, revise el manual. Tiene que cambiar algunas columnas a TEXT o BLOBs" swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %ld. Ändra några av dina fält till BLOB" ukr "Задовга строка. Найбільшою довжиною строки, не рахуючи BLOB, є %ld. Вам потрібно привести деякі стовбці до типу BLOB" ER_STACK_OVERRUN @@ -2915,7 +2919,7 @@ ER_STACK_OVERRUN rus "Стек потоков переполнен: использовано: %ld из %ld стека. Применяйте 'mariadbd --thread_stack=#' для указания большего размера стека, если необходимо" serbian "Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. 
Upotrebite 'mariadbd --thread_stack=#' da navedete veći stack ako je potrebno" slo "Pretečenie zásobníku vlákna: použité: %ld z %ld. Použite 'mariadbd --thread_stack=#' k zadaniu väčšieho zásobníka" - spa "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mariadbd --thread_stack=#' para especificar una mayor pila si necesario" + spa "Desbordamiento de la pila de hilos (threads): Usado: %ld de una pila de %ld. Considere el incrementar la variable de sistema thread_stack" swe "Trådstacken tog slut: Har använt %ld av %ld bytes. Använd 'mariadbd --thread_stack=#' ifall du behöver en större stack" ukr "Стек гілок переповнено: Використано: %ld з %ld. Використовуйте 'mariadbd --thread_stack=#' аби зазначити більший стек, якщо необхідно" ER_WRONG_OUTER_JOIN 42000 @@ -2936,12 +2940,13 @@ ER_WRONG_OUTER_JOIN 42000 rus "В OUTER JOIN обнаружена перекрестная зависимость. Внимательно проанализируйте свои условия ON" serbian "Unakrsna zavisnost pronađena u komandi 'OUTER JOIN'. Istražite vaše 'ON' uslove" slo "V OUTER JOIN bol nájdený krížový odkaz. Skontrolujte podmienky ON" - spa "Dependencia cruzada encontrada en OUTER JOIN. Examine su condición ON" + spa "Dependencia cruzada hallada en OUTER JOIN. Examina tus condiciones ON" swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket" ukr "Перехресна залежність у OUTER JOIN. Перевірте умову ON" ER_NULL_COLUMN_IN_INDEX 42000 chi "表处理程序不支持给定索引中的 NULL. 请将列 '%-.192s' 改为 NOT NULL 或使用其他处理程序" eng "Table handler doesn't support NULL in given index. Please change column '%-.192s' to be NOT NULL or use another handler" + spa "El manejador de tabla no soporta NULL en índice suministrado. Por favor, cambie la columna '%-.192s' para que sea NOT NULL o utilice otro manejador" swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. Ändra '%-.192s' till NOT NULL eller använd en annan hanterare" ukr "Вказівник таблиці не підтримує NULL у зазначенному індексі. 
Будь ласка, зменіть стовпчик '%-.192s' на NOT NULL або використайте інший вказівник таблиці." ER_CANT_FIND_UDF @@ -2964,7 +2969,7 @@ ER_CANT_FIND_UDF rus "Невозможно загрузить функцию '%-.192s'" serbian "Ne mogu da učitam funkciju '%-.192s'" slo "Nemôžem načítať funkciu '%-.192s'" - spa "No puedo cargar función '%-.192s'" + spa "No puedo cargar la función '%-.192s'" swe "Kan inte ladda funktionen '%-.192s'" ukr "Не можу завантажити функцію '%-.192s'" ER_CANT_INITIALIZE_UDF @@ -2987,7 +2992,7 @@ ER_CANT_INITIALIZE_UDF rus "Невозможно инициализировать функцию '%-.192s'; %-.80s" serbian "Ne mogu da inicijalizujem funkciju '%-.192s'; %-.80s" slo "Nemôžem inicializovať funkciu '%-.192s'; %-.80s" - spa "No puedo inicializar función '%-.192s'; %-.80s" + spa "No puedo inicializar la función '%-.192s'; %-.80s" swe "Kan inte initialisera funktionen '%-.192s'; '%-.80s'" ukr "Не можу ініціалізувати функцію '%-.192s'; %-.80s" ER_UDF_NO_PATHS @@ -3009,7 +3014,7 @@ ER_UDF_NO_PATHS rus "Недопустимо указывать пути для динамических библиотек" serbian "Ne postoje dozvoljene putanje do share-ovane biblioteke" slo "Neprípustné žiadne cesty k zdieľanej knižnici" - spa "No pasos permitidos para librarias conjugadas" + spa "No existen rutas autorizadas para biblioteca compartida" swe "Man får inte ange sökväg för dynamiska bibliotek" ukr "Не дозволено використовувати путі для розділюваних бібліотек" ER_UDF_EXISTS @@ -3032,7 +3037,7 @@ ER_UDF_EXISTS rus "Функция '%-.192s' уже существует" serbian "Funkcija '%-.192s' već postoji" slo "Funkcia '%-.192s' už existuje" - spa "Función '%-.192s' ya existe" + spa "La función '%-.192s' ya existe" swe "Funktionen '%-.192s' finns redan" ukr "Функція '%-.192s' вже існує" ER_CANT_OPEN_LIBRARY @@ -3057,7 +3062,7 @@ ER_CANT_OPEN_LIBRARY rus "Невозможно открыть динамическую библиотеку '%-.192s' (ошибка: %d, %-.128s)" serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.192s' (errno: %d, %-.128s)" slo "Nemôžem otvoriť zdieľanú knižnicu '%-.192s' (chybový 
kód: %d, %-.128s)" - spa "No puedo abrir libraria conjugada '%-.192s' (errno: %d, %-.128s)" + spa "No puedo abrir la biblioteca compartida '%-.192s' (error: %d, %-.128s)" swe "Kan inte öppna det dynamiska biblioteket '%-.192s' (Felkod: %d, %-.128s)" ukr "Не можу відкрити розділювану бібліотеку '%-.192s' (помилка: %d, %-.128s)" ER_CANT_FIND_DL_ENTRY @@ -3079,7 +3084,7 @@ ER_CANT_FIND_DL_ENTRY rus "Невозможно отыскать символ '%-.128s' в библиотеке" serbian "Ne mogu da pronadjem funkciju '%-.128s' u biblioteci" slo "Nemôžem nájsť funkciu '%-.128s' v knižnici" - spa "No puedo encontrar función '%-.128s' en libraria" + spa "No puedo encontrar el símbolo '%-.128s' en biblioteca" swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket" ukr "Не можу знайти функцію '%-.128s' у бібліотеці" ER_FUNCTION_NOT_DEFINED @@ -3102,7 +3107,7 @@ ER_FUNCTION_NOT_DEFINED rus "Функция '%-.192s' не определена" serbian "Funkcija '%-.192s' nije definisana" slo "Funkcia '%-.192s' nie je definovaná" - spa "Función '%-.192s' no está definida" + spa "La función '%-.192s' no está definida" swe "Funktionen '%-.192s' är inte definierad" ukr "Функцію '%-.192s' не визначено" ER_HOST_IS_BLOCKED @@ -3124,7 +3129,7 @@ ER_HOST_IS_BLOCKED rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mariadb-admin flush-hosts'" rus "Хост '%-.64s' заблокирован из-за слишком большого количества ошибок соединения. Разблокировать его можно с помощью 'mariadb-admin flush-hosts'" serbian "Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoću komande 'mariadb-admin flush-hosts'" - spa "Servidor '%-.64s' está bloqueado por muchos errores de conexión. Desbloquear con 'mariadb-admin flush-hosts'" + spa "El equipo '%-.64s' está bloqueado debido a muchos errores de conexión; desbloquea con 'mariadb-admin flush-hosts'" swe "Denna dator, '%-.64s', är blockerad pga många felaktig paket. 
Gör 'mariadb-admin flush-hosts' för att ta bort alla blockeringarna" ukr "Хост '%-.64s' заблоковано з причини великої кількості помилок з'єднання. Для розблокування використовуйте 'mariadb-admin flush-hosts'" ER_HOST_NOT_PRIVILEGED @@ -3146,7 +3151,7 @@ ER_HOST_NOT_PRIVILEGED rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MariaDB" rus "Хосту '%-.64s' не разрешается подключаться к этому серверу MariaDB" serbian "Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MariaDB server" - spa "Servidor '%-.64s' no está permitido para conectar con este servidor MariaDB" + spa "El equipo '%-.64s' no está autorizado a conectar con este servidor MariaDB" swe "Denna dator, '%-.64s', har inte privileger att använda denna MariaDB server" ukr "Хосту '%-.64s' не доволено зв'язуватись з цим сервером MariaDB" ER_PASSWORD_ANONYMOUS_USER 42000 @@ -3168,7 +3173,7 @@ ER_PASSWORD_ANONYMOUS_USER 42000 rum "Dumneavoastra folositi MariaDB ca un utilizator anonim si utilizatorii anonimi nu au voie sa schimbe setarile utilizatorilor" rus "Вы используете MariaDB от имени анонимного пользователя, а анонимным пользователям не разрешается менять пароли" serbian "Vi koristite MariaDB kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke" - spa "Tu estás usando MariaDB como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves" + spa "Está usando MariaDB como un usuario anónimo y los usuarios anónimos no tienen permiso para cambiar las propiedades de usuario" swe "Du använder MariaDB som en anonym användare och som sådan får du inte ändra ditt lösenord" ukr "Ви використовуєте MariaDB як анонімний користувач, тому вам не дозволено змінювати паролі" ER_PASSWORD_NOT_ALLOWED 42000 @@ -3189,7 +3194,7 @@ ER_PASSWORD_NOT_ALLOWED 42000 rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora" rus "Для того чтобы изменять пароли других пользователей, у вас должны быть
привилегии на изменение таблиц в базе данных mysql" serbian "Morate imati privilegije da možete da update-ujete određene tabele ako želite da menjate lozinke za druge korisnike" - spa "Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros" + spa "Vd debe de tener privilegios para actualizar tablas en la base de datos mysql para poder cambiar las contraseñas de otros" swe "För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen" ukr "Ви повині мати право на оновлення таблиць у базі данних mysql, аби мати можливість змінювати пароль іншим" ER_PASSWORD_NO_MATCH 28000 @@ -3211,7 +3216,7 @@ ER_PASSWORD_NO_MATCH 28000 rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului" rus "Невозможно отыскать подходящую запись в таблице пользователей" serbian "Ne mogu da pronađem odgovarajući slog u 'user' tabeli" - spa "No puedo encontrar una línea correponsdiente en la tabla user" + spa "No puedo encontrar una fila coincidente en la tabla de usuario" swe "Hittade inte användaren i 'user'-tabellen" ukr "Не можу знайти відповідних записів у таблиці користувача" ER_UPDATE_INFO @@ -3231,7 +3236,7 @@ ER_UPDATE_INFO rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld" rus "Совпало записей: %ld Изменено: %ld Предупреждений: %ld" serbian "Odgovarajućih slogova: %ld Promenjeno: %ld Upozorenja: %ld" - spa "Líneas correspondientes: %ld Cambiadas: %ld Avisos: %ld" + spa "Líneas coincidentes: %ld Cambiadas: %ld Avisos: %ld" swe "Rader: %ld Uppdaterade: %ld Varningar: %ld" ukr "Записів відповідає: %ld Змінено: %ld Застережень: %ld" ER_CANT_CREATE_THREAD @@ -3254,7 +3259,7 @@ ER_CANT_CREATE_THREAD rum "Nu pot crea un thread nou (Eroare %M). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare" rus "Невозможно создать новый поток (ошибка %M). 
Если это не ситуация, связанная с нехваткой памяти, то вам следует изучить документацию на предмет описания возможной ошибки работы в конкретной ОС" serbian "Ne mogu da kreiram novi thread (errno %M). Ako imate još slobodne memorije, trebali biste da pogledate u priručniku da li je ovo specifična greška vašeg operativnog sistema" - spa "No puedo crear un nuevo thread (errno %M). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO" + spa "No puedo crear un nuevo hilo (thread) (error %M). Si no está falto de memoria disponible, vd puede consultar el manual para un posible error dependiente del SO" swe "Kan inte skapa en ny tråd (errno %M)" ukr "Не можу створити нову гілку (помилка %M). Якщо ви не використали усю пам'ять, то прочитайте документацію до вашої ОС - можливо це помилка ОС" ER_WRONG_VALUE_COUNT_ON_ROW 21S01 @@ -3273,7 +3278,7 @@ ER_WRONG_VALUE_COUNT_ON_ROW 21S01 rum "Numarul de coloane nu corespunde cu numarul de valori la linia %lu" rus "Количество столбцов не совпадает с количеством значений в записи %lu" serbian "Broj kolona ne odgovara broju vrednosti u slogu %lu" - spa "El número de columnas no corresponde al número en la línea %lu" + spa "El número de columnas no se corresponde con el número de valores en la línea %lu" swe "Antalet kolumner motsvarar inte antalet värden på rad: %lu" ukr "Кількість стовбців не співпадає з кількістю значень у строці %lu" ER_CANT_REOPEN_TABLE @@ -3298,7 +3303,7 @@ ER_CANT_REOPEN_TABLE rus "Невозможно заново открыть таблицу '%-.192s'" serbian "Ne mogu da ponovo otvorim tabelu '%-.192s'" slo "Can't reopen table: '%-.192s" - spa "No puedo reabrir tabla: '%-.192s" + spa "No puedo reabrir la tabla: '%-.192s" swe "Kunde inte stänga och öppna tabell '%-.192s" ukr "Не можу перевідкрити таблицю: '%-.192s'" ER_INVALID_USE_OF_NULL 22004 @@ -3319,7 +3324,7 @@ ER_INVALID_USE_OF_NULL 22004 rum "Folosirea unei value NULL e invalida" rus "Неправильное использование величины NULL" 
serbian "Pogrešna upotreba vrednosti NULL" - spa "Invalido uso de valor NULL" + spa "Uso inválido del valor NULL" swe "Felaktig använding av NULL" ukr "Хибне використання значення NULL" ER_REGEXP_ERROR 42000 @@ -3330,7 +3335,7 @@ ER_REGEXP_ERROR 42000 est "regexp tagastas vea: %s" fre "Erreur '%s' provenant de regexp" ger "Regexp Fehler %s" - hindi "regexp में '%4s' त्रुटि हुई" + hindi "regexp में '%s' त्रुटि हुई" hun "'%s' hiba a regularis kifejezes hasznalata soran (regexp)" ita "Errore '%s' da regexp" jpn "regexp がエラー '%s' を返しました。" @@ -3360,7 +3365,7 @@ ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000 rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY" rus "Одновременное использование сгруппированных (GROUP) столбцов (MIN(),MAX(),COUNT(),...) с несгруппированными столбцами является некорректным, если в выражении есть GROUP BY" serbian "Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz" - spa "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY" + spa "La mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con columnas no GROUP es ilegal si no existe la cláusula GROUP BY" swe "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en fråga om man inte har en GROUP BY-del" ukr "Змішування GROUP стовбців (MIN(),MAX(),COUNT()...)
з не GROUP стовбцями є забороненим, якщо не має GROUP BY" ER_NONEXISTING_GRANT 42000 @@ -3380,7 +3385,7 @@ ER_NONEXISTING_GRANT 42000 rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.48s' de pe host-ul '%-.64s'" rus "Такие права не определены для пользователя '%-.48s' на хосте '%-.64s'" serbian "Ne postoji odobrenje za pristup korisniku '%-.48s' na host-u '%-.64s'" - spa "No existe permiso definido para usuario '%-.48s' en el servidor '%-.64s'" + spa "No existe tal concesión (grant) definida para usuario '%-.48s' en el equipo '%-.64s'" swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s'" ukr "Повноважень не визначено для користувача '%-.48s' з хосту '%-.64s'" ER_TABLEACCESS_DENIED_ERROR 42000 @@ -3400,7 +3405,7 @@ ER_TABLEACCESS_DENIED_ERROR 42000 rum "Comanda %-.100T interzisa utilizatorului: '%s'@'%s' pentru tabela %`s.%`s" rus "Команда %-.100T запрещена пользователю '%s'@'%s' для таблицы %`s.%`s" serbian "%-.100T komanda zabranjena za korisnika '%s'@'%s' za tabelu %`s.%`s" - spa "%-.100T comando negado para usuario: '%s'@'%s' para tabla %`s.%`s" + spa "%-.100T comando denegado a usuario '%s'@'%s' para la tabla %`s.%`s" swe "%-.100T ej tillåtet för '%s'@'%s' för tabell %`s.%`s" ukr "%-.100T команда заборонена користувачу: '%s'@'%s' у таблиці %`s.%`s" ER_COLUMNACCESS_DENIED_ERROR 42000 @@ -3420,7 +3425,7 @@ ER_COLUMNACCESS_DENIED_ERROR 42000 rum "Comanda %-.32s interzisa utilizatorului: '%s'@'%s' pentru coloana '%-.192s' in tabela '%-.192s'" rus "Команда %-.32s запрещена пользователю '%s'@'%s' для столбца '%-.192s' в таблице '%-.192s'" serbian "%-.32s komanda zabranjena za korisnika '%s'@'%s' za kolonu '%-.192s' iz tabele '%-.192s'" - spa "%-.32s comando negado para usuario: '%s'@'%s' para columna '%-.192s' en la tabla '%-.192s'" + spa "%-.32s comando denegado al usuario '%s'@'%s' para la columna '%-.192s' en la tabla '%-.192s'" swe "%-.32s ej tillåtet för '%s'@'%s' för kolumn '%-.192s' i tabell '%-.192s'" ukr "%-.32s
команда заборонена користувачу: '%s'@'%s' для стовбця '%-.192s' у таблиці '%-.192s'" ER_ILLEGAL_GRANT_FOR_TABLE 42000 @@ -3445,7 +3450,7 @@ ER_ILLEGAL_GRANT_FOR_TABLE 42000 rus "Неверная команда GRANT или REVOKE. Обратитесь к документации, чтобы выяснить, какие привилегии можно использовать" serbian "Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priručniku koje vrednosti mogu biti upotrebljene" slo "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used" - spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados" + spa "Comando GRANT/REVOKE ilegal; por favor, consulte el manual para ver los permisos que se pueden usar" swe "Felaktigt GRANT-privilegium använt" ukr "Хибна GRANT/REVOKE команда; прочитайте документацію стосовно того, які права можна використовувати" ER_GRANT_WRONG_HOST_OR_USER 42000 @@ -3466,7 +3471,7 @@ ER_GRANT_WRONG_HOST_OR_USER 42000 rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung" rus "Слишком длинное имя пользователя/хоста для GRANT" serbian "Argument 'host' ili 'korisnik' prosleđen komandi 'GRANT' je predugačak" - spa "El argumento para servidor o usuario para GRANT es demasiado grande" + spa "El argumento de GRANT para el equipo o usuario es demasiado grande" swe "Felaktigt maskinnamn eller användarnamn använt med GRANT" ukr "Аргумент host або user для GRANT задовгий" ER_NO_SUCH_TABLE 42S02 @@ -3491,7 +3496,7 @@ ER_NO_SUCH_TABLE 42S02 rus "Таблица '%-.192s.%-.192s' не существует" serbian "Tabela '%-.192s.%-.192s' ne postoji" slo "Table '%-.192s.%-.192s' doesn't exist" - spa "Tabla '%-.192s.%-.192s' no existe" + spa "La tabla '%-.192s.%-.192s' no existe" swe "Det finns ingen tabell som heter '%-.192s.%-.192s'" ukr "Таблиця '%-.192s.%-.192s' не існує" ER_NONEXISTING_TABLE_GRANT 42000 @@ -3511,7 +3516,7 @@ ER_NONEXISTING_TABLE_GRANT 42000 rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.48s' de pe 
host-ul '%-.64s' pentru tabela '%-.192s'" rus "Такие права не определены для пользователя '%-.48s' на компьютере '%-.64s' для таблицы '%-.192s'" serbian "Ne postoji odobrenje za pristup korisniku '%-.48s' na host-u '%-.64s' tabeli '%-.192s'" - spa "No existe tal permiso definido para usuario '%-.48s' en el servidor '%-.64s' en la tabla '%-.192s'" + spa "No existe tal concesión (grant) definida para el usuario '%-.48s' en el equipo '%-.64s' en la tabla '%-.192s'" swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s' för tabell '%-.192s'" ukr "Повноважень не визначено для користувача '%-.48s' з хосту '%-.64s' для таблиці '%-.192s'" ER_NOT_ALLOWED_COMMAND 42000 @@ -3532,7 +3537,7 @@ ER_NOT_ALLOWED_COMMAND 42000 rum "Comanda folosita nu este permisa pentru aceasta versiune de MariaDB" rus "Эта команда не допускается в данной версии MariaDB" serbian "Upotrebljena komanda nije dozvoljena sa ovom verzijom MariaDB servera" - spa "El comando usado no es permitido con esta versión de MariaDB" + spa "El comando usado no está permitido con esta versión de MariaDB" swe "Du kan inte använda detta kommando med denna MariaDB version" ukr "Використовувана команда не дозволена у цій версії MariaDB" ER_SYNTAX_ERROR 42000 @@ -3558,7 +3563,7 @@ ER_SYNTAX_ERROR 42000 rus "У вас ошибка в запросе. 
Изучите документацию по используемой версии MariaDB на предмет корректного синтаксиса" serbian "Imate grešku u vašoj SQL sintaksi" slo "Something is wrong in your syntax" - spa "Algo está equivocado en su sintax" + spa "Existe un error en su sintaxis SQL; revise el manual que se corresponde con su versión del servidor MariaDB para averiguar la sintaxis correcta a utilizar" swe "Du har något fel i din syntax" ukr "У вас помилка у синтаксисі SQL" ER_DELAYED_CANT_CHANGE_LOCK @@ -3578,7 +3583,7 @@ ER_DELAYED_CANT_CHANGE_LOCK rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.192s" rus "Поток, обслуживающий отложенную вставку (delayed insert), не смог получить запрашиваемую блокировку на таблицу %-.192s" serbian "Prolongirani 'INSERT' thread nije mogao da dobije traženo zaključavanje tabele '%-.192s'" - spa "Thread de inserción retarda no pudiendo bloquear para la tabla %-.192s" + spa "El hilo (thread) de inserción retardada no pudo obtener bloqueo requerido para la tabla %-.192s" swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.192s'" ukr "Гілка для INSERT DELAYED не може отримати блокування для таблиці %-.192s" ER_TOO_MANY_DELAYED_THREADS @@ -3599,7 +3604,7 @@ ER_TOO_MANY_DELAYED_THREADS rum "Prea multe threaduri aminate care sint in uz" rus "Слишком много потоков, обслуживающих отложенную вставку (delayed insert)" serbian "Previše prolongiranih thread-ova je u upotrebi" - spa "Muchos threads retardados en uso" + spa "Demasiados hilos (threads) retardados en uso" swe "Det finns redan 'max_delayed_threads' trådar i använding" ukr "Забагато затриманих гілок використовується" ER_ABORTING_CONNECTION 08S01 @@ -3623,7 +3628,7 @@ ER_ABORTING_CONNECTION 08S01 rus "Прервано соединение %ld к базе данных '%-.192s' пользователя '%-.48s' (%-.64s)" serbian "Prekinuta konekcija broj %ld ka bazi: '%-.192s' korisnik je bio: '%-.48s' (%-.64s)" slo "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" - spa "Conexión abortada %ld 
para db: '%-.192s' usuario: '%-.48s' (%-.64s)" + spa "Conexión %ld abortada para la base de datos: '%-.192s' usuario: '%-.48s' (%-.64s)" swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s' (%-.64s)" ukr "Перервано з'єднання %ld до бази данних: '%-.192s' користувача: '%-.48s' (%-.64s)" ER_NET_PACKET_TOO_LARGE 08S01 @@ -3644,7 +3649,7 @@ ER_NET_PACKET_TOO_LARGE 08S01 rum "Un packet mai mare decit 'max_allowed_packet' a fost primit" rus "Полученный пакет больше, чем 'max_allowed_packet'" serbian "Primio sam mrežni paket veći od definisane vrednosti 'max_allowed_packet'" - spa "Obtenido un paquete mayor que 'max_allowed_packet'" + spa "Obtenido un paquete mayor de 'max_allowed_packet' bytes" swe "Kommunkationspaketet är större än 'max_allowed_packet'" ukr "Отримано пакет більший ніж max_allowed_packet" ER_NET_READ_ERROR_FROM_PIPE 08S01 @@ -3665,7 +3670,7 @@ ER_NET_READ_ERROR_FROM_PIPE 08S01 rum "Eroare la citire din cauza lui 'connection pipe'" rus "Получена ошибка чтения от потока соединения (connection pipe)" serbian "Greška pri čitanju podataka sa pipe-a" - spa "Obtenido un error de lectura de la conexión pipe" + spa "Obtenido un error de lectura desde la tubería de la conexión" swe "Fick läsfel från klienten vid läsning från 'PIPE'" ukr "Отримано помилку читання з комунікаційного каналу" ER_NET_FCNTL_ERROR 08S01 @@ -3707,7 +3712,7 @@ ER_NET_PACKETS_OUT_OF_ORDER 08S01 rum "Packets care nu sint ordonati au fost gasiti" rus "Пакеты получены в неверном порядке" serbian "Primio sam mrežne pakete van reda" - spa "Obtenido paquetes desordenados" + spa "Obtenidos paquetes desordenados" swe "Kommunikationspaketen kom i fel ordning" ukr "Отримано пакети у неналежному порядку" ER_NET_UNCOMPRESS_ERROR 08S01 @@ -3728,7 +3733,7 @@ ER_NET_UNCOMPRESS_ERROR 08S01 rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)" rus "Невозможно распаковать пакет, полученный через коммуникационный протокол" serbian "Ne mogu da dekompresujem mrežne pakete" 
- spa "No puedo descomprimir paquetes de comunicación" + spa "No pude descomprimir paquete de comunicación" swe "Kunde inte packa up kommunikationspaketet" ukr "Не можу декомпресувати комунікаційний пакет" ER_NET_READ_ERROR 08S01 @@ -3770,7 +3775,7 @@ ER_NET_READ_INTERRUPTED 08S01 rum "Timeout obtinut citind pachetele de comunicatie (communication packets)" rus "Получен таймаут ожидания пакета через коммуникационный протокол " serbian "Vremenski limit za čitanje mrežnih paketa je istekao" - spa "Obtenido timeout leyendo paquetes de comunicación" + spa "Obtenido tiempo agotado (timeout) leyendo paquetes de comunicación" swe "Fick 'timeout' vid läsning från klienten" ukr "Отримано затримку читання комунікаційних пакетів" ER_NET_ERROR_ON_WRITE 08S01 @@ -3791,7 +3796,7 @@ ER_NET_ERROR_ON_WRITE 08S01 rum "Eroare in scrierea pachetelor de comunicatie (communication packets)" rus "Получена ошибка при передаче пакета через коммуникационный протокол " serbian "Greška pri slanju mrežnih paketa" - spa "Obtenido un error de escribiendo paquetes de comunicación" + spa "Obtenido un error escribiendo paquetes de comunicación" swe "Fick ett fel vid skrivning till klienten" ukr "Отримано помилку запису комунікаційних пакетів" ER_NET_WRITE_INTERRUPTED 08S01 @@ -3812,7 +3817,7 @@ ER_NET_WRITE_INTERRUPTED 08S01 rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)" rus "Получен таймаут в процессе передачи пакета через коммуникационный протокол " serbian "Vremenski limit za slanje mrežnih paketa je istekao" - spa "Obtenido timeout escribiendo paquetes de comunicación" + spa "Obtenido tiempo agotado (timeout) escribiendo paquetes de comunicación" swe "Fick 'timeout' vid skrivning till klienten" ukr "Отримано затримку запису комунікаційних пакетів" ER_TOO_LONG_STRING 42000 @@ -3833,7 +3838,7 @@ ER_TOO_LONG_STRING 42000 rum "Sirul rezultat este mai lung decit 'max_allowed_packet'" rus "Результирующая строка больше, чем 'max_allowed_packet'" serbian "Rezultujuči 
string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'" - spa "La string resultante es mayor que max_allowed_packet" + spa "La cadena resultante es mayor de max_allowed_packet bytes" swe "Resultatsträngen är längre än max_allowed_packet" ukr "Строка результату довша ніж max_allowed_packet" ER_TABLE_CANT_HANDLE_BLOB 42000 @@ -3853,7 +3858,7 @@ ER_TABLE_CANT_HANDLE_BLOB 42000 rum "Tipul de tabela folosit (%s) nu suporta coloane de tip BLOB/TEXT" rus "%s таблицы не поддерживают типы BLOB/TEXT" serbian "Iskorišteni tip tabele (%s) ne podržava kolone tipa 'BLOB' odnosno 'TEXT'" - spa "El tipo de tabla usada (%s) no permite soporte para columnas BLOB/TEXT" + spa "El motor de almacenaje %s no soporta columnas de tipo BLOB/TEXT" swe "Den använda tabelltypen (%s) kan inte hantera BLOB/TEXT-kolumner" ukr "%s таблиці не підтримують BLOB/TEXT стовбці" ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 @@ -3873,7 +3878,7 @@ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 rum "Tipul de tabela folosit (%s) nu suporta coloane de tip AUTO_INCREMENT" rus "%s таблицы не поддерживают автоинкрементные столбцы" serbian "Iskorišteni tip tabele (%s) ne podržava kolone tipa 'AUTO_INCREMENT'" - spa "El tipo de tabla usada (%s) no permite soporte para columnas AUTO_INCREMENT" + spa "El motor de almacenaje %s no soporta columnas AUTO_INCREMENT" swe "Den använda tabelltypen (%s) kan inte hantera AUTO_INCREMENT-kolumner" ukr "%s таблиці не підтримують AUTO_INCREMENT стовбці" ER_DELAYED_INSERT_TABLE_LOCKED @@ -3898,7 +3903,7 @@ ER_DELAYED_INSERT_TABLE_LOCKED rus "Нельзя использовать INSERT DELAYED для таблицы '%-.192s', потому что она заблокирована с помощью LOCK TABLES" serbian "Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.192s', zbog toga što je zaključana komandom 'LOCK TABLES'" slo "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" - spa "INSERT DELAYED no puede ser usado con tablas '%-.192s', porque esta bloqueada con LOCK 
TABLES" + spa "INSERT DELAYED no puede ser usado con la tabla '%-.192s' porque esta bloqueada con LOCK TABLES" swe "INSERT DELAYED kan inte användas med tabell '%-.192s', emedan den är låst med LOCK TABLES" ukr "INSERT DELAYED не може бути використано з таблицею '%-.192s', тому що її заблоковано з LOCK TABLES" ER_WRONG_COLUMN_NAME 42000 @@ -3918,7 +3923,7 @@ ER_WRONG_COLUMN_NAME 42000 rum "Nume increct de coloana '%-.100s'" rus "Неверное имя столбца '%-.100s'" serbian "Pogrešno ime kolone '%-.100s'" - spa "Incorrecto nombre de columna '%-.100s'" + spa "Nombre Incorrecto de columna '%-.100s'" swe "Felaktigt kolumnnamn '%-.100s'" ukr "Невірне ім'я стовбця '%-.100s'" ER_WRONG_KEY_COLUMN 42000 @@ -3927,6 +3932,7 @@ ER_WRONG_KEY_COLUMN 42000 ger "Die Speicher-Engine %s kann die Spalte %`s nicht indizieren" hindi "स्टोरेज इंजन %s, कॉलम %`s को इंडेक्स नहीं कर सकता" rus "Обработчик таблиц %s не может проиндексировать столбец %`s" + spa "El motor de almacenaje %s no puede indizar la columna %`s" ukr "Вказівник таблиц %s не може індексувати стовбець %`s" ER_WRONG_MRG_TABLE chi "无法打开定义不同或非 MyISAM 类型或不存在的表" @@ -3949,7 +3955,7 @@ ER_WRONG_MRG_TABLE rus "Не все таблицы в MERGE определены одинаково" serbian "Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti način" slo "All tables in the MERGE table are not defined identically" - spa "Todas las tablas en la MERGE tabla no estan definidas identicamente" + spa "Incapaz de abrir la tabla subyacente por estar definida de forma diferente o por no ser del tipo no-MyISAM o por no existir" swe "Tabellerna i MERGE-tabellen är inte identiskt definierade" ukr "Таблиці у MERGE TABLE мають різну структуру" ER_DUP_UNIQUE 23000 @@ -3968,7 +3974,7 @@ ER_DUP_UNIQUE 23000 rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.192s'" rus "Невозможно записать в таблицу '%-.192s' из-за ограничений уникального ключа" serbian "Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu 
'%-.192s'" - spa "No puedo escribir, debido al único constraint, para tabla '%-.192s'" + spa "No puedo grabar, debido a restricción única, en la tabla '%-.192s'" swe "Kan inte skriva till tabell '%-.192s'; UNIQUE-test" ukr "Не можу записати до таблиці '%-.192s', з причини вимог унікальності" ER_BLOB_KEY_WITHOUT_LENGTH 42000 @@ -3993,7 +3999,7 @@ ER_BLOB_KEY_WITHOUT_LENGTH 42000 rus "Столбец типа BLOB '%-.192s' был указан в определении ключа без указания длины ключа" serbian "BLOB kolona '%-.192s' je upotrebljena u specifikaciji ključa bez navođenja dužine ključa" slo "BLOB column '%-.192s' used in key specification without a key length" - spa "Columna BLOB column '%-.192s' usada en especificación de clave sin tamaño de la clave" + spa "Columna BLOB/TEXT '%-.192s', usada en especificación de clave, sin tamaño" swe "Du har inte angett någon nyckellängd för BLOB '%-.192s'" ukr "Стовбець BLOB '%-.192s' використано у визначенні ключа без вказання довжини ключа" ER_PRIMARY_CANT_HAVE_NULL 42000 @@ -4013,7 +4019,7 @@ ER_PRIMARY_CANT_HAVE_NULL 42000 rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb" rus "Все части первичного ключа (PRIMARY KEY) должны быть определены как NOT NULL; Если вам нужна поддержка величин NULL в ключе, воспользуйтесь индексом UNIQUE" serbian "Svi delovi primarnog ključa moraju biti različiti od NULL; Ako Vam ipak treba NULL vrednost u ključu, upotrebite 'UNIQUE'" - spa "Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE" + spa "Todas las partes de una PRIMARY KEY deben de ser NOT NULL; si necesita NULL en una clave, use UNIQUE en su lugar" swe "Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället" ukr "Усі частини PRIMARY KEY повинні бути NOT NULL; Якщо ви потребуєте NULL у ключі, скористайтеся UNIQUE" ER_TOO_MANY_ROWS 42000 @@ -4033,7 +4039,7 @@ 
ER_TOO_MANY_ROWS 42000 rum "Resultatul constista din mai multe linii" rus "В результате возвращена более чем одна строка" serbian "Rezultat je sačinjen od više slogova" - spa "Resultado compuesto de mas que una línea" + spa "Resultado compuesto de más de una fila" swe "Resultet bestod av mera än en rad" ukr "Результат знаходиться у більше ніж одній строці" ER_REQUIRES_PRIMARY_KEY 42000 @@ -4053,7 +4059,7 @@ ER_REQUIRES_PRIMARY_KEY 42000 rum "Aceast tip de tabela are nevoie de o cheie primara" rus "Этот тип таблицы требует определения первичного ключа" serbian "Ovaj tip tabele zahteva da imate definisan primarni ključ" - spa "Este tipo de tabla necesita de una primary key" + spa "Este tipo de tabla necesita de una clave primaria" swe "Denna tabelltyp kräver en PRIMARY KEY" ukr "Цей тип таблиці потребує первинного ключа" ER_NO_RAID_COMPILED @@ -4073,7 +4079,7 @@ ER_NO_RAID_COMPILED rum "Aceasta versiune de MariaDB, nu a fost compilata cu suport pentru RAID" rus "Эта версия MariaDB скомпилирована без поддержки RAID" serbian "Ova verzija MariaDB servera nije kompajlirana sa podrškom za RAID uređaje" - spa "Esta versión de MariaDB no es compilada con soporte RAID" + spa "Esta versión de MariaDB no ha sido compilada con soporte para RAID" swe "Denna version av MariaDB är inte kompilerad med RAID" ukr "Ця версія MariaDB не зкомпільована з підтримкою RAID" ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE @@ -4091,7 +4097,7 @@ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE por "Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave" rus "Вы работаете в режиме безопасных обновлений (safe update mode) и попробовали изменить таблицу без использования ключевого столбца в части WHERE" serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu ključa" - spa "Tu estás usando modo de actualización segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna" + spa 
"Está usando modo de actualización segura y ha intentado actualizar una tabla sin un WHERE que use una columna KEY" swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel" ukr "Ви у режимі безпечного оновлення та намагаєтесь оновити таблицю без оператора WHERE, що використовує KEY стовбець" ER_KEY_DOES_NOT_EXISTS 42000 S1009 @@ -4110,7 +4116,7 @@ ER_KEY_DOES_NOT_EXISTS 42000 S1009 por "Chave '%-.192s' não existe na tabela '%-.192s'" rus "Ключ '%-.192s' не существует в таблице '%-.192s'" serbian "Ključ '%-.192s' ne postoji u tabeli '%-.192s'" - spa "Clave '%-.192s' no existe en la tabla '%-.192s'" + spa "La clave '%-.192s' no existe en la tabla '%-.192s'" swe "Nyckel '%-.192s' finns inte in tabell '%-.192s'" ukr "Ключ '%-.192s' не існує в таблиці '%-.192s'" ER_CHECK_NO_SUCH_TABLE 42000 @@ -4155,7 +4161,7 @@ ER_CHECK_NOT_IMPLEMENTED 42000 rus "Обработчик таблицы не поддерживает этого: %s" serbian "Handler za ovu tabelu ne dozvoljava %s komande" slo "The handler for the table doesn't support %s" - spa "El manipulador de la tabla no permite soporte para %s" + spa "El motor de almacenaje para la tabla no soporta %s" swe "Tabellhanteraren för denna tabell kan inte göra %s" ukr "Вказівник таблиці не підтримуе %s" ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000 @@ -4173,7 +4179,7 @@ ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000 por "Não lhe é permitido executar este comando em uma transação" rus "Вам не разрешено выполнять эту команду в транзакции" serbian "Nije Vam dozvoljeno da izvršite ovu komandu u transakciji" - spa "No tienes el permiso para ejecutar este comando en una transición" + spa "No tiene el permiso para ejecutar este comando en una transacción" swe "Du får inte utföra detta kommando i en transaktion" ukr "Вам не дозволено виконувати цю команду в транзакції" ER_ERROR_DURING_COMMIT @@ -4266,12 +4272,13 @@ ER_NEW_ABORTING_CONNECTION 08S01 por "Conexão %lld abortada para banco de dados '%-.192s' - 
usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')" rus "Прервано соединение %lld к базе данных '%-.192s' пользователя '%-.48s' с хоста '%-.64s' (%-.64s)" serbian "Prekinuta konekcija broj %lld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)" - spa "Abortada conexión %lld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)" + spa "Abortada conexión %lld a la base de datos: '%-.192s' usuario: '%-.48s' equipo: '%-.64s' (%-.64s)" swe "Avbröt länken för tråd %lld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)" ukr "Перервано з'єднання %lld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)" ER_UNUSED_10 chi "你应当永远看不到这个" eng "You should never see it" + spa "Nunca lo debería vd de ver" ER_FLUSH_MASTER_BINLOG_CLOSED chi "Binlog 已关闭, 不能 RESET MASTER" eng "Binlog closed, cannot RESET MASTER" @@ -4280,6 +4287,7 @@ ER_FLUSH_MASTER_BINLOG_CLOSED por "Binlog fechado. Não pode fazer RESET MASTER" rus "Двоичный журнал обновления закрыт, невозможно выполнить RESET MASTER" serbian "Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'" + spa "Binlog cerrado, no puedo hacer RESET MASTER" ukr "Реплікаційний лог закрито, не можу виконати RESET MASTER" ER_INDEX_REBUILD chi "重建 dumped table '%-.192s' 的索引失败" @@ -4296,7 +4304,7 @@ ER_INDEX_REBUILD por "Falhou na reconstrução do índice da tabela 'dumped' '%-.192s'" rus "Ошибка перестройки индекса сохраненной таблицы '%-.192s'" serbian "Izgradnja indeksa dump-ovane tabele '%-.192s' nije uspela" - spa "Falla reconstruyendo el indice de la tabla dumped '%-.192s'" + spa "Fallo reconstruyendo el índice del volcado de la tabla '%-.192s'" ukr "Невдале відновлення індекса переданої таблиці '%-.192s'" ER_MASTER chi "Master错误:'%-.64s'" @@ -4311,7 +4319,7 @@ ER_MASTER por "Erro no 'master' '%-.64s'" rus "Ошибка от головного сервера: '%-.64s'" serbian "Greška iz glavnog servera '%-.64s' u klasteru" - spa "Error del master: '%-.64s'" + spa "Error del maestro (master): 
'%-.64s'" swe "Fel från master: '%-.64s'" ukr "Помилка від головного: '%-.64s'" ER_MASTER_NET_READ 08S01 @@ -4327,7 +4335,7 @@ ER_MASTER_NET_READ 08S01 por "Erro de rede lendo do 'master'" rus "Возникла ошибка чтения в процессе коммуникации с головным сервером" serbian "Greška u primanju mrežnih paketa sa glavnog servera u klasteru" - spa "Error de red leyendo del master" + spa "Error de red leyendo del maestro (master)" swe "Fick nätverksfel vid läsning från master" ukr "Мережева помилка читання від головного" ER_MASTER_NET_WRITE 08S01 @@ -4343,7 +4351,7 @@ ER_MASTER_NET_WRITE 08S01 por "Erro de rede gravando no 'master'" rus "Возникла ошибка записи в процессе коммуникации с головным сервером" serbian "Greška u slanju mrežnih paketa na glavni server u klasteru" - spa "Error de red escribiendo para el master" + spa "Error de red grabando en maestro (master)" swe "Fick nätverksfel vid skrivning till master" ukr "Мережева помилка запису до головного" ER_FT_MATCHING_KEY_NOT_FOUND @@ -4360,7 +4368,7 @@ ER_FT_MATCHING_KEY_NOT_FOUND por "Não pode encontrar um índice para o texto todo que combine com a lista de colunas" rus "Невозможно отыскать полнотекстовый (FULLTEXT) индекс, соответствующий списку столбцов" serbian "Ne mogu da pronađem 'FULLTEXT' indeks koli odgovara listi kolona" - spa "No puedo encontrar índice FULLTEXT correspondiendo a la lista de columnas" + spa "No puedo encontrar índice FULLTEXT coincidente con la lista de columnas" swe "Hittar inte ett FULLTEXT-index i kolumnlistan" ukr "Не можу знайти FULLTEXT індекс, що відповідає переліку стовбців" ER_LOCK_OR_ACTIVE_TRANSACTION @@ -4377,7 +4385,7 @@ ER_LOCK_OR_ACTIVE_TRANSACTION por "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa" rus "Невозможно выполнить указанную команду, поскольку у вас присутствуют активно заблокированные таблица или открытая транзакция" serbian "Ne mogu da izvršim datu komandu zbog toga što su tabele zaključane ili je transakcija u toku" 
- spa "No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transición activa" + spa "No puedo ejecutar el comando dado porque tiene tablas activas bloqueadas o una transacción activa" swe "Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion" ukr "Не можу виконати подану команду тому, що таблиця заблокована або виконується транзакція" ER_UNKNOWN_SYSTEM_VARIABLE @@ -4395,7 +4403,7 @@ ER_UNKNOWN_SYSTEM_VARIABLE por "Variável de sistema '%-.*s' desconhecida" rus "Неизвестная системная переменная '%-.*s'" serbian "Nepoznata sistemska promenljiva '%-.*s'" - spa "Desconocida variable de sistema '%-.*s'" + spa "Variable de sistema '%-.*s' desconocida" swe "Okänd systemvariabel: '%-.*s'" ukr "Невідома системна змінна '%-.*s'" ER_CRASHED_ON_USAGE @@ -4412,7 +4420,7 @@ ER_CRASHED_ON_USAGE por "Tabela '%-.192s' está marcada como danificada e deve ser reparada" rus "Таблица '%-.192s' помечена как испорченная и должна пройти проверку и ремонт" serbian "Tabela '%-.192s' je markirana kao oštećena i trebala bi biti popravljena" - spa "Tabla '%-.192s' está marcada como crashed y debe ser reparada" + spa "La tabla '%-.192s' está marcada como estropeada y debe de ser reparada" swe "Tabell '%-.192s' är trasig och bör repareras med REPAIR TABLE" ukr "Таблицю '%-.192s' марковано як зіпсовану та її потрібно відновити" ER_CRASHED_ON_REPAIR @@ -4429,7 +4437,7 @@ ER_CRASHED_ON_REPAIR por "Tabela '%-.192s' está marcada como danificada e a última reparação (automática?) falhou" rus "Таблица '%-.192s' помечена как испорченная и последний (автоматический?) ремонт не был успешным" serbian "Tabela '%-.192s' je markirana kao oštećena, a zadnja (automatska?) popravka je bila neuspela" - spa "Tabla '%-.192s' está marcada como crashed y la última reparación (automactica?) falló" + spa "La tabla '%-.192s' está marcada como estropeada y la última reparación (¿automática?) falló" swe "Tabell '%-.192s' är trasig och senast (automatiska?) 
reparation misslyckades" ukr "Таблицю '%-.192s' марковано як зіпсовану та останнє (автоматичне?) відновлення не вдалося" ER_WARNING_NOT_COMPLETE_ROLLBACK @@ -4445,7 +4453,7 @@ ER_WARNING_NOT_COMPLETE_ROLLBACK por "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)" rus "Внимание: по некоторым измененным нетранзакционным таблицам невозможно будет произвести откат транзакции" serbian "Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'" - spa "Aviso: Algunas tablas no transancionales no pueden tener rolled back" + spa "Algunas tablas no transaccionales ya cambiadas no puedieron ser retrocedidas (rolled back)" swe "Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK" ukr "Застереження: Деякі нетранзакційні зміни таблиць не можна буде повернути" ER_TRANS_CACHE_FULL @@ -4460,7 +4468,7 @@ ER_TRANS_CACHE_FULL nla "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. Verhoog deze mariadbd variabele en probeer opnieuw" por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mariadbd e tente novamente" rus "Транзакции, включающей большое количество команд, потребовалось более чем 'max_binlog_cache_size' байт. Увеличьте эту переменную сервера mariadbd и попробуйте еще раз" - spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mariadbd y tente de nuevo" + spa "Transacción multi-sentencia requirió de más de 'max_binlog_cache_size' bytes de almacenamiento" swe "Transaktionen krävde mera än 'max_binlog_cache_size' minne. Öka denna mariadbd-variabel och försök på nytt" ukr "Транзакція з багатьма виразами вимагає більше ніж 'max_binlog_cache_size' байтів для зберігання. 
Збільште цю змінну mariadbd та спробуйте знову" ER_SLAVE_MUST_STOP @@ -4474,7 +4482,7 @@ ER_SLAVE_MUST_STOP por "Esta operação não pode ser realizada com um 'slave' '%2$*1$s' em execução. Execute STOP SLAVE '%2$*1$s' primeiro" rus "Эту операцию невозможно выполнить при работающем потоке подчиненного сервера %2$*1$s. Сначала выполните STOP SLAVE '%2$*1$s'" serbian "Ova operacija ne može biti izvršena dok je aktivan podređeni '%2$*1$s' server. Zadajte prvo komandu 'STOP SLAVE '%2$*1$s'' da zaustavite podređeni server" - spa "Esta operación no puede ser hecha con el esclavo '%2$*1$s' funcionando, primero use STOP SLAVE '%2$*1$s'" + spa "Esta operación no puede ser realizada con el esclavo '%2$*1$s' en marcha; primero ejecute STOP SLAVE '%2$*1$s'" swe "Denna operation kan inte göras under replikering; Du har en aktiv förbindelse till '%2$*1$s'. Gör STOP SLAVE '%2$*1$s' först" ukr "Операція не може бути виконана з запущеним підлеглим '%2$*1$s', спочатку виконайте STOP SLAVE '%2$*1$s'" ER_SLAVE_NOT_RUNNING @@ -4489,7 +4497,7 @@ ER_SLAVE_NOT_RUNNING por "Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE" rus "Для этой операции требуется работающий подчиненный сервер. Сначала выполните START SLAVE" serbian "Ova operacija zahteva da je aktivan podređeni server. Konfigurišite prvo podređeni server i onda izvršite komandu 'START SLAVE'" - spa "Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE" + spa "Esta operación requiere de un esclavo funcionando; configure el esclavo y haga el START SLAVE" swe "Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE" ukr "Операція вимагає запущеного підлеглого, зконфігуруйте підлеглого та виконайте START SLAVE" ER_BAD_SLAVE @@ -4504,7 +4512,7 @@ ER_BAD_SLAVE por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO" rus "Этот сервер не настроен как подчиненный. 
Внесите исправления в конфигурационном файле или с помощью CHANGE MASTER TO" serbian "Server nije konfigurisan kao podređeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'" - spa "El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO" + spa "El servidor no está configurado como esclavo; arréglelo en el fichero/archivo de configuración o con CHANGE MASTER TO" swe "Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO" ukr "Сервер не зконфігуровано як підлеглий, виправте це у файлі конфігурації або з CHANGE MASTER TO" ER_MASTER_INFO @@ -4514,6 +4522,7 @@ ER_MASTER_INFO ger "Konnte Master-Info-Struktur '%.*s' nicht initialisieren. Weitere Fehlermeldungen können im MariaDB-Error-Log eingesehen werden" jpn "'master info '%.*s''構造体の初期化ができませんでした。MariaDBエラーログでエラーメッセージを確認してください。" serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info' '%.*s'" + spa "No pude inicializar estructura info de maestro (master) para '%.*s'; se pueden ver más mensajes de error en el historial (log) de errores de MariaDB" swe "Kunde inte initialisera replikationsstrukturerna för '%.*s'. See MariaDB fel fil för mera information" ukr "Інформаційна структура з'єднання головного і підлеглого (master.info) для '%.*s' не може бути ініціалізована" ER_SLAVE_THREAD @@ -4528,7 +4537,7 @@ ER_SLAVE_THREAD por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema" rus "Невозможно создать поток подчиненного сервера. 
Проверьте системные ресурсы" serbian "Nisam mogao da startujem thread za podređeni server, proverite sistemske resurse" - spa "No puedo crear el thread esclavo, verifique recursos del sistema" + spa "No puedo crear el hilo (thread) esclavo; verifique recursos del sistema" swe "Kunde inte starta en tråd för replikering" ukr "Не можу створити підлеглу гілку, перевірте системні ресурси" ER_TOO_MANY_USER_CONNECTIONS 42000 @@ -4545,7 +4554,7 @@ ER_TOO_MANY_USER_CONNECTIONS 42000 por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas" rus "У пользователя %-.64s уже больше чем 'max_user_connections' активных соединений" serbian "Korisnik %-.64s već ima više aktivnih konekcija nego što je to određeno 'max_user_connections' promenljivom" - spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas" + spa "El usuario %-.64s ya tiene más de 'max_user_connections' conexiones activas" swe "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar" ukr "Користувач %-.64s вже має більше ніж 'max_user_connections' активних з'єднань" ER_SET_CONSTANTS_ONLY @@ -4562,7 +4571,7 @@ ER_SET_CONSTANTS_ONLY por "Você pode usar apenas expressões constantes com SET" rus "С этой командой вы можете использовать только константные выражения" serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'" - spa "Tu solo debes usar expresiones constantes con SET" + spa "Sólo puede usar expresiones constantes en esta sentencia" swe "Man kan endast använda konstantuttryck med SET" ukr "Можна використовувати лише вирази зі сталими у SET" ER_LOCK_WAIT_TIMEOUT @@ -4578,7 +4587,7 @@ ER_LOCK_WAIT_TIMEOUT por "Tempo de espera (timeout) de travamento excedido. 
Tente reiniciar a transação" rus "Таймаут ожидания блокировки истек; попробуйте перезапустить транзакцию" serbian "Vremenski limit za zaključavanje tabele je istekao; Probajte da ponovo startujete transakciju" - spa "Tiempo de bloqueo de espera excedido" + spa "Tiempo de espera de bloqueo excedido; intente rearrancar la transacción" swe "Fick inte ett lås i tid ; Försök att starta om transaktionen" ukr "Затримку очікування блокування вичерпано" ER_LOCK_TABLE_FULL @@ -4611,7 +4620,7 @@ ER_READ_ONLY_TRANSACTION 25000 por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED" rus "Блокировки обновлений нельзя получить в процессе чтения не принятой (в режиме READ UNCOMMITTED) транзакции" serbian "Zaključavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija" - spa "Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED" + spa "No se pueden adquirir bloqueos de actualización durante una transacción READ UNCOMMITTED" swe "Updateringslås kan inte göras när man använder READ UNCOMMITTED" ukr "Оновити блокування не можливо на протязі транзакції READ UNCOMMITTED" ER_DROP_DB_WITH_READ_LOCK @@ -4627,7 +4636,7 @@ ER_DROP_DB_WITH_READ_LOCK por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura" rus "Не допускается DROP DATABASE, пока поток держит глобальную блокировку чтения" serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka" - spa "DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global" + spa "DROP DATABASE no permitido mientras un hilo (thread) está ejerciendo un bloqueo de lectura global" swe "DROP DATABASE är inte tillåtet när man har ett globalt läslås" ukr "DROP DATABASE не дозволено доки гілка перебуває під загальним блокуванням читання" ER_CREATE_DB_WITH_READ_LOCK @@ -4643,7 +4652,7 @@ ER_CREATE_DB_WITH_READ_LOCK por "CREATE DATABASE não 
permitido enquanto uma 'thread' está mantendo um travamento global de leitura" rus "Не допускается CREATE DATABASE, пока поток держит глобальную блокировку чтения" serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka" - spa "CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global" + spa "CREATE DATABASE no permitido mientras un hilo (thread) está manteniendo un bloqueo de lectura global" swe "CREATE DATABASE är inte tillåtet när man har ett globalt läslås" ukr "CREATE DATABASE не дозволено доки гілка перебуває під загальним блокуванням читання" ER_WRONG_ARGUMENTS @@ -4659,7 +4668,7 @@ ER_WRONG_ARGUMENTS por "Argumentos errados para %s" rus "Неверные параметры для %s" serbian "Pogrešni argumenti prosleđeni na %s" - spa "Argumentos errados para %s" + spa "Argumentos incorrectos para %s" swe "Felaktiga argument till %s" ukr "Хибний аргумент для %s" ER_NO_PERMISSION_TO_CREATE_USER 42000 @@ -4674,7 +4683,7 @@ ER_NO_PERMISSION_TO_CREATE_USER 42000 por "Não é permitido a '%s'@'%s' criar novos usuários" rus "'%s'@'%s' не разрешается создавать новых пользователей" serbian "Korisniku '%s'@'%s' nije dozvoljeno da kreira nove korisnike" - spa "'%s'@'%s' no es permitido para crear nuevos usuarios" + spa "'%s'@'%s' no está permitido para crear nuevos usuarios" swe "'%s'@'%s' har inte rättighet att skapa nya användare" ukr "Користувачу '%s'@'%s' не дозволено створювати нових користувачів" ER_UNION_TABLES_IN_DIFFERENT_DIR @@ -4689,7 +4698,7 @@ ER_UNION_TABLES_IN_DIFFERENT_DIR por "Definição incorreta da tabela. 
Todas as tabelas contidas na junção devem estar no mesmo banco de dados" rus "Неверное определение таблицы; Все таблицы в MERGE должны принадлежать одной и той же базе данных" serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka" - spa "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos" + spa "Definición incorrecta de la tabla; todas las tablas MERGE deben de estar en la misma base de datos" swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas" ukr "Хибне визначення таблиці; всі MERGE-таблиці повинні належити до однієї бази ланних." ER_LOCK_DEADLOCK 40001 @@ -4704,7 +4713,7 @@ ER_LOCK_DEADLOCK 40001 por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação" rus "Возникла тупиковая ситуация в процессе получения блокировки; Попробуйте перезапустить транзакцию" serbian "Unakrsno zaključavanje pronađeno kada sam pokušao da dobijem pravo na zaključavanje; Probajte da restartujete transakciju" - spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición" + spa "Encontrado estancamiento (deadlock) al intentar obtener el bloqueo; intente volver a comenzar la transacción" swe "Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen" ukr "Взаємне блокування знайдено під час спроби отримати блокування; спробуйте перезапустити транзакцію." 
ER_TABLE_CANT_HANDLE_FT @@ -4719,7 +4728,7 @@ ER_TABLE_CANT_HANDLE_FT por "O tipo de tabela utilizado (%s) não suporta índices de texto completo (fulltext indexes)" rus "Используемый тип таблиц (%s) не поддерживает полнотекстовых индексов" serbian "Upotrebljeni tip tabele (%s) ne podržava 'FULLTEXT' indekse" - spa "El tipo de tabla usada (%s) no soporta índices FULLTEXT" + spa "El motor de almacenaje %s no soporta índices FULLTEXT" swe "Tabelltypen (%s) har inte hantering av FULLTEXT-index" ukr "Використаний тип таблиці (%s) не підтримує FULLTEXT індексів" ER_CANNOT_ADD_FOREIGN @@ -4733,7 +4742,7 @@ ER_CANNOT_ADD_FOREIGN por "Não pode acrescentar uma restrição de chave estrangeira para `%s`" rus "Невозможно добавить ограничения внешнего ключа для `%s`" serbian "Ne mogu da dodam proveru spoljnog ključa na `%s`" - spa "No puede adicionar clave extranjera constraint para `%s`" + spa "No puedo añadir restricción de clave foránea para `%s`" swe "Kan inte lägga till 'FOREIGN KEY constraint' för `%s`'" ukr "Не можу додати обмеження зовнішнього ключа Ha `%s`" ER_NO_REFERENCED_ROW 23000 @@ -4749,7 +4758,7 @@ ER_NO_REFERENCED_ROW 23000 norwegian-ny "Cannot add a child row: a foreign key constraint fails" por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou" rus "Невозможно добавить или обновить дочернюю строку: проверка ограничений внешнего ключа не выполняется" - spa "No puede adicionar una línea hijo: falla de clave extranjera constraint" + spa "No puedo añadir o actualizar una fila hija: ha fallado una restrición de clave foránea" swe "FOREIGN KEY-konflikt: Kan inte skriva barn" ukr "Не вдається додати або оновити дочірній рядок: невдала перевірка обмеження зовнішнього ключа" ER_ROW_IS_REFERENCED 23000 @@ -4764,7 +4773,7 @@ ER_ROW_IS_REFERENCED 23000 por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou" rus "Невозможно удалить или обновить родительскую строку: проверка ограничений внешнего ключа не выполняется" 
serbian "Ne mogu da izbrišem roditeljski slog: provera spoljnog ključa je neuspela" - spa "No puede deletar una línea padre: falla de clave extranjera constraint" + spa "No puedo borrar o actualizar una fila padre: ha fallado una restrición de clave foránea" swe "FOREIGN KEY-konflikt: Kan inte radera fader" ER_CONNECT_TO_MASTER 08S01 chi "连接master时出错:%-.128s" @@ -4775,7 +4784,7 @@ ER_CONNECT_TO_MASTER 08S01 nla "Fout bij opbouwen verbinding naar master: %-.128s" por "Erro conectando com o master: %-.128s" rus "Ошибка соединения с головным сервером: %-.128s" - spa "Error de coneccion a master: %-.128s" + spa "Error conectando al maestro (master): %-.128s" swe "Fick fel vid anslutning till master: %-.128s" ER_QUERY_ON_MASTER chi "在Master上运行查询时出错:%-.128s" @@ -4786,7 +4795,7 @@ ER_QUERY_ON_MASTER nla "Fout bij uitvoeren query op master: %-.128s" por "Erro rodando consulta no master: %-.128s" rus "Ошибка выполнения запроса на головном сервере: %-.128s" - spa "Error executando el query en master: %-.128s" + spa "Error ejecutando consulta (query) en maestro (master): %-.128s" swe "Fick fel vid utförande av command på mastern: %-.128s" ER_ERROR_WHEN_EXECUTING_COMMAND chi "执行命令%s时出错:%-.128s" @@ -4799,7 +4808,7 @@ ER_ERROR_WHEN_EXECUTING_COMMAND por "Erro quando executando comando %s: %-.128s" rus "Ошибка при выполнении команды %s: %-.128s" serbian "Greška pri izvršavanju komande %s: %-.128s" - spa "Error de %s: %-.128s" + spa "Error al ejecutar comando %s: %-.128s" swe "Fick fel vid utförande av %s: %-.128s" ER_WRONG_USAGE chi "%s和%s使用不正确" @@ -4812,7 +4821,7 @@ ER_WRONG_USAGE por "Uso errado de %s e %s" rus "Неверное использование %s и %s" serbian "Pogrešna upotreba %s i %s" - spa "Equivocado uso de %s y %s" + spa "Uso incorrecto de %s y %s" swe "Felaktig använding av %s and %s" ukr "Wrong usage of %s and %s" ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000 @@ -4826,7 +4835,7 @@ ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000 por "Os comandos SELECT usados têm diferente número de 
colunas" rus "Использованные операторы выборки (SELECT) дают разное количество столбцов" serbian "Upotrebljene 'SELECT' komande adresiraju različit broj kolona" - spa "El comando SELECT usado tiene diferente número de columnas" + spa "Las sentencias SELECT usadas tienen un número diferente de columnas" swe "SELECT-kommandona har olika antal kolumner" ER_CANT_UPDATE_WITH_READLOCK chi "无法执行查询,因为您有冲突的读锁" @@ -4839,7 +4848,7 @@ ER_CANT_UPDATE_WITH_READLOCK por "Não posso executar a consulta porque você tem um conflito de travamento de leitura" rus "Невозможно исполнить запрос, поскольку у вас установлены конфликтующие блокировки чтения" serbian "Ne mogu da izvršim upit zbog toga što imate zaključavanja čitanja podataka u konfliktu" - spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura" + spa "No puedo ejecutar la consulta (query) porque vd tiene un conflicto de bloqueo de lectura" swe "Kan inte utföra kommandot emedan du har ett READ-lås" ER_MIXING_NOT_ALLOWED chi "事务和非事务表的混合被禁用" @@ -4852,7 +4861,7 @@ ER_MIXING_NOT_ALLOWED por "Mistura de tabelas transacional e não-transacional está desabilitada" rus "Использование транзакционных таблиц наряду с нетранзакционными запрещено" serbian "Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je isključeno" - spa "Mezla de transancional y no-transancional tablas está deshabilitada" + spa "Desactivada la mezcla de tablas transaccionales y no transaccionales" swe "Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat" ER_DUP_ARGUMENT chi "选项'%s'在语句中使用两次" @@ -4864,7 +4873,7 @@ ER_DUP_ARGUMENT nla "Optie '%s' tweemaal gebruikt in opdracht" por "Opção '%s' usada duas vezes no comando" rus "Опция '%s' дважды использована в выражении" - spa "Opción '%s' usada dos veces en el comando" + spa "Opción '%s' usada dos veces en la sentencia" swe "Option '%s' användes två gånger" ER_USER_LIMIT_REACHED 42000 chi "用户'%-.64s'已超过'%s'资源(当前值:%ld)" @@ -4875,7 +4884,7 @@ 
ER_USER_LIMIT_REACHED 42000 nla "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)" por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)" rus "Пользователь '%-.64s' превысил использование ресурса '%s' (текущее значение: %ld)" - spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)" + spa "El usuario '%-.64s' ha excedido el recurso '%s' (valor actual: %ld)" swe "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)" ER_SPECIFIC_ACCESS_DENIED_ERROR 42000 chi "拒绝访问;您需要(至少一个)%-.128s特权用于此操作" @@ -4886,7 +4895,7 @@ ER_SPECIFIC_ACCESS_DENIED_ERROR 42000 nla "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie" por "Acesso negado. Você precisa o privilégio %-.128s para essa operação" rus "В доступе отказано. Вам нужны привилегии %-.128s для этой операции" - spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operación" + spa "Acceso denegado. Usted necesita (al menos un(os)) privilegio(s) %-.128s para esta operación" swe "Du har inte privlegiet '%-.128s' som behövs för denna operation" ukr "Access denied. 
You need the %-.128s privilege for this operation" ER_LOCAL_VARIABLE @@ -4898,7 +4907,7 @@ ER_LOCAL_VARIABLE nla "Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL" por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL" rus "Переменная '%-.64s' является потоковой (SESSION) переменной и не может быть изменена с помощью SET GLOBAL" - spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL" + spa "La variable '%-.64s' es una variable de SESSION y no puede ser usada con SET GLOBAL" swe "Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL" ER_GLOBAL_VARIABLE chi "变量'%-.64s'是全局变量,应该用SET GLOBAL设置" @@ -4909,7 +4918,7 @@ ER_GLOBAL_VARIABLE nla "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL" por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL" rus "Переменная '%-.64s' является глобальной (GLOBAL) переменной, и ее следует изменять с помощью SET GLOBAL" - spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL" + spa "La variable '%-.64s' es una variable GLOBAL y debería de ser configurada con SET GLOBAL" swe "Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL" ER_NO_DEFAULT 42000 chi "变量'%-.64s'没有默认值" @@ -4920,7 +4929,7 @@ ER_NO_DEFAULT 42000 nla "Variabele '%-.64s' heeft geen standaard waarde" por "Variável '%-.64s' não tem um valor padrão" rus "Переменная '%-.64s' не имеет значения по умолчанию" - spa "Variable '%-.64s' no tiene un valor patrón" + spa "La variable '%-.64s' no tiene un valor por defecto" swe "Variabel '%-.64s' har inte ett DEFAULT-värde" ER_WRONG_VALUE_FOR_VAR 42000 chi "变量'%-.64s'无法设置为'%-.200T'的值" @@ -4931,7 +4940,7 @@ ER_WRONG_VALUE_FOR_VAR 42000 nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.200T'" por "Variável '%-.64s' não pode ser configurada para o valor de '%-.200T'" rus "Переменная '%-.64s' не может быть установлена в 
значение '%-.200T'" - spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.200T'" + spa "La variable '%-.64s' no puede ser configurada para el valor de '%-.200T'" swe "Variabel '%-.64s' kan inte sättas till '%-.200T'" ER_WRONG_TYPE_FOR_VAR 42000 chi "变量'%-.64s'的参数类型不正确" @@ -4942,7 +4951,7 @@ ER_WRONG_TYPE_FOR_VAR 42000 nla "Foutief argumenttype voor variabele '%-.64s'" por "Tipo errado de argumento para variável '%-.64s'" rus "Неверный тип аргумента для переменной '%-.64s'" - spa "Tipo de argumento equivocado para variable '%-.64s'" + spa "Tipo de argumento incorrecto para variable '%-.64s'" swe "Fel typ av argument till variabel '%-.64s'" ER_VAR_CANT_BE_READ chi "变量'%-.64s'只能设置,不能读" @@ -4953,7 +4962,7 @@ ER_VAR_CANT_BE_READ nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen" por "Variável '%-.64s' somente pode ser configurada, não lida" rus "Переменная '%-.64s' может быть только установлена, но не считана" - spa "Variable '%-.64s' solamente puede ser configurada, no leída" + spa "La variable '%-.64s' solamente puede ser configurada, no leída" swe "Variabeln '%-.64s' kan endast sättas, inte läsas" ER_CANT_USE_OPTION_HERE 42000 chi "'%s'的使用/放置不正确" @@ -4964,7 +4973,7 @@ ER_CANT_USE_OPTION_HERE 42000 nla "Foutieve toepassing/plaatsing van '%s'" por "Errado uso/colocação de '%s'" rus "Неверное использование или в неверном месте указан '%s'" - spa "Equivocado uso/colocación de '%s'" + spa "Incorrecto uso/colocación de '%s'" swe "Fel använding/placering av '%s'" ER_NOT_SUPPORTED_YET 42000 chi "此版本的MariaDB尚未支持'%s'" @@ -4975,7 +4984,7 @@ ER_NOT_SUPPORTED_YET 42000 nla "Deze versie van MariaDB ondersteunt nog geen '%s'" por "Esta versão de MariaDB não suporta ainda '%s'" rus "Эта версия MariaDB пока еще не поддерживает '%s'" - spa "Esta versión de MariaDB no soporta todavia '%s'" + spa "Esta versión de MariaDB no soporta todavía '%s'" swe "Denna version av MariaDB kan ännu inte utföra '%s'" ER_MASTER_FATAL_ERROR_READING_BINLOG chi 
"从二进制日志读取数据时,从master遇到致命错误%d:'%-.320s'" @@ -4986,7 +4995,7 @@ ER_MASTER_FATAL_ERROR_READING_BINLOG nla "Kreeg fatale fout %d: '%-.320s' van master tijdens lezen van data uit binaire log" por "Obteve fatal erro %d: '%-.320s' do master quando lendo dados do binary log" rus "Получена неисправимая ошибка %d: '%-.320s' от головного сервера в процессе выборки данных из двоичного журнала" - spa "Recibió fatal error %d: '%-.320s' del master cuando leyendo datos del binary log" + spa "Obtenido error fatal %d del maestro (master) al leer datos del historial (log) binario: '%-.320s'" swe "Fick fatalt fel %d: '%-.320s' från master vid läsning av binärloggen" ER_SLAVE_IGNORED_TABLE chi "由于复制replicate-*-table规则,Slave SQL线程忽略了查询" @@ -4995,7 +5004,7 @@ ER_SLAVE_IGNORED_TABLE jpn "replicate-*-table ルールに従って、スレーブSQLスレッドはクエリを無視しました。" nla "Slave SQL thread negeerde de query vanwege replicate-*-table opties" por "Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela" - spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla" + spa "El hilo (thread) SQL esclavo ha ignorado la consulta (query) debido a las reglas de replicar-*-tabla" swe "Slav SQL tråden ignorerade frågan pga en replicate-*-table regel" ER_INCORRECT_GLOBAL_LOCAL_VAR chi "变量'%-.192s'是一个%s变量" @@ -5004,7 +5013,7 @@ ER_INCORRECT_GLOBAL_LOCAL_VAR jpn "変数 '%-.192s' は %s 変数です。" nla "Variabele '%-.192s' is geen %s variabele" serbian "Promenljiva '%-.192s' je %s promenljiva" - spa "Variable '%-.192s' es una %s variable" + spa "La variable '%-.192s' es una variable %s" swe "Variabel '%-.192s' är av typ %s" ER_WRONG_FK_DEF 42000 chi "'%-.192s'的外键定义不正确:%s" @@ -5013,7 +5022,7 @@ ER_WRONG_FK_DEF 42000 jpn "外部キー '%-.192s' の定義の不正: %s" nla "Incorrecte foreign key definitie voor '%-.192s': %s" por "Definição errada da chave estrangeira para '%-.192s': %s" - spa "Equivocada definición de llave extranjera para '%-.192s': %s" + spa "Definición de clave foránea incorrecta para '%-.192s': %s" 
swe "Felaktig FOREIGN KEY-definition för '%-.192s': %s" ER_KEY_REF_DO_NOT_MATCH_TABLE_REF chi "索引参考和表参考不匹配" @@ -5022,7 +5031,7 @@ ER_KEY_REF_DO_NOT_MATCH_TABLE_REF jpn "外部キーの参照表と定義が一致しません。" nla "Sleutel- en tabelreferentie komen niet overeen" por "Referência da chave e referência da tabela não coincidem" - spa "Referencia de llave y referencia de tabla no coinciden" + spa "La referencia de clave y la referencia de tabla no coinciden" swe "Nyckelreferensen och tabellreferensen stämmer inte överens" ER_OPERAND_COLUMNS 21000 chi "操作数应包含%d列" @@ -5031,7 +5040,7 @@ ER_OPERAND_COLUMNS 21000 jpn "オペランドに %d 個の列が必要です。" nla "Operand behoort %d kolommen te bevatten" rus "Операнд должен содержать %d колонок" - spa "Operando debe tener %d columna(s)" + spa "El operando debería de contener %d columna(s)" ukr "Операнд має складатися з %d стовбців" ER_SUBQUERY_NO_1_ROW 21000 chi "子查询返回超过1行" @@ -5041,7 +5050,7 @@ ER_SUBQUERY_NO_1_ROW 21000 nla "Subquery retourneert meer dan 1 rij" por "Subconsulta retorna mais que 1 registro" rus "Подзапрос возвращает более одной записи" - spa "Subconsulta retorna mas que 1 línea" + spa "La subconsulta (subquery) devuelve más de 1 fila" swe "Subquery returnerade mer än 1 rad" ukr "Підзапит повертає більш нiж 1 запис" ER_UNKNOWN_STMT_HANDLER @@ -5052,7 +5061,7 @@ ER_UNKNOWN_STMT_HANDLER jpn "'%.*s' はプリペアードステートメントの不明なハンドルです。(%s で指定されました)" nla "Onebekende prepared statement handler (%.*s) voor %s aangegeven" por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s" - spa "Desconocido preparado comando handler (%.*s) dado para %s" + spa "Manejador desconocido de sentencia preparada (%.*s) dado para %s" swe "Okänd PREPARED STATEMENT id (%.*s) var given till %s" ukr "Unknown prepared statement handler (%.*s) given to %s" ER_CORRUPT_HELP_DB @@ -5062,7 +5071,7 @@ ER_CORRUPT_HELP_DB jpn "ヘルプデータベースは壊れているか存在しません。" nla "Help database is beschadigd of bestaat niet" por "Banco de dado de ajuda corrupto ou não existente" - spa "Base de 
datos Help está corrupto o no existe" + spa "O la Base de datos de Ayuda está corrupta o no existe" swe "Hjälpdatabasen finns inte eller är skadad" ER_CYCLIC_REFERENCE chi "亚查询的死环参考" @@ -5072,7 +5081,7 @@ ER_CYCLIC_REFERENCE nla "Cyclische verwijzing in subqueries" por "Referência cíclica em subconsultas" rus "Циклическая ссылка на подзапрос" - spa "Cíclica referencia en subconsultas" + spa "Referencia cíclica en subconsultas (subqueries)" swe "Cyklisk referens i subqueries" ukr "Циклічне посилання на підзапит" ER_AUTO_CONVERT @@ -5083,7 +5092,7 @@ ER_AUTO_CONVERT nla "Veld '%s' wordt van %s naar %s geconverteerd" por "Convertendo coluna '%s' de %s para %s" rus "Преобразование поля '%s' из %s в %s" - spa "Convirtiendo columna '%s' de %s para %s" + spa "Convirtiendo la columna '%s' de %s a %s" swe "Konvertar kolumn '%s' från %s till %s" ukr "Перетворення стовбца '%s' з %s у %s" ER_ILLEGAL_REFERENCE 42S22 @@ -5104,7 +5113,7 @@ ER_DERIVED_MUST_HAVE_ALIAS 42000 jpn "導出表には別名が必須です。" nla "Voor elke afgeleide tabel moet een unieke alias worden gebruikt" por "Cada tabela derivada deve ter seu próprio alias" - spa "Cada tabla derivada debe tener su propio alias" + spa "Cada tabla derivada debe de tener su propio alias" swe "Varje 'derived table' måste ha sitt eget alias" ER_SELECT_REDUCED 01000 chi "SELECT %u在优化期间被减" @@ -5114,7 +5123,7 @@ ER_SELECT_REDUCED 01000 nla "Select %u werd geredureerd tijdens optimtalisatie" por "Select %u foi reduzido durante otimização" rus "Select %u был упразднен в процессе оптимизации" - spa "Select %u fué reducido durante optimización" + spa "La selección %u fué reducida durante optimización" swe "Select %u reducerades vid optimiering" ukr "Select %u was скасовано при оптимiзацii" ER_TABLENAME_NOT_ALLOWED_HERE 42000 @@ -5124,7 +5133,7 @@ ER_TABLENAME_NOT_ALLOWED_HERE 42000 jpn "特定のSELECTのみで使用の表 '%-.192s' は %-.32s では使用できません。" nla "Tabel '%-.192s' uit een van de SELECTS kan niet in %-.32s gebruikt worden" por "Tabela '%-.192s' de um dos SELECTs 
não pode ser usada em %-.32s" - spa "Tabla '%-.192s' de uno de los SELECT no puede ser usada en %-.32s" + spa "La tabla '%-.192s' de uno de los SELECT no puede ser usada en %-.32s" swe "Tabell '%-.192s' från en SELECT kan inte användas i %-.32s" ER_NOT_SUPPORTED_AUTH_MODE 08004 chi "客户端不支持服务器请求的身份验证协议;考虑升级MariaDB客户端" @@ -5133,7 +5142,7 @@ ER_NOT_SUPPORTED_AUTH_MODE 08004 jpn "クライアントはサーバーが要求する認証プロトコルに対応できません。MariaDBクライアントのアップグレードを検討してください。" nla "Client ondersteunt het door de server verwachtte authenticatieprotocol niet. Overweeg een nieuwere MariaDB client te gebruiken" por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MariaDB" - spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MariaDB" + spa "El cliente no soporta protocolo de autenticación requerido por el servidor; considere mejorar el cliente MariaDB" swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet" ER_SPATIAL_CANT_HAVE_NULL 42000 chi "SPATIAL索引的所有部分必须不为null" @@ -5142,7 +5151,7 @@ ER_SPATIAL_CANT_HAVE_NULL 42000 jpn "空間索引のキー列は NOT NULL でなければいけません。" nla "Alle delete van een SPATIAL index dienen als NOT NULL gedeclareerd te worden" por "Todas as partes de uma SPATIAL index devem ser NOT NULL" - spa "Todas las partes de una SPATIAL index deben ser NOT NULL" + spa "Todas las partes de un índice SPATIAL deben de ser NOT NULL" swe "Alla delar av en SPATIAL index måste vara NOT NULL" ER_COLLATION_CHARSET_MISMATCH 42000 chi "COLLATION'%s'无效地用于字符集'%s'" @@ -5151,7 +5160,7 @@ ER_COLLATION_CHARSET_MISMATCH 42000 jpn "COLLATION '%s' は CHARACTER SET '%s' に適用できません。" nla "COLLATION '%s' is niet geldig voor CHARACTER SET '%s'" por "COLLATION '%s' não é válida para CHARACTER SET '%s'" - spa "COLLATION '%s' no es válido para CHARACTER SET '%s'" + spa "El COTEJO (COLLATION) '%s' no es válido para CHARACTER SET '%s'" swe "COLLATION '%s' är 
inte tillåtet för CHARACTER SET '%s'" ER_SLAVE_WAS_RUNNING chi "Slave已经在运行" @@ -5160,7 +5169,7 @@ ER_SLAVE_WAS_RUNNING jpn "スレーブはすでに稼働中です。" nla "Slave is reeds actief" por "O slave já está rodando" - spa "Slave ya está funcionando" + spa "El esclavo ya está funcionando" swe "Slaven har redan startat" ER_SLAVE_WAS_NOT_RUNNING chi "slave已经停止了" @@ -5169,7 +5178,7 @@ ER_SLAVE_WAS_NOT_RUNNING jpn "スレーブはすでに停止しています。" nla "Slave is reeds gestopt" por "O slave já está parado" - spa "Slave ya fué parado" + spa "El esclavo ya fué parado" swe "Slaven har redan stoppat" ER_TOO_BIG_FOR_UNCOMPRESS chi "未压缩的数据量太大;最大量为%d(可能未压缩数据的长度已损坏)" @@ -5178,7 +5187,7 @@ ER_TOO_BIG_FOR_UNCOMPRESS jpn "展開後のデータが大きすぎます。最大サイズは %d です。(展開後データの長さ情報が壊れている可能性もあります。)" nla "Ongecomprimeerder data is te groot; de maximum lengte is %d (waarschijnlijk, de lengte van de gecomprimeerde data was beschadigd)" por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)" - spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)" + spa "Tamaño demasiado grande para datos descomprimidos; el máximo tamaño es %d. 
(probablemente, el tamaño de datos descomprimidos fué corrompido)" ER_ZLIB_Z_MEM_ERROR chi "ZLIB:内存不足" eng "ZLIB: Not enough memory" @@ -5186,7 +5195,7 @@ ER_ZLIB_Z_MEM_ERROR jpn "ZLIB: メモリ不足です。" nla "ZLIB: Onvoldoende geheugen" por "ZLIB: Não suficiente memória disponível" - spa "Z_MEM_ERROR: No suficiente memoria para zlib" + spa "ZLIB: No hay suficiente memoria" ER_ZLIB_Z_BUF_ERROR chi "ZLIB:输出缓冲区中没有足够的空间(可能未压缩数据的长度已损坏)" eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)" @@ -5194,7 +5203,7 @@ ER_ZLIB_Z_BUF_ERROR jpn "ZLIB: 出力バッファに十分な空きがありません。(展開後データの長さ情報が壊れている可能性もあります。)" nla "ZLIB: Onvoldoende ruimte in uitgaande buffer (waarschijnlijk, de lengte van de ongecomprimeerde data was beschadigd)" por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)" - spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)" + spa "ZLIB: No hay suficiente espacio en el búfer de salida (probablemente, el tamaño de datos descomprimidos fué corrompido)" ER_ZLIB_Z_DATA_ERROR chi "ZLIB:输入数据已损坏" eng "ZLIB: Input data corrupted" @@ -5202,10 +5211,11 @@ ER_ZLIB_Z_DATA_ERROR jpn "ZLIB: 入力データが壊れています。" nla "ZLIB: Invoer data beschadigd" por "ZLIB: Dados de entrada está corrupto" - spa "ZLIB: Dato de entrada fué corrompido para zlib" + spa "ZLIB: Dato de entrada corrupto" ER_CUT_VALUE_GROUP_CONCAT chi "group_concat()削减了行%u. 
%s" eng "Row %u was cut by %s)" + spa "La fila %u ha sido cortada por %s)" ER_WARN_TOO_FEW_RECORDS 01000 chi "行%lu不包含所有列的数据" eng "Row %lu doesn't contain data for all columns" @@ -5213,7 +5223,7 @@ ER_WARN_TOO_FEW_RECORDS 01000 jpn "行 %lu はすべての列へのデータを含んでいません。" nla "Rij %lu bevat niet de data voor alle kolommen" por "Conta de registro é menor que a conta de coluna na linha %lu" - spa "Línea %lu no contiene datos para todas las columnas" + spa "La fila %lu no contiene datos para todas las columnas" ER_WARN_TOO_MANY_RECORDS 01000 chi "行%lu被截断;它包含的数据比输入列更多" eng "Row %lu was truncated; it contained more data than there were input columns" @@ -5221,24 +5231,25 @@ ER_WARN_TOO_MANY_RECORDS 01000 jpn "行 %lu はデータを切り捨てられました。列よりも多いデータを含んでいました。" nla "Regel %lu ingekort, bevatte meer data dan invoer kolommen" por "Conta de registro é maior que a conta de coluna na linha %lu" - spa "Línea %lu fué truncada; La misma contine mas datos que las que existen en las columnas de entrada" + spa "La fila %lu fué truncada; contenía más datos que columnas de entrada" ER_WARN_NULL_TO_NOTNULL 22004 chi "列设置为默认值; NULL在行'%s'中提供给了NOT NULL列%lu" eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %lu" ger "Feld auf Vorgabewert gesetzt, da NULL für NOT-NULL-Feld '%s' in Zeile %lu angegeben" jpn "列にデフォルト値が設定されました。NOT NULLの列 '%s' に 行 %lu で NULL が与えられました。" por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %lu" - spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %lu" + spa "Columna puesta a valor por defecto; NULL suministrado para columna NOT NULL '%s' en la fila %lu" ER_WARN_DATA_OUT_OF_RANGE 22003 - chi "列'%s'行%lu的值超出范围" + chi "列'%s'行%lu的值超出范围" eng "Out of range value for column '%s' at row %lu" + spa "Valor fuera de rango para la columna '%s' en la fila %lu" WARN_DATA_TRUNCATED 01000 chi "数据被截断,在列'%s', 行%lu" eng "Data truncated for column '%s' at row %lu" ger "Daten abgeschnitten für Feld '%s' in Zeile %lu" jpn "列 '%s' の 行 
%lu でデータが切り捨てられました。" por "Dado truncado para coluna '%s' na linha %lu" - spa "Datos truncados para columna '%s' en la línea %lu" + spa "Datos truncados para la columna '%s' en la fila %lu" ER_WARN_USING_OTHER_HANDLER chi "使用存储引擎%s 表格'%s'" eng "Using storage engine %s for table '%s'" @@ -5246,7 +5257,7 @@ ER_WARN_USING_OTHER_HANDLER hindi "स्टोरेज इंजन %s का इस्तेमाल टेबल '%s' के लिए किया जा रहा है" jpn "ストレージエンジン %s が表 '%s' に利用されています。" por "Usando engine de armazenamento %s para tabela '%s'" - spa "Usando motor de almacenamiento %s para tabla '%s'" + spa "Usando motor de almacenaje %s para la tabla '%s'" swe "Använder handler %s för tabell '%s'" ER_CANT_AGGREGATE_2COLLATIONS chi "非法混合collations(%s,%s)和(%s,%s),用于操作'%s'" @@ -5254,53 +5265,54 @@ ER_CANT_AGGREGATE_2COLLATIONS ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) für Operation '%s'" jpn "照合順序 (%s,%s) と (%s,%s) の混在は操作 '%s' では不正です。" por "Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'" - spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'" + spa "Mezcla ilegal de cotejos (collations) (%s,%s) y (%s,%s) para la operación '%s'" ER_DROP_USER chi "无法删除一个或多个请求的用户" eng "Cannot drop one or more of the requested users" ger "Kann einen oder mehrere der angegebenen Benutzer nicht löschen" + spa "No puedo eliminar uno o más de los usuarios solicitados" ER_REVOKE_GRANTS chi "无法为一个或多个请求的用户撤消所有权限" eng "Can't revoke all privileges for one or more of the requested users" ger "Kann nicht alle Berechtigungen widerrufen, die für einen oder mehrere Benutzer gewährt wurden" jpn "指定されたユーザーから指定された全ての権限を剥奪することができませんでした。" por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos" - spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados" + spa "No puedo revocar todos los privilegios para uno o más de los usuarios solicitados" ER_CANT_AGGREGATE_3COLLATIONS chi "非法混合collations(%s,%s),(%s,%s)和(%s,%s),用于操作'%s'" 
eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'" ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) für Operation '%s'" jpn "照合順序 (%s,%s), (%s,%s), (%s,%s) の混在は操作 '%s' では不正です。" por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'" - spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'" + spa "Mezcla ilegal de cotejos (collations) (%s,%s), (%s,%s), (%s,%s) para la operación '%s'" ER_CANT_AGGREGATE_NCOLLATIONS chi "非法混合collations操作'%s'" eng "Illegal mix of collations for operation '%s'" ger "Unerlaubte Mischung von Sortierreihenfolgen für Operation '%s'" jpn "操作 '%s' では不正な照合順序の混在です。" por "Ilegal combinação de collations para operação '%s'" - spa "Ilegal mezcla de collations para operación '%s'" + spa "Mezcla ilegal de cotejos (collations) para la operación '%s'" ER_VARIABLE_IS_NOT_STRUCT chi "变量'%-.64s'不是可变组件(不能用作xxxx.variable_name)" eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)" ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)" jpn "変数 '%-.64s' は構造変数の構成要素ではありません。(XXXX.変数名 という指定はできません。)" por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)" - spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)" + spa "La variable '%-.64s' no es un componente variable (No puede ser usada como XXXX.variable_name)" ER_UNKNOWN_COLLATION chi "未知的collation:'%-.64s'" eng "Unknown collation: '%-.64s'" ger "Unbekannte Sortierreihenfolge: '%-.64s'" jpn "不明な照合順序: '%-.64s'" por "Collation desconhecida: '%-.64s'" - spa "Collation desconocida: '%-.64s'" + spa "Cotejo (Collation) desconocido: '%-.64s'" ER_SLAVE_IGNORED_SSL_PARAMS chi "CHANGE MASTER中的SSL参数被忽略,因为此MariaDB从站未在没有SSL支持的情况下编译;如果启动了SSL的MariaDB从站,则可以使用它们" eng "SSL parameters in CHANGE MASTER are ignored because this MariaDB slave was compiled 
without SSL support; they can be used later if MariaDB slave with SSL is started" ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MariaDB-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn ein MariaDB-Slave mit SSL gestartet wird" jpn "このMariaDBスレーブはSSLサポートを含めてコンパイルされていないので、CHANGE MASTER のSSLパラメータは無視されました。今後SSLサポートを持つMariaDBスレーブを起動する際に利用されます。" por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MariaDB foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MariaDB com SSL seja iniciado." - spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MariaDB fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MariaDB con SSL sea inicializado" + spa "Los parámetros SSL en CHANGE MASTER son ignorados porque este esclavo MariaDB fue compilado sin soporte SSL; pueden ser usados después cuando el esclavo MariaDB con SSL sea arrancado" ER_SERVER_IS_IN_SECURE_AUTH_MODE chi "服务器在--secure-auth模式下运行,但'%s'@'%s'具有旧格式的密码;请将密码更改为新格式" eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format" @@ -5308,7 +5320,7 @@ ER_SERVER_IS_IN_SECURE_AUTH_MODE jpn "サーバーは --secure-auth モードで稼働しています。しかし '%s'@'%s' は古い形式のパスワードを使用しています。新しい形式のパスワードに変更してください。" por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato" rus "Сервер запущен в режиме --secure-auth (безопасной авторизации), но для пользователя '%s'@'%s' пароль сохранён в старом формате; необходимо обновить формат пароля" - spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato" + spa "El servidor se está ejecutando en modo --secure-auth, pero '%s'@'%s' tiene una contraseña con formato antiguo; por favor cambie la contraseña al nuevo formato" 
ER_WARN_FIELD_RESOLVED chi "列或参考'%-.192s%s%-.192s%s%-.192s' 在SELECT #%d 中, 在SELECT #%d中得到解决" eng "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d" @@ -5316,7 +5328,7 @@ ER_WARN_FIELD_RESOLVED jpn "フィールドまたは参照 '%-.192s%s%-.192s%s%-.192s' は SELECT #%d ではなく、SELECT #%d で解決されました。" por "Campo ou referência '%-.192s%s%-.192s%s%-.192s' de SELECT #%d foi resolvido em SELECT #%d" rus "Поле или ссылка '%-.192s%s%-.192s%s%-.192s' из SELECTа #%d была найдена в SELECTе #%d" - spa "Campo o referencia '%-.192s%s%-.192s%s%-.192s' de SELECT #%d fue resolvido en SELECT #%d" + spa "El campo o la referencia '%-.192s%s%-.192s%s%-.192s' de SELECT #%d se resolvió en SELECT #%d" ukr "Стовбець або посилання '%-.192s%s%-.192s%s%-.192s' із SELECTу #%d було знайдене у SELECTі #%d" ER_BAD_SLAVE_UNTIL_COND chi "START SLAVE UNTIL的参数或参数的组合不正确" @@ -5324,21 +5336,21 @@ ER_BAD_SLAVE_UNTIL_COND ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL" jpn "START SLAVE UNTIL へのパラメータまたはその組み合わせが不正です。" por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL" - spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL" + spa "Parámetro incorrecto o combinación de parámetros para START SLAVE UNTIL" ER_MISSING_SKIP_SLAVE chi "START SLAVE UNTIL进行逐步复制时建议使用--skip-slave-start;否则,如果有意外的Slave的mariadbd重启,可能有问题" eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mariadbd restart" ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. 
Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet" jpn "START SLAVE UNTIL で段階的にレプリケーションを行う際には、--skip-slave-start オプションを使うことを推奨します。使わない場合、スレーブのmariadbdが不慮の再起動をすると問題が発生します。" por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mariadbd escravo" - spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mariadbd slave" + spa "Se recomienda usar --skip-slave-start al hacer réplica paso a paso con START SLAVE UNTIL; en caso contrario, obtendrá problemas si tiene lugar un rearranque inesperado del esclavo mariadb" ER_UNTIL_COND_IGNORED chi "不能启动SQL线程所以UNTIL选项被忽略" eng "SQL thread is not to be started so UNTIL options are ignored" ger "SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert" jpn "スレーブSQLスレッドが開始されないため、UNTILオプションは無視されました。" por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas" - spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas" + spa "Un hilo (thread) SQL no ha de ser arrancado de esa manera HASTA que las opciones sean ignordas" ER_WRONG_NAME_FOR_INDEX 42000 chi "索引名称'%-.100s'不正确" eng "Incorrect index name '%-.100s'" @@ -5353,7 +5365,7 @@ ER_WRONG_NAME_FOR_CATALOG 42000 ger "Falscher Katalogname '%-.100s'" jpn "カタログ名 '%-.100s' は不正です。" por "Incorreto nome de catálogo '%-.100s'" - spa "Nombre de catalog incorrecto '%-.100s'" + spa "Nombre de catálogo incorrecto '%-.100s'" swe "Felaktigt katalog namn '%-.100s'" ER_WARN_QC_RESIZE chi "设置查询缓存值%llu失败;新查询缓存值为%lu" @@ -5361,7 +5373,7 @@ ER_WARN_QC_RESIZE ger "Änderung der Query-Cache-Größe auf %llu fehlgeschlagen; neue Query-Cache-Größe ist %lu" por "Falha em Query cache para configurar tamanho %llu, novo tamanho de query cache é %lu" rus "Кеш запросов не может 
установить размер %llu, новый размер кеша зпросов - %lu" - spa "Query cache fallada para configurar tamaño %llu, nuevo tamaño de query cache es %lu" + spa "La caché de consulta (query) ha fallado al poner el tamaño %llu; el nuevo tamaño de caché de consulta (query) es %lu" swe "Storleken av "Query cache" kunde inte sättas till %llu, ny storlek är %lu" ukr "Кеш запитів неспроможен встановити розмір %llu, новий розмір кеша запитів - %lu" ER_BAD_FT_COLUMN @@ -5370,7 +5382,7 @@ ER_BAD_FT_COLUMN ger "Feld '%-.192s' kann nicht Teil eines FULLTEXT-Index sein" jpn "列 '%-.192s' は全文索引のキーにはできません。" por "Coluna '%-.192s' não pode ser parte de índice FULLTEXT" - spa "Columna '%-.192s' no puede ser parte de FULLTEXT index" + spa "La columna '%-.192s' no puede format parte de índice FULLTEXT" swe "Kolumn '%-.192s' kan inte vara del av ett FULLTEXT index" ER_UNKNOWN_KEY_CACHE chi "未知索引缓存'%-.100s'" @@ -5378,7 +5390,7 @@ ER_UNKNOWN_KEY_CACHE ger "Unbekannter Schlüssel-Cache '%-.100s'" jpn "'%-.100s' は不明なキーキャッシュです。" por "Key cache desconhecida '%-.100s'" - spa "Desconocida key cache '%-.100s'" + spa "Caché de clave desconocida '%-.100s'" swe "Okänd nyckel cache '%-.100s'" ER_WARN_HOSTNAME_WONT_WORK chi "MariaDB以-skip-name-resolve模式启动;想用grant,您必须重新启动,不用这个选项" @@ -5386,7 +5398,7 @@ ER_WARN_HOSTNAME_WONT_WORK ger "MariaDB wurde mit --skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe möglich ist" jpn "MariaDBは --skip-name-resolve モードで起動しています。このオプションを外して再起動しなければ、この権限操作は機能しません。" por "MariaDB foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar" - spa "MariaDB esta inicializado en modo --skip-name-resolve. 
Usted necesita reinicializarlo sin esta opción para este derecho funcionar" + spa "MariaDB ha sido arrancada en modo --skip-name-resolve; vd necesita reinicializarla sin esta opción para que esta concesión funcione" ER_UNKNOWN_STORAGE_ENGINE 42000 chi "未知的存储引擎'%s'" eng "Unknown storage engine '%s'" @@ -5394,14 +5406,14 @@ ER_UNKNOWN_STORAGE_ENGINE 42000 hindi "अज्ञात स्टोरेज इंजन '%s'" jpn "'%s' は不明なストレージエンジンです。" por "Motor de tabela desconhecido '%s'" - spa "Desconocido motor de tabla '%s'" + spa "Motor de almacenaje '%s' desconocido" ER_WARN_DEPRECATED_SYNTAX chi "弃用'%s',将在将来的版本中删除。请使用%s" eng "'%s' is deprecated and will be removed in a future release. Please use %s instead" ger "'%s' ist veraltet. Bitte benutzen Sie '%s'" jpn "'%s' は将来のリリースで廃止予定です。代わりに %s を使用してください。" por "'%s' é desatualizado. Use '%s' em seu lugar" - spa "'%s' está desaprobado, use '%s' en su lugar" + spa "'%s' está obsoleto y será quitado en una entrega futura, use '%s' en su lugar" ER_NON_UPDATABLE_TABLE chi "目标表%-.100s多个%s不可更新" eng "The target table %-.100s of the %s is not updatable" @@ -5418,7 +5430,7 @@ ER_FEATURE_DISABLED ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MariaDB mit '%s' übersetzen, damit es verfügbar ist" jpn "機能 '%s' は無効です。利用するためには '%s' を含めてビルドしたMariaDBが必要です。" por "O recurso '%s' foi desativado; você necessita MariaDB construído com '%s' para ter isto funcionando" - spa "El recurso '%s' fue deshabilitado; usted necesita construir MariaDB con '%s' para tener eso funcionando" + spa "La característica '%s' fue deshabilitada; usted necesita construir MariaDB con '%s' para tener eso funcionando" swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MariaDB med '%s' definierad" ER_OPTION_PREVENTS_STATEMENT chi "MariaDB服务器使用%s选项运行,因此无法执行此语句" @@ -5426,7 +5438,7 @@ ER_OPTION_PREVENTS_STATEMENT ger "Der MariaDB-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen" jpn "MariaDBサーバーが %s オプションで実行されているので、このステートメントは実行できません。" por "O 
servidor MariaDB está rodando com a opção %s razão pela qual não pode executar esse commando" - spa "El servidor MariaDB está rodando con la opción %s tal que no puede ejecutar este comando" + spa "El servidor MariaDB se está ejecutando con la opción %s por lo que no se puede ejecutar esta sentencia" swe "MariaDB är startad med %s. Pga av detta kan du inte använda detta kommando" ER_DUPLICATED_VALUE_IN_TYPE chi "列'%-.100s'有重复的值'%-.64s'在%s" @@ -5434,32 +5446,33 @@ ER_DUPLICATED_VALUE_IN_TYPE ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s" jpn "列 '%-.100s' で、重複する値 '%-.64s' が %s に指定されています。" por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" - spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s" + spa "La columna '%-.100s' tiene valor duplicado '%-.64s' en %s" ER_TRUNCATED_WRONG_VALUE 22007 chi "截断的不正确%-.32T值:'%-.128T'" eng "Truncated incorrect %-.32T value: '%-.128T'" ger "Falscher %-.32T-Wert gekürzt: '%-.128T'" jpn "不正な %-.32T の値が切り捨てられました。: '%-.128T'" por "Truncado errado %-.32T valor: '%-.128T'" - spa "Equivocado truncado %-.32T valor: '%-.128T'" + spa "Truncado incorrecto %-.32T valor: '%-.128T'" ER_TOO_MUCH_AUTO_TIMESTAMP_COLS chi "表定义不正确;默认或ON UPDATE中只能有一个带有CURRENT_TIMESTAMP的TIMESTAMP列" eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" ger "Fehlerhafte Tabellendefinition. 
Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben" jpn "不正な表定義です。DEFAULT句またはON UPDATE句に CURRENT_TIMESTAMP をともなうTIMESTAMP型の列は1つまでです。" por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula" - spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula" + spa "Definición incorrecta de tabla; solamente puede haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o en cláusula ON UPDATE" ER_INVALID_ON_UPDATE chi "在'%-.192s'列的ON UPDATE子句上无效" eng "Invalid ON UPDATE clause for '%-.192s' column" ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.192s'" jpn "列 '%-.192s' に ON UPDATE句は無効です。" por "Inválida cláusula ON UPDATE para campo '%-.192s'" - spa "Inválido ON UPDATE cláusula para campo '%-.192s'" + spa "Cláusula ON UPDATE inválida para la columna '%-.192s'" ER_UNSUPPORTED_PS chi "尚未在prepared statement协议中支持此命令" eng "This command is not supported in the prepared statement protocol yet" ger "Dieser Befehl wird im Protokoll für vorbereitete Anweisungen noch nicht unterstützt" + spa "Este comando no se encuentra soportado para protocolo de sentencia preparada, aún" ER_GET_ERRMSG chi "出错%d '%-.200s'来自%s" dan "Modtog fejl %d '%-.200s' fra %s" @@ -5468,6 +5481,7 @@ ER_GET_ERRMSG jpn "エラー %d '%-.200s' が %s から返されました。" nor "Mottok feil %d '%-.200s' fa %s" norwegian-ny "Mottok feil %d '%-.200s' fra %s" + spa "Obtenido error %d '%-.200s' desde %s" ER_GET_TEMPORARY_ERRMSG chi "出临时错误%d '%-.200s'来自%s" dan "Modtog temporary fejl %d '%-.200s' fra %s" @@ -5476,244 +5490,297 @@ ER_GET_TEMPORARY_ERRMSG jpn "一時エラー %d '%-.200s' が %s から返されました。" nor "Mottok temporary feil %d '%-.200s' fra %s" norwegian-ny "Mottok temporary feil %d '%-.200s' fra %s" + spa "Obtenido error temporal %d '%-.200s' desde %s" ER_UNKNOWN_TIME_ZONE chi "未知或不正确的时区:'%-.64s'" eng "Unknown or incorrect time zone: 
'%-.64s'" ger "Unbekannte oder falsche Zeitzone: '%-.64s'" + spa "Zona temporal desconocida o incorrecta: '%-.64s'" ER_WARN_INVALID_TIMESTAMP chi "无效TIMESTAMP值:列'%s' 行'%lu'" eng "Invalid TIMESTAMP value in column '%s' at row %lu" ger "Ungültiger TIMESTAMP-Wert in Feld '%s', Zeile %lu" + spa "Valor inválido de SELLO TEMPORAL (TIMESTAMP) en la columna '%s' de la fila %lu" ER_INVALID_CHARACTER_STRING chi "无效的%s字符串:'%.64T'" eng "Invalid %s character string: '%.64T'" ger "Ungültiger %s-Zeichen-String: '%.64T'" + spa "Cadena de carácter %s inválida: '%.64T'" ER_WARN_ALLOWED_PACKET_OVERFLOWED chi "%s()的结果大于max_allowed_packet(%ld) - 截断" eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated" ger "Ergebnis von %s() war größer als max_allowed_packet (%ld) Bytes und wurde deshalb gekürzt" + spa "El resultado de %s() ha sido mayor que max_allowed_packet (%ld) - truncado" ER_CONFLICTING_DECLARATIONS chi "矛盾语句:'%s%s'和'%s%s'" eng "Conflicting declarations: '%s%s' and '%s%s'" ger "Widersprüchliche Deklarationen: '%s%s' und '%s%s'" + spa "Declaraciones conflictivas: '%s%s' y '%s%s'" ER_SP_NO_RECURSIVE_CREATE 2F003 chi "无法从另一个存储过程中创建%s" eng "Can't create a %s from within another stored routine" ger "Kann kein %s innerhalb einer anderen gespeicherten Routine erzeugen" + spa "No puedo crear una %s desde dentro de otra rutina almacenada" ER_SP_ALREADY_EXISTS 42000 chi "%s%s已经存在" eng "%s %s already exists" ger "%s %s existiert bereits" hindi "%s %s पहले से ही मौजूद है" + spa "%s %s ya existe" ER_SP_DOES_NOT_EXIST 42000 chi "%s%s不存在" eng "%s %s does not exist" ger "%s %s existiert nicht" hindi "%s %s मौजूद नहीं है" + spa "%s %s no existe" ER_SP_DROP_FAILED chi "未能DROP%s%s" eng "Failed to DROP %s %s" ger "DROP %s %s ist fehlgeschlagen" hindi "%s %s को ड्रॉप करने में असफल रहे" + spa "No pude ELIMINAR (DROP) %s %s" ER_SP_STORE_FAILED chi "无法创建%s%s" eng "Failed to CREATE %s %s" ger "CREATE %s %s ist fehlgeschlagen" hindi "%s %s को बनाने में असफल रहे" + spa "No pude CREAR %s 
%s" ER_SP_LILABEL_MISMATCH 42000 chi "%s,没有匹配标签:%s" eng "%s with no matching label: %s" ger "%s ohne passende Marke: %s" + spa "%s sin etiqueta coincidente: %s" ER_SP_LABEL_REDEFINE 42000 chi "重新定义标签%s" eng "Redefining label %s" ger "Neudefinition der Marke %s" + spa "Redefiniendo etiqueta %s" ER_SP_LABEL_MISMATCH 42000 chi "没有匹配的最终标签%s" eng "End-label %s without match" ger "Ende-Marke %s ohne zugehörigen Anfang" + spa "Etiqueta-Final %s sin coincidencia" ER_SP_UNINIT_VAR 01000 chi "参考未初始化的变量%s" eng "Referring to uninitialized variable %s" ger "Zugriff auf nichtinitialisierte Variable %s" + spa "Refiriéndose a variable %s sin inicializar" ER_SP_BADSELECT 0A000 chi "PROCEDURE%s不能返回给定上下文中的结果集" eng "PROCEDURE %s can't return a result set in the given context" ger "PROCEDURE %s kann im gegebenen Kontext keine Ergebnismenge zurückgeben" + spa "El PROCEDIMIENTO (PROCEDURE) %s no puede devolver un conjunto de resultados en el contexto dado" ER_SP_BADRETURN 42000 chi "RETURN仅允许在函数中" eng "RETURN is only allowed in a FUNCTION" ger "RETURN ist nur innerhalb einer FUNCTION erlaubt" hindi "RETURN को केवल FUNCTION में इस्तेमाल किया जा सकता है" + spa "RETURN sólo se permite dentro de una FUNCIÓN" ER_SP_BADSTATEMENT 0A000 chi "%s不允许在存储过程中" eng "%s is not allowed in stored procedures" ger "%s ist in gespeicherten Prozeduren nicht erlaubt" hindi "%s को STORED PROCEDURE में इस्तेमाल नहीं किया जा सकता है" + spa "%s no permitido en procedimientos almacenados" ER_UPDATE_LOG_DEPRECATED_IGNORED 42000 chi "更新日志被弃用并由二进制日志替换;SET SQL_LOG_UPDATE已被忽略。此选项将在MariaDB 5.6中删除" eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored. This option will be removed in MariaDB 5.6" ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert. 
Diese Option wird in MariaDB 5.6 entfernt" + spa "El historial (log) de actualización se encuentra obsoleto y reemplazado por el historial (log) binario; SET SQL_LOG_UPDATE ha sido ignorado. Esta opción será quitada en MariaDB 5.6" ER_UPDATE_LOG_DEPRECATED_TRANSLATED 42000 chi "更新日志被弃用并由二进制日志替换;SET SQL_LOG_UPDATE已被转换为设置SQL_LOG_BIN。此选项将在MariaDB 5.6中删除" eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN. This option will be removed in MariaDB 5.6" ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN übersetzt. Diese Option wird in MariaDB 5.6 entfernt" + spa "El historial (log) de actualización se encuentra obsoleto y reemplazado por el historial binario; SET SQL_LOG_UPDATE ha sido traducido a SET SQL_LOG_BIN. Esta opción será quitada en MariaDB 5.6" ER_QUERY_INTERRUPTED 70100 chi "查询执行中断" eng "Query execution was interrupted" ger "Ausführung der Abfrage wurde unterbrochen" + spa "Se ha interrumpido la ejecución de la consulta (query)" ER_SP_WRONG_NO_OF_ARGS 42000 chi "%s%s的参数数量不正确;预期%u,得到%u" eng "Incorrect number of arguments for %s %s; expected %u, got %u" ger "Falsche Anzahl von Argumenten für %s %s; erwarte %u, erhalte %u" + spa "Número incorrecto de argumentos para %s %s; se esperaba %u, se obtuvo %u" ER_SP_COND_MISMATCH 42000 chi "未定义的CONDITION:%s" eng "Undefined CONDITION: %s" ger "Undefinierte CONDITION: %s" + spa "CONDICIÓN no definida: %s" ER_SP_NORETURN 42000 chi "FUNCTION%s中没有RETURN" eng "No RETURN found in FUNCTION %s" ger "Kein RETURN in FUNCTION %s gefunden" hindi "FUNCTION %s में कोई RETURN है" + spa "No se hallado RETURN en FUNCIÓN %s" ER_SP_NORETURNEND 2F005 chi "FUNCTION%s结束但无RETURN" eng "FUNCTION %s ended without RETURN" ger "FUNCTION %s endete ohne RETURN" hindi "FUNCTION %s RETURN के बिना समाप्त हो गया" + spa "La FUNCIÓN %s termina sin RETURN" ER_SP_BAD_CURSOR_QUERY 42000 chi "Cursor语句必须是选择" eng "Cursor 
statement must be a SELECT" ger "Cursor-Anweisung muss ein SELECT sein" + spa "La sentencia de cursor debe de ser un SELECT" ER_SP_BAD_CURSOR_SELECT 42000 chi "Cursor SELECT不能有INTO" eng "Cursor SELECT must not have INTO" ger "Cursor-SELECT darf kein INTO haben" + spa "El SELECT de cursor no debe de tener INTO" ER_SP_CURSOR_MISMATCH 42000 chi "未定义的CURSOR:%s" eng "Undefined CURSOR: %s" ger "Undefinierter CURSOR: %s" hindi "CURSOR %s अपरिभाषित है" + spa "CURSOR indefinido: %s" ER_SP_CURSOR_ALREADY_OPEN 24000 chi "Cursor已经打开" eng "Cursor is already open" ger "Cursor ist schon geöffnet" hindi "CURSOR पहले से ही खुला है" + spa "Cursor ya abierto" ER_SP_CURSOR_NOT_OPEN 24000 chi "Cursor未打开" eng "Cursor is not open" ger "Cursor ist nicht geöffnet" + spa "Cursor no abierto" ER_SP_UNDECLARED_VAR 42000 chi "未定义的变量:%s" eng "Undeclared variable: %s" ger "Nicht deklarierte Variable: %s" + spa "Variable sin declarar: %s" ER_SP_WRONG_NO_OF_FETCH_ARGS chi "FETCH变量数不正确" eng "Incorrect number of FETCH variables" ger "Falsche Anzahl von FETCH-Variablen" + spa "Incorrecto número de variables FETCH" ER_SP_FETCH_NO_DATA 02000 chi "没有数据 - 零行被选择或处理" eng "No data - zero rows fetched, selected, or processed" ger "Keine Daten - null Zeilen geholt (fetch), ausgewählt oder verarbeitet" + spa "No hay datos - cero filas logradas, seleccionadas o procesadas" ER_SP_DUP_PARAM 42000 chi "重复参数:%s" eng "Duplicate parameter: %s" ger "Doppelter Parameter: %s" + spa "Parámetro duplicado: %s" ER_SP_DUP_VAR 42000 chi "重复变量:%s" eng "Duplicate variable: %s" ger "Doppelte Variable: %s" + spa "Variable duplicada: %s" ER_SP_DUP_COND 42000 chi "重复条件:%s" eng "Duplicate condition: %s" ger "Doppelte Bedingung: %s" + spa "Condición duplicada: %s" ER_SP_DUP_CURS 42000 chi "重复Cursor:%s" eng "Duplicate cursor: %s" ger "Doppelter Cursor: %s" + spa "Cursor duplicado: %s" ER_SP_CANT_ALTER chi "未能ALTER %s%s" eng "Failed to ALTER %s %s" ger "ALTER %s %s fehlgeschlagen" hindi "%s %s को ALTER करने में असफल रहे" + spa "Fallo en 
ALTER %s %s" ER_SP_SUBSELECT_NYI 0A000 chi "不支持子查询值" eng "Subquery value not supported" ger "Subquery-Wert wird nicht unterstützt" + spa "Valor de Subconsulta (subquery) no soportado" ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG 0A000 chi "在存储的函数或触发中不允许%s" eng "%s is not allowed in stored function or trigger" ger "%s ist in gespeicherten Funktionen und in Triggern nicht erlaubt" + spa "%s no permitido en función almacenada o en disparador" ER_SP_VARCOND_AFTER_CURSHNDLR 42000 chi "变量或条件声明在cursor或处理程序定义之后" eng "Variable or condition declaration after cursor or handler declaration" ger "Deklaration einer Variablen oder einer Bedingung nach der Deklaration eines Cursors oder eines Handlers" + spa "Declaración de variable o condición tras declaración de cursor o manejador" ER_SP_CURSOR_AFTER_HANDLER 42000 chi "处理程序声明后的cursor声明" eng "Cursor declaration after handler declaration" ger "Deklaration eines Cursors nach der Deklaration eines Handlers" + spa "Declaración de cursor tras declaración de manejador" ER_SP_CASE_NOT_FOUND 20000 chi "未能在CASE语句找到Case" eng "Case not found for CASE statement" ger "Fall für CASE-Anweisung nicht gefunden" + spa "Caso no hallado para sentencia CASE" ER_FPARSER_TOO_BIG_FILE chi "配置文件'%-.192s'太大了" eng "Configuration file '%-.192s' is too big" ger "Konfigurationsdatei '%-.192s' ist zu groß" rus "Слишком большой конфигурационный файл '%-.192s'" + spa "El fichero/archivo de configuración '%-.192s' es demasiado grande" ukr "Занадто великий конфігураційний файл '%-.192s'" ER_FPARSER_BAD_HEADER chi "文件'%-.192s'中的文件类型格式有问题" eng "Malformed file type header in file '%-.192s'" ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.192s'" rus "Неверный заголовок типа файла '%-.192s'" + spa "Cabecera de tipo de fichero/archivo malformada en fichero/archivo '%-.192s'" ukr "Невірний заголовок типу у файлі '%-.192s'" ER_FPARSER_EOF_IN_COMMENT chi "解析评论'%-.200s'时意外碰到EOF" eng "Unexpected end of file while parsing comment '%-.200s'" ger "Unerwartetes Dateiende beim Parsen 
des Kommentars '%-.200s'" rus "Неожиданный конец файла в коментарии '%-.200s'" + spa "Inesperado fin de fichero/archivo mientras se analizaba comentario '%-.200s'" ukr "Несподіванний кінець файлу у коментарі '%-.200s'" ER_FPARSER_ERROR_IN_PARAMETER chi "解析参数'%-.192s'时出错(行:'%-.192s')" eng "Error while parsing parameter '%-.192s' (line: '%-.192s')" ger "Fehler beim Parsen des Parameters '%-.192s' (Zeile: '%-.192s')" rus "Ошибка при распознавании параметра '%-.192s' (строка: '%-.192s')" + spa "Error mientras se analizaba parámetro '%-.192s' (línea: '%-.192s')" ukr "Помилка в роспізнаванні параметру '%-.192s' (рядок: '%-.192s')" ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER chi "跳过未知参数'%-.192s'时意外碰到EOF" eng "Unexpected end of file while skipping unknown parameter '%-.192s'" ger "Unerwartetes Dateiende beim Überspringen des unbekannten Parameters '%-.192s'" rus "Неожиданный конец файла при пропуске неизвестного параметра '%-.192s'" + spa "Inesperado fin de fichero/archivo mientras se saltaba parámetro desconocido '%-.192s'" ukr "Несподіванний кінець файлу у спробі проминути невідомий параметр '%-.192s'" ER_VIEW_NO_EXPLAIN chi "ANALYZE/EXPLAIN/SHOW无法进行;缺乏底层表的特权" eng "ANALYZE/EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" ger "ANALYZE/EXPLAIN/SHOW kann nicht verlangt werden. 
Rechte für zugrunde liegende Tabelle fehlen" rus "ANALYZE/EXPLAIN/SHOW не может быть выполнено; недостаточно прав на таблицы запроса" + spa "ANALYZE/EXPLAIN/SHOW no puede ser emitdo; privilegios insuficientes para tabla subyacente" ukr "ANALYZE/EXPLAIN/SHOW не може бути виконано; немає прав на таблиці запиту" ER_FRM_UNKNOWN_TYPE chi "文件'%-.192s'在其标题中有未知的'%-.64s'" eng "File '%-.192s' has unknown type '%-.64s' in its header" ger "Datei '%-.192s' hat unbekannten Typ '%-.64s' im Header" rus "Файл '%-.192s' содержит неизвестный тип '%-.64s' в заголовке" + spa "El fichero/archivo '%-.192s' es de un tipo desconocido '%-.64s' en su cabecera" ukr "Файл '%-.192s' має невідомий тип '%-.64s' у заголовку" ER_WRONG_OBJECT chi "'%-.192s.%-.192s'不是'%s'类" eng "'%-.192s.%-.192s' is not of type '%s'" ger "'%-.192s.%-.192s' ist nicht %s" rus "'%-.192s.%-.192s' - не %s" + spa "'%-.192s.%-.192s' no es del tipo '%s'" ukr "'%-.192s.%-.192s' не є %s" ER_NONUPDATEABLE_COLUMN chi "列'%-.192s'不可更新" eng "Column '%-.192s' is not updatable" ger "Feld '%-.192s' ist nicht aktualisierbar" rus "Столбец '%-.192s' не обновляемый" + spa "La columna '%-.192s' no es actualiable" ukr "Стовбець '%-.192s' не може бути зминений" ER_VIEW_SELECT_DERIVED chi "View的Select的FROM包含子查询" eng "View's SELECT contains a subquery in the FROM clause" ger "SELECT der View enthält eine Subquery in der FROM-Klausel" rus "View SELECT содержит подзапрос в конструкции FROM" + spa "El SELECT de la vista contiene una subconsulta (subquery) en la cláusula FROM" ukr "View SELECT має підзапит у конструкції FROM" # Not used any more, syntax error is returned instead @@ -5722,540 +5789,666 @@ ER_VIEW_SELECT_CLAUSE eng "View's SELECT contains a '%s' clause" ger "SELECT der View enthält eine '%s'-Klausel" rus "View SELECT содержит конструкцию '%s'" + spa "El SELECT de la vista contiene una cláusula '%s'" ukr "View SELECT має конструкцію '%s'" ER_VIEW_SELECT_VARIABLE chi "View的选择包含变量或参数" eng "View's SELECT contains a variable or 
parameter" ger "SELECT der View enthält eine Variable oder einen Parameter" rus "View SELECT содержит переменную или параметр" + spa "El SELECT de la vista contiene una variable o un parámetro" ukr "View SELECT має зминну або параметер" ER_VIEW_SELECT_TMPTABLE chi "View的SELECT指的是临时表'%-.192s'" eng "View's SELECT refers to a temporary table '%-.192s'" ger "SELECT der View verweist auf eine temporäre Tabelle '%-.192s'" rus "View SELECT содержит ссылку на временную таблицу '%-.192s'" + spa "El SELECT de la vista se refiere a una tabla temporal '%-.192s'" ukr "View SELECT використовує тимчасову таблицю '%-.192s'" ER_VIEW_WRONG_LIST chi "View的选择和VIEW的字段列表具有不同的列计数" eng "View's SELECT and view's field list have different column counts" ger "SELECT- und Feldliste der Views haben unterschiedliche Anzahlen von Spalten" rus "View SELECT и список полей view имеют разное количество столбцов" + spa "El SELECT de la vista y la lista de campos de la vista tienen un contador diferente de columnas" ukr "View SELECT і перелік стовбців view мають різну кількість сковбців" ER_WARN_VIEW_MERGE chi "View合并算法目前不能使用(假设未定义的算法)" eng "View merge algorithm can't be used here for now (assumed undefined algorithm)" ger "View-Merge-Algorithmus kann hier momentan nicht verwendet werden (undefinierter Algorithmus wird angenommen)" rus "Алгоритм слияния view не может быть использован сейчас (алгоритм будет неопеределенным)" + spa "El algoritmo de fusión de la vista no se puede usar aquí por ahora (se asume algoritmo indefinido)" ukr "Алгоритм зливання view не може бути використаний зараз (алгоритм буде невизначений)" ER_WARN_VIEW_WITHOUT_KEY chi "更新的视图没有底层表的完整键" eng "View being updated does not have complete key of underlying table in it" ger "Die aktualisierte View enthält nicht den vollständigen Schlüssel der zugrunde liegenden Tabelle" rus "Обновляемый view не содержит ключа использованных(ой) в нем таблиц(ы)" + spa "La vista que se está actualizando no tiene clave completa de la tabla subyacente 
que contiene" ukr "View, що оновлюеться, не містить повного ключа таблиці(ь), що викорістана в ньюому" ER_VIEW_INVALID chi "View'%-.192s.%-.192s'引用无效的表、列、函数、或者函数或View缺乏使用权" eng "View '%-.192s.%-.192s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them" + spa "La vista '%-.192s.%-.192s' hace referencia a tabla(s) o columna(s) o función(es) inválida(s) o al definidor/invocador de vista le faltan derechos para usarlos" ER_SP_NO_DROP_SP chi "无法从另一个存储的例程中删除或更改%s" eng "Can't drop or alter a %s from within another stored routine" ger "Kann eine %s nicht von innerhalb einer anderen gespeicherten Routine löschen oder ändern" + spa "No puedo eliminar o alterar una %s desde dentro de otra rutina almacenada" ER_SP_GOTO_IN_HNDLR chi "在存储过程处理程序中不允许GOTO" eng "GOTO is not allowed in a stored procedure handler" ger "GOTO ist im Handler einer gespeicherten Prozedur nicht erlaubt" + spa "GOTO no permitido en manejador de procedimiento almacenado" ER_TRG_ALREADY_EXISTS chi "触发'%s'已经存在" eng "Trigger '%s' already exists" ger "Trigger '%s' existiert bereits" hindi "TRIGGER '%s' पहले से मौजूद है" + spa "Ya existe el disparador `%s`" ER_TRG_DOES_NOT_EXIST chi "触发不存在" eng "Trigger does not exist" ger "Trigger existiert nicht" hindi "TRIGGER मौजूद नहीं है" + spa "El disparador no existe" ER_TRG_ON_VIEW_OR_TEMP_TABLE chi "触发器的'%-.192s'是视图或临时表" - eng "Trigger's '%-.192s' is view, temporary table or sequence" - hindi "'%-.192s' एक व्यू, टेम्पररी टेबल या सीक्वेंस है" - ger "'%-.192s' des Triggers ist View oder temporäre Tabelle" + eng "Trigger's '%-.192s' is a view, temporary table or sequence" + ger "'%-.192s' des Triggers ist ein View, temporäre Tabelle oder Sequence" + spa "El disparador '%-.192s' es una vista, tabla temporal o secuencia" + hindi "Trigger का '%-.192s' एक व्यू, टेम्पररी टेबल या सीक्वेंस है" ER_TRG_CANT_CHANGE_ROW chi "更新%s行在%s触发器中不允许" eng "Updating of %s row is not allowed in %strigger" ger "Aktualisieren einer %s-Zeile 
ist in einem %s-Trigger nicht erlaubt" + spa "Actualizar la fila %s no está permitido en disparador %s" ER_TRG_NO_SUCH_ROW_IN_TRG chi "没有%s行,触发%s" eng "There is no %s row in %s trigger" ger "Es gibt keine %s-Zeile im %s-Trigger" + spa "No hay fila %s en disparador %s" ER_NO_DEFAULT_FOR_FIELD chi "字段'%-.192s'没有默认值" eng "Field '%-.192s' doesn't have a default value" ger "Feld '%-.192s' hat keinen Vorgabewert" + spa "El campo '%-.192s' no tiene un valor por defecto" ER_DIVISION_BY_ZERO 22012 chi "除0错误" eng "Division by 0" ger "Division durch 0" hindi "0 से विभाजन" + spa "División por 0" ER_TRUNCATED_WRONG_VALUE_FOR_FIELD 22007 chi "不正确的%-.32s值:'%-.128T'用于列`%.192s`%.192s`%.192s`在%lu行" eng "Incorrect %-.32s value: '%-.128T' for column `%.192s`.`%.192s`.`%.192s` at row %lu" ger "Falscher %-.32s-Wert: '%-.128T' für Feld '`%.192s`.`%.192s`.`%.192s` in Zeile %lu" + spa "Incorrecto %-.32s valor: '%-.128T' para columna `%.192s`.`%.192s`.`%.192s` en la fila %lu" ER_ILLEGAL_VALUE_FOR_TYPE 22007 chi "在解析期间发现的非法%s '%-.192T'值" eng "Illegal %s '%-.192T' value found during parsing" ger "Nicht zulässiger %s-Wert '%-.192T' beim Parsen gefunden" + spa "Hallado valor ilegal %s '%-.192T' durante el análisi" ER_VIEW_NONUPD_CHECK chi "在不可更新的视图%`-.192s.%`-.192s上CHECK OPTION" eng "CHECK OPTION on non-updatable view %`-.192s.%`-.192s" ger "CHECK OPTION auf nicht-aktualisierbarem View %`-.192s.%`-.192s" rus "CHECK OPTION для необновляемого VIEW %`-.192s.%`-.192s" + spa "CHECK OPTION en vista no actualizable %`-.192s.%`-.192s" ukr "CHECK OPTION для VIEW %`-.192s.%`-.192s що не може бути оновленним" ER_VIEW_CHECK_FAILED 44000 chi "CHECK OPTION失败%`-.192s.%`-.192s" eng "CHECK OPTION failed %`-.192s.%`-.192s" ger "CHECK OPTION fehlgeschlagen: %`-.192s.%`-.192s" rus "Проверка CHECK OPTION для VIEW %`-.192s.%`-.192s провалилась" + spa "CHECK OPTION falló %`-.192s.%`-.192s" ukr "Перевірка CHECK OPTION для VIEW %`-.192s.%`-.192s не пройшла" ER_PROCACCESS_DENIED_ERROR 42000 chi "%-.32s命令被拒绝。用户为'%s'@'%s' 
例程'%-.192s'" eng "%-.32s command denied to user '%s'@'%s' for routine '%-.192s'" ger "Befehl %-.32s nicht zulässig für Benutzer '%s'@'%s' in Routine '%-.192s'" + spa "Comando %-.32s denegado para el usuario '%s'@'%s' para rutina '%-.192s'" ER_RELAY_LOG_FAIL chi "清除旧继relay日志失败:%s" eng "Failed purging old relay logs: %s" ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s" + spa "Falló la purga de viejos historiales (logs) de reenvío: %s" ER_PASSWD_LENGTH chi "密码哈希应该是一个%d-digit十六进制数" eng "Password hash should be a %d-digit hexadecimal number" ger "Passwort-Hash sollte eine Hexdaezimalzahl mit %d Stellen sein" + spa "El valor calculado de la contraseña debería de ser un número hexadecimal de %d-dígitos" ER_UNKNOWN_TARGET_BINLOG chi "在Binlog索引中找不到目标日志" eng "Target log not found in binlog index" ger "Ziel-Log im Binlog-Index nicht gefunden" + spa "Historial (log) de destino no hallado en índice binlog" ER_IO_ERR_LOG_INDEX_READ chi "读取日志索引文件时I/O错误" eng "I/O error reading log index file" ger "Fehler beim Lesen der Log-Index-Datei" + spa "Error de E/S leyendo fichero/archivo índice de historial (log)" ER_BINLOG_PURGE_PROHIBITED chi "服务器配置不允许Binlog清除" eng "Server configuration does not permit binlog purge" ger "Server-Konfiguration erlaubt keine Binlog-Bereinigung" + spa "La configuración del servidor no permite purgar binlog" ER_FSEEK_FAIL chi "fseek()失败" eng "Failed on fseek()" ger "fseek() fehlgeschlagen" hindi "fseek() विफल रहा" + spa "Fallo en fseek()" ER_BINLOG_PURGE_FATAL_ERR chi "日志清除期间的致命错误" eng "Fatal error during log purge" ger "Schwerwiegender Fehler bei der Log-Bereinigung" + spa "Error fatal durante la purga del historial (log)" ER_LOG_IN_USE chi "日志在用,不会清除" eng "A purgeable log is in use, will not purge" ger "Ein zu bereinigendes Log wird gerade benutzt, daher keine Bereinigung" + spa "Se encuentra en uso un historial purgable, no lo purgaré" ER_LOG_PURGE_UNKNOWN_ERR chi "日志清除期间未知错误" eng "Unknown error during log purge" ger "Unbekannter Fehler bei 
Log-Bereinigung" + spa "Error desconocido durante la purga del historial (log)" ER_RELAY_LOG_INIT chi "初始化relay日志失败。位置:%s" eng "Failed initializing relay log position: %s" ger "Initialisierung der Relais-Log-Position fehlgeschlagen: %s" + spa "Fallo inicializando la posición del historial de reenvío: %s" ER_NO_BINARY_LOGGING chi "您不使用二进制日志记录" eng "You are not using binary logging" ger "Sie verwenden keine Binärlogs" + spa "No está usando historial (log) binario" ER_RESERVED_SYNTAX chi "'%-.64s'语法保留用于MariaDB服务器内部" eng "The '%-.64s' syntax is reserved for purposes internal to the MariaDB server" ger "Die Schreibweise '%-.64s' ist für interne Zwecke des MariaDB-Servers reserviert" + spa "La sintaxis '%-.64s' está reservada para propósitos internos del servidor MariaDB" ER_WSAS_FAILED chi "WSAStartup失败了" eng "WSAStartup Failed" ger "WSAStartup fehlgeschlagen" + spa "Falló WSAStartup" ER_DIFF_GROUPS_PROC chi "无法处理具有不同组的过程" eng "Can't handle procedures with different groups yet" ger "Kann Prozeduren mit unterschiedlichen Gruppen noch nicht verarbeiten" + spa "No puedo manejar procedimientos con grupos diferentes, aún" ER_NO_GROUP_FOR_PROC chi "SELECT必须具有此过程的组" eng "Select must have a group with this procedure" ger "SELECT muss bei dieser Prozedur ein GROUP BY haben" + spa "La selección debe de tener un grupo con este procedimiento" ER_ORDER_WITH_PROC chi "无法在次存储过程使用ORDER子句" eng "Can't use ORDER clause with this procedure" ger "Kann bei dieser Prozedur keine ORDER-BY-Klausel verwenden" + spa "No puedo usar la cláusula ORDER con este procedimiento" ER_LOGGING_PROHIBIT_CHANGING_OF chi "二进制日志记录和复制禁止更改全局服务器%s" eng "Binary logging and replication forbid changing the global server %s" ger "Binärlogs und Replikation verhindern Wechsel des globalen Servers %s" + spa "El historial (log) binario y la réplica prohibe cambiar el servidor global %s" ER_NO_FILE_MAPPING chi "无法映射文件:%-.200s,错误号码:%M" eng "Can't map file: %-.200s, errno: %M" ger "Kann Datei nicht abbilden: %-.200s, Fehler: 
%M" + spa "No puedo mapear fichero/archivo: %-.200s, error: %M" ER_WRONG_MAGIC chi "魔法错误%-.64s" eng "Wrong magic in %-.64s" ger "Falsche magische Zahlen in %-.64s" + spa "Magia equivocada en %-.64s" ER_PS_MANY_PARAM chi "Prepared statement包含太多占位符" eng "Prepared statement contains too many placeholders" ger "Vorbereitete Anweisung enthält zu viele Platzhalter" + spa "Sentencia preparada contiene demasiados marcadores de posición" ER_KEY_PART_0 chi "索引部分'%-.192s'长度不能为0" eng "Key part '%-.192s' length cannot be 0" ger "Länge des Schlüsselteils '%-.192s' kann nicht 0 sein" + spa "El tamaño de trozo de clave '%-.192s' no puede ser 0" ER_VIEW_CHECKSUM chi "查看文本checksum失败" eng "View text checksum failed" ger "View-Text-Prüfsumme fehlgeschlagen" rus "Проверка контрольной суммы текста VIEW провалилась" + spa "Ha fallado la suma de comprobación del texto de la vista" ukr "Перевірка контрольної суми тексту VIEW не пройшла" ER_VIEW_MULTIUPDATE chi "无法通过JOIN视图'%-.192s.%-.192s'修改多个基础表。" eng "Can not modify more than one base table through a join view '%-.192s.%-.192s'" ger "Kann nicht mehr als eine Basistabelle über Join-View '%-.192s.%-.192s' ändern" rus "Нельзя изменить больше чем одну базовую таблицу используя многотабличный VIEW '%-.192s.%-.192s'" + spa "No puedo modificar más de una tabla base a través de una vista de unión '%-.192s.%-.192s'" ukr "Неможливо оновити більш ниж одну базову таблицю выкористовуючи VIEW '%-.192s.%-.192s', що містіть декілька таблиць" ER_VIEW_NO_INSERT_FIELD_LIST chi "无法写入JOIN视图'%-.192s.%-.192s'没有字段列表" eng "Can not insert into join view '%-.192s.%-.192s' without fields list" ger "Kann nicht ohne Feldliste in Join-View '%-.192s.%-.192s' einfügen" rus "Нельзя вставлять записи в многотабличный VIEW '%-.192s.%-.192s' без списка полей" + spa "No puedo insertar dentro de vista de unión '%-.192s.%-.192s' sin lista de campos" ukr "Неможливо уставити рядки у VIEW '%-.192s.%-.192s', що містить декілька таблиць, без списку стовбців" ER_VIEW_DELETE_MERGE_VIEW 
chi "无法从JOIN视图'%-.192s.%-.192s'删除" eng "Can not delete from join view '%-.192s.%-.192s'" ger "Kann nicht aus Join-View '%-.192s.%-.192s' löschen" rus "Нельзя удалять из многотабличного VIEW '%-.192s.%-.192s'" + spa "No puedo borrar desde vista de unión '%-.192s.%-.192s'" ukr "Неможливо видалити рядки у VIEW '%-.192s.%-.192s', що містить декілька таблиць" ER_CANNOT_USER chi "操作%s失败%.256s" eng "Operation %s failed for %.256s" ger "Operation %s schlug fehl für %.256s" norwegian-ny "Operation %s failed for '%.256s'" + spa "Ha fallado la operación %s para %.256s" ER_XAER_NOTA XAE04 chi "XAER_NOTA:未知的XID" eng "XAER_NOTA: Unknown XID" ger "XAER_NOTA: Unbekannte XID" + spa "XAER_NOTA: XID desconocido" ER_XAER_INVAL XAE05 chi "XAER_INVAL:无效的参数(或不支持的命令)" eng "XAER_INVAL: Invalid arguments (or unsupported command)" ger "XAER_INVAL: Ungültige Argumente (oder nicht unterstützter Befehl)" + spa "XAER_INVAL: Argumentos inválidos (o comando no soportado)" ER_XAER_RMFAIL XAE07 chi "XAER_RMFAIL:当全局事务处于%.64s状态时,无法执行该命令" eng "XAER_RMFAIL: The command cannot be executed when global transaction is in the %.64s state" ger "XAER_RMFAIL: DEr Befehl kann nicht ausgeführt werden, wenn die globale Transaktion im Zustand %.64s ist" rus "XAER_RMFAIL: эту команду нельзя выполнять когда глобальная транзакция находится в состоянии '%.64s'" + spa "XAER_RMFAIL: El comando no se puede ejecutar cuando la transacción global se encuentra en estado %.64s" ER_XAER_OUTSIDE XAE09 chi "XAER_OUTSIDE:一些工作是在全局交易之外完成的" eng "XAER_OUTSIDE: Some work is done outside global transaction" ger "XAER_OUTSIDE: Einige Arbeiten werden außerhalb der globalen Transaktion verrichtet" + spa "XAER_OUTSIDE: Algún trabajo se ha realizado fuera de la transacción global" ER_XAER_RMERR XAE03 chi "XAER_RMERR:事务分支中发生致命错误 - 检查您的数据以获得一致性" eng "XAER_RMERR: Fatal error occurred in the transaction branch - check your data for consistency" ger "XAER_RMERR: Schwerwiegender Fehler im Transaktionszweig - prüfen Sie Ihre Daten auf Konsistenz" + 
spa "XAER_RMERR: Ha ocurrido un error fatal en la rama de la transacción - revise la consistencia de sus datos" ER_XA_RBROLLBACK XA100 chi "XA_RBROLLBACK:交易分支回滚" eng "XA_RBROLLBACK: Transaction branch was rolled back" ger "XA_RBROLLBACK: Transaktionszweig wurde zurückgerollt" + spa "XA_RBROLLBACK: La rama de la transacción ha sido retrocedida (rolled back)" ER_NONEXISTING_PROC_GRANT 42000 chi "无授权:用户'%-.48s'主机'%-.64s'ROUTINE'%-.192s'" eng "There is no such grant defined for user '%-.48s' on host '%-.64s' on routine '%-.192s'" ger "Es gibt diese Berechtigung für Benutzer '%-.48s' auf Host '%-.64s' für Routine '%-.192s' nicht" + spa "No existe tal concesión definida para el usuario '%-.48s' en equipo '%-.64s' en rutina '%-.192s'" ER_PROC_AUTO_GRANT_FAIL chi "无法授予EXECUTE和ALTER ROUTINE权限" eng "Failed to grant EXECUTE and ALTER ROUTINE privileges" ger "Gewährung von EXECUTE- und ALTER-ROUTINE-Rechten fehlgeschlagen" + spa "Fallo al conceder privilegios de EXECUTE y ALTER ROUTINE" ER_PROC_AUTO_REVOKE_FAIL chi "无法撤消所有权限以删除例程" eng "Failed to revoke all privileges to dropped routine" ger "Rücknahme aller Rechte für die gelöschte Routine fehlgeschlagen" + spa "Fallo al rescindir todos los privilegios de la rutina anulada" ER_DATA_TOO_LONG 22001 chi "列'%s'行%lu数据太长" eng "Data too long for column '%s' at row %lu" ger "Daten zu lang für Feld '%s' in Zeile %lu" + spa "Datos demasiado largos para la columna '%s' en la fila %lu" ER_SP_BAD_SQLSTATE 42000 chi "坏SQLSTATE:'%s'" eng "Bad SQLSTATE: '%s'" ger "Ungültiger SQLSTATE: '%s'" + spa "Mal SQLSTATE: '%s'" ER_STARTUP chi "%s:已经准备好接受连接\nVersion:'%s'套接字:'%s'端口:%d %s" eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s" ger "%s: bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d %s" + spa "%s: preparada para conexiones.\nVersión: '%s' conector: '%s' puerto: %d %s" ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR chi "无法从带有固定大小行的文件中加载值到变量" eng "Can't load value from file with fixed size rows to variable" ger "Kann Wert 
aus Datei mit Zeilen fester Größe nicht in Variable laden" + spa "No puedo cargar valor desde fichero/archivo con filas de tamaño fijo en variable" ER_CANT_CREATE_USER_WITH_GRANT 42000 chi "您不允许使用创建用户时给予GRANT" eng "You are not allowed to create a user with GRANT" ger "Sie dürfen keinen Benutzer mit GRANT anlegen" + spa "No está autorizado a crear un usuario con GRANT" ER_WRONG_VALUE_FOR_TYPE chi "不正确的%-.32s值:'%-.128T' 函数:%-.32s" eng "Incorrect %-.32s value: '%-.128T' for function %-.32s" ger "Falscher %-.32s-Wert: '%-.128T' für Funktion %-.32s" + spa "Incorrecto valor %-.32s: '%-.128T' para la función %-.32s" ER_TABLE_DEF_CHANGED chi "表定义已更改,请重试" eng "Table definition has changed, please retry transaction" ger "Tabellendefinition wurde geändert, bitte starten Sie die Transaktion neu" + spa "Ha cambiado la definición de la tabla, por favor reintente la transacción" ER_SP_DUP_HANDLER 42000 chi "在同一块中声明的处理程序重复" eng "Duplicate handler declared in the same block" ger "Doppelter Handler im selben Block deklariert" + spa "Manejador duplicado declarado en mismo bloque" ER_SP_NOT_VAR_ARG 42000 chi "OUT或INOUT参数%d 例程 %s的不是BEFORE触发器里的变量或新伪变量" eng "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger" ger "OUT- oder INOUT-Argument %d für Routine %s ist keine Variable" + spa "El argumento %d OUT o INOUT para la rutina %s no es una variable o pseudo-variable NEW en disparador BEFORE" ER_SP_NO_RETSET 0A000 chi "不允许从%s返回结果集" eng "Not allowed to return a result set from a %s" ger "Rückgabe einer Ergebnismenge aus einer %s ist nicht erlaubt" + spa "No autorizado a devolver un conjunto de resultados desde un %s" ER_CANT_CREATE_GEOMETRY_OBJECT 22003 chi "无法从发送到几何字段的数据中获取几何对象" eng "Cannot get geometry object from data you send to the GEOMETRY field" ger "Kann kein Geometrieobjekt aus den Daten machen, die Sie dem GEOMETRY-Feld übergeben haben" + spa "No puedo obtener objeto de geometría desde los datos que vd envía al campo GEOMETRY" 
ER_FAILED_ROUTINE_BREAK_BINLOG chi "ROUTINE失败,定义中既没有NO SQL也没有READ SQL DAT。启用二进制日志记录;如果更新非事务性表,则二进制日志将会错过其更改" eng "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes" ger "Eine Routine, die weder NO SQL noch READS SQL DATA in der Deklaration hat, schlug fehl und Binärlogging ist aktiv. Wenn Nicht-Transaktions-Tabellen aktualisiert wurden, enthält das Binärlog ihre Änderungen nicht" + spa "Ha fallado una rutina y no tiene ni NO SQL ni READS SQL DATA en su declaración y el historial (log) binario se encuentra activado; si han sido actualizadas tablas no transaccionales, el fichero/archivo binario de historial (log) perderá sus cambios" ER_BINLOG_UNSAFE_ROUTINE chi "此函数定义中没有DETERMINISTIC,NO SQL,或者READS SQL DATA,并且已启用二进制日志记录(您*可能*希望使用较少的安全性的log_bin_trust_function_creators变量)" eng "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)" ger "Diese Routine hat weder DETERMINISTIC, NO SQL noch READS SQL DATA in der Deklaration und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_function_creators verwenden)" + spa "Esta función no tiene ninguno de DETERMINISTIC, NO SQL o READS SQL DATA en su declaración y está activado el historial binario (vd *podría* querer usar la variable menos segura log_bin_trust_function_creators)" ER_BINLOG_CREATE_ROUTINE_NEED_SUPER chi "您没有超级特权和二进制日志记录已启用(您*可能*想要使用较少的安全log_bin_trust_function_creators变量)" eng "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)" ger "Sie haben keine SUPER-Berechtigung und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_function_creators verwenden)" + spa "No 
tiene el privilegio SUPER y está activado el historial binario (*podría* querer usar la variable menos segura log_bin_trust_function_creators)" ER_EXEC_STMT_WITH_OPEN_CURSOR chi "您无法执行具有与之关联的打开Cursor的prepared statement。重置语句以重新执行它" eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it" ger "Sie können keine vorbereitete Anweisung ausführen, die mit einem geöffneten Cursor verknüpft ist. Setzen Sie die Anweisung zurück, um sie neu auszuführen" + spa "No puede ejecutar una sentencia preparada que tiene abierto un cursor asociado con ella. Renueve la sentencia para re-ejecutarla" ER_STMT_HAS_NO_OPEN_CURSOR chi "语句(%lu)没有开放的Cursor" eng "The statement (%lu) has no open cursor" ger "Die Anweisung (%lu) hat keinen geöffneten Cursor" + spa "La sentencia (%lu) no tiene cursor abierto" ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG chi "在存储的函数或触发器中不允许显式或隐式提交" eng "Explicit or implicit commit is not allowed in stored function or trigger" ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt" + spa "Comisión (commit) implícita o explícita no permitida en funciones almacenadas o en disparadores" ER_NO_DEFAULT_FOR_VIEW_FIELD chi "VIEW的列'%-.192s.%-.192s'底层表没有默认值" eng "Field of view '%-.192s.%-.192s' underlying table doesn't have a default value" ger "Ein Feld der dem View '%-.192s.%-.192s' zugrundeliegenden Tabelle hat keinen Vorgabewert" + spa "El campo de tabla subyacente de vista '%-.192s.%-.192s' no tiene valor por defecto" ER_SP_NO_RECURSION chi "不允许递归存储功能和触发器" eng "Recursive stored functions and triggers are not allowed" ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt" + spa "No autorizadas funciones almacenadas recursivas ni disparadores" ER_TOO_BIG_SCALE 42000 S1009 chi "指定的大规模%llu为'%-.192s'。最大是%u" eng "Too big scale %llu specified for '%-.192s'. Maximum is %u" ger "Zu großer Skalierungsfaktor %llu für '%-.192s' angegeben. 
Maximum ist %u" + spa "Escala %llu demasiado grande especificada para '%-.192s'. El máximo es de %u" ER_TOO_BIG_PRECISION 42000 S1009 chi "指定的精度%llu太大 '%-.192s'。最大是%u" eng "Too big precision %llu specified for '%-.192s'. Maximum is %u" ger "Zu große Genauigkeit %llu für '%-.192s' angegeben. Maximum ist %u" + spa "Precisión %llu demasiado grande especificada para '%-.192s'. El máximo es de %u" ER_M_BIGGER_THAN_D 42000 S1009 chi "对于FLOAT(M,D),DOUBLE(M,D)或DECIMAL(M,D),M必须> = D(列'%-.192s')" eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s')" ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.192s')" + spa "Para flotante(M,D), doble(M,D) o decimal(M,D), M debe de ser >= D (columna '%-.192s')" ER_WRONG_LOCK_OF_SYSTEM_TABLE chi "您无法将系统表的写入锁定与其他表或锁定类型相结合" eng "You can't combine write-locking of system tables with other tables or lock types" ger "Sie können Schreibsperren auf der Systemtabelle nicht mit anderen Tabellen kombinieren" + spa "No puede combinar bloqueo de escritura de tablas de sistema con otras tablas o tipos de bloqueo" ER_CONNECT_TO_FOREIGN_DATA_SOURCE chi "无法连接到外数据源:%.64s" eng "Unable to connect to foreign data source: %.64s" ger "Kann nicht mit Fremddatenquelle verbinden: %.64s" + spa "No puedo conectar a fuente foránea de datos: %.64s" ER_QUERY_ON_FOREIGN_DATA_SOURCE chi "处理对外数据源上的查询时出现问题。数据源错误:%-.64s" eng "There was a problem processing the query on the foreign data source. Data source error: %-.64s" ger "Bei der Verarbeitung der Abfrage ist in der Fremddatenquelle ein Problem aufgetreten. Datenquellenfehlermeldung: %-.64s" + spa "Hubo un problema procesando la consulta (query) en la fuente foránea de datos. Error de fuente de datos: %-.64s" ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST chi "您尝试引用的外数据源不存在。数据源错误:%-.64s" eng "The foreign data source you are trying to reference does not exist. Data source error: %-.64s" ger "Die Fremddatenquelle, auf die Sie zugreifen wollen, existiert nicht. 
Datenquellenfehlermeldung: %-.64s" + spa "La fuente foránea de datos que intenta referenciar no existe. Error en fuente de datos: %-.64s" ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE chi "无法创建联合表。数据源连接字符串'%-.64s'不是正确的格式" eng "Can't create federated table. The data source connection string '%-.64s' is not in the correct format" ger "Kann föderierte Tabelle nicht erzeugen. Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format" + spa "No puedo crear tabla federada. La cadena de conexión de la fuente de datos '%-.64s' no tiene el formato correcto" ER_FOREIGN_DATA_STRING_INVALID chi "数据源连接字符串'%-.64s'不是正确的格式" eng "The data source connection string '%-.64s' is not in the correct format" ger "Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format" + spa "La cadena de conexión de la fuente de datos '%-.64s' no tiene el formato correcto" ER_CANT_CREATE_FEDERATED_TABLE chi "无法创建联合表。外数据SRC错误:%-.64s" eng "Can't create federated table. Foreign data src error: %-.64s" ger "Kann föderierte Tabelle nicht erzeugen. Fremddatenquellenfehlermeldung: %-.64s" + spa "No puedo crear tabla federada. Error en fuente de datos foráneos: %-.64s" ER_TRG_IN_WRONG_SCHEMA chi "触发在错的SCHEMA" eng "Trigger in wrong schema" ger "Trigger im falschen Schema" + spa "Disparador en esquema equivocado" ER_STACK_OVERRUN_NEED_MORE chi "线程堆栈溢出:%ld字节堆栈的%ld字节,以及所需的%ld字节。使用'mariadbd --thread_stack =#'指定更大的堆栈" eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Consider increasing the thread_stack system variable." ger "Thread-Stack-Überlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes benötigt. Verwenden Sie 'mariadbd --thread_stack=#', um einen größeren Stack anzugeben" jpn "スレッドスタック不足です(使用: %ld ; サイズ: %ld ; 要求: %ld)。より大きい値で 'mariadbd --thread_stack=#' の指定をしてください。" + spa "Desbordamiento en pila de hilos (threads): %ld bytes usados de una pila de %ld y son necesarios %ld bytes. 
Considere el incrementar la variable de sistema --thread_stack=#." ER_TOO_LONG_BODY 42000 S1009 chi "'%-.100s”的ROUTINE太长了" eng "Routine body for '%-.100s' is too long" ger "Routinen-Body für '%-.100s' ist zu lang" + spa "El cuerpo de rutina para '%-.100s' es demasiado largo" ER_WARN_CANT_DROP_DEFAULT_KEYCACHE chi "无法删除默认索引缓存" eng "Cannot drop default keycache" ger "Der vorgabemäßige Schlüssel-Cache kann nicht gelöscht werden" + spa "No puedo eliminar keycache por defecto" ER_TOO_BIG_DISPLAYWIDTH 42000 S1009 chi "显示宽度超过'%-.192s'的范围(max =%lu)" eng "Display width out of range for '%-.192s' (max = %lu)" ger "Anzeigebreite außerhalb des zulässigen Bereichs für '%-.192s' (Maximum = %lu)" + spa "Ancho a mostrar fuera de rango para '%-.192s' (máx = %lu)" ER_XAER_DUPID XAE08 chi "XAER_DUPID:xid已存在" eng "XAER_DUPID: The XID already exists" ger "XAER_DUPID: Die XID existiert bereits" + spa "XAER_DUPID: El XID ya existe" ER_DATETIME_FUNCTION_OVERFLOW 22008 chi "DateTime函数:%-.32s字段溢出" eng "Datetime function: %-.32s field overflow" ger "Datetime-Funktion: %-.32s Feldüberlauf" + spa "Función Datetime: %-.32s desbordamiento de campo" ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG chi "在存储的函数/触发器中无法更新表'%-.192s',因为它已被调用此存储的函数/触发器调用的语句" eng "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger" ger "Kann Tabelle '%-.192s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief" + spa "No puedo actualizar tabla '%-.192s' en función almacenada/disparador porque ya está siendo usada por la sentencia que invocó esta función almacenada/disparador" ER_VIEW_PREVENT_UPDATE chi "表'%-.192s'的定义可防止在表'%-.192s'上的操作'%-.192s'" eng "The definition of table '%-.192s' prevents operation %-.192s on table '%-.192s'" ger "Die Definition der Tabelle '%-.192s' verhindert die Operation %-.192s auf Tabelle '%-.192s'" + spa 
"La definición de la tabla '%-.192s' previene la operación %-.192s en la tabla '%-.192s'" ER_PS_NO_RECURSION chi "prepared statement包含一个有关该语句的存储例程调用。它不允许以这种递归方式执行prepared statement" eng "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner" ger "Die vorbereitete Anweisung enthält einen Aufruf einer gespeicherten Routine, die auf eben dieselbe Anweisung verweist. Es ist nicht erlaubt, eine vorbereitete Anweisung in solch rekursiver Weise auszuführen" + spa "La sentencia preparada contiene una llamada a rutina almacenada que se refiere a esa misma sentencia. No está permitido ejecutar una sentencia preparada de esta manera recursiva" ER_SP_CANT_SET_AUTOCOMMIT chi "不允许从存储的函数或触发器设置自动判处" eng "Not allowed to set autocommit from a stored function or trigger" ger "Es ist nicht erlaubt, innerhalb einer gespeicherten Funktion oder eines Triggers AUTOCOMMIT zu setzen" + spa "No permitido usar auto acometida (autocommit) desde una función almacenada o disparador" ER_MALFORMED_DEFINER 0L000 chi "无效的定义" eng "Invalid definer" + spa "Definidor inválido" ER_VIEW_FRM_NO_USER chi "VIEW'%-.192s'。'%-.192s'没有绝定的信息(旧表格式)。当前用户用作定义。请重新创建视图!" eng "View '%-.192s'.'%-.192s' has no definer information (old table format). Current user is used as definer. Please recreate the view!" ger "View '%-.192s'.'%-.192s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu" + spa "La vista '%-.192s'.'%-.192s' no tiene información de definidor (formato viejo de tabla). Se usa el usuario actual como definidor. Por favor, ¡recrea la vista!" 
ER_VIEW_OTHER_USER chi "您需要使用'%-.192s'@'%-.192s'的创建视图的超级特权" eng "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer" ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.192s'@'%-.192s' zu erzeugen" + spa "Vd necesita el privilegio SUPER para la creación de la vista con definidor '%-.192s'@'%-.192s'" ER_NO_SUCH_USER chi "指定为定义的用户('%-.64s'@'%-.64s')不存在" eng "The user specified as a definer ('%-.64s'@'%-.64s') does not exist" ger "Der als Definierer angegebene Benutzer ('%-.64s'@'%-.64s') existiert nicht" + spa "El usuario especificado como definidor ('%-.64s'@'%-.64s') no existe" ER_FORBID_SCHEMA_CHANGE chi "不允许从'%-.192s'到'%-.192s'的SCHEMA更改" eng "Changing schema from '%-.192s' to '%-.192s' is not allowed" ger "Wechsel des Schemas von '%-.192s' auf '%-.192s' ist nicht erlaubt" + spa "Vd no está autorizado a cambiar el esquema de '%-.192s' a '%-.192s'" ER_ROW_IS_REFERENCED_2 23000 chi "无法删除或更新父行:外键约束失败(%.192s)" eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)" ger "Kann Eltern-Zeile nicht löschen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)" + spa "No puedo borrar o actualizar una fila padre: falla una restricción de clave foránea (%.192s)" ER_NO_REFERENCED_ROW_2 23000 chi "无法添加或更新子行:外键约束失败(%.192s)" eng "Cannot add or update a child row: a foreign key constraint fails (%.192s)" ger "Kann Kind-Zeile nicht hinzufügen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)" + spa "No puedo añadir o actualizar una fila hija: falla una restricción de clave foránea (%.192s)" ER_SP_BAD_VAR_SHADOW 42000 chi "变量'%-.64s'必须用`...`,或重命名" eng "Variable '%-.64s' must be quoted with `...`, or renamed" ger "Variable '%-.64s' muss mit `...` geschützt oder aber umbenannt werden" + spa "La variable '%-.64s' debe de ser entrecomillada con `...` o renombrada" ER_TRG_NO_DEFINER chi "触发'%-.192s'的绝对属性。'%-.192s'。触发器将在调用者的授权下激活,该权限可能不足。请重新创建触发器" eng "No definer 
attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger" ger "Kein Definierer-Attribut für Trigger '%-.192s'.'%-.192s'. Der Trigger wird mit der Autorisierung des Aufrufers aktiviert, der möglicherweise keine zureichenden Berechtigungen hat. Bitte legen Sie den Trigger neu an" + spa "No hay atributo de definidor para disparador '%-.192s'.'%-.192s'. El disparador será activado bajo la autorización del invocador, el cual puede tener insuficientes privilegios. Por favor, vuelva a crear el disparador" ER_OLD_FILE_FORMAT chi "'%-.192s'具有旧格式,您应该重新创建'%s'对象" eng "'%-.192s' has an old format, you should re-create the '%s' object(s)" ger "'%-.192s' hat altes Format, Sie sollten die '%s'-Objekt(e) neu erzeugen" + spa "'%-.192s' tiene un formato viejo, debería vd de volver a crear el/los objeto(s) '%s'" ER_SP_RECURSION_LIMIT chi "递归限制%d(如max_sp_recursion_depth变量设置)的例程%.192s" eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.192s" ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.192s überschritten" + spa "El límite recursivo %d (según se indica mediante la variable max_sp_recursion_depth) se ha excedido para la rutina %.192s" ER_SP_PROC_TABLE_CORRUPT chi "无法加载常规%-.192s(内部代码%d)。有关更多详细信息,请运行SHOW WARNINGS" eng "Failed to load routine %-.192s (internal code %d). For more details, run SHOW WARNINGS" ger "Routine %-.192s (interner Code %d) konnte nicht geladen werden. Weitere Einzelheiten erhalten Sie, wenn Sie SHOW WARNINGS ausführen" ukr "Невдала спроба завантажити процедуру %-.192s (внутрішний код %d). Для отримання детальної інформації використовуйте SHOW WARNINGS" + spa "No pude cargar la rutina %-.192s (código interno %d). 
Para más detalles, ejecute SHOW WARNINGS" ER_SP_WRONG_NAME 42000 chi "常规名称错误不正确'%-.192s'" eng "Incorrect routine name '%-.192s'" ger "Ungültiger Routinenname '%-.192s'" + spa "Nombre incorrecto de rutina '%-.192s'" ER_TABLE_NEEDS_UPGRADE chi "需要升级。请做\"修复%s%`s \"或转储/重新加载以修复!" eng "Upgrade required. Please do \"REPAIR %s %`s\" or dump/reload to fix it!" ger "Aktualisierung erforderlich. Bitte zum Reparieren \"REPAIR %s %`s\" eingeben!" + spa "Es necesaria una mejora. Por favor, ¡haga \"REPAIR %s %`s\" o vuelque/recargue para arreglarlo!" ER_SP_NO_AGGREGATE 42000 chi "存储函数不支持聚合" eng "AGGREGATE is not supported for stored functions" ger "AGGREGATE wird bei gespeicherten Funktionen nicht unterstützt" + spa "AGGREGATE no está soportado en funciones almacenadas" ER_MAX_PREPARED_STMT_COUNT_REACHED 42000 chi "无法创建超过max_prepared_stmt_count语句(当前值:%u)" eng "Can't create more than max_prepared_stmt_count statements (current value: %u)" ger "Kann nicht mehr Anweisungen als max_prepared_stmt_count erzeugen (aktueller Wert: %u)" + spa "No puedo crear más de max_prepared_stmt_count sentencias (valor en curso: %u)" ER_VIEW_RECURSIVE chi "%`s.%`s包含视图递归" eng "%`s.%`s contains view recursion" ger "%`s.%`s enthält View-Rekursion" + spa "%`s.%`s contiene recursividad de vista" ER_NON_GROUPING_FIELD_USED 42000 chi "非分组字段'%-.192s'用于%-.64s条款" eng "Non-grouping field '%-.192s' is used in %-.64s clause" ger "In der %-.192s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet" + spa "Campo no agrupado '%-.192s' usado en cláusula %-.64s" ER_TABLE_CANT_HANDLE_SPKEYS chi "存储引擎%s不支持SPATIAL索引" eng "The storage engine %s doesn't support SPATIAL indexes" ger "Der verwendete Tabellentyp (%s) unterstützt keine SPATIAL-Indizes" + spa "El motor de almacenaje %s no soporta índices SPATIAL" ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA chi "无法在系统表上创建触发器" eng "Triggers can not be created on system tables" ger "Trigger können nicht auf Systemtabellen erzeugt werden" + spa "Los disparadores no 
pueden ser creados en las tablas del sistema" ER_REMOVED_SPACES chi "前面的空格从名称'%s'删除" eng "Leading spaces are removed from name '%s'" ger "Führende Leerzeichen werden aus dem Namen '%s' entfernt" + spa "Se quitan los espacios iniciales del nombre '%s'" ER_AUTOINC_READ_FAILED chi "无法从存储引擎读取自动增量值" eng "Failed to read auto-increment value from storage engine" ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen" hindi "स्टोरेज इंजन से auto-increment का मान पढ़ने में असफल रहे" + spa "No pude leer valor de auto-incremento del motor de almacenaje" ER_USERNAME chi "用户名" eng "user name" ger "Benutzername" hindi "यूज़र का नाम" + spa "nombre de usuario" ER_HOSTNAME chi "主机名" eng "host name" ger "Hostname" hindi "होस्ट का नाम" + spa "nombre de equipo" ER_WRONG_STRING_LENGTH chi "字符串'%-.70T'对于%s(应不超过%d)太长" eng "String '%-.70T' is too long for %s (should be no longer than %d)" ger "String '%-.70T' ist zu lang für %s (sollte nicht länger sein als %d)" + spa "La cadena '%-.70T' es demasiado larga para %s (no debería de ser mayor de %d)" ER_NON_INSERTABLE_TABLE chi "目标表%-.100s %s不可插入" eng "The target table %-.100s of the %s is not insertable-into" ger "Die Zieltabelle %-.100s von %s ist nicht einfügbar" jpn "対象表 %-.100s は挿入可能ではないので、%s を行えません。" + spa "La tabla destino %-.100s de la %s no es insertable-dentro" ER_ADMIN_WRONG_MRG_TABLE chi "表'%-.64s'不同定义、或非myisam类型、或不存在" eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist" ger "Tabelle '%-.64s' ist unterschiedlich definiert, nicht vom Typ MyISAM oder existiert nicht" + spa "La tabla '%-.64s' está definida de forma diferente o es del tipo no-MyISAM o no existe" ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT chi "太高的嵌套SELECT" eng "Too high level of nesting for select" ger "Zu tief verschachtelte SELECT-Anweisungen" + spa "Demasiado alto el nivel de anidamiento para la selección" ER_NAME_BECOMES_EMPTY chi "名'%-.64s'已成为''" eng "Name '%-.64s' has become ''" ger "Name '%-.64s' wurde zu ''" + spa 
"El nombre '%-.64s' ha pasado a ser ''" ER_AMBIGUOUS_FIELD_TERM chi "FIELDS TERMINATED字符串的第一个字符是模棱两可的;请使用非空字段FIELDS ENCLOSED BY" eng "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY" ger "Das erste Zeichen der Zeichenkette FIELDS TERMINATED ist mehrdeutig; bitte benutzen Sie nicht optionale und nicht leere FIELDS ENCLOSED BY" + spa "El primer carácter de la cadena de los FIELDS TERMINATED es ambiguo; por favor, use FIELDS ENCLOSED BY no opcionales y no vacíos" ER_FOREIGN_SERVER_EXISTS chi "无法创建外部服务器'%s',因为它已经存在" eng "Cannot create foreign server '%s' as it already exists" @@ -6276,491 +6469,600 @@ ER_FOREIGN_SERVER_DOESNT_EXIST chi "您尝试引用的外部服务器名称不存在。数据源错误:%-.64s" eng "The foreign server name you are trying to reference does not exist. Data source error: %-.64s" ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s" + spa "El nombre del servidor foráneo que intenta referenciar no existe. 
Error en fuentes de datos: %-.64s" ER_ILLEGAL_HA_CREATE_OPTION chi "表存储引擎'%-.64s'不支持创建选项'%.64s'" eng "Table storage engine '%-.64s' does not support the create option '%.64s'" ger "Speicher-Engine '%-.64s' der Tabelle unterstützt die Option '%.64s' nicht" + spa "El motor de almacenaje de la tabla '%-.64s' no soporta la opción de creación '%.64s'" ER_PARTITION_REQUIRES_VALUES_ERROR chi "语法错误:%-.64s PARTITIONING需要定义给每个分区VALUES %-.64s" eng "Syntax error: %-.64s PARTITIONING requires definition of VALUES %-.64s for each partition" ger "Fehler in der SQL-Syntax: %-.64s-PARTITIONierung erfordert Definition von VALUES %-.64s für jede Partition" + spa "Error de sintaxis: %-.64s PARTITIONING requiere de la definición de VALUES %-.64s para cada partición" swe "Syntaxfel: %-.64s PARTITIONering kräver definition av VALUES %-.64s för varje partition" ER_PARTITION_WRONG_VALUES_ERROR chi "只有%-.64s PARTITIONING可以使用VALUES %-.64s在分区定义中" eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition" ger "Nur %-.64s-PARTITIONierung kann VALUES %-.64s in der Partitionsdefinition verwenden" + spa "Sólo %-.64s PARTITIONING puede usar VALUES %-.64s en la definición de la partición" swe "Endast %-.64s partitionering kan använda VALUES %-.64s i definition av partitionen" ER_PARTITION_MAXVALUE_ERROR chi "MAXVALUE只能在最后一个分区定义中使用" eng "MAXVALUE can only be used in last partition definition" ger "MAXVALUE kann nur für die Definition der letzten Partition verwendet werden" + spa "MAXVALUE sólo se puede usar en la definición de la última partición" swe "MAXVALUE kan bara användas i definitionen av den sista partitionen" ER_PARTITION_SUBPARTITION_ERROR chi "子分区只能是哈希分区和分区列" eng "Subpartitions can only be hash partitions and by key" ger "Unterpartitionen dürfen nur HASH- oder KEY-Partitionen sein" + spa "Las subparticiones sólo pueden ser particiones dispersas (hash) y mediante clave" swe "Subpartitioner kan bara vara hash och key partitioner" ER_PARTITION_SUBPART_MIX_ERROR chi 
"如果在一个分区上,必须在所有分区上定义子组分" eng "Must define subpartitions on all partitions if on one partition" ger "Wenn Sie Unterpartitionen auf einer Partition definieren, müssen Sie das für alle Partitionen tun" + spa "Se deben de definir subparticiones en todas las particiones si se está en una partición" swe "Subpartitioner måste definieras på alla partitioner om på en" ER_PARTITION_WRONG_NO_PART_ERROR chi "定义了错误的分区数,与以前的设置不匹配" eng "Wrong number of partitions defined, mismatch with previous setting" ger "Falsche Anzahl von Partitionen definiert, stimmt nicht mit vorherigen Einstellungen überein" + spa "Definido un número equivocado de particiones, no coincide con configuración previa" swe "Antal partitioner definierade och antal partitioner är inte lika" ER_PARTITION_WRONG_NO_SUBPART_ERROR chi "错误的子组分数定义,与以前的设置不匹配" eng "Wrong number of subpartitions defined, mismatch with previous setting" ger "Falsche Anzahl von Unterpartitionen definiert, stimmt nicht mit vorherigen Einstellungen überein" + spa "Definido un número equivocado de subparticiones, no coincide con configuración previa" swe "Antal subpartitioner definierade och antal subpartitioner är inte lika" ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR chi "不允许(子)分区功能中的常量,随机或时区依赖表达式" eng "Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed" ger "Konstante oder Random-Ausdrücke in (Unter-)Partitionsfunktionen sind nicht erlaubt" + spa "Las expresiones constantes, al azar o dependientes de zona en función de (sub)particionado no están permitidas" swe "Konstanta uttryck eller slumpmässiga uttryck är inte tillåtna (sub)partitioneringsfunktioner" ER_NOT_CONSTANT_EXPRESSION chi "%s中的表达必须是恒定的" eng "Expression in %s must be constant" ger "Ausdrücke in %s müssen konstant sein" + spa "Las expresiones incluidas en %s deben de ser constantes" swe "Uttryck i %s måste vara ett konstant uttryck" ER_FIELD_NOT_FOUND_PART_ERROR chi "在表中找不到分区功能的字段列表中的字段" eng "Field in list of fields for partition function 
not found in table" ger "Felder in der Feldliste der Partitionierungsfunktion wurden in der Tabelle nicht gefunden" + spa "Campo en la lista de campos para función de partición no hallado en tabla" swe "Fält i listan av fält för partitionering med key inte funnen i tabellen" ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR chi "只允许在索引分区中允许字段列表" eng "List of fields is only allowed in KEY partitions" ger "Eine Feldliste ist nur in KEY-Partitionen erlaubt" + spa "Lista de campos sólo se permite en particiones KEY" swe "En lista av fält är endast tillåtet för KEY partitioner" ER_INCONSISTENT_PARTITION_INFO_ERROR chi "FRM文件中的分区信息不与可以写入FRM文件的内容一致" eng "The partition info in the frm file is not consistent with what can be written into the frm file" ger "Die Partitionierungsinformationen in der frm-Datei stimmen nicht mit dem überein, was in die frm-Datei geschrieben werden kann" + spa "La información de partición en el fichero/archivo frm no es consistente con lo que se puede grabar en un fichero/archivo frm" swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen" ER_PARTITION_FUNC_NOT_ALLOWED_ERROR chi "%-.192s函数返回错误的类型" eng "The %-.192s function returns the wrong type" ger "Die %-.192s-Funktion gibt einen falschen Typ zurück" + spa "La función %-.192s devueve un tipo equivocado" swe "%-.192s-funktionen returnerar felaktig typ" ER_PARTITIONS_MUST_BE_DEFINED_ERROR chi "对于%-.64s分区必须定义每个分区" eng "For %-.64s partitions each partition must be defined" ger "Für %-.64s-Partitionen muss jede Partition definiert sein" + spa "Para las particiones %-.64s, se debe de definir cada partición" swe "För %-.64s partitionering så måste varje partition definieras" ER_RANGE_NOT_INCREASING_ERROR chi "每个分区的VALUES LESS THAN的值必须严格增加" eng "VALUES LESS THAN value must be strictly increasing for each partition" ger "Werte in VALUES LESS THAN müssen für jede Partition strikt aufsteigend sein" + spa "El valor VALUES LESS THAN debe de ser estrictamente incremental 
para cada partición" swe "Värden i VALUES LESS THAN måste vara strikt växande för varje partition" ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR chi "VALUES值必须与分区函数相同" eng "VALUES value must be of same type as partition function" ger "VALUES-Werte müssen vom selben Typ wie die Partitionierungsfunktion sein" + spa "El valor VALUES debe de ser del mismo tipo que la función de partición" swe "Värden i VALUES måste vara av samma typ som partitioneringsfunktionen" ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR chi "列表分区中相同常量的多个定义" eng "Multiple definition of same constant in list partitioning" ger "Mehrfachdefinition derselben Konstante bei Listen-Partitionierung" + spa "Definición múltiple de la misma constante en el particionamiento de la lista" swe "Multipel definition av samma konstant i list partitionering" ER_PARTITION_ENTRY_ERROR chi "分区不能在查询中独立使用" eng "Partitioning can not be used stand-alone in query" ger "Partitionierung kann in einer Abfrage nicht alleinstehend benutzt werden" + spa "El particionado no puede ser usado de forma autónoma en consulta (query)" swe "Partitioneringssyntax kan inte användas på egen hand i en SQL-fråga" ER_MIX_HANDLER_ERROR chi "此版本的MariaDB中不允许分区中的处理程序混合" eng "The mix of handlers in the partitions is not allowed in this version of MariaDB" ger "Das Vermischen von Handlern in Partitionen ist in dieser Version von MariaDB nicht erlaubt" + spa "La mezcla de manejadores en las particiones no está autorizada en esta versión de MariaDB" swe "Denna mix av lagringsmotorer är inte tillåten i denna version av MariaDB" ER_PARTITION_NOT_DEFINED_ERROR chi "对于分区引擎,需要定义所有%-.64s" eng "For the partitioned engine it is necessary to define all %-.64s" ger "Für die partitionierte Engine müssen alle %-.64s definiert sein" + spa "Para el motor de particionado es necesario definir todas %-.64s" swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %-.64s" ER_TOO_MANY_PARTITIONS_ERROR chi "定义了太多分区(包括子组分)" eng "Too many partitions (including 
subpartitions) were defined" ger "Es wurden zu vielen Partitionen (einschließlich Unterpartitionen) definiert" + spa "Definidas demasiadas particiones (incluyendo las subparticiones)" swe "För många partitioner (inkluderande subpartitioner) definierades" ER_SUBPARTITION_ERROR chi "只有在子节分节的HASH/KEY分区中可以混合RANGE/LIST分区" eng "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning" ger "RANGE/LIST-Partitionierung kann bei Unterpartitionen nur zusammen mit HASH/KEY-Partitionierung verwendet werden" + spa "Sólo es posible mezclar particionado RANGE/LIST con particionado HASH/KEY para el subparticionado" swe "Det är endast möjligt att blanda RANGE/LIST partitionering med HASH/KEY partitionering för subpartitionering" ER_CANT_CREATE_HANDLER_FILE chi "无法创建特定的处理程序文件" eng "Failed to create specific handler file" ger "Erzeugen einer spezifischen Handler-Datei fehlgeschlagen" + spa "No pude crear fichero/archivo de manejador específico" swe "Misslyckades med att skapa specifik fil i lagringsmotor" ER_BLOB_FIELD_IN_PART_FUNC_ERROR chi "分区功能中不允许BLOB字段" eng "A BLOB field is not allowed in partition function" ger "In der Partitionierungsfunktion sind BLOB-Spalten nicht erlaubt" + spa "No se autoriza campo BLOB en la función de partición" swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner" ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF chi "A%-.192s必须包含表的分区功能中的所有列" eng "A %-.192s must include all columns in the table's partitioning function" + spa "Un %-.192s debe de incluir todas las columnas en la función de particionado de la tabla" ER_NO_PARTS_ERROR chi "不允许%-.64s = 0" eng "Number of %-.64s = 0 is not an allowed value" ger "Eine Anzahl von %-.64s = 0 ist kein erlaubter Wert" + spa "El número de %-.64s = 0 no es un valor autorizado" swe "Antal %-.64s = 0 är inte ett tillåten värde" ER_PARTITION_MGMT_ON_NONPARTITIONED chi "不分区表上的分区管理是不可能的" eng "Partition management on a not partitioned table is not possible" ger "Partitionsverwaltung 
einer nicht partitionierten Tabelle ist nicht möglich" + spa "La gestión de particiones en una tabla no particionada no es posible" swe "Partitioneringskommando på en opartitionerad tabell är inte möjligt" ER_FEATURE_NOT_SUPPORTED_WITH_PARTITIONING eng "Partitioned tables do not support %s" + spa "Las tablas particionadas no soportan %s" ER_DROP_PARTITION_NON_EXISTENT chi "分区列表错误%-.64s" eng "Error in list of partitions to %-.64s" ger "Fehler in der Partitionsliste bei %-.64s" + spa "Error en lista de particiones para %-.64s" swe "Fel i listan av partitioner att %-.64s" ER_DROP_LAST_PARTITION chi "无法删除所有分区,请使用删除表" eng "Cannot remove all partitions, use DROP TABLE instead" ger "Es lassen sich nicht sämtliche Partitionen löschen, benutzen Sie statt dessen DROP TABLE" + spa "No puedo quitar todas las particiones, use DROP TABLE en su lugar" swe "Det är inte tillåtet att ta bort alla partitioner, använd DROP TABLE istället" ER_COALESCE_ONLY_ON_HASH_PARTITION chi "COALESCE分区只能用于哈希/索引分区" eng "COALESCE PARTITION can only be used on HASH/KEY partitions" ger "COALESCE PARTITION kann nur auf HASH- oder KEY-Partitionen benutzt werden" + spa "COALESCE PARTITION sólo se puede usar en particiones HASH/KEY" swe "COALESCE PARTITION kan bara användas på HASH/KEY partitioner" ER_REORG_HASH_ONLY_ON_SAME_NO chi "REORGANIZE PARTITION只能用于重新组织不改变他们的数字的分区" eng "REORGANIZE PARTITION can only be used to reorganize partitions not to change their numbers" ger "REORGANIZE PARTITION kann nur zur Reorganisation von Partitionen verwendet werden, nicht, um ihre Nummern zu ändern" + spa "REORGANIZE PARTITION sólo se puede usar para reorganizar particiones no para cambiar sus números" swe "REORGANIZE PARTITION kan bara användas för att omorganisera partitioner, inte för att ändra deras antal" ER_REORG_NO_PARAM_ERROR chi "没有参数的REORGANIZE PARTITION只能用于HASH PARTITION的自动分区表" eng "REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs" ger "REORGANIZE 
PARTITION ohne Parameter kann nur für auto-partitionierte Tabellen verwendet werden, die HASH-Partitionierung benutzen" + spa "REORGANIZE PARTITION sin parámetros sólo se puede usar en tablas auto-particionadas usando HASH PARTITIONs" swe "REORGANIZE PARTITION utan parametrar kan bara användas på auto-partitionerade tabeller som använder HASH partitionering" ER_ONLY_ON_RANGE_LIST_PARTITION chi "%-.64s分区只能用于RANGE/LIST分区" eng "%-.64s PARTITION can only be used on RANGE/LIST partitions" ger "%-.64s PARTITION kann nur für RANGE- oder LIST-Partitionen verwendet werden" + spa "%-.64s PARTITION sólo puede ser usada en particiones RANGE/LIST" swe "%-.64s PARTITION kan bara användas på RANGE/LIST-partitioner" ER_ADD_PARTITION_SUBPART_ERROR chi "尝试用错误数量的子分区添加分区" eng "Trying to Add partition(s) with wrong number of subpartitions" ger "Es wurde versucht, eine oder mehrere Partitionen mit der falschen Anzahl von Unterpartitionen hinzuzufügen" + spa "Intentando añadir particion(es) usando un número equivocado de subparticiones" swe "ADD PARTITION med fel antal subpartitioner" ER_ADD_PARTITION_NO_NEW_PARTITION chi "必须添加至少一个分区" eng "At least one partition must be added" ger "Es muss zumindest eine Partition hinzugefügt werden" + spa "Se debe de añadir una partición, al menos" swe "Åtminstone en partition måste läggas till vid ADD PARTITION" ER_COALESCE_PARTITION_NO_PARTITION chi "至少一个分区必须合并" eng "At least one partition must be coalesced" ger "Zumindest eine Partition muss mit COALESCE PARTITION zusammengefügt werden" + spa "Se debe de fusionar una partición, al menos" swe "Åtminstone en partition måste slås ihop vid COALESCE PARTITION" ER_REORG_PARTITION_NOT_EXIST chi "分区重组量超过而不是分区量" eng "More partitions to reorganize than there are partitions" ger "Es wurde versucht, mehr Partitionen als vorhanden zu reorganisieren" + spa "Hay más particiones a reorganizar que las que existen" swe "Fler partitioner att reorganisera än det finns partitioner" ER_SAME_NAME_PARTITION chi 
"重复分区名称%-.192s" eng "Duplicate partition name %-.192s" ger "Doppelter Partitionsname: %-.192s" + spa "Nombre de partición duplicado %-.192s" swe "Duplicerat partitionsnamn %-.192s" ER_NO_BINLOG_ERROR chi "在此命令上不允许关闭binlog" eng "It is not allowed to shut off binlog on this command" ger "Es es nicht erlaubt, bei diesem Befehl binlog abzuschalten" + spa "No se autoriza a apagar binlog con este comando" swe "Det är inte tillåtet att stänga av binlog på detta kommando" ER_CONSECUTIVE_REORG_PARTITIONS chi "在重新组织一组分区时,它们必须按照次序" eng "When reorganizing a set of partitions they must be in consecutive order" ger "Bei der Reorganisation eines Satzes von Partitionen müssen diese in geordneter Reihenfolge vorliegen" + spa "Para reorganizar un conjunto de particiones, éstas deben de estar ordenadas consecutivamente" swe "När ett antal partitioner omorganiseras måste de vara i konsekutiv ordning" ER_REORG_OUTSIDE_RANGE chi "重组范围分区无法更改除最后分区之外的总范围,无法扩展范围" eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range" ger "Die Reorganisation von RANGE-Partitionen kann Gesamtbereiche nicht verändern, mit Ausnahme der letzten Partition, die den Bereich erweitern kann" + spa "El reorganizar un rango de particiones no puede cambiar los rangos totales excepto para la última partición donde se puede extender el rango" swe "Reorganisering av rangepartitioner kan inte ändra den totala intervallet utom för den sista partitionen där intervallet kan utökas" ER_PARTITION_FUNCTION_FAILURE chi "此版不支持此处理程序的分区功能" eng "Partition function not supported in this version for this handler" ger "Partitionsfunktion in dieser Version dieses Handlers nicht unterstützt" + spa "Función de partición no soportada en esta versión para este manejador" ER_PART_STATE_ERROR chi "无法从CREATE/ALTER表中定义分区状态" eng "Partition state cannot be defined from CREATE/ALTER TABLE" ger "Partitionszustand kann nicht von CREATE oder ALTER TABLE aus definiert werden" + spa "El 
estado de una partición no se puede definir desde CREATE/ALTER TABLE" swe "Partition state kan inte definieras från CREATE/ALTER TABLE" ER_LIMITED_PART_RANGE chi "%-.64s处理程序仅支持32-bit整数" eng "The %-.64s handler only supports 32 bit integers in VALUES" ger "Der Handler %-.64s unterstützt in VALUES nur 32-Bit-Integers" + spa "El manejador %-.64s sólo soporta enteros de 32 bit en VALUES" swe "%-.64s stödjer endast 32 bitar i integers i VALUES" ER_PLUGIN_IS_NOT_LOADED chi "插件'%-.192s'未加载" eng "Plugin '%-.192s' is not loaded" ger "Plugin '%-.192s' ist nicht geladen" + spa "Enchufe (plugin) '%-.192s' no cargado" ER_WRONG_VALUE chi "错误%-.32s值:'%-.128T'" eng "Incorrect %-.32s value: '%-.128T'" ger "Falscher %-.32s-Wert: '%-.128T'" + spa "Incorrecto %-.32s valor: '%-.128T'" ER_NO_PARTITION_FOR_GIVEN_VALUE chi "表没有%-.64s的分区" eng "Table has no partition for value %-.64s" ger "Tabelle hat für den Wert %-.64s keine Partition" + spa "La tabla no tiene partición para valor %-.64s" ER_FILEGROUP_OPTION_ONLY_ONCE chi "设置%s不能超过一次" eng "It is not allowed to specify %s more than once" ger "%s darf nicht mehr als einmal angegegeben werden" + spa "No se permite especificar %s más de unva vez" ER_CREATE_FILEGROUP_FAILED chi "无法创建%s" eng "Failed to create %s" ger "Anlegen von %s fehlgeschlagen" hindi "%s को बनाने में असफल रहे" + spa "No pude crear %s" ER_DROP_FILEGROUP_FAILED chi "未能DROP%s" eng "Failed to drop %s" ger "Löschen von %s fehlgeschlagen" hindi "%s को हटाने में असफल रहे" + spa "No pude eliminar %s" ER_TABLESPACE_AUTO_EXTEND_ERROR chi "处理程序不支持表空间的自动扩展名" eng "The handler doesn't support autoextend of tablespaces" ger "Der Handler unterstützt keine automatische Erweiterung (Autoextend) von Tablespaces" + spa "El manejador no soporta autoextensión de espacios de tabla" ER_WRONG_SIZE_NUMBER chi "尺寸参数被错误地指定,编号或表单10M" eng "A size parameter was incorrectly specified, either number or on the form 10M" ger "Ein Größen-Parameter wurde unkorrekt angegeben, muss entweder Zahl sein oder im 
Format 10M" + spa "Se ha especificado de forma incorrecta un parámetro de medida o el número o en la forma 10M" ER_SIZE_OVERFLOW_ERROR chi "尺寸编号是正确的,但我们不允许数字部分超过20亿" eng "The size number was correct but we don't allow the digit part to be more than 2 billion" ger "Die Zahl für die Größe war korrekt, aber der Zahlanteil darf nicht größer als 2 Milliarden sein" + spa "El número de medida es correcto pero no permitimos que la parte del dígito tenga más de 2 billones" ER_ALTER_FILEGROUP_FAILED chi "未能改变:%s" eng "Failed to alter: %s" ger "Änderung von %s fehlgeschlagen" hindi "%s को ALTER करने में असफल रहे" + spa "No pude alterar: %s" ER_BINLOG_ROW_LOGGING_FAILED chi "将一行写入基于行的二进制日志失败" eng "Writing one row to the row-based binary log failed" ger "Schreiben einer Zeilen ins zeilenbasierte Binärlog fehlgeschlagen" + spa "Ha fallado el grabar una fila en historial (log) binario basado en fila" ER_BINLOG_ROW_WRONG_TABLE_DEF chi "表定义主机和从站不匹配:%s" eng "Table definition on master and slave does not match: %s" ger "Tabellendefinition auf Master und Slave stimmt nicht überein: %s" + spa "La definición de tabla en maestro (master) y esclavo no coincide: %s" ER_BINLOG_ROW_RBR_TO_SBR chi "使用--log-slave-updates的从站必须使用基于行的二进制日志记录,以便能够复制基于行的二进制日志事件" eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events" ger "Slave, die mit --log-slave-updates laufen, müssen zeilenbasiertes Loggen verwenden, um zeilenbasierte Binärlog-Ereignisse loggen zu können" + spa "La ejecución esclava con --log-slave-updates debe de usar un historial (log) binario basado en fila para que pueda replicar eventos de historial (log) binario basados en fila" ER_EVENT_ALREADY_EXISTS chi "事件'%-.192s'已经存在" eng "Event '%-.192s' already exists" ger "Event '%-.192s' existiert bereits" + spa "El evento '%-.192s' ya existe" ER_EVENT_STORE_FAILED chi "无法存储事件%s。错误代码%M来自存储引擎" eng "Failed to store event %s. 
Error code %M from storage engine" ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %M" + spa "No pude almacenar evento %s. Código de error %M desde motor de almacenaje" ER_EVENT_DOES_NOT_EXIST chi "未知事件'%-.192s'" eng "Unknown event '%-.192s'" ger "Unbekanntes Event '%-.192s'" + spa "Evento desconocido '%-.192s'" ER_EVENT_CANT_ALTER chi "无法改变事件'%-.192s'" eng "Failed to alter event '%-.192s'" ger "Ändern des Events '%-.192s' fehlgeschlagen" hindi "'%-.192s' EVENT को ALTER करने में असफल रहे" + spa "No pude alterar evento '%-.192s'" ER_EVENT_DROP_FAILED chi "未能DROP%s" eng "Failed to drop %s" ger "Löschen von %s fehlgeschlagen" hindi "%s को हटाने में असफल रहे" + spa "No pude eliminar %s" ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG chi "INTERVAL为负或太大" eng "INTERVAL is either not positive or too big" ger "INTERVAL ist entweder nicht positiv oder zu groß" + spa "O INTERVAL no es positivo o es demasiado grande" ER_EVENT_ENDS_BEFORE_STARTS chi "ENDS无效的或在STARTS之前" eng "ENDS is either invalid or before STARTS" ger "ENDS ist entweder ungültig oder liegt vor STARTS" + spa "O ENDS es inválido o anterior a STARTS" ER_EVENT_EXEC_TIME_IN_THE_PAST chi "事件执行时间在过去。事件已被禁用" eng "Event execution time is in the past. Event has been disabled" ger "Ausführungszeit des Events liegt in der Vergangenheit. Event wurde deaktiviert" + spa "El tiempo de ejecución de evento se encuentra en el pasado. 
El evento ha sido desactivado" ER_EVENT_OPEN_TABLE_FAILED chi "无法打开mysql.event" eng "Failed to open mysql.event" ger "Öffnen von mysql.event fehlgeschlagen" hindi "mysql.event को खोलने में असफल रहे" + spa "No puede abrir mysql.event" ER_EVENT_NEITHER_M_EXPR_NOR_M_AT chi "没有提供DateTime表达式" eng "No datetime expression provided" ger "Kein DATETIME-Ausdruck angegeben" + spa "No se ha suministrado expresión datetime" ER_UNUSED_2 chi "你永远不应该看到它" eng "You should never see it" + spa "No lo debería vd de ver nunca" ER_UNUSED_3 chi "你永远不应该看到它" eng "You should never see it" + spa "No lo debería vd de ver nunca" ER_EVENT_CANNOT_DELETE chi "无法从mysql.event删除该事件" eng "Failed to delete the event from mysql.event" ger "Löschen des Events aus mysql.event fehlgeschlagen" hindi "EVENT को mysql.event से हटाने मैं असफल रहे" + spa "No pude borrar el evento desde mysql.event" ER_EVENT_COMPILE_ERROR chi "在汇编事件的主体时出错" eng "Error during compilation of event's body" ger "Fehler beim Kompilieren des Event-Bodys" + spa "Error durante compilación de cuerpo de evento" ER_EVENT_SAME_NAME chi "相同的旧活动名称" eng "Same old and new event name" ger "Alter und neuer Event-Name sind gleich" + spa "Mismo nombre de evento viejo y nuevo" ER_EVENT_DATA_TOO_LONG chi "列'%s'数据太长" eng "Data for column '%s' too long" ger "Daten der Spalte '%s' zu lang" + spa "Datos demasiado largos para la columna '%s'" ER_DROP_INDEX_FK chi "无法删除索引'%-.192s':外部索引约束中需要它" eng "Cannot drop index '%-.192s': needed in a foreign key constraint" ger "Kann Index '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung benötigt" + spa "No puedo eliminar índice '%-.192s': necesario en una restricción de clave foránea" # When using this error message, use the ER_WARN_DEPRECATED_SYNTAX error # code. ER_WARN_DEPRECATED_SYNTAX_WITH_VER chi "语法'%s'被弃用,将在Mariadb%s中删除。请使用%s" eng "The syntax '%s' is deprecated and will be removed in MariaDB %s. Please use %s instead" ger "Die Syntax '%s' ist veraltet und wird in MariaDB %s entfernt. 
Bitte benutzen Sie statt dessen %s" + spa "La sintaxis '%s' está obsoleta y será quitada en MariaDB %s. Por favor, use %s en su lugar" ER_CANT_WRITE_LOCK_LOG_TABLE chi "您无法获得日志表的写锁。只有读访问是可能的" eng "You can't write-lock a log table. Only read access is possible" ger "Eine Log-Tabelle kann nicht schreibgesperrt werden. Es ist ohnehin nur Lesezugriff möglich" + spa "No puede hacer bloqueo de escritura en una tabla de historial (log). Sólo es posible acceso de lectura" ER_CANT_LOCK_LOG_TABLE chi "您无法使用带日志表的锁" eng "You can't use locks with log tables" ger "Log-Tabellen können nicht gesperrt werden" + spa "No puede usar bloqueos con tablas de historial (log)" ER_UNUSED_4 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE chi "mysql的列计数。%s是错误的。预期%d,找到%d。使用MariaDB%d创建,现在运行%d。请使用mariadb-upgrade来修复此错误" eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mariadb-upgrade to fix this error" ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MariaDB %d, jetzt unter %d. Bitte benutzen Sie mariadb-upgrade, um den Fehler zu beheben" + spa "El contador de columnas de mysql.%s está equivocado. Se esperaba %d, hallado %d. Creado con MariaDB %d, ahora ejecuando %d. 
Por favor, use mariadb-upgrade para solucionar este error" ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR chi "当会话打开临时表时,无法切换出基于行的二进制日志格式" eng "Cannot switch out of the row-based binary log format when the session has open temporary tables" ger "Kann nicht aus dem zeilenbasierten Binärlog-Format herauswechseln, wenn die Sitzung offene temporäre Tabellen hat" + spa "No puedo conmutar fuera del formato de historial (log) binario basado en fila cuando la sesión ha abierto tablas temporales" ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT chi "无法更改存储函数或触发器内的二进制记录格式" eng "Cannot change the binary logging format inside a stored function or trigger" ger "Das Binärlog-Format kann innerhalb einer gespeicherten Funktion oder eines Triggers nicht geändert werden" + spa "No puedo cambiar el formato de historial (log) binario dentro de funciones almacenadas o disparadores" ER_UNUSED_13 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_PARTITION_NO_TEMPORARY chi "无法使用分区创建临时表" eng "Cannot create temporary table with partitions" ger "Anlegen temporärer Tabellen mit Partitionen nicht möglich" hindi "अस्थाई टेबल को पार्टिशन्स के साथ नहीं बनाया जा सकता" + spa "No puedo crear tabla temporal con particiones" ER_PARTITION_CONST_DOMAIN_ERROR chi "分区常量超出分区功能域" eng "Partition constant is out of partition function domain" ger "Partitionskonstante liegt außerhalb der Partitionsfunktionsdomäne" + spa "La constante de partición está fuera del dominio de función de partición" swe "Partitionskonstanten är utanför partitioneringsfunktionens domän" ER_PARTITION_FUNCTION_IS_NOT_ALLOWED chi "不允许此分区功能" eng "This partition function is not allowed" ger "Diese Partitionierungsfunktion ist nicht erlaubt" + spa "Esta función de partición no está permitida" swe "Denna partitioneringsfunktion är inte tillåten" ER_DDL_LOG_ERROR chi "DDL日志中的错误" eng "Error in DDL log" ger "Fehler im DDL-Log" hindi "DDL लॉग में त्रुटि हुई" + spa "Error en historial (log) DDL" 
ER_NULL_IN_VALUES_LESS_THAN chi "VALUES LESS THAN不允许使用NULL" eng "Not allowed to use NULL value in VALUES LESS THAN" ger "In VALUES LESS THAN dürfen keine NULL-Werte verwendet werden" + spa "No autorizado a usar valor NULL en VALUES LESS THAN" swe "Det är inte tillåtet att använda NULL-värden i VALUES LESS THAN" ER_WRONG_PARTITION_NAME chi "分区名称不正确" eng "Incorrect partition name" ger "Falscher Partitionsname" hindi "पार्टीशन का नाम गलत है" + spa "Nombre incorrecto de partición" swe "Felaktigt partitionsnamn" ER_CANT_CHANGE_TX_CHARACTERISTICS 25001 chi "交易正在进行,无法更改事务特性" eng "Transaction characteristics can't be changed while a transaction is in progress" + spa "No se pueden cambiar las características de transacción mientras que una transacción se encuentre en proceso" ER_DUP_ENTRY_AUTOINCREMENT_CASE chi "ALTER TABLE表会导致AUTO_INCREMENT重建,导致重复的条目'%-.192T'用于索引'%-.192s'" eng "ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.192T' for key '%-.192s'" ger "ALTER TABLE führt zur Neusequenzierung von auto_increment, wodurch der doppelte Eintrag '%-.192T' für Schlüssel '%-.192s' auftritt" + spa "ALTER TABLE causa resecuenciado de auto_incremento resultando en entrada duplicada '%-.192T' para la clave '%-.192s'" ER_EVENT_MODIFY_QUEUE_ERROR chi "内部调度器错误%d" eng "Internal scheduler error %d" ger "Interner Scheduler-Fehler %d" + spa "Error en organizador interno %d" ER_EVENT_SET_VAR_ERROR chi "在开始/停止调度程序期间出错。错误代码%M" eng "Error during starting/stopping of the scheduler. Error code %M" ger "Fehler während des Startens oder Anhalten des Schedulers. Fehlercode %M" + spa "Error durante arranque/parada del organizador. 
Código de error %M" ER_PARTITION_MERGE_ERROR chi "引擎不能用于分区表" eng "Engine cannot be used in partitioned tables" ger "Engine kann in partitionierten Tabellen nicht verwendet werden" + spa "No se puede usar el motor en tablas particionadas" swe "Engine inte användas i en partitionerad tabell" ER_CANT_ACTIVATE_LOG chi "无法激活'%-.64s'日志" eng "Cannot activate '%-.64s' log" ger "Kann Logdatei '%-.64s' nicht aktivieren" + spa "No puedo activar historial (log) '%-.64s'" ER_RBR_NOT_AVAILABLE chi "服务器不是基于行的复制构建的" eng "The server was not built with row-based replication" ger "Der Server wurde nicht mit zeilenbasierter Replikation gebaut" + spa "El servidor no ha sido construido con réplica basada en fila" ER_BASE64_DECODE_ERROR chi "Base64字符串的解码失败" eng "Decoding of base64 string failed" ger "Der Server hat keine zeilenbasierte Replikation" + spa "Ha fallado la decodificación de cadena base64" swe "Avkodning av base64 sträng misslyckades" ER_EVENT_RECURSION_FORBIDDEN chi "EVENT主体存在时EVENT DDL语句递归被禁止" eng "Recursion of EVENT DDL statements is forbidden when body is present" ger "Rekursivität von EVENT-DDL-Anweisungen ist unzulässig wenn ein Hauptteil (Body) existiert" + spa "Se prohiben sentencias de EVENT DDL cuando se encuentra presente el cuerpo" ER_EVENTS_DB_ERROR chi "无法继续,因为事件调度程序已禁用" eng "Cannot proceed, because event scheduler is disabled" ger "Die Operation kann nicht fortgesetzt werden, da Event Scheduler deaktiviert ist." 
+ spa "No puedo proceder porque el organizador de eventos está desactivado" ER_ONLY_INTEGERS_ALLOWED chi "这里只允许整数作为数字" eng "Only integers allowed as number here" ger "An dieser Stelle sind nur Ganzzahlen zulässig" + spa "Sólo se permiten enteros como número aquí" ER_UNSUPORTED_LOG_ENGINE chi "存储引擎%s不能用于日志表" eng "Storage engine %s cannot be used for log tables" ger "Speicher-Engine %s kann für Logtabellen nicht verwendet werden" hindi "स्टोरेज इंजन %s को लॉग टेबल्स के लिए इस्तेमाल नहीं किया जा सकता है" + spa "No se puede usar el motor de almacenaje %s para tablas de historial (log)" ER_BAD_LOG_STATEMENT chi "如果启用日志记录,则无法'%s'日志表" eng "You cannot '%s' a log table if logging is enabled" ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist" + spa "No puede '%s' una tabla de historial (log) cuando se encuentra activado el llevar historial (log)" ER_CANT_RENAME_LOG_TABLE chi "无法重命名'%s'。启用日志记录时,重命名日志表必须重命名两个表:日志表到存档表,另一个表返回'%s'" eng "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'" ger "Kann '%s' nicht umbenennen. Wenn Loggen angeschaltet ist, müssen zwei Tabellen umbenannt werden: die Logtabelle zu einer Archivtabelle, und eine weitere Tabelle zu '%s'" + spa "No puedo renombrar '%s'. 
Si se encuentra activo el llevar historial (log), el renombrar a/desde tabla de historial (log) debe de renombrar dos tablas: la tabla de historial (log) a una tabla archivo y otra tabla de vuelta a '%s'" ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT 42000 chi "对本机函数的呼叫中的参数计数不正确'%-.192s'" eng "Incorrect parameter count in the call to native function '%-.192s'" ger "Falsche Anzahl von Parametern beim Aufruf der nativen Funktion '%-.192s'" + spa "Contador de parámetro incorrecto en la llamada a función nativa '%-.192s'" ER_WRONG_PARAMETERS_TO_NATIVE_FCT 42000 chi "对本机函数'%-.192s'呼叫中的参数不正确" eng "Incorrect parameters in the call to native function '%-.192s'" ger "Falscher Parameter beim Aufruf der nativen Funktion '%-.192s'" + spa "Parámetros incorrectos en la llamada a función nativa '%-.192s'" ER_WRONG_PARAMETERS_TO_STORED_FCT 42000 chi "呼叫中的参数不正确为存储函数'%-.192s'" eng "Incorrect parameters in the call to stored function '%-.192s'" ger "Falsche Parameter beim Aufruf der gespeicherten Funktion '%-.192s'" + spa "Parámetros incorrectos en la llamada a función almacenada '%-.192s'" ER_NATIVE_FCT_NAME_COLLISION chi "此功能'%-.192s'具有与本机函数相同的名称" eng "This function '%-.192s' has the same name as a native function" ger "Die Funktion '%-.192s' hat denselben Namen wie eine native Funktion" + spa "Esta función '%-.192s' tiene el mismo nombre que una función nativa" # When using this error message, use the ER_DUP_ENTRY error code. See, for # example, code in handler.cc. ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009 @@ -6792,177 +7094,221 @@ ER_BINLOG_PURGE_EMFILE chi "打开太多文件,请再次执行命令" eng "Too many files opened, please execute the command again" ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus" + spa "Demasiados ficheros/archivos abiertos. Por favor, ejecute el comando otra vez" ER_EVENT_CANNOT_CREATE_IN_THE_PAST chi "事件执行时间在过去,并ON COMPLETION NOT PRESERVE。创建后,事件立即丢弃" eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. 
The event was dropped immediately after creation" ger "Ausführungszeit des Events liegt in der Vergangenheit, und es wurde ON COMPLETION NOT PRESERVE gesetzt. Das Event wurde unmittelbar nach Erzeugung gelöscht" + spa "El tiempo de ejecución del evento se encuentra en el pasado y está configurado ON COMPLETION NOT PRESERVE. El evento fue eliminado inmediatamente tras su creación" ER_EVENT_CANNOT_ALTER_IN_THE_PAST chi "事件执行时间在过去,并ON COMPLETION NOT PRESERVE。事件没有改变。指定将来的时间" eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future" ger "Execution Zeitpunkt des Ereignisses in der Vergangenheit liegt, und es war NACH ABSCHLUSS Set nicht erhalten. Die Veranstaltung wurde nicht verändert. Geben Sie einen Zeitpunkt in der Zukunft" + spa "El tiempo de ejecución del evento se encuentra en el pasado y está configurado ON COMPLETION NOT PRESERVE. El evento no fue cambiado. Especifique un tiempo del futuro" ER_SLAVE_INCIDENT chi "事件%s发生在master上。消息:%-.64s" eng "The incident %s occurred on the master. Message: %-.64s" ger "Der Vorfall %s passierte auf dem Master. Meldung: %-.64s" + spa "Ha ocurrido un incidente %s en el maestro (master). Mensaje: %-.64s" ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT chi "表对某些现有值没有分区" eng "Table has no partition for some existing values" ger "Tabelle hat für einige bestehende Werte keine Partition" + spa "La tabla no tiene partición para algunos valores existentes" ER_BINLOG_UNSAFE_STATEMENT chi "自从BINLOG_FORMAT =STATEMENT以来,使用语句格式写入二进制日志的不安全语句。%s." eng "Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. %s" ger "Unsichere Anweisung ins Binärlog geschrieben, weil Anweisungsformat BINLOG_FORMAT = STATEMENT. %s" + spa "Se ha grabado una sentencia no segura en historial (log) binario usando el formato de sentencia desde BINLOG_FORMAT = STATEMENT. %s" swe "Detta är inte säkert att logga i statement-format, för BINLOG_FORMAT = STATEMENT. 
%s" ER_SLAVE_FATAL_ERROR chi "致命错误:%s" eng "Fatal error: %s" ger "Fataler Fehler: %s" + spa "Error fatal: %s" ER_SLAVE_RELAY_LOG_READ_FAILURE chi "relay日志读取失败:%s" eng "Relay log read failure: %s" ger "Relaylog-Lesefehler: %s" + spa "Fallo de lectura en historial (log) de reenvío: %s" ER_SLAVE_RELAY_LOG_WRITE_FAILURE chi "relay日志写入失败:%s" eng "Relay log write failure: %s" ger "Relaylog-Schreibfehler: %s" + spa "Fallo de grabación en historial (log) de reenvío: %s" ER_SLAVE_CREATE_EVENT_FAILURE chi "无法创建%s" eng "Failed to create %s" ger "Erzeugen von %s fehlgeschlagen" hindi "%s को बनाने मैं असफल रहे" + spa "Fallo al crear %s" ER_SLAVE_MASTER_COM_FAILURE chi "Master命令%s失败:%s" eng "Master command %s failed: %s" ger "Master-Befehl %s fehlgeschlagen: %s" + spa "El comando maestro (master) %s ha fallado: %s" ER_BINLOG_LOGGING_IMPOSSIBLE chi "二进制记录不可能。消息:%s" eng "Binary logging not possible. Message: %s" ger "Binärlogging nicht möglich. Meldung: %s" + spa "No es posible llevar historial (log) binario. 
Mensaje: %s" ER_VIEW_NO_CREATION_CTX chi "View%`s.%`s没有创建上下文" eng "View %`s.%`s has no creation context" ger "View %`s.%`s hat keinen Erzeugungskontext" + spa "La vista %`s.%`s no tiene contexto de creación" ER_VIEW_INVALID_CREATION_CTX chi "Creation View%`s.%`s的上下文无效" eng "Creation context of view %`s.%`s is invalid" ger "Erzeugungskontext des Views%`s.%`s ist ungültig" + spa "El contexto de creación de la vista %`s.%`s es inválido" ER_SR_INVALID_CREATION_CTX chi "存储例程%`s.%`s的创建上下文无效" eng "Creation context of stored routine %`s.%`s is invalid" ger "Erzeugungskontext der gespeicherten Routine%`s.%`s ist ungültig" + spa "El contexto de creación de la rutina almacenada %`s.%`s es inválido" ER_TRG_CORRUPTED_FILE chi "表的trg文件损坏了。%`s.%`s" eng "Corrupted TRG file for table %`s.%`s" ger "Beschädigte TRG-Datei für Tabelle %`s.%`s" + spa "Fichero/archivo TRG estropeado para la tabla %`s.%`s`" ER_TRG_NO_CREATION_CTX chi "表%`s.%`s的触发器没有创建上下文" eng "Triggers for table %`s.%`s have no creation context" ger "Trigger für Tabelle %`s.%`s haben keinen Erzeugungskontext" + spa "Los disparadores para la tabla %`s.%`s no tienen contexto de creación" ER_TRG_INVALID_CREATION_CTX chi "触发表%`s.%`s的创建上下文无效" eng "Trigger creation context of table %`s.%`s is invalid" ger "Trigger-Erzeugungskontext der Tabelle %`s.%`s ist ungültig" + spa "El contexto de creación del disparador de la tabla %`s.%`s es inválido" ER_EVENT_INVALID_CREATION_CTX chi "事件%`s.%`s的创建上下文无效" eng "Creation context of event %`s.%`s is invalid" ger "Erzeugungskontext des Events %`s.%`s ist ungültig" + spa "El contexto de creación del evento %`s.%`s es inválido" ER_TRG_CANT_OPEN_TABLE chi "无法打开触发%`s.%`s的表" eng "Cannot open table for trigger %`s.%`s" ger "Kann Tabelle für den Trigger %`s.%`s nicht öffnen" + spa "No puedo abrir tabla para disparador %`s.%`s" ER_CANT_CREATE_SROUTINE chi "无法创建存储过程%`s。检查警告" eng "Cannot create stored routine %`s. Check warnings" ger "Kann gespeicherte Routine %`s nicht erzeugen. 
Beachten Sie die Warnungen" + spa "No puedo crear rutina almacenada %`s. Revise los avisos" ER_UNUSED_11 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT chi "类型%s的Binlog语句未在格式描述binlog语句之前" eng "The BINLOG statement of type %s was not preceded by a format description BINLOG statement" ger "Der BINLOG-Anweisung vom Typ %s ging keine BINLOG-Anweisung zur Formatbeschreibung voran" + spa "La sentencia BINLOG del tipo `%s` no ha sido precedida por una sentencia de descripción de formato BINLOG" ER_SLAVE_CORRUPT_EVENT chi "检测到损坏的复制事件" eng "Corrupted replication event was detected" ger "Beschädigtes Replikationsereignis entdeckt" + spa "Se ha detectado un evento de réplica estropeado" ER_LOAD_DATA_INVALID_COLUMN chi "LOAD DATA中的列引用(%-.64s)无效" eng "Invalid column reference (%-.64s) in LOAD DATA" ger "Ungültige Spaltenreferenz (%-.64s) bei LOAD DATA" + spa "Referencia inválida a columna (%-.64s) en LOAD DATA" ER_LOG_PURGE_NO_FILE chi "未找到清除的log%s" eng "Being purged log %s was not found" ger "Zu bereinigende Logdatei %s wurde nicht gefunden" + spa "No ha sido hallado historial (log) %s siendo purgado" ER_XA_RBTIMEOUT XA106 chi "XA_RBTIMEOUT:交易分支回滚:花了太久了" eng "XA_RBTIMEOUT: Transaction branch was rolled back: took too long" ger "XA_RBTIMEOUT: Transaktionszweig wurde zurückgerollt: Zeitüberschreitung" + spa "XA_RBTIMEOUT: Rama de transacción ha sido retrocedida (rolled back): transcurrido demasiado tiempo" ER_XA_RBDEADLOCK XA102 chi "XA_RBDEADLOCK:交易分支回滚:检测到死锁" eng "XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected" ger "XA_RBDEADLOCK: Transaktionszweig wurde zurückgerollt: Deadlock entdeckt" + spa "XA_RBDEADLOCK: Rama de transacción ha sido retrocedida (rolled back): se ha detectado estancamiento (deadlock)" ER_NEED_REPREPARE chi "prepared statement需要重新准备" eng "Prepared statement needs to be re-prepared" ger "Vorbereitete Anweisungen müssen noch einmal vorbereitet 
werden" + spa "Sentencia preparada necesita volver a ser preparada" ER_DELAYED_NOT_SUPPORTED chi "表'%-.192s'不支持延迟选项" eng "DELAYED option not supported for table '%-.192s'" ger "Die DELAYED-Option wird für Tabelle '%-.192s' nicht unterstützt" -WARN_NO_MASTER_INFO + spa "Opción DELAYED no soportada para la tabla '%-.192s'" +WARN_NO_MASTER_INFO eng "There is no master connection '%.*s'" ger "Die Master-Info-Struktur existiert nicht '%.*s'" + spa "No existe conexión maestra '%.*s'" WARN_OPTION_IGNORED eng "<%-.64s> option ignored" ger "Option <%-.64s> ignoriert" + spa "Opción <%-.64s> ignorada" ER_PLUGIN_DELETE_BUILTIN chi "内置插件无法删除" eng "Built-in plugins cannot be deleted" ger "Eingebaute Plugins können nicht gelöscht werden" + spa "No se pueden borrar los enchufes (plugins) internos" WARN_PLUGIN_BUSY chi "插件很忙,将在关机时卸载" eng "Plugin is busy and will be uninstalled on shutdown" ger "Plugin wird verwendet und wird erst beim Herunterfahren deinstalliert" + spa "El enchufe (plugin) está ocupado y será desinstalado cuando se apague" ER_VARIABLE_IS_READONLY chi "%s变量'%s'是只读的。使用set%s付值" eng "%s variable '%s' is read-only. Use SET %s to assign the value" ger "%s Variable '%s' ist nur lesbar. Benutzen Sie SET %s, um einen Wert zuzuweisen" + spa "%s variable '%s' es de sólo lectura. Use SET %s para asignar el valor" ER_WARN_ENGINE_TRANSACTION_ROLLBACK chi "存储引擎%s不支持此语句的回滚。交易回滚并必须重新启动" eng "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted" ger "Speicher-Engine %s unterstützt für diese Anweisung kein Rollback. Transaktion wurde zurückgerollt und muss neu gestartet werden" + spa "El motor de almacenaje %s no soporta retroceso (rollback) para esta sentencia. 
Transacción retrocedida (rolled back) y debe de ser rearrancada" ER_SLAVE_HEARTBEAT_FAILURE chi "意外的master心跳数据:%s" eng "Unexpected master's heartbeat data: %s" ger "Unerwartete Daten vom Heartbeat des Masters: %s" + spa "Datos inesperados de latido (heartbeat) de maestro (master): %s" ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE chi "心跳周期的请求值是负的或超过允许的最大值(%u秒)" eng "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%u seconds)" + spa "El valor requerido para el período de latido o es negativo o excede al máximo permitido (%u segundos)" ER_UNUSED_14 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_CONFLICT_FN_PARSE_ERROR chi "解析冲突功能时出错。消息:%-.64s" eng "Error in parsing conflict function. Message: %-.64s" ger "Fehler beim Parsen einer Konflikt-Funktion. Meldung: %-.64s" + spa "Error al analizar función de conflicto. Mensaje: %-.64s" ER_EXCEPTIONS_WRITE_ERROR chi "写入异常表失败。消息:%-.128s" eng "Write to exceptions table failed. Message: %-.128s"" ger "Schreiben in Ausnahme-Tabelle fehlgeschlagen. Meldung: %-.128s"" + spa "Ha fallado el grabar en tabla de excepciones. Mensaje: %-.128s"" ER_TOO_LONG_TABLE_COMMENT chi "表格备注'%-.64s'太长(max =%u)" eng "Comment for table '%-.64s' is too long (max = %u)" ger "Kommentar für Tabelle '%-.64s' ist zu lang (max = %u)" por "Comentário para a tabela '%-.64s' é longo demais (max = %u)" + spa "El comentario para tabla '%-.64s' es demasiado largo (máx = %u)" ER_TOO_LONG_FIELD_COMMENT chi "字段'%-.64s'太长(max =%u)" eng "Comment for field '%-.64s' is too long (max = %u)" ger "Kommentar für Feld '%-.64s' ist zu lang (max = %u)" por "Comentário para o campo '%-.64s' é longo demais (max = %u)" + spa "El comentario para el campo '%-.64s' es demasiado largo (máx = %u)" ER_FUNC_INEXISTENT_NAME_COLLISION 42000 chi "FUNCTION %s不存在。在参考手册中查看“函数名称解析”部分" eng "FUNCTION %s does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual" ger "FUNCTION %s existiert nicht. Erläuterungen im Abschnitt 'Function Name Parsing and Resolution' im Referenzhandbuch" + spa "La FUNCTION %s no existe. Revise la sección de 'Function Name Parsing and Resolution' en el Manual de Referencia" # When updating these, please update EXPLAIN_FILENAME_MAX_EXTRA_LENGTH in # sql_table.h with the new maximal additional length for explain_filename. ER_DATABASE_NAME @@ -6970,252 +7316,319 @@ ER_DATABASE_NAME eng "Database" ger "Datenbank" hindi "डेटाबेस" + spa "Base de datos" swe "Databas" ER_TABLE_NAME chi "表" eng "Table" ger "Tabelle" hindi "टेबल" + spa "Tabla" swe "Tabell" ER_PARTITION_NAME chi "分区" eng "Partition" ger "Partition" hindi "पार्टीशन" + spa "Partición" swe "Partition" ER_SUBPARTITION_NAME chi "下分区" eng "Subpartition" ger "Unterpartition" hindi "सब-पार्टीशन" + spa "Subpartición" swe "Subpartition" ER_TEMPORARY_NAME chi "暂时的" eng "Temporary" ger "Temporär" hindi "अस्थायी" + spa "Temporaria" swe "Temporär" ER_RENAMED_NAME chi "重命名" eng "Renamed" ger "Umbenannt" + spa "Renombrado" swe "Namnändrad" ER_TOO_MANY_CONCURRENT_TRXS chi "“太多并发交易" eng "Too many active concurrent transactions" ger "Zu viele aktive simultane Transaktionen" + spa "Demasiadas transacciones concurrentes activas" WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED chi "非ASCII分隔符参数不完全支持" eng "Non-ASCII separator arguments are not fully supported" ger "Nicht-ASCII-Trennargumente werden nicht vollständig unterstützt" + spa "Los argumentos con separador No ASCII no están completamente soportados" ER_DEBUG_SYNC_TIMEOUT chi "调试同步点等待超时" eng "debug sync point wait timed out" ger "Debug Sync Point Wartezeit überschritten" + spa "agotado tiempo de espera de punto de sincronización de depuración" ER_DEBUG_SYNC_HIT_LIMIT chi "调试同步点限制达到" eng "debug sync point hit limit reached" ger "Debug Sync Point Hit Limit erreicht" + spa "alcanzado límite de punto de sincronización de depuración" 
ER_DUP_SIGNAL_SET 42000 chi "重复条件信息项'%s'" eng "Duplicate condition information item '%s'" ger "Informationselement '%s' für Duplikatbedingung" + spa "Duplicado elemento de información de condición '%s'" # Note that the SQLSTATE is not 01000, it is provided by SIGNAL/RESIGNAL ER_SIGNAL_WARN 01000 chi "未处理用户定义的警告条件" eng "Unhandled user-defined warning condition" ger "Unbehandelte benutzerdefinierte Warnbedingung" + spa "Condición de aviso definida por usuario sin manejar" # Note that the SQLSTATE is not 02000, it is provided by SIGNAL/RESIGNAL ER_SIGNAL_NOT_FOUND 02000 chi "未找到的用户定义未找到条件" eng "Unhandled user-defined not found condition" ger "Unbehandelte benutzerdefinierte Nicht-gefunden-Bedingung" + spa "Condición de no hallado definida por usuario sin manejar" # Note that the SQLSTATE is not HY000, it is provided by SIGNAL/RESIGNAL ER_SIGNAL_EXCEPTION HY000 chi "未处理用户定义的异常条件" eng "Unhandled user-defined exception condition" ger "Unbehandelte benutzerdefinierte Ausnahmebedingung" + spa "Condición de excepción definida por usuario sin manejar" ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER 0K000 chi "RESIGNAL处理程序不活跃" eng "RESIGNAL when handler not active" ger "RESIGNAL bei nicht aktivem Handler" + spa "RESIGNAL cuando el manejador no activo" ER_SIGNAL_BAD_CONDITION_TYPE chi "SIGNAL/RESIGNAL只能使用SQLState定义的条件" eng "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE" ger "SIGNAL/RESIGNAL kann nur mit einer Bedingung (CONDITION) benutzt werden, die bei SQLSTATE definiert wurde" + spa "SIGNAL/RESIGNAL sólo pueden usar una CONDITION definida mediante SQLSTATE" WARN_COND_ITEM_TRUNCATED chi "数据被截断为条件项目'%s'" eng "Data truncated for condition item '%s'" ger "Daten gekürzt für Bedingungselement '%s'" + spa "Datos truncados para elemento de condición '%s'" ER_COND_ITEM_TOO_LONG chi "条件项目'%s'的数据太长" eng "Data too long for condition item '%s'" ger "Daten zu lang für Bedingungselement '%s'" + spa "Datos demasiados largos para elemento de condición '%s'" ER_UNKNOWN_LOCALE chi 
"未知区域设置:'%-.64s'" eng "Unknown locale: '%-.64s'" ger "Unbekannte Locale: '%-.64s'" + spa "Localización (locale) desconocida: '%-.64s'" ER_SLAVE_IGNORE_SERVER_IDS chi "请求的服务器ID%d与SLAVE启动选项--replicate-same-server-id冲突" eng "The requested server id %d clashes with the slave startup option --replicate-same-server-id" ger "Die angeforderte Server-ID %d steht im Konflikt mit der Startoption --replicate-same-server-id für den Slave" + spa "La id %d del servidor requerido choca con la opción de arranque del esclavo --replicate-same-server-id" ER_QUERY_CACHE_DISABLED chi "查询缓存已禁用;将query_cache_type设置为ON或DEMAND启用它" eng "Query cache is disabled; set query_cache_type to ON or DEMAND to enable it" + spa "Caché de consulta (query) desactivada; configura query_cache_type a ON o DEMAND para activarla" ER_SAME_NAME_PARTITION_FIELD chi "重复分区字段名称'%-.192s'" eng "Duplicate partition field name '%-.192s'" ger "Partitionsfeld '%-.192s' ist ein Duplikat" + spa "Nombre de campo de partición duplicado '%-.192s'" ER_PARTITION_COLUMN_LIST_ERROR chi "分区用的列和列表使用不一致" eng "Inconsistency in usage of column lists for partitioning" ger "Inkonsistenz bei der Benutzung von Spaltenlisten für Partitionierung" + spa "Inconsistencia en uso de listas de columna para particionar" ER_WRONG_TYPE_COLUMN_VALUE_ERROR chi "不正确类型的分区列值" eng "Partition column values of incorrect type" ger "Partitionsspaltenwerte sind vom falschen Typ" + spa "Valores de columna de partición de tipo incorrecto" ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR chi "'%-.192s'中的太多字段" eng "Too many fields in '%-.192s'" ger "Zu viele Felder in '%-.192s'" + spa "Demasiados campos en '%-.192s'" ER_MAXVALUE_IN_VALUES_IN chi "不能在VALUES IN使用MAXVALUE" eng "Cannot use MAXVALUE as value in VALUES IN" ger "MAXVALUE kann nicht als Wert in VALUES IN verwendet werden" + spa "No puedo usar MAXVALUE como valor en VALUES IN" ER_TOO_MANY_VALUES_ERROR chi "这种类型不能有多个值%-.64s 分区" eng "Cannot have more than one value for this type of %-.64s partitioning" ger "Für den 
Partionierungstyp %-.64s darf es nicht mehr als einen Wert geben" + spa "No puedo tener más de un valor para este tipo de particionamiento %-.64s" ER_ROW_SINGLE_PARTITION_FIELD_ERROR chi "仅允许的多字段列分区的VALUES IN的行表达式" eng "Row expressions in VALUES IN only allowed for multi-field column partitioning" ger "Zeilenausdrücke in VALUES IN sind nur für Mehrfeld-Spaltenpartionierung erlaubt" + spa "Expresiones de fila en VALUES IN sólo permitidas para particionamiento de columna multi-campo" ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD chi "字段'%-.192s'类型不允许为此类型的分区类型" eng "Field '%-.192s' is of a not allowed type for this type of partitioning" ger "Feld '%-.192s' ist für diese Art von Partitionierung von einem nicht zulässigen Typ" + spa "El campo '%-.192s' es de un tipo no permitido para este tipo de particionado" ER_PARTITION_FIELDS_TOO_LONG chi "分区字段的总长度太大" eng "The total length of the partitioning fields is too large" ger "Die Gesamtlänge der Partitionsfelder ist zu groß" + spa "El tamaño total de los campos de particionado es demasiado grande" ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE chi "无法执行语句:由于引擎不能支持行和语句,因此无法写入二进制日志" eng "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved" + spa "No puedo ejecutar sentencia: imposible grabar historial (log) binario ya que ambos motores de fila-incapaz y de sentencia-incapaz están involucrados" ER_BINLOG_ROW_MODE_AND_STMT_ENGINE chi "无法执行语句:由于BINLOG_FORMAT =ROW和至少一个表使用存储引擎限制为基于语句的日志记录,因此无法写入二进制日志" eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine limited to statement-based logging" + spa "No puedo ejecutar sentencia: imposible grabar historial (log) binario ya que BINLOG_FORMAT = ROW y al menos una tabla utiliza motor de almacenaje limitado a historiales basados en sentencia" ER_BINLOG_UNSAFE_AND_STMT_ENGINE chi "无法执行语句:由于语句不安全,无法写入二进制日志,存储引擎仅限于基于语句的日志记录,而BINLOG_FORMAT 
= MIXED。%s." eng "Cannot execute statement: impossible to write to binary log since statement is unsafe, storage engine is limited to statement-based logging, and BINLOG_FORMAT = MIXED. %s" + spa "No puedo ejecutar sentencia: imposible grabar historial (log) binario ya que la sentencia no es segura, el motor de almacenaje está limitado a historial basado en sentencia y BINLOG_FORMAT = MIXED. %s" ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE chi "无法执行语句:由于语句以行格式,至少一个表使用基于语句的日志记录的存储引擎,因此无法写入二进制日志。" eng "Cannot execute statement: impossible to write to binary log since statement is in row format and at least one table uses a storage engine limited to statement-based logging" + spa "No puedo ejecutar sentencia: imposible grabar historial (log) binario ya que la sentencia está en un formato de fila y al menos una tabla utiliza un motor de almacenaje limitado a historial basado en sentencia" ER_BINLOG_STMT_MODE_AND_ROW_ENGINE chi "无法执行语句:由于BINLOG_FORMAT = STATEMENT,并且至少一个表使用存储引擎限制为基于行的日志记录,因此无法写入二进制日志。%s" eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging.%s" + spa "No puedo ejecutar sentencia: imposible grabar historial (log) binario ya que BINLOG_FORMAT = STATEMENT y al menos una tabla utilizan un motor de almacenaje limitado a historial basado en fila. 
%s" ER_BINLOG_ROW_INJECTION_AND_STMT_MODE chi "无法执行语句:由于语句的正常格式和BINLOG_FORMAT = STATEMENT,因此无法写入二进制日志" eng "Cannot execute statement: impossible to write to binary log since statement is in row format and BINLOG_FORMAT = STATEMENT" + spa "No puedo ejecutar sentencia: imposible grabar historial (log) binario ya que la sentencia está en formato de fila y BINLOG_FORMAT = STATEMENT" ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE chi "无法执行语句:由于涉及多个引擎并且至少有一个引擎是自记录的,因此无法写入二进制日志。" eng "Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging" + spa "No puedo ejecutar sentencia: imposible grabar historial (log) binario ya que hay más de un motor involucrado y al menos un motor usa auto-historial" ER_BINLOG_UNSAFE_LIMIT chi "该语句不安全,因为它使用限制子句。这不安全,因为所包含的一组行无法预测" eng "The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted" + spa "La sentencia no es segura debido a usar una cláusula LIMIT. No es segura porque el conjunto incluido de filas no se puede predecir" ER_BINLOG_UNSAFE_INSERT_DELAYED chi "该声明不安全,因为它使用插入延迟。这是不安全的,因为无法预测插入行的时间" eng "The statement is unsafe because it uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted" + spa "La sentencia no es segura debido a usar una INSERT DELAYED. No es segura porque los momentos en que las filas han de insertarse no se pueden predecir" ER_BINLOG_UNSAFE_SYSTEM_TABLE chi "该声明不安全,因为它使用常规日志,慢查询日志或performance_schema表。这是不安全的,因为系统表可能在slave上不同" eng "The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves" + spa "La sentencia no es segura debido a usar el historial (log) general, un historial (log) lento o tabla(s) de performance_schema. 
No es segura porque las tablas de sistema pueden diferir de las esclavas" ER_BINLOG_UNSAFE_AUTOINC_COLUMNS chi "语句不安全,因为它调用了插入AUTO_INCREMENT列的触发器或存储函数。插入的值无法正确记录" eng "Statement is unsafe because it invokes a trigger or a stored function that inserts into an AUTO_INCREMENT column. Inserted values cannot be logged correctly" + spa "La sentencia no es segura debido a invocar un disparador o una función almacenada que inserta una columna de AUTO_INCREMENT. No se puede llevar historial correcto de Los valores insertados". ER_BINLOG_UNSAFE_UDF chi "语句不安全,因为它使用了一个可能在从设备上返回相同值的UDF" eng "Statement is unsafe because it uses a UDF which may not return the same value on the slave" + spa "La sentencia no es segura porque usa un UDF que puede no devolver el mismo valor en el esclavo" ER_BINLOG_UNSAFE_SYSTEM_VARIABLE chi "语句不安全,因为它使用的系统变量可能在从站上具有不同的值" eng "Statement is unsafe because it uses a system variable that may have a different value on the slave" + spa "La sentencia no es segura porque usa una variable de sistema que puede tener un valor diferente en el esclavo" ER_BINLOG_UNSAFE_SYSTEM_FUNCTION chi "语句不安全,因为它使用系统函数可能在从站上返回不同的值" eng "Statement is unsafe because it uses a system function that may return a different value on the slave" + spa "La sentencia no es segura porque usa una función de sistema que puede devolver un valor diferente en el esclavo" ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS chi "语句不安全,因为它在访问同一事务中访问事务表后访问非事务性表" eng "Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction" + spa "La sentencia no es segura porque accede a una tabla no transaccional tras acceder a una transaccional dentro de la misma transacción" ER_MESSAGE_AND_STATEMENT chi "%s.语句:%s" eng "%s. Statement: %s" + spa "%s. 
Sentencia: %s" ER_SLAVE_CONVERSION_FAILED chi "列%d表'%-.192s.%-.192s'无法从'%-.50s'类型为'%-.50s'" eng "Column %d of table '%-.192s.%-.192s' cannot be converted from type '%-.50s' to type '%-.50s'" + spa "La columna %d de la tabla '%-.192s.%-.192s' no puede ser convertida desde el tipo '%-.50s' al tipo '%-.50s'" ER_SLAVE_CANT_CREATE_CONVERSION chi "无法为表创建转换表'%-.192s.%-.192s'" eng "Can't create conversion table for table '%-.192s.%-.192s'" + spa "No puedo crear tabla de conversión para la tabla '%-.192s.%-.192s'" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT chi "无法在事务中修改@@session.binlog_format" eng "Cannot modify @@session.binlog_format inside a transaction" + spa "No puedo modificar @@session.binlog_format dentro de una transacción" ER_PATH_LENGTH chi "指定%.64T的路径太长了" eng "The path specified for %.64T is too long" hindi "%.64T के लिए निर्दिष्ट पथ बहुत लंबा है" + spa "La ruta especificada para %.64T es demasiado larga" ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT chi "'%s'被弃用,将在将来的版本中删除" eng "'%s' is deprecated and will be removed in a future release" ger "'%s' ist veraltet und wird in einer zukünftigen Version entfernt werden" + spa "'%s' está obsoleto y será quitada en una entrega futura" ER_WRONG_NATIVE_TABLE_STRUCTURE chi "本机表'%-.64s'。'%-.64s'具有错误的结构" eng "Native table '%-.64s'.'%-.64s' has the wrong structure" + spa "La tabla nativa '%-.64s'.'%-.64s' tiene una estructura equivocada" ER_WRONG_PERFSCHEMA_USAGE chi "performance_schema使用无效" eng "Invalid performance_schema usage" hindi "performance_schema का अवैध उपयोग" + spa "Uso inválido de performance_schema" ER_WARN_I_S_SKIPPED_TABLE chi "表'%s'.'%s'由于并发DDL语句正在修改其定义,因此跳过" eng "Table '%s'.'%s' was skipped since its definition is being modified by concurrent DDL statement" + spa "La tabla '%s'.'%s' fue saltada ya que su definición está siendo modificada por la sentencia DDL concurrente" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT chi "无法在交易事务中修改@@session.binlog_direct_non_transactional_updates" eng "Cannot 
modify @@session.binlog_direct_non_transactional_updates inside a transaction" + spa "No puedo modificar @@session.binlog_direct_non_transactional_updates dentro de una transacción" ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT chi "无法在存储的函数或触发器内更改Binlog Direct标志" eng "Cannot change the binlog direct flag inside a stored function or trigger" + spa "No puedo cambiar la bandera directa de binlog dentro de una función almacenada o de un disparador" ER_SPATIAL_MUST_HAVE_GEOM_COL 42000 chi "空间索引可以仅包含几何类型列" eng "A SPATIAL index may only contain a geometrical type column" ger "Ein raumbezogener Index (SPATIAL) darf nur Spalten geometrischen Typs enthalten" + spa "Un índice SPATIAL sólo puede contener una columna de tipo geométrico" ER_TOO_LONG_INDEX_COMMENT chi "索引评论'%-.64s'太长(max =%lu)" eng "Comment for index '%-.64s' is too long (max = %lu)" + spa "El comentario para el índice '%-.64s' es demasiado largo (máx = %lu)" ER_LOCK_ABORTED chi "由于待处理的独家锁,等待锁被中止" eng "Wait on a lock was aborted due to a pending exclusive lock" + spa "Se ha abortado la espera por un bloqueo debido a bloqueo exclusivo pendiente" ER_DATA_OUT_OF_RANGE 22003 chi "%s值超出'%s'范围" eng "%s value is out of range in '%s'" + spa "%s valor se encuentra fuera de rango '%s'" ER_WRONG_SPVAR_TYPE_IN_LIMIT chi "基于非整数类型的基于LIMIT子句的变量" eng "A variable of a non-integer based type in LIMIT clause" + spa "Una variable de tipo basado en no entero en cláusula LIMIT" ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE chi "混合声明中的自记录和非自动记录引擎是不安全的" eng "Mixing self-logging and non-self-logging engines in a statement is unsafe" + spa "No es segura la mezcla de motores de auto-historial (log) y de no auto-historial en una sentencia" ER_BINLOG_UNSAFE_MIXED_STATEMENT chi "语句访问非致突变表以及事务性或临时表,并写入其中任何一个" eng "Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them" + spa "La sentencia accede a tabla no transaccional así como transaccional o tabla temporal y graba en 
cualquiera de ellas" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN chi "无法修改事务中的@@sessient.sql_log_bin" eng "Cannot modify @@session.sql_log_bin inside a transaction" + spa "No puedo modificar @@session.sql_log_bin dentro de una transacción" ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN chi "无法在存储的函数或触发器内更改SQL_LOG_BIN" eng "Cannot change the sql_log_bin inside a stored function or trigger" + spa "No puedo cambiar sql_log_bin dentro de una función almacenada o disparador" ER_FAILED_READ_FROM_PAR_FILE chi "无法从.par文件中读取" eng "Failed to read from the .par file" hindi ".par फ़ाइल से पढ़ने में असफल रहे" + spa "No pude leer desde fichero/archivo .par" swe "Misslyckades läsa från .par filen" ER_VALUES_IS_NOT_INT_TYPE_ERROR chi "分区的值'%-.64s'必须具有类型INT" eng "VALUES value for partition '%-.64s' must have type INT" + spa "El valor VALUES para la partición '%-.64s' debe de tener el tipo INT" swe "Värden i VALUES för partition '%-.64s' måste ha typen INT" ER_ACCESS_DENIED_NO_PASSWORD_ERROR 28000 chi "拒绝用户'%s'@'%s'" @@ -7238,118 +7651,145 @@ ER_ACCESS_DENIED_NO_PASSWORD_ERROR 28000 rus "Доступ закрыт для пользователя '%s'@'%s'" serbian "Pristup je zabranjen korisniku '%s'@'%s'" slo "Zakázaný prístup pre užívateľa: '%s'@'%s'" - spa "Acceso negado para usuario: '%s'@'%s'" + spa "Acceso denegado para usuario: '%s'@'%s'" swe "Användare '%s'@'%s' är ej berättigad att logga in" ukr "Доступ заборонено для користувача: '%s'@'%s'" ER_SET_PASSWORD_AUTH_PLUGIN chi "通过%s插件验证的用户忽略SET PASSWORD" eng "SET PASSWORD is ignored for users authenticating via %s plugin" + spa "SET PASSWORD no tiene significado para usuarios que se autentican vía enchufe (plugin) %s" ER_GRANT_PLUGIN_USER_EXISTS chi "由于用户%-.*s已经存在,GRANT IDENTIFIED WITH授权是非法的" eng "GRANT with IDENTIFIED WITH is illegal because the user %-.*s already exists" + spa "GRANT con IDENTIFIED WITH es ilegal porque el usuario %-.*s ya existe" ER_TRUNCATE_ILLEGAL_FK 42000 chi "无法截断外键约束中引用的表(%.192s)" eng "Cannot truncate a table referenced in a 
foreign key constraint (%.192s)" + spa "No puedo truncar una tabla referenciada en una restricción de clave foránea (%.192s)" ER_PLUGIN_IS_PERMANENT chi "插件'%s'是force_plus_permanent,无法卸载" eng "Plugin '%s' is force_plus_permanent and can not be unloaded" + spa "El enchufe (plugin) '%s' está force_plus_permanent y no puede ser descargado" ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN chi "心跳期的要求值小于1毫秒。该值重置为0,这意味着心跳将有效地禁用" eng "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled" + spa "El valor de requerimiento para el período de latido es menor de 1 milisegundo. El valor se vuelve a poner a 0, indicando que el latido será efectivamente desactivado" ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX chi "心跳期的请求值超出了“slave_net_timeout”秒的值。该期间的明智价值应小于超时" eng "The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout" + spa "El valor de requerimiento para el período de latido excede el valor de `slave_net_timeout` segundos. Un valor sensible para el período debería de ser menor que el 'tiempo agotado'" ER_STMT_CACHE_FULL chi "需要多行语句超过“max_binlog_stmt_cache_size”字节的存储;增加这个mariadbd变量,然后重试" eng "Multi-row statements required more than 'max_binlog_stmt_cache_size' bytes of storage." + spa "Las sentencias Multi-fila requieren de más de 'max_binlog_stmt_cache_size' bytes para almacenaje." ER_MULTI_UPDATE_KEY_CONFLICT chi "由于表格被更新为'%-.192s'和'%-.192s',因此不允许允许主键/分区索引更新。" eng "Primary key/partition key update is not allowed since the table is updated both as '%-.192s' and '%-.192s'" + spa "La actualización de clave primaria de o de partición no está permitida por estar la tabla actualizada como '%-.192s' y '%-.192s'" # When translating this error message make sure to include "ALTER TABLE" in the # message as mariadb-check parses the error message looking for ALTER TABLE. 
ER_TABLE_NEEDS_REBUILD chi "表需重建。请做ALTER TABLE %`s FORCE”或转储/重新加载以修复它!" eng "Table rebuild required. Please do \"ALTER TABLE %`s FORCE\" or dump/reload to fix it!" + spa "Se requiere reconstrucción de la tabla. Por favor, ¡haga un \"ALTER TABLE %`s FORCE\" o volcado/recarga para solucionarlo!" WARN_OPTION_BELOW_LIMIT chi "'%s'的值应该不小于'%s'的值" eng "The value of '%s' should be no less than the value of '%s'" + spa "El valor de '%s' debería de ser no menor que el valor de '%s'" ER_INDEX_COLUMN_TOO_LONG chi "索引列太大。最大列大小为%lu字节" eng "Index column size too large. The maximum column size is %lu bytes" + spa "El tamaño de índice de columna es demasiado grande. El tamaño máximo de columna es de %lu bytes" ER_ERROR_IN_TRIGGER_BODY chi "触发器'%-.64s'内存在错误:'%-.256s'" eng "Trigger '%-.64s' has an error in its body: '%-.256s'" + spa "El disparador '%-.64s' tiene una error en su cuerpo: '%-.256s'" ER_ERROR_IN_UNKNOWN_TRIGGER_BODY chi "未知触发器内存在错误:'%-.256s'" eng "Unknown trigger has an error in its body: '%-.256s'" + spa "El disparador desconocido tiene un error en su cuerpo: '%-.256s'" ER_INDEX_CORRUPT chi "索引%s已损坏" eng "Index %s is corrupted" + spa "El índice %s está estropeado" ER_UNDO_RECORD_TOO_BIG chi "撤消日志记录太大" eng "Undo log record is too big" + spa "El registro de historial (log) para deshacer es demasiado grande" ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT chi "INSERT IGNORE...SELECT不安全,因为选择由select检索行的顺序确定哪个(如果有)行被忽略。无法预测此顺序,并且在master和slave方面可能有所不同" eng "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave" + spa "INSERT IGNORE... SELECT es no seguro porque el orden en el que las filas se recuperan por el SELECT determina qué filas (si hay alguna) son ignoradas. Este orden no puede ser predicho y puede diferir entre maestro (master) y esclavo" ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE chi "INSERT... SELECT... 
ON DUPLICATE KEY UPDATE是不安全的,因为SELECT检索行的顺序确定哪个(如果有的话)是更新的。无法预测此顺序,并且在master和slave方面可能有所不同" eng "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave" + spa "INSERT... SELECT... ON DUPLICATE KEY UPDATE no es seguro porque el orden en el que las filas se recuperan por el SELECT determina qué filas (si hay alguna) son actualizadas. Este orden no puede ser predicho y puede diferir entre maestro (master) y esclavo" ER_BINLOG_UNSAFE_REPLACE_SELECT chi "REPLACE... SELECT 不安全,因为选择由select检索行的顺序确定哪个(如果有)行被替换。无法预测此顺序,并且在master和slave方面可能有所不同" eng "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave" + spa "REPLACE... SELECT no es seguro porque el orden en el que las filas se recuperan por el SELECT determina qué filas (si hay alguna) son sustituidas. Este orden no puede ser predicho y puede diferir entre maestro (master) y esclavo" ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT chi "CREATE... IGNORE SELECT是不安全,因为选择由SELECT检索行的顺序确定哪个(如果有)行被忽略。无法预测此顺序,并且在master和slave方面可能有所不同" eng "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave" + spa "CREATE... IGNORE SELECT no es seguro porque el orden en el que las filas se recuperan por el SELECT determina qué filas (si hay alguna) son ignoradas. Este orden no puede ser predicho y puede diferir entre maestro (master) y esclavo" ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT chi "CREATE... REPLACE SELECT不安全,因为选择由SELECT检索行的顺序确定哪个(如果有)是替换哪个(如果有的话)。无法预测此顺序,并且在master和slave方面可能有所不同" eng "CREATE... 
REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave" + spa "CREATE... REPLACE SELECT no es seguro porque el orden en el que las filas se recuperan por el SELECT determina qué filas (si hay alguna) son sustituidas. Este orden no puede ser predicho y puede diferir entre maestro (master) y esclavo" ER_BINLOG_UNSAFE_UPDATE_IGNORE chi "UPDATE IGNORE不安全,因为更新行的顺序确定了哪个(如果有)行被忽略。无法预测此顺序,并且在master和slave方面可能有所不同" eng "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave" + spa "UPDATE IGNORE no es seguro porque el orden en el que las filas son actualizadas determina qué filas (si hay alguna) son ignoradas. Este orden no puede ser predicho y puede diferir entre maestro (master) y esclavo" ER_UNUSED_15 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_UNUSED_16 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT chi "从另一个表选择后,使用自动增量列的表格写入的语句是不安全的,因为检索行的顺序确定将写入哪些(如果有)行。无法预测此顺序,并且在主站和slave方面可能有所不同" eng "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave" + spa "Las sentencias que graban en una tabla con columna de auto-incremento tras seleccionar desde otra tabla no son seguras porque el orden en el que las filas son recuperadas determina qué filas (si hay alguna) serán grabadas. 
Este orden no puede ser predicho y puede diferir entre maestro (master) y esclavo" ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC chi "创建表...在具有自动增量列的表上选择...不安全,因为选择的顺序是由select检索行的顺序,确定插入哪个(如果有)行。无法预测此订单,并且在主站和slave方面可能有所不同" eng "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave" + spa "CREATE TABLE... SELECT... en una tabla con columna de auto-incremento no es segura porque el orden en el que las filas son recuperadas por el SELECT determina qué filas (si hay alguna) serán insertadas. Este orden no puede ser predicho y puede diferir entre maestro (master) y esclavo" ER_BINLOG_UNSAFE_INSERT_TWO_KEYS chi "在具有多个唯一键的表上INSERT... ON DUPLICATE KEY UPDATE的重复索引更新是不安全的" eng "INSERT... ON DUPLICATE KEY UPDATE on a table with more than one UNIQUE KEY is unsafe" + spa "INSERT... ON DUPLICATE KEY UPDATE en una tabla con más de una UNIQUE KEY no es segura" -ER_TABLE_IN_FK_CHECK - chi "外键检查在用此表" - eng "Table is being used in foreign key check" +ER_UNUSED_28 + chi "你永远不应该看到它" + eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_VERS_NOT_ALLOWED chi "系统版本的表%`s.%`s不允许" @@ -7358,6 +7798,7 @@ ER_VERS_NOT_ALLOWED ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST chi "插入AutoIncrement字段,该字段不是组成的主键中的第一部分是不安全的" eng "INSERT into autoincrement field which is not the first part in the composed primary key is unsafe" + spa "INSERT dentro de campo con autoincremento que no es la primera parte en la clave primaria compuesta no es seguro" # # End of 5.5 error messages. # @@ -7366,366 +7807,455 @@ ER_CANNOT_LOAD_FROM_TABLE_V2 chi "不能从加载%s.%s。表可能损坏了" eng "Cannot load from %s.%s. The table is probably corrupted" ger "Kann %s.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt" + spa "No puedo cargar desde %s.%s. 
La tabla está probablemente estropeada" ER_MASTER_DELAY_VALUE_OUT_OF_RANGE chi "主延迟的所需值%lu超过最大%lu" eng "The requested value %lu for the master delay exceeds the maximum %lu" + spa "El valor requerido %lu para retraso en maestro (master) excede el máximo de %lu" ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT chi "在Binlog语句中只允许Format_Description_Log_Event和行事件(但是提供了%s)" eng "Only Format_description_log_event and row events are allowed in BINLOG statements (but %s was provided)" + spa "Sólo se permiten Format_description_log_event y eventos de fila en sentencias BINLOG (pero %s fue suministrado)" ER_PARTITION_EXCHANGE_DIFFERENT_OPTION chi "分区和表之间的非匹配属性'%-.64s'" eng "Non matching attribute '%-.64s' between partition and table" + spa "Atributo no coincidente '%-.64s' entre la partición y la tabla" swe "Attributet '%-.64s' är olika mellan partition och tabell" ER_PARTITION_EXCHANGE_PART_TABLE chi "用分区交换的表是分区:'%-.64s'" eng "Table to exchange with partition is partitioned: '%-.64s'" + spa "La tabla para intercambiar con la partición está particionada: '%-.64s'" swe "Tabellen att byta ut mot partition är partitionerad: '%-.64s'" ER_PARTITION_EXCHANGE_TEMP_TABLE chi "与分区交换的表是临时的:'%-.64s'" eng "Table to exchange with partition is temporary: '%-.64s'" + spa "La tabla para intercambiar con la partición es temporal: '%-.64s'" swe "Tabellen att byta ut mot partition är temporär: '%-.64s'" ER_PARTITION_INSTEAD_OF_SUBPARTITION chi "子分区表,使用子分区代替分区" eng "Subpartitioned table, use subpartition instead of partition" + spa "La tabla subparticionada utiliza subpartición en lugar de partición" swe "Subpartitionerad tabell, använd subpartition istället för partition" ER_UNKNOWN_PARTITION chi "未知分区'%-.64s'在表'%-.64s'" eng "Unknown partition '%-.64s' in table '%-.64s'" + spa "Partición desconocida '%-.64s' en la tabla '%-.64s'" swe "Okänd partition '%-.64s' i tabell '%-.64s'" ER_TABLES_DIFFERENT_METADATA chi "表有不同的定义" eng "Tables have different definitions" + spa "Las tablas tienen 
diferentes definiciones" swe "Tabellerna har olika definitioner" ER_ROW_DOES_NOT_MATCH_PARTITION chi "找到了与分区不匹配的行" eng "Found a row that does not match the partition" + spa "Hallada una fila que no coincide con la partición" swe "Hittade en rad som inte passar i partitionen" ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX chi "选项binlog_cache_size(%lu)大于max_binlog_cache_size(%lu);设置binlog_cache_size等于max_binlog_cache_size" eng "Option binlog_cache_size (%lu) is greater than max_binlog_cache_size (%lu); setting binlog_cache_size equal to max_binlog_cache_size" + spa "La opción binlog_cache_size (%lu) es mayor que max_binlog_cache_size (%lu); configurando binlog_cache_size igual a max_binlog_cache_size" ER_WARN_INDEX_NOT_APPLICABLE chi "不能使用%-.64s在索引'%-.64s'上的访问,由于字段'%-.64s”的类型或排序规则转换" eng "Cannot use %-.64s access on index '%-.64s' due to type or collation conversion on field '%-.64s'" + spa "No puedo usar acceso %-.64s en índice '%-.64s' debido al tipo o conversión de cotejo en campo '%-.64s'" ER_PARTITION_EXCHANGE_FOREIGN_KEY chi "与分区交换的表具有外键参考:'%-.64s'" eng "Table to exchange with partition has foreign key references: '%-.64s'" + spa "La tabla para intercambiar con la partición tiene referencias a clave foránea: '%-.64s'" swe "Tabellen att byta ut mot partition har foreign key referenser: '%-.64s'" ER_NO_SUCH_KEY_VALUE chi "键值'%-.192s'在表'%-.192s%-.192s'不存在" eng "Key value '%-.192s' was not found in table '%-.192s.%-.192s'" + spa "Valor de clave '%-.192s' no hallado en la tabla '%-.192s.%-.192s'" ER_VALUE_TOO_LONG chi "'%s'的价值太长了" eng "Too long value for '%s'" + spa "Valor demasiado largo para '%s'" ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE chi "从网络读取时,复制事件校验和验证失败" eng "Replication event checksum verification failed while reading from network" + spa "Ha fallado la verificación de la suma de revisión de evento de réplica mientras se leía desde la red" ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE chi "从日志文件读取时复制事件校验和验证失败" eng "Replication event checksum verification failed while reading 
from a log file" + spa "Ha fallado la verificación de la suma de revisión de evento de réplica mientras se leía desde fichero/archivo de historial (log)" ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX chi "选项binlog_stmt_cache_size(%lu)大于max_binlog_stmt_cache_size(%lu);设置binlog_stmt_cache_size等于max_binlog_stmt_cache_size" eng "Option binlog_stmt_cache_size (%lu) is greater than max_binlog_stmt_cache_size (%lu); setting binlog_stmt_cache_size equal to max_binlog_stmt_cache_size" + spa "La opción binlog_stmt_cache_size (%lu) es mayor que max_binlog_stmt_cache_size (%lu); configurando binlog_stmt_cache_size igual a max_binlog_stmt_cache_size" ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT chi "无法更新表'%-.192s'正在创建'%-.192s'" eng "Can't update table '%-.192s' while '%-.192s' is being created" + spa "No puedo actualizar tabla '%-.192s' mientras '%-.192s' está siendo creada" ER_PARTITION_CLAUSE_ON_NONPARTITIONED chi "非分区表上的PARTITION()子句" eng "PARTITION () clause on non partitioned table" + spa "Cláusula PARTITION () en tabla no particionada" swe "PARTITION () klausul för en icke partitionerad tabell" ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET chi "发现不匹配给定分区集的行" eng "Found a row not matching the given partition set" + spa "Hallada una fila que no coincide con el conjunto dado de partición" swe "Hittade en rad som inte passar i någon given partition" ER_UNUSED_5 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE chi "更改Replication存储库类型时失败:%s" eng "Failure while changing the type of replication repository: %s" + spa "Fallo mientras cambiaba el tipo de repositorio de réplica: %s" ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE chi "无法回滚一些临时表的创建" eng "The creation of some temporary tables could not be rolled back" + spa "La creación de algunas tablas temporales no se pudo retroceder (ROLLBACK)" ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE chi "一些临时表被删除,但这些操作无法回滚" eng "Some temporary tables were 
dropped, but these operations could not be rolled back" + spa "Algunas tablas temporales fueron eliminadas, pero estas operaciones no se pudieron retroceder (ROLLBACK)" ER_MTS_FEATURE_IS_NOT_SUPPORTED chi "%s不支持多线程从模式。%s." eng "%s is not supported in multi-threaded slave mode. %s" + spa "%s no se soporta en modo esclavo multi-hilo (thread). %s" ER_MTS_UPDATED_DBS_GREATER_MAX chi "修改的数据库的数量超过了最大%d;数据库名称不会包含在Replication事件元数据中" eng "The number of modified databases exceeds the maximum %d; the database names will not be included in the replication event metadata" + spa "El número de bases de datos modificadas excede el máximo de %d; los nombres de base de datos no serán incluidos en los metadatos de eventos de réplica" ER_MTS_CANT_PARALLEL chi "无法以并行模式执行当前事件组。遇到事件%s,中继日志名称%s,position%s,它防止并行模式执行此事件组。原因:%s" eng "Cannot execute the current event group in the parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this event group in parallel mode. Reason: %s" + spa "No puedo ejecutar el grupo de eventos actual en modo paralelo. Encontrado evento %s, nombre de historial (log) de reenvío %s, posición %s que previene la ejecución de este grupo de eventos en modo paralelo. 
Motivo: %s" ER_MTS_INCONSISTENT_DATA eng "%s" ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING chi "分区表不支持FullText索引" eng "FULLTEXT index is not supported for partitioned tables" + spa "El índice FULLTEXT no está soportado para tablas particionadas" swe "FULLTEXT index stöds ej för partitionerade tabeller" ER_DA_INVALID_CONDITION_NUMBER 35000 chi "无效条件号" eng "Invalid condition number" por "Número de condição inválido" + spa "Número inválido de condición" ER_INSECURE_PLAIN_TEXT chi "在没有SSL/TLS的纯文本中发送密码非常不安全" eng "Sending passwords in plain text without SSL/TLS is extremely insecure" + spa "Enviar contraseñas en texto plano sin SSL/TLS es extremadamente inseguro" ER_INSECURE_CHANGE_MASTER chi "在Master.Info存储库中存储MariaDB用户名或密码信息不安全,因此不建议使用。有关此问题和可能的替代方案,请参阅MariaDB手册" eng "Storing MariaDB user name or password information in the master.info repository is not secure and is therefore not recommended. Please see the MariaDB Manual for more about this issue and possible alternatives" + spa "Almacenar nombre de usuario de MariaDB o información de contraseña en el repositorio master.info no es seguro y por ello no se recomienda. 
Por favor, mira el manual de MariaDB para saber más acerca de este asunto y sus posibles alternativas" ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO 23000 S1009 chi "表'%.192s'的外键约束,记录'%-.192s'会导致表'%.192s'中的重复条目,键'%.192s'" eng "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in table '%.192s', key '%.192s'" ger "Fremdschlüssel-Beschränkung für Tabelle '%.192s', Datensatz '%-.192s' würde zu einem doppelten Eintrag in Tabelle '%.192s', Schlüssel '%.192s' führen" + spa "La restricción de clave foránea para tabla '%.192s', registro '%-.192s' llevaría a una entrada duplicada en la tabla '%.192s', clave '%.192s'" swe "FOREIGN KEY constraint för tabell '%.192s', posten '%-.192s' kan inte uppdatera barntabell '%.192s' på grund av nyckel '%.192s'" ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO 23000 S1009 chi "表'%.192s'的外键约束,记录'%-.192s'会导致子表中的重复条目" eng "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in a child table" ger "Fremdschlüssel-Beschränkung für Tabelle '%.192s', Datensatz '%-.192s' würde zu einem doppelten Eintrag in einer Kind-Tabelle führen" + spa "La restricción de clave foránea para tabla '%.192s', registro '%-.192s' llevaría a una entrada duplicada en una tabla hija" swe "FOREIGN KEY constraint för tabell '%.192s', posten '%-.192s' kan inte uppdatera en barntabell på grund av UNIQUE-test" ER_SQLTHREAD_WITH_SECURE_SLAVE chi "仅在启动从SQL线程时无法设置身份验证选项" eng "Setting authentication options is not possible when only the Slave SQL Thread is being started" + spa "Configurar opciones de autenticación no es posible si sólo se ha arrancado el Hilo (thread) SQL Esclavo" ER_TABLE_HAS_NO_FT chi "该表没有全文索引来支持此查询" eng "The table does not have FULLTEXT index to support this query" + spa "La tabla no tiene índice FULLTEXT que soporte esta consulta (query)" ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER chi "无法在存储的函数或触发器中设置系统变量%.200s" eng "The system variable %.200s cannot be set in stored functions or 
triggers" + spa "La variable de sistema %.200s no se puede poner en funciones almacenadas o disparadores" ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION chi "持续交易时,无法设置系统变量%.200s" eng "The system variable %.200s cannot be set when there is an ongoing transaction" + spa "La variable de sistema %.200s no se puede poner si hay una transacción en curso" ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST chi "系统变量@@session.gtid_next具有值%.200s,该值未在@@session.gtid_next_list中列出" eng "The system variable @@SESSION.GTID_NEXT has the value %.200s, which is not listed in @@SESSION.GTID_NEXT_LIST" + spa "La variable de sistema @@SESSION.GTID_NEXT tiene el valor %.200s, el cual no está listado en @@SESSION.GTID_NEXT_LIST" ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL chi "当@@session.gtid_next_list == null时,系统变量@@session.gtid_next无法在事务内更改" eng "When @@SESSION.GTID_NEXT_LIST == NULL, the system variable @@SESSION.GTID_NEXT cannot change inside a transaction" + spa "Cuando @@SESSION.GTID_NEXT_LIST == NULL, la variable de sistema @@SESSION.GTID_NEXT no puede cambiar dentro de una transacción" ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION chi "语句'SET %.200s'无法调用存储的函数" eng "The statement 'SET %.200s' cannot invoke a stored function" + spa "La sentencia 'SET %.200s' no puede invocar una función almacenada" ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL chi "系统变量@@sessient.gtid_next不能是'自动'@@sessient.gtid_next_list非null时" eng "The system variable @@SESSION.GTID_NEXT cannot be 'AUTOMATIC' when @@SESSION.GTID_NEXT_LIST is non-NULL" + spa "La variable de sistema @@SESSION.GTID_NEXT no puede ser 'AUTOMATIC' si la @@SESSION.GTID_NEXT_LIST es no-NULL" ER_SKIPPING_LOGGED_TRANSACTION chi "跳过事务%.200s,因为它已经被执行和记录" eng "Skipping transaction %.200s because it has already been executed and logged" + spa "Saltando transacción %.200s porque ya ha sido ejecutada y puesta en historial (log)" ER_MALFORMED_GTID_SET_SPECIFICATION chi "畸形GTID设置规范'%.200s'" eng "Malformed GTID set specification 
'%.200s'" + spa "GTID malformado pone especificación '%.200s'" ER_MALFORMED_GTID_SET_ENCODING chi "格式错误的GTID集编码" eng "Malformed GTID set encoding" + spa "GTID malformado pone codificación" ER_MALFORMED_GTID_SPECIFICATION chi "畸形GTID规范'%.200s'" eng "Malformed GTID specification '%.200s'" + spa "GTID malformado especificación '%.200s'" ER_GNO_EXHAUSTED chi "无法生成全局事务标识符:整数组件达到了最大值。用新server_uuId重新启动服务器" eng "Impossible to generate Global Transaction Identifier: the integer component reached the maximal value. Restart the server with a new server_uuid" + spa "Imposible generar Identificador Global de Transacción: el componente entero alcanzó el máximo valor. Rearranque el servidor con un nuevo server_uuid" ER_BAD_SLAVE_AUTO_POSITION chi "当MASTER_AUTO_POSITION处于活动状态时,无法设置参数MASTER_LOG_FILE,MASTER_LOG_POS,RELAY_LOG_FILE和RELAY_LOG_POS" eng "Parameters MASTER_LOG_FILE, MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active" + spa "Los parámetros MASTER_LOG_FILE, MASTER_LOG_POS, RELAY_LOG_FILE y RELAY_LOG_POS no pueden ser puestos cuando MASTER_AUTO_POSITION esté activo" ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON - chi "CHANGE Master TO MASTER_AUTO_POSITION = 1只能在GTID_MODE =ON上执行" + chi "CHANGE MASTER TO MASTER_AUTO_POSITION = 1只能在GTID_MODE = ON上执行" eng "CHANGE MASTER TO MASTER_AUTO_POSITION = 1 can only be executed when GTID_MODE = ON" + spa "CHANGE MASTER TO MASTER_AUTO_POSITION = 1 sólo se puede ejecutar cuando GTID_MODE = ON" ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET - chi "当GTID_Next!= AUTOMATIC 或GTID_NEXT_LIST != NULL时,无法在事务中执行语句" + chi "当GTID_Next != AUTOMATIC 或GTID_NEXT_LIST != NULL时,无法在事务中执行语句" eng "Cannot execute statements with implicit commit inside a transaction when GTID_NEXT != AUTOMATIC or GTID_NEXT_LIST != NULL" + spa "No puedo ejecutar sentencias con acometidas (commit) implícitas dentro de una transacción cuando GTID_NEXT != AUTOMATIC o GTID_NEXT_LIST != NULL" 
ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON chi "GTID_MODE = ON或GTID_MODE = UPGRADE_STEP_2需要ENFORCE_GTID_CONSISTY = 1" eng "GTID_MODE = ON or GTID_MODE = UPGRADE_STEP_2 requires ENFORCE_GTID_CONSISTENCY = 1" + spa "GTID_MODE = ON o GTID_MODE = UPGRADE_STEP_2 requiere que ENFORCE_GTID_CONSISTENCY = 1" ER_GTID_MODE_REQUIRES_BINLOG chi "GTID_MODE = ON或UPGRADE_STEP_1或UPGRADE_STEP_2需要--log-bin和-log-slave-updates" eng "GTID_MODE = ON or UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates" + spa "GTID_MODE = ON o UPGRADE_STEP_1 o UPGRADE_STEP_2 requiere --log-bin y --log-slave-updates" ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF chi "GTID_NEXT无法设置为UUID:NUMBER 当GTID_MODE = OFF" eng "GTID_NEXT cannot be set to UUID:NUMBER when GTID_MODE = OFF" + spa "GTID_NEXT no se puede poner a UUID:NUMBER cuando GTID_MODE = OFF" ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON chi "GTID_NEXT无法在当GTID_MODE = ON上时设置为ANONYMOUS" eng "GTID_NEXT cannot be set to ANONYMOUS when GTID_MODE = ON" + spa "GTID_NEXT no se puede poner como ANONYMOUS cuando GTID_MODE = ON" ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF chi "GTID_NEXT_LIST无法设置为非空值当GTID_MODE = OFF" eng "GTID_NEXT_LIST cannot be set to a non-NULL value when GTID_MODE = OFF" + spa "GTID_NEXT_LIST no se puede poner como valor no-NULL cuando GTID_MODE = OFF" ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF chi "找到一个Gtid_log_event或Previous_gtids_log_event,当gtid_mode = OFF时" eng "Found a Gtid_log_event or Previous_gtids_log_event when GTID_MODE = OFF" + spa "Hallado un Gtid_log_event o Previous_gtids_log_event cuando GTID_MODE = OFF" ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE chi "当Enforce_gtid_consistenty = 1时,对非事务性表的更新只能在Autocomated语句或单一语句事务中完成,而不是在与事务表中的更新相同的语句中" eng "When ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional 
tables" + spa "Cuando ENFORCE_GTID_CONSISTENCY = 1, las actualiaciones a tablas no transaccionales sólo se puede hacer o bien en sentencias autoacometidas o en transacciones de sentencias simples y nunca dentro de la misma sentencia como actualizaciones a tablas transaccionales" ER_GTID_UNSAFE_CREATE_SELECT chi "CREATE TABLE...SELECT在ENFORCE_GTID_CONSISTENCY = 1时被禁止" eng "CREATE TABLE ... SELECT is forbidden when ENFORCE_GTID_CONSISTENCY = 1" + spa "CREATE TABLE ... SELECT está prohibido si ENFORCE_GTID_CONSISTENCY = 1" ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION chi "当ENFORCE_GTID_CONSISTENCY = 1时,语句CREATE TEMPORARY TABL和DROP TEMPORARY TABLE,只能在非事务性上下文中执行,并且要求autocommit = 1" eng "When ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1" + spa "Cuando ENFORCE_GTID_CONSISTENCY = 1, las sentencias CREATE TEMPORARY TABLE y DROP TEMPORARY TABLE pueden ser ejecutadas sólo en contextos no-transaccionales y requieren que AUTOCOMMIT = 1" ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME chi "GTID_MODE的值只能一次更改一步:OFF<-> UPGRODE_STEP_1 <-> UPGRODE_STEP_2 <-> ON。另请注意,此值必须在所有服务器上同时上升或下降;有关说明,请参阅手册。“" eng "The value of GTID_MODE can only change one step at a time: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON. Also note that this value must be stepped up or down simultaneously on all servers; see the Manual for instructions." + spa "El valor de GTID_MODE sólo puede cambiar un paso a la vez: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON. También ten en cuenta que este valor debe de ser aumentado y disminuido simultaneamente en todos los servidores; mira el Manual para instrucciones." 
ER_MASTER_HAS_PURGED_REQUIRED_GTIDS chi "从机更改主站到Master_Auto_Position = 1,因此主站连接,但主设备已清除了slave需要的GTID的二进制日志" eng "The slave is connecting using CHANGE MASTER TO MASTER_AUTO_POSITION = 1, but the master has purged binary logs containing GTIDs that the slave requires" + spa "El esclavo se está conectando usando CHANGE MASTER TO MASTER_AUTO_POSITION = 1, pero el maestro (master) ha purgado los historiales (logs) binarios que contienen GTIDs requeridos por el esclavo" ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID chi "无法由拥有GTID的客户端更改GTID_NEXT。客户拥有%s。所有权在提交或回滚上发布" eng "GTID_NEXT cannot be changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK" + spa "GTID_NEXT no se puede cambiar por un cliente que posee un GTID. El cliente posee %s. La propiedad se libera con COMMIT o ROLLBACK" ER_UNKNOWN_EXPLAIN_FORMAT chi "未知%s格式名称:'%s'" eng "Unknown %s format name: '%s'" rus "Неизвестное имя формата команды %s: '%s'" + spa "Nombre de formato %s desconocido: '%s'" ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION 25006 chi "无法在只读事务中执行语句" eng "Cannot execute statement in a READ ONLY transaction" + spa "No puedo ejecutar sentencia en una transacción READ ONLY" ER_TOO_LONG_TABLE_PARTITION_COMMENT chi "表分区的评论'%-.64s'太长(max =%lu)" eng "Comment for table partition '%-.64s' is too long (max = %lu)" + spa "El comentario para partición de tabla '%-.64s' es demasido largo (máx = %lu)" ER_SLAVE_CONFIGURATION chi "从站未配置或未能正确初始化。您必须至少set --server-id以启用主站或从站。可以在MariaDB错误日志中找到其他错误消息" eng "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MariaDB error log" + spa "El esclavo no está configurado o falló al inicializarse de forma adecuada. Vd debe de poner al menos --server-id para activar o bien un maestro (master) o un esclavo. 
Mensajes de error adicionales pueden ser hallados en historial (log) de errores de MariaDB" ER_INNODB_FT_LIMIT chi "InnoDB目前一次支持一个全文索引创建" eng "InnoDB presently supports one FULLTEXT index creation at a time" + spa "Actualmente InnoDB soporta la creación de un índice FULLTEXT a la vez" ER_INNODB_NO_FT_TEMP_TABLE chi "无法在临时InnoDB表上创建FullText索引" eng "Cannot create FULLTEXT index on temporary InnoDB table" + spa "No puedo crear índice FULLTEXT en tabla temporaria InnoDB" ER_INNODB_FT_WRONG_DOCID_COLUMN chi "列'%-.192s'是innodb fulltext索引的错误类型" eng "Column '%-.192s' is of wrong type for an InnoDB FULLTEXT index" + spa "La columna '%-.192s' es de tipo equivocado para un índice InnoDB FULLTEXT" ER_INNODB_FT_WRONG_DOCID_INDEX - chi "InnoDB全文索引的索引'%-.192s'是错误的类型错误" eng "Index '%-.192s' is of wrong type for an InnoDB FULLTEXT index" + spa "El índice '%-.192s' es de tipo equivocado para un índice InnoDB FULLTEXT" ER_INNODB_ONLINE_LOG_TOO_BIG chi "创建索引'%-.192s'所需的多于'innodb_online_alter_log_max_size'字节的修改日志。请再试一次" eng "Creating index '%-.192s' required more than 'innodb_online_alter_log_max_size' bytes of modification log. Please try again" + spa "La creación de índice '%-.192s' requirió más de 'innodb_online_alter_log_max_size' bytes de historial (log) de modificaciones. Por favor, pruebe otra vez" ER_UNKNOWN_ALTER_ALGORITHM chi "未知算法'%s'" eng "Unknown ALGORITHM '%s'" + spa "ALGORITHM desconocido '%s'" ER_UNKNOWN_ALTER_LOCK chi "未知锁定类型'%s'" eng "Unknown LOCK type '%s'" + spa "Tipo de LOCK desconocido '%s'" ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS chi "当从站因为错误停止或以MTS模式终止时,不呢执行CHANGE MASTER。考虑使用RESET SLAVE或START SLAVE UNTIL" eng "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL" + spa "CHANGE MASTER no se puede ejecutar cuando se ha parado el esclavo con un error o matado en modo MTS. 
Considere el usar RESET SLAVE o START SLAVE UNTIL" ER_MTS_RECOVERY_FAILURE chi "从并行执行模式下的从站错误后无法恢复。可以在MariaDB错误日志中找到其他错误消息" eng "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MariaDB error log" + spa "No puedo recuperar después de que SLAVE diera error en modo paralelo de ejecución. Mensajes de error adicionales se pueden hallar en el historial (log) de error de MariaDB" ER_MTS_RESET_WORKERS chi "无法清理工作者信息表。可以在MariaDB错误日志中找到其他错误消息" eng "Cannot clean up worker info tables. Additional error messages can be found in the MariaDB error log" + spa "No puedo limpiar tablas de información de trabajador. Mensajes de error adicionales se pueden hallar en el historial (log) de error de MariaDB" ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 chi "列数为%s.%s是错误的。预期的%d,找到%d。表可能损坏了" eng "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted" ger "Spaltenanzahl von %s.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt" + spa "El contador de columnas %s.%s está equivocado. Se esperaba %d, hallado %d. 
La tabla está probablemente estropeada" ER_SLAVE_SILENT_RETRY_TRANSACTION chi "从站必须静默地重试当前事务" eng "Slave must silently retry current transaction" + spa "El esclavo debe de reintentar silenciosamente la transacción en curso" ER_UNUSED_22 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_TABLE_SCHEMA_MISMATCH chi "架构不匹配(%s)" eng "Schema mismatch (%s)" + spa "Esquema no coincide (%s)" ER_TABLE_IN_SYSTEM_TABLESPACE chi "表%-.192s在系统表空间中" eng "Table %-.192s in system tablespace" + spa "Tabla %-.192s en espacio de tablas del sistema" ER_IO_READ_ERROR chi "IO读取错误:(%lu,%s)%s" eng "IO Read error: (%lu, %s) %s" + spa "Error de Lectura de E/S: (%lu, %s) %s" ER_IO_WRITE_ERROR chi "IO写错错误:(%lu,%s)%s" eng "IO Write error: (%lu, %s) %s" + spa "Error de Escritura de E/S: (%lu, %s) %s" ER_TABLESPACE_MISSING chi "表空间缺少表'%-.192s'" eng "Tablespace is missing for table '%-.192s'" + spa "Espacio de tabla falta para la tabla '%-.192s'" ER_TABLESPACE_EXISTS chi "表格'%-.192s'的表空间。请在导入之前丢弃表空间" eng "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT" + spa "Existe Espacio de tabla para la tabla '%-.192s'. Por favor, haga DISCARD del espacio de tabla antes de hacer IMPORT" ER_TABLESPACE_DISCARDED chi "表空间已被丢弃为表%`s" eng "Tablespace has been discarded for table %`s" + spa "El espacio de tabla ha sido descartado para la tabla %`s" ER_INTERNAL_ERROR chi "内部错误:%-.192s" eng "Internal error: %-.192s" + spa "Error interno: %-.192s" ER_INNODB_IMPORT_ERROR chi "ALTER TABLE '%-.192s' IMPORT TABLESPACE 失败,错误%lu:'%s'" eng "ALTER TABLE '%-.192s' IMPORT TABLESPACE failed with error %lu : '%s'" + spa "ALTER TABLE '%-.192s' IMPORT TABLESPACE ha fallado con error %lu : '%s'" ER_INNODB_INDEX_CORRUPT chi "索引损坏:%s" eng "Index corrupt: %s" + spa "Índice corrupto: %s" ER_INVALID_YEAR_COLUMN_LENGTH chi "已弃用YEAR(%lu)列类型。创建YEAR(4)列代替" eng "YEAR(%lu) column type is deprecated. 
Creating YEAR(4) column instead" rus "Тип YEAR(%lu) более не поддерживается, вместо него будет создана колонка с типом YEAR(4)" + spa "El tipo de columna YEAR(%lu) está obsoleto. Creando columna YEAR(4) en su lugar" ER_NOT_VALID_PASSWORD eng "Your password does not satisfy the current policy requirements (%s)" @@ -7736,161 +8266,200 @@ ER_MUST_CHANGE_PASSWORD chi "您必须在执行此语句之前设置密码" eng "You must SET PASSWORD before executing this statement" rum "Trebuie sa iti schimbi parola folosind SET PASSWORD inainte de a executa aceasta comanda" + spa "Debe vd de poner SET PASSWORD antes de ejecutar esta sentencia" ER_FK_NO_INDEX_CHILD chi "无法添加外键约束。外表'%s'中的约束'%s'缺少索引" eng "Failed to add the foreign key constraint. Missing index for constraint '%s' in the foreign table '%s'" + spa "No pude añadir la restricción de clave foránea. Falta índice para restricción '%s' en la tabla foránea '%s'" ER_FK_NO_INDEX_PARENT chi "无法添加外键约束。引用的表'%s'中的约束'%s'缺少索引" eng "Failed to add the foreign key constraint. Missing index for constraint '%s' in the referenced table '%s'" + spa "No pude añadir la restricción de clave foránea. Falta índice para restricción '%s' en la tabla referenciada '%s'" ER_FK_FAIL_ADD_SYSTEM chi "无法将外键约束'%s'添加到系统表" eng "Failed to add the foreign key constraint '%s' to system tables" + spa "No pude añadir la restricción de clave foránea '%s' a las tablas del sistema" ER_FK_CANNOT_OPEN_PARENT chi "无法打开引用的表'%s'" eng "Failed to open the referenced table '%s'" + spa "No pude abrir la tabla referenciada '%s'" ER_FK_INCORRECT_OPTION chi "无法在表'%s'上添加外键约束。外键约束'%s'中的选项不正确" eng "Failed to add the foreign key constraint on table '%s'. Incorrect options in FOREIGN KEY constraint '%s'" + spa "No pude añadir restricción de clave foránea en la tabla '%s'. 
Opciones incorrectas en restricción FOREIGN KEY '%s'" ER_DUP_CONSTRAINT_NAME chi "重复%s约束名称'%s'" eng "Duplicate %s constraint name '%s'" + spa "Duplicada restricción %s llamada '%s'" ER_PASSWORD_FORMAT chi "密码哈希没有预期的格式。检查密码()函数是否使用正确的密码算法" eng "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function" + spa "El cálculo de contraseña no tiene el formato esperado. Revise si se está usando el algoritmo correcto de contraseña con la función PASSWORD()" ER_FK_COLUMN_CANNOT_DROP chi "无法删除'%-.192s'列:在外部索引约束'%-.192s'中需要" eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s'" ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' benötigt" + spa "No puedo eliminar la columna '%-.192s': necesaria en restricción de clave foránea '%-.192s'" ER_FK_COLUMN_CANNOT_DROP_CHILD chi "无法删除列'%-.192s':在外键约束'%-.192s'中需要,表%-.192s" eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table %-.192s" ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' der Tabelle %-.192s benötigt" + spa "No puedo eliminar la columna '%-.192s': necesaria en restricción de clave foránea '%-.192s' de la tabla %-.192s" ER_FK_COLUMN_NOT_NULL chi "列'%-.192s'不能没有null:在外键约束'%-.192s'设置为null" eng "Column '%-.192s' cannot be NOT NULL: needed in a foreign key constraint '%-.192s' SET NULL" ger "Spalte '%-.192s' kann nicht NOT NULL sein: wird für eine Fremdschlüsselbeschränkung '%-.192s' SET NULL benötigt" + spa "La columna '%-.192s' no puede ser NOT NULL: necesaria en restricción de clave foránea '%-.192s' SET NULL" ER_DUP_INDEX chi "重复索引%`s。这已弃用,将在未来的版本中不允许" eng "Duplicate index %`s. This is deprecated and will be disallowed in a future release" + spa "Índice duplicado %`s. 
Éste está obsoleto y será quitado en entregas futuras" ER_FK_COLUMN_CANNOT_CHANGE chi "无法更改列'%-.192s':用于外部键约束'%-.192s'" eng "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s'" + spa "No puedo cambiar la columna '%-.192s': usada en una restricción de clave foránea '%-.192s'" ER_FK_COLUMN_CANNOT_CHANGE_CHILD chi "无法更改列'%-.192s':用于在外部键约束'%-.192s'的表'%-.192s'" eng "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s' of table '%-.192s'" + spa "No puedo cambiar la columna '%-.192s': usada en restricción de clave foránea '%-.192s' de la tabla '%-.192s'" ER_FK_CANNOT_DELETE_PARENT chi "无法从表中删除来自表中的父级的表中的行'%-.192s”表'%-.192s'" eng "Cannot delete rows from table which is parent in a foreign key constraint '%-.192s' of table '%-.192s'" + spa "No puedo borrar filas de la tabla que es padre en restricción de clave foránea '%-.192s' de la tabla '%-.192s'" ER_MALFORMED_PACKET chi "畸形通信包" eng "Malformed communication packet" + spa "Paquete de comunicación malformado" ER_READ_ONLY_MODE chi "以只读模式运行" eng "Running in read-only mode" + spa "Ejecutando en modo sólo-lectura" ER_GTID_NEXT_TYPE_UNDEFINED_GROUP chi "当GTID_NEXT设置为GTID时,必须在提交或回滚后立即将其再次设置。如果在从SQL线程中看到此错误消息,则表示当前事务中的表是在主站和从站上的非交易的事务性。在客户端连接中,它意味着您在事务之前执行SET GTID_NEXT并忘记将GTID_NEXT设置为不同的标识符或在提交或回滚后“自动”。当前gtid_next是'%s'" eng "When GTID_NEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET GTID_NEXT before a transaction and forgot to set GTID_NEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current GTID_NEXT is '%s'" + spa "Cuando GTID_NEXT se pone a GTID, debe vd de ponerlo de nuevo de forma explícita tras un COMMIT o ROLLBACK. 
Si vd ve este mensaje de error en el hilo (thread) de SQL esclavo, indica que una tabla de la transacción en curso es transaccional en el maestro (master) y no transaccional en el esclavo. En una conexión cliente, indica que has ejecutado SET GTID_NEXT antes de una transacción y has olvidado poner GTID_NEXT a un identificador diferente o a 'AUTOMATIC' tras COMMIT o ROLLBACK. El GTID_NEXT actual es '%s'" ER_VARIABLE_NOT_SETTABLE_IN_SP chi "无法在存储过程中设置系统变量%.200s" eng "The system variable %.200s cannot be set in stored procedures" + spa "La variable de sistema %.200s no se puede poner en procedimentos almacenados" ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF chi "只能在GTID_MODE = ON设置GTID_PURGED" eng "GTID_PURGED can only be set when GTID_MODE = ON" + spa "GTID_PURGED sólo se puede usar cuando GTID_MODE = ON" ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY chi "只有在GTID_EXECUTED为空时才可以设置GTID_PURGED" eng "GTID_PURGED can only be set when GTID_EXECUTED is empty" + spa "GTID_PURGED sólo se puede poner cuando GTID_EXECUTED está vacío" ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY chi "只有在没有持续的事务时才可以设置GTID_PURGED(即使在其他客户端中不)" eng "GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients)" + spa "GTID_PURGED sólo se puede poner cuando no hay trasacciones en curso (ni incluso en otros clientes)" ER_GTID_PURGED_WAS_CHANGED chi "GTID_PURGED从'%s'更改为'%s'" eng "GTID_PURGED was changed from '%s' to '%s'" + spa "GTID_PURGED se cambió de '%s' a '%s'" ER_GTID_EXECUTED_WAS_CHANGED chi "GTID_EXECUTE从'%s'更改为'%s'" eng "GTID_EXECUTED was changed from '%s' to '%s'" + spa "GTID_EXECUTED se cambió de '%s' a '%s'" ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES chi "无法执行语句:由于BINLOG_FORMAT = STATEMENT,因此无法写入二进制日志,并将复制和非复制表写入" eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non replicated tables are written to" + spa "No puedo ejecutar sentencia: imposible escribir en historial 
(log) binario desde BINLOG_FORMAT = STATEMENT y en tablas replicadas y no replicadas" ER_ALTER_OPERATION_NOT_SUPPORTED 0A000 chi "此操作不支持%s。试试%s" eng "%s is not supported for this operation. Try %s" + spa "%s no está soportado para esta operación. Pruebe %s" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON 0A000 chi "不支持%s。原因:%s。试试%s" eng "%s is not supported. Reason: %s. Try %s" + spa "%s no está soportado. Motivo: %s. Pruebe %s" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY chi "复制算法需要锁定" eng "COPY algorithm requires a lock" + spa "El algoritmo de COPY requiere de un bloqueo" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION chi "分区特定操作尚不支持锁定/算法" eng "Partition specific operations do not yet support LOCK/ALGORITHM" + spa "Las operaciones específicas de partición aún no soportan LOCK/ALGORITHM" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME chi "参与外键的列被更名" eng "Columns participating in a foreign key are renamed" + spa "Las columnas que participan en una clave foránea son renombradas" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE chi "无法更改列类型" eng "Cannot change column type" + spa "No puedo cambiar el tipo de la columna" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK chi "添加外键需要figner_key_checks = OFF" eng "Adding foreign keys needs foreign_key_checks=OFF" + spa "El añadir claves foráneas necesita de foreign_key_checks=OFF" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE chi "使用忽略创建唯一索引需要复制算法删除重复行" eng "Creating unique indexes with IGNORE requires COPY algorithm to remove duplicate rows" + spa "El crear ídices únicos con IGNORE requiere del algoritmo COPY para quitar filas duplicadas" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK chi "不允许删除主键,而不添加新的主键" eng "Dropping a primary key is not allowed without also adding a new primary key" + spa "Eliminar una clave primaria no está permitido sin añadir también una nueva clave primaria" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC chi "添加自动增量列需要锁定" eng "Adding an auto-increment column requires a lock" + spa "Para añadir 
una columna auto-incrementable se requiere de un bloqueo" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS chi "无法使用用户可见的替换隐藏的FTS_DOC_ID" eng "Cannot replace hidden FTS_DOC_ID with a user-visible one" + spa "No puedo reemplazar FTS_DOC_ID oculta con una visible-por-usuario" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS chi "无法删除或重命名FTS_DOC_ID" eng "Cannot drop or rename FTS_DOC_ID" + spa "No puedo eliminar o renombrar FTS_DOC_ID" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS chi "fulltext索引创建需要锁定" eng "Fulltext index creation requires a lock" + spa "La creación de un índice Fulltext requiere de un bloqueo" ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE chi "使用GTID_Mode = ON运行时,无法设置SQL_SLAVE_SKIP_COUNTER。相反,对于要跳过的每个事务,使用与事务相同的GTID生成空事务" eng "sql_slave_skip_counter can not be set when the server is running with GTID_MODE = ON. Instead, for each transaction that you want to skip, generate an empty transaction with the same GTID as the transaction" + spa "sql_slave_skip_counter no se puede poner cuando el servidor se ejecuta con GTID_MODE = ON. En su lugar, para cada transacción que desees saltar, genera una transacción vacía con el mismo GTID que la transacción" ER_DUP_UNKNOWN_IN_INDEX 23000 chi "索引的重复条目'%-.192s'" @@ -7921,85 +8490,105 @@ ER_DUP_UNKNOWN_IN_INDEX 23000 ER_IDENT_CAUSES_TOO_LONG_PATH chi "对象的长数据库名称和标识符导致路径长度超过%d字符。路径:'%s'" eng "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'" + spa "Nombre largo de base de datos e identificador para objeto resultó en que el tamaño de la ruta excedió de %d caracteres. Ruta: '%s'" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL chi "无法将null转换为非常量默认值" eng "cannot convert NULL to non-constant DEFAULT" + spa "no puedo convertir NULL a DEFAULT no-constante" ER_MUST_CHANGE_PASSWORD_LOGIN bgn "Паролата ви е изтекла. 
За да влезете трябва да я смените използвайки клиент който поддрържа такива пароли" chi "您的密码已过期。要登录您必须使用支持过期密码的客户端更改它" eng "Your password has expired. To log in you must change it using a client that supports expired passwords" rum "Parola ta a expirat. Pentru a te loga, trebuie sa o schimbi folosind un client ce suporta parole expirate" + spa "Su contraseña ha expirado. Para ingresar, vd debe de cambiarla usando un cliente que soporte contraseñas expiradas" ER_ROW_IN_WRONG_PARTITION chi "在错误分区%s中找到了一行" eng "Found a row in wrong partition %s" + spa "Hallada una fila en partición equivocada %s" swe "Hittade en rad i fel partition %s" ER_MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX chi "无法安排事件%s,中继日志名称%s,position%s对工作线程,因为它的大小%lu超过了slave_pending_jobs_size_max (%lu)" eng "Cannot schedule event %s, relay-log name %s, position %s to Worker thread because its size %lu exceeds %lu of slave_pending_jobs_size_max" + spa "No puedo organizar evento %s, nombre de historial (log)-reenvío %s, posición %s a hilo (thread) de Trabajador porque su tamaño %lu excede %lu de slave_pending_jobs_size_max" ER_INNODB_NO_FT_USES_PARSER chi "无法在InnoDB表上CREATE FULLTEXT INDEX WITH PARSER" eng "Cannot CREATE FULLTEXT INDEX WITH PARSER on InnoDB table" + spa "No puedo CREATE FULLTEXT INDEX WITH PARSER en tabla InnoDB" ER_BINLOG_LOGICAL_CORRUPTION chi "二进制日志文件'%s'逻辑损坏:%s" eng "The binary log file '%s' is logically corrupted: %s" + spa "El fichero/archivo de historial (log) binario '%s' está lógicamente corrupto: %s" ER_WARN_PURGE_LOG_IN_USE chi "未清除文件%s,因为它被%d线程读取,只清除%d文件中的%d" eng "file %s was not purged because it was being read by %d thread(s), purged only %d out of %d files" + spa "el fichero/archivo %s no se ha purgado porque estaba siendo leído por %d hilo(s) (thread), purgado sólo %d de %d ficheros/archivos" ER_WARN_PURGE_LOG_IS_ACTIVE chi "文件%s未清除,因为它是活动日志文件" eng "file %s was not purged because it is the active log file" + spa "el fichero/archivo %s no fue purgado porque es el fichero/archivo activo de 
historial (log)" ER_AUTO_INCREMENT_CONFLICT chi "更新中的自动增量值与内部生成的值冲突" eng "Auto-increment value in UPDATE conflicts with internally generated values" + spa "Valor de Auto-incremento en UPDATE está en conflicto con valores generados internamente" WARN_ON_BLOCKHOLE_IN_RBR chi "未记录行事件的%s语句,该语句以行格式修改BlackHole表。表:'%-.192s'" eng "Row events are not logged for %s statements that modify BLACKHOLE tables in row format. Table(s): '%-.192s'" + spa "Los eventos de fila no son puestos en historial (log) para sentencias %s que modifican tablas BLACKHOLE en formato de fila. Tabla(s): '%-.192s'" ER_SLAVE_MI_INIT_REPOSITORY chi "从设备无法从存储库初始化主信息结构" eng "Slave failed to initialize master info structure from the repository" + spa "El esclavo falló al inicializar información de estructura del maestro (master) desde el repositorio" ER_SLAVE_RLI_INIT_REPOSITORY chi "从站无法从存储库初始化中继日志信息结构" eng "Slave failed to initialize relay log info structure from the repository" + spa "El esclavo falló al inicializar estructura de información de historial (log) de reenvío desde el repositorio" ER_ACCESS_DENIED_CHANGE_USER_ERROR 28000 bgn "Отказан достъп при опит за смяна към потребител %-.48s'@'%-.64s' (използвана парола: %s). Затваряне на връзката" chi "访问拒绝尝试更改为用户'%-.48s'@'%-.64s'(使用密码:%s)。断开连接" eng "Access denied trying to change to user '%-.48s'@'%-.64s' (using password: %s). Disconnecting" + spa "Acceso denegado intentando cambiar a usuario '%-.48s'@'%-.64s' (usando contraseña: %s). 
Desconectando" ER_INNODB_READ_ONLY chi "innodb是只读模式" eng "InnoDB is in read only mode" hindi "InnoDB केवल READ-ONLY मोड में है" + spa "InnoDB está en modo de sólo lectura" ER_STOP_SLAVE_SQL_THREAD_TIMEOUT chi "STOP SLAVE命令执行不完整:从SQL线程获得停止信号,线程正忙,一旦当前任务完成后,SQL线程将停止" eng "STOP SLAVE command execution is incomplete: Slave SQL thread got the stop signal, thread is busy, SQL thread will stop once the current task is complete" + spa "La ejecución del comando STOP SLAVE está incompleta: El hilo (thread) de SQL esclavo recibió la señal de parada, hilo (thread) ocupado, el hilo (thread) SQL se parará una vez que se complete la tarea actual" ER_STOP_SLAVE_IO_THREAD_TIMEOUT chi "STOP SLAVE命令执行不完整:从机动程线程得到停止信号,线程很忙,一旦当前任务完成后,IO线程将停止。" eng "STOP SLAVE command execution is incomplete: Slave IO thread got the stop signal, thread is busy, IO thread will stop once the current task is complete" + spa "La ejecución del comando STOP SLAVE está incompleta: El hilo (thread) de E/S esclavo recibió la señal de parada, hilo (thread) ocupado, el hilo (thread) de E/S se parará una vez que se complete la tarea actual" ER_TABLE_CORRUPT chi "无法执行操作。表'%-.64s。%-.64s'丢失,损坏或包含不良数据" eng "Operation cannot be performed. The table '%-.64s.%-.64s' is missing, corrupt or contains bad data" + spa "La operación no se puede realizar. Falta la tabla '%-.64s.%-.64s', está corrupta o contiene datos malos" ER_TEMP_FILE_WRITE_FAILURE chi "临时文件写入失败" eng "Temporary file write failure" + spa "Fallo al escribir fichero/archivo temporal" ER_INNODB_FT_AUX_NOT_HEX_ID chi "升级索引名称失败,请使用创建索引(ALTER TABLE)算法复制来重建索引" eng "Upgrade index name failed, please use create index(alter table) algorithm copy to rebuild index" + spa "Falló la mejora de nombre de índice. 
Por favor, use una copia del algoritmo de create index(alter table) para reconstruir el índice" # # # MariaDB error messages section starts here @@ -8019,23 +8608,29 @@ ER_UNUSED_18 ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED chi "函数或表达式'%s'不能用于%s的%`s" eng "Function or expression '%s' cannot be used in the %s clause of %`s" + spa "La Función o expresión '%s' no se puede usar en la cláusula %s de %`s" ER_UNUSED_19 eng "" ER_PRIMARY_KEY_BASED_ON_GENERATED_COLUMN chi "主键无法在生成的列上定义" eng "Primary key cannot be defined upon a generated column" + spa "La clave primaria no se puede definir sobre una columna generada" ER_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN chi "无法在虚拟生成的列上定义键/索引" eng "Key/Index cannot be defined on a virtual generated column" + spa "Key/Index no se puede definir en una columna virtual generada" ER_WRONG_FK_OPTION_FOR_GENERATED_COLUMN chi "无法在生成的列上定义%s子句的外键" eng "Cannot define foreign key with %s clause on a generated column" + spa "No puedo definir clave foránea con cláusula %s en una columna generada" ER_WARNING_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN chi "忽略了表'%s'中为生成的列'%s'指定的值已被忽略" eng "The value specified for generated column '%s' in table '%s' has been ignored" + spa "El valor especificado para columna generada '%s' en la tabla '%s' ha sido ignorado" ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN chi "生成的列尚未支持这一点" eng "This is not yet supported for generated columns" + spa "Esto no está aún soportado para columnas generadas" ER_UNUSED_20 eng "" ER_UNUSED_21 @@ -8044,256 +8639,329 @@ ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS chi "%s存储引擎不支持生成的列" eng "%s storage engine does not support generated columns" hindi "स्टोरेज इंजन %s COMPUTED कॉलम्स को सपोर्ट नहीं करता" + spa "El motor de almacenaje %s no soporta columnas generadas" ER_UNKNOWN_OPTION chi "未知选项'%-.64s'" eng "Unknown option '%-.64s'" hindi "अज्ञात विकल्प '%-.64s'" + spa "Opción desconocida '%-.64s'" ER_BAD_OPTION_VALUE chi "值不正确'%-.64T'选项'%-.64s'" eng "Incorrect value '%-.64T' for option '%-.64s'" 
hindi "गलत मान '%-.64T' विकल्प '%-.64s' के लिए" + spa "Valor incorrecto '%-.64T' para opción '%-.64s'" ER_UNUSED_6 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_UNUSED_7 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_UNUSED_8 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_DATA_OVERFLOW 22003 chi "转换'%-.128s'到%-.32s时溢出。值截断" eng "Got overflow when converting '%-.128s' to %-.32s. Value truncated" + spa "Obtenido desbordamiento al convertir '%-.128s' a %-.32s. Valor truncado" ER_DATA_TRUNCATED 22003 chi "转换'%-.128s'到%-.32s时值截断" eng "Truncated value '%-.128s' when converting to %-.32s" + spa "Valor truncado '%-.128s' al convertir a %-.32s" ER_BAD_DATA 22007 chi "非法值'%-.128s',在转换%-.32s时遇到" eng "Encountered illegal value '%-.128s' when converting to %-.32s" + spa "Encontrado valor ilegal '%-.128s' al convertir a %-.32s" ER_DYN_COL_WRONG_FORMAT chi "遇到非法格式的动态列字符串" eng "Encountered illegal format of dynamic column string" + spa "Encontrado formato ilegal de cadena en columna dinámica" ER_DYN_COL_IMPLEMENTATION_LIMIT chi "达到动态列实现限制" eng "Dynamic column implementation limit reached" + spa "Alcanzado límite de implementación de columna dinámica" ER_DYN_COL_DATA 22007 chi "非法值用作动态列函数的参数" eng "Illegal value used as argument of dynamic column function" + spa "Valor ilegal usado como argumento de función de columna dinámica" ER_DYN_COL_WRONG_CHARSET chi "动态列包含未知字符集" eng "Dynamic column contains unknown character set" + spa "Columna dinámica contiene conjunto desconocido de caracteres" ER_ILLEGAL_SUBQUERY_OPTIMIZER_SWITCHES chi "“in_to_exists”或'materialization'optimizer_switch标志中的至少一个必须是'开启'" eng "At least one of the 'in_to_exists' or 'materialization' optimizer_switch flags must be 'on'" hindi "कम से कम 'in_to_exists' या 'materialization' optimizer_switch फ्लैग 'ON' होना चाहिए" + spa "Al menos una de las banderas de optimizer_switch 'in_to_exists' o 
'materialization' debe de estar a 'on'" ER_QUERY_CACHE_IS_DISABLED chi "查询缓存已禁用(调整大小或类似命令正在进行中);稍后重复此命令" eng "Query cache is disabled (resize or similar command in progress); repeat this command later" + spa "Caché de consulta (query) desactivada (el comando ajustar o similar está en proceso); repite este comando más tarde" ER_QUERY_CACHE_IS_GLOBALY_DISABLED chi "查询缓存全局禁用,您无法为此会话启用它" eng "Query cache is globally disabled and you can't enable it only for this session" hindi "क्वेरी कैश ग्लोबल स्तर पर DISABLED है और आप इसे केवल सत्र के लिए ENABLE नहीं कर सकते" + spa "La caché de consulta (query) está desactivada de forma global y no puede activarla sólo para esta sesión" ER_VIEW_ORDERBY_IGNORED chi "查看'%-.192s'.'%-.192s's ORDER BY子句被忽略,因为还有其他ORDER BY子句" eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already" + spa "Cláusula de vista '%-.192s'.'%-.192s' ORDER BY ignorada porque ya hay otra clásula ORDER BY" ER_CONNECTION_KILLED 70100 chi "连接被杀死" eng "Connection was killed" hindi "कनेक्शन को समाप्त कर दिया गया है" + spa "La conexión fue matada" ER_UNUSED_12 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION chi "无法修改事务中的@@session.skip_replication" eng "Cannot modify @@session.skip_replication inside a transaction" + spa "No puedo modificar @@session.skip_replication dentro de una transacción" ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION chi "无法修改存储函数或触发器内的@@session.skip_replication" eng "Cannot modify @@session.skip_replication inside a stored function or trigger" + spa "No puedo modificar @@session.skip_replication dentro de una función almacenada o disparador" ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT chi "查询执行被中断。查询检查至少%llu行,超过限制行(%llu)。查询结果可能是不完整的" eng "Query execution was interrupted. The query examined at least %llu rows, which exceeds LIMIT ROWS EXAMINED (%llu). 
The query result may be incomplete" + spa "Se ha interrumpido la ejecución de la consulta (query). La consulta (query) examinó al menos %llu filas, lo cual excede LIMIT ROWS EXAMINED (%llu). El resultado de la consulta (query) puede estar incompleto" ER_NO_SUCH_TABLE_IN_ENGINE 42S02 chi "表'%-.192s.%-.192s'在引擎中不存在" eng "Table '%-.192s.%-.192s' doesn't exist in engine" hindi "टेबल '%-.192s.%-.192s' इंजन में मौजूद नहीं है" + spa "La tabla '%-.192s.%-.192s' no existe en el motor" swe "Det finns ingen tabell som heter '%-.192s.%-.192s' i handlern" ER_TARGET_NOT_EXPLAINABLE chi "目标未运行可解释的命令" eng "Target is not running an EXPLAINable command" + spa "El objetivo no está ejecutando un comando EXPLAINable" ER_CONNECTION_ALREADY_EXISTS chi "连接'%.*s'与现有连接'%.*s'冲突" eng "Connection '%.*s' conflicts with existing connection '%.*s'" + spa "La conexión '%.*s' está en conflicto con la conexión existente '%.*s'" ER_MASTER_LOG_PREFIX chi "Master'%.*s':" eng "Master '%.*s': " + spa "Maestro (master) '%.*s': " ER_CANT_START_STOP_SLAVE chi "不能%sSLAVE'%.*s'" eng "Can't %s SLAVE '%.*s'" + spa "No puedo %s ESCLAVO '%.*s'" ER_SLAVE_STARTED chi "SLAVE '%.*s'开始了" eng "SLAVE '%.*s' started" + spa "ESCLAVO '%.*s' arrancado" ER_SLAVE_STOPPED chi "slave'%.*s'停止了" eng "SLAVE '%.*s' stopped" + spa "ESCLAVO '%.*s' parado" ER_SQL_DISCOVER_ERROR chi "引擎%s无法发现表%`-.192s.%`-.192s,'%s'" eng "Engine %s failed to discover table %`-.192s.%`-.192s with '%s'" + spa "El motor %s no pudo descubrir la tabla %`-.192s.%`-.192s con '%s'" ER_FAILED_GTID_STATE_INIT chi "初始化复制GTID状态失败" eng "Failed initializing replication GTID state" + spa "Fallo inicializando estado de réplica GTID" ER_INCORRECT_GTID_STATE chi "无法解析GTID列表" eng "Could not parse GTID list" + spa "No pude analizar la lista GTID" ER_CANNOT_UPDATE_GTID_STATE chi "无法更新Replication Slave GTID状态" eng "Could not update replication slave gtid state" + spa "No pude actualizar estado gtid de esclavo de réplica" ER_DUPLICATE_GTID_DOMAIN chi "GTID 
%u-%u-%llu和%u-%u-%llu冲突(重复域ID%u)" eng "GTID %u-%u-%llu and %u-%u-%llu conflict (duplicate domain id %u)" + spa "Conflicto GTID %u-%u-%llu y %u-%u-%llu (duplicado id de dominio %u)" ER_GTID_OPEN_TABLE_FAILED chi "未能打开%s.%s" eng "Failed to open %s.%s" ger "Öffnen von %s.%s fehlgeschlagen" + spa "No pude abrir %s.%s" ER_GTID_POSITION_NOT_FOUND_IN_BINLOG chi "连接从站请求从GTID%u-%u-%llu开始,这不在Master的Binlog中" eng "Connecting slave requested to start from GTID %u-%u-%llu, which is not in the master's binlog" + spa "Se ha requerido que conectar esclavo arranque desde GTID %u-%u-%llu, el cual no está en el binlog del maestro (master)" ER_CANNOT_LOAD_SLAVE_GTID_STATE chi "无法从表%s中加载Replication Slave GTID位置。%s" eng "Failed to load replication slave GTID position from table %s.%s" + spa "No pude cargar posición GTID de esclavo de réplica desde la tabla %s.%s" ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG chi "指定的GTID%u-%u-%llu与二进制日志冲突,其中包含更新的GTID%u-%u-%llu。如果使用master_gtid_pos = current_pos,则Binlog位置将覆盖@@gtid_slave_pos的新值" eng "Specified GTID %u-%u-%llu conflicts with the binary log which contains a more recent GTID %u-%u-%llu. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos" + spa "El especificado GTID %u-%u-%llu está en conflicto con el historial (log) binario el cual contiene un más reciente GTID %u-%u-%llu. Si se usa MASTER_GTID_POS=CURRENT_POS, la posición de binlog sobreescribirá el nuevo valor de @@gtid_slave_pos" ER_MASTER_GTID_POS_MISSING_DOMAIN chi "指定值为@@gtid_slave_pos不包含复制域%u的值。这与二进制日志冲突,其中包含gtid%u-%u-%llu。如果使用master_gtid_pos = current_pos,则Binlog位置将覆盖@@ gtid_slave_pos的新值" eng "Specified value for @@gtid_slave_pos contains no value for replication domain %u. This conflicts with the binary log which contains GTID %u-%u-%llu. 
If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos" + spa "El valor especificado para @@gtid_slave_pos no contiene valor para dominio de réplica %u. Esto está en conflicto con el historial (log) binario el cual contiene un GTID %u-%u-%llu. Si se usa MASTER_GTID_POS=CURRENT_POS, la posición binlog sobreescribirá el nuevo valor de @@gtid_slave_pos" ER_UNTIL_REQUIRES_USING_GTID chi "启动从站,直到master_gtid_pos要求从站使用gtid" eng "START SLAVE UNTIL master_gtid_pos requires that slave is using GTID" + spa "START SLAVE UNTIL master_gtid_pos requiere que esclavo esté usando GTID" ER_GTID_STRICT_OUT_OF_ORDER chi "尝试对Binlog GTID%u-%u-%llu进行,这将创建具有现有GTID%u-%u-%llu的订单无序序列号,并且启用了GTID严格模式" eng "An attempt was made to binlog GTID %u-%u-%llu which would create an out-of-order sequence number with existing GTID %u-%u-%llu, and gtid strict mode is enabled" + spa "Se ha intentado hacer binlog de GTID %u-%u-%llu lo cual crearía un número de secuencia fuera de orden con el existente GTID %u-%u-%llu y está activado gtid en modo estricto" ER_GTID_START_FROM_BINLOG_HOLE chi "主机上的Binlog缺少从站所需请求的GTID%u-%u-%llu(即使存在后续的序列号),并启用GTID严格模式" eng "The binlog on the master is missing the GTID %u-%u-%llu requested by the slave (even though a subsequent sequence number does exist), and GTID strict mode is enabled" + spa "Al binlog del maestro (master) le falta el GTID %u-%u-%llu requerido por el esclavo (incluso aunque existe un número posterior de secuencia) y está activado GTID en modo estricto" ER_SLAVE_UNEXPECTED_MASTER_SWITCH chi "重新连接后,从master收到意外的GTID。这通常表示在不重新启动从线程的情况下替换主服务器。%s." eng "Unexpected GTID received from master after reconnect. This normally indicates that the master server was replaced without restarting the slave threads. %s" + spa "Se ha recibido un GTID inesperado desde el maestro (master) tras reconectar. Esto indica normalmente que el servidor maestro (master) ha sido reemplazado sin rearrancar los hilos (threads) del esclavo. 
%s" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO chi "无法修改@@sessient.gtid_domain_id或@@session.gtid_seq_no" eng "Cannot modify @@session.gtid_domain_id or @@session.gtid_seq_no inside a transaction" + spa "No puedo modificar @@session.gtid_domain_id o @@session.gtid_seq_no dentro de una transacción" ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO chi "无法修改@@sessient.gtid_domain_id或@@session.gtid_seq_no或触发器" eng "Cannot modify @@session.gtid_domain_id or @@session.gtid_seq_no inside a stored function or trigger" + spa "No puedo modificar @@session.gtid_domain_id o @@session.gtid_seq_no dentro de una función almacenada o de un disparador" ER_GTID_POSITION_NOT_FOUND_IN_BINLOG2 chi "连接从站请求从GTID%u-%u-%llu开始,这不在Master的Binlog中。由于Master的Binlog包含具有更高序列号的GTID,因此它可能意味着由于执行额外错误的交易,因此slave已经分歧" eng "Connecting slave requested to start from GTID %u-%u-%llu, which is not in the master's binlog. Since the master's binlog contains GTIDs with higher sequence numbers, it probably means that the slave has diverged due to executing extra erroneous transactions" + spa "Se ha requerido arrancar la conexión a esclavo desde GTID %u-%u-%llu, el cual no está en el binlog del maestro (master). Ya que el binlog del maestro (master) contiene GTIDs con números mayores de secuencia, es probable que indique que el esclavo diverge debido a ejecutar transacciones extra erróneas" ER_BINLOG_MUST_BE_EMPTY chi "如果已将任何GTID记录到二进制日志,则不允许此操作。首先运行RESET MASTER擦除日志" eng "This operation is not allowed if any GTID has been logged to the binary log. Run RESET MASTER first to erase the log" + spa "Esta operación no está permitida si cualquier GTID ha sido puesto en historial (log) binario. 
Ejecuta primero RESET MASTER para borrar el historial (log)" ER_NO_SUCH_QUERY chi "未知查询ID:%lld" eng "Unknown query id: %lld" ger "Unbekannte Abfrage-ID: %lld" hindi "अज्ञात क्वेरी ID: %lld" rus "Неизвестный номер запроса: %lld" + spa "Id desconocido de consulta (query): %lld" ER_BAD_BASE64_DATA chi "错误Base64数据作为位置%u" eng "Bad base64 data as position %u" + spa "Datos base64 malos en posición %u" ER_INVALID_ROLE OP000 chi "无效的角色规范%`s" eng "Invalid role specification %`s" hindi "अमान्य रोल विनिर्देश %`s" rum "Rolul %`s este invalid" + spa "Especificación inválida de rol %`s" ER_INVALID_CURRENT_USER 0L000 chi "当前用户无效" eng "The current user is invalid" hindi "वर्तमान यूज़र अमान्य है" rum "Utilizatorul curent este invalid" + spa "El usuario en curso no es válido" ER_CANNOT_GRANT_ROLE chi "无法将角色'%s'授予:%s" eng "Cannot grant role '%s' to: %s" hindi "रोल '%s', %s को प्रदान नहीं कर सकते" rum "Rolul '%s' nu poate fi acordat catre: %s" + spa "No puedo conceder rol '%s' a: %s" ER_CANNOT_REVOKE_ROLE chi "无法撤消来自:%s的角色'%s'" eng "Cannot revoke role '%s' from: %s" hindi "रोल '%s', %s से हटाया नहीं जा सका" rum "Rolul '%s' nu poate fi revocat de la: %s" + spa "No puedo revocar rol '%s' desde: %s" ER_CHANGE_SLAVE_PARALLEL_THREADS_ACTIVE chi "无法更改@@slave_parallel_threads,而另一个更改正在进行中" eng "Cannot change @@slave_parallel_threads while another change is in progress" + spa "No puedo cambiar @@slave_parallel_threads mientras otro cambio esté en proceso" ER_PRIOR_COMMIT_FAILED chi "由于早期提交的失败取决于依赖于哪个依赖性,提交失败" eng "Commit failed due to failure of an earlier commit on which this one depends" + spa "Ha fallado la acometida (commit) debido a un fallo previo en acometida (commit) de la que depende ésta" ER_IT_IS_A_VIEW 42S02 chi "'%-.192s'是一个VIEW" eng "'%-.192s' is a view" hindi "'%-.192s' एक VIEW है" + spa "'%-.192s' es una vista" ER_SLAVE_SKIP_NOT_IN_GTID chi "使用并行复制和带有多个复制域的GTID时,无法使用@@SQL_SLAVE_SKIP_COUNTER。相反,可以使用明确设置@@gtid_slave_pos以在给定的gtid位置之后跳到" eng "When using parallel replication and GTID 
with multiple replication domains, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position" + spa "Al usar réplica paralela y GTID con múltiples dominios de réplica, no se puede usar @@sql_slave_skip_counter. En su lugar, poner @@gtid_slave_pos de forma explícita se puede usar para saltar tras una posición GTID dada" ER_TABLE_DEFINITION_TOO_BIG chi "表%`s的定义太大了" eng "The definition for table %`s is too big" hindi "टेबल %`s की परिभाषा बहुत बड़ी है" + spa "La definición para la tabla %`s es demasiado larga" ER_PLUGIN_INSTALLED chi "插件'%-.192s'已安装" eng "Plugin '%-.192s' already installed" hindi "प्लग-इन '%-.192s' पहले से ही इन्स्टॉल्ड है" rus "Плагин '%-.192s' уже установлен" + spa "Ya instalado el enchufe (plugin) '%-.192s'" ER_STATEMENT_TIMEOUT 70100 chi "查询执行中断(超出MAX_STATEMENT_TIME)" eng "Query execution was interrupted (max_statement_time exceeded)" + spa "Se ha interrumpido la ejecución de una consulta (query) (excedido max_statement_time)" ER_SUBQUERIES_NOT_SUPPORTED 42000 chi "%s不支持子查询或存储的函数" eng "%s does not support subqueries or stored functions" + spa "%s no soporta subconsultas (subqueries) o funciones almacenadas" ER_SET_STATEMENT_NOT_SUPPORTED 42000 chi "系统变量%.200s无法在set语句中设置。“" eng "The system variable %.200s cannot be set in SET STATEMENT." + spa "La variable del sistema %.200s no se puede poner en SET STATEMENT." 
ER_UNUSED_9 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_USER_CREATE_EXISTS chi "无法创建用户'%-.64s'@'%-.64s';它已经存在" eng "Can't create user '%-.64s'@'%-.64s'; it already exists" hindi "यूज़र '%-.64s'@'%-.64s' को नहीं बना सकते; यह पहले से ही मौजूद है" + spa "No puedo crear usuario '%-.64s'@'%-.64s'; ya existe" ER_USER_DROP_EXISTS chi "无法删除用户'%-.64s'@'%-.64s';它不存在" eng "Can't drop user '%-.64s'@'%-.64s'; it doesn't exist" hindi "यूज़र '%-.64s'@'%-.64s' को ड्रॉप नहीं कर सकते; यह मौजूद नहीं है" + spa "No puedo eliminar usuario '%-.64s'@'%-.64s'; no existe" ER_ROLE_CREATE_EXISTS chi "无法创建角色'%-.64s';它已经存在" eng "Can't create role '%-.64s'; it already exists" hindi "रोल '%-.64s' को नहीं बना सकते; यह पहले से ही मौजूद है" + spa "No puedo crear rol '%-.64s'; ya existe" ER_ROLE_DROP_EXISTS chi "无法删除'%-.64s'。它不存在" eng "Can't drop role '%-.64s'; it doesn't exist" hindi "रोल '%-.64s' को ड्रॉप नहीं कर सकते; यह मौजूद नहीं है" + spa "No puedo eliminar rol '%-.64s'; no existe" ER_CANNOT_CONVERT_CHARACTER chi "无法将'%s'字符0x转换为0x%-.64s到'%s'" eng "Cannot convert '%s' character 0x%-.64s to '%s'" + spa "No puedo convertir '%s' carácter 0x%-.64s a '%s'" ER_INVALID_DEFAULT_VALUE_FOR_FIELD 22007 chi "列的默认值不正确'%-.128T' '%.192s'" eng "Incorrect default value '%-.128T' for column '%.192s'" hindi "गलत डिफ़ॉल्ट मान '%-.128T' कॉलम '%.192s' के लिए" + spa "Valor por defecto incorrecto '%-.128T' para columna '%.192s'" ER_KILL_QUERY_DENIED_ERROR chi "你不是查询%lu的所有者" eng "You are not owner of query %lu" ger "Sie sind nicht Eigentümer von Abfrage %lu" hindi "आप क्वेरी %lu के OWNER नहीं हैं" rus "Вы не являетесь владельцем запроса %lu" + spa "No eres el propietario de la consulta (query) %lu" ER_NO_EIS_FOR_FIELD chi "没有收集无关的统计信息列'%s'" eng "Engine-independent statistics are not collected for column '%s'" hindi "Engine-independent सांख्यिकी कॉलम '%s' के लिए एकत्रित नहीं किया जा रहा है" + spa "No se han recolectado estadísticas independientes del motor para la columna '%s'" 
ukr "Незалежна від типу таблиці статистика не збирається для стовбця '%s'" ER_WARN_AGGFUNC_DEPENDENCE chi "聚合函数'%-.192s)'SELECT#%d的属于选择#%d" eng "Aggregate function '%-.192s)' of SELECT #%d belongs to SELECT #%d" + spa "La función de agregación '%-.192s)' del SELECT #%d pertenece a SELECT #%d" ukr "Агрегатна функція '%-.192s)' з SELECTу #%d належить до SELECTу #%d" WARN_INNODB_PARTITION_OPTION_IGNORED chi "<%-.64s> innodb分区忽略的选项" eng "<%-.64s> option ignored for InnoDB partition" + spa "<%-.64s> opción ignorada para partición InnoDB" # # Internal errors, not used @@ -8306,830 +8974,1089 @@ skip-to-error-number 3000 ER_FILE_CORRUPT chi "文件%s已损坏" eng "File %s is corrupted" + spa "El fichero/archivo %s está corrupto" ER_ERROR_ON_MASTER chi "查询在主设备上部分完成(主设备:%d)并中止。你的master在这一点上有可能不一致。如果您确定您的主站是可以的,请在从站上手动运行此查询,然后使用SET GLOBAL SQL_SLAVE_SKIP_COUNTER = 1; START SLAVE ;查询:'%s'" - eng "Query partially completed on the master (error on master: %d) and was aborted. There is a chance that your master is inconsistent at this point. If you are sure that your master is ok, run this query manually on the slave and then restart the slave with SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; START SLAVE;. Query:'%s'" + eng "Query partially completed on the master (error on master: %d) and was aborted. There is a chance that your master is inconsistent at this point. If you are sure that your master is ok, run this query manually on the slave and then restart the slave with SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; START SLAVE;. Query:'%s'" + spa "Consulta (query) completada de forma parcial en el maestro (master) (error en maestro (master): %d) y se ha abortado. Existe una posibilidad de que su maestro (master) esté inconsitente en este punto. Si está seguro de que su maestro (master) está ok, ejecute esta consulta (query) de forma manual en el esclavo y luego rearranque el esclavo mediante SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; START SLAVE;. 
Consulta (query):'%s'" ER_INCONSISTENT_ERROR chi "查询在主站和从站上引起了不同的错误。主站错误:消息(格式)='%s'错误代码=%d;从站错误:实际消息='%s',错误代码=%d。默认数据库:'%s'。查询:'%s'" eng "Query caused different errors on master and slave. Error on master: message (format)='%s' error code=%d; Error on slave:actual message='%s', error code=%d. Default database:'%s'. Query:'%s'" + spa "La consulta (query) ha ocasionado diferentes errores en el maestro (master) y en el esclavo. Error en maestro (master): mensaje (formato)='%s' código de error=%d; Error en esclavo: mensaje actual='%s', código de error=%d. Base de datos por defecto:'%s'. Consulta (query):'%s'" ER_STORAGE_ENGINE_NOT_LOADED chi "表'%s'的存储引擎'%s'没有加载。" eng "Storage engine for table '%s'.'%s' is not loaded." + spa "El motor de almacenaje para la tabla '%s'.'%s' no ha sido cargado." ER_GET_STACKED_DA_WITHOUT_ACTIVE_HANDLER 0Z002 chi "处理程序未激活时GET STACKED DIAGNOSTICS" eng "GET STACKED DIAGNOSTICS when handler not active" + spa "GET STACKED DIAGNOSTICS cuando el manejador no está activo" ER_WARN_LEGACY_SYNTAX_CONVERTED chi "不再支持%s。该语句被转换为%s。" eng "%s is no longer supported. The statement was converted to %s." + spa "%s ya no está soportada. Se ha convertido la sentencia a %s." ER_BINLOG_UNSAFE_FULLTEXT_PLUGIN chi "语句不安全,因为它使用全文解析器插件,它可能不会在从站上返回相同的值。" eng "Statement is unsafe because it uses a fulltext parser plugin which may not return the same value on the slave." + spa "La sentencia no es segura porque usa un enchufe (plugin) analizador de fulltext que puede que no devuelva el mismo valor en el esclavo." ER_CANNOT_DISCARD_TEMPORARY_TABLE chi "无法丢弃与临时表相关联的/导入表空间" eng "Cannot DISCARD/IMPORT tablespace associated with temporary table" + spa "No puedo DISCARD/IMPORT espacio de tabla asociado con tabla temporal" ER_FK_DEPTH_EXCEEDED chi "外键级联删除/更新超出了%d的最大深度。" eng "Foreign key cascade delete/update exceeds max depth of %d." + spa "La cascada borrar/actualizar en clave foránea excede la máxima profundidad de %d." 
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE_V2 chi "列数为%s.%s是错误的。预期的%d,找到%d。使用MariaDB%d创建,现在运行%d。请使用mariadb-upgrade来修复此错误。" eng "Column count of %s.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mariadb-upgrade to fix this error." ger "Spaltenanzahl von %s.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MariaDB %d, jetzt unter %d. Bitte benutzen Sie mariadb-upgrade, um den Fehler zu beheben" + spa "El contador de columna de %s.%s está equivocado. Se esperaba %d, hallado %d, Creado con MariaDB %d, ahora ejecutando %d. Por favor, use mariadb-upgrade para arreglar este error." ER_WARN_TRIGGER_DOESNT_HAVE_CREATED chi "触发器%s.%s.%s没有CREATE属性。" eng "Trigger %s.%s.%s does not have CREATED attribute." + spa "El disparador %s.%s.%s no tiene el atributo CREATED." ER_REFERENCED_TRG_DOES_NOT_EXIST_MYSQL chi "引用的触发器'%s'用于给定的动作时间和事件类型不存在。" eng "Referenced trigger '%s' for the given action time and event type does not exist." + spa "El disparador referenciado '%s' para el momento dado de acción y el tipo de evento no existe." ER_EXPLAIN_NOT_SUPPORTED chi "EXPLAIN FOR CONNECTION仅支持SELECT/UPDATE/INSERT/DELETE/REPLACE" eng "EXPLAIN FOR CONNECTION command is supported only for SELECT/UPDATE/INSERT/DELETE/REPLACE" + spa "El comando EXPLAIN FOR CONNECTION sólo se soporta para SELECT/UPDATE/INSERT/DELETE/REPLACE" ER_INVALID_FIELD_SIZE chi "列'%-.192s'的大小无效。" eng "Invalid size for column '%-.192s'." + spa "Tamaño inválido para columna '%-.192s'." ER_MISSING_HA_CREATE_OPTION chi "表存储引擎'%-.64s'所找到的必备创建选项丢失" eng "Table storage engine '%-.64s' found required create option missing" + spa "El motor hallado de almacenaje de tabla '%-.64s' requería de la opción de crear que falta" ER_ENGINE_OUT_OF_MEMORY chi "存储引擎'%-.64s'中的内存不足。" eng "Out of memory in storage engine '%-.64s'." + spa "Memoria agotada en motor de almacenaje '%-.64s'." ER_PASSWORD_EXPIRE_ANONYMOUS_USER chi "匿名用户的密码不能过期。" eng "The password for anonymous user cannot be expired." 
+ spa "La contraseña para usuario anónimo no puede expirar." ER_SLAVE_SQL_THREAD_MUST_STOP chi "无法使用正在运行的从SQL线程执行此操作;首先运行STOP SLAVE SQL_THREAD" eng "This operation cannot be performed with a running slave sql thread; run STOP SLAVE SQL_THREAD first" + spa "Esta operación no se puede realizar con hilo (thread) sql esclavo en ejecución; ejecuta STOP SLAVE SQL_THREAD primero" ER_NO_FT_MATERIALIZED_SUBQUERY chi "无法在物化的子查询上创建FullText索引" eng "Cannot create FULLTEXT index on materialized subquery" + spa "No puedo crear índice FULLTEXT en subconsulta (subquery) materializada" ER_INNODB_UNDO_LOG_FULL chi "撤消日志错误:%s" eng "Undo Log error: %s" + spa "Error de Historial (log) de Deshacer: %s" ER_INVALID_ARGUMENT_FOR_LOGARITHM 2201E chi "对数的参数无效" eng "Invalid argument for logarithm" + spa "Argumento inválido para logaritmo" ER_SLAVE_CHANNEL_IO_THREAD_MUST_STOP chi "无法使用正在运行的slave IO线程执行此操作;首先运行STOP SLAVE IO_THREAD FOR CHANNEL'%s'。" eng "This operation cannot be performed with a running slave io thread; run STOP SLAVE IO_THREAD FOR CHANNEL '%s' first." + spa "Esta operación no se puede realizar con un hilo (thread) de e/s de esclavo en ejecución; ejecuta STOP SLAVE IO_THREAD FOR CHANNEL '%s' primero." ER_WARN_OPEN_TEMP_TABLES_MUST_BE_ZERO chi "当从站具有临时表时,此操作可能不安全。表将保持打开,直到服务器重新启动或通过任何复制的DROP语句删除表。建议等到Slave_open_temp_tables = 0。" eng "This operation may not be safe when the slave has temporary tables. The tables will be kept open until the server restarts or until the tables are deleted by any replicated DROP statement. Suggest to wait until slave_open_temp_tables = 0." + spa "Esta operación puede no ser segura cuando el esclavo tenga tablas temporales. Las tablas serán mantenidas abiertas hasta que el servidor rearranque o hasta que las tablas sean borradas por cualquier sentencia DROP replicada. Se sugiere esperar hasta slave_open_temp_tables = 0." 
ER_WARN_ONLY_MASTER_LOG_FILE_NO_POS chi "使用CHANGE MASTER TO master_log_file子句更改master,但没有master_log_pos子句可能不安全。旧位置值可能对新的二进制日志文件无效。" eng "CHANGE MASTER TO with a MASTER_LOG_FILE clause but no MASTER_LOG_POS clause may not be safe. The old position value may not be valid for the new binary log file." + spa "CHANGE MASTER TO mediante una cláusula MASTER_LOG_FILE pero sin existir cláusula MASTER_LOG_POS puede no ser seguro. El valor viejo de la posición puede no ser válido para el nuevo fichero/archivo binario de historial (log)." ER_QUERY_TIMEOUT chi "查询执行中断,超过了最大语句执行时间" eng "Query execution was interrupted, maximum statement execution time exceeded" + spa "Se ha interrumpido la ejecución de la consulta (query), se ha excedido el tiempo máximo de ejecución de sentencia" ER_NON_RO_SELECT_DISABLE_TIMER chi "SELECT不是只读语句,禁用计时器" eng "Select is not a read only statement, disabling timer" + spa "Select no es una sentencia de sólo lectura, desactivando cronómetro" ER_DUP_LIST_ENTRY chi "重复条目'%-.192s'。" eng "Duplicate entry '%-.192s'." + spa "Entrada duplicada '%-.192s'." ER_SQL_MODE_NO_EFFECT chi "'%s'模式不再有任何效果。使用STRICT_ALL_TABLES或STRICT_TRANS_TABLES。" eng "'%s' mode no longer has any effect. Use STRICT_ALL_TABLES or STRICT_TRANS_TABLES instead." + spa "El modo '%s' ya no tiene efecto alguno. 
Use STRICT_ALL_TABLES o STRICT_TRANS_TABLES en su lugar" ER_AGGREGATE_ORDER_FOR_UNION chi "表达式#%u ORDER BY包含聚合函数并适用于UNION" eng "Expression #%u of ORDER BY contains aggregate function and applies to a UNION" + spa "La expresión #%u de ORDER BY contiene función de agregación y se aplica a UNION" ER_AGGREGATE_ORDER_NON_AGG_QUERY chi "表达式#%u通过包含聚合函数,并适用于非聚合查询的结果" eng "Expression #%u of ORDER BY contains aggregate function and applies to the result of a non-aggregated query" + spa "La expresión #%u de ORDER BY contiene función de agregación y se aplica al resultado de una consulta (query) no agregada" ER_SLAVE_WORKER_STOPPED_PREVIOUS_THD_ERROR chi "在启用了slave保存提交次序时至少有一个以前的工人遇到错误后,slave工作者已停止。要保留提交次序,此线程执行的最后一项事务尚未提交。在修复任何故障线程后重新启动从站时,您也应该修复此工作人。" eng "Slave worker has stopped after at least one previous worker encountered an error when slave-preserve-commit-order was enabled. To preserve commit order, the last transaction executed by this thread has not been committed. When restarting the slave after fixing any failed threads, you should fix this worker as well." + spa "El trabajador esclavo se ha parado tras al menos encontrar un error en trabajador previo cuando slave-preserve-commit-order fue activado. Para preserver el orden de acometida (commit), la última transacción ejecutada por este hilo (thread) no se ha acometido (commit). Al rearrancar el esclavo tras arreglar cualquier hilo (thread) fallido, vd debería de arreglar este trabajador también" ER_DONT_SUPPORT_SLAVE_PRESERVE_COMMIT_ORDER chi "slave_preerve_commit_order不支持%s。" eng "slave_preserve_commit_order is not supported %s." + spa "slave_preserve_commit_order no está soportado %s." 
ER_SERVER_OFFLINE_MODE chi "服务器目前处于离线模式" eng "The server is currently in offline mode" + spa "El servidor se encuentra actualmente en modo fuera de línea" ER_GIS_DIFFERENT_SRIDS chi "二进制几何函数%s给定两个不同SRID的几何形状:%u和%u,应该是相同的。" eng "Binary geometry function %s given two geometries of different srids: %u and %u, which should have been identical." + spa "La función binaria de geomertía %s ha dado dos geometrías de diferente srids: %u y %u, que deberían de haber sido idénticas" ER_GIS_UNSUPPORTED_ARGUMENT chi "调用几何函数%s与不受支持类型的参数。" eng "Calling geometry function %s with unsupported types of arguments." + spa "Llamando a función de geometría %s con tipos de argumento no soportados." ER_GIS_UNKNOWN_ERROR chi "未知的GIS错误发生在功能%s中。" eng "Unknown GIS error occurred in function %s." + spa "Ha ocurrido un error GIS desconocido en función %s." ER_GIS_UNKNOWN_EXCEPTION chi "在GIS功能%s中捕获的未知异常。" eng "Unknown exception caught in GIS function %s." + spa "Excepción desconocida capturada en función GIS %s." ER_GIS_INVALID_DATA 22023 chi "提供给功能%s的GIS数据无效。" eng "Invalid GIS data provided to function %s." + spa "Suministrados datos GIS inválidos a función %s." ER_BOOST_GEOMETRY_EMPTY_INPUT_EXCEPTION chi "几何形状在功能%s中没有数据。" eng "The geometry has no data in function %s." + spa "La geometría no tiene datos en función %s." ER_BOOST_GEOMETRY_CENTROID_EXCEPTION chi "无法计算质心,因为在功能%s中几何为空。" eng "Unable to calculate centroid because geometry is empty in function %s." + spa "Imposible calcular centroid porque la geometría está vacía en la función %s." ER_BOOST_GEOMETRY_OVERLAY_INVALID_INPUT_EXCEPTION chi "几何叠加计算错误:几何数据在功能%s中无效。" eng "Geometry overlay calculation error: geometry data is invalid in function %s." + spa "Error de cálculo de superposición de geometría: el dato de geometría es inválido en la función %s." ER_BOOST_GEOMETRY_TURN_INFO_EXCEPTION chi "几何旋转信息计算错误:几何数据在功能%s中无效。" eng "Geometry turn info calculation error: geometry data is invalid in function %s." 
+ spa "Error de cálculo de información devuelto: los datos de geometría son inválidos en la función %s." ER_BOOST_GEOMETRY_SELF_INTERSECTION_POINT_EXCEPTION chi "在功能%s中出乎意料地中断交叉点的分析程序。" eng "Analysis procedures of intersection points interrupted unexpectedly in function %s." + spa "Los procedimientos de análisis de puntos de intersección se interrumpieron inesperadamente en la función %s." ER_BOOST_GEOMETRY_UNKNOWN_EXCEPTION chi "在功能%s中抛出的未知异常。" eng "Unknown exception thrown in function %s." + spa "Excepción desconocida lanzada en la función %s." ER_STD_BAD_ALLOC_ERROR chi "内存分配错误:%-.256s。函数%s。" eng "Memory allocation error: %-.256s in function %s." + spa "Error en adjudicación de memoria: %-.256s en la función %s." ER_STD_DOMAIN_ERROR chi "域名错误:%-.256s. 函数%s" eng "Domain error: %-.256s in function %s." + spa "Error en dominio: %-.256s en función %s." ER_STD_LENGTH_ERROR chi "长度误差:%-.256s函数%s。" eng "Length error: %-.256s in function %s." + spa "Error de tamaño: %-.256s en función %s." ER_STD_INVALID_ARGUMENT chi "无效的参数错误:%-.256s函数%s。" eng "Invalid argument error: %-.256s in function %s." + spa "Error de argumento inválido: %-.256s en función %s." ER_STD_OUT_OF_RANGE_ERROR chi "超出范围错误:%-.256s 函数%s。" eng "Out of range error: %-.256s in function %s." + spa "Error de fuera de rango: %-.256s en función %s." ER_STD_OVERFLOW_ERROR chi "溢出错误:%-.256s。功能%s。" eng "Overflow error: %-.256s in function %s." + spa "Error de desbordamiento: %-.256s en función %s." ER_STD_RANGE_ERROR chi "范围错误:%-.256s函数%s。" eng "Range error: %-.256s in function %s." + spa "Error de rango: %-.256s en función %s." ER_STD_UNDERFLOW_ERROR chi "下溢错误:%-.256s函数%s。" eng "Underflow error: %-.256s in function %s." + spa "Error de refreno (underflow): %-.256s en la función %s." ER_STD_LOGIC_ERROR chi "逻辑错误:%-.256s 函数%s。" eng "Logic error: %-.256s in function %s." + spa "Error lógico: %-.256s en la función %s." ER_STD_RUNTIME_ERROR chi "运行时错误:%-.256s函数%s。" eng "Runtime error: %-.256s in function %s." 
+ spa "Error en tiempo de ejecución: %-.256s en la función %s." ER_STD_UNKNOWN_EXCEPTION chi "未知例外:%-.384s在函数%s中。" eng "Unknown exception: %-.384s in function %s." + spa "Excepción desconocida: %-.384s en la función %s." ER_GIS_DATA_WRONG_ENDIANESS chi "几何字节字符串必须是小endian。" eng "Geometry byte string must be little endian." + spa "La cadena de byte en Geometría debe de ser 'little endian'." ER_CHANGE_MASTER_PASSWORD_LENGTH chi "为Replication User提供的密码超过32个字符的最大长度" eng "The password provided for the replication user exceeds the maximum length of 32 characters" + spa "La contraseña suministrada para el usuario de réplica excede el tamaño máximo de 32 caracteres" ER_USER_LOCK_WRONG_NAME 42000 chi "用户级锁名名称'%-.192s'不正确。" eng "Incorrect user-level lock name '%-.192s'." + spa "Nombre de bloqueo incorrecto a nivel de usuario '%-.192s'." # Should be different from ER_LOCK_DEADLOCK since it doesn't cause implicit # rollback. Should not be mapped to SQLSTATE 40001 for the same reason. ER_USER_LOCK_DEADLOCK chi "在尝试获得用户级锁时发现死锁;尝试回滚交易/释放锁定并重新启动锁定采集。" eng "Deadlock found when trying to get user-level lock; try rolling back transaction/releasing locks and restarting lock acquisition." + spa "Hallado estancamiento (deadlock) al intentar obtener bloqueo a nivel de usuario; intente retroceder (roll back) bloqueos de transacción/entrega y rearranque la adquisición de bloqueo." 
ER_REPLACE_INACCESSIBLE_ROWS chi "无法执行REPLACE,因为它需要删除不在视图中的行" eng "REPLACE cannot be executed as it requires deleting rows that are not in the view" + spa "REPLACE no se puede ejecutar ya que requiere borrar filas que no están en la vista" ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS chi "不要支持使用GIS索引的表中的在线操作" eng "Do not support online operation on table with GIS index" + spa "No soporta operación en línea en tabla con índice GIS" # MariaDB extra error numbers starts from 4000 skip-to-error-number 4000 -ER_COMMULTI_BADCONTEXT 0A000 - chi "COM_MULTI无法返回给定上下文中的结果集" - eng "COM_MULTI can't return a result set in the given context" - ger "COM_MULTI kann im gegebenen Kontext keine Ergebnismenge zurückgeben" - ukr "COM_MULTI не може повернути результати у цьому контексті" -ER_BAD_COMMAND_IN_MULTI - chi "COM_MULTI不允许命令'%s'" - eng "Command '%s' is not allowed for COM_MULTI" - ukr "Команда '%s' не дозволена для COM_MULTI" +ER_UNUSED_26 0A000 + eng "This error never happens" + spa "Este error nunca ocurre" +ER_UNUSED_27 + eng "This error never happens" + spa "Este error nunca ocurre" ER_WITH_COL_WRONG_LIST chi "使用列列表并选择字段列表具有不同的列计数" eng "WITH column list and SELECT field list have different column counts" + spa "La lista de columnas de WITH y lista de campos de SELECT tienen diferentes contadores de columna" ER_TOO_MANY_DEFINITIONS_IN_WITH_CLAUSE chi "WITH条款中的元素太多了" eng "Too many WITH elements in WITH clause" + spa "Demasiados elementos WITH en cláusua WITH" ER_DUP_QUERY_NAME chi "WITH子句重复查询名称%`-.64s" eng "Duplicate query name %`-.64s in WITH clause" + spa "Nombre de consulta (query) %`-.64s duplicada en cláusula WITH" ER_RECURSIVE_WITHOUT_ANCHORS chi "没有元素'%s'递归的锚点" eng "No anchors for recursive WITH element '%s'" + spa "No hay anclajes para elemento WITH recursivo '%s'" ER_UNACCEPTABLE_MUTUAL_RECURSION chi "锚定表'%s'不可接受的相互递归" eng "Unacceptable mutual recursion with anchored table '%s'" + spa "Recursión mutua inaceptable con tabla anclada '%s'" 
ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED chi "物质化的衍生参考指向递归的WITH 表'%s'" eng "Reference to recursive WITH table '%s' in materialized derived" + spa "Referencia recursiva con WITH tabla '%s' en derivada materializada" ER_NOT_STANDARD_COMPLIANT_RECURSIVE chi "表'%s'R_WRONG_WINDOW_SPEC_NAME违反了递归定义的限制" eng "Restrictions imposed on recursive definitions are violated for table '%s'" ER_WRONG_WINDOW_SPEC_NAME chi "没有定义名称'%s'的窗口规范" eng "Window specification with name '%s' is not defined" + spa "Especificación de ventana con nombre '%s' no definida" ER_DUP_WINDOW_NAME chi "具有相同名称'%s'的多个窗口规范" eng "Multiple window specifications with the same name '%s'" + spa "Múltiples especificaciones de ventana con el mismo nombre '%s'" ER_PARTITION_LIST_IN_REFERENCING_WINDOW_SPEC chi "窗口规范引用另一个'%s'不能包含分区列表" eng "Window specification referencing another one '%s' cannot contain partition list" + spa "La especificación de ventana que referencia a otra '%s' no puede contener una lista de partición" ER_ORDER_LIST_IN_REFERENCING_WINDOW_SPEC chi "引用的窗口规范'%s'已包含次序列表" eng "Referenced window specification '%s' already contains order list" + spa "La especificación de ventana referenciada '%s' ya contiene lista de orden" ER_WINDOW_FRAME_IN_REFERENCED_WINDOW_SPEC chi "引用的窗口规范'%s'不能包含窗口框架" eng "Referenced window specification '%s' cannot contain window frame" + spa "La especificación referenciada de ventana '%s' no puede contener marco de ventana" ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS chi "窗框绑定规格的不可接受的组合" eng "Unacceptable combination of window frame bound specifications" + spa "Combinación inaceptable de especificaciones ligadas a marco de ventana" ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION chi "窗口函数仅在SELECT列表和ORDER BY子句中允许" eng "Window function is allowed only in SELECT list and ORDER BY clause" + spa "La función de ventana sólo se permite en lista SELECT y en cláusula ORDER BY" ER_WINDOW_FUNCTION_IN_WINDOW_SPEC chi "窗口规范中不允许窗口功能" eng "Window function is not allowed in window specification" + spa 
"La función de ventana no está permitida en especificación de ventana" ER_NOT_ALLOWED_WINDOW_FRAME chi "窗框不允许使用'%s'" eng "Window frame is not allowed with '%s'" + spa "El marco de ventana no está permitido con '%s'" ER_NO_ORDER_LIST_IN_WINDOW_SPEC chi "在“%s”的窗口规范中没有订单列表" eng "No order list in window specification for '%s'" + spa "No existe lista de orden en especificación de ventana para '%s'" ER_RANGE_FRAME_NEEDS_SIMPLE_ORDERBY chi "范围型框架需要单个排序键订购逐个条款" eng "RANGE-type frame requires ORDER BY clause with single sort key" + spa "El marco tipo-RANGE requiere de la cláusula ORDER BY con clave única de clasificación" ER_WRONG_TYPE_FOR_ROWS_FRAME chi "行类型框架需要整数" eng "Integer is required for ROWS-type frame" + spa "Se requiere de un entero para marco tipo-ROWS" ER_WRONG_TYPE_FOR_RANGE_FRAME chi "范围类型框架需要数字数据类型" eng "Numeric datatype is required for RANGE-type frame" + spa "Se requiere de tipo de dato numérico para marco tipo-RANGE" ER_FRAME_EXCLUSION_NOT_SUPPORTED chi "帧排除尚不支持" eng "Frame exclusion is not supported yet" + spa "No se soporta aún la exclusión del marco" ER_WINDOW_FUNCTION_DONT_HAVE_FRAME chi "此窗口功能可能没有窗口框架" eng "This window function may not have a window frame" + spa "Esta función de ventana puede no tener un marco de ventana" ER_INVALID_NTILE_ARGUMENT chi "NTILE的参数必须大于0" eng "Argument of NTILE must be greater than 0" + spa "El argumento de NTILE debe de ser mayor de 0" ER_CONSTRAINT_FAILED 23000 chi "CONSTRAINT %`s失败的%`-.192s。%`-.192s" eng "CONSTRAINT %`s failed for %`-.192s.%`-.192s" ger "CONSTRAINT %`s fehlgeschlagen: %`-.192s.%`-.192s" rus "проверка CONSTRAINT %`s для %`-.192s.%`-.192s провалилась" + spa "No se cumple la RESTRICCIÓN %`s para %`-.192s.%`-.192s" ukr "Перевірка CONSTRAINT %`s для %`-.192s.%`-.192s не пройшла" ER_EXPRESSION_IS_TOO_BIG chi "%s条款中的表达太大了" eng "Expression in the %s clause is too big" + spa "La expresión en la cláusula %s es demasiado grande" ER_ERROR_EVALUATING_EXPRESSION chi "获得了一个错误评估存储的表达式%s" eng "Got an error evaluating 
stored expression %s" + spa "Obtenido error evaluando expresión almacenada %s" ER_CALCULATING_DEFAULT_VALUE chi "计算默认值为%`s时出错" eng "Got an error when calculating default value for %`s" + spa "Obtenido un error al calcular valor por defecto para %`s" ER_EXPRESSION_REFERS_TO_UNINIT_FIELD 01000 chi "字段%`-.64s的表达指的是未初始化的字段%`s" eng "Expression for field %`-.64s is referring to uninitialized field %`s" + spa "La expresión para campo %`-.64s se refiere a un campo sin inicializar %`s" ER_PARTITION_DEFAULT_ERROR chi "只允许一个默认分区" eng "Only one DEFAULT partition allowed" + spa "Sólo se permite una partición DEFAULT" ukr "Припустимо мати тільки один DEFAULT розділ" ER_REFERENCED_TRG_DOES_NOT_EXIST chi "给定动作时间和事件类型的引用触发'%s'不存在" eng "Referenced trigger '%s' for the given action time and event type does not exist" + spa "No existe disparador referenciado '%s' para el momento dado de acción y para el tipo de evento" ER_INVALID_DEFAULT_PARAM chi "此类参数使用不支持默认/忽略值" eng "Default/ignore value is not supported for such parameter usage" + spa "El valor por defecto/ignorado no está soportado para tal utilización de parámetro" ukr "Значення за замовчуванням або ігнороване значення не підтримано для цього випадку використання параьетра" ER_BINLOG_NON_SUPPORTED_BULK chi "仅支持基于行的复制,支持批量操作" eng "Only row based replication supported for bulk operations" + spa "Sólo la réplica basada en fila es soportada para operaciones enormes" ER_BINLOG_UNCOMPRESS_ERROR chi "解压压缩的binlog失败" eng "Uncompress the compressed binlog failed" + spa "Ha fallado la descompresión del binlog comprimido" ER_JSON_BAD_CHR chi "坏JSON,参数%d 函数'%s' 位置%d" eng "Broken JSON string in argument %d to function '%s' at position %d" + spa "Cadena JSON rota en argumento %d para función '%s' en posición %d" ER_JSON_NOT_JSON_CHR chi "变量%d出现禁止字符,函数'%s'在%d处" eng "Character disallowed in JSON in argument %d to function '%s' at position %d" + spa "Carácter no permitido en JSON en argumento %d para función '%s' en la posición %d" ER_JSON_EOS 
chi "JSON文本中的意外结尾,参数%d 函数'%s'" eng "Unexpected end of JSON text in argument %d to function '%s'" + spa "Fin inesperado de texto JSON en argumento %d a función '%s'" ER_JSON_SYNTAX chi "JSON文本语法错误 参数%d 函数'%s' 位置%d" eng "Syntax error in JSON text in argument %d to function '%s' at position %d" + spa "Error de sintaxis en texto JSON en argumento %d a función '%s' en la posición %d" ER_JSON_ESCAPING chi "JSON文本中逸出不正确 参数%d 函数'%s' 位置%d" eng "Incorrect escaping in JSON text in argument %d to function '%s' at position %d" + spa "Incorrecta escapatoria en texto JSON en argumento %d a función '%s' en la posicón %d" ER_JSON_DEPTH chi "超过JSON嵌套深度的%d限制 参数%d 函数'%s' 位置%d的" eng "Limit of %d on JSON nested structures depth is reached in argument %d to function '%s' at position %d" + spa "El límite de %d en profundidad de estructuras JSON anidadas se ha alcanzado en argumento %d a función '%s' en la posición %d" ER_JSON_PATH_EOS chi "JSON文本路径错误 参数%d 函数'%s'" eng "Unexpected end of JSON path in argument %d to function '%s'" + spa "Fin inesperado de ruta JSON en argumento %d a función '%s'" ER_JSON_PATH_SYNTAX chi "JSON路径语法错误 参数%d 函数'%s' 位置%d" eng "Syntax error in JSON path in argument %d to function '%s' at position %d" + spa "Error de sintaxis en ruta JSON en argumento %d a función '%s' en la posición %d" ER_JSON_PATH_DEPTH chi "JSON路径深度上限达到:%d 参数%d 函数'%s' 位置%d" eng "Limit of %d on JSON path depth is reached in argument %d to function '%s' at position %d" + spa "El límite de %d en profundidad de ruta JSON se ha alcanzado en argumento %d a función '%s' en la posición %d" ER_JSON_PATH_NO_WILDCARD chi "JSON路径中的通配符不允许 参数%d 函数'%s'" eng "Wildcards in JSON path not allowed in argument %d to function '%s'" + spa "Comodines en ruta JSON no permitidos en argumento %d a función '%s'" ER_JSON_PATH_ARRAY chi "JSON路径应当以排列为终 参数%d 函数'%s'" eng "JSON path should end with an array identifier in argument %d to function '%s'" + spa "La ruta JSON debería de terminar con identificador de arreglo en 
argumento %d a función '%s'" ER_JSON_ONE_OR_ALL chi "函数'%s'的第二个参数必须是'一个'或'全部'" eng "Argument 2 to function '%s' must be "one" or "all"." -ER_UNSUPPORT_COMPRESSED_TEMPORARY_TABLE + spa "El argumento 2 a función '%s' debe de ser "one" o "all"." +ER_UNSUPPORTED_COMPRESSED_TABLE chi "CREATE TEMPORARY TABLE 不允许用ROW_FORMAT=COMPRESSED或KEY_BLOCK_SIZE" - eng "CREATE TEMPORARY TABLE is not allowed with ROW_FORMAT=COMPRESSED or KEY_BLOCK_SIZE." + eng "InnoDB refuses to write tables with ROW_FORMAT=COMPRESSED or KEY_BLOCK_SIZE." + spa "InnoDB rechaza grabar en tablas con ROW_FORMAT=COMPRESSED o KEY_BLOCK_SIZE." ER_GEOJSON_INCORRECT chi "为st_geomfromgeojson函数指定了不正确的GeoJSON格式。" eng "Incorrect GeoJSON format specified for st_geomfromgeojson function." + spa "Especificado formato GeoJSON incorrecto para función st_geomfromgeojson." ER_GEOJSON_TOO_FEW_POINTS chi "Geojson格式不正确 - Linestring指定的太少点。" eng "Incorrect GeoJSON format - too few points for linestring specified." + spa "Formato GeoJSON incorrecto - demasiados pocos puntos especificados para linestring." ER_GEOJSON_NOT_CLOSED chi "Geojson格式不正确 - 多边形未关闭。" eng "Incorrect GeoJSON format - polygon not closed." + spa "Formato GeoJSON incorrecto - polígono no cerrado." ER_JSON_PATH_EMPTY chi "path表达式'$'不允许在参数%d中允许运行'%s'。" eng "Path expression '$' is not allowed in argument %d to function '%s'." + spa "La expresión de ruta '$' no está permitida en argumento %d a función '%s'." 
ER_SLAVE_SAME_ID chi "与此从站相同的server_uuId / server_id的从站已连接到主设备" eng "A slave with the same server_uuid/server_id as this slave has connected to the master" + spa "Un esclavo con el mismo server_uuid/server_id que este esclavo se ha conectado al maestro (master)" ER_FLASHBACK_NOT_SUPPORTED chi "闪回不支持%s%s" eng "Flashback does not support %s %s" -# + spa "Retrospectiva no soporta %s %s" + +# # MyRocks error messages # ER_KEYS_OUT_OF_ORDER chi "钥匙在散装负载期间出现订单" eng "Keys are out order during bulk load" + spa "Claves desordenadas durante carga enorme" ER_OVERLAPPING_KEYS chi "批量负载行重叠现有行" eng "Bulk load rows overlap existing rows" + spa "La carga enorme de filas se superpone con filas existentes" ER_REQUIRE_ROW_BINLOG_FORMAT chi "binlog_format != ROW时无法在master上执行更新" eng "Can't execute updates on master with binlog_format != ROW." + spa "No puedo ejecutar actualizaciones en maestro (master) con binlog_format != ROW." ER_ISOLATION_MODE_NOT_SUPPORTED chi "MyRocks仅支持读取承诺和可重复读取隔离级别。请从当前隔离级别的%s改变" eng "MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level %s" + spa "MyRocks soporta sólo niveles de aislamiento READ COMMITTED y REPEATABLE READ. Por favor, cambie desde nivel de aislamiento actual %s" ER_ON_DUPLICATE_DISABLED chi "当在MyRocks禁用唯一检查时,INSERT,UPDATE, LOAD,使用Clauses更新或替换索引的子句(即,在重复的重复键更新,替换)中,不允许使用。查询:%s" eng "When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. Query: %s" + spa "Al desactivar chequeo de único en MyRocks, las sentencias INSERT, UPDATE, LOAD con cláusulas que actualizan o reemplazan la clave (p.ej. INSERT ON DUPLICATE KEY UPDATE, REPLACE) no se permiten. 
Consulte (query): %s" ER_UPDATES_WITH_CONSISTENT_SNAPSHOT chi "START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT时,无法执行更新。" eng "Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT." + spa "No puedo ejecutar actualizaciones cuando has iniciado una transacción mediante START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT." ER_ROLLBACK_ONLY chi "此交易回滚并无法承诺。只支持支持的操作是滚动,因此将丢弃所有待处理的更改。请重新启动其他事务。" eng "This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction." + spa "Esta transacción se ha retrocedido (rolled back) y no puede ser acometida (commit). La única operación soportada es retroceder (roll back), de tal forma que se descartarán todos los cambios pendientes. Por favor, rearranque otra transacción." ER_ROLLBACK_TO_SAVEPOINT chi "如果修改行,MyRocks目前不支持保存点的回滚。" eng "MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows." + spa "MyRocks en este momento no soporta ROLLBACK TO SAVEPOINT si se están modificando filas." ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT chi "在RockSDB存储引擎中,START TRANSACTION WITH CONSISTENT SNAPSHOT 只支持REPEATABLE READ隔离" eng "Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine." + spa "Sólo el nivel de aislamiento REPEATABLE READ se soporta para START TRANSACTION WITH CONSISTENT SNAPSHOT en Motor de Almacenaje RocksDB." ER_UNSUPPORTED_COLLATION chi "字符串索引列%s的不受支持的归类。%s使用二进制校构(%s)。" eng "Unsupported collation on string indexed column %s.%s Use binary collation (%s)." + spa "Cotejo (collation) no soportado en columna indizada de cadena %s.%s Use cotejo binario (%s)." ER_METADATA_INCONSISTENCY chi "表'%s'不存在,但MyRocks内存存在元数据信息。这是数据不一致的标志。请检查是否存在'%s.frm',并尝试恢复如果它不存在。" eng "Table '%s' does not exist, but metadata information exists inside MyRocks. This is a sign of data inconsistency. 
Please check if '%s.frm' exists, and try to restore it if it does not exist." + spa "La tabla '%s' no existe, pero existe información de metadatos dentro de MyRocks. Esto es una señal de datos inconsistentes. Por favor, revise si existe '%s.frm' e intente restaurarla si no existe." ER_CF_DIFFERENT chi "列族('%s')标志(%d)与现有标志(%d)不同。分配新的CF标志,或者不要更改现有的CF标志。" eng "Column family ('%s') flag (%d) is different from an existing flag (%d). Assign a new CF flag, or do not change existing CF flag." + spa "La familia de columna ('%s') bandera (%d) es diferente de una bandera existente (%d). Asigne una nueva bandera CF o no cambie la bandera CF." ER_RDB_TTL_DURATION_FORMAT chi "Myrocks中的TTL持续时间(%s)必须是无符号非空64位整数。" eng "TTL duration (%s) in MyRocks must be an unsigned non-null 64-bit integer." + spa "La duración de TTL (%s) en MyRocks debe de ser un entero sin signo no-null de 64-bit." ER_RDB_STATUS_GENERAL chi "状态误差%d从RockSDB收到:%s" eng "Status error %d received from RocksDB: %s" + spa "Recibido error de estado %d desde RocksDB: %s" ER_RDB_STATUS_MSG chi "%s,状态误差%d从rocksdb收到:%s" eng "%s, Status error %d received from RocksDB: %s" + spa "%s, Recibido error de estado %d desde RocksDB: %s" ER_RDB_TTL_UNSUPPORTED chi "当表有隐藏的PK时,目前禁用TTL支持。" eng "TTL support is currently disabled when table has a hidden PK." + spa "El soporte TTL está desactivado en este momento cuando la tabla tiene una PK oculta." ER_RDB_TTL_COL_FORMAT chi "Myrocks中的TTL列(%s)必须是一个无符号的非空64位整数,存在于表内,并具有伴随的TTL持续时间。" eng "TTL column (%s) in MyRocks must be an unsigned non-null 64-bit integer, exist inside the table, and have an accompanying ttl duration." + spa "La columna TTL (%s) en MyRocks debe de ser un entero sin signo no-null de 64-bit, debe de existir dentro de la tabla y debe de tener una duración ttl acompañante." 
ER_PER_INDEX_CF_DEPRECATED chi "已弃用每个索引列族选项" eng "The per-index column family option has been deprecated" + spa "La opción de familia de columna por-índice está obsoleta" ER_KEY_CREATE_DURING_ALTER chi "MyRocks在Alter期间创建新的索引定义失败。" eng "MyRocks failed creating new key definitions during alter." + spa "MyRocks no pudo crear nuevas definiciones de clave durante 'alter'." ER_SK_POPULATE_DURING_ALTER chi "MyRocks在Alter期间失败填充次要索引。" eng "MyRocks failed populating secondary key during alter." + spa "MyRocks falló al poblar clave secundaria durante el 'alter'." # MyRocks messages end ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG chi "窗口函数不能用作组函数的参数。" eng "Window functions can not be used as arguments to group functions." + spa "Las funciones de ventana no se pueden usar como argumentos para agrupar funciones." ER_NET_OK_PACKET_TOO_LARGE chi "好的包太大了" eng "OK packet too large" + spa "Paquete OK demasiado grande" ER_GEOJSON_EMPTY_COORDINATES chi "Geojson格式不正确 - 空的'coordinates'阵列。" eng "Incorrect GeoJSON format - empty 'coordinates' array." + spa "Formato GeoJSON incorrecto - arreglo vacío de coordenadas." ER_MYROCKS_CANT_NOPAD_COLLATION chi "MyRocks目前不支持与“No Pad \”属性的归类。" eng "MyRocks doesn't currently support collations with \"No pad\" attribute." + spa "MyRocks no soporta en la actualidad cotejos con atributo \"No pad\"." 
ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION chi "非法参数数据类型%s和%s为操作'%s'" eng "Illegal parameter data types %s and %s for operation '%s'" + spa "Tipos de datos de parámetro ilegales %s y %s para operación '%s'" ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION chi "非法参数数据类型%s用于操作'%s'" eng "Illegal parameter data type %s for operation '%s'" + spa "Tipo de dato %s de parámetro ilegal para operación '%s'" ER_WRONG_PARAMCOUNT_TO_CURSOR 42000 chi "对Cursor的参数计数不正确'%-.192s'" eng "Incorrect parameter count to cursor '%-.192s'" + spa "Contador incorrecto de parámetro para cursor '%-.192s'" rus "Некорректное количество параметров для курсора '%-.192s'" ER_UNKNOWN_STRUCTURED_VARIABLE chi "未知的结构系统变量或行程变量'%-.*s'" eng "Unknown structured system variable or ROW routine variable '%-.*s'" + spa "Variable de sistema con estructura desconocida o variable de rutina ROW '%-.*s'" ER_ROW_VARIABLE_DOES_NOT_HAVE_FIELD chi "行变量'%-.192s'没有字段'%-.192s'" eng "Row variable '%-.192s' does not have a field '%-.192s'" + spa "La variable de fila '%-.192s' no tiene un campo '%-.192s'" ER_END_IDENTIFIER_DOES_NOT_MATCH chi "结束标识符'%-.192s'不匹配'%-.192s'" eng "END identifier '%-.192s' does not match '%-.192s'" + spa "Identificador END '%-.192s' no coincide con '%-.192s'" ER_SEQUENCE_RUN_OUT chi "序列'%-.64s。%-.64s'已经用完了" eng "Sequence '%-.64s.%-.64s' has run out" + spa "La secuencia '%-.64s.%-.64s' se ha agotado" ER_SEQUENCE_INVALID_DATA chi "序列'%-.64s。%-.64s的值冲突" eng "Sequence '%-.64s.%-.64s' has out of range value for options" + spa "La secuencia '%-.64s.%-.64s' tiene un valor fuera de rango para las opciones" ER_SEQUENCE_INVALID_TABLE_STRUCTURE chi "序列'%-.64s。%-.64s'表结构无效(%s)" eng "Sequence '%-.64s.%-.64s' table structure is invalid (%s)" + spa "La estuctura de tabla de secuencia '%-.64s.%-.64s' es inválida (%s)" ER_SEQUENCE_ACCESS_ERROR chi "序列'%-.64s。%-.64s的访问错误" eng "Sequence '%-.64s.%-.64s' access error" + spa "Error en acceso a secuencia '%-.64s.%-.64s'" ER_SEQUENCE_BINLOG_FORMAT eng "Sequences requires 
binlog_format mixed or row" + spa "Las secuencias requieren binlog_format mixto o fila" ER_NOT_SEQUENCE 42S02 chi "'%-.64s。%-.64s'不是序列" eng "'%-.64s.%-.64s' is not a SEQUENCE" + spa "'%-.64s.%-.64s' no es una SECUENCIA" ER_NOT_SEQUENCE2 42S02 chi "'%-.192s'不是序列" eng "'%-.192s' is not a SEQUENCE" + spa "'%-.192s' no es una SECUENCIA" ER_UNKNOWN_SEQUENCES 42S02 chi "未知序列:'%-.300s'" eng "Unknown SEQUENCE: '%-.300s'" + spa "SECUENCIA desconocida: '%-.300s'" ER_UNKNOWN_VIEW 42S02 chi "未知视图:'%-.300s'" eng "Unknown VIEW: '%-.300s'" + spa "VISTA desconocida: '%-.300s'" ER_WRONG_INSERT_INTO_SEQUENCE chi "错误插入序列。人们只能将单表插入到序列对象(与mariadb-dump)中进行。如果要更改序列,请使用更改序列。" eng "Wrong INSERT into a SEQUENCE. One can only do single table INSERT into a sequence object (like with mariadb-dump). If you want to change the SEQUENCE, use ALTER SEQUENCE instead." + spa "INSERT equivocado dentro de SEQUENCE. Uno sólo puede hacer INSERT único en tabla dentro de un objeto de secuencia (como con volcado-mariadb). Si desea cambiar la SECUENCIA, use ALTER SEQUENCE en su lugar." 
ER_SP_STACK_TRACE chi "在%u中以%s" eng "At line %u in %s" + spa "En la línea %u en %s" ER_PACKAGE_ROUTINE_IN_SPEC_NOT_DEFINED_IN_BODY chi "在包规范中声明子程序'%-.192s',但未在包主体中定义" eng "Subroutine '%-.192s' is declared in the package specification but is not defined in the package body" + spa "La subrutina '%-.192s' está declarada en la especificación del paquete pero no está definida en el cuerpo del paquete" ER_PACKAGE_ROUTINE_FORWARD_DECLARATION_NOT_DEFINED chi "子程序'%-.192s'具有前向声明但未定义" eng "Subroutine '%-.192s' has a forward declaration but is not defined" + spa "La subrutina '%-.192s' tiene una declaración adelantada pero no está definida" ER_COMPRESSED_COLUMN_USED_AS_KEY chi "压缩列'%-.192s'不能用于索引规范" eng "Compressed column '%-.192s' can't be used in key specification" + spa "Una columna comprimida '%-.192s' no se puede usar en especificación de clave" ER_UNKNOWN_COMPRESSION_METHOD chi "未知压缩方法:%s" eng "Unknown compression method: %s" + spa "Método de compresión desconocido: %s" ER_WRONG_NUMBER_OF_VALUES_IN_TVC chi "使用的表值构造函数具有不同数量的值" eng "The used table value constructor has a different number of values" + spa "El constructor del valor de tabla usado tiene un número diferente de valores" ER_FIELD_REFERENCE_IN_TVC chi "字段参考'%-.192s'不能用于表值构造函数" eng "Field reference '%-.192s' can't be used in table value constructor" + spa "La referencia a campo '%-.192s' no se puede usar en constructor de valor de tabla" ER_WRONG_TYPE_FOR_PERCENTILE_FUNC chi "%s函数需要数字数据类型" eng "Numeric datatype is required for %s function" + spa "Se requiere de tipo de dato numérico para función %s" ER_ARGUMENT_NOT_CONSTANT chi "%s函数的参数不是分区的常量" eng "Argument to the %s function is not a constant for a partition" + spa "El argumento de la función %s no es una constante para una partición" ER_ARGUMENT_OUT_OF_RANGE chi "%s函数的参数不属于范围[0,1]" eng "Argument to the %s function does not belong to the range [0,1]" + spa "El argumento de la función %s no pertenece al rango [0,1]" ER_WRONG_TYPE_OF_ARGUMENT chi 
"%s函数仅接受可以转换为数字类型的参数" eng "%s function only accepts arguments that can be converted to numerical types" + spa "La función %s sólo acepta argumentos que se puedan convertir a tipos numéricos" ER_NOT_AGGREGATE_FUNCTION chi "在错误的上下文中使用的聚合特定指令(fetch组下一行)" eng "Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context" + spa "Instrucción específica de agregación (FETCH GROUP NEXT ROW) usada en contexto equivocado" ER_INVALID_AGGREGATE_FUNCTION chi "聚合函数丢失的聚合特定指令(fetch组下一行)" eng "Aggregate specific instruction(FETCH GROUP NEXT ROW) missing from the aggregate function" + spa "Falta instrucción específica de agregación (FETCH GROUP NEXT ROW) de la función de agregación" ER_INVALID_VALUE_TO_LIMIT chi "限制仅接受整数值" eng "Limit only accepts integer values" + spa "El límite sólo acepta valores enteros" ER_INVISIBLE_NOT_NULL_WITHOUT_DEFAULT chi "隐形列%`s必须具有默认值" eng "Invisible column %`s must have a default value" + spa "Una columna invisible %`s debe de tener valor por defecto" + + # MariaDB error numbers related to System Versioning ER_UPDATE_INFO_WITH_SYSTEM_VERSIONING chi "匹配的行:%ld已更改:%ld插入:%ld警告:%ld" eng "Rows matched: %ld Changed: %ld Inserted: %ld Warnings: %ld" + spa "Filas coincidentes: %ld Cambiadas: %ld Insertadas: %ld Avisos: %ld" ER_VERS_FIELD_WRONG_TYPE chi "%`s必须为系统版本为表%s的类型%`s" eng "%`s must be of type %s for system-versioned table %`s" + spa "%`s debe de ser del tipo %s para tabla versionada del sistema %`s" ER_VERS_ENGINE_UNSUPPORTED chi "Transaction-Precise系统版本控制%`s不受支持" eng "Transaction-precise system versioning for %`s is not supported" + spa "No se soporta versionado de sistema de transacción precisa para %`s" ER_UNUSED_23 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_PARTITION_WRONG_TYPE chi "错误的分区类型,预期类型:%`s" eng "Wrong partitioning type, expected type: %`s" + spa "Tipo de partición equivocada, tipo esperado: %`s" WARN_VERS_PART_FULL chi "版本化表%`s.%`s:partition%`s已满,添加更多历史分区(out of %s)" eng "Versioned 
table %`s.%`s: last HISTORY partition (%`s) is out of %s, need more HISTORY partitions" + spa "Tabla versionada %`s.%`s: última partición HISTORY (%`s) fuera de %s, necesita de más particiones HISTORY" WARN_VERS_PARAMETERS chi "也许缺少参数:%s" eng "Maybe missing parameters: %s" + spa "Parámetros que quizás faltan: %s" ER_VERS_DROP_PARTITION_INTERVAL chi "只能在旋转间隔时丢弃最旧的分区" eng "Can only drop oldest partitions when rotating by INTERVAL" + spa "Sólo se pueden eliminar viejas particiones al rotar mediante INTERVAL" ER_UNUSED_25 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" + WARN_VERS_PART_NON_HISTORICAL chi "分区%`s包含非历史数据" eng "Partition %`s contains non-historical data" + spa "La partición %`s contiene datos no históricos" ER_VERS_ALTER_NOT_ALLOWED chi "系统版本为%`s.%`s不允许。更改@@system_versioning_alter_history用ALTER。" eng "Not allowed for system-versioned %`s.%`s. Change @@system_versioning_alter_history to proceed with ALTER." + spa "No permitido para versionado del sistema %`s.%`s. Cambie @@system_versioning_alter_history para proceder con ALTER." ER_VERS_ALTER_ENGINE_PROHIBITED chi "不允许系统版本为%`s.%`s。不支持更改返回/来自本机系统版本传输引擎。" eng "Not allowed for system-versioned %`s.%`s. Change to/from native system versioning engine is not supported." + spa "No permitido para versionado del sistema %`s.%`s. Cambio a/desde motor de versionado nativo no soportado." 
ER_VERS_RANGE_PROHIBITED chi "不允许使用SYSTEM_TIME范围选择器" eng "SYSTEM_TIME range selector is not allowed" + spa "Selector de rango SYSTEM_TIME no permitido" ER_CONFLICTING_FOR_SYSTEM_TIME chi "与递归的System_time子句相冲突" eng "Conflicting FOR SYSTEM_TIME clauses in WITH RECURSIVE" + spa "Cláusulas conflictivas FOR SYSTEM_TIME en WITH RECURSIVE" ER_VERS_TABLE_MUST_HAVE_COLUMNS chi "表%`s必须至少有一个版本后的列" eng "Table %`s must have at least one versioned column" + spa "La tabla %`s debe de tener al menos una columna versionada" ER_VERS_NOT_VERSIONED chi "表%`s不是系统版本的" eng "Table %`s is not system-versioned" + spa "La tabla %`s no es versionada del sistema" ER_MISSING chi "%`s的错误参数:缺少'%s'" eng "Wrong parameters for %`s: missing '%s'" + spa "Parámetros equivocados para %`s: falta '%s'" ER_VERS_PERIOD_COLUMNS chi "system_time的时期必须使用列%`s和%`s" eng "PERIOD FOR SYSTEM_TIME must use columns %`s and %`s" + spa "PERIOD FOR SYSTEM_TIME debe de usar columnas %`s y %`s" ER_PART_WRONG_VALUE chi "用于分区%`s的错误参数:'%s'的错误值" eng "Wrong parameters for partitioned %`s: wrong value for '%s'" + spa "Parámetros equivocados para particionado %`s: valor equivocado para '%s'" ER_VERS_WRONG_PARTS chi "%`s的错误分区:必须至少有一个HISTORY,只能有一个CURRENT" eng "Wrong partitions for %`s: must have at least one HISTORY and exactly one last CURRENT" + spa "Particiones equivocadas para %`s: debe de tener al menos una HISTORY y exactamente un último CURRENT" ER_VERS_NO_TRX_ID chi "TRX_ID%llu在`mysql.transaction_registry`中找不到" eng "TRX_ID %llu not found in `mysql.transaction_registry`" + spa "TRX_ID %llu no hallado en `mysql.transaction_registry`" ER_VERS_ALTER_SYSTEM_FIELD chi "无法更改系统版本配置字段%`s" eng "Can not change system versioning field %`s" + spa "No puedo cambiar campo de versionado de sistema %`s" ER_DROP_VERSIONING_SYSTEM_TIME_PARTITION chi "无法删除由SYSTEM_TIME分区的表%`s的系统版本" eng "Can not DROP SYSTEM VERSIONING for table %`s partitioned BY SYSTEM_TIME" + spa "No puedo DROP SYSTEM VERSIONING para la tabla %`s particionada BY SYSTEM_TIME" 
ER_VERS_DB_NOT_SUPPORTED chi "不支持%`s数据库中的系统版本化表" eng "System-versioned tables in the %`s database are not supported" + spa "No se soportan las tablas versionadas del sistema en la base de datos %`s" ER_VERS_TRT_IS_DISABLED chi "事务注册表已禁用" eng "Transaction registry is disabled" + spa "El registro de transaciones está desactivado" ER_VERS_DUPLICATE_ROW_START_END chi "重复行%s列%`s" eng "Duplicate ROW %s column %`s" + spa "Duplicada FILA %s columna %`s" ER_VERS_ALREADY_VERSIONED chi "表%`s已经是系统版本的" eng "Table %`s is already system-versioned" + spa "La tabla %`s ya es versionada del sistema" ER_UNUSED_24 chi "你永远不应该看到它" eng "You should never see it" + spa "Nunca debería vd de ver esto" ER_VERS_NOT_SUPPORTED chi "系统版本的表不支持%s" eng "System-versioned tables do not support %s" + spa "Las tablas versionadas del sistema no soportan %s" ER_VERS_TRX_PART_HISTORIC_ROW_NOT_SUPPORTED chi "事务 - 精确的系统 - 版本的表不支持按行开始或行末端分区" eng "Transaction-precise system-versioned tables do not support partitioning by ROW START or ROW END" + spa "Las tablas versionadas del sistemas de transacción precisa no soportan particionado mediante ROW START o ROW END" ER_INDEX_FILE_FULL chi "表'%-.192s'的索引文件已满" eng "The index file for table '%-.192s' is full" + spa "El fichero/archivo índice para la tabla '%-.192s' está lleno" ER_UPDATED_COLUMN_ONLY_ONCE chi "列%`s.%`s在单个更新语句中不能更换一次" eng "The column %`s.%`s cannot be changed more than once in a single UPDATE statement" + spa "La columna %`s.%`s no se pude cambiar más de ua vez en una sentencia UPDATE única" ER_EMPTY_ROW_IN_TVC chi "在此上下文中,表值构造函数不允许在没有元素的行" eng "Row with no elements is not allowed in table value constructor in this context" + spa "Fila sin elementos no se permite en constructor de valor de tabla en este contexto" ER_VERS_QUERY_IN_PARTITION chi "表%`s的SYSTEM_TIME分区不支持历史查询" eng "SYSTEM_TIME partitions in table %`s does not support historical query" + spa "Las particiones SYSTEM_TIME en la tabla %`s no soportan consulta (query) histórica" 
ER_KEY_DOESNT_SUPPORT chi "%s索引%`s不支持此操作" eng "%s index %`s does not support this operation" + spa "%s índice %`s no soporta esta operación" ER_ALTER_OPERATION_TABLE_OPTIONS_NEED_REBUILD chi "更改表选项需要将要重建的表格重建" eng "Changing table options requires the table to be rebuilt" + spa "Cambiar las opciones de tabla requiere que la tabla sea reconstruida" ER_BACKUP_LOCK_IS_ACTIVE chi "由于您在运行BACKUP STAGE,无法执行命令" eng "Can't execute the command as you have a BACKUP STAGE active" + spa "No puedo ejecutar el comando cuando vd tiene activo un BACKUP STAGE" ER_BACKUP_NOT_RUNNING chi "您必须启动备份“备份阶段开始”" eng "You must start backup with \"BACKUP STAGE START\"" + spa "Vd debe de arracar respaldo mediante \"BACKUP STAGE START\"" ER_BACKUP_WRONG_STAGE chi "备份阶段'%s'相同或在当前备份阶段'%s'之前" eng "Backup stage '%s' is same or before current backup stage '%s'" + spa "La fase de respaldo '%s' es la misma o anterior a la fase de respaldo actual '%s'" ER_BACKUP_STAGE_FAILED chi "备份阶段'%s'失败" eng "Backup stage '%s' failed" + spa "La fase de respaldo '%s' ha fallado" ER_BACKUP_UNKNOWN_STAGE chi "未知备份阶段:'%s'。阶段应该是START,FLUSH,BLOCK_DDL,BLOCK_COMIT或END之一" eng "Unknown backup stage: '%s'. Stage should be one of START, FLUSH, BLOCK_DDL, BLOCK_COMMIT or END" + spa "Fase de respaldo desconocida: '%s'. 
La fase debería de ser una de START, FLUSH, BLOCK_DDL, BLOCK_COMMIT o END" ER_USER_IS_BLOCKED chi "由于凭证错误太多,用户被阻止;用'FLUSH PRIVILEGES'解锁" eng "User is blocked because of too many credential errors; unblock with 'FLUSH PRIVILEGES'" + spa "El usuario está bloqueado a causa de demasiados errores de credenciales; desbloquee mediante 'FLUSH PRIVILEGES'" ER_ACCOUNT_HAS_BEEN_LOCKED chi "访问拒绝,此帐户已锁定" eng "Access denied, this account is locked" rum "Acces refuzat, acest cont este blocat" + spa "Acceso denegado, esta cuenta está bloqueada" ER_PERIOD_TEMPORARY_NOT_ALLOWED chi "应用程序时间段表不能临时" eng "Application-time period table cannot be temporary" + spa "Una tabla de período de momento-de-aplicación no puede ser temporal" ER_PERIOD_TYPES_MISMATCH chi "%`s的期间的字段有不同的类型" eng "Fields of PERIOD FOR %`s have different types" + spa "Los campos de PERIOD FOR %`s tienen tipos diferentes" ER_MORE_THAN_ONE_PERIOD chi "无法指定多个应用程序时间段" eng "Cannot specify more than one application-time period" + spa "No se puede especificar más de un período de momento de aplicación" ER_PERIOD_FIELD_WRONG_ATTRIBUTES chi "期间字段%`s不能是%s" eng "Period field %`s cannot be %s" + spa "El campo de período %`s no puede ser %s" ER_PERIOD_NOT_FOUND chi "期间%`s未在表中找到" eng "Period %`s is not found in table" + spa "El período %`s no se ha hallado en la tabla" ER_PERIOD_COLUMNS_UPDATED chi "列%`s在更新集列表中指定的周期%`s中使用" eng "Column %`s used in period %`s specified in update SET list" + spa "La columna %`s usada en período %`s especificado en lista de actualizar SET" ER_PERIOD_CONSTRAINT_DROP chi "无法DROP CONSTRAINT `%s`。使用DROP PERIOD `%s`" eng "Can't DROP CONSTRAINT `%s`. Use DROP PERIOD `%s` for this" + spa "No puedo DROP CONSTRAINT `%s`. 
Use DROP PERIOD `%s` para esto" ER_TOO_LONG_KEYPART 42000 S1009 chi "指定的索引部分太长;最大索引部分长度为 %u 个字节" eng "Specified key part was too long; max key part length is %u bytes" + spa "La parte de clave especificada es demasiado larga; el tamaño máximo de la parte de clave es de %u bytes" ER_TOO_LONG_DATABASE_COMMENT eng "Comment for database '%-.64s' is too long (max = %u)" + spa "El comentario para la base de datos '%-.64s' es demasiado largo (máx = %u)" ER_UNKNOWN_DATA_TYPE eng "Unknown data type: '%-.64s'" + spa "Tipo de datos desconocido: '%-.64s'" ER_UNKNOWN_OPERATOR eng "Operator does not exists: '%-.128s'" + spa "El operador no existe: '%-.128s'" ER_WARN_HISTORY_ROW_START_TIME eng "Table `%s.%s` history row start '%s' is later than row end '%s'" + spa "En la historia de la tabla `%s.%s` el inicio de fila '%s' es posterior al fin de fila '%s'" ER_PART_STARTS_BEYOND_INTERVAL eng "%`s: STARTS is later than query time, first history partition may exceed INTERVAL value" + spa "%`s: STARTS es posterior al momento de consulta (query), la primera partición de historia puede exceder el valor INTERVAL" ER_GALERA_REPLICATION_NOT_SUPPORTED - eng "DDL-statement is forbidden as table storage engine does not support Galera replication" + eng "Galera replication not supported" + spa "La replicación en Galera no está soportada" ER_LOAD_INFILE_CAPABILITY_DISABLED eng "The used command is not allowed because the MariaDB server or client has disabled the local infile capability" rum "Comanda folosită nu este permisă deoarece clientul sau serverul MariaDB a dezactivat această capabilitate" + spa "El comando usado no está permitido porque el servidor MariaDB o el cliente han desactivado la capacidad 'local infile'" ER_NO_SECURE_TRANSPORTS_CONFIGURED eng "No secure transports are configured, unable to set --require_secure_transport=ON" + spa "No se han configurado transportes seguros, imposible poner --require_secure_transport=ON" ER_SLAVE_IGNORED_SHARED_TABLE eng "Slave SQL thread ignored 
the '%s' because table is shared" ger "Slave-SQL-Thread hat die Abfrage '%s' ignoriert" nla "Slave SQL thread negeerde de query '%s'" por "Slave SQL thread ignorado a consulta devido '%s'" - spa "Slave SQL thread ignorado el query '%s'" + spa "Hilo (thread) SQL esclavo ignoró la '%s' porque la tabla está compartida" swe "Slav SQL tråden ignorerade '%s' pga tabellen är delad" ER_NO_AUTOINCREMENT_WITH_UNIQUE eng "AUTO_INCREMENT column %`s cannot be used in the UNIQUE index %`s" + spa "La columna %'s con AUTO_INCREMENT no se puede usar en índice UNIQUE %`s" ER_KEY_CONTAINS_PERIOD_FIELDS eng "Key %`s cannot explicitly include column %`s" + spa "La clave %`s no puede incluir de forma explícita la columna %`s" ER_KEY_CANT_HAVE_WITHOUT_OVERLAPS eng "Key %`s cannot have WITHOUT OVERLAPS" + spa "La clave %`s no puede tener WITHOUT OVERLAPS" ER_NOT_ALLOWED_IN_THIS_CONTEXT eng "'%-.128s' is not allowed in this context" + spa "'%-.128s' no está permitido en este contexto" ER_DATA_WAS_COMMITED_UNDER_ROLLBACK eng "Engine %s does not support rollback. Changes were committed during rollback call" + spa "El motor %s no soporta retroceso (rollback). Los cambios se acometieron (commit) durante la llamada a retroceso (rollback)" +ER_PK_INDEX_CANT_BE_IGNORED + eng "A primary key cannot be marked as IGNORE" + spa "Una clave primaria no se puede marcar como IGNORE" +ER_BINLOG_UNSAFE_SKIP_LOCKED + eng "SKIP LOCKED makes this statement unsafe" + spa "SKIP LOCKED hace que esta sentencia sea no segura" +ER_JSON_TABLE_ERROR_ON_FIELD + eng "Field '%s' can't be set for JSON_TABLE '%s'." + spa "El campo '%s' no se puede poner para JSON_TABLE '%s'." +ER_JSON_TABLE_ALIAS_REQUIRED + eng "Every table function must have an alias." + spa "Cada función de tabla debe de tener un alias." +ER_JSON_TABLE_SCALAR_EXPECTED + eng "Can't store an array or an object in the scalar column '%s' of JSON_TABLE '%s'." + spa "No puedo guardar un arreglo o un objeto en la columna escalar '%s' de JSON_TABLE '%s'." 
+ER_JSON_TABLE_MULTIPLE_MATCHES + eng "Can't store multiple matches of the path in the column '%s' of JSON_TABLE '%s'." + spa "No puedo guardar múltiples coincidencias de la ruta en la columna '%s' de JSON_TABLE '%s'." +ER_WITH_TIES_NEEDS_ORDER + eng "FETCH ... WITH TIES requires ORDER BY clause to be present" + spa "FETCH ... WITH TIES requiere que esté presente la cláusula ORDER BY" +ER_REMOVED_ORPHAN_TRIGGER + eng "Dropped orphan trigger '%-.64s', originally created for table: '%-.192s'" + spa "Eliminado disparador huérfano '%-.64s', creado originálmente para la tabla: '%-.192s'" +ER_STORAGE_ENGINE_DISABLED + eng "Storage engine %s is disabled" + spa "El motor de almacenaje %s está desactivado" diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index 44e31125d49..4ca3c855066 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -25,7 +25,7 @@ #include "my_stacktrace.h" #include <source_revision.h> -#ifdef __WIN__ +#ifdef _WIN32 #include <crtdbg.h> #define SIGNAL_FMT "exception 0x%x" #else @@ -66,9 +66,9 @@ static inline void output_core_info() (int) len, buff); } #ifdef __FreeBSD__ - if ((fd= my_open("/proc/curproc/rlimit", O_RDONLY, MYF(0))) >= 0) + if ((fd= my_open("/proc/curproc/rlimit", O_RDONLY, MYF(MY_NO_REGISTER))) >= 0) #else - if ((fd= my_open("/proc/self/limits", O_RDONLY, MYF(0))) >= 0) + if ((fd= my_open("/proc/self/limits", O_RDONLY, MYF(MY_NO_REGISTER))) >= 0) #endif { my_safe_printf_stderr("Resource Limits:\n"); @@ -79,7 +79,8 @@ static inline void output_core_info() my_close(fd, MYF(0)); } #ifdef __linux__ - if ((fd= my_open("/proc/sys/kernel/core_pattern", O_RDONLY, MYF(0))) >= 0) + if ((fd= my_open("/proc/sys/kernel/core_pattern", O_RDONLY, + MYF(MY_NO_REGISTER))) >= 0) { len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0)); my_safe_printf_stderr("Core pattern: %.*s\n", (int) len, buff); @@ -358,7 +359,7 @@ extern "C" sig_handler handle_fatal_signal(int sig) #endif end: -#ifndef __WIN__ +#ifndef _WIN32 /* Quit, without 
running destructors (etc.) Use a signal, because the parent (systemd) can check that with WIFSIGNALED diff --git a/sql/slave.cc b/sql/slave.cc index 715fa8cd69e..289441d9cab 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -171,7 +171,7 @@ static int safe_connect(THD* thd, MYSQL* mysql, Master_info* mi); static int safe_reconnect(THD*, MYSQL*, Master_info*, bool); static int connect_to_master(THD*, MYSQL*, Master_info*, bool, bool); static Log_event* next_event(rpl_group_info* rgi, ulonglong *event_size); -static int queue_event(Master_info* mi,const char* buf,ulong event_len); +static int queue_event(Master_info *mi,const uchar *buf, ulong event_len); static int terminate_slave_thread(THD *, mysql_mutex_t *, mysql_cond_t *, volatile uint *, bool); static bool check_io_slave_killed(Master_info *mi, const char *info); @@ -315,9 +315,11 @@ build_gtid_pos_create_query(THD *thd, String *query, LEX_CSTRING *engine_name) { bool err= false; - err|= query->append(gtid_pos_table_definition1); + err|= query->append(gtid_pos_table_definition1, + sizeof(gtid_pos_table_definition1)-1); err|= append_identifier(thd, query, table_name); - err|= query->append(gtid_pos_table_definition2); + err|= query->append(gtid_pos_table_definition2, + sizeof(gtid_pos_table_definition2)-1); err|= append_identifier(thd, query, engine_name); return err; } @@ -348,8 +350,7 @@ gtid_pos_table_creation(THD *thd, plugin_ref engine, LEX_CSTRING *table_name) err= parser_state.init(thd, thd->query(), thd->query_length()); if (err) goto end; - mysql_parse(thd, thd->query(), thd->query_length(), &parser_state, - FALSE, FALSE); + mysql_parse(thd, thd->query(), thd->query_length(), &parser_state); if (unlikely(thd->is_error())) err= 1; /* The warning is relevant to 10.3 and earlier. 
*/ @@ -1767,7 +1768,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) { errmsg= err_buff2; snprintf(err_buff2, sizeof(err_buff2), - "Master reported unrecognized MySQL version: %s", + "Master reported unrecognized MariaDB version: %s", mysql->server_version); err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, ER_DEFAULT(err_code), err_buff2); @@ -1783,7 +1784,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) case 2: errmsg= err_buff2; snprintf(err_buff2, sizeof(err_buff2), - "Master reported unrecognized MySQL version: %s", + "Master reported unrecognized MariaDB version: %s", mysql->server_version); err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, ER_DEFAULT(err_code), err_buff2); @@ -1956,7 +1957,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) !mi->rli.replicate_same_server_id) { errmsg= "The slave I/O thread stops because master and slave have equal \ -MySQL server ids; these ids must be different for replication to work (or \ +MariaDB server ids; these ids must be different for replication to work (or \ the --replicate-same-server-id option must be used on slave but this does \ not always make sense; please check the manual before using it)."; err_code= ER_SLAVE_FATAL_ERROR; @@ -2029,7 +2030,8 @@ maybe it is a *VERY OLD MASTER*."); (master_res= mysql_store_result(mysql)) && (master_row= mysql_fetch_row(master_res))) { - if (strcmp(master_row[0], global_system_variables.collation_server->name)) + if (strcmp(master_row[0], + global_system_variables.collation_server->coll_name.str)) { errmsg= "The slave I/O thread stops because master and slave have \ different values for the COLLATION_SERVER global variable. The values must \ @@ -2116,7 +2118,7 @@ be equal for the Statement-format replication to work"; /* We use ERROR_LEVEL to get the error logged to file */ mi->report(ERROR_LEVEL, err_code, NULL, - "MySQL master doesn't have a TIME_ZONE variable. 
Note that" + "MariaDB master doesn't have a TIME_ZONE variable. Note that" "if your timezone is not same between master and slave, your " "slave may get wrong data into timestamp columns"); } @@ -2541,15 +2543,20 @@ after_set_capability: char quote_buf[2*sizeof(mi->master_log_name)+1]; char str_buf[28+2*sizeof(mi->master_log_name)+10]; String query(str_buf, sizeof(str_buf), system_charset_info); + size_t quote_length; + my_bool overflow; query.length(0); - query.append("SELECT binlog_gtid_pos('"); - escape_quotes_for_mysql(&my_charset_bin, quote_buf, sizeof(quote_buf), - mi->master_log_name, strlen(mi->master_log_name)); - query.append(quote_buf); - query.append("',"); + query.append(STRING_WITH_LEN("SELECT binlog_gtid_pos('")); + quote_length= escape_quotes_for_mysql(&my_charset_bin, quote_buf, + sizeof(quote_buf), + mi->master_log_name, + strlen(mi->master_log_name), + &overflow); + query.append(quote_buf, quote_length); + query.append(STRING_WITH_LEN("',")); query.append_ulonglong(mi->master_log_pos); - query.append(")"); + query.append(')'); if (!mysql_real_query(mysql, query.c_ptr_safe(), query.length()) && (master_res= mysql_store_result(mysql)) && @@ -3115,7 +3122,20 @@ void show_master_info_get_fields(THD *thd, List<Item> *field_list, } /* Text for Slave_IO_Running */ -static const char *slave_running[]= { "No", "Connecting", "Preparing", "Yes" }; +static const LEX_CSTRING slave_running[]= +{ + { STRING_WITH_LEN("No") }, + { STRING_WITH_LEN("Connecting") }, + { STRING_WITH_LEN("Preparing") }, + { STRING_WITH_LEN("Yes") } +}; + +static const LEX_CSTRING msg_yes= { STRING_WITH_LEN("Yes") }; +static const LEX_CSTRING msg_no= { STRING_WITH_LEN("No") }; +#ifndef HAVE_OPENSSL +static const LEX_CSTRING msg_ignored= { STRING_WITH_LEN("Ignored") }; +#endif + static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, String *gtid_pos) @@ -3129,6 +3149,7 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, Protocol 
*protocol= thd->protocol; Rpl_filter *rpl_filter= mi->rpl_filter; StringBuffer<256> tmp; + const char *msg; protocol->prepare_for_resend(); @@ -3146,11 +3167,13 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, Show what the sql driver replication thread is doing This is only meaningful if there is only one slave thread. */ - protocol->store(mi->rli.sql_driver_thd ? - mi->rli.sql_driver_thd->get_proc_info() : "", - &my_charset_bin); + msg= (mi->rli.sql_driver_thd ? + mi->rli.sql_driver_thd->get_proc_info() : ""); + protocol->store_string_or_null(msg, &my_charset_bin); } - protocol->store(mi->io_thd ? mi->io_thd->get_proc_info() : "", &my_charset_bin); + msg= mi->io_thd ? mi->io_thd->get_proc_info() : ""; + protocol->store_string_or_null(msg, &my_charset_bin); + mysql_mutex_unlock(&mi->run_lock); mysql_mutex_lock(&mi->data_lock); @@ -3159,19 +3182,22 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, mysql_mutex_lock(&mi->err_lock); /* err_lock is to protect mi->rli.last_error() */ mysql_mutex_lock(&mi->rli.err_lock); - protocol->store(mi->host, &my_charset_bin); - protocol->store(mi->user, &my_charset_bin); + protocol->store_string_or_null(mi->host, &my_charset_bin); + protocol->store_string_or_null(mi->user, &my_charset_bin); protocol->store((uint32) mi->port); protocol->store((uint32) mi->connect_retry); - protocol->store(mi->master_log_name, &my_charset_bin); - protocol->store((ulonglong) mi->master_log_pos); - protocol->store(mi->rli.group_relay_log_name + - dirname_length(mi->rli.group_relay_log_name), + protocol->store(mi->master_log_name, strlen(mi->master_log_name), &my_charset_bin); + protocol->store((ulonglong) mi->master_log_pos); + msg= (mi->rli.group_relay_log_name + + dirname_length(mi->rli.group_relay_log_name)); + protocol->store(msg, strlen(msg), &my_charset_bin); protocol->store((ulonglong) mi->rli.group_relay_log_pos); - protocol->store(mi->rli.group_master_log_name, &my_charset_bin); - 
protocol->store(slave_running[mi->slave_running], &my_charset_bin); - protocol->store(mi->rli.slave_running ? "Yes":"No", &my_charset_bin); + protocol->store(mi->rli.group_master_log_name, + strlen(mi->rli.group_master_log_name), + &my_charset_bin); + protocol->store(&slave_running[mi->slave_running], &my_charset_bin); + protocol->store(mi->rli.slave_running ? &msg_yes : &msg_no, &my_charset_bin); protocol->store(rpl_filter->get_do_db()); protocol->store(rpl_filter->get_ignore_db()); @@ -3185,29 +3211,30 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, protocol->store(&tmp); protocol->store(mi->rli.last_error().number); - protocol->store(mi->rli.last_error().message, &my_charset_bin); + protocol->store_string_or_null(mi->rli.last_error().message, + &my_charset_bin); protocol->store((uint32) mi->rli.slave_skip_counter); protocol->store((ulonglong) mi->rli.group_master_log_pos); protocol->store((ulonglong) mi->rli.log_space_total); - protocol->store( - mi->rli.until_condition==Relay_log_info::UNTIL_NONE ? "None": - ( mi->rli.until_condition==Relay_log_info::UNTIL_MASTER_POS? "Master": - ( mi->rli.until_condition==Relay_log_info::UNTIL_RELAY_POS? "Relay": - "Gtid")), &my_charset_bin); - protocol->store(mi->rli.until_log_name, &my_charset_bin); + msg= (mi->rli.until_condition==Relay_log_info::UNTIL_NONE ? "None" : + (mi->rli.until_condition==Relay_log_info::UNTIL_MASTER_POS? "Master": + (mi->rli.until_condition==Relay_log_info::UNTIL_RELAY_POS? "Relay": + "Gtid"))); + protocol->store(msg, strlen(msg), &my_charset_bin); + protocol->store_string_or_null(mi->rli.until_log_name, &my_charset_bin); protocol->store((ulonglong) mi->rli.until_log_pos); #ifdef HAVE_OPENSSL - protocol->store(mi->ssl? "Yes":"No", &my_charset_bin); + protocol->store(mi->ssl ? &msg_yes : &msg_no, &my_charset_bin); #else - protocol->store(mi->ssl? "Ignored":"No", &my_charset_bin); + protocol->store(mi->ssl ? 
&msg_ignored: &msg_no, &my_charset_bin); #endif - protocol->store(mi->ssl_ca, &my_charset_bin); - protocol->store(mi->ssl_capath, &my_charset_bin); - protocol->store(mi->ssl_cert, &my_charset_bin); - protocol->store(mi->ssl_cipher, &my_charset_bin); - protocol->store(mi->ssl_key, &my_charset_bin); + protocol->store_string_or_null(mi->ssl_ca, &my_charset_bin); + protocol->store_string_or_null(mi->ssl_capath, &my_charset_bin); + protocol->store_string_or_null(mi->ssl_cert, &my_charset_bin); + protocol->store_string_or_null(mi->ssl_cipher, &my_charset_bin); + protocol->store_string_or_null(mi->ssl_key, &my_charset_bin); /* Seconds_Behind_Master: if SQL thread is running and I/O thread is @@ -3262,27 +3289,30 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, { protocol->store_null(); } - protocol->store(mi->ssl_verify_server_cert? "Yes":"No", &my_charset_bin); + protocol->store(mi->ssl_verify_server_cert? &msg_yes : &msg_no, + &my_charset_bin); // Last_IO_Errno protocol->store(mi->last_error().number); // Last_IO_Error - protocol->store(mi->last_error().message, &my_charset_bin); + protocol->store_string_or_null(mi->last_error().message, &my_charset_bin); // Last_SQL_Errno protocol->store(mi->rli.last_error().number); // Last_SQL_Error - protocol->store(mi->rli.last_error().message, &my_charset_bin); + protocol->store_string_or_null(mi->rli.last_error().message, + &my_charset_bin); // Replicate_Ignore_Server_Ids prot_store_ids(thd, &mi->ignore_server_ids); // Master_Server_id protocol->store((uint32) mi->master_id); // SQL_Delay // Master_Ssl_Crl - protocol->store(mi->ssl_crl, &my_charset_bin); + protocol->store_string_or_null(mi->ssl_crl, &my_charset_bin); // Master_Ssl_Crlpath - protocol->store(mi->ssl_crlpath, &my_charset_bin); + protocol->store_string_or_null(mi->ssl_crlpath, &my_charset_bin); // Using_Gtid - protocol->store(mi->using_gtid_astext(mi->using_gtid), &my_charset_bin); + 
protocol->store_string_or_null(mi->using_gtid_astext(mi->using_gtid), + &my_charset_bin); // Gtid_IO_Pos { mi->gtid_current_pos.to_string(&tmp); @@ -3313,7 +3343,7 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, else protocol->store_null(); // Slave_SQL_Running_State - protocol->store(slave_sql_running_state, &my_charset_bin); + protocol->store_string_or_null(slave_sql_running_state, &my_charset_bin); protocol->store(mi->total_ddl_groups); protocol->store(mi->total_non_trans_groups); @@ -4302,6 +4332,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, DBUG_RETURN(1); } + rli->last_seen_gtid= serial_rgi->current_gtid; + rli->last_trans_retry_count= serial_rgi->trans_retries; if (opt_gtid_ignore_duplicates && rli->mi->using_gtid != Master_info::USE_GTID_NO) { @@ -4454,7 +4486,7 @@ Could not parse relay log event entry. The possible reasons are: the master's \ binary log is corrupted (you can check this by running 'mysqlbinlog' on the \ binary log), the slave's relay log is corrupted (you can check this by running \ 'mysqlbinlog' on the relay log), a network problem, or a bug in the master's \ -or slave's MySQL code. If you want to check the master's binary log or slave's \ +or slave's MariaDB code. 
If you want to check the master's binary log or slave's \ relay log, you will be able to know their names by issuing 'SHOW SLAVE STATUS' \ on this slave.\ "); @@ -4777,6 +4809,8 @@ connected: thd->set_command(COM_SLAVE_IO); while (!io_slave_killed(mi)) { + const uchar *event_buf; + THD_STAGE_INFO(thd, stage_requesting_binlog_dump); if (request_dump(thd, mysql, mi, &suppress_warnings)) { @@ -4788,8 +4822,6 @@ connected: goto connected; } - const char *event_buf; - mi->slave_running= MYSQL_SLAVE_RUN_READING; DBUG_ASSERT(mi->last_error().number == 0); ulonglong lastchecktime = my_hrtime().val; @@ -4841,10 +4873,11 @@ Stopping slave I/O thread due to out-of-memory error from master"); retry_count=0; // ok event, reset retry counter THD_STAGE_INFO(thd, stage_queueing_master_event_to_the_relay_log); - event_buf= (const char*)mysql->net.read_pos + 1; + event_buf= mysql->net.read_pos + 1; mi->semi_ack= 0; if (repl_semisync_slave. - slave_read_sync_header((const char*)mysql->net.read_pos + 1, event_len, + slave_read_sync_header((const uchar*) mysql->net.read_pos + 1, + event_len, &(mi->semi_ack), &event_buf, &event_len)) { mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, @@ -4966,20 +4999,17 @@ log space"); err: // print the current replication position if (mi->using_gtid == Master_info::USE_GTID_NO) - { sql_print_information("Slave I/O thread exiting, read up to log '%s', " - "position %llu", IO_RPL_LOG_NAME, mi->master_log_pos); - sql_print_information("master was %s:%d", mi->host, mi->port); - } + "position %llu, master %s:%d", IO_RPL_LOG_NAME, mi->master_log_pos, + mi->host, mi->port); else { StringBuffer<100> tmp; mi->gtid_current_pos.to_string(&tmp); sql_print_information("Slave I/O thread exiting, read up to log '%s', " - "position %llu; GTID position %s", + "position %llu; GTID position %s, master %s:%d", IO_RPL_LOG_NAME, mi->master_log_pos, - tmp.c_ptr_safe()); - sql_print_information("master was %s:%d", mi->host, mi->port); + tmp.c_ptr_safe(), mi->host, 
mi->port); } repl_semisync_slave.slave_stop(mi); thd->reset_query(); @@ -5021,6 +5051,7 @@ err_during_init: mi->abort_slave= 0; mi->slave_running= MYSQL_SLAVE_NOT_RUN; mi->io_thd= 0; + mi->do_accept_own_server_id= false; /* Note: the order of the two following calls (first broadcast, then unlock) is important. Otherwise a killer_thread can execute between the calls and @@ -5325,6 +5356,7 @@ pthread_handler_t handle_slave_sql(void *arg) serial_rgi->gtid_sub_id= 0; serial_rgi->gtid_pending= false; + rli->last_seen_gtid= serial_rgi->current_gtid; if (mi->using_gtid != Master_info::USE_GTID_NO && mi->using_parallel() && rli->restart_gtid_pos.count() > 0) { @@ -5582,9 +5614,9 @@ pthread_handler_t handle_slave_sql(void *arg) tmp.append(STRING_WITH_LEN("'")); } sql_print_information("Slave SQL thread exiting, replication stopped in " - "log '%s' at position %llu%s", RPL_LOG_NAME, - rli->group_master_log_pos, tmp.c_ptr_safe()); - sql_print_information("master was %s:%d", mi->host, mi->port); + "log '%s' at position %llu%s, master: %s:%d", RPL_LOG_NAME, + rli->group_master_log_pos, tmp.c_ptr_safe(), + mi->host, mi->port); } #ifdef WITH_WSREP wsrep_after_command_before_result(thd); @@ -5927,13 +5959,13 @@ static int process_io_rotate(Master_info *mi, Rotate_log_event *rev) Reads a 3.23 event and converts it to the slave's format. This code was copied from MySQL 4.0. 
*/ -static int queue_binlog_ver_1_event(Master_info *mi, const char *buf, - ulong event_len) +static int queue_binlog_ver_1_event(Master_info *mi, const uchar *buf, + ulong event_len) { const char *errmsg = 0; ulong inc_pos; bool ignore_event= 0; - char *tmp_buf = 0; + uchar *tmp_buf = 0; Relay_log_info *rli= &mi->rli; DBUG_ENTER("queue_binlog_ver_1_event"); @@ -5943,8 +5975,8 @@ static int queue_binlog_ver_1_event(Master_info *mi, const char *buf, */ if ((uchar)buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) { - if (unlikely(!(tmp_buf=(char*)my_malloc(key_memory_binlog_ver_1_event, - event_len+1,MYF(MY_WME))))) + if (unlikely(!(tmp_buf= (uchar*) my_malloc(key_memory_binlog_ver_1_event, + event_len+1, MYF(MY_WME))))) { mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Memory allocation failed"); @@ -5960,7 +5992,7 @@ static int queue_binlog_ver_1_event(Master_info *mi, const char *buf, */ tmp_buf[event_len++]=0; int4store(tmp_buf+EVENT_LEN_OFFSET, event_len); - buf = (const char*)tmp_buf; + buf= tmp_buf; } /* This will transform LOAD_EVENT into CREATE_FILE_EVENT, ask the master to @@ -6047,8 +6079,8 @@ static int queue_binlog_ver_1_event(Master_info *mi, const char *buf, Reads a 4.0 event and converts it to the slave's format. This code was copied from queue_binlog_ver_1_event(), with some affordable simplifications. 
*/ -static int queue_binlog_ver_3_event(Master_info *mi, const char *buf, - ulong event_len) +static int queue_binlog_ver_3_event(Master_info *mi, const uchar *buf, + ulong event_len) { const char *errmsg = 0; ulong inc_pos; @@ -6058,7 +6090,7 @@ static int queue_binlog_ver_3_event(Master_info *mi, const char *buf, /* read_log_event() will adjust log_pos to be end_log_pos */ Log_event *ev= - Log_event::read_log_event(buf,event_len, &errmsg, + Log_event::read_log_event(buf, event_len, &errmsg, mi->rli.relay_log.description_event_for_queue, 0); if (unlikely(!ev)) { @@ -6113,13 +6145,11 @@ err: setup with 3.23 master or 4.0 master */ -static int queue_old_event(Master_info *mi, const char *buf, - ulong event_len) +static int queue_old_event(Master_info *mi, const uchar *buf, ulong event_len) { DBUG_ENTER("queue_old_event"); - switch (mi->rli.relay_log.description_event_for_queue->binlog_version) - { + switch (mi->rli.relay_log.description_event_for_queue->binlog_version) { case 1: DBUG_RETURN(queue_binlog_ver_1_event(mi,buf,event_len)); case 3: @@ -6141,7 +6171,7 @@ static int queue_old_event(Master_info *mi, const char *buf, any >=5.0.0 format. */ -static int queue_event(Master_info* mi,const char* buf, ulong event_len) +static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) { int error= 0; StringBuffer<1024> error_msg; @@ -6156,8 +6186,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) rpl_gtid event_gtid; static uint dbug_rows_event_count __attribute__((unused))= 0; bool is_compress_event = false; - char* new_buf = NULL; - char new_buf_arr[4096]; + uchar *new_buf = NULL; + uchar new_buf_arr[4096]; bool is_malloc = false; bool is_rows_event= false; /* @@ -6170,8 +6200,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) mi->checksum_alg_before_fd != BINLOG_CHECKSUM_ALG_UNDEF ? 
mi->checksum_alg_before_fd : mi->rli.relay_log.relay_log_checksum_alg; - char *save_buf= NULL; // needed for checksumming the fake Rotate event - char rot_buf[LOG_EVENT_HEADER_LEN + ROTATE_HEADER_LEN + FN_REFLEN]; + const uchar *save_buf= NULL; // needed for checksumming the fake Rotate event + uchar rot_buf[LOG_EVENT_HEADER_LEN + ROTATE_HEADER_LEN + FN_REFLEN]; DBUG_ASSERT(checksum_alg == BINLOG_CHECKSUM_ALG_OFF || checksum_alg == BINLOG_CHECKSUM_ALG_UNDEF || @@ -6205,9 +6235,9 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) // Emulate the network corruption DBUG_EXECUTE_IF("corrupt_queue_event", - if ((uchar)buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT) + if (buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT) { - char *debug_event_buf_c = (char*) buf; + uchar *debug_event_buf_c= const_cast<uchar*>(buf); int debug_cor_pos = rand() % (event_len - BINLOG_CHECKSUM_LEN); debug_event_buf_c[debug_cor_pos] =~ debug_event_buf_c[debug_cor_pos]; DBUG_PRINT("info", ("Corrupt the event at queue_event: byte on position %d", debug_cor_pos)); @@ -6215,15 +6245,16 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) } ); - if (event_checksum_test((uchar *) buf, event_len, checksum_alg)) + if (event_checksum_test((uchar*) buf, event_len, checksum_alg)) { error= ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE; unlock_data_lock= FALSE; goto err; } + DBUG_ASSERT(((uchar) buf[FLAGS_OFFSET] & LOG_EVENT_ACCEPT_OWN_F) == 0); if (mi->rli.relay_log.description_event_for_queue->binlog_version<4 && - (uchar)buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */) + buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */) DBUG_RETURN(queue_old_event(mi,buf,event_len)); #ifdef ENABLED_DEBUG_SYNC @@ -6247,9 +6278,11 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) dbug_rows_event_count = 0; };); #endif + s_id= uint4korr(buf + SERVER_ID_OFFSET); + mysql_mutex_lock(&mi->data_lock); - 
switch ((uchar)buf[EVENT_TYPE_OFFSET]) { + switch (buf[EVENT_TYPE_OFFSET]) { case STOP_EVENT: /* We needn't write this event to the relay log. Indeed, it just indicates a @@ -6384,7 +6417,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) mi->rli.relay_log.relay_log_checksum_alg); /* the first one */ DBUG_ASSERT(mi->checksum_alg_before_fd != BINLOG_CHECKSUM_ALG_UNDEF); - save_buf= (char *) buf; + save_buf= buf; buf= rot_buf; } else @@ -6404,7 +6437,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) mi->rli.relay_log.relay_log_checksum_alg); /* the first one */ DBUG_ASSERT(mi->checksum_alg_before_fd != BINLOG_CHECKSUM_ALG_UNDEF); - save_buf= (char *) buf; + save_buf= buf; buf= rot_buf; } /* @@ -6489,7 +6522,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) error= ER_SLAVE_HEARTBEAT_FAILURE; error_msg.append(STRING_WITH_LEN("inconsistent heartbeat event content;")); error_msg.append(STRING_WITH_LEN("the event's data: log_file_name ")); - error_msg.append(hb.get_log_ident(), (uint) hb.get_ident_len()); + error_msg.append((char*) hb.get_log_ident(), (uint) hb.get_ident_len()); error_msg.append(STRING_WITH_LEN(" log_pos ")); error_msg.append_ulonglong(hb.log_pos); goto err; @@ -6515,7 +6548,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) error= ER_SLAVE_HEARTBEAT_FAILURE; error_msg.append(STRING_WITH_LEN("heartbeat is not compatible with local info;")); error_msg.append(STRING_WITH_LEN("the event's data: log_file_name ")); - error_msg.append(hb.get_log_ident(), (uint) hb.get_ident_len()); + error_msg.append((char*) hb.get_log_ident(), (uint) hb.get_ident_len()); error_msg.append(STRING_WITH_LEN(" log_pos ")); error_msg.append_ulonglong(hb.log_pos); goto err; @@ -6746,6 +6779,19 @@ dbug_gtid_accept: ++mi->events_queued_since_last_gtid; inc_pos= event_len; + + /* + To compute `true` is normal for this *now* semisync slave server when + it has passed its 
crash-recovery as a former master. + */ + mi->do_accept_own_server_id= + (s_id == global_system_variables.server_id && + rpl_semi_sync_slave_enabled && opt_gtid_strict_mode && + mi->using_gtid != Master_info::USE_GTID_NO && + !mysql_bin_log.check_strict_gtid_sequence(event_gtid.domain_id, + event_gtid.server_id, + event_gtid.seq_no, + true)); // ...} eof else_likely } break; @@ -6757,7 +6803,7 @@ dbug_gtid_accept: if (query_event_uncompress(rli->relay_log.description_event_for_queue, checksum_alg == BINLOG_CHECKSUM_ALG_CRC32, buf, event_len, new_buf_arr, sizeof(new_buf_arr), - &is_malloc, (char **)&new_buf, &event_len)) + &is_malloc, &new_buf, &event_len)) { char llbuf[22]; error = ER_BINLOG_UNCOMPRESS_ERROR; @@ -6780,8 +6826,9 @@ dbug_gtid_accept: { if (row_log_event_uncompress(rli->relay_log.description_event_for_queue, checksum_alg == BINLOG_CHECKSUM_ALG_CRC32, - buf, event_len, new_buf_arr, sizeof(new_buf_arr), - &is_malloc, (char **)&new_buf, &event_len)) + buf, event_len, new_buf_arr, + sizeof(new_buf_arr), + &is_malloc, &new_buf, &event_len)) { char llbuf[22]; error = ER_BINLOG_UNCOMPRESS_ERROR; @@ -6868,7 +6915,8 @@ dbug_gtid_accept: { if ((uchar)buf[EVENT_TYPE_OFFSET] == XID_EVENT || ((uchar)buf[EVENT_TYPE_OFFSET] == QUERY_EVENT && /* QUERY_COMPRESSED_EVENT would never be commmit or rollback */ - Query_log_event::peek_is_commit_rollback(buf, event_len, + Query_log_event::peek_is_commit_rollback(buf, + event_len, checksum_alg))) { error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE; @@ -6971,7 +7019,6 @@ dbug_gtid_accept: */ mysql_mutex_lock(log_lock); - s_id= uint4korr(buf + SERVER_ID_OFFSET); /* Write the event to the relay log, unless we reconnected in the middle of an event group and now need to skip the initial part of the group that @@ -7016,7 +7063,8 @@ dbug_gtid_accept: } else if ((s_id == global_system_variables.server_id && - !mi->rli.replicate_same_server_id) || + !(mi->rli.replicate_same_server_id || + mi->do_accept_own_server_id)) || 
event_that_should_be_ignored(buf) || /* the following conjunction deals with IGNORE_SERVER_IDS, if set @@ -7076,6 +7124,19 @@ dbug_gtid_accept: } else { + if (mi->do_accept_own_server_id) + { + int2store(const_cast<uchar*>(buf + FLAGS_OFFSET), + uint2korr(buf + FLAGS_OFFSET) | LOG_EVENT_ACCEPT_OWN_F); + if (checksum_alg != BINLOG_CHECKSUM_ALG_OFF) + { + ha_checksum crc= 0; + + crc= my_checksum(crc, (const uchar *) buf, + event_len - BINLOG_CHECKSUM_LEN); + int4store(&buf[event_len - BINLOG_CHECKSUM_LEN], crc); + } + } if (likely(!rli->relay_log.write_event_buffer((uchar*)buf, event_len))) { mi->master_log_pos+= inc_pos; @@ -7295,16 +7356,16 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, charset, then set client charset to 'latin1' (default client charset). */ if (is_supported_parser_charset(default_charset_info)) - mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset_info->csname); + mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset_info->cs_name.str); else { sql_print_information("'%s' can not be used as client character set. 
" "'%s' will be used as default client character set " "while connecting to master.", - default_charset_info->csname, - default_client_charset_info->csname); + default_charset_info->cs_name.str, + default_client_charset_info->cs_name.str); mysql_options(mysql, MYSQL_SET_CHARSET_NAME, - default_client_charset_info->csname); + default_client_charset_info->cs_name.str); } /* This one is not strictly needed but we have it here for completeness */ @@ -7451,7 +7512,8 @@ MYSQL *rpl_connect_master(MYSQL *mysql) } #endif - mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset_info->csname); + mysql_options(mysql, MYSQL_SET_CHARSET_NAME, + default_charset_info->cs_name.str); /* This one is not strictly needed but we have it here for completeness */ mysql_options(mysql, MYSQL_SET_CHARSET_DIR, (char *) charsets_dir); @@ -8161,19 +8223,19 @@ bool rpl_master_erroneous_autoinc(THD *thd) } -static bool get_row_event_stmt_end(const char* buf, +static bool get_row_event_stmt_end(const uchar *buf, const Format_description_log_event *fdle) { uint8 const common_header_len= fdle->common_header_len; Log_event_type event_type= (Log_event_type)(uchar)buf[EVENT_TYPE_OFFSET]; uint8 const post_header_len= fdle->post_header_len[event_type-1]; - const char *flag_start= buf + common_header_len; + const uchar *flag_start= buf + common_header_len; /* The term 4 below signifies that master is of 'an intermediate source', see Rows_log_event::Rows_log_event. */ - flag_start += RW_MAPID_OFFSET + ((post_header_len == 6) ? 4 : RW_FLAGS_OFFSET); + flag_start += RW_MAPID_OFFSET + ((post_header_len == 6) ? 4 : RW_FLAGS_OFFSET); return (uint2korr(flag_start) & Rows_log_event::STMT_END_F) != 0; } @@ -8198,8 +8260,8 @@ void Rows_event_tracker::reset() well as the end-of-statement status of the last one. 
*/ -void Rows_event_tracker::update(const char* file_name, my_off_t pos, - const char* buf, +void Rows_event_tracker::update(const char *file_name, my_off_t pos, + const uchar *buf, const Format_description_log_event *fdle) { DBUG_ENTER("Rows_event_tracker::update"); diff --git a/sql/sp.cc b/sql/sp.cc index cbfab0b8ee4..74743347816 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2002, 2018, Oracle and/or its affiliates. - Copyright (c) 2009, 2020, MariaDB + Copyright (c) 2009, 2022, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -122,12 +122,12 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] = { { STRING_WITH_LEN("db") }, { STRING_WITH_LEN("char(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("name") }, { STRING_WITH_LEN("char(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("type") }, @@ -137,7 +137,7 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] = { { STRING_WITH_LEN("specific_name") }, { STRING_WITH_LEN("char(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("language") }, @@ -177,8 +177,8 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] = }, { { STRING_WITH_LEN("definer") }, - { STRING_WITH_LEN("char(") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("varchar(") }, + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("created") }, @@ -208,22 +208,22 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] = { { STRING_WITH_LEN("comment") }, { STRING_WITH_LEN("text") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("character_set_client") }, { STRING_WITH_LEN("char(32)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("collation_connection") }, { 
STRING_WITH_LEN("char(32)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("db_collation") }, { STRING_WITH_LEN("char(32)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("body_utf8") }, @@ -285,12 +285,14 @@ private: Stored_routine_creation_ctx implementation. **************************************************************************/ -bool load_charset(MEM_ROOT *mem_root, +bool load_charset(THD *thd, + MEM_ROOT *mem_root, Field *field, CHARSET_INFO *dflt_cs, CHARSET_INFO **cs) { LEX_CSTRING cs_name; + myf utf8_flag= thd->get_utf8_flag(); if (field->val_str_nopad(mem_root, &cs_name)) { @@ -299,7 +301,7 @@ bool load_charset(MEM_ROOT *mem_root, } DBUG_ASSERT(cs_name.str[cs_name.length] == 0); - *cs= get_charset_by_csname(cs_name.str, MY_CS_PRIMARY, MYF(0)); + *cs= get_charset_by_csname(cs_name.str, MY_CS_PRIMARY, MYF(utf8_flag)); if (*cs == NULL) { @@ -312,7 +314,7 @@ bool load_charset(MEM_ROOT *mem_root, /*************************************************************************/ -bool load_collation(MEM_ROOT *mem_root, +bool load_collation(THD *thd, MEM_ROOT *mem_root, Field *field, CHARSET_INFO *dflt_cl, CHARSET_INFO **cl) @@ -324,9 +326,10 @@ bool load_collation(MEM_ROOT *mem_root, *cl= dflt_cl; return TRUE; } + myf utf8_flag= thd->get_utf8_flag(); DBUG_ASSERT(cl_name.str[cl_name.length] == 0); - *cl= get_charset_by_name(cl_name.str, MYF(0)); + *cl= get_charset_by_name(cl_name.str, MYF(utf8_flag)); if (*cl == NULL) { @@ -355,7 +358,7 @@ Stored_routine_creation_ctx::load_from_db(THD *thd, bool invalid_creation_ctx= FALSE; - if (load_charset(thd->mem_root, + if (load_charset(thd, thd->mem_root, proc_tbl->field[MYSQL_PROC_FIELD_CHARACTER_SET_CLIENT], thd->variables.character_set_client, &client_cs)) @@ -368,7 +371,7 @@ Stored_routine_creation_ctx::load_from_db(THD *thd, invalid_creation_ctx= TRUE; } - if (load_collation(thd->mem_root, + if (load_collation(thd,thd->mem_root, 
proc_tbl->field[MYSQL_PROC_FIELD_COLLATION_CONNECTION], thd->variables.collation_connection, &connection_cl)) @@ -381,7 +384,7 @@ Stored_routine_creation_ctx::load_from_db(THD *thd, invalid_creation_ctx= TRUE; } - if (load_collation(thd->mem_root, + if (load_collation(thd,thd->mem_root, proc_tbl->field[MYSQL_PROC_FIELD_DB_COLLATION], NULL, &db_cl)) @@ -957,6 +960,7 @@ Sp_handler::db_load_routine(THD *thd, const Database_qualified_name *name, newlex.current_select= NULL; defstr.set_charset(creation_ctx->get_client_cs()); + defstr.set_thread_specific(); /* We have to add DEFINER clause and provide proper routine characterstics in @@ -1073,11 +1077,11 @@ sp_returns_type(THD *thd, String &result, const sp_head *sp) if (field->has_charset()) { result.append(STRING_WITH_LEN(" CHARSET ")); - result.append(field->charset()->csname); + result.append(field->charset()->cs_name); if (Charset(field->charset()).can_have_collate_clause()) { result.append(STRING_WITH_LEN(" COLLATE ")); - result.append(field->charset()->name); + result.append(field->charset()->coll_name); } } @@ -1201,10 +1205,9 @@ Sp_handler::sp_create_routine(THD *thd, const sp_head *sp) const TABLE *table; char definer_buf[USER_HOST_BUFF_SIZE]; LEX_CSTRING definer; - sql_mode_t saved_mode= thd->variables.sql_mode; - + sql_mode_t org_sql_mode= thd->variables.sql_mode; + enum_check_fields org_count_cuted_fields= thd->count_cuted_fields; CHARSET_INFO *db_cs= get_default_db_collation(thd, sp->m_db.str); - bool store_failed= FALSE; DBUG_ENTER("sp_create_routine"); DBUG_PRINT("enter", ("type: %s name: %.*s", @@ -1237,8 +1240,7 @@ Sp_handler::sp_create_routine(THD *thd, const sp_head *sp) const /* Reset sql_mode during data dictionary operations. 
*/ thd->variables.sql_mode= 0; - - Check_level_instant_set check_level_save(thd, CHECK_FIELD_WARN); + thd->count_cuted_fields= CHECK_FIELD_WARN; if (!(table= open_proc_table_for_update(thd))) { @@ -1386,7 +1388,7 @@ Sp_handler::sp_create_routine(THD *thd, const sp_head *sp) const store_failed= store_failed || table->field[MYSQL_PROC_FIELD_SQL_MODE]-> - store((longlong)saved_mode, TRUE); + store((longlong) org_sql_mode, TRUE); if (sp->comment().str) { @@ -1423,22 +1425,19 @@ Sp_handler::sp_create_routine(THD *thd, const sp_head *sp) const table->field[MYSQL_PROC_FIELD_CHARACTER_SET_CLIENT]->set_notnull(); store_failed= store_failed || - table->field[MYSQL_PROC_FIELD_CHARACTER_SET_CLIENT]->store( - thd->charset()->csname, - strlen(thd->charset()->csname), - system_charset_info); + table->field[MYSQL_PROC_FIELD_CHARACTER_SET_CLIENT]-> + store(&thd->charset()->cs_name, system_charset_info); table->field[MYSQL_PROC_FIELD_COLLATION_CONNECTION]->set_notnull(); store_failed= store_failed || - table->field[MYSQL_PROC_FIELD_COLLATION_CONNECTION]->store( - thd->variables.collation_connection->name, - strlen(thd->variables.collation_connection->name), - system_charset_info); + table->field[MYSQL_PROC_FIELD_COLLATION_CONNECTION]-> + store(&thd->variables.collation_connection->coll_name, + system_charset_info); table->field[MYSQL_PROC_FIELD_DB_COLLATION]->set_notnull(); store_failed= store_failed || - table->field[MYSQL_PROC_FIELD_DB_COLLATION]->store( - db_cs->name, strlen(db_cs->name), system_charset_info); + table->field[MYSQL_PROC_FIELD_DB_COLLATION]-> + store(&db_cs->coll_name, system_charset_info); table->field[MYSQL_PROC_FIELD_BODY_UTF8]->set_notnull(); store_failed= store_failed || @@ -1477,13 +1476,13 @@ log: sp->chistics(), thd->lex->definer[0], thd->lex->create_info, - saved_mode)) + org_sql_mode)) { my_error(ER_OUT_OF_RESOURCES, MYF(0)); goto done; } /* restore sql_mode when binloging */ - thd->variables.sql_mode= saved_mode; + thd->variables.sql_mode= org_sql_mode; 
/* Such a statement can always go directly to binlog, no trans cache */ if (thd->binlog_query(THD::STMT_QUERY_TYPE, log_query.ptr(), log_query.length(), @@ -1492,12 +1491,12 @@ log: my_error(ER_ERROR_ON_WRITE, MYF(0), "binary log", -1); goto done; } - thd->variables.sql_mode= 0; } ret= FALSE; done: - thd->variables.sql_mode= saved_mode; + thd->variables.sql_mode= org_sql_mode; + thd->count_cuted_fields= org_count_cuted_fields; DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row()); DBUG_RETURN(ret); } @@ -1550,7 +1549,7 @@ Sp_handler_package::show_create_sp(THD *thd, String *buf, buf->append(STRING_WITH_LEN("OR REPLACE "))) || append_definer(thd, buf, &definer.user, &definer.host) || buf->append(type_lex_cstring()) || - buf->append(" ", 1) || + buf->append(' ') || (ddl_options.if_not_exists() && buf->append(STRING_WITH_LEN("IF NOT EXISTS "))) || (db.length > 0 && @@ -1558,7 +1557,7 @@ Sp_handler_package::show_create_sp(THD *thd, String *buf, buf->append('.'))) || append_identifier(thd, buf, name.str, name.length) || append_package_chistics(buf, chistics) || - buf->append(" ", 1) || + buf->append(' ') || buf->append(body.str, body.length); return rc; } @@ -657,12 +657,13 @@ extern "C" uchar* sp_sroutine_key(const uchar *ptr, size_t *plen, */ TABLE *open_proc_table_for_read(THD *thd); -bool load_charset(MEM_ROOT *mem_root, +bool load_charset(THD *thd, + MEM_ROOT *mem_root, Field *field, CHARSET_INFO *dflt_cs, CHARSET_INFO **cs); -bool load_collation(MEM_ROOT *mem_root, +bool load_collation(THD *thd,MEM_ROOT *mem_root, Field *field, CHARSET_INFO *dflt_cl, CHARSET_INFO **cl); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 2481f633b16..1f1bf07f78d 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -151,9 +151,9 @@ bool Item_splocal::append_value_for_log(THD *thd, String *str) Item *item= this_item(); String *str_value= item->type_handler()->print_item_value(thd, item, &str_value_holder); - return str_value ? 
- str->append(*str_value) : - str->append(STRING_WITH_LEN("NULL")); + return (str_value ? + str->append(*str_value) : + str->append(NULL_clex_str)); } @@ -167,7 +167,7 @@ bool Item_splocal_row_field::append_for_log(THD *thd, String *str) if (str->append(STRING_WITH_LEN(" NAME_CONST('")) || str->append(&m_name) || - str->append(".") || + str->append('.') || str->append(&m_field_name) || str->append(STRING_WITH_LEN("',"))) return true; @@ -1474,7 +1474,9 @@ sp_head::execute(THD *thd, bool merge_da_on_success) WSREP_DEBUG("MUST_REPLAY set after SP, err_status %d trx state: %d", err_status, thd->wsrep_trx().state()); } - (void) wsrep_after_statement(thd); + + if (wsrep_thd_is_local(thd)) + (void) wsrep_after_statement(thd); /* Reset the return code to zero if the transaction was @@ -2076,7 +2078,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, for (arg_no= 0; arg_no < argcount; arg_no++) { /* Arguments must be fixed in Item_func_sp::fix_fields */ - DBUG_ASSERT(argp[arg_no]->is_fixed()); + DBUG_ASSERT(argp[arg_no]->fixed()); if ((err_status= (*func_ctx)->set_parameter(thd, arg_no, &(argp[arg_no])))) goto err_with_cleanup; @@ -2116,7 +2118,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, if (str_value) binlog_buf.append(*str_value); else - binlog_buf.append(STRING_WITH_LEN("NULL")); + binlog_buf.append(NULL_clex_str); } binlog_buf.append(')'); } @@ -2993,7 +2995,7 @@ sp_head::show_create_routine_get_fields(THD *thd, const Sp_handler *sph, Item_empty_string *stmt_fld= new (mem_root) Item_empty_string(thd, col3_caption, 1024); - stmt_fld->maybe_null= TRUE; + stmt_fld->set_maybe_null(); fields->push_back(stmt_fld, mem_root); } @@ -3069,7 +3071,7 @@ sp_head::show_create_routine(THD *thd, const Sp_handler *sph) new (mem_root) Item_empty_string(thd, col3_caption, (uint)MY_MAX(m_defstr.length, 1024)); - stmt_fld->maybe_null= TRUE; + stmt_fld->set_maybe_null(); fields.push_back(stmt_fld, thd->mem_root); } @@ -3109,9 +3111,12 @@ 
sp_head::show_create_routine(THD *thd, const Sp_handler *sph) protocol->store_null(); - protocol->store(m_creation_ctx->get_client_cs()->csname, system_charset_info); - protocol->store(m_creation_ctx->get_connection_cl()->name, system_charset_info); - protocol->store(m_creation_ctx->get_db_cl()->name, system_charset_info); + protocol->store(&m_creation_ctx->get_client_cs()->cs_name, + system_charset_info); + protocol->store(&m_creation_ctx->get_connection_cl()->coll_name, + system_charset_info); + protocol->store(&m_creation_ctx->get_db_cl()->coll_name, + system_charset_info); err_status= protocol->write(); @@ -3485,10 +3490,9 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, thd->lex->safe_to_cache_query= 0; #endif - Opt_trace_start ots(thd, m_lex->query_tables, - SQLCOM_SELECT, &m_lex->var_list, - NULL, 0, - thd->variables.character_set_client); + Opt_trace_start ots(thd); + ots.init(thd, m_lex->query_tables, SQLCOM_SELECT, &m_lex->var_list, + NULL, 0, thd->variables.character_set_client); Json_writer_object trace_command(thd); Json_writer_array trace_command_steps(thd, "steps"); @@ -4208,7 +4212,8 @@ sp_instr_freturn::print(String *str) if (str->reserve(1024+8+32)) // Add some for the expr. too return; str->qs_append(STRING_WITH_LEN("freturn ")); - str->qs_append(m_type_handler->name().ptr()); + LEX_CSTRING name= m_type_handler->name().lex_cstring(); + str->qs_append(&name); str->qs_append(' '); m_value->print(str, enum_query_type(QT_ORDINARY | QT_ITEM_ORIGINAL_FUNC_NULLIF)); @@ -4953,7 +4958,7 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check) } for (; table ; table= table->next_global) - if (!table->derived && !table->schema_table) + if (!table->derived && !table->schema_table && !table->table_function) { /* Structure of key for the multi-set is "db\0table\0alias\0". 
diff --git a/sql/sp_head.h b/sql/sp_head.h index b6bf868a05b..eee5212679f 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -1,7 +1,7 @@ /* -*- C++ -*- */ /* Copyright (c) 2002, 2011, Oracle and/or its affiliates. - Copyright (c) 2020, MariaDB + Copyright (c) 2020, 2022, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1058,8 +1058,9 @@ public: Query_arena(thd->lex->sphead->get_main_mem_root(), STMT_INITIALIZED_FOR_SP) { } ~sp_lex_cursor() { free_items(); } - void cleanup_stmt() { } - Query_arena *query_arena() { return this; } + virtual bool cleanup_stmt(bool /*restore_set_statement_vars*/) override + { return false; } + Query_arena *query_arena() override { return this; } bool validate() { DBUG_ASSERT(sql_command == SQLCOM_SELECT); @@ -1839,23 +1840,24 @@ public: virtual ~sp_instr_cpush() {} - virtual int execute(THD *thd, uint *nextp); + int execute(THD *thd, uint *nextp) override; - virtual void print(String *str); + void print(String *str) override; /** This call is used to cleanup the instruction when a sensitive cursor is closed. For now stored procedures always use materialized cursors and the call is not used. */ - virtual void cleanup_stmt() { /* no op */ } + virtual bool cleanup_stmt(bool /*restore_set_statement_vars*/) override + { return false; } private: sp_lex_keeper m_lex_keeper; uint m_cursor; /**< Frame offset (for debugging) */ public: - virtual PSI_statement_info* get_psi_info() { return & psi_info; } + PSI_statement_info* get_psi_info() override { return & psi_info; } static PSI_statement_info psi_info; }; // class sp_instr_cpush : public sp_instr diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h index ffc9c0e19af..a2b26ac01e1 100644 --- a/sql/sp_pcontext.h +++ b/sql/sp_pcontext.h @@ -60,7 +60,8 @@ public: Spvar_definition field_def; /// Field-type of the SP-variable. 
- const Type_handler *type_handler() const { return field_def.type_handler(); } + const Type_handler *type_handler() const + { return field_def.type_handler(); } public: sp_variable(const LEX_CSTRING *name_arg, uint offset_arg) diff --git a/sql/spatial.cc b/sql/spatial.cc index ab6baf500e6..9bf6110c991 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -144,8 +144,6 @@ int MBR::within(const MBR *mbr) /***************************** Gis_class_info *******************************/ -String Geometry::bad_geometry_data("Bad object", &my_charset_bin); - Geometry::Class_info *Geometry::ci_collection[Geometry::wkb_last+1]= { NULL, NULL, NULL, NULL, NULL, NULL, NULL @@ -382,7 +380,7 @@ int Geometry::as_json(String *wkt, uint max_dec_digits, const char **end) if (wkt->reserve(4 + type_keyname_len + 2 + len + 2 + 2 + coord_keyname_len + 4, 512)) return 1; - wkt->qs_append("\"", 1); + wkt->qs_append('"'); wkt->qs_append((const char *) type_keyname, type_keyname_len); wkt->qs_append("\": \"", 4); wkt->qs_append(get_class_info()->m_geojson_name.str, len); @@ -406,7 +404,7 @@ int Geometry::bbox_as_json(String *wkt) const char *end; if (wkt->reserve(5 + bbox_keyname_len + (FLOATING_POINT_DECIMALS+2)*4, 512)) return 1; - wkt->qs_append("\"", 1); + wkt->qs_append('"'); wkt->qs_append((const char *) bbox_keyname, bbox_keyname_len); wkt->qs_append("\": [", 4); @@ -420,7 +418,7 @@ int Geometry::bbox_as_json(String *wkt) wkt->qs_append(mbr.xmax); wkt->qs_append(", ", 2); wkt->qs_append(mbr.ymax); - wkt->qs_append("]", 1); + wkt->qs_append(']'); return 0; } @@ -3526,13 +3524,13 @@ bool Gis_geometry_collection::get_data_as_json(String *txt, uint max_dec_digits, if (!(geom= create_by_typeid(&buffer, wkb_type))) return 1; geom->set_data_ptr(data, (uint) (m_data_end - data)); - if (txt->append("{", 1) || + if (txt->append('{') || geom->as_json(txt, max_dec_digits, &data) || txt->append(STRING_WITH_LEN("}, "), 512)) return 1; } txt->length(txt->length() - 2); - if (txt->append("]", 1)) + if 
(txt->append(']')) return 1; *end= data; diff --git a/sql/spatial.h b/sql/spatial.h index 635b6299afd..8974511adf9 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -227,8 +227,6 @@ public: static void operator delete(void *buffer) {} - static String bad_geometry_data; - enum wkbType { wkb_point= 1, diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index b0049bc6f2d..b6f7daf42c3 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -65,8 +65,8 @@ bool using_global_priv_table= true; // set that from field length in acl_load? #ifndef NO_EMBEDDED_ACCESS_CHECKS -const uint max_hostname_length= 60; -const uint max_dbname_length= 64; +const uint max_hostname_length= HOSTNAME_LENGTH; +const uint max_dbname_length= NAME_CHAR_LEN; #endif const char *safe_vio_type_name(Vio *vio) @@ -454,15 +454,15 @@ public: void print_grant(String *str) { str->append(STRING_WITH_LEN("GRANT PROXY ON '")); - str->append(proxied_user); + str->append(proxied_user, strlen(proxied_user)); str->append(STRING_WITH_LEN("'@'")); if (proxied_host.hostname) str->append(proxied_host.hostname, strlen(proxied_host.hostname)); str->append(STRING_WITH_LEN("' TO '")); - str->append(user); + str->append(user, strlen(user)); str->append(STRING_WITH_LEN("'@'")); if (host.hostname) - str->append(host.hostname); + str->append(host.hostname, strlen(host.hostname)); str->append(STRING_WITH_LEN("'")); if (with_grant) str->append(STRING_WITH_LEN(" WITH GRANT OPTION")); @@ -656,7 +656,7 @@ bool ROLE_GRANT_PAIR::init(MEM_ROOT *mem, const char *username, #define ROLE_OPENED (1L << 3) static DYNAMIC_ARRAY acl_hosts, acl_users, acl_proxy_users; -static Dynamic_array<ACL_DB> acl_dbs(PSI_INSTRUMENT_MEM, 0U, 50U); +static Dynamic_array<ACL_DB> acl_dbs(PSI_INSTRUMENT_MEM, 0, 50); typedef Dynamic_array<ACL_DB>::CMP_FUNC acl_dbs_cmp; static HASH acl_roles; /* @@ -1777,7 +1777,7 @@ class User_table_json: public User_table if (value_len) json.append(','); json.append('"'); - json.append(key); + json.append(key, strlen(key)); 
json.append(STRING_WITH_LEN("\":")); if (string) json.append('"'); @@ -1955,7 +1955,7 @@ class Grant_tables We can read privilege tables even when !initialized. This can be acl_load() - server startup or FLUSH PRIVILEGES */ - if (lock_type >= TL_WRITE_ALLOW_WRITE && !initialized) + if (lock_type >= TL_FIRST_WRITE && !initialized) { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); DBUG_RETURN(-1); @@ -1970,7 +1970,7 @@ class Grant_tables NULL, lock_type); tl->open_type= OT_BASE_ONLY; tl->i_s_requested_object= OPEN_TABLE_ONLY; - tl->updating= lock_type >= TL_WRITE_ALLOW_WRITE; + tl->updating= lock_type >= TL_FIRST_WRITE; if (i >= FIRST_OPTIONAL_TABLE) tl->open_strategy= TABLE_LIST::OPEN_IF_EXISTS; tl->next_global= tl->next_local= *ptr_first; @@ -2006,7 +2006,7 @@ class Grant_tables NULL, lock_type); tl->open_type= OT_BASE_ONLY; tl->i_s_requested_object= OPEN_TABLE_ONLY; - tl->updating= lock_type >= TL_WRITE_ALLOW_WRITE; + tl->updating= lock_type >= TL_FIRST_WRITE; p_user_table= &m_user_table_tabular; counter++; res= really_open(thd, tl, &unused); @@ -2087,7 +2087,7 @@ class Grant_tables DBUG_ASSERT(tables); } - if (tables->lock_type >= TL_WRITE_ALLOW_WRITE) + if (tables->lock_type >= TL_FIRST_WRITE) { /* GRANT and REVOKE are applied the slave in/exclusion rules as they are @@ -2862,7 +2862,7 @@ void acl_free(bool end) bool acl_reload(THD *thd) { DYNAMIC_ARRAY old_acl_hosts, old_acl_users, old_acl_proxy_users; - Dynamic_array<ACL_DB> old_acl_dbs(0U,0U); + Dynamic_array<ACL_DB> old_acl_dbs(PSI_INSTRUMENT_MEM, 0, 0); HASH old_acl_roles, old_acl_roles_mappings; MEM_ROOT old_mem; int result; @@ -4116,10 +4116,9 @@ end: #ifdef WITH_WSREP wsrep_error_label: - if (WSREP(thd) && !thd->wsrep_applier) + if (WSREP(thd)) { - WSREP_TO_ISOLATION_END; - + wsrep_to_isolation_end(thd); thd->set_query(query_save); } #endif /* WITH_WSREP */ @@ -4270,10 +4269,9 @@ int acl_set_default_role(THD *thd, const char *host, const char *user, #ifdef WITH_WSREP 
wsrep_error_label: - if (WSREP(thd) && !thd->wsrep_applier) + if (WSREP(thd)) { - WSREP_TO_ISOLATION_END; - + wsrep_to_isolation_end(thd); thd->set_query(query_save); } #endif /* WITH_WSREP */ @@ -6300,8 +6298,8 @@ static int traverse_role_graph_impl(ACL_USER_BASE *user, void *context, It uses a Dynamic_array to reduce the number of malloc calls to a minimum */ - Dynamic_array<NODE_STATE> stack(20,50); - Dynamic_array<ACL_USER_BASE *> to_clear(20,50); + Dynamic_array<NODE_STATE> stack(PSI_INSTRUMENT_MEM, 20,50); + Dynamic_array<ACL_USER_BASE *> to_clear(PSI_INSTRUMENT_MEM, 20, 50); NODE_STATE state; /* variable used to insert elements in the stack */ int result= 0; @@ -7064,7 +7062,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, bool revoke_grant) { privilege_t column_priv(NO_ACL); - int result; + int result, res; List_iterator <LEX_USER> str_list (user_list); LEX_USER *Str, *tmp_Str; bool create_new_users=0; @@ -7091,13 +7089,14 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, while ((column = column_iter++)) { - uint unused_field_idx= NO_CACHED_FIELD_INDEX; + field_index_t unused_field_idx= NO_CACHED_FIELD_INDEX; TABLE_LIST *dummy; Field *f=find_field_in_table_ref(thd, table_list, column->column.ptr(), column->column.length(), column->column.ptr(), NULL, NULL, - NULL, TRUE, FALSE, - &unused_field_idx, FALSE, &dummy); + ignored_tables_list_t(NULL), NULL, + TRUE, FALSE, &unused_field_idx, FALSE, + &dummy); if (unlikely(f == (Field*)0)) { my_error(ER_BAD_FIELD_ERROR, MYF(0), @@ -7270,10 +7269,10 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, revoke_grant)) result= TRUE; } - if (int res= replace_table_table(thd, grant_table, - tables.tables_priv_table().table(), - *Str, db_name, table_name, - rights, column_priv, revoke_grant)) + if ((res= replace_table_table(thd, grant_table, + tables.tables_priv_table().table(), + *Str, db_name, table_name, + rights, column_priv, revoke_grant))) { if (res > 0) { @@ -8255,7 +8254,7 @@ bool 
check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables, We want to have either SELECT or INSERT rights to sequences depending on how they are accessed */ - orig_want_access= ((t_ref->lock_type == TL_WRITE_ALLOW_WRITE) ? + orig_want_access= ((t_ref->lock_type >= TL_FIRST_WRITE) ? INSERT_ACL : SELECT_ACL); } @@ -8290,7 +8289,8 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables, continue; // ok if (!(~t_ref->grant.privilege & want_access) || - t_ref->is_anonymous_derived_table() || t_ref->schema_table) + t_ref->is_anonymous_derived_table() || t_ref->schema_table || + t_ref->table_function) { /* It is subquery in the FROM clause. VIEW set t_ref->derived after @@ -8299,7 +8299,8 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables, NOTE: is_derived() can't be used here because subquery in this case the FROM clase (derived tables) can be not be marked yet. */ - if (t_ref->is_anonymous_derived_table() || t_ref->schema_table) + if (t_ref->is_anonymous_derived_table() || t_ref->schema_table || + t_ref->table_function) { /* If it's a temporary table created for a subquery in the FROM @@ -9251,7 +9252,7 @@ bool mysql_show_create_user(THD *thd, LEX_USER *lex_user) goto end; } - result.append("CREATE USER "); + result.append(STRING_WITH_LEN("CREATE USER ")); append_identifier(thd, &result, username, strlen(username)); add_user_parameters(thd, &result, acl_user, false); @@ -9275,9 +9276,10 @@ bool mysql_show_create_user(THD *thd, LEX_USER *lex_user) of a user account, including both the manual expiration state of the account and the automatic expiration policy attached to it, we should print two statements here, a CREATE USER (printed above) and an ALTER USER */ - if (acl_user->password_expired && acl_user->password_lifetime > -1) { + if (acl_user->password_expired && acl_user->password_lifetime > -1) + { result.length(0); - result.append("ALTER USER "); + result.append(STRING_WITH_LEN("ALTER USER ")); append_identifier(thd, 
&result, username, strlen(username)); result.append('@'); append_identifier(thd, &result, acl_user->host.hostname, @@ -9533,7 +9535,7 @@ static bool show_default_role(THD *thd, ACL_USER *acl_entry, def_str.length(0); def_str.append(STRING_WITH_LEN("SET DEFAULT ROLE ")); append_identifier(thd, &def_str, def_rolename.str, def_rolename.length); - def_str.append(" FOR "); + def_str.append(STRING_WITH_LEN(" FOR ")); append_identifier(thd, &def_str, acl_entry->user.str, acl_entry->user.length); DBUG_ASSERT(!(acl_entry->flags & IS_ROLE)); def_str.append('@'); @@ -11382,7 +11384,7 @@ mysql_revoke_sp_privs(THD *thd, Grant_tables *tables, const Sp_handler *sph, bool mysql_revoke_all(THD *thd, List <LEX_USER> &list) { uint counter, revoked; - int result; + int result, res; ACL_DB *acl_db; DBUG_ENTER("mysql_revoke_all"); @@ -11466,30 +11468,33 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list) { for (counter= 0, revoked= 0 ; counter < column_priv_hash.records ; ) { - const char *user,*host; - GRANT_TABLE *grant_table= - (GRANT_TABLE*) my_hash_element(&column_priv_hash, counter); + const char *user,*host; + GRANT_TABLE *grant_table= ((GRANT_TABLE*) + my_hash_element(&column_priv_hash, counter)); + user= grant_table->user; host= safe_str(grant_table->host.hostname); if (!strcmp(lex_user->user.str,user) && !strcmp(lex_user->host.str, host)) - { - List<LEX_COLUMN> columns; - /* TODO(cvicentiu) refactor to use + { + List<LEX_COLUMN> columns; + /* TODO(cvicentiu) refactor replace_db_table to use + Db_table instead of TABLE directly. */ + if (replace_column_table(grant_table, + tables.columns_priv_table().table(), + *lex_user, columns, grant_table->db, + grant_table->tname, ALL_KNOWN_ACL, 1)) + result= -1; + + /* TODO(cvicentiu) refactor replace_db_table to use Db_table instead of TABLE directly. 
*/ - if (replace_column_table(grant_table, - tables.columns_priv_table().table(), - *lex_user, columns, - grant_table->db, grant_table->tname, - ALL_KNOWN_ACL, 1)) - result= -1; - if (int res= replace_table_table(thd, grant_table, - tables.tables_priv_table().table(), - *lex_user, - grant_table->db, grant_table->tname, - ALL_KNOWN_ACL, NO_ACL, 1)) - { + if ((res= replace_table_table(thd, grant_table, + tables.tables_priv_table().table(), + *lex_user, grant_table->db, + grant_table->tname, ALL_KNOWN_ACL, + NO_ACL, 1))) + { if (res > 0) result= -1; else @@ -11502,8 +11507,8 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list) continue; } } - } - counter++; + } + counter++; } } while (revoked); diff --git a/sql/sql_acl_getsort.ic b/sql/sql_acl_getsort.ic index df55c7c5f1e..046b412d5f6 100644 --- a/sql/sql_acl_getsort.ic +++ b/sql/sql_acl_getsort.ic @@ -14,6 +14,8 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */ #ifndef NO_EMBEDDED_ACCESS_CHECKS + +#define magic_bits 30 /* Returns a number which, if sorted in descending order, magically puts patterns in the order from most specific (e.g. no wildcards) to most generic @@ -21,8 +23,8 @@ Takes a template that lists types of following patterns (by the first letter of _h_ostname, _d_bname, _u_sername) and up to four patterns. - No more than two can be of 'h' or 'd' type (because one magic value takes 26 - bits, see below). + No more than two can be of 'h' or 'd' type (because one magic value takes + magic_bits bits, see below). ======================================================================== @@ -142,7 +144,7 @@ case 2: ((M*2*(maxlen+1) + L)*(maxlen+1) + K)*(maxlen+1) + P upper bound: L<=maxlen, M<=maxlen, K<=maxlen/2, P<maxlen - for a current maxlen=64, the magic number needs 26 bits. + for a current maxlen=64, the magic number needs magic_bits bits. */ static ulonglong get_magic_sort(const char *templ, ...) 
@@ -165,9 +167,9 @@ static ulonglong get_magic_sort(const char *templ, ...) continue; } - /* A wildcard pattern. Encoded in 26 bits. */ + /* A wildcard pattern. Encoded in magic_bits bits. */ uint maxlen= *templ == 'd' ? max_dbname_length : max_hostname_length; - DBUG_ASSERT(maxlen <= 64); + DBUG_ASSERT(maxlen <= 255); DBUG_ASSERT(*templ == 'd' || *templ == 'h'); uint N= 0, M= 0, K= 0, P= 0; @@ -189,14 +191,19 @@ static ulonglong get_magic_sort(const char *templ, ...) if (pat[i] == wild_prefix && pat[i+1]) i++; N++; } - uint L= K ? maxlen - N - M : 0, d= maxlen + 1, magic; + + set_if_smaller(K, 31); + set_if_smaller(M, 31); + + ulonglong L= K ? maxlen - N - M : 0, d= maxlen + 1, magic; + ulonglong d1= MY_MIN(d, 32); if (L > M) - magic= (((L * 2 + 1) * d + K) * d + M) * d + P; + magic= (((L * 2 + 1) * d + K) * d1 + M) * d + P; else - magic= (((M * 2 + 0) * d + L) * d + K) * d + P; - DBUG_ASSERT(magic < 1<<26); - sort= (sort << 26) + magic; - IF_DBUG(bits_used+= 26,); + magic= (((M * 2 + 0) * d + L) * d1 + K) * d + P; + DBUG_ASSERT(magic < (1ULL << magic_bits)); + sort= (sort << magic_bits) + magic; + IF_DBUG(bits_used+= magic_bits,); } DBUG_ASSERT(bits_used < 8*sizeof(sort)); va_end(args); diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index 060dcc059b5..39a5b7ff0db 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -34,6 +34,13 @@ #include "wsrep_mysqld.h" const LEX_CSTRING msg_status= {STRING_WITH_LEN("status")}; +const LEX_CSTRING msg_repair= { STRING_WITH_LEN("repair") }; +const LEX_CSTRING msg_assign_to_keycache= +{ STRING_WITH_LEN("assign_to_keycache") }; +const LEX_CSTRING msg_analyze= { STRING_WITH_LEN("analyze") }; +const LEX_CSTRING msg_check= { STRING_WITH_LEN("check") }; +const LEX_CSTRING msg_preload_keys= { STRING_WITH_LEN("preload_keys") }; +const LEX_CSTRING msg_optimize= { STRING_WITH_LEN("optimize") }; /* Prepare, run and cleanup for mysql_recreate_table() */ @@ -74,15 +81,16 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST 
*table_list) static int send_check_errmsg(THD *thd, TABLE_LIST* table, - const char* operator_name, const char* errmsg) + const LEX_CSTRING *operator_name, + const char* errmsg) { Protocol *protocol= thd->protocol; protocol->prepare_for_resend(); protocol->store(table->alias.str, table->alias.length, system_charset_info); - protocol->store((char*) operator_name, system_charset_info); - protocol->store(STRING_WITH_LEN("error"), system_charset_info); - protocol->store(errmsg, system_charset_info); + protocol->store(operator_name, system_charset_info); + protocol->store(&error_clex_str, system_charset_info); + protocol->store(errmsg, strlen(errmsg), system_charset_info); thd->clear_error(); if (protocol->write()) return -1; @@ -151,7 +159,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, */ if (table->s->tmp_table) { - error= send_check_errmsg(thd, table_list, "repair", + error= send_check_errmsg(thd, table_list, &msg_repair, "Cannot repair temporary table from .frm file"); goto end; } @@ -169,8 +177,12 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, if (table->s->frm_version < FRM_VER_TRUE_VARCHAR && table->s->varchar_fields) { - error= send_check_errmsg(thd, table_list, "repair", - "Failed repairing a very old .frm file as the data file format has changed between versions. Please dump the table in your old system with mysqldump and read it into this system with mysql or mysqlimport"); + error= send_check_errmsg(thd, table_list, &msg_repair, + "Failed repairing a very old .frm file as the " + "data file format has changed between versions. 
" + "Please dump the table in your old system with " + "mysqldump and read it into this system with " + "mysql or mysqlimport"); goto end; } @@ -228,12 +240,12 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, if (my_rename(from, tmp, MYF(MY_WME))) { - error= send_check_errmsg(thd, table_list, "repair", + error= send_check_errmsg(thd, table_list, &msg_repair, "Failed renaming data file"); goto end; } if (dd_recreate_table(thd, table_list->db.str, table_list->table_name.str)) - create_error= send_check_errmsg(thd, table_list, "repair", + create_error= send_check_errmsg(thd, table_list, &msg_repair, "Failed generating table from .frm file"); /* 'FALSE' for 'using_transactions' means don't postpone @@ -243,7 +255,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, query_cache_invalidate3(thd, table_list, FALSE); if (mysql_file_rename(key_file_misc, tmp, from, MYF(MY_WME))) { - error= send_check_errmsg(thd, table_list, "repair", + error= send_check_errmsg(thd, table_list, &msg_repair, "Failed restoring .MYD file"); goto end; } @@ -265,7 +277,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, */ if (open_table(thd, table_list, &ot_ctx)) { - error= send_check_errmsg(thd, table_list, "repair", + error= send_check_errmsg(thd, table_list, &msg_repair, "Failed to open partially repaired table"); goto end; } @@ -448,6 +460,37 @@ static void send_read_only_warning(THD *thd, const LEX_CSTRING *msg_status, } +/** + Collect field names of result set that will be sent to a client + + @param thd Thread data object + @param[out] fields List of fields whose metadata should be collected for + sending to client +*/ + +void fill_check_table_metadata_fields(THD *thd, List<Item>* fields) +{ + Item *item; + + item= new (thd->mem_root) Item_empty_string(thd, "Table", NAME_CHAR_LEN * 2); + item->set_maybe_null(); + fields->push_back(item, thd->mem_root); + + item= new (thd->mem_root) Item_empty_string(thd, "Op", 10); + 
item->set_maybe_null(); + fields->push_back(item, thd->mem_root); + + item= new (thd->mem_root) Item_empty_string(thd, "Msg_type", 10); + item->set_maybe_null(); + fields->push_back(item, thd->mem_root); + + item= new (thd->mem_root) Item_empty_string(thd, "Msg_text", + SQL_ADMIN_MSG_TEXT_SIZE); + item->set_maybe_null(); + fields->push_back(item, thd->mem_root); +} + + /* RETURN VALUES FALSE Message sent to net (admin operation went ok) @@ -456,7 +499,7 @@ static void send_read_only_warning(THD *thd, const LEX_CSTRING *msg_status, */ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt, - const char *operator_name, + const LEX_CSTRING *operator_name, thr_lock_type lock_type, bool org_open_for_modify, bool repair_table_use_frm, @@ -471,7 +514,6 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, { TABLE_LIST *table; List<Item> field_list; - Item *item; Protocol *protocol= thd->protocol; LEX *lex= thd->lex; int result_code; @@ -479,25 +521,12 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, bool need_repair_or_alter= 0; wait_for_commit* suspended_wfc; bool is_table_modified= false; - + LEX_CUSTRING tabledef_version; DBUG_ENTER("mysql_admin_table"); DBUG_PRINT("enter", ("extra_open_options: %u", extra_open_options)); - field_list.push_back(item= new (thd->mem_root) - Item_empty_string(thd, "Table", - NAME_CHAR_LEN * 2), thd->mem_root); - item->maybe_null = 1; - field_list.push_back(item= new (thd->mem_root) - Item_empty_string(thd, "Op", 10), thd->mem_root); - item->maybe_null = 1; - field_list.push_back(item= new (thd->mem_root) - Item_empty_string(thd, "Msg_type", 10), thd->mem_root); - item->maybe_null = 1; - field_list.push_back(item= new (thd->mem_root) - Item_empty_string(thd, "Msg_text", - SQL_ADMIN_MSG_TEXT_SIZE), - thd->mem_root); - item->maybe_null = 1; + fill_check_table_metadata_fields(thd, &field_list); + if (protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS | 
Protocol::SEND_EOF)) DBUG_RETURN(TRUE); @@ -523,12 +552,17 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, { char table_name_buff[SAFE_NAME_LEN*2+2]; LEX_CSTRING table_name= { table_name_buff, 0}; + char storage_engine_name[NAME_LEN]; + bool storage_engine_partitioned= 0; + uchar tabledef_version_buff[MY_UUID_SIZE]; const char *db= table->db.str; bool fatal_error=0; - bool open_error; + bool open_error= 0; bool collect_eis= FALSE; bool open_for_modify= org_open_for_modify; + storage_engine_name[0]= 0; // Marker that's not used + DBUG_PRINT("admin", ("table: '%s'.'%s'", db, table->table_name.str)); DEBUG_SYNC(thd, "admin_command_kill_before_modify"); @@ -544,7 +578,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, */ table->mdl_request.set_type(lex->sql_command == SQLCOM_REPAIR ? MDL_SHARED_NO_READ_WRITE - : lock_type >= TL_WRITE_ALLOW_WRITE + : lock_type >= TL_FIRST_WRITE ? MDL_SHARED_WRITE : MDL_SHARED_READ); if (thd->check_killed()) @@ -621,7 +655,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, protocol->prepare_for_resend(); protocol->store(&table_name, system_charset_info); protocol->store(operator_name, system_charset_info); - protocol->store(STRING_WITH_LEN("error"), system_charset_info); + protocol->store(&error_clex_str, system_charset_info); length= my_snprintf(buff, sizeof(buff), ER_THD(thd, ER_DROP_PARTITION_NON_EXISTENT), table_name.str); @@ -705,7 +739,6 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, { /* purecov: begin inspected */ enum_sql_command save_sql_command= lex->sql_command; - LEX_CSTRING error_clex_str= { STRING_WITH_LEN("error") }; DBUG_PRINT("admin", ("sending error message")); protocol->prepare_for_resend(); protocol->store(&table_name, system_charset_info); @@ -747,6 +780,16 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, thd->close_unused_temporary_table_instances(tables); else { + /* Store information about table for ddl log */ + storage_engine_partitioned= 
table->table->file->partition_engine(); + strmake(storage_engine_name, table->table->file->real_table_type(), + sizeof(storage_engine_name)-1); + tabledef_version.str= tabledef_version_buff; + if ((tabledef_version.length= table->table->s->tabledef_version.length)) + memcpy((char*) tabledef_version.str, + table->table->s->tabledef_version.str, + MY_UUID_SIZE); + if (wait_while_table_is_used(thd, table->table, HA_EXTRA_NOT_USED)) goto err; DEBUG_SYNC(thd, "after_admin_flush"); @@ -827,7 +870,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, if (result_code == HA_ADMIN_OK) { - DBUG_PRINT("admin", ("calling operator_func '%s'", operator_name)); + DBUG_PRINT("admin", ("calling operator_func '%s'", operator_name->str)); THD_STAGE_INFO(thd, stage_executing); result_code = (table->table->file->*operator_func)(thd, check_opt); THD_STAGE_INFO(thd, stage_sending_data); @@ -980,13 +1023,14 @@ send_result: const Sql_condition *err; while ((err= it++)) { + const char *err_msg= err->get_message_text(); protocol->prepare_for_resend(); protocol->store(&table_name, system_charset_info); - protocol->store((char*) operator_name, system_charset_info); + protocol->store(operator_name, system_charset_info); protocol->store(warning_level_names[err->get_level()].str, warning_level_names[err->get_level()].length, system_charset_info); - protocol->store(err->get_message_text(), system_charset_info); + protocol->store(err_msg, strlen(err_msg), system_charset_info); if (protocol->write()) goto err; } @@ -1005,7 +1049,7 @@ send_result_message: char buf[MYSQL_ERRMSG_SIZE]; size_t length=my_snprintf(buf, sizeof(buf), ER_THD(thd, ER_CHECK_NOT_IMPLEMENTED), - operator_name); + operator_name->str); protocol->store(STRING_WITH_LEN("note"), system_charset_info); protocol->store(buf, length, system_charset_info); } @@ -1047,13 +1091,13 @@ send_result_message: break; case HA_ADMIN_CORRUPT: - protocol->store(STRING_WITH_LEN("error"), system_charset_info); + 
protocol->store(&error_clex_str, system_charset_info); protocol->store(STRING_WITH_LEN("Corrupt"), system_charset_info); fatal_error=1; break; case HA_ADMIN_INVALID: - protocol->store(STRING_WITH_LEN("error"), system_charset_info); + protocol->store(&error_clex_str, system_charset_info); protocol->store(STRING_WITH_LEN("Invalid argument"), system_charset_info); break; @@ -1136,8 +1180,8 @@ send_result_message: else { /* Hijack the row already in-progress. */ - protocol->store(STRING_WITH_LEN("error"), system_charset_info); - protocol->store(err_msg, system_charset_info); + protocol->store(&error_clex_str, system_charset_info); + protocol->store(err_msg, strlen(err_msg), system_charset_info); if (protocol->write()) goto err; /* Start off another row for HA_ADMIN_FAILED */ @@ -1173,7 +1217,7 @@ send_result_message: const char *what_to_upgrade= table->view ? "VIEW" : table->table->file->ha_table_flags() & HA_CAN_REPAIR ? "TABLE" : 0; - protocol->store(STRING_WITH_LEN("error"), system_charset_info); + protocol->store(&error_clex_str, system_charset_info); if (what_to_upgrade) length= my_snprintf(buf, sizeof(buf), ER_THD(thd, ER_TABLE_NEEDS_UPGRADE), @@ -1197,7 +1241,7 @@ send_result_message: size_t length=my_snprintf(buf, sizeof(buf), "Unknown - internal error %d during operation", result_code); - protocol->store(STRING_WITH_LEN("error"), system_charset_info); + protocol->store(&error_clex_str, system_charset_info); protocol->store(buf, length, system_charset_info); fatal_error=1; break; @@ -1210,6 +1254,7 @@ send_result_message: with conflicting DMLs resulting in deadlock. 
*/ thd->transaction->stmt.mark_executed_table_admin_cmd(); + if (table->table && !table->view) { /* @@ -1256,6 +1301,22 @@ send_result_message: is_table_modified= true; } close_thread_tables(thd); + + if (storage_engine_name[0]) + { + /* Table was changed (repair, optimize or something similar) */ + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + lex_string_set(&ddl_log.org_storage_engine_name, + storage_engine_name); + ddl_log.query= *operator_name; + ddl_log.org_partitioned= storage_engine_partitioned; + ddl_log.org_database= table->db; + ddl_log.org_table= table->table_name; + ddl_log.org_table_id= tabledef_version; + backup_log_ddl(&ddl_log); + } + thd->release_transactional_locks(); /* @@ -1347,8 +1408,9 @@ bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables, } check_opt.key_cache= key_cache; DBUG_RETURN(mysql_admin_table(thd, tables, &check_opt, - "assign_to_keycache", TL_READ_NO_INSERT, 0, 0, - 0, 0, &handler::assign_to_keycache, 0, false)); + &msg_assign_to_keycache, TL_READ_NO_INSERT, 0, + 0, 0, 0, + &handler::assign_to_keycache, 0, false)); } @@ -1374,8 +1436,9 @@ bool mysql_preload_keys(THD* thd, TABLE_LIST* tables) outdated information if parallel inserts into cache blocks happen. 
*/ DBUG_RETURN(mysql_admin_table(thd, tables, 0, - "preload_keys", TL_READ_NO_INSERT, 0, 0, 0, 0, - &handler::preload_keys, 0, false)); + &msg_preload_keys, TL_READ_NO_INSERT, + 0, 0, 0, 0, + &handler::preload_keys, 0, false)); } @@ -1395,9 +1458,8 @@ bool Sql_cmd_analyze_table::execute(THD *thd) WSREP_TO_ISOLATION_BEGIN_WRTCHK(NULL, NULL, first_table); res= mysql_admin_table(thd, first_table, &m_lex->check_opt, - "analyze", lock_type, 1, 0, 0, 0, + &msg_analyze, lock_type, 1, 0, 0, 0, &handler::ha_analyze, 0, true); - m_lex->first_select_lex()->table_list.first= first_table; m_lex->query_tables= first_table; @@ -1421,7 +1483,7 @@ bool Sql_cmd_check_table::execute(THD *thd) TRUE, UINT_MAX, FALSE)) goto error; /* purecov: inspected */ - res= mysql_admin_table(thd, first_table, &m_lex->check_opt, "check", + res= mysql_admin_table(thd, first_table, &m_lex->check_opt, &msg_check, lock_type, 0, 0, HA_OPEN_FOR_REPAIR, 0, &handler::ha_check, &view_check, false); @@ -1448,9 +1510,8 @@ bool Sql_cmd_optimize_table::execute(THD *thd) res= (specialflag & SPECIAL_NO_NEW_FUNC) ? 
mysql_recreate_table(thd, first_table, true) : mysql_admin_table(thd, first_table, &m_lex->check_opt, - "optimize", TL_WRITE, 1, 0, 0, 0, + &msg_optimize, TL_WRITE, 1, 0, 0, 0, &handler::ha_optimize, 0, true); - m_lex->first_select_lex()->table_list.first= first_table; m_lex->query_tables= first_table; @@ -1472,9 +1533,8 @@ bool Sql_cmd_repair_table::execute(THD *thd) if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table, FALSE, UINT_MAX, FALSE)) goto error; /* purecov: inspected */ - WSREP_TO_ISOLATION_BEGIN_WRTCHK(NULL, NULL, first_table); - res= mysql_admin_table(thd, first_table, &m_lex->check_opt, "repair", + res= mysql_admin_table(thd, first_table, &m_lex->check_opt, &msg_repair, TL_WRITE, 1, MY_TEST(m_lex->check_opt.sql_flags & TT_USEFRM), HA_OPEN_FOR_REPAIR, &prepare_for_repair, diff --git a/sql/sql_admin.h b/sql/sql_admin.h index d31726d32a4..0c7f1c3cee5 100644 --- a/sql/sql_admin.h +++ b/sql/sql_admin.h @@ -24,7 +24,7 @@ bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* table_list, bool mysql_preload_keys(THD* thd, TABLE_LIST* table_list); int reassign_keycache_tables(THD* thd, KEY_CACHE *src_cache, KEY_CACHE *dst_cache); - +void fill_check_table_metadata_fields(THD *thd, List<Item>* fields); /** Sql_cmd_analyze_table represents the ANALYZE TABLE statement. 
*/ diff --git a/sql/sql_alloc.h b/sql/sql_alloc.h index f475ecdff73..f5d2d4e8b1a 100644 --- a/sql/sql_alloc.h +++ b/sql/sql_alloc.h @@ -18,20 +18,18 @@ #include <my_sys.h> /* alloc_root, MEM_ROOT, TRASH */ -THD *thd_get_current_thd(); - -/* mysql standard class memory allocator */ +/* MariaDB standard class memory allocator */ class Sql_alloc { public: static void *operator new(size_t size) throw () { - return thd_alloc(thd_get_current_thd(), size); + return thd_alloc(_current_thd(), size); } static void *operator new[](size_t size) throw () { - return thd_alloc(thd_get_current_thd(), size); + return thd_alloc(_current_thd(), size); } static void *operator new[](size_t size, MEM_ROOT *mem_root) throw () { return alloc_root(mem_root, size); } @@ -42,9 +40,5 @@ public: static void operator delete[](void *, MEM_ROOT *) { /* never called */ } static void operator delete[](void *ptr, size_t size) { TRASH_FREE(ptr, size); } -#ifdef HAVE_valgrind - bool dummy_for_valgrind; - inline Sql_alloc() :dummy_for_valgrind(0) {} -#endif }; #endif /* SQL_ALLOC_INCLUDED */ diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc index dee5ea2fe4b..a21933892de 100644 --- a/sql/sql_alter.cc +++ b/sql/sql_alter.cc @@ -27,6 +27,7 @@ Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root) key_list(rhs.key_list, mem_root), alter_rename_key_list(rhs.alter_rename_key_list, mem_root), create_list(rhs.create_list, mem_root), + alter_index_ignorability_list(rhs.alter_index_ignorability_list, mem_root), check_constraint_list(rhs.check_constraint_list, mem_root), flags(rhs.flags), partition_flags(rhs.partition_flags), keys_onoff(rhs.keys_onoff), @@ -253,16 +254,8 @@ Alter_info::algorithm(const THD *thd) const Alter_table_ctx::Alter_table_ctx() - : implicit_default_value_error_field(NULL), - error_if_not_empty(false), - tables_opened(0), - db(null_clex_str), table_name(null_clex_str), alias(null_clex_str), - new_db(null_clex_str), new_name(null_clex_str), new_alias(null_clex_str), - 
fk_error_if_delete_row(false), fk_error_id(NULL), - fk_error_table(NULL), modified_primary_key(false) -#ifdef DBUG_ASSERT_EXISTS - , tmp_table(false) -#endif + : db(null_clex_str), table_name(null_clex_str), alias(null_clex_str), + new_db(null_clex_str), new_name(null_clex_str), new_alias(null_clex_str) { } @@ -275,14 +268,8 @@ Alter_table_ctx::Alter_table_ctx(THD *thd, TABLE_LIST *table_list, uint tables_opened_arg, const LEX_CSTRING *new_db_arg, const LEX_CSTRING *new_name_arg) - : implicit_default_value_error_field(NULL), error_if_not_empty(false), - tables_opened(tables_opened_arg), - new_db(*new_db_arg), new_name(*new_name_arg), - fk_error_if_delete_row(false), fk_error_id(NULL), - fk_error_table(NULL), modified_primary_key(false) -#ifdef DBUG_ASSERT_EXISTS - , tmp_table(false) -#endif + : tables_opened(tables_opened_arg), + new_db(*new_db_arg), new_name(*new_name_arg) { /* Assign members db, table_name, new_db and new_name @@ -361,10 +348,22 @@ Alter_table_ctx::Alter_table_ctx(THD *thd, TABLE_LIST *table_list, this case. This fact is enforced with assert. */ build_tmptable_filename(thd, tmp_path, sizeof(tmp_path)); -#ifdef DBUG_ASSERT_EXISTS tmp_table= true; -#endif } + if ((id.length= table_list->table->s->tabledef_version.length)) + memcpy(id_buff, table_list->table->s->tabledef_version.str, MY_UUID_SIZE); + id.str= id_buff; + storage_engine_partitioned= table_list->table->file->partition_engine(); + storage_engine_name.str= storage_engine_buff; + storage_engine_name.length= ((strmake(storage_engine_buff, + table_list->table->file-> + real_table_type(), + sizeof(storage_engine_buff)-1)) - + storage_engine_buff); + tmp_storage_engine_name.str= tmp_storage_engine_buff; + tmp_storage_engine_name.length= 0; + tmp_id.str= 0; + tmp_id.length= 0; } diff --git a/sql/sql_alter.h b/sql/sql_alter.h index a499e978eef..d91984d4b26 100644 --- a/sql/sql_alter.h +++ b/sql/sql_alter.h @@ -1,5 +1,5 @@ /* Copyright (c) 2010, 2014, Oracle and/or its affiliates. 
- Copyright (c) 2013, 2020, MariaDB Corporation. + Copyright (c) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -20,6 +20,7 @@ class Alter_drop; class Alter_column; class Alter_rename_key; +class Alter_index_ignorability; class Key; /** @@ -95,6 +96,8 @@ public: List<Alter_rename_key> alter_rename_key_list; // List of columns, used by both CREATE and ALTER TABLE. List<Create_field> create_list; + // Indexes whose ignorability needs to be changed. + List<Alter_index_ignorability> alter_index_ignorability_list; List<Virtual_column_info> check_constraint_list; // Type of ALTER TABLE operation. alter_table_operations flags; @@ -129,6 +132,7 @@ public: key_list.empty(); alter_rename_key_list.empty(); create_list.empty(); + alter_index_ignorability_list.empty(); check_constraint_list.empty(); flags= 0; partition_flags= 0; @@ -290,6 +294,12 @@ public: const char *get_tmp_path() const { return tmp_path; } + const LEX_CSTRING get_tmp_cstring_path() const + { + LEX_CSTRING tmp= { tmp_path, strlen(tmp_path) }; + return tmp; + }; + /** Mark ALTER TABLE as needing to produce foreign key error if it deletes a row from the table being changed. 
@@ -303,28 +313,38 @@ public: void report_implicit_default_value_error(THD *thd, const TABLE_SHARE *) const; public: - Create_field *implicit_default_value_error_field; - bool error_if_not_empty; - uint tables_opened; + Create_field *implicit_default_value_error_field= nullptr; + bool error_if_not_empty= false; + uint tables_opened= 0; LEX_CSTRING db; LEX_CSTRING table_name; + LEX_CSTRING storage_engine_name; LEX_CSTRING alias; LEX_CSTRING new_db; LEX_CSTRING new_name; LEX_CSTRING new_alias; LEX_CSTRING tmp_name; + LEX_CSTRING tmp_storage_engine_name; + LEX_CUSTRING tmp_id, id; char tmp_buff[80]; + uchar id_buff[MY_UUID_SIZE]; + char storage_engine_buff[NAME_LEN], tmp_storage_engine_buff[NAME_LEN]; + bool storage_engine_partitioned; + bool tmp_storage_engine_name_partitioned; + /** Indicates that if a row is deleted during copying of data from old version of table to the new version ER_FK_CANNOT_DELETE_PARENT error should be emitted. */ - bool fk_error_if_delete_row; + bool fk_error_if_delete_row= false; /** Name of foreign key for the above error. */ - const char *fk_error_id; + const char *fk_error_id= nullptr; /** Name of table for the above error. */ - const char *fk_error_table; - bool modified_primary_key; + const char *fk_error_table= nullptr; + bool modified_primary_key= false; + /** Indicates that we are altering temporary table */ + bool tmp_table= false; private: char new_filename[FN_REFLEN + 1]; @@ -334,11 +354,6 @@ private: char new_path[FN_REFLEN + 1]; char tmp_path[FN_REFLEN + 1]; -#ifdef DBUG_ASSERT_EXISTS - /** Indicates that we are altering temporary table. Used only in asserts. 
*/ - bool tmp_table; -#endif - Alter_table_ctx &operator=(const Alter_table_ctx &rhs); // not implemented Alter_table_ctx(const Alter_table_ctx &rhs); // not implemented }; diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index 8e833035b96..4c853689504 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -679,10 +679,19 @@ int analyse::end_of_records() String *res, s_min(buff, sizeof(buff),&my_charset_bin), s_max(buff, sizeof(buff),&my_charset_bin), ans(buff, sizeof(buff),&my_charset_bin); + StringBuffer<NAME_LEN> name; for (; f != f_end; f++) { - func_items[0]->set((*f)->item->full_name()); + /* + We have to make a copy of full_name() as it stores it's value in str_value, + which is reset by save_str_in_field + */ + LEX_CSTRING col_name= (*f)->item->full_name_cstring(); + name.set_buffer_if_not_allocated(&my_charset_bin); + name.copy(col_name.str, col_name.length, &my_charset_bin); + func_items[0]->set((char*) name.ptr(), name.length(), &my_charset_bin); + if (!(*f)->found) { func_items[1]->null_value = 1; @@ -1161,16 +1170,16 @@ bool analyse::change_columns(THD *thd, List<Item> &field_list) func_items[0]= new (mem_root) Item_proc_string(thd, "Field_name", 255); func_items[1]= new (mem_root) Item_proc_string(thd, "Min_value", 255); - func_items[1]->maybe_null = 1; + func_items[1]->set_maybe_null(); func_items[2]= new (mem_root) Item_proc_string(thd, "Max_value", 255); - func_items[2]->maybe_null = 1; + func_items[2]->set_maybe_null(); func_items[3]= new (mem_root) Item_proc_int(thd, "Min_length"); func_items[4]= new (mem_root) Item_proc_int(thd, "Max_length"); func_items[5]= new (mem_root) Item_proc_int(thd, "Empties_or_zeros"); func_items[6]= new (mem_root) Item_proc_int(thd, "Nulls"); func_items[7]= new (mem_root) Item_proc_string(thd, "Avg_value_or_avg_length", 255); func_items[8]= new (mem_root) Item_proc_string(thd, "Std", 255); - func_items[8]->maybe_null = 1; + func_items[8]->set_maybe_null(); func_items[9]= new (mem_root) Item_proc_string(thd, 
"Optimal_fieldtype", MY_MAX(64, output_str_length)); @@ -1228,4 +1237,4 @@ uint check_ulonglong(const char *str, uint length) } while (*cmp && *cmp++ == *str++) ; return ((uchar) str[-1] <= (uchar) cmp[-1]) ? smaller : bigger; -} /* check_ulonlong */ +} /* check_ulonglong */ diff --git a/sql/sql_analyse.h b/sql/sql_analyse.h index 9cdb93f4d6f..e76ff13c06e 100644 --- a/sql/sql_analyse.h +++ b/sql/sql_analyse.h @@ -116,8 +116,8 @@ class field_str :public field_info public: field_str(Item* a, analyse* b) :field_info(a,b), - min_arg("",default_charset_info), - max_arg("",default_charset_info), sum(0), + min_arg("",0,default_charset_info), + max_arg("",0,default_charset_info), sum(0), must_be_blob(0), was_zero_fill(0), was_maybe_zerofill(0), can_be_still_num(1) { init_tree(&tree, 0, 0, sizeof(String), (qsort_cmp2) sortcmp2, diff --git a/sql/sql_analyze_stmt.cc b/sql/sql_analyze_stmt.cc index 2f87b9b0d40..60a08c86bf7 100644 --- a/sql/sql_analyze_stmt.cc +++ b/sql/sql_analyze_stmt.cc @@ -81,26 +81,26 @@ void Filesort_tracker::print_json_members(Json_writer *writer) } get_data_format(&str); - writer->add_member("r_sort_mode").add_str(str.c_ptr(), str.length()); + writer->add_member("r_sort_mode").add_str(str.ptr(), str.length()); } void Filesort_tracker::get_data_format(String *str) { if (r_sort_keys_packed) - str->append("packed_sort_key"); + str->append(STRING_WITH_LEN("packed_sort_key")); else - str->append("sort_key"); - str->append(","); + str->append(STRING_WITH_LEN("sort_key")); + str->append(','); if (r_using_addons) { if (r_packed_addon_fields) - str->append("packed_addon_fields"); + str->append(STRING_WITH_LEN("packed_addon_fields")); else - str->append("addon_fields"); + str->append(STRING_WITH_LEN("addon_fields")); } else - str->append("rowid"); + str->append(STRING_WITH_LEN("rowid")); } void attach_gap_time_tracker(THD *thd, Gap_time_tracker *gap_tracker, diff --git a/sql/sql_array.h b/sql/sql_array.h index b6de1b18d78..8610e971016 100644 --- a/sql/sql_array.h 
+++ b/sql/sql_array.h @@ -112,7 +112,7 @@ private: template <class Elem> class Dynamic_array { - DYNAMIC_ARRAY array; + DYNAMIC_ARRAY array; public: Dynamic_array(PSI_memory_key psi_key, uint prealloc=16, uint increment=16) { @@ -170,6 +170,8 @@ public: return ((const Elem*)array.buffer) + array.elements - 1; } + size_t size() const { return array.elements; } + const Elem *end() const { return back() + 1; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 607d03d3450..5de9782fd09 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -54,7 +54,7 @@ #include "sql_table.h" // build_table_filename #include "datadict.h" // dd_frm_is_view() #include "rpl_rli.h" // rpl_group_info -#ifdef __WIN__ +#ifdef _WIN32 #include <io.h> #endif #include "wsrep_mysqld.h" @@ -515,7 +515,7 @@ public: Sql_condition ** cond_hdl) { *cond_hdl= NULL; - if (sql_errno == ER_OPEN_AS_READONLY) + if (sql_errno == ER_OPEN_AS_READONLY || sql_errno == ER_LOCK_WAIT_TIMEOUT) { handled_errors++; return TRUE; @@ -554,6 +554,7 @@ bool flush_tables(THD *thd, flush_tables_type flag) DBUG_ENTER("flush_tables"); purge_tables(); /* Flush unused tables and shares */ + DEBUG_SYNC(thd, "after_purge_tables"); /* Loop over all shares and collect shares that have open tables @@ -593,29 +594,49 @@ bool flush_tables(THD *thd, flush_tables_type flag) if (table) { (void) table->file->extra(HA_EXTRA_FLUSH); + DEBUG_SYNC(table->in_use, "before_tc_release_table"); tc_release_table(table); } else { /* - HA_OPEN_FOR_FLUSH is used to allow us to open the table even if - TABLE_SHARE::incompatible_version is set. It also will tell - SEQUENCE engine that we don't have to read the sequence information - (which may cause deadlocks with concurrently running ALTER TABLE or - ALTER SEQUENCE) as we will close the table at once. + No free TABLE instances available. We have to open a new one. + + Try to take a MDL lock to ensure we can open a new table instance. 
+ If the lock fails, it means that some DDL operation or flush tables + with read lock is ongoing. + In this case we cannot sending the HA_EXTRA_FLUSH signal. */ - if (!open_table_from_share(thd, share, &empty_clex_str, - HA_OPEN_KEYFILE, 0, - HA_OPEN_FOR_ALTER | HA_OPEN_FOR_FLUSH, - tmp_table, FALSE, - NULL)) + + MDL_request mdl_request; + MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, + share->db.str, + share->table_name.str, + MDL_SHARED, MDL_EXPLICIT); + + if (!thd->mdl_context.acquire_lock(&mdl_request, 0)) { - (void) tmp_table->file->extra(HA_EXTRA_FLUSH); /* - We don't put the table into the TDC as the table was not fully - opened (we didn't open triggers) + HA_OPEN_FOR_FLUSH is used to allow us to open the table even if + TABLE_SHARE::incompatible_version is set. It also will tell + SEQUENCE engine that we don't have to read the sequence information + (which may cause deadlocks with concurrently running ALTER TABLE or + ALTER SEQUENCE) as we will close the table at once. */ - closefrm(tmp_table); + if (!open_table_from_share(thd, share, &empty_clex_str, + HA_OPEN_KEYFILE, 0, + HA_OPEN_FOR_ALTER | HA_OPEN_FOR_FLUSH, + tmp_table, FALSE, + NULL)) + { + (void) tmp_table->file->extra(HA_EXTRA_FLUSH); + /* + We don't put the table into the TDC as the table was not fully + opened (we didn't open triggers) + */ + closefrm(tmp_table); + } + thd->mdl_context.release_lock(mdl_request.ticket); } } tdc_release_share(share); @@ -2596,10 +2617,15 @@ void Locked_tables_list::mark_table_for_reopen(THD *thd, TABLE *table) { TABLE_SHARE *share= table->s; - for (TABLE_LIST *table_list= m_locked_tables; + for (TABLE_LIST *table_list= m_locked_tables; table_list; table_list= table_list->next_global) { - if (table_list->table->s == share) + /* + table_list->table can be NULL in the case of TRUNCATE TABLE where + the table was locked twice and one instance closed in + close_all_tables_for_name(). 
+ */ + if (table_list->table && table_list->table->s == share) table_list->table->internal_set_needs_reopen(true); } /* This is needed in the case where lock tables where not used */ @@ -2754,7 +2780,7 @@ bool Locked_tables_list::restore_lock(THD *thd, TABLE_LIST *dst_table_list, add_back_last_deleted_lock(dst_table_list); table->mdl_ticket->downgrade_lock(table->reginfo.lock_type >= - TL_WRITE_ALLOW_WRITE ? + TL_FIRST_WRITE ? MDL_SHARED_NO_READ_WRITE : MDL_SHARED_READ); @@ -2828,7 +2854,13 @@ static bool check_and_update_table_version(THD *thd, TABLE_LIST *tables, TABLE_SHARE *table_share) { - if (! tables->is_the_same_definition(thd, table_share)) + /* + First, verify that TABLE_LIST was indeed *created by the parser* - + it must be in the global TABLE_LIST list. Standalone TABLE_LIST objects + created with TABLE_LIST::init_one_table() have a short life time and + aren't linked anywhere. + */ + if (tables->prev_global && !tables->is_the_same_definition(thd, table_share)) { if (thd->m_reprepare_observer && thd->m_reprepare_observer->report_error(thd)) @@ -2982,9 +3014,9 @@ static bool open_table_entry_fini(THD *thd, TABLE_SHARE *share, TABLE *entry) String query(query_buf, sizeof(query_buf), system_charset_info); query.length(0); - query.append("DELETE FROM "); + query.append(STRING_WITH_LEN("DELETE FROM ")); append_identifier(thd, &query, &share->db); - query.append("."); + query.append('.'); append_identifier(thd, &query, &share->table_name); /* @@ -3549,7 +3581,7 @@ bool extend_table_list(THD *thd, TABLE_LIST *tables, bool error= false; LEX *lex= thd->lex; bool maybe_need_prelocking= - (tables->updating && tables->lock_type >= TL_WRITE_ALLOW_WRITE) + (tables->updating && tables->lock_type >= TL_FIRST_WRITE) || thd->lex->default_used; if (thd->locked_tables_mode <= LTM_LOCK_TABLES && @@ -3690,6 +3722,14 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags, error= TRUE; goto end; } + + if (tables->table_function) + { + if 
(!create_table_for_function(thd, tables)) + error= TRUE; + goto end; + } + DBUG_PRINT("tcache", ("opening table: '%s'.'%s' item: %p", tables->db.str, tables->table_name.str, tables)); (*counter)++; @@ -3924,7 +3964,7 @@ static bool upgrade_lock_if_not_exists(THD *thd, DEBUG_SYNC(thd,"create_table_before_check_if_exists"); if (!create_info.or_replace() && ha_table_exists(thd, &create_table->db, &create_table->table_name, - &create_table->db_type)) + NULL, NULL, &create_table->db_type)) { if (create_info.if_not_exists()) { @@ -4436,47 +4476,73 @@ restart: /* Set appropriate TABLE::lock_type. */ if (tbl && tables->lock_type != TL_UNLOCK && !thd->locked_tables_mode) { - if (tables->lock_type == TL_WRITE_DEFAULT) - tbl->reginfo.lock_type= thd->update_lock_default; - else if (tables->lock_type == TL_READ_DEFAULT) - tbl->reginfo.lock_type= - read_lock_type_for_table(thd, thd->lex, tables, - some_routine_modifies_data); + if (tables->lock_type == TL_WRITE_DEFAULT || + unlikely(tables->lock_type == TL_WRITE_SKIP_LOCKED && + !(tables->table->file->ha_table_flags() & HA_CAN_SKIP_LOCKED))) + tbl->reginfo.lock_type= thd->update_lock_default; + else if (likely(tables->lock_type == TL_READ_DEFAULT) || + (tables->lock_type == TL_READ_SKIP_LOCKED && + !(tables->table->file->ha_table_flags() & HA_CAN_SKIP_LOCKED))) + tbl->reginfo.lock_type= read_lock_type_for_table(thd, thd->lex, tables, + some_routine_modifies_data); else tbl->reginfo.lock_type= tables->lock_type; + tbl->reginfo.skip_locked= tables->skip_locked; } - } - #ifdef WITH_WSREP - if (WSREP(thd) && - wsrep_replicate_myisam && - (*start) && - (*start)->table && - (*start)->table->file->ht == myisam_hton && - wsrep_thd_is_local(thd) && - !is_stat_table(&(*start)->db, &(*start)->alias) && - thd->get_command() != COM_STMT_PREPARE && - !thd->stmt_arena->is_stmt_prepare() && - ((thd->lex->sql_command == SQLCOM_INSERT || - thd->lex->sql_command == SQLCOM_INSERT_SELECT || - thd->lex->sql_command == SQLCOM_REPLACE || - 
thd->lex->sql_command == SQLCOM_REPLACE_SELECT || - thd->lex->sql_command == SQLCOM_UPDATE || - thd->lex->sql_command == SQLCOM_UPDATE_MULTI || - thd->lex->sql_command == SQLCOM_LOAD || - thd->lex->sql_command == SQLCOM_DELETE))) - { - wsrep_before_rollback(thd, true); - wsrep_after_rollback(thd, true); - wsrep_after_statement(thd); - WSREP_TO_ISOLATION_BEGIN(NULL, NULL, (*start)); - } + /* + At this point we have SE associated with table so we can check wsrep_mode + rules at this point. + */ + if (WSREP(thd) && + wsrep_thd_is_local(thd) && + tbl && + tables == *start && + !wsrep_check_mode_after_open_table(thd, + tbl->file->ht, tables)) + { + error= TRUE; + goto error; + } + + /* If user has issued wsrep_on = OFF and wsrep was on before + we need to check is local gtid feature disabled */ + if (thd->wsrep_was_on && + thd->variables.sql_log_bin == 1 && + !WSREP(thd) && + wsrep_check_mode(WSREP_MODE_DISALLOW_LOCAL_GTID)) + { + enum_sql_command sql_command= thd->lex->sql_command; + bool is_dml_stmt= thd->get_command() != COM_STMT_PREPARE && + !thd->stmt_arena->is_stmt_prepare() && + (sql_command == SQLCOM_INSERT || + sql_command == SQLCOM_INSERT_SELECT || + sql_command == SQLCOM_REPLACE || + sql_command == SQLCOM_REPLACE_SELECT || + sql_command == SQLCOM_UPDATE || + sql_command == SQLCOM_UPDATE_MULTI || + sql_command == SQLCOM_LOAD || + sql_command == SQLCOM_DELETE); + + if (is_dml_stmt && !is_temporary_table(tables)) + { + /* wsrep_mode = WSREP_MODE_DISALLOW_LOCAL_GTID, treat as error */ + my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0)); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_OPTION_PREVENTS_STATEMENT, + "You can't execute statements that would generate local " + "GTIDs when wsrep_mode = DISALLOW_LOCAL_GTID is set. 
" + "Try disabling binary logging with SET sql_log_bin=0 " + "to execute this statement."); + + error= TRUE; + goto error; + } + } #endif /* WITH_WSREP */ + } error: -#ifdef WITH_WSREP -wsrep_error_label: -#endif THD_STAGE_INFO(thd, stage_after_opening_tables); thd_proc_info(thd, 0); @@ -4513,9 +4579,9 @@ wsrep_error_label: @retval TRUE Failure (OOM). */ -bool DML_prelocking_strategy:: -handle_routine(THD *thd, Query_tables_list *prelocking_ctx, - Sroutine_hash_entry *rt, sp_head *sp, bool *need_prelocking) +bool DML_prelocking_strategy::handle_routine(THD *thd, + Query_tables_list *prelocking_ctx, Sroutine_hash_entry *rt, + sp_head *sp, bool *need_prelocking) { /* We assume that for any "CALL proc(...)" statement sroutines_list will @@ -4659,9 +4725,9 @@ prepare_fk_prelocking_list(THD *thd, Query_tables_list *prelocking_ctx, // FK_OPTION_RESTRICT and FK_OPTION_NO_ACTION only need read access thr_lock_type lock_type; - if ((op & (1 << TRG_EVENT_DELETE) && fk_modifies_child(fk->delete_method)) - || (op & (1 << TRG_EVENT_UPDATE) && fk_modifies_child(fk->update_method))) - lock_type= TL_WRITE_ALLOW_WRITE; + if ((op & trg2bit(TRG_EVENT_DELETE) && fk_modifies_child(fk->delete_method)) + || (op & trg2bit(TRG_EVENT_UPDATE) && fk_modifies_child(fk->update_method))) + lock_type= TL_FIRST_WRITE; else lock_type= TL_READ; @@ -4706,14 +4772,14 @@ prepare_fk_prelocking_list(THD *thd, Query_tables_list *prelocking_ctx, @retval TRUE Failure (OOM). */ -bool DML_prelocking_strategy:: -handle_table(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list, bool *need_prelocking) +bool DML_prelocking_strategy::handle_table(THD *thd, + Query_tables_list *prelocking_ctx, TABLE_LIST *table_list, + bool *need_prelocking) { DBUG_ENTER("handle_table"); TABLE *table= table_list->table; /* We rely on a caller to check that table is going to be changed. 
*/ - DBUG_ASSERT(table_list->lock_type >= TL_WRITE_ALLOW_WRITE || + DBUG_ASSERT(table_list->lock_type >= TL_FIRST_WRITE || thd->lex->default_used); if (table_list->trg_event_map) @@ -4837,9 +4903,9 @@ err: @retval TRUE Failure (OOM). */ -bool DML_prelocking_strategy:: -handle_view(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list, bool *need_prelocking) +bool DML_prelocking_strategy::handle_view(THD *thd, + Query_tables_list *prelocking_ctx, TABLE_LIST *table_list, + bool *need_prelocking) { if (table_list->view->uses_stored_routines()) { @@ -4877,9 +4943,9 @@ handle_view(THD *thd, Query_tables_list *prelocking_ctx, @retval TRUE Failure (OOM). */ -bool Lock_tables_prelocking_strategy:: -handle_table(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list, bool *need_prelocking) +bool Lock_tables_prelocking_strategy::handle_table(THD *thd, + Query_tables_list *prelocking_ctx, TABLE_LIST *table_list, + bool *need_prelocking) { TABLE_LIST **last= prelocking_ctx->query_tables_last; @@ -4895,7 +4961,7 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx, tl->open_strategy= TABLE_LIST::OPEN_NORMAL; /* We rely on a caller to check that table is going to be changed. */ - DBUG_ASSERT(table_list->lock_type >= TL_WRITE_ALLOW_WRITE); + DBUG_ASSERT(table_list->lock_type >= TL_FIRST_WRITE); return FALSE; } @@ -4910,9 +4976,9 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx, a simple view, but one that uses stored routines. */ -bool Alter_table_prelocking_strategy:: -handle_routine(THD *thd, Query_tables_list *prelocking_ctx, - Sroutine_hash_entry *rt, sp_head *sp, bool *need_prelocking) +bool Alter_table_prelocking_strategy::handle_routine(THD *thd, + Query_tables_list *prelocking_ctx, Sroutine_hash_entry *rt, + sp_head *sp, bool *need_prelocking) { return FALSE; } @@ -4936,9 +5002,9 @@ handle_routine(THD *thd, Query_tables_list *prelocking_ctx, @retval TRUE Failure (OOM). 
*/ -bool Alter_table_prelocking_strategy:: -handle_table(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list, bool *need_prelocking) +bool Alter_table_prelocking_strategy::handle_table(THD *thd, + Query_tables_list *prelocking_ctx, TABLE_LIST *table_list, + bool *need_prelocking) { return FALSE; } @@ -4951,9 +5017,9 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx, to be materialized. */ -bool Alter_table_prelocking_strategy:: -handle_view(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list, bool *need_prelocking) +bool Alter_table_prelocking_strategy::handle_view(THD *thd, + Query_tables_list *prelocking_ctx, TABLE_LIST *table_list, + bool *need_prelocking) { return FALSE; } @@ -5002,8 +5068,8 @@ static bool check_lock_and_start_stmt(THD *thd, else lock_type= table_list->lock_type; - if ((int) lock_type >= (int) TL_WRITE_ALLOW_WRITE && - (int) table_list->table->reginfo.lock_type < (int) TL_WRITE_ALLOW_WRITE) + if ((int) lock_type >= (int) TL_FIRST_WRITE && + (int) table_list->table->reginfo.lock_type < (int) TL_FIRST_WRITE) { my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), table_list->table->alias.c_ptr()); @@ -5490,7 +5556,8 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, uint flags) DEBUG_SYNC(thd, "after_lock_tables_takes_lock"); if (thd->lex->requires_prelocking() && - thd->lex->sql_command != SQLCOM_LOCK_TABLES) + thd->lex->sql_command != SQLCOM_LOCK_TABLES && + thd->lex->sql_command != SQLCOM_FLUSH) { /* We just have done implicit LOCK TABLES, and now we have @@ -5549,7 +5616,7 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, uint flags) a table that is already used by the calling statement. */ if (thd->locked_tables_mode >= LTM_PRELOCKED && - table->lock_type >= TL_WRITE_ALLOW_WRITE) + table->lock_type >= TL_FIRST_WRITE) { for (TABLE* opentab= thd->open_tables; opentab; opentab= opentab->next) { @@ -5783,7 +5850,7 @@ find_field_in_view(THD *thd, TABLE_LIST *table_list, replace. 
If the item was aliased by the user, set the alias to the replacing item. */ - if (*ref && !(*ref)->is_autogenerated_name()) + if (*ref && (*ref)->is_explicit_name()) item->set_name(thd, (*ref)->name); if (register_tree_change) thd->change_item_tree(ref, item); @@ -5874,7 +5941,7 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name, si replace. If the item was aliased by the user, set the alias to the replacing item. */ - if (*ref && !(*ref)->is_autogenerated_name()) + if (*ref && (*ref)->is_explicit_name()) item->set_name(thd, (*ref)->name); if (register_tree_change && arena) thd->restore_active_arena(arena, &backup); @@ -5948,10 +6015,10 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name, si Field * find_field_in_table(THD *thd, TABLE *table, const char *name, size_t length, - bool allow_rowid, uint *cached_field_index_ptr) + bool allow_rowid, field_index_t *cached_field_index_ptr) { Field *field; - uint cached_field_index= *cached_field_index_ptr; + field_index_t cached_field_index= *cached_field_index_ptr; DBUG_ENTER("find_field_in_table"); DBUG_PRINT("enter", ("table: '%s', field name: '%s'", table->alias.c_ptr(), name)); @@ -6043,9 +6110,11 @@ Field * find_field_in_table_ref(THD *thd, TABLE_LIST *table_list, const char *name, size_t length, const char *item_name, const char *db_name, - const char *table_name, Item **ref, + const char *table_name, + ignored_tables_list_t ignored_tables, + Item **ref, bool check_privileges, bool allow_rowid, - uint *cached_field_index_ptr, + field_index_t *cached_field_index_ptr, bool register_tree_change, TABLE_LIST **actual_table) { Field *fld; @@ -6135,9 +6204,16 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list, TABLE_LIST *table; while ((table= it++)) { + /* + Check if the table is in the ignore list. Only base tables can be in + the ignore list. 
+ */ + if (table->table && ignored_list_includes_table(ignored_tables, table)) + continue; + if ((fld= find_field_in_table_ref(thd, table, name, length, item_name, - db_name, table_name, ref, - check_privileges, allow_rowid, + db_name, table_name, ignored_tables, + ref, check_privileges, allow_rowid, cached_field_index_ptr, register_tree_change, actual_table))) DBUG_RETURN(fld); @@ -6261,6 +6337,8 @@ Field *find_field_in_table_sef(TABLE *table, const char *name) first_table list of tables to be searched for item last_table end of the list of tables to search for item. If NULL then search to the end of the list 'first_table'. + ignored_tables Set of tables that should be ignored. Do not try to + find the field in those. ref if 'item' is resolved to a view field, ref is set to point to the found view field report_error Degree of error reporting: @@ -6288,6 +6366,7 @@ Field *find_field_in_table_sef(TABLE *table, const char *name) Field * find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *first_table, TABLE_LIST *last_table, + ignored_tables_list_t ignored_tables, Item **ref, find_item_error_report_type report_error, bool check_privileges, bool register_tree_change) { @@ -6344,8 +6423,9 @@ find_field_in_tables(THD *thd, Item_ident *item, } else found= find_field_in_table_ref(thd, table_ref, name, length, item->name.str, - NULL, NULL, ref, check_privileges, - TRUE, &(item->cached_field_index), + NULL, NULL, ignored_tables, ref, + check_privileges, TRUE, + &(item->cached_field_index), register_tree_change, &actual_table); if (found) @@ -6414,8 +6494,13 @@ find_field_in_tables(THD *thd, Item_ident *item, for (; cur_table != last_table ; cur_table= cur_table->next_name_resolution_table) { + if (cur_table->table && + ignored_list_includes_table(ignored_tables, cur_table)) + continue; + Field *cur_field= find_field_in_table_ref(thd, cur_table, name, length, - item->name.str, db, table_name, ref, + item->name.str, db, table_name, + ignored_tables, ref, 
(thd->lex->sql_command == SQLCOM_SHOW_FIELDS) ? false : check_privileges, @@ -6432,8 +6517,8 @@ find_field_in_tables(THD *thd, Item_ident *item, thd->clear_error(); cur_field= find_field_in_table_ref(thd, cur_table, name, length, - item->name.str, db, table_name, ref, - false, + item->name.str, db, table_name, + ignored_tables, ref, false, allow_rowid, &(item->cached_field_index), register_tree_change, @@ -7348,7 +7433,7 @@ store_top_level_join_columns(THD *thd, TABLE_LIST *table_ref, /* Add a TRUE condition to outer joins that have no common columns. */ if (table_ref_2->outer_join && !table_ref_1->on_expr && !table_ref_2->on_expr) - table_ref_2->on_expr= new (thd->mem_root) Item_int(thd, (longlong) 1, 1); // Always true. + table_ref_2->on_expr= (Item*) &Item_true; /* Change this table reference to become a leaf for name resolution. */ if (left_neighbor) @@ -7644,8 +7729,8 @@ bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array, Item_window_func::split_sum_func. */ if (sum_func_list && - ((item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM) || - item->with_window_func)) + ((item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM) || + item->with_window_func())) { item->split_sum_func(thd, ref_pointer_array, *sum_func_list, SPLIT_SUM_SELECT); @@ -7653,6 +7738,8 @@ bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array, thd->lex->current_select->select_list_tables|= item->used_tables(); thd->lex->used_tables|= item->used_tables(); thd->lex->current_select->cur_pos_in_select_list++; + + thd->lex->current_select->rownum_in_field_list |= item->with_rownum_func(); } thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; thd->lex->current_select->cur_pos_in_select_list= UNDEF_POS; @@ -7942,7 +8029,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context, if (table_list->jtbm_subselect) { Item *item= table_list->jtbm_subselect->optimizer; - if (!table_list->jtbm_subselect->optimizer->fixed && + if 
(!table_list->jtbm_subselect->optimizer->fixed() && table_list->jtbm_subselect->optimizer->fix_fields(thd, &item)) { my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES)); /* psergey-todo: WHY ER_TOO_MANY_TABLES ???*/ @@ -9027,7 +9114,7 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order) Item_func_match *ifm; while ((ifm=li++)) - if (unlikely(!ifm->is_fixed())) + if (unlikely(!ifm->fixed())) /* it mean that clause where was FT function was removed, so we have to remove the function from the list. @@ -9099,7 +9186,7 @@ open_system_tables_for_read(THD *thd, TABLE_LIST *table_list) if (open_and_lock_tables(thd, table_list, FALSE, (MYSQL_OPEN_IGNORE_FLUSH | MYSQL_OPEN_IGNORE_LOGGING_FORMAT | - (table_list->lock_type < TL_WRITE_ALLOW_WRITE ? + (table_list->lock_type < TL_FIRST_WRITE ? MYSQL_LOCK_IGNORE_TIMEOUT : 0)))) { lex->restore_backup_query_tables_list(&query_tables_list_backup); @@ -9297,21 +9384,6 @@ int dynamic_column_error_message(enum_dyncol_func_result rc) return rc; } - -/** - Turn on the SELECT_DESCRIBE flag for the primary SELECT_LEX of the statement - being processed in case the statement is EXPLAIN UPDATE/DELETE. 
- - @param lex current LEX -*/ - -void promote_select_describe_flag_if_needed(LEX *lex) -{ - if (lex->describe) - lex->first_select_lex()->options|= SELECT_DESCRIBE; -} - - /** @} (end of group Data_Dictionary) */ diff --git a/sql/sql_base.h b/sql/sql_base.h index 79f54dfe1ed..5b449fdddac 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -195,19 +195,21 @@ bool fill_record(THD *thd, TABLE *table, Field **field, List<Item> &values, Field * find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *first_table, TABLE_LIST *last_table, + ignored_tables_list_t ignored_tables, Item **ref, find_item_error_report_type report_error, bool check_privileges, bool register_tree_change); Field * find_field_in_table_ref(THD *thd, TABLE_LIST *table_list, const char *name, size_t length, const char *item_name, const char *db_name, - const char *table_name, Item **ref, - bool check_privileges, bool allow_rowid, - uint *cached_field_index_ptr, + const char *table_name, + ignored_tables_list_t ignored_tables, + Item **ref, bool check_privileges, bool allow_rowid, + field_index_t *cached_field_index_ptr, bool register_tree_change, TABLE_LIST **actual_table); Field * find_field_in_table(THD *thd, TABLE *table, const char *name, size_t length, - bool allow_rowid, uint *cached_field_index_ptr); + bool allow_rowid, field_index_t *cached_field_index_ptr); Field * find_field_in_table_sef(TABLE *table, const char *name); Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter, @@ -516,8 +518,6 @@ bool extend_table_list(THD *thd, TABLE_LIST *tables, Prelocking_strategy *prelocking_strategy, bool has_prelocking_list); -void promote_select_describe_flag_if_needed(LEX *lex); - /** A context of open_tables() function, used to recover from a failed open_table() or open_routine() attempt. 
diff --git a/sql/sql_basic_types.h b/sql/sql_basic_types.h index 3200228618f..f592aed05a8 100644 --- a/sql/sql_basic_types.h +++ b/sql/sql_basic_types.h @@ -150,9 +150,11 @@ public: explicit time_round_mode_t(ulonglong mode) :m_mode((value_t) mode) { +#ifdef MYSQL_SERVER DBUG_ASSERT(mode == FRAC_NONE || mode == FRAC_TRUNCATE || mode == FRAC_ROUND); +#endif } // Conversion operators explicit operator ulonglong() const diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index 4dd5f16f351..9f61135232f 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -206,7 +206,7 @@ void mysql_client_binlog_statement(THD* thd) int err; Relay_log_info *rli; rpl_group_info *rgi; - char *buf= NULL; + uchar *buf= NULL; size_t coded_len= 0, decoded_len= 0; rli= thd->rli_fake; @@ -242,7 +242,7 @@ void mysql_client_binlog_statement(THD* thd) } decoded_len= my_base64_needed_decoded_length((int)coded_len); - if (!(buf= (char *) my_malloc(key_memory_binlog_statement_buffer, + if (!(buf= (uchar *) my_malloc(key_memory_binlog_statement_buffer, decoded_len, MYF(MY_WME)))) { my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), 1); @@ -298,7 +298,7 @@ void mysql_client_binlog_statement(THD* thd) Now we start to read events of the buffer, until there are no more. */ - for (char *bufptr= buf ; bytes_decoded > 0 ; ) + for (uchar *bufptr= buf ; bytes_decoded > 0 ; ) { /* Checking that the first event in the buffer is not truncated. @@ -353,8 +353,28 @@ void mysql_client_binlog_statement(THD* thd) (ev->flags & LOG_EVENT_SKIP_REPLICATION_F ? OPTION_SKIP_REPLICATION : 0); - err= ev->apply_event(rgi); + { + /* + For conventional statements thd->lex points to thd->main_lex, that is + thd->lex == &thd->main_lex. On the other hand, for prepared statement + thd->lex points to the LEX object explicitly allocated for execution + of the prepared statement and in this case thd->lex != &thd->main_lex. 
+ On handling the BINLOG statement, invocation of ev->apply_event(rgi) + initiates the following sequence of calls + Rows_log_event::do_apply_event -> THD::reset_for_next_command + Since the method THD::reset_for_next_command() contains assert + DBUG_ASSERT(lex == &main_lex) + this sequence of calls results in crash when a binlog event is + applied in PS mode. So, reset the current lex temporary to point to + thd->main_lex before running ev->apply_event() and restore its + original value on return. + */ + LEX *backup_lex; + thd->backup_and_reset_current_lex(&backup_lex); + err= ev->apply_event(rgi); + thd->restore_current_lex(backup_lex); + } thd->variables.option_bits= (thd->variables.option_bits & ~OPTION_SKIP_REPLICATION) | save_skip_replication; diff --git a/sql/sql_bootstrap.cc b/sql/sql_bootstrap.cc index dbeb971cd5a..b39d7a57bc0 100644 --- a/sql/sql_bootstrap.cc +++ b/sql/sql_bootstrap.cc @@ -18,9 +18,20 @@ #include <ctype.h> #include <string.h> #include "sql_bootstrap.h" +#include <string> -int read_bootstrap_query(char *query, int *query_length, - fgets_input_t input, fgets_fn_t fgets_fn, int *error) +static bool is_end_of_query(const char *line, size_t len, + const std::string& delimiter) +{ + if (delimiter.length() > len) + return false; + return !strcmp(line + len-delimiter.length(),delimiter.c_str()); +} + +static std::string delimiter= ";"; +extern "C" int read_bootstrap_query(char *query, int *query_length, + fgets_input_t input, fgets_fn_t fgets_fn, + int preserve_delimiter, int *error) { char line_buffer[MAX_BOOTSTRAP_LINE_SIZE]; const char *line; @@ -73,9 +84,32 @@ int read_bootstrap_query(char *query, int *query_length, if ((line[0] == '-') && (line[1] == '-')) continue; - /* Skip delimiter, ignored. 
*/ - if (strncmp(line, "delimiter", 9) == 0) + size_t i=0; + while (line[i] == ' ') + i++; + + /* Skip -- comments */ + if (line[i] == '-' && line[i+1] == '-') + continue; + + if (strncmp(line, "DELIMITER", 9) == 0) + { + const char *p= strrchr(line,' '); + if (!p || !p[1]) + { + /* Invalid DELIMITER specifier */ + return READ_BOOTSTRAP_ERROR; + } + delimiter.assign(p+1); + if (preserve_delimiter) + { + memcpy(query,line,len); + query[len]=0; + *query_length = (int)len; + return READ_BOOTSTRAP_SUCCESS; + } continue; + } /* Append the current line to a multi line query. If the new line will make the query too long, preserve the partial line to provide context for the @@ -105,13 +139,18 @@ int read_bootstrap_query(char *query, int *query_length, memcpy(query + query_len, line, len); query_len+= len; - if (line[len - 1] == ';') + if (is_end_of_query(line, len, delimiter)) { /* - The last line is terminated by ';'. + The last line is terminated by delimiter Return the query found. */ - query[query_len]= '\0'; + if (!preserve_delimiter) + { + query_len-= delimiter.length(); + query[query_len++]= ';'; + } + query[query_len]= 0; *query_length= (int)query_len; return READ_BOOTSTRAP_SUCCESS; } diff --git a/sql/sql_bootstrap.h b/sql/sql_bootstrap.h index dc6abb0a5bd..e5b9b3a55c2 100644 --- a/sql/sql_bootstrap.h +++ b/sql/sql_bootstrap.h @@ -39,8 +39,16 @@ typedef void *fgets_input_t; typedef char * (*fgets_fn_t)(char *, size_t, fgets_input_t, int *error); -int read_bootstrap_query(char *query, int *query_length, - fgets_input_t input, fgets_fn_t fgets_fn, int *error); +#ifdef __cplusplus +extern "C" { +#endif +int read_bootstrap_query(char *query, int *query_length, fgets_input_t input, + fgets_fn_t fgets_fn, + int preserve_delimiter, + int *error); +#ifdef __cplusplus +} +#endif #endif diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 8655a75a455..b284189db23 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -2299,7 +2299,7 @@ void 
Query_cache::invalidate_locked_for_write(THD *thd, for (; tables_used; tables_used= tables_used->next_local) { THD_STAGE_INFO(thd, stage_invalidating_query_cache_entries_table); - if (tables_used->lock_type >= TL_WRITE_ALLOW_WRITE && + if (tables_used->lock_type >= TL_FIRST_WRITE && tables_used->table) { invalidate_table(thd, tables_used->table); @@ -3395,9 +3395,10 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used, tables_used; tables_used= tables_used->next_global, n++, (*block_table)++) { - if (tables_used->is_anonymous_derived_table()) + if (tables_used->is_anonymous_derived_table() || + tables_used->table_function) { - DBUG_PRINT("qcache", ("derived table skipped")); + DBUG_PRINT("qcache", ("derived table or table function skipped")); n--; (*block_table)--; continue; @@ -4100,11 +4101,13 @@ Query_cache::process_and_count_tables(THD *thd, TABLE_LIST *tables_used, *tables_type|= HA_CACHE_TBL_NONTRANSACT; continue; } - if (tables_used->derived) + if (tables_used->derived || tables_used->table_function) { DBUG_PRINT("qcache", ("table: %s", tables_used->alias.str)); table_count--; - DBUG_PRINT("qcache", ("derived table skipped")); + DBUG_PRINT("qcache", (tables_used->table_function ? 
+ "table function skipped" : + "derived table skipped")); continue; } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index bdb582e4ebf..6276b00b939 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -326,17 +326,6 @@ bool Foreign_key::validate(List<Create_field> &table_fields) ** Thread specific functions ****************************************************************************/ -/** - Get current THD object from thread local data - - @retval The THD object for the thread, NULL if not connection thread -*/ -THD *thd_get_current_thd() -{ - return current_thd; -} - - extern "C" unsigned long long thd_query_id(const MYSQL_THD thd) { return((unsigned long long)thd->query_id); @@ -512,6 +501,18 @@ int thd_sql_command(const THD *thd) return (int) thd->lex->sql_command; } +/* + Returns options used with DDL's, like IF EXISTS etc... + Will returns 'nonsense' if the command was not a DDL. +*/ + +extern "C" +struct DDL_options_st *thd_ddl_options(const THD *thd) +{ + return &thd->lex->create_info; +} + + extern "C" int thd_tx_isolation(const THD *thd) { @@ -650,6 +651,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) thread_dbug_id(id), os_thread_id(0), global_disable_checkpoint(0), + current_backup_stage(BACKUP_FINISHED), failed_com_change_user(0), is_fatal_error(0), transaction_rollback_request(0), @@ -674,7 +676,8 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) m_stmt_da(&main_da), tdc_hash_pins(0), xid_hash_pins(0), - m_tmp_tables_locked(false) + m_tmp_tables_locked(false), + async_state() #ifdef HAVE_REPLICATION , current_linfo(0), @@ -704,6 +707,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) wsrep_current_gtid_seqno(0), wsrep_affected_rows(0), wsrep_has_ignored_error(false), + wsrep_was_on(false), wsrep_ignore_table(false), wsrep_aborter(0), wsrep_delayed_BF_abort(false), @@ -801,6 +805,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) mysys_var=0; binlog_evt_union.do_union= FALSE; binlog_table_maps= FALSE; + binlog_xid= 0; enable_slow_log= 
0; durability_property= HA_REGULAR_DURABILITY; @@ -1854,7 +1859,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var, } #define SECONDS_TO_WAIT_FOR_KILL 2 -#if !defined(__WIN__) && defined(HAVE_SELECT) +#if !defined(_WIN32) && defined(HAVE_SELECT) /* my_sleep() can wait for sub second times */ #define WAIT_FOR_KILL_TRY_TIMES 20 #else @@ -1872,13 +1877,14 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var, @note Do always call this while holding LOCK_thd_kill. NOT_KILLED is used to awake a thread for a slave */ - +extern std::atomic<my_thread_id> shutdown_thread_id; void THD::awake_no_mutex(killed_state state_to_set) { DBUG_ENTER("THD::awake_no_mutex"); DBUG_PRINT("enter", ("this: %p current_thd: %p state: %d", this, current_thd, (int) state_to_set)); THD_CHECK_SENTRY(this); + mysql_mutex_assert_owner(&LOCK_thd_data); mysql_mutex_assert_owner(&LOCK_thd_kill); print_aborted_warning(3, "KILLED"); @@ -1895,7 +1901,7 @@ void THD::awake_no_mutex(killed_state state_to_set) if (state_to_set >= KILL_CONNECTION || state_to_set == NOT_KILLED) { #ifdef SIGNAL_WITH_VIO_CLOSE - if (this != current_thd) + if (this != current_thd && thread_id != shutdown_thread_id) { if(active_vio) vio_shutdown(active_vio, SHUT_RDWR); @@ -2043,8 +2049,6 @@ bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use, if (needs_thr_lock_abort) { - bool mutex_released= false; - mysql_mutex_lock(&in_use->LOCK_thd_kill); mysql_mutex_lock(&in_use->LOCK_thd_data); /* If not already dying */ if (in_use->killed != KILL_CONNECTION_HARD) @@ -2061,25 +2065,12 @@ bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use, thread can see those instances (e.g. see partitioning code). 
*/ if (!thd_table->needs_reopen()) + { signalled|= mysql_lock_abort_for_thread(this, thd_table); + } } -#ifdef WITH_WSREP - if (WSREP(this) && wsrep_thd_is_BF(this, false)) - { - WSREP_DEBUG("notify_shared_lock: BF thread %llu query %s" - " victim %llu query %s", - this->real_id, wsrep_thd_query(this), - in_use->real_id, wsrep_thd_query(in_use)); - wsrep_abort_thd(this, in_use, false); - mutex_released= true; - } -#endif /* WITH_WSREP */ - } - if (!mutex_released) - { - mysql_mutex_unlock(&in_use->LOCK_thd_data); - mysql_mutex_unlock(&in_use->LOCK_thd_kill); } + mysql_mutex_unlock(&in_use->LOCK_thd_data); } DBUG_RETURN(signalled); } @@ -2363,7 +2354,7 @@ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, { my_error(ER_BAD_DATA, MYF(0), ErrConvString(from, from_length, from_cs).ptr(), - to_cs->csname); + to_cs->cs_name.str); DBUG_RETURN(true); } DBUG_RETURN(false); @@ -2467,7 +2458,8 @@ public: if (most_important_error_pos()) { ErrConvString err(src, src_length, &my_charset_bin); - my_error(ER_INVALID_CHARACTER_STRING, MYF(0), srccs->csname, err.ptr()); + my_error(ER_INVALID_CHARACTER_STRING, MYF(0), srccs->cs_name.str, + err.ptr()); return true; } return false; @@ -2535,7 +2527,7 @@ bool THD::check_string_for_wellformedness(const char *str, if (wlen < length) { ErrConvString err(str, length, &my_charset_bin); - my_error(ER_INVALID_CHARACTER_STRING, MYF(0), cs->csname, err.ptr()); + my_error(ER_INVALID_CHARACTER_STRING, MYF(0), cs->cs_name.str, err.ptr()); return true; } return false; @@ -2782,45 +2774,45 @@ void THD::make_explain_field_list(List<Item> &field_list, uint8 explain_flags, field_list.push_back(item= new (mem_root) Item_return_int(this, "id", 3, MYSQL_TYPE_LONGLONG), mem_root); - item->maybe_null= 1; + item->set_maybe_null(); field_list.push_back(new (mem_root) Item_empty_string(this, "select_type", 19, cs), mem_root); field_list.push_back(item= new (mem_root) Item_empty_string(this, "table", NAME_CHAR_LEN, cs), mem_root); - item->maybe_null= 
1; + item->set_maybe_null(); if (explain_flags & DESCRIBE_PARTITIONS) { /* Maximum length of string that make_used_partitions_str() can produce */ item= new (mem_root) Item_empty_string(this, "partitions", MAX_PARTITIONS * (1 + FN_LEN), cs); field_list.push_back(item, mem_root); - item->maybe_null= 1; + item->set_maybe_null(); } field_list.push_back(item= new (mem_root) Item_empty_string(this, "type", 10, cs), mem_root); - item->maybe_null= 1; + item->set_maybe_null(); field_list.push_back(item= new (mem_root) Item_empty_string(this, "possible_keys", NAME_CHAR_LEN*MAX_KEY, cs), mem_root); - item->maybe_null=1; + item->set_maybe_null(); field_list.push_back(item=new (mem_root) Item_empty_string(this, "key", NAME_CHAR_LEN, cs), mem_root); - item->maybe_null=1; + item->set_maybe_null(); field_list.push_back(item=new (mem_root) Item_empty_string(this, "key_len", NAME_CHAR_LEN*MAX_KEY), mem_root); - item->maybe_null=1; + item->set_maybe_null(); field_list.push_back(item=new (mem_root) Item_empty_string(this, "ref", NAME_CHAR_LEN*MAX_REF_PARTS, cs), mem_root); - item->maybe_null=1; + item->set_maybe_null(); field_list.push_back(item=new (mem_root) Item_empty_string(this, "rows", NAME_CHAR_LEN, cs), mem_root); @@ -2829,7 +2821,7 @@ void THD::make_explain_field_list(List<Item> &field_list, uint8 explain_flags, field_list.push_back(item= new (mem_root) Item_empty_string(this, "r_rows", NAME_CHAR_LEN, cs), mem_root); - item->maybe_null=1; + item->set_maybe_null(); } if (is_analyze || (explain_flags & DESCRIBE_EXTENDED)) @@ -2837,7 +2829,7 @@ void THD::make_explain_field_list(List<Item> &field_list, uint8 explain_flags, field_list.push_back(item= new (mem_root) Item_float(this, "filtered", 0.1234, 2, 4), mem_root); - item->maybe_null=1; + item->set_maybe_null(); } if (is_analyze) @@ -2845,10 +2837,10 @@ void THD::make_explain_field_list(List<Item> &field_list, uint8 explain_flags, field_list.push_back(item= new (mem_root) Item_float(this, "r_filtered", 0.1234, 2, 4), 
mem_root); - item->maybe_null=1; + item->set_maybe_null(); } - item->maybe_null= 1; + item->set_maybe_null(); field_list.push_back(new (mem_root) Item_empty_string(this, "Extra", 255, cs), mem_root); @@ -3045,11 +3037,11 @@ bool select_result::check_simple_select() const } -static String default_line_term("\n",default_charset_info); -static String default_escaped("\\",default_charset_info); -static String default_field_term("\t",default_charset_info); -static String default_enclosed_and_line_start("", default_charset_info); -static String default_xml_row_term("<row>", default_charset_info); +static String default_line_term("\n", 1, default_charset_info); +static String default_escaped("\\", 1, default_charset_info); +static String default_field_term("\t", 1, default_charset_info); +static String default_enclosed_and_line_start("", 0, default_charset_info); +static String default_xml_row_term("<row>", 5, default_charset_info); sql_exchange::sql_exchange(const char *name, bool flag, enum enum_filetype filetype_arg) @@ -3947,9 +3939,10 @@ void Query_arena::set_query_arena(Query_arena *set) } -void Query_arena::cleanup_stmt() +bool Query_arena::cleanup_stmt(bool /*restore_set_statement_vars*/) { DBUG_ASSERT(! "Query_arena::cleanup_stmt() not implemented"); + return false; } /* @@ -4413,6 +4406,7 @@ void TMP_TABLE_PARAM::init() materialized_subquery= 0; force_not_null_cols= 0; skip_create_table= 0; + tmp_name= "temptable"; // Name of temp table on disk DBUG_VOID_RETURN; } @@ -5123,6 +5117,64 @@ void reset_thd(MYSQL_THD thd) free_root(thd->mem_root, MYF(MY_KEEP_PREALLOC)); } +/** + This function can be used by storage engine + to indicate a start of an async operation. + + This asynchronous is such operation needs to be + finished before we write response to the client +. + An example of this operation is Innodb's asynchronous + group commit. 
Server needs to wait for the end of it + before writing response to client, to provide durability + guarantees, in other words, server can't send OK packet + before modified data is durable in redo log. + + NOTE: system THD (those that are not associated with client + connection) do not allows async operations yet. + + @param thd a THD + @return thd + @retval nullptr if this is system THD */ +extern "C" MYSQL_THD thd_increment_pending_ops(MYSQL_THD thd) +{ + if (!thd || thd->system_thread != NON_SYSTEM_THREAD) + return nullptr; + thd->async_state.inc_pending_ops(); + return thd; +} + + +/** + This function can be used by plugin/engine to indicate + end of async operation (such as end of group commit + write flush) + + @param thd THD +*/ +extern "C" void thd_decrement_pending_ops(MYSQL_THD thd) +{ + DBUG_ASSERT(thd); + DBUG_ASSERT(thd->system_thread == NON_SYSTEM_THREAD); + + thd_async_state::enum_async_state state; + if (thd->async_state.dec_pending_ops(&state) == 0) + { + switch(state) + { + case thd_async_state::enum_async_state::SUSPENDED: + DBUG_ASSERT(thd->scheduler->thd_resume); + thd->scheduler->thd_resume(thd); + break; + case thd_async_state::enum_async_state::NONE: + break; + default: + DBUG_ASSERT(0); + } + } +} + + unsigned long long thd_get_query_id(const MYSQL_THD thd) { return((unsigned long long)thd->query_id); @@ -5238,14 +5290,13 @@ extern "C" enum enum_server_command thd_current_command(MYSQL_THD thd) return thd->get_command(); } - -extern "C" int thd_slave_thread(const MYSQL_THD thd) +#ifdef HAVE_REPLICATION /* Working around MDEV-24622 */ +/** @return whether the current thread is for applying binlog in a replica */ +extern "C" int thd_is_slave(const MYSQL_THD thd) { - return(thd->slave_thread); + return thd && thd->slave_thread; } - - - +#endif /* HAVE_REPLICATION */ /* Returns high resolution timestamp for the start of the current query. 
*/ @@ -5284,7 +5335,7 @@ thd_need_wait_reports(const MYSQL_THD thd) } /* - Used by storage engines (currently TokuDB and InnoDB) to report that + Used by storage engines (currently InnoDB) to report that one transaction THD is about to go to wait for a transactional lock held by another transactions OTHER_THD. @@ -5413,9 +5464,8 @@ thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd) the caller should guarantee that the BF state won't change. (e.g. InnoDB does it by keeping lock_sys.mutex locked) */ - if (WSREP_ON && - wsrep_thd_is_BF(const_cast<THD *>(thd), false) && - wsrep_thd_is_BF(const_cast<THD *>(other_thd), false)) + if (WSREP_ON && wsrep_thd_is_BF(thd, false) && + wsrep_thd_is_BF(other_thd, false)) return 0; #endif /* WITH_WSREP */ rgi= thd->rgi_slave; @@ -6391,7 +6441,7 @@ int THD::decide_logging_format(TABLE_LIST *tables) */ lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_TABLE); - if (tbl->lock_type >= TL_WRITE_ALLOW_WRITE) + if (tbl->lock_type >= TL_FIRST_WRITE) { non_replicated_tables_count++; continue; @@ -6404,10 +6454,10 @@ int THD::decide_logging_format(TABLE_LIST *tables) if (tbl->prelocking_placeholder != TABLE_LIST::PRELOCK_FK) { - if (tbl->lock_type <= TL_READ_NO_INSERT) + if (tbl->lock_type < TL_FIRST_WRITE) has_read_tables= true; else if (table->found_next_number_field && - (tbl->lock_type >= TL_WRITE_ALLOW_WRITE)) + (tbl->lock_type >= TL_FIRST_WRITE)) { has_auto_increment_write_tables= true; has_auto_increment_write_tables_not_first= found_first_not_own_table; @@ -6418,7 +6468,7 @@ int THD::decide_logging_format(TABLE_LIST *tables) } } - if (tbl->lock_type >= TL_WRITE_ALLOW_WRITE) + if (tbl->lock_type >= TL_FIRST_WRITE) { bool trans; if (prev_write_table && prev_write_table->file->ht != @@ -6692,10 +6742,10 @@ int THD::decide_logging_format(TABLE_LIST *tables) if (table->placeholder()) continue; if (table->table->file->ht->db_type == DB_TYPE_BLACKHOLE_DB && - table->lock_type >= TL_WRITE_ALLOW_WRITE) + table->lock_type 
>= TL_FIRST_WRITE) { table_names.append(&table->table_name); - table_names.append(","); + table_names.append(','); } } if (!table_names.is_empty()) diff --git a/sql/sql_class.h b/sql/sql_class.h index 1dced52c133..1147e5c21a0 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -39,7 +39,7 @@ #include "thr_lock.h" /* thr_lock_type, THR_LOCK_DATA, THR_LOCK_INFO */ #include "thr_timer.h" #include "thr_malloc.h" -#include "log_slow.h" /* LOG_SLOW_DISABLE_... */ +#include "log_slow.h" /* LOG_SLOW_DISABLE_... */ #include <my_tree.h> #include "sql_digest_stream.h" // sql_digest_state #include <mysql/psi/mysql_stage.h> @@ -50,6 +50,7 @@ #include "session_tracker.h" #include "backup.h" #include "xa.h" +#include "ddl_log.h" /* DDL_LOG_STATE */ extern "C" void set_thd_stage_info(void *thd, @@ -195,6 +196,7 @@ enum enum_binlog_row_image { #define OLD_MODE_NO_DUP_KEY_WARNINGS_WITH_IGNORE (1 << 0) #define OLD_MODE_NO_PROGRESS_INFO (1 << 1) #define OLD_MODE_ZERO_DATE_TIME_CAST (1 << 2) +#define OLD_MODE_UTF8_IS_UTF8MB3 (1 << 3) extern char internal_table_name[2]; extern char empty_c_string[1]; @@ -270,10 +272,12 @@ typedef struct st_user_var_events was actually changed or not. */ typedef struct st_copy_info { - ha_rows records; /**< Number of processed records */ - ha_rows deleted; /**< Number of deleted records */ - ha_rows updated; /**< Number of updated records */ - ha_rows copied; /**< Number of copied records */ + ha_rows records; /**< Number of processed records */ + ha_rows deleted; /**< Number of deleted records */ + ha_rows updated; /**< Number of updated records */ + ha_rows copied; /**< Number of copied records */ + ha_rows accepted_rows; /**< Number of accepted original rows + (same as number of rows in RETURNING) */ ha_rows error_count; ha_rows touched; /* Number of touched records */ enum enum_duplicates handle_duplicates; @@ -389,6 +393,31 @@ public: }; +/* An ALTER INDEX operation that changes the ignorability of an index. 
*/ +class Alter_index_ignorability: public Sql_alloc +{ +public: + Alter_index_ignorability(const char *name, bool is_ignored, bool if_exists) : + m_name(name), m_is_ignored(is_ignored), m_if_exists(if_exists) + { + assert(name != NULL); + } + + const char *name() const { return m_name; } + bool if_exists() const { return m_if_exists; } + + /* The ignorability after the operation is performed. */ + bool is_ignored() const { return m_is_ignored; } + Alter_index_ignorability *clone(MEM_ROOT *mem_root) const + { return new (mem_root) Alter_index_ignorability(*this); } + +private: + const char *m_name; + bool m_is_ignored; + bool m_if_exists; +}; + + class Key :public Sql_alloc, public DDL_options { public: enum Keytype { PRIMARY, UNIQUE, MULTIPLE, FULLTEXT, SPATIAL, FOREIGN_KEY}; @@ -845,7 +874,6 @@ typedef struct system_status_var ulong com_create_tmp_table; ulong com_drop_tmp_table; ulong com_other; - ulong com_multi; ulong com_stmt_prepare; ulong com_stmt_reprepare; @@ -942,6 +970,12 @@ typedef struct system_status_var ulong lost_connections; ulong max_statement_time_exceeded; /* + Number of times where column info was not + sent with prepared statement metadata. 
+ */ + ulong skip_metadata_count; + + /* Number of statements sent from the client */ ulong questions; @@ -960,6 +994,7 @@ typedef struct system_status_var ulonglong table_open_cache_hits; ulonglong table_open_cache_misses; ulonglong table_open_cache_overflows; + ulonglong send_metadata_skips; double last_query_cost; double cpu_time, busy_time; uint32 threads_running; @@ -1032,13 +1067,14 @@ static inline void update_global_memory_status(int64 size) @retval Pointter to CHARSET_INFO with the given name on success */ static inline CHARSET_INFO * -mysqld_collation_get_by_name(const char *name, +mysqld_collation_get_by_name(const char *name, myf utf8_flag, CHARSET_INFO *name_cs= system_charset_info) { CHARSET_INFO *cs; MY_CHARSET_LOADER loader; my_charset_loader_init_mysys(&loader); - if (!(cs= my_collation_get_by_name(&loader, name, MYF(0)))) + + if (!(cs= my_collation_get_by_name(&loader, name, MYF(utf8_flag)))) { ErrConvString err(name, name_cs); my_error(ER_UNKNOWN_COLLATION, MYF(0), err.ptr()); @@ -1200,7 +1236,7 @@ public: void free_items(); /* Close the active state associated with execution of this statement */ - virtual void cleanup_stmt(); + virtual bool cleanup_stmt(bool /*restore_set_statement_vars*/); }; @@ -1235,6 +1271,38 @@ public: class Server_side_cursor; +/* + Struct to catch changes in column metadata that is sent to client. + in the "result set metadata". Used to support + MARIADB_CLIENT_CACHE_METADATA. +*/ +struct send_column_info_state +{ + /* Last client charset (affects metadata) */ + CHARSET_INFO *last_charset= nullptr; + + /* Checksum, only used to check changes if 'immutable' is false*/ + uint32 checksum= 0; + + /* + Column info can only be changed by PreparedStatement::reprepare() + + There is a class of "weird" prepared statements like SELECT ? 
or SELECT @a + that are not immutable, and depend on input parameters or user variables + */ + bool immutable= false; + + bool initialized= false; + + /* Used by PreparedStatement::reprepare()*/ + void reset() + { + initialized= false; + checksum= 0; + } +}; + + /** @class Statement @brief State of a single command executed against this connection. @@ -1325,6 +1393,8 @@ public: LEX_CSTRING db; + send_column_info_state column_info_state; + /* This is set to 1 of last call to send_result_to_client() was ok */ my_bool query_cache_is_applicable; @@ -1970,6 +2040,25 @@ private: }; +class Turn_errors_to_warnings_handler : public Internal_error_handler +{ +public: + Turn_errors_to_warnings_handler() {} + bool handle_condition(THD *thd, + uint sql_errno, + const char* sqlstate, + Sql_condition::enum_warning_level *level, + const char* msg, + Sql_condition ** cond_hdl) + { + *cond_hdl= NULL; + if (*level == Sql_condition::WARN_LEVEL_ERROR) + *level= Sql_condition::WARN_LEVEL_WARN; + return(0); + } +}; + + struct Suppress_warnings_error_handler : public Internal_error_handler { bool handle_condition(THD *thd, @@ -2343,6 +2432,162 @@ public: }; /** + Support structure for asynchronous group commit, or more generally + any asynchronous operation that needs to finish before server writes + response to client. + + An engine, or any other server component, can signal that there is + a pending operation by incrementing a counter, i.e inc_pending_ops() + and that pending operation is finished by decrementing that counter + dec_pending_ops(). + + NOTE: Currently, pending operations can not fail, i.e there is no + way to pass a return code in dec_pending_ops() + + The server does not write response to the client before the counter + becomes 0. In case of group commit it ensures that data is persistent + before success reported to client, i.e durability in ACID. 
+*/ +struct thd_async_state +{ + enum class enum_async_state + { + NONE, + SUSPENDED, /* do_command() did not finish, and needs to be resumed */ + RESUMED /* do_command() is resumed*/ + }; + enum_async_state m_state{enum_async_state::NONE}; + + /* Stuff we need to resume do_command where we finished last time*/ + enum enum_server_command m_command{COM_SLEEP}; + LEX_STRING m_packet{0,0}; + + mysql_mutex_t m_mtx; + mysql_cond_t m_cond; + + /** Pending counter*/ + Atomic_counter<int> m_pending_ops=0; + +#ifndef DBUG_OFF + /* Checks */ + pthread_t m_dbg_thread; +#endif + + thd_async_state() + { + mysql_mutex_init(PSI_NOT_INSTRUMENTED, &m_mtx, 0); + mysql_cond_init(PSI_INSTRUMENT_ME, &m_cond, 0); + } + + /* + Currently only used with threadpool, one can "suspend" and "resume" a THD. + Suspend only means leaving do_command earlier, after saving some state. + Resume is continuing suspended THD's do_command(), from where it finished last time. + */ + bool try_suspend() + { + bool ret; + mysql_mutex_lock(&m_mtx); + DBUG_ASSERT(m_state == enum_async_state::NONE); + DBUG_ASSERT(m_pending_ops >= 0); + + if(m_pending_ops) + { + ret=true; + m_state= enum_async_state::SUSPENDED; + } + else + { + /* + If there is no pending operations, can't suspend, since + nobody can resume it. + */ + ret=false; + } + mysql_mutex_unlock(&m_mtx); + return ret; + } + + ~thd_async_state() + { + wait_for_pending_ops(); + mysql_mutex_destroy(&m_mtx); + mysql_cond_destroy(&m_cond); + } + + /* + Increment pending asynchronous operations. + The client response may not be written if + this count > 0. + So, without threadpool query needs to wait for + the operations to finish. + With threadpool, THD can be suspended and resumed + when this counter goes to 0. + */ + void inc_pending_ops() + { + mysql_mutex_lock(&m_mtx); + +#ifndef DBUG_OFF + /* + Check that increments are always done by the same thread. 
+ */ + if (!m_pending_ops) + m_dbg_thread= pthread_self(); + else + DBUG_ASSERT(pthread_equal(pthread_self(),m_dbg_thread)); +#endif + + m_pending_ops++; + mysql_mutex_unlock(&m_mtx); + } + + int dec_pending_ops(enum_async_state* state) + { + int ret; + mysql_mutex_lock(&m_mtx); + ret= --m_pending_ops; + if (!ret) + mysql_cond_signal(&m_cond); + *state = m_state; + mysql_mutex_unlock(&m_mtx); + return ret; + } + + /* + This is used for "dirty" reading pending ops, + when dirty read is OK. + */ + int pending_ops() + { + return m_pending_ops; + } + + /* Wait for pending operations to finish.*/ + void wait_for_pending_ops() + { + /* + It is fine to read m_pending_ops and compare it with 0, + without mutex protection. + + The value is only incremented by the current thread, and will + be decremented by another one, thus "dirty" may show positive number + when it is really 0, but this is not a problem, and the only + bad thing from that will be rechecking under mutex. + */ + if (!pending_ops()) + return; + + mysql_mutex_lock(&m_mtx); + DBUG_ASSERT(m_pending_ops >= 0); + while (m_pending_ops) + mysql_cond_wait(&m_cond, &m_mtx); + mysql_mutex_unlock(&m_mtx); + } +}; + + +/** @class THD For each client connection we create a separate thread with THD serving as a thread/connection descriptor @@ -2461,6 +2706,8 @@ public: /* Last created prepared statement */ Statement *last_stmt; + Statement *cur_stmt= 0; + inline void set_last_stmt(Statement *stmt) { last_stmt= (is_error() ? 
NULL : stmt); } inline void clear_last_stmt() { last_stmt= NULL; } @@ -2670,6 +2917,11 @@ public: #ifndef MYSQL_CLIENT binlog_cache_mngr * binlog_setup_trx_data(); + /* + If set, tell binlog to store the value as query 'xid' in the next + Query_log_event + */ + ulonglong binlog_xid; /* Public interface to write RBR events to the binlog @@ -2856,7 +3108,7 @@ public: } default_transaction, *transaction; Global_read_lock global_read_lock; Field *dup_field; -#ifndef __WIN__ +#ifndef _WIN32 sigset_t signals; #endif #ifdef SIGNAL_WITH_VIO_CLOSE @@ -5043,6 +5295,7 @@ private: } public: + thd_async_state async_state; #ifdef HAVE_REPLICATION /* If we do a purge of binary logs, log index info of the threads @@ -5113,6 +5366,8 @@ public: uint64 wsrep_current_gtid_seqno; ulong wsrep_affected_rows; bool wsrep_has_ignored_error; + /* true if wsrep_on was ON in last wsrep_on_update */ + bool wsrep_was_on; /* When enabled, do not replicate/binlog updates from the current table that's @@ -5215,9 +5470,9 @@ public: thr_timer_end(&query_timer); #endif } - void restore_set_statement_var() + bool restore_set_statement_var() { - main_lex.restore_set_statement_var(); + return main_lex.restore_set_statement_var(); } /* Copy relevant `stmt` transaction flags to `all` transaction. */ void merge_unsafe_rollback_flags() @@ -5285,6 +5540,38 @@ public: bool sql_parser(LEX *old_lex, LEX *lex, char *str, uint str_len, bool stmt_prepare_mode); + myf get_utf8_flag() const + { + return (variables.old_behavior & OLD_MODE_UTF8_IS_UTF8MB3 ? + MY_UTF8_IS_UTF8MB3 : 0); + } + + /** + Save current lex to the output parameter and reset it to point to + main_lex. 
This method is called from mysql_client_binlog_statement() + to temporary + + @param[out] backup_lex original value of current lex + */ + + void backup_and_reset_current_lex(LEX **backup_lex) + { + *backup_lex= lex; + lex= &main_lex; + } + + + /** + Restore current lex to its original value it had before calling the method + backup_and_reset_current_lex(). + + @param backup_lex original value of current lex + */ + + void restore_current_lex(LEX *backup_lex) + { + lex= backup_lex; + } }; @@ -5861,6 +6148,7 @@ class select_create: public select_insert { MYSQL_LOCK **m_plock; bool exit_done; TMP_TABLE_SHARE *saved_tmp_table_share; + DDL_LOG_STATE ddl_log_state_create, ddl_log_state_rm; public: select_create(THD *thd_arg, TABLE_LIST *table_arg, @@ -5876,7 +6164,10 @@ public: alter_info(alter_info_arg), m_plock(NULL), exit_done(0), saved_tmp_table_share(0) - {} + { + bzero(&ddl_log_state_create, sizeof(ddl_log_state_create)); + bzero(&ddl_log_state_rm, sizeof(ddl_log_state_rm)); + } int prepare(List<Item> &list, SELECT_LEX_UNIT *u); int binlog_show_create_table(TABLE **tables, uint count); @@ -5931,6 +6222,7 @@ public: List<Item> copy_funcs; Copy_field *copy_field, *copy_field_end; uchar *group_buff; + const char *tmp_name; Item **items_to_copy; /* Fields in tmp table */ TMP_ENGINE_COLUMNDEF *recinfo, *start_recinfo; KEY *keyinfo; @@ -6004,7 +6296,9 @@ public: schema_table(0), materialized_subquery(0), force_not_null_cols(0), precomputed_group_by(0), force_copy_fields(0), bit_fields_as_long(0), skip_create_table(0) - {} + { + init(); + } ~TMP_TABLE_PARAM() { cleanup(); @@ -6829,7 +7123,8 @@ public: ~my_var_sp() { } bool set(THD *thd, Item *val); my_var_sp *get_my_var_sp() { return this; } - const Type_handler *type_handler() const { return m_type_handler; } + const Type_handler *type_handler() const + { return m_type_handler; } sp_rcontext *get_rcontext(sp_rcontext *local_ctx) const; }; @@ -7046,10 +7341,6 @@ public: #define CF_SKIP_WSREP_CHECK 0 #endif /* WITH_WSREP */ 
-/** - Do not allow it for COM_MULTI batch -*/ -#define CF_NO_COM_MULTI (1U << 3) /* Inline functions */ @@ -7442,10 +7733,10 @@ public: ErrConvDQName(const Database_qualified_name *name) :m_name(name) { } - const char *ptr() const + LEX_CSTRING lex_cstring() const override { - m_name->make_qname(err_buffer, sizeof(err_buffer)); - return err_buffer; + size_t length= m_name->make_qname(err_buffer, sizeof(err_buffer)); + return {err_buffer, length}; } }; @@ -7462,10 +7753,10 @@ public: m_maybe_null(false) { } - void set_maybe_null(bool maybe_null_arg) { m_maybe_null= maybe_null_arg; } + void set_type_maybe_null(bool maybe_null_arg) { m_maybe_null= maybe_null_arg; } bool get_maybe_null() const { return m_maybe_null; } - uint decimal_precision() const + decimal_digits_t decimal_precision() const { /* Type_holder is not used directly to create fields, so @@ -7488,11 +7779,12 @@ public: bool aggregate_attributes(THD *thd) { + static LEX_CSTRING union_name= { STRING_WITH_LEN("UNION") }; for (uint i= 0; i < arg_count; i++) - m_maybe_null|= args[i]->maybe_null; + m_maybe_null|= args[i]->maybe_null(); return type_handler()->Item_hybrid_func_fix_attributes(thd, - "UNION", this, this, + union_name, this, this, args, arg_count); } }; @@ -7619,5 +7911,8 @@ public: extern THD_list server_threads; +void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, + uint field_count); + #endif /* MYSQL_SERVER */ #endif /* SQL_CLASS_INCLUDED */ diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 79980ba7797..6b195ac9fe7 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -23,7 +23,7 @@ #include "mariadb.h" #include "mysqld.h" #include "sql_priv.h" -#ifndef __WIN__ +#ifndef _WIN32 #include <netdb.h> // getservbyname, servent #endif #include "sql_audit.h" @@ -798,7 +798,7 @@ bool thd_init_client_charset(THD *thd, uint cs_number) { /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client", - 
cs->csname); + cs->cs_name.str); return true; } thd->org_charset= cs; @@ -1109,8 +1109,8 @@ static int check_connection(THD *thd) void setup_connection_thread_globals(THD *thd) { DBUG_EXECUTE_IF("CONNECT_wait", { - extern MYSQL_SOCKET unix_sock; - while (unix_sock.fd >= 0) + extern Dynamic_array<MYSQL_SOCKET> listen_sockets; + while (listen_sockets.size()) my_sleep(1000); }); thd->store_globals(); diff --git a/sql/sql_const.h b/sql/sql_const.h index b2548b4ef8c..490b870d768 100644 --- a/sql/sql_const.h +++ b/sql/sql_const.h @@ -49,7 +49,7 @@ #else #define MAX_REFLENGTH 4 /* Max length for record ref */ #endif -#define MAX_HOSTNAME 61 /* len+1 in mysql.user */ +#define MAX_HOSTNAME (HOSTNAME_LENGTH + 1) /* len+1 in mysql.user */ #define MAX_CONNECTION_NAME NAME_LEN #define MAX_MBWIDTH 3 /* Max multibyte sequence */ @@ -297,19 +297,6 @@ */ #define MAX_TIME_ZONE_NAME_LENGTH (NAME_LEN + 1) -#if defined(__WIN__) - -#define INTERRUPT_PRIOR -2 -#define CONNECT_PRIOR -1 -#define WAIT_PRIOR 0 -#define QUERY_PRIOR 2 -#else -#define INTERRUPT_PRIOR 10 -#define CONNECT_PRIOR 9 -#define WAIT_PRIOR 8 -#define QUERY_PRIOR 6 -#endif /* __WIN92__ */ - #define SP_PSI_STATEMENT_INFO_COUNT 19 #endif /* SQL_CONST_INCLUDED */ diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 91300e3a326..a526bfee2d2 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -1259,7 +1259,7 @@ With_element::process_columns_of_derived_unit(THD *thd, while ((item= it++, name= nm++)) { item->set_name(thd, *name); - item->common_flags&= ~IS_AUTO_GENERATED_NAME; + item->base_flags|= item_base_t::IS_EXPLICIT_NAME; } if (arena) @@ -1302,7 +1302,7 @@ With_element::process_columns_of_derived_unit(THD *thd, my_error(ER_BAD_FIELD_ERROR, MYF(0), name->str, "CYCLE clause"); return true; } - item->common_flags|= IS_IN_WITH_CYCLE; + item->base_flags|= item_base_t::IS_IN_WITH_CYCLE; } } unit->columns_are_renamed= true; @@ -1693,7 +1693,7 @@ void With_clause::print(THD *thd, String *str, enum_query_type query_type) with_elem= 
with_elem->next) { if (with_elem != with_list.first) - str->append(", "); + str->append(STRING_WITH_LEN(", ")); with_elem->print(thd, str, query_type); } } diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc index f473b563f52..d45f2ac8777 100644 --- a/sql/sql_cursor.cc +++ b/sql/sql_cursor.cc @@ -185,7 +185,7 @@ int mysql_open_cursor(THD *thd, select_result *result, } *pcursor= materialized_cursor; - thd->stmt_arena->cleanup_stmt(); + rc|= (thd->stmt_arena->cleanup_stmt(true)? 1 : 0); } end: diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 3447032f193..37e136927f2 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -40,13 +40,14 @@ #include "events.h" #include "sql_handler.h" #include "sql_statistics.h" +#include "ddl_log.h" // ddl_log functions #include <my_dir.h> #include <m_ctype.h> #include "log.h" -#ifdef __WIN__ +#ifdef _WIN32 #include <direct.h> #endif -#include "debug_sync.h" +#include "debug.h" // debug_crash_here #define MAX_DROP_TABLE_Q_LEN 1024 @@ -58,7 +59,7 @@ static bool find_db_tables_and_rm_known_files(THD *, MY_DIR *, const char *, const char *, TABLE_LIST **); long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path); -static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error); +my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error); static void mysql_change_db_impl(THD *thd, LEX_CSTRING *new_db_name, privilege_t new_db_access, @@ -104,8 +105,137 @@ cmp_db_names(LEX_CSTRING *db1_name, const LEX_CSTRING *db2_name) db1_name->str, db2_name->str) == 0)); } +#ifdef HAVE_PSI_INTERFACE +static PSI_rwlock_key key_rwlock_LOCK_dboptions; +static PSI_rwlock_key key_rwlock_LOCK_dbnames; +static PSI_rwlock_key key_rwlock_LOCK_rmdir; + +static PSI_rwlock_info all_database_names_rwlocks[]= { + {&key_rwlock_LOCK_dboptions, "LOCK_dboptions", PSI_FLAG_GLOBAL}, + {&key_rwlock_LOCK_dbnames, "LOCK_dbnames", PSI_FLAG_GLOBAL}, + {&key_rwlock_LOCK_rmdir, "LOCK_rmdir",PSI_FLAG_GLOBAL}, +}; + +static void 
init_database_names_psi_keys(void) +{ + const char *category= "sql"; + int count; + + if (PSI_server == NULL) + return; + + count= array_elements(all_database_names_rwlocks); + PSI_server->register_rwlock(category, all_database_names_rwlocks, count); +} +#endif + +static mysql_rwlock_t rmdir_lock; /* + Cache of C strings for existing database names. + + The only use of it is to avoid repeated expensive + my_access() calls. + + Provided operations are lookup, insert (after successfull my_access()) + and clear (this is called whenever rmdir is called). +*/ +struct dbname_cache_t +{ +private: + Hash_set<LEX_STRING> m_set; + mysql_rwlock_t m_lock; + + static uchar *get_key(const LEX_STRING *ls, size_t *sz, my_bool) + { + *sz= ls->length; + return (uchar *) ls->str; + } + +public: + dbname_cache_t() + : m_set(key_memory_dbnames_cache, table_alias_charset, 10, 0, + sizeof(char *), (my_hash_get_key) get_key, my_free, 0) + { + mysql_rwlock_init(key_rwlock_LOCK_dbnames, &m_lock); + } + + bool contains(const char *s) + { + auto sz= strlen(s); + mysql_rwlock_rdlock(&m_lock); + bool ret= m_set.find(s, sz) != 0; + mysql_rwlock_unlock(&m_lock); + return ret; + } + + void insert(const char *s) + { + auto len= strlen(s); + auto ls= (LEX_STRING *) my_malloc(key_memory_dbnames_cache, + sizeof(LEX_STRING) + strlen(s) + 1, 0); + + if (!ls) + return; + + ls->length= len; + ls->str= (char *) (ls + 1); + + memcpy(ls->str, s, len + 1); + mysql_rwlock_wrlock(&m_lock); + bool found= m_set.find(s, len) != 0; + if (!found) + m_set.insert(ls); + mysql_rwlock_unlock(&m_lock); + if (found) + my_free(ls); + } + + void clear() + { + mysql_rwlock_wrlock(&m_lock); + m_set.clear(); + mysql_rwlock_unlock(&m_lock); + } + + ~dbname_cache_t() + { + mysql_rwlock_destroy(&m_lock); + } +}; + +static dbname_cache_t* dbname_cache; + +static void dbname_cache_init() +{ + static MY_ALIGNED(16) char buf[sizeof(dbname_cache_t)]; + DBUG_ASSERT(!dbname_cache); + dbname_cache= new (buf) dbname_cache_t; + 
mysql_rwlock_init(key_rwlock_LOCK_rmdir, &rmdir_lock); +} + +static void dbname_cache_destroy() +{ + if (!dbname_cache) + return; + + dbname_cache->~dbname_cache_t(); + dbname_cache= 0; + mysql_rwlock_destroy(&rmdir_lock); +} + +static int my_rmdir(const char *dir) +{ + auto ret= rmdir(dir); + if (ret) + return ret; + mysql_rwlock_wrlock(&rmdir_lock); + dbname_cache->clear(); + mysql_rwlock_unlock(&rmdir_lock); + return 0; +} + + /* Function we use in the creation of our hash to get key. */ @@ -131,7 +261,7 @@ static inline int write_to_binlog(THD *thd, const char *query, size_t q_len, qinfo.db= db; qinfo.db_len= (uint32)db_len; return mysql_bin_log.write(&qinfo); -} +} /* @@ -145,26 +275,7 @@ void free_dbopt(void *dbopt) my_free(dbopt); } -#ifdef HAVE_PSI_INTERFACE -static PSI_rwlock_key key_rwlock_LOCK_dboptions; -static PSI_rwlock_info all_database_names_rwlocks[]= -{ - { &key_rwlock_LOCK_dboptions, "LOCK_dboptions", PSI_FLAG_GLOBAL} -}; - -static void init_database_names_psi_keys(void) -{ - const char* category= "sql"; - int count; - - if (PSI_server == NULL) - return; - - count= array_elements(all_database_names_rwlocks); - PSI_server->register_rwlock(category, all_database_names_rwlocks, count); -} -#endif /** Initialize database option cache. 
@@ -190,6 +301,7 @@ bool my_dboptions_cache_init(void) table_alias_charset, 32, 0, 0, (my_hash_get_key) dboptions_get_key, free_dbopt, 0); } + dbname_cache_init(); return error; } @@ -205,6 +317,7 @@ void my_dboptions_cache_free(void) { dboptions_init= 0; my_hash_free(&dboptions); + dbname_cache_destroy(); mysql_rwlock_destroy(&LOCK_dboptions); } } @@ -394,9 +507,9 @@ static bool write_db_opt(THD *thd, const char *path, { ulong length; length= (ulong) (strxnmov(buf, sizeof(buf)-1, "default-character-set=", - create->default_table_charset->csname, + create->default_table_charset->cs_name.str, "\ndefault-collation=", - create->default_table_charset->name, + create->default_table_charset->coll_name.str, "\n", NullS) - buf); if (create->schema_comment) @@ -435,6 +548,7 @@ bool load_db_opt(THD *thd, const char *path, Schema_specification_st *create) DBUG_ENTER("load_db_opt"); bool error=1; size_t nbytes; + myf utf8_flag= thd->get_utf8_flag(); bzero((char*) create,sizeof(*create)); create->default_table_charset= thd->variables.collation_server; @@ -471,9 +585,9 @@ bool load_db_opt(THD *thd, const char *path, Schema_specification_st *create) default-collation commands. 
*/ if (!(create->default_table_charset= - get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) && + get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(utf8_flag))) && !(create->default_table_charset= - get_charset_by_name(pos+1, MYF(0)))) + get_charset_by_name(pos+1, MYF(utf8_flag)))) { sql_print_error("Error while loading database options: '%s':",path); sql_print_error(ER_THD(thd, ER_UNKNOWN_CHARACTER_SET),pos+1); @@ -482,8 +596,7 @@ bool load_db_opt(THD *thd, const char *path, Schema_specification_st *create) } else if (!strncmp(buf,"default-collation", (pos-buf))) { - if (!(create->default_table_charset= get_charset_by_name(pos+1, - MYF(0)))) + if (!(create->default_table_charset= get_charset_by_name(pos+1, MYF(utf8_flag)))) { sql_print_error("Error while loading database options: '%s':",path); sql_print_error(ER_THD(thd, ER_UNKNOWN_COLLATION),pos+1); @@ -676,7 +789,6 @@ mysql_create_db_internal(THD *thd, const LEX_CSTRING *db, DBUG_RETURN(-1); } - if (my_mkdir(path, 0777, MYF(0)) < 0) { my_error(ER_CANT_CREATE_DB, MYF(0), db->str, my_errno); @@ -692,7 +804,7 @@ mysql_create_db_internal(THD *thd, const LEX_CSTRING *db, Restore things to beginning. 
*/ path[path_len]= 0; - if (rmdir(path) >= 0) + if (my_rmdir(path) >= 0) DBUG_RETURN(-1); /* We come here when we managed to create the database, but not the option @@ -702,6 +814,14 @@ mysql_create_db_internal(THD *thd, const LEX_CSTRING *db, thd->clear_error(); } + /* Log command to ddl log */ + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("CREATE") }; + ddl_log.org_storage_engine_name= { C_STRING_WITH_LEN("DATABASE") }; + ddl_log.org_database= *db; + backup_log_ddl(&ddl_log); + not_silent: if (!silent) { @@ -785,6 +905,14 @@ mysql_alter_db_internal(THD *thd, const LEX_CSTRING *db, thd->variables.collation_database= thd->db_charset; } + /* Log command to ddl log */ + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("ALTER") }; + ddl_log.org_storage_engine_name= { C_STRING_WITH_LEN("DATABASE") }; + ddl_log.org_database= *db; + backup_log_ddl(&ddl_log); + if (mysql_bin_log.is_open()) { int errcode= query_error_code(thd, TRUE); @@ -840,6 +968,56 @@ bool mysql_alter_db(THD *thd, const LEX_CSTRING *db, /** + Drop database objects + + @param thd THD object + @param path Path to database (for ha_drop_database) + @param db Normalized database name + @param rm_mysql_schema If the schema is 'mysql', in which case we don't + log the query to binary log or delete related + routines or events. +*/ + +void drop_database_objects(THD *thd, const LEX_CSTRING *path, + const LEX_CSTRING *db, + bool rm_mysql_schema) +{ + debug_crash_here("ddl_log_drop_before_ha_drop_database"); + + ha_drop_database(path->str); + + /* + We temporarily disable the binary log while dropping the objects + in the database. Since the DROP DATABASE statement is always + replicated as a statement, execution of it will drop all objects + in the database on the slave as well, so there is no need to + replicate the removal of the individual objects in the database + as well. 
+ + This is more of a safety precaution, since normally no objects + should be dropped while the database is being cleaned, but in + the event that a change in the code to remove other objects is + made, these drops should still not be logged. + */ + + debug_crash_here("ddl_log_drop_before_drop_db_routines"); + + query_cache_invalidate1(thd, db->str); + + if (!rm_mysql_schema) + { + tmp_disable_binlog(thd); + (void) sp_drop_db_routines(thd, db->str); /* @todo Do not ignore errors */ +#ifdef HAVE_EVENT_SCHEDULER + Events::drop_schema_events(thd, db->str); +#endif + reenable_binlog(thd); + } + debug_crash_here("ddl_log_drop_after_drop_db_routines"); +} + + +/** Drop all tables, routines and events in a database and the database itself. @param thd Thread handle @@ -855,41 +1033,31 @@ bool mysql_alter_db(THD *thd, const LEX_CSTRING *db, */ static bool -mysql_rm_db_internal(THD *thd, const LEX_CSTRING *db, bool if_exists, bool silent) +mysql_rm_db_internal(THD *thd, const LEX_CSTRING *db, bool if_exists, + bool silent) { ulong deleted_tables= 0; bool error= true, rm_mysql_schema; char path[FN_REFLEN + 16]; MY_DIR *dirp; - uint length; + uint path_length; TABLE_LIST *tables= NULL; TABLE_LIST *table; + DDL_LOG_STATE ddl_log_state; Drop_table_error_handler err_handler; + LEX_CSTRING rm_db; + char db_tmp[SAFE_NAME_LEN+1]; + const char *dbnorm; DBUG_ENTER("mysql_rm_db"); - char db_tmp[SAFE_NAME_LEN+1]; - const char *dbnorm= normalize_db_name(db->str, db_tmp, sizeof(db_tmp)); + dbnorm= normalize_db_name(db->str, db_tmp, sizeof(db_tmp)); + lex_string_set(&rm_db, dbnorm); + bzero(&ddl_log_state, sizeof(ddl_log_state)); if (lock_schema_name(thd, dbnorm)) DBUG_RETURN(true); - length= build_table_filename(path, sizeof(path) - 1, db->str, "", "", 0); - strmov(path+length, MY_DB_OPT_FILE); // Append db option file name - del_dbopt(path); // Remove dboption hash entry - /* - Now remove the db.opt file. 
- The 'find_db_tables_and_rm_known_files' doesn't remove this file - if there exists a table with the name 'db', so let's just do it - separately. We know this file exists and needs to be deleted anyway. - */ - if (mysql_file_delete_with_symlink(key_file_misc, path, "", MYF(0)) && - my_errno != ENOENT) - { - my_error(EE_DELETE, MYF(0), path, my_errno); - DBUG_RETURN(true); - } - - path[length]= '\0'; // Remove file name + path_length= build_table_filename(path, sizeof(path) - 1, db->str, "", "", 0); /* See if the directory exists */ if (!(dirp= my_dir(path,MYF(MY_DONT_SORT)))) @@ -940,7 +1108,10 @@ mysql_rm_db_internal(THD *thd, const LEX_CSTRING *db, bool if_exists, bool silen } } - /* mysql_ha_rm_tables() requires a non-null TABLE_LIST. */ + /* + Close active HANDLER's for tables in the database. + Note that mysql_ha_rm_tables() requires a non-null TABLE_LIST. + */ if (tables) mysql_ha_rm_tables(thd, tables); @@ -950,44 +1121,59 @@ mysql_rm_db_internal(THD *thd, const LEX_CSTRING *db, bool if_exists, bool silen thd->push_internal_handler(&err_handler); if (!thd->killed && !(tables && - mysql_rm_table_no_locks(thd, tables, true, false, true, false, true, - false))) + mysql_rm_table_no_locks(thd, tables, &rm_db, &ddl_log_state, true, false, + true, false, true, false))) { + debug_crash_here("ddl_log_drop_after_drop_tables"); + + LEX_CSTRING cpath{ path, path_length}; + ddl_log_drop_db(thd, &ddl_log_state, &rm_db, &cpath); + + drop_database_objects(thd, &cpath, &rm_db, rm_mysql_schema); + /* - We temporarily disable the binary log while dropping the objects - in the database. Since the DROP DATABASE statement is always - replicated as a statement, execution of it will drop all objects - in the database on the slave as well, so there is no need to - replicate the removal of the individual objects in the database - as well. 
- - This is more of a safety precaution, since normally no objects - should be dropped while the database is being cleaned, but in - the event that a change in the code to remove other objects is - made, these drops should still not be logged. + Now remove the db.opt file. + The 'find_db_tables_and_rm_known_files' doesn't remove this file + if there exists a table with the name 'db', so let's just do it + separately. We know this file exists and needs to be deleted anyway. */ - - ha_drop_database(path); - tmp_disable_binlog(thd); - query_cache_invalidate1(thd, dbnorm); - if (!rm_mysql_schema) + debug_crash_here("ddl_log_drop_before_drop_option_file"); + strmov(path+path_length, MY_DB_OPT_FILE); // Append db option file name + if (mysql_file_delete_with_symlink(key_file_misc, path, "", MYF(0)) && + my_errno != ENOENT) { - (void) sp_drop_db_routines(thd, dbnorm); /* @todo Do not ignore errors */ -#ifdef HAVE_EVENT_SCHEDULER - Events::drop_schema_events(thd, dbnorm); -#endif + thd->pop_internal_handler(); + my_error(EE_DELETE, MYF(0), path, my_errno); + error= true; + ddl_log_complete(&ddl_log_state); + goto end; } - reenable_binlog(thd); + del_dbopt(path); // Remove dboption hash entry + path[path_length]= '\0'; // Remove file name /* If the directory is a symbolic link, remove the link first, then remove the directory the symbolic link pointed at */ + debug_crash_here("ddl_log_drop_before_drop_dir"); error= rm_dir_w_symlink(path, true); + debug_crash_here("ddl_log_drop_after_drop_dir"); } + thd->pop_internal_handler(); update_binlog: + if (likely(!error)) + { + /* Log command to ddl log */ + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("DROP") }; + ddl_log.org_storage_engine_name= { C_STRING_WITH_LEN("DATABASE") }; + ddl_log.org_database= *db; + backup_log_ddl(&ddl_log); + } + if (!silent && likely(!error)) { const char *query; @@ -1000,6 +1186,7 @@ update_binlog: if (mysql_bin_log.is_open()) { int errcode= 
query_error_code(thd, TRUE); + int res; Query_log_event qinfo(thd, query, query_length, FALSE, TRUE, /* suppress_use */ TRUE, errcode); /* @@ -1014,7 +1201,14 @@ update_binlog: These DDL methods and logging are protected with the exclusive metadata lock on the schema. */ - if (mysql_bin_log.write(&qinfo)) + debug_crash_here("ddl_log_drop_before_binlog"); + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); + res= mysql_bin_log.write(&qinfo); + thd->binlog_xid= 0; + debug_crash_here("ddl_log_drop_after_binlog"); + + if (res) { error= true; goto exit; @@ -1064,13 +1258,21 @@ update_binlog: *query_pos++ = ','; } - if (query_pos != query_data_start) + if (query_pos != query_data_start) // If database was not empty { + int res; /* These DDL methods and logging are protected with the exclusive metadata lock on the schema. */ - if (write_to_binlog(thd, query, (uint)(query_pos -1 - query), db->str, db->length)) + debug_crash_here("ddl_log_drop_before_binlog"); + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); + res= write_to_binlog(thd, query, (uint)(query_pos -1 - query), db->str, + db->length); + thd->binlog_xid= 0; + debug_crash_here("ddl_log_drop_after_binlog"); + if (res) { error= true; goto exit; @@ -1079,6 +1281,7 @@ update_binlog: } exit: + ddl_log_complete(&ddl_log_state); /* If this database was the client's selected database, we silently change the client's selected database to nothing (to have an empty @@ -1090,6 +1293,7 @@ exit: mysql_change_db_impl(thd, NULL, NO_ACL, thd->variables.collation_server); thd->session_tracker.current_schema.mark_as_changed(thd); } +end: my_dirend(dirp); DBUG_RETURN(error); } @@ -1116,7 +1320,7 @@ static bool find_db_tables_and_rm_known_files(THD *thd, MY_DIR *dirp, DBUG_PRINT("enter",("path: %s", path)); /* first, get the list of tables */ - Dynamic_array<LEX_CSTRING*> files(dirp->number_of_files); + Dynamic_array<LEX_CSTRING*> files(PSI_INSTRUMENT_MEM, 
dirp->number_of_files); Discovered_table_list tl(thd, &files); if (ha_discover_table_names(thd, &db, dirp, &tl, true)) DBUG_RETURN(1); @@ -1220,22 +1424,24 @@ static bool find_db_tables_and_rm_known_files(THD *thd, MY_DIR *dirp, 1 ERROR */ -static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error) +my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error) { char tmp_path[FN_REFLEN], *pos; char *path= tmp_path; DBUG_ENTER("rm_dir_w_symlink"); unpack_filename(tmp_path, org_path); -#ifdef HAVE_READLINK - int error; - char tmp2_path[FN_REFLEN]; - /* Remove end FN_LIBCHAR as this causes problem on Linux in readlink */ + /* Remove end FN_LIBCHAR as this causes problem on Linux and OS/2 */ pos= strend(path); if (pos > path && pos[-1] == FN_LIBCHAR) *--pos=0; - if (unlikely((error= my_readlink(tmp2_path, path, MYF(MY_WME))) < 0)) +#ifdef HAVE_READLINK + int error; + char tmp2_path[FN_REFLEN]; + + if (unlikely((error= my_readlink(tmp2_path, path, + MYF(send_error ? MY_WME : 0))) < 0)) DBUG_RETURN(1); if (likely(!error)) { @@ -1247,12 +1453,8 @@ static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error) path= tmp2_path; } #endif - /* Remove last FN_LIBCHAR to not cause a problem on OS/2 */ - pos= strend(path); - if (pos > path && pos[-1] == FN_LIBCHAR) - *--pos=0; - if (unlikely(rmdir(path) < 0 && send_error)) + if (unlikely(my_rmdir(path) < 0 && send_error)) { my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno); DBUG_RETURN(1); @@ -1824,7 +2026,7 @@ bool mysql_upgrade_db(THD *thd, const LEX_CSTRING *old_db) length= build_table_filename(path, sizeof(path)-1, new_db.str, "", "", 0); if (length && path[length-1] == FN_LIBCHAR) path[length-1]=0; // remove ending '\' - rmdir(path); + my_rmdir(path); goto exit; } @@ -1919,20 +2121,34 @@ exit: TRUE The directory does not exist. 
*/ + bool check_db_dir_existence(const char *db_name) { char db_dir_path[FN_REFLEN + 1]; uint db_dir_path_len; + if (dbname_cache->contains(db_name)) + return 0; + db_dir_path_len= build_table_filename(db_dir_path, sizeof(db_dir_path) - 1, db_name, "", "", 0); if (db_dir_path_len && db_dir_path[db_dir_path_len - 1] == FN_LIBCHAR) db_dir_path[db_dir_path_len - 1]= 0; - /* Check access. */ + /* + Check access. - return my_access(db_dir_path, F_OK); + The locking is to prevent creating permanent stale + entries for deleted databases, in case of + race condition with my_rmdir. + */ + mysql_rwlock_rdlock(&rmdir_lock); + int ret= my_access(db_dir_path, F_OK); + if (!ret) + dbname_cache->insert(db_name); + mysql_rwlock_unlock(&rmdir_lock); + return ret; } diff --git a/sql/sql_db.h b/sql/sql_db.h index c9f1ed068e6..3c037d668e0 100644 --- a/sql/sql_db.h +++ b/sql/sql_db.h @@ -47,6 +47,10 @@ void my_dbopt_cleanup(void); const char *normalize_db_name(const char *db, char *buffer, size_t buffer_size); +void drop_database_objects(THD *thd, const LEX_CSTRING *path, + const LEX_CSTRING *db, + bool rm_mysql_schema); +my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error); #define MY_DB_OPT_FILE "db.opt" #endif /* SQL_DB_INCLUDED */ diff --git a/sql/sql_debug.h b/sql/sql_debug.h index c5aa3b5f94e..6109ca38048 100644 --- a/sql/sql_debug.h +++ b/sql/sql_debug.h @@ -31,75 +31,75 @@ public: bool append_key_type(ha_base_keytype type) { - static const char *names[20]= + static LEX_CSTRING names[20]= { - "END", - "TEXT", - "BINARY", - "SHORT_INT", - "LONG_INT", - "FLOAT", - "DOUBLE", - "NUM", - "USHORT_INT", - "ULONG_INT", - "LONGLONG", - "ULONGLONG", - "INT24", - "UINT24", - "INT8", - "VARTEXT1", - "VARBINARY1", - "VARTEXT2", - "VARBINARY2", - "BIT" + {STRING_WITH_LEN("END")}, + {STRING_WITH_LEN("TEXT")}, + {STRING_WITH_LEN("BINARY")}, + {STRING_WITH_LEN("SHORT_INT")}, + {STRING_WITH_LEN("LONG_INT")}, + {STRING_WITH_LEN("FLOAT")}, + {STRING_WITH_LEN("DOUBLE")}, + 
{STRING_WITH_LEN("NUM")}, + {STRING_WITH_LEN("USHORT_INT")}, + {STRING_WITH_LEN("ULONG_INT")}, + {STRING_WITH_LEN("LONGLONG")}, + {STRING_WITH_LEN("ULONGLONG")}, + {STRING_WITH_LEN("INT24")}, + {STRING_WITH_LEN("UINT24")}, + {STRING_WITH_LEN("INT8")}, + {STRING_WITH_LEN("VARTEXT1")}, + {STRING_WITH_LEN("VARBINARY1")}, + {STRING_WITH_LEN("VARTEXT2")}, + {STRING_WITH_LEN("VARBINARY2")}, + {STRING_WITH_LEN("BIT")} }; if ((uint) type >= array_elements(names)) - return append("???"); + return append(STRING_WITH_LEN("???")); return append(names[(uint) type]); } bool append_KEY_flag_names(ulong flags) { - static const char *names[17]= + static LEX_CSTRING names[17]= { - "HA_NOSAME", // 1 - "HA_PACK_KEY", // 2 - used in both HA_KEYSEG and KEY/MI_KEYDEF - "HA_SPACE_PACK_USED", // 4 - "HA_VAR_LENGTH_KEY", // 8 - "HA_AUTO_KEY", // 16 - "HA_BINARY_PACK_KEY", // 32 - "HA_NULL_PART_KEY", // 64 - "HA_FULLTEXT", // 128 - "HA_UNIQUE_CHECK", // 256 - "HA_SORT_ALLOWS_SAME", // 512 - "HA_SPATIAL", // 1024 - "HA_NULL_ARE_EQUAL", // 2048 - "HA_USES_COMMENT", // 4096 - "HA_GENERATED_KEY", // 8192 - "HA_USES_PARSER", // 16384 - "HA_USES_BLOCK_SIZE", // 32768 - "HA_KEY_HAS_PART_KEY_SEG" // 65536 + {STRING_WITH_LEN("HA_NOSAME")}, // 1 + {STRING_WITH_LEN("HA_PACK_KEY")}, // 2; also in HA_KEYSEG + {STRING_WITH_LEN("HA_SPACE_PACK_USED")}, // 4 + {STRING_WITH_LEN("HA_VAR_LENGTH_KEY")}, // 8 + {STRING_WITH_LEN("HA_AUTO_KEY")}, // 16 + {STRING_WITH_LEN("HA_BINARY_PACK_KEY")}, // 32 + {STRING_WITH_LEN("HA_NULL_PART_KEY")}, // 64 + {STRING_WITH_LEN("HA_FULLTEXT")}, // 128 + {STRING_WITH_LEN("HA_UNIQUE_CHECK")}, // 256 + {STRING_WITH_LEN("HA_SORT_ALLOWS_SAME")}, // 512 + {STRING_WITH_LEN("HA_SPATIAL")}, // 1024 + {STRING_WITH_LEN("HA_NULL_ARE_EQUAL")}, // 2048 + {STRING_WITH_LEN("HA_USES_COMMENT")}, // 4096 + {STRING_WITH_LEN("HA_GENERATED_KEY")}, // 8192 + {STRING_WITH_LEN("HA_USES_PARSER")}, // 16384 + {STRING_WITH_LEN("HA_USES_BLOCK_SIZE")}, // 32768 + 
{STRING_WITH_LEN("HA_KEY_HAS_PART_KEY_SEG")}// 65536 }; return append_flag32_names((uint) flags, names, array_elements(names)); } bool append_HA_KEYSEG_flag_names(uint32 flags) { - static const char *names[]= + static LEX_CSTRING names[]= { - "HA_SPACE_PACK", // 1 - "HA_PACK_KEY", // 2 - used in both HA_KEYSEG and KEY/MI_KEYDEF - "HA_PART_KEY_SEG", // 4 - "HA_VAR_LENGTH_PART", // 8 - "HA_NULL_PART", // 16 - "HA_BLOB_PART", // 32 - "HA_SWAP_KEY", // 64 - "HA_REVERSE_SORT", // 128 - "HA_NO_SORT", // 256 - "??? 512 ???", // 512 - "HA_BIT_PART", // 1024 - "HA_CAN_MEMCMP" // 2048 + {STRING_WITH_LEN("HA_SPACE_PACK")}, // 1 + {STRING_WITH_LEN("HA_PACK_KEY")}, // 2; also in KEY/MI/KEY_DEF + {STRING_WITH_LEN("HA_PART_KEY_SEG")}, // 4 + {STRING_WITH_LEN("HA_VAR_LENGTH_PART")}, // 8 + {STRING_WITH_LEN("HA_NULL_PART")}, // 16 + {STRING_WITH_LEN("HA_BLOB_PART")}, // 32 + {STRING_WITH_LEN("HA_SWAP_KEY")}, // 64 + {STRING_WITH_LEN("HA_REVERSE_SORT")}, // 128 + {STRING_WITH_LEN("HA_NO_SORT")}, // 256 + {STRING_WITH_LEN("??? 
512 ???")}, // 512 + {STRING_WITH_LEN("HA_BIT_PART")}, // 1024 + {STRING_WITH_LEN("HA_CAN_MEMCMP")} // 2048 }; return append_flag32_names(flags, names, array_elements(names)); } @@ -158,7 +158,7 @@ public: for (uint i= 0; i < key_count; i++) { Debug_key tmp; - if (!tmp.append(where) && !tmp.append_KEY(keys[i])) + if (!tmp.append(where, strlen(where)) && !tmp.append_KEY(keys[i])) tmp.print(thd); } } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index e628ce60d2d..4c679267456 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -42,6 +42,7 @@ #include "uniques.h" #include "sql_derived.h" // mysql_handle_derived // end_read_record +#include "sql_insert.h" // fix_rownum_pointers #include "sql_partition.h" // make_used_partitions_str #define MEM_STRIP_BUF_SIZE ((size_t) thd->variables.sortbuff_size) @@ -365,11 +366,21 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, query_plan.select_lex= thd->lex->first_select_lex(); query_plan.table= table; - promote_select_describe_flag_if_needed(thd->lex); + thd->lex->promote_select_describe_flag_if_needed(); if (mysql_prepare_delete(thd, table_list, &conds, &delete_while_scanning)) DBUG_RETURN(TRUE); + if (table_list->has_period()) + { + if (!table_list->period_conditions.start.item->const_item() + || !table_list->period_conditions.end.item->const_item()) + { + my_error(ER_NOT_CONSTANT_EXPRESSION, MYF(0), "FOR PORTION OF"); + DBUG_RETURN(true); + } + } + if (delete_history) table->vers_write= false; @@ -785,6 +796,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, DBUG_ASSERT(table->file->inited != handler::NONE); THD_STAGE_INFO(thd, stage_updating); + fix_rownum_pointers(thd, thd->lex->current_select, &deleted); + while (likely(!(error=info.read_record())) && likely(!thd->killed) && likely(!thd->is_error())) { diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index fa060afde8d..6f0857239dd 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -673,14 +673,29 @@ static bool 
mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) { SELECT_LEX_UNIT *unit= derived->get_unit(); - bool res= FALSE; + SELECT_LEX *first_select; + bool res= FALSE, keep_row_order; DBUG_ENTER("mysql_derived_prepare"); DBUG_PRINT("enter", ("unit: %p table_list: %p alias: '%s'", unit, derived, derived->alias.str)); if (!unit) DBUG_RETURN(FALSE); - SELECT_LEX *first_select= unit->first_select(); + first_select= unit->first_select(); + /* + If rownum() is used we have to preserve the insert row order + to make GROUP BY and ORDER BY with filesort work. + + SELECT * from (SELECT a,b from t1 ORDER BY a)) WHERE rownum <= 0; + + When rownum is not used the optimizer will skip the ORDER BY clause. + With rownum we have to keep the ORDER BY as this is what is expected. + We also have to create any sort result temporary table in such a way + that the inserted row order is maintained. + */ + keep_row_order= (thd->lex->with_rownum && + (first_select->group_list.elements || + first_select->order_list.elements)); if (derived->is_recursive_with_table() && !derived->is_with_table_recursive_reference() && @@ -717,7 +732,8 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) (first_select->options | thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS), - &derived->alias, FALSE, FALSE, FALSE, 0); + &derived->alias, FALSE, FALSE, + keep_row_order, 0); thd->create_tmp_table_for_derived= FALSE; if (likely(!res) && !derived->table) @@ -870,7 +886,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS), &derived->alias, - FALSE, FALSE, FALSE, + FALSE, FALSE, keep_row_order, 0)) { thd->create_tmp_table_for_derived= FALSE; @@ -1495,7 +1511,8 @@ bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived) DBUG_RETURN(false); /* Do not push conditions into unit with global ORDER BY ... 
LIMIT */ - if (unit->fake_select_lex && unit->fake_select_lex->explicit_limit) + if (unit->fake_select_lex && + unit->fake_select_lex->limit_params.explicit_limit) DBUG_RETURN(false); /* Check whether any select of 'unit' allows condition pushdown */ diff --git a/sql/sql_digest.cc b/sql/sql_digest.cc index b81a54b2af6..36a6b398ad3 100644 --- a/sql/sql_digest.cc +++ b/sql/sql_digest.cc @@ -182,7 +182,7 @@ void compute_digest_text(const sql_digest_storage* digest_storage, if (byte_count > digest_storage->m_token_array_length) { - digest_output->append("\0", 1); + digest_output->append('\0'); return; } @@ -196,7 +196,7 @@ void compute_digest_text(const sql_digest_storage* digest_storage, Can happen, as we do dirty reads on digest_storage, which can be written to in another thread. */ - digest_output->append("\0", 1); + digest_output->append('\0'); return; } @@ -256,7 +256,7 @@ void compute_digest_text(const sql_digest_storage* digest_storage, break; } /* Copy the converted identifier into the digest string. 
*/ - digest_output->append("`", 1); + digest_output->append('`'); if (id_length > 0) digest_output->append(id_string, id_length); digest_output->append("` ", 2); @@ -273,7 +273,7 @@ void compute_digest_text(const sql_digest_storage* digest_storage, digest_output->append(tok_data->m_token_string, tok_length); if (tok_data->m_append_space) - digest_output->append(" ", 1); + digest_output->append(' '); break; } } diff --git a/sql/sql_error.cc b/sql/sql_error.cc index 80cdc0bc734..cef9e6cec00 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -302,7 +302,6 @@ void Diagnostics_area::reset_diagnostics_area() { DBUG_ENTER("reset_diagnostics_area"); - m_skip_flush= FALSE; #ifdef DBUG_OFF m_can_overwrite_status= FALSE; /** Don't take chances in production */ @@ -877,11 +876,11 @@ extern "C" int my_wc_mb_utf8_null_terminated(CHARSET_INFO *cs, @param from_cs charset from convert @retval - result string + result string length */ -char *err_conv(char *buff, uint to_length, const char *from, - uint from_length, CHARSET_INFO *from_cs) +size_t err_conv(char *buff, uint to_length, const char *from, + uint from_length, CHARSET_INFO *from_cs) { char *to= buff; const char *from_start= from; @@ -932,7 +931,7 @@ char *err_conv(char *buff, uint to_length, const char *from, &errors); to[res]= 0; } - return buff; + return res; } diff --git a/sql/sql_error.h b/sql/sql_error.h index f1c540a5eed..8c0deb5e172 100644 --- a/sql/sql_error.h +++ b/sql/sql_error.h @@ -818,8 +818,8 @@ private: }; -extern char *err_conv(char *buff, uint to_length, const char *from, - uint from_length, CHARSET_INFO *from_cs); +extern size_t err_conv(char *buff, uint to_length, const char *from, + uint from_length, CHARSET_INFO *from_cs); class ErrBuff { @@ -831,31 +831,37 @@ public: err_buffer[0]= '\0'; } const char *ptr() const { return err_buffer; } - const char *set_longlong(const Longlong_hybrid &nr) const + LEX_CSTRING set_longlong(const Longlong_hybrid &nr) const { - return nr.is_unsigned() ? 
ullstr(nr.value(), err_buffer) : - llstr(nr.value(), err_buffer); + int radix= nr.is_unsigned() ? 10 : -10; + const char *end= longlong10_to_str(nr.value(), err_buffer, radix); + DBUG_ASSERT(end >= err_buffer); + return {err_buffer, (size_t) (end - err_buffer)}; } - const char *set_double(double nr) const + LEX_CSTRING set_double(double nr) const { - my_gcvt(nr, MY_GCVT_ARG_DOUBLE, sizeof(err_buffer), err_buffer, 0); - return err_buffer; + size_t length= my_gcvt(nr, MY_GCVT_ARG_DOUBLE, + sizeof(err_buffer), err_buffer, 0); + return {err_buffer, length}; } - const char *set_decimal(const decimal_t *d) const + LEX_CSTRING set_decimal(const decimal_t *d) const { - int len= sizeof(err_buffer); - decimal2string(d, err_buffer, &len, 0, 0, ' '); - return err_buffer; + int length= sizeof(err_buffer); + decimal2string(d, err_buffer, &length, 0, 0, ' '); + DBUG_ASSERT(length >= 0); + return {err_buffer, (size_t) length}; } - const char *set_str(const char *str, size_t len, CHARSET_INFO *cs) const + LEX_CSTRING set_str(const char *str, size_t len, CHARSET_INFO *cs) const { DBUG_ASSERT(len < UINT_MAX32); - return err_conv(err_buffer, (uint) sizeof(err_buffer), str, (uint) len, cs); + len= err_conv(err_buffer, (uint) sizeof(err_buffer), str, (uint) len, cs); + return {err_buffer, len}; } - const char *set_mysql_time(const MYSQL_TIME *ltime) const + LEX_CSTRING set_mysql_time(const MYSQL_TIME *ltime) const { - my_TIME_to_str(ltime, err_buffer, AUTO_SEC_PART_DIGITS); - return err_buffer; + int length= my_TIME_to_str(ltime, err_buffer, AUTO_SEC_PART_DIGITS); + DBUG_ASSERT(length >= 0); + return {err_buffer, (size_t) length}; } }; @@ -865,7 +871,11 @@ class ErrConv: public ErrBuff public: ErrConv() {} virtual ~ErrConv() {} - virtual const char *ptr() const = 0; + virtual LEX_CSTRING lex_cstring() const= 0; + inline const char *ptr() const + { + return lex_cstring().str; + } }; class ErrConvString : public ErrConv @@ -880,7 +890,7 @@ public: : ErrConv(), str(str_arg), 
len(strlen(str_arg)), cs(cs_arg) {} ErrConvString(const String *s) : ErrConv(), str(s->ptr()), len(s->length()), cs(s->charset()) {} - const char *ptr() const + LEX_CSTRING lex_cstring() const override { return set_str(str, len, cs); } @@ -891,7 +901,7 @@ class ErrConvInteger : public ErrConv, public Longlong_hybrid public: ErrConvInteger(const Longlong_hybrid &nr) : ErrConv(), Longlong_hybrid(nr) { } - const char *ptr() const + LEX_CSTRING lex_cstring() const override { return set_longlong(static_cast<Longlong_hybrid>(*this)); } @@ -902,7 +912,7 @@ class ErrConvDouble: public ErrConv double num; public: ErrConvDouble(double num_arg) : ErrConv(), num(num_arg) {} - const char *ptr() const + LEX_CSTRING lex_cstring() const override { return set_double(num); } @@ -913,7 +923,7 @@ class ErrConvTime : public ErrConv const MYSQL_TIME *ltime; public: ErrConvTime(const MYSQL_TIME *ltime_arg) : ErrConv(), ltime(ltime_arg) {} - const char *ptr() const + LEX_CSTRING lex_cstring() const override { return set_mysql_time(ltime); } @@ -924,7 +934,7 @@ class ErrConvDecimal : public ErrConv const decimal_t *d; public: ErrConvDecimal(const decimal_t *d_arg) : ErrConv(), d(d_arg) {} - const char *ptr() const + LEX_CSTRING lex_cstring() const override { return set_decimal(d); } @@ -1034,14 +1044,6 @@ public: return m_message; } - bool skip_flush() const - { - DBUG_ASSERT(m_status == DA_OK || m_status == DA_OK_BULK); - return m_skip_flush; - } - - void set_skip_flush() - { m_skip_flush= TRUE; } uint sql_errno() const { @@ -1230,9 +1232,6 @@ private: /** Set to make set_error_status after set_{ok,eof}_status possible. */ bool m_can_overwrite_status; - /** Skip flushing network buffer after writing OK (for COM_MULTI) */ - bool m_skip_flush; - /** Message buffer. Can be used by OK or ERROR status. 
*/ char m_message[MYSQL_ERRMSG_SIZE]; diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 1b59dce10b9..480a1259ba8 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -296,7 +296,7 @@ static void push_string_list(THD *thd, List<Item> *item_list, else buf->append(','); - buf->append(line); + buf->append(line, strlen(line)); } push_string(thd, item_list, buf); } @@ -406,7 +406,7 @@ int print_explain_row(select_result_sink *result, Item_float *fl= new (mem_root) Item_float(thd, *r_rows, 2); String tmp; String *res= fl->val_str(&tmp); - r_rows_str.append(res->ptr()); + r_rows_str.append(*res); item_list.push_back(new (mem_root) Item_string_sys(thd, r_rows_str.ptr(), r_rows_str.length()), mem_root); @@ -552,7 +552,7 @@ int Explain_union::print_explain(Explain_query *query, Item_float *fl= new (mem_root) Item_float(thd, avg_rows, 2); String tmp; String *res= fl->val_str(&tmp); - r_rows_str.append(res->ptr()); + r_rows_str.append(*res); item_list.push_back(new (mem_root) Item_string_sys(thd, r_rows_str.ptr(), r_rows_str.length()), mem_root); @@ -1056,11 +1056,11 @@ void Explain_aggr_filesort::print_json_members(Json_writer *writer, first= false; else { - str.append(", "); + str.append(STRING_WITH_LEN(", ")); } append_item_to_str(&str, item); if (*direction == ORDER::ORDER_DESC) - str.append(" desc"); + str.append(STRING_WITH_LEN(" desc")); } writer->add_member("sort_key").add_str(str.c_ptr_safe()); @@ -1138,14 +1138,15 @@ void Explain_table_access::fill_key_str(String *key_str, bool is_json) const CHARSET_INFO *cs= system_charset_info; bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT || type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE); - const char *hash_key_prefix= "#hash#"; + LEX_CSTRING hash_key_prefix= { STRING_WITH_LEN("#hash#") }; + const char *key_name; - if (key.get_key_name()) + if ((key_name= key.get_key_name())) { if (is_hj) - key_str->append(hash_key_prefix, strlen(hash_key_prefix), cs); + key_str->append(hash_key_prefix.str, 
hash_key_prefix.length, cs); - key_str->append(key.get_key_name()); + key_str->append(key_name, strlen(key_name)); if (is_hj && type != JT_HASH) key_str->append(':'); @@ -1161,7 +1162,10 @@ void Explain_table_access::fill_key_str(String *key_str, bool is_json) const key_str->append(buf2); } if (type == JT_HASH_NEXT) - key_str->append(hash_next_key.get_key_name()); + { + key_name= hash_next_key.get_key_name(); + key_str->append(key_name, strlen(key_name)); + } } @@ -1303,8 +1307,8 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai push_str(thd, &item_list, join_type_str[type]); else { - join_type_buf.append(join_type_str[type]); - join_type_buf.append("|filter"); + join_type_buf.append(join_type_str[type], strlen(join_type_str[type])); + join_type_buf.append(STRING_WITH_LEN("|filter")); item_list.push_back(new (mem_root) Item_string_sys(thd, join_type_buf.ptr(), join_type_buf.length()), @@ -1324,7 +1328,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai if (rowid_filter) { - key_str.append("|"); + key_str.append('|'); StringBuffer<64> rowid_key_str; rowid_filter->quick->print_key(&rowid_key_str); key_str.append(rowid_key_str); @@ -1367,10 +1371,10 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai if (rowid_filter) { - rows_str.append(" ("); + rows_str.append(STRING_WITH_LEN(" (")); rows_str.append_ulonglong((ulonglong) (round(rowid_filter->selectivity * 100.0))); - rows_str.append("%)"); + rows_str.append(STRING_WITH_LEN("%)")); } item_list.push_back(new (mem_root) Item_string_sys(thd, rows_str.ptr(), @@ -1393,13 +1397,13 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai Item_float *fl= new (mem_root) Item_float(thd, avg_rows, 2); String tmp; String *res= fl->val_str(&tmp); - r_rows_str.append(res->ptr()); + r_rows_str.append(*res); if (rowid_filter) { - r_rows_str.append(" ("); + r_rows_str.append(STRING_WITH_LEN(" (")); 
r_rows_str.append_ulonglong( (ulonglong) (rowid_filter->tracker->get_r_selectivity_pct() * 100.0)); - r_rows_str.append("%)"); + r_rows_str.append(STRING_WITH_LEN("%)")); } item_list.push_back(new (mem_root) Item_string_sys(thd, r_rows_str.ptr(), @@ -1648,6 +1652,9 @@ void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_t case ET_DISTINCT: writer->add_member("distinct").add_bool(true); break; + case ET_TABLE_FUNCTION: + writer->add_member("table_function").add_str("json_table"); + break; default: DBUG_ASSERT(0); @@ -1940,42 +1947,41 @@ void Explain_table_access::print_explain_json(Explain_query *query, sql_explain.h */ -const char * extra_tag_text[]= +const LEX_CSTRING extra_tag_text[]= { - "ET_none", - "Using index condition", - "Using index condition(BKA)", - "Using ", // special handling - "Range checked for each record (index map: 0x", // special handling - "Using where with pushed condition", - "Using where", - "Not exists", + { STRING_WITH_LEN("ET_none") }, + { STRING_WITH_LEN("Using index condition") }, + { STRING_WITH_LEN("Using index condition(BKA)") }, + { STRING_WITH_LEN("Using ") }, // special handling + { STRING_WITH_LEN("Range checked for each record (index map: 0x") }, // special handling + { STRING_WITH_LEN("Using where with pushed condition") }, + { STRING_WITH_LEN("Using where") }, + { STRING_WITH_LEN("Not exists") }, - "Using index", - "Full scan on NULL key", - "Skip_open_table", - "Open_frm_only", - "Open_full_table", - - "Scanned 0 databases", - "Scanned 1 database", - "Scanned all databases", - - "Using index for group-by", // special handling - - "USING MRR: DONT PRINT ME", // special handling - - "Distinct", - "LooseScan", - "Start temporary", - "End temporary", - "FirstMatch", // special handling - - "Using join buffer", // special handling - - "Const row not found", - "Unique row not found", - "Impossible ON condition", + { STRING_WITH_LEN("Using index") }, + { STRING_WITH_LEN("Full scan on NULL key") }, + { 
STRING_WITH_LEN("Skip_open_table") }, + { STRING_WITH_LEN("Open_frm_only") }, + { STRING_WITH_LEN("Open_full_table") }, + + { STRING_WITH_LEN("Scanned 0 databases") }, + { STRING_WITH_LEN("Scanned 1 database") }, + { STRING_WITH_LEN("Scanned all databases") }, + + { STRING_WITH_LEN("Using index for group-by") }, // special handling + { STRING_WITH_LEN("USING MRR: DONT PRINT ME") }, // special handling + + { STRING_WITH_LEN("Distinct") }, + { STRING_WITH_LEN("LooseScan") }, + { STRING_WITH_LEN("Start temporary") }, + { STRING_WITH_LEN("End temporary") }, + { STRING_WITH_LEN("FirstMatch") }, // special handling + + { STRING_WITH_LEN("Using join buffer") }, // special handling + + { STRING_WITH_LEN("Const row not found") }, + { STRING_WITH_LEN("Unique row not found") }, + { STRING_WITH_LEN("Impossible ON condition") } }; @@ -1995,7 +2001,8 @@ void Explain_table_access::append_tag_name(String *str, enum explain_extra_tag t char buf[MAX_KEY / 4 + 1]; str->append(STRING_WITH_LEN("Range checked for each " "record (index map: 0x")); - str->append(range_checked_fer->keys_map.print(buf)); + range_checked_fer->keys_map.print(buf); + str->append(buf, strlen(buf)); str->append(')'); break; } @@ -2009,12 +2016,16 @@ void Explain_table_access::append_tag_name(String *str, enum explain_extra_tag t str->append(extra_tag_text[tag]); str->append(STRING_WITH_LEN(" (")); - const char *buffer_type= bka_type.incremental ? 
"incremental" : "flat"; + LEX_CSTRING buffer_type; + if (bka_type.incremental) + buffer_type= { STRING_WITH_LEN("incremental") }; + else + buffer_type= { STRING_WITH_LEN("flat") }; str->append(buffer_type); str->append(STRING_WITH_LEN(", ")); - str->append(bka_type.join_alg); + str->append(bka_type.join_alg, strlen(bka_type.join_alg)); str->append(STRING_WITH_LEN(" join")); - str->append(STRING_WITH_LEN(")")); + str->append(')'); if (bka_type.mrr_type.length()) { str->append(STRING_WITH_LEN("; ")); @@ -2027,9 +2038,9 @@ void Explain_table_access::append_tag_name(String *str, enum explain_extra_tag t { if (firstmatch_table_name.length()) { - str->append("FirstMatch("); + str->append(STRING_WITH_LEN("FirstMatch(")); str->append(firstmatch_table_name); - str->append(")"); + str->append(')'); } else str->append(extra_tag_text[tag]); @@ -2039,7 +2050,10 @@ void Explain_table_access::append_tag_name(String *str, enum explain_extra_tag t { str->append(extra_tag_text[tag]); if (loose_scan_is_scanning) - str->append(" (scanning)"); + str->append(STRING_WITH_LEN(" (scanning)")); + break; + case ET_TABLE_FUNCTION: + str->append(STRING_WITH_LEN("Table function: json_table")); break; } default: @@ -2094,13 +2108,16 @@ void Explain_quick_select::print_json(Json_writer *writer) void Explain_quick_select::print_extra_recursive(String *str) { + const char *name; if (is_basic()) { - str->append(range.get_key_name()); + name= range.get_key_name(); + str->append(name, strlen(name)); } else { - str->append(get_name_by_type()); + name= get_name_by_type(); + str->append(name, strlen(name)); str->append('('); List_iterator_fast<Explain_quick_select> it (children); Explain_quick_select* child; @@ -2149,7 +2166,7 @@ void Explain_quick_select::print_key(String *str) { if (str->length() > 0) str->append(','); - str->append(range.get_key_name()); + str->append(range.get_key_name(), strlen(range.get_key_name())); } else { diff --git a/sql/sql_explain.h b/sql/sql_explain.h index 
31c5543d2df..bfd52290374 100644 --- a/sql/sql_explain.h +++ b/sql/sql_explain.h @@ -74,7 +74,7 @@ class Json_writer; *************************************************************************************/ -const int FAKE_SELECT_LEX_ID= (int)UINT_MAX; +const uint FAKE_SELECT_LEX_ID= UINT_MAX; class Explain_query; @@ -108,7 +108,7 @@ public: }; virtual enum explain_node_type get_type()= 0; - virtual int get_select_id()= 0; + virtual uint get_select_id()= 0; /** expression cache statistics @@ -168,9 +168,9 @@ public: bool add_table(Explain_table_access *tab, Explain_query *query); - int get_select_id() { return select_id; } + uint get_select_id() { return select_id; } - int select_id; + uint select_id; int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); @@ -354,7 +354,7 @@ public: enum explain_node_type get_type() { return EXPLAIN_UNION; } unit_common_op operation; - int get_select_id() + uint get_select_id() { DBUG_ASSERT(union_members.elements() > 0); return union_members.at(0); @@ -552,6 +552,7 @@ enum explain_extra_tag ET_CONST_ROW_NOT_FOUND, ET_UNIQUE_ROW_NOT_FOUND, ET_IMPOSSIBLE_ON_CONDITION, + ET_TABLE_FUNCTION, ET_total }; @@ -881,7 +882,7 @@ public: {} virtual enum explain_node_type get_type() { return EXPLAIN_UPDATE; } - virtual int get_select_id() { return 1; /* always root */ } + virtual uint get_select_id() { return 1; /* always root */ } const char *select_type; @@ -961,7 +962,7 @@ public: StringBuffer<64> table_name; enum explain_node_type get_type() { return EXPLAIN_INSERT; } - int get_select_id() { return 1; /* always root */ } + uint get_select_id() { return 1; /* always root */ } int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); @@ -988,7 +989,7 @@ public: bool deleting_all_rows; virtual enum explain_node_type get_type() { return EXPLAIN_DELETE; } - virtual int get_select_id() { return 1; /* always root */ } + virtual uint get_select_id() { 
return 1; /* always root */ } virtual int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 276cef6236b..7235dc6472d 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -616,16 +616,28 @@ static SQL_HANDLER *mysql_ha_find_handler(THD *thd, const LEX_CSTRING *name) static bool mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, enum enum_ha_read_modes mode, const char *keyname, - List<Item> *key_expr, enum ha_rkey_function ha_rkey_mode, + List<Item> *key_expr, + enum ha_rkey_function ha_rkey_mode, Item *cond, bool in_prepare) { THD *thd= handler->thd; TABLE *table= handler->table; if (cond) { + bool ret; + Item::vcol_func_processor_result res; + /* This can only be true for temp tables */ if (table->query_id != thd->query_id) cond->cleanup(); // File was reopened + + ret= cond->walk(&Item::check_handler_func_processor, 0, &res); + if (ret || res.errors) + { + my_error(ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name, + "WHERE", "HANDLER"); + return 1; // ROWNUM() used + } if (cond->fix_fields_if_needed_for_bool(thd, &cond)) return 1; } @@ -1023,7 +1035,8 @@ err0: SQL_HANDLER *mysql_ha_read_prepare(THD *thd, TABLE_LIST *tables, enum enum_ha_read_modes mode, const char *keyname, - List<Item> *key_expr, enum ha_rkey_function ha_rkey_mode, + List<Item> *key_expr, + enum ha_rkey_function ha_rkey_mode, Item *cond) { SQL_HANDLER *handler; diff --git a/sql/sql_help.cc b/sql/sql_help.cc index ebda874d1fe..f9932f11798 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -99,6 +99,7 @@ static bool init_fields(THD *thd, TABLE_LIST *tables, Lex_cstring_strlen(find_fields->table_name), Lex_cstring_strlen(find_fields->field_name))); if (!(find_fields->field= find_field_in_tables(thd, field, tables, NULL, + ignored_tables_list_t(NULL), 0, REPORT_ALL_ERRORS, 1, TRUE))) DBUG_RETURN(1); @@ -296,10 +297,10 @@ int get_topics_for_keyword(THD *thd, TABLE 
*topics, TABLE *relations, DBUG_ENTER("get_topics_for_keyword"); if ((iindex_topic= - find_type(primary_key_name, &topics->s->keynames, + find_type(primary_key_name.str, &topics->s->keynames, FIND_TYPE_NO_PREFIX) - 1) < 0 || (iindex_relations= - find_type(primary_key_name, &relations->s->keynames, + find_type(primary_key_name.str, &relations->s->keynames, FIND_TYPE_NO_PREFIX) - 1) < 0) { my_message(ER_CORRUPT_HELP_DB, ER_THD(thd, ER_CORRUPT_HELP_DB), MYF(0)); @@ -429,6 +430,46 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname, DBUG_VOID_RETURN; } + +/** + Collect field names of HELP header that will be sent to a client + + @param thd Thread data object + @param[out] field_list List of fields whose metadata should be collected for + sending to client +*/ + +static void fill_answer_1_fields(THD *thd, List<Item> *field_list) +{ + MEM_ROOT *mem_root= thd->mem_root; + + field_list->push_back(new (mem_root) Item_empty_string(thd, "name", 64), + mem_root); + field_list->push_back(new (mem_root) Item_empty_string(thd, "description", + 1000), + mem_root); + field_list->push_back(new (mem_root) Item_empty_string(thd, "example", 1000), + mem_root); +} + + +/** + Send metadata of an answer on help request to a client + + @param protocol protocol for sending +*/ + +static bool send_answer_1_metadata(Protocol *protocol) +{ + List<Item> field_list; + + fill_answer_1_fields(protocol->thd, &field_list); + return protocol->send_result_set_metadata(&field_list, + Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF); +} + + /* Send to client answer for help request @@ -454,22 +495,11 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname, 0 Successeful send */ -int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) +static int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) { - THD *thd= protocol->thd; - MEM_ROOT *mem_root= thd->mem_root; DBUG_ENTER("send_answer_1"); - List<Item> field_list; - 
field_list.push_back(new (mem_root) Item_empty_string(thd, "name", 64), - mem_root); - field_list.push_back(new (mem_root) Item_empty_string(thd, "description", 1000), - mem_root); - field_list.push_back(new (mem_root) Item_empty_string(thd, "example", 1000), - mem_root); - - if (protocol->send_result_set_metadata(&field_list, - Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + if (send_answer_1_metadata(protocol)) DBUG_RETURN(1); protocol->prepare_for_resend(); @@ -482,13 +512,39 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) } +/** + Collect field names of HELP header that will be sent to a client + + @param thd Thread data object + @param[out] field_list List of fields whose metadata should be collected for + sending to client + @param for_category need column 'source_category_name' +*/ + +static void fill_header_2_fields(THD *thd, List<Item> *field_list, + bool for_category) +{ + MEM_ROOT *mem_root= thd->mem_root; + if (for_category) + field_list->push_back(new (mem_root) + Item_empty_string(thd, "source_category_name", 64), + mem_root); + field_list->push_back(new (mem_root) + Item_empty_string(thd, "name", 64), + mem_root); + field_list->push_back(new (mem_root) + Item_empty_string(thd, "is_it_category", 1), + mem_root); +} + + /* Send to client help header SYNOPSIS send_header_2() protocol - protocol for sending - is_it_category - need column 'source_category_name' + for_category - need column 'source_category_name' IMPLEMENTATION +- -+ @@ -503,22 +559,12 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) result of protocol->send_result_set_metadata */ -int send_header_2(Protocol *protocol, bool for_category) +static int send_header_2(Protocol *protocol, bool for_category) { - THD *thd= protocol->thd; - MEM_ROOT *mem_root= thd->mem_root; DBUG_ENTER("send_header_2"); List<Item> field_list; - if (for_category) - field_list.push_back(new (mem_root) - Item_empty_string(thd, "source_category_name", 64), - 
mem_root); - field_list.push_back(new (mem_root) - Item_empty_string(thd, "name", 64), - mem_root); - field_list.push_back(new (mem_root) - Item_empty_string(thd, "is_it_category", 1), - mem_root); + + fill_header_2_fields(protocol->thd, &field_list, for_category); DBUG_RETURN(protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)); @@ -540,7 +586,12 @@ extern "C" int string_ptr_cmp(const void* ptr1, const void* ptr2) { String *str1= *(String**)ptr1; String *str2= *(String**)ptr2; - return strcmp(str1->c_ptr(),str2->c_ptr()); + uint length1= str1->length(); + uint length2= str2->length(); + int tmp= memcmp(str1->ptr(),str2->ptr(), MY_MIN(length1, length2)); + if (tmp) + return tmp; + return (int) length2 - (int) length1; } /* @@ -633,7 +684,6 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, thd Thread handler mask mask for compare with name mlen length of mask - tables list of tables, used in WHERE table goal table pfname field "name" in table @@ -644,8 +694,7 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, */ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, size_t mlen, - TABLE_LIST *tables, TABLE *table, - Field *pfname, int *error) + TABLE *table, Field *pfname, int *error) { MEM_ROOT *mem_root= thd->mem_root; Item *cond= new (mem_root) @@ -662,6 +711,205 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, size_t mlen, } +/** + Initialize the TABLE_LIST with tables used in HELP statement handling. 
+ + @param thd Thread handler + @param tables Array of four TABLE_LIST objects to initialize with data + about the tables help_topic, help_category, help_relation, + help_keyword +*/ + +static void initialize_tables_for_help_command(THD *thd, TABLE_LIST *tables) +{ + LEX_CSTRING MYSQL_HELP_TOPIC_NAME= {STRING_WITH_LEN("help_topic") }; + LEX_CSTRING MYSQL_HELP_CATEGORY_NAME= {STRING_WITH_LEN("help_category") }; + LEX_CSTRING MYSQL_HELP_RELATION_NAME= {STRING_WITH_LEN("help_relation") }; + LEX_CSTRING MYSQL_HELP_KEYWORD_NAME= {STRING_WITH_LEN("help_keyword") }; + + tables[0].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HELP_TOPIC_NAME, 0, + TL_READ); + tables[1].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HELP_CATEGORY_NAME, 0, + TL_READ); + tables[2].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HELP_RELATION_NAME, 0, + TL_READ); + tables[3].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HELP_KEYWORD_NAME, 0, + TL_READ); + tables[0].next_global= tables[0].next_local= + tables[0].next_name_resolution_table= &tables[1]; + tables[1].next_global= tables[1].next_local= + tables[1].next_name_resolution_table= &tables[2]; + tables[2].next_global= tables[2].next_local= + tables[2].next_name_resolution_table= &tables[3]; +} + + +/** + Setup tables and fields for query. + + @param thd Thread handler + @param first_select_lex SELECT_LEX of the parsed statement + @param tables Array of tables used in handling of the HELP + statement + @param used_fields Array of fields used in handling of the HELP + statement + + @return false on success, else true. +*/ + +template <size_t M, size_t N> +static bool init_items_for_help_command(THD *thd, + SELECT_LEX *first_select_lex, + TABLE_LIST (&tables)[M], + st_find_field (& used_fields)[N]) +{ + List<TABLE_LIST> leaves; + + /* + Initialize tables and fields to be usable from items. 
+ tables do not contain VIEWs => we can pass 0 as conds + */ + first_select_lex->context.table_list= + first_select_lex->context.first_name_resolution_table= + &tables[0]; + + if (setup_tables(thd, &first_select_lex->context, + &first_select_lex->top_join_list, + &tables[0], leaves, false, false)) + return true; + + memcpy((char*) used_fields, (char*) init_used_fields, + sizeof(used_fields[0]) * N); + if (init_fields(thd, &tables[0], used_fields, N)) + return true; + + for (size_t i= 0; i < M; i++) + tables[i].table->file->init_table_handle_for_HANDLER(); + + return false; +} + + +/** + Prepare (in the sense of prepared statement) the HELP statement. + + @param thd Thread handler + @param mask string value passed to the HELP statement + @oaram[out] fields fields for result set metadata + + @return false on success, else true. +*/ + +bool mysqld_help_prepare(THD *thd, const char *mask, List<Item> *fields) +{ + TABLE_LIST tables[4]; + st_find_field used_fields[array_elements(init_used_fields)]; + SQL_SELECT *select; + + List<String> topics_list; + + Sql_mode_instant_remove sms(thd, MODE_PAD_CHAR_TO_FULL_LENGTH); + initialize_tables_for_help_command(thd, tables); + + /* + HELP must be available under LOCK TABLES. + Reset and backup the current open tables state to + make it possible. 
+ */ + start_new_trans new_trans(thd); + + if (open_system_tables_for_read(thd, tables)) + return true; + + auto cleanup_and_return= [&](bool ret) + { + thd->commit_whole_transaction_and_close_tables(); + new_trans.restore_old_transaction(); + return ret; + }; + + if (init_items_for_help_command(thd, thd->lex->first_select_lex(), + tables, used_fields)) + return cleanup_and_return(false); + + size_t mlen= strlen(mask); + int error; + + /* + Prepare the query 'SELECT * FROM help_topic WHERE name LIKE mask' + for execution + */ + if (!(select= + prepare_select_for_name(thd,mask, mlen, tables[0].table, + used_fields[help_topic_name].field, &error))) + return cleanup_and_return(true); + + String name, description, example; + /* + Run the query 'SELECT * FROM help_topic WHERE name LIKE mask' + */ + int count_topics= search_topics(thd, tables[0].table, used_fields, + select, &topics_list, + &name, &description, &example); + delete select; + + if (thd->is_error()) + return cleanup_and_return(true); + + if (count_topics == 0) + { + int UNINIT_VAR(key_id); + /* + Prepare the query 'SELECT * FROM help_keyword WHERE name LIKE mask' + for execution + */ + if (!(select= + prepare_select_for_name(thd, mask, mlen, tables[3].table, + used_fields[help_keyword_name].field, + &error))) + return cleanup_and_return(true); + + /* + Run the query 'SELECT * FROM help_keyword WHERE name LIKE mask' + */ + count_topics= search_keyword(thd,tables[3].table, used_fields, select, + &key_id); + delete select; + count_topics= (count_topics != 1) ? 
0 : + get_topics_for_keyword(thd, tables[0].table, tables[2].table, + used_fields, key_id, &topics_list, &name, + &description, &example); + + } + + if (count_topics == 0) + { + if (!(select= + prepare_select_for_name(thd, mask, mlen, tables[1].table, + used_fields[help_category_name].field, + &error))) + return cleanup_and_return(true); + + List<String> categories_list; + int16 category_id; + int count_categories= search_categories(thd, tables[1].table, used_fields, + select, + &categories_list,&category_id); + delete select; + if (count_categories == 1) + fill_header_2_fields(thd, fields, true); + else + fill_header_2_fields(thd, fields, false); + } + else if (count_topics == 1) + fill_answer_1_fields(thd, fields); + else + fill_header_2_fields(thd, fields, false); + + return cleanup_and_return(false); +} + + /* Server-side function 'help' @@ -679,30 +927,15 @@ static bool mysqld_help_internal(THD *thd, const char *mask) Protocol *protocol= thd->protocol; SQL_SELECT *select; st_find_field used_fields[array_elements(init_used_fields)]; - List<TABLE_LIST> leaves; TABLE_LIST tables[4]; List<String> topics_list, categories_list, subcategories_list; String name, description, example; int count_topics, count_categories, error; size_t mlen= strlen(mask); - size_t i; MEM_ROOT *mem_root= thd->mem_root; - LEX_CSTRING MYSQL_HELP_TOPIC_NAME= {STRING_WITH_LEN("help_topic") }; - LEX_CSTRING MYSQL_HELP_CATEGORY_NAME= {STRING_WITH_LEN("help_category") }; - LEX_CSTRING MYSQL_HELP_RELATION_NAME= {STRING_WITH_LEN("help_relation") }; - LEX_CSTRING MYSQL_HELP_KEYWORD_NAME= {STRING_WITH_LEN("help_keyword") }; DBUG_ENTER("mysqld_help"); - tables[0].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HELP_TOPIC_NAME, 0, TL_READ); - tables[1].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HELP_CATEGORY_NAME, 0, TL_READ); - tables[2].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HELP_RELATION_NAME, 0, TL_READ); - tables[3].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HELP_KEYWORD_NAME, 0, TL_READ); - 
tables[0].next_global= tables[0].next_local= - tables[0].next_name_resolution_table= &tables[1]; - tables[1].next_global= tables[1].next_local= - tables[1].next_name_resolution_table= &tables[2]; - tables[2].next_global= tables[2].next_local= - tables[2].next_name_resolution_table= &tables[3]; + initialize_tables_for_help_command(thd, tables); /* HELP must be available under LOCK TABLES. @@ -714,25 +947,12 @@ static bool mysqld_help_internal(THD *thd, const char *mask) if (open_system_tables_for_read(thd, tables)) goto error2; - /* - Init tables and fields to be usable from items - tables do not contain VIEWs => we can pass 0 as conds - */ - thd->lex->first_select_lex()->context.table_list= - thd->lex->first_select_lex()->context.first_name_resolution_table= - &tables[0]; - if (setup_tables(thd, &thd->lex->first_select_lex()->context, - &thd->lex->first_select_lex()->top_join_list, - tables, leaves, FALSE, FALSE)) + if (init_items_for_help_command(thd, thd->lex->first_select_lex(), + tables, used_fields)) goto error; - memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields)); - if (init_fields(thd, tables, used_fields, array_elements(used_fields))) - goto error; - for (i=0; i<sizeof(tables)/sizeof(TABLE_LIST); i++) - tables[i].table->file->init_table_handle_for_HANDLER(); if (!(select= - prepare_select_for_name(thd,mask,mlen,tables,tables[0].table, + prepare_select_for_name(thd,mask,mlen,tables[0].table, used_fields[help_topic_name].field,&error))) goto error; @@ -748,7 +968,7 @@ static bool mysqld_help_internal(THD *thd, const char *mask) { int UNINIT_VAR(key_id); if (!(select= - prepare_select_for_name(thd,mask,mlen,tables,tables[3].table, + prepare_select_for_name(thd,mask,mlen,tables[3].table, used_fields[help_keyword_name].field, &error))) goto error; @@ -767,7 +987,7 @@ static bool mysqld_help_internal(THD *thd, const char *mask) int16 category_id; Field *cat_cat_id= used_fields[help_category_parent_category_id].field; if (!(select= - 
prepare_select_for_name(thd,mask,mlen,tables,tables[1].table, + prepare_select_for_name(thd,mask,mlen,tables[1].table, used_fields[help_category_name].field, &error))) goto error; @@ -835,7 +1055,7 @@ static bool mysqld_help_internal(THD *thd, const char *mask) send_variant_2_list(mem_root,protocol, &topics_list, "N", 0)) goto error; if (!(select= - prepare_select_for_name(thd,mask,mlen,tables,tables[1].table, + prepare_select_for_name(thd,mask,mlen,tables[1].table, used_fields[help_category_name].field,&error))) goto error; search_categories(thd, tables[1].table, used_fields, diff --git a/sql/sql_help.h b/sql/sql_help.h index cb3314b756c..b0117649f03 100644 --- a/sql/sql_help.h +++ b/sql/sql_help.h @@ -25,4 +25,6 @@ class THD; bool mysqld_help (THD *thd, const char *text); +bool mysqld_help_prepare(THD *thd, const char *text, List<Item> *fields); + #endif /* SQL_HELP_INCLUDED */ diff --git a/sql/sql_i_s.h b/sql/sql_i_s.h index b44caedb911..a3614d889c9 100644 --- a/sql/sql_i_s.h +++ b/sql/sql_i_s.h @@ -69,8 +69,10 @@ public: { } const Type_handler *type_handler() const { return m_type_handler; } uint char_length() const { return m_char_length; } - uint decimal_precision() const { return (m_char_length / 100) % 100; } - uint decimal_scale() const { return m_char_length % 10; } + decimal_digits_t decimal_precision() const + { return (decimal_digits_t) ((m_char_length / 100) % 100); } + decimal_digits_t decimal_scale() const + { return (decimal_digits_t) (m_char_length % 10); } uint fsp() const { DBUG_ASSERT(m_char_length <= TIME_SECOND_PART_DIGITS); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index d9796fb4380..4b58514a7a3 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -77,10 +77,10 @@ #include "sql_audit.h" #include "sql_derived.h" // mysql_handle_derived #include "sql_prepare.h" +#include "debug_sync.h" // DEBUG_SYNC +#include "debug.h" // debug_crash_here #include <my_bit.h> -#include "debug_sync.h" - #ifdef WITH_WSREP #include 
"wsrep_trans_observer.h" /* wsrep_start_transction() */ #endif /* WITH_WSREP */ @@ -642,7 +642,7 @@ static int create_insert_stmt_from_insert_delayed(THD *thd, String *buf) { /* Make a copy of thd->query() and then remove the "DELAYED" keyword */ - if (buf->append(thd->query()) || + if (buf->append(thd->query(), thd->query_length()) || buf->replace(thd->lex->keyword_delayed_begin_offset, thd->lex->keyword_delayed_end_offset - thd->lex->keyword_delayed_begin_offset, NULL, 0)) @@ -709,7 +709,7 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list, List_item *values; Name_resolution_context *context; Name_resolution_context_state ctx_state; - SELECT_LEX *returning= thd->lex->has_returning() ? thd->lex->returning() : 0; + SELECT_LEX *returning= thd->lex->has_returning() ? thd->lex->returning() : 0; unsigned char *readbuff= NULL; #ifndef EMBEDDED_LIBRARY @@ -987,7 +987,8 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list, */ if (returning && result->send_result_set_metadata(returning->item_list, - Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) goto values_loop_end; THD_STAGE_INFO(thd, stage_update); @@ -997,6 +998,10 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list, restore_record(table,s->default_values); // Get empty record thd->reconsider_logging_format_for_iodup(table); } + fix_rownum_pointers(thd, thd->lex->current_select, &info.accepted_rows); + if (returning) + fix_rownum_pointers(thd, thd->lex->returning(), &info.accepted_rows); + do { DBUG_PRINT("info", ("iteration %llu", iteration)); @@ -1123,6 +1128,7 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list, error= write_record(thd, table, &info, result); if (unlikely(error)) break; + info.accepted_rows++; thd->get_stmt_da()->inc_current_row_for_warning(); } its.rewind(); @@ -2160,6 +2166,9 @@ int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink) goto after_trg_or_ignored_err; } + /* Notify the engine about insert ignore 
operation */ + if (info->handle_duplicates == DUP_ERROR && info->ignore) + table->file->extra(HA_EXTRA_IGNORE_INSERT); after_trg_n_copied_inc: info->copied++; thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row); @@ -2174,8 +2183,11 @@ ok: autoinc values (generated inside the handler::ha_write()) and values updated in ON DUPLICATE KEY UPDATE. */ - if (sink && sink->send_data(thd->lex->returning()->item_list) < 0) - trg_error= 1; + if (sink) + { + if (sink->send_data(thd->lex->returning()->item_list) < 0) + trg_error= 1; + } after_trg_or_ignored_err: if (key) @@ -3013,8 +3025,8 @@ public: }; -bool Delayed_prelocking_strategy:: -handle_table(THD *thd, Query_tables_list *prelocking_ctx, +bool Delayed_prelocking_strategy::handle_table(THD *thd, + Query_tables_list *prelocking_ctx, TABLE_LIST *table_list, bool *need_prelocking) { DBUG_ASSERT(table_list->lock_type == TL_WRITE_DELAYED); @@ -3028,10 +3040,9 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx, } -bool Delayed_prelocking_strategy:: -handle_routine(THD *thd, Query_tables_list *prelocking_ctx, - Sroutine_hash_entry *rt, sp_head *sp, - bool *need_prelocking) +bool Delayed_prelocking_strategy::handle_routine(THD *thd, + Query_tables_list *prelocking_ctx, Sroutine_hash_entry *rt, + sp_head *sp, bool *need_prelocking) { /* LEX used by the delayed insert thread has no routines. 
*/ DBUG_ASSERT(0); @@ -4182,6 +4193,7 @@ bool select_insert::prepare_eof() if (info.ignore || info.handle_duplicates != DUP_ERROR) if (table->file->ha_table_flags() & HA_DUPLICATE_POS) table->file->ha_rnd_end(); + table->file->extra(HA_EXTRA_END_ALTER_COPY); table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); @@ -4411,7 +4423,8 @@ Field *Item::create_field_for_create_select(MEM_ROOT *root, TABLE *table) */ TABLE *select_create::create_table_from_items(THD *thd, List<Item> *items, - MYSQL_LOCK **lock, TABLEOP_HOOKS *hooks) + MYSQL_LOCK **lock, + TABLEOP_HOOKS *hooks) { TABLE tmp_table; // Used during 'Create_field()' TABLE_SHARE share; @@ -4469,7 +4482,7 @@ TABLE *select_create::create_table_from_items(THD *thd, List<Item> *items, if (!cr_field) DBUG_RETURN(NULL); - if (item->maybe_null) + if (item->maybe_null()) cr_field->flags &= ~NOT_NULL_FLAG; alter_info->create_list.push_back(cr_field, thd->mem_root); } @@ -4519,7 +4532,8 @@ TABLE *select_create::create_table_from_items(THD *thd, List<Item> *items, open_table(). */ - if (!mysql_create_table_no_lock(thd, &create_table->db, + if (!mysql_create_table_no_lock(thd, &ddl_log_state_create, &ddl_log_state_rm, + &create_table->db, &create_table->table_name, create_info, alter_info, NULL, select_field_count, create_table)) @@ -4579,6 +4593,8 @@ TABLE *select_create::create_table_from_items(THD *thd, List<Item> *items, { if (likely(!thd->is_error())) // CREATE ... 
IF NOT EXISTS my_ok(thd); // succeed, but did nothing + ddl_log_complete(&ddl_log_state_rm); + ddl_log_complete(&ddl_log_state_create); DBUG_RETURN(NULL); } @@ -4617,7 +4633,9 @@ TABLE *select_create::create_table_from_items(THD *thd, List<Item> *items, *lock= 0; } drop_open_table(thd, table, &create_table->db, &create_table->table_name); - DBUG_RETURN(0); + ddl_log_complete(&ddl_log_state_rm); + ddl_log_complete(&ddl_log_state_create); + DBUG_RETURN(NULL); /* purecov: end */ } table->s->table_creation_was_logged= save_table_creation_was_logged; @@ -4723,6 +4741,9 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u) { /* Original table was deleted. We have to log it */ log_drop_table(thd, &create_table->db, &create_table->table_name, + &create_info->org_storage_engine_name, + create_info->db_type == partition_hton, + &create_info->org_tabledef_version, thd->lex->tmp_table()); } @@ -4794,7 +4815,11 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u) if (info.handle_duplicates == DUP_UPDATE) table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE); if (thd->locked_tables_mode <= LTM_LOCK_TABLES) + { table->file->ha_start_bulk_insert((ha_rows) 0); + if (thd->lex->duplicates == DUP_ERROR && !thd->lex->ignore) + table->file->extra(HA_EXTRA_BEGIN_ALTER_COPY); + } thd->abort_on_warning= !info.ignore && thd->is_strict_mode(); if (check_that_all_fields_are_given_values(thd, table, table_list)) DBUG_RETURN(1); @@ -4925,12 +4950,12 @@ bool binlog_drop_table(THD *thd, TABLE *table) if (!thd->binlog_table_should_be_logged(&table->s->db)) return 0; - query.append("DROP "); + query.append(STRING_WITH_LEN("DROP ")); if (table->s->tmp_table) - query.append("TEMPORARY "); - query.append("TABLE IF EXISTS "); + query.append(STRING_WITH_LEN("TEMPORARY ")); + query.append(STRING_WITH_LEN("TABLE IF EXISTS ")); append_identifier(thd, &query, &table->s->db); - query.append("."); + query.append('.'); append_identifier(thd, &query, &table->s->table_name); return 
thd->binlog_query(THD::STMT_QUERY_TYPE, @@ -4964,11 +4989,30 @@ bool select_create::send_eof() if (thd->slave_thread) thd->variables.binlog_annotate_row_events= 0; + debug_crash_here("ddl_log_create_before_binlog"); + + /* + In case of crash, we have to add DROP TABLE to the binary log as + the CREATE TABLE will already be logged if we are not using row based + replication. + */ + if (!thd->is_current_stmt_binlog_format_row()) + { + if (ddl_log_state_create.is_active()) // Not temporary table + ddl_log_update_phase(&ddl_log_state_create, DDL_CREATE_TABLE_PHASE_LOG); + /* + We can ignore if we replaced an old table as ddl_log_state_create will + now handle the logging of the drop if needed. + */ + ddl_log_complete(&ddl_log_state_rm); + } + if (prepare_eof()) { abort_result_set(); DBUG_RETURN(true); } + debug_crash_here("ddl_log_create_after_prepare_eof"); if (table->s->tmp_table) { @@ -5035,9 +5079,15 @@ bool select_create::send_eof() thd->get_stmt_da()->set_overwrite_status(true); } #endif /* WITH_WSREP */ + thd->binlog_xid= thd->query_id; + /* Remember xid's for the case of row based logging */ + ddl_log_update_xid(&ddl_log_state_create, thd->binlog_xid); + ddl_log_update_xid(&ddl_log_state_rm, thd->binlog_xid); trans_commit_stmt(thd); if (!(thd->variables.option_bits & OPTION_GTID_BEGIN)) trans_commit_implicit(thd); + thd->binlog_xid= 0; + #ifdef WITH_WSREP if (WSREP(thd)) { @@ -5055,7 +5105,30 @@ bool select_create::send_eof() mysql_mutex_unlock(&thd->LOCK_thd_data); } #endif /* WITH_WSREP */ + + /* Log query to ddl log */ + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("CREATE") }; + if ((ddl_log.org_partitioned= (create_info->db_type == partition_hton))) + ddl_log.org_storage_engine_name= create_info->new_storage_engine_name; + else + lex_string_set(&ddl_log.org_storage_engine_name, + ha_resolve_storage_engine_name(create_info->db_type)); + ddl_log.org_database= create_table->db; + ddl_log.org_table= 
create_table->table_name; + ddl_log.org_table_id= create_info->tabledef_version; + backup_log_ddl(&ddl_log); } + /* + If are using statement based replication the table will be deleted here + in case of a crash as we can't use xid to check if the query was logged + (as the query was logged before commit!) + */ + debug_crash_here("ddl_log_create_after_binlog"); + ddl_log_complete(&ddl_log_state_rm); + ddl_log_complete(&ddl_log_state_create); + debug_crash_here("ddl_log_create_log_complete"); /* exit_done must only be set after last potential call to @@ -5160,17 +5233,46 @@ void select_create::abort_result_set() drop_open_table(thd, table, &create_table->db, &create_table->table_name); table=0; // Safety - if (thd->log_current_statement && mysql_bin_log.is_open()) + if (thd->log_current_statement) { - /* Remove logging of drop, create + insert rows */ - binlog_reset_cache(thd); - /* Original table was deleted. We have to log it */ - if (table_creation_was_logged) - log_drop_table(thd, &create_table->db, &create_table->table_name, - tmp_table); + if (mysql_bin_log.is_open()) + { + /* Remove logging of drop, create + insert rows */ + binlog_reset_cache(thd); + /* Original table was deleted. 
We have to log it */ + if (table_creation_was_logged) + { + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state_create, thd->binlog_xid); + ddl_log_update_xid(&ddl_log_state_rm, thd->binlog_xid); + debug_crash_here("ddl_log_create_before_binlog"); + log_drop_table(thd, &create_table->db, &create_table->table_name, + &create_info->org_storage_engine_name, + create_info->db_type == partition_hton, + &create_info->tabledef_version, + tmp_table); + debug_crash_here("ddl_log_create_after_binlog"); + thd->binlog_xid= 0; + } + } + else if (!tmp_table) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("DROP_AFTER_CREATE") }; + ddl_log.org_partitioned= (create_info->db_type == partition_hton); + ddl_log.org_storage_engine_name= create_info->org_storage_engine_name; + ddl_log.org_database= create_table->db; + ddl_log.org_table= create_table->table_name; + ddl_log.org_table_id= create_info->tabledef_version; + backup_log_ddl(&ddl_log); + } } } + ddl_log_complete(&ddl_log_state_rm); + ddl_log_complete(&ddl_log_state_create); + if (create_info->table_was_deleted) { /* Unlock locked table that was dropped by CREATE. */ diff --git a/sql/sql_join_cache.h b/sql/sql_join_cache.h index d0bf4761f65..2c3bf4022ca 100644 --- a/sql/sql_join_cache.h +++ b/sql/sql_join_cache.h @@ -206,11 +206,14 @@ protected: /* This flag indicates that records written into the join buffer contain - a match flag field. The flag must be set by the init method. + a match flag field. + This is set to true for the first inner table of an outer join or a + semi-join. + The flag must be set by the init method. Currently any implementation of the virtial init method calls the function JOIN_CACHE::calc_record_fields() to set this flag. */ - bool with_match_flag; + bool with_match_flag; /* This flag indicates that any record is prepended with the length of the record which allows us to skip the record or part of it without reading. 
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 413cd0f18e7..b743141598f 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -55,7 +55,8 @@ const LEX_CSTRING null_clex_str= {NULL, 0}; const LEX_CSTRING empty_clex_str= {"", 0}; const LEX_CSTRING star_clex_str= {"*", 1}; const LEX_CSTRING param_clex_str= {"?", 1}; - +const LEX_CSTRING NULL_clex_str= {STRING_WITH_LEN("NULL")}; +const LEX_CSTRING error_clex_str= {STRING_WITH_LEN("error")}; /** Helper action for a case expression statement (the expr in 'CASE expr'). @@ -623,7 +624,8 @@ Query_tables_list::binlog_stmt_unsafe_errcode[BINLOG_STMT_UNSAFE_COUNT] = the correspondent text to the existing error message during merging to non-GA release. */ - ER_BINLOG_UNSAFE_SYSTEM_VARIABLE + ER_BINLOG_UNSAFE_SYSTEM_VARIABLE, + ER_BINLOG_UNSAFE_SKIP_LOCKED }; @@ -1262,35 +1264,42 @@ void LEX::start(THD *thd_arg) unit.slave= current_select= all_selects_list= &builtin_select; sql_cache= LEX::SQL_CACHE_UNSPECIFIED; describe= 0; - analyze_stmt= 0; - explain_json= false; context_analysis_only= 0; derived_tables= 0; with_cte_resolution= false; only_cte_resolution= false; - safe_to_cache_query= 1; parsing_options.reset(); - empty_field_list_on_rset= 0; part_info= 0; m_sql_cmd= NULL; duplicates= DUP_ERROR; - ignore= 0; spname= NULL; spcont= NULL; proc_list.first= 0; - escape_used= FALSE; - default_used= FALSE; query_tables= 0; reset_query_tables_list(FALSE); clause_that_disallows_subselect= NULL; - selects_allow_into= FALSE; - selects_allow_procedure= FALSE; - use_only_table_context= FALSE; - parse_vcol_expr= FALSE; - check_exists= FALSE; - create_info.lex_start(); + + /* reset bool variables */ + is_shutdown_wait_for_slaves= 0; + selects_allow_procedure= 0; + parse_vcol_expr= 0; + analyze_stmt= 0; + explain_json= 0; + local_file= 0; + check_exists= 0; verbose= 0; + safe_to_cache_query= 1; + ignore= 0; + next_is_main= 0; + next_is_down= 0; + empty_field_list_on_rset= 0; + use_only_table_context= 0; + escape_used= 0; + default_used= 0; 
+ with_rownum= FALSE; + is_lex_started= 1; + create_info.lex_start(); name= null_clex_str; event_parse_data= NULL; profile_options= PROFILE_NONE; @@ -1319,11 +1328,6 @@ void LEX::start(THD *thd_arg) vers_conditions.empty(); period_conditions.empty(); - is_lex_started= TRUE; - - next_is_main= FALSE; - next_is_down= FALSE; - wild= 0; exchange= 0; @@ -1413,6 +1417,7 @@ int Lex_input_stream::find_keyword(Lex_ident_cli_st *kwd, case EXCEPTION_MARIADB_SYM: return EXCEPTION_ORACLE_SYM; case EXIT_MARIADB_SYM: return EXIT_ORACLE_SYM; case GOTO_MARIADB_SYM: return GOTO_ORACLE_SYM; + case MINUS_ORACLE_SYM: return EXCEPT_SYM; case NUMBER_MARIADB_SYM: return NUMBER_ORACLE_SYM; case OTHERS_MARIADB_SYM: return OTHERS_ORACLE_SYM; case PACKAGE_MARIADB_SYM: return PACKAGE_ORACLE_SYM; @@ -2786,8 +2791,10 @@ int Lex_input_stream::scan_ident_middle(THD *thd, Lex_ident_cli_st *str, if (resolve_introducer && m_tok_start[0] == '_') { ErrConvString csname(str->str + 1, str->length - 1, &my_charset_bin); + myf utf8_flag= thd->get_utf8_flag(); CHARSET_INFO *cs= get_charset_by_csname(csname.ptr(), - MY_CS_PRIMARY, MYF(0)); + MY_CS_PRIMARY, + MYF(utf8_flag)); if (cs) { body_utf8_append(m_cpp_text_start, m_cpp_tok_start + length); @@ -2901,28 +2908,33 @@ void st_select_lex_unit::init_query() { init_query_common(); set_linkage(GLOBAL_OPTIONS_TYPE); - lim.set_unlimited(); + lim.clear(); union_distinct= 0; - prepared= optimized= optimized_2= executed= 0; - bag_set_op_optimized= 0; - optimize_started= 0; item= 0; union_result= 0; table= 0; fake_select_lex= 0; saved_fake_select_lex= 0; - cleaned= 0; item_list.empty(); - describe= 0; found_rows_for_union= 0; derived= 0; - is_view= false; with_clause= 0; with_element= 0; + + /* reset all bit fields */ + prepared= 0; + optimized= 0; + optimized_2= 0; + executed= 0; + cleaned= 0; + bag_set_op_optimized= 0; + optimize_started= 0; + have_except_all_or_intersect_all= 0; + with_wrapped_tvc= 0; + is_view= 0; + describe= 0; cloned_from= 0; - 
columns_are_renamed= false; - with_wrapped_tvc= false; - have_except_all_or_intersect_all= false; + columns_are_renamed= 0; } void st_select_lex::init_query() @@ -2935,15 +2947,40 @@ void st_select_lex::init_query() leaf_tables_prep.empty(); leaf_tables.empty(); item_list.empty(); + fix_after_optimize.empty(); min_max_opt_list.empty(); + limit_params.clear(); join= 0; cur_pos_in_select_list= UNDEF_POS; having= prep_having= where= prep_where= 0; cond_pushed_into_where= cond_pushed_into_having= 0; attach_to_conds.empty(); olap= UNSPECIFIED_OLAP_TYPE; + + /* reset all bit fields */ + is_item_list_lookup= 0; + have_merged_subqueries= 0; + is_set_query_expr_tail= 0; + with_sum_func= with_rownum= 0; + braces= 0; + automatic_brackets= 0; having_fix_field= 0; having_fix_field_for_pushed_cond= 0; + subquery_in_having= 0; + is_item_list_lookup= 0; + with_all_modifier= 0; + is_correlated= 0; + first_natural_join_processing= 1; + first_cond_optimization= 1; + no_wrap_view_item= 0; + exclude_from_table_unique_test= 0; + in_tvc= 0; + skip_locked= 0; + m_non_agg_field_used= 0; + m_agg_func_used= 0; + m_custom_agg_func_used= 0; + is_service_select= 0; + context.select_lex= this; context.init(); cond_count= between_count= with_wild= 0; @@ -2956,29 +2993,20 @@ void st_select_lex::init_query() n_child_sum_items= 0; hidden_bit_fields= 0; fields_in_window_functions= 0; - subquery_in_having= explicit_limit= 0; - is_item_list_lookup= 0; changed_elements= 0; - first_natural_join_processing= 1; - first_cond_optimization= 1; - is_service_select= 0; parsing_place= NO_MATTER; save_parsing_place= NO_MATTER; context_analysis_place= NO_MATTER; - exclude_from_table_unique_test= no_wrap_view_item= FALSE; nest_level= 0; link_next= 0; prep_leaf_list_state= UNINIT; - have_merged_subqueries= FALSE; bzero((char*) expr_cache_may_be_used, sizeof(expr_cache_may_be_used)); select_list_tables= 0; - m_non_agg_field_used= false; - m_agg_func_used= false; - m_custom_agg_func_used= false; + 
rownum_in_field_list= 0; + window_specs.empty(); window_funcs.empty(); tvc= 0; - in_tvc= false; versioned_tables= 0; pushdown_select= 0; } @@ -2994,6 +3022,7 @@ void st_select_lex::init_select() db= null_clex_str; having= 0; table_join_options= 0; + select_lock= select_lock_type::NONE; in_sum_expr= with_wild= 0; options= 0; ftfunc_list_alloc.empty(); @@ -3001,20 +3030,24 @@ void st_select_lex::init_select() ftfunc_list= &ftfunc_list_alloc; order_list.empty(); /* Set limit and offset to default values */ - select_limit= 0; /* denotes the default limit = HA_POS_ERROR */ - offset_limit= 0; /* denotes the default offset = 0 */ - is_set_query_expr_tail= false; + limit_params.clear(); + + /* Reset bit fields */ + is_set_query_expr_tail= 0; with_sum_func= 0; with_all_modifier= 0; is_correlated= 0; + in_tvc= 0; + skip_locked= 0; + m_non_agg_field_used= 0; + m_agg_func_used= 0; + m_custom_agg_func_used= 0; + cur_pos_in_select_list= UNDEF_POS; cond_value= having_value= Item::COND_UNDEF; inner_refs_list.empty(); insert_tables= 0; merged_into= 0; - m_non_agg_field_used= false; - m_agg_func_used= false; - m_custom_agg_func_used= false; name_visibility_map.clear_all(); with_dep= 0; join= 0; @@ -3024,7 +3057,6 @@ void st_select_lex::init_select() tvc= 0; in_funcs.empty(); curr_tvc_name= 0; - in_tvc= false; versioned_tables= 0; nest_flags= 0; } @@ -3374,7 +3406,7 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last, */ bool st_select_lex::test_limit() { - if (select_limit != 0) + if (limit_params.select_limit) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "LIMIT & IN/ALL/ANY/SOME subquery"); @@ -3393,24 +3425,26 @@ st_select_lex* st_select_lex_unit::outer_select() ha_rows st_select_lex::get_offset() { - ulonglong val= 0; + ha_rows val= 0; + Item *offset_limit= limit_params.offset_limit; if (offset_limit) { // see comment for st_select_lex::get_limit() bool err= offset_limit->fix_fields_if_needed(master_unit()->thd, NULL); DBUG_ASSERT(!err); - val= err ? 
HA_POS_ERROR : offset_limit->val_uint(); + val= err ? HA_POS_ERROR : (ha_rows)offset_limit->val_uint(); } - return (ha_rows)val; + return val; } ha_rows st_select_lex::get_limit() { - ulonglong val= HA_POS_ERROR; + ha_rows val= HA_POS_ERROR; + Item *select_limit= limit_params.select_limit; if (select_limit) { /* @@ -3441,10 +3475,10 @@ ha_rows st_select_lex::get_limit() */ bool err= select_limit->fix_fields_if_needed(master_unit()->thd, NULL); DBUG_ASSERT(!err); - val= err ? HA_POS_ERROR : select_limit->val_uint(); + val= err ? HA_POS_ERROR : (ha_rows) select_limit->val_uint(); } - return (ha_rows)val; + return val; } @@ -3509,12 +3543,6 @@ List<Item>* st_select_lex::get_item_list() return &item_list; } -ulong st_select_lex::get_table_join_options() -{ - return table_join_options; -} - - bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) { @@ -3618,10 +3646,10 @@ void LEX::print(String *str, enum_query_type query_type) (*ord->item)->print(str, query_type); } } - if (sel->select_limit) + if (sel->limit_params.select_limit) { str->append(STRING_WITH_LEN(" LIMIT ")); - sel->select_limit->print(str, query_type); + sel->limit_params.select_limit->print(str, query_type); } } else if (sql_command == SQLCOM_DELETE) @@ -3653,10 +3681,10 @@ void LEX::print(String *str, enum_query_type query_type) (*ord->item)->print(str, query_type); } } - if (sel->select_limit) + if (sel->limit_params.select_limit) { str->append(STRING_WITH_LEN(" LIMIT ")); - sel->select_limit->print(str, query_type); + sel->limit_params.select_limit->print(str, query_type); } } else @@ -3758,15 +3786,38 @@ void st_select_lex::print_limit(THD *thd, return; } } - if (explicit_limit && select_limit) + if (limit_params.explicit_limit && + limit_params.select_limit) { - str->append(STRING_WITH_LEN(" limit ")); - if (offset_limit) + /* + [OFFSET n] + FETCH FIRST n ROWS WITH TIES + + For FETCH FIRST n ROWS ONLY we fall back to the "limit" specification + as it's identical. 
+ */ + if (limit_params.with_ties) { - offset_limit->print(str, query_type); - str->append(','); + if (limit_params.offset_limit) + { + str->append(STRING_WITH_LEN(" offset ")); + limit_params.offset_limit->print(str, query_type); + str->append(STRING_WITH_LEN(" rows ")); + } + str->append(STRING_WITH_LEN(" fetch first ")); + limit_params.select_limit->print(str, query_type); + str->append(STRING_WITH_LEN(" rows with ties")); + } + else + { + str->append(STRING_WITH_LEN(" limit ")); + if (limit_params.offset_limit) + { + limit_params.offset_limit->print(str, query_type); + str->append(','); + } + limit_params.select_limit->print(str, query_type); } - select_limit->print(str, query_type); } } @@ -3820,6 +3871,12 @@ void LEX::cleanup_lex_after_parse_error(THD *thd) thd->lex->sphead= NULL; } } + + /* + json_table must be NULL before the query. + Didn't want to overload LEX::start, it's enough to put it here. + */ + thd->lex->json_table= 0; } /* @@ -3906,9 +3963,10 @@ void Query_tables_list::destroy_query_tables_list() */ LEX::LEX() - : explain(NULL), result(0), part_info(NULL), arena_for_set_stmt(0), mem_root_for_set_stmt(0), - option_type(OPT_DEFAULT), context_analysis_only(0), sphead(0), - default_used(0), is_lex_started(0), limit_rows_examined_cnt(ULONGLONG_MAX) + : explain(NULL), result(0), part_info(NULL), arena_for_set_stmt(0), + mem_root_for_set_stmt(0), json_table(NULL), default_used(0), + with_rownum(0), is_lex_started(0), option_type(OPT_DEFAULT), + context_analysis_only(0), sphead(0), limit_rows_examined_cnt(ULONGLONG_MAX) { init_dynamic_array2(PSI_INSTRUMENT_ME, &plugins, sizeof(plugin_ref), @@ -3974,7 +4032,7 @@ bool LEX::can_be_merged() first_select_lex()->with_sum_func == 0 && first_select_lex()->table_list.elements >= 1 && !(first_select_lex()->options & SELECT_DISTINCT) && - first_select_lex()->select_limit == 0); + first_select_lex()->limit_params.select_limit == 0); } @@ -4021,6 +4079,10 @@ bool LEX::can_use_merged() SYNOPSIS 
LEX::can_not_use_merged() + @param no_update_or_delete Set to 1 if we can't use merge with multiple-table + updates, like when used from + TALE_LIST::init_derived() + DESCRIPTION Temporary table algorithm will be used on all SELECT levels for queries listed here (see also LEX::can_use_merged()). @@ -4030,10 +4092,9 @@ bool LEX::can_use_merged() TRUE - VIEWs with MERGE algorithms can be used */ -bool LEX::can_not_use_merged() +bool LEX::can_not_use_merged(bool no_update_or_delete) { - switch (sql_command) - { + switch (sql_command) { case SQLCOM_CREATE_VIEW: case SQLCOM_SHOW_CREATE: /* @@ -4043,6 +4104,13 @@ bool LEX::can_not_use_merged() */ case SQLCOM_SHOW_FIELDS: return TRUE; + + case SQLCOM_UPDATE_MULTI: + case SQLCOM_DELETE_MULTI: + if (no_update_or_delete) + return TRUE; + /* Fall through */ + default: return FALSE; } @@ -4173,10 +4241,9 @@ void st_select_lex_unit::set_limit(st_select_lex *sl) { DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare()); - lim.set_limit(sl->get_limit(), sl->get_offset()); + lim.set_limit(sl->get_limit(), sl->get_offset(), sl->limit_params.with_ties); } - /** Decide if a temporary table is needed for the UNION. @@ -4356,7 +4423,7 @@ void LEX::set_trg_event_type_for_tables() parsing. 
*/ if (static_cast<int>(tables->lock_type) >= - static_cast<int>(TL_WRITE_ALLOW_WRITE)) + static_cast<int>(TL_FIRST_WRITE)) tables->trg_event_map= new_trg_event_map; tables= tables->next_local; } @@ -4800,7 +4867,7 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only) if (subquery_predicate) { - if (!subquery_predicate->fixed) + if (!subquery_predicate->fixed()) { /* This subquery was excluded as part of some expression so it is @@ -5048,6 +5115,7 @@ void st_select_lex::remap_tables(TABLE_LIST *derived, table_map map, uint tablenr, SELECT_LEX *parent_lex) { bool first_table= TRUE; + bool has_table_function= FALSE; TABLE_LIST *tl; table_map first_map; uint first_tablenr; @@ -5089,6 +5157,19 @@ void st_select_lex::remap_tables(TABLE_LIST *derived, table_map map, emb && emb->select_lex == old_sl; emb= emb->embedding) emb->select_lex= parent_lex; + + if (tl->table_function) + has_table_function= TRUE; + } + + if (has_table_function) + { + ti.rewind(); + while ((tl= ti++)) + { + if (tl->table_function) + tl->table_function->fix_after_pullout(tl, parent_lex, true); + } } } @@ -5264,6 +5345,9 @@ void SELECT_LEX::update_used_tables() left_expr->walk(&Item::update_table_bitmaps_processor, FALSE, NULL); } + if (tl->table_function) + tl->table_function->update_used_tables(); + embedding= tl->embedding; while (embedding) { @@ -8729,21 +8813,21 @@ bool st_select_lex::collect_grouping_fields(THD *thd) condition over the grouping fields of this select. The method uses the call-back parameter checker to check whether a primary formula depends only on grouping fields. - The subformulas that are not usable are marked with the flag NO_EXTRACTION_FL. + The subformulas that are not usable are marked with the flag MARKER_NO_EXTRACTION. The subformulas that can be entierly extracted are marked with the flag - FULL_EXTRACTION_FL. + MARKER_FULL_EXTRACTION. @note This method is called before any call of extract_cond_for_grouping_fields. 
- The flag NO_EXTRACTION_FL set in a subformula allows to avoid building clone + The flag MARKER_NO_EXTRACTION set in a subformula allows to avoid building clone for the subformula when extracting the pushable condition. - The flag FULL_EXTRACTION_FL allows to delete later all top level conjuncts + The flag MARKER_FULL_EXTRACTION allows to delete later all top level conjuncts from cond. */ void st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond) { - if (cond->get_extraction_flag() == NO_EXTRACTION_FL) + if (cond->get_extraction_flag() == MARKER_NO_EXTRACTION) return; cond->clear_extraction_flag(); if (cond->type() == Item::COND_ITEM) @@ -8754,26 +8838,26 @@ st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond) List<Item> *arg_list= ((Item_cond*) cond)->argument_list(); List_iterator<Item> li(*arg_list); - uint count= 0; // to count items not containing NO_EXTRACTION_FL - uint count_full= 0; // to count items with FULL_EXTRACTION_FL + uint count= 0; // to count items not containing MARKER_NO_EXTRACTION + uint count_full= 0; // to count items with MARKER_FULL_EXTRACTION Item *item; while ((item=li++)) { check_cond_extraction_for_grouping_fields(thd, item); - if (item->get_extraction_flag() != NO_EXTRACTION_FL) + if (item->get_extraction_flag() != MARKER_NO_EXTRACTION) { count++; - if (item->get_extraction_flag() == FULL_EXTRACTION_FL) + if (item->get_extraction_flag() == MARKER_FULL_EXTRACTION) count_full++; } else if (!and_cond) break; } if ((and_cond && count == 0) || item) - cond->set_extraction_flag(NO_EXTRACTION_FL); + cond->set_extraction_flag(MARKER_NO_EXTRACTION); if (count_full == arg_list->elements) { - cond->set_extraction_flag(FULL_EXTRACTION_FL); + cond->set_extraction_flag(MARKER_FULL_EXTRACTION); } if (cond->get_extraction_flag() != 0) { @@ -8785,7 +8869,7 @@ st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond) else { int fl= cond->excl_dep_on_grouping_fields(this) && 
!cond->is_expensive() ? - FULL_EXTRACTION_FL : NO_EXTRACTION_FL; + MARKER_FULL_EXTRACTION : MARKER_NO_EXTRACTION; cond->set_extraction_flag(fl); } } @@ -8805,7 +8889,7 @@ st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond) For the given condition cond this method finds out what condition depended only on the grouping fields can be extracted from cond. If such condition C exists the method builds the item for it. - This method uses the flags NO_EXTRACTION_FL and FULL_EXTRACTION_FL set by the + This method uses the flags MARKER_NO_EXTRACTION and MARKER_FULL_EXTRACTION set by the preliminary call of st_select_lex::check_cond_extraction_for_grouping_fields to figure out whether a subformula depends only on these fields or not. @note @@ -8825,7 +8909,7 @@ st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond) Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond, bool no_top_clones) { - if (cond->get_extraction_flag() == FULL_EXTRACTION_FL) + if (cond->get_extraction_flag() == MARKER_FULL_EXTRACTION) { if (no_top_clones) return cond; @@ -8849,7 +8933,7 @@ Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond, Item *item; while ((item=li++)) { - if (item->get_extraction_flag() == NO_EXTRACTION_FL) + if (item->get_extraction_flag() == MARKER_NO_EXTRACTION) { DBUG_ASSERT(cond_and); item->clear_extraction_flag(); @@ -9320,6 +9404,26 @@ Item *LEX::make_item_func_substr(THD *thd, Item *a, Item *b) } +Item *LEX::make_item_func_sysdate(THD *thd, uint fsp) +{ + /* + Unlike other time-related functions, SYSDATE() is + replication-unsafe because it is not affected by the + TIMESTAMP variable. It is unsafe even if + sysdate_is_now=1, because the slave may have + sysdate_is_now=0. + */ + set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); + Item *item= global_system_variables.sysdate_is_now == 0 ? 
+ (Item *) new (thd->mem_root) Item_func_sysdate_local(thd, fsp) : + (Item *) new (thd->mem_root) Item_func_now_local(thd, fsp); + if (unlikely(item == NULL)) + return NULL; + safe_to_cache_query=0; + return item; +} + + Item *LEX::make_item_func_replace(THD *thd, Item *org, Item *find, @@ -9831,19 +9935,30 @@ void Lex_select_lock::set_to(SELECT_LEX *sel) sel->master_unit()->set_lock_to_the_last_select(*this); else { + thr_lock_type lock_type; sel->parent_lex->safe_to_cache_query= 0; - if (update_lock) + if (unlikely(skip_locked)) { - sel->lock_type= TL_WRITE; - sel->set_lock_for_tables(TL_WRITE, false); + lock_type= update_lock ? TL_WRITE_SKIP_LOCKED : TL_READ_SKIP_LOCKED; } else { - sel->lock_type= TL_READ_WITH_SHARED_LOCKS; - sel->set_lock_for_tables(TL_READ_WITH_SHARED_LOCKS, false); + lock_type= update_lock ? TL_WRITE : TL_READ_WITH_SHARED_LOCKS; } + sel->lock_type= lock_type; + sel->select_lock= (update_lock ? st_select_lex::select_lock_type::FOR_UPDATE : + st_select_lex::select_lock_type::IN_SHARE_MODE); + sel->set_lock_for_tables(lock_type, false, skip_locked); } } + else + { + /* + select_lock can be FOR_UPDATE in case of + (SELECT x FROM t WINDOW w1 AS () FOR UPDATE) LIMIT 1 + */ + sel->select_lock= st_select_lex::select_lock_type::NONE; + } } bool Lex_order_limit_lock::set_to(SELECT_LEX *sel) @@ -9863,9 +9978,7 @@ bool Lex_order_limit_lock::set_to(SELECT_LEX *sel) return TRUE; } lock.set_to(sel); - sel->explicit_limit= limit.explicit_limit; - sel->select_limit= limit.select_limit; - sel->offset_limit= limit.offset_limit; + sel->limit_params= limit; if (order_list) { if (sel->get_linkage() != GLOBAL_OPTIONS_TYPE && @@ -10184,7 +10297,7 @@ LEX::add_tail_to_query_expression_body_ext_parens(SELECT_LEX_UNIT *unit, pop_select(); if (sel->is_set_query_expr_tail) { - if (!l->order_list && !sel->explicit_limit) + if (!l->order_list && !sel->limit_params.explicit_limit) l->order_list= &sel->order_list; else { @@ -10335,7 +10448,6 @@ bool 
LEX::parsed_create_view(SELECT_LEX_UNIT *unit, int check) bool LEX::select_finalize(st_select_lex_unit *expr) { sql_command= SQLCOM_SELECT; - selects_allow_into= TRUE; selects_allow_procedure= TRUE; if (set_main_unit(expr)) return true; @@ -10646,7 +10758,7 @@ void st_select_lex::pushdown_cond_into_where_clause(THD *thd, Item *cond, This will cause duplicate conditions in WHERE of dt. To avoid repeatable pushdown such OR conditions as or1 describen - above are marked with NO_EXTRACTION_FL. + above are marked with MARKER_NO_EXTRACTION. @note This method is called for pushdown into materialized @@ -10664,12 +10776,12 @@ void mark_or_conds_to_avoid_pushdown(Item *cond) { if (item->type() == Item::COND_ITEM && ((Item_cond*) item)->functype() == Item_func::COND_OR_FUNC) - item->set_extraction_flag(NO_EXTRACTION_FL); + item->set_extraction_flag(MARKER_NO_EXTRACTION); } } else if (cond->type() == Item::COND_ITEM && ((Item_cond*) cond)->functype() == Item_func::COND_OR_FUNC) - cond->set_extraction_flag(NO_EXTRACTION_FL); + cond->set_extraction_flag(MARKER_NO_EXTRACTION); } /** @@ -10683,16 +10795,16 @@ void mark_or_conds_to_avoid_pushdown(Item *cond) The method collects in attach_to_conds list conditions from cond that can be pushed from HAVING into WHERE. - Conditions that can be pushed were marked with FULL_EXTRACTION_FL in + Conditions that can be pushed were marked with MARKER_FULL_EXTRACTION in check_cond_extraction_for_grouping_fields() method. - Conditions that can't be pushed were marked with NO_EXTRACTION_FL. + Conditions that can't be pushed were marked with MARKER_NO_EXTRACTION. Conditions which parts can be pushed weren't marked. There are two types of conditions that can be pushed: 1. 
Condition that can be simply moved from HAVING - (if cond is marked with FULL_EXTRACTION_FL or + (if cond is marked with MARKER_FULL_EXTRACTION or cond is an AND condition and some of its parts are marked with - FULL_EXTRACTION_FL) + MARKER_FULL_EXTRACTION) In this case condition is transformed and pushed into attach_to_conds list. 2. Part of some other condition c1 that can't be entirely pushed @@ -10731,14 +10843,14 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) List<Item> equalities; /* Condition can't be pushed */ - if (cond->get_extraction_flag() == NO_EXTRACTION_FL) + if (cond->get_extraction_flag() == MARKER_NO_EXTRACTION) return false; /** Condition can be pushed entirely. Transform its multiple equalities and add to attach_to_conds list. */ - if (cond->get_extraction_flag() == FULL_EXTRACTION_FL) + if (cond->get_extraction_flag() == MARKER_FULL_EXTRACTION) { Item *result= cond->top_level_transform(thd, &Item::multiple_equality_transformer, (uchar *)this); @@ -10788,9 +10900,9 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) Item *item; while ((item=li++)) { - if (item->get_extraction_flag() == NO_EXTRACTION_FL) + if (item->get_extraction_flag() == MARKER_NO_EXTRACTION) continue; - else if (item->get_extraction_flag() == FULL_EXTRACTION_FL) + else if (item->get_extraction_flag() == MARKER_FULL_EXTRACTION) { Item *result= item->transform(thd, &Item::multiple_equality_transformer, @@ -10923,13 +11035,13 @@ bool st_select_lex::collect_fields_equal_to_grouping(THD *thd) Item *remove_pushed_top_conjuncts_for_having(THD *thd, Item *cond) { /* Nothing to extract */ - if (cond->get_extraction_flag() == NO_EXTRACTION_FL) + if (cond->get_extraction_flag() == MARKER_NO_EXTRACTION) { cond->clear_extraction_flag(); return cond; } /* cond can be pushed in WHERE entirely */ - if (cond->get_extraction_flag() == FULL_EXTRACTION_FL) + if (cond->get_extraction_flag() == MARKER_FULL_EXTRACTION) { 
cond->clear_extraction_flag(); return 0; @@ -10943,13 +11055,13 @@ Item *remove_pushed_top_conjuncts_for_having(THD *thd, Item *cond) Item *item; while ((item=li++)) { - if (item->get_extraction_flag() == NO_EXTRACTION_FL) + if (item->get_extraction_flag() == MARKER_NO_EXTRACTION) item->clear_extraction_flag(); - else if (item->get_extraction_flag() == FULL_EXTRACTION_FL) + else if (item->get_extraction_flag() == MARKER_FULL_EXTRACTION) { if (item->type() == Item::FUNC_ITEM && ((Item_func*) item)->functype() == Item_func::MULT_EQUAL_FUNC) - item->set_extraction_flag(DELETION_FL); + item->set_extraction_flag(MARKER_DELETION); else { item->clear_extraction_flag(); @@ -11018,7 +11130,7 @@ Item *remove_pushed_top_conjuncts_for_having(THD *thd, Item *cond) the condition is put into attach_to_conds as the only its element. 4. Remove conditions from HAVING clause that can be entirely pushed into WHERE. - Multiple equalities are not removed but marked with DELETION_FL flag. + Multiple equalities are not removed but marked with MARKER_DELETION flag. They will be deleted later in substitite_for_best_equal_field() called for the HAVING condition. 5. Unwrap fields wrapped in Item_ref wrappers contained in the condition @@ -11072,7 +11184,7 @@ Item *st_select_lex::pushdown_from_having_into_where(THD *thd, Item *having) /* 4. Remove conditions from HAVING clause that can be entirely pushed into WHERE. - Multiple equalities are not removed but marked with DELETION_FL flag. + Multiple equalities are not removed but marked with MARKER_DELETION flag. They will be deleted later in substitite_for_best_equal_field() called for the HAVING condition. 
*/ diff --git a/sql/sql_lex.h b/sql/sql_lex.h index ecad3ea60ec..82453ad8259 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -34,6 +34,7 @@ #include "sql_tvc.h" #include "item.h" #include "sql_limit.h" // Select_limit_counters +#include "json_table.h" // Json_table_column #include "sql_schema.h" #include "table.h" @@ -388,7 +389,7 @@ void binlog_unsafe_map_init(); #ifdef MYSQL_SERVER extern const LEX_STRING empty_lex_str; -extern MYSQL_PLUGIN_IMPORT const LEX_CSTRING empty_clex_str; +extern const LEX_CSTRING empty_clex_str; extern const LEX_CSTRING star_clex_str; extern const LEX_CSTRING param_clex_str; @@ -455,6 +456,7 @@ enum enum_drop_mode #define TL_OPTION_IGNORE_LEAVES 4 #define TL_OPTION_ALIAS 8 #define TL_OPTION_SEQUENCE 16 +#define TL_OPTION_TABLE_FUNCTION 32 typedef List<Item> List_item; typedef Mem_root_array<ORDER*, true> Group_list_ptrs; @@ -726,12 +728,15 @@ protected: st_select_lex_node *next, **prev, /* neighbor list */ *master, *slave, /* vertical links */ *link_next, **link_prev; /* list of whole SELECT_LEX */ + enum sub_select_type linkage; void init_query_common(); -public: +public: ulonglong options; - + uint8 uncacheable; + bool distinct:1; + bool no_table_names_allowed:1; /* used for global order by */ /* result of this query can't be cached, bit field, can be : UNCACHEABLE_DEPENDENT_GENERATED @@ -741,18 +746,12 @@ public: UNCACHEABLE_EXPLAIN UNCACHEABLE_PREPARE */ - uint8 uncacheable; -private: - enum sub_select_type linkage; -public: + bool is_linkage_set() const { return linkage == UNION_TYPE || linkage == INTERSECT_TYPE || linkage == EXCEPT_TYPE; } enum sub_select_type get_linkage() { return linkage; } - bool distinct; - bool no_table_names_allowed; /* used for global order by */ - static void *operator new(size_t size, MEM_ROOT *mem_root) throw () { return (void*) alloc_root(mem_root, (uint) size); } static void operator delete(void *ptr,size_t size) { TRASH_FREE(ptr, size); } @@ -857,7 +856,6 @@ protected: TABLE_LIST result_table_list; 
select_unit *union_result; ulonglong found_rows_for_union; - bool saved_error; bool prepare_join(THD *thd, SELECT_LEX *sl, select_result *result, ulonglong additional_options, @@ -868,27 +866,53 @@ protected: class Type_holder *holders, uint count); public: bool join_union_item_types(THD *thd, List<Item> &types, uint count); -public: // Ensures that at least all members used during cleanup() are initialized. st_select_lex_unit() - : union_result(NULL), table(NULL), result(NULL), - cleaned(false), bag_set_op_optimized(false), - have_except_all_or_intersect_all(false), fake_select_lex(NULL) + : union_result(NULL), table(NULL), result(NULL), fake_select_lex(NULL), + last_procedure(NULL),cleaned(false), bag_set_op_optimized(false), + have_except_all_or_intersect_all(false) { } TABLE *table; /* temporary table using for appending UNION results */ select_result *result; st_select_lex *pre_last_parse; - bool prepared, // prepare phase already performed for UNION (unit) - optimized, // optimize phase already performed for UNION (unit) - optimized_2, - executed, // already executed - cleaned, - bag_set_op_optimized; + /* + Node on which we should return current_select pointer after parsing + subquery + */ + st_select_lex *return_to; + /* LIMIT clause runtime counters */ + Select_limit_counters lim; + /* not NULL if unit used in subselect, point to subselect item */ + Item_subselect *item; + /* + TABLE_LIST representing this union in the embedding select. Used for + derived tables/views handling. 
+ */ + TABLE_LIST *derived; + /* With clause attached to this unit (if any) */ + With_clause *with_clause; + /* With element where this unit is used as the specification (if any) */ + With_element *with_element; + /* The unit used as a CTE specification from which this unit is cloned */ + st_select_lex_unit *cloned_from; + /* thread handler */ + THD *thd; + /* + SELECT_LEX for hidden SELECT in union which process global + ORDER BY and LIMIT + */ + st_select_lex *fake_select_lex; + /** + SELECT_LEX that stores LIMIT and OFFSET for UNION ALL when noq + fake_select_lex is used. + */ + st_select_lex *saved_fake_select_lex; - bool optimize_started; - bool have_except_all_or_intersect_all; + /* pointer to the last node before last subsequence of UNION ALL */ + st_select_lex *union_distinct; + Procedure *last_procedure; /* Pointer to procedure, if such exists */ // list of fields which points to temporary table for union List<Item> item_list; @@ -900,12 +924,30 @@ public: any SELECT of this unit execution */ List<Item> types; + + bool prepared:1; // prepare phase already performed for UNION (unit) + bool optimized:1; // optimize phase already performed for UNION (unit) + bool optimized_2:1; + bool executed:1; // already executed + bool cleaned:1; + bool bag_set_op_optimized:1; + bool optimize_started:1; + bool have_except_all_or_intersect_all:1; /** TRUE if the unit contained TVC at the top level that has been wrapped into SELECT: VALUES (v1) ... (vn) => SELECT * FROM (VALUES (v1) ... (vn)) as tvc */ - bool with_wrapped_tvc; + bool with_wrapped_tvc:1; + bool is_view:1; + bool describe:1; /* union exec() called for EXPLAIN */ + bool columns_are_renamed:1; + +protected: + /* This is bool, not bit, as it's used and set in many places */ + bool saved_error; +public: + /** Pointer to 'last' select, or pointer to select where we stored global parameters for union. 
@@ -928,43 +970,6 @@ public: return saved_fake_select_lex; return first_select(); }; - //node on which we should return current_select pointer after parsing subquery - st_select_lex *return_to; - /* LIMIT clause runtime counters */ - Select_limit_counters lim; - /* not NULL if unit used in subselect, point to subselect item */ - Item_subselect *item; - /* - TABLE_LIST representing this union in the embedding select. Used for - derived tables/views handling. - */ - TABLE_LIST *derived; - bool is_view; - /* With clause attached to this unit (if any) */ - With_clause *with_clause; - /* With element where this unit is used as the specification (if any) */ - With_element *with_element; - /* The unit used as a CTE specification from which this unit is cloned */ - st_select_lex_unit *cloned_from; - /* thread handler */ - THD *thd; - /* - SELECT_LEX for hidden SELECT in union which process global - ORDER BY and LIMIT - */ - st_select_lex *fake_select_lex; - /** - SELECT_LEX that stores LIMIT and OFFSET for UNION ALL when noq - fake_select_lex is used. - */ - st_select_lex *saved_fake_select_lex; - - /* pointer to the last node before last subsequence of UNION ALL */ - st_select_lex *union_distinct; - bool describe; /* union exec() called for EXPLAIN */ - Procedure *last_procedure; /* Pointer to procedure, if such exists */ - - bool columns_are_renamed; void init_query(); st_select_lex* outer_select(); @@ -1085,18 +1090,10 @@ public: while select3->first_nested points to select2 and select1->first_nested points to select1. 
*/ - st_select_lex *first_nested; - uint8 nest_flags; + Name_resolution_context context; LEX_CSTRING db; - Item *where, *having; /* WHERE & HAVING clauses */ - Item *prep_where; /* saved WHERE clause for prepared statement processing */ - Item *prep_having;/* saved HAVING clause for prepared statement processing */ - Item *cond_pushed_into_where; /* condition pushed into the select's WHERE */ - Item *cond_pushed_into_having; /* condition pushed into the select's HAVING */ - List<Item> attach_to_conds; - /* Saved values of the WHERE and HAVING clauses*/ - Item::cond_result cond_value, having_value; + /* Point to the LEX in which it was created, used in view subquery detection. @@ -1105,22 +1102,53 @@ public: instead of global (from THD) references where it is possible. */ LEX *parent_lex; - enum olap_type olap; - /* FROM clause - points to the beginning of the TABLE_LIST::next_local list */ - SQL_I_List<TABLE_LIST> table_list; + st_select_lex *first_nested; + Item *where, *having; /* WHERE & HAVING clauses */ + Item *prep_where; /* saved WHERE clause for prepared statement processing */ + Item *prep_having;/* saved HAVING clause for prepared statement processing */ + Item *cond_pushed_into_where; /* condition pushed into WHERE */ + Item *cond_pushed_into_having; /* condition pushed into HAVING */ /* - GROUP BY clause. - This list may be mutated during optimization (by remove_const()), - so for prepared statements, we keep a copy of the ORDER.next pointers in - group_list_ptrs, and re-establish the original list before each execution. + nest_levels are local to the query or VIEW, + and that view merge procedure does not re-calculate them. + So we also have to remember unit against which we count levels. */ - SQL_I_List<ORDER> group_list; - Group_list_ptrs *group_list_ptrs; + SELECT_LEX_UNIT *nest_level_base; + Item_sum *inner_sum_func_list; /* list of sum func in nested selects */ + /* + This is a copy of the original JOIN USING list that comes from + the parser. 
The parser : + 1. Sets the natural_join of the second TABLE_LIST in the join + and the st_select_lex::prev_join_using. + 2. Makes a parent TABLE_LIST and sets its is_natural_join/ + join_using_fields members. + 3. Uses the wrapper TABLE_LIST as a table in the upper level. + We cannot assign directly to join_using_fields in the parser because + at stage (1.) the parent TABLE_LIST is not constructed yet and + the assignment will override the JOIN USING fields of the lower level + joins on the right. + */ + List<String> *prev_join_using; + JOIN *join; /* after JOIN::prepare it is pointer to corresponding JOIN */ + TABLE_LIST *embedding; /* table embedding to the above list */ + table_value_constr *tvc; - List<Item> item_list; /* list of fields & expressions */ - List<Item> pre_fix; /* above list before fix_fields */ - bool is_item_list_lookup; + /* The interface employed to execute the select query by a foreign engine */ + select_handler *select_h; + /* The object used to organize execution of the query by a foreign engine */ + select_handler *pushdown_select; + List<TABLE_LIST> *join_list; /* list for the currently parsed join */ + st_select_lex *merged_into; /* select which this select is merged into */ + /* (not 0 only for views/derived tables) */ + const char *type; /* type of select for EXPLAIN */ + + + /* List of references to fields referenced from inner selects */ + List<Item_outer_ref> inner_refs_list; + List<Item> attach_to_conds; + /* Saved values of the WHERE and HAVING clauses*/ + Item::cond_result cond_value, having_value; /* Usually it is pointer to ftfunc_list_alloc, but in union used to create fake select_lex for calling mysql_select under results of union @@ -1132,10 +1160,7 @@ public: have been applied. Used to rollback those optimizations if it's needed. 
*/ List<Item_sum> min_max_opt_list; - JOIN *join; /* after JOIN::prepare it is pointer to corresponding JOIN */ List<TABLE_LIST> top_join_list; /* join list of the top level */ - List<TABLE_LIST> *join_list; /* list for the currently parsed join */ - TABLE_LIST *embedding; /* table embedding to the above list */ List<TABLE_LIST> sj_nests; /* Semi-join nests within this join */ /* Beginning of the list of leaves in a FROM clause, where the leaves @@ -1156,37 +1181,84 @@ public: can be transformed into IN-subselect defined with TVC. */ List<Item_func_in> in_funcs; - /* - Number of current derived table made with TVC during the - transformation of IN-predicate into IN-subquery for this - st_select_lex. - */ - uint curr_tvc_name; - - /* - Needed to correctly generate 'PRIMARY' or 'SIMPLE' for select_type column - of EXPLAIN - */ - bool have_merged_subqueries; - List<TABLE_LIST> leaf_tables; List<TABLE_LIST> leaf_tables_exec; List<TABLE_LIST> leaf_tables_prep; - enum leaf_list_state {UNINIT, READY, SAVED}; - enum leaf_list_state prep_leaf_list_state; - uint insert_tables; - st_select_lex *merged_into; /* select which this select is merged into */ - /* (not 0 only for views/derived tables) */ - const char *type; /* type of select for EXPLAIN */ + /* current index hint kind. used in filling up index_hints */ + enum index_hint_type current_index_hint_type; + + /* + FROM clause - points to the beginning of the TABLE_LIST::next_local list. + */ + SQL_I_List<TABLE_LIST> table_list; + /* + GROUP BY clause. + This list may be mutated during optimization (by remove_const()), + so for prepared statements, we keep a copy of the ORDER.next pointers in + group_list_ptrs, and re-establish the original list before each execution. 
+ */ + SQL_I_List<ORDER> group_list; + Group_list_ptrs *group_list_ptrs; + + List<Item> item_list; /* list of fields & expressions */ + List<Item> pre_fix; /* above list before fix_fields */ + List<Item> fix_after_optimize; SQL_I_List<ORDER> order_list; /* ORDER clause */ SQL_I_List<ORDER> gorder_list; - Item *select_limit, *offset_limit; /* LIMIT clause parameters */ - bool is_set_query_expr_tail; + Lex_select_limit limit_params; /* LIMIT clause parameters */ + + /* Structure to store fields that are used in the GROUP BY of this select */ + List<Field_pair> grouping_tmp_fields; + List<udf_func> udf_list; /* udf function calls stack */ + List<Index_hint> *index_hints; /* list of USE/FORCE/IGNORE INDEX */ + List<List_item> save_many_values; + List<Item> *save_insert_list; + + bool is_item_list_lookup:1; + /* + Needed to correctly generate 'PRIMARY' or 'SIMPLE' for select_type column + of EXPLAIN + */ + bool have_merged_subqueries:1; + bool is_set_query_expr_tail:1; + bool with_sum_func:1; /* sum function indicator */ + bool with_rownum:1; /* rownum() function indicator */ + bool braces:1; /* SELECT ... UNION (SELECT ... ) <- this braces */ + bool automatic_brackets:1; /* dummy select for INTERSECT precedence */ + /* TRUE when having fix field called in processing of this SELECT */ + bool having_fix_field:1; + /* + TRUE when fix field is called for a new condition pushed into the + HAVING clause of this SELECT + */ + bool having_fix_field_for_pushed_cond:1; + /* + there are subquery in HAVING clause => we can't close tables before + query processing end even if we use temporary table + */ + bool subquery_in_having:1; + /* TRUE <=> this SELECT is correlated w.r.t. 
some ancestor select */ + bool with_all_modifier:1; /* used for selects in union */ + bool is_correlated:1; + bool first_natural_join_processing:1; + bool first_cond_optimization:1; + /* do not wrap view fields with Item_ref */ + bool no_wrap_view_item:1; + /* exclude this select from check of unique_table() */ + bool exclude_from_table_unique_test:1; + bool in_tvc:1; + bool skip_locked:1; + bool m_non_agg_field_used:1; + bool m_agg_func_used:1; + bool m_custom_agg_func_used:1; + /* the select is "service-select" and can not have tables */ + bool is_service_select:1; /// Array of pointers to top elements of all_fields list Ref_ptr_array ref_pointer_array; + ulong table_join_options; /* number of items in select_list and HAVING clause used to get number @@ -1194,9 +1266,9 @@ public: list during split_sum_func */ uint select_n_having_items; - uint cond_count; /* number of sargable Items in where/having/on */ - uint between_count; /* number of between predicates in where/having/on */ - uint max_equal_elems; /* maximal number of elements in multiple equalities */ + uint cond_count; /* number of sargable Items in where/having/on */ + uint between_count; /* number of between predicates in where/having/on */ + uint max_equal_elems; /* max number of elements in multiple equalities */ /* Number of fields used in select list or where clause of current select and all inner subselects. @@ -1205,8 +1277,8 @@ public: /* reserved for exists 2 in */ uint select_n_reserved; /* - it counts the number of bit fields in the SELECT list. These are used when DISTINCT is - converted to a GROUP BY involving BIT fields. + it counts the number of bit fields in the SELECT list. These are used when + DISTINCT is converted to a GROUP BY involving BIT fields. 
*/ uint hidden_bit_fields; /* @@ -1216,56 +1288,42 @@ public: 2) Fields in the PARTITION BY clause 3) Fields in the ORDER BY clause */ + /* + Number of current derived table made with TVC during the + transformation of IN-predicate into IN-subquery for this + st_select_lex. + */ + uint curr_tvc_name; uint fields_in_window_functions; + uint insert_tables; enum_parsing_place parsing_place; /* where we are parsing expression */ enum_parsing_place save_parsing_place; enum_parsing_place context_analysis_place; /* where we are in prepare */ - bool with_sum_func; /* sum function indicator */ + enum leaf_list_state {UNINIT, READY, SAVED}; + enum leaf_list_state prep_leaf_list_state; + enum olap_type olap; + /* SELECT [FOR UPDATE/LOCK IN SHARE MODE] [SKIP LOCKED] */ + enum select_lock_type {NONE, IN_SHARE_MODE, FOR_UPDATE}; + enum select_lock_type select_lock; - ulong table_join_options; uint in_sum_expr; uint select_number; /* number of select (used for EXPLAIN) */ - - /* - nest_levels are local to the query or VIEW, - and that view merge procedure does not re-calculate them. - So we also have to remember unit against which we count levels. - */ - SELECT_LEX_UNIT *nest_level_base; - int nest_level; /* nesting level of select */ - Item_sum *inner_sum_func_list; /* list of sum func in nested selects */ - uint with_wild; /* item list contain '*' */ - bool braces; /* SELECT ... UNION (SELECT ... 
) <- this braces */ - bool automatic_brackets; /* dummy select for INTERSECT precedence */ - /* TRUE when having fix field called in processing of this SELECT */ - bool having_fix_field; - /* - TRUE when fix field is called for a new condition pushed into the - HAVING clause of this SELECT - */ - bool having_fix_field_for_pushed_cond; - /* List of references to fields referenced from inner selects */ - List<Item_outer_ref> inner_refs_list; + uint with_wild; /* item list contain '*' ; Counter */ /* Number of Item_sum-derived objects in this SELECT */ uint n_sum_items; /* Number of Item_sum-derived objects in children and descendant SELECTs */ uint n_child_sum_items; + uint versioned_tables; /* For versioning */ + int nest_level; /* nesting level of select */ + /* index in the select list of the expression currently being fixed */ + int cur_pos_in_select_list; - /* explicit LIMIT clause was used */ - bool explicit_limit; /* This array is used to note whether we have any candidates for expression caching in the corresponding clauses */ bool expr_cache_may_be_used[PARSING_PLACE_SIZE]; - /* - there are subquery in HAVING clause => we can't close tables before - query processing end even if we use temporary table - */ - bool subquery_in_having; - /* TRUE <=> this SELECT is correlated w.r.t. some ancestor select */ - bool with_all_modifier; /* used for selects in union */ - bool is_correlated; + uint8 nest_flags; /* This variable is required to ensure proper work of subqueries and stored procedures. Generally, one should use the states of @@ -1279,34 +1337,6 @@ public: case of an error during prepare the PS is not created. 
*/ uint8 changed_elements; // see TOUCHED_SEL_* - /* TODO: add foloowing first_* to bitmap above */ - bool first_natural_join_processing; - bool first_cond_optimization; - /* do not wrap view fields with Item_ref */ - bool no_wrap_view_item; - /* exclude this select from check of unique_table() */ - bool exclude_from_table_unique_test; - /* the select is "service-select" and can not have tables*/ - bool is_service_select; - /* index in the select list of the expression currently being fixed */ - int cur_pos_in_select_list; - - List<udf_func> udf_list; /* udf function calls stack */ - - /* - This is a copy of the original JOIN USING list that comes from - the parser. The parser : - 1. Sets the natural_join of the second TABLE_LIST in the join - and the st_select_lex::prev_join_using. - 2. Makes a parent TABLE_LIST and sets its is_natural_join/ - join_using_fields members. - 3. Uses the wrapper TABLE_LIST as a table in the upper level. - We cannot assign directly to join_using_fields in the parser because - at stage (1.) the parent TABLE_LIST is not constructed yet and - the assignment will override the JOIN USING fields of the lower level - joins on the right. 
- */ - List<String> *prev_join_using; /** The set of those tables whose fields are referenced in the select list of @@ -1314,30 +1344,22 @@ public: */ table_map select_list_tables; + /* Set to 1 if any field in field list has ROWNUM() */ + bool rownum_in_field_list; + /* namp of nesting SELECT visibility (for aggregate functions check) */ nesting_map name_visibility_map; - table_map with_dep; - /* the structure to store fields that are used in the GROUP BY of this select */ - List<Field_pair> grouping_tmp_fields; + index_clause_map current_index_hint_clause; /* it is for correct printing SELECT options */ thr_lock_type lock_type; - List<List_item> save_many_values; - List<Item> *save_insert_list; - table_value_constr *tvc; - bool in_tvc; - - /* The object used to organize execution of the query by a foreign engine */ - select_handler *pushdown_select; - /** System Versioning */ -public: - uint versioned_tables; int vers_setup_conds(THD *thd, TABLE_LIST *tables); /* push new Item_field into item_list */ - bool vers_push_field(THD *thd, TABLE_LIST *table, const LEX_CSTRING field_name); + bool vers_push_field(THD *thd, TABLE_LIST *table, + const LEX_CSTRING field_name); int period_setup_conds(THD *thd, TABLE_LIST *table); void init_query(); @@ -1404,7 +1426,8 @@ public: TABLE_LIST *convert_right_join(); List<Item>* get_item_list(); ulong get_table_join_options(); - void set_lock_for_tables(thr_lock_type lock_type, bool for_update); + void set_lock_for_tables(thr_lock_type lock_type, bool for_update, + bool skip_locks); /* This method created for reiniting LEX in mysql_admin_table() and can be used only if you are going remove all SELECT_LEX & units except belonger @@ -1431,8 +1454,8 @@ public: ha_rows get_limit(); friend struct LEX; - st_select_lex() : group_list_ptrs(NULL), braces(0), automatic_brackets(0), - n_sum_items(0), n_child_sum_items(0) + st_select_lex() : group_list_ptrs(NULL), braces(0), + automatic_brackets(0), n_sum_items(0), n_child_sum_items(0) {} void 
make_empty_select() { @@ -1475,7 +1498,7 @@ public: return hints; } - void clear_index_hints(void) { index_hints= NULL; } + inline void clear_index_hints(void) { index_hints= NULL; } bool is_part_of_union() { return master_unit()->is_unit_op(); } bool is_top_level_node() { @@ -1495,9 +1518,9 @@ public: inline bool is_mergeable() { return (next_select() == 0 && group_list.elements == 0 && - having == 0 && with_sum_func == 0 && + having == 0 && with_sum_func == 0 && with_rownum == 0 && table_list.elements >= 1 && !(options & SELECT_DISTINCT) && - select_limit == 0); + limit_params.select_limit == 0); } void mark_as_belong_to_derived(TABLE_LIST *derived); void increase_derived_records(ha_rows records); @@ -1563,7 +1586,7 @@ public: ORDER *find_common_window_func_partition_fields(THD *thd); bool cond_pushdown_is_allowed() const - { return !olap && !explicit_limit && !tvc; } + { return !olap && !limit_params.explicit_limit && !tvc && !with_rownum; } bool build_pushable_cond_for_having_pushdown(THD *thd, Item *cond); void pushdown_cond_into_where_clause(THD *thd, Item *extracted_cond, @@ -1581,18 +1604,6 @@ public: linkage == INTERSECT_TYPE; } -private: - bool m_non_agg_field_used; - bool m_agg_func_used; - bool m_custom_agg_func_used; - - /* current index hint kind. used in filling up index_hints */ - enum index_hint_type current_index_hint_type; - index_clause_map current_index_hint_clause; - /* a list of USE/FORCE/IGNORE INDEX */ - List<Index_hint> *index_hints; - -public: inline void add_where_field(st_select_lex *sel) { DBUG_ASSERT(this != sel); @@ -1936,6 +1947,13 @@ public: */ BINLOG_STMT_UNSAFE_AUTOINC_LOCK_MODE, + /** + INSERT .. SELECT ... SKIP LOCKED is unlikely to have the same + rows locked on the replica. + primary key. + */ + BINLOG_STMT_UNSAFE_SKIP_LOCKED, + /* The last element of this enumeration type. */ BINLOG_STMT_UNSAFE_COUNT }; @@ -2787,8 +2805,8 @@ private: size_t m_buf_length; /** Echo the parsed stream to the pre-processed buffer. 
*/ - bool m_echo; - bool m_echo_saved; + bool m_echo:1; + bool m_echo_saved:1; /** Pre-processed buffer. */ char *m_cpp_buf; @@ -2838,17 +2856,17 @@ public: const char *found_semicolon; /** SQL_MODE = IGNORE_SPACE. */ - bool ignore_space; + bool ignore_space:1; /** TRUE if we're parsing a prepared statement: in this mode we should allow placeholders. */ - bool stmt_prepare_mode; + bool stmt_prepare_mode:1; /** TRUE if we should allow multi-statements. */ - bool multi_statements; + bool multi_statements:1; /** Current line number. */ uint yylineno; @@ -3208,8 +3226,6 @@ public: /** SELECT of CREATE VIEW statement */ LEX_STRING create_view_select; - uint current_select_number; // valid for statment LEX (not view) - /** Start of 'ON table', in trigger statements. */ const char* raw_trg_on_table_name_begin; /** End of 'ON table', in trigger statements. */ @@ -3298,6 +3314,7 @@ public: SQL_I_List<ORDER> proc_list; SQL_I_List<TABLE_LIST> auxiliary_table_list, save_list; Column_definition *last_field; + Table_function_json_table *json_table; Item_sum *in_sum_func; udf_func udf; HA_CHECK_OPT check_opt; // check/repair options @@ -3312,7 +3329,65 @@ public: /* The following is used by KILL */ killed_state kill_signal; killed_type kill_type; - bool is_shutdown_wait_for_slaves; + uint current_select_number; // valid for statment LEX (not view) + + /* + The following bool variables should not be bit fields as they are not + reset for every query + */ + bool autocommit; // Often used, better as bool + bool sp_lex_in_use; // Keep track on lex usage in SPs for error handling + + /* Bit fields, reset for every query */ + bool is_shutdown_wait_for_slaves:1; + bool selects_allow_procedure:1; + /* + A special command "PARSE_VCOL_EXPR" is defined for the parser + to translate a defining expression of a virtual column into an + Item object. + The following flag is used to prevent other applications to use + this command. 
+ */ + bool parse_vcol_expr:1; + bool analyze_stmt:1; /* TRUE<=> this is "ANALYZE $stmt" */ + bool explain_json:1; + /* + true <=> The parsed fragment requires resolution of references to CTE + at the end of parsing. This name resolution process involves searching + for possible dependencies between CTE defined in the parsed fragment and + detecting possible recursive references. + The flag is set to true if the fragment contains CTE definitions. + */ + bool with_cte_resolution:1; + /* + true <=> only resolution of references to CTE are required in the parsed + fragment, no checking of dependencies between CTE is required. + This flag is used only when parsing clones of CTE specifications. + */ + bool only_cte_resolution:1; + bool local_file:1; + bool check_exists:1; + bool verbose:1, no_write_to_binlog:1; + bool safe_to_cache_query:1; + bool ignore:1; + bool next_is_main:1; // use "main" SELECT_LEX for nrxt allocation; + bool next_is_down:1; // use "main" SELECT_LEX for nrxt allocation; + /* + field_list was created for view and should be removed before PS/SP + rexecuton + */ + bool empty_field_list_on_rset:1; + /** + During name resolution search only in the table list given by + Name_resolution_context::first_name_resolution_table and + Name_resolution_context::last_name_resolution_table + (see Item_field::fix_fields()). + */ + bool use_only_table_context:1; + bool escape_used:1; + bool default_used:1; /* using default() function */ + bool with_rownum:1; /* Using rownum() function */ + bool is_lex_started:1; /* If lex_start() did run. For debugging. */ /* This variable is used in post-parse stage to declare that sum-functions, or functions which have sense only if GROUP BY is present, are allowed. @@ -3333,16 +3408,6 @@ public: clause name to get an error. 
*/ const char *clause_that_disallows_subselect; - bool selects_allow_into; - bool selects_allow_procedure; - /* - A special command "PARSE_VCOL_EXPR" is defined for the parser - to translate a defining expression of a virtual column into an - Item object. - The following flag is used to prevent other applications to use - this command. - */ - bool parse_vcol_expr; enum enum_duplicates duplicates; enum enum_tx_isolation tx_isolation; @@ -3356,23 +3421,32 @@ public: enum enum_var_type option_type; enum enum_drop_mode drop_mode; - uint profile_query_id; - uint profile_options; enum backup_stages backup_stage; enum Foreign_key::fk_match_opt fk_match_option; enum_fk_option fk_update_opt; enum_fk_option fk_delete_opt; + enum enum_yes_no_unknown tx_chain, tx_release; + st_parsing_options parsing_options; + /* + In sql_cache we store SQL_CACHE flag as specified by user to be + able to restore SELECT statement from internal structures. + */ + enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE }; + e_sql_cache sql_cache; + uint slave_thd_opt, start_transaction_opt; + uint profile_query_id; + uint profile_options; int nest_level; + /* In LEX representing update which were transformed to multi-update stores total number of tables. For LEX representing multi-delete holds number of tables from which we will delete records. */ uint table_count_update; + uint8 describe; - bool analyze_stmt; /* TRUE<=> this is "ANALYZE $stmt" */ - bool explain_json; /* A flag that indicates what kinds of derived tables are present in the query (0 if no derived tables, otherwise a combination of flags @@ -3380,50 +3454,18 @@ public: */ uint8 derived_tables; uint8 context_analysis_only; - /* - true <=> The parsed fragment requires resolution of references to CTE - at the end of parsing. This name resolution process involves searching - for possible dependencies between CTE defined in the parsed fragment and - detecting possible recursive references. 
- The flag is set to true if the fragment contains CTE definitions. - */ - bool with_cte_resolution; - /* - true <=> only resolution of references to CTE are required in the parsed - fragment, no checking of dependencies between CTE is required. - This flag is used only when parsing clones of CTE specifications. - */ - bool only_cte_resolution; - bool local_file; - bool check_exists; - bool autocommit; - bool verbose, no_write_to_binlog; - - enum enum_yes_no_unknown tx_chain, tx_release; - bool safe_to_cache_query; - bool ignore; - bool next_is_main; // use "main" SELECT_LEX for nrxt allocation; - bool next_is_down; // use "main" SELECT_LEX for nrxt allocation; - st_parsing_options parsing_options; uint8 lex_options; // see OPTION_LEX_* - /* - In sql_cache we store SQL_CACHE flag as specified by user to be - able to restore SELECT statement from internal structures. - */ - enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE }; - e_sql_cache sql_cache; Alter_info alter_info; + Lex_prepared_stmt prepared_stmt; /* For CREATE TABLE statement last element of table list which is not part of SELECT or LIKE part (i.e. either element for table we are creating or last of tables referenced by foreign keys). 
*/ TABLE_LIST *create_last_non_select_table; - Lex_prepared_stmt prepared_stmt; sp_head *sphead; sp_name *spname; - bool sp_lex_in_use; // Keep track on lex usage in SPs for error handling sp_pcontext *spcont; @@ -3431,11 +3473,6 @@ public: Event_parse_data *event_parse_data; - /* - field_list was created for view and should be removed before PS/SP - rexecuton - */ - bool empty_field_list_on_rset; /* Characterstics of trigger being created */ st_trg_chistics trg_chistics; /* @@ -3485,23 +3522,12 @@ public: */ engine_option_value *option_list_last; - /** - During name resolution search only in the table list given by - Name_resolution_context::first_name_resolution_table and - Name_resolution_context::last_name_resolution_table - (see Item_field::fix_fields()). - */ - bool use_only_table_context; /* Reference to a struct that contains information in various commands to add/create/drop/change table spaces. */ st_alter_tablespace *alter_tablespace_info; - - bool escape_used; - bool default_used; /* using default() function */ - bool is_lex_started; /* If lex_start() did run. For debugging. 
*/ /* The set of those tables whose fields are referenced in all subqueries @@ -3626,7 +3652,7 @@ public: bool can_be_merged(); bool can_use_merged(); - bool can_not_use_merged(); + bool can_not_use_merged(bool no_update_or_delete); bool only_view_structure(); bool need_correct_ident(); uint8 get_effective_with_check(TABLE_LIST *view); @@ -4101,6 +4127,7 @@ public: Item *make_item_func_replace(THD *thd, Item *org, Item *find, Item *replace); Item *make_item_func_substr(THD *thd, Item *a, Item *b, Item *c); Item *make_item_func_substr(THD *thd, Item *a, Item *b); + Item *make_item_func_sysdate(THD *thd, uint fsp); Item *make_item_func_call_generic(THD *thd, Lex_ident_cli_st *db, Lex_ident_cli_st *name, List<Item> *args); Item *make_item_func_call_generic(THD *thd, @@ -4784,6 +4811,19 @@ public: bool resolve_references_to_cte(TABLE_LIST *tables, TABLE_LIST **tables_last); + /** + Turn on the SELECT_DESCRIBE flag for every SELECT_LEX involved into + the statement being processed in case the statement is EXPLAIN UPDATE/DELETE. 
+ + @param lex current LEX + */ + + void promote_select_describe_flag_if_needed() + { + if (describe) + builtin_select.options |= SELECT_DESCRIBE; + } + }; diff --git a/sql/sql_limit.h b/sql/sql_limit.h index 19c1ce57e99..41308bc12db 100644 --- a/sql/sql_limit.h +++ b/sql/sql_limit.h @@ -23,24 +23,32 @@ class Select_limit_counters { ha_rows select_limit_cnt, offset_limit_cnt; + bool with_ties; public: Select_limit_counters(): - select_limit_cnt(0), offset_limit_cnt(0) + select_limit_cnt(0), offset_limit_cnt(0), with_ties(false) {}; - Select_limit_counters(Select_limit_counters &orig): + Select_limit_counters(const Select_limit_counters &orig): select_limit_cnt(orig.select_limit_cnt), - offset_limit_cnt(orig.offset_limit_cnt) + offset_limit_cnt(orig.offset_limit_cnt), + with_ties(orig.with_ties) {}; - void set_limit(ha_rows limit, ha_rows offset) + void set_limit(ha_rows limit, ha_rows offset, bool with_ties_arg) { if (limit == 0) offset= 0; offset_limit_cnt= offset; select_limit_cnt= limit; - if (select_limit_cnt + offset_limit_cnt >= - select_limit_cnt) + with_ties= with_ties_arg; + /* + Guard against an overflow condition, where limit + offset exceede + ha_rows value range. This case covers unreasonably large parameter + values that do not have any practical use so assuming in this case + that the query does not have a limit is fine. + */ + if (select_limit_cnt + offset_limit_cnt >= select_limit_cnt) select_limit_cnt+= offset_limit_cnt; else select_limit_cnt= HA_POS_ERROR; @@ -50,25 +58,35 @@ class Select_limit_counters { offset_limit_cnt= 0; select_limit_cnt= 1; + with_ties= false; } - bool is_unlimited() + bool is_unlimited() const { return select_limit_cnt == HA_POS_ERROR; } - bool is_unrestricted() - { return select_limit_cnt == HA_POS_ERROR && offset_limit_cnt == 0; } + /* + Set the limit to allow returning an unlimited number of rows. 
Useful + for cases when we want to continue execution indefinitely after the limit + is reached (for example for SQL_CALC_ROWS extension). + */ void set_unlimited() - { select_limit_cnt= HA_POS_ERROR; offset_limit_cnt= 0; } + { select_limit_cnt= HA_POS_ERROR; } - bool check_offset(ha_rows sent) + /* Reset the limit entirely. */ + void clear() + { select_limit_cnt= HA_POS_ERROR; offset_limit_cnt= 0; with_ties= false;} + + bool check_offset(ha_rows sent) const { return sent < offset_limit_cnt; } void remove_offset() { offset_limit_cnt= 0; } - ha_rows get_select_limit() + ha_rows get_select_limit() const { return select_limit_cnt; } - ha_rows get_offset_limit() + ha_rows get_offset_limit() const { return offset_limit_cnt; } + bool is_with_ties() const + { return with_ties; } }; #endif // INCLUDES_MARIADB_SQL_LIMIT_H diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 2869011e313..2f1ee0b11bd 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -44,6 +44,8 @@ #include "wsrep_mysqld.h" +#include "scope.h" // scope_exit + extern "C" int _my_b_net_read(IO_CACHE *info, uchar *Buffer, size_t Count); class XML_TAG { @@ -51,11 +53,11 @@ public: int level; String field; String value; - XML_TAG(int l, String f, String v); + XML_TAG(int l, const String &f, const String &v); }; -XML_TAG::XML_TAG(int l, String f, String v) +XML_TAG::XML_TAG(int l, const String &f, const String &v) { level= l; field.append(f); @@ -444,6 +446,12 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, if (check_duplic_insert_without_overlaps(thd, table, handle_duplicates) != 0) DBUG_RETURN(true); + auto scope_cleaner = make_scope_exit( + [&fields_vars]() { + fields_vars.empty(); + } + ); + if (!fields_vars.elements) { Field_iterator_table_ref field_iterator; @@ -471,6 +479,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, } else { // Part field list + scope_cleaner.release(); /* TODO: use this conds for 'WITH CHECK OPTIONS' */ if (setup_fields(thd, 
Ref_ptr_array(), fields_vars, MARK_COLUMNS_WRITE, 0, NULL, 0) || @@ -567,7 +576,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, DBUG_RETURN(TRUE); } -#if !defined(__WIN__) && ! defined(__NETWARE__) +#if !defined(_WIN32) MY_STAT stat_info; if (!my_stat(name, &stat_info, MYF(MY_WME))) DBUG_RETURN(TRUE); @@ -660,6 +669,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, } table->file->prepare_for_insert(create_lookup_handler); thd_progress_init(thd, 2); + fix_rownum_pointers(thd, thd->lex->current_select, &info.copied); if (table_list->table->validate_default_values_of_unset_fields(thd)) { read_info.error= true; @@ -867,7 +877,7 @@ static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex, */ qualify_db= db_arg; } - lle.print_query(thd, FALSE, (const char *) ex->cs?ex->cs->csname:NULL, + lle.print_query(thd, FALSE, (const char*) ex->cs ? ex->cs->cs_name.str : NULL, &query_str, &fname_start, &fname_end, qualify_db); /* @@ -877,18 +887,18 @@ static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex, { List_iterator<Item> li(thd->lex->field_list); - query_str.append(" ("); + query_str.append(STRING_WITH_LEN(" (")); n= 0; while ((item= li++)) { if (n++) - query_str.append(", "); + query_str.append(STRING_WITH_LEN(", ")); const Load_data_outvar *var= item->get_load_data_outvar(); DBUG_ASSERT(var); var->load_data_print_for_log_event(thd, &query_str); } - query_str.append(")"); + query_str.append(')'); } if (!thd->lex->update_list.is_empty()) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index b56e5742845..90c9e3c7c4c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -98,8 +98,6 @@ #include "my_json_writer.h" -#define PRIV_LOCK_TABLES (SELECT_ACL | LOCK_TABLES_ACL) - #define FLAGSTR(V,F) ((V)&(F)?#F" ":"") #ifdef WITH_ARIA_STORAGE_ENGINE @@ -113,9 +111,7 @@ #include "wsrep_trans_observer.h" /* wsrep transaction hooks */ static bool wsrep_mysql_parse(THD *thd, char 
*rawbuf, uint length, - Parser_state *parser_state, - bool is_com_multi, - bool is_next_command); + Parser_state *parser_state); #endif /* WITH_WSREP */ /** @@ -134,7 +130,7 @@ static int show_create_db(THD *thd, LEX *lex); static bool alter_routine(THD *thd, LEX *lex); static bool drop_routine(THD *thd, LEX *lex); -const char *any_db="*any*"; // Special symbol for check_access +const LEX_CSTRING any_db= {STRING_WITH_LEN("*any*")}; const LEX_CSTRING command_name[257]={ { STRING_WITH_LEN("Sleep") }, //0 @@ -391,7 +387,7 @@ const LEX_CSTRING command_name[257]={ { STRING_WITH_LEN("Slave_worker") }, //251 { STRING_WITH_LEN("Slave_IO") }, //252 { STRING_WITH_LEN("Slave_SQL") }, //253 - { STRING_WITH_LEN("Com_multi") }, //254 + { 0, 0}, { STRING_WITH_LEN("Error") } // Last command number 255 }; @@ -490,7 +486,7 @@ void init_update_queries(void) memset(server_command_flags, 0, sizeof(server_command_flags)); server_command_flags[COM_STATISTICS]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK; - server_command_flags[COM_PING]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK | CF_NO_COM_MULTI; + server_command_flags[COM_PING]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK; server_command_flags[COM_QUIT]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_PROCESS_INFO]= CF_SKIP_WSREP_CHECK; @@ -519,7 +515,6 @@ void init_update_queries(void) server_command_flags[COM_STMT_EXECUTE]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_STMT_SEND_LONG_DATA]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_REGISTER_SLAVE]= CF_SKIP_WSREP_CHECK; - server_command_flags[COM_MULTI]= CF_SKIP_WSREP_CHECK | CF_NO_COM_MULTI; /* Initialize the sql command flags array. 
*/ memset(sql_command_flags, 0, sizeof(sql_command_flags)); @@ -958,7 +953,7 @@ void execute_init_command(THD *thd, LEX_STRING *init_command, save_vio= thd->net.vio; thd->net.vio= 0; thd->clear_error(1); - dispatch_command(COM_QUERY, thd, buf, (uint)len, FALSE, FALSE); + dispatch_command(COM_QUERY, thd, buf, (uint)len); thd->client_capabilities= save_client_capabilities; thd->net.vio= save_vio; @@ -981,6 +976,7 @@ int bootstrap(MYSQL_FILE *file) DBUG_ENTER("handle_bootstrap"); THD *thd= new THD(next_thread_id()); + char *buffer= new char[MAX_BOOTSTRAP_QUERY_SIZE]; #ifdef WITH_WSREP thd->variables.wsrep_on= 0; #endif @@ -1014,12 +1010,12 @@ int bootstrap(MYSQL_FILE *file) for ( ; ; ) { - char buffer[MAX_BOOTSTRAP_QUERY_SIZE] = ""; + buffer[0]= 0; int rc, length; char *query; int error= 0; - rc= read_bootstrap_query(buffer, &length, file, fgets_fn, &error); + rc= read_bootstrap_query(buffer, &length, file, fgets_fn, 0, &error); if (rc == READ_BOOTSTRAP_EOF) break; @@ -1084,7 +1080,7 @@ int bootstrap(MYSQL_FILE *file) break; } - mysql_parse(thd, thd->query(), length, &parser_state, FALSE, FALSE); + mysql_parse(thd, thd->query(), length, &parser_state); bootstrap_error= thd->is_error(); thd->protocol->end_statement(); @@ -1102,6 +1098,7 @@ int bootstrap(MYSQL_FILE *file) thd->lex->restore_set_statement_var(); } delete thd; + delete[] buffer; DBUG_RETURN(bootstrap_error); } @@ -1132,23 +1129,6 @@ void cleanup_items(Item *item) DBUG_VOID_RETURN; } -static enum enum_server_command fetch_command(THD *thd, char *packet) -{ - enum enum_server_command - command= (enum enum_server_command) (uchar) packet[0]; - DBUG_ENTER("fetch_command"); - - if (command >= COM_END || - (command >= COM_MDB_GAP_BEG && command <= COM_MDB_GAP_END)) - command= COM_END; // Wrong command - - DBUG_PRINT("info",("Command on %s = %d (%s)", - vio_description(thd->net.vio), command, - command_name[command].str)); - DBUG_RETURN(command); -} - - #ifdef WITH_WSREP static bool 
wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) { @@ -1169,28 +1149,80 @@ static bool wsrep_command_no_result(char command) } #endif /* WITH_WSREP */ #ifndef EMBEDDED_LIBRARY +static enum enum_server_command fetch_command(THD *thd, char *packet) +{ + enum enum_server_command + command= (enum enum_server_command) (uchar) packet[0]; + DBUG_ENTER("fetch_command"); + + if (command >= COM_END || + (command >= COM_MDB_GAP_BEG && command <= COM_MDB_GAP_END)) + command= COM_END; // Wrong command + + DBUG_PRINT("info",("Command on %s = %d (%s)", + vio_description(thd->net.vio), command, + command_name[command].str)); + DBUG_RETURN(command); +} /** Read one command from connection and execute it (query or simple command). - This function is called in loop from thread function. + This function is to be used by different schedulers (one-thread-per-connection, + pool-of-threads) For profiling to work, it must never be called recursively. + @param thd - client connection context + + @param blocking - wait for command to finish. + if false (nonblocking), then the function might + return when command is "half-finished", with + DISPATCH_COMMAND_WOULDBLOCK. + Currenly, this can *only* happen when using + threadpool. The command will resume, after all outstanding + async operations (i.e group commit) finish. + Threadpool scheduler takes care of "resume". + + @retval + DISPATCH_COMMAND_SUCCESS - success @retval - 0 success + DISPATCH_COMMAND_CLOSE_CONNECTION request of THD shutdown + (s. dispatch_command() description) @retval - 1 request of thread shutdown (see dispatch_command() description) + DISPATCH_COMMAND_WOULDBLOCK - need to wait for asynchronous operations + to finish. Only returned if parameter + 'blocking' is false. 
*/ -bool do_command(THD *thd) +dispatch_command_return do_command(THD *thd, bool blocking) { - bool return_value; + dispatch_command_return return_value; char *packet= 0; ulong packet_length; NET *net= &thd->net; enum enum_server_command command; DBUG_ENTER("do_command"); +#ifdef WITH_WSREP + DBUG_ASSERT(!thd->async_state.pending_ops() || + (WSREP(thd) && + thd->wsrep_trx().state() == wsrep::transaction::s_aborted)); +#else + DBUG_ASSERT(!thd->async_state.pending_ops()); +#endif + + if (thd->async_state.m_state == thd_async_state::enum_async_state::RESUMED) + { + /* + Resuming previously suspended command. + Restore the state + */ + command = thd->async_state.m_command; + packet = thd->async_state.m_packet.str; + packet_length = (ulong)thd->async_state.m_packet.length; + goto resume; + } + /* indicator of uninitialized lex => normal flow of errors handling (see my_message_sql) @@ -1257,12 +1289,12 @@ bool do_command(THD *thd) if (net->error != 3) { - return_value= TRUE; // We have to close it. + return_value= DISPATCH_COMMAND_CLOSE_CONNECTION; // We have to close it. 
goto out; } net->error= 0; - return_value= FALSE; + return_value= DISPATCH_COMMAND_SUCCESS; goto out; } @@ -1335,7 +1367,7 @@ bool do_command(THD *thd) MYSQL_END_STATEMENT(thd->m_statement_psi, thd->get_stmt_da()); thd->m_statement_psi= NULL; thd->m_digest= NULL; - return_value= FALSE; + return_value= DISPATCH_COMMAND_SUCCESS; wsrep_after_command_before_result(thd); goto out; @@ -1361,7 +1393,7 @@ bool do_command(THD *thd) thd->m_statement_psi= NULL; thd->m_digest= NULL; - return_value= FALSE; + return_value= DISPATCH_COMMAND_SUCCESS; wsrep_after_command_before_result(thd); goto out; } @@ -1372,8 +1404,18 @@ bool do_command(THD *thd) DBUG_ASSERT(packet_length); DBUG_ASSERT(!thd->apc_target.is_enabled()); + +resume: return_value= dispatch_command(command, thd, packet+1, - (uint) (packet_length-1), FALSE, FALSE); + (uint) (packet_length-1), blocking); + if (return_value == DISPATCH_COMMAND_WOULDBLOCK) + { + /* Save current state, and resume later.*/ + thd->async_state.m_command= command; + thd->async_state.m_packet={packet,packet_length}; + DBUG_RETURN(return_value); + } + DBUG_ASSERT(!thd->apc_target.is_enabled()); out: @@ -1479,45 +1521,6 @@ static void wsrep_copy_query(THD *thd) } #endif /* WITH_WSREP */ -/** - check COM_MULTI packet - - @param thd thread handle - @param packet pointer on the packet of commands - @param packet_length length of this packet - - @retval 0 - Error - @retval # - Number of commands in the batch -*/ - -uint maria_multi_check(THD *thd, char *packet, size_t packet_length) -{ - uint counter= 0; - DBUG_ENTER("maria_multi_check"); - while (packet_length) - { - char *packet_start= packet; - size_t subpacket_length= net_field_length((uchar **)&packet_start); - size_t length_length= packet_start - packet; - // length of command + 3 bytes where that length was stored - DBUG_PRINT("info", ("sub-packet length: %zu + %zu command: %x", - subpacket_length, length_length, - packet_start[3])); - - if (subpacket_length == 0 || - (subpacket_length + 
length_length) > packet_length) - { - my_message(ER_UNKNOWN_COM_ERROR, ER_THD(thd, ER_UNKNOWN_COM_ERROR), - MYF(0)); - DBUG_RETURN(0); - } - - counter++; - packet= packet_start + subpacket_length; - packet_length-= (subpacket_length + length_length); - } - DBUG_RETURN(counter); -} #if defined(WITH_ARIA_STORAGE_ENGINE) @@ -1554,8 +1557,13 @@ public: @param packet_length length of packet + 1 (to show that data is null-terminated) except for COM_SLEEP, where it can be zero. - @param is_com_multi recursive call from COM_MULTI - @param is_next_command there will be more command in the COM_MULTI batch + @param blocking if false (nonblocking), then the function might + return when command is "half-finished", with + DISPATCH_COMMAND_WOULDBLOCK. + Currenly, this can *only* happen when using threadpool. + The current command will resume, after all outstanding + async operations (i.e group commit) finish. + Threadpool scheduler takes care of "resume". @todo set thd->lex->sql_command to SQLCOM_END here. @@ -1568,9 +1576,8 @@ public: 1 request of thread shutdown, i. e. if command is COM_QUIT/COM_SHUTDOWN */ -bool dispatch_command(enum enum_server_command command, THD *thd, - char* packet, uint packet_length, bool is_com_multi, - bool is_next_command) +dispatch_command_return dispatch_command(enum enum_server_command command, THD *thd, + char* packet, uint packet_length, bool blocking) { NET *net= &thd->net; bool error= 0; @@ -1582,6 +1589,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd, "<?>"))); bool drop_more_results= 0; + if (thd->async_state.m_state == thd_async_state::enum_async_state::RESUMED) + { + thd->async_state.m_state = thd_async_state::enum_async_state::NONE; + goto resume; + } + /* keep it withing 1 byte */ compile_time_assert(COM_END == 255); @@ -1651,21 +1664,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd, beginning of each command. 
*/ thd->server_status&= ~SERVER_STATUS_CLEAR_SET; - if (is_next_command) - { - drop_more_results= !MY_TEST(thd->server_status & - SERVER_MORE_RESULTS_EXISTS); - thd->server_status|= SERVER_MORE_RESULTS_EXISTS; - if (is_com_multi) - thd->get_stmt_da()->set_skip_flush(); - } if (unlikely(thd->security_ctx->password_expired && command != COM_QUERY && command != COM_PING && command != COM_QUIT && command != COM_STMT_PREPARE && - command != COM_STMT_EXECUTE)) + command != COM_STMT_EXECUTE && + command != COM_STMT_CLOSE)) { my_error(ER_MUST_CHANGE_PASSWORD, MYF(0)); goto dispatch_end; @@ -1875,8 +1881,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (WSREP(thd)) { if (wsrep_mysql_parse(thd, thd->query(), thd->query_length(), - &parser_state, - is_com_multi, is_next_command)) + &parser_state)) { WSREP_DEBUG("Deadlock error for: %s", thd->query()); mysql_mutex_lock(&thd->LOCK_thd_data); @@ -1888,8 +1893,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } else #endif /* WITH_WSREP */ - mysql_parse(thd, thd->query(), thd->query_length(), &parser_state, - is_com_multi, is_next_command); + mysql_parse(thd, thd->query(), thd->query_length(), &parser_state); while (!thd->killed && (parser_state.m_lip.found_semicolon != NULL) && ! 
thd->is_error()) @@ -1973,8 +1977,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (WSREP(thd)) { if (wsrep_mysql_parse(thd, beginning_of_next_stmt, - length, &parser_state, - is_com_multi, is_next_command)) + length, &parser_state)) { WSREP_DEBUG("Deadlock error for: %s", thd->query()); mysql_mutex_lock(&thd->LOCK_thd_data); @@ -1987,8 +1990,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } else #endif /* WITH_WSREP */ - mysql_parse(thd, beginning_of_next_stmt, length, &parser_state, - is_com_multi, is_next_command); + mysql_parse(thd, beginning_of_next_stmt, length, &parser_state); } @@ -2039,13 +2041,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; } packet= arg_end + 1; - // thd->reset_for_next_command reset state => restore it - if (is_next_command) - { - thd->server_status|= SERVER_MORE_RESULTS_EXISTS; - if (is_com_multi) - thd->get_stmt_da()->set_skip_flush(); - } lex_start(thd); /* Must be before we init the table list. 
*/ @@ -2333,84 +2328,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, general_log_print(thd, command, NullS); my_eof(thd); break; - case COM_MULTI: - { - uint counter; - uint current_com= 0; - DBUG_ASSERT(!is_com_multi); - if (!(thd->client_capabilities & CLIENT_MULTI_RESULTS)) - { - /* The client does not support multiple result sets being sent back */ - my_error(ER_COMMULTI_BADCONTEXT, MYF(0)); - break; - } - - if (!(counter= maria_multi_check(thd, packet, packet_length))) - break; - - { - char *packet_start= packet; - /* We have to store next length because it will be destroyed by '\0' */ - size_t next_subpacket_length= net_field_length((uchar **)&packet_start); - size_t next_length_length= packet_start - packet; - unsigned char *readbuff= net->buff; - - if (net_allocate_new_packet(net, thd, MYF(MY_THREAD_SPECIFIC))) - break; - - PSI_statement_locker *save_locker= thd->m_statement_psi; - sql_digest_state *save_digest= thd->m_digest; - thd->m_statement_psi= NULL; - thd->m_digest= NULL; - - while (packet_length) - { - current_com++; - size_t subpacket_length= next_subpacket_length + next_length_length; - size_t length_length= next_length_length; - if (subpacket_length < packet_length) - { - packet_start= packet + subpacket_length; - next_subpacket_length= net_field_length((uchar**)&packet_start); - next_length_length= packet_start - (packet + subpacket_length); - } - /* safety like in do_command() */ - packet[subpacket_length]= '\0'; - - enum enum_server_command subcommand= - fetch_command(thd, (packet + length_length)); - - if (server_command_flags[subcommand] & CF_NO_COM_MULTI) - { - my_error(ER_BAD_COMMAND_IN_MULTI, MYF(0), - command_name[subcommand].str); - goto com_multi_end; - } - - if (dispatch_command(subcommand, thd, packet + (1 + length_length), - (uint)(subpacket_length - (1 + length_length)), TRUE, - (current_com != counter))) - { - DBUG_ASSERT(thd->is_error()); - goto com_multi_end; - } - - DBUG_ASSERT(subpacket_length <= 
packet_length); - packet+= subpacket_length; - packet_length-= (uint)subpacket_length; - } -com_multi_end: - thd->m_statement_psi= save_locker; - thd->m_digest= save_digest; - - /* release old buffer */ - net_flush(net); - DBUG_ASSERT(net->buff == net->write_pos); // nothing to send - my_free(readbuff); - } - break; - } case COM_SLEEP: case COM_CONNECT: // Impossible here case COM_TIME: // Impossible from client @@ -2424,7 +2342,18 @@ com_multi_end: } dispatch_end: - do_end_of_statement= true; + /* + For the threadpool i.e if non-blocking call, if not all async operations + are finished, return without cleanup. The cleanup will be done on + later, when command execution is resumed. + */ + if (!blocking && !error && thd->async_state.pending_ops()) + { + DBUG_RETURN(DISPATCH_COMMAND_WOULDBLOCK); + } + +resume: + #ifdef WITH_WSREP /* Next test should really be WSREP(thd), but that causes a failure when doing @@ -2471,11 +2400,8 @@ dispatch_end: thd_proc_info(thd, "Updating status"); /* Finalize server status flags after executing a command. */ thd->update_server_status(); - if (command != COM_MULTI) - { - thd->protocol->end_statement(); - query_cache_end_of_result(thd); - } + thd->protocol->end_statement(); + query_cache_end_of_result(thd); } if (drop_more_results) thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; @@ -2490,7 +2416,18 @@ dispatch_end: thd->update_all_stats(); - log_slow_statement(thd); + /* + Write to slow query log only those statements that received via the text + protocol except the EXECUTE statement. The reason we do that way is + that for statements received via binary protocol and for the EXECUTE + statement, the slow statements have been already written to slow query log + inside the method Prepared_statement::execute(). 
+ */ + if(command == COM_QUERY && + thd->lex->sql_command != SQLCOM_EXECUTE) + log_slow_statement(thd); + else + delete_explain_query(thd->lex); THD_STAGE_INFO(thd, stage_cleaning_up); thd->reset_query(); @@ -2503,8 +2440,7 @@ dispatch_end: thd->m_statement_psi= NULL; thd->m_digest= NULL; - if (!is_com_multi) - thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory + thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory thd->reset_kill_query(); /* Ensure that killed_errmsg is released */ /* @@ -2532,7 +2468,7 @@ dispatch_end: /* Check that some variables are reset properly */ DBUG_ASSERT(thd->abort_on_warning == 0); thd->lex->restore_set_statement_var(); - DBUG_RETURN(error); + DBUG_RETURN(error?DISPATCH_COMMAND_CLOSE_CONNECTION: DISPATCH_COMMAND_SUCCESS); } static bool slow_filter_masked(THD *thd, ulonglong mask) @@ -2864,9 +2800,10 @@ bool sp_process_definer(THD *thd) } else { - LEX_USER *d= lex->definer= get_current_user(thd, lex->definer); + LEX_USER *d= get_current_user(thd, lex->definer); if (!d) DBUG_RETURN(TRUE); + thd->change_item_tree((Item**)&lex->definer, (Item*)d); /* If the specified definer differs from the current user or role, we @@ -3506,7 +3443,7 @@ bool run_set_statement_if_requested(THD *thd, LEX *lex) */ int -mysql_execute_command(THD *thd) +mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt) { int res= 0; int up_result= 0; @@ -3709,12 +3646,7 @@ mysql_execute_command(THD *thd) #ifdef HAVE_REPLICATION } /* endif unlikely slave */ #endif - Opt_trace_start ots(thd, all_tables, lex->sql_command, &lex->var_list, - thd->query(), thd->query_length(), - thd->variables.character_set_client); - - Json_writer_object trace_command(thd); - Json_writer_array trace_command_steps(thd, "steps"); + Opt_trace_start ots(thd); /* store old value of binlog format */ enum_binlog_format orig_binlog_format,orig_current_stmt_binlog_format; @@ -3731,7 +3663,7 @@ mysql_execute_command(THD *thd) { for 
(TABLE_LIST *table= all_tables; table; table= table->next_global) { - if (table->lock_type >= TL_WRITE_ALLOW_WRITE) + if (table->lock_type >= TL_FIRST_WRITE) { lex->sql_command= SQLCOM_BEGIN; thd->wsrep_converted_lock_session= true; @@ -3782,6 +3714,10 @@ mysql_execute_command(THD *thd) if (run_set_statement_if_requested(thd, lex)) goto error; + /* After SET STATEMENT is done, we can initialize the Optimizer Trace: */ + ots.init(thd, all_tables, lex->sql_command, &lex->var_list, thd->query(), + thd->query_length(), thd->variables.character_set_client); + if (thd->lex->mi.connection_name.str == NULL) thd->lex->mi.connection_name= thd->variables.default_master_connection; @@ -3886,6 +3822,11 @@ mysql_execute_command(THD *thd) thd->set_query_timer(); #ifdef WITH_WSREP + /* Check wsrep_mode rules before command execution. */ + if (WSREP_NNULL(thd) && + wsrep_thd_is_local(thd) && !wsrep_check_mode_before_cmd_execute(thd)) + goto error; + /* Always start a new transaction for a wsrep THD unless the current command is DDL or explicit BEGIN. 
This will guarantee that @@ -4002,7 +3943,7 @@ mysql_execute_command(THD *thd) privileges_requested, all_tables, FALSE, UINT_MAX, FALSE); else - res= check_access(thd, privileges_requested, any_db, NULL, NULL, 0, 0); + res= check_access(thd, privileges_requested, any_db.str, NULL,NULL,0,0); if (!res) res= execute_sqlcom_select(thd, all_tables); @@ -4464,7 +4405,7 @@ mysql_execute_command(THD *thd) if (lex->ignore) lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_UPDATE_IGNORE); - DBUG_ASSERT(select_lex->offset_limit == 0); + DBUG_ASSERT(select_lex->limit_params.offset_limit == 0); unit->set_limit(select_lex); MYSQL_UPDATE_START(thd->query()); res= up_result= mysql_update(thd, all_tables, @@ -4840,7 +4781,7 @@ mysql_execute_command(THD *thd) if ((res= delete_precheck(thd, all_tables))) break; - DBUG_ASSERT(select_lex->offset_limit == 0); + DBUG_ASSERT(select_lex->limit_params.offset_limit == 0); unit->set_limit(select_lex); MYSQL_DELETE_START(thd->query()); @@ -4998,7 +4939,7 @@ mysql_execute_command(THD *thd) { if (!lex->tmp_table() && (!thd->is_current_stmt_binlog_format_row() || - !thd->find_temporary_table(table))) + !is_temporary_table(table))) { WSREP_TO_ISOLATION_BEGIN(NULL, NULL, all_tables); break; @@ -5047,7 +4988,7 @@ mysql_execute_command(THD *thd) goto error; #else { - if (check_access(thd, FILE_ACL, any_db, NULL, NULL, 0, 0)) + if (check_access(thd, FILE_ACL, any_db.str, NULL, NULL, 0, 0)) goto error; res= ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_LOGS); break; @@ -5330,7 +5271,7 @@ mysql_execute_command(THD *thd) } while (0); /* Don't do it, if we are inside a SP */ - if (!thd->spcont) + if (!thd->spcont && !is_called_from_prepared_stmt) { sp_head::destroy(lex->sphead); lex->sphead= NULL; @@ -6187,7 +6128,8 @@ finish: #ifdef WITH_WSREP thd->wsrep_consistency_check= NO_CONSISTENCY_CHECK; - WSREP_TO_ISOLATION_END; + if (wsrep_thd_is_toi(thd) || wsrep_thd_is_in_rsu(thd)) + wsrep_to_isolation_end(thd); /* Force release of transactional locks if 
not in active MST and wsrep is on. */ @@ -6229,8 +6171,8 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) /* assign global limit variable if limit is not given */ { SELECT_LEX *param= lex->unit.global_parameters(); - if (!param->explicit_limit) - param->select_limit= + if (!param->limit_params.explicit_limit) + param->limit_params.select_limit= new (thd->mem_root) Item_int(thd, (ulonglong) thd->variables.select_limit); } @@ -6767,7 +6709,7 @@ check_access(THD *thd, privilege_t want_access, DBUG_RETURN(FALSE); // CTE reference or an error later } - if (likely((db != NULL) && (db != any_db))) + if (likely((db != NULL) && (db != any_db.str))) { /* Check if this is reserved database, like information schema or @@ -6854,7 +6796,7 @@ check_access(THD *thd, privilege_t want_access, DBUG_RETURN(TRUE); /* purecov: tested */ } - if (unlikely(db == any_db)) + if (unlikely(db == any_db.str)) { /* Access granted; Allow select on *any* db. @@ -7182,11 +7124,12 @@ check_table_access(THD *thd, privilege_t requirements, TABLE_LIST *tables, /* We want to have either SELECT or INSERT rights to sequences depending on how they are accessed */ - want_access= ((table_ref->lock_type == TL_WRITE_ALLOW_WRITE) ? + want_access= ((table_ref->lock_type >= TL_FIRST_WRITE) ? INSERT_ACL : SELECT_ACL); } - if (check_access(thd, want_access, table_ref->get_db_name(), + if (check_access(thd, want_access, + table_ref->get_db_name(), &table_ref->grant.privilege, &table_ref->grant.m_internal, 0, no_errors)) @@ -7815,8 +7758,8 @@ void mysql_init_multi_delete(LEX *lex) { lex->sql_command= SQLCOM_DELETE_MULTI; mysql_init_select(lex); - lex->first_select_lex()->select_limit= 0; - lex->unit.lim.set_unlimited(); + lex->first_select_lex()->limit_params.clear(); + lex->unit.lim.clear(); lex->first_select_lex()->table_list. 
save_and_clear(&lex->auxiliary_table_list); lex->query_tables= 0; @@ -7876,9 +7819,7 @@ static void wsrep_prepare_for_autocommit_retry(THD* thd, } static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, - Parser_state *parser_state, - bool is_com_multi, - bool is_next_command) + Parser_state *parser_state) { bool is_autocommit= !thd->in_multi_stmt_transaction_mode() && @@ -7887,7 +7828,7 @@ static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, do { retry_autocommit= false; - mysql_parse(thd, rawbuf, length, parser_state, is_com_multi, is_next_command); + mysql_parse(thd, rawbuf, length, parser_state); /* Convert all ER_QUERY_INTERRUPTED errors to ER_LOCK_DEADLOCK @@ -7995,15 +7936,10 @@ static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, @param thd Current thread @param rawbuf Begining of the query text @param length Length of the query text - @param[out] found_semicolon For multi queries, position of the character of - the next query in the query text. 
- @param is_next_command there will be more command in the COM_MULTI batch */ void mysql_parse(THD *thd, char *rawbuf, uint length, - Parser_state *parser_state, - bool is_com_multi, - bool is_next_command) + Parser_state *parser_state) { DBUG_ENTER("mysql_parse"); DBUG_EXECUTE_IF("parser_debug", turn_parser_debug_on_MYSQLparse();); @@ -8027,12 +7963,6 @@ void mysql_parse(THD *thd, char *rawbuf, uint length, */ lex_start(thd); thd->reset_for_next_command(); - if (is_next_command) - { - thd->server_status|= SERVER_MORE_RESULTS_EXISTS; - if (is_com_multi) - thd->get_stmt_da()->set_skip_flush(); - } if (query_cache_send_result_to_client(thd, rawbuf, length) <= 0) { @@ -8256,6 +8186,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, } if (unlikely(table->is_derived_table() == FALSE && table->db.str && + !(table_options & TL_OPTION_TABLE_FUNCTION) && check_db_name((LEX_STRING*) &table->db))) { my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str); @@ -8293,7 +8224,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, if (table->table.length) table->table.length= my_casedn_str(files_charset_info, (char*) table->table.str); - if (ptr->db.length && ptr->db.str != any_db) + if (ptr->db.length && ptr->db.str != any_db.str) ptr->db.length= my_casedn_str(files_charset_info, (char*) ptr->db.str); } @@ -8345,7 +8276,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, { if (unlikely(!my_strcasecmp(table_alias_charset, alias_str.str, tables->alias.str) && - !cmp(&ptr->db, &tables->db) && ! tables->sequence)) + (tables->db.str == any_db.str || ptr->db.str == any_db.str || + !cmp(&ptr->db, &tables->db)) && + !tables->sequence)) { my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str.str); /* purecov: tested */ DBUG_RETURN(0); /* purecov: tested */ @@ -8964,7 +8897,8 @@ bool st_select_lex::add_window_spec(THD *thd, /** Set lock for all tables in current select level. 
- @param lock_type Lock to set for tables + @param lock_type Lock to set for tables + @param skip_locked (SELECT {FOR UPDATE/LOCK IN SHARED MODE} SKIP LOCKED) @note If lock is a write lock, then tables->updating is set 1 @@ -8972,20 +8906,23 @@ bool st_select_lex::add_window_spec(THD *thd, query */ -void st_select_lex::set_lock_for_tables(thr_lock_type lock_type, bool for_update) +void st_select_lex::set_lock_for_tables(thr_lock_type lock_type, bool for_update, + bool skip_locked_arg) { DBUG_ENTER("set_lock_for_tables"); - DBUG_PRINT("enter", ("lock_type: %d for_update: %d", lock_type, - for_update)); + DBUG_PRINT("enter", ("lock_type: %d for_update: %d skip_locked %d", + lock_type, for_update, skip_locked)); + skip_locked= skip_locked_arg; for (TABLE_LIST *tables= table_list.first; tables; tables= tables->next_local) { tables->lock_type= lock_type; + tables->skip_locked= skip_locked; tables->updating= for_update; if (tables->db.length) - tables->mdl_request.set_type((lock_type >= TL_WRITE_ALLOW_WRITE) ? + tables->mdl_request.set_type((lock_type >= TL_FIRST_WRITE) ? MDL_SHARED_WRITE : MDL_SHARED_READ); } DBUG_VOID_RETURN; @@ -9028,11 +8965,10 @@ bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg) DBUG_RETURN(1); fake_select_lex->include_standalone(this, (SELECT_LEX_NODE**)&fake_select_lex); - fake_select_lex->select_number= INT_MAX; + fake_select_lex->select_number= FAKE_SELECT_LEX_ID; fake_select_lex->parent_lex= thd_arg->lex; /* Used in init_query. 
*/ fake_select_lex->make_empty_select(); fake_select_lex->set_linkage(GLOBAL_OPTIONS_TYPE); - fake_select_lex->select_limit= 0; fake_select_lex->no_table_names_allowed= 1; @@ -9074,6 +9010,10 @@ bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg) @param left_op left operand of the JOIN @param right_op rigth operand of the JOIN + @seealso + push_table_function_arg_context() serves similar purpose for table + functions + @retval FALSE if all is OK @retval @@ -9087,7 +9027,6 @@ push_new_name_resolution_context(THD *thd, Name_resolution_context *on_context; if (!(on_context= new (thd->mem_root) Name_resolution_context)) return TRUE; - on_context->init(); on_context->first_name_resolution_table= left_op->first_leaf_for_name_resolution(); on_context->last_name_resolution_table= @@ -9453,9 +9392,7 @@ void sql_kill(THD *thd, longlong id, killed_state state, killed_type type) #ifdef WITH_WSREP return; wsrep_error_label: - error= (type == KILL_TYPE_QUERY ? ER_KILL_QUERY_DENIED_ERROR : - ER_KILL_DENIED_ERROR); - my_error(error, MYF(0), id); + my_error(ER_KILL_DENIED_ERROR, MYF(0), (long long) thd->thread_id); #endif /* WITH_WSREP */ } @@ -9492,7 +9429,7 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) #ifdef WITH_WSREP return; wsrep_error_label: - my_error(ER_CANNOT_USER, MYF(0), user ? user->user.str : "NULL"); + my_error(ER_KILL_DENIED_ERROR, MYF(0), (long long) thd->thread_id); #endif /* WITH_WSREP */ } @@ -9817,7 +9754,7 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex) walk->updating= target_tbl->updating; walk->lock_type= target_tbl->lock_type; /* We can assume that tables to be deleted from are locked for write. */ - DBUG_ASSERT(walk->lock_type >= TL_WRITE_ALLOW_WRITE); + DBUG_ASSERT(walk->lock_type >= TL_FIRST_WRITE); walk->mdl_request.set_type(MDL_SHARED_WRITE); target_tbl->correspondent_table= walk; // Remember corresponding table } @@ -10470,10 +10407,8 @@ bool parse_sql(THD *thd, Parser_state *parser_state, /* Parse the query. 
*/ - bool mysql_parse_status= - ((thd->variables.sql_mode & MODE_ORACLE) ? - ORAparse(thd) : - MYSQLparse(thd)) != 0; + bool mysql_parse_status= thd->variables.sql_mode & MODE_ORACLE + ? ORAparse(thd) : MYSQLparse(thd); DBUG_ASSERT(opt_bootstrap || mysql_parse_status || thd->lex->select_stack_top == 0); thd->lex->current_select= thd->lex->first_select_lex(); @@ -10551,7 +10486,8 @@ merge_charset_and_collation(CHARSET_INFO *cs, CHARSET_INFO *cl) { if (!my_charset_same(cs, cl)) { - my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), cl->name, cs->csname); + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), cl->coll_name.str, + cs->cs_name.str); return NULL; } return cl; @@ -10563,8 +10499,11 @@ merge_charset_and_collation(CHARSET_INFO *cs, CHARSET_INFO *cl) */ CHARSET_INFO *find_bin_collation(CHARSET_INFO *cs) { - const char *csname= cs->csname; - cs= get_charset_by_csname(csname, MY_CS_BINSORT, MYF(0)); + const char *csname= cs->cs_name.str; + THD *thd= current_thd; + myf utf8_flag= thd->get_utf8_flag(); + + cs= get_charset_by_csname(csname, MY_CS_BINSORT, MYF(utf8_flag)); if (!cs) { char tmp[65]; diff --git a/sql/sql_parse.h b/sql/sql_parse.h index ac5786dbaa0..ebe3fe97114 100644 --- a/sql/sql_parse.h +++ b/sql/sql_parse.h @@ -91,8 +91,7 @@ bool is_log_table_write_query(enum enum_sql_command command); bool alloc_query(THD *thd, const char *packet, size_t packet_length); void mysql_init_select(LEX *lex); void mysql_parse(THD *thd, char *rawbuf, uint length, - Parser_state *parser_state, bool is_com_multi, - bool is_next_command); + Parser_state *parser_state); bool mysql_new_select(LEX *lex, bool move_down, SELECT_LEX *sel); void create_select_for_variable(THD *thd, LEX_CSTRING *var_name); void create_table_set_open_action_and_adjust_tables(LEX *lex); @@ -101,11 +100,17 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex); void create_table_set_open_action_and_adjust_tables(LEX *lex); int bootstrap(MYSQL_FILE *file); bool run_set_statement_if_requested(THD *thd, 
LEX *lex); -int mysql_execute_command(THD *thd); -bool do_command(THD *thd); -bool dispatch_command(enum enum_server_command command, THD *thd, - char* packet, uint packet_length, - bool is_com_multi, bool is_next_command); +int mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt=false); +enum dispatch_command_return +{ + DISPATCH_COMMAND_SUCCESS=0, + DISPATCH_COMMAND_CLOSE_CONNECTION= 1, + DISPATCH_COMMAND_WOULDBLOCK= 2 +}; + +dispatch_command_return do_command(THD *thd, bool blocking = true); +dispatch_command_return dispatch_command(enum enum_server_command command, THD *thd, + char* packet, uint packet_length, bool blocking = true); void log_slow_statement(THD *thd); bool append_file_to_dir(THD *thd, const char **filename_ptr, const LEX_CSTRING *table_name); @@ -126,7 +131,7 @@ bool check_stack_overrun(THD *thd, long margin, uchar *dummy); /* Variables */ -extern const char* any_db; +extern const LEX_CSTRING any_db; extern uint sql_command_flags[]; extern uint server_command_flags[]; extern const LEX_CSTRING command_name[]; diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index f004981bd2a..adb1ad391cf 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -68,6 +68,7 @@ #include "sql_alter.h" // Alter_table_ctx #include "sql_select.h" #include "sql_tablespace.h" // check_tablespace_name +#include "ddl_log.h" #include "tztime.h" // my_tz_OFFSET0 #include <algorithm> @@ -2119,17 +2120,17 @@ static int add_keyword_string(String *str, const char *keyword, bool quoted, const char *keystr) { int err= str->append(' '); - err+= str->append(keyword); + err+= str->append(keyword, strlen(keyword)); str->append(STRING_WITH_LEN(" = ")); if (quoted) { err+= str->append('\''); - err+= str->append_for_single_quote(keystr); + err+= str->append_for_single_quote(keystr, strlen(keystr)); err+= str->append('\''); } else - err+= str->append(keystr); + err+= str->append(keystr, strlen(keystr)); return err; } @@ -2182,7 +2183,7 @@ static int 
add_keyword_path(String *str, const char *keyword, { char temp_path[FN_REFLEN]; strcpy(temp_path, path); -#ifdef __WIN__ +#ifdef _WIN32 /* Convert \ to / to be able to create table on unix */ char *pos, *end; size_t length= strlen(temp_path); @@ -2205,7 +2206,7 @@ static int add_keyword_path(String *str, const char *keyword, static int add_keyword_int(String *str, const char *keyword, longlong num) { int err= str->append(' '); - err+= str->append(keyword); + err+= str->append(keyword, strlen(keyword)); str->append(STRING_WITH_LEN(" = ")); return err + str->append_longlong(num); } @@ -2294,12 +2295,12 @@ static int add_column_list_values(String *str, partition_info *part_info, if (col_val->max_value) err+= str->append(STRING_WITH_LEN("MAXVALUE")); else if (col_val->null_value) - err+= str->append(STRING_WITH_LEN("NULL")); + err+= str->append(NULL_clex_str); else { Item *item_expr= col_val->item_expression; if (item_expr->null_value) - err+= str->append(STRING_WITH_LEN("NULL")); + err+= str->append(NULL_clex_str); else { CHARSET_INFO *field_cs; @@ -2404,7 +2405,7 @@ static int add_partition_values(String *str, partition_info *part_info, err+= str->append('('); if (p_elem->has_null_value) { - err+= str->append(STRING_WITH_LEN("NULL")); + err+= str->append(NULL_clex_str); if (num_items == 0) { err+= str->append(')'); @@ -6166,8 +6167,9 @@ static void release_part_info_log_entries(DDL_LOG_MEMORY_ENTRY *log_entry) while (log_entry) { - release_ddl_log_memory_entry(log_entry); - log_entry= log_entry->next_active_log_entry; + DDL_LOG_MEMORY_ENTRY *next= log_entry->next_active_log_entry; + ddl_log_release_memory_entry(log_entry); + log_entry= next; } DBUG_VOID_RETURN; } @@ -6201,16 +6203,18 @@ static bool write_log_replace_delete_frm(ALTER_PARTITION_PARAM_TYPE *lpt, DDL_LOG_MEMORY_ENTRY *log_entry; DBUG_ENTER("write_log_replace_delete_frm"); + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); if (replace_flag) ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION; else 
ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION; ddl_log_entry.next_entry= next_entry; - ddl_log_entry.handler_name= reg_ext; - ddl_log_entry.name= to_path; + lex_string_set(&ddl_log_entry.handler_name, reg_ext); + lex_string_set(&ddl_log_entry.name, to_path); + if (replace_flag) - ddl_log_entry.from_name= from_path; - if (write_ddl_log_entry(&ddl_log_entry, &log_entry)) + lex_string_set(&ddl_log_entry.from_name, from_path); + if (ddl_log_write_entry(&ddl_log_entry, &log_entry)) { DBUG_RETURN(TRUE); } @@ -6261,6 +6265,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, if (part_elem->part_state == PART_IS_CHANGED || (part_elem->part_state == PART_IS_ADDED && temp_partitions)) { + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); if (part_info->is_sub_partitioned()) { List_iterator<partition_element> sub_it(part_elem->subpartitions); @@ -6270,8 +6275,9 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, { partition_element *sub_elem= sub_it++; ddl_log_entry.next_entry= *next_entry; - ddl_log_entry.handler_name= - ha_resolve_storage_engine_name(sub_elem->engine_type); + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(sub_elem-> + engine_type)); if (create_subpartition_name(tmp_path, sizeof(tmp_path), path, part_elem->partition_name, sub_elem->partition_name, @@ -6281,16 +6287,15 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, sub_elem->partition_name, NORMAL_PART_NAME)) DBUG_RETURN(TRUE); - ddl_log_entry.name= normal_path; - ddl_log_entry.from_name= tmp_path; + lex_string_set(&ddl_log_entry.name, normal_path); + lex_string_set(&ddl_log_entry.from_name, tmp_path); if (part_elem->part_state == PART_IS_CHANGED) ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION; else ddl_log_entry.action_type= DDL_LOG_RENAME_ACTION; - if (write_ddl_log_entry(&ddl_log_entry, &log_entry)) - { + if (ddl_log_write_entry(&ddl_log_entry, &log_entry)) DBUG_RETURN(TRUE); - } + *next_entry= 
log_entry->entry_pos; sub_elem->log_entry= log_entry; insert_part_info_log_entry_list(part_info, log_entry); @@ -6299,8 +6304,8 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, else { ddl_log_entry.next_entry= *next_entry; - ddl_log_entry.handler_name= - ha_resolve_storage_engine_name(part_elem->engine_type); + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(part_elem->engine_type)); if (create_partition_name(tmp_path, sizeof(tmp_path), path, part_elem->partition_name, TEMP_PART_NAME, TRUE) || @@ -6308,13 +6313,13 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, part_elem->partition_name, NORMAL_PART_NAME, TRUE)) DBUG_RETURN(TRUE); - ddl_log_entry.name= normal_path; - ddl_log_entry.from_name= tmp_path; + lex_string_set(&ddl_log_entry.name, normal_path); + lex_string_set(&ddl_log_entry.from_name, tmp_path); if (part_elem->part_state == PART_IS_CHANGED) ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION; else ddl_log_entry.action_type= DDL_LOG_RENAME_ACTION; - if (write_ddl_log_entry(&ddl_log_entry, &log_entry)) + if (ddl_log_write_entry(&ddl_log_entry, &log_entry)) { DBUG_RETURN(TRUE); } @@ -6353,6 +6358,7 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, uint num_elements= part_info->partitions.elements; DBUG_ENTER("write_log_dropped_partitions"); + bzero(&ddl_log_entry, sizeof(ddl_log_entry)); ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION; if (temp_list) num_elements= num_temp_partitions; @@ -6383,14 +6389,15 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, { partition_element *sub_elem= sub_it++; ddl_log_entry.next_entry= *next_entry; - ddl_log_entry.handler_name= - ha_resolve_storage_engine_name(sub_elem->engine_type); + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(sub_elem-> + engine_type)); if (create_subpartition_name(tmp_path, sizeof(tmp_path), path, part_elem->partition_name, 
sub_elem->partition_name, name_variant)) DBUG_RETURN(TRUE); - ddl_log_entry.name= tmp_path; - if (write_ddl_log_entry(&ddl_log_entry, &log_entry)) + lex_string_set(&ddl_log_entry.name, tmp_path); + if (ddl_log_write_entry(&ddl_log_entry, &log_entry)) { DBUG_RETURN(TRUE); } @@ -6402,14 +6409,14 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, else { ddl_log_entry.next_entry= *next_entry; - ddl_log_entry.handler_name= - ha_resolve_storage_engine_name(part_elem->engine_type); + lex_string_set(&ddl_log_entry.handler_name, + ha_resolve_storage_engine_name(part_elem->engine_type)); if (create_partition_name(tmp_path, sizeof(tmp_path), path, part_elem->partition_name, name_variant, TRUE)) DBUG_RETURN(TRUE); - ddl_log_entry.name= tmp_path; - if (write_ddl_log_entry(&ddl_log_entry, &log_entry)) + lex_string_set(&ddl_log_entry.name, tmp_path); + if (ddl_log_write_entry(&ddl_log_entry, &log_entry)) { DBUG_RETURN(TRUE); } @@ -6471,8 +6478,8 @@ static bool write_log_drop_shadow_frm(ALTER_PARTITION_PARAM_TYPE *lpt) (const char*)shadow_path, FALSE)) goto error; log_entry= part_info->first_log_entry; - if (write_execute_ddl_log_entry(log_entry->entry_pos, - FALSE, &exec_log_entry)) + if (ddl_log_write_execute_entry(log_entry->entry_pos, + &exec_log_entry)) goto error; mysql_mutex_unlock(&LOCK_gdl); set_part_info_exec_log_entry(part_info, exec_log_entry); @@ -6518,8 +6525,8 @@ static bool write_log_rename_frm(ALTER_PARTITION_PARAM_TYPE *lpt) goto error; log_entry= part_info->first_log_entry; part_info->frm_log_entry= log_entry; - if (write_execute_ddl_log_entry(log_entry->entry_pos, - FALSE, &exec_log_entry)) + if (ddl_log_write_execute_entry(log_entry->entry_pos, + &exec_log_entry)) goto error; release_part_info_log_entries(old_first_log_entry); mysql_mutex_unlock(&LOCK_gdl); @@ -6573,8 +6580,8 @@ static bool write_log_drop_partition(ALTER_PARTITION_PARAM_TYPE *lpt) goto error; log_entry= part_info->first_log_entry; part_info->frm_log_entry= log_entry; 
- if (write_execute_ddl_log_entry(log_entry->entry_pos, - FALSE, &exec_log_entry)) + if (ddl_log_write_execute_entry(log_entry->entry_pos, + &exec_log_entry)) goto error; release_part_info_log_entries(old_first_log_entry); mysql_mutex_unlock(&LOCK_gdl); @@ -6632,8 +6639,7 @@ static bool write_log_add_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt) goto error; log_entry= part_info->first_log_entry; - if (write_execute_ddl_log_entry(log_entry->entry_pos, - FALSE, + if (ddl_log_write_execute_entry(log_entry->entry_pos, /* Reuse the old execute ddl_log_entry */ &exec_log_entry)) goto error; @@ -6702,8 +6708,8 @@ static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt) log_entry= part_info->first_log_entry; part_info->frm_log_entry= log_entry; /* Overwrite the revert execute log entry with this retry execute entry */ - if (write_execute_ddl_log_entry(log_entry->entry_pos, - FALSE, &exec_log_entry)) + if (ddl_log_write_execute_entry(log_entry->entry_pos, + &exec_log_entry)) goto error; release_part_info_log_entries(old_first_log_entry); mysql_mutex_unlock(&LOCK_gdl); @@ -6739,7 +6745,7 @@ static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt, DBUG_ASSERT(log_entry); mysql_mutex_lock(&LOCK_gdl); - if (write_execute_ddl_log_entry(0UL, TRUE, &log_entry)) + if (ddl_log_disable_execute_entry(&log_entry)) { /* Failed to write, Bad... 
@@ -6906,7 +6912,7 @@ static void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt, } if (part_info->first_log_entry && - execute_ddl_log_entry(thd, part_info->first_log_entry->entry_pos)) + ddl_log_execute_entry(thd, part_info->first_log_entry->entry_pos)) { /* We couldn't recover from error, most likely manual interaction @@ -7045,6 +7051,30 @@ static void downgrade_mdl_if_lock_tables_mode(THD *thd, MDL_ticket *ticket, } +bool log_partition_alter_to_ddl_log(ALTER_PARTITION_PARAM_TYPE *lpt) +{ + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + LEX_CSTRING old_engine_lex; + lex_string_set(&old_engine_lex, lpt->table->file->real_table_type()); + + ddl_log.query= { C_STRING_WITH_LEN("ALTER") }; + ddl_log.org_storage_engine_name= old_engine_lex; + ddl_log.org_partitioned= true; + ddl_log.org_database= lpt->db; + ddl_log.org_table= lpt->table_name; + ddl_log.org_table_id= lpt->org_tabledef_version; + ddl_log.new_storage_engine_name= old_engine_lex; + ddl_log.new_partitioned= true; + ddl_log.new_database= lpt->db; + ddl_log.new_table= lpt->table_name; + ddl_log.new_table_id= lpt->create_info->tabledef_version; + backup_log_ddl(&ddl_log); // This sets backup_log_error on failure + return 0; +} + + + /** Actually perform the change requested by ALTER TABLE of partitions previously prepared. 
@@ -7097,6 +7127,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, lpt->key_count= 0; lpt->db= *db; lpt->table_name= *table_name; + lpt->org_tabledef_version= table->s->tabledef_version; lpt->copied= 0; lpt->deleted= 0; lpt->pack_frm_data= NULL; @@ -7234,6 +7265,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT_ERROR("fail_drop_partition_6") || (frm_install= TRUE, FALSE) || mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) || + log_partition_alter_to_ddl_log(lpt) || (frm_install= FALSE, FALSE) || ERROR_INJECT_CRASH("crash_drop_partition_7") || ERROR_INJECT_ERROR("fail_drop_partition_7") || @@ -7313,6 +7345,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT_ERROR("fail_add_partition_8") || (frm_install= TRUE, FALSE) || mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) || + log_partition_alter_to_ddl_log(lpt) || (frm_install= FALSE, FALSE) || ERROR_INJECT_CRASH("crash_add_partition_9") || ERROR_INJECT_ERROR("fail_add_partition_9") || @@ -7411,6 +7444,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT_ERROR("fail_change_partition_8") || ((frm_install= TRUE), FALSE) || mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) || + log_partition_alter_to_ddl_log(lpt) || (frm_install= FALSE, FALSE) || ERROR_INJECT_CRASH("crash_change_partition_9") || ERROR_INJECT_ERROR("fail_change_partition_9") || @@ -7561,9 +7595,9 @@ void append_row_to_str(String &str, const uchar *row, TABLE *table) field_ptr++) { Field *field= *field_ptr; - str.append(" "); + str.append(' '); str.append(&field->field_name); - str.append(":"); + str.append(':'); field_unpack(&str, field, rec, 0, false); } diff --git a/sql/sql_partition.h b/sql/sql_partition.h index 58ba82dcd9f..57e6d0600ed 100644 --- a/sql/sql_partition.h +++ b/sql/sql_partition.h @@ -59,6 +59,7 @@ typedef struct st_lock_param_type KEY *key_info_buffer; LEX_CSTRING db; LEX_CSTRING table_name; + LEX_CUSTRING org_tabledef_version; uchar *pack_frm_data; uint key_count; uint db_options; 
diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc index 68dd3379d64..6ca96300b7a 100644 --- a/sql/sql_partition_admin.cc +++ b/sql/sql_partition_admin.cc @@ -32,6 +32,7 @@ #include "ha_partition.h" // ha_partition #endif #include "sql_base.h" // open_and_lock_tables +#include "ddl_log.h" #include "wsrep_mysqld.h" #ifndef WITH_PARTITION_STORAGE_ENGINE @@ -360,13 +361,14 @@ static bool exchange_name_with_ddl_log(THD *thd, DBUG_RETURN(TRUE); /* prepare the action entry */ + bzero(&exchange_entry, sizeof(exchange_entry)); exchange_entry.entry_type= DDL_LOG_ENTRY_CODE; exchange_entry.action_type= DDL_LOG_EXCHANGE_ACTION; - exchange_entry.next_entry= 0; - exchange_entry.name= name; - exchange_entry.from_name= from_name; - exchange_entry.tmp_name= tmp_name; - exchange_entry.handler_name= ha_resolve_storage_engine_name(ht); + lex_string_set(&exchange_entry.name, name); + lex_string_set(&exchange_entry.from_name, from_name); + lex_string_set(&exchange_entry.tmp_name, tmp_name); + lex_string_set(&exchange_entry.handler_name, + ha_resolve_storage_engine_name(ht)); exchange_entry.phase= EXCH_PHASE_NAME_TO_TEMP; mysql_mutex_lock(&LOCK_gdl); @@ -377,13 +379,13 @@ static bool exchange_name_with_ddl_log(THD *thd, */ DBUG_EXECUTE_IF("exchange_partition_fail_1", goto err_no_action_written;); DBUG_EXECUTE_IF("exchange_partition_abort_1", DBUG_SUICIDE();); - if (unlikely(write_ddl_log_entry(&exchange_entry, &log_entry))) + if (unlikely(ddl_log_write_entry(&exchange_entry, &log_entry))) goto err_no_action_written; DBUG_EXECUTE_IF("exchange_partition_fail_2", goto err_no_execute_written;); DBUG_EXECUTE_IF("exchange_partition_abort_2", DBUG_SUICIDE();); - if (unlikely(write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, - &exec_log_entry))) + if (unlikely(ddl_log_write_execute_entry(log_entry->entry_pos, + &exec_log_entry))) goto err_no_execute_written; /* ddl_log is written and synced */ @@ -409,7 +411,7 @@ static bool exchange_name_with_ddl_log(THD *thd, } 
DBUG_EXECUTE_IF("exchange_partition_fail_4", goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_4", DBUG_SUICIDE();); - if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos))) + if (unlikely(ddl_log_increment_phase(log_entry->entry_pos))) goto err_rename; /* call rename table from partition to table */ @@ -426,7 +428,7 @@ static bool exchange_name_with_ddl_log(THD *thd, } DBUG_EXECUTE_IF("exchange_partition_fail_6", goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_6", DBUG_SUICIDE();); - if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos))) + if (unlikely(ddl_log_increment_phase(log_entry->entry_pos))) goto err_rename; /* call rename table from tmp-nam to partition */ @@ -443,7 +445,7 @@ static bool exchange_name_with_ddl_log(THD *thd, } DBUG_EXECUTE_IF("exchange_partition_fail_8", goto err_rename;); DBUG_EXECUTE_IF("exchange_partition_abort_8", DBUG_SUICIDE();); - if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos))) + if (unlikely(ddl_log_increment_phase(log_entry->entry_pos))) goto err_rename; /* The exchange is complete and ddl_log is deactivated */ @@ -459,15 +461,15 @@ err_rename: will log to the error log about the failures... 
*/ /* execute the ddl log entry to revert the renames */ - (void) execute_ddl_log_entry(current_thd, log_entry->entry_pos); + (void) ddl_log_execute_entry(current_thd, log_entry->entry_pos); mysql_mutex_lock(&LOCK_gdl); /* mark the execute log entry done */ - (void) write_execute_ddl_log_entry(0, TRUE, &exec_log_entry); + (void) ddl_log_disable_execute_entry(&exec_log_entry); /* release the execute log entry */ - (void) release_ddl_log_memory_entry(exec_log_entry); + (void) ddl_log_release_memory_entry(exec_log_entry); err_no_execute_written: /* release the action log entry */ - (void) release_ddl_log_memory_entry(log_entry); + (void) ddl_log_release_memory_entry(log_entry); err_no_action_written: mysql_mutex_unlock(&LOCK_gdl); delete file; @@ -511,6 +513,16 @@ bool Sql_cmd_alter_table_exchange_partition:: char part_file_name[2*FN_REFLEN+1]; char swap_file_name[FN_REFLEN+1]; char temp_file_name[FN_REFLEN+1]; + char part_table_name[NAME_LEN + 1]; + char part_db[NAME_LEN + 1]; + char swap_table_name[NAME_LEN + 1]; + char swap_db[NAME_LEN + 1]; + uchar part_tabledef_version[MY_UUID_SIZE]; + uchar swap_tabledef_version[MY_UUID_SIZE]; + + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + uint swap_part_id; uint part_file_name_len; Alter_table_prelocking_strategy alter_prelocking_strategy; @@ -588,6 +600,35 @@ bool Sql_cmd_alter_table_exchange_partition:: HTON_TABLE_MAY_NOT_EXIST_ON_SLAVE) force_if_exists= 1; + ddl_log.org_table.str= part_table_name; + DBUG_ASSERT(part_table->s->table_name.length <= NAME_LEN); + ddl_log.org_table.length= part_table->s->table_name.length; + strmake(part_table_name, part_table->s->table_name.str, NAME_LEN); + + ddl_log.org_database.str= part_db; + DBUG_ASSERT(part_table->s->db.length <= NAME_LEN); + ddl_log.org_database.length= part_table->s->db.length; + strmake(part_db, part_table->s->db.str, NAME_LEN); + + ddl_log.new_table.str= swap_table_name; + DBUG_ASSERT(swap_table->s->table_name.length <= NAME_LEN); + 
ddl_log.new_table.length= swap_table->s->table_name.length; + strmake(swap_table_name, swap_table->s->table_name.str, NAME_LEN); + + ddl_log.new_database.str= swap_db; + DBUG_ASSERT(swap_table->s->db.length <= NAME_LEN); + ddl_log.new_database.length= swap_table->s->db.length; + strmake(swap_db, swap_table->s->db.str, NAME_LEN); + + memcpy(part_tabledef_version, part_table->s->tabledef_version.str, + MY_UUID_SIZE); + ddl_log.org_table_id.str= part_tabledef_version; + ddl_log.org_table_id.length= MY_UUID_SIZE; + memcpy(swap_tabledef_version, swap_table->s->tabledef_version.str, + MY_UUID_SIZE); + ddl_log.new_table_id.str= swap_tabledef_version; + ddl_log.new_table_id.length= MY_UUID_SIZE; + /* set lock pruning on first table */ partition_name= alter_info->partition_names.head(); if (unlikely(table_list->table->part_info-> @@ -598,7 +639,6 @@ bool Sql_cmd_alter_table_exchange_partition:: if (unlikely(lock_tables(thd, table_list, table_counter, 0))) DBUG_RETURN(true); - table_hton= swap_table->file->ht; THD_STAGE_INFO(thd, stage_verifying_table); @@ -702,6 +742,15 @@ bool Sql_cmd_alter_table_exchange_partition:: (void) exchange_name_with_ddl_log(thd, part_file_name, swap_file_name, temp_file_name, table_hton); } + else + { + ddl_log.query= { C_STRING_WITH_LEN("EXCHANGE_PARTITION") }; + ddl_log.org_partitioned= true; + ddl_log.new_partitioned= false; + ddl_log.org_storage_engine_name= *hton_name(table_hton); + ddl_log.new_storage_engine_name= *hton_name(table_hton); + backup_log_ddl(&ddl_log); + } thd->variables.option_bits= save_option_bits; err: @@ -884,7 +933,7 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd) { const char *partition_name= partition_names_it++; String *str_partition_name= new (thd->mem_root) - String(partition_name, system_charset_info); + String(partition_name, strlen(partition_name), system_charset_info); if (!str_partition_name) DBUG_RETURN(true); partition_names_list.push_back(str_partition_name, thd->mem_root); diff --git 
a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 8c2dbc91ec2..a429732a1c1 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -407,12 +407,16 @@ static int item_value_type(struct st_mysql_value *value) static const char *item_val_str(struct st_mysql_value *value, char *buffer, int *length) { - String str(buffer, *length, system_charset_info), *res; + size_t org_length= *length; + String str(buffer, org_length, system_charset_info), *res; if (!(res= ((st_item_value_holder*)value)->item->val_str(&str))) return NULL; *length= res->length(); - if (res->c_ptr_quick() == buffer) + if (res->ptr() == buffer && res->length() < org_length) + { + buffer[res->length()]= 0; return buffer; + } /* Lets be nice and create a temporary string since the @@ -1723,7 +1727,8 @@ int plugin_init(int *argc, char **argv, int flags) { char path[FN_REFLEN + 1]; build_table_filename(path, sizeof(path) - 1, "mysql", "plugin", reg_ext, 0); - Table_type ttype= dd_frm_type(0, path, &plugin_table_engine_name); + Table_type ttype= dd_frm_type(0, path, &plugin_table_engine_name, + NULL, NULL); if (ttype != TABLE_TYPE_NORMAL) plugin_table_engine_name=empty_clex_str; } @@ -1957,7 +1962,7 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list) list= NULL; /* terminate the loop */ /* fall through */ case ';': -#ifndef __WIN__ +#ifndef _WIN32 case ':': /* can't use this as delimiter as it may be drive letter */ #endif p[-1]= 0; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index e46a23c1d24..ab6734dffb5 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2002, 2015, Oracle and/or its affiliates. 
- Copyright (c) 2008, 2021, MariaDB + Copyright (c) 2008, 2022, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -89,6 +89,7 @@ When one supplies long data for a placeholder: #include "unireg.h" #include "sql_class.h" // set_var.h: THD #include "set_var.h" +#include "sql_admin.h" // fill_check_table_metadata_fields #include "sql_prepare.h" #include "sql_parse.h" // insert_precheck, update_precheck, delete_precheck #include "sql_base.h" // open_normal_and_derived_tables @@ -105,6 +106,8 @@ When one supplies long data for a placeholder: #include "sql_cursor.h" #include "sql_show.h" #include "sql_repl.h" +#include "sql_help.h" // mysqld_help_prepare +#include "sql_table.h" // fill_checksum_table_metadata_fields #include "slave.h" #include "sp_head.h" #include "sp.h" @@ -129,6 +132,7 @@ static const uint PARAMETER_FLAG_UNSIGNED= 128U << 8; #include "wsrep_mysqld.h" #include "wsrep_trans_observer.h" #endif /* WITH_WSREP */ +#include "xa.h" // xa_recover_get_fields /** A result class used to send cursor rows using the binary protocol. 
@@ -179,6 +183,7 @@ public: my_bool iterations; my_bool start_param; my_bool read_types; + #ifndef EMBEDDED_LIBRARY bool (*set_params)(Prepared_statement *st, uchar *data, uchar *data_end, uchar *read_pos, String *expanded_query); @@ -195,8 +200,8 @@ public: Prepared_statement(THD *thd_arg); virtual ~Prepared_statement(); void setup_set_params(); - virtual Query_arena::Type type() const; - virtual void cleanup_stmt(); + Query_arena::Type type() const override; + bool cleanup_stmt(bool restore_set_statement_vars) override; bool set_name(const LEX_CSTRING *name); inline void close_cursor() { delete cursor; cursor= 0; } inline bool is_in_use() { return flags & (uint) IS_IN_USE; } @@ -334,9 +339,13 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) error= my_net_write(net, buff, sizeof(buff)); if (stmt->param_count && likely(!error)) { - error= thd->protocol_text.send_result_set_metadata((List<Item> *) - &stmt->lex->param_list, - Protocol::SEND_EOF); + /* + Force the column info to be written + (in this case PS parameter type info). + */ + error= thd->protocol_text.send_result_set_metadata( + (List<Item> *)&stmt->lex->param_list, + Protocol::SEND_EOF | Protocol::SEND_FORCE_COLUMN_INFO); } if (likely(!error)) @@ -1254,11 +1263,17 @@ insert_params_from_actual_params_with_log(Prepared_statement *stmt, DBUG_RETURN(0); } -/** + +/* Validate INSERT statement. @param stmt prepared statement - @param tables global/local table list + @param table_list global/local table list + @param fields list of the table's fields to insert values + @param values_list values to be inserted into the table + @param update_fields the update fields. + @param update_values the update values. 
+ @param duplic a way to handle duplicates @retval FALSE success @@ -1266,29 +1281,18 @@ insert_params_from_actual_params_with_log(Prepared_statement *stmt, TRUE error, error message is set in THD */ -static bool mysql_test_insert(Prepared_statement *stmt, - TABLE_LIST *table_list, - List<Item> &fields, - List<List_item> &values_list, - List<Item> &update_fields, - List<Item> &update_values, - enum_duplicates duplic) +static bool mysql_test_insert_common(Prepared_statement *stmt, + TABLE_LIST *table_list, + List<Item> &fields, + List<List_item> &values_list, + List<Item> &update_fields, + List<Item> &update_values, + enum_duplicates duplic) { THD *thd= stmt->thd; List_iterator_fast<List_item> its(values_list); List_item *values; - DBUG_ENTER("mysql_test_insert"); - - /* - Since INSERT DELAYED doesn't support temporary tables, we could - not pre-open temporary tables for SQLCOM_INSERT / SQLCOM_REPLACE. - Open them here instead. - */ - if (table_list->lock_type != TL_WRITE_DELAYED) - { - if (thd->open_temporary_tables(table_list)) - goto error; - } + DBUG_ENTER("mysql_test_insert_common"); if (insert_precheck(thd, table_list)) goto error; @@ -1356,6 +1360,44 @@ error: /** + Open temporary tables if required and validate INSERT statement. + + @param stmt prepared statement + @param tables global/local table list + + @retval + FALSE success + @retval + TRUE error, error message is set in THD +*/ + +static bool mysql_test_insert(Prepared_statement *stmt, + TABLE_LIST *table_list, + List<Item> &fields, + List<List_item> &values_list, + List<Item> &update_fields, + List<Item> &update_values, + enum_duplicates duplic) +{ + THD *thd= stmt->thd; + + /* + Since INSERT DELAYED doesn't support temporary tables, we could + not pre-open temporary tables for SQLCOM_INSERT / SQLCOM_REPLACE. + Open them here instead. 
+ */ + if (table_list->lock_type != TL_WRITE_DELAYED) + { + if (thd->open_temporary_tables(table_list)) + return true; + } + + return mysql_test_insert_common(stmt, table_list, fields, values_list, + update_fields, update_values, duplic); +} + + +/** Validate UPDATE statement. @param stmt prepared statement @@ -1544,7 +1586,7 @@ static int mysql_test_select(Prepared_statement *stmt, if (check_table_access(thd, privilege, tables, FALSE, UINT_MAX, FALSE)) goto error; } - else if (check_access(thd, privilege, any_db, NULL, NULL, 0, 0)) + else if (check_access(thd, privilege, any_db.str, NULL, NULL, 0, 0)) goto error; if (!lex->result && !(lex->result= new (stmt->mem_root) select_send(thd))) @@ -2278,6 +2320,83 @@ static int mysql_test_handler_read(Prepared_statement *stmt, /** + Send metadata to a client on PREPARE phase of XA RECOVER statement + processing + + @param stmt prepared statement + + @return 0 on success, 1 on failure, 2 in case metadata was already sent +*/ + +static int mysql_test_xa_recover(Prepared_statement *stmt) +{ + THD *thd= stmt->thd; + List<Item> field_list; + + xa_recover_get_fields(thd, &field_list, nullptr); + return send_stmt_metadata(thd, stmt, &field_list); +} + + +/** + Send metadata to a client on PREPARE phase of HELP statement processing + + @param stmt prepared statement + + @return 0 on success, 1 on failure, 2 in case metadata was already sent +*/ + +static int mysql_test_help(Prepared_statement *stmt) +{ + THD *thd= stmt->thd; + List<Item> fields; + + if (mysqld_help_prepare(thd, stmt->lex->help_arg, &fields)) + return 1; + + return send_stmt_metadata(thd, stmt, &fields); +} + + +/** + Send metadata to a client on PREPARE phase of admin related statements + processing + + @param stmt prepared statement + + @return 0 on success, 1 on failure, 2 in case metadata was already sent +*/ + +static int mysql_test_admin_table(Prepared_statement *stmt) +{ + THD *thd= stmt->thd; + List<Item> fields; + + fill_check_table_metadata_fields(thd, 
&fields); + return send_stmt_metadata(thd, stmt, &fields); +} + + +/** + Send metadata to a client on PREPARE phase of CHECKSUM TABLE statement + processing + + @param stmt prepared statement + + @return 0 on success, 1 on failure, 2 in case metadata was already sent +*/ + +static int mysql_test_checksum_table(Prepared_statement *stmt) +{ + THD *thd= stmt->thd; + List<Item> fields; + + fill_checksum_table_metadata_fields(thd, &fields); + return send_stmt_metadata(thd, stmt, &fields); +} + + +/** Perform semantic analysis of the parsed tree and send a response packet to the client. @@ -2318,9 +2437,9 @@ static bool check_prepared_statement(Prepared_statement *stmt) For the optimizer trace, this is the symmetric, for statement preparation, of what is done at statement execution (in mysql_execute_command()). */ - Opt_trace_start ots(thd, tables, lex->sql_command, &lex->var_list, - thd->query(), thd->query_length(), - thd->variables.character_set_client); + Opt_trace_start ots(thd); + ots.init(thd, tables, lex->sql_command, &lex->var_list, thd->query(), + thd->query_length(), thd->variables.character_set_client); Json_writer_object trace_command(thd); Json_writer_array trace_command_steps(thd, "steps"); @@ -2355,6 +2474,13 @@ static bool check_prepared_statement(Prepared_statement *stmt) lex->duplicates); break; + case SQLCOM_LOAD: + res= mysql_test_insert_common(stmt, tables, lex->field_list, + lex->many_values, + lex->update_list, lex->value_list, + lex->duplicates); + break; + case SQLCOM_UPDATE: res= mysql_test_update(stmt, tables); /* mysql_test_update returns 2 if we need to switch to multi-update */ @@ -2491,11 +2617,6 @@ static bool check_prepared_statement(Prepared_statement *stmt) } break; case SQLCOM_CREATE_VIEW: - if (lex->create_view->mode == VIEW_ALTER) - { - my_message(ER_UNSUPPORTED_PS, ER_THD(thd, ER_UNSUPPORTED_PS), MYF(0)); - goto error; - } res= mysql_test_create_view(stmt); break; case SQLCOM_DO: @@ -2523,71 +2644,47 @@ static bool 
check_prepared_statement(Prepared_statement *stmt) /* Statement and field info has already been sent */ DBUG_RETURN(res == 1 ? TRUE : FALSE); - /* - Note that we don't need to have cases in this list if they are - marked with CF_STATUS_COMMAND in sql_command_flags - */ - case SQLCOM_SHOW_EXPLAIN: - case SQLCOM_DROP_TABLE: - case SQLCOM_DROP_SEQUENCE: - case SQLCOM_RENAME_TABLE: - case SQLCOM_ALTER_TABLE: - case SQLCOM_ALTER_SEQUENCE: - case SQLCOM_COMMIT: - case SQLCOM_CREATE_INDEX: - case SQLCOM_DROP_INDEX: - case SQLCOM_ROLLBACK: - case SQLCOM_ROLLBACK_TO_SAVEPOINT: - case SQLCOM_TRUNCATE: - case SQLCOM_DROP_VIEW: - case SQLCOM_REPAIR: + case SQLCOM_XA_RECOVER: + res= mysql_test_xa_recover(stmt); + if (res == 2) + /* Statement and field info has already been sent */ + DBUG_RETURN(false); + break; + + case SQLCOM_HELP: + res= mysql_test_help(stmt); + if (res == 2) + /* Statement and field info has already been sent */ + DBUG_RETURN(false); + break; + case SQLCOM_ANALYZE: - case SQLCOM_OPTIMIZE: - case SQLCOM_CHANGE_MASTER: - case SQLCOM_RESET: - case SQLCOM_FLUSH: - case SQLCOM_SLAVE_START: - case SQLCOM_SLAVE_STOP: - case SQLCOM_SLAVE_ALL_START: - case SQLCOM_SLAVE_ALL_STOP: - case SQLCOM_INSTALL_PLUGIN: - case SQLCOM_UNINSTALL_PLUGIN: - case SQLCOM_CREATE_DB: - case SQLCOM_DROP_DB: - case SQLCOM_ALTER_DB_UPGRADE: - case SQLCOM_CHECKSUM: - case SQLCOM_CREATE_USER: - case SQLCOM_ALTER_USER: - case SQLCOM_RENAME_USER: - case SQLCOM_DROP_USER: - case SQLCOM_CREATE_ROLE: - case SQLCOM_DROP_ROLE: case SQLCOM_ASSIGN_TO_KEYCACHE: + case SQLCOM_CHECK: + case SQLCOM_OPTIMIZE: case SQLCOM_PRELOAD_KEYS: - case SQLCOM_GRANT: - case SQLCOM_GRANT_ROLE: - case SQLCOM_REVOKE: - case SQLCOM_REVOKE_ALL: - case SQLCOM_REVOKE_ROLE: - case SQLCOM_KILL: - case SQLCOM_COMPOUND: - case SQLCOM_SHUTDOWN: + case SQLCOM_REPAIR: + res= mysql_test_admin_table(stmt); + if (res == 2) + /* Statement and field info has already been sent */ + DBUG_RETURN(false); + break; + + case SQLCOM_CHECKSUM: 
+ res= mysql_test_checksum_table(stmt); + if (res == 2) + /* Statement and field info has already been sent */ + DBUG_RETURN(false); break; case SQLCOM_PREPARE: case SQLCOM_EXECUTE: + case SQLCOM_EXECUTE_IMMEDIATE: case SQLCOM_DEALLOCATE_PREPARE: + my_message(ER_UNSUPPORTED_PS, ER_THD(thd, ER_UNSUPPORTED_PS), MYF(0)); + goto error; + default: - /* - Trivial check of all status commands. This is easier than having - things in the above case list, as it's less chance for mistakes. - */ - if (!(sql_command_flags[sql_command] & CF_STATUS_COMMAND)) - { - /* All other statements are not supported yet. */ - my_message(ER_UNSUPPORTED_PS, ER_THD(thd, ER_UNSUPPORTED_PS), MYF(0)); - goto error; - } break; } if (res == 0) @@ -3470,10 +3567,15 @@ static void mysql_stmt_execute_common(THD *thd, thd->protocol= &thd->protocol_binary; MYSQL_EXECUTE_PS(thd->m_statement_psi, stmt->m_prepared_stmt); + auto save_cur_stmt= thd->cur_stmt; + thd->cur_stmt= stmt; + if (!bulk_op) stmt->execute_loop(&expanded_query, open_cursor, packet, packet_end); else stmt->execute_bulk_loop(&expanded_query, open_cursor, packet, packet_end); + + thd->cur_stmt= save_cur_stmt; thd->protocol= save_protocol; sp_cache_enforce_limit(thd->sp_proc_cache, stored_program_cache_size); @@ -3492,7 +3594,7 @@ static void mysql_stmt_execute_common(THD *thd, SQLCOM_EXECUTE implementation. Execute prepared statement using parameter values from - lex->prepared_stmt_params and send result to the client using + lex->prepared_stmt.params() and send result to the client using text protocol. This is called from mysql_execute_command and therefore should behave like an ordinary query (e.g. not change global THD data, such as warning count, server status, etc). 
@@ -4108,16 +4210,20 @@ Query_arena::Type Prepared_statement::type() const } -void Prepared_statement::cleanup_stmt() +bool Prepared_statement::cleanup_stmt(bool restore_set_statement_vars) { + bool error= false; DBUG_ENTER("Prepared_statement::cleanup_stmt"); DBUG_PRINT("enter",("stmt: %p", this)); - lex->restore_set_statement_var(); + + if (restore_set_statement_vars) + error= lex->restore_set_statement_var(); + thd->rollback_item_tree_changes(); cleanup_items(free_list); thd->cleanup_after_query(); - DBUG_VOID_RETURN; + DBUG_RETURN(error); } @@ -4232,6 +4338,8 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) old_stmt_arena= thd->stmt_arena; thd->stmt_arena= this; + auto save_cur_stmt= thd->cur_stmt; + thd->cur_stmt= this; Parser_state parser_state; if (parser_state.init(thd, thd->query(), thd->query_length())) @@ -4239,6 +4347,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) thd->restore_backup_statement(this, &stmt_backup); thd->restore_active_arena(this, &stmt_backup); thd->stmt_arena= old_stmt_arena; + thd->cur_stmt = save_cur_stmt; DBUG_RETURN(TRUE); } @@ -4248,16 +4357,21 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) lex_start(thd); lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_PREPARE; + error= (parse_sql(thd, & parser_state, NULL) || thd->is_error() || init_param_array(this)); if (thd->security_ctx->password_expired && - lex->sql_command != SQLCOM_SET_OPTION) + lex->sql_command != SQLCOM_SET_OPTION && + lex->sql_command != SQLCOM_PREPARE && + lex->sql_command != SQLCOM_EXECUTE && + lex->sql_command != SQLCOM_DEALLOCATE_PREPARE) { thd->restore_backup_statement(this, &stmt_backup); thd->restore_active_arena(this, &stmt_backup); thd->stmt_arena= old_stmt_arena; + thd->cur_stmt = save_cur_stmt; my_error(ER_MUST_CHANGE_PASSWORD, MYF(0)); DBUG_RETURN(true); } @@ -4313,12 +4427,6 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) lex->context_analysis_only&= 
~CONTEXT_ANALYSIS_ONLY_PREPARE; } - /* - Restore original values of variables modified on handling - SET STATEMENT clause. - */ - error|= thd->lex->restore_set_statement_var(); - /* The order is important */ lex->unit.cleanup(); @@ -4347,9 +4455,15 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) if (lex->sql_command != SQLCOM_SET_OPTION) lex_unlock_plugins(lex); - cleanup_stmt(); + /* + Pass the value true to restore original values of variables modified + on handling SET STATEMENT clause. + */ + error|= cleanup_stmt(true); + thd->restore_backup_statement(this, &stmt_backup); thd->stmt_arena= old_stmt_arena; + thd->cur_stmt= save_cur_stmt; if (likely(error == 0)) { @@ -4835,6 +4949,7 @@ Prepared_statement::reprepare() it's failed, we need to return all the warnings to the user. */ thd->get_stmt_da()->clear_warning_info(thd->query_id); + column_info_state.reset(); } else { @@ -5030,8 +5145,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) /* Allocate query. */ if (expanded_query->length() && - alloc_query(thd, (char*) expanded_query->ptr(), - expanded_query->length())) + alloc_query(thd, expanded_query->ptr(), expanded_query->length())) { my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), expanded_query->length()); goto error; @@ -5056,6 +5170,25 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) /* Go! */ + /* + Log COM_EXECUTE to the general log. Note, that in case of SQL + prepared statements this causes two records to be output: + + Query EXECUTE <statement name> + Execute <statement SQL text> + + This is considered user-friendly, since in the + second log entry we output values of parameter markers. + + Do not print anything if this is an SQL prepared statement and + we're inside a stored procedure (also called Dynamic SQL) -- + sub-statements inside stored procedures are not logged into + the general log. 
+ */ + + if (thd->spcont == nullptr) + general_log_write(thd, COM_STMT_EXECUTE, thd->query(), thd->query_length()); + if (open_cursor) error= mysql_open_cursor(thd, &result, &cursor); else @@ -5068,14 +5201,12 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) if (query_cache_send_result_to_client(thd, thd->query(), thd->query_length()) <= 0) { - MYSQL_QUERY_EXEC_START(thd->query(), - thd->thread_id, - thd->get_db(), + MYSQL_QUERY_EXEC_START(thd->query(), thd->thread_id, thd->get_db(), &thd->security_ctx->priv_user[0], - (char *) thd->security_ctx->host_or_ip, - 1); - error= mysql_execute_command(thd); + (char *) thd->security_ctx->host_or_ip, 1); + error= mysql_execute_command(thd, true); MYSQL_QUERY_EXEC_DONE(error); + thd->update_server_status(); } else { @@ -5101,8 +5232,48 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) DBUG_ASSERT(! (error && cursor)); if (! cursor) - cleanup_stmt(); - + /* + Pass the value false to don't restore set statement variables. + See the next comment block for more details. + */ + cleanup_stmt(false); + + /* + Log the statement to slow query log if it passes filtering. + We do it here for prepared statements despite of the fact that the function + log_slow_statement() is also called upper the stack from the function + dispatch_command(). The reason for logging slow queries here is that + the function log_slow_statement() must be called before restoring system + variables that could be set on execution of SET STATEMENT clause. Since + for prepared statement restoring of system variables set on execution of + SET STATEMENT clause is performed on return from the method + Prepared_statement::execute(), by the time the function log_slow_statement() + be invoked from the function dispatch_command() all variables set by + the SET STATEMEN clause would be already reset to their original values + that break semantic of the SET STATEMENT clause. 
+ + E.g., lets consider the following statements + SET slow_query_log= 1; + SET @@long_query_time=0.01; + PREPARE stmt FROM 'set statement slow_query_log=0 for select sleep(0.1)'; + EXECUTE stmt; + + It's expected that the above statements don't write any record + to slow query log since the system variable slow_query_log is set to 0 + during execution of the whole statement + 'set statement slow_query_log=0 for select sleep(0.1)' + + However, if the function log_slow_statement wasn't called here the record + for the statement would be written to slow query log since the variable + slow_query_log is restored to its original value by the time the function + log_slow_statement is called from disptach_command() to write a record + into slow query log. + */ + log_slow_statement(thd); + + error|= lex->restore_set_statement_var(); + + /* EXECUTE command has its own dummy "explain data". We don't need it, instead, we want to keep the query plan of the statement that was @@ -5144,26 +5315,8 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) thd->protocol->send_out_parameters(&this->lex->param_list); } - /* - Log COM_EXECUTE to the general log. Note, that in case of SQL - prepared statements this causes two records to be output: - - Query EXECUTE <statement name> - Execute <statement SQL text> - - This is considered user-friendly, since in the - second log entry we output values of parameter markers. - - Do not print anything if this is an SQL prepared statement and - we're inside a stored procedure (also called Dynamic SQL) -- - sub-statements inside stored procedures are not logged into - the general log. 
- */ - if (likely(error == 0 && thd->spcont == NULL)) - general_log_write(thd, COM_STMT_EXECUTE, thd->query(), thd->query_length()); - error: - thd->lex->restore_set_statement_var(); + error|= thd->lex->restore_set_statement_var(); flags&= ~ (uint) IS_IN_USE; return error; } @@ -5477,7 +5630,7 @@ protected: CHARSET_INFO *fromcs, CHARSET_INFO *tocs); bool net_send_eof(THD *thd, uint server_status, uint statement_warn_count); bool net_send_ok(THD *, uint, uint, ulonglong, ulonglong, const char *, - bool, bool); + bool); bool net_send_error_packet(THD *, uint, const char *, const char *); bool begin_dataset(); bool begin_dataset(THD *thd, uint numfields); @@ -5640,7 +5793,7 @@ bool Protocol_local::net_store_data_cs(const uchar *from, size_t length, bool Protocol_local::net_send_ok(THD *thd, uint server_status, uint statement_warn_count, - ulonglong affected_rows, ulonglong id, const char *message, bool, bool) + ulonglong affected_rows, ulonglong id, const char *message, bool) { DBUG_ENTER("emb_net_send_ok"); MYSQL_DATA *data; @@ -6185,5 +6338,3 @@ extern "C" int execute_sql_command(const char *command, } #endif /*!EMBEDDED_LIBRARY*/ - - diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc index d8ecd2abee7..f576e693a0e 100644 --- a/sql/sql_profile.cc +++ b/sql/sql_profile.cc @@ -610,7 +610,7 @@ int PROFILING::fill_statistics_info(THD *thd_arg, TABLE_LIST *tables, Item *cond table->field[9]->store((uint32)(entry->rusage.ru_oublock - previous->rusage.ru_oublock)); table->field[9]->set_notnull(); -#elif defined(__WIN__) +#elif defined(_WIN32) ULONGLONG reads_delta = entry->io_count.ReadOperationCount - previous->io_count.ReadOperationCount; ULONGLONG writes_delta = entry->io_count.WriteOperationCount - @@ -643,7 +643,7 @@ int PROFILING::fill_statistics_info(THD *thd_arg, TABLE_LIST *tables, Item *cond table->field[13]->store((uint32)(entry->rusage.ru_minflt - previous->rusage.ru_minflt), true); table->field[13]->set_notnull(); -#elif defined(__WIN__) +#elif 
defined(_WIN32) /* Windows APIs don't easily distinguish between hard and soft page faults, so we just fill the 'major' column and leave the second NULL. */ diff --git a/sql/sql_profile.h b/sql/sql_profile.h index 85018a2598b..881365596ed 100644 --- a/sql/sql_profile.h +++ b/sql/sql_profile.h @@ -46,7 +46,7 @@ int make_profile_table_for_show(THD *thd, ST_SCHEMA_TABLE *schema_table); #include "sql_priv.h" #include "unireg.h" -#ifdef __WIN__ +#ifdef _WIN32 #include <psapi.h> #endif diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index cd75cd45319..3448e157e10 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -24,6 +24,7 @@ #include "sql_connect.h" // reset_mqh #include "thread_cache.h" #include "sql_base.h" // close_cached_tables +#include "sql_parse.h" // check_single_table_access #include "sql_db.h" // my_dbopt_cleanup #include "hostname.h" // hostname_cache_refresh #include "sql_repl.h" // reset_master, reset_slave @@ -589,27 +590,30 @@ bool flush_tables_with_read_lock(THD *thd, TABLE_LIST *all_tables) &lock_tables_prelocking_strategy)) goto error_reset_bits; - if (thd->lex->type & REFRESH_FOR_EXPORT) + if (thd->lex->type & (REFRESH_FOR_EXPORT|REFRESH_READ_LOCK)) { - // Check if all storage engines support FOR EXPORT. 
for (TABLE_LIST *table_list= all_tables; table_list; table_list= table_list->next_global) { - if (!(table_list->table->file->ha_table_flags() & HA_CAN_EXPORT)) + if (table_list->belong_to_view && + check_single_table_access(thd, PRIV_LOCK_TABLES, table_list, FALSE)) + { + table_list->hide_view_error(thd); + goto error_reset_bits; + } + if (table_list->is_view_or_derived()) + continue; + if (thd->lex->type & REFRESH_FOR_EXPORT && + table_list->table && + !(table_list->table->file->ha_table_flags() & HA_CAN_EXPORT)) { my_error(ER_ILLEGAL_HA, MYF(0),table_list->table->file->table_type(), table_list->db.str, table_list->table_name.str); goto error_reset_bits; } - } - } - - if (thd->lex->type & REFRESH_READ_LOCK) - { - for (auto table_list= all_tables; table_list; - table_list= table_list->next_global) - { - if (table_list->table->file->extra(HA_EXTRA_FLUSH)) + if (thd->lex->type & REFRESH_READ_LOCK && + table_list->table && + table_list->table->file->extra(HA_EXTRA_FLUSH)) goto error_reset_bits; } } diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index 063645a4dce..5948b8ebc3d 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2011, 2013, Monty Program Ab. + Copyright (c) 2011, 2021, Monty Program Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -30,18 +30,21 @@ #include "sql_base.h" // tdc_remove_table, lock_table_names, #include "sql_handler.h" // mysql_ha_rm_tables #include "sql_statistics.h" +#include "ddl_log.h" #include "wsrep_mysqld.h" +#include "debug.h" + +/* used to hold table entries for as part of list of renamed temporary tables */ +struct TABLE_PAIR +{ + TABLE_LIST *from, *to; +}; -static TABLE_LIST *rename_tables(THD *thd, TABLE_LIST *table_list, - bool skip_error, bool if_exits, - bool *force_if_exists); -static bool do_rename(THD *thd, TABLE_LIST *ren_table, - const LEX_CSTRING *new_db, - const LEX_CSTRING *new_table_name, - const LEX_CSTRING *new_table_alias, - bool skip_error, bool if_exists, bool *force_if_exists); -static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list); +static bool rename_tables(THD *thd, TABLE_LIST *table_list, + DDL_LOG_STATE *ddl_log_state, + bool skip_error, bool if_exits, + bool *force_if_exists); /* Every two entries in the table_list form a pair of original name and @@ -56,6 +59,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent, TABLE_LIST *ren_table= 0; int to_table; const char *rename_log_table[2]= {NULL, NULL}; + DDL_LOG_STATE ddl_log_state; DBUG_ENTER("mysql_rename_tables"); /* @@ -152,32 +156,14 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent, goto err; error=0; + bzero(&ddl_log_state, sizeof(ddl_log_state)); + /* An exclusive lock on table names is satisfactory to ensure no other thread accesses this table. 
*/ - if ((ren_table= rename_tables(thd, table_list, 0, if_exists, - &force_if_exists))) - { - /* Rename didn't succeed; rename back the tables in reverse order */ - TABLE_LIST *table; - - /* Reverse the table list */ - table_list= reverse_table_list(table_list); - - /* Find the last renamed table */ - for (table= table_list; - table->next_local != ren_table ; - table= table->next_local->next_local) ; - table= table->next_local->next_local; // Skip error table - /* Revert to old names */ - rename_tables(thd, table, 1, if_exists, &force_if_exists); - - /* Revert the table list (for prepared statements) */ - table_list= reverse_table_list(table_list); - - error= 1; - } + error= rename_tables(thd, table_list, &ddl_log_state, + 0, if_exists, &force_if_exists); if (likely(!silent && !error)) { @@ -187,49 +173,43 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent, /* Add IF EXISTS to binary log */ thd->variables.option_bits|= OPTION_IF_EXISTS; } + + debug_crash_here("ddl_log_rename_before_binlog"); + /* + Store xid in ddl log and binary log so that we can check on ddl recovery + if the item is in the binary log (and thus the operation was complete + */ + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); binlog_error= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); + if (binlog_error) + error= 1; + thd->binlog_xid= 0; thd->variables.option_bits= save_option_bits; + debug_crash_here("ddl_log_rename_after_binlog"); if (likely(!binlog_error)) my_ok(thd); } if (likely(!error)) + { query_cache_invalidate3(thd, table_list, 0); + ddl_log_complete(&ddl_log_state); + } + else + { + /* Revert the renames of normal tables with the help of the ddl log */ + ddl_log_revert(thd, &ddl_log_state); + } err: DBUG_RETURN(error || binlog_error); } -/* - reverse table list - - SYNOPSIS - reverse_table_list() - table_list pointer to table _list - - RETURN - pointer to new (reversed) list -*/ -static TABLE_LIST 
*reverse_table_list(TABLE_LIST *table_list) -{ - TABLE_LIST *prev= 0; - - while (table_list) - { - TABLE_LIST *next= table_list->next_local; - table_list->next_local= prev; - prev= table_list; - table_list= next; - } - return (prev); -} - - static bool -do_rename_temporary(THD *thd, TABLE_LIST *ren_table, TABLE_LIST *new_table, - bool skip_error) +do_rename_temporary(THD *thd, TABLE_LIST *ren_table, TABLE_LIST *new_table) { LEX_CSTRING *new_alias; DBUG_ENTER("do_rename_temporary"); @@ -243,90 +223,138 @@ do_rename_temporary(THD *thd, TABLE_LIST *ren_table, TABLE_LIST *new_table, DBUG_RETURN(1); // This can't be skipped } - DBUG_RETURN(thd->rename_temporary_table(ren_table->table, &new_table->db, new_alias)); } -/* - Rename a single table or a view +/** + Parameters for do_rename +*/ - SYNPOSIS - do_rename() - thd Thread handle - ren_table A table/view to be renamed - new_db The database to which the table to be moved to - new_table_name The new table/view name - new_table_alias The new table/view alias - skip_error Whether to skip error - if_exists Skip error, but only if the table didn't exists - force_if_exists Set to 1 if we have to log the query with 'IF EXISTS' - Otherwise don't touch the value +struct rename_param +{ + LEX_CSTRING old_alias, new_alias; + LEX_CUSTRING old_version; + handlerton *from_table_hton; +}; - DESCRIPTION - Rename a single table or a view. - RETURN - false Ok - true rename failed +/** + check_rename() + + Check pre-conditions for rename + - From table should exists + - To table should not exists. + + SYNOPSIS + @param new_table_name The new table/view name + @param new_table_alias The new table/view alias + @param if_exists If not set, give an error if the table does not + exists. If set, just give a warning in this case. 
+ @return + @retval 0 ok + @retval >0 Error (from table doesn't exists or to table exists) + @retval <0 Can't do rename, but no error */ -static bool -do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db, - const LEX_CSTRING *new_table_name, - const LEX_CSTRING *new_table_alias, - bool skip_error, bool if_exists, bool *force_if_exists) +static int +check_rename(THD *thd, rename_param *param, + TABLE_LIST *ren_table, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_table_name, + const LEX_CSTRING *new_table_alias, + bool if_exists) { - int rc= 1; - handlerton *hton, *new_hton; - LEX_CSTRING old_alias, new_alias; - DBUG_ENTER("do_rename"); - DBUG_PRINT("enter", ("skip_error: %d if_exists: %d", (int) skip_error, - (int) if_exists)); + DBUG_ENTER("check_rename"); + DBUG_PRINT("enter", ("if_exists: %d", (int) if_exists)); + if (lower_case_table_names == 2) { - old_alias= ren_table->alias; - new_alias= *new_table_alias; + param->old_alias= ren_table->alias; + param->new_alias= *new_table_alias; } else { - old_alias= ren_table->table_name; - new_alias= *new_table_name; + param->old_alias= ren_table->table_name; + param->new_alias= *new_table_name; } - DBUG_ASSERT(new_alias.str); + DBUG_ASSERT(param->new_alias.str); - if (!ha_table_exists(thd, &ren_table->db, &old_alias, &hton) || !hton) + if (!ha_table_exists(thd, &ren_table->db, ¶m->old_alias, + ¶m->old_version, NULL, + ¶m->from_table_hton) || + !param->from_table_hton) { - my_error(ER_NO_SUCH_TABLE, MYF((skip_error | if_exists) ? ME_NOTE : 0), - ren_table->db.str, old_alias.str); - DBUG_RETURN(skip_error || if_exists ? 0 : 1); + my_error(ER_NO_SUCH_TABLE, MYF(if_exists ? ME_NOTE : 0), + ren_table->db.str, param->old_alias.str); + DBUG_RETURN(if_exists ? -1 : 1); } - if (hton != view_pseudo_hton && - ha_check_if_updates_are_ignored(thd, hton, "RENAME")) + if (param->from_table_hton != view_pseudo_hton && + ha_check_if_updates_are_ignored(thd, param->from_table_hton, "RENAME")) { /* Shared table. 
Just drop the old .frm as it's not correct anymore Discovery will find the old table when it's accessed */ tdc_remove_table(thd, ren_table->db.str, ren_table->table_name.str); - quick_rm_table(thd, 0, &ren_table->db, &old_alias, FRM_ONLY, 0); - DBUG_RETURN(0); + quick_rm_table(thd, 0, &ren_table->db, ¶m->old_alias, FRM_ONLY, 0); + DBUG_RETURN(-1); } - if (ha_table_exists(thd, new_db, &new_alias, &new_hton)) + if (ha_table_exists(thd, new_db, ¶m->new_alias, NULL, NULL, 0)) { - my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias.str); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), param->new_alias.str); DBUG_RETURN(1); // This can't be skipped } + DBUG_RETURN(0); +} + + +/* + Rename a single table or a view + + SYNPOSIS + do_rename() + thd Thread handle + ren_table A table/view to be renamed + new_db The database to which the table to be moved to + skip_error Skip error, but only if the table didn't exists + force_if_exists Set to 1 if we have to log the query with 'IF EXISTS' + Otherwise don't touch the value + + DESCRIPTION + Rename a single table or a view. 
+ In case of failure, all changes will be reverted + + RETURN + false Ok + true rename failed +*/ + +static bool +do_rename(THD *thd, rename_param *param, DDL_LOG_STATE *ddl_log_state, + TABLE_LIST *ren_table, const LEX_CSTRING *new_db, + bool skip_error, bool *force_if_exists) +{ + int rc= 1; + handlerton *hton; + LEX_CSTRING *old_alias, *new_alias; + TRIGGER_RENAME_PARAM rename_param; + DBUG_ENTER("do_rename"); + DBUG_PRINT("enter", ("skip_error: %d", (int) skip_error)); + + old_alias= ¶m->old_alias; + new_alias= ¶m->new_alias; + hton= param->from_table_hton; DBUG_ASSERT(!thd->locked_tables_mode); #ifdef WITH_WSREP if (WSREP(thd) && hton && hton != view_pseudo_hton && - !wsrep_should_replicate_ddl(thd, hton->db_type)) + !wsrep_should_replicate_ddl(thd, hton)) DBUG_RETURN(1); #endif @@ -337,18 +365,48 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db, if (hton->flags & HTON_TABLE_MAY_NOT_EXIST_ON_SLAVE) *force_if_exists= 1; + /* Check if we can rename triggers */ + if (Table_triggers_list::prepare_for_rename(thd, &rename_param, + &ren_table->db, + old_alias, + &ren_table->table_name, + new_db, + new_alias)) + DBUG_RETURN(!skip_error); + thd->replication_flags= 0; - if (!(rc= mysql_rename_table(hton, &ren_table->db, &old_alias, - new_db, &new_alias, 0))) + + if (ddl_log_rename_table(thd, ddl_log_state, hton, + &ren_table->db, old_alias, new_db, new_alias)) + DBUG_RETURN(1); + + debug_crash_here("ddl_log_rename_before_rename_table"); + if (!(rc= mysql_rename_table(hton, &ren_table->db, old_alias, + new_db, new_alias, ¶m->old_version, 0))) { - (void) rename_table_in_stat_tables(thd, &ren_table->db, - &ren_table->table_name, - new_db, &new_alias); - if ((rc= Table_triggers_list::change_table_name(thd, &ren_table->db, - &old_alias, - &ren_table->table_name, - new_db, - &new_alias))) + /* Table rename succeded. 
+ It's safe to start recovery at rename trigger phase + */ + debug_crash_here("ddl_log_rename_before_phase_trigger"); + ddl_log_update_phase(ddl_log_state, DDL_RENAME_PHASE_TRIGGER); + + debug_crash_here("ddl_log_rename_before_rename_trigger"); + + if (!(rc= Table_triggers_list::change_table_name(thd, + &rename_param, + &ren_table->db, + old_alias, + &ren_table->table_name, + new_db, + new_alias))) + { + debug_crash_here("ddl_log_rename_before_stat_tables"); + (void) rename_table_in_stat_tables(thd, &ren_table->db, + &ren_table->table_name, + new_db, new_alias); + debug_crash_here("ddl_log_rename_after_stat_tables"); + } + else { /* We've succeeded in renaming table's .frm and in updating @@ -356,8 +414,13 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db, triggers appropriately. So let us revert operations on .frm and handler's data and report about failure to rename table. */ - (void) mysql_rename_table(hton, new_db, &new_alias, - &ren_table->db, &old_alias, NO_FK_CHECKS); + debug_crash_here("ddl_log_rename_after_failed_rename_trigger"); + (void) mysql_rename_table(hton, new_db, new_alias, + &ren_table->db, old_alias, ¶m->old_version, + NO_FK_CHECKS); + debug_crash_here("ddl_log_rename_after_revert_rename_table"); + ddl_log_disable_entry(ddl_log_state); + debug_crash_here("ddl_log_rename_after_disable_entry"); } } if (thd->replication_flags & OPTION_IF_EXISTS) @@ -372,9 +435,25 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db, */ if (thd->lex->sql_command != SQLCOM_ALTER_DB_UPGRADE && cmp(&ren_table->db, new_db)) + { my_error(ER_FORBID_SCHEMA_CHANGE, MYF(0), ren_table->db.str, new_db->str); - else - rc= mysql_rename_view(thd, new_db, &new_alias, ren_table); + DBUG_RETURN(1); + } + + ddl_log_rename_view(thd, ddl_log_state, &ren_table->db, + &ren_table->table_name, new_db, new_alias); + debug_crash_here("ddl_log_rename_before_rename_view"); + rc= mysql_rename_view(thd, new_db, new_alias, &ren_table->db, + 
&ren_table->table_name); + debug_crash_here("ddl_log_rename_after_rename_view"); + if (rc) + { + /* + On error mysql_rename_view() will leave things as such. + */ + ddl_log_disable_entry(ddl_log_state); + debug_crash_here("ddl_log_rename_after_disable_entry"); + } } DBUG_RETURN(rc && !skip_error ? 1 : 0); } @@ -392,6 +471,7 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db, rename_tables() thd Thread handle table_list List of tables to rename + ddl_log_state ddl logging skip_error Whether to skip errors if_exists Don't give an error if table doesn't exists force_if_exists Set to 1 if we have to log the query with 'IF EXISTS' @@ -404,14 +484,16 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db, RETURN 0 Ok - table pointer to the table list element which rename failed + 1 error + All tables are reverted to their original names */ -static TABLE_LIST * -rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error, - bool if_exists, bool *force_if_exists) +static bool +rename_tables(THD *thd, TABLE_LIST *table_list, DDL_LOG_STATE *ddl_log_state, + bool skip_error, bool if_exists, bool *force_if_exists) { TABLE_LIST *ren_table, *new_table; + List<TABLE_PAIR> tmp_tables; DBUG_ENTER("rename_tables"); *force_if_exists= 0; @@ -420,11 +502,48 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error, { new_table= ren_table->next_local; - if (is_temporary_table(ren_table) ? - do_rename_temporary(thd, ren_table, new_table, skip_error) : - do_rename(thd, ren_table, &new_table->db, &new_table->table_name, - &new_table->alias, skip_error, if_exists, force_if_exists)) - DBUG_RETURN(ren_table); + if (is_temporary_table(ren_table)) + { + /* + Store renamed temporary tables into a list. + We don't store these in the ddl log to avoid writes and syncs + when only using temporary tables. We don't need the log as + all temporary tables will disappear anyway in a crash. 
+ */ + TABLE_PAIR *pair= (TABLE_PAIR*) thd->alloc(sizeof(*pair)); + if (! pair || tmp_tables.push_front(pair, thd->mem_root)) + goto revert_rename; + pair->from= ren_table; + pair->to= new_table; + + if (do_rename_temporary(thd, ren_table, new_table)) + goto revert_rename; + } + else + { + int error; + rename_param param; + error= check_rename(thd, ¶m, ren_table, &new_table->db, + &new_table->table_name, + &new_table->alias, (skip_error || if_exists)); + if (error < 0) + continue; // Ignore rename (if exists) + if (error > 0) + goto revert_rename; + + if (do_rename(thd, ¶m, ddl_log_state, + ren_table, &new_table->db, + skip_error, force_if_exists)) + goto revert_rename; + } } DBUG_RETURN(0); + +revert_rename: + /* Revert temporary tables. Normal tables are reverted in the caller */ + List_iterator_fast<TABLE_PAIR> it(tmp_tables); + while (TABLE_PAIR *pair= it++) + do_rename_temporary(thd, pair->to, pair->from); + + DBUG_RETURN(1); } diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index decddba6c4f..e990b94b43c 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1519,9 +1519,10 @@ gtid_state_from_pos(const char *name, uint32 offset, goto end; } - current_checksum_alg= get_checksum_alg(packet.ptr(), packet.length()); + current_checksum_alg= get_checksum_alg((uchar*) packet.ptr(), + packet.length()); found_format_description_event= true; - if (unlikely(!(tmp= new Format_description_log_event(packet.ptr(), + if (unlikely(!(tmp= new Format_description_log_event((uchar*) packet.ptr(), packet.length(), fdev)))) { @@ -1539,7 +1540,7 @@ gtid_state_from_pos(const char *name, uint32 offset, { sele_len -= BINLOG_CHECKSUM_LEN; } - Start_encryption_log_event sele(packet.ptr(), sele_len, fdev); + Start_encryption_log_event sele((uchar*) packet.ptr(), sele_len, fdev); if (fdev->start_decryption(&sele)) { errormsg= "Could not start decryption of binlog."; @@ -1596,7 +1597,7 @@ gtid_state_from_pos(const char *name, uint32 offset, { rpl_gtid gtid; uchar flags2; - if 
(unlikely(Gtid_log_event::peek(packet.ptr(), packet.length(), + if (unlikely(Gtid_log_event::peek((uchar*) packet.ptr(), packet.length(), current_checksum_alg, >id.domain_id, >id.server_id, >id.seq_no, &flags2, fdev))) @@ -1682,9 +1683,9 @@ is_until_reached(binlog_send_info *info, ulong *ev_offset, if (event_type != XID_EVENT && event_type != XA_PREPARE_LOG_EVENT && (event_type != QUERY_EVENT || /* QUERY_COMPRESSED_EVENT would never be commmit or rollback */ !Query_log_event::peek_is_commit_rollback - (info->packet->ptr()+*ev_offset, - info->packet->length()-*ev_offset, - info->current_checksum_alg))) + ((uchar*) info->packet->ptr() + *ev_offset, + info->packet->length() - *ev_offset, + info->current_checksum_alg))) return false; break; } @@ -1762,7 +1763,7 @@ send_event_to_slave(binlog_send_info *info, Log_event_type event_type, rpl_gtid event_gtid; if (ev_offset > len || - Gtid_log_event::peek(packet->ptr()+ev_offset, len - ev_offset, + Gtid_log_event::peek((uchar*) packet->ptr()+ev_offset, len - ev_offset, current_checksum_alg, &event_gtid.domain_id, &event_gtid.server_id, &event_gtid.seq_no, &flags2, info->fdev)) @@ -1916,7 +1917,8 @@ send_event_to_slave(binlog_send_info *info, Log_event_type event_type, case GTID_SKIP_TRANSACTION: if (event_type == XID_EVENT || event_type == XA_PREPARE_LOG_EVENT || (event_type == QUERY_EVENT && /* QUERY_COMPRESSED_EVENT would never be commmit or rollback */ - Query_log_event::peek_is_commit_rollback(packet->ptr() + ev_offset, + Query_log_event::peek_is_commit_rollback((uchar*) packet->ptr() + + ev_offset, len - ev_offset, current_checksum_alg))) info->gtid_skip_group= GTID_SKIP_NOT; @@ -2153,7 +2155,7 @@ static int init_binlog_sender(binlog_send_info *info, "Start binlog_dump to slave_server(%lu), pos(%s, %lu), " "using_gtid(%d), gtid('%s')", thd->variables.server_id, log_ident, (ulong)*pos, info->using_gtid_state, - connect_gtid_state.c_ptr_quick()); + connect_gtid_state.c_ptr_safe()); } #ifndef DBUG_OFF @@ -2176,7 +2178,7 @@ 
static int init_binlog_sender(binlog_send_info *info, const char *name=search_file_name; if (info->using_gtid_state) { - if (info->gtid_state.load(connect_gtid_state.c_ptr_quick(), + if (info->gtid_state.load(connect_gtid_state.ptr(), connect_gtid_state.length())) { info->errmsg= "Out of memory or malformed slave request when obtaining " @@ -2185,7 +2187,7 @@ static int init_binlog_sender(binlog_send_info *info, return 1; } if (info->until_gtid_state && - info->until_gtid_state->load(slave_until_gtid_str.c_ptr_quick(), + info->until_gtid_state->load(slave_until_gtid_str.ptr(), slave_until_gtid_str.length())) { info->errmsg= "Out of memory or malformed slave request when " @@ -2320,7 +2322,8 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log, DBUG_RETURN(1); } - info->current_checksum_alg= get_checksum_alg(packet->ptr() + ev_offset, + info->current_checksum_alg= get_checksum_alg((uchar*) packet->ptr() + + ev_offset, packet->length() - ev_offset); DBUG_ASSERT(info->current_checksum_alg == BINLOG_CHECKSUM_ALG_OFF || @@ -2345,7 +2348,7 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log, ev_len-= BINLOG_CHECKSUM_LEN; Format_description_log_event *tmp; - if (!(tmp= new Format_description_log_event(packet->ptr() + ev_offset, + if (!(tmp= new Format_description_log_event((uchar*) packet->ptr() + ev_offset, ev_len, info->fdev))) { info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG; @@ -2437,7 +2440,8 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log, if (event_type == START_ENCRYPTION_EVENT) { Start_encryption_log_event *sele= (Start_encryption_log_event *) - Log_event::read_log_event(packet->ptr() + ev_offset, packet->length() + Log_event::read_log_event((uchar*) packet->ptr() + ev_offset, + packet->length() - ev_offset, &info->errmsg, info->fdev, BINLOG_CHECKSUM_ALG_OFF); if (!sele) @@ -2824,6 +2828,12 @@ static int send_one_binlog_file(binlog_send_info *info, */ if 
(send_events(info, log, linfo, end_pos)) return 1; + DBUG_EXECUTE_IF("Notify_binlog_EOF", + { + const char act[]= "now signal eof_reached"; + DBUG_ASSERT(!debug_sync_set_action(current_thd, + STRING_WITH_LEN(act))); + };); } return 1; @@ -4337,7 +4347,9 @@ bool show_binlog_info(THD* thd) LOG_INFO li; mysql_bin_log.get_current_log(&li); size_t dir_len = dirname_length(li.log_file_name); - protocol->store(li.log_file_name + dir_len, &my_charset_bin); + const char *base= li.log_file_name + dir_len; + + protocol->store(base, strlen(base), &my_charset_bin); protocol->store((ulonglong) li.pos); protocol->store(binlog_filter->get_do_db()); protocol->store(binlog_filter->get_ignore_db()); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 98d5ccb7eb2..3c2be0b1c8b 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -68,6 +68,7 @@ #include "my_json_writer.h" #include "opt_trace.h" #include "derived_handler.h" +#include "create_tmp_table.h" /* A key part number that means we're using a fulltext scan. 
@@ -111,12 +112,19 @@ static void optimize_straight_join(JOIN *join, table_map join_tables); static bool greedy_search(JOIN *join, table_map remaining_tables, uint depth, uint prune_level, uint use_cond_selectivity); -static bool best_extension_by_limited_search(JOIN *join, - table_map remaining_tables, - uint idx, double record_count, - double read_time, uint depth, - uint prune_level, - uint use_cond_selectivity); +enum enum_best_search { + SEARCH_ABORT= -2, + SEARCH_ERROR= -1, + SEARCH_OK= 0, + SEARCH_FOUND_EDGE=1 +}; +static enum_best_search +best_extension_by_limited_search(JOIN *join, + table_map remaining_tables, + uint idx, double record_count, + double read_time, uint depth, + uint prune_level, + uint use_cond_selectivity); static uint determine_search_depth(JOIN* join); C_MODE_START static int join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2); @@ -255,12 +263,14 @@ static ORDER *create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array, ORDER *order, List<Item> &fields, List<Item> &all_fields, bool *all_order_by_fields_used); -static bool test_if_subpart(ORDER *a,ORDER *b); +static bool test_if_subpart(ORDER *group_by, ORDER *order_by); static TABLE *get_sort_by_table(ORDER *a,ORDER *b,List<TABLE_LIST> &tables, table_map const_tables); -static void calc_group_buffer(JOIN *join,ORDER *group); +static void calc_group_buffer(JOIN *join, ORDER *group); static bool make_group_fields(JOIN *main_join, JOIN *curr_join); -static bool alloc_group_fields(JOIN *join,ORDER *group); +static bool alloc_group_fields(JOIN *join, ORDER *group); +static bool alloc_order_fields(JOIN *join, ORDER *group, + uint max_number_of_elements); // Create list for using with tempory table static bool change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array, List<Item> &new_list1, @@ -276,7 +286,8 @@ static void update_tmptable_sum_func(Item_sum **func,TABLE *tmp_table); static void copy_sum_funcs(Item_sum **func_ptr, Item_sum **end); static bool 
add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab); static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr); -static bool prepare_sum_aggregators(Item_sum **func_ptr, bool need_distinct); +static bool prepare_sum_aggregators(THD *thd, Item_sum **func_ptr, + bool need_distinct); static bool init_sum_functions(Item_sum **func, Item_sum **end); static bool update_sum_func(Item_sum **func); static void select_describe(JOIN *join, bool need_tmp_table,bool need_order, @@ -309,7 +320,10 @@ bool build_notnull_conds_for_range_scans(JOIN *join, COND *cond, static void build_notnull_conds_for_inner_nest_of_outer_join(JOIN *join, TABLE_LIST *nest_tbl); - +static void fix_items_after_optimize(THD *thd, SELECT_LEX *select_lex); +static void optimize_rownum(THD *thd, SELECT_LEX_UNIT *unit, Item *cond); +static bool process_direct_rownum_comparison(THD *thd, SELECT_LEX_UNIT *unit, + Item *cond); #ifndef DBUG_OFF @@ -378,6 +392,8 @@ POSITION::POSITION() range_rowid_filter_info= 0; ref_depend_map= dups_producing_tables= 0; inner_tables_handled_with_other_sjs= 0; + type= JT_UNKNOWN; + key_dependent= 0; dups_weedout_picker.set_empty(); firstmatch_picker.set_empty(); loosescan_picker.set_empty(); @@ -385,6 +401,99 @@ POSITION::POSITION() } +void JOIN::init(THD *thd_arg, List<Item> &fields_arg, + ulonglong select_options_arg, select_result *result_arg) +{ + join_tab= 0; + table= 0; + table_count= 0; + top_join_tab_count= 0; + const_tables= 0; + const_table_map= found_const_table_map= 0; + aggr_tables= 0; + eliminated_tables= 0; + join_list= 0; + implicit_grouping= FALSE; + sort_and_group= 0; + first_record= 0; + do_send_rows= 1; + duplicate_rows= send_records= 0; + found_records= accepted_rows= 0; + fetch_limit= HA_POS_ERROR; + thd= thd_arg; + sum_funcs= sum_funcs2= 0; + procedure= 0; + having= tmp_having= having_history= 0; + having_is_correlated= false; + group_list_for_estimates= 0; + select_options= select_options_arg; + result= result_arg; + lock= thd_arg->lock; + select_lex= 
0; //for safety + select_distinct= MY_TEST(select_options & SELECT_DISTINCT); + no_order= 0; + simple_order= 0; + simple_group= 0; + ordered_index_usage= ordered_index_void; + need_distinct= 0; + skip_sort_order= 0; + with_two_phase_optimization= 0; + save_qep= 0; + spl_opt_info= 0; + ext_keyuses_for_splitting= 0; + spl_opt_info= 0; + need_tmp= 0; + hidden_group_fields= 0; /*safety*/ + error= 0; + select= 0; + return_tab= 0; + ref_ptrs.reset(); + items0.reset(); + items1.reset(); + items2.reset(); + items3.reset(); + zero_result_cause= 0; + optimization_state= JOIN::NOT_OPTIMIZED; + have_query_plan= QEP_NOT_PRESENT_YET; + initialized= 0; + cleaned= 0; + cond_equal= 0; + having_equal= 0; + exec_const_cond= 0; + group_optimized_away= 0; + no_rows_in_result_called= 0; + positions= best_positions= 0; + pushdown_query= 0; + original_join_tab= 0; + explain= NULL; + tmp_table_keep_current_rowid= 0; + + all_fields= fields_arg; + if (&fields_list != &fields_arg) /* Avoid valgrind-warning */ + fields_list= fields_arg; + non_agg_fields.empty(); + bzero((char*) &keyuse,sizeof(keyuse)); + having_value= Item::COND_UNDEF; + tmp_table_param.init(); + tmp_table_param.end_write_records= HA_POS_ERROR; + rollup.state= ROLLUP::STATE_NONE; + + no_const_tables= FALSE; + first_select= sub_select; + set_group_rpa= false; + group_sent= 0; + + outer_ref_cond= pseudo_bits_cond= NULL; + in_to_exists_where= NULL; + in_to_exists_having= NULL; + emb_sjm_nest= NULL; + sjm_lookup_tables= 0; + sjm_scan_tables= 0; + is_orig_degenerated= false; + with_ties_order_count= 0; +}; + + static void trace_table_dependencies(THD *thd, JOIN_TAB *join_tabs, uint table_count) { @@ -820,7 +929,7 @@ void vers_select_conds_t::print(String *str, enum_query_type query_type) const // nothing to add break; case SYSTEM_TIME_ALL: - str->append(" FOR SYSTEM_TIME ALL"); + str->append(STRING_WITH_LEN(" FOR SYSTEM_TIME ALL")); break; } } @@ -1104,7 +1213,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) if 
(vers_conditions.is_set()) { if (vers_conditions.was_set() && - table->lock_type > TL_READ_NO_INSERT && + table->lock_type >= TL_FIRST_WRITE && !vers_conditions.delete_history) { my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), table->alias.str); @@ -1212,6 +1321,9 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, // simple check that we got usable conds dbug_print_item(conds); + /* Fix items that requires the join structure to exist */ + fix_items_after_optimize(thd, select_lex); + /* It is hack which force creating EXPLAIN object always on runt-time arena (because very top JOIN::prepare executes always with runtime arena, but @@ -1261,7 +1373,7 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, { if (select_el->with_sum_func()) found_sum_func_elem= true; - if (select_el->with_field) + if (select_el->with_field()) found_field_elem= true; if (found_sum_func_elem && found_field_elem) { @@ -1306,6 +1418,17 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, enum_parsing_place save_place= thd->lex->current_select->context_analysis_place; thd->lex->current_select->context_analysis_place= SELECT_LIST; + + { + List_iterator_fast<TABLE_LIST> it(select_lex->leaf_tables); + while ((tbl= it++)) + { + if (tbl->table_function && + tbl->table_function->setup(thd, tbl, select_lex_arg)) + DBUG_RETURN(-1); + } + } + if (setup_fields(thd, ref_ptrs, fields_list, MARK_COLUMNS_READ, &all_fields, &select_lex->pre_fix, 1)) DBUG_RETURN(-1); @@ -1376,7 +1499,7 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, DBUG_RETURN(-1); /* purecov: inspected */ thd->lex->allow_sum_func= save_allow_sum_func; - if (having->with_window_func) + if (having->with_window_func()) { my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0)); DBUG_RETURN(-1); @@ -1394,7 +1517,7 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, Item *item; while ((item= it++)) { - if (item->with_window_func) + if 
(item->with_window_func()) item->update_used_tables(); } } @@ -1420,9 +1543,23 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, if (order) { - bool real_order= FALSE; - ORDER *ord; - for (ord= order; ord; ord= ord->next) + bool requires_sorting= FALSE; + /* + WITH TIES forces the results to be sorted, even if it's not sanely + sortable. + */ + if (select_lex->limit_params.with_ties) + requires_sorting= true; + + /* + Go through each ORDER BY item and perform the following: + 1. Detect if none of the items contain meaningful data, which means we + can drop the sorting altogether. + 2. Split any columns with aggregation functions or window functions into + their base components and store them as separate fields. + (see split_sum_func) for more details. + */ + for (ORDER *ord= order; ord; ord= ord->next) { Item *item= *ord->item; /* @@ -1431,27 +1568,38 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, zero length NOT NULL string functions there. Such tuples don't contain any data to sort. */ - if (!real_order && + if (!requires_sorting && /* Not a zero length NOT NULL field */ ((item->type() != Item::FIELD_ITEM || ((Item_field *) item)->field->maybe_null() || ((Item_field *) item)->field->sort_length()) && /* AND not a zero length NOT NULL string function. */ (item->type() != Item::FUNC_ITEM || - item->maybe_null || + item->maybe_null() || item->result_type() != STRING_RESULT || item->max_length))) - real_order= TRUE; + requires_sorting= TRUE; if ((item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM) || - item->with_window_func) + item->with_window_func()) item->split_sum_func(thd, ref_ptrs, all_fields, SPLIT_SUM_SELECT); } - if (!real_order) + /* Drop the ORDER BY clause if none of the columns contain any data that + can produce a meaningful sorted set. 
*/ + if (!requires_sorting) order= NULL; } + else + { + /* The current select does not have an ORDER BY */ + if (select_lex->limit_params.with_ties) + { + my_error(ER_WITH_TIES_NEEDS_ORDER, MYF(0)); + DBUG_RETURN(-1); + } + } - if (having && having->with_sum_func()) + if (having && (having->with_sum_func() || having->with_rownum_func())) having->split_sum_func2(thd, ref_ptrs, all_fields, &having, SPLIT_SUM_SKIP_REGISTERED); if (select_lex->inner_sum_func_list) @@ -1650,7 +1798,7 @@ bool JOIN::build_explain() JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt(); for (uint i= 0; i < aggr_tables; i++, curr_tab++) { - if (select_nr == INT_MAX) + if (select_nr == FAKE_SELECT_LEX_ID) { /* this is a fake_select_lex of a union */ select_nr= select_lex->master_unit()->first_select()->select_number; @@ -1837,7 +1985,6 @@ JOIN::init_range_rowid_filters() DBUG_RETURN(0); } - /** global select optimisation. @@ -1855,15 +2002,21 @@ JOIN::optimize_inner() { DBUG_ENTER("JOIN::optimize_inner"); subq_exit_fl= false; - do_send_rows = (unit->lim.get_select_limit()) ? 1 : 0; DEBUG_SYNC(thd, "before_join_optimize"); - THD_STAGE_INFO(thd, stage_optimizing); #ifndef DBUG_OFF dbug_join_tab_array_size= 0; #endif + // rownum used somewhere in query, no limits and it is derived + if (unlikely(thd->lex->with_rownum && + select_lex->first_cond_optimization && + select_lex->master_unit()->derived)) + optimize_upper_rownum_func(); + + do_send_rows = (unit->lim.get_select_limit()) ? 1 : 0; + set_allowed_join_cache_types(); need_distinct= TRUE; @@ -1890,7 +2043,12 @@ JOIN::optimize_inner() transform_in_predicates_into_in_subq(thd)) DBUG_RETURN(1); - // Update used tables after all handling derived table procedures + /* + Update used tables after all handling derived table procedures + After this call, select_lex->select_list_tables contains the table + bits of all items in the select list (but not bits from WHERE clause or + other items). 
+ */ select_lex->update_used_tables(); /* @@ -1952,7 +2110,7 @@ JOIN::optimize_inner() { /* Item_cond_and can't be fixed after creation, so we do not check - conds->is_fixed() + conds->fixed() */ conds->fix_fields(thd, &conds); conds->change_ref_to_fields(thd, tables_list); @@ -1981,6 +2139,9 @@ JOIN::optimize_inner() /* Convert all outer joins to inner joins if possible */ conds= simplify_joins(this, join_list, conds, TRUE, FALSE); + + add_table_function_dependencies(join_list, table_map(-1)); + if (thd->is_error() || select_lex->save_leaf_tables(thd)) { if (arena) @@ -2064,6 +2225,9 @@ JOIN::optimize_inner() ignore_on_expr= true; break; } + + transform_in_predicates_into_equalities(thd); + conds= optimize_cond(this, conds, join_list, ignore_on_expr, &cond_value, &cond_equal, OPT_LINK_EQUAL_FIELDS); @@ -2073,6 +2237,9 @@ JOIN::optimize_inner() DBUG_PRINT("error",("Error from optimize_cond")); DBUG_RETURN(1); } + if (select_lex->with_rownum && ! order && ! group_list && + !select_distinct && conds && select_lex == unit->global_parameters()) + optimize_rownum(thd, unit, conds); having= optimize_cond(this, having, join_list, TRUE, &having_value, &having_equal); @@ -2445,7 +2612,7 @@ int JOIN::optimize_stage2() if (!conds && outer_join) { /* Handle the case where we have an OUTER JOIN without a WHERE */ - conds= new (thd->mem_root) Item_bool(thd, true); // Always true + conds= (Item*) &Item_true; } if (impossible_where) @@ -2489,6 +2656,10 @@ int JOIN::optimize_stage2() DBUG_RETURN(1); } conds->update_used_tables(); + + if (unlikely(thd->trace_started())) + trace_condition(thd, "WHERE", "substitute_best_equal", conds); + DBUG_EXECUTE("where", print_where(conds, "after substitute_best_equal", @@ -2505,7 +2676,12 @@ int JOIN::optimize_stage2() DBUG_RETURN(1); } if (having) + { having->update_used_tables(); + if (unlikely(thd->trace_started())) + trace_condition(thd, "HAVING", "substitute_best_equal", having); + } + DBUG_EXECUTE("having", print_where(having, "after 
substitute_best_equal", @@ -2532,6 +2708,11 @@ int JOIN::optimize_stage2() DBUG_RETURN(1); } (*tab->on_expr_ref)->update_used_tables(); + if (unlikely(thd->trace_started())) + { + trace_condition(thd, "ON expr", "substitute_best_equal", + (*tab->on_expr_ref), tab->table->alias.c_ptr()); + } } } @@ -2602,7 +2783,7 @@ int JOIN::optimize_stage2() if (conds && const_table_map != found_const_table_map && (select_options & SELECT_DESCRIBE)) { - conds=new (thd->mem_root) Item_bool(thd, false); // Always false + conds= (Item*) &Item_false; } /* Cache constant expressions in WHERE, HAVING, ON clauses. */ @@ -2639,6 +2820,19 @@ int JOIN::optimize_stage2() if (!order && org_order) skip_sort_order= 1; } + + /* + For FETCH ... WITH TIES save how many items order by had, after we've + removed constant items that have no relevance on the final sorting. + */ + if (unit->lim.is_with_ties()) + { + DBUG_ASSERT(with_ties_order_count == 0); + for (ORDER *it= order; it; it= it->next) + with_ties_order_count+= 1; + } + + /* Check if we can optimize away GROUP BY/DISTINCT. 
We can do that if there are no aggregate functions, the @@ -2902,7 +3096,7 @@ int JOIN::optimize_stage2() having= having->remove_eq_conds(thd, &select_lex->having_value, true); if (select_lex->having_value == Item::COND_FALSE) { - having= new (thd->mem_root) Item_bool(thd, false); + having= (Item*) &Item_false; zero_result_cause= "Impossible HAVING noticed after reading const tables"; error= 0; select_lex->mark_const_derived(zero_result_cause); @@ -3162,8 +3356,7 @@ bool JOIN::add_having_as_table_cond(JOIN_TAB *tab) DBUG_ENTER("JOIN::add_having_as_table_cond"); Item* sort_table_cond= make_cond_for_table(thd, tmp_having, used_tables, - (table_map) 0, false, - false, false); + (table_map) 0, 0, false, false); if (sort_table_cond) { if (!tab->select) @@ -3204,7 +3397,7 @@ bool JOIN::add_having_as_table_cond(JOIN_TAB *tab) QT_ORDINARY);); having= make_cond_for_table(thd, tmp_having, ~ (table_map) 0, - ~used_tables, false, false, false); + ~used_tables, 0, false, false); DBUG_EXECUTE("where", print_where(having, "having after sort", QT_ORDINARY);); } @@ -3259,11 +3452,11 @@ bool JOIN::make_aggr_tables_info() JOIN_TAB *curr_tab= join_tab + const_tables; TABLE *exec_tmp_table= NULL; bool distinct= false; - bool keep_row_order= false; + const bool has_group_by= this->group; + bool keep_row_order= thd->lex->with_rownum && (group_list || order); bool is_having_added_as_table_cond= false; DBUG_ENTER("JOIN::make_aggr_tables_info"); - const bool has_group_by= this->group; sort_and_group_aggr_tab= NULL; @@ -3347,10 +3540,10 @@ bool JOIN::make_aggr_tables_info() DBUG_RETURN(1); TABLE* table= create_tmp_table(thd, curr_tab->tmp_table_param, all_fields, - NULL, query.distinct, + NULL, distinct, TRUE, select_options, HA_POS_ERROR, &empty_clex_str, !need_tmp, - query.order_by || query.group_by); + keep_row_order); if (!table) DBUG_RETURN(1); @@ -3450,7 +3643,7 @@ bool JOIN::make_aggr_tables_info() distinct= select_distinct && !group_list && !select_lex->have_window_funcs(); - 
keep_row_order= false; + keep_row_order= thd->lex->with_rownum && (group_list || order); bool save_sum_fields= (group_list && simple_group) || implicit_grouping_with_window_funcs; if (create_postjoin_aggr_table(curr_tab, @@ -3597,7 +3790,7 @@ bool JOIN::make_aggr_tables_info() { if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true)) DBUG_RETURN(true); - if (prepare_sum_aggregators(sum_funcs, + if (prepare_sum_aggregators(thd, sum_funcs, !join_tab->is_using_agg_loose_index_scan())) DBUG_RETURN(true); group_list= NULL; @@ -3707,7 +3900,7 @@ bool JOIN::make_aggr_tables_info() } if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true)) DBUG_RETURN(true); - if (prepare_sum_aggregators(sum_funcs, + if (prepare_sum_aggregators(thd, sum_funcs, !join_tab || !join_tab-> is_using_agg_loose_index_scan())) DBUG_RETURN(true); @@ -3779,6 +3972,9 @@ bool JOIN::make_aggr_tables_info() sort_tab->filesort->limit= (has_group_by || (join_tab + top_join_tab_count > curr_tab + 1)) ? select_limit : unit->lim.get_select_limit(); + + if (unit->lim.is_with_ties()) + sort_tab->filesort->limit= HA_POS_ERROR; } if (!only_const_tables() && !join_tab[const_tables].filesort && @@ -3815,6 +4011,18 @@ bool JOIN::make_aggr_tables_info() if (select_lex->custom_agg_func_used()) status_var_increment(thd->status_var.feature_custom_aggregate_functions); + /* + Allocate Cached_items of ORDER BY for FETCH FIRST .. WITH TIES. + The order list might have been modified prior to this, but we are + only interested in the initial order by columns, after all const + elements are removed. 
+ */ + if (unit->lim.is_with_ties()) + { + if (alloc_order_fields(this, order, with_ties_order_count)) + DBUG_RETURN(true); + } + fields= curr_fields_list; // Reset before execution set_items_ref_array(items0); @@ -3843,10 +4051,16 @@ JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List<Item> *table_fields, when there is ORDER BY or GROUP BY or there is no GROUP BY, but there are aggregate functions, because in all these cases we need all result rows. + + We also can not push limit if the limit is WITH TIES, as we do not know + how many rows we will actually have. This can happen if ORDER BY was + a constant and removed (during remove_const), thus we have an "unlimited" + WITH TIES. */ ha_rows table_rows_limit= ((order == NULL || skip_sort_order) && !table_group && - !select_lex->with_sum_func) ? select_limit + !select_lex->with_sum_func && + !unit->lim.is_with_ties()) ? select_limit : HA_POS_ERROR; if (!(tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param))) @@ -3888,7 +4102,7 @@ JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List<Item> *table_fields, goto err; if (make_sum_func_list(all_fields, fields_list, true)) goto err; - if (prepare_sum_aggregators(sum_funcs, + if (prepare_sum_aggregators(thd, sum_funcs, !(tables_list && join_tab->is_using_agg_loose_index_scan()))) goto err; @@ -3898,7 +4112,7 @@ JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List<Item> *table_fields, } else { - if (prepare_sum_aggregators(sum_funcs, + if (prepare_sum_aggregators(thd, sum_funcs, !join_tab->is_using_agg_loose_index_scan())) goto err; if (setup_sum_funcs(thd, sum_funcs)) @@ -3976,6 +4190,16 @@ JOIN::add_sorting_to_table(JOIN_TAB *tab, ORDER *order) tab->select); if (!tab->filesort) return true; + + TABLE *table= tab->table; + if ((tab == join_tab + const_tables) && + table->pos_in_table_list && + table->pos_in_table_list->is_sjm_scan_table()) + { + tab->filesort->set_all_read_bits= TRUE; + tab->filesort->unpack= unpack_to_base_table_fields; + } + /* Select was moved to 
filesort->select to force join_init_read_record to use sorted result instead of reading table through select. @@ -4166,6 +4390,7 @@ JOIN::reinit() first_record= false; group_sent= false; cleaned= false; + accepted_rows= 0; if (aggr_tables) { @@ -4284,14 +4509,12 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite, If there is SELECT in this statement with the same number it must be the same SELECT */ - DBUG_ASSERT(select_lex->select_number == UINT_MAX || - select_lex->select_number == INT_MAX || !output || + DBUG_ASSERT(select_lex->select_number == FAKE_SELECT_LEX_ID || !output || !output->get_select(select_lex->select_number) || output->get_select(select_lex->select_number)->select_lex == select_lex); - if (select_lex->select_number != UINT_MAX && - select_lex->select_number != INT_MAX /* this is not a UNION's "fake select */ && + if (select_lex->select_number != FAKE_SELECT_LEX_ID && have_query_plan != JOIN::QEP_NOT_PRESENT_YET && have_query_plan != JOIN::QEP_DELETED && // this happens when there was // no QEP ever, but then @@ -4511,9 +4734,11 @@ void JOIN::exec_inner() { List_iterator_fast<Item> const_item_it(exec_const_order_group_cond); Item *cur_const_item; + StringBuffer<MAX_FIELD_WIDTH> tmp; while ((cur_const_item= const_item_it++)) { - cur_const_item->val_str(); // This caches val_str() to Item::str_value + tmp.set_buffer_if_not_allocated(&my_charset_bin); + cur_const_item->val_str(&tmp); if (unlikely(thd->is_error())) { error= thd->is_error(); @@ -5468,7 +5693,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list, if (join->cond_value == Item::COND_FALSE) { join->impossible_where= true; - conds= new (join->thd->mem_root) Item_bool(join->thd, false); + conds= (Item*) &Item_false; } join->cond_equal= NULL; @@ -5901,8 +6126,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, new_fields->null_rejecting); } else if (old->eq_func && new_fields->eq_func && - ((old->val->const_item() && 
!old->val->is_expensive() && - old->val->is_null()) || + ((old->val->can_eval_in_optimize() && old->val->is_null()) || (!new_fields->val->is_expensive() && new_fields->val->is_null()))) { @@ -6098,7 +6322,11 @@ add_key_field(JOIN *join, Field IN ... */ if (field->flags & PART_KEY_FLAG) - stat[0].key_dependent|=used_tables; + { + stat[0].key_dependent|= used_tables; + if (field->key_start.bits_set()) + stat[0].key_start_dependent= 1; + } bool is_const=1; for (uint i=0; i<num_values; i++) @@ -6158,7 +6386,7 @@ add_key_field(JOIN *join, { if ((cond->functype() == Item_func::EQ_FUNC || cond->functype() == Item_func::MULT_EQUAL_FUNC) && - ((*value)->maybe_null || field->real_maybe_null())) + ((*value)->maybe_null() || field->real_maybe_null())) (*key_fields)->null_rejecting= true; else (*key_fields)->null_rejecting= false; @@ -7049,6 +7277,7 @@ bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse, use= save_pos= dynamic_element(keyuse,0,KEYUSE*); prev= &key_end; found_eq_constant= 0; + /* Loop over all elements except the last 'key_end' */ for (i=0 ; i < keyuse->elements-1 ; i++,use++) { if (!use->is_for_hash_join()) @@ -7442,7 +7671,7 @@ double cost_for_index_read(const THD *thd, const TABLE *table, uint key, Adjust cost from table->quick_costs calculated by multi_range_read_info_const() to be comparable with cost_for_index_read() - This functions is needed because best_access_patch doesn't add + This functions is needed because best_access_path() doesn't add TIME_FOR_COMPARE to it's costs until very late. Preferably we should fix so that all costs are comparably. (All compared costs should include TIME_FOR_COMPARE for all found @@ -7507,6 +7736,13 @@ best_access_path(JOIN *join, double records= DBL_MAX; ha_rows records_for_key= 0; table_map best_ref_depends_map= 0; + /* + key_dependent is 0 if all key parts could be used or if there was an + EQ_REF table found (which uses all key parts). 
In other words, we cannot + find a better key for the table even if remaining_tables is reduced. + Otherwise it's a bitmap of tables that could improve key usage. + */ + table_map key_dependent= 0; Range_rowid_filter_cost_info *best_filter= 0; double tmp; double keyread_tmp= 0; @@ -7525,7 +7761,6 @@ best_access_path(JOIN *join, DBUG_ENTER("best_access_path"); Json_writer_object trace_wrapper(thd, "best_access_path"); - Json_writer_array trace_paths(thd, "considered_access_paths"); bitmap_clear_all(eq_join_set); @@ -7533,6 +7768,7 @@ best_access_path(JOIN *join, if (s->table->is_splittable()) spl_plan= s->choose_best_splitting(record_count, remaining_tables); + Json_writer_array trace_paths(thd, "considered_access_paths"); if (s->keyuse) { /* Use key if possible */ @@ -7559,6 +7795,8 @@ best_access_path(JOIN *join, key_part_map const_part= 0; /* The or-null keypart in ref-or-null access: */ key_part_map ref_or_null_part= 0; + key_part_map all_parts= 0; + if (is_hash_join_key_no(key)) { /* @@ -7590,15 +7828,16 @@ best_access_path(JOIN *join, do /* For each keypart */ { uint keypart= keyuse->keypart; - table_map best_part_found_ref= 0; + table_map best_part_found_ref= 0, key_parts_dependent= 0; double best_prev_record_reads= DBL_MAX; - + do /* For each way to access the keypart */ { /* if 1. expression doesn't refer to forward tables 2. 
we won't get two ref-or-null's */ + all_parts|= keyuse->keypart_map; if (!(remaining_tables & keyuse->used_tables) && (!keyuse->validity_ref || *keyuse->validity_ref) && s->access_from_tables_is_allowed(keyuse->used_tables, @@ -7607,10 +7846,11 @@ best_access_path(JOIN *join, KEY_OPTIMIZE_REF_OR_NULL))) { found_part|= keyuse->keypart_map; + key_parts_dependent= 0; if (!(keyuse->used_tables & ~join->const_table_map)) const_part|= keyuse->keypart_map; - if (!keyuse->val->maybe_null || keyuse->null_rejecting) + if (!keyuse->val->maybe_null() || keyuse->null_rejecting) notnull_part|=keyuse->keypart_map; double tmp2= prev_record_reads(join_positions, idx, @@ -7629,10 +7869,16 @@ best_access_path(JOIN *join, if (keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL) ref_or_null_part |= keyuse->keypart_map; } + else if (!(found_part & keyuse->keypart_map)) + key_parts_dependent|= keyuse->used_tables; + loose_scan_opt.add_keyuse(remaining_tables, keyuse); keyuse++; } while (keyuse->table == table && keyuse->key == key && keyuse->keypart == keypart); + /* If we found a usable key, remember the dependent tables */ + if (all_parts & 1) + key_dependent|= key_parts_dependent; found_ref|= best_part_found_ref; } while (keyuse->table == table && keyuse->key == key); @@ -7737,6 +7983,7 @@ best_access_path(JOIN *join, { if (!(records= keyinfo->actual_rec_per_key(key_parts-1))) { /* Prefer longer keys */ + trace_access_idx.add("rec_per_key_stats_missing", true); records= ((double) s->records / (double) rec * (1.0 + @@ -7763,7 +8010,7 @@ best_access_path(JOIN *join, records > (double) table->opt_range[key].rows) { records= (double) table->opt_range[key].rows; - trace_access_idx.add("used_range_estimates", true); + trace_access_idx.add("used_range_estimates", "clipped down"); } else { @@ -7884,19 +8131,15 @@ best_access_path(JOIN *join, if (!found_ref && // (1) records < rows) // (3) { - trace_access_idx.add("used_range_estimates", true); + trace_access_idx.add("used_range_estimates", "clipped 
up"); records= rows; } } - else /* (table->quick_key_parts[key] < max_key_part) */ - { - trace_access_idx.add("chosen", true); - cause= "range uses less keyparts"; - } } } else { + trace_access_idx.add("rec_per_key_stats_missing", true); /* Assume that the first key part matches 1% of the file and that the whole key matches 10 (duplicates) or 1 @@ -7960,6 +8203,7 @@ best_access_path(JOIN *join, const_part)) && records > (double) table->opt_range[key].rows) { + trace_access_idx.add("used_range_estimates", true); records= (double) table->opt_range[key].rows; } } @@ -8084,6 +8328,27 @@ best_access_path(JOIN *join, } /* for each key */ records= best_records; } + else + { + /* + No usable keys found. However, there may still be an option to use + "Range checked for each record" when all depending tables has + been read. s->key_dependent tells us which tables these could be and + s->key_start_dependent tells us if a first key part was used. + s->key_dependent may include more tables than could be used, + but this is ok as not having any usable keys is a rare thing and + the performance penalty for extra table bits is that + best_extension_by_limited_search() would not be able to prune tables + earlier. + Example query: + SELECT * FROM t1,t2 where t1.key1=t2.key1 OR t2.key2<1 + */ + if (s->key_start_dependent) + key_dependent= s->key_dependent; + } + /* Check that s->key_dependent contains all used_tables found in s->keyuse */ + key_dependent&= ~PSEUDO_TABLE_BITS; + DBUG_ASSERT((key_dependent & s->key_dependent) == key_dependent); /* If there is no key to access the table, but there is an equi-join @@ -8154,7 +8419,8 @@ best_access_path(JOIN *join, access is to use the same index IDX, with the same or more key parts. (note: it is not clear how this rule is/should be extended to index_merge quick selects). Also if we have a hash join we prefer that - over a table scan + over a table scan. 
This heuristic doesn't apply if the quick select + uses the group-by min-max optimization. (3) See above note about InnoDB. (4) NOT ("FORCE INDEX(...)" is used for table and there is 'ref' access path, but there is no quick select) @@ -8172,7 +8438,9 @@ best_access_path(JOIN *join, Json_writer_object trace_access_scan(thd); if ((records >= s->found_records || best > s->read_time) && // (1) !(best_key && best_key->key == MAX_KEY) && // (2) - !(s->quick && best_key && s->quick->index == best_key->key && // (2) + !(s->quick && + s->quick->get_type() != QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX && // (2) + best_key && s->quick->index == best_key->key && // (2) best_max_key_part >= s->table->opt_range[best_key->key].key_parts) &&// (2) !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3) ! s->table->covering_keys.is_clear_all() && best_key && !s->quick) &&// (3) @@ -8337,12 +8605,15 @@ best_access_path(JOIN *join, pos->records_read= records; pos->read_time= best; pos->key= best_key; + pos->type= best_type; pos->table= s; pos->ref_depend_map= best_ref_depends_map; pos->loosescan_picker.loosescan_key= MAX_KEY; pos->use_join_buffer= best_uses_jbuf; pos->spl_plan= spl_plan; pos->range_rowid_filter_info= best_filter; + pos->key_dependent= (best_type == JT_EQ_REF ? 
(table_map) 0 : + key_dependent & remaining_tables); loose_scan_opt.save_to_position(s, loose_scan_pos); @@ -8828,7 +9099,7 @@ determine_search_depth(JOIN *join) */ static void -optimize_straight_join(JOIN *join, table_map join_tables) +optimize_straight_join(JOIN *join, table_map remaining_tables) { JOIN_TAB *s; uint idx= join->const_tables; @@ -8846,30 +9117,32 @@ optimize_straight_join(JOIN *join, table_map join_tables) Json_writer_object trace_one_table(thd); if (unlikely(thd->trace_started())) { - trace_plan_prefix(join, idx, join_tables); + trace_plan_prefix(join, idx, remaining_tables); trace_one_table.add_table_name(s); } /* Find the best access method from 's' to the current partial plan */ - best_access_path(join, s, join_tables, join->positions, idx, + best_access_path(join, s, remaining_tables, join->positions, idx, disable_jbuf, record_count, position, &loose_scan_pos); - /* compute the cost of the new plan extended with 's' */ + /* Compute the cost of the new plan extended with 's' */ record_count= COST_MULT(record_count, position->records_read); const double filter_cmp_gain= position->range_rowid_filter_info ? 
position->range_rowid_filter_info->get_cmp_gain(record_count) : 0; - read_time+= COST_ADD(read_time - filter_cmp_gain, - COST_ADD(position->read_time, - record_count / TIME_FOR_COMPARE)); - optimize_semi_joins(join, join_tables, idx, &record_count, &read_time, + read_time= COST_ADD(read_time, + COST_ADD(position->read_time - + filter_cmp_gain, + record_count / + TIME_FOR_COMPARE)); + optimize_semi_joins(join, remaining_tables, idx, &record_count, &read_time, &loose_scan_pos); - join_tables&= ~(s->table->map); + remaining_tables&= ~(s->table->map); double pushdown_cond_selectivity= 1.0; if (use_cond_selectivity > 1) pushdown_cond_selectivity= table_cond_selectivity(join, idx, s, - join_tables); + remaining_tables); position->cond_selectivity= pushdown_cond_selectivity; ++idx; } @@ -8996,9 +9269,12 @@ greedy_search(JOIN *join, do { /* Find the extension of the current QEP with the lowest cost */ join->best_read= DBL_MAX; - if (best_extension_by_limited_search(join, remaining_tables, idx, record_count, - read_time, search_depth, prune_level, - use_cond_selectivity)) + if ((int) best_extension_by_limited_search(join, remaining_tables, idx, + record_count, + read_time, search_depth, + prune_level, + use_cond_selectivity) < + (int) SEARCH_OK) DBUG_RETURN(TRUE); /* 'best_read < DBL_MAX' means that optimizer managed to find @@ -9054,8 +9330,13 @@ greedy_search(JOIN *join, while (pos && best_table != pos) pos= join->best_ref[++best_idx]; DBUG_ASSERT((pos != NULL)); // should always find 'best_table' - /* move 'best_table' at the first free position in the array of joins */ - swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]); + /* + move 'best_table' at the first free position in the array of joins, + keeping the sorted table order intact + */ + memmove(join->best_ref + idx + 1, join->best_ref + idx, + sizeof(JOIN_TAB*) * (best_idx - idx)); + join->best_ref[idx]= best_table; /* compute the cost of the new plan extended with 'best_table' */ 
record_count= COST_MULT(record_count, join->positions[idx].records_read); @@ -9274,10 +9555,7 @@ double table_multi_eq_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s, double sel= 1.0; COND_EQUAL *cond_equal= join->cond_equal; - if (!cond_equal || !cond_equal->current_level.elements) - return sel; - - if (!s->keyuse) + if (!cond_equal || !cond_equal->current_level.elements || !s->keyuse) return sel; Item_equal *item_equal; @@ -9641,6 +9919,28 @@ exit: } +/* + Check if the table is an EQ_REF or similar table and there is no cost + to gain by moveing it to a later stage. + We call such a table a edge table (or hanging leaf) as it will read at + most one row and will not add to the number of row combinations in the join. +*/ + +static inline enum_best_search +check_if_edge_table(POSITION *pos, + double pushdown_cond_selectivity) +{ + + if ((pos->type == JT_EQ_REF || + (pos->type == JT_REF && + pos->records_read == 1 && + !pos->range_rowid_filter_info)) && + pushdown_cond_selectivity >= 0.999) + return SEARCH_FOUND_EDGE; + return SEARCH_OK; +} + + /** Find a good, possibly optimal, query execution plan (QEP) by a possibly exhaustive search. @@ -9755,12 +10055,17 @@ exit: pushed to a table should be taken into account @retval - FALSE ok + enum_best_search::SEARCH_OK All fine @retval - TRUE Fatal error + enum_best_search::SEARCH_FOUND_EDGE All remaning tables are edge tables + @retval + enum_best_search::SEARCH_ABORT Killed by user + @retval + enum_best_search::SEARCH_ERROR Fatal error */ -static bool + +static enum_best_search best_extension_by_limited_search(JOIN *join, table_map remaining_tables, uint idx, @@ -9770,9 +10075,17 @@ best_extension_by_limited_search(JOIN *join, uint prune_level, uint use_cond_selectivity) { - DBUG_ENTER("best_extension_by_limited_search"); - THD *thd= join->thd; + /* + 'join' is a partial plan with lower cost than the best plan so far, + so continue expanding it further with the tables in 'remaining_tables'. 
+ */ + JOIN_TAB *s, **pos; + double best_record_count= DBL_MAX; + double best_read_time= DBL_MAX; + bool disable_jbuf= join->thd->variables.join_cache_level == 0; + enum_best_search best_res; + DBUG_ENTER("best_extension_by_limited_search"); DBUG_EXECUTE_IF("show_explain_probe_best_ext_lim_search", if (dbug_user_var_equals_int(thd, @@ -9782,24 +10095,12 @@ best_extension_by_limited_search(JOIN *join, ); if (unlikely(thd->check_killed())) // Abort - DBUG_RETURN(TRUE); - - DBUG_EXECUTE("opt", print_plan(join, idx, read_time, record_count, idx, - "SOFAR:");); - - /* - 'join' is a partial plan with lower cost than the best plan so far, - so continue expanding it further with the tables in 'remaining_tables'. - */ - JOIN_TAB *s; - double best_record_count= DBL_MAX; - double best_read_time= DBL_MAX; - bool disable_jbuf= join->thd->variables.join_cache_level == 0; + DBUG_RETURN(SEARCH_ABORT); DBUG_EXECUTE("opt", print_plan(join, idx, record_count, read_time, read_time, "part_plan");); - /* + /* If we are searching for the execution plan of a materialized semi-join nest then allowed_tables contains bits only for the tables from this nest. 
*/ @@ -9807,19 +10108,23 @@ best_extension_by_limited_search(JOIN *join, if (join->emb_sjm_nest) allowed_tables= join->emb_sjm_nest->sj_inner_tables & ~join->const_table_map; - for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++) + for (pos= join->best_ref + idx ; (s= *pos) ; pos++) { table_map real_table_bit= s->table->map; DBUG_ASSERT(remaining_tables & real_table_bit); + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + if ((allowed_tables & real_table_bit) && !(remaining_tables & s->dependent) && !check_interleaving_with_nj(s)) { double current_record_count, current_read_time; + double partial_join_cardinality; POSITION *position= join->positions + idx; - + POSITION loose_scan_pos; Json_writer_object trace_one_table(thd); + if (unlikely(thd->trace_started())) { trace_plan_prefix(join, idx, remaining_tables); @@ -9827,20 +10132,19 @@ best_extension_by_limited_search(JOIN *join, } /* Find the best access method from 's' to the current partial plan */ - POSITION loose_scan_pos; best_access_path(join, s, remaining_tables, join->positions, idx, disable_jbuf, record_count, position, &loose_scan_pos); - /* Compute the cost of extending the plan with 's' */ + /* Compute the cost of the new plan extended with 's' */ current_record_count= COST_MULT(record_count, position->records_read); const double filter_cmp_gain= position->range_rowid_filter_info ? 
position->range_rowid_filter_info->get_cmp_gain(current_record_count) : 0; - current_read_time=COST_ADD(read_time, - COST_ADD(position->read_time - - filter_cmp_gain, - current_record_count / - TIME_FOR_COMPARE)); + current_read_time= COST_ADD(read_time, + COST_ADD(position->read_time - + filter_cmp_gain, + current_record_count / + TIME_FOR_COMPARE)); if (unlikely(thd->trace_started())) { @@ -9875,11 +10179,18 @@ best_extension_by_limited_search(JOIN *join, (idx == join->const_tables && // 's' is the first table in the QEP s->table == join->sort_by_table)) { + /* + Store the current record count and cost as the best + possible cost at this level if the following holds: + - It's the lowest record number and cost so far + - There is no remaing table that could improve index usage + or we found an EQ_REF or REF key with less than 2 + matching records (good enough). + */ if (best_record_count >= current_record_count && best_read_time >= current_read_time && - /* TODO: What is the reasoning behind this condition? 
*/ - (!(s->key_dependent & allowed_tables & remaining_tables) || - join->positions[idx].records_read < 2.0)) + (!(position->key_dependent & allowed_tables) || + position->records_read < 2.0)) { best_record_count= current_record_count; best_read_time= current_read_time; @@ -9902,32 +10213,49 @@ best_extension_by_limited_search(JOIN *join, double pushdown_cond_selectivity= 1.0; if (use_cond_selectivity > 1) pushdown_cond_selectivity= table_cond_selectivity(join, idx, s, - remaining_tables & + remaining_tables & ~real_table_bit); join->positions[idx].cond_selectivity= pushdown_cond_selectivity; - if (unlikely(thd->trace_started()) && pushdown_cond_selectivity < 1.0) - trace_one_table.add("selectivity", pushdown_cond_selectivity); + partial_join_cardinality= (current_record_count * + pushdown_cond_selectivity); + + if (unlikely(thd->trace_started())) + { + if (pushdown_cond_selectivity < 1.0) + { + trace_one_table.add("selectivity", pushdown_cond_selectivity); + trace_one_table.add("estimated_join_cardinality", + partial_join_cardinality); + } + } - double partial_join_cardinality= current_record_count * - pushdown_cond_selectivity; - if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables ) - { /* Recursively expand the current partial plan */ - swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + if ((search_depth > 1) && (remaining_tables & ~real_table_bit) & + allowed_tables) + { + /* Recursively expand the current partial plan */ Json_writer_array trace_rest(thd, "rest_of_plan"); - if (best_extension_by_limited_search(join, - remaining_tables & ~real_table_bit, - idx + 1, - partial_join_cardinality, - current_read_time, - search_depth - 1, - prune_level, - use_cond_selectivity)) - DBUG_RETURN(TRUE); - swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + best_res= + best_extension_by_limited_search(join, + remaining_tables & + ~real_table_bit, + idx + 1, + partial_join_cardinality, + current_read_time, + search_depth - 1, + 
prune_level, + use_cond_selectivity); + if ((int) best_res < (int) SEARCH_OK) + goto end; // Return best_res + if (best_res == SEARCH_FOUND_EDGE && + check_if_edge_table(join->positions+ idx, + pushdown_cond_selectivity) != + SEARCH_FOUND_EDGE) + best_res= SEARCH_OK; } else - { /* + { + /* 'join' is either the best partial QEP with 'search_depth' relations, or the best complete QEP so far, whichever is smaller. */ @@ -9936,15 +10264,13 @@ best_extension_by_limited_search(JOIN *join, join->positions[join->const_tables].table->table) { /* - We may have to make a temp table, note that this is only a - heuristic since we cannot know for sure at this point. - Hence it may be wrong. + We may have to make a temp table, note that this is only a + heuristic since we cannot know for sure at this point. + Hence it may be wrong. */ trace_one_table.add("cost_for_sorting", current_record_count); current_read_time= COST_ADD(current_read_time, current_record_count); } - trace_one_table.add("estimated_join_cardinality", - partial_join_cardinality); if (current_read_time < join->best_read) { memcpy((uchar*) join->best_positions, (uchar*) join->positions, @@ -9957,12 +10283,35 @@ best_extension_by_limited_search(JOIN *join, read_time, current_read_time, "full_plan");); + best_res= check_if_edge_table(join->positions + idx, + pushdown_cond_selectivity); } restore_prev_nj_state(s); restore_prev_sj_state(remaining_tables, s, idx); + if (best_res == SEARCH_FOUND_EDGE) + { + trace_one_table.add("pruned_by_hanging_leaf", true); + goto end; + } } } - DBUG_RETURN(FALSE); + best_res= SEARCH_OK; + +end: + /* Restore original table order */ + if (!*pos) + pos--; // Revert last pos++ in for loop + if (pos != join->best_ref + idx) + { + JOIN_TAB *tmp= join->best_ref[idx]; + uint elements= (uint) (pos - (join->best_ref + idx)); + + memmove((void*) (join->best_ref + idx), + (void*) (join->best_ref + idx + 1), + elements * sizeof(JOIN_TAB*)); + *pos= tmp; + } + DBUG_RETURN(best_res); } @@ -11084,7 
+11433,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, j->ref.items[i]=keyuse->val; // Save for cond removal j->ref.cond_guards[i]= keyuse->cond_guard; - if (!keyuse->val->maybe_null || keyuse->null_rejecting) + if (!keyuse->val->maybe_null() || keyuse->null_rejecting) not_null_keyparts++; /* Set ref.null_rejecting to true only if we are going to inject a @@ -11115,7 +11464,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, FALSE); if (unlikely(thd->is_fatal_error)) DBUG_RETURN(TRUE); - tmp.copy(); + tmp.copy(thd); j->ref.const_ref_part_map |= key_part_map(1) << i ; } else @@ -11315,8 +11664,7 @@ static void add_not_null_conds(JOIN *join) Item *item= tab->ref.items[keypart]; Item *notnull; Item *real= item->real_item(); - if (real->const_item() && real->type() != Item::FIELD_ITEM && - !real->is_expensive()) + if (real->can_eval_in_optimize() && real->type() != Item::FIELD_ITEM) { /* It could be constant instead of field after constant @@ -11721,7 +12069,6 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) */ Json_writer_object trace_wrapper(thd); Json_writer_object trace_conditions(thd, "attaching_conditions_to_tables"); - trace_conditions.add("original_condition", cond); Json_writer_array trace_attached_comp(thd, "attached_conditions_computation"); uint i; @@ -11866,7 +12213,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) below to check if we should use 'quick' instead. */ DBUG_PRINT("info", ("Item_int")); - tmp= new (thd->mem_root) Item_bool(thd, true); // Always true + tmp= (Item*) &Item_true; } } @@ -12014,7 +12361,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) Yet attributes of the just built condition are not needed. Thus we call sel->cond->quick_fix_field for safety. 
*/ - if (sel->cond && !sel->cond->is_fixed()) + if (sel->cond && !sel->cond->fixed()) sel->cond->quick_fix_field(); if (sel->test_quick_select(thd, tab->keys, @@ -12379,6 +12726,7 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys) (uchar *) &first_keyuse)) { + JOIN_TAB *tab; first_keyuse= save_first_keyuse; if (table->add_tmp_key(table->s->keys, parts, get_next_field_for_derived_key, @@ -12386,6 +12734,9 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys) FALSE)) return TRUE; table->reginfo.join_tab->keys.set_bit(table->s->keys); + tab= table->reginfo.join_tab; + for (uint i=0; i < parts; i++) + tab->key_dependent|= save_first_keyuse[i].used_tables; } else { @@ -12982,6 +13333,10 @@ uint check_join_cache_usage(JOIN_TAB *tab, !join->allowed_outer_join_with_cache) goto no_join_cache; + if (tab->table->pos_in_table_list->table_function && + !tab->table->pos_in_table_list->table_function->join_cache_allowed()) + goto no_join_cache; + /* Non-linked join buffers can't guarantee one match */ @@ -13583,8 +13938,11 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) char buff[256]; String str(buff,sizeof(buff),system_charset_info); str.length(0); - str.append(tab->table? tab->table->alias.c_ptr() :"<no_table_name>"); - str.append(" final_pushdown_cond"); + if (tab->table) + str.append(tab->table->alias); + else + str.append(STRING_WITH_LEN("<no_table_name>")); + str.append(STRING_WITH_LEN(" final_pushdown_cond")); print_where(tab->select_cond, str.c_ptr_safe(), QT_ORDINARY);); } uint n_top_tables= (uint)(join->join_tab_ranges.head()->end - @@ -14225,6 +14583,7 @@ void JOIN::cleanup(bool full) // Run Cached_item DTORs! 
group_fields.delete_elements(); + order_fields.delete_elements(); /* We can't call delete_elements() on copy_funcs as this will cause @@ -14466,7 +14825,25 @@ static ORDER * remove_const(JOIN *join,ORDER *first_order, COND *cond, bool change_list, bool *simple_order) { - *simple_order= join->rollup.state == ROLLUP::STATE_NONE; + /* + We can't do ORDER BY using filesort if the select list contains a non + deterministic value like RAND() or ROWNUM(). + For example: + SELECT a,ROWNUM() FROM t1 ORDER BY a; + + If we would first sort the table 't1', the ROWNUM() column would be + generated during end_send() and the order would be wrong. + + Previously we had here also a test of ROLLUP: + 'join->rollup.state == ROLLUP::STATE_NONE' + + I deleted this because the ROLLUP was never enforced because of a + bug where the inital value of simple_order was ignored. Having + ROLLUP tested now when the code is fixed, causes many test failure + and some wrong results so better to leave the code as it was + related to ROLLUP. + */ + *simple_order= !join->select_lex->rownum_in_field_list; if (join->only_const_tables()) return change_list ? 
0 : first_order; // No need to sort @@ -14500,7 +14877,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, tab->cached_eq_ref_table= FALSE; JOIN_TAB *head= join->join_tab + join->const_tables; - *simple_order= head->on_expr_ref[0] == NULL; + *simple_order&= head->on_expr_ref[0] == NULL; if (*simple_order && head->table->file->ha_table_flags() & HA_SLOW_RND_POS) { uint u1, u2, u3, u4; @@ -14532,7 +14909,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, { table_map order_tables=order->item[0]->used_tables(); if (order->item[0]->with_sum_func() || - order->item[0]->with_window_func || + order->item[0]->with_window_func() || /* If the outer table of an outer join is const (either by itself or after applying WHERE condition), grouping on a field from such a @@ -14586,37 +14963,8 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, can be used without tmp. table. */ bool can_subst_to_first_table= false; - bool first_is_in_sjm_nest= false; - if (first_is_base_table) - { - TABLE_LIST *tbl_for_first= - join->join_tab[join->const_tables].table->pos_in_table_list; - first_is_in_sjm_nest= tbl_for_first->sj_mat_info && - tbl_for_first->sj_mat_info->is_used; - } - /* - Currently we do not employ the optimization that uses multiple - equalities for ORDER BY to remove tmp table in the case when - the first table happens to be the result of materialization of - a semi-join nest ( <=> first_is_in_sjm_nest == true). - - When a semi-join nest is materialized and scanned to look for - possible matches in the remaining tables for every its row - the fields from the result of materialization are copied - into the record buffers of tables from the semi-join nest. - So these copies are used to access the remaining tables rather - than the fields from the result of materialization. 
- - Unfortunately now this so-called 'copy back' technique is - supported only if the rows are scanned with the rr_sequential - function, but not with other rr_* functions that are employed - when the result of materialization is required to be sorted. - - TODO: either to support 'copy back' technique for the above case, - or to get rid of this technique altogether. - */ if (optimizer_flag(join->thd, OPTIMIZER_SWITCH_ORDERBY_EQ_PROP) && - first_is_base_table && !first_is_in_sjm_nest && + first_is_base_table && order->item[0]->real_item()->type() == Item::FIELD_ITEM && join->cond_equal) { @@ -15147,7 +15495,7 @@ bool check_simple_equality(THD *thd, const Item::Context &ctx, Item *orig_field_item= 0; if (left_item->type() == Item::FIELD_ITEM && !((Item_field*)left_item)->get_depended_from() && - right_item->const_item() && !right_item->is_expensive()) + right_item->can_eval_in_optimize()) { orig_field_item= orig_left_item; field_item= (Item_field *) left_item; @@ -15155,7 +15503,7 @@ bool check_simple_equality(THD *thd, const Item::Context &ctx, } else if (right_item->type() == Item::FIELD_ITEM && !((Item_field*)right_item)->get_depended_from() && - left_item->const_item() && !left_item->is_expensive()) + left_item->can_eval_in_optimize()) { orig_field_item= orig_right_item; field_item= (Item_field *) right_item; @@ -15303,7 +15651,7 @@ static bool check_row_equality(THD *thd, const Arg_comparator *comparators, { Item_func_eq *eq_item; if (!(eq_item= new (thd->mem_root) Item_func_eq(thd, left_item, right_item)) || - eq_item->set_cmp_func()) + eq_item->set_cmp_func(thd)) return FALSE; eq_item->quick_fix_field(); eq_list->push_back(eq_item, thd->mem_root); @@ -15486,7 +15834,7 @@ COND *Item_cond_and::build_equal_items(THD *thd, if (!cond_args->elements && !cond_equal.current_level.elements && !eq_list.elements) - return new (thd->mem_root) Item_bool(thd, true); + return (Item*) &Item_true; List_iterator_fast<Item_equal> it(cond_equal.current_level); while ((item_equal= 
it++)) @@ -15593,7 +15941,7 @@ COND *Item_func_eq::build_equal_items(THD *thd, Item_equal *item_equal; int n= cond_equal.current_level.elements + eq_list.elements; if (n == 0) - return new (thd->mem_root) Item_bool(thd, true); + return (Item*) &Item_true; else if (n == 1) { if ((item_equal= cond_equal.current_level.pop())) @@ -15784,6 +16132,16 @@ static COND *build_equal_items(JOIN *join, COND *cond, table->on_expr= build_equal_items(join, table->on_expr, inherited, nested_join_list, ignore_on_conds, &table->cond_equal); + if (unlikely(join->thd->trace_started())) + { + const char *table_name; + if (table->nested_join) + table_name= table->nested_join->join_list.head()->alias.str; + else + table_name= table->alias.str; + trace_condition(join->thd, "ON expr", "build_equal_items", + table->on_expr, table_name); + } } } } @@ -15987,7 +16345,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels, List<Item> eq_list; Item_func_eq *eq_item= 0; if (((Item *) item_equal)->const_item() && !item_equal->val_int()) - return new (thd->mem_root) Item_bool(thd, false); + return (Item*) &Item_false; Item *item_const= item_equal->get_const(); Item_equal_fields_iterator it(*item_equal); Item *head; @@ -16053,7 +16411,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels, Don't produce equality if const is equal to item_const. 
*/ Item_func_eq *func= new (thd->mem_root) Item_func_eq(thd, item_const, upper_const); - func->set_cmp_func(); + func->set_cmp_func(thd); func->quick_fix_field(); if (func->val_int()) item= 0; @@ -16101,7 +16459,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels, field_item->remove_item_direct_ref(), head_item->remove_item_direct_ref()); - if (!eq_item || eq_item->set_cmp_func()) + if (!eq_item || eq_item->set_cmp_func(thd)) return 0; eq_item->quick_fix_field(); } @@ -16132,7 +16490,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels, switch (eq_list.elements) { case 0: - res= cond ? cond : new (thd->mem_root) Item_bool(thd, true); + res= cond ? cond : (Item*) &Item_true; break; case 1: if (!cond || cond->is_bool_literal()) @@ -16274,7 +16632,7 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab, bool all_deleted= true; while ((item_equal= it++)) { - if (item_equal->get_extraction_flag() == DELETION_FL) + if (item_equal->get_extraction_flag() == MARKER_DELETION) continue; all_deleted= false; eq_cond= eliminate_item_equal(thd, eq_cond, cond_equal->upper_levels, @@ -16334,7 +16692,7 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab, cond_equal= item_equal->upper_levels; if (cond_equal && cond_equal->current_level.head() == item_equal) cond_equal= cond_equal->upper_levels; - if (item_equal->get_extraction_flag() == DELETION_FL) + if (item_equal->get_extraction_flag() == MARKER_DELETION) return 0; cond= eliminate_item_equal(thd, 0, cond_equal, item_equal); return cond ? 
cond : org_cond; @@ -16504,7 +16862,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, if ((functype == Item_func::EQ_FUNC || functype == Item_func::EQUAL_FUNC) && and_father != cond && !left_item->const_item()) { - cond->marker=1; + cond->marker= MARKER_CHANGE_COND; COND_CMP *tmp2; /* Will work, even if malloc would fail */ if ((tmp2= new (thd->mem_root) COND_CMP(and_father, func))) @@ -16519,7 +16877,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, So make sure to use set_cmp_func() only for non-LIKE operators. */ if (functype != Item_func::LIKE_FUNC) - ((Item_bool_rowready_func2*) func)->set_cmp_func(); + ((Item_bool_rowready_func2*) func)->set_cmp_func(thd); } } else if (can_change_cond_ref_to_const(func, left_item, right_item, @@ -16537,14 +16895,14 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, { args[0]= args[1]; // For easy check thd->change_item_tree(args + 1, value); - cond->marker=1; + cond->marker= MARKER_CHANGE_COND; COND_CMP *tmp2; /* Will work, even if malloc would fail */ if ((tmp2=new (thd->mem_root) COND_CMP(and_father, func))) save_list->push_back(tmp2); } if (functype != Item_func::LIKE_FUNC) - ((Item_bool_rowready_func2*) func)->set_cmp_func(); + ((Item_bool_rowready_func2*) func)->set_cmp_func(thd); } } } @@ -16579,7 +16937,7 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list, } } } - else if (and_father != cond && !cond->marker) // In a AND group + else if (and_father != cond && cond->marker == MARKER_UNUSED) // In a AND group { if (cond->type() == Item::FUNC_ITEM && (((Item_func*) cond)->functype() == Item_func::EQ_FUNC || @@ -16587,8 +16945,8 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list, { Item_bool_func2 *func= dynamic_cast<Item_bool_func2*>(cond); Item **args= func->arguments(); - bool left_const= args[0]->const_item() && !args[0]->is_expensive(); - bool right_const= args[1]->const_item() && !args[1]->is_expensive(); + bool left_const= 
args[0]->can_eval_in_optimize(); + bool right_const= args[1]->can_eval_in_optimize(); if (!(left_const && right_const) && args[0]->cmp_type() == args[1]->cmp_type()) { @@ -16837,7 +17195,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top, conds= and_conds(join->thd, conds, table->on_expr); conds->top_level_item(); /* conds is always a new item as both cond and on_expr existed */ - DBUG_ASSERT(!conds->is_fixed()); + DBUG_ASSERT(!conds->fixed()); conds->fix_fields(join->thd, &conds); } else @@ -17891,7 +18249,7 @@ Item_cond::remove_eq_conds(THD *thd, Item::cond_result *cond_value, COND * Item::remove_eq_conds(THD *thd, Item::cond_result *cond_value, bool top_level_arg) { - if (const_item() && !is_expensive()) + if (can_eval_in_optimize()) { *cond_value= eval_const_cond() ? Item::COND_TRUE : Item::COND_FALSE; return (COND*) 0; @@ -17905,7 +18263,7 @@ COND * Item_bool_func2::remove_eq_conds(THD *thd, Item::cond_result *cond_value, bool top_level_arg) { - if (const_item() && !is_expensive()) + if (can_eval_in_optimize()) { *cond_value= eval_const_cond() ? 
Item::COND_TRUE : Item::COND_FALSE; return (COND*) 0; @@ -17915,7 +18273,7 @@ Item_bool_func2::remove_eq_conds(THD *thd, Item::cond_result *cond_value, if (args[0]->eq(args[1], true)) { if (*cond_value == Item::COND_FALSE || - !args[0]->maybe_null || functype() == Item_func::EQUAL_FUNC) + !args[0]->maybe_null() || functype() == Item_func::EQUAL_FUNC) return (COND*) 0; // Compare of identical items } } @@ -17970,7 +18328,7 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value, */ - Item *item0= new(thd->mem_root) Item_bool(thd, false); + Item *item0= (Item*) &Item_false; Item *eq_cond= new(thd->mem_root) Item_func_eq(thd, args[0], item0); if (!eq_cond) return this; @@ -18040,7 +18398,7 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value, cond= new_cond; /* Item_func_eq can't be fixed after creation so we do not check - cond->is_fixed(), also it do not need tables so we use 0 as second + cond->fixed(), also it do not need tables so we use 0 as second argument. */ cond->fix_fields(thd, &cond); @@ -18208,7 +18566,7 @@ Field *Item::create_tmp_field_int(MEM_ROOT *root, TABLE *table, h= &type_handler_slonglong; if (unsigned_flag) h= h->type_handler_unsigned(); - return h->make_and_init_table_field(root, &name, Record_addr(maybe_null), + return h->make_and_init_table_field(root, &name, Record_addr(maybe_null()), *this, table); } @@ -18242,7 +18600,7 @@ Field *Item_sum::create_tmp_field(MEM_ROOT *root, bool group, TABLE *table) case REAL_RESULT: { new_field= new (root) - Field_double(max_char_length(), maybe_null, &name, decimals, TRUE); + Field_double(max_char_length(), maybe_null(), &name, decimals, TRUE); break; } case INT_RESULT: @@ -18289,9 +18647,9 @@ Item_field::create_tmp_field_from_item_field(MEM_ROOT *root, TABLE *new_table, If item have to be able to store NULLs but underlaid field can't do it, create_tmp_field_from_field() can't be used for tmp field creation. 
*/ - if (((maybe_null && in_rollup) || + if (((maybe_null() && in_rollup()) || (new_table->in_use->create_tmp_table_for_derived && /* for mat. view/dt */ - orig_item && orig_item->maybe_null)) && + orig_item && orig_item->maybe_null())) && !field->maybe_null()) { /* @@ -18300,7 +18658,7 @@ Item_field::create_tmp_field_from_item_field(MEM_ROOT *root, TABLE *new_table, when the outer query decided at some point after name resolution phase that this field might be null. Take this into account here. */ - Record_addr rec(orig_item ? orig_item->maybe_null : maybe_null); + Record_addr rec(orig_item ? orig_item->maybe_null() : maybe_null()); const Type_handler *handler= type_handler()-> type_handler_for_tmp_table(this); result= handler->make_and_init_table_field(root, new_name, @@ -18312,12 +18670,12 @@ Item_field::create_tmp_field_from_item_field(MEM_ROOT *root, TABLE *new_table, const Type_handler *handler= Type_handler::type_handler_long_or_longlong(max_char_length(), true); result= handler->make_and_init_table_field(root, new_name, - Record_addr(maybe_null), + Record_addr(maybe_null()), *this, new_table); } else { - bool tmp_maybe_null= param->modify_item() ? maybe_null : + bool tmp_maybe_null= param->modify_item() ? maybe_null() : field->maybe_null(); result= field->create_tmp_field(root, new_table, tmp_maybe_null); if (result && ! 
param->modify_item()) @@ -18423,13 +18781,13 @@ Item_result_field::create_tmp_field_ex_from_handler( - Item_func - Item_subselect */ - DBUG_ASSERT(fixed); + DBUG_ASSERT(fixed()); DBUG_ASSERT(is_result_field()); DBUG_ASSERT(type() != NULL_ITEM); get_tmp_field_src(src, param); Field *result; if ((result= h->make_and_init_table_field(root, &name, - Record_addr(maybe_null), + Record_addr(maybe_null()), *this, table)) && param->modify_item()) result_field= result; @@ -18452,6 +18810,25 @@ Field *Item_func_sp::create_tmp_field_ex(MEM_ROOT *root, TABLE *table, return result; } + +static bool make_json_valid_expr(TABLE *table, Field *field) +{ + THD *thd= table->in_use; + Query_arena backup_arena; + Item *expr, *item_field; + + if (!table->expr_arena && table->init_expr_arena(thd->mem_root)) + return 1; + + thd->set_n_backup_active_arena(table->expr_arena, &backup_arena); + if ((item_field= new (thd->mem_root) Item_field(thd, field)) && + (expr= new (thd->mem_root) Item_func_json_valid(thd, item_field))) + field->check_constraint= add_virtual_expression(thd, expr); + thd->restore_active_arena(table->expr_arena, &backup_arena); + return field->check_constraint == NULL; +} + + /** Create field for temporary table. 
@@ -18497,6 +18874,9 @@ Field *create_tmp_field(TABLE *table, Item *item, make_copy_field); Field *result= item->create_tmp_field_ex(table->in_use->mem_root, table, &src, &prm); + if (is_json_type(item) && make_json_valid_expr(table, result)) + result= NULL; + *from_field= src.field(); *default_field= src.default_field(); if (src.item_result_field()) @@ -18541,50 +18921,10 @@ setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count) } -void -setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps) -{ - setup_tmp_table_column_bitmaps(table, bitmaps, table->s->fields); -} - - -class Create_tmp_table: public Data_type_statistics -{ - // The following members are initialized only in start() - Field **m_from_field, **m_default_field; - KEY_PART_INFO *m_key_part_info; - uchar *m_group_buff, *m_bitmaps; - // The following members are initialized in ctor - uint m_alloced_field_count; - bool m_using_unique_constraint; - uint m_temp_pool_slot; - ORDER *m_group; - bool m_distinct; - bool m_save_sum_fields; - bool m_with_cycle; - ulonglong m_select_options; - ha_rows m_rows_limit; - uint m_group_null_items; - - // counter for distinct/other fields - uint m_field_count[2]; - // counter for distinct/other fields which can be NULL - uint m_null_count[2]; - // counter for distinct/other blob fields - uint m_blobs_count[2]; - // counter for "tails" of bit fields which do not fit in a byte - uint m_uneven_bit[2]; - -public: - enum counter {distinct, other}; - /* - shows which field we are processing: distinct/other (set in processing - cycles) - */ - counter current_counter; - Create_tmp_table(const TMP_TABLE_PARAM *param, - ORDER *group, bool distinct, bool save_sum_fields, - ulonglong select_options, ha_rows rows_limit) +Create_tmp_table::Create_tmp_table(ORDER *group, bool distinct, + bool save_sum_fields, + ulonglong select_options, + ha_rows rows_limit) :m_alloced_field_count(0), m_using_unique_constraint(false), m_temp_pool_slot(MY_BIT_NONE), @@ 
-18596,39 +18936,23 @@ public: m_rows_limit(rows_limit), m_group_null_items(0), current_counter(other) - { - m_field_count[Create_tmp_table::distinct]= 0; - m_field_count[Create_tmp_table::other]= 0; - m_null_count[Create_tmp_table::distinct]= 0; - m_null_count[Create_tmp_table::other]= 0; - m_blobs_count[Create_tmp_table::distinct]= 0; - m_blobs_count[Create_tmp_table::other]= 0; - m_uneven_bit[Create_tmp_table::distinct]= 0; - m_uneven_bit[Create_tmp_table::other]= 0; - } - - void add_field(TABLE *table, Field *field, uint fieldnr, bool force_not_null_cols); - - TABLE *start(THD *thd, - TMP_TABLE_PARAM *param, - const LEX_CSTRING *table_alias); - - bool add_fields(THD *thd, TABLE *table, - TMP_TABLE_PARAM *param, List<Item> &fields); - - bool add_schema_fields(THD *thd, TABLE *table, - TMP_TABLE_PARAM *param, - const ST_SCHEMA_TABLE &schema_table); - - bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, - bool do_not_open, bool keep_row_order); - void cleanup_on_failure(THD *thd, TABLE *table); -}; +{ + m_field_count[Create_tmp_table::distinct]= 0; + m_field_count[Create_tmp_table::other]= 0; + m_null_count[Create_tmp_table::distinct]= 0; + m_null_count[Create_tmp_table::other]= 0; + m_blobs_count[Create_tmp_table::distinct]= 0; + m_blobs_count[Create_tmp_table::other]= 0; + m_uneven_bit[Create_tmp_table::distinct]= 0; + m_uneven_bit[Create_tmp_table::other]= 0; +} -void Create_tmp_table::add_field(TABLE *table, Field *field, uint fieldnr, bool force_not_null_cols) +void Create_tmp_table::add_field(TABLE *table, Field *field, uint fieldnr, + bool force_not_null_cols) { - DBUG_ASSERT(!field->field_name.str || strlen(field->field_name.str) == field->field_name.length); + DBUG_ASSERT(!field->field_name.str || + strlen(field->field_name.str) == field->field_name.length); if (force_not_null_cols) { @@ -18714,13 +19038,13 @@ TABLE *Create_tmp_table::start(THD *thd, m_temp_pool_slot = bitmap_lock_set_next(&temp_pool); if (m_temp_pool_slot != MY_BIT_NONE) // we 
got a slot - sprintf(path, "%s-temptable-%lx-%i", tmp_file_prefix, + sprintf(path, "%s-%s-%lx-%i", tmp_file_prefix, param->tmp_name, current_pid, m_temp_pool_slot); else { /* if we run out of slots or we are not using tempool */ - sprintf(path, "%s-temptable-%lx-%llx-%x", tmp_file_prefix,current_pid, - thd->thread_id, thd->tmp_table++); + sprintf(path, "%s-%s-%lx-%llx-%x", tmp_file_prefix, param->tmp_name, + current_pid, thd->thread_id, thd->tmp_table++); } /* @@ -18751,7 +19075,7 @@ TABLE *Create_tmp_table::start(THD *thd, - convert BIT fields to 64-bit long, needed because MEMORY tables can't index BIT fields. */ - (*tmp->item)->marker=4; // Store null in key + (*tmp->item)->marker= MARKER_NULL_KEY; // Store null in key if ((*tmp->item)->too_big_for_varchar()) m_using_unique_constraint= true; } @@ -18873,7 +19197,7 @@ bool Create_tmp_table::add_fields(THD *thd, Item *item; Field **tmp_from_field= m_from_field; while (!m_with_cycle && (item= li++)) - if (item->common_flags & IS_IN_WITH_CYCLE) + if (item->is_in_with_cycle()) { m_with_cycle= true; /* @@ -18891,8 +19215,7 @@ bool Create_tmp_table::add_fields(THD *thd, uint uneven_delta; current_counter= (((param->hidden_field_count < (fieldnr + 1)) && distinct_record_structure && - (!m_with_cycle || - (item->common_flags & IS_IN_WITH_CYCLE)))? + (!m_with_cycle || item->is_in_with_cycle())) ? distinct : other); Item::Type type= item->type(); @@ -18960,7 +19283,7 @@ bool Create_tmp_table::add_fields(THD *thd, new_field->maybe_null() is still false, it will be changed below. 
But we have to setup Item_field correctly */ - arg->maybe_null=1; + arg->set_maybe_null(); } if (current_counter == distinct) new_field->flags|= FIELD_PART_OF_TMP_UNIQUE; @@ -18990,13 +19313,15 @@ bool Create_tmp_table::add_fields(THD *thd, !param->force_copy_fields && (not_all_columns || m_group !=0), /* - If item->marker == 4 then we force create_tmp_field - to create a 64-bit longs for BIT fields because HEAP - tables can't index BIT fields directly. We do the - same for distinct, as we want the distinct index - to be usable in this case too. + If item->marker == MARKER_NULL_KEY then we + force create_tmp_field to create a 64-bit + longs for BIT fields because HEAP tables + can't index BIT fields directly. We do the + same for distinct, as we want the distinct + index to be usable in this case too. */ - item->marker == 4 || param->bit_fields_as_long, + item->marker == MARKER_NULL_KEY || + param->bit_fields_as_long, param->force_copy_fields); if (unlikely(!new_field)) { @@ -19036,7 +19361,7 @@ bool Create_tmp_table::add_fields(THD *thd, m_field_count[current_counter]++; m_uneven_bit[current_counter]+= (m_uneven_bit_length - uneven_delta); - if (item->marker == 4 && item->maybe_null) + if (item->marker == MARKER_NULL_KEY && item->maybe_null()) { m_group_null_items++; new_field->flags|= GROUP_FLAG; @@ -19064,6 +19389,40 @@ err: } +bool Create_tmp_table::choose_engine(THD *thd, TABLE *table, + TMP_TABLE_PARAM *param) +{ + TABLE_SHARE *share= table->s; + DBUG_ENTER("Create_tmp_table::choose_engine"); + /* + If result table is small; use a heap, otherwise TMP_TABLE_HTON (Aria) + In the future we should try making storage engine selection more dynamic + */ + + if (share->blob_fields || m_using_unique_constraint || + (thd->variables.big_tables && + !(m_select_options & SELECT_SMALL_RESULT)) || + (m_select_options & TMP_TABLE_FORCE_MYISAM) || + thd->variables.tmp_memory_table_size == 0) + { + share->db_plugin= ha_lock_engine(0, TMP_ENGINE_HTON); + table->file= 
get_new_handler(share, &table->mem_root, + share->db_type()); + if (m_group && + (param->group_parts > table->file->max_key_parts() || + param->group_length > table->file->max_key_length())) + m_using_unique_constraint= true; + } + else + { + share->db_plugin= ha_lock_engine(0, heap_hton); + table->file= get_new_handler(share, &table->mem_root, + share->db_type()); + } + DBUG_RETURN(!table->file); +} + + bool Create_tmp_table::finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, @@ -19090,28 +19449,7 @@ bool Create_tmp_table::finalize(THD *thd, DBUG_ASSERT(m_alloced_field_count >= share->fields); DBUG_ASSERT(m_alloced_field_count >= share->blob_fields); - /* If result table is small; use a heap */ - /* future: storage engine selection can be made dynamic? */ - if (share->blob_fields || m_using_unique_constraint - || (thd->variables.big_tables && !(m_select_options & SELECT_SMALL_RESULT)) - || (m_select_options & TMP_TABLE_FORCE_MYISAM) - || thd->variables.tmp_memory_table_size == 0) - { - share->db_plugin= ha_lock_engine(0, TMP_ENGINE_HTON); - table->file= get_new_handler(share, &table->mem_root, - share->db_type()); - if (m_group && - (param->group_parts > table->file->max_key_parts() || - param->group_length > table->file->max_key_length())) - m_using_unique_constraint= true; - } - else - { - share->db_plugin= ha_lock_engine(0, heap_hton); - table->file= get_new_handler(share, &table->mem_root, - share->db_type()); - } - if (!table->file) + if (choose_engine(thd, table, param)) goto err; if (table->file->set_ha_share_ref(&share->ha_share)) @@ -19163,7 +19501,7 @@ bool Create_tmp_table::finalize(THD *thd, share->default_values= table->record[1]+alloc_length; } - setup_tmp_table_column_bitmaps(table, m_bitmaps); + setup_tmp_table_column_bitmaps(table, m_bitmaps, table->s->fields); recinfo=param->start_recinfo; null_flags=(uchar*) table->record[0]; @@ -19322,7 +19660,7 @@ bool Create_tmp_table::finalize(THD *thd, { Field 
*field=(*cur_group->item)->get_tmp_table_field(); DBUG_ASSERT(field->table == table); - bool maybe_null=(*cur_group->item)->maybe_null; + bool maybe_null=(*cur_group->item)->maybe_null(); m_key_part_info->null_bit=0; m_key_part_info->field= field; m_key_part_info->fieldnr= field->field_index + 1; @@ -19350,7 +19688,8 @@ bool Create_tmp_table::finalize(THD *thd, We solve this by marking the item as !maybe_null to ensure that the key,field and item definition match. */ - (*cur_group->item)->maybe_null= maybe_null= 0; + maybe_null= 0; + (*cur_group->item)->base_flags&= ~item_base_t::MAYBE_NULL; } if (!(cur_group->field= field->new_key_field(thd->mem_root,table, @@ -19613,8 +19952,8 @@ TABLE *create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields, bool keep_row_order) { TABLE *table; - Create_tmp_table maker(param, group, - distinct, save_sum_fields, select_options, rows_limit); + Create_tmp_table maker(group, distinct, save_sum_fields, select_options, + rows_limit); if (!(table= maker.start(thd, param, table_alias)) || maker.add_fields(thd, table, param, fields) || maker.finalize(thd, table, param, do_not_open, keep_row_order)) @@ -19633,7 +19972,7 @@ TABLE *create_tmp_table_for_schema(THD *thd, TMP_TABLE_PARAM *param, bool do_not_open, bool keep_row_order) { TABLE *table; - Create_tmp_table maker(param, (ORDER *) NULL, false, false, + Create_tmp_table maker((ORDER *) NULL, false, false, select_options, HA_POS_ERROR); if (!(table= maker.start(thd, param, &table_alias)) || maker.add_schema_fields(thd, table, param, schema_table) || @@ -19804,7 +20143,7 @@ bool Virtual_tmp_table::sp_set_all_fields_from_item_list(THD *thd, bool Virtual_tmp_table::sp_set_all_fields_from_item(THD *thd, Item *value) { - DBUG_ASSERT(value->is_fixed()); + DBUG_ASSERT(value->fixed()); DBUG_ASSERT(value->cols() == s->fields); for (uint i= 0; i < value->cols(); i++) { @@ -20686,19 +21025,6 @@ do_select(JOIN *join, Procedure *procedure) } -int rr_sequential_and_unpack(READ_RECORD 
*info) -{ - int error; - if (unlikely((error= rr_sequential(info)))) - return error; - - for (Copy_field *cp= info->copy_field; cp != info->copy_field_end; cp++) - (*cp->do_copy)(cp); - - return error; -} - - /** @brief Instantiates temporary table @@ -22017,6 +22343,8 @@ bool test_if_use_dynamic_range_scan(JOIN_TAB *join_tab) int join_init_read_record(JOIN_TAB *tab) { + bool need_unpacking= FALSE; + JOIN *join= tab->join; /* Note: the query plan tree for the below operations is constructed in save_agg_explain_data. @@ -22024,6 +22352,12 @@ int join_init_read_record(JOIN_TAB *tab) if (tab->distinct && tab->remove_duplicates()) // Remove duplicates. return 1; + if (join->top_join_tab_count != join->const_tables) + { + TABLE_LIST *tbl= tab->table->pos_in_table_list; + need_unpacking= tbl ? tbl->is_sjm_scan_table() : FALSE; + } + tab->build_range_rowid_filter_if_needed(); if (tab->filesort && tab->sort_table()) // Sort table. @@ -22031,6 +22365,11 @@ int join_init_read_record(JOIN_TAB *tab) DBUG_EXECUTE_IF("kill_join_init_read_record", tab->join->thd->set_killed(KILL_QUERY);); + + + if (!tab->preread_init_done && tab->preread_init()) + return 1; + if (tab->select && tab->select->quick && tab->select->quick->reset()) { /* Ensures error status is propagated back to client */ @@ -22041,19 +22380,7 @@ int join_init_read_record(JOIN_TAB *tab) /* make sure we won't get ER_QUERY_INTERRUPTED from any code below */ DBUG_EXECUTE_IF("kill_join_init_read_record", tab->join->thd->reset_killed();); - if (!tab->preread_init_done && tab->preread_init()) - return 1; - - if (init_read_record(&tab->read_record, tab->join->thd, tab->table, - tab->select, tab->filesort_result, 1,1, FALSE)) - return 1; - return tab->read_record.read_record(); -} - -int -join_read_record_no_init(JOIN_TAB *tab) -{ Copy_field *save_copy, *save_copy_end; /* @@ -22063,12 +22390,19 @@ join_read_record_no_init(JOIN_TAB *tab) save_copy= tab->read_record.copy_field; save_copy_end= tab->read_record.copy_field_end; - 
init_read_record(&tab->read_record, tab->join->thd, tab->table, - tab->select, tab->filesort_result, 1, 1, FALSE); + if (init_read_record(&tab->read_record, tab->join->thd, tab->table, + tab->select, tab->filesort_result, 1, 1, FALSE)) + return 1; tab->read_record.copy_field= save_copy; tab->read_record.copy_field_end= save_copy_end; - tab->read_record.read_record_func= rr_sequential_and_unpack; + + if (need_unpacking) + { + tab->read_record.read_record_func_and_unpack_calls= + tab->read_record.read_record_func; + tab->read_record.read_record_func = read_record_func_for_rr_and_unpack; + } return tab->read_record.read_record(); } @@ -22262,8 +22596,7 @@ join_read_next_same_or_null(READ_RECORD *info) /* ARGSUSED */ static enum_nested_loop_state -end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), - bool end_of_records) +end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) { DBUG_ENTER("end_send"); /* @@ -22275,40 +22608,60 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), //TODO pass fields via argument List<Item> *fields= join_tab ? (join_tab-1)->fields : join->fields; - if (!end_of_records) + if (end_of_records) { - if (join->table_count && - join->join_tab->is_using_loose_index_scan()) - { - /* Copy non-aggregated fields when loose index scan is used. */ - copy_fields(&join->tmp_table_param); - } - if (join->having && join->having->val_int() == 0) - DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having - if (join->procedure) + if (join->procedure && join->procedure->end_of_records()) + DBUG_RETURN(NESTED_LOOP_ERROR); + DBUG_RETURN(NESTED_LOOP_OK); + } + + if (join->table_count && + join->join_tab->is_using_loose_index_scan()) + { + /* Copy non-aggregated fields when loose index scan is used. 
*/ + copy_fields(&join->tmp_table_param); + } + if (join->having && join->having->val_int() == 0) + DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having + if (join->procedure) + { + if (join->procedure->send_row(join->procedure_fields_list)) + DBUG_RETURN(NESTED_LOOP_ERROR); + DBUG_RETURN(NESTED_LOOP_OK); + } + + if (join->send_records >= join->unit->lim.get_select_limit() && + join->unit->lim.is_with_ties()) + { + /* + Stop sending rows if the order fields corresponding to WITH TIES + have changed. + */ + int idx= test_if_item_cache_changed(join->order_fields); + if (idx >= 0) + join->do_send_rows= false; + } + + if (join->do_send_rows) + { + int error; + /* result < 0 if row was not accepted and should not be counted */ + if (unlikely((error= join->result->send_data_with_check(*fields, + join->unit, + join->send_records)))) { - if (join->procedure->send_row(join->procedure_fields_list)) + if (error > 0) DBUG_RETURN(NESTED_LOOP_ERROR); - DBUG_RETURN(NESTED_LOOP_OK); - } - if (join->do_send_rows) - { - int error; - /* result < 0 if row was not accepted and should not be counted */ - if (unlikely((error= join->result->send_data_with_check(*fields, - join->unit, - join->send_records)))) - { - if (error > 0) - DBUG_RETURN(NESTED_LOOP_ERROR); - // error < 0 => duplicate row - join->duplicate_rows++; - } + // error < 0 => duplicate row + join->duplicate_rows++; } + } - ++join->send_records; - if (join->send_records >= join->unit->lim.get_select_limit() && - !join->do_send_rows) + join->send_records++; + join->accepted_rows++; + if (join->send_records >= join->unit->lim.get_select_limit()) + { + if (!join->do_send_rows) { /* If we have used Priority Queue for optimizing order by with limit, @@ -22324,55 +22677,57 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_PRINT("info", ("filesort NESTED_LOOP_QUERY_LIMIT")); DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); } + DBUG_RETURN(NESTED_LOOP_OK); } - if (join->send_records >= join->unit->lim.get_select_limit() 
&& - join->do_send_rows) + + /* For WITH TIES we keep sending rows until a group has changed. */ + if (join->unit->lim.is_with_ties()) { - if (join->select_options & OPTION_FOUND_ROWS) + /* Prepare the order_fields comparison for with ties. */ + if (join->send_records == join->unit->lim.get_select_limit()) + (void) test_if_group_changed(join->order_fields); + /* One more loop, to check if the next row matches with_ties or not. */ + DBUG_RETURN(NESTED_LOOP_OK); + } + if (join->select_options & OPTION_FOUND_ROWS) + { + JOIN_TAB *jt=join->join_tab; + if ((join->table_count == 1) && !join->sort_and_group + && !join->send_group_parts && !join->having && !jt->select_cond && + !(jt->select && jt->select->quick) && + (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) && + (jt->ref.key < 0)) { - JOIN_TAB *jt=join->join_tab; - if ((join->table_count == 1) && !join->sort_and_group - && !join->send_group_parts && !join->having && !jt->select_cond && - !(jt->select && jt->select->quick) && - (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) && - (jt->ref.key < 0)) - { - /* Join over all rows in table; Return number of found rows */ - TABLE *table=jt->table; + /* Join over all rows in table; Return number of found rows */ + TABLE *table=jt->table; - if (jt->filesort_result) // If filesort was used - { - join->send_records= jt->filesort_result->found_rows; - } - else - { - table->file->info(HA_STATUS_VARIABLE); - join->send_records= table->file->stats.records; - } - } - else - { - join->do_send_rows= 0; - if (join->unit->fake_select_lex) - join->unit->fake_select_lex->select_limit= 0; - DBUG_RETURN(NESTED_LOOP_OK); - } + if (jt->filesort_result) // If filesort was used + { + join->send_records= jt->filesort_result->found_rows; + } + else + { + table->file->info(HA_STATUS_VARIABLE); + join->send_records= table->file->stats.records; + } + } + else + { + join->do_send_rows= 0; + if (join->unit->fake_select_lex) + 
join->unit->fake_select_lex->limit_params.select_limit= 0; + DBUG_RETURN(NESTED_LOOP_OK); } - DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely - } - else if (join->send_records >= join->fetch_limit) - { - /* - There is a server side cursor and all rows for - this fetch request are sent. - */ - DBUG_RETURN(NESTED_LOOP_CURSOR_LIMIT); } + DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely } - else + else if (join->send_records >= join->fetch_limit) { - if (join->procedure && join->procedure->end_of_records()) - DBUG_RETURN(NESTED_LOOP_ERROR); + /* + There is a server side cursor and all rows for + this fetch request are sent. + */ + DBUG_RETURN(NESTED_LOOP_CURSOR_LIMIT); } DBUG_RETURN(NESTED_LOOP_OK); } @@ -22388,8 +22743,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), */ enum_nested_loop_state -end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), - bool end_of_records) +end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) { int idx= -1; enum_nested_loop_state ok_code= NESTED_LOOP_OK; @@ -22411,6 +22765,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { if (join->procedure) join->procedure->end_group(); + /* Test if there was a group change. */ if (idx < (int) join->send_group_parts) { int error=0; @@ -22418,7 +22773,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { if (join->having && join->having->val_int() == 0) error= -1; // Didn't satisfy having - else + else { if (join->do_send_rows) error=join->procedure->send_row(*fields) ? 1 : 0; @@ -22429,6 +22784,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } else { + /* Reset all sum functions on group change. 
*/ if (!join->first_record) { List_iterator_fast<Item> it(*join->fields); @@ -22468,21 +22824,27 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (end_of_records) DBUG_RETURN(NESTED_LOOP_OK); - if (join->send_records >= join->unit->lim.get_select_limit() && - join->do_send_rows) - { - if (!(join->select_options & OPTION_FOUND_ROWS)) - DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely - join->do_send_rows=0; - join->unit->lim.set_unlimited(); + if (join->send_records >= join->unit->lim.get_select_limit() && + join->do_send_rows) + { + /* WITH TIES can be computed during end_send_group if + the order by is a subset of group by and we had an index + available to compute group by order directly. */ + if (!join->unit->lim.is_with_ties() || + idx < (int)join->with_ties_order_count) + { + if (!(join->select_options & OPTION_FOUND_ROWS)) + DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely + join->do_send_rows= 0; + join->unit->lim.set_unlimited(); + } } else if (join->send_records >= join->fetch_limit) { /* There is a server side cursor and all rows for this fetch request are sent. - */ - /* + Preventing code duplication. When finished with the group reset the group functions and copy_fields. We fall through. 
bug #11904 */ @@ -22509,11 +22871,13 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (join->procedure) join->procedure->add(); join->group_sent= false; + join->accepted_rows++; DBUG_RETURN(ok_code); } } if (update_sum_func(join->sum_funcs)) DBUG_RETURN(NESTED_LOOP_ERROR); + join->accepted_rows++; if (join->procedure) join->procedure->add(); DBUG_RETURN(NESTED_LOOP_OK); @@ -22538,6 +22902,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { int error; join->found_records++; + join->accepted_rows++; if ((error= table->file->ha_write_tmp_row(table->record[0]))) { if (likely(!table->file->is_fatal_error(error, HA_CHECK_DUP))) @@ -22612,7 +22977,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } item->save_org_in_field(group->field, group->fast_field_copier_func); /* Store in the used key if the field was 0 */ - if (item->maybe_null) + if (item->maybe_null()) group->buff[-1]= (char) group->field->is_null(); } if (!table->file->ha_index_read_map(table->record[1], @@ -22653,6 +23018,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } join_tab->send_records++; end: + join->accepted_rows++; // For rownum() if (unlikely(join->thd->check_killed())) { DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ @@ -22681,6 +23047,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ + join->accepted_rows++; if (likely(!(error= table->file->ha_write_tmp_row(table->record[0])))) join_tab->send_records++; // New group else @@ -22724,6 +23091,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } + join->accepted_rows++; // For rownum() DBUG_RETURN(NESTED_LOOP_OK); } @@ -22747,6 +23115,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab 
__attribute__((unused)), int idx= -1; DBUG_ENTER("end_write_group"); + join->accepted_rows++; if (!join->first_record || end_of_records || (idx=test_if_group_changed(join->group_fields)) >= 0) { @@ -23068,6 +23437,8 @@ make_cond_for_table_from_pred(THD *thd, Item *root_cond, Item *cond, return new_cond; } } + else if (cond->basic_const_item()) + return cond; if (is_top_and_level && used_table == rand_table_bit && (cond->used_tables() & ~OUTER_REF_TABLE_BIT) != rand_table_bit) @@ -23081,13 +23452,13 @@ make_cond_for_table_from_pred(THD *thd, Item *root_cond, Item *cond, table_count times, we mark each item that we have examined with the result of the test */ - if ((cond->marker == 3 && !retain_ref_cond) || + if ((cond->marker == MARKER_CHECK_ON_READ && !retain_ref_cond) || (cond->used_tables() & ~tables)) return (COND*) 0; // Can't check this yet - if (cond->marker == 2 || cond->eq_cmp_result() == Item::COND_OK) + if (cond->marker == MARKER_PROCESSED || cond->eq_cmp_result() == Item::COND_OK) { - cond->set_join_tab_idx(join_tab_idx_arg); + cond->set_join_tab_idx((uint8) join_tab_idx_arg); return cond; // Not boolean op } @@ -23099,18 +23470,18 @@ make_cond_for_table_from_pred(THD *thd, Item *root_cond, Item *cond, if (left_item->type() == Item::FIELD_ITEM && !retain_ref_cond && test_if_ref(root_cond, (Item_field*) left_item,right_item)) { - cond->marker=3; // Checked when read + cond->marker= MARKER_CHECK_ON_READ; // Checked when read return (COND*) 0; } if (right_item->type() == Item::FIELD_ITEM && !retain_ref_cond && test_if_ref(root_cond, (Item_field*) right_item,left_item)) { - cond->marker=3; // Checked when read + cond->marker= MARKER_CHECK_ON_READ; // Checked when read return (COND*) 0; } } - cond->marker=2; - cond->set_join_tab_idx(join_tab_idx_arg); + cond->marker= MARKER_PROCESSED; + cond->set_join_tab_idx((uint8) join_tab_idx_arg); return cond; } @@ -23207,9 +23578,10 @@ make_cond_after_sjm(THD *thd, Item *root_cond, Item *cond, table_map tables, of the 
test */ - if (cond->marker == 3 || (cond->used_tables() & ~(tables | sjm_tables))) + if (cond->marker == MARKER_CHECK_ON_READ || + (cond->used_tables() & ~(tables | sjm_tables))) return (COND*) 0; // Can't check this yet - if (cond->marker == 2 || cond->eq_cmp_result() == Item::COND_OK) + if (cond->marker == MARKER_PROCESSED || cond->eq_cmp_result() == Item::COND_OK) return cond; // Not boolean op /* @@ -23223,17 +23595,17 @@ make_cond_after_sjm(THD *thd, Item *root_cond, Item *cond, table_map tables, if (left_item->type() == Item::FIELD_ITEM && test_if_ref(root_cond, (Item_field*) left_item,right_item)) { - cond->marker=3; // Checked when read + cond->marker= MARKER_CHECK_ON_READ; return (COND*) 0; } if (right_item->type() == Item::FIELD_ITEM && test_if_ref(root_cond, (Item_field*) right_item,left_item)) { - cond->marker=3; // Checked when read + cond->marker= MARKER_CHECK_ON_READ; return (COND*) 0; } } - cond->marker=2; + cond->marker= MARKER_PROCESSED; return cond; } @@ -24367,6 +24739,7 @@ create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort) if (table->s->tmp_table) table->file->info(HA_STATUS_VARIABLE); // Get record count + fsort->accepted_rows= &join->accepted_rows; // For ROWNUM file_sort= filesort(thd, table, fsort, fsort->tracker, join, tab->table->map); DBUG_ASSERT(tab->filesort_result == 0); tab->filesort_result= file_sort; @@ -24760,18 +25133,20 @@ cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref) bool cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) { - Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE); + enum_check_fields org_count_cuted_fields= thd->count_cuted_fields; MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); bool result= 0; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; for (store_key **copy=ref->key_copy ; *copy ; copy++) { - if ((*copy)->copy() & 1) + if ((*copy)->copy(thd) & 1) { result= 1; break; } } + thd->count_cuted_fields= org_count_cuted_fields; 
dbug_tmp_restore_column_map(&table->write_set, old_map); return result; } @@ -24878,8 +25253,8 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array, order_item_type == Item::REF_ITEM) { from_field= find_field_in_tables(thd, (Item_ident*) order_item, tables, - NULL, &view_ref, IGNORE_ERRORS, FALSE, - FALSE); + NULL, ignored_tables_list_t(NULL), + &view_ref, IGNORE_ERRORS, FALSE, FALSE); if (!from_field) from_field= (Field*) not_found_field; } @@ -24946,7 +25321,7 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array, inspite of that fix_fields() calls find_item_in_list() one more time. - We check order_item->is_fixed() because Item_func_group_concat can put + We check order_item->fixed() because Item_func_group_concat can put arguments for which fix_fields already was called. */ if (order_item->fix_fields_if_needed_for_order_by(thd, order->item) || @@ -25000,7 +25375,7 @@ int setup_order(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, all_fields, false, true, from_window_spec)) return 1; Item * const item= *order->item; - if (item->with_window_func && context_analysis_place != IN_ORDER_BY) + if (item->with_window_func() && context_analysis_place != IN_ORDER_BY) { my_error(ER_WINDOW_FUNCTION_IN_WINDOW_SPEC, MYF(0)); return 1; @@ -25011,14 +25386,14 @@ int setup_order(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, an ORDER BY clause */ - if (for_union && (item->with_sum_func() || item->with_window_func)) + if (for_union && (item->with_sum_func() || item->with_window_func())) { my_error(ER_AGGREGATE_ORDER_FOR_UNION, MYF(0), number); return 1; } if ((from_window_spec && item->with_sum_func() && - item->type() != Item::SUM_FUNC_ITEM) || item->with_window_func) + item->type() != Item::SUM_FUNC_ITEM) || item->with_window_func()) { item->split_sum_func(thd, ref_pointer_array, all_fields, SPLIT_SUM_SELECT); @@ -25076,13 +25451,13 @@ setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, if 
(find_order_in_list(thd, ref_pointer_array, tables, ord, fields, all_fields, true, true, from_window_spec)) return 1; - (*ord->item)->marker= UNDEF_POS; /* Mark found */ + (*ord->item)->marker= MARKER_UNDEF_POS; /* Mark found */ if ((*ord->item)->with_sum_func() && context_analysis_place == IN_GROUP_BY) { my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*ord->item)->full_name()); return 1; } - if ((*ord->item)->with_window_func) + if ((*ord->item)->with_window_func()) { if (context_analysis_place == IN_GROUP_BY) my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0)); @@ -25112,6 +25487,9 @@ setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, we throw an error. If there are no fields in the created list for a select list expression this means that all fields in it are used under aggregate functions. + + Note that for items in the select list (fields), Item_field->markers + contains the position of the field in the select list. */ Item *item; Item_field *field; @@ -25122,7 +25500,8 @@ setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, field= naf_it++; while (field && (item=li++)) { - if (item->type() != Item::SUM_FUNC_ITEM && item->marker >= 0 && + if (item->type() != Item::SUM_FUNC_ITEM && + item->marker != MARKER_UNDEF_POS && !item->const_item() && !(item->real_item()->type() == Item::FIELD_ITEM && item->used_tables() & OUTER_REF_TABLE_BIT)) @@ -25219,7 +25598,7 @@ create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array, *all_order_by_fields_used= 1; while ((item=li++)) - item->marker=0; /* Marker that field is not used */ + item->marker= MARKER_UNUSED; /* Marker that field is not used */ prev= &group; group=0; for (order=order_list ; order; order=order->next) @@ -25231,7 +25610,7 @@ create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array, return 0; *prev=ord; prev= &ord->next; - (*ord->item)->marker=1; + (*ord->item)->marker= MARKER_FOUND_IN_ORDER; } else *all_order_by_fields_used= 0; @@ -25240,7 +25619,8 @@ 
create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array, li.rewind(); while ((item=li++)) { - if (!item->const_item() && !item->with_sum_func() && !item->marker) + if (!item->const_item() && !item->with_sum_func() && + item->marker == MARKER_UNUSED) { /* Don't put duplicate columns from the SELECT list into the @@ -25337,11 +25717,9 @@ count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param, } else { - With_sum_func_cache *cache= field->get_with_sum_func_cache(); param->func_count++; - // "field" can point to Item_std_field, so "cache" can be NULL here. - if (reset_with_sum_func && cache) - cache->reset_with_sum_func(); + if (reset_with_sum_func) + field->with_flags&= ~item_with_t::SUM_FUNC; } } } @@ -25350,21 +25728,27 @@ count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param, /** Return 1 if second is a subpart of first argument. - If first parts has different direction, change it to second part - (group is sorted like order) + SIDE EFFECT: + For all the first items in the group by list that match, the sort + direction of the GROUP BY items are set to the same as those given by the + ORDER BY. + The direction of the group does not matter if the ORDER BY clause overrides + it anyway. 
*/ static bool -test_if_subpart(ORDER *a,ORDER *b) +test_if_subpart(ORDER *group_by, ORDER *order_by) { - for (; a && b; a=a->next,b=b->next) + while (group_by && order_by) { - if ((*a->item)->eq(*b->item,1)) - a->direction=b->direction; + if ((*group_by->item)->eq(*order_by->item, 1)) + group_by->direction= order_by->direction; else return 0; + group_by= group_by->next; + order_by= order_by->next; } - return MY_TEST(!b); + return MY_TEST(!order_by); } /** @@ -25495,7 +25879,7 @@ void calc_group_buffer(TMP_TABLE_PARAM *param, ORDER *group) } } parts++; - if (group_item->maybe_null) + if (group_item->maybe_null()) null_parts++; } param->group_length= key_length + null_parts; @@ -25541,6 +25925,19 @@ make_group_fields(JOIN *main_join, JOIN *curr_join) return (0); } +static bool +fill_cached_item_list(THD *thd, List<Cached_item> *list, ORDER *order, + uint max_number_of_elements = UINT_MAX) +{ + for (; order && max_number_of_elements ; + order= order->next, max_number_of_elements--) + { + Cached_item *tmp= new_Cached_item(thd, *order->item, true); + if (!tmp || list->push_front(tmp)) + return true; + } + return false; +} /** Get a list of buffers for saving last group. 
@@ -25549,21 +25946,20 @@ make_group_fields(JOIN *main_join, JOIN *curr_join) */ static bool -alloc_group_fields(JOIN *join,ORDER *group) +alloc_group_fields(JOIN *join, ORDER *group) { - if (group) - { - for (; group ; group=group->next) - { - Cached_item *tmp=new_Cached_item(join->thd, *group->item, TRUE); - if (!tmp || join->group_fields.push_front(tmp)) - return TRUE; - } - } + if (fill_cached_item_list(join->thd, &join->group_fields, group)) + return true; join->sort_and_group=1; /* Mark for do_select */ - return FALSE; + return false; } +static bool +alloc_order_fields(JOIN *join, ORDER *order, uint max_number_of_elements) +{ + return fill_cached_item_list(join->thd, &join->order_fields, order, + max_number_of_elements); +} /* @@ -25952,7 +26348,7 @@ change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array, Field *field; enum Item::Type item_type= item->type(); if ((item->with_sum_func() && item_type != Item::SUM_FUNC_ITEM) || - item->with_window_func) + item->with_window_func()) item_field= item; else if (item_type == Item::FIELD_ITEM || item_type == Item::DEFAULT_VALUE_ITEM) @@ -26118,13 +26514,15 @@ static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr) } -static bool prepare_sum_aggregators(Item_sum **func_ptr, bool need_distinct) +static bool prepare_sum_aggregators(THD *thd,Item_sum **func_ptr, + bool need_distinct) { Item_sum *func; DBUG_ENTER("prepare_sum_aggregators"); while ((func= *(func_ptr++))) { - if (func->set_aggregator(need_distinct && func->has_with_distinct() ? + if (func->set_aggregator(thd, + need_distinct && func->has_with_distinct() ? 
Aggregator::DISTINCT_AGGREGATOR : Aggregator::SIMPLE_AGGREGATOR)) DBUG_RETURN(TRUE); @@ -26216,7 +26614,7 @@ copy_funcs(Item **func_ptr, const THD *thd) for (; (func = *func_ptr) ; func_ptr++) { if (func->type() == Item::FUNC_ITEM && - ((Item_func *) func)->with_window_func) + ((Item_func *) func)->with_window_func()) continue; func->save_in_result_field(1); /* @@ -26261,7 +26659,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab) } if (unlikely(thd->is_fatal_error)) DBUG_RETURN(TRUE); - if (!cond->is_fixed()) + if (!cond->fixed()) { Item *tmp_item= (Item*) cond; cond->fix_fields(thd, &tmp_item); @@ -26392,8 +26790,7 @@ static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list, } if (arg_changed) { - expr->maybe_null= 1; - expr->in_rollup= 1; + expr->base_flags|= item_base_t::MAYBE_NULL | item_base_t::IN_ROLLUP; *changed= TRUE; } } @@ -26464,8 +26861,7 @@ bool JOIN::rollup_init() { if (*group_tmp->item == item) { - item->maybe_null= 1; - item->in_rollup= 1; + item->base_flags|= item_base_t::MAYBE_NULL | item_base_t::IN_ROLLUP; found_in_group= 1; break; } @@ -26481,7 +26877,7 @@ bool JOIN::rollup_init() Marking the expression item as 'with_sum_func' will ensure this. 
*/ if (changed) - item->get_with_sum_func_cache()->set_with_sum_func(); + item->with_flags|= item_with_t::SUM_FUNC; } } return 0; @@ -26651,7 +27047,8 @@ bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields, Item_null_result *null_item= new (thd->mem_root) Item_null_result(thd); if (!null_item) return 1; - item->maybe_null= 1; // Value will be null sometimes + // Value will be null sometimes + item->set_maybe_null(); null_item->result_field= item->get_tmp_table_field(); item= null_item; break; @@ -26735,7 +27132,8 @@ int JOIN::rollup_send_data(uint idx) 1 if write_data_failed() */ -int JOIN::rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param_arg, TABLE *table_arg) +int JOIN::rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param_arg, + TABLE *table_arg) { uint i; for (i= send_group_parts ; i-- > idx ; ) @@ -27184,6 +27582,9 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta, !((QUICK_ROR_INTERSECT_SELECT*)cur_quick)->need_to_fetch_row) key_read=1; + if (table_list->table_function) + eta->push_extra(ET_TABLE_FUNCTION); + if (info) { eta->push_extra(info); @@ -27673,7 +28074,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, for such queries, we'll get here before having called subquery_expr->fix_fields(), which will cause failure to */ - if (unit->item && !unit->item->is_fixed()) + if (unit->item && !unit->item->fixed()) { Item *ref= unit->item; if (unit->item->fix_fields(thd, &ref)) @@ -27931,13 +28332,13 @@ Index_hint::print(THD *thd, String *str) case INDEX_HINT_USE: str->append(STRING_WITH_LEN("USE INDEX")); break; case INDEX_HINT_FORCE: str->append(STRING_WITH_LEN("FORCE INDEX")); break; } - str->append (STRING_WITH_LEN(" (")); + str->append(STRING_WITH_LEN(" (")); if (key_name.length) { if (thd && !system_charset_info->strnncoll( (const uchar *)key_name.str, key_name.length, - (const uchar *)primary_key_name, - strlen(primary_key_name))) + (const uchar *)primary_key_name.str, + 
primary_key_name.length)) str->append(primary_key_name); else append_identifier(thd, str, &key_name); @@ -28020,6 +28421,14 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str, cmp_name= table_name.str; } } + else if (table_function) + { + /* A table function. */ + (void) table_function->print(thd, this, str, query_type); + str->append(' '); + append_identifier(thd, str, &alias); + cmp_name= alias.str; + } else { // A normal table @@ -28050,7 +28459,7 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str, for (i= 1; i <= num_parts; i++) { String *name= name_it++; - append_identifier(thd, str, name->c_ptr(), name->length()); + append_identifier(thd, str, name->ptr(), name->length()); if (i != num_parts) str->append(','); } @@ -28087,8 +28496,8 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str, while ((hint= it++)) { - str->append (STRING_WITH_LEN(" ")); - hint->print (thd, str); + str->append(' '); + hint->print(thd, str); } } } @@ -28108,24 +28517,23 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) if ((query_type & QT_SHOW_SELECT_NUMBER) && thd->lex->all_selects_list && thd->lex->all_selects_list->link_next && - select_number != UINT_MAX && - select_number != INT_MAX) + select_number != FAKE_SELECT_LEX_ID) { - str->append("/* select#"); + str->append(STRING_WITH_LEN("/* select#")); str->append_ulonglong(select_number); if (thd->lex->describe & DESCRIBE_EXTENDED2) { - str->append("/"); + str->append('/'); str->append_ulonglong(nest_level); if (master_unit()->fake_select_lex && master_unit()->first_select() == this) { - str->append(" Filter Select: "); + str->append(STRING_WITH_LEN(" Filter Select: ")); master_unit()->fake_select_lex->print(thd, str, query_type); } } - str->append(" */ "); + str->append(STRING_WITH_LEN(" */ ")); } str->append(STRING_WITH_LEN("select ")); @@ -28189,7 +28597,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type 
query_type) else str->append(','); - if ((is_subquery_function() && item->is_autogenerated_name()) || + if ((is_subquery_function() && !item->is_explicit_name()) || !item->name.str) { /* @@ -28207,7 +28615,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) it is not "table field". */ if (top_level || - !item->is_autogenerated_name() || + item->is_explicit_name() || !check_column_name(item->name.str)) item->print_item_w_name(str, query_type); else @@ -28244,7 +28652,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) if (cur_where) cur_where->print(str, query_type); else - str->append(cond_value != Item::COND_FALSE ? "1" : "0"); + str->append(cond_value != Item::COND_FALSE ? '1' : '0'); } // group by & olap @@ -28276,7 +28684,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) if (cur_having) cur_having->print(str, query_type); else - str->append(having_value != Item::COND_FALSE ? "1" : "0"); + str->append(having_value != Item::COND_FALSE ? '1' : '0'); } if (order_list.elements) @@ -28289,10 +28697,12 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) print_limit(thd, str, query_type); // lock type - if (lock_type == TL_READ_WITH_SHARED_LOCKS) - str->append(" lock in share mode"); - else if (lock_type == TL_WRITE) - str->append(" for update"); + if (select_lock == select_lock_type::IN_SHARE_MODE) + str->append(STRING_WITH_LEN(" lock in share mode")); + else if (select_lock == select_lock_type::FOR_UPDATE) + str->append(STRING_WITH_LEN(" for update")); + if (unlikely(skip_locked)) + str->append(STRING_WITH_LEN(" skip locked")); // PROCEDURE unsupported here } @@ -29478,7 +29888,7 @@ AGGR_OP::end_send() @details The function removes all top conjuncts marked with the flag - FULL_EXTRACTION_FL from the condition 'cond'. The resulting + MARKER_FULL_EXTRACTION from the condition 'cond'. 
The resulting formula is returned a the result of the function If 'cond' s marked with such flag the function returns 0. The function clear the extraction flags for the removed @@ -29491,7 +29901,7 @@ AGGR_OP::end_send() Item *remove_pushed_top_conjuncts(THD *thd, Item *cond) { - if (cond->get_extraction_flag() == FULL_EXTRACTION_FL) + if (cond->get_extraction_flag() == MARKER_FULL_EXTRACTION) { cond->clear_extraction_flag(); return 0; @@ -29504,7 +29914,7 @@ Item *remove_pushed_top_conjuncts(THD *thd, Item *cond) Item *item; while ((item= li++)) { - if (item->get_extraction_flag() == FULL_EXTRACTION_FL) + if (item->get_extraction_flag() == MARKER_FULL_EXTRACTION) { item->clear_extraction_flag(); li.remove(); @@ -29642,17 +30052,13 @@ void JOIN::make_notnull_conds_for_range_scans() if (conds && build_notnull_conds_for_range_scans(this, conds, conds->used_tables())) { - Item *false_cond= new (thd->mem_root) Item_int(thd, (longlong) 0, 1); - if (false_cond) - { - /* - Found a IS NULL conjunctive predicate for a null-rejected field - in the WHERE clause - */ - conds= false_cond; - cond_equal= 0; - impossible_where= true; - } + /* + Found a IS NULL conjunctive predicate for a null-rejected field + in the WHERE clause + */ + conds= (Item*) &Item_false; + cond_equal= 0; + impossible_where= true; DBUG_VOID_RETURN; } @@ -29673,9 +30079,7 @@ void JOIN::make_notnull_conds_for_range_scans() Found a IS NULL conjunctive predicate for a null-rejected field of the inner table of an outer join with ON expression tbl->on_expr */ - Item *false_cond= new (thd->mem_root) Item_int(thd, (longlong) 0, 1); - if (false_cond) - tbl->on_expr= false_cond; + tbl->on_expr= (Item*) &Item_false; } } } @@ -29818,7 +30222,6 @@ void build_notnull_conds_for_inner_nest_of_outer_join(JOIN *join, { TABLE_LIST *tbl; table_map used_tables= 0; - THD *thd= join->thd; List_iterator<TABLE_LIST> li(nest_tbl->nested_join->join_list); while ((tbl= li++)) @@ -29829,9 +30232,7 @@ void 
build_notnull_conds_for_inner_nest_of_outer_join(JOIN *join, if (used_tables && build_notnull_conds_for_range_scans(join, nest_tbl->on_expr, used_tables)) { - Item *false_cond= new (thd->mem_root) Item_int(thd, (longlong) 0, 1); - if (false_cond) - nest_tbl->on_expr= false_cond; + nest_tbl->on_expr= (Item*) &Item_false; } li.rewind(); @@ -29845,11 +30246,7 @@ void build_notnull_conds_for_inner_nest_of_outer_join(JOIN *join, } else if (build_notnull_conds_for_range_scans(join, tbl->on_expr, tbl->table->map)) - { - Item *false_cond= new (thd->mem_root) Item_int(thd, (longlong) 0, 1); - if (false_cond) - tbl->on_expr= false_cond; - } + tbl->on_expr= (Item*) &Item_false; } } } @@ -29952,6 +30349,329 @@ void JOIN::init_join_cache_and_keyread() } +/* + @brief + Unpack temp table fields to base table fields. +*/ + +void unpack_to_base_table_fields(TABLE *table) +{ + JOIN_TAB *tab= table->reginfo.join_tab; + for (Copy_field *cp= tab->read_record.copy_field; + cp != tab->read_record.copy_field_end; cp++) + (*cp->do_copy)(cp); +} + +/* + Call item->fix_after_optimize for all items registered in + lex->fix_after_optimize + + This is needed for items like ROWNUM(), which needs access to structures + created by the early optimizer pass, like JOIN +*/ + +static void fix_items_after_optimize(THD *thd, SELECT_LEX *select_lex) +{ + List_iterator<Item> li(select_lex->fix_after_optimize); + + while (Item *item= li++) + item->fix_after_optimize(thd); +} + + +/* + Set a limit for the SELECT_LEX_UNIT based on ROWNUM usage. 
+ The limit is shown in EXPLAIN +*/ + +static bool set_limit_for_unit(THD *thd, SELECT_LEX_UNIT *unit, ha_rows lim) +{ + SELECT_LEX *gpar= unit->global_parameters(); + if (gpar->limit_params.select_limit != 0 && + // limit can not be an expression but can be parameter + (!gpar->limit_params.select_limit->basic_const_item() || + ((ha_rows)gpar->limit_params.select_limit->val_int()) < lim)) + return false; + + Query_arena *arena, backup; + arena= thd->activate_stmt_arena_if_needed(&backup); + + gpar->limit_params.select_limit= + new (thd->mem_root) Item_int(thd, lim, MAX_BIGINT_WIDTH); + if (gpar->limit_params.select_limit == 0) + return true; // EOM + + unit->set_limit(gpar); + + gpar->limit_params.explicit_limit= true; // to show in EXPLAIN + + if (arena) + thd->restore_active_arena(arena, &backup); + + return false; +} + + +/** + Check possibility of LIMIT setting by rownum() of upper SELECT and do it + + @note Ideal is to convert something like + SELECT ... + FROM (SELECT ...) table + WHERE rownum() < <CONSTANT>; + to + SELECT ... + FROM (SELECT ... 
LIMIT <CONSTANT>) table + WHERE rownum() < <CONSTANT>; + + @retval true EOM + @retval false no errors +*/ + +bool JOIN::optimize_upper_rownum_func() +{ + DBUG_ASSERT(select_lex->master_unit()->derived); + + if (select_lex->master_unit()->first_select() != select_lex) + return false; // first will set parameter + + if (select_lex->master_unit()->global_parameters()-> + limit_params.offset_limit != NULL) + return false; // offset is set, we cannot set limit + + SELECT_LEX *outer_select= select_lex->master_unit()->outer_select(); + /* + Check that it is safe to use rownum-limit from the outer query + (the one that has 'WHERE rownum()...') + */ + if (outer_select == NULL || + !outer_select->with_rownum || + (outer_select->options & SELECT_DISTINCT) || + outer_select->table_list.elements != 1 || + outer_select->where == NULL || + outer_select->where->type() != Item::FUNC_ITEM) + return false; + + return process_direct_rownum_comparison(thd, unit, outer_select->where); +} + + +/** + Test if the predicate compares rownum() with a constant + + @return 1 No or invalid rownum() compare + @return 0 rownum() is compared with a constant. + In this case *args contains the constant and + *inv_order constains 1 if the rownum() was the right + argument, like in 'WHERE 2 >= rownum()'. 
+*/ + +static bool check_rownum_usage(Item_func *func_item, longlong *limit, + bool *inv_order) +{ + Item *arg1, *arg2; + *inv_order= 0; + DBUG_ASSERT(func_item->argument_count() == 2); + + /* 'rownum op const' or 'const op field' */ + arg1= func_item->arguments()[0]->real_item(); + if (arg1->type() == Item::FUNC_ITEM && + ((Item_func*) arg1)->functype() == Item_func::ROWNUM_FUNC) + { + arg2= func_item->arguments()[1]->real_item(); + if (arg2->can_eval_in_optimize()) + { + *limit= arg2->val_int(); + return *limit <= 0 || (ulonglong) *limit >= HA_POS_ERROR; + } + } + else if (arg1->can_eval_in_optimize()) + { + arg2= func_item->arguments()[1]->real_item(); + if (arg2->type() == Item::FUNC_ITEM && + ((Item_func*) arg2)->functype() == Item_func::ROWNUM_FUNC) + { + *limit= arg1->val_int(); + *inv_order= 1; + return *limit <= 0 || (ulonglong) *limit >= HA_POS_ERROR; + } + } + return 1; +} + + +/* + Limit optimization for ROWNUM() + + Go through the WHERE clause and find out if there are any of the following + constructs on the top level: + rownum() <= integer_constant + rownum() < integer_constant + rownum() = 1 + + If yes, then threat the select as if 'LIMIT integer_constant' would + have been used +*/ + +static void optimize_rownum(THD *thd, SELECT_LEX_UNIT *unit, + Item *cond) +{ + DBUG_ENTER("optimize_rownum"); + + if (cond->type() == Item::COND_ITEM) + { + if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + { + List_iterator<Item> li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item= li++)) + optimize_rownum(thd, unit, item); + } + DBUG_VOID_RETURN; + } + + process_direct_rownum_comparison(thd, unit, cond); + DBUG_VOID_RETURN; +} + + +static bool process_direct_rownum_comparison(THD *thd, SELECT_LEX_UNIT *unit, + Item *cond) +{ + DBUG_ENTER("process_direct_rownum_comparison"); + if (cond->real_type() == Item::FUNC_ITEM) + { + Item_func *pred= (Item_func*) cond; + longlong limit; + bool inv; + + if (pred->argument_count() != 2) + 
DBUG_RETURN(false); // Not a compare functions + if (check_rownum_usage(pred, &limit, &inv)) + DBUG_RETURN(false); + + Item_func::Functype pred_type= pred->functype(); + + if (inv && pred_type != Item_func::EQ_FUNC) + { + if (pred_type == Item_func::GT_FUNC) // # > rownum() + pred_type= Item_func::LT_FUNC; + else if (pred_type == Item_func::GE_FUNC) // # >= rownum() + pred_type= Item_func::LE_FUNC; + else + DBUG_RETURN(false); + } + switch (pred_type) { + case Item_func::LT_FUNC: // rownum() < # + { + if (limit <= 0) + DBUG_RETURN(false); + DBUG_RETURN(set_limit_for_unit(thd, unit, limit - 1)); + case Item_func::LE_FUNC: + DBUG_RETURN(set_limit_for_unit(thd, unit, limit)); + case Item_func::EQ_FUNC: + if (limit == 1) + DBUG_RETURN(set_limit_for_unit(thd, unit, limit)); + break; + default: + break; + } + } + } + DBUG_RETURN(false); +} + +/** + @brief + Transform IN predicates having equal constant elements to equalities + + @param thd The context of the statement + + @details + If all elements in an IN predicate are constant and equal to each other + then clause + - "a IN (e1,..,en)" can be transformed to "a = e1" + - "a NOT IN (e1,..,en)" can be transformed to "a != e1". + This means an object of Item_func_in can be replaced with an object of + Item_func_eq for IN (e1,..,en) clause or Item_func_ne for + NOT IN (e1,...,en). + Such a replacement allows the optimizer to choose a better execution plan. 
+ + This methods applies such transformation for each IN predicate of the WHERE + condition and ON expressions of this join where possible + + @retval + false success + true failure +*/ +bool JOIN::transform_in_predicates_into_equalities(THD *thd) +{ + DBUG_ENTER("JOIN::transform_in_predicates_into_equalities"); + DBUG_RETURN(transform_all_conds_and_on_exprs( + thd, &Item::in_predicate_to_equality_transformer)); +} + + +/** + @brief + Transform all items in WHERE and ON expressions using a given transformer + + @param thd The context of the statement + transformer Pointer to the transformation function + + @details + For each item of the WHERE condition and ON expressions of the SELECT + for this join the method performs the intransformation using the given + transformation function + + @retval + false success + true failure +*/ +bool JOIN::transform_all_conds_and_on_exprs(THD *thd, + Item_transformer transformer) +{ + if (conds) + { + conds= conds->top_level_transform(thd, transformer, (uchar *) 0); + if (!conds) + return true; + } + if (join_list) + { + if (transform_all_conds_and_on_exprs_in_join_list(thd, join_list, + transformer)) + return true; + } + return false; +} + + +bool JOIN::transform_all_conds_and_on_exprs_in_join_list( + THD *thd, List<TABLE_LIST> *join_list, Item_transformer transformer) +{ + TABLE_LIST *table; + List_iterator<TABLE_LIST> li(*join_list); + + while ((table= li++)) + { + if (table->nested_join) + { + if (transform_all_conds_and_on_exprs_in_join_list( + thd, &table->nested_join->join_list, transformer)) + return true; + } + if (table->on_expr) + { + table->on_expr= table->on_expr->top_level_transform(thd, transformer, 0); + if (!table->on_expr) + return true; + } + } + return false; +} + + /** @} (end of group Query_Optimizer) */ diff --git a/sql/sql_select.h b/sql/sql_select.h index d8b0ed290db..ba38cdade55 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -229,7 +229,7 @@ typedef enum_nested_loop_state (*Next_select_func)(JOIN 
*, struct st_join_table *, bool); Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab); int rr_sequential(READ_RECORD *info); -int rr_sequential_and_unpack(READ_RECORD *info); +int read_record_func_for_rr_and_unpack(READ_RECORD *info); Item *remove_pushed_top_conjuncts(THD *thd, Item *cond); Item *and_new_conditions_to_optimized_cond(THD *thd, Item *cond, COND_EQUAL **cond_eq, @@ -376,6 +376,8 @@ typedef struct st_join_table { uint used_null_fields; uint used_uneven_bit_fields; enum join_type type; + /* If first key part is used for any key in 'key_dependent' */ + bool key_start_dependent; bool cached_eq_ref_table,eq_ref_table; bool shortcut_for_distinct; bool sorted; @@ -566,7 +568,7 @@ typedef struct st_join_table { } bool is_first_inner_for_outer_join() { - return first_inner && first_inner == this; + return first_inner == this; } bool use_match_flag() { @@ -958,6 +960,8 @@ public: /* If ref-based access is used: bitmap of tables this table depends on */ table_map ref_depend_map; + /* tables that may help best_access_path() to find a better key */ + table_map key_dependent; /* Bitmap of semi-join inner tables that are in the join prefix and for which there's no provision for how to eliminate semi-join duplicates @@ -989,6 +993,8 @@ public: */ enum sj_strategy_enum sj_strategy; + /* Type of join (EQ_REF, REF etc) */ + enum join_type type; /* Valid only after fix_semijoin_strategies_for_picked_join_order() call: if sj_strategy!=SJ_OPT_NONE, this is the number of subsequent tables that @@ -1184,6 +1190,16 @@ public: uint aggr_tables; ///< Number of post-join tmp tables uint send_group_parts; /* + This represents the number of items in ORDER BY *after* removing + all const items. 
This is computed before other optimizations take place, + such as removal of ORDER BY when it is a prefix of GROUP BY, for example: + GROUP BY a, b ORDER BY a + + This is used when deciding to send rows, by examining the correct number + of items in the group_fields list when ORDER BY was previously eliminated. + */ + uint with_ties_order_count; + /* True if the query has GROUP BY. (that is, if group_by != NULL. when DISTINCT is converted into GROUP BY, it will set this, too. It is not clear why we need a separate var from @@ -1238,7 +1254,7 @@ public: table_map outer_join; /* Bitmap of tables used in the select list items */ table_map select_list_used_tables; - ha_rows send_records,found_records,join_examined_rows; + ha_rows send_records,found_records,join_examined_rows, accepted_rows; /* LIMIT for the JOIN operation. When not using aggregation or DISITNCT, this @@ -1328,6 +1344,10 @@ public: */ double join_record_count; List<Item> *fields; + + /* Used only for FETCH ... WITH TIES to identify peers. */ + List<Cached_item> order_fields; + /* Used during GROUP BY operations to identify when a group has changed. 
*/ List<Cached_item> group_fields, group_fields_cache; THD *thd; Item_sum **sum_funcs, ***sum_funcs_end; @@ -1543,95 +1563,7 @@ public: } void init(THD *thd_arg, List<Item> &fields_arg, ulonglong select_options_arg, - select_result *result_arg) - { - join_tab= 0; - table= 0; - table_count= 0; - top_join_tab_count= 0; - const_tables= 0; - const_table_map= found_const_table_map= 0; - aggr_tables= 0; - eliminated_tables= 0; - join_list= 0; - implicit_grouping= FALSE; - sort_and_group= 0; - first_record= 0; - do_send_rows= 1; - duplicate_rows= send_records= 0; - found_records= 0; - fetch_limit= HA_POS_ERROR; - thd= thd_arg; - sum_funcs= sum_funcs2= 0; - procedure= 0; - having= tmp_having= having_history= 0; - having_is_correlated= false; - group_list_for_estimates= 0; - select_options= select_options_arg; - result= result_arg; - lock= thd_arg->lock; - select_lex= 0; //for safety - select_distinct= MY_TEST(select_options & SELECT_DISTINCT); - no_order= 0; - simple_order= 0; - simple_group= 0; - ordered_index_usage= ordered_index_void; - need_distinct= 0; - skip_sort_order= 0; - with_two_phase_optimization= 0; - save_qep= 0; - spl_opt_info= 0; - ext_keyuses_for_splitting= 0; - spl_opt_info= 0; - need_tmp= 0; - hidden_group_fields= 0; /*safety*/ - error= 0; - select= 0; - return_tab= 0; - ref_ptrs.reset(); - items0.reset(); - items1.reset(); - items2.reset(); - items3.reset(); - zero_result_cause= 0; - optimization_state= JOIN::NOT_OPTIMIZED; - have_query_plan= QEP_NOT_PRESENT_YET; - initialized= 0; - cleaned= 0; - cond_equal= 0; - having_equal= 0; - exec_const_cond= 0; - group_optimized_away= 0; - no_rows_in_result_called= 0; - positions= best_positions= 0; - pushdown_query= 0; - original_join_tab= 0; - explain= NULL; - tmp_table_keep_current_rowid= 0; - - all_fields= fields_arg; - if (&fields_list != &fields_arg) /* Avoid valgrind-warning */ - fields_list= fields_arg; - non_agg_fields.empty(); - bzero((char*) &keyuse,sizeof(keyuse)); - having_value= Item::COND_UNDEF; - 
tmp_table_param.init(); - tmp_table_param.end_write_records= HA_POS_ERROR; - rollup.state= ROLLUP::STATE_NONE; - - no_const_tables= FALSE; - first_select= sub_select; - set_group_rpa= false; - group_sent= 0; - - outer_ref_cond= pseudo_bits_cond= NULL; - in_to_exists_where= NULL; - in_to_exists_having= NULL; - emb_sjm_nest= NULL; - sjm_lookup_tables= 0; - sjm_scan_tables= 0; - is_orig_degenerated= false; - } + select_result *result_arg); /* True if the plan guarantees that it will be returned zero or one row */ bool only_const_tables() { return const_tables == table_count; } @@ -1817,6 +1749,9 @@ public: void make_notnull_conds_for_range_scans(); bool transform_in_predicates_into_in_subq(THD *thd); + + bool optimize_upper_rownum_func(); + private: /** Create a temporary table to be used for processing DISTINCT/ORDER @@ -1853,6 +1788,12 @@ private: bool add_fields_for_current_rowid(JOIN_TAB *cur, List<Item> *fields); void free_pushdown_handlers(List<TABLE_LIST>& join_list); void init_join_cache_and_keyread(); + bool transform_in_predicates_into_equalities(THD *thd); + bool transform_all_conds_and_on_exprs(THD *thd, + Item_transformer transformer); + bool transform_all_conds_and_on_exprs_in_join_list(THD *thd, + List<TABLE_LIST> *join_list, + Item_transformer transformer); }; enum enum_with_bush_roots { WITH_BUSH_ROOTS, WITHOUT_BUSH_ROOTS}; @@ -1923,11 +1864,12 @@ public: @details this function makes sure truncation warnings when preparing the key buffers don't end up as errors (because of an enclosing INSERT/UPDATE). 
*/ - enum store_key_result copy() + enum store_key_result copy(THD *thd) { - enum store_key_result result; + enum_check_fields org_count_cuted_fields= thd->count_cuted_fields; Use_relaxed_field_copy urfc(to_field->table->in_use); - result= copy_inner(); + store_key_result result= copy_inner(); + thd->count_cuted_fields= org_count_cuted_fields; return result; } @@ -1958,8 +1900,8 @@ class store_key_field: public store_key } } - enum Type type() const { return FIELD_STORE_KEY; } - const char *name() const { return field_name; } + enum Type type() const override { return FIELD_STORE_KEY; } + const char *name() const override { return field_name; } void change_source_field(Item_field *fld_item) { @@ -1968,7 +1910,7 @@ class store_key_field: public store_key } protected: - enum store_key_result copy_inner() + enum store_key_result copy_inner() override { TABLE *table= copy_field.to_field->table; MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, @@ -2003,7 +1945,7 @@ public: store_key_item(THD *thd, Field *to_field_arg, uchar *ptr, uchar *null_ptr_arg, uint length, Item *item_arg, bool val) :store_key(thd, to_field_arg, ptr, - null_ptr_arg ? null_ptr_arg : item_arg->maybe_null ? + null_ptr_arg ? null_ptr_arg : item_arg->maybe_null() ? &err : (uchar*) 0, length), item(item_arg), use_value(val) {} store_key_item(store_key &arg, Item *new_item, bool val) @@ -2011,11 +1953,11 @@ public: {} - enum Type type() const { return ITEM_STORE_KEY; } - const char *name() const { return "func"; } + enum Type type() const override { return ITEM_STORE_KEY; } + const char *name() const override { return "func"; } protected: - enum store_key_result copy_inner() + enum store_key_result copy_inner() override { TABLE *table= to_field->table; MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, @@ -2056,7 +1998,7 @@ public: uchar *null_ptr_arg, uint length, Item *item_arg) :store_key_item(thd, to_field_arg, ptr, - null_ptr_arg ? null_ptr_arg : item_arg->maybe_null ? + null_ptr_arg ? 
null_ptr_arg : item_arg->maybe_null() ? &err : (uchar*) 0, length, item_arg, FALSE), inited(0) { } @@ -2064,12 +2006,12 @@ public: :store_key_item(arg, new_item, FALSE), inited(0) {} - enum Type type() const { return CONST_ITEM_STORE_KEY; } - const char *name() const { return "const"; } - bool store_key_is_const() { return true; } + enum Type type() const override { return CONST_ITEM_STORE_KEY; } + const char *name() const override { return "const"; } + bool store_key_is_const() override { return true; } protected: - enum store_key_result copy_inner() + enum store_key_result copy_inner() override { int res; if (!inited) @@ -2374,7 +2316,6 @@ create_virtual_tmp_table(THD *thd, Field *field) int test_if_item_cache_changed(List<Cached_item> &list); int join_init_read_record(JOIN_TAB *tab); -int join_read_record_no_init(JOIN_TAB *tab); void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key); inline Item * and_items(THD *thd, Item* cond, Item *item) { @@ -2432,6 +2373,7 @@ int print_explain_message_line(select_result_sink *result, void explain_append_mrr_info(QUICK_RANGE_SELECT *quick, String *res); int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table, key_map possible_keys); +void unpack_to_base_table_fields(TABLE *table); /**************************************************************************** Temporary table support for SQL Runtime @@ -2469,7 +2411,6 @@ bool instantiate_tmp_table(TABLE *table, KEY *keyinfo, TMP_ENGINE_COLUMNDEF **recinfo, ulonglong options); bool open_tmp_table(TABLE *table); -void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps); double prev_record_reads(const POSITION *positions, uint idx, table_map found_ref); void fix_list_after_tbl_changes(SELECT_LEX *new_parent, List<TABLE_LIST> *tlist); double get_tmp_table_lookup_cost(THD *thd, double row_count, uint row_size); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 891d81379fc..cbb0d739bc4 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ 
-120,6 +120,10 @@ static const LEX_CSTRING trg_event_type_names[]= { STRING_WITH_LEN("DELETE") } }; + +LEX_CSTRING DATA_clex_str= { STRING_WITH_LEN("DATA") }; +LEX_CSTRING INDEX_clex_str= { STRING_WITH_LEN("INDEX") }; + #ifndef NO_EMBEDDED_ACCESS_CHECKS static const char *grant_names[]={ "select","insert","update","delete","create","drop","reload","shutdown", @@ -131,7 +135,12 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), #endif /* Match the values of enum ha_choice */ -static const char *ha_choice_values[] = {"", "0", "1"}; +static const LEX_CSTRING ha_choice_values[]= +{ + { STRING_WITH_LEN("") }, + { STRING_WITH_LEN("0") }, + { STRING_WITH_LEN("1") } +}; static void store_key_options(THD *, String *, TABLE *, KEY *); @@ -408,9 +417,11 @@ bool mysqld_show_authors(THD *thd) for (authors= show_table_authors; authors->name; authors++) { protocol->prepare_for_resend(); - protocol->store(authors->name, system_charset_info); - protocol->store(authors->location, system_charset_info); - protocol->store(authors->comment, system_charset_info); + protocol->store(authors->name, strlen(authors->name), system_charset_info); + protocol->store(authors->location, strlen(authors->location), + system_charset_info); + protocol->store(authors->comment, strlen(authors->comment), + system_charset_info); if (protocol->write()) DBUG_RETURN(TRUE); } @@ -447,9 +458,12 @@ bool mysqld_show_contributors(THD *thd) for (contributors= show_table_contributors; contributors->name; contributors++) { protocol->prepare_for_resend(); - protocol->store(contributors->name, system_charset_info); - protocol->store(contributors->location, system_charset_info); - protocol->store(contributors->comment, system_charset_info); + protocol->store(contributors->name, strlen(contributors->name), + system_charset_info); + protocol->store(contributors->location, strlen(contributors->location), + system_charset_info); + protocol->store(contributors->comment, strlen(contributors->comment), + 
system_charset_info); if (protocol->write()) DBUG_RETURN(TRUE); } @@ -540,9 +554,12 @@ bool mysqld_show_privileges(THD *thd) for (privilege= sys_privileges; privilege->privilege ; privilege++) { protocol->prepare_for_resend(); - protocol->store(privilege->privilege, system_charset_info); - protocol->store(privilege->context, system_charset_info); - protocol->store(privilege->comment, system_charset_info); + protocol->store(privilege->privilege, strlen(privilege->privilege), + system_charset_info); + protocol->store(privilege->context, strlen(privilege->context), + system_charset_info); + protocol->store(privilege->comment, strlen(privilege->comment), + system_charset_info); if (protocol->write()) DBUG_RETURN(TRUE); } @@ -1313,28 +1330,32 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) protocol->prepare_for_resend(); if (table_list->view) - protocol->store(table_list->view_name.str, system_charset_info); + protocol->store(&table_list->view_name, system_charset_info); else { if (table_list->schema_table) - protocol->store(table_list->schema_table->table_name, system_charset_info); + protocol->store(table_list->schema_table->table_name, + strlen(table_list->schema_table->table_name), + system_charset_info); else - protocol->store(table_list->table->alias.c_ptr(), system_charset_info); + protocol->store(table_list->table->alias.ptr(), + table_list->table->alias.length(), + system_charset_info); } if (table_list->view) { - protocol->store(buffer.ptr(), buffer.length(), - table_list->view_creation_ctx->get_client_cs()); + buffer.set_charset(table_list->view_creation_ctx->get_client_cs()); + protocol->store(&buffer); - protocol->store(table_list->view_creation_ctx->get_client_cs()->csname, + protocol->store(&table_list->view_creation_ctx->get_client_cs()->cs_name, system_charset_info); - protocol->store(table_list->view_creation_ctx->get_connection_cl()->name, - system_charset_info); + protocol->store(&table_list->view_creation_ctx->get_connection_cl()-> + 
coll_name, system_charset_info); } else - protocol->store(buffer.ptr(), buffer.length(), buffer.charset()); + protocol->store(&buffer); if (protocol->write()) goto exit; @@ -1434,11 +1455,11 @@ bool mysqld_show_create_db(THD *thd, LEX_CSTRING *dbname, { buffer.append(STRING_WITH_LEN(" /*!40100")); buffer.append(STRING_WITH_LEN(" DEFAULT CHARACTER SET ")); - buffer.append(create.default_table_charset->csname); + buffer.append(create.default_table_charset->cs_name); if (Charset(create.default_table_charset).can_have_collate_clause()) { buffer.append(STRING_WITH_LEN(" COLLATE ")); - buffer.append(create.default_table_charset->name); + buffer.append(create.default_table_charset->coll_name); } buffer.append(STRING_WITH_LEN(" */")); } @@ -1636,7 +1657,7 @@ int get_quote_char_for_identifier(THD *thd, const char *name, size_t length) /* Append directory name (if exists) to CREATE INFO */ -static void append_directory(THD *thd, String *packet, const char *dir_type, +static void append_directory(THD *thd, String *packet, LEX_CSTRING *dir_type, const char *filename) { if (filename && !(thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE)) @@ -1645,7 +1666,7 @@ static void append_directory(THD *thd, String *packet, const char *dir_type, packet->append(' '); packet->append(dir_type); packet->append(STRING_WITH_LEN(" DIRECTORY='")); -#ifdef __WIN__ +#ifdef _WIN32 /* Convert \ to / to be able to create table on unix */ char *winfilename= (char*) thd->memdup(filename, length); char *pos, *end; @@ -1894,11 +1915,11 @@ static void add_table_options(THD *thd, TABLE *table, (create_info_arg->used_fields & HA_CREATE_USED_DEFAULT_CHARSET)) { packet->append(STRING_WITH_LEN(" DEFAULT CHARSET=")); - packet->append(share->table_charset->csname); + packet->append(share->table_charset->cs_name); if (Charset(table->s->table_charset).can_have_collate_clause()) { packet->append(STRING_WITH_LEN(" COLLATE=")); - packet->append(table->s->table_charset->name); + 
packet->append(table->s->table_charset->coll_name); } } } @@ -1945,19 +1966,19 @@ static void add_table_options(THD *thd, TABLE *table, if (create_info.page_checksum != HA_CHOICE_UNDEF) { packet->append(STRING_WITH_LEN(" PAGE_CHECKSUM=")); - packet->append(ha_choice_values[create_info.page_checksum], 1); + packet->append(ha_choice_values[create_info.page_checksum]); } if (create_info.options & HA_OPTION_DELAY_KEY_WRITE) packet->append(STRING_WITH_LEN(" DELAY_KEY_WRITE=1")); if (create_info.row_type != ROW_TYPE_DEFAULT) { packet->append(STRING_WITH_LEN(" ROW_FORMAT=")); - packet->append(ha_row_type[(uint) create_info.row_type]); + packet->append(&ha_row_type[(uint) create_info.row_type]); } if (share->transactional != HA_CHOICE_UNDEF) { packet->append(STRING_WITH_LEN(" TRANSACTIONAL=")); - packet->append(ha_choice_values[(uint) share->transactional], 1); + packet->append(ha_choice_values[(uint) share->transactional]); } if (share->table_type == TABLE_TYPE_SEQUENCE) packet->append(STRING_WITH_LEN(" SEQUENCE=1")); @@ -1981,8 +2002,8 @@ end_options: } append_create_options(thd, packet, share->option_list, check_options, hton->table_options); - append_directory(thd, packet, "DATA", create_info.data_file_name); - append_directory(thd, packet, "INDEX", create_info.index_file_name); + append_directory(thd, packet, &DATA_clex_str, create_info.data_file_name); + append_directory(thd, packet, &INDEX_clex_str, create_info.index_file_name); } static void append_period(THD *thd, String *packet, const LEX_CSTRING &start, @@ -2179,12 +2200,12 @@ int show_create_table_ex(THD *thd, TABLE_LIST *table_list, { if (field->charset() != share->table_charset) { - packet->append(STRING_WITH_LEN(" CHARACTER SET ")); - packet->append(field->charset()->csname); + packet->append(STRING_WITH_LEN(" CHARACTER SET ")); + packet->append(field->charset()->cs_name); if (Charset(field->charset()).can_have_collate_clause()) { packet->append(STRING_WITH_LEN(" COLLATE ")); - 
packet->append(field->charset()->name); + packet->append(field->charset()->coll_name); } } } @@ -2293,7 +2314,7 @@ int show_create_table_ex(THD *thd, TABLE_LIST *table_list, bool found_primary=0; packet->append(STRING_WITH_LEN(",\n ")); - if (i == primary_key && !strcmp(key_info->name.str, primary_key_name)) + if (i == primary_key && !strcmp(key_info->name.str, primary_key_name.str)) { found_primary=1; /* @@ -2395,6 +2416,7 @@ int show_create_table_ex(THD *thd, TABLE_LIST *table_list, /* Add table level check constraints */ if (share->table_check_constraints) { + StringBuffer<MAX_FIELD_WIDTH> str(&my_charset_utf8mb4_general_ci); for (uint i= share->field_check_constraints; i < share->table_check_constraints ; i++) { @@ -2403,7 +2425,8 @@ int show_create_table_ex(THD *thd, TABLE_LIST *table_list, if (share->period.constr_name.streq(check->name)) continue; - StringBuffer<MAX_FIELD_WIDTH> str(&my_charset_utf8mb4_general_ci); + str.set_buffer_if_not_allocated(&my_charset_utf8mb4_general_ci); + str.length(0); // Print appends to str check->print(&str); packet->append(STRING_WITH_LEN(",\n ")); @@ -2499,6 +2522,9 @@ static void store_key_options(THD *thd, String *packet, TABLE *table, append_unescaped(packet, key_info->comment.str, key_info->comment.length); } + + if (key_info->is_ignored) + packet->append(STRING_WITH_LEN(" IGNORED")); } } @@ -2869,7 +2895,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) field_list.push_back(field=new (mem_root) Item_empty_string(thd, "db", NAME_CHAR_LEN), mem_root); - field->maybe_null=1; + field->set_maybe_null();; field_list.push_back(new (mem_root) Item_empty_string(thd, "Command", 16), mem_root); field_list.push_back(field= new (mem_root) @@ -2879,18 +2905,18 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) field_list.push_back(field=new (mem_root) Item_empty_string(thd, "State", 30), mem_root); - field->maybe_null=1; + field->set_maybe_null();; field_list.push_back(field=new (mem_root) 
Item_empty_string(thd, "Info", arg.max_query_length), mem_root); - field->maybe_null=1; + field->set_maybe_null();; if (!thd->variables.old_mode && !(thd->variables.old_behavior & OLD_MODE_NO_PROGRESS_INFO)) { field_list.push_back(field= new (mem_root) Item_float(thd, "Progress", 0.0, 3, 7), mem_root); - field->maybe_null= 0; + field->base_flags&= ~item_base_t::MAYBE_NULL; } if (protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS | @@ -2904,22 +2930,23 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) ulonglong now= microsecond_interval_timer(); - while (auto thd_info= arg.thread_infos.get()) + while (thread_info *thd_info= arg.thread_infos.get()) { protocol->prepare_for_resend(); protocol->store(thd_info->thread_id); - protocol->store(thd_info->user, system_charset_info); - protocol->store(thd_info->host, system_charset_info); - protocol->store(thd_info->db, system_charset_info); + protocol->store(thd_info->user, strlen(thd_info->user), system_charset_info); + protocol->store(thd_info->host, strlen(thd_info->host), system_charset_info); + protocol->store_string_or_null(thd_info->db, system_charset_info); if (thd_info->proc_info) - protocol->store(thd_info->proc_info, system_charset_info); + protocol->store(thd_info->proc_info, strlen(thd_info->proc_info), + system_charset_info); else - protocol->store(command_name[thd_info->command].str, system_charset_info); + protocol->store(&command_name[thd_info->command], system_charset_info); if (thd_info->start_time && now > thd_info->start_time) protocol->store_long((now - thd_info->start_time) / HRTIME_RESOLUTION); else protocol->store_null(); - protocol->store(thd_info->state_info, system_charset_info); + protocol->store_string_or_null(thd_info->state_info, system_charset_info); if (thd_info->query_string.length()) protocol->store(thd_info->query_string.str(), thd_info->query_string.length(), @@ -3018,14 +3045,16 @@ int select_result_text_buffer::append_row(List<Item> &items, bool 
send_names) rows.push_back(row, thd->mem_root)) return true; + StringBuffer<32> buf; + while ((item= it++)) { DBUG_ASSERT(column < n_columns); - StringBuffer<32> buf; const char *data_ptr; char *ptr; size_t data_len; + buf.set_buffer_if_not_allocated(&my_charset_bin); if (send_names) { DBUG_ASSERT(strlen(item->name.str) == item->name.length); @@ -3062,19 +3091,19 @@ void select_result_text_buffer::save_to(String *res) { List_iterator<char*> it(rows); char **row; - res->append("#\n"); + res->append(STRING_WITH_LEN("#\n")); while ((row= it++)) { - res->append("# explain: "); + res->append(STRING_WITH_LEN("# explain: ")); for (int i=0; i < n_columns; i++) { if (i) res->append('\t'); - res->append(row[i]); + res->append(row[i], strlen(row[i])); } - res->append("\n"); + res->append('\n'); } - res->append("#\n"); + res->append(STRING_WITH_LEN("#\n")); } @@ -4521,7 +4550,7 @@ static void get_table_engine_for_i_s(THD *thd, char *buf, TABLE_LIST *tl, char path[FN_REFLEN]; build_table_filename(path, sizeof(path) - 1, db->str, table->str, reg_ext, 0); - if (dd_frm_type(thd, path, &engine_name) == TABLE_TYPE_NORMAL) + if (dd_frm_type(thd, path, &engine_name, NULL, NULL) == TABLE_TYPE_NORMAL) tl->option= engine_name.str; } } @@ -4741,7 +4770,9 @@ static int fill_schema_table_names(THD *thd, TABLE_LIST *tables, CHARSET_INFO *cs= system_charset_info; handlerton *hton; bool is_sequence; - if (ha_table_exists(thd, db_name, table_name, &hton, &is_sequence)) + + if (ha_table_exists(thd, db_name, table_name, NULL, NULL, + &hton, &is_sequence)) { if (hton == view_pseudo_hton) table->field[3]->store(STRING_WITH_LEN("VIEW"), cs); @@ -5347,9 +5378,9 @@ bool store_schema_schemata(THD* thd, TABLE *table, LEX_CSTRING *db_name, { restore_record(table, s->default_values); table->field[0]->store(STRING_WITH_LEN("def"), system_charset_info); - table->field[1]->store(db_name->str, db_name->length, system_charset_info); - table->field[2]->store(cs->csname, strlen(cs->csname), system_charset_info); 
- table->field[3]->store(cs->name, strlen(cs->name), system_charset_info); + table->field[1]->store(db_name, system_charset_info); + table->field[2]->store(&cs->cs_name, system_charset_info); + table->field[3]->store(&cs->coll_name, system_charset_info); if (schema_comment) table->field[5]->store(schema_comment->str, schema_comment->length, system_charset_info); @@ -5579,7 +5610,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, if (share->page_checksum != HA_CHOICE_UNDEF) { str.qs_append(STRING_WITH_LEN(" page_checksum=")); - str.qs_append(ha_choice_values[(uint) share->page_checksum]); + str.qs_append(&ha_choice_values[(uint) share->page_checksum]); } if (share->db_create_options & HA_OPTION_DELAY_KEY_WRITE) @@ -5588,7 +5619,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, if (share->row_type != ROW_TYPE_DEFAULT) { str.qs_append(STRING_WITH_LEN(" row_format=")); - str.qs_append(ha_row_type[(uint) share->row_type]); + str.qs_append(&ha_row_type[(uint) share->row_type]); } if (share->key_block_size) @@ -5620,7 +5651,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, HA_CHOICE_NO : HA_CHOICE_YES); str.qs_append(STRING_WITH_LEN(" transactional=")); - str.qs_append(ha_choice_values[choice]); + str.qs_append(&ha_choice_values[choice]); } append_create_options(thd, &str, share->option_list, false, 0); @@ -5629,20 +5660,22 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, HA_CREATE_INFO create_info; create_info.init(); file->update_create_info(&create_info); - append_directory(thd, &str, "DATA", create_info.data_file_name); - append_directory(thd, &str, "INDEX", create_info.index_file_name); + append_directory(thd, &str, &DATA_clex_str, create_info.data_file_name); + append_directory(thd, &str, &INDEX_clex_str, create_info.index_file_name); } if (str.length()) table->field[19]->store(str.ptr()+1, str.length()-1, cs); - tmp_buff= (share->table_charset ? 
- share->table_charset->name : "default"); - - table->field[17]->store(tmp_buff, strlen(tmp_buff), cs); + LEX_CSTRING tmp_str; + if (share->table_charset) + tmp_str= share->table_charset->coll_name; + else + tmp_str= { STRING_WITH_LEN("default") }; + table->field[17]->store(&tmp_str, cs); if (share->comment.str) - table->field[20]->store(share->comment.str, share->comment.length, cs); + table->field[20]->store(&share->comment, cs); /* Collect table info from the storage engine */ @@ -5863,12 +5896,10 @@ static void store_column_type(TABLE *table, Field *field, CHARSET_INFO *cs, if (field->has_charset()) { /* CHARACTER_SET_NAME column*/ - tmp_buff= field->charset()->csname; - table->field[offset + 6]->store(tmp_buff, strlen(tmp_buff), cs); + table->field[offset + 6]->store(&field->charset()->cs_name, cs); table->field[offset + 6]->set_notnull(); /* COLLATION_NAME column */ - tmp_buff= field->charset()->name; - table->field[offset + 7]->store(tmp_buff, strlen(tmp_buff), cs); + table->field[offset + 7]->store(&field->charset()->coll_name, cs); table->field[offset + 7]->set_notnull(); } } @@ -6163,12 +6194,12 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond) (tmp_cs->state & MY_CS_AVAILABLE) && !(tmp_cs->state & MY_CS_HIDDEN) && !(wild && wild[0] && - wild_case_compare(scs, tmp_cs->csname,wild))) + wild_case_compare(scs, tmp_cs->cs_name.str,wild))) { const char *comment; restore_record(table, s->default_values); - table->field[0]->store(tmp_cs->csname, strlen(tmp_cs->csname), scs); - table->field[1]->store(tmp_cs->name, strlen(tmp_cs->name), scs); + table->field[0]->store(&tmp_cs->cs_name, scs); + table->field[1]->store(&tmp_cs->coll_name, scs); comment= tmp_cs->comment ? 
tmp_cs->comment : ""; table->field[2]->store(comment, strlen(comment), scs); table->field[3]->store((longlong) tmp_cs->mbmaxlen, TRUE); @@ -6280,12 +6311,13 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond) !my_charset_same(tmp_cs, tmp_cl)) continue; if (!(wild && wild[0] && - wild_case_compare(scs, tmp_cl->name,wild))) + wild_case_compare(scs, tmp_cl->coll_name.str, wild))) { const char *tmp_buff; restore_record(table, s->default_values); - table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs); - table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs); + table->field[0]->store(tmp_cl->coll_name.str, tmp_cl->coll_name.length, + scs); + table->field[1]->store(&tmp_cl->cs_name, scs); table->field[2]->store((longlong) tmp_cl->number, TRUE); tmp_buff= (tmp_cl->state & MY_CS_PRIMARY) ? "Yes" : ""; table->field[3]->store(tmp_buff, strlen(tmp_buff), scs); @@ -6325,8 +6357,8 @@ int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond) !my_charset_same(tmp_cs,tmp_cl)) continue; restore_record(table, s->default_values); - table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs); - table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs); + table->field[0]->store(&tmp_cl->coll_name, scs); + table->field[1]->store(&tmp_cl->cs_name, scs); if (schema_table_store_record(thd, table)) return 1; } @@ -6803,6 +6835,12 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, if (key_info->flags & HA_USES_COMMENT) table->field[15]->store(key_info->comment.str, key_info->comment.length, cs); + + // IGNORED column + const char *is_ignored= key_info->is_ignored ? 
"YES" : "NO"; + table->field[16]->store(is_ignored, strlen(is_ignored), cs); + table->field[16]->set_notnull(); + if (schema_table_store_record(thd, table)) DBUG_RETURN(1); } @@ -6933,15 +6971,10 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables, else table->field[7]->store(STRING_WITH_LEN("INVOKER"), cs); - table->field[8]->store(tables->view_creation_ctx->get_client_cs()->csname, - strlen(tables->view_creation_ctx-> - get_client_cs()->csname), cs); - - table->field[9]->store(tables->view_creation_ctx-> - get_connection_cl()->name, - strlen(tables->view_creation_ctx-> - get_connection_cl()->name), cs); - + table->field[8]->store(&tables->view_creation_ctx->get_client_cs()->cs_name, + cs); + table->field[9]->store(&tables->view_creation_ctx-> + get_connection_cl()->coll_name, cs); table->field[10]->store(view_algorithm(tables), cs); if (schema_table_store_record(thd, table)) @@ -7054,7 +7087,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables, if (i != primary_key && !(key_info->flags & HA_NOSAME)) continue; - if (i == primary_key && !strcmp(key_info->name.str, primary_key_name)) + if (i == primary_key && !strcmp(key_info->name.str, primary_key_name.str)) { if (store_constraints(thd, table, db_name, table_name, key_info->name.str, key_info->name.length, @@ -7144,12 +7177,9 @@ static bool store_trigger(THD *thd, Trigger *trigger, sql_mode_string_representation(thd, trigger->sql_mode, &sql_mode_rep); table->field[17]->store(sql_mode_rep.str, sql_mode_rep.length, cs); table->field[18]->store(definer_buffer.str, definer_buffer.length, cs); - table->field[19]->store(trigger->client_cs_name.str, - trigger->client_cs_name.length, cs); - table->field[20]->store(trigger->connection_cl_name.str, - trigger->connection_cl_name.length, cs); - table->field[21]->store(trigger->db_cl_name.str, - trigger->db_cl_name.length, cs); + table->field[19]->store(&trigger->client_cs_name, cs); + table->field[20]->store(&trigger->connection_cl_name, 
cs); + table->field[21]->store(&trigger->db_cl_name, cs); return schema_table_store_record(thd, table); } @@ -7324,7 +7354,7 @@ static void collect_partition_expr(THD *thd, List<const char> &field_list, { append_identifier(thd, str, field_str, strlen(field_str)); if (--no_fields != 0) - str->append(","); + str->append(','); } return; } @@ -7417,7 +7447,7 @@ static int get_partition_column_description(THD *thd, partition_info *part_info, if (col_val->max_value) tmp_str.append(STRING_WITH_LEN("MAXVALUE")); else if (col_val->null_value) - tmp_str.append("NULL"); + tmp_str.append(NULL_clex_str); else { Item *item= col_val->item_expression; @@ -7430,7 +7460,7 @@ static int get_partition_column_description(THD *thd, partition_info *part_info, tmp_str.append(val); } if (i != num_elements - 1) - tmp_str.append(","); + tmp_str.append(','); } DBUG_RETURN(0); } @@ -7591,9 +7621,9 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, tmp_res.length(0); if (part_elem->has_null_value) { - tmp_str.append(STRING_WITH_LEN("NULL")); + tmp_str.append(NULL_clex_str); if (num_items > 0) - tmp_str.append(","); + tmp_str.append(','); } while ((list_value= list_val_it++)) { @@ -7605,7 +7635,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, tmp_str)) DBUG_RETURN(1); if (part_info->part_field_list.elements > 1U) - tmp_str.append(")"); + tmp_str.append(')'); } else { @@ -7616,7 +7646,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, tmp_str.append(tmp_res); } if (--num_items != 0) - tmp_str.append(","); + tmp_str.append(','); } table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs); table->field[11]->set_notnull(); @@ -7836,22 +7866,15 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) store(et.comment.str, et.comment.length, scs); sch_table->field[ISE_CLIENT_CS]->set_notnull(); - sch_table->field[ISE_CLIENT_CS]->store( - et.creation_ctx->get_client_cs()->csname, - 
strlen(et.creation_ctx->get_client_cs()->csname), - scs); - + sch_table->field[ISE_CLIENT_CS]->store(&et.creation_ctx->get_client_cs()-> + cs_name, scs); sch_table->field[ISE_CONNECTION_CL]->set_notnull(); - sch_table->field[ISE_CONNECTION_CL]->store( - et.creation_ctx->get_connection_cl()->name, - strlen(et.creation_ctx->get_connection_cl()->name), - scs); - + sch_table->field[ISE_CONNECTION_CL]->store(&et.creation_ctx-> + get_connection_cl()->coll_name, + scs); sch_table->field[ISE_DB_CL]->set_notnull(); - sch_table->field[ISE_DB_CL]->store( - et.creation_ctx->get_db_cl()->name, - strlen(et.creation_ctx->get_db_cl()->name), - scs); + sch_table->field[ISE_DB_CL]->store(&et.creation_ctx->get_db_cl()->coll_name, + scs); if (schema_table_store_record(thd, sch_table)) DBUG_RETURN(1); @@ -8303,7 +8326,7 @@ int make_schemata_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) if (lex->wild && lex->wild->ptr()) { buffer.append(STRING_WITH_LEN(" (")); - buffer.append(lex->wild->ptr()); + buffer.append(*lex->wild); buffer.append(')'); } field->set_name(thd, &buffer); @@ -8327,7 +8350,7 @@ int make_table_names_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) if (lex->wild && lex->wild->ptr()) { buffer.append(STRING_WITH_LEN(" (")); - buffer.append(lex->wild->ptr()); + buffer.append(*lex->wild); buffer.append(')'); } Item_field *field= new (thd->mem_root) Item_field(thd, context, field_name); @@ -9249,6 +9272,7 @@ ST_FIELD_INFO stat_fields_info[]= Column("COMMENT", Varchar(16), NULLABLE, "Comment", OPEN_FRM_ONLY), Column("INDEX_COMMENT", Varchar(INDEX_COMMENT_MAXLEN), NOT_NULL, "Index_comment",OPEN_FRM_ONLY), + Column("IGNORED", Varchar(3), NOT_NULL, "Ignored", OPEN_FRM_ONLY), CEnd() }; @@ -9889,8 +9913,9 @@ static bool show_create_trigger_impl(THD *thd, Trigger *trigger) sql_mode_string_representation(thd, trigger->sql_mode, &trg_sql_mode_str); /* Resolve trigger client character set. 
*/ - - if (resolve_charset(trigger->client_cs_name.str, NULL, &trg_client_cs)) + myf utf8_flag= thd->get_utf8_flag(); + if (resolve_charset(trigger->client_cs_name.str, NULL, &trg_client_cs, + MYF(utf8_flag))) return TRUE; /* Send header. */ @@ -9912,7 +9937,7 @@ static bool show_create_trigger_impl(THD *thd, Trigger *trigger) (uint)MY_MAX(trg_sql_original_stmt.length, 1024)); - stmt_fld->maybe_null= TRUE; + stmt_fld->set_maybe_null(); fields.push_back(stmt_fld, mem_root); } @@ -9959,17 +9984,11 @@ static bool show_create_trigger_impl(THD *thd, Trigger *trigger) trg_sql_original_stmt.length, trg_client_cs); - p->store(trigger->client_cs_name.str, - trigger->client_cs_name.length, - system_charset_info); + p->store(&trigger->client_cs_name, system_charset_info); - p->store(trigger->connection_cl_name.str, - trigger->connection_cl_name.length, - system_charset_info); + p->store(&trigger->connection_cl_name, system_charset_info); - p->store(trigger->db_cl_name.str, - trigger->db_cl_name.length, - system_charset_info); + p->store(&trigger->db_cl_name, system_charset_info); if (trigger->hr_create_time.val) { @@ -9979,7 +9998,7 @@ static bool show_create_trigger_impl(THD *thd, Trigger *trigger) (my_time_t) hrtime_to_time(trigger->hr_create_time)); timestamp.second_part= hrtime_sec_part(trigger->hr_create_time); - p->store(×tamp, 2); + p->store_datetime(×tamp, 2); } else p->store_null(); @@ -10215,7 +10234,7 @@ char *thd_get_error_context_description(THD *thd, char *buffer, size_t len; len= my_snprintf(header, sizeof(header), - "MySQL thread id %u, OS thread handle %lu, query id %llu", + "MariaDB thread id %u, OS thread handle %lu, query id %llu", (uint)thd->thread_id, (ulong) thd->real_id, (ulonglong) thd->query_id); str.length(0); str.append(header, len); @@ -10223,19 +10242,19 @@ char *thd_get_error_context_description(THD *thd, char *buffer, if (sctx->host) { str.append(' '); - str.append(sctx->host); + str.append(sctx->host, strlen(sctx->host)); } if (sctx->ip) { 
str.append(' '); - str.append(sctx->ip); + str.append(sctx->ip, strlen(sctx->ip)); } if (sctx->user) { str.append(' '); - str.append(sctx->user); + str.append(sctx->user, strlen(sctx->user)); } /* Don't wait if LOCK_thd_data is used as this could cause a deadlock */ @@ -10244,7 +10263,7 @@ char *thd_get_error_context_description(THD *thd, char *buffer, if (const char *info= thread_state_info(thd)) { str.append(' '); - str.append(info); + str.append(info, strlen(info)); } if (thd->query()) @@ -10268,7 +10287,7 @@ char *thd_get_error_context_description(THD *thd, char *buffer, */ DBUG_ASSERT(buffer != NULL); length= MY_MIN(str.length(), length-1); - memcpy(buffer, str.c_ptr_quick(), length); + memcpy(buffer, str.ptr(), length); /* Make sure that the new string is null terminated */ buffer[length]= '\0'; return buffer; diff --git a/sql/sql_signal.cc b/sql/sql_signal.cc index 115f5fa4347..8e973f9b0b3 100644 --- a/sql/sql_signal.cc +++ b/sql/sql_signal.cc @@ -324,7 +324,7 @@ end: set= m_set_signal_information.m_item[i]; if (set) { - if (set->is_fixed()) + if (set->fixed()) set->cleanup(); } } diff --git a/sql/sql_sort.h b/sql/sql_sort.h index a474d7c25e9..6c9a81a32c9 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -558,7 +558,9 @@ public: Bounds_checked_array<SORT_FIELD> local_sortorder; Addon_fields *addon_fields; // Descriptors for companion fields. Sort_keys *sort_keys; + ha_rows *accepted_rows; /* For ROWNUM */ bool using_pq; + bool set_all_read_bits; uchar *unique_buff; bool not_killable; @@ -578,7 +580,9 @@ public: tmp_buffer.set_charset(&my_charset_bin); } void init_for_filesort(uint sortlen, TABLE *table, - ha_rows maxrows, bool sort_positions); + ha_rows maxrows, Filesort *filesort); + + void (*unpack)(TABLE *); /// Enables the packing of addons if possible. 
void try_to_pack_addons(ulong max_length_for_sort_data); diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 51619795eac..3c5e3859939 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -111,12 +111,12 @@ TABLE_FIELD_TYPE table_stat_fields[TABLE_STAT_N_FIELDS] = { { STRING_WITH_LEN("db_name") }, { STRING_WITH_LEN("varchar(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("table_name") }, { STRING_WITH_LEN("varchar(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("cardinality") }, @@ -134,17 +134,17 @@ TABLE_FIELD_TYPE column_stat_fields[COLUMN_STAT_N_FIELDS] = { { STRING_WITH_LEN("db_name") }, { STRING_WITH_LEN("varchar(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("table_name") }, { STRING_WITH_LEN("varchar(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("column_name") }, { STRING_WITH_LEN("varchar(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("min_value") }, @@ -179,7 +179,7 @@ TABLE_FIELD_TYPE column_stat_fields[COLUMN_STAT_N_FIELDS] = { { STRING_WITH_LEN("hist_type") }, { STRING_WITH_LEN("enum('SINGLE_PREC_HB','DOUBLE_PREC_HB')") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("histogram") }, @@ -197,17 +197,17 @@ TABLE_FIELD_TYPE index_stat_fields[INDEX_STAT_N_FIELDS] = { { STRING_WITH_LEN("db_name") }, { STRING_WITH_LEN("varchar(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("table_name") }, { STRING_WITH_LEN("varchar(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("index") }, { STRING_WITH_LEN("varchar(64)") }, - { STRING_WITH_LEN("utf8") } + { STRING_WITH_LEN("utf8mb3") } }, { { STRING_WITH_LEN("prefix_arity") }, diff --git a/sql/sql_string.cc b/sql/sql_string.cc 
index f4fa880eeb3..fbc97ab54fb 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -37,15 +37,15 @@ bool Binary_string::real_alloc(size_t length) DBUG_ASSERT(arg_length > length); if (arg_length <= length) return TRUE; /* Overflow */ + DBUG_ASSERT(length < UINT_MAX32); // cast to uint32 is safe str_length=0; if (Alloced_length < arg_length) { - free(); - if (!(Ptr=(char*) my_malloc(PSI_INSTRUMENT_ME, + free_buffer(); + if (!(Ptr=(char*) my_malloc(STRING_PSI_MEMORY_KEY, arg_length,MYF(MY_WME | (thread_specific ? MY_THREAD_SPECIFIC : 0))))) return TRUE; - DBUG_ASSERT(length < UINT_MAX32); Alloced_length=(uint32) arg_length; alloced=1; } @@ -55,7 +55,8 @@ bool Binary_string::real_alloc(size_t length) /** - Allocates a new buffer on the heap for this String. + Allocates a new buffer on the heap for this String if current buffer is + smaller. - If the String's internal buffer is privately owned and heap allocated, one of the following is performed. @@ -70,7 +71,8 @@ bool Binary_string::real_alloc(size_t length) will be allocated and the string copied accoring to its length, as found in String::length(). - For C compatibility, the new string buffer is null terminated. + For C compatibility, the new string buffer is null terminated if it was + allocated. @param alloc_length The requested string size in characters, excluding any null terminator. @@ -81,9 +83,10 @@ bool Binary_string::real_alloc(size_t length) @retval true An error occurred when attempting to allocate memory. */ + bool Binary_string::realloc_raw(size_t alloc_length) { - if (Alloced_length <= alloc_length) + if (Alloced_length < alloc_length) { char *new_ptr; uint32 len= ALIGN_SIZE(alloc_length+1); @@ -92,13 +95,13 @@ bool Binary_string::realloc_raw(size_t alloc_length) return TRUE; /* Overflow */ if (alloced) { - if (!(new_ptr= (char*) my_realloc(PSI_INSTRUMENT_ME, Ptr,len, + if (!(new_ptr= (char*) my_realloc(STRING_PSI_MEMORY_KEY, Ptr,len, MYF(MY_WME | (thread_specific ? 
MY_THREAD_SPECIFIC : 0))))) return TRUE; // Signal error } - else if ((new_ptr= (char*) my_malloc(PSI_INSTRUMENT_ME, len, + else if ((new_ptr= (char*) my_malloc(STRING_PSI_MEMORY_KEY, len, MYF(MY_WME | (thread_specific ? MY_THREAD_SPECIFIC : 0))))) @@ -118,9 +121,14 @@ bool Binary_string::realloc_raw(size_t alloc_length) return FALSE; } + bool String::set_int(longlong num, bool unsigned_flag, CHARSET_INFO *cs) { - uint l=20*cs->mbmaxlen+1; + /* + This allocates a few bytes extra in the unlikely case that cs->mb_maxlen + > 1, but we can live with that + */ + uint l= LONGLONG_BUFFER_SIZE * cs->mbmaxlen; int base= unsigned_flag ? 10 : -10; if (alloc(l)) @@ -155,8 +163,9 @@ static inline void APPEND_HEX(char *&to, uchar value) } -void Static_binary_string::qs_append_hex(const char *str, uint32 len) +void Binary_string::qs_append_hex(const char *str, uint32 len) { + ASSERT_LENGTH(len*2); const char *str_end= str + len; for (char *to= Ptr + str_length ; str < str_end; str++) APPEND_HEX(to, (uchar) *str); @@ -164,7 +173,7 @@ void Static_binary_string::qs_append_hex(const char *str, uint32 len) } -void Static_binary_string::qs_append_hex_uint32(uint32 num) +void Binary_string::qs_append_hex_uint32(uint32 num) { char *to= Ptr + str_length; APPEND_HEX(to, (uchar) (num >> 24)); @@ -246,7 +255,7 @@ bool Binary_string::copy() */ bool Binary_string::copy(const Binary_string &str) { - if (alloc(str.str_length)) + if (alloc(str.str_length+1)) return TRUE; if ((str_length=str.str_length)) bmove(Ptr,str.Ptr,str_length); // May be overlapping @@ -257,7 +266,7 @@ bool Binary_string::copy(const Binary_string &str) bool Binary_string::copy(const char *str, size_t arg_length) { DBUG_ASSERT(arg_length < UINT_MAX32); - if (alloc(arg_length)) + if (alloc(arg_length+1)) return TRUE; if (Ptr == str && arg_length == uint32(str_length)) { @@ -283,7 +292,7 @@ bool Binary_string::copy(const char *str, size_t arg_length) bool Binary_string::copy_or_move(const char *str, size_t arg_length) { 
DBUG_ASSERT(arg_length < UINT_MAX32); - if (alloc(arg_length)) + if (alloc(arg_length+1)) return TRUE; if ((str_length=uint32(arg_length))) memmove(Ptr,str,arg_length); @@ -400,7 +409,7 @@ bool String::copy_aligned(const char *str, size_t arg_length, size_t offset, DBUG_ASSERT(offset && offset != cs->mbminlen); size_t aligned_length= arg_length + offset; - if (alloc(aligned_length)) + if (alloc(aligned_length+1)) return TRUE; /* @@ -505,16 +514,17 @@ bool String::set_ascii(const char *str, size_t arg_length) /* This is used by mysql.cc */ -bool Binary_string::fill(uint32 max_length,char fill_char) +bool Binary_string::fill(size_t max_length,char fill_char) { + DBUG_ASSERT(max_length < UINT_MAX32); // cast to uint32 is safe if (str_length > max_length) - Ptr[str_length=max_length]=0; + Ptr[str_length= (uint32) max_length]=0; else { if (realloc(max_length)) return TRUE; bfill(Ptr+str_length,max_length-str_length,fill_char); - str_length=max_length; + str_length= (uint32) max_length; } return FALSE; } @@ -532,7 +542,7 @@ void String::strip_sp() bool String::append(const char *s,size_t size) { - DBUG_ASSERT(size <= UINT_MAX32); + DBUG_ASSERT(size <= UINT_MAX32); // cast to uint32 is safe uint32 arg_length= (uint32) size; if (!arg_length) return FALSE; @@ -667,7 +677,7 @@ bool String::append_with_prefill(const char *s,uint32 arg_length, } -int Static_binary_string::strstr(const Static_binary_string &s, uint32 offset) +int Binary_string::strstr(const Binary_string &s, uint32 offset) { if (s.length()+offset <= str_length) { @@ -698,7 +708,7 @@ skip: ** Search string from end. 
Offset is offset to the end of string */ -int Static_binary_string::strrstr(const Static_binary_string &s, uint32 offset) +int Binary_string::strrstr(const Binary_string &s, uint32 offset) { if (s.length() <= offset && offset <= str_length) { @@ -768,38 +778,43 @@ int Binary_string::reserve(size_t space_needed, size_t grow_by) return FALSE; } -void Static_binary_string::qs_append(const char *str, size_t len) +void Binary_string::qs_append(const char *str, size_t len) { + ASSERT_LENGTH(len); memcpy(Ptr + str_length, str, len + 1); str_length += (uint32)len; } -void Static_binary_string::qs_append(double d) +void Binary_string::qs_append(double d) { char *buff = Ptr + str_length; - str_length+= (uint32) my_gcvt(d, MY_GCVT_ARG_DOUBLE, FLOATING_POINT_BUFFER - 1, buff, - NULL); + size_t length= my_gcvt(d, MY_GCVT_ARG_DOUBLE, FLOATING_POINT_BUFFER - 1, + buff, NULL); + ASSERT_LENGTH(length); + str_length+= (uint32) length; } -void Static_binary_string::qs_append(const double *d) +void Binary_string::qs_append(const double *d) { double ld; float8get(ld, (const char*) d); qs_append(ld); } -void Static_binary_string::qs_append(int i) +void Binary_string::qs_append(int i) { char *buff= Ptr + str_length; char *end= int10_to_str(i, buff, -10); - str_length+= (int) (end-buff); + ASSERT_LENGTH((size_t) (end-buff)); + str_length+= (uint32) (end-buff); } -void Static_binary_string::qs_append(ulonglong i) +void Binary_string::qs_append(ulonglong i) { char *buff= Ptr + str_length; char *end= longlong10_to_str(i, buff, 10); - str_length+= (int) (end-buff); + ASSERT_LENGTH((size_t) (end-buff)); + str_length+= (uint32) (end-buff); } @@ -844,10 +859,9 @@ bool Binary_string::copy_printable_hhhh(CHARSET_INFO *to_cs, */ -int sortcmp(const String *s,const String *t, CHARSET_INFO *cs) +int sortcmp(const Binary_string *s, const Binary_string *t, CHARSET_INFO *cs) { - return cs->strnncollsp(s->ptr(), s->length(), - t->ptr(), t->length()); + return cs->strnncollsp(s->ptr(), s->length(), 
t->ptr(), t->length()); } @@ -869,7 +883,7 @@ int sortcmp(const String *s,const String *t, CHARSET_INFO *cs) */ -int stringcmp(const String *s,const String *t) +int stringcmp(const Binary_string *s, const Binary_string *t) { uint32 s_len=s->length(),t_len=t->length(),len=MY_MIN(s_len,t_len); int cmp= len ? memcmp(s->ptr(), t->ptr(), len) : 0; @@ -1115,7 +1129,8 @@ uint String_copier::well_formed_copy(CHARSET_INFO *to_cs, char *to, size_t to_length, CHARSET_INFO *from_cs, - const char *from, size_t from_length, size_t nchars) + const char *from, size_t from_length, + size_t nchars) { if ((to_cs == &my_charset_bin) || (from_cs == &my_charset_bin) || @@ -1262,24 +1277,16 @@ bool String::append_semi_hex(const char *s, uint len, CHARSET_INFO *cs) return false; } + // Shrink the buffer, but only if it is allocated on the heap. void Binary_string::shrink(size_t arg_length) { - if (!is_alloced()) - return; - if (ALIGN_SIZE(arg_length + 1) < Alloced_length) - { - char* new_ptr; - if (!(new_ptr = (char*)my_realloc(STRING_PSI_MEMORY_KEY, Ptr, arg_length, - MYF(thread_specific ? MY_THREAD_SPECIFIC : 0)))) - { - Alloced_length = 0; - real_alloc(arg_length); - } - else - { - Ptr = new_ptr; - Alloced_length = (uint32)arg_length; - } - } + if (is_alloced() && ALIGN_SIZE(arg_length + 1) < Alloced_length) + { + /* my_realloc() can't fail as new buffer is less than the original one */ + Ptr= (char*) my_realloc(STRING_PSI_MEMORY_KEY, Ptr, arg_length, + MYF(thread_specific ? 
+ MY_THREAD_SPECIFIC : 0)); + Alloced_length= (uint32) arg_length; + } } diff --git a/sql/sql_string.h b/sql/sql_string.h index 06fc2c073d0..b1f02bdb43b 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -39,16 +39,19 @@ extern PSI_memory_key key_memory_String_value; typedef struct st_io_cache IO_CACHE; typedef struct st_mem_root MEM_ROOT; +#define ASSERT_LENGTH(A) DBUG_ASSERT(str_length + (uint32) (A) <= Alloced_length) #include "pack.h" -int sortcmp(const String *a,const String *b, CHARSET_INFO *cs); +class Binary_string; +int sortcmp(const Binary_string *s, const Binary_string *t, CHARSET_INFO *cs); +int stringcmp(const Binary_string *s, const Binary_string *t); String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); -inline uint32 copy_and_convert(char *to, size_t to_length, - CHARSET_INFO *to_cs, +inline uint32 copy_and_convert(char *to, size_t to_length, CHARSET_INFO *to_cs, const char *from, size_t from_length, CHARSET_INFO *from_cs, uint *errors) { - return my_convert(to, (uint)to_length, to_cs, from, (uint)from_length, from_cs, errors); + return my_convert(to, (uint)to_length, to_cs, from, (uint)from_length, + from_cs, errors); } @@ -110,7 +113,8 @@ public: "dstcs" and "srccs" cannot be &my_charset_bin. */ size_t convert_fix(CHARSET_INFO *dstcs, char *dst, size_t dst_length, - CHARSET_INFO *srccs, const char *src, size_t src_length, size_t nchars) + CHARSET_INFO *srccs, const char *src, size_t src_length, + size_t nchars) { return my_convert_fix(dstcs, dst, dst_length, srccs, src, src_length, nchars, this, this); @@ -119,10 +123,12 @@ public: Copy a string. Fix bad bytes/characters to '?'. */ uint well_formed_copy(CHARSET_INFO *to_cs, char *to, size_t to_length, - CHARSET_INFO *from_cs, const char *from, size_t from_length, size_t nchars); + CHARSET_INFO *from_cs, const char *from, + size_t from_length, size_t nchars); // Same as above, but without the "nchars" limit. 
uint well_formed_copy(CHARSET_INFO *to_cs, char *to, size_t to_length, - CHARSET_INFO *from_cs, const char *from, size_t from_length) + CHARSET_INFO *from_cs, const char *from, + size_t from_length) { return well_formed_copy(to_cs, to, to_length, from_cs, from, from_length, @@ -183,7 +189,7 @@ public: } bool same_encoding(const Charset &other) const { - return !strcmp(m_charset->csname, other.m_charset->csname); + return my_charset_same(m_charset, other.m_charset); } /* Collation name without the character set name. @@ -200,26 +206,75 @@ public: }; -/* - A storage for String. - Should be eventually derived from LEX_STRING. +/** + Storage for strings with both length and allocated length. + Automatically grows on demand. */ -class Static_binary_string : public Sql_alloc + +class Binary_string: public Sql_alloc { protected: char *Ptr; - uint32 str_length; + uint32 str_length, Alloced_length, extra_alloc; + bool alloced, thread_specific; + void init_private_data() + { + Ptr= 0; + Alloced_length= extra_alloc= str_length= 0; + alloced= thread_specific= false; + } + inline void free_buffer() + { + if (alloced) + { + alloced=0; + my_free(Ptr); + } + } public: - Static_binary_string() - :Ptr(NULL), - str_length(0) - { } - Static_binary_string(char *str, size_t length_arg) - :Ptr(str), - str_length((uint32) length_arg) + Binary_string() + { + init_private_data(); + } + explicit Binary_string(size_t length_arg) + { + init_private_data(); + (void) real_alloc(length_arg); + } + /* + NOTE: If one intend to use the c_ptr() method, the following two + contructors need the size of memory for STR to be at least LEN+1 (to make + room for zero termination). 
+ */ + Binary_string(const char *str, size_t len) + { + Ptr= (char*) str; + str_length= (uint32) len; + Alloced_length= 0; /* Memory cannot be written to */ + extra_alloc= 0; + alloced= thread_specific= 0; + } + Binary_string(char *str, size_t len) + { + Ptr= str; + str_length= Alloced_length= (uint32) len; + extra_alloc= 0; + alloced= thread_specific= 0; + } + explicit Binary_string(const Binary_string &str) + { + Ptr= str.Ptr; + str_length= str.str_length; + Alloced_length= str.Alloced_length; + extra_alloc= 0; + alloced= thread_specific= 0; + } + + ~Binary_string() { - DBUG_ASSERT(length_arg < UINT_MAX32); + free(); } + inline uint32 length() const { return str_length;} inline char& operator [] (size_t i) const { return Ptr[i]; } inline void length(size_t len) { str_length=(uint32)len ; } @@ -236,24 +291,12 @@ public: return false; } - bool bin_eq(const Static_binary_string *other) const + bool bin_eq(const Binary_string *other) const { return length() == other->length() && !memcmp(ptr(), other->ptr(), length()); } - void set(char *str, size_t len) - { - Ptr= str; - str_length= (uint32) len; - } - - void swap(Static_binary_string &s) - { - swap_variables(char *, Ptr, s.Ptr); - swap_variables(uint32, str_length, s.str_length); - } - /* PMG 2004.11.12 This is a method that works the same as perl's "chop". 
It simply @@ -277,47 +320,57 @@ public: */ inline void chop() { - str_length--; - Ptr[str_length]= '\0'; - DBUG_ASSERT(strlen(Ptr) == str_length); + if (str_length) + { + str_length--; + Ptr[str_length]= '\0'; + DBUG_ASSERT(strlen(Ptr) == str_length); + } } // Returns offset to substring or -1 - int strstr(const Static_binary_string &search, uint32 offset=0); + int strstr(const Binary_string &search, uint32 offset=0); // Returns offset to substring or -1 - int strrstr(const Static_binary_string &search, uint32 offset=0); + int strrstr(const Binary_string &search, uint32 offset=0); /* - The following append operations do NOT check alloced memory + The following append operations do not extend the strings and in production + mode do NOT check that alloced memory! q_*** methods writes values of parameters itself qs_*** methods writes string representation of value */ void q_append(const char c) { + ASSERT_LENGTH(1); Ptr[str_length++] = c; } void q_append2b(const uint32 n) { + ASSERT_LENGTH(2); int2store(Ptr + str_length, n); str_length += 2; } void q_append(const uint32 n) { + ASSERT_LENGTH(4); int4store(Ptr + str_length, n); str_length += 4; } void q_append(double d) { + ASSERT_LENGTH(8); float8store(Ptr + str_length, d); str_length += 8; } void q_append(double *d) { + ASSERT_LENGTH(8); float8store(Ptr + str_length, *d); str_length += 8; } void q_append(const char *data, size_t data_len) { + ASSERT_LENGTH(data_len); if (data_len) memcpy(Ptr + str_length, data, data_len); DBUG_ASSERT(str_length <= UINT_MAX32 - data_len); @@ -331,15 +384,12 @@ public: q_append(ls->str, (uint32) ls->length); } - void write_at_position(int position, uint32 value) + void write_at_position(uint32 position, uint32 value) { + DBUG_ASSERT(str_length >= position + 4); int4store(Ptr + position,value); } - void qs_append(const char *str) - { - qs_append(str, (uint32)strlen(str)); - } void qs_append(const LEX_CSTRING *ls) { DBUG_ASSERT(ls->length < UINT_MAX32 && @@ -354,8 +404,9 @@ public: void 
qs_append(const double *d); inline void qs_append(const char c) { - Ptr[str_length]= c; - str_length++; + ASSERT_LENGTH(1); + Ptr[str_length]= c; + str_length++; } void qs_append(int i); void qs_append(uint i) @@ -369,61 +420,11 @@ public: void qs_append(ulonglong i); void qs_append(longlong i, int radix) { + ASSERT_LENGTH(22); char *buff= Ptr + str_length; char *end= ll2str(i, buff, radix, 0); - str_length+= uint32(end-buff); + str_length+= (uint32) (end-buff); } -}; - - -class Binary_string: public Static_binary_string -{ - uint32 Alloced_length, extra_alloc; - bool alloced, thread_specific; - void init_private_data() - { - Alloced_length= extra_alloc= 0; - alloced= thread_specific= false; - } -public: - Binary_string() - { - init_private_data(); - } - explicit Binary_string(size_t length_arg) - { - init_private_data(); - (void) real_alloc(length_arg); - } - explicit Binary_string(const char *str) - :Binary_string(str, strlen(str)) - { } - /* - NOTE: If one intend to use the c_ptr() method, the following two - contructors need the size of memory for STR to be at least LEN+1 (to make - room for zero termination). - */ - Binary_string(const char *str, size_t len) - :Static_binary_string((char *) str, len) - { - init_private_data(); - } - Binary_string(char *str, size_t len) - :Static_binary_string(str, len) - { - Alloced_length= (uint32) len; - extra_alloc= 0; - alloced= thread_specific= 0; - } - explicit Binary_string(const Binary_string &str) - :Static_binary_string(str) - { - Alloced_length= str.Alloced_length; - extra_alloc= 0; - alloced= thread_specific= 0; - } - - ~Binary_string() { free(); } /* Mark variable thread specific it it's not allocated already */ inline void set_thread_specific() @@ -439,13 +440,14 @@ public: inline bool uses_buffer_owned_by(const Binary_string *s) const { - return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length); + return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->Alloced_length); } /* Swap two string objects. 
Efficient way to exchange data without memcpy. */ void swap(Binary_string &s) { - Static_binary_string::swap(s); + swap_variables(char *, Ptr, s.Ptr); + swap_variables(uint32, str_length, s.str_length); swap_variables(uint32, Alloced_length, s.Alloced_length); swap_variables(bool, alloced, s.alloced); } @@ -457,31 +459,41 @@ public: null character. @note The new buffer will not be null terminated. */ - void set_alloced(char *str, size_t length_arg, size_t alloced_length_arg) + void set_alloced(char *str, size_t length, size_t alloced_length) { - free(); - Static_binary_string::set(str, length_arg); - DBUG_ASSERT(alloced_length_arg < UINT_MAX32); - Alloced_length= (uint32) alloced_length_arg; + free_buffer(); + Ptr= str; + str_length= (uint32) length; + DBUG_ASSERT(alloced_length < UINT_MAX32); + Alloced_length= (uint32) alloced_length; } inline void set(char *str, size_t arg_length) { set_alloced(str, arg_length, arg_length); } - inline void set(const char *str, size_t arg_length) + inline void set(const char *str, size_t length) { - free(); - Static_binary_string::set((char *) str, arg_length); + free_buffer(); + Ptr= (char*) str; + str_length= (uint32) length; + Alloced_length= 0; } - void set(Binary_string &str, size_t offset, size_t arg_length) + void set(Binary_string &str, size_t offset, size_t length) { DBUG_ASSERT(&str != this); - free(); - Static_binary_string::set((char*) str.ptr() + offset, arg_length); + free_buffer(); + Ptr= str.Ptr + offset; + str_length= (uint32) length; + Alloced_length= 0; if (str.Alloced_length) Alloced_length= (uint32) (str.Alloced_length - offset); } + LEX_CSTRING to_lex_cstring() const + { + LEX_CSTRING tmp= {Ptr, str_length}; + return tmp; + } inline LEX_CSTRING *get_value(LEX_CSTRING *res) { res->str= Ptr; @@ -500,18 +512,29 @@ public: char *release() { char *old= Ptr; - Static_binary_string::set(NULL, 0); init_private_data(); return old; } - inline void set_quick(char *str, size_t arg_length) + /* + This is used to set a new 
buffer for String. + However if the String already has an allocated buffer, it will + keep that one. + It's not to be used to set the value or length of the string. + */ + inline void set_buffer_if_not_allocated(char *str, size_t arg_length) { if (!alloced) { - Static_binary_string::set(str, arg_length); - Alloced_length= (uint32) arg_length; + /* + Following should really set str_length= 0, but some code may + depend on that the String length is same as buffer length. + */ + Ptr= str; + str_length= Alloced_length= (uint32) arg_length; } + /* One should set str_length before using it */ + MEM_UNDEFINED(&str_length, sizeof(str_length)); } inline Binary_string& operator=(const Binary_string &s) @@ -611,19 +634,49 @@ public: inline char *c_ptr() { - DBUG_ASSERT(!alloced || !Ptr || !Alloced_length || - (Alloced_length >= (str_length + 1))); - - if (!Ptr || Ptr[str_length]) // Should be safe - (void) realloc(str_length); + if (unlikely(!Ptr)) + return (char*) ""; + /* + Here we assume that any buffer used to initalize String has + an end \0 or have at least an accessable character at end. + This is to handle the case of String("Hello",5) and + String("hello",5) efficiently. + + We have two options here. To test for !Alloced_length or !alloced. + Using "Alloced_length" is slightly safer so that we do not read + from potentially unintialized memory (normally not dangerous but + may give warnings in valgrind), but "alloced" is safer as there + are less change to get memory loss from code that is using + String((char*), length) or String.set((char*), length) and does + not free things properly (and there is several places in the code + where this happens and it is hard to find out if any of these will call + c_ptr(). 
+ */ + if (unlikely(!alloced && !Ptr[str_length])) + return Ptr; + if (str_length < Alloced_length) + { + Ptr[str_length]=0; + return Ptr; + } + (void) realloc(str_length); /* This will add end \0 */ return Ptr; } + /* + One should use c_ptr() instead for most cases. This will be deleted soon, + kept for compatiblity. + */ inline char *c_ptr_quick() { - if (Ptr && str_length < Alloced_length) - Ptr[str_length]=0; - return Ptr; + return c_ptr_safe(); } + /* + This is to be used only in the case when one cannot use c_ptr(). + The cases are: + - When one initializes String with an external buffer and length and + buffer[length] could be uninitalized when c_ptr() is called. + - When valgrind gives warnings about uninitialized memory with c_ptr(). + */ inline char *c_ptr_safe() { if (Ptr && str_length < Alloced_length) @@ -635,17 +688,28 @@ public: inline void free() { - if (alloced) - { - alloced=0; - my_free(Ptr); - } + free_buffer(); + /* + We have to clear the values as some Strings, like in Field, are + reused after free(). Because of this we cannot use MEM_UNDEFINED() here. + */ + Ptr= 0; + str_length= 0; Alloced_length= extra_alloc= 0; - Static_binary_string::set(NULL, 0); // Safety } + inline bool alloc(size_t arg_length) { - if (arg_length < Alloced_length) + /* + Allocate if we need more space or if we don't have done any + allocation yet (we don't want to have Ptr to be NULL for empty strings). + + Note that if arg_length == Alloced_length then we don't allocate. + This ensures we don't do any extra allocations in protocol and String:int, + but the string will not be automatically null terminated if c_ptr() is not + called. 
+ */ + if (arg_length <= Alloced_length && Alloced_length) return 0; return real_alloc(arg_length); } @@ -653,7 +717,7 @@ public: bool realloc_raw(size_t arg_length); bool realloc(size_t arg_length) { - if (realloc_raw(arg_length)) + if (realloc_raw(arg_length+1)) return TRUE; Ptr[arg_length]= 0; // This make other funcs shorter return FALSE; @@ -687,13 +751,13 @@ public: thread_specific= s.thread_specific; s.alloced= 0; } - bool fill(uint32 max_length,char fill); + bool fill(size_t max_length,char fill); /* Replace substring with string If wrong parameter or not enough memory, do nothing */ bool replace(uint32 offset,uint32 arg_length, const char *to, uint32 length); - bool replace(uint32 offset,uint32 arg_length, const Static_binary_string &to) + bool replace(uint32 offset,uint32 arg_length, const Binary_string &to) { return replace(offset,arg_length,to.ptr(),to.length()); } @@ -740,12 +804,7 @@ class String: public Charset, public Binary_string { public: String() { } - String(size_t length_arg) - :Binary_string(length_arg) - { } - String(const char *str, CHARSET_INFO *cs) - :Charset(cs), - Binary_string(str) + String(size_t length_arg) :Binary_string(length_arg) { } /* NOTE: If one intend to use the c_ptr() method, the following two @@ -753,16 +812,13 @@ public: room for zero termination). 
*/ String(const char *str, size_t len, CHARSET_INFO *cs) - :Charset(cs), - Binary_string((char *) str, len) + :Charset(cs), Binary_string(str, len) { } String(char *str, size_t len, CHARSET_INFO *cs) - :Charset(cs), - Binary_string(str, len) + :Charset(cs), Binary_string(str, len) { } String(const String &str) - :Charset(str), - Binary_string(str) + :Charset(str), Binary_string(str) { } void set(String &str,size_t offset,size_t arg_length) @@ -781,9 +837,10 @@ public: set_charset(cs); } bool set_ascii(const char *str, size_t arg_length); - inline void set_quick(char *str,size_t arg_length, CHARSET_INFO *cs) + inline void set_buffer_if_not_allocated(char *str,size_t arg_length, + CHARSET_INFO *cs) { - Binary_string::set_quick(str, arg_length); + Binary_string::set_buffer_if_not_allocated(str, arg_length); set_charset(cs); } bool set_int(longlong num, bool unsigned_flag, CHARSET_INFO *cs); @@ -879,7 +936,8 @@ public: if (unlikely(alloc(tocs->mbmaxlen * src_length))) return true; str_length= copier->well_formed_copy(tocs, Ptr, alloced_length(), - fromcs, src, (uint)src_length, (uint)nchars); + fromcs, src, (uint) src_length, + (uint) nchars); set_charset(tocs); return false; } @@ -903,8 +961,8 @@ public: bool append_introducer_and_hex(const String *str) { return - append(STRING_WITH_LEN("_")) || - append(str->charset()->csname) || + append('_') || + append(str->charset()->cs_name) || append(STRING_WITH_LEN(" 0x")) || append_hex(str->ptr(), (uint32) str->length()); } @@ -918,10 +976,6 @@ public: } // Append with optional character set conversion from ASCII (e.g. 
to UCS2) - bool append(const char *s) - { - return append(s, strlen(s)); - } bool append(const LEX_STRING *ls) { DBUG_ASSERT(ls->length < UINT_MAX32 && @@ -972,7 +1026,7 @@ public: @param names - an array of flag names @param count - the number of available elements in "names" */ - bool append_flag32_names(uint32 flags, const char *names[], size_t count) + bool append_flag32_names(uint32 flags, LEX_CSTRING names[], size_t count) { bool added= false; if (flags && append('(')) @@ -984,7 +1038,7 @@ public: { if (added && append('|')) return true; - if (append(bit < count ? names[bit] : "?")) + if (bit < count ? append(names[bit]) : append('?')) return true; added= true; } @@ -995,8 +1049,6 @@ public: } void strip_sp(); - friend int sortcmp(const String *a,const String *b, CHARSET_INFO *cs); - friend int stringcmp(const String *a,const String *b); friend String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); friend class Field; uint32 numchars() const @@ -1029,12 +1081,6 @@ public: { return append_for_single_quote(s->ptr(), s->length()); } - bool append_for_single_quote(const char *st) - { - size_t len= strlen(st); - DBUG_ASSERT(len < UINT_MAX32); - return append_for_single_quote(st, (uint32) len); - } void swap(String &s) { @@ -1086,6 +1132,18 @@ public: { length(0); } + void set_buffer_if_not_allocated(CHARSET_INFO *cs) + { + if (!is_alloced()) + { + Ptr= buff; + Alloced_length= (uint32) buff_sz; + } + str_length= 0; /* Safety, not required */ + /* One should set str_length before using it */ + MEM_UNDEFINED(&str_length, sizeof(str_length)); + set_charset(cs); + } }; @@ -1097,18 +1155,6 @@ public: BinaryStringBuffer() : Binary_string(buff, buff_sz) { length(0); } }; - -class String_space: public String -{ -public: - String_space(uint n) - { - if (fill(n, ' ')) - set("", 0, &my_charset_bin); - } -}; - - static inline bool check_if_only_end_space(CHARSET_INFO *cs, const char *str, const char *end) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 
519a5f38868..a951b5d6abb 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -49,22 +49,27 @@ #include "sp_head.h" #include "sp.h" #include "sql_trigger.h" -#include "sql_parse.h" #include "sql_show.h" #include "transaction.h" #include "sql_audit.h" #include "sql_sequence.h" #include "tztime.h" -#include "sql_insert.h" // binlog_drop_table +#include "sql_insert.h" // binlog_drop_table +#include "ddl_log.h" +#include "debug.h" // debug_crash_here() #include <algorithm> #include "wsrep_mysqld.h" #include "sql_debug.h" -#ifdef __WIN__ +#ifdef _WIN32 #include <io.h> #endif -const char *primary_key_name="PRIMARY"; +const LEX_CSTRING primary_key_name= { STRING_WITH_LEN("PRIMARY") }; +static const LEX_CSTRING generated_by_server= +{ STRING_WITH_LEN(" /* generated by server */") }; +static const LEX_CSTRING SEQUENCE_clex_str= { STRING_WITH_LEN("SEQUENCE") }; +static const LEX_CSTRING TABLE_clex_str= { STRING_WITH_LEN("TABLE") }; static int check_if_keyname_exists(const char *name,KEY *start, KEY *end); static char *make_unique_key_name(THD *, const char *, KEY *, KEY *); @@ -547,8 +552,13 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db, (void) tablename_to_filename(db, dbbuff, sizeof(dbbuff)); - /* Check if this is a temporary table name. Allow it if a corresponding .frm file exists */ - if (is_prefix(table_name, tmp_file_prefix) && strlen(table_name) < NAME_CHAR_LEN && + /* + Check if this is a temporary table name. Allow it if a corresponding .frm + file exists. 
+ */ + if (!(flags & FN_IS_TMP) && + is_prefix(table_name, tmp_file_prefix) && + strlen(table_name) < NAME_CHAR_LEN && check_if_frm_exists(tbbuff, dbbuff, table_name)) flags|= FN_IS_TMP; @@ -558,13 +568,16 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db, (void) tablename_to_filename(table_name, tbbuff, sizeof(tbbuff)); char *end = buff + bufflen; - /* Don't add FN_ROOTDIR if mysql_data_home already includes it */ - char *pos = strnmov(buff, mysql_data_home, bufflen); - size_t rootdir_len= strlen(FN_ROOTDIR); - if (pos - rootdir_len >= buff && - memcmp(pos - rootdir_len, FN_ROOTDIR, rootdir_len) != 0) - pos= strnmov(pos, FN_ROOTDIR, end - pos); - pos= strxnmov(pos, end - pos, dbbuff, FN_ROOTDIR, NullS); + char *pos= strnmov(buff, mysql_data_home, bufflen-3); + /* + Add FN_LIBCHAR if mysql_data_home does not include it + In most cases mysql_data_home is just '.' + */ + if (pos[-1] != FN_LIBCHAR) + *pos++= FN_LIBCHAR; + pos= strxnmov(pos, end - 2 - pos, dbbuff,NullS); + *pos++= FN_LIBCHAR; + *pos= 0; #ifdef USE_SYMDIR if (!(flags & SKIP_SYMDIR_ACCESS)) { @@ -615,1138 +628,31 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen) } /* --------------------------------------------------------------------------- - - MODULE: DDL log - ----------------- - - This module is used to ensure that we can recover from crashes that occur - in the middle of a meta-data operation in MySQL. E.g. DROP TABLE t1, t2; - We need to ensure that both t1 and t2 are dropped and not only t1 and - also that each table drop is entirely done and not "half-baked". - - To support this we create log entries for each meta-data statement in the - ddl log while we are executing. These entries are dropped when the - operation is completed. - - At recovery those entries that were not completed will be executed. - - There is only one ddl log in the system and it is protected by a mutex - and there is a global struct that contains information about its current - state. 
- - History: - First version written in 2006 by Mikael Ronstrom --------------------------------------------------------------------------- -*/ - -struct st_global_ddl_log -{ - /* - We need to adjust buffer size to be able to handle downgrades/upgrades - where IO_SIZE has changed. We'll set the buffer size such that we can - handle that the buffer size was upto 4 times bigger in the version - that wrote the DDL log. - */ - char file_entry_buf[4*IO_SIZE]; - char file_name_str[FN_REFLEN]; - char *file_name; - DDL_LOG_MEMORY_ENTRY *first_free; - DDL_LOG_MEMORY_ENTRY *first_used; - uint num_entries; - File file_id; - uint name_len; - uint io_size; - bool inited; - bool do_release; - bool recovery_phase; - st_global_ddl_log() : inited(false), do_release(false) {} -}; - -st_global_ddl_log global_ddl_log; - -mysql_mutex_t LOCK_gdl; - -#define DDL_LOG_ENTRY_TYPE_POS 0 -#define DDL_LOG_ACTION_TYPE_POS 1 -#define DDL_LOG_PHASE_POS 2 -#define DDL_LOG_NEXT_ENTRY_POS 4 -#define DDL_LOG_NAME_POS 8 - -#define DDL_LOG_NUM_ENTRY_POS 0 -#define DDL_LOG_NAME_LEN_POS 4 -#define DDL_LOG_IO_SIZE_POS 8 - -/** - Read one entry from ddl log file. - - @param entry_no Entry number to read - - @return Operation status - @retval true Error - @retval false Success -*/ - -static bool read_ddl_log_file_entry(uint entry_no) -{ - bool error= FALSE; - File file_id= global_ddl_log.file_id; - uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; - size_t io_size= global_ddl_log.io_size; - DBUG_ENTER("read_ddl_log_file_entry"); - - mysql_mutex_assert_owner(&LOCK_gdl); - if (mysql_file_pread(file_id, file_entry_buf, io_size, io_size * entry_no, - MYF(MY_WME)) != io_size) - error= TRUE; - DBUG_RETURN(error); -} - - -/** - Write one entry to ddl log file. 
- - @param entry_no Entry number to write - - @return Operation status - @retval true Error - @retval false Success -*/ - -static bool write_ddl_log_file_entry(uint entry_no) -{ - bool error= FALSE; - File file_id= global_ddl_log.file_id; - uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; - DBUG_ENTER("write_ddl_log_file_entry"); - - mysql_mutex_assert_owner(&LOCK_gdl); - if (mysql_file_pwrite(file_id, file_entry_buf, - IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE) - error= TRUE; - DBUG_RETURN(error); -} - - -/** - Sync the ddl log file. - - @return Operation status - @retval FALSE Success - @retval TRUE Error -*/ - - -static bool sync_ddl_log_file() -{ - DBUG_ENTER("sync_ddl_log_file"); - DBUG_RETURN(mysql_file_sync(global_ddl_log.file_id, MYF(MY_WME))); -} - - -/** - Write ddl log header. - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -static bool write_ddl_log_header() -{ - uint16 const_var; - DBUG_ENTER("write_ddl_log_header"); - - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS], - global_ddl_log.num_entries); - const_var= FN_REFLEN; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS], - (ulong) const_var); - const_var= IO_SIZE; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS], - (ulong) const_var); - if (write_ddl_log_file_entry(0UL)) - { - sql_print_error("Error writing ddl log header"); - DBUG_RETURN(TRUE); - } - DBUG_RETURN(sync_ddl_log_file()); -} - - -/** - Create ddl log file name. - @param file_name Filename setup -*/ - -static inline void create_ddl_log_file_name(char *file_name) -{ - strxmov(file_name, mysql_data_home, "/", "ddl_log.log", NullS); -} - - -/** - Read header of ddl log file. - - When we read the ddl log header we get information about maximum sizes - of names in the ddl log and we also get information about the number - of entries in the ddl log. 
- - @return Last entry in ddl log (0 if no entries) -*/ - -static uint read_ddl_log_header() -{ - uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; - char file_name[FN_REFLEN]; - uint entry_no; - bool successful_open= FALSE; - DBUG_ENTER("read_ddl_log_header"); - - mysql_mutex_init(key_LOCK_gdl, &LOCK_gdl, MY_MUTEX_INIT_SLOW); - mysql_mutex_lock(&LOCK_gdl); - create_ddl_log_file_name(file_name); - if ((global_ddl_log.file_id= mysql_file_open(key_file_global_ddl_log, - file_name, - O_RDWR | O_BINARY, MYF(0))) >= 0) - { - if (read_ddl_log_file_entry(0UL)) - { - /* Write message into error log */ - sql_print_error("Failed to read ddl log file in recovery"); - } - else - successful_open= TRUE; - } - if (successful_open) - { - entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]); - global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]); - global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]); - DBUG_ASSERT(global_ddl_log.io_size <= - sizeof(global_ddl_log.file_entry_buf)); - } - else - { - entry_no= 0; - } - global_ddl_log.first_free= NULL; - global_ddl_log.first_used= NULL; - global_ddl_log.num_entries= 0; - global_ddl_log.do_release= true; - mysql_mutex_unlock(&LOCK_gdl); - DBUG_RETURN(entry_no); -} - - -/** - Convert from ddl_log_entry struct to file_entry_buf binary blob. - - @param ddl_log_entry filled in ddl_log_entry struct. 
-*/ - -static void set_global_from_ddl_log_entry(const DDL_LOG_ENTRY *ddl_log_entry) -{ - mysql_mutex_assert_owner(&LOCK_gdl); - global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= - (char)DDL_LOG_ENTRY_CODE; - global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= - (char)ddl_log_entry->action_type; - global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0; - int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], - ddl_log_entry->next_entry); - DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_REFLEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS], - ddl_log_entry->name, FN_REFLEN - 1); - if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION || - ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION || - ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION) - { - DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_REFLEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN], - ddl_log_entry->from_name, FN_REFLEN - 1); - } - else - global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN]= 0; - DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_REFLEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_REFLEN)], - ddl_log_entry->handler_name, FN_REFLEN - 1); - if (ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION) - { - DBUG_ASSERT(strlen(ddl_log_entry->tmp_name) < FN_REFLEN); - strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)], - ddl_log_entry->tmp_name, FN_REFLEN - 1); - } - else - global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)]= 0; -} - - -/** - Convert from file_entry_buf binary blob to ddl_log_entry struct. - - @param[out] ddl_log_entry struct to fill in. - - @note Strings (names) are pointing to the global_ddl_log structure, - so LOCK_gdl needs to be hold until they are read or copied. 
-*/ - -static void set_ddl_log_entry_from_global(DDL_LOG_ENTRY *ddl_log_entry, - const uint read_entry) -{ - char *file_entry_buf= (char*) global_ddl_log.file_entry_buf; - uint inx; - uchar single_char; - - mysql_mutex_assert_owner(&LOCK_gdl); - ddl_log_entry->entry_pos= read_entry; - single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]; - ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char; - single_char= file_entry_buf[DDL_LOG_ACTION_TYPE_POS]; - ddl_log_entry->action_type= (enum ddl_log_action_code)single_char; - ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS]; - ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]); - ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS]; - inx= DDL_LOG_NAME_POS + global_ddl_log.name_len; - ddl_log_entry->from_name= &file_entry_buf[inx]; - inx+= global_ddl_log.name_len; - ddl_log_entry->handler_name= &file_entry_buf[inx]; - if (ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION) - { - inx+= global_ddl_log.name_len; - ddl_log_entry->tmp_name= &file_entry_buf[inx]; - } - else - ddl_log_entry->tmp_name= NULL; -} - - -/** - Read a ddl log entry. - - Read a specified entry in the ddl log. - - @param read_entry Number of entry to read - @param[out] entry_info Information from entry - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -static bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry) -{ - DBUG_ENTER("read_ddl_log_entry"); - - if (read_ddl_log_file_entry(read_entry)) - { - DBUG_RETURN(TRUE); - } - set_ddl_log_entry_from_global(ddl_log_entry, read_entry); - DBUG_RETURN(FALSE); -} - - -/** - Initialise ddl log. - - Write the header of the ddl log file and length of names. Also set - number of entries to zero. 
- - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -static bool init_ddl_log() -{ - char file_name[FN_REFLEN]; - DBUG_ENTER("init_ddl_log"); - - if (global_ddl_log.inited) - goto end; - - global_ddl_log.io_size= IO_SIZE; - global_ddl_log.name_len= FN_REFLEN; - create_ddl_log_file_name(file_name); - if ((global_ddl_log.file_id= mysql_file_create(key_file_global_ddl_log, - file_name, CREATE_MODE, - O_RDWR | O_TRUNC | O_BINARY, - MYF(MY_WME))) < 0) - { - /* Couldn't create ddl log file, this is serious error */ - sql_print_error("Failed to open ddl log file"); - DBUG_RETURN(TRUE); - } - global_ddl_log.inited= TRUE; - if (write_ddl_log_header()) - { - (void) mysql_file_close(global_ddl_log.file_id, MYF(MY_WME)); - global_ddl_log.inited= FALSE; - DBUG_RETURN(TRUE); - } - -end: - DBUG_RETURN(FALSE); -} - - -/** - Sync ddl log file. - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -static bool sync_ddl_log_no_lock() -{ - DBUG_ENTER("sync_ddl_log_no_lock"); - - mysql_mutex_assert_owner(&LOCK_gdl); - if ((!global_ddl_log.recovery_phase) && - init_ddl_log()) - { - DBUG_RETURN(TRUE); - } - DBUG_RETURN(sync_ddl_log_file()); -} - - -/** - @brief Deactivate an individual entry. - - @details For complex rename operations we need to deactivate individual - entries. - - During replace operations where we start with an existing table called - t1 and a replacement table called t1#temp or something else and where - we want to delete t1 and rename t1#temp to t1 this is not possible to - do in a safe manner unless the ddl log is informed of the phases in - the change. - - Delete actions are 1-phase actions that can be ignored immediately after - being executed. - Rename actions from x to y is also a 1-phase action since there is no - interaction with any other handlers named x and y. - Replace action where drop y and x -> y happens needs to be a two-phase - action. 
Thus the first phase will drop y and the second phase will - rename x -> y. - - @param entry_no Entry position of record to change - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -static bool deactivate_ddl_log_entry_no_lock(uint entry_no) -{ - uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; - DBUG_ENTER("deactivate_ddl_log_entry_no_lock"); - - mysql_mutex_assert_owner(&LOCK_gdl); - if (!read_ddl_log_file_entry(entry_no)) - { - if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE) - { - /* - Log entry, if complete mark it done (IGNORE). - Otherwise increase the phase by one. - */ - if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION || - file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION || - (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION && - file_entry_buf[DDL_LOG_PHASE_POS] == 1) || - (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_EXCHANGE_ACTION && - file_entry_buf[DDL_LOG_PHASE_POS] >= EXCH_PHASE_TEMP_TO_FROM)) - file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE; - else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION) - { - DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0); - file_entry_buf[DDL_LOG_PHASE_POS]= 1; - } - else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_EXCHANGE_ACTION) - { - DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] <= - EXCH_PHASE_FROM_TO_NAME); - file_entry_buf[DDL_LOG_PHASE_POS]++; - } - else - { - DBUG_ASSERT(0); - } - if (write_ddl_log_file_entry(entry_no)) - { - sql_print_error("Error in deactivating log entry. 
Position = %u", - entry_no); - DBUG_RETURN(TRUE); - } - } - } - else - { - sql_print_error("Failed in reading entry before deactivating it"); - DBUG_RETURN(TRUE); - } - DBUG_RETURN(FALSE); -} - - -/** - Execute one action in a ddl log entry - - @param ddl_log_entry Information in action entry to execute - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) -{ - bool frm_action= FALSE; - LEX_CSTRING handler_name; - handler *file= NULL; - MEM_ROOT mem_root; - int error= 1; - char to_path[FN_REFLEN]; - char from_path[FN_REFLEN]; - handlerton *hton; - DBUG_ENTER("execute_ddl_log_action"); - - mysql_mutex_assert_owner(&LOCK_gdl); - if (ddl_log_entry->entry_type == DDL_IGNORE_LOG_ENTRY_CODE) - { - DBUG_RETURN(FALSE); - } - DBUG_PRINT("ddl_log", - ("execute type %c next %u name '%s' from_name '%s' handler '%s'" - " tmp_name '%s'", - ddl_log_entry->action_type, - ddl_log_entry->next_entry, - ddl_log_entry->name, - ddl_log_entry->from_name, - ddl_log_entry->handler_name, - ddl_log_entry->tmp_name)); - handler_name.str= (char*)ddl_log_entry->handler_name; - handler_name.length= strlen(ddl_log_entry->handler_name); - init_sql_alloc(key_memory_gdl, &mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, - MYF(MY_THREAD_SPECIFIC)); - if (!strcmp(ddl_log_entry->handler_name, reg_ext)) - frm_action= TRUE; - else - { - plugin_ref plugin= ha_resolve_by_name(thd, &handler_name, false); - if (!plugin) - { - my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), ddl_log_entry->handler_name); - goto error; - } - hton= plugin_data(plugin, handlerton*); - file= get_new_handler((TABLE_SHARE*)0, &mem_root, hton); - if (unlikely(!file)) - goto error; - } - switch (ddl_log_entry->action_type) - { - case DDL_LOG_REPLACE_ACTION: - case DDL_LOG_DELETE_ACTION: - { - if (ddl_log_entry->phase == 0) - { - if (frm_action) - { - strxmov(to_path, ddl_log_entry->name, reg_ext, NullS); - if (unlikely((error= 
mysql_file_delete(key_file_frm, to_path, - MYF(MY_WME | - MY_IGNORE_ENOENT))))) - break; -#ifdef WITH_PARTITION_STORAGE_ENGINE - strxmov(to_path, ddl_log_entry->name, PAR_EXT, NullS); - (void) mysql_file_delete(key_file_partition_ddl_log, to_path, - MYF(0)); -#endif - } - else - { - if (unlikely((error= hton->drop_table(hton, ddl_log_entry->name)))) - { - if (!non_existing_table_error(error)) - break; - } - } - if ((deactivate_ddl_log_entry_no_lock(ddl_log_entry->entry_pos))) - break; - (void) sync_ddl_log_no_lock(); - error= 0; - if (ddl_log_entry->action_type == DDL_LOG_DELETE_ACTION) - break; - } - DBUG_ASSERT(ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION); - /* - Fall through and perform the rename action of the replace - action. We have already indicated the success of the delete - action in the log entry by stepping up the phase. - */ - } - /* fall through */ - case DDL_LOG_RENAME_ACTION: - { - error= TRUE; - if (frm_action) - { - strxmov(to_path, ddl_log_entry->name, reg_ext, NullS); - strxmov(from_path, ddl_log_entry->from_name, reg_ext, NullS); - if (mysql_file_rename(key_file_frm, from_path, to_path, MYF(MY_WME))) - break; -#ifdef WITH_PARTITION_STORAGE_ENGINE - strxmov(to_path, ddl_log_entry->name, PAR_EXT, NullS); - strxmov(from_path, ddl_log_entry->from_name, PAR_EXT, NullS); - (void) mysql_file_rename(key_file_partition_ddl_log, from_path, to_path, MYF(MY_WME)); -#endif - } - else - { - if (file->ha_rename_table(ddl_log_entry->from_name, - ddl_log_entry->name)) - break; - } - if ((deactivate_ddl_log_entry_no_lock(ddl_log_entry->entry_pos))) - break; - (void) sync_ddl_log_no_lock(); - error= FALSE; - break; - } - case DDL_LOG_EXCHANGE_ACTION: - { - /* We hold LOCK_gdl, so we can alter global_ddl_log.file_entry_buf */ - char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf; - /* not yet implemented for frm */ - DBUG_ASSERT(!frm_action); - /* - Using a case-switch here to revert all currently done phases, - since it will fall through until 
the first phase is undone. - */ - switch (ddl_log_entry->phase) { - case EXCH_PHASE_TEMP_TO_FROM: - /* tmp_name -> from_name possibly done */ - (void) file->ha_rename_table(ddl_log_entry->from_name, - ddl_log_entry->tmp_name); - /* decrease the phase and sync */ - file_entry_buf[DDL_LOG_PHASE_POS]--; - if (write_ddl_log_file_entry(ddl_log_entry->entry_pos)) - break; - if (sync_ddl_log_no_lock()) - break; - /* fall through */ - case EXCH_PHASE_FROM_TO_NAME: - /* from_name -> name possibly done */ - (void) file->ha_rename_table(ddl_log_entry->name, - ddl_log_entry->from_name); - /* decrease the phase and sync */ - file_entry_buf[DDL_LOG_PHASE_POS]--; - if (write_ddl_log_file_entry(ddl_log_entry->entry_pos)) - break; - if (sync_ddl_log_no_lock()) - break; - /* fall through */ - case EXCH_PHASE_NAME_TO_TEMP: - /* name -> tmp_name possibly done */ - (void) file->ha_rename_table(ddl_log_entry->tmp_name, - ddl_log_entry->name); - /* disable the entry and sync */ - file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE; - if (write_ddl_log_file_entry(ddl_log_entry->entry_pos)) - break; - if (sync_ddl_log_no_lock()) - break; - error= FALSE; - break; - default: - DBUG_ASSERT(0); - break; - } - - break; - } - default: - DBUG_ASSERT(0); - break; - } - delete file; -error: - free_root(&mem_root, MYF(0)); - DBUG_RETURN(error); -} - - -/** - Get a free entry in the ddl log - - @param[out] active_entry A ddl log memory entry returned - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry, - bool *write_header) -{ - DDL_LOG_MEMORY_ENTRY *used_entry; - DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used; - DBUG_ENTER("get_free_ddl_log_entry"); - - if (global_ddl_log.first_free == NULL) - { - if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc(key_memory_DDL_LOG_MEMORY_ENTRY, - sizeof(DDL_LOG_MEMORY_ENTRY), MYF(MY_WME)))) - { - sql_print_error("Failed to allocate memory 
for ddl log free list"); - DBUG_RETURN(TRUE); - } - global_ddl_log.num_entries++; - used_entry->entry_pos= global_ddl_log.num_entries; - *write_header= TRUE; - } - else - { - used_entry= global_ddl_log.first_free; - global_ddl_log.first_free= used_entry->next_log_entry; - *write_header= FALSE; - } - /* - Move from free list to used list - */ - used_entry->next_log_entry= first_used; - used_entry->prev_log_entry= NULL; - used_entry->next_active_log_entry= NULL; - global_ddl_log.first_used= used_entry; - if (first_used) - first_used->prev_log_entry= used_entry; - - *active_entry= used_entry; - DBUG_RETURN(FALSE); -} - - -/** - Execute one entry in the ddl log. - - Executing an entry means executing a linked list of actions. - - @param first_entry Reference to first action in entry - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -static bool execute_ddl_log_entry_no_lock(THD *thd, uint first_entry) -{ - DDL_LOG_ENTRY ddl_log_entry; - uint read_entry= first_entry; - DBUG_ENTER("execute_ddl_log_entry_no_lock"); - - mysql_mutex_assert_owner(&LOCK_gdl); - do - { - if (read_ddl_log_entry(read_entry, &ddl_log_entry)) - { - /* Write to error log and continue with next log entry */ - sql_print_error("Failed to read entry = %u from ddl log", - read_entry); - break; - } - DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE || - ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE); - - if (execute_ddl_log_action(thd, &ddl_log_entry)) - { - /* Write to error log and continue with next log entry */ - sql_print_error("Failed to execute action for entry = %u from ddl log", - read_entry); - break; - } - read_entry= ddl_log_entry.next_entry; - } while (read_entry); - DBUG_RETURN(FALSE); -} - - -/* - External interface methods for the DDL log Module - --------------------------------------------------- -*/ - -/** - Write a ddl log entry. 
- - A careful write of the ddl log is performed to ensure that we can - handle crashes occurring during CREATE and ALTER TABLE processing. - - @param ddl_log_entry Information about log entry - @param[out] entry_written Entry information written into - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, - DDL_LOG_MEMORY_ENTRY **active_entry) -{ - bool error, write_header; - DBUG_ENTER("write_ddl_log_entry"); - - mysql_mutex_assert_owner(&LOCK_gdl); - if (init_ddl_log()) - { - DBUG_RETURN(TRUE); - } - set_global_from_ddl_log_entry(ddl_log_entry); - if (get_free_ddl_log_entry(active_entry, &write_header)) - { - DBUG_RETURN(TRUE); - } - error= FALSE; - DBUG_PRINT("ddl_log", - ("write type %c next %u name '%s' from_name '%s' handler '%s'" - " tmp_name '%s'", - (char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS], - ddl_log_entry->next_entry, - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS], - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS - + FN_REFLEN], - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS - + (2*FN_REFLEN)], - (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS - + (3*FN_REFLEN)])); - if (unlikely(write_ddl_log_file_entry((*active_entry)->entry_pos))) - { - error= TRUE; - sql_print_error("Failed to write entry_no = %u", - (*active_entry)->entry_pos); - } - if (write_header && likely(!error)) - { - (void) sync_ddl_log_no_lock(); - if (write_ddl_log_header()) - error= TRUE; - } - if (unlikely(error)) - release_ddl_log_memory_entry(*active_entry); - DBUG_RETURN(error); -} - - -/** - @brief Write final entry in the ddl log. - - @details This is the last write in the ddl log. The previous log entries - have already been written but not yet synched to disk. - We write a couple of log entries that describes action to perform. 
- This entries are set-up in a linked list, however only when a first - execute entry is put as the first entry these will be executed. - This routine writes this first. - - @param first_entry First entry in linked list of entries - to execute, if 0 = NULL it means that - the entry is removed and the entries - are put into the free list. - @param complete Flag indicating we are simply writing - info about that entry has been completed - @param[in,out] active_entry Entry to execute, 0 = NULL if the entry - is written first time and needs to be - returned. In this case the entry written - is returned in this parameter - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -bool write_execute_ddl_log_entry(uint first_entry, - bool complete, - DDL_LOG_MEMORY_ENTRY **active_entry) -{ - bool write_header= FALSE; - char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; - DBUG_ENTER("write_execute_ddl_log_entry"); - - mysql_mutex_assert_owner(&LOCK_gdl); - if (init_ddl_log()) - { - DBUG_RETURN(TRUE); - } - if (!complete) - { - /* - We haven't synched the log entries yet, we synch them now before - writing the execute entry. If complete is true we haven't written - any log entries before, we are only here to write the execute - entry to indicate it is done. 
- */ - (void) sync_ddl_log_no_lock(); - file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_EXECUTE_CODE; - } - else - file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE; - file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */ - file_entry_buf[DDL_LOG_PHASE_POS]= 0; - int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry); - file_entry_buf[DDL_LOG_NAME_POS]= 0; - file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN]= 0; - file_entry_buf[DDL_LOG_NAME_POS + 2*FN_REFLEN]= 0; - if (!(*active_entry)) - { - if (get_free_ddl_log_entry(active_entry, &write_header)) - { - DBUG_RETURN(TRUE); - } - write_header= TRUE; - } - if (write_ddl_log_file_entry((*active_entry)->entry_pos)) - { - sql_print_error("Error writing execute entry in ddl log"); - release_ddl_log_memory_entry(*active_entry); - DBUG_RETURN(TRUE); - } - (void) sync_ddl_log_no_lock(); - if (write_header) - { - if (write_ddl_log_header()) - { - release_ddl_log_memory_entry(*active_entry); - DBUG_RETURN(TRUE); - } - } - DBUG_RETURN(FALSE); -} - - -/** - Deactivate an individual entry. - - @details see deactivate_ddl_log_entry_no_lock. - - @param entry_no Entry position of record to change - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -bool deactivate_ddl_log_entry(uint entry_no) -{ - bool error; - DBUG_ENTER("deactivate_ddl_log_entry"); - - mysql_mutex_lock(&LOCK_gdl); - error= deactivate_ddl_log_entry_no_lock(entry_no); - mysql_mutex_unlock(&LOCK_gdl); - DBUG_RETURN(error); -} - - -/** - Sync ddl log file. - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -bool sync_ddl_log() -{ - bool error; - DBUG_ENTER("sync_ddl_log"); - - mysql_mutex_lock(&LOCK_gdl); - error= sync_ddl_log_no_lock(); - mysql_mutex_unlock(&LOCK_gdl); - - DBUG_RETURN(error); -} - - -/** - Release a log memory entry. 
- @param log_memory_entry Log memory entry to release -*/ - -void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry) -{ - DDL_LOG_MEMORY_ENTRY *first_free= global_ddl_log.first_free; - DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry; - DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry; - DBUG_ENTER("release_ddl_log_memory_entry"); - - mysql_mutex_assert_owner(&LOCK_gdl); - global_ddl_log.first_free= log_entry; - log_entry->next_log_entry= first_free; - - if (prev_log_entry) - prev_log_entry->next_log_entry= next_log_entry; - else - global_ddl_log.first_used= next_log_entry; - if (next_log_entry) - next_log_entry->prev_log_entry= prev_log_entry; - DBUG_VOID_RETURN; -} - - -/** - Execute one entry in the ddl log. - - Executing an entry means executing a linked list of actions. - - @param first_entry Reference to first action in entry - - @return Operation status - @retval TRUE Error - @retval FALSE Success -*/ - -bool execute_ddl_log_entry(THD *thd, uint first_entry) -{ - bool error; - DBUG_ENTER("execute_ddl_log_entry"); - - mysql_mutex_lock(&LOCK_gdl); - error= execute_ddl_log_entry_no_lock(thd, first_entry); - mysql_mutex_unlock(&LOCK_gdl); - DBUG_RETURN(error); -} - - -/** - Close the ddl log. -*/ - -static void close_ddl_log() -{ - DBUG_ENTER("close_ddl_log"); - if (global_ddl_log.file_id >= 0) - { - (void) mysql_file_close(global_ddl_log.file_id, MYF(MY_WME)); - global_ddl_log.file_id= (File) -1; - } - DBUG_VOID_RETURN; -} - - -/** - Execute the ddl log at recovery of MySQL Server. 
+ Create lower case paths for engines that requires them */ -void execute_ddl_log_recovery() +void build_lower_case_table_filename(char *buff, size_t bufflen, + const LEX_CSTRING *db, + const LEX_CSTRING *table, + uint flags) { - uint num_entries, i; - THD *thd; - DDL_LOG_ENTRY ddl_log_entry; - char file_name[FN_REFLEN]; - static char recover_query_string[]= "INTERNAL DDL LOG RECOVER IN PROGRESS"; - DBUG_ENTER("execute_ddl_log_recovery"); - - /* - Initialise global_ddl_log struct - */ - bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf)); - global_ddl_log.inited= FALSE; - global_ddl_log.recovery_phase= TRUE; - global_ddl_log.io_size= IO_SIZE; - global_ddl_log.file_id= (File) -1; - - /* - To be able to run this from boot, we allocate a temporary THD - */ - if (!(thd=new THD(0))) - DBUG_VOID_RETURN; - thd->thread_stack= (char*) &thd; - thd->store_globals(); + char table_name[SAFE_NAME_LEN+1], db_name[SAFE_NAME_LEN+1]; - thd->set_query(recover_query_string, strlen(recover_query_string)); - - /* this also initialize LOCK_gdl */ - num_entries= read_ddl_log_header(); - mysql_mutex_lock(&LOCK_gdl); - for (i= 1; i < num_entries + 1; i++) - { - if (read_ddl_log_entry(i, &ddl_log_entry)) - { - sql_print_error("Failed to read entry no = %u from ddl log", - i); - continue; - } - if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE) - { - if (execute_ddl_log_entry_no_lock(thd, ddl_log_entry.next_entry)) - { - /* Real unpleasant scenario but we continue anyways. 
*/ - continue; - } - } - } - close_ddl_log(); - create_ddl_log_file_name(file_name); - (void) mysql_file_delete(key_file_global_ddl_log, file_name, MYF(0)); - global_ddl_log.recovery_phase= FALSE; - mysql_mutex_unlock(&LOCK_gdl); - thd->reset_query(); - delete thd; - DBUG_VOID_RETURN; -} + DBUG_ASSERT(db->length <= SAFE_NAME_LEN && table->length <= SAFE_NAME_LEN); + memcpy(db_name, db->str, db->length); + db_name[db->length]= 0; + my_casedn_str(files_charset_info, db_name); -/** - Release all memory allocated to the ddl log. -*/ + memcpy(table_name, table->str, table->length); + table_name[table->length]= 0; + my_casedn_str(files_charset_info, table_name); -void release_ddl_log() -{ - DDL_LOG_MEMORY_ENTRY *free_list; - DDL_LOG_MEMORY_ENTRY *used_list; - DBUG_ENTER("release_ddl_log"); - - if (!global_ddl_log.do_release) - DBUG_VOID_RETURN; - - mysql_mutex_lock(&LOCK_gdl); - free_list= global_ddl_log.first_free; - used_list= global_ddl_log.first_used; - while (used_list) - { - DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry; - my_free(used_list); - used_list= tmp; - } - while (free_list) - { - DDL_LOG_MEMORY_ENTRY *tmp= free_list->next_log_entry; - my_free(free_list); - free_list= tmp; - } - close_ddl_log(); - global_ddl_log.inited= 0; - mysql_mutex_unlock(&LOCK_gdl); - mysql_mutex_destroy(&LOCK_gdl); - global_ddl_log.do_release= false; - DBUG_VOID_RETURN; + build_table_filename(buff, bufflen, db_name, table_name, "", + flags & FN_IS_TMP); } -/* ---------------------------------------------------------------------------- - - END MODULE DDL log - -------------------- - ---------------------------------------------------------------------------- -*/ - - /** @brief construct a temporary shadow file name. 
@@ -1898,8 +804,8 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) #ifdef WITH_PARTITION_STORAGE_ENGINE lpt->table->file->ha_create_partitioning_metadata(path, shadow_path, CHF_DELETE_FLAG) || - deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos) || - (sync_ddl_log(), FALSE) || + ddl_log_increment_phase(part_info->frm_log_entry->entry_pos) || + (ddl_log_sync(), FALSE) || mysql_file_rename(key_file_frm, shadow_frm_name, frm_name, MYF(MY_WME)) || lpt->table->file->ha_create_partitioning_metadata(path, shadow_path, @@ -1946,9 +852,9 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) err: #ifdef WITH_PARTITION_STORAGE_ENGINE - deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos); + ddl_log_increment_phase(part_info->frm_log_entry->entry_pos); part_info->frm_log_entry= NULL; - (void) sync_ddl_log(); + (void) ddl_log_sync(); #endif ; } @@ -2130,15 +1036,16 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists, { LEX_CSTRING db_name= table->db; LEX_CSTRING table_name= table->table_name; - if (table->open_type == OT_BASE_ONLY || - !thd->find_temporary_table(table)) + if (!is_temporary_table(table)) (void) delete_statistics_for_table(thd, &db_name, &table_name); } } /* mark for close and remove all cached entries */ thd->push_internal_handler(&err_handler); - error= mysql_rm_table_no_locks(thd, tables, if_exists, drop_temporary, + error= mysql_rm_table_no_locks(thd, tables, &thd->db, (DDL_LOG_STATE*) 0, + if_exists, + drop_temporary, false, drop_sequence, dont_log_query, false); thd->pop_internal_handler(); @@ -2165,14 +1072,15 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists, @retval >0 the lenght of the comment found */ -static uint32 comment_length(THD *thd, uint32 comment_pos, - const char **comment_start) +static uint32 get_comment(THD *thd, uint32 comment_pos, + const char **comment_start) { /* We use uchar * here to make array indexing portable */ const uchar *query= (uchar*) 
thd->query(); const uchar *query_end= (uchar*) query + thd->query_length(); const uchar *const state_map= thd->charset()->state_map; + *comment_start= ""; // Ensure it points to something for (; query < query_end; query++) { if (state_map[static_cast<uchar>(*query)] == MY_LEX_SKIP) @@ -2195,10 +1103,15 @@ static uint32 comment_length(THD *thd, uint32 comment_pos, } /** - Execute the drop of a normal or temporary table. + Execute the drop of a sequence, view or table (normal or temporary). @param thd Thread handler @param tables Tables to drop + @param current_db Current database, used for ddl logs + @param ddl_log_state DDL log state, for global ddl logging (used by + DROP DATABASE. If not set, an internal ddl log state + will be used. If set then the caller must call + ddl_log_complete(ddl_log_state); @param if_exists If set, don't give an error if table doesn't exists. In this case we give an warning of level 'NOTE' @param drop_temporary Only drop temporary tables @@ -2227,7 +1140,10 @@ static uint32 comment_length(THD *thd, uint32 comment_pos, not all. 
*/ -int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, +int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, + const LEX_CSTRING *current_db, + DDL_LOG_STATE *ddl_log_state, + bool if_exists, bool drop_temporary, bool drop_view, bool drop_sequence, bool dont_log_query, @@ -2236,19 +1152,33 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, TABLE_LIST *table; char path[FN_REFLEN + 1]; LEX_CSTRING alias= null_clex_str; + LEX_CUSTRING version; + LEX_CSTRING partition_engine_name= {NULL, 0}; StringBuffer<160> unknown_tables(system_charset_info); + DDL_LOG_STATE local_ddl_log_state; + const char *comment_start; + uint table_count= 0, non_temp_tables_count= 0; int error= 0; - int non_temp_tables_count= 0; + uint32 comment_len; bool trans_tmp_table_deleted= 0, non_trans_tmp_table_deleted= 0; - bool non_tmp_table_deleted= 0; - bool is_drop_tmp_if_exists_added= 0; - bool was_view= 0, log_if_exists= if_exists; - const char *object_to_drop= (drop_sequence) ? "SEQUENCE" : "TABLE"; + bool is_drop_tmp_if_exists_added= 0, non_tmp_table_deleted= 0; + bool log_if_exists= if_exists; + const LEX_CSTRING *object_to_drop= ((drop_sequence) ? + &SEQUENCE_clex_str : + &TABLE_clex_str); String normal_tables; String built_trans_tmp_query, built_non_trans_tmp_query; DBUG_ENTER("mysql_rm_table_no_locks"); + if (!ddl_log_state) + { + ddl_log_state= &local_ddl_log_state; + bzero(ddl_log_state, sizeof(*ddl_log_state)); + } + unknown_tables.length(0); + comment_len= get_comment(thd, if_exists ? 
17:9, &comment_start); + /* Prepares the drop statements that will be written into the binary log as follows: @@ -2288,13 +1218,13 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, if (!dont_log_query) { built_trans_tmp_query.set_charset(system_charset_info); - built_trans_tmp_query.append("DROP TEMPORARY "); + built_trans_tmp_query.append(STRING_WITH_LEN("DROP TEMPORARY ")); built_trans_tmp_query.append(object_to_drop); built_trans_tmp_query.append(' '); if (thd->is_current_stmt_binlog_format_row() || if_exists) { is_drop_tmp_if_exists_added= true; - built_trans_tmp_query.append("IF EXISTS "); + built_trans_tmp_query.append(STRING_WITH_LEN("IF EXISTS ")); } built_non_trans_tmp_query.set_charset(system_charset_info); built_non_trans_tmp_query.copy(built_trans_tmp_query); @@ -2305,9 +1235,11 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, bool is_trans= 0, temporary_table_was_dropped= 0; bool table_creation_was_logged= 0; bool wrong_drop_sequence= 0; - bool table_dropped= 0; + bool table_dropped= 0, res; + bool is_temporary= 0; const LEX_CSTRING db= table->db; const LEX_CSTRING table_name= table->table_name; + LEX_CSTRING cpath= {0,0}; handlerton *hton= 0; Table_type table_type; size_t path_length= 0; @@ -2370,6 +1302,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, table->table= 0; temporary_table_was_dropped= 1; } + is_temporary= 1; } if ((drop_temporary && if_exists) || temporary_table_was_dropped) @@ -2401,10 +1334,10 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, is_drop_tmp_if_exists_added ) { append_identifier(thd, built_ptr_query, &db); - built_ptr_query->append("."); + built_ptr_query->append('.'); } append_identifier(thd, built_ptr_query, &table_name); - built_ptr_query->append(","); + built_ptr_query->append(','); } /* This means that a temporary table was droped and as such there @@ -2440,11 +1373,14 @@ int mysql_rm_table_no_locks(THD *thd, 
TABLE_LIST *tables, bool if_exists, continue; } + lex_string_set3(&cpath, path, (size_t) (path_end - path)); + { char engine_buf[NAME_CHAR_LEN + 1]; LEX_CSTRING engine= { engine_buf, 0 }; - table_type= dd_frm_type(thd, path, &engine); + table_type= dd_frm_type(thd, path, &engine, &partition_engine_name, + &version); if (table_type == TABLE_TYPE_NORMAL || table_type == TABLE_TYPE_SEQUENCE) { plugin_ref p= plugin_lock_by_name(thd, &engine, @@ -2455,7 +1391,18 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, } thd->replication_flags= 0; - was_view= table_type == TABLE_TYPE_VIEW; + const bool was_view= table_type == TABLE_TYPE_VIEW; + + if (!table_count++) + { + LEX_CSTRING comment= {comment_start, (size_t) comment_len}; + if (ddl_log_drop_table_init(thd, ddl_log_state, current_db, &comment)) + { + error= 1; + goto err; + } + } + if ((table_type == TABLE_TYPE_UNKNOWN) || (was_view && !drop_view) || (table_type != TABLE_TYPE_SEQUENCE && drop_sequence)) { @@ -2468,10 +1415,12 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, wrong_drop_sequence= drop_sequence && hton; error= table_type == TABLE_TYPE_UNKNOWN ? 
ENOENT : -1; tdc_remove_table(thd, db.str, table_name.str); + if (wrong_drop_sequence) + goto report_error; } else { - if (WSREP(thd) && hton && !wsrep_should_replicate_ddl(thd, hton->db_type)) + if (WSREP(thd) && hton && !wsrep_should_replicate_ddl(thd, hton)) { error= 1; goto err; @@ -2503,7 +1452,23 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, log_if_exists= 1; bool enoent_warning= !dont_log_query && !(hton && hton->discover_table); - error= ha_delete_table(thd, hton, path, &db, &table_name, enoent_warning); + + if (was_view) + res= ddl_log_drop_view(thd, ddl_log_state, &cpath, &db, + &table_name); + else + res= ddl_log_drop_table(thd, ddl_log_state, hton, &cpath, &db, + &table_name); + if (res) + { + error= -1; + goto err; + } + + debug_crash_here("ddl_log_drop_before_delete_table"); + error= ha_delete_table(thd, hton, path, &db, &table_name, + enoent_warning); + debug_crash_here("ddl_log_drop_after_delete_table"); if (!error) table_dropped= 1; @@ -2565,11 +1530,18 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, scan all engines try to drop the table from there. This is to ensure we don't have any partial table files left. 
*/ - if (non_existing_table_error(error) && !wrong_drop_sequence) + if (non_existing_table_error(error)) { int ferror= 0; DBUG_ASSERT(!was_view); + if (ddl_log_drop_table(thd, ddl_log_state, 0, &cpath, &db, + &table_name)) + { + error= -1; + goto err; + } + /* Remove extension for delete */ *path_end= '\0'; ferror= ha_delete_table_force(thd, path, &db, &table_name); @@ -2601,13 +1573,22 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, if (thd->replication_flags & OPTION_IF_EXISTS) log_if_exists= 1; + if (!was_view) + { + debug_crash_here("ddl_log_drop_before_drop_trigger"); + ddl_log_update_phase(ddl_log_state, DDL_DROP_PHASE_TRIGGER); + debug_crash_here("ddl_log_drop_before_drop_trigger2"); + } + if (likely(!error) || non_existing_table_error(error)) { if (Table_triggers_list::drop_all_triggers(thd, &db, &table_name, MYF(MY_WME | MY_IGNORE_ENOENT))) error= error ? error : -1; } + debug_crash_here("ddl_log_drop_after_drop_trigger"); +report_error: if (error) { StringBuffer<FN_REFLEN> tbl_name(system_charset_info); @@ -2655,7 +1636,24 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, db.str, (uint)db.length, table_name.str, (uint)table_name.length); mysql_audit_drop_table(thd, table); + if (!is_temporary) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("DROP") }; + if ((ddl_log.org_partitioned= (partition_engine_name.str != 0))) + ddl_log.org_storage_engine_name= partition_engine_name; + else + lex_string_set(&ddl_log.org_storage_engine_name, + ha_resolve_storage_engine_name(hton)); + ddl_log.org_database= table->db; + ddl_log.org_table= table->table_name; + ddl_log.org_table_id= version; + backup_log_ddl(&ddl_log); + } } + if (!was_view) + ddl_log_update_phase(ddl_log_state, DDL_DROP_PHASE_BINLOG); if (!dont_log_query && (!error || table_dropped || non_existing_table_error(error))) @@ -2668,11 +1666,11 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST 
*tables, bool if_exists, if (thd->db.str == NULL || cmp(&db, &thd->db) != 0) { append_identifier(thd, &normal_tables, &db); - normal_tables.append("."); + normal_tables.append('.'); } append_identifier(thd, &normal_tables, &table_name); - normal_tables.append(","); + normal_tables.append(','); } DBUG_PRINT("table", ("table: %p s: %p", table->table, table->table ? table->table->s : NULL)); @@ -2714,62 +1712,66 @@ err: if (!dont_log_query && mysql_bin_log.is_open()) { + debug_crash_here("ddl_log_drop_before_binlog"); if (non_trans_tmp_table_deleted) { - /* Chop of the last comma */ - built_non_trans_tmp_query.chop(); - built_non_trans_tmp_query.append(" /* generated by server */"); - error |= (thd->binlog_query(THD::STMT_QUERY_TYPE, - built_non_trans_tmp_query.ptr(), - built_non_trans_tmp_query.length(), - FALSE, FALSE, - is_drop_tmp_if_exists_added, - 0) > 0); + /* Chop of the last comma */ + built_non_trans_tmp_query.chop(); + built_non_trans_tmp_query.append(generated_by_server); + error |= (thd->binlog_query(THD::STMT_QUERY_TYPE, + built_non_trans_tmp_query.ptr(), + built_non_trans_tmp_query.length(), + FALSE, FALSE, + is_drop_tmp_if_exists_added, + 0) > 0); } if (trans_tmp_table_deleted) { - /* Chop of the last comma */ - built_trans_tmp_query.chop(); - built_trans_tmp_query.append(" /* generated by server */"); - error |= (thd->binlog_query(THD::STMT_QUERY_TYPE, - built_trans_tmp_query.ptr(), - built_trans_tmp_query.length(), - TRUE, FALSE, - is_drop_tmp_if_exists_added, - 0) > 0); + /* Chop of the last comma */ + built_trans_tmp_query.chop(); + built_trans_tmp_query.append(generated_by_server); + error |= (thd->binlog_query(THD::STMT_QUERY_TYPE, + built_trans_tmp_query.ptr(), + built_trans_tmp_query.length(), + TRUE, FALSE, + is_drop_tmp_if_exists_added, + 0) > 0); } if (non_tmp_table_deleted) { String built_query; - const char *comment_start; - uint32 comment_len; built_query.set_charset(thd->charset()); - built_query.append("DROP "); + 
built_query.append(STRING_WITH_LEN("DROP ")); built_query.append(object_to_drop); built_query.append(' '); if (log_if_exists) - built_query.append("IF EXISTS "); + built_query.append(STRING_WITH_LEN("IF EXISTS ")); /* Preserve comment in original query */ - if ((comment_len= comment_length(thd, if_exists ? 17:9, - &comment_start))) + if (comment_len) { built_query.append(comment_start, comment_len); - built_query.append(" "); + built_query.append(' '); } /* Chop of the last comma */ normal_tables.chop(); built_query.append(normal_tables.ptr(), normal_tables.length()); - built_query.append(" /* generated by server */"); + built_query.append(generated_by_server); + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(ddl_log_state, thd->binlog_xid); error |= (thd->binlog_query(THD::STMT_QUERY_TYPE, built_query.ptr(), built_query.length(), TRUE, FALSE, FALSE, 0) > 0); + thd->binlog_xid= 0; } + debug_crash_here("ddl_log_drop_after_binlog"); } } + if (ddl_log_state == &local_ddl_log_state) + ddl_log_complete(ddl_log_state); if (!drop_temporary) { @@ -2829,41 +1831,56 @@ end: bool log_drop_table(THD *thd, const LEX_CSTRING *db_name, const LEX_CSTRING *table_name, + const LEX_CSTRING *handler_name, + bool partitioned, + const LEX_CUSTRING *id, bool temporary_table) { char buff[NAME_LEN*2 + 80]; String query(buff, sizeof(buff), system_charset_info); - bool error; + bool error= 0; DBUG_ENTER("log_drop_table"); - if (!mysql_bin_log.is_open()) - DBUG_RETURN(0); - - query.length(0); - query.append(STRING_WITH_LEN("DROP ")); - if (temporary_table) - query.append(STRING_WITH_LEN("TEMPORARY ")); - query.append(STRING_WITH_LEN("TABLE IF EXISTS ")); - append_identifier(thd, &query, db_name); - query.append("."); - append_identifier(thd, &query, table_name); - query.append(STRING_WITH_LEN("/* Generated to handle " - "failed CREATE OR REPLACE */")); + if (mysql_bin_log.is_open()) + { + query.length(0); + query.append(STRING_WITH_LEN("DROP ")); + if (temporary_table) + 
query.append(STRING_WITH_LEN("TEMPORARY ")); + query.append(STRING_WITH_LEN("TABLE IF EXISTS ")); + append_identifier(thd, &query, db_name); + query.append('.'); + append_identifier(thd, &query, table_name); + query.append(STRING_WITH_LEN("/* Generated to handle " + "failed CREATE OR REPLACE */")); - /* - In case of temporary tables we don't have to log the database name - in the binary log. We log this for non temporary tables, as the slave - may use a filter to ignore queries for a specific database. - */ - error= thd->binlog_query(THD::STMT_QUERY_TYPE, - query.ptr(), query.length(), - FALSE, FALSE, temporary_table, 0) > 0; + /* + In case of temporary tables we don't have to log the database name + in the binary log. We log this for non temporary tables, as the slave + may use a filter to ignore queries for a specific database. + */ + error= thd->binlog_query(THD::STMT_QUERY_TYPE, + query.ptr(), query.length(), + FALSE, FALSE, temporary_table, 0) > 0; + } + if (!temporary_table) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("DROP_AFTER_CREATE") }; + ddl_log.org_storage_engine_name= *handler_name; + ddl_log.org_partitioned= partitioned; + ddl_log.org_database= *db_name; + ddl_log.org_table= *table_name; + ddl_log.org_table_id= *id; + backup_log_ddl(&ddl_log); + } DBUG_RETURN(error); } /** - Quickly remove a table without bin logging + Quickly remove a table, without any logging @param thd Thread context. @param base The handlerton handle. @@ -2958,9 +1975,9 @@ static int sort_keys(KEY *a, KEY *b) /* Sort NOT NULL keys before other keys */ return (a_flags & HA_NULL_PART_KEY) ? 
1 : -1; } - if (a->name.str == primary_key_name) + if (a->name.str == primary_key_name.str) return -1; - if (b->name.str == primary_key_name) + if (b->name.str == primary_key_name.str) return 1; /* Sort keys don't containing partial segments before others */ if ((a_flags ^ b_flags) & HA_KEY_HAS_PART_KEY_SEG) @@ -3622,7 +2639,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, if (!(file->ha_table_flags() & HA_CAN_TABLES_WITHOUT_ROLLBACK)) { my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), file->table_type(), - "SEQUENCE"); + SEQUENCE_clex_str.str); DBUG_RETURN(TRUE); } @@ -3873,13 +2890,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, else (*key_count)--; if (key->name.str && !tmp_table && (key->type != Key::PRIMARY) && - !my_strcasecmp(system_charset_info, key->name.str, primary_key_name)) + !my_strcasecmp(system_charset_info, key->name.str, + primary_key_name.str)) { my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name.str); DBUG_RETURN(TRUE); } if (key->type == Key::PRIMARY && key->name.str && - my_strcasecmp(system_charset_info, key->name.str, primary_key_name) != 0) + my_strcasecmp(system_charset_info, key->name.str, primary_key_name.str) != 0) { bool sav_abort_on_warning= thd->abort_on_warning; thd->abort_on_warning= FALSE; /* Don't make an error out of this. 
*/ @@ -3900,7 +2918,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, { if (key->name.str == ignore_key || key->type == Key::FOREIGN_KEY) continue; - /* Create the key->ame based on the first column (if not given) */ + /* Create the key name based on the first column (if not given) */ if (key->type == Key::PRIMARY) { if (primary_key) @@ -3909,7 +2927,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, MYF(0)); DBUG_RETURN(true); } - key_name=primary_key_name; + key_name= primary_key_name.str; primary_key=1; } else if (!(key_name= key->name.str)) @@ -4428,7 +3446,7 @@ without_overlaps_err: } create_info->period_info.unique_keys++; } - + key_info->is_ignored= key->key_create_info.is_ignored; key_info++; } @@ -4687,13 +3705,13 @@ bool validate_comment_length(THD *thd, LEX_CSTRING *comment, size_t max_len, if (thd->is_strict_mode()) { my_error(ER_INVALID_CHARACTER_STRING, MYF(0), - system_charset_info->csname, comment->str); + system_charset_info->cs_name.str, comment->str); DBUG_RETURN(true); } push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_INVALID_CHARACTER_STRING, ER_THD(thd, ER_INVALID_CHARACTER_STRING), - system_charset_info->csname, comment->str); + system_charset_info->cs_name.str, comment->str); comment->length= tmp_len; DBUG_RETURN(false); } @@ -5158,13 +4176,17 @@ err: the extension). @param create_info Create information (like MAX_ROWS) @param alter_info Description of fields and keys for new table - @param create_table_mode C_ORDINARY_CREATE, C_ALTER_TABLE, C_ASSISTED_DISCOVERY + @param create_table_mode C_ORDINARY_CREATE, C_ALTER_TABLE, + C_ASSISTED_DISCOVERY or C_ALTER_TABLE_FRM_ONLY. or any positive number (for C_CREATE_SELECT). + If set to C_ALTER_TABLE_FRM_ONY then no frm or + table is created, only the frm image in memory. @param[out] is_trans Identifies the type of engine where the table was created: either trans or non-trans. 
@param[out] key_info Array of KEY objects describing keys in table which was created. @param[out] key_count Number of keys in table which was created. + @param[out] frm The frm image. If one creates a temporary table, its is automatically opened and its TABLE_SHARE is added to THD::all_temp_tables list. @@ -5180,10 +4202,13 @@ err: */ static -int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, +int create_table_impl(THD *thd, + DDL_LOG_STATE *ddl_log_state_create, + DDL_LOG_STATE *ddl_log_state_rm, + const LEX_CSTRING &orig_db, const LEX_CSTRING &orig_table_name, const LEX_CSTRING &db, const LEX_CSTRING &table_name, - const char *path, const DDL_options_st options, + const LEX_CSTRING &path, const DDL_options_st options, HA_CREATE_INFO *create_info, Alter_info *alter_info, int create_table_mode, bool *is_trans, KEY **key_info, uint *key_count, LEX_CUSTRING *frm) @@ -5193,10 +4218,16 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, int error= 1; bool frm_only= create_table_mode == C_ALTER_TABLE_FRM_ONLY; bool internal_tmp_table= create_table_mode == C_ALTER_TABLE || frm_only; - handlerton *exists_hton; - DBUG_ENTER("mysql_create_table_no_lock"); + DBUG_ENTER("create_table_impl"); DBUG_PRINT("enter", ("db: '%s' table: '%s' tmp: %d path: %s", - db.str, table_name.str, internal_tmp_table, path)); + db.str, table_name.str, internal_tmp_table, path.str)); + + /* Easy check for ddl logging if we are creating a temporary table */ + if (create_info->tmp_table()) + { + ddl_log_state_create= 0; + ddl_log_state_rm= 0; + } if (fix_constraints_names(thd, &alter_info->check_constraint_list, create_info)) @@ -5282,10 +4313,12 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, goto err; } - if (!internal_tmp_table && ha_table_exists(thd, &db, &table_name, - &exists_hton)) + handlerton *db_type; + if (!internal_tmp_table && + ha_table_exists(thd, &db, &table_name, + &create_info->org_tabledef_version, NULL, &db_type)) { - if 
(ha_check_if_updates_are_ignored(thd, exists_hton, "CREATE")) + if (ha_check_if_updates_are_ignored(thd, db_type, "CREATE")) { /* Don't create table. CREATE will still be logged in binary log */ error= 0; @@ -5309,9 +4342,13 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, */ (void) trans_rollback_stmt(thd); /* Remove normal table without logging. Keep tables locked */ - if (mysql_rm_table_no_locks(thd, &table_list, 0, 0, 0, 0, 1, 1)) + if (mysql_rm_table_no_locks(thd, &table_list, &thd->db, + ddl_log_state_rm, + 0, 0, 0, 0, 1, 1)) goto err; + debug_crash_here("ddl_log_create_after_drop"); + /* We have to log this query, even if it failed later to ensure the drop is done. @@ -5319,9 +4356,10 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, thd->variables.option_bits|= OPTION_KEEP_LOG; thd->log_current_statement= 1; create_info->table_was_deleted= 1; + lex_string_set(&create_info->org_storage_engine_name, + ha_resolve_storage_engine_name(db_type)); DBUG_EXECUTE_IF("send_kill_after_delete", - thd->set_killed(KILL_QUERY); ); - + thd->set_killed(KILL_QUERY);); /* Restart statement transactions for the case of CREATE ... SELECT. */ @@ -5332,14 +4370,14 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, else if (options.if_not_exists()) { /* - We never come here as part of normal create table as table existance + We never come here as part of normal create table as table existence is checked in open_and_lock_tables(). We may come here as part of ALTER TABLE when converting a table for a distributed engine to a a local one. 
*/ /* Log CREATE IF NOT EXISTS on slave for distributed engines */ - if (thd->slave_thread && (exists_hton && exists_hton->flags & + if (thd->slave_thread && (db_type && db_type->flags & HTON_IGNORE_UPDATES)) thd->log_current_statement= 1; goto warn; @@ -5374,7 +4412,7 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, goto err; } - init_tmp_table_share(thd, &share, db.str, 0, table_name.str, path); + init_tmp_table_share(thd, &share, db.str, 0, table_name.str, path.str); /* prepare everything for discovery */ share.field= &no_fields; @@ -5385,6 +4423,14 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, if (parse_engine_table_options(thd, hton, &share)) goto err; + /* + Log that we are going to do discovery. If things fails, any generated + .frm files are deleted + */ + if (ddl_log_state_create) + ddl_log_create_table(thd, ddl_log_state_create, (handlerton*) 0, &path, + &db, &table_name, 1); + ha_err= hton->discover_table_structure(hton, thd, &share, create_info); /* @@ -5406,44 +4452,56 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, } else { + if (ddl_log_state_create) + ddl_log_create_table(thd, ddl_log_state_create, create_info->db_type, + &path, &db, &table_name, frm_only); + debug_crash_here("ddl_log_create_before_create_frm"); + file= mysql_create_frm_image(thd, orig_db, orig_table_name, create_info, alter_info, create_table_mode, key_info, key_count, frm); /* - TODO: remove this check of thd->is_error() (now it intercept - errors in some val_*() methoids and bring some single place to - such error interception). + TODO: remove this check of thd->is_error() (now it intercept + errors in some val_*() methods and bring some single place to + such error interception). 
*/ if (!file || thd->is_error()) + { + if (!file) + deletefrm(path.str); goto err; + } if (thd->variables.keep_files_on_create) create_info->options|= HA_CREATE_KEEP_FILES; - if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG)) + if (file->ha_create_partitioning_metadata(path.str, NULL, CHF_CREATE_FLAG)) goto err; if (!frm_only) { - if (ha_create_table(thd, path, db.str, table_name.str, create_info, frm)) + debug_crash_here("ddl_log_create_before_create_table"); + if (ha_create_table(thd, path.str, db.str, table_name.str, create_info, + frm, 0)) { - file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG); - deletefrm(path); + file->ha_create_partitioning_metadata(path.str, NULL, CHF_DELETE_FLAG); + deletefrm(path.str); goto err; } + debug_crash_here("ddl_log_create_after_create_table"); } } create_info->table= 0; if (!frm_only && create_info->tmp_table()) { - TABLE *table= thd->create_and_open_tmp_table(frm, path, db.str, + TABLE *table= thd->create_and_open_tmp_table(frm, path.str, db.str, table_name.str, false); if (!table) { - (void) thd->rm_temporary_table(create_info->db_type, path); + (void) thd->rm_temporary_table(create_info->db_type, path.str); goto err; } @@ -5456,6 +4514,12 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db, error= 0; err: + if (unlikely(error) && ddl_log_state_create) + { + /* Table was never created, so we can ignore the ddl log entry */ + ddl_log_complete(ddl_log_state_create); + } + THD_STAGE_INFO(thd, stage_after_create); delete file; DBUG_PRINT("exit", ("return: %d", error)); @@ -5475,13 +4539,16 @@ warn: in various version of CREATE TABLE statement. 
@result - 1 unspefied error + 1 unspecifed error 2 error; Don't log create statement 0 ok -1 Table was used with IF NOT EXISTS and table existed (warning, not error) */ -int mysql_create_table_no_lock(THD *thd, const LEX_CSTRING *db, +int mysql_create_table_no_lock(THD *thd, + DDL_LOG_STATE *ddl_log_state_create, + DDL_LOG_STATE *ddl_log_state_rm, + const LEX_CSTRING *db, const LEX_CSTRING *table_name, Table_specification_st *create_info, Alter_info *alter_info, bool *is_trans, @@ -5490,26 +4557,31 @@ int mysql_create_table_no_lock(THD *thd, const LEX_CSTRING *db, KEY *not_used_1; uint not_used_2; int res; + uint path_length; char path[FN_REFLEN + 1]; + LEX_CSTRING cpath; LEX_CUSTRING frm= {0,0}; if (create_info->tmp_table()) - build_tmptable_filename(thd, path, sizeof(path)); + path_length= build_tmptable_filename(thd, path, sizeof(path)); else { - int length; const LEX_CSTRING *alias= table_case_name(create_info, table_name); - length= build_table_filename(path, sizeof(path) - 1, db->str, alias->str, "", 0); + path_length= build_table_filename(path, sizeof(path) - 1, db->str, + alias->str, + "", 0); // Check if we hit FN_REFLEN bytes along with file extension. 
- if (length+reg_ext_length > FN_REFLEN) + if (path_length+reg_ext_length > FN_REFLEN) { my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), (int) sizeof(path)-1, path); return true; } } + lex_string_set3(&cpath, path, path_length); - res= create_table_impl(thd, *db, *table_name, *db, *table_name, path, + res= create_table_impl(thd, ddl_log_state_create, ddl_log_state_rm, + *db, *table_name, *db, *table_name, cpath, *create_info, create_info, alter_info, create_table_mode, is_trans, ¬_used_1, ¬_used_2, &frm); @@ -5527,7 +4599,9 @@ int mysql_create_table_no_lock(THD *thd, const LEX_CSTRING *db, { DBUG_ASSERT(thd->is_error()); /* Drop the table as it wasn't completely done */ - if (!mysql_rm_table_no_locks(thd, table_list, 1, + if (!mysql_rm_table_no_locks(thd, table_list, &thd->db, + (DDL_LOG_STATE*) 0, + 1, create_info->tmp_table(), false, true /* Sequence*/, true /* Don't log_query */, @@ -5560,17 +4634,22 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, Table_specification_st *create_info, Alter_info *alter_info) { - bool is_trans= FALSE; - bool result; - int create_table_mode; TABLE_LIST *pos_in_locked_tables= 0; MDL_ticket *mdl_ticket= 0; + DDL_LOG_STATE ddl_log_state_create, ddl_log_state_rm; + int create_table_mode; + uint save_thd_create_info_options; + bool is_trans= FALSE; + bool result; DBUG_ENTER("mysql_create_table"); DBUG_ASSERT(create_table == thd->lex->query_tables); + bzero(&ddl_log_state_create, sizeof(ddl_log_state_create)); + bzero(&ddl_log_state_rm, sizeof(ddl_log_state_rm)); + /* Copy temporarily the statement flags to thd for lock_table_names() */ - uint save_thd_create_info_options= thd->lex->create_info.options; + save_thd_create_info_options= thd->lex->create_info.options; thd->lex->create_info.options|= create_info->options; /* Open or obtain an exclusive metadata lock on table being created */ @@ -5613,7 +4692,8 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, /* We can abort create table for any table type */ 
thd->abort_on_warning= thd->is_strict_mode(); - if (mysql_create_table_no_lock(thd, &create_table->db, + if (mysql_create_table_no_lock(thd, &ddl_log_state_create, &ddl_log_state_rm, + &create_table->db, &create_table->table_name, create_info, alter_info, &is_trans, create_table_mode, @@ -5685,10 +4765,32 @@ err: */ create_info->table->s->table_creation_was_logged= 1; } + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state_create, thd->binlog_xid); + if (ddl_log_state_rm.is_active()) + ddl_log_update_xid(&ddl_log_state_rm, thd->binlog_xid); + debug_crash_here("ddl_log_create_before_binlog"); if (unlikely(write_bin_log(thd, result ? FALSE : TRUE, thd->query(), thd->query_length(), is_trans))) result= 1; + debug_crash_here("ddl_log_create_after_binlog"); + thd->binlog_xid= 0; + + if (!create_info->tmp_table()) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("CREATE") }; + ddl_log.org_partitioned= (create_info->db_type == partition_hton); + ddl_log.org_storage_engine_name= create_info->new_storage_engine_name; + ddl_log.org_database= create_table->db; + ddl_log.org_table= create_table->table_name; + ddl_log.org_table_id= create_info->tabledef_version; + backup_log_ddl(&ddl_log); + } } + ddl_log_complete(&ddl_log_state_rm); + ddl_log_complete(&ddl_log_state_create); DBUG_RETURN(result); } @@ -5732,7 +4834,7 @@ make_unique_key_name(THD *thd, const char *field_name,KEY *start,KEY *end) char buff[MAX_FIELD_NAME],*buff_end; if (!check_if_keyname_exists(field_name,start,end) && - my_strcasecmp(system_charset_info,field_name,primary_key_name)) + my_strcasecmp(system_charset_info,field_name,primary_key_name.str)) return (char*) field_name; // Use fieldname buff_end=strmake(buff,field_name, sizeof(buff)-4); @@ -5836,6 +4938,7 @@ bool operator!=(const MYSQL_TIME &lhs, const MYSQL_TIME &rhs) @param old_name The old table name. @param new_db The new database name. @param new_name The new table name. 
+ @param id Table version id (for ddl log) @param flags flags FN_FROM_IS_TMP old_name is temporary. FN_TO_IS_TMP new_name is temporary. @@ -5843,7 +4946,6 @@ bool operator!=(const MYSQL_TIME &lhs, const MYSQL_TIME &rhs) but only the table in the storage engine. NO_HA_TABLE Don't rename table in engine. NO_FK_CHECKS Don't check FK constraints during rename. - @return false OK @return true Error */ @@ -5851,17 +4953,16 @@ bool operator!=(const MYSQL_TIME &lhs, const MYSQL_TIME &rhs) bool mysql_rename_table(handlerton *base, const LEX_CSTRING *old_db, const LEX_CSTRING *old_name, const LEX_CSTRING *new_db, - const LEX_CSTRING *new_name, uint flags) + const LEX_CSTRING *new_name, LEX_CUSTRING *id, uint flags) { THD *thd= current_thd; - char from[FN_REFLEN + 1], to[FN_REFLEN + 1], - lc_from[FN_REFLEN + 1], lc_to[FN_REFLEN + 1]; + char from[FN_REFLEN], to[FN_REFLEN], lc_from[FN_REFLEN], lc_to[FN_REFLEN]; char *from_base= from, *to_base= to; - char tmp_name[SAFE_NAME_LEN+1], tmp_db_name[SAFE_NAME_LEN+1]; handler *file; int error=0; ulonglong save_bits= thd->variables.option_bits; int length; + bool log_query= 0; DBUG_ENTER("mysql_rename_table"); DBUG_ASSERT(base); DBUG_PRINT("enter", ("old: '%s'.'%s' new: '%s'.'%s'", @@ -5885,38 +4986,22 @@ mysql_rename_table(handlerton *base, const LEX_CSTRING *old_db, DBUG_RETURN(TRUE); } - /* - If lower_case_table_names == 2 (case-preserving but case-insensitive - file system) and the storage is not HA_FILE_BASED, we need to provide - a lowercase file name, but we leave the .frm in mixed case. 
- */ - if (lower_case_table_names == 2 && file && - !(file->ha_table_flags() & HA_FILE_BASED)) + if (file && file->needs_lower_case_filenames()) { - strmov(tmp_name, old_name->str); - my_casedn_str(files_charset_info, tmp_name); - strmov(tmp_db_name, old_db->str); - my_casedn_str(files_charset_info, tmp_db_name); - - build_table_filename(lc_from, sizeof(lc_from) - 1, tmp_db_name, tmp_name, - "", flags & FN_FROM_IS_TMP); + build_lower_case_table_filename(lc_from, sizeof(lc_from) -1, + old_db, old_name, flags & FN_FROM_IS_TMP); + build_lower_case_table_filename(lc_to, sizeof(lc_from) -1, + new_db, new_name, flags & FN_TO_IS_TMP); from_base= lc_from; - - strmov(tmp_name, new_name->str); - my_casedn_str(files_charset_info, tmp_name); - strmov(tmp_db_name, new_db->str); - my_casedn_str(files_charset_info, tmp_db_name); - - build_table_filename(lc_to, sizeof(lc_to) - 1, tmp_db_name, tmp_name, "", - flags & FN_TO_IS_TMP); - to_base= lc_to; + to_base= lc_to; } if (flags & NO_HA_TABLE) { if (rename_file_ext(from,to,reg_ext)) error= my_errno; - if (!(flags & NO_PAR_TABLE)) + log_query= true; + if (file && !(flags & NO_PAR_TABLE)) (void) file->ha_create_partitioning_metadata(to, from, CHF_RENAME_FLAG); } else if (!file || likely(!(error=file->ha_rename_table(from_base, to_base)))) @@ -5932,6 +5017,25 @@ mysql_rename_table(handlerton *base, const LEX_CSTRING *old_db, file->ha_rename_table(to_base, from_base); // Restore old file name } } + else + log_query= true; + } + if (!error && log_query && !(flags & (FN_TO_IS_TMP | FN_FROM_IS_TMP))) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("RENAME") }; + ddl_log.org_partitioned= file->partition_engine(); + ddl_log.new_partitioned= ddl_log.org_partitioned; + lex_string_set(&ddl_log.org_storage_engine_name, file->real_table_type()); + ddl_log.org_database= *old_db; + ddl_log.org_table= *old_name; + ddl_log.org_table_id= *id; + ddl_log.new_storage_engine_name= 
ddl_log.org_storage_engine_name; + ddl_log.new_database= *new_db; + ddl_log.new_table= *new_name; + ddl_log.new_table_id= *id; + backup_log_ddl(&ddl_log); } delete file; @@ -5984,15 +5088,20 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, Table_specification_st local_create_info; TABLE_LIST *pos_in_locked_tables= 0; Alter_info local_alter_info; - Alter_table_ctx local_alter_ctx; // Not used + Alter_table_ctx local_alter_ctx; // Not used + DDL_LOG_STATE ddl_log_state_create, ddl_log_state_rm; int res= 1; bool is_trans= FALSE; bool do_logging= FALSE; bool force_generated_create= false; + bool src_table_exists= FALSE; uint not_used; int create_res; DBUG_ENTER("mysql_create_like_table"); + bzero(&ddl_log_state_create, sizeof(ddl_log_state_create)); + bzero(&ddl_log_state_rm, sizeof(ddl_log_state_rm)); + #ifdef WITH_WSREP if (WSREP(thd) && !thd->wsrep_applier && wsrep_create_like_table(thd, table, src_table, create_info)) @@ -6019,6 +5128,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, { /* is_error() may be 0 if table existed and we generated a warning */ res= thd->is_error(); + src_table_exists= !res; goto err; } /* Ensure we don't try to create something from which we select from */ @@ -6094,7 +5204,9 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, pos_in_locked_tables= local_create_info.table->pos_in_locked_tables; res= ((create_res= - mysql_create_table_no_lock(thd, &table->db, &table->table_name, + mysql_create_table_no_lock(thd, + &ddl_log_state_create, &ddl_log_state_rm, + &table->db, &table->table_name, &local_create_info, &local_alter_info, &is_trans, C_ORDINARY_CREATE, table)) > 0); @@ -6134,14 +5246,10 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, { /* Ensure that we have an exclusive lock on target table if we are creating - non-temporary table. 
- If we're creating non-temporary table, then either - - there is an exclusive lock on the table - or - - there was CREATE IF EXIST, and the table was not created - (it existed), and was previously locked + non-temporary table. We don't have or need the lock if the create failed + because of existing table when using "if exists". */ - DBUG_ASSERT((create_info->tmp_table()) || + DBUG_ASSERT((create_info->tmp_table()) || create_res < 0 || thd->mdl_context.is_lock_owner(MDL_key::TABLE, table->db.str, table->table_name.str, MDL_EXCLUSIVE) || @@ -6265,7 +5373,6 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, if (write_bin_log(thd, TRUE, query.ptr(), query.length())) { res= 1; - do_logging= 0; goto err; } @@ -6311,22 +5418,48 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, err: if (do_logging) { + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state_create, thd->binlog_xid); + if (ddl_log_state_rm.is_active()) + ddl_log_update_xid(&ddl_log_state_rm, thd->binlog_xid); + debug_crash_here("ddl_log_create_before_binlog"); if (res && create_info->table_was_deleted) { /* Table was not deleted. Original table was deleted. We have to log it. */ - log_drop_table(thd, &table->db, &table->table_name, create_info->tmp_table()); + DBUG_ASSERT(ddl_log_state_rm.is_active()); + log_drop_table(thd, &table->db, &table->table_name, + &create_info->org_storage_engine_name, + create_info->db_type == partition_hton, + &create_info->org_tabledef_version, + create_info->tmp_table()); } else if (res != 2) // Table was not dropped { if (write_bin_log(thd, res ? 
FALSE : TRUE, thd->query(), thd->query_length(), is_trans)) - res= 1; + res= 1; } + debug_crash_here("ddl_log_create_after_binlog"); + thd->binlog_xid= 0; + } + + if (!res && !src_table_exists && !create_info->tmp_table()) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("CREATE") }; + ddl_log.org_storage_engine_name= local_create_info.new_storage_engine_name; + ddl_log.org_database= table->db; + ddl_log.org_table= table->table_name; + ddl_log.org_table_id= local_create_info.tabledef_version; + backup_log_ddl(&ddl_log); } + ddl_log_complete(&ddl_log_state_rm); + ddl_log_complete(&ddl_log_state_create); DBUG_RETURN(res != 0); } @@ -6742,7 +5875,33 @@ drop_create_field: rename_key_it.remove(); } } - + /* Handle ALTER KEY IF EXISTS. */ + { + List_iterator<Alter_index_ignorability> ignor_it(alter_info->alter_index_ignorability_list); + Alter_index_ignorability *aii; + while ((aii= ignor_it++)) + { + if (!aii->if_exists()) + continue; + bool exists= false; + for (uint n_key= 0; n_key < table->s->keys; n_key++) + { + if (my_strcasecmp(system_charset_info, aii->name(), + table->key_info[n_key].name.str) == 0) + { + exists= true; + break; + } + } + if (exists) + continue; + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_KEY_DOES_NOT_EXISTS, + ER_THD(thd, ER_KEY_DOES_NOT_EXISTS), + aii->name(), table->s->table_name.str); + ignor_it.remove(); + } + } /* ALTER TABLE ADD KEY IF NOT EXISTS */ /* ALTER TABLE ADD FOREIGN KEY IF NOT EXISTS */ { @@ -6760,7 +5919,7 @@ drop_create_field: key->type == Key::PRIMARY && table->s->primary_key != MAX_KEY && (keyname= table->s->key_info[table->s->primary_key].name.str) && - my_strcasecmp(system_charset_info, keyname, primary_key_name) == 0; + my_strcasecmp(system_charset_info, keyname, primary_key_name.str) == 0; if (dup_primary_key) goto remove_key; @@ -6769,7 +5928,7 @@ drop_create_field: if ((keyname= key->name.str) == NULL) { if (key->type == Key::PRIMARY) - keyname= 
primary_key_name; + keyname= primary_key_name.str; else { List_iterator<Key_part_spec> part_it(key->columns); @@ -7113,6 +6272,33 @@ Compare_keys compare_keys_but_name(const KEY *table_key, const KEY *new_key, return result; } + +/** + Look-up KEY object by index name using case-insensitive comparison. + + @param key_name Index name. + @param key_start Start of array of KEYs for table. + @param key_end End of array of KEYs for table. + + @note Case-insensitive comparison is necessary to correctly + handle renaming of keys. + + @retval non-NULL - pointer to KEY object for index found. + @retval NULL - no index with such name found (or it is marked + as renamed). +*/ + +static KEY *find_key_ci(const char *key_name, KEY *key_start, KEY *key_end) +{ + for (KEY *key = key_start; key < key_end; key++) + { + if (!my_strcasecmp(system_charset_info, key_name, key->name.str)) + return key; + } + return NULL; +} + + /** Compare original and new versions of a table and fill Alter_inplace_info describing differences between those versions. @@ -7172,7 +6358,10 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar, ! (ha_alter_info->index_add_buffer= (uint*) thd->alloc(sizeof(uint) * alter_info->key_list.elements)) || - ha_alter_info->rename_keys.reserve(ha_alter_info->index_add_count)) + ha_alter_info->rename_keys.reserve(ha_alter_info->index_add_count) || + ! (ha_alter_info->index_altered_ignorability_buffer= + (KEY_PAIR*)thd->alloc(sizeof(KEY_PAIR) * + alter_info->alter_index_ignorability_list.elements))) DBUG_RETURN(true); /* @@ -7454,7 +6643,7 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar, const KEY* const new_pk= (ha_alter_info->key_count > 0 && (!my_strcasecmp(system_charset_info, ha_alter_info->key_info_buffer->name.str, - primary_key_name) || + primary_key_name.str) || is_candidate_key(ha_alter_info->key_info_buffer))) ? ha_alter_info->key_info_buffer : NULL; const KEY *const old_pk= table->s->primary_key == MAX_KEY ? 
NULL : @@ -7577,6 +6766,29 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar, } } + List_iterator<Alter_index_ignorability> + ignorability_index_it(alter_info->alter_index_ignorability_list); + Alter_index_ignorability *alter_index_ignorability; + while((alter_index_ignorability= ignorability_index_it++)) + { + const char *name= alter_index_ignorability->name(); + + KEY *old_key, *new_key; + old_key= find_key_ci(name, table->key_info, table_key_end); + new_key= find_key_ci(name, ha_alter_info->key_info_buffer, new_key_end); + + DBUG_ASSERT(old_key != NULL); + + if (new_key == NULL) + { + my_error(ER_KEY_DOES_NOT_EXISTS, MYF(0), name, table->s->table_name.str); + DBUG_RETURN(true); + } + new_key->is_ignored= alter_index_ignorability->is_ignored(); + ha_alter_info->handler_flags|= ALTER_RENAME_INDEX; + ha_alter_info->add_altered_index_ignorability(old_key, new_key); + } + /* Sort index_add_buffer according to how key_info_buffer is sorted. I.e. with primary keys first - see sort_keys(). 
@@ -7966,6 +7178,41 @@ static bool is_inplace_alter_impossible(TABLE *table, } +/* + Notify engine that table definition has changed as part of inplace alter + table +*/ + +static bool notify_tabledef_changed(TABLE_LIST *table_list) +{ + TABLE *table= table_list->table; + DBUG_ENTER("notify_tabledef_changed"); + + if (table->file->partition_ht()->notify_tabledef_changed) + { + char db_buff[FN_REFLEN], table_buff[FN_REFLEN]; + handlerton *hton= table->file->ht; + LEX_CSTRING tmp_db, tmp_table; + + tmp_db.str= db_buff; + tmp_table.str= table_buff; + tmp_db.length= tablename_to_filename(table_list->db.str, + db_buff, sizeof(db_buff)); + tmp_table.length= tablename_to_filename(table_list->table_name.str, + table_buff, sizeof(table_buff)); + if ((hton->notify_tabledef_changed)(hton, &tmp_db, &tmp_table, + table->s->frm_image, + &table->s->tabledef_version, + table->file)) + { + my_error(HA_ERR_INCOMPATIBLE_DEFINITION, MYF(0)); + DBUG_RETURN(true); + } + } + DBUG_RETURN(0); +} + + /** Perform in-place alter table. 
@@ -8001,17 +7248,17 @@ static bool mysql_inplace_alter_table(THD *thd, TABLE *altered_table, Alter_inplace_info *ha_alter_info, MDL_request *target_mdl_request, + DDL_LOG_STATE *ddl_log_state, + TRIGGER_RENAME_PARAM *trigger_param, Alter_table_ctx *alter_ctx) { Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN | MYSQL_OPEN_IGNORE_KILLED); handlerton *db_type= table->s->db_type(); Alter_info *alter_info= ha_alter_info->alter_info; bool reopen_tables= false; - bool res; - + bool res, commit_succeded_with_error= 0; const enum_alter_inplace_result inplace_supported= ha_alter_info->inplace_supported; - DBUG_ENTER("mysql_inplace_alter_table"); /* Downgrade DDL lock while we are waiting for exclusive lock below */ @@ -8122,10 +7369,27 @@ static bool mysql_inplace_alter_table(THD *thd, break; } + ddl_log_update_phase(ddl_log_state, DDL_ALTER_TABLE_PHASE_PREPARE_INPLACE); + if (table->file->ha_prepare_inplace_alter_table(altered_table, ha_alter_info)) goto rollback; + debug_crash_here("ddl_log_alter_after_prepare_inplace"); + + /* + Store the new table_version() as it may have not been available before + in some engines, like InnoDB. + */ + ddl_log_update_unique_id(ddl_log_state, + table->file->table_version()); + /* + Mark that we have started inplace alter table. DDL recover will + have to decide if it should use the old or new version of the table, based + on if the new version did commit or not. + */ + ddl_log_update_phase(ddl_log_state, DDL_ALTER_TABLE_PHASE_INPLACE); + /* Downgrade the lock if storage engine has told us that exclusive lock was necessary only for prepare phase (unless we are not under LOCK TABLES) and @@ -8172,12 +7436,17 @@ static bool mysql_inplace_alter_table(THD *thd, if (backup_reset_alter_copy_lock(thd)) goto rollback; + /* Crashing here should cause the original table to be used */ + debug_crash_here("ddl_log_alter_after_copy"); /* If we are killed after this point, we should ignore and continue. 
We have mostly completed the operation at this point, there should be no long waits left. */ + DEBUG_SYNC(thd, "alter_table_inplace_before_commit"); + THD_STAGE_INFO(thd, stage_alter_inplace_commit); + DBUG_EXECUTE_IF("alter_table_rollback_new_index", { table->file->ha_commit_inplace_alter_table(altered_table, ha_alter_info, @@ -8186,8 +7455,15 @@ static bool mysql_inplace_alter_table(THD *thd, goto cleanup; }); - DEBUG_SYNC(thd, "alter_table_inplace_before_commit"); - THD_STAGE_INFO(thd, stage_alter_inplace_commit); + /* + Notify the engine that the table definition has changed so that it can + store the new ID as part of the commit + */ + + if (!(table->file->partition_ht()->flags & + HTON_REQUIRES_NOTIFY_TABLEDEF_CHANGED_AFTER_COMMIT) && + notify_tabledef_changed(table_list)) + goto rollback; { TR_table trt(thd, true); @@ -8203,43 +7479,37 @@ static bool mysql_inplace_alter_table(THD *thd, goto rollback; } if (trt.update(trx_start_id, trx_end_id)) - { goto rollback; - } } } if (table->file->ha_commit_inplace_alter_table(altered_table, ha_alter_info, true)) - { goto rollback; - } DEBUG_SYNC(thd, "alter_table_inplace_after_commit"); } - /* Notify the engine that the table definition has changed */ + /* + We are new ready to use the new table. Update the state in the + ddl log so that we recovery know that the new table is ready and + in case of crash it should use the new one and log the query + to the binary log. 
+ */ + ddl_log_update_phase(ddl_log_state, DDL_ALTER_TABLE_PHASE_INPLACE_COPIED); + debug_crash_here("ddl_log_alter_after_log"); - if (table->file->partition_ht()->notify_tabledef_changed) + if ((table->file->partition_ht()->flags & + HTON_REQUIRES_NOTIFY_TABLEDEF_CHANGED_AFTER_COMMIT) && + notify_tabledef_changed(table_list)) { - char db_buff[FN_REFLEN], table_buff[FN_REFLEN]; - handlerton *hton= table->file->ht; - LEX_CSTRING tmp_db, tmp_table; - - tmp_db.str= db_buff; - tmp_table.str= table_buff; - tmp_db.length= tablename_to_filename(table_list->db.str, - db_buff, sizeof(db_buff)); - tmp_table.length= tablename_to_filename(table_list->table_name.str, - table_buff, sizeof(table_buff)); - if ((hton->notify_tabledef_changed)(hton, &tmp_db, &tmp_table, - table->s->frm_image, - &table->s->tabledef_version, - table->file)) - { - my_error(HA_ERR_INCOMPATIBLE_DEFINITION, MYF(0)); - DBUG_RETURN(true); - } + /* + The above should never fail. If it failed, the new structure is + commited and we have no way to roll back. + The best we can do is to continue, but send an error to the + user that something when wrong + */ + commit_succeded_with_error= 1; } close_all_tables_for_name(thd, table->s, @@ -8252,27 +7522,31 @@ static bool mysql_inplace_alter_table(THD *thd, /* Replace the old .FRM with the new .FRM, but keep the old name for now. Rename to the new name (if needed) will be handled separately below. - + */ + /* TODO: remove this check of thd->is_error() (now it intercept errors in some val_*() methods and bring some single place to such error interception). */ if (mysql_rename_table(db_type, &alter_ctx->new_db, &alter_ctx->tmp_name, &alter_ctx->db, &alter_ctx->alias, + &alter_ctx->tmp_id, FN_FROM_IS_TMP | NO_HA_TABLE) || thd->is_error()) { // Since changes were done in-place, we can't revert them. DBUG_RETURN(true); } + debug_crash_here("ddl_log_alter_after_rename_frm"); - // Rename altered table if requested. + // Rename altered table in case of ALTER TABLE ... 
RENAME if (alter_ctx->is_table_renamed()) { DBUG_ASSERT(!tdc_share_is_cached(thd, alter_ctx->db.str, alter_ctx->table_name.str)); if (mysql_rename_table(db_type, &alter_ctx->db, &alter_ctx->table_name, - &alter_ctx->new_db, &alter_ctx->new_alias, 0)) + &alter_ctx->new_db, &alter_ctx->new_alias, + &alter_ctx->tmp_id, 0)) { /* If the rename fails we will still have a working table @@ -8280,7 +7554,8 @@ static bool mysql_inplace_alter_table(THD *thd, */ DBUG_RETURN(true); } - if (Table_triggers_list::change_table_name(thd, + debug_crash_here("ddl_log_alter_before_rename_triggers"); + if (Table_triggers_list::change_table_name(thd, trigger_param, &alter_ctx->db, &alter_ctx->alias, &alter_ctx->table_name, @@ -8293,14 +7568,18 @@ static bool mysql_inplace_alter_table(THD *thd, */ (void) mysql_rename_table(db_type, &alter_ctx->new_db, &alter_ctx->new_alias, - &alter_ctx->db, &alter_ctx->alias, NO_FK_CHECKS); + &alter_ctx->db, &alter_ctx->alias, + &alter_ctx->id, + NO_FK_CHECKS); + ddl_log_disable_entry(ddl_log_state); DBUG_RETURN(true); } rename_table_in_stat_tables(thd, &alter_ctx->db, &alter_ctx->alias, &alter_ctx->new_db, &alter_ctx->new_alias); + debug_crash_here("ddl_log_alter_after_rename_triggers"); } - DBUG_RETURN(false); + DBUG_RETURN(commit_succeded_with_error); rollback: table->file->ha_commit_inplace_alter_table(altered_table, @@ -8317,7 +7596,6 @@ static bool mysql_inplace_alter_table(THD *thd, NULL); if (thd->locked_tables_list.reopen_tables(thd, false)) thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); - /* QQ; do something about metadata locks ? */ } DBUG_RETURN(true); } @@ -8431,6 +7709,17 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, /* New key definitions are added here */ List<Key> new_key_list; List<Alter_rename_key> rename_key_list(alter_info->alter_rename_key_list); + + /* + Create a deep copy of the list of visibility for indexes, as it will be + altered here. 
+ */ + List<Alter_index_ignorability> + alter_index_ignorability_list(alter_info->alter_index_ignorability_list, + thd->mem_root); + + list_copy_and_replace_each_value(alter_index_ignorability_list, thd->mem_root); + List_iterator<Alter_drop> drop_it(alter_info->drop_list); List_iterator<Create_field> def_it(alter_info->create_list); List_iterator<Alter_column> alter_it(alter_info->alter_list); @@ -8921,7 +8210,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, const bool primary_key= table->s->primary_key == i; const bool explicit_pk= primary_key && !my_strcasecmp(system_charset_info, key_name, - primary_key_name); + primary_key_name.str); const bool implicit_pk= primary_key && !explicit_pk; Alter_drop *drop; @@ -8953,6 +8242,18 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, continue; } + List_iterator<Alter_index_ignorability> + ignorability_index_it(alter_index_ignorability_list); + + Alter_index_ignorability *index_ignorability; + while((index_ignorability= ignorability_index_it++)) + { + const char* name= index_ignorability->name(); + if (!my_strcasecmp(system_charset_info, key_name, name)) + ignorability_index_it.remove(); + } + + /* If this index is to stay in the table check if it has to be renamed. 
*/ List_iterator<Alter_rename_key> rename_key_it(rename_key_list); Alter_rename_key *rename_key; @@ -8961,13 +8262,13 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, { if (!my_strcasecmp(system_charset_info, key_name, rename_key->old_name.str)) { - if (!my_strcasecmp(system_charset_info, key_name, primary_key_name)) + if (!my_strcasecmp(system_charset_info, key_name, primary_key_name.str)) { my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), rename_key->old_name.str); goto err; } else if (!my_strcasecmp(system_charset_info, rename_key->new_name.str, - primary_key_name)) + primary_key_name.str)) { my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), rename_key->new_name.str); goto err; @@ -9118,6 +8419,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, key_create_info.parser_name= *plugin_name(key_info->parser); if (key_info->flags & HA_USES_COMMENT) key_create_info.comment= key_info->comment; + key_create_info.is_ignored= key_info->is_ignored; /* We're refreshing an already existing index. Since the index is not @@ -9149,6 +8451,24 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, else key_type= Key::MULTIPLE; + List_iterator<Alter_index_ignorability> + ignorability_index_it(alter_info->alter_index_ignorability_list); + Alter_index_ignorability *index_ignorability; + while((index_ignorability= ignorability_index_it++)) + { + const char *name= index_ignorability->name(); + if (!my_strcasecmp(system_charset_info, key_name, name)) + { + if (table->s->primary_key <= MAX_KEY && + table->key_info + table->s->primary_key == key_info) + { + my_error(ER_PK_INDEX_CANT_BE_IGNORED, MYF(0)); + goto err; + } + key_create_info.is_ignored= index_ignorability->is_ignored(); + } + } + tmp_name.str= key_name; tmp_name.length= strlen(key_name); /* We dont need LONG_UNIQUE_HASH_FIELD flag because it will be autogenerated */ @@ -9174,7 +8494,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, goto err; new_key_list.push_back(key, thd->mem_root); if (key->name.str && - 
!my_strcasecmp(system_charset_info, key->name.str, primary_key_name)) + !my_strcasecmp(system_charset_info, key->name.str, + primary_key_name.str)) { my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name.str); goto err; @@ -9343,6 +8664,14 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, goto err; } + if (alter_index_ignorability_list.elements) + { + my_error(ER_KEY_DOES_NOT_EXISTS, MYF(0), + alter_index_ignorability_list.head()->name(), + table->s->table_name.str); + goto err; + } + if (!create_info->comment.str) { create_info->comment.str= table->s->comment.str; @@ -9821,20 +9150,47 @@ simple_tmp_rename_or_index_change(THD *thd, TABLE_LIST *table_list, @return Operation status @retval false Success @retval true Failure + + @notes + Normally with ALTER TABLE we roll forward as soon as data is copied + or new table is committed. For an ALTER TABLE that only does a RENAME, + we will roll back unless the RENAME fully completes. + If we crash while using enable/disable keys, this may have completed + and will not be rolled back. */ static bool simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list, Alter_info::enum_enable_or_disable keys_onoff, + TRIGGER_RENAME_PARAM *trigger_param, Alter_table_ctx *alter_ctx) { TABLE *table= table_list->table; MDL_ticket *mdl_ticket= table->mdl_ticket; + DDL_LOG_STATE ddl_log_state; + LEX_CSTRING storage_engine; + LEX_CUSTRING table_version; + uchar table_version_buff[MY_UUID_SIZE]; + char storage_engine_buff[NAME_LEN]; int error= 0; + bool partitioned; enum ha_extra_function extra_func= thd->locked_tables_mode ? 
HA_EXTRA_NOT_USED : HA_EXTRA_FORCE_REOPEN; DBUG_ENTER("simple_rename_or_index_change"); + bzero(&ddl_log_state, sizeof(ddl_log_state)); + + table_version.str= table_version_buff; + storage_engine.str= storage_engine_buff; + if ((table_version.length= table->s->tabledef_version.length)) + memcpy((char*) table_version.str, table->s->tabledef_version.str, + table_version.length); + partitioned= table->file->partition_engine(); + storage_engine.length= (strmake((char*) storage_engine.str, + table->file->real_table_type(), + sizeof(storage_engine_buff)-1) - + storage_engine.str); + if (keys_onoff != Alter_info::LEAVE_AS_IS) { @@ -9849,12 +9205,25 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list, error= alter_table_manage_keys(table, table->file->indexes_are_disabled(), keys_onoff); + if (table->s->tmp_table == NO_TMP_TABLE) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("CHANGE_INDEX") }; + ddl_log.org_storage_engine_name= storage_engine; + ddl_log.org_partitioned= partitioned; + ddl_log.org_database= table_list->table->s->db; + ddl_log.org_table= table_list->table->s->table_name; + ddl_log.org_table_id= table_version; + backup_log_ddl(&ddl_log); + } } if (likely(!error) && alter_ctx->is_table_renamed()) { THD_STAGE_INFO(thd, stage_rename); handlerton *old_db_type= table->s->db_type(); + /* Then do a 'simple' rename of the table. First we need to close all instances of 'source' table. 
@@ -9868,37 +9237,54 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list, close_all_tables_for_name(thd, table->s, HA_EXTRA_PREPARE_FOR_RENAME, NULL); + (void) ddl_log_rename_table(thd, &ddl_log_state, old_db_type, + &alter_ctx->db, &alter_ctx->table_name, + &alter_ctx->new_db, &alter_ctx->new_alias); if (mysql_rename_table(old_db_type, &alter_ctx->db, &alter_ctx->table_name, - &alter_ctx->new_db, &alter_ctx->new_alias, 0)) + &alter_ctx->new_db, &alter_ctx->new_alias, + &table_version, 0)) error= -1; - else if (Table_triggers_list::change_table_name(thd, - &alter_ctx->db, - &alter_ctx->alias, - &alter_ctx->table_name, - &alter_ctx->new_db, - &alter_ctx->new_alias)) + if (!error) + ddl_log_update_phase(&ddl_log_state, DDL_RENAME_PHASE_TRIGGER); + debug_crash_here("ddl_log_alter_before_rename_triggers"); + if (!error && + Table_triggers_list::change_table_name(thd, trigger_param, + &alter_ctx->db, + &alter_ctx->alias, + &alter_ctx->table_name, + &alter_ctx->new_db, + &alter_ctx->new_alias)) { (void) mysql_rename_table(old_db_type, &alter_ctx->new_db, &alter_ctx->new_alias, &alter_ctx->db, &alter_ctx->table_name, + &table_version, NO_FK_CHECKS); + ddl_log_disable_entry(&ddl_log_state); error= -1; } - /* Update stat tables last. This is to be able to handle rename of a stat table */ + /* + Update stat tables last. This is to be able to handle rename of + a stat table. 
+ */ if (error == 0) (void) rename_table_in_stat_tables(thd, &alter_ctx->db, &alter_ctx->table_name, &alter_ctx->new_db, &alter_ctx->new_alias); + debug_crash_here("ddl_log_alter_after_rename_triggers"); } if (likely(!error)) { + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); error= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); - + thd->binlog_xid= 0; if (likely(!error)) my_ok(thd); } + ddl_log_complete(&ddl_log_state); table_list->table= NULL; // For query cache query_cache_invalidate3(thd, table_list, 0); @@ -10024,9 +9410,12 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, uint order_num, ORDER *order, bool ignore, bool if_exists) { - bool engine_changed, error; + bool engine_changed, error, frm_is_created= false, error_handler_pushed= false; bool no_ha_table= true; /* We have not created table in storage engine yet */ TABLE *table, *new_table= nullptr; + DDL_LOG_STATE ddl_log_state; + Turn_errors_to_warnings_handler errors_to_warnings; + #ifdef WITH_PARTITION_STORAGE_ENGINE bool partition_changed= false; bool fast_alter_partition= false; @@ -10052,10 +9441,21 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, handlerton *new_db_type= create_info->db_type, *old_db_type; ha_rows copied=0, deleted=0; LEX_CUSTRING frm= {0,0}; - char index_file[FN_REFLEN], data_file[FN_REFLEN]; + LEX_CSTRING backup_name; + char index_file[FN_REFLEN], data_file[FN_REFLEN], backup_name_buff[60]; + uchar uuid_buffer[MY_UUID_SIZE]; MDL_request target_mdl_request; MDL_ticket *mdl_ticket= 0; Alter_table_prelocking_strategy alter_prelocking_strategy; + TRIGGER_RENAME_PARAM trigger_param; + + /* + Callback function that an engine can request to be called after executing + inplace alter table. 
+ */ + Alter_inplace_info::inplace_alter_table_commit_callback + *inplace_alter_table_committed= 0; + void *inplace_alter_table_committed_argument= 0; DBUG_ENTER("mysql_alter_table"); /* @@ -10100,6 +9500,13 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, } THD_STAGE_INFO(thd, stage_init_update); + bzero(&ddl_log_state, sizeof(ddl_log_state)); + + /* Temporary name for backup of original table */ + backup_name.str= backup_name_buff; + backup_name.length= my_snprintf(backup_name_buff, sizeof(backup_name_buff)-1, + "%s-backup-%lx-%llx", tmp_file_prefix, + current_pid, thd->thread_id); /* Check if the new table type is a shared table */ if (ha_check_if_updates_are_ignored(thd, create_info->db_type, "ALTER")) @@ -10157,7 +9564,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, (thd->lex->sql_command == SQLCOM_ALTER_TABLE || thd->lex->sql_command == SQLCOM_CREATE_INDEX || thd->lex->sql_command == SQLCOM_DROP_INDEX) && - !wsrep_should_replicate_ddl(thd, table_list->table->s->db_type()->db_type)) + !wsrep_should_replicate_ddl(thd, table_list->table->s->db_type())) DBUG_RETURN(true); #endif @@ -10483,6 +9890,16 @@ do_continue:; create_info)) DBUG_RETURN(true); + /* Check if rename of triggers are supported */ + if (alter_ctx.is_table_renamed() && + Table_triggers_list::prepare_for_rename(thd, &trigger_param, + &alter_ctx.db, + &alter_ctx.alias, + &alter_ctx.table_name, + &alter_ctx.new_db, + &alter_ctx.new_alias)) + DBUG_RETURN(true); + /* Look if we have to do anything at all. 
ALTER can become NOOP after handling @@ -10528,6 +9945,7 @@ do_continue:; } res= simple_rename_or_index_change(thd, table_list, alter_info->keys_onoff, + &trigger_param, &alter_ctx); } else @@ -10728,6 +10146,37 @@ do_continue:; DEBUG_SYNC(thd, "alter_table_before_create_table_no_lock"); + /* Create a new table version id for the new table */ + my_uuid(uuid_buffer); + create_info->tabledef_version.str= uuid_buffer; + create_info->tabledef_version.length= MY_UUID_SIZE; + + if (!table->s->tmp_table) + { + LEX_CSTRING path_to_frm= alter_ctx.get_tmp_cstring_path(); + LEX_CSTRING tmp_table= backup_name; + if (alter_ctx.is_table_renamed()) + tmp_table= alter_ctx.new_alias; + + if (ddl_log_alter_table(thd, &ddl_log_state, + old_db_type, + &alter_ctx.db, &alter_ctx.table_name, + new_db_type, + table->file->partition_ht(), + &alter_ctx.new_db, &alter_ctx.tmp_name, + &path_to_frm, + &tmp_table, + &create_info->tabledef_version, + table->file->table_version(), + alter_ctx.is_table_renamed()) || + ddl_log_store_query(thd, &ddl_log_state, + thd->query(), thd->query_length())) + { + error= 1; + goto err_cleanup; + } + } + tmp_disable_binlog(thd); create_info->options|=HA_CREATE_TMP_ALTER; if (!(alter_info->flags & ALTER_ADD_INDEX) && !alter_ctx.modified_primary_key) @@ -10736,20 +10185,33 @@ do_continue:; alter_info->flags|= ALTER_INDEX_ORDER; create_info->alias= alter_ctx.table_name; /* + Create the .frm file for the new table. Storage engine table will not be + created at this stage. + + No ddl logging needed as ddl_log_alter_query will take care of failed + table creations. 
+ Partitioning: part_info is passed via thd->work_part_info */ - error= create_table_impl(thd, alter_ctx.db, alter_ctx.table_name, + error= create_table_impl(thd, nullptr, nullptr, + alter_ctx.db, alter_ctx.table_name, alter_ctx.new_db, alter_ctx.tmp_name, - alter_ctx.get_tmp_path(), + alter_ctx.get_tmp_cstring_path(), thd->lex->create_info, create_info, alter_info, C_ALTER_TABLE_FRM_ONLY, NULL, &key_info, &key_count, &frm); reenable_binlog(thd); + + debug_crash_here("ddl_log_alter_after_create_frm"); + if (unlikely(error)) - { - my_free(const_cast<uchar*>(frm.str)); - DBUG_RETURN(true); - } + goto err_cleanup; + + /* Remember version id for temporary table */ + alter_ctx.tmp_id= create_info->tabledef_version; + + /* Remember that we have not created table in storage engine yet. */ + no_ha_table= true; if (alter_info->algorithm(thd) != Alter_info::ALTER_TABLE_ALGORITHM_COPY) { @@ -10765,6 +10227,14 @@ do_continue:; if (fill_alter_inplace_info(thd, table, varchar, &ha_alter_info)) goto err_new_table_cleanup; + alter_ctx.tmp_storage_engine_name_partitioned= + table->file->partition_engine(); + alter_ctx.tmp_storage_engine_name.length= + (strmake((char*) alter_ctx.tmp_storage_engine_name.str, + table->file->real_table_type(), + sizeof(alter_ctx.tmp_storage_engine_buff)-1) - + alter_ctx.tmp_storage_engine_name.str); + /* We can ignore ALTER_COLUMN_ORDER and instead check ALTER_STORED_COLUMN_ORDER & ALTER_VIRTUAL_COLUMN_ORDER. This @@ -10794,7 +10264,6 @@ do_continue:; */ table->file->ha_create_partitioning_metadata(alter_ctx.get_tmp_path(), NULL, CHF_DELETE_FLAG); - my_free(const_cast<uchar*>(frm.str)); goto end_inplace; } @@ -10804,6 +10273,11 @@ do_continue:; if (create_table_for_inplace_alter(thd, alter_ctx, &frm, &altered_share, &altered_table)) goto err_new_table_cleanup; + /* + Avoid creating frm again in ha_create_table() if inplace alter will not + be used. + */ + frm_is_created= 1; /* Set markers for fields in TABLE object for altered table. 
*/ update_altered_table(ha_alter_info, &altered_table); @@ -10876,16 +10350,21 @@ do_continue:; Set the truncated column values of thd as warning for alter table. */ - Check_level_instant_set check_level_save(thd, CHECK_FIELD_WARN); + enum_check_fields org_count_cuted_fields= thd->count_cuted_fields; + thd->count_cuted_fields= CHECK_FIELD_WARN; int res= mysql_inplace_alter_table(thd, table_list, table, &altered_table, &ha_alter_info, - &target_mdl_request, &alter_ctx); - my_free(const_cast<uchar*>(frm.str)); - + &target_mdl_request, &ddl_log_state, + &trigger_param, + &alter_ctx); + thd->count_cuted_fields= org_count_cuted_fields; + inplace_alter_table_committed= ha_alter_info.inplace_alter_table_committed; + inplace_alter_table_committed_argument= + ha_alter_info.inplace_alter_table_committed_argument; if (res) { cleanup_table_after_inplace_alter(&altered_table); - DBUG_RETURN(true); + goto err_cleanup; } cleanup_table_after_inplace_alter_keep_files(&altered_table); @@ -10937,11 +10416,15 @@ do_continue:; MYSQL_LOCK_USE_MALLOC)) goto err_new_table_cleanup; + ddl_log_update_phase(&ddl_log_state, DDL_ALTER_TABLE_PHASE_CREATED); + if (ha_create_table(thd, alter_ctx.get_tmp_path(), alter_ctx.new_db.str, alter_ctx.new_name.str, - create_info, &frm)) + create_info, &frm, frm_is_created)) goto err_new_table_cleanup; + debug_crash_here("ddl_log_alter_after_create_table"); + /* Mark that we have created table in storage engine. 
*/ no_ha_table= false; DEBUG_SYNC(thd, "alter_table_intermediate_table_created"); @@ -11042,7 +10525,7 @@ do_continue:; if (table->s->tmp_table != NO_TMP_TABLE) { - /* Close lock if this is a transactional table */ + /* Release lock if this is a transactional temporary table */ if (thd->lock) { if (thd->locked_tables_mode != LTM_LOCK_TABLES && @@ -11063,6 +10546,7 @@ do_continue:; goto err_new_table_cleanup; } } + new_table->s->table_creation_was_logged= table->s->table_creation_was_logged; /* Remove link to old table and rename the new one */ @@ -11079,19 +10563,34 @@ do_continue:; DROP + CREATE + data statement to the binary log */ thd->variables.option_bits&= ~OPTION_BIN_COMMIT_OFF; - (binlog_hton->commit)(binlog_hton, thd, 1); + binlog_commit(thd, true); } /* We don't replicate alter table statement on temporary tables */ if (!thd->is_current_stmt_binlog_format_row() && table_creation_was_logged && - !binlog_as_create_select && - write_bin_log_with_if_exists(thd, true, false, log_if_exists)) - DBUG_RETURN(true); - my_free(const_cast<uchar*>(frm.str)); + !binlog_as_create_select) + { + int tmp_error; + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); + tmp_error= write_bin_log_with_if_exists(thd, true, false, log_if_exists); + thd->binlog_xid= 0; + if (tmp_error) + goto err_cleanup; + } goto end_temporary; } + /* Remember storage engine name for the new table */ + alter_ctx.tmp_storage_engine_name_partitioned= + new_table->file->partition_engine(); + alter_ctx.tmp_storage_engine_name.length= + (strmake((char*) alter_ctx.tmp_storage_engine_name.str, + new_table->file->real_table_type(), + sizeof(alter_ctx.tmp_storage_engine_buff)-1) - + alter_ctx.tmp_storage_engine_name.str); + /* Check if file names for the engine are unique. If we change engine and file names are unique then we don't need to rename the original @@ -11135,6 +10634,18 @@ do_continue:; (mysql_execute_command()) to release metadata locks. 
*/ + debug_crash_here("ddl_log_alter_after_copy"); // Use old table + /* + We are now ready to use the new table. Update the state in the + ddl log so that recovery knows that the new table is ready and + in case of crash it should use the new one and log the query + to the binary log. + */ + if (engine_changed) + ddl_log_add_flag(&ddl_log_state, DDL_LOG_FLAG_ALTER_ENGINE_CHANGED); + ddl_log_update_phase(&ddl_log_state, DDL_ALTER_TABLE_PHASE_COPIED); + debug_crash_here("ddl_log_alter_after_log"); // Use new table + THD_STAGE_INFO(thd, stage_rename_result_table); if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME)) @@ -11146,18 +10657,6 @@ do_continue:; HA_EXTRA_NOT_USED, NULL); table_list->table= table= NULL; /* Safety */ - my_free(const_cast<uchar*>(frm.str)); - - /* - Rename the old table to temporary name to have a backup in case - anything goes wrong while renaming the new table. - We only have to do this if name of the table is not changed. - If we are changing to use another table handler, we don't - have to do the rename as the table names will not interfer. - */ - char backup_name_buff[FN_LEN]; - LEX_CSTRING backup_name; - backup_name.str= backup_name_buff; DBUG_PRINT("info", ("is_table_renamed: %d engine_changed: %d", alter_ctx.is_table_renamed(), engine_changed)); @@ -11169,13 +10668,16 @@ do_continue:; */ if (!alter_ctx.is_table_renamed() || alter_ctx.fk_error_if_delete_row) { - backup_name.length= my_snprintf(backup_name_buff, sizeof(backup_name_buff), - "%s-backup-%lx-%llx", tmp_file_prefix, - current_pid, thd->thread_id); - if (lower_case_table_names) - my_casedn_str(files_charset_info, backup_name_buff); + /* + Rename the old table to temporary name to have a backup in case + anything goes wrong while renaming the new table. + + We only have to do this if name of the table is not changed. + If we are changing to use another table handler, we don't + have to do the rename as the table names will not interfere. 
+ */ if (mysql_rename_table(old_db_type, &alter_ctx.db, &alter_ctx.table_name, - &alter_ctx.db, &backup_name, + &alter_ctx.db, &backup_name, &alter_ctx.id, FN_TO_IS_TMP | (engine_changed ? NO_HA_TABLE | NO_PAR_TABLE : 0))) { @@ -11192,13 +10694,27 @@ do_continue:; PSI_CALL_drop_table_share(0, alter_ctx.db.str, (int) alter_ctx.db.length, alter_ctx.table_name.str, (int) alter_ctx.table_name.length); } + debug_crash_here("ddl_log_alter_after_rename_to_backup"); + + if (!alter_ctx.is_table_renamed()) + { + /* + We should not set this stage in case of rename as we in this case + must execute DDL_ALTER_TABLE_PHASE_COPIED to remove the original table + */ + ddl_log_update_phase(&ddl_log_state, DDL_ALTER_TABLE_PHASE_OLD_RENAMED); + } + + debug_crash_here("ddl_log_alter_after_rename_to_backup_log"); // Rename the new table to the correct name. if (mysql_rename_table(new_db_type, &alter_ctx.new_db, &alter_ctx.tmp_name, &alter_ctx.new_db, &alter_ctx.new_alias, + &alter_ctx.tmp_id, FN_FROM_IS_TMP)) { // Rename failed, delete the temporary table. + ddl_log_update_phase(&ddl_log_state, DDL_ALTER_TABLE_PHASE_RENAME_FAILED); (void) quick_rm_table(thd, new_db_type, &alter_ctx.new_db, &alter_ctx.tmp_name, FN_IS_TMP); @@ -11206,18 +10722,20 @@ do_continue:; { // Restore the backup of the original table to the old name. (void) mysql_rename_table(old_db_type, &alter_ctx.db, &backup_name, - &alter_ctx.db, &alter_ctx.alias, + &alter_ctx.db, &alter_ctx.alias, &alter_ctx.id, FN_FROM_IS_TMP | NO_FK_CHECKS | (engine_changed ? NO_HA_TABLE | NO_PAR_TABLE : 0)); } goto err_with_mdl; } + debug_crash_here("ddl_log_alter_after_rename_to_original") // Check if we renamed the table and if so update trigger files. 
if (alter_ctx.is_table_renamed()) { - if (Table_triggers_list::change_table_name(thd, + debug_crash_here("ddl_log_alter_before_rename_triggers"); + if (Table_triggers_list::change_table_name(thd, &trigger_param, &alter_ctx.db, &alter_ctx.alias, &alter_ctx.table_name, @@ -11229,7 +10747,7 @@ do_continue:; &alter_ctx.new_db, &alter_ctx.new_alias, 0); // Restore the backup of the original table to the old name. (void) mysql_rename_table(old_db_type, &alter_ctx.db, &backup_name, - &alter_ctx.db, &alter_ctx.alias, + &alter_ctx.db, &alter_ctx.alias, &alter_ctx.id, FN_FROM_IS_TMP | NO_FK_CHECKS | (engine_changed ? NO_HA_TABLE | NO_PAR_TABLE : 0)); @@ -11237,20 +10755,26 @@ do_continue:; } rename_table_in_stat_tables(thd, &alter_ctx.db, &alter_ctx.alias, &alter_ctx.new_db, &alter_ctx.new_alias); + debug_crash_here("ddl_log_alter_after_rename_triggers"); } // ALTER TABLE succeeded, delete the backup of the old table. - error= quick_rm_table(thd, old_db_type, &alter_ctx.db, &backup_name, - FN_IS_TMP | - (engine_changed ? NO_HA_TABLE | NO_PAR_TABLE: 0)); + // a failure to delete isn't an error, as we cannot rollback ALTER anymore + thd->push_internal_handler(&errors_to_warnings); + error_handler_pushed=1; + + quick_rm_table(thd, old_db_type, &alter_ctx.db, &backup_name, + FN_IS_TMP | (engine_changed ? NO_HA_TABLE | NO_PAR_TABLE: 0)); + + debug_crash_here("ddl_log_alter_after_delete_backup"); if (engine_changed) { /* the .frm file was removed but not the original table */ - error|= quick_rm_table(thd, old_db_type, &alter_ctx.db, - &alter_ctx.table_name, - NO_FRM_RENAME | - (engine_changed ? 0 : FN_IS_TMP)); + quick_rm_table(thd, old_db_type, &alter_ctx.db, &alter_ctx.table_name, + NO_FRM_RENAME | (engine_changed ? 
0 : FN_IS_TMP)); } + + debug_crash_here("ddl_log_alter_after_drop_original_table"); if (binlog_as_create_select) { /* @@ -11258,27 +10782,23 @@ do_continue:; DROP + CREATE + data statement to the binary log */ thd->variables.option_bits&= ~OPTION_BIN_COMMIT_OFF; - binlog_hton->commit(binlog_hton, thd, 1); - } - - if (error) - { - /* - The fact that deletion of the backup failed is not critical - error, but still worth reporting as it might indicate serious - problem with server. - */ - goto err_with_mdl_after_alter; + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); + binlog_commit(thd, true); + thd->binlog_xid= 0; } end_inplace: thd->variables.option_bits&= ~OPTION_BIN_COMMIT_OFF; - if (thd->locked_tables_list.reopen_tables(thd, false)) - goto err_with_mdl_after_alter; + if (!error_handler_pushed) + thd->push_internal_handler(&errors_to_warnings); - THD_STAGE_INFO(thd, stage_end); + thd->locked_tables_list.reopen_tables(thd, false); + thd->pop_internal_handler(); + + THD_STAGE_INFO(thd, stage_end); DEBUG_SYNC(thd, "alter_table_before_main_binlog"); DBUG_ASSERT(!(mysql_bin_log.is_open() && @@ -11286,9 +10806,45 @@ end_inplace: (create_info->tmp_table()))); if (!binlog_as_create_select) { - if (write_bin_log_with_if_exists(thd, true, false, log_if_exists)) - DBUG_RETURN(true); + int tmp_error; + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); + tmp_error= write_bin_log_with_if_exists(thd, true, false, log_if_exists); + thd->binlog_xid= 0; + if (tmp_error) + goto err_cleanup; } + + /* + We have to close the ddl log as soon as possible, after binlogging the + query, for inplace alter table. 
+ */ + ddl_log_complete(&ddl_log_state); + if (inplace_alter_table_committed) + { + /* Signal to storage engine that ddl log is committed */ + (*inplace_alter_table_committed)(inplace_alter_table_committed_argument); + inplace_alter_table_committed= 0; + } + + if (!alter_ctx.tmp_table) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("ALTER") }; + ddl_log.org_storage_engine_name= alter_ctx.storage_engine_name; + ddl_log.org_partitioned= alter_ctx.storage_engine_partitioned; + ddl_log.org_database= alter_ctx.db; + ddl_log.org_table= alter_ctx.table_name; + ddl_log.org_table_id= alter_ctx.id; + ddl_log.new_storage_engine_name= alter_ctx.tmp_storage_engine_name; + ddl_log.new_partitioned= alter_ctx.tmp_storage_engine_name_partitioned; + ddl_log.new_database= alter_ctx.new_db; + ddl_log.new_table= alter_ctx.new_alias; + ddl_log.new_table_id= alter_ctx.tmp_id; + backup_log_ddl(&ddl_log); + } + table_list->table= NULL; // For query cache query_cache_invalidate3(thd, table_list, false); @@ -11302,6 +10858,8 @@ end_inplace: } end_temporary: + my_free(const_cast<uchar*>(frm.str)); + thd->variables.option_bits&= ~OPTION_BIN_COMMIT_OFF; my_snprintf(alter_ctx.tmp_buff, sizeof(alter_ctx.tmp_buff), @@ -11316,7 +10874,6 @@ err_new_table_cleanup: DBUG_PRINT("error", ("err_new_table_cleanup")); thd->variables.option_bits&= ~OPTION_BIN_COMMIT_OFF; - my_free(const_cast<uchar*>(frm.str)); /* No default value was provided for a DATE/DATETIME field, the current sql_mode doesn't allow the '0000-00-00' value and @@ -11340,23 +10897,19 @@ err_new_table_cleanup: &alter_ctx.new_db, &alter_ctx.tmp_name, (FN_IS_TMP | (no_ha_table ? 
NO_HA_TABLE : 0)), alter_ctx.get_tmp_path()); - + DEBUG_SYNC(thd, "alter_table_after_temp_table_drop"); +err_cleanup: + my_free(const_cast<uchar*>(frm.str)); + ddl_log_complete(&ddl_log_state); + if (inplace_alter_table_committed) + { + /* Signal to storage engine that ddl log is committed */ + (*inplace_alter_table_committed)(inplace_alter_table_committed_argument); + } DBUG_RETURN(true); -err_with_mdl_after_alter: - DBUG_PRINT("error", ("err_with_mdl_after_alter")); - /* the table was altered. binlog the operation */ - DBUG_ASSERT(!(mysql_bin_log.is_open() && - thd->is_current_stmt_binlog_format_row() && - (create_info->tmp_table()))); - /* - We can't reset error as we will return 'true' below and the server - expects that error is set - */ - if (!binlog_as_create_select) - write_bin_log_with_if_exists(thd, FALSE, FALSE, log_if_exists); - err_with_mdl: + ddl_log_complete(&ddl_log_state); /* An error happened while we were holding exclusive name metadata lock on table being altered. To be safe under LOCK TABLES we should @@ -11366,7 +10919,7 @@ err_with_mdl: thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); if (!table_list->table) thd->mdl_context.release_all_locks_for_name(mdl_ticket); - DBUG_RETURN(true); + goto err_cleanup; } @@ -11734,6 +11287,9 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, } else { + /* In case of alter ignore, notify the engine about it. 
*/ + if (ignore) + to->file->extra(HA_EXTRA_IGNORE_INSERT); DEBUG_SYNC(thd, "copy_data_between_tables_before"); found_count++; mysql_stage_set_work_completed(thd->m_stage_progress_psi, found_count); @@ -11763,6 +11319,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, cleanup_done= 1; to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + DEBUG_SYNC(thd, "copy_data_between_tables_before_reset_backup_lock"); if (backup_reset_alter_copy_lock(thd)) error= 1; @@ -11851,12 +11408,35 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy) } +/** + Collect field names of result set that will be sent to a client in result of + handling the CHECKSUM TABLE statement. + + @param thd Thread data object + @param[out] fields List of fields whose metadata should be collected for + sending to client + */ + +void fill_checksum_table_metadata_fields(THD *thd, List<Item> *fields) +{ + Item *item; + + item= new (thd->mem_root) Item_empty_string(thd, "Table", NAME_LEN*2); + item->set_maybe_null(); + fields->push_back(item, thd->mem_root); + + item= new (thd->mem_root) Item_int(thd, "Checksum", (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS); + item->set_maybe_null(); + fields->push_back(item, thd->mem_root); +} + + bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) { TABLE_LIST *table; List<Item> field_list; - Item *item; Protocol *protocol= thd->protocol; DBUG_ENTER("mysql_checksum_table"); @@ -11866,15 +11446,8 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, */ DBUG_ASSERT(! 
thd->in_sub_stmt); - field_list.push_back(item= new (thd->mem_root) - Item_empty_string(thd, "Table", NAME_LEN*2), - thd->mem_root); - item->maybe_null= 1; - field_list.push_back(item= new (thd->mem_root) - Item_int(thd, "Checksum", (longlong) 1, - MY_INT64_NUM_DECIMAL_DIGITS), - thd->mem_root); - item->maybe_null= 1; + fill_checksum_table_metadata_fields(thd, &field_list); + if (protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); @@ -11890,11 +11463,13 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, /* Open one table after the other to keep lock time as short as possible. */ for (table= tables; table; table= table->next_local) { - char table_name[SAFE_NAME_LEN*2+2]; + char table_name_buff[SAFE_NAME_LEN*2+2]; + LEX_CSTRING table_name= { table_name_buff, 0}; TABLE *t; TABLE_LIST *save_next_global; - strxmov(table_name, table->db.str ,".", table->table_name.str, NullS); + table_name.length= strxmov(table_name_buff, table->db.str ,".", + table->table_name.str, NullS) - table_name_buff; /* Remember old 'next' pointer and break the list. */ save_next_global= table->next_global; @@ -11914,7 +11489,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, table->next_global= save_next_global; protocol->prepare_for_resend(); - protocol->store(table_name, system_charset_info); + protocol->store(&table_name, system_charset_info); if (!t) { @@ -11996,6 +11571,8 @@ err: @retval true Engine not available/supported, error has been reported. @retval false Engine available/supported. + create_info->db_type & create_info->new_storage_engine_name + are updated. 
*/ bool check_engine(THD *thd, const char *db_name, @@ -12051,6 +11628,8 @@ bool check_engine(THD *thd, const char *db_name, *new_engine= myisam_hton; } + lex_string_set(&create_info->new_storage_engine_name, + ha_resolve_storage_engine_name(*new_engine)); DBUG_RETURN(false); } diff --git a/sql/sql_table.h b/sql/sql_table.h index 53741d934cc..aacb6c99f15 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -37,80 +37,7 @@ typedef struct st_key KEY; typedef struct st_key_cache KEY_CACHE; typedef struct st_lock_param_type ALTER_PARTITION_PARAM_TYPE; typedef struct st_order ORDER; - -enum ddl_log_entry_code -{ - /* - DDL_LOG_EXECUTE_CODE: - This is a code that indicates that this is a log entry to - be executed, from this entry a linked list of log entries - can be found and executed. - DDL_LOG_ENTRY_CODE: - An entry to be executed in a linked list from an execute log - entry. - DDL_IGNORE_LOG_ENTRY_CODE: - An entry that is to be ignored - */ - DDL_LOG_EXECUTE_CODE = 'e', - DDL_LOG_ENTRY_CODE = 'l', - DDL_IGNORE_LOG_ENTRY_CODE = 'i' -}; - -enum ddl_log_action_code -{ - /* - The type of action that a DDL_LOG_ENTRY_CODE entry is to - perform. - DDL_LOG_DELETE_ACTION: - Delete an entity - DDL_LOG_RENAME_ACTION: - Rename an entity - DDL_LOG_REPLACE_ACTION: - Rename an entity after removing the previous entry with the - new name, that is replace this entry. - DDL_LOG_EXCHANGE_ACTION: - Exchange two entities by renaming them a -> tmp, b -> a, tmp -> b. 
- */ - DDL_LOG_DELETE_ACTION = 'd', - DDL_LOG_RENAME_ACTION = 'r', - DDL_LOG_REPLACE_ACTION = 's', - DDL_LOG_EXCHANGE_ACTION = 'e' -}; - -enum enum_ddl_log_exchange_phase { - EXCH_PHASE_NAME_TO_TEMP= 0, - EXCH_PHASE_FROM_TO_NAME= 1, - EXCH_PHASE_TEMP_TO_FROM= 2 -}; - - -typedef struct st_ddl_log_entry -{ - const char *name; - const char *from_name; - const char *handler_name; - const char *tmp_name; - uint next_entry; - uint entry_pos; - enum ddl_log_entry_code entry_type; - enum ddl_log_action_code action_type; - /* - Most actions have only one phase. REPLACE does however have two - phases. The first phase removes the file with the new name if - there was one there before and the second phase renames the - old name to the new name. - */ - char phase; -} DDL_LOG_ENTRY; - -typedef struct st_ddl_log_memory_entry -{ - uint entry_pos; - struct st_ddl_log_memory_entry *next_log_entry; - struct st_ddl_log_memory_entry *prev_log_entry; - struct st_ddl_log_memory_entry *next_active_log_entry; -} DDL_LOG_MEMORY_ENTRY; - +typedef struct st_ddl_log_state DDL_LOG_STATE; enum enum_explain_filename_mode { @@ -151,6 +78,10 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db, const char *table, const char *ext, uint flags); uint build_table_shadow_filename(char *buff, size_t bufflen, ALTER_PARTITION_PARAM_TYPE *lpt); +void build_lower_case_table_filename(char *buff, size_t bufflen, + const LEX_CSTRING *db, + const LEX_CSTRING *table, + uint flags); uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen); bool mysql_create_table(THD *thd, TABLE_LIST *create_table, Table_specification_st *create_info, @@ -194,7 +125,10 @@ bool add_keyword_to_query(THD *thd, String *result, const LEX_CSTRING *keyword, #define C_ALTER_TABLE_FRM_ONLY -2 #define C_ASSISTED_DISCOVERY -3 -int mysql_create_table_no_lock(THD *thd, const LEX_CSTRING *db, +int mysql_create_table_no_lock(THD *thd, + DDL_LOG_STATE *ddl_log_state, + DDL_LOG_STATE *ddl_log_state_rm, + const 
LEX_CSTRING *db, const LEX_CSTRING *table_name, Table_specification_st *create_info, Alter_info *alter_info, bool *is_trans, @@ -237,22 +171,29 @@ bool mysql_create_like_table(THD *thd, TABLE_LIST *table, Table_specification_st *create_info); bool mysql_rename_table(handlerton *base, const LEX_CSTRING *old_db, const LEX_CSTRING *old_name, const LEX_CSTRING *new_db, - const LEX_CSTRING *new_name, uint flags); - + const LEX_CSTRING *new_name, LEX_CUSTRING *id, + uint flags); bool mysql_backup_table(THD* thd, TABLE_LIST* table_list); bool mysql_restore_table(THD* thd, TABLE_LIST* table_list); +template<typename T> class List; +void fill_checksum_table_metadata_fields(THD *thd, List<Item> *fields); bool mysql_checksum_table(THD* thd, TABLE_LIST* table_list, HA_CHECK_OPT* check_opt); bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists, bool drop_temporary, bool drop_sequence, bool dont_log_query); -int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, +int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, + const LEX_CSTRING *db, + DDL_LOG_STATE *ddl_log_state, + bool if_exists, bool drop_temporary, bool drop_view, bool drop_sequence, bool dont_log_query, bool dont_free_locks); bool log_drop_table(THD *thd, const LEX_CSTRING *db_name, - const LEX_CSTRING *table_name, bool temporary_table); + const LEX_CSTRING *table_name, const LEX_CSTRING *handler, + bool partitioned, const LEX_CUSTRING *id, + bool temporary_table); bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db, const LEX_CSTRING *table_name, uint flags, const char *table_path=0); @@ -264,19 +205,7 @@ int write_bin_log(THD *thd, bool clear_error, bool is_trans= FALSE); int write_bin_log_with_if_exists(THD *thd, bool clear_error, bool is_trans, bool add_if_exists); -bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, - DDL_LOG_MEMORY_ENTRY **active_entry); -bool write_execute_ddl_log_entry(uint first_entry, - bool complete, - DDL_LOG_MEMORY_ENTRY 
**active_entry); -bool deactivate_ddl_log_entry(uint entry_no); -void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry); -bool sync_ddl_log(); -void release_ddl_log(); -void execute_ddl_log_recovery(); -bool execute_ddl_log_entry(THD *thd, uint first_entry); -template<typename T> class List; void promote_first_timestamp_column(List<Create_field> *column_definitions); /* @@ -286,8 +215,7 @@ uint explain_filename(THD* thd, const char *from, char *to, uint to_length, enum_explain_filename_mode explain_mode); -extern MYSQL_PLUGIN_IMPORT const char *primary_key_name; -extern mysql_mutex_t LOCK_gdl; +extern MYSQL_PLUGIN_IMPORT const LEX_CSTRING primary_key_name; bool check_engine(THD *, const char *, const char *, HA_CREATE_INFO *); diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 07ebcc7a37a..e06600700bb 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -51,11 +51,13 @@ static const char *lock_descriptions[] = /* TL_READ_WITH_SHARED_LOCKS */ "Shared read lock", /* TL_READ_HIGH_PRIORITY */ "High priority read lock", /* TL_READ_NO_INSERT */ "Read lock without concurrent inserts", + /* TL_READ_SKIP_LOCKED */ "Read lock without blocking if row is locked", /* TL_WRITE_ALLOW_WRITE */ "Write lock that allows other writers", /* TL_WRITE_CONCURRENT_INSERT */ "Concurrent insert lock", /* TL_WRITE_DELAYED */ "Lock used by delayed insert", /* TL_WRITE_DEFAULT */ NULL, /* TL_WRITE_LOW_PRIORITY */ "Low priority write lock", + /* TL_WRITE_SKIP_LOCKED */ "Write lock but skip existing locked rows", /* TL_WRITE */ "High priority write lock", /* TL_WRITE_ONLY */ "Highest priority write lock" }; @@ -125,25 +127,26 @@ void TEST_filesort(SORT_FIELD *sortorder,uint s_length) char buff[256],buff2[256]; String str(buff,sizeof(buff),system_charset_info); String out(buff2,sizeof(buff2),system_charset_info); - const char *sep; + DBUG_ASSERT(s_length > 0); DBUG_ENTER("TEST_filesort"); out.length(0); - for (sep=""; s_length-- ; sortorder++, sep=" ") + for (; s_length-- ; 
sortorder++) { - out.append(sep); if (sortorder->reverse) out.append('-'); if (sortorder->field) { if (sortorder->field->table_name) { - out.append(*sortorder->field->table_name); + const char *table_name= *sortorder->field->table_name; + out.append(table_name, strlen(table_name)); out.append('.'); } - out.append(sortorder->field->field_name.str ? - sortorder->field->field_name.str : - "tmp_table_column"); + const char *name= sortorder->field->field_name.str; + if (!name) + name= "tmp_table_column"; + out.append(name, strlen(name)); } else { @@ -151,7 +154,9 @@ void TEST_filesort(SORT_FIELD *sortorder,uint s_length) sortorder->item->print(&str, QT_ORDINARY); out.append(str); } + out.append(' '); } + out.chop(); // Remove last space DBUG_LOCK_FILE; (void) fputs("\nInfo about FILESORT\n",DBUG_FILE); fprintf(DBUG_FILE,"Sortorder: %s\n",out.c_ptr_safe()); @@ -184,8 +189,8 @@ TEST_join(JOIN *join) JOIN_TAB *tab= jt_range->start + i; for (ref= 0; ref < tab->ref.key_parts; ref++) { - ref_key_parts[i].append(tab->ref.items[ref]->full_name()); - ref_key_parts[i].append(" "); + ref_key_parts[i].append(tab->ref.items[ref]->full_name_cstring()); + ref_key_parts[i].append(STRING_WITH_LEN(" ")); } } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 6dead4fef16..d29ef532382 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -26,14 +26,16 @@ #include "parse_file.h" #include "sp.h" #include "sql_base.h" -#include "sql_show.h" // append_definer, append_identifier -#include "sql_table.h" // build_table_filename, - // check_n_cut_mysql50_prefix -#include "sql_db.h" // get_default_db_collation -#include "sql_handler.h" // mysql_ha_rm_tables +#include "sql_show.h" // append_definer, append_identifier +#include "sql_table.h" // build_table_filename, + // check_n_cut_mysql50_prefix +#include "sql_db.h" // get_default_db_collation +#include "sql_handler.h" // mysql_ha_rm_tables #include "sp_cache.h" // sp_invalidate_cache #include <mysys_err.h> -#include "debug_sync.h" 
+#include "ddl_log.h" // ddl_log_state +#include "debug_sync.h" // DEBUG_SYNC +#include "debug.h" // debug_crash_here #include "mysql/psi/mysql_sp.h" #include "wsrep_mysqld.h" #include <my_time.h> @@ -41,6 +43,24 @@ /*************************************************************************/ +static bool add_table_for_trigger_internal(THD *thd, + const sp_name *trg_name, + bool if_exists, + TABLE_LIST **table, + char *trn_path_buff); + +/* + Functions for TRIGGER_RENAME_PARAM +*/ + +void TRIGGER_RENAME_PARAM::reset() +{ + delete table.triggers; + table.triggers= 0; + free_root(&table.mem_root, MYF(0)); +} + + /** Trigger_creation_ctx -- creation context of triggers. */ @@ -97,10 +117,11 @@ Trigger_creation_ctx::create(THD *thd, CHARSET_INFO *db_cl; bool invalid_creation_ctx= FALSE; + myf utf8_flag= thd->get_utf8_flag(); if (resolve_charset(client_cs_name->str, thd->variables.character_set_client, - &client_cs)) + &client_cs, MYF(utf8_flag))) { sql_print_warning("Trigger for table '%s'.'%s': " "invalid character_set_client value (%s).", @@ -113,7 +134,7 @@ Trigger_creation_ctx::create(THD *thd, if (resolve_collation(connection_cl_name->str, thd->variables.collation_connection, - &connection_cl)) + &connection_cl,MYF(utf8_flag))) { sql_print_warning("Trigger for table '%s'.'%s': " "invalid collation_connection value (%s).", @@ -124,7 +145,7 @@ Trigger_creation_ctx::create(THD *thd, invalid_creation_ctx= TRUE; } - if (resolve_collation(db_cl_name->str, NULL, &db_cl)) + if (resolve_collation(db_cl_name->str, NULL, &db_cl, MYF(utf8_flag))) { sql_print_warning("Trigger for table '%s'.'%s': " "invalid database_collation value (%s).", @@ -411,21 +432,24 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) This is a good candidate for a minor refactoring. 
*/ TABLE *table; - bool result= TRUE, refresh_metadata= FALSE; + bool result= true, refresh_metadata= false; + bool add_if_exists_to_binlog= false, action_executed= false; String stmt_query; bool lock_upgrade_done= FALSE; bool backup_of_table_list_done= 0;; MDL_ticket *mdl_ticket= NULL; MDL_request mdl_request_for_trn; Query_tables_list backup; - char path[FN_REFLEN + 1]; - char engine_name_buf[NAME_CHAR_LEN + 1]; - LEX_CSTRING engine_name= { engine_name_buf, 0 }; - + DDL_LOG_STATE ddl_log_state, ddl_log_state_tmp_file; + char trn_path_buff[FN_REFLEN]; + char path[FN_REFLEN + 1]; + DBUG_ENTER("mysql_create_or_drop_trigger"); /* Charset of the buffer for statement must be system one. */ stmt_query.set_charset(system_charset_info); + bzero(&ddl_log_state, sizeof(ddl_log_state)); + bzero(&ddl_log_state_tmp_file, sizeof(ddl_log_state_tmp_file)); /* QQ: This function could be merged in mysql_alter_table() function @@ -505,7 +529,8 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) goto end; } - if (add_table_for_trigger(thd, thd->lex->spname, if_exists, & tables)) + if (add_table_for_trigger_internal(thd, thd->lex->spname, if_exists, &tables, + trn_path_buff)) goto end; if (!tables) @@ -521,7 +546,8 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) */ result= FALSE; /* Still, we need to log the query ... */ - stmt_query.append(thd->query(), thd->query_length()); + stmt_query.set(thd->query(), thd->query_length(), system_charset_info); + action_executed= 1; goto end; } } @@ -548,7 +574,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) DBUG_ASSERT(tables->next_global == 0); build_table_filename(path, sizeof(path) - 1, tables->db.str, tables->alias.str, ".frm", 0); - tables->required_type= dd_frm_type(NULL, path, &engine_name); + tables->required_type= dd_frm_type(NULL, path, NULL, NULL, NULL); /* We do not allow creation of triggers on temporary tables or sequence. 
*/ if (tables->required_type == TABLE_TYPE_SEQUENCE || @@ -582,14 +608,21 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) tables->table= open_n_lock_single_table(thd, tables, TL_READ_NO_INSERT, 0); if (! tables->table) + { + if (!create && thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE) + { + /* TRN file exists but table does not. Drop the orphan trigger */ + thd->clear_error(); // Remove error from open + goto drop_orphan_trn; + } goto end; + } tables->table->use_all_columns(); } table= tables->table; #ifdef WITH_WSREP - if (WSREP(thd) && - !wsrep_should_replicate_ddl(thd, table->s->db_type()->db_type)) + if (WSREP(thd) && !wsrep_should_replicate_ddl(thd, table->s->db_type())) goto end; #endif @@ -608,11 +641,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) if (!table->triggers) { if (!create) - { - my_error(ER_TRG_DOES_NOT_EXIST, MYF(0)); - goto end; - } - + goto drop_orphan_trn; if (!(table->triggers= new (&table->mem_root) Table_triggers_list(table))) goto end; } @@ -628,15 +657,57 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) };); #endif /* WITH_WSREP && ENABLED_DEBUG_SYNC */ - result= (create ? 
- table->triggers->create_trigger(thd, tables, &stmt_query): - table->triggers->drop_trigger(thd, tables, &stmt_query)); + if (create) + result= table->triggers->create_trigger(thd, tables, &stmt_query, + &ddl_log_state, + &ddl_log_state_tmp_file); + else + { + result= table->triggers->drop_trigger(thd, tables, + &thd->lex->spname->m_name, + &stmt_query, + &ddl_log_state); + if (result) + { + thd->clear_error(); // Remove error from drop trigger + goto drop_orphan_trn; + } + } + action_executed= 1; refresh_metadata= TRUE; end: - if (!result) - result= write_bin_log(thd, TRUE, stmt_query.ptr(), stmt_query.length()); + if (!result && action_executed) + { + ulonglong save_option_bits= thd->variables.option_bits; + backup_log_info ddl_log; + + debug_crash_here("ddl_log_drop_before_binlog"); + if (add_if_exists_to_binlog) + thd->variables.option_bits|= OPTION_IF_EXISTS; + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); + result= write_bin_log(thd, TRUE, stmt_query.ptr(), + stmt_query.length()); + thd->binlog_xid= 0; + thd->variables.option_bits= save_option_bits; + debug_crash_here("ddl_log_drop_after_binlog"); + + bzero(&ddl_log, sizeof(ddl_log)); + if (create) + ddl_log.query= { C_STRING_WITH_LEN("CREATE") }; + else + ddl_log.query= { C_STRING_WITH_LEN("DROP") }; + ddl_log.org_storage_engine_name= { C_STRING_WITH_LEN("TRIGGER") }; + ddl_log.org_database= thd->lex->spname->m_db; + ddl_log.org_table= thd->lex->spname->m_name; + backup_log_ddl(&ddl_log); + } + ddl_log_complete(&ddl_log_state); + debug_crash_here("ddl_log_drop_before_delete_tmp"); + /* delete any created log files */ + result|= ddl_log_revert(thd, &ddl_log_state_tmp_file); if (mdl_request_for_trn.ticket) thd->mdl_context.release_lock(mdl_request_for_trn.ticket); @@ -659,7 +730,6 @@ end: */ sp_cache_invalidate(); } - /* If we are under LOCK TABLES we should restore original state of meta-data locks. 
Otherwise all locks will be released along @@ -688,6 +758,16 @@ wsrep_error_label: DBUG_ASSERT(result == 1); goto end; #endif + +drop_orphan_trn: + my_error(ER_REMOVED_ORPHAN_TRIGGER, MYF(ME_WARNING), + thd->lex->spname->m_name.str, tables->table_name.str); + mysql_file_delete(key_file_trg, trn_path_buff, MYF(0)); + result= thd->is_error(); + add_if_exists_to_binlog= 1; + action_executed= 1; // Ensure query is binlogged + stmt_query.set(thd->query(), thd->query_length(), system_charset_info); + goto end; } @@ -811,18 +891,23 @@ static void build_trig_stmt_query(THD *thd, TABLE_LIST *tables, */ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, - String *stmt_query) + String *stmt_query, + DDL_LOG_STATE *ddl_log_state, + DDL_LOG_STATE *ddl_log_state_tmp_file) { LEX *lex= thd->lex; TABLE *table= tables->table; char file_buff[FN_REFLEN], trigname_buff[FN_REFLEN]; - LEX_CSTRING file, trigname_file; + char backup_file_buff[FN_REFLEN]; char trg_definer_holder[USER_HOST_BUFF_SIZE]; + LEX_CSTRING backup_name= { backup_file_buff, 0 }; + LEX_CSTRING file, trigname_file; Item_trigger_field *trg_field; struct st_trigname trigname; String trigger_definition; Trigger *trigger= 0; - bool trigger_dropped= 0; + int error; + bool trigger_exists; DBUG_ENTER("create_trigger"); if (check_for_broken_triggers()) @@ -896,17 +981,40 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, trigname_file.str= trigname_buff; /* Use the filesystem to enforce trigger namespace constraints. */ - if (!access(trigname_buff, F_OK)) + trigger_exists= !access(trigname_file.str, F_OK); + + ddl_log_create_trigger(thd, ddl_log_state, &tables->db, &tables->table_name, + &lex->spname->m_name, + trigger_exists || table->triggers->count ? 
+ DDL_CREATE_TRIGGER_PHASE_DELETE_COPY : + DDL_CREATE_TRIGGER_PHASE_NO_OLD_TRIGGER); + + /* Make a backup of the .TRG file that we can restore in case of crash */ + if (table->triggers->count && + (sql_backup_definition_file(&file, &backup_name) || + ddl_log_delete_tmp_file(thd, ddl_log_state_tmp_file, &backup_name, + ddl_log_state))) + DBUG_RETURN(true); + + if (trigger_exists) { if (lex->create_info.or_replace()) { - String drop_trg_query; + LEX_CSTRING *sp_name= &thd->lex->spname->m_name; // alias + + /* Make a backup of the .TRN file that we can restore in case of crash */ + if (sql_backup_definition_file(&trigname_file, &backup_name) || + ddl_log_delete_tmp_file(thd, ddl_log_state_tmp_file, &backup_name, + ddl_log_state)) + DBUG_RETURN(true); + ddl_log_update_phase(ddl_log_state, DDL_CREATE_TRIGGER_PHASE_OLD_COPIED); + /* The following can fail if the trigger is for another table or there exists a .TRN file but there was no trigger for it in the .TRG file */ - if (unlikely(drop_trigger(thd, tables, &drop_trg_query))) + if (unlikely(drop_trigger(thd, tables, sp_name, 0, 0))) DBUG_RETURN(true); } else if (lex->create_info.if_not_exists()) @@ -936,6 +1044,11 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, DBUG_RETURN(true); } } + else + { + if (table->triggers->count) + ddl_log_update_phase(ddl_log_state, DDL_CREATE_TRIGGER_PHASE_OLD_COPIED); + } trigname.trigger_table= tables->table_name; @@ -944,7 +1057,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, going to access lex->sphead later in build_trig_stmt_query() */ if (!(trigger= new (&table->mem_root) Trigger(this, 0))) - goto err_without_cleanup; + goto err; /* Time with in microseconds */ trigger->hr_create_time= make_hr_time(thd->query_start(), @@ -953,7 +1066,11 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, /* Create trigger_name.TRN file to ensure trigger name is unique */ if (sql_create_definition_file(NULL, &trigname_file, 
&trigname_file_type, (uchar*)&trigname, trigname_file_parameters)) - goto err_without_cleanup; + { + delete trigger; + trigger= 0; + goto err; + } /* Populate the trigger object */ @@ -970,11 +1087,10 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, - connection collation contains pair {character set, collation}; - database collation contains pair {character set, collation}; */ - lex_string_set(&trigger->client_cs_name, thd->charset()->csname); - lex_string_set(&trigger->connection_cl_name, - thd->variables.collation_connection->name); - lex_string_set(&trigger->db_cl_name, - get_default_db_collation(thd, tables->db.str)->name); + trigger->client_cs_name= thd->charset()->cs_name; + trigger->connection_cl_name= thd->variables.collation_connection->coll_name; + trigger->db_cl_name= get_default_db_collation(thd, tables->db.str)->coll_name; + trigger->name= lex->spname->m_name; /* Add trigger in it's correct place */ add_trigger(lex->trg_chistics.event, @@ -985,30 +1101,31 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, /* Create trigger definition file .TRG */ if (unlikely(create_lists_needed_for_files(thd->mem_root))) - goto err_with_cleanup; + goto err; - if (!sql_create_definition_file(NULL, &file, &triggers_file_type, - (uchar*)this, triggers_file_parameters)) - DBUG_RETURN(false); - -err_with_cleanup: - /* Delete .TRN file */ - mysql_file_delete(key_file_trn, trigname_buff, MYF(MY_WME)); + debug_crash_here("ddl_log_create_before_create_trigger"); + error= sql_create_definition_file(NULL, &file, &triggers_file_type, + (uchar*)this, triggers_file_parameters); + debug_crash_here("ddl_log_create_after_create_trigger"); -err_without_cleanup: - delete trigger; // Safety, not critical + if (!error) + DBUG_RETURN(false); - if (trigger_dropped) +err: + DBUG_PRINT("error",("create trigger failed")); + if (trigger) { - String drop_trg_query; - drop_trg_query.append(STRING_WITH_LEN("DROP TRIGGER /* generated by failed CREATE 
TRIGGER */ ")); - drop_trg_query.append(&lex->spname->m_name); - /* - We dropped an existing trigger and was not able to recreate it because - of an internal error. Ensure it's also dropped on the slave. - */ - write_bin_log(thd, FALSE, drop_trg_query.ptr(), drop_trg_query.length()); + my_debug_put_break_here(); + /* Delete trigger from trigger list if it exists */ + find_trigger(&trigger->name, 1); + /* Free trigger memory */ + delete trigger; } + + /* Recover the old .TRN and .TRG files & delete backup files */ + ddl_log_revert(thd, ddl_log_state); + /* All backup files are now deleted */ + ddl_log_complete(ddl_log_state_tmp_file); DBUG_RETURN(true); } @@ -1104,8 +1221,8 @@ static bool rm_trigger_file(char *path, const LEX_CSTRING *db, True error */ -static bool rm_trigname_file(char *path, const LEX_CSTRING *db, - const LEX_CSTRING *trigger_name, myf MyFlags) +bool rm_trigname_file(char *path, const LEX_CSTRING *db, + const LEX_CSTRING *trigger_name, myf MyFlags) { build_table_filename(path, FN_REFLEN - 1, db->str, trigger_name->str, TRN_EXT, 0); @@ -1131,15 +1248,17 @@ bool Table_triggers_list::save_trigger_file(THD *thd, const LEX_CSTRING *db, { char file_buff[FN_REFLEN]; LEX_CSTRING file; + DBUG_ENTER("Table_triggers_list::save_trigger_file"); if (create_lists_needed_for_files(thd->mem_root)) - return true; + DBUG_RETURN(true); file.length= build_table_filename(file_buff, FN_REFLEN - 1, db->str, table_name->str, TRG_EXT, 0); file.str= file_buff; - return sql_create_definition_file(NULL, &file, &triggers_file_type, - (uchar*) this, triggers_file_parameters); + DBUG_RETURN(sql_create_definition_file(NULL, &file, &triggers_file_type, + (uchar*) this, + triggers_file_parameters)); } @@ -1203,44 +1322,68 @@ Trigger *Table_triggers_list::find_trigger(const LEX_CSTRING *name, */ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables, - String *stmt_query) + LEX_CSTRING *sp_name, + String *stmt_query, + DDL_LOG_STATE *ddl_log_state) { - const LEX_CSTRING 
*sp_name= &thd->lex->spname->m_name; // alias char path[FN_REFLEN]; Trigger *trigger; + DBUG_ENTER("Table_triggers_list::drop_trigger"); - stmt_query->set(thd->query(), thd->query_length(), stmt_query->charset()); + if (stmt_query) + stmt_query->set(thd->query(), thd->query_length(), stmt_query->charset()); /* Find and delete trigger from list */ if (!(trigger= find_trigger(sp_name, true))) { my_message(ER_TRG_DOES_NOT_EXIST, ER_THD(thd, ER_TRG_DOES_NOT_EXIST), MYF(0)); - return 1; + DBUG_RETURN(1); } + delete trigger; + + if (ddl_log_state) + { + LEX_CSTRING query= {0,0}; + if (stmt_query) + { + /* This code is executed in case of DROP TRIGGER */ + lex_string_set3(&query, thd->query(), thd->query_length()); + } + if (ddl_log_drop_trigger(thd, ddl_log_state, + &tables->db, &tables->table_name, + sp_name, &query)) + goto err; + } + debug_crash_here("ddl_log_drop_before_drop_trigger"); if (!count) // If no more triggers { /* - TODO: Probably instead of removing .TRG file we should move - to archive directory but this should be done as part of - parse_file.cc functionality (because we will need it - elsewhere). + It is safe to remove the trigger file. If something goes wrong during + drop or create ddl_log recovery will ensure that all related + trigger files are deleted or the original ones are restored. 
*/ if (rm_trigger_file(path, &tables->db, &tables->table_name, MYF(MY_WME))) - return 1; + goto err; } else { if (save_trigger_file(thd, &tables->db, &tables->table_name)) - return 1; + goto err; } + debug_crash_here("ddl_log_drop_before_drop_trn"); + if (rm_trigname_file(path, &tables->db, sp_name, MYF(MY_WME))) - return 1; + goto err; - delete trigger; - return 0; + debug_crash_here("ddl_log_drop_after_drop_trigger"); + + DBUG_RETURN(0); + +err: + DBUG_RETURN(1); } @@ -1545,12 +1688,9 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db, lex.raw_trg_on_table_name_begin); /* Copy pointers to character sets to make trigger easier to use */ - lex_string_set(&trigger->client_cs_name, - creation_ctx->get_client_cs()->csname); - lex_string_set(&trigger->connection_cl_name, - creation_ctx->get_connection_cl()->name); - lex_string_set(&trigger->db_cl_name, - creation_ctx->get_db_cl()->name); + trigger->client_cs_name= creation_ctx->get_client_cs()->cs_name; + trigger->connection_cl_name= creation_ctx->get_connection_cl()->coll_name; + trigger->db_cl_name= creation_ctx->get_db_cl()->coll_name; /* event can only be TRG_EVENT_MAX in case of fatal parse errors */ if (lex.trg_chistics.event != TRG_EVENT_MAX) @@ -1820,17 +1960,16 @@ void Trigger::get_trigger_info(LEX_CSTRING *trigger_stmt, @retval TRUE Otherwise. */ -bool add_table_for_trigger(THD *thd, - const sp_name *trg_name, - bool if_exists, - TABLE_LIST **table) +static bool add_table_for_trigger_internal(THD *thd, + const sp_name *trg_name, + bool if_exists, + TABLE_LIST **table, + char *trn_path_buff) { LEX *lex= thd->lex; - char trn_path_buff[FN_REFLEN]; LEX_CSTRING trn_path= { trn_path_buff, 0 }; LEX_CSTRING tbl_name= null_clex_str; - - DBUG_ENTER("add_table_for_trigger"); + DBUG_ENTER("add_table_for_trigger_internal"); build_trn_path(thd, trg_name, (LEX_STRING*) &trn_path); @@ -1863,6 +2002,23 @@ bool add_table_for_trigger(THD *thd, } +/* + Same as above, but with an allocated buffer. 
+ This is called by mysql_excute_command() in is here to keep stack + space down in the caller. +*/ + +bool add_table_for_trigger(THD *thd, + const sp_name *trg_name, + bool if_exists, + TABLE_LIST **table) +{ + char trn_path_buff[FN_REFLEN]; + return add_table_for_trigger_internal(thd, trg_name, if_exists, + table, trn_path_buff); +} + + /** Drop all triggers for table. @@ -2112,62 +2268,43 @@ bool Trigger::change_on_table_name(void* param_arg) } -/** - Update .TRG and .TRN files after renaming triggers' subject table. - - @param[in,out] thd Thread context - @param[in] db Old database of subject table - @param[in] old_alias Old alias of subject table - @param[in] old_table Old name of subject table - @param[in] new_db New database for subject table - @param[in] new_table New name of subject table - - @note - This method tries to leave trigger related files in consistent state, - i.e. it either will complete successfully, or will fail leaving files - in their initial state. - Also this method assumes that subject table is not renamed to itself. - This method needs to be called under an exclusive table metadata lock. +/* + Check if we can rename triggers in change_table_name() + The idea is to ensure that it is close to impossible that + change_table_name() should fail. 
- @retval FALSE Success - @retval TRUE Error + @return 0 ok + @return 1 Error: rename of triggers would fail */ -bool Table_triggers_list::change_table_name(THD *thd, const LEX_CSTRING *db, - const LEX_CSTRING *old_alias, - const LEX_CSTRING *old_table, - const LEX_CSTRING *new_db, - const LEX_CSTRING *new_table) +bool +Table_triggers_list::prepare_for_rename(THD *thd, + TRIGGER_RENAME_PARAM *param, + const LEX_CSTRING *db, + const LEX_CSTRING *old_alias, + const LEX_CSTRING *old_table, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_table) { - TABLE table; + TABLE *table= ¶m->table; bool result= 0; - bool upgrading50to51= FALSE; - Trigger *err_trigger; - DBUG_ENTER("Triggers::change_table_name"); + DBUG_ENTER("Table_triggers_lists::prepare_change_table_name"); - table.reset(); init_sql_alloc(key_memory_Table_trigger_dispatcher, - &table.mem_root, 8192, 0, MYF(0)); - - /* - This method interfaces the mysql server code protected by - an exclusive metadata lock. - */ - DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db->str, - old_table->str, - MDL_EXCLUSIVE)); + &table->mem_root, 8192, 0, MYF(0)); DBUG_ASSERT(my_strcasecmp(table_alias_charset, db->str, new_db->str) || - my_strcasecmp(table_alias_charset, old_alias->str, new_table->str)); + my_strcasecmp(table_alias_charset, old_alias->str, + new_table->str)); - if (Table_triggers_list::check_n_load(thd, db, old_table, &table, TRUE)) + if (Table_triggers_list::check_n_load(thd, db, old_table, table, TRUE)) { result= 1; goto end; } - if (table.triggers) + if (table->triggers) { - if (table.triggers->check_for_broken_triggers()) + if (table->triggers->check_for_broken_triggers()) { result= 1; goto end; @@ -2188,7 +2325,7 @@ bool Table_triggers_list::change_table_name(THD *thd, const LEX_CSTRING *db, if (check_n_cut_mysql50_prefix(db->str, dbname, sizeof(dbname)) && !my_strcasecmp(table_alias_charset, dbname, new_db->str)) { - upgrading50to51= TRUE; + param->upgrading50to51= TRUE; } else { @@ -2197,14 
+2334,70 @@ bool Table_triggers_list::change_table_name(THD *thd, const LEX_CSTRING *db, goto end; } } - if (unlikely(table.triggers->change_table_name_in_triggers(thd, db, new_db, + } + +end: + param->got_error= result; + DBUG_RETURN(result); +} + + +/** + Update .TRG and .TRN files after renaming triggers' subject table. + + @param[in,out] thd Thread context + @param[in] db Old database of subject table + @param[in] old_alias Old alias of subject table + @param[in] old_table Old name of subject table. The difference between + old_table and old_alias is that in case of lower_case_table_names + old_table == lowercase(old_alias) + @param[in] new_db New database for subject table + @param[in] new_table New name of subject table + + @note + This method tries to leave trigger related files in consistent state, + i.e. it either will complete successfully, or will fail leaving files + in their initial state. + Also this method assumes that subject table is not renamed to itself. + This method needs to be called under an exclusive table metadata lock. + + @retval FALSE Success + @retval TRUE Error +*/ + +bool Table_triggers_list::change_table_name(THD *thd, + TRIGGER_RENAME_PARAM *param, + const LEX_CSTRING *db, + const LEX_CSTRING *old_alias, + const LEX_CSTRING *old_table, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_table) +{ + TABLE *table= ¶m->table; + bool result= 0; + bool upgrading50to51= FALSE; + Trigger *err_trigger; + DBUG_ENTER("Table_triggers_list::change_table_name"); + + DBUG_ASSERT(!param->got_error); + /* + This method interfaces the mysql server code protected by + an exclusive metadata lock. 
+ */ + DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db->str, + old_table->str, + MDL_EXCLUSIVE)); + + if (table->triggers) + { + if (unlikely(table->triggers->change_table_name_in_triggers(thd, db, new_db, old_alias, new_table))) { result= 1; goto end; } - if ((err_trigger= table.triggers-> + if ((err_trigger= table->triggers-> change_table_name_in_trignames( upgrading50to51 ? db : NULL, new_db, new_table, 0))) { @@ -2214,10 +2407,10 @@ bool Table_triggers_list::change_table_name(THD *thd, const LEX_CSTRING *db, We assume that we will be able to undo our changes without errors (we can't do much if there will be an error anyway). */ - (void) table.triggers->change_table_name_in_trignames( + (void) table->triggers->change_table_name_in_trignames( upgrading50to51 ? new_db : NULL, db, old_alias, err_trigger); - (void) table.triggers->change_table_name_in_triggers( + (void) table->triggers->change_table_name_in_triggers( thd, db, new_db, new_table, old_alias); result= 1; @@ -2226,8 +2419,6 @@ bool Table_triggers_list::change_table_name(THD *thd, const LEX_CSTRING *db, } end: - delete table.triggers; - free_root(&table.mem_root, MYF(0)); DBUG_RETURN(result); } @@ -2327,7 +2518,7 @@ add_tables_and_routines_for_triggers(THD *thd, TABLE_LIST *table_list) { DBUG_ASSERT(static_cast<int>(table_list->lock_type) >= - static_cast<int>(TL_WRITE_ALLOW_WRITE)); + static_cast<int>(TL_FIRST_WRITE)); for (int i= 0; i < (int)TRG_EVENT_MAX; i++) { @@ -2396,9 +2587,9 @@ void Table_triggers_list::mark_fields_used(trg_event_type event) trg_field= trg_field->next_trg_field) { /* We cannot mark fields which does not present in table. 
*/ - if (trg_field->field_idx != (uint)-1) + if (trg_field->field_idx != NO_CACHED_FIELD_INDEX) { - DBUG_PRINT("info", ("marking field: %d", trg_field->field_idx)); + DBUG_PRINT("info", ("marking field: %u", (uint) trg_field->field_idx)); if (trg_field->get_settable_routine_parameter()) bitmap_set_bit(trigger_table->write_set, trg_field->field_idx); trigger_table->mark_column_with_deps( diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h index 101784ee776..774dca7cba1 100644 --- a/sql/sql_trigger.h +++ b/sql/sql_trigger.h @@ -28,6 +28,7 @@ class sp_name; class Query_tables_list; struct TABLE_LIST; class Query_tables_list; +typedef struct st_ddl_log_state DDL_LOG_STATE; /** Event on which trigger is invoked. */ enum trg_event_type @@ -78,6 +79,30 @@ struct st_trg_execution_order }; +/* + Parameter to change_table_name_in_triggers() +*/ + +class TRIGGER_RENAME_PARAM +{ +public: + TABLE table; + bool upgrading50to51; + bool got_error; + + TRIGGER_RENAME_PARAM() + { + upgrading50to51= got_error= 0; + table.reset(); + } + ~TRIGGER_RENAME_PARAM() + { + reset(); + } + void reset(); +}; + + class Table_triggers_list; /** @@ -219,8 +244,12 @@ public: } ~Table_triggers_list(); - bool create_trigger(THD *thd, TABLE_LIST *table, String *stmt_query); - bool drop_trigger(THD *thd, TABLE_LIST *table, String *stmt_query); + bool create_trigger(THD *thd, TABLE_LIST *table, String *stmt_query, + DDL_LOG_STATE *ddl_log_state, + DDL_LOG_STATE *ddl_log_state_tmp_file); + bool drop_trigger(THD *thd, TABLE_LIST *table, + LEX_CSTRING *sp_name, + String *stmt_query, DDL_LOG_STATE *ddl_log_state); bool process_triggers(THD *thd, trg_event_type event, trg_action_time_type time_type, bool old_row_is_record1); @@ -232,7 +261,14 @@ public: TABLE *table, bool names_only); static bool drop_all_triggers(THD *thd, const LEX_CSTRING *db, const LEX_CSTRING *table_name, myf MyFlags); - static bool change_table_name(THD *thd, const LEX_CSTRING *db, + static bool prepare_for_rename(THD *thd, 
TRIGGER_RENAME_PARAM *param, + const LEX_CSTRING *db, + const LEX_CSTRING *old_alias, + const LEX_CSTRING *old_table, + const LEX_CSTRING *new_db, + const LEX_CSTRING *new_table); + static bool change_table_name(THD *thd, TRIGGER_RENAME_PARAM *param, + const LEX_CSTRING *db, const LEX_CSTRING *old_alias, const LEX_CSTRING *old_table, const LEX_CSTRING *new_db, @@ -310,6 +346,7 @@ private: } }; + bool add_table_for_trigger(THD *thd, const sp_name *trg_name, bool continue_if_not_exist, @@ -324,6 +361,8 @@ bool load_table_name_for_trigger(THD *thd, const LEX_CSTRING *trn_path, LEX_CSTRING *tbl_name); bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create); +bool rm_trigname_file(char *path, const LEX_CSTRING *db, + const LEX_CSTRING *trigger_name, myf MyFlags); extern const char * const TRG_EXT; extern const char * const TRN_EXT; diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc index cb0849400ee..c6af72c5979 100644 --- a/sql/sql_truncate.cc +++ b/sql/sql_truncate.cc @@ -49,7 +49,7 @@ static bool fk_info_append_fields(THD *thd, String *str, while ((field= it++)) { res|= append_identifier(thd, str, field); - res|= str->append(", "); + res|= str->append(STRING_WITH_LEN(", ")); } str->chop(); @@ -81,17 +81,17 @@ static const char *fk_info_str(THD *thd, FOREIGN_KEY_INFO *fk_info) */ res|= append_identifier(thd, &str, fk_info->foreign_db); - res|= str.append("."); + res|= str.append('.'); res|= append_identifier(thd, &str, fk_info->foreign_table); - res|= str.append(", CONSTRAINT "); + res|= str.append(STRING_WITH_LEN(", CONSTRAINT ")); res|= append_identifier(thd, &str, fk_info->foreign_id); - res|= str.append(" FOREIGN KEY ("); + res|= str.append(STRING_WITH_LEN(" FOREIGN KEY (")); res|= fk_info_append_fields(thd, &str, &fk_info->foreign_fields); - res|= str.append(") REFERENCES "); + res|= str.append(STRING_WITH_LEN(") REFERENCES ")); res|= append_identifier(thd, &str, fk_info->referenced_db); - res|= str.append("."); + res|= str.append('.'); 
res|= append_identifier(thd, &str, fk_info->referenced_table); - res|= str.append(" ("); + res|= str.append(STRING_WITH_LEN(" (")); res|= fk_info_append_fields(thd, &str, &fk_info->referenced_fields); res|= str.append(')'); @@ -192,6 +192,7 @@ Sql_cmd_truncate_table::handler_truncate(THD *thd, TABLE_LIST *table_ref, { int error= 0; uint flags= 0; + TABLE *table; DBUG_ENTER("Sql_cmd_truncate_table::handler_truncate"); /* @@ -235,10 +236,41 @@ Sql_cmd_truncate_table::handler_truncate(THD *thd, TABLE_LIST *table_ref, if (fk_truncate_illegal_if_parent(thd, table_ref->table)) DBUG_RETURN(TRUNCATE_FAILED_SKIP_BINLOG); - error= table_ref->table->file->ha_truncate(); + table= table_ref->table; + + if ((table->file->ht->flags & HTON_TRUNCATE_REQUIRES_EXCLUSIVE_USE) && + !is_tmp_table) + { + if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN)) + DBUG_RETURN(TRUNCATE_FAILED_SKIP_BINLOG); + /* + Get rid of all TABLE instances belonging to this thread + except one to be used for TRUNCATE + */ + close_all_tables_for_name(thd, table->s, + HA_EXTRA_NOT_USED, + table); + } + + error= table->file->ha_truncate(); + + if (!is_tmp_table && !error) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("TRUNCATE") }; + ddl_log.org_partitioned= table->file->partition_engine(); + lex_string_set(&ddl_log.org_storage_engine_name, + table->file->real_table_type()); + ddl_log.org_database= table->s->db; + ddl_log.org_table= table->s->table_name; + ddl_log.org_table_id= table->s->tabledef_version; + backup_log_ddl(&ddl_log); + } + if (unlikely(error)) { - table_ref->table->file->print_error(error, MYF(0)); + table->file->print_error(error, MYF(0)); /* If truncate method is not implemented then we don't binlog the statement. If truncation has failed in a transactional engine then also @@ -246,7 +278,7 @@ Sql_cmd_truncate_table::handler_truncate(THD *thd, TABLE_LIST *table_ref, inspite of errors. 
*/ if (error == HA_ERR_WRONG_COMMAND || - table_ref->table->file->has_transactions_and_rollback()) + table->file->has_transactions_and_rollback()) DBUG_RETURN(TRUNCATE_FAILED_SKIP_BINLOG); else DBUG_RETURN(TRUNCATE_FAILED_BUT_BINLOG); @@ -305,7 +337,7 @@ bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref, hton= table->file->ht; #ifdef WITH_WSREP if (WSREP(thd) && - !wsrep_should_replicate_ddl(thd, hton->db_type)) + !wsrep_should_replicate_ddl(thd, hton)) DBUG_RETURN(TRUE); #endif @@ -329,7 +361,7 @@ bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref, #ifdef WITH_WSREP if (WSREP(thd) && hton != view_pseudo_hton && - !wsrep_should_replicate_ddl(thd, hton->db_type)) + !wsrep_should_replicate_ddl(thd, hton)) { tdc_release_share(share); DBUG_RETURN(TRUE); @@ -349,8 +381,8 @@ bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref, } } - *hton_can_recreate= !sequence - && ha_check_storage_engine_flag(hton, HTON_CAN_RECREATE); + *hton_can_recreate= (!sequence && + ha_check_storage_engine_flag(hton, HTON_CAN_RECREATE)); if (versioned) { @@ -476,10 +508,11 @@ bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref) if (error == TRUNCATE_OK && thd->locked_tables_mode && (table_ref->table->file->ht->flags & - HTON_REQUIRES_CLOSE_AFTER_TRUNCATE)) + (HTON_REQUIRES_CLOSE_AFTER_TRUNCATE | + HTON_TRUNCATE_REQUIRES_EXCLUSIVE_USE))) { thd->locked_tables_list.mark_table_for_reopen(thd, table_ref->table); - if (unlikely(thd->locked_tables_list.reopen_tables(thd, true))) + if (unlikely(thd->locked_tables_list.reopen_tables(thd, false))) thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); } diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index b9219515b48..2a7802f077e 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -212,7 +212,7 @@ bool get_type_attributes_for_tvc(THD *thd, Item *item; for (uint holder_pos= 0 ; (item= it++); holder_pos++) { - DBUG_ASSERT(item->is_fixed()); + 
DBUG_ASSERT(item->fixed()); holders[holder_pos].add_argument(item); } } @@ -397,8 +397,7 @@ bool table_value_constr::optimize(THD *thd) create_explain_query_if_not_exists(thd->lex, thd->mem_root); have_query_plan= QEP_AVAILABLE; - if (select_lex->select_number != UINT_MAX && - select_lex->select_number != INT_MAX /* this is not a UNION's "fake select */ && + if (select_lex->select_number != FAKE_SELECT_LEX_ID && have_query_plan != QEP_NOT_PRESENT_YET && thd->lex->explain && // for "SET" command in SPs. (!thd->lex->explain->get_select(select_lex->select_number))) @@ -435,6 +434,8 @@ bool table_value_constr::exec(SELECT_LEX *sl) DBUG_RETURN(true); } + fix_rownum_pointers(sl->parent_lex->thd, sl, &send_records); + while ((elem= li++)) { THD *cur_thd= sl->parent_lex->thd; @@ -657,7 +658,8 @@ static bool create_tvc_name(THD *thd, st_select_lex *parent_select, bool table_value_constr::to_be_wrapped_as_with_tail() { return select_lex->master_unit()->first_select()->next_select() && - select_lex->order_list.elements && select_lex->explicit_limit; + select_lex->order_list.elements && + select_lex->limit_params.explicit_limit; } @@ -805,15 +807,11 @@ st_select_lex *wrap_tvc_with_tail(THD *thd, st_select_lex *tvc_sl) return NULL; wrapper_sl->order_list= tvc_sl->order_list; - wrapper_sl->select_limit= tvc_sl->select_limit; - wrapper_sl->offset_limit= tvc_sl->offset_limit; + wrapper_sl->limit_params= tvc_sl->limit_params; wrapper_sl->braces= tvc_sl->braces; - wrapper_sl->explicit_limit= tvc_sl->explicit_limit; tvc_sl->order_list.empty(); - tvc_sl->select_limit= NULL; - tvc_sl->offset_limit= NULL; + tvc_sl->limit_params.clear(); tvc_sl->braces= 0; - tvc_sl->explicit_limit= false; if (tvc_sl->select_number == 1) { tvc_sl->select_number= wrapper_sl->select_number; diff --git a/sql/sql_type.cc b/sql/sql_type.cc index d2939f5e6e9..5335623eafd 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -725,7 +725,7 @@ uint Interval_DDhhmmssff::fsp(THD *thd, Item *item) case STRING_RESULT: 
break; } - if (!item->const_item() || item->is_expensive()) + if (!item->can_eval_in_optimize()) return TIME_SECOND_PART_DIGITS; Status st; Interval_DDhhmmssff it(thd, &st, false/*no warnings*/, item, UINT_MAX32, @@ -1186,9 +1186,9 @@ Datetime_truncation_not_needed::Datetime_truncation_not_needed(THD *thd, Item *i /********************************************************************/ -uint Type_numeric_attributes::find_max_decimals(Item **item, uint nitems) +decimal_digits_t Type_numeric_attributes::find_max_decimals(Item **item, uint nitems) { - uint res= 0; + decimal_digits_t res= 0; for (uint i= 0; i < nitems; i++) set_if_bigger(res, item[i]->decimals); return res; @@ -1225,9 +1225,10 @@ uint32 Type_numeric_attributes::find_max_octet_length(Item **item, uint nitems) } -int Type_numeric_attributes::find_max_decimal_int_part(Item **item, uint nitems) +decimal_digits_t Type_numeric_attributes:: +find_max_decimal_int_part(Item **item, uint nitems) { - int max_int_part= 0; + decimal_digits_t max_int_part= 0; for (uint i=0 ; i < nitems ; i++) set_if_bigger(max_int_part, item[i]->decimal_int_part()); return max_int_part; @@ -1244,11 +1245,12 @@ Type_numeric_attributes::aggregate_numeric_attributes_decimal(Item **item, uint nitems, bool unsigned_arg) { - int max_int_part= find_max_decimal_int_part(item, nitems); + decimal_digits_t max_int_part= find_max_decimal_int_part(item, nitems); decimals= find_max_decimals(item, nitems); - int precision= MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION); + decimal_digits_t precision= (decimal_digits_t) + MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION); max_length= my_decimal_precision_to_length_no_truncation(precision, - (uint8) decimals, + decimals, unsigned_flag); } @@ -1299,8 +1301,9 @@ Type_numeric_attributes::aggregate_numeric_attributes_real(Item **items, @retval False on success, true on error. 
*/ -bool Type_std_attributes::aggregate_attributes_string(const char *func_name, - Item **items, uint nitems) +bool Type_std_attributes:: +aggregate_attributes_string(const LEX_CSTRING &func_name, + Item **items, uint nitems) { if (agg_arg_charsets_for_string_result(collation, func_name, items, nitems, 1)) @@ -1429,10 +1432,10 @@ CHARSET_INFO *Type_handler::charset_for_protocol(const Item *item) const bool -Type_handler::Item_func_or_sum_illegal_param(const char *funcname) const +Type_handler::Item_func_or_sum_illegal_param(const LEX_CSTRING &funcname) const { my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - name().ptr(), funcname); + name().ptr(), funcname.str); return true; } @@ -1440,7 +1443,7 @@ Type_handler::Item_func_or_sum_illegal_param(const char *funcname) const bool Type_handler::Item_func_or_sum_illegal_param(const Item_func_or_sum *it) const { - return Item_func_or_sum_illegal_param(it->func_name()); + return Item_func_or_sum_illegal_param(it->func_name_cstring()); } @@ -1841,10 +1844,9 @@ Type_handler::bit_and_int_mixture_handler(uint max_char_length) @retval true - on error */ -bool -Type_handler_hybrid_field_type::aggregate_for_result(const char *funcname, - Item **items, uint nitems, - bool treat_bit_as_number) +bool Type_handler_hybrid_field_type:: +aggregate_for_result(const LEX_CSTRING &funcname, Item **items, uint nitems, + bool treat_bit_as_number) { bool bit_and_non_bit_mixture_found= false; uint32 max_display_length; @@ -1872,7 +1874,7 @@ Type_handler_hybrid_field_type::aggregate_for_result(const char *funcname, if (aggregate_for_result(cur)) { my_error(ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION, MYF(0), - type_handler()->name().ptr(), cur->name().ptr(), funcname); + type_handler()->name().ptr(), cur->name().ptr(), funcname.str); return true; } } @@ -2067,7 +2069,7 @@ Type_collection_std::aggregate_for_min_max(const Type_handler *ha, bool -Type_handler_hybrid_field_type::aggregate_for_min_max(const char *funcname, 
+Type_handler_hybrid_field_type::aggregate_for_min_max(const LEX_CSTRING &funcname, Item **items, uint nitems) { bool bit_and_non_bit_mixture_found= false; @@ -2083,7 +2085,7 @@ Type_handler_hybrid_field_type::aggregate_for_min_max(const char *funcname, if (aggregate_for_min_max(cur)) { my_error(ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION, MYF(0), - type_handler()->name().ptr(), cur->name().ptr(), funcname); + type_handler()->name().ptr(), cur->name().ptr(), funcname.str); return true; } } @@ -2269,8 +2271,8 @@ Type_handler::make_num_distinct_aggregator_field(MEM_ROOT *mem_root, { return new(mem_root) Field_double(NULL, item->max_length, - (uchar *) (item->maybe_null ? "" : 0), - item->maybe_null ? 1 : 0, Field::NONE, + (uchar *) (item->maybe_null() ? "" : 0), + item->maybe_null() ? 1 : 0, Field::NONE, &item->name, (uint8) item->decimals, 0, item->unsigned_flag); } @@ -2283,8 +2285,8 @@ Type_handler_float::make_num_distinct_aggregator_field(MEM_ROOT *mem_root, { return new(mem_root) Field_float(NULL, item->max_length, - (uchar *) (item->maybe_null ? "" : 0), - item->maybe_null ? 1 : 0, Field::NONE, + (uchar *) (item->maybe_null() ? "" : 0), + item->maybe_null() ? 1 : 0, Field::NONE, &item->name, (uint8) item->decimals, 0, item->unsigned_flag); } @@ -2298,8 +2300,8 @@ Type_handler_decimal_result::make_num_distinct_aggregator_field( { return new (mem_root) Field_new_decimal(NULL, item->max_length, - (uchar *) (item->maybe_null ? "" : 0), - item->maybe_null ? 1 : 0, Field::NONE, + (uchar *) (item->maybe_null() ? "" : 0), + item->maybe_null() ? 1 : 0, Field::NONE, &item->name, (uint8) item->decimals, 0, item->unsigned_flag); } @@ -2316,8 +2318,8 @@ Type_handler_int_result::make_num_distinct_aggregator_field(MEM_ROOT *mem_root, */ return new(mem_root) Field_longlong(NULL, item->max_length, - (uchar *) (item->maybe_null ? "" : 0), - item->maybe_null ? 1 : 0, Field::NONE, + (uchar *) (item->maybe_null() ? "" : 0), + item->maybe_null() ? 
1 : 0, Field::NONE, &item->name, 0, item->unsigned_flag); } @@ -4354,46 +4356,54 @@ int Type_handler_int_result::Item_save_in_field(Item *item, Field *field, /***********************************************************************/ -bool Type_handler_row::set_comparator_func(Arg_comparator *cmp) const +bool Type_handler_row:: +set_comparator_func(THD *thd, Arg_comparator *cmp) const { - return cmp->set_cmp_func_row(); + return cmp->set_cmp_func_row(thd); } -bool Type_handler_int_result::set_comparator_func(Arg_comparator *cmp) const +bool Type_handler_int_result:: +set_comparator_func(THD *thd, Arg_comparator *cmp) const { - return cmp->set_cmp_func_int(); + return cmp->set_cmp_func_int(thd); } -bool Type_handler_real_result::set_comparator_func(Arg_comparator *cmp) const +bool Type_handler_real_result:: +set_comparator_func(THD *thd, Arg_comparator *cmp) const { - return cmp->set_cmp_func_real(); + return cmp->set_cmp_func_real(thd); } -bool Type_handler_decimal_result::set_comparator_func(Arg_comparator *cmp) const +bool Type_handler_decimal_result:: +set_comparator_func(THD *thd, Arg_comparator *cmp) const { - return cmp->set_cmp_func_decimal(); + return cmp->set_cmp_func_decimal(thd); } -bool Type_handler_string_result::set_comparator_func(Arg_comparator *cmp) const +bool Type_handler_string_result:: +set_comparator_func(THD *thd, Arg_comparator *cmp) const { - return cmp->set_cmp_func_string(); + return cmp->set_cmp_func_string(thd); } -bool Type_handler_time_common::set_comparator_func(Arg_comparator *cmp) const +bool Type_handler_time_common:: +set_comparator_func(THD *thd, Arg_comparator *cmp) const { - return cmp->set_cmp_func_time(); + return cmp->set_cmp_func_time(thd); } bool -Type_handler_temporal_with_date::set_comparator_func(Arg_comparator *cmp) const +Type_handler_temporal_with_date:: +set_comparator_func(THD *thd, Arg_comparator *cmp) const { - return cmp->set_cmp_func_datetime(); + return cmp->set_cmp_func_datetime(thd); } bool 
-Type_handler_timestamp_common::set_comparator_func(Arg_comparator *cmp) const +Type_handler_timestamp_common:: +set_comparator_func(THD *thd, Arg_comparator *cmp) const { - return cmp->set_cmp_func_native(); + return cmp->set_cmp_func_native(thd); } @@ -4578,7 +4588,7 @@ Type_handler_timestamp_common::create_item_copy(THD *thd, Item *item) const bool Type_handler_int_result:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4604,7 +4614,7 @@ bool Type_handler_int_result:: bool Type_handler_real_result:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4616,7 +4626,7 @@ bool Type_handler_real_result:: bool Type_handler_decimal_result:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4629,7 +4639,7 @@ bool Type_handler_decimal_result:: bool Type_handler_string_result:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &func_name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4645,7 +4655,7 @@ bool Type_handler_string_result:: */ bool Type_handler_typelib:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &func_name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4677,7 +4687,7 @@ bool Type_handler_typelib:: bool Type_handler_blob_common:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &func_name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const 
@@ -4691,7 +4701,7 @@ bool Type_handler_blob_common:: bool Type_handler_date_common:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4703,7 +4713,7 @@ bool Type_handler_date_common:: bool Type_handler_time_common:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4715,7 +4725,7 @@ bool Type_handler_time_common:: bool Type_handler_datetime_common:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4727,7 +4737,7 @@ bool Type_handler_datetime_common:: bool Type_handler_timestamp_common:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -4747,7 +4757,7 @@ bool Type_handler:: with aggregating for CASE-alike functions (e.g. COALESCE) for the majority of data type handlers. */ - return Item_hybrid_func_fix_attributes(thd, func->func_name(), + return Item_hybrid_func_fix_attributes(thd, func->func_name_cstring(), func, func, items, nitems); } @@ -4768,7 +4778,7 @@ bool Type_handler_temporal_result:: set_if_bigger(func->decimals, deci); } - if (rc || func->maybe_null) + if (rc || func->maybe_null()) return rc; /* LEAST/GREATES(non-temporal, temporal) can return NULL. @@ -4791,7 +4801,8 @@ bool Type_handler_temporal_result:: continue; // No conversion. 
if (ha->cmp_type() != TIME_RESULT) { - func->maybe_null= true; // Conversion from non-temporal is not safe + // Conversion from non-temporal is not safe + func->set_maybe_null(); break; } timestamp_type tf= hf->mysql_timestamp_type(); @@ -4842,7 +4853,7 @@ bool Type_handler_temporal_result:: DBUG_ASSERT(hf->field_type() == MYSQL_TYPE_DATETIME); if (!(thd->variables.old_behavior & OLD_MODE_ZERO_DATE_TIME_CAST)) continue; - func->maybe_null= true; + func->set_maybe_null(); break; } return rc; @@ -4854,10 +4865,11 @@ bool Type_handler_date_common:: Item **items, uint nitems) const { func->fix_attributes_date(); - if (func->maybe_null) + if (func->maybe_null()) return false; /* - We cannot trust the generic maybe_null value calculated during fix_fields(). + We cannot trust the generic maybe_null value calculated during + fix_fields(). If a conversion from non-temoral types to DATE happens, then the result can be NULL (even if all arguments are not NULL). */ @@ -4865,7 +4877,7 @@ bool Type_handler_date_common:: { if (items[i]->type_handler()->cmp_type() != TIME_RESULT) { - func->maybe_null= true; + func->set_maybe_null(); break; } } @@ -6206,9 +6218,9 @@ String *Type_handler_row:: if (tmp) str->append(*tmp); else - str->append(STRING_WITH_LEN("NULL")); + str->append(NULL_clex_str); } - str->append(STRING_WITH_LEN(")")); + str->append(')'); return str; } @@ -6230,13 +6242,13 @@ String *Type_handler:: CHARSET_INFO *cs= thd->variables.character_set_client; buf.append('_'); - buf.append(result->charset()->csname); + buf.append(result->charset()->cs_name); if (cs->escape_with_backslash_is_dangerous) buf.append(' '); append_query_string(cs, &buf, result->ptr(), result->length(), thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES); - buf.append(" COLLATE '"); - buf.append(item->collation.collation->name); + buf.append(STRING_WITH_LEN(" COLLATE '")); + buf.append(item->collation.collation->coll_name); buf.append('\''); str->copy(buf); @@ -6747,7 +6759,7 @@ bool Type_handler:: 
item->arguments()[0]->time_precision(current_thd) : item->decimals; item->fix_attributes_temporal(MIN_TIME_WIDTH, dec); - item->maybe_null= true; + item->set_maybe_null(); return false; } @@ -6756,7 +6768,7 @@ bool Type_handler:: Item_date_typecast_fix_length_and_dec(Item_date_typecast *item) const { item->fix_attributes_temporal(MAX_DATE_WIDTH, 0); - item->maybe_null= true; + item->set_maybe_null(); return false; } @@ -6769,7 +6781,7 @@ bool Type_handler:: item->arguments()[0]->datetime_precision(current_thd) : item->decimals; item->fix_attributes_temporal(MAX_DATETIME_WIDTH, dec); - item->maybe_null= true; + item->set_maybe_null(); return false; } @@ -7039,25 +7051,25 @@ const Vers_type_handler* Type_handler_blob_common::vers() const /***************************************************************************/ -uint Type_handler::Item_time_precision(THD *thd, Item *item) const +decimal_digits_t Type_handler::Item_time_precision(THD *thd, Item *item) const { return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS); } -uint Type_handler::Item_datetime_precision(THD *thd, Item *item) const +decimal_digits_t Type_handler::Item_datetime_precision(THD *thd, Item *item) const { return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS); } -uint Type_handler_string_result::Item_temporal_precision(THD *thd, Item *item, - bool is_time) const +decimal_digits_t Type_handler_string_result:: +Item_temporal_precision(THD *thd, Item *item, bool is_time) const { StringBuffer<64> buf; String *tmp; MYSQL_TIME_STATUS status; - DBUG_ASSERT(item->is_fixed()); + DBUG_ASSERT(item->fixed()); // Nanosecond rounding is not needed here, for performance purposes if ((tmp= item->val_str(&buf)) && (is_time ? @@ -7068,34 +7080,34 @@ uint Type_handler_string_result::Item_temporal_precision(THD *thd, Item *item, Datetime(thd, &status, tmp->ptr(), tmp->length(), tmp->charset(), Datetime::Options(TIME_FUZZY_DATES, TIME_FRAC_TRUNCATE)). 
is_valid_datetime())) - return MY_MIN(status.precision, TIME_SECOND_PART_DIGITS); - return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS); + return (decimal_digits_t) MY_MIN(status.precision, TIME_SECOND_PART_DIGITS); + return (decimal_digits_t) MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS); } /***************************************************************************/ -uint Type_handler::Item_decimal_scale(const Item *item) const +decimal_digits_t Type_handler::Item_decimal_scale(const Item *item) const { - return item->decimals < NOT_FIXED_DEC ? - item->decimals : - MY_MIN(item->max_length, DECIMAL_MAX_SCALE); + return (item->decimals < NOT_FIXED_DEC ? + item->decimals : + (decimal_digits_t) MY_MIN(item->max_length, DECIMAL_MAX_SCALE)); } -uint Type_handler_temporal_result:: - Item_decimal_scale_with_seconds(const Item *item) const +decimal_digits_t Type_handler_temporal_result:: +Item_decimal_scale_with_seconds(const Item *item) const { - return item->decimals < NOT_FIXED_DEC ? - item->decimals : - TIME_SECOND_PART_DIGITS; + return (item->decimals < NOT_FIXED_DEC ? + item->decimals : + TIME_SECOND_PART_DIGITS); } -uint Type_handler::Item_divisor_precision_increment(const Item *item) const +decimal_digits_t Type_handler::Item_divisor_precision_increment(const Item *item) const { return item->decimals; } -uint Type_handler_temporal_result:: - Item_divisor_precision_increment_with_seconds(const Item *item) const +decimal_digits_t Type_handler_temporal_result:: +Item_divisor_precision_increment_with_seconds(const Item *item) const { return item->decimals < NOT_FIXED_DEC ? 
item->decimals : @@ -7104,7 +7116,7 @@ uint Type_handler_temporal_result:: /***************************************************************************/ -uint Type_handler_string_result::Item_decimal_precision(const Item *item) const +decimal_digits_t Type_handler_string_result::Item_decimal_precision(const Item *item) const { uint res= item->max_char_length(); /* @@ -7113,49 +7125,51 @@ uint Type_handler_string_result::Item_decimal_precision(const Item *item) const INT(0) or DECIMAL(0,0) when converting NULL or empty strings to INT/DECIMAL: CREATE TABLE t1 AS SELECT CONVERT(NULL,SIGNED) AS a; */ - return res ? MY_MIN(res, DECIMAL_MAX_PRECISION) : 1; + return res ? (decimal_digits_t) MY_MIN(res, DECIMAL_MAX_PRECISION) : (decimal_digits_t) 1; } -uint Type_handler_real_result::Item_decimal_precision(const Item *item) const +decimal_digits_t Type_handler_real_result::Item_decimal_precision(const Item *item) const { uint res= item->max_char_length(); - return res ? MY_MIN(res, DECIMAL_MAX_PRECISION) : 1; + return res ? 
(decimal_digits_t) MY_MIN(res, DECIMAL_MAX_PRECISION) : (decimal_digits_t) 1; } -uint Type_handler_decimal_result::Item_decimal_precision(const Item *item) const +decimal_digits_t Type_handler_decimal_result::Item_decimal_precision(const Item *item) const { uint prec= my_decimal_length_to_precision(item->max_char_length(), item->decimals, item->unsigned_flag); - return MY_MIN(prec, DECIMAL_MAX_PRECISION); + return (decimal_digits_t) MY_MIN(prec, DECIMAL_MAX_PRECISION); } -uint Type_handler_int_result::Item_decimal_precision(const Item *item) const +decimal_digits_t Type_handler_int_result::Item_decimal_precision(const Item *item) const { uint prec= my_decimal_length_to_precision(item->max_char_length(), item->decimals, item->unsigned_flag); - return MY_MIN(prec, DECIMAL_MAX_PRECISION); + return (decimal_digits_t) MY_MIN(prec, DECIMAL_MAX_PRECISION); } -uint Type_handler_time_common::Item_decimal_precision(const Item *item) const +decimal_digits_t Type_handler_time_common::Item_decimal_precision(const Item *item) const { - return 7 + MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS); + return (decimal_digits_t) (7 + MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS)); } -uint Type_handler_date_common::Item_decimal_precision(const Item *item) const +decimal_digits_t Type_handler_date_common::Item_decimal_precision(const Item *item) const { return 8; } -uint Type_handler_datetime_common::Item_decimal_precision(const Item *item) const +decimal_digits_t Type_handler_datetime_common:: +Item_decimal_precision(const Item *item) const { - return 14 + MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS); + return (decimal_digits_t) (14 + MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS)); } -uint Type_handler_timestamp_common::Item_decimal_precision(const Item *item) const +decimal_digits_t Type_handler_timestamp_common:: +Item_decimal_precision(const Item *item) const { - return 14 + MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS); + return (decimal_digits_t) (14 + 
MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS)); } /***************************************************************************/ @@ -7517,7 +7531,7 @@ bool Type_handler::Item_send_timestamp(Item *item, if (native.is_null()) return protocol->store_null(); native.to_TIME(protocol->thd, &buf->value.m_time); - return protocol->store(&buf->value.m_time, item->decimals); + return protocol->store_datetime(&buf->value.m_time, item->decimals); } @@ -7527,7 +7541,7 @@ bool Type_handler:: item->get_date(protocol->thd, &buf->value.m_time, Datetime::Options(protocol->thd)); if (!item->null_value) - return protocol->store(&buf->value.m_time, item->decimals); + return protocol->store_datetime(&buf->value.m_time, item->decimals); return protocol->store_null(); } @@ -7690,7 +7704,7 @@ static void wrong_precision_error(uint errcode, Item *a, */ bool get_length_and_scale(ulonglong length, ulonglong decimals, - uint *out_length, uint *out_decimals, + uint *out_length, decimal_digits_t *out_decimals, uint max_precision, uint max_scale, Item *a) { @@ -7705,7 +7719,7 @@ bool get_length_and_scale(ulonglong length, ulonglong decimals, return 1; } - *out_decimals= (uint) decimals; + *out_decimals= (decimal_digits_t) decimals; my_decimal_trim(&length, out_decimals); *out_length= (uint) length; @@ -7773,7 +7787,8 @@ Item *Type_handler_decimal_result:: create_typecast_item(THD *thd, Item *item, const Type_cast_attributes &attr) const { - uint len, dec; + uint len; + decimal_digits_t dec; if (get_length_and_scale(attr.length(), attr.decimals(), &len, &dec, DECIMAL_MAX_PRECISION, DECIMAL_MAX_SCALE, item)) return NULL; @@ -7785,7 +7800,8 @@ Item *Type_handler_double:: create_typecast_item(THD *thd, Item *item, const Type_cast_attributes &attr) const { - uint len, dec; + uint len; + decimal_digits_t dec; if (!attr.length_specified()) return new (thd->mem_root) Item_double_typecast(thd, item, DBL_DIG + 7, @@ -9218,7 +9234,7 @@ bool Type_handler::Column_definition_data_type_info_image(Binary_string 
*to, // Have *some* columns write type info (let's use string fields as an example) DBUG_EXECUTE_IF("frm_data_type_info_emulate", if (cmp_type() == STRING_RESULT) - return to->append("x", 1) || + return to->append_char('x') || to->append(name().lex_cstring());); if (type_collection() != &type_collection_std) return to->append(name().lex_cstring()); @@ -9267,7 +9283,7 @@ Type_handler_general_purpose_int::partition_field_append_value( const { DBUG_ASSERT(item_expr->cmp_type() == INT_RESULT); - StringBuffer<21> tmp; + StringBuffer<LONGLONG_BUFFER_SIZE> tmp; longlong value= item_expr->val_int(); tmp.set(value, system_charset_info); return str->append(tmp); @@ -9338,7 +9354,7 @@ bool Type_handler::partition_field_append_value( String *res; if (!(res= item_expr->val_str(&buf))) - return str->append(STRING_WITH_LEN("NULL"), system_charset_info); + return str->append(NULL_clex_str, system_charset_info); if (!res->length()) return str->append(STRING_WITH_LEN("''"), system_charset_info); @@ -9379,33 +9395,27 @@ LEX_CSTRING Charset::collation_specific_name() const for character sets and collations, so a collation name not necessarily starts with the character set name. 
*/ - LEX_CSTRING retval; - size_t csname_length= strlen(m_charset->csname); - if (strncmp(m_charset->name, m_charset->csname, csname_length)) - { - retval.str= NULL; - retval.length= 0; - return retval; - } - const char *ptr= m_charset->name + csname_length; - retval.str= ptr; - retval.length= strlen(ptr); - return retval; + size_t cs_name_length= m_charset->cs_name.length; + if (strncmp(m_charset->coll_name.str, m_charset->cs_name.str, + cs_name_length)) + return {NULL, 0}; + const char *ptr= m_charset->coll_name.str + cs_name_length; + return {ptr, m_charset->coll_name.length - cs_name_length }; } bool Charset::encoding_allows_reinterpret_as(const CHARSET_INFO *cs) const { - if (!strcmp(m_charset->csname, cs->csname)) + if (my_charset_same(m_charset, cs)) return true; - if (!strcmp(m_charset->csname, MY_UTF8MB3) && - !strcmp(cs->csname, MY_UTF8MB4)) + if (!strcmp(m_charset->cs_name.str, MY_UTF8MB3) && + !strcmp(cs->cs_name.str, MY_UTF8MB4)) return true; /* - Originally we allowed here instat ALTER for ASCII-to-LATIN1 + Originally we allowed here instant ALTER for ASCII-to-LATIN1 and UCS2-to-UTF16, but this was wrong: - MariaDB's ascii is not a subset for 8-bit character sets like latin1, because it allows storing bytes 0x80..0xFF as diff --git a/sql/sql_type.h b/sql/sql_type.h index 8a3a3776b52..fcfc251e003 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -83,7 +83,7 @@ class Type_handler_hybrid_field_type; class Sort_param; class Arg_comparator; class Spvar_definition; -struct st_value; +class st_value; class Protocol; class handler; struct TABLE; @@ -399,7 +399,7 @@ public: { return m_ptr ? m_ptr->to_string(to, prec, dec, filler) : NULL; } - int to_binary(uchar *bin, int prec, int scale) const + int to_binary(uchar *bin, int prec, decimal_digits_t scale) const { return (m_ptr ? 
m_ptr : &decimal_zero)->to_binary(bin, prec, scale); } @@ -422,12 +422,13 @@ class Dec_ptr_and_buffer: public Dec_ptr protected: my_decimal m_buffer; public: + /* scale is int as it can be negative here */ int round_to(my_decimal *to, int scale, decimal_round_mode mode) { DBUG_ASSERT(m_ptr); return m_ptr->round_to(to, scale, mode); } - int round_self(uint scale, decimal_round_mode mode) + int round_self(decimal_digits_t scale, decimal_round_mode mode) { return round_to(&m_buffer, scale, mode); } @@ -439,7 +440,7 @@ public: m_ptr= &m_buffer; return res; } - String *to_string_round(String *to, uint dec) + String *to_string_round(String *to, decimal_digits_t dec) { /* decimal_round() allows from==to @@ -3052,28 +3053,27 @@ char_to_byte_length_safe(size_t char_length_arg, uint32 mbmaxlen_arg) return tmp > UINT_MAX32 ? (uint32) UINT_MAX32 : static_cast<uint32>(tmp); } - class Type_numeric_attributes { public: static uint count_unsigned(Item **item, uint nitems); static uint32 find_max_char_length(Item **item, uint nitems); static uint32 find_max_octet_length(Item **item, uint nitems); - static int find_max_decimal_int_part(Item **item, uint nitems); - static uint find_max_decimals(Item **item, uint nitems); + static decimal_digits_t find_max_decimal_int_part(Item **item, uint nitems); + static decimal_digits_t find_max_decimals(Item **item, uint nitems); public: /* The maximum value length in characters multiplied by collation->mbmaxlen. Almost always it's the maximum value length in bytes. 
*/ uint32 max_length; - uint decimals; + decimal_digits_t decimals; bool unsigned_flag; public: Type_numeric_attributes() :max_length(0), decimals(0), unsigned_flag(false) { } - Type_numeric_attributes(uint32 max_length_arg, uint decimals_arg, + Type_numeric_attributes(uint32 max_length_arg, decimal_digits_t decimals_arg, bool unsigned_flag_arg) :max_length(max_length_arg), decimals(decimals_arg), @@ -3090,9 +3090,10 @@ protected: class Type_temporal_attributes: public Type_numeric_attributes { public: - Type_temporal_attributes(uint int_part_length, uint dec, bool unsigned_arg) + Type_temporal_attributes(uint32 int_part_length, decimal_digits_t dec, bool unsigned_arg) :Type_numeric_attributes(int_part_length + (dec ? 1 : 0), - MY_MIN(dec, TIME_SECOND_PART_DIGITS), + MY_MIN(dec, + (decimal_digits_t) TIME_SECOND_PART_DIGITS), unsigned_arg) { max_length+= decimals; @@ -3103,7 +3104,7 @@ public: class Type_temporal_attributes_not_fixed_dec: public Type_numeric_attributes { public: - Type_temporal_attributes_not_fixed_dec(uint32 int_part_length, uint dec, + Type_temporal_attributes_not_fixed_dec(uint32 int_part_length, decimal_digits_t dec, bool unsigned_flag) :Type_numeric_attributes(int_part_length, dec, unsigned_flag) { @@ -3158,7 +3159,7 @@ public: max_length= char_to_byte_length_safe(max_char_length_arg, collation.collation->mbmaxlen); } - void fix_attributes_temporal(uint32 int_part_length, uint dec) + void fix_attributes_temporal(uint32 int_part_length, decimal_digits_t dec) { *this= Type_std_attributes( Type_temporal_attributes(int_part_length, dec, false), @@ -3168,11 +3169,11 @@ public: { fix_attributes_temporal(MAX_DATE_WIDTH, 0); } - void fix_attributes_time(uint dec) + void fix_attributes_time(decimal_digits_t dec) { fix_attributes_temporal(MIN_TIME_WIDTH, dec); } - void fix_attributes_datetime(uint dec) + void fix_attributes_datetime(decimal_digits_t dec) { fix_attributes_temporal(MAX_DATETIME_WIDTH, dec); } @@ -3196,7 +3197,7 @@ public: 
aggregate_numeric_attributes_decimal(items, nitems, (unsigned_flag= unsigned_arg)); } - bool aggregate_attributes_string(const char *func_name, + bool aggregate_attributes_string(const LEX_CSTRING &func_name, Item **item, uint nitems); void aggregate_attributes_temporal(uint int_part_length, Item **item, uint nitems) @@ -3204,10 +3205,11 @@ public: fix_attributes_temporal(int_part_length, find_max_decimals(item, nitems)); } - bool agg_item_collations(DTCollation &c, const char *name, + bool agg_item_collations(DTCollation &c, const LEX_CSTRING &name, Item **items, uint nitems, uint flags, int item_sep); - bool agg_item_set_converter(const DTCollation &coll, const char *fname, + bool agg_item_set_converter(const DTCollation &coll, + const LEX_CSTRING &name, Item **args, uint nargs, uint flags, int item_sep); @@ -3239,7 +3241,7 @@ public: agg_item_charsets(coll, fname, &args[2], 2, flags, 3) */ - bool agg_arg_charsets(DTCollation &c, const char *func_name, + bool agg_arg_charsets(DTCollation &c, const LEX_CSTRING &func_name, Item **items, uint nitems, uint flags, int item_sep) { @@ -3252,7 +3254,8 @@ public: - convert to @@character_set_connection if all arguments are numbers - allow DERIVATION_NONE */ - bool agg_arg_charsets_for_string_result(DTCollation &c, const char *func_name, + bool agg_arg_charsets_for_string_result(DTCollation &c, + const LEX_CSTRING &func_name, Item **items, uint nitems, int item_sep) { @@ -3268,7 +3271,7 @@ public: - disallow DERIVATION_NONE */ bool agg_arg_charsets_for_string_result_with_comparison(DTCollation &c, - const char *func_name, + const LEX_CSTRING &func_name, Item **items, uint nitems, int item_sep) @@ -3286,7 +3289,7 @@ public: - don't allow DERIVATION_NONE */ bool agg_arg_charsets_for_comparison(DTCollation &c, - const char *func_name, + const LEX_CSTRING &func_name, Item **items, uint nitems, int item_sep) { @@ -3309,9 +3312,9 @@ public: :Type_std_attributes(other) { } virtual ~Type_all_attributes() {} - virtual void 
set_maybe_null(bool maybe_null_arg)= 0; + virtual void set_type_maybe_null(bool maybe_null_arg)= 0; // Returns total number of decimal digits - virtual uint decimal_precision() const= 0; + virtual decimal_digits_t decimal_precision() const= 0; virtual const TYPELIB *get_typelib() const= 0; virtual void set_typelib(const TYPELIB *typelib)= 0; }; @@ -3462,7 +3465,7 @@ class Information_schema_numeric_attributes ATTR_PRECISION_AND_SCALE= (ATTR_PRECISION|ATTR_SCALE) }; uint m_precision; - uint m_scale; + decimal_digits_t m_scale; enum_attr m_available_attributes; public: Information_schema_numeric_attributes() @@ -3473,7 +3476,7 @@ public: :m_precision(precision), m_scale(0), m_available_attributes(ATTR_PRECISION) { } - Information_schema_numeric_attributes(uint precision, uint scale) + Information_schema_numeric_attributes(uint precision, decimal_digits_t scale) :m_precision(precision), m_scale(scale), m_available_attributes(ATTR_PRECISION_AND_SCALE) { } @@ -3484,10 +3487,10 @@ public: DBUG_ASSERT(has_precision()); return (uint) m_precision; } - uint scale() const + decimal_digits_t scale() const { DBUG_ASSERT(has_scale()); - return (uint) m_scale; + return m_scale; } }; @@ -3592,7 +3595,7 @@ protected: longlong value, const SORT_FIELD_ATTR *sort_field) const; - bool Item_func_or_sum_illegal_param(const char *name) const; + bool Item_func_or_sum_illegal_param(const LEX_CSTRING &name) const; bool Item_func_or_sum_illegal_param(const Item_func_or_sum *) const; bool check_null(const Item *item, st_value *value) const; bool Item_send_str(Item *item, Protocol *protocol, st_value *buf) const; @@ -3847,15 +3850,15 @@ public: virtual bool can_return_extract_source(interval_type type) const; virtual bool is_bool_type() const { return false; } virtual bool is_general_purpose_string_type() const { return false; } - virtual uint Item_time_precision(THD *thd, Item *item) const; - virtual uint Item_datetime_precision(THD *thd, Item *item) const; - virtual uint 
Item_decimal_scale(const Item *item) const; - virtual uint Item_decimal_precision(const Item *item) const= 0; + virtual decimal_digits_t Item_time_precision(THD *thd, Item *item) const; + virtual decimal_digits_t Item_datetime_precision(THD *thd, Item *item) const; + virtual decimal_digits_t Item_decimal_scale(const Item *item) const; + virtual decimal_digits_t Item_decimal_precision(const Item *item) const= 0; /* Returns how many digits a divisor adds into a division result. See Item::divisor_precision_increment() in item.h for more comments. */ - virtual uint Item_divisor_precision_increment(const Item *) const; + virtual decimal_digits_t Item_divisor_precision_increment(const Item *) const; /** Makes a temporary table Field to handle numeric aggregate functions, e.g. SUM(DISTINCT expr), AVG(DISTINCT expr), etc. @@ -4198,7 +4201,7 @@ public: MY_ASSERT_UNREACHABLE(); return 0; } - virtual bool set_comparator_func(Arg_comparator *cmp) const= 0; + virtual bool set_comparator_func(THD *thd, Arg_comparator *cmp) const= 0; virtual bool Item_const_eq(const Item_const *a, const Item_const *b, bool binary_cmp) const { @@ -4207,7 +4210,7 @@ public: virtual bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr, Item *a, Item *b) const= 0; virtual bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, @@ -4490,7 +4493,7 @@ public: } bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr, Item *a, Item *b) const override; - uint Item_decimal_precision(const Item *) const override + decimal_digits_t Item_decimal_precision(const Item *) const override { MY_ASSERT_UNREACHABLE(); return DECIMAL_MAX_PRECISION; @@ -4528,9 +4531,9 @@ public: MY_ASSERT_UNREACHABLE(); return nullptr; } - bool set_comparator_func(Arg_comparator *cmp) const override; + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override; bool 
Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) @@ -4815,7 +4818,7 @@ public: bool binary_cmp) const override; bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr, Item *a, Item *b) const override; - uint Item_decimal_precision(const Item *item) const override; + decimal_digits_t Item_decimal_precision(const Item *item) const override; bool Item_save_in_value(THD *thd, Item *item, st_value *value) const override; bool Item_param_set_from_value(THD *thd, Item_param *param, @@ -4826,9 +4829,9 @@ public: const override; Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const override; - bool set_comparator_func(Arg_comparator *cmp) const override; + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) @@ -4944,7 +4947,7 @@ public: VDec va(a), vb(b); return va.ptr() && vb.ptr() && !va.cmp(vb); } - uint Item_decimal_precision(const Item *item) const override; + decimal_digits_t Item_decimal_precision(const Item *item) const override; bool Item_save_in_value(THD *thd, Item *item, st_value *value) const override; void Item_param_set_param_func(Item_param *param, uchar **pos, ulong len) const override; @@ -4962,9 +4965,9 @@ public: Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const override; Item_cache *Item_get_cache(THD *thd, const Item *item) const override; - bool set_comparator_func(Arg_comparator *cmp) const override; + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) @@ -5186,7 
+5189,7 @@ public: bool binary_cmp) const override; bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr, Item *a, Item *b) const override; - uint Item_decimal_precision(const Item *item) const override; + decimal_digits_t Item_decimal_precision(const Item *item) const override; bool Item_save_in_value(THD *thd, Item *item, st_value *value) const override; bool Item_param_set_from_value(THD *thd, Item_param *param, @@ -5196,9 +5199,9 @@ public: int Item_save_in_field(Item *item, Field *field, bool no_conversions) const override; Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const override; Item_cache *Item_get_cache(THD *thd, const Item *item) const override; - bool set_comparator_func(Arg_comparator *cmp) const override; + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) const override; @@ -5272,8 +5275,8 @@ public: class Type_handler_temporal_result: public Type_handler { protected: - uint Item_decimal_scale_with_seconds(const Item *item) const; - uint Item_divisor_precision_increment_with_seconds(const Item *) const; + decimal_digits_t Item_decimal_scale_with_seconds(const Item *item) const; + decimal_digits_t Item_divisor_precision_increment_with_seconds(const Item *) const; public: Item_result result_type() const override { return STRING_RESULT; } Item_result cmp_type() const override { return TIME_RESULT; } @@ -5360,7 +5363,7 @@ public: class Type_handler_string_result: public Type_handler { - uint Item_temporal_precision(THD *thd, Item *item, bool is_time) const; + decimal_digits_t Item_temporal_precision(THD *thd, Item *item, bool is_time) const; public: const Name &default_value() const override; protocol_send_type_t protocol_send_type() const override @@ -5420,15 +5423,15 @@ public: bool binary_cmp) const override; bool 
Item_eq_value(THD *thd, const Type_cmp_attributes *attr, Item *a, Item *b) const override; - uint Item_time_precision(THD *thd, Item *item) const override + decimal_digits_t Item_time_precision(THD *thd, Item *item) const override { return Item_temporal_precision(thd, item, true); } - uint Item_datetime_precision(THD *thd, Item *item) const override + decimal_digits_t Item_datetime_precision(THD *thd, Item *item) const override { return Item_temporal_precision(thd, item, false); } - uint Item_decimal_precision(const Item *item) const override; + decimal_digits_t Item_decimal_precision(const Item *item) const override; void Item_update_null_value(Item *item) const override; bool Item_save_in_value(THD *thd, Item *item, st_value *value) const override; void Item_param_setup_conversion(THD *thd, Item_param *) const override; @@ -5460,9 +5463,9 @@ public: Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const override; Item_cache *Item_get_cache(THD *thd, const Item *item) const override; - bool set_comparator_func(Arg_comparator *cmp) const override; + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) const @@ -6111,12 +6114,12 @@ public: const override; bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr, Item *a, Item *b) const override; - uint Item_decimal_scale(const Item *item) const override + decimal_digits_t Item_decimal_scale(const Item *item) const override { return Item_decimal_scale_with_seconds(item); } - uint Item_decimal_precision(const Item *item) const override; - uint Item_divisor_precision_increment(const Item *item) const override + decimal_digits_t Item_decimal_precision(const Item *item) const override; + decimal_digits_t Item_divisor_precision_increment(const Item *item) const override { return 
Item_divisor_precision_increment_with_seconds(item); } @@ -6143,7 +6146,7 @@ public: Item_cache *Item_get_cache(THD *thd, const Item *item) const override; longlong Item_val_int_unsigned_typecast(Item *item) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) @@ -6176,7 +6179,7 @@ public: bool Item_func_int_val_fix_length_and_dec(Item_func_int_val*) const override; Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const override; - bool set_comparator_func(Arg_comparator *cmp) const override; + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override; cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const override; in_vector *make_in_vector(THD *, const Item_func_in *, uint nargs) const override; @@ -6270,7 +6273,7 @@ public: const override; Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const override; - bool set_comparator_func(Arg_comparator *cmp) const override; + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override; cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const override; in_vector *make_in_vector(THD *, const Item_func_in *, uint nargs) const override; @@ -6326,7 +6329,7 @@ public: void Column_definition_attributes_frm_pack(const Column_definition_attributes *at, uchar *buff) const override; - uint Item_decimal_precision(const Item *item) const override; + decimal_digits_t Item_decimal_precision(const Item *item) const override; String *print_item_value(THD *thd, Item *item, String *str) const override; Item_cache *Item_get_cache(THD *thd, const Item *item) const override; String *Item_func_min_max_val_str(Item_func_min_max *, String *) const override; @@ -6337,7 +6340,7 @@ public: bool Item_func_round_fix_length_and_dec(Item_func_round *) const override; bool Item_func_int_val_fix_length_and_dec(Item_func_int_val*) const 
override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) const @@ -6455,12 +6458,12 @@ public: const uchar *buffer, LEX_CUSTRING *gis_options) const override; - uint Item_decimal_scale(const Item *item) const override + decimal_digits_t Item_decimal_scale(const Item *item) const override { return Item_decimal_scale_with_seconds(item); } - uint Item_decimal_precision(const Item *item) const override; - uint Item_divisor_precision_increment(const Item *item) const override + decimal_digits_t Item_decimal_precision(const Item *item) const override; + decimal_digits_t Item_divisor_precision_increment(const Item *item) const override { return Item_divisor_precision_increment_with_seconds(item); } @@ -6478,7 +6481,7 @@ public: const override; bool Item_func_round_fix_length_and_dec(Item_func_round *) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) @@ -6617,12 +6620,12 @@ public: const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; bool Column_definition_fix_attributes(Column_definition *c) const override; - uint Item_decimal_scale(const Item *item) const override + decimal_digits_t Item_decimal_scale(const Item *item) const override { return Item_decimal_scale_with_seconds(item); } - uint Item_decimal_precision(const Item *item) const override; - uint Item_divisor_precision_increment(const Item *item) const override + decimal_digits_t Item_decimal_precision(const Item *item) const override; + decimal_digits_t Item_divisor_precision_increment(const Item *item) const override { return Item_divisor_precision_increment_with_seconds(item); } @@ -6640,9 +6643,9 @@ public: longlong Item_func_min_max_val_int(Item_func_min_max *) const override; my_decimal 
*Item_func_min_max_val_decimal(Item_func_min_max *, my_decimal *) const override; - bool set_comparator_func(Arg_comparator *cmp) const override; + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) @@ -7126,7 +7129,7 @@ public: const Column_definition &def, const handler *file) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) const @@ -7259,7 +7262,7 @@ public: bool Item_func_int_val_fix_length_and_dec(Item_func_int_val*) const override; uint32 max_display_length_for_field(const Conv_source &src) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *, Type_all_attributes *atrr, Item **items, uint nitems) @@ -7462,13 +7465,14 @@ public: return (m_type_handler= Type_handler::get_handler_by_real_type(type)); } bool aggregate_for_comparison(const Type_handler *other); - bool aggregate_for_comparison(const char *funcname, + bool aggregate_for_comparison(const LEX_CSTRING &funcname, Item **items, uint nitems, bool treat_int_to_uint_as_decimal); bool aggregate_for_result(const Type_handler *other); - bool aggregate_for_result(const char *funcname, + bool aggregate_for_result(const LEX_CSTRING &funcname, Item **item, uint nitems, bool treat_bit_as_number); - bool aggregate_for_min_max(const char *funcname, Item **item, uint nitems); + bool aggregate_for_min_max(const LEX_CSTRING &funcname, Item **item, + uint nitems); bool aggregate_for_num_op(const class Type_aggregator *aggregator, const Type_handler *h0, const Type_handler *h1); diff --git a/sql/sql_type_fixedbin.h b/sql/sql_type_fixedbin.h new file mode 100644 index 
00000000000..546d585cc69 --- /dev/null +++ b/sql/sql_type_fixedbin.h @@ -0,0 +1,1913 @@ +#ifndef SQL_TYPE_FIXEDBIN_H +#define SQL_TYPE_FIXEDBIN_H +/* Copyright (c) 2019,2021 MariaDB Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + +/* + This is a common code for plugin (?) types that are generally + handled like strings, but have their own fixed size on-disk binary storage + format and their own (variable size) canonical string representation. + + Examples are INET6 and UUID types. +*/ + +#define MYSQL_SERVER +#include "sql_class.h" // THD, SORT_FIELD_ATTR +#include "opt_range.h" // SEL_ARG, null_element +#include "sql_type_fixedbin_storage.h" + +/***********************************************************************/ + + +template<class FbtImpl> +class FixedBinTypeBundle +{ +public: + class Fbt: public FbtImpl + { + protected: + using FbtImpl::m_buffer; + bool make_from_item(Item *item, bool warn) + { + if (item->type_handler() == type_handler_fbt()) + { + Native tmp(m_buffer, sizeof(m_buffer)); + bool rc= item->val_native(current_thd, &tmp); + if (rc) + return true; + DBUG_ASSERT(tmp.length() == sizeof(m_buffer)); + if (tmp.ptr() != m_buffer) + memcpy(m_buffer, tmp.ptr(), sizeof(m_buffer)); + return false; + } + StringBuffer<FbtImpl::max_char_length()+1> tmp; + String *str= item->val_str(&tmp); + return str ? 
make_from_character_or_binary_string(str, warn) : true; + } + + bool character_string_to_fbt(const char *str, size_t str_length, + CHARSET_INFO *cs) + { + if (cs->state & MY_CS_NONASCII) + { + char tmp[FbtImpl::max_char_length()+1]; + String_copier copier; + uint length= copier.well_formed_copy(&my_charset_latin1, tmp, sizeof(tmp), + cs, str, str_length); + return FbtImpl::ascii_to_fbt(tmp, length); + } + return FbtImpl::ascii_to_fbt(str, str_length); + } + bool make_from_character_or_binary_string(const String *str, bool warn) + { + if (str->charset() != &my_charset_bin) + { + bool rc= character_string_to_fbt(str->ptr(), str->length(), + str->charset()); + if (rc && warn) + current_thd->push_warning_wrong_value(Sql_condition::WARN_LEVEL_WARN, + type_handler_fbt()->name().ptr(), ErrConvString(str).ptr()); + return rc; + } + if (str->length() != sizeof(m_buffer)) + { + if (warn) + current_thd->push_warning_wrong_value(Sql_condition::WARN_LEVEL_WARN, + type_handler_fbt()->name().ptr(), ErrConvString(str).ptr()); + return true; + } + DBUG_ASSERT(str->ptr() != m_buffer); + memcpy(m_buffer, str->ptr(), sizeof(m_buffer)); + return false; + } + bool binary_to_fbt(const char *str, size_t length) + { + if (length != sizeof(m_buffer)) + return true; + memcpy(m_buffer, str, length); + return false; + } + + Fbt() { } + + public: + + static Fbt zero() + { + Fbt fbt; + fbt.set_zero(); + return fbt; + } + + static Fbt record_to_memory(const char *ptr) + { + Fbt fbt; + FbtImpl::record_to_memory(fbt.m_buffer, ptr); + return fbt; + } + /* + Check at Item's fix_fields() time if "item" can return a nullable value + on conversion to Fbt, or conversion produces a NOT NULL Fbt value. 
+ */ + static bool fix_fields_maybe_null_on_conversion_to_fbt(Item *item) + { + if (item->maybe_null()) + return true; + if (item->type_handler() == type_handler_fbt()) + return false; + if (!item->const_item() || item->is_expensive()) + return true; + return Fbt_null(item, false).is_null(); + } + + public: + + Fbt(Item *item, bool *error, bool warn= true) + { + *error= make_from_item(item, warn); + } + void to_record(char *str, size_t str_size) const + { + DBUG_ASSERT(str_size >= sizeof(m_buffer)); + FbtImpl::memory_to_record(str, m_buffer); + } + bool to_binary(String *to) const + { + return to->copy(m_buffer, sizeof(m_buffer), &my_charset_bin); + } + bool to_native(Native *to) const + { + return to->copy(m_buffer, sizeof(m_buffer)); + } + bool to_string(String *to) const + { + to->set_charset(&my_charset_latin1); + if (to->alloc(FbtImpl::max_char_length()+1)) + return true; + to->length((uint32) FbtImpl::to_string(const_cast<char*>(to->ptr()), + FbtImpl::max_char_length()+1)); + return false; + } + int cmp(const Binary_string &other) const + { + return FbtImpl::cmp(FbtImpl::to_lex_cstring(), other.to_lex_cstring()); + } + int cmp(const Fbt &other) const + { + return FbtImpl::cmp(FbtImpl::to_lex_cstring(), other.to_lex_cstring()); + } + }; + + class Fbt_null: public Fbt, public Null_flag + { + public: + // Initialize from a text representation + Fbt_null(const char *str, size_t length, CHARSET_INFO *cs) + :Null_flag(Fbt::character_string_to_fbt(str, length, cs)) { } + Fbt_null(const String &str) + :Fbt_null(str.ptr(), str.length(), str.charset()) { } + // Initialize from a binary representation + Fbt_null(const char *str, size_t length) + :Null_flag(Fbt::binary_to_fbt(str, length)) { } + Fbt_null(const Binary_string &str) + :Fbt_null(str.ptr(), str.length()) { } + // Initialize from an Item + Fbt_null(Item *item, bool warn= true) + :Null_flag(Fbt::make_from_item(item, warn)) { } + public: + const Fbt& to_fbt() const + { + DBUG_ASSERT(!is_null()); + return *this; 
+ } + void to_record(char *str, size_t str_size) const + { + to_fbt().to_record(str, str_size); + } + bool to_binary(String *to) const + { + return to_fbt().to_binary(to); + } + size_t to_string(char *dst, size_t dstsize) const + { + return to_fbt().to_string(dst, dstsize); + } + bool to_string(String *to) const + { + return to_fbt().to_string(to); + } + }; + + class Type_std_attributes_fbt: public Type_std_attributes + { + public: + Type_std_attributes_fbt() + :Type_std_attributes( + Type_numeric_attributes(FbtImpl::max_char_length(), 0, true), + DTCollation_numeric()) + { } + }; + + class Type_handler_fbt: public Type_handler + { + bool character_or_binary_string_to_native(THD *thd, const String *str, + Native *to) const + { + if (str->charset() == &my_charset_bin) + { + // Convert from a binary string + if (str->length() != FbtImpl::binary_length() || + to->copy(str->ptr(), str->length())) + { + thd->push_warning_wrong_value(Sql_condition::WARN_LEVEL_WARN, + name().ptr(), ErrConvString(str).ptr()); + return true; + } + return false; + } + // Convert from a character string + Fbt_null tmp(*str); + if (tmp.is_null()) + thd->push_warning_wrong_value(Sql_condition::WARN_LEVEL_WARN, + name().ptr(), ErrConvString(str).ptr()); + return tmp.is_null() || tmp.to_native(to); + } + + public: + ~Type_handler_fbt() override {} + + const Type_collection *type_collection() const override + { + static Type_collection_fbt type_collection_fbt; + return &type_collection_fbt; + } + + const Name &default_value() const override + { + return FbtImpl::default_value(); + } + ulong KEY_pack_flags(uint column_nr) const override + { + return FbtImpl::KEY_pack_flags(column_nr); + } + protocol_send_type_t protocol_send_type() const override + { + return PROTOCOL_SEND_STRING; + } + bool Item_append_extended_type_info(Send_field_extended_metadata *to, + const Item *item) const override + { + return to->set_data_type_name(name().lex_cstring()); + } + + enum_field_types field_type() const 
override + { + return MYSQL_TYPE_STRING; + } + + Item_result result_type() const override + { + return STRING_RESULT; + } + + Item_result cmp_type() const override + { + return STRING_RESULT; + } + + enum_dynamic_column_type dyncol_type(const Type_all_attributes *attr) + const override + { + return DYN_COL_STRING; + } + + uint32 max_display_length_for_field(const Conv_source &src) const override + { + return FbtImpl::max_char_length(); + } + + const Type_handler *type_handler_for_comparison() const override + { + return this; + } + + int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const override + { + DBUG_ASSERT(field->type_handler() == this); + Fbt_null ni(item); // Convert Item to Fbt + if (ni.is_null()) + return 0; + NativeBuffer<FbtImpl::binary_length()+1> tmp; + if (field->val_native(&tmp)) + { + DBUG_ASSERT(0); + return 0; + } + return -ni.cmp(tmp); + } + CHARSET_INFO *charset_for_protocol(const Item *item) const override + { + return item->collation.collation; + } + + bool is_scalar_type() const override { return true; } + bool is_val_native_ready() const override { return true; } + bool can_return_int() const override { return false; } + bool can_return_decimal() const override { return false; } + bool can_return_real() const override { return false; } + bool can_return_str() const override { return true; } + bool can_return_text() const override { return true; } + bool can_return_date() const override { return false; } + bool can_return_time() const override { return false; } + bool convert_to_binary_using_val_native() const override { return true; } + + decimal_digits_t Item_time_precision(THD *thd, Item *item) const override + { + return 0; + } + decimal_digits_t Item_datetime_precision(THD *thd, Item *item) const override + { + return 0; + } + decimal_digits_t Item_decimal_scale(const Item *item) const override + { + return 0; + } + decimal_digits_t Item_decimal_precision(const Item *item) const override + { + /* This will be needed if 
we ever allow cast from Fbt to DECIMAL. */ + return (FbtImpl::binary_length()*8+7)/10*3; // = bytes to decimal digits + } + + /* + Returns how many digits a divisor adds into a division result. + See Item::divisor_precision_increment() in item.h for more comments. + */ + decimal_digits_t Item_divisor_precision_increment(const Item *) const override + { + return 0; + } + /** + Makes a temporary table Field to handle numeric aggregate functions, + e.g. SUM(DISTINCT expr), AVG(DISTINCT expr), etc. + */ + Field *make_num_distinct_aggregator_field(MEM_ROOT *, const Item *) const override + { + DBUG_ASSERT(0); + return 0; + } + Field *make_conversion_table_field(MEM_ROOT *root, TABLE *table, uint metadata, + const Field *target) const override + { + const Record_addr tmp(NULL, Bit_addr(true)); + return new (table->in_use->mem_root) Field_fbt(&empty_clex_str, tmp); + } + // Fix attributes after the parser + bool Column_definition_fix_attributes(Column_definition *c) const override + { + c->length= FbtImpl::max_char_length(); + return false; + } + + bool Column_definition_prepare_stage1(THD *thd, MEM_ROOT *mem_root, + Column_definition *def, handler *file, + ulonglong table_flags, + const Column_derived_attributes *derived_attr) + const override + { + def->prepare_stage1_simple(&my_charset_numeric); + return false; + } + + bool Column_definition_redefine_stage1(Column_definition *def, + const Column_definition *dup, + const handler *file) const override + { + def->redefine_stage1_common(dup, file); + def->set_compression_method(dup->compression_method()); + def->create_length_to_internal_length_string(); + return false; + } + + bool Column_definition_prepare_stage2(Column_definition *def, handler *file, + ulonglong table_flags) const override + { + def->pack_flag= FIELDFLAG_BINARY; + return false; + } + + bool partition_field_check(const LEX_CSTRING &field_name, + Item *item_expr) const override + { + if (item_expr->cmp_type() != STRING_RESULT) + { + 
my_error(ER_WRONG_TYPE_COLUMN_VALUE_ERROR, MYF(0)); + return true; + } + return false; + } + + bool partition_field_append_value(String *to, Item *item_expr, + CHARSET_INFO *field_cs, + partition_value_print_mode_t mode) + const override + { + StringBuffer<FbtImpl::max_char_length()+64> fbtstr; + Fbt_null fbt(item_expr); + if (fbt.is_null()) + { + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + return true; + } + return fbt.to_string(&fbtstr) || + to->append('\'') || + to->append(fbtstr) || + to->append('\''); + } + + Field *make_table_field(MEM_ROOT *root, const LEX_CSTRING *name, + const Record_addr &addr, + const Type_all_attributes &attr, + TABLE_SHARE *table) const override + { + return new (root) Field_fbt(name, addr); + } + + Field * make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root, + const LEX_CSTRING *name, const Record_addr &addr, + const Bit_addr &bit, + const Column_definition_attributes *attr, + uint32 flags) const override + { + return new (mem_root) Field_fbt(name, addr); + } + void Column_definition_attributes_frm_pack(const Column_definition_attributes *def, + uchar *buff) const override + { + def->frm_pack_basic(buff); + def->frm_pack_charset(buff); + } + bool Column_definition_attributes_frm_unpack(Column_definition_attributes *def, + TABLE_SHARE *share, const uchar *buffer, + LEX_CUSTRING *gis_options) + const override + { + def->frm_unpack_basic(buffer); + return def->frm_unpack_charset(share, buffer); + } + void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, + Sort_param *param) const override + { + DBUG_ASSERT(item->type_handler() == this); + NativeBuffer<FbtImpl::binary_length()+1> tmp; + item->val_native_result(current_thd, &tmp); + if (item->maybe_null()) + { + if (item->null_value) + { + memset(to, 0, FbtImpl::binary_length() + 1); + return; + } + *to++= 1; + } + DBUG_ASSERT(!item->null_value); + DBUG_ASSERT(FbtImpl::binary_length() == tmp.length()); + 
DBUG_ASSERT(FbtImpl::binary_length() == sort_field->length); + FbtImpl::memory_to_record((char*) to, tmp.ptr()); + } + uint make_packed_sort_key_part(uchar *to, Item *item, + const SORT_FIELD_ATTR *sort_field, + Sort_param *param) const override + { + DBUG_ASSERT(item->type_handler() == this); + NativeBuffer<FbtImpl::binary_length()+1> tmp; + item->val_native_result(current_thd, &tmp); + if (item->maybe_null()) + { + if (item->null_value) + { + *to++=0; + return 0; + } + *to++= 1; + } + DBUG_ASSERT(!item->null_value); + DBUG_ASSERT(FbtImpl::binary_length() == tmp.length()); + DBUG_ASSERT(FbtImpl::binary_length() == sort_field->length); + FbtImpl::memory_to_record((char*) to, tmp.ptr()); + return tmp.length(); + } + void sort_length(THD *thd, const Type_std_attributes *item, + SORT_FIELD_ATTR *attr) const override + { + attr->original_length= attr->length= FbtImpl::binary_length(); + attr->suffix_length= 0; + } + uint32 max_display_length(const Item *item) const override + { + return FbtImpl::max_char_length(); + } + uint32 calc_pack_length(uint32 length) const override + { + return FbtImpl::binary_length(); + } + void Item_update_null_value(Item *item) const override + { + NativeBuffer<FbtImpl::binary_length()+1> tmp; + item->val_native(current_thd, &tmp); + } + bool Item_save_in_value(THD *thd, Item *item, st_value *value) const override + { + value->m_type= DYN_COL_STRING; + String *str= item->val_str(&value->m_string); + if (str != &value->m_string && !item->null_value) + { + // "item" returned a non-NULL value + if (Fbt_null(*str).is_null()) + { + /* + The value was not-null, but conversion to FBT failed: + SELECT a, DECODE_ORACLE(fbtcol, 'garbage', '<NULL>', '::01', '01') + FROM t1; + */ + thd->push_warning_wrong_value(Sql_condition::WARN_LEVEL_WARN, + name().ptr(), ErrConvString(str).ptr()); + value->m_type= DYN_COL_NULL; + return true; + } + // "item" returned a non-NULL value, and it was a valid FBT + value->m_string.set(str->ptr(), str->length(), 
str->charset()); + } + return check_null(item, value); + } + void Item_param_setup_conversion(THD *thd, Item_param *param) const override + { + param->setup_conversion_string(thd, thd->variables.character_set_client); + } + void Item_param_set_param_func(Item_param *param, + uchar **pos, ulong len) const override + { + param->set_param_str(pos, len); + } + bool Item_param_set_from_value(THD *thd, Item_param *param, + const Type_all_attributes *attr, + const st_value *val) const override + { + param->unsigned_flag= false; + param->setup_conversion_string(thd, attr->collation.collation); + /* + Exact value of max_length is not known unless fbt is converted to + charset of connection, so we have to set it later. + */ + return param->set_str(val->m_string.ptr(), val->m_string.length(), + attr->collation.collation, + attr->collation.collation); + } + bool Item_param_val_native(THD *thd, Item_param *item, Native *to) + const override + { + StringBuffer<FbtImpl::max_char_length()+1> buffer; + String *str= item->val_str(&buffer); + if (!str) + return true; + Fbt_null tmp(*str); + return tmp.is_null() || tmp.to_native(to); + } + bool Item_send(Item *item, Protocol *p, st_value *buf) const override + { + return Item_send_str(item, p, buf); + } + int Item_save_in_field(Item *item, Field *field, bool no_conversions) + const override + { + if (field->type_handler() == this) + { + NativeBuffer<MAX_FIELD_WIDTH> tmp; + bool rc= item->val_native(current_thd, &tmp); + if (rc || item->null_value) + return set_field_to_null_with_conversions(field, no_conversions); + field->set_notnull(); + return field->store_native(tmp); + } + return item->save_str_in_field(field, no_conversions); + } + + String *print_item_value(THD *thd, Item *item, String *str) const override + { + StringBuffer<FbtImpl::max_char_length()+64> buf; + String *result= item->val_str(&buf); + /* + TODO: This should eventually use one of these notations: + 1. 
CAST('xxx' AS Fbt) + Problem: CAST is not supported as a NAME_CONST() argument. + 2. Fbt'xxx' + Problem: This syntax is not supported by the parser yet. + */ + return !result || str->realloc(result->length() + 2) || + str->append(STRING_WITH_LEN("'")) || + str->append(result->ptr(), result->length()) || + str->append(STRING_WITH_LEN("'")) ? nullptr : str; + } + + /** + Check if + WHERE expr=value AND expr=const + can be rewritten as: + WHERE const=value AND expr=const + + "this" is the comparison handler that is used by "target". + + @param target - the predicate expr=value, + whose "expr" argument will be replaced to "const". + @param target_expr - the target's "expr" which will be replaced to "const". + @param target_value - the target's second argument, it will remain unchanged. + @param source - the equality predicate expr=const (or expr<=>const) + that can be used to rewrite the "target" part + (under certain conditions, see the code). + @param source_expr - the source's "expr". It should be exactly equal to + the target's "expr" to make condition rewrite possible. + @param source_const - the source's "const" argument, it will be inserted + into "target" instead of "expr". + */ + bool can_change_cond_ref_to_const(Item_bool_func2 *target, Item *target_expr, + Item *target_value, Item_bool_func2 *source, + Item *source_expr, Item *source_const) + const override + { + /* + WHERE COALESCE(col)='xxx' AND COALESCE(col)=CONCAT(a); --> + WHERE COALESCE(col)='xxx' AND 'xxx'=CONCAT(a); + */ + return target->compare_type_handler() == source->compare_type_handler(); + } + bool subquery_type_allows_materialization(const Item *inner, + const Item *outer, bool) const override + { + /* + Example: + SELECT * FROM t1 WHERE a IN (SELECT col FROM t1 GROUP BY col); + Allow materialization only if the outer column is also FBT. + This can be changed for more relaxed rules in the future. 
+ */ + DBUG_ASSERT(inner->type_handler() == this); + return outer->type_handler() == this; + } + /** + Make a simple constant replacement item for a constant "src", + so the new item can futher be used for comparison with "cmp", e.g.: + src = cmp -> replacement = cmp + + "this" is the type handler that is used to compare "src" and "cmp". + + @param thd - current thread, for mem_root + @param src - The item that we want to replace. It's a const item, + but it can be complex enough to calculate on every row. + @param cmp - The src's comparand. + @retval - a pointer to the created replacement Item + @retval - NULL, if could not create a replacement (e.g. on EOM). + NULL is also returned for ROWs, because instead of replacing + a Item_row to a new Item_row, Type_handler_row just replaces + its elements. + */ + Item *make_const_item_for_comparison(THD *thd, Item *src, + const Item *cmp) const override + { + Fbt_null tmp(src); + if (tmp.is_null()) + return new (thd->mem_root) Item_null(thd, src->name.str); + return new (thd->mem_root) Item_literal_fbt(thd, tmp); + } + Item_cache *Item_get_cache(THD *thd, const Item *item) const override + { + return new (thd->mem_root) Item_cache_fbt(thd); + } + + Item *create_typecast_item(THD *thd, Item *item, + const Type_cast_attributes &attr) const override + { + return new (thd->mem_root) Item_typecast_fbt(thd, item); + } + Item_copy *create_item_copy(THD *thd, Item *item) const override + { + return new (thd->mem_root) Item_copy_fbt(thd, item); + } + int cmp_native(const Native &a, const Native &b) const override + { + return FbtImpl::cmp(a.to_lex_cstring(), b.to_lex_cstring()); + } + bool set_comparator_func(THD *thd, Arg_comparator *cmp) const override + { + return cmp->set_cmp_func_native(thd); + } + bool Item_const_eq(const Item_const *a, const Item_const *b, + bool binary_cmp) const override + { + return false; + } + bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr, + Item *a, Item *b) const override + { + 
Fbt_null na(a), nb(b); + return !na.is_null() && !nb.is_null() && !na.cmp(nb); + } + bool Item_hybrid_func_fix_attributes(THD *thd, const LEX_CSTRING &name, + Type_handler_hybrid_field_type *h, + Type_all_attributes *attr, + Item **items, uint nitems) const override + { + attr->Type_std_attributes::operator=(Type_std_attributes_fbt()); + h->set_handler(this); + /* + If some of the arguments cannot be safely converted to "FBT NOT NULL", + then mark the entire function nullability as NULL-able. + Otherwise, keep the generic nullability calculated by earlier stages: + - either by the most generic way in Item_func::fix_fields() + - or by Item_func_xxx::fix_length_and_dec() before the call of + Item_hybrid_func_fix_attributes() + IFNULL() is special. It does not need to test args[0]. + */ + uint first= dynamic_cast<Item_func_ifnull*>(attr) ? 1 : 0; + for (uint i= first; i < nitems; i++) + { + if (Fbt::fix_fields_maybe_null_on_conversion_to_fbt(items[i])) + { + attr->set_type_maybe_null(true); + break; + } + } + return false; + } + bool Item_func_min_max_fix_attributes(THD *thd, Item_func_min_max *func, + Item **items, uint nitems) const override + { + return Item_hybrid_func_fix_attributes(thd, func->func_name_cstring(), + func, func, items, nitems); + + } + bool Item_sum_hybrid_fix_length_and_dec(Item_sum_hybrid *func) const override + { + func->Type_std_attributes::operator=(Type_std_attributes_fbt()); + func->set_handler(this); + return false; + } + bool Item_sum_sum_fix_length_and_dec(Item_sum_sum *func) const override + { + return Item_func_or_sum_illegal_param(func); + } + bool Item_sum_avg_fix_length_and_dec(Item_sum_avg *func) const override + { + return Item_func_or_sum_illegal_param(func); + } + bool Item_sum_variance_fix_length_and_dec(Item_sum_variance *func) const override + { + return Item_func_or_sum_illegal_param(func); + } + + bool Item_val_native_with_conversion(THD *thd, Item *item, + Native *to) const override + { + if (item->type_handler() == this) 
+ return item->val_native(thd, to); // No conversion needed + StringBuffer<FbtImpl::max_char_length()+1> buffer; + String *str= item->val_str(&buffer); + return str ? character_or_binary_string_to_native(thd, str, to) : true; + } + bool Item_val_native_with_conversion_result(THD *thd, Item *item, + Native *to) const override + { + if (item->type_handler() == this) + return item->val_native_result(thd, to); // No conversion needed + StringBuffer<FbtImpl::max_char_length()+1> buffer; + String *str= item->str_result(&buffer); + return str ? character_or_binary_string_to_native(thd, str, to) : true; + } + + bool Item_val_bool(Item *item) const override + { + NativeBuffer<FbtImpl::binary_length()+1> tmp; + if (item->val_native(current_thd, &tmp)) + return false; + return !Fbt::only_zero_bytes(tmp.ptr(), tmp.length()); + } + void Item_get_date(THD *thd, Item *item, Temporal::Warn *buff, + MYSQL_TIME *ltime, date_mode_t fuzzydate) const override + { + set_zero_time(ltime, MYSQL_TIMESTAMP_TIME); + } + + longlong Item_val_int_signed_typecast(Item *item) const override + { + DBUG_ASSERT(0); + return 0; + } + + longlong Item_val_int_unsigned_typecast(Item *item) const override + { + DBUG_ASSERT(0); + return 0; + } + + String *Item_func_hex_val_str_ascii(Item_func_hex *item, String *str) + const override + { + NativeBuffer<FbtImpl::binary_length()+1> tmp; + if ((item->null_value= item->arguments()[0]->val_native(current_thd, &tmp))) + return nullptr; + DBUG_ASSERT(tmp.length() == FbtImpl::binary_length()); + if (str->set_hex(tmp.ptr(), tmp.length())) + { + str->length(0); + str->set_charset(item->collation.collation); + } + return str; + } + + String *Item_func_hybrid_field_type_val_str(Item_func_hybrid_field_type *item, + String *str) const override + { + NativeBuffer<FbtImpl::binary_length()+1> native; + if (item->val_native(current_thd, &native)) + { + DBUG_ASSERT(item->null_value); + return nullptr; + } + DBUG_ASSERT(native.length() == FbtImpl::binary_length()); + Fbt_null 
tmp(native.ptr(), native.length()); + return tmp.is_null() || tmp.to_string(str) ? nullptr : str; + } + double Item_func_hybrid_field_type_val_real(Item_func_hybrid_field_type *) + const override + { + return 0; + } + longlong Item_func_hybrid_field_type_val_int(Item_func_hybrid_field_type *) + const override + { + return 0; + } + my_decimal * + Item_func_hybrid_field_type_val_decimal(Item_func_hybrid_field_type *, + my_decimal *to) const override + { + my_decimal_set_zero(to); + return to; + } + void Item_func_hybrid_field_type_get_date(THD *, + Item_func_hybrid_field_type *, + Temporal::Warn *, + MYSQL_TIME *to, + date_mode_t fuzzydate) + const override + { + set_zero_time(to, MYSQL_TIMESTAMP_TIME); + } + // WHERE is Item_func_min_max_val_native??? + String *Item_func_min_max_val_str(Item_func_min_max *func, String *str) + const override + { + Fbt_null tmp(func); + return tmp.is_null() || tmp.to_string(str) ? nullptr : str; + } + double Item_func_min_max_val_real(Item_func_min_max *) const override + { + return 0; + } + longlong Item_func_min_max_val_int(Item_func_min_max *) const override + { + return 0; + } + my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *, + my_decimal *to) const override + { + my_decimal_set_zero(to); + return to; + } + bool Item_func_min_max_get_date(THD *thd, Item_func_min_max*, MYSQL_TIME *to, + date_mode_t fuzzydate) const override + { + set_zero_time(to, MYSQL_TIMESTAMP_TIME); + return false; + } + + bool Item_func_between_fix_length_and_dec(Item_func_between *func) const override + { + return false; + } + longlong Item_func_between_val_int(Item_func_between *func) const override + { + return func->val_int_cmp_native(); + } + + cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const override + { + return new (thd->mem_root) cmp_item_fbt; + } + + in_vector *make_in_vector(THD *thd, const Item_func_in *func, + uint nargs) const override + { + return new (thd->mem_root) in_fbt(thd, nargs); + } + + bool 
Item_func_in_fix_comparator_compatible_types(THD *thd, + Item_func_in *func) + const override + { + if (func->compatible_types_scalar_bisection_possible()) + { + return func->value_list_convert_const_to_int(thd) || + func->fix_for_scalar_comparison_using_bisection(thd); + } + return + func->fix_for_scalar_comparison_using_cmp_items(thd, + 1U << (uint) STRING_RESULT); + } + bool Item_func_round_fix_length_and_dec(Item_func_round *func) const override + { + return Item_func_or_sum_illegal_param(func); + } + bool Item_func_int_val_fix_length_and_dec(Item_func_int_val *func) const override + { + return Item_func_or_sum_illegal_param(func); + } + + bool Item_func_abs_fix_length_and_dec(Item_func_abs *func) const override + { + return Item_func_or_sum_illegal_param(func); + } + + bool Item_func_neg_fix_length_and_dec(Item_func_neg *func) const override + { + return Item_func_or_sum_illegal_param(func); + } + + bool Item_func_signed_fix_length_and_dec(Item_func_signed *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_func_unsigned_fix_length_and_dec(Item_func_unsigned *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_double_typecast_fix_length_and_dec(Item_double_typecast *item) + const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_float_typecast_fix_length_and_dec(Item_float_typecast *item) + const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_decimal_typecast_fix_length_and_dec(Item_decimal_typecast *item) + const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_char_typecast_fix_length_and_dec(Item_char_typecast *item) + const override + { + if (item->cast_charset() == &my_charset_bin) + { + static Item_char_typecast_func_handler_fbt_to_binary + item_char_typecast_func_handler_fbt_to_binary; + item->fix_length_and_dec_native_to_binary(FbtImpl::binary_length()); + 
item->set_func_handler(&item_char_typecast_func_handler_fbt_to_binary); + return false; + } + item->fix_length_and_dec_str(); + return false; + } + + bool Item_time_typecast_fix_length_and_dec(Item_time_typecast *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_date_typecast_fix_length_and_dec(Item_date_typecast *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_datetime_typecast_fix_length_and_dec(Item_datetime_typecast *item) + const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_func_plus_fix_length_and_dec(Item_func_plus *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_func_minus_fix_length_and_dec(Item_func_minus *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_func_mul_fix_length_and_dec(Item_func_mul *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_func_div_fix_length_and_dec(Item_func_div *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + bool Item_func_mod_fix_length_and_dec(Item_func_mod *item) const override + { + return Item_func_or_sum_illegal_param(item); + } + }; + + class cmp_item_fbt: public cmp_item_scalar + { + Fbt m_native; + public: + cmp_item_fbt() + :cmp_item_scalar(), + m_native(Fbt::zero()) + { } + void store_value(Item *item) override + { + m_native= Fbt(item, &m_null_value); + } + int cmp_not_null(const Value *val) override + { + DBUG_ASSERT(!val->is_null()); + DBUG_ASSERT(val->is_string()); + Fbt_null tmp(val->m_string); + DBUG_ASSERT(!tmp.is_null()); + return m_native.cmp(tmp); + } + int cmp(Item *arg) override + { + Fbt_null tmp(arg); + return m_null_value || tmp.is_null() ? 
UNKNOWN : m_native.cmp(tmp) != 0; + } + int compare(cmp_item *ci) override + { + cmp_item_fbt *tmp= static_cast<cmp_item_fbt*>(ci); + DBUG_ASSERT(!m_null_value); + DBUG_ASSERT(!tmp->m_null_value); + return m_native.cmp(tmp->m_native); + } + cmp_item *make_same(THD *thd) override + { + return new (thd->mem_root) cmp_item_fbt(); + } + }; + + class Field_fbt: public Field + { + static void set_min_value(char *ptr) + { + memset(ptr, 0, FbtImpl::binary_length()); + } + static void set_max_value(char *ptr) + { + memset(ptr, 0xFF, FbtImpl::binary_length()); + } + void store_warning(const ErrConv &str, + Sql_condition::enum_warning_level level) + { + if (get_thd()->count_cuted_fields <= CHECK_FIELD_EXPRESSION) + return; + const TABLE_SHARE *s= table->s; + static const Name type_name= type_handler_fbt()->name(); + get_thd()->push_warning_truncated_value_for_field(level, type_name.ptr(), + str.ptr(), s ? s->db.str : nullptr, s ? s->table_name.str : nullptr, + field_name.str); + } + int set_null_with_warn(const ErrConv &str) + { + store_warning(str, Sql_condition::WARN_LEVEL_WARN); + set_null(); + return 1; + } + int set_min_value_with_warn(const ErrConv &str) + { + store_warning(str, Sql_condition::WARN_LEVEL_WARN); + set_min_value((char*) ptr); + return 1; + } + int set_max_value_with_warn(const ErrConv &str) + { + store_warning(str, Sql_condition::WARN_LEVEL_WARN); + set_max_value((char*) ptr); + return 1; + } + int store_fbt_null_with_warn(const Fbt_null &fbt, + const ErrConvString &err) + { + DBUG_ASSERT(marked_for_write_or_computed()); + if (fbt.is_null()) + return maybe_null() ? 
set_null_with_warn(err) + : set_min_value_with_warn(err); + fbt.to_record((char *) ptr, FbtImpl::binary_length()); + return 0; + } + + public: + Field_fbt(const LEX_CSTRING *field_name_arg, const Record_addr &rec) + :Field(rec.ptr(), FbtImpl::max_char_length(), + rec.null_ptr(), rec.null_bit(), Field::NONE, field_name_arg) + { + flags|= BINARY_FLAG | UNSIGNED_FLAG; + } + const Type_handler *type_handler() const override + { + return type_handler_fbt(); + } + uint32 max_display_length() const override { return field_length; } + bool str_needs_quotes() const override { return true; } + const DTCollation &dtcollation() const override + { + static DTCollation_numeric c; + return c; + } + CHARSET_INFO *charset(void) const override { return &my_charset_numeric; } + const CHARSET_INFO *sort_charset(void) const override { return &my_charset_bin; } + /** + This makes client-server protocol convert the value according + to @@character_set_client. + */ + bool binary() const override { return false; } + enum ha_base_keytype key_type() const override { return HA_KEYTYPE_BINARY; } + + bool is_equal(const Column_definition &new_field) const override + { + return new_field.type_handler() == type_handler(); + } + bool eq_def(const Field *field) const override + { + return Field::eq_def(field); + } + double pos_in_interval(Field *min, Field *max) override + { + return pos_in_interval_val_str(min, max, 0); + } + int cmp(const uchar *a, const uchar *b) const override + { return memcmp(a, b, pack_length()); } + + void sort_string(uchar *to, uint length) override + { + DBUG_ASSERT(length == pack_length()); + memcpy(to, ptr, length); + } + uint32 pack_length() const override + { + return FbtImpl::binary_length(); + } + uint pack_length_from_metadata(uint field_metadata) const override + { + return FbtImpl::binary_length(); + } + + void sql_type(String &str) const override + { + static Name name= type_handler_fbt()->name(); + str.set_ascii(name.ptr(), name.length()); + } + + void 
make_send_field(Send_field *to) override + { + Field::make_send_field(to); + to->set_data_type_name(type_handler_fbt()->name().lex_cstring()); + } + + bool validate_value_in_record(THD *thd, const uchar *record) const override + { + return false; + } + + bool val_native(Native *to) override + { + DBUG_ASSERT(marked_for_read()); + if (to->alloc(FbtImpl::binary_length())) + return true; + to->length(FbtImpl::binary_length()); + FbtImpl::record_to_memory((char*) to->ptr(), (const char*) ptr); + return false; + } + + Fbt to_fbt() const + { + DBUG_ASSERT(marked_for_read()); + return Fbt::record_to_memory((const char*) ptr); + } + + String *val_str(String *val_buffer, String *) override + { + return to_fbt().to_string(val_buffer) ? NULL : val_buffer; + } + + my_decimal *val_decimal(my_decimal *to) override + { + DBUG_ASSERT(marked_for_read()); + my_decimal_set_zero(to); + return to; + } + + longlong val_int() override + { + DBUG_ASSERT(marked_for_read()); + return 0; + } + + double val_real() override + { + DBUG_ASSERT(marked_for_read()); + return 0; + } + + bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate) override + { + DBUG_ASSERT(marked_for_read()); + set_zero_time(ltime, MYSQL_TIMESTAMP_TIME); + return false; + } + + bool val_bool(void) override + { + DBUG_ASSERT(marked_for_read()); + return !Fbt::only_zero_bytes((const char *) ptr, FbtImpl::binary_length()); + } + + int store_native(const Native &value) override + { + DBUG_ASSERT(marked_for_write_or_computed()); + DBUG_ASSERT(value.length() == FbtImpl::binary_length()); + FbtImpl::memory_to_record((char*) ptr, value.ptr()); + return 0; + } + + int store(const char *str, size_t length, CHARSET_INFO *cs) override + { + return cs == &my_charset_bin ? 
store_binary(str, length) + : store_text(str, length, cs); + } + + int store_text(const char *str, size_t length, CHARSET_INFO *cs) override + { + return store_fbt_null_with_warn(Fbt_null(str, length, cs), + ErrConvString(str, length, cs)); + } + + int store_binary(const char *str, size_t length) override + { + return store_fbt_null_with_warn(Fbt_null(str, length), + ErrConvString(str, length, + &my_charset_bin)); + } + + int store_hex_hybrid(const char *str, size_t length) override + { + return Field_fbt::store_binary(str, length); + } + + int store_decimal(const my_decimal *num) override + { + DBUG_ASSERT(marked_for_write_or_computed()); + return set_min_value_with_warn(ErrConvDecimal(num)); + } + + int store(longlong nr, bool unsigned_flag) override + { + DBUG_ASSERT(marked_for_write_or_computed()); + return set_min_value_with_warn( + ErrConvInteger(Longlong_hybrid(nr, unsigned_flag))); + } + + int store(double nr) override + { + DBUG_ASSERT(marked_for_write_or_computed()); + return set_min_value_with_warn(ErrConvDouble(nr)); + } + + int store_time_dec(const MYSQL_TIME *ltime, uint dec) override + { + DBUG_ASSERT(marked_for_write_or_computed()); + return set_min_value_with_warn(ErrConvTime(ltime)); + } + + /*** Field conversion routines ***/ + int store_field(Field *from) override + { + // INSERT INTO t1 (fbt_field) SELECT different_field_type FROM t2; + return from->save_in_field(this); + } + int save_in_field(Field *to) override + { + // INSERT INTO t2 (different_field_type) SELECT fbt_field FROM t1; + if (to->charset() == &my_charset_bin && + dynamic_cast<const Type_handler_general_purpose_string*> + (to->type_handler())) + { + NativeBuffer<FbtImpl::binary_length()+1> res; + val_native(&res); + return to->store(res.ptr(), res.length(), &my_charset_bin); + } + return save_in_field_str(to); + } + Copy_func *get_copy_func(const Field *from) const override + { + // ALTER to FBT from another field + return do_field_string; + } + + Copy_func *get_copy_func_to(const 
Field *to) const override + { + if (type_handler() == to->type_handler()) + { + // ALTER from FBT to FBT + DBUG_ASSERT(pack_length() == to->pack_length()); + DBUG_ASSERT(charset() == to->charset()); + DBUG_ASSERT(sort_charset() == to->sort_charset()); + return Field::do_field_eq; + } + // ALTER from FBT to another fbt type + if (to->charset() == &my_charset_bin && + dynamic_cast<const Type_handler_general_purpose_string*> + (to->type_handler())) + { + /* + ALTER from FBT to a binary string type, e.g.: + BINARY, TINYBLOB, BLOB, MEDIUMBLOB, LONGBLOB + */ + return do_field_fbt_native_to_binary; + } + return do_field_string; + } + + static void do_field_fbt_native_to_binary(Copy_field *copy) + { + NativeBuffer<FbtImpl::binary_length()+1> res; + copy->from_field->val_native(&res); + copy->to_field->store(res.ptr(), res.length(), &my_charset_bin); + } + + bool memcpy_field_possible(const Field *from) const override + { + // INSERT INTO t1 (fbt_field) SELECT field2 FROM t2; + return type_handler() == from->type_handler(); + } + enum_conv_type rpl_conv_type_from(const Conv_source &source, + const Relay_log_info *rli, + const Conv_param ¶m) const override + { + if (type_handler() == source.type_handler() || + (source.type_handler() == &type_handler_string && + source.type_handler()->max_display_length_for_field(source) == + FbtImpl::binary_length())) + return rpl_conv_type_from_same_data_type(source.metadata(), rli, param); + return CONV_TYPE_IMPOSSIBLE; + } + + /*** Optimizer routines ***/ + bool test_if_equality_guarantees_uniqueness(const Item *const_item) const override + { + /* + This condition: + WHERE fbt_field=const + should return a single distinct value only, + as comparison is done according to FBT. 
+ */ + return true; + } + bool can_be_substituted_to_equal_item(const Context &ctx, + const Item_equal *item_equal) + override + { + switch (ctx.subst_constraint()) { + case ANY_SUBST: + return ctx.compare_type_handler() == item_equal->compare_type_handler(); + case IDENTITY_SUBST: + return true; + } + return false; + } + Item *get_equal_const_item(THD *thd, const Context &ctx, + Item *const_item) override + { + Fbt_null tmp(const_item); + if (tmp.is_null()) + return NULL; + return new (thd->mem_root) Item_literal_fbt(thd, tmp); + } + bool can_optimize_keypart_ref(const Item_bool_func *cond, + const Item *item) const override + { + /* + Mixing of two different non-traditional types is currently prevented. + This may change in the future. + */ + DBUG_ASSERT(item->type_handler()->type_handler_base_or_self()-> + is_traditional_scalar_type() || + item->type_handler() == type_handler()); + return true; + } + /** + Test if Field can use range optimizer for a standard comparison operation: + <=, <, =, <=>, >, >= + Note, this method does not cover spatial operations. 
+ */ + bool can_optimize_range(const Item_bool_func *cond, + const Item *item, + bool is_eq_func) const override + { + // See the DBUG_ASSERT comment in can_optimize_keypart_ref() + DBUG_ASSERT(item->type_handler()->type_handler_base_or_self()-> + is_traditional_scalar_type() || + item->type_handler() == type_handler()); + return true; + } + void hash(ulong *nr, ulong *nr2) override + { + if (is_null()) + *nr^= (*nr << 1) | 1; + else + FbtImpl::hash_record(ptr, nr, nr2); + } + SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *prm, KEY_PART *key_part, + const Item_bool_func *cond, + scalar_comparison_op op, Item *value) override + { + DBUG_ENTER("Field_fbt::get_mm_leaf"); + if (!can_optimize_scalar_range(prm, key_part, cond, op, value)) + DBUG_RETURN(0); + int err= value->save_in_field_no_warnings(this, 1); + if ((op != SCALAR_CMP_EQUAL && is_real_null()) || err < 0) + DBUG_RETURN(&null_element); + if (err > 0) + { + if (op == SCALAR_CMP_EQ || op == SCALAR_CMP_EQUAL) + DBUG_RETURN(new (prm->mem_root) SEL_ARG_IMPOSSIBLE(this)); + DBUG_RETURN(NULL); /* Cannot infer anything */ + } + DBUG_RETURN(stored_field_make_mm_leaf(prm, key_part, op, value)); + } + bool can_optimize_hash_join(const Item_bool_func *cond, + const Item *item) const override + { + return can_optimize_keypart_ref(cond, item); + } + bool can_optimize_group_min_max(const Item_bool_func *cond, + const Item *const_item) const override + { + return true; + } + + uint row_pack_length() const override { return pack_length(); } + + Binlog_type_info binlog_type_info() const override + { + DBUG_ASSERT(type() == binlog_type()); + return Binlog_type_info_fixed_string(Field_fbt::binlog_type(), + FbtImpl::binary_length(), &my_charset_bin); + } + + uchar *pack(uchar *to, const uchar *from, uint max_length) override + { + DBUG_PRINT("debug", ("Packing field '%s'", field_name.str)); + return FbtImpl::pack(to, from, max_length); + } + + const uchar *unpack(uchar *to, const uchar *from, const uchar *from_end, + uint param_data) 
override + { + return FbtImpl::unpack(to, from, from_end, param_data); + } + + uint max_packed_col_length(uint max_length) override + { + return StringPack::max_packed_col_length(max_length); + } + + uint packed_col_length(const uchar *fbt_ptr, uint length) override + { + return StringPack::packed_col_length(fbt_ptr, length); + } + + uint size_of() const override { return sizeof(*this); } + }; + + class Item_typecast_fbt: public Item_func + { + public: + Item_typecast_fbt(THD *thd, Item *a) :Item_func(thd, a) {} + + const Type_handler *type_handler() const override + { return type_handler_fbt(); } + + enum Functype functype() const override { return CHAR_TYPECAST_FUNC; } + bool eq(const Item *item, bool binary_cmp) const override + { + if (this == item) + return true; + if (item->type() != FUNC_ITEM || + functype() != ((Item_func*)item)->functype()) + return false; + if (type_handler() != item->type_handler()) + return false; + Item_typecast_fbt *cast= (Item_typecast_fbt*) item; + return args[0]->eq(cast->args[0], binary_cmp); + } + LEX_CSTRING func_name_cstring() const override + { + static Name name= type_handler_fbt()->name(); + size_t len= 9+name.length()+1; + char *buf= (char*)current_thd->alloc(len); + strmov(strmov(buf, "cast_as_"), name.ptr()); + return { buf, len }; + } + void print(String *str, enum_query_type query_type) override + { + str->append(STRING_WITH_LEN("cast(")); + args[0]->print(str, query_type); + str->append(STRING_WITH_LEN(" as ")); + str->append(type_handler_fbt()->name().lex_cstring()); + str->append(')'); + } + bool fix_length_and_dec() override + { + Type_std_attributes::operator=(Type_std_attributes_fbt()); + if (Fbt::fix_fields_maybe_null_on_conversion_to_fbt(args[0])) + set_maybe_null(); + return false; + } + String *val_str(String *to) override + { + Fbt_null tmp(args[0]); + return (null_value= tmp.is_null() || tmp.to_string(to)) ? 
NULL : to; + } + longlong val_int() override + { + return 0; + } + double val_real() override + { + return 0; + } + my_decimal *val_decimal(my_decimal *to) override + { + my_decimal_set_zero(to); + return to; + } + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override + { + set_zero_time(ltime, MYSQL_TIMESTAMP_TIME); + return false; + } + bool val_native(THD *thd, Native *to) override + { + Fbt_null tmp(args[0]); + return null_value= tmp.is_null() || tmp.to_native(to); + } + Item *get_copy(THD *thd) override + { return get_item_copy<Item_typecast_fbt>(thd, this); } + }; + + class Item_cache_fbt: public Item_cache + { + NativeBuffer<FbtImpl::binary_length()+1> m_value; + public: + Item_cache_fbt(THD *thd) + :Item_cache(thd, type_handler_fbt()) { } + Item *get_copy(THD *thd) + { return get_item_copy<Item_cache_fbt>(thd, this); } + bool cache_value() + { + if (!example) + return false; + value_cached= true; + null_value_inside= null_value= + example->val_native_with_conversion_result(current_thd, + &m_value, type_handler()); + return true; + } + String* val_str(String *to) + { + if (!has_value()) + return NULL; + Fbt_null tmp(m_value.ptr(), m_value.length()); + return tmp.is_null() || tmp.to_string(to) ? 
NULL : to; + } + my_decimal *val_decimal(my_decimal *to) + { + if (!has_value()) + return NULL; + my_decimal_set_zero(to); + return to; + } + longlong val_int() + { + if (!has_value()) + return 0; + return 0; + } + double val_real() + { + if (!has_value()) + return 0; + return 0; + } + longlong val_datetime_packed(THD *thd) + { + DBUG_ASSERT(0); + if (!has_value()) + return 0; + return 0; + } + longlong val_time_packed(THD *thd) + { + DBUG_ASSERT(0); + if (!has_value()) + return 0; + return 0; + } + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) + { + if (!has_value()) + return true; + set_zero_time(ltime, MYSQL_TIMESTAMP_TIME); + return false; + } + bool val_native(THD *thd, Native *to) + { + if (!has_value()) + return true; + return to->copy(m_value.ptr(), m_value.length()); + } + }; + + class Item_literal_fbt: public Item_literal + { + Fbt m_value; + public: + Item_literal_fbt(THD *thd) + :Item_literal(thd), + m_value(Fbt::zero()) + { } + Item_literal_fbt(THD *thd, const Fbt &value) + :Item_literal(thd), + m_value(value) + { } + const Type_handler *type_handler() const override + { + return type_handler_fbt(); + } + longlong val_int() override + { + return 0; + } + double val_real() override + { + return 0; + } + String *val_str(String *to) override + { + return m_value.to_string(to) ? 
NULL : to; + } + my_decimal *val_decimal(my_decimal *to) override + { + my_decimal_set_zero(to); + return to; + } + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override + { + set_zero_time(ltime, MYSQL_TIMESTAMP_TIME); + return false; + } + bool val_native(THD *thd, Native *to) override + { + return m_value.to_native(to); + } + void print(String *str, enum_query_type query_type) override + { + StringBuffer<FbtImpl::max_char_length()+64> tmp; + tmp.append(type_handler_fbt()->name().lex_cstring()); + my_caseup_str(&my_charset_latin1, tmp.c_ptr()); + str->append(tmp); + str->append('\''); + m_value.to_string(&tmp); + str->append(tmp); + str->append('\''); + } + Item *get_copy(THD *thd) override + { return get_item_copy<Item_literal_fbt>(thd, this); } + + // Non-overriding methods + void set_value(const Fbt &value) + { + m_value= value; + } + }; + + class Item_copy_fbt: public Item_copy + { + NativeBuffer<Fbt::binary_length()+1> m_value; + public: + Item_copy_fbt(THD *thd, Item *item_arg): Item_copy(thd, item_arg) {} + + bool val_native(THD *thd, Native *to) override + { + if (null_value) + return true; + return to->copy(m_value.ptr(), m_value.length()); + } + String *val_str(String *to) override + { + if (null_value) + return NULL; + Fbt_null tmp(m_value.ptr(), m_value.length()); + return tmp.is_null() || tmp.to_string(to) ? 
NULL : to; + } + my_decimal *val_decimal(my_decimal *to) override + { + my_decimal_set_zero(to); + return to; + } + double val_real() override + { + return 0; + } + longlong val_int() override + { + return 0; + } + bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override + { + set_zero_time(ltime, MYSQL_TIMESTAMP_TIME); + return null_value; + } + void copy() override + { + null_value= item->val_native(current_thd, &m_value); + DBUG_ASSERT(null_value == item->null_value); + } + int save_in_field(Field *field, bool no_conversions) override + { + return Item::save_in_field(field, no_conversions); + } + Item *get_copy(THD *thd) override + { return get_item_copy<Item_copy_fbt>(thd, this); } + }; + + class in_fbt :public in_vector + { + Fbt m_value; + static int cmp_fbt(void *cmp_arg, Fbt *a, Fbt *b) + { + return a->cmp(*b); + } + public: + in_fbt(THD *thd, uint elements) + :in_vector(thd, elements, sizeof(Fbt), (qsort2_cmp) cmp_fbt, 0), + m_value(Fbt::zero()) + { } + const Type_handler *type_handler() const override + { + return type_handler_fbt(); + } + void set(uint pos, Item *item) override + { + Fbt *buff= &((Fbt *) base)[pos]; + Fbt_null value(item); + if (value.is_null()) + *buff= Fbt::zero(); + else + *buff= value; + } + uchar *get_value(Item *item) override + { + Fbt_null value(item); + if (value.is_null()) + return 0; + m_value= value; + return (uchar *) &m_value; + } + Item* create_item(THD *thd) override + { + return new (thd->mem_root) Item_literal_fbt(thd); + } + void value_to_item(uint pos, Item *item) override + { + const Fbt &buff= (((Fbt*) base)[pos]); + static_cast<Item_literal_fbt*>(item)->set_value(buff); + } + }; + + class Item_char_typecast_func_handler_fbt_to_binary: + public Item_handled_func::Handler_str + { + public: + const Type_handler *return_type_handler(const Item_handled_func *item) + const override + { + if (item->max_length > MAX_FIELD_VARCHARLENGTH) + return Type_handler::blob_type_handler(item->max_length); + if 
(item->max_length > 255) + return &type_handler_varchar; + return &type_handler_string; + } + bool fix_length_and_dec(Item_handled_func *xitem) const override + { + return false; + } + String *val_str(Item_handled_func *item, String *to) const override + { + DBUG_ASSERT(dynamic_cast<const Item_char_typecast*>(item)); + return static_cast<Item_char_typecast*>(item)-> + val_str_binary_from_native(to); + } + }; + + class Type_collection_fbt: public Type_collection + { + const Type_handler *aggregate_common(const Type_handler *a, + const Type_handler *b) const + { + if (a == b) + return a; + return NULL; + } + const Type_handler *aggregate_if_string(const Type_handler *a, + const Type_handler *b) const + { + static const Type_aggregator::Pair agg[]= + { + {type_handler_fbt(), &type_handler_null, type_handler_fbt()}, + {type_handler_fbt(), &type_handler_varchar, type_handler_fbt()}, + {type_handler_fbt(), &type_handler_string, type_handler_fbt()}, + {type_handler_fbt(), &type_handler_tiny_blob, type_handler_fbt()}, + {type_handler_fbt(), &type_handler_blob, type_handler_fbt()}, + {type_handler_fbt(), &type_handler_medium_blob, type_handler_fbt()}, + {type_handler_fbt(), &type_handler_long_blob, type_handler_fbt()}, + {type_handler_fbt(), &type_handler_hex_hybrid, type_handler_fbt()}, + {NULL,NULL,NULL} + }; + return Type_aggregator::find_handler_in_array(agg, a, b, true); + } + public: + const Type_handler *aggregate_for_result(const Type_handler *a, + const Type_handler *b) + const override + { + const Type_handler *h; + if ((h= aggregate_common(a, b)) || + (h= aggregate_if_string(a, b))) + return h; + return NULL; + } + + const Type_handler *aggregate_for_min_max(const Type_handler *a, + const Type_handler *b) + const override + { + return aggregate_for_result(a, b); + } + + const Type_handler *aggregate_for_comparison(const Type_handler *a, + const Type_handler *b) + const override + { + if (const Type_handler *h= aggregate_common(a, b)) + return h; + static const 
Type_aggregator::Pair agg[]= + { + {type_handler_fbt(), &type_handler_null, type_handler_fbt()}, + {type_handler_fbt(), &type_handler_long_blob, type_handler_fbt()}, + {NULL,NULL,NULL} + }; + return Type_aggregator::find_handler_in_array(agg, a, b, true); + } + + const Type_handler *aggregate_for_num_op(const Type_handler *a, + const Type_handler *b) + const override + { + return NULL; + } + + const Type_handler *handler_by_name(const LEX_CSTRING &name) const override + { + if (type_handler_fbt()->name().eq(name)) + return type_handler_fbt(); + return NULL; + } + }; + static Type_handler_fbt *type_handler_fbt() + { + static Type_handler_fbt th; + return &th; + } +}; + +#endif /* SQL_TYPE_FIXEDBIN_H */ diff --git a/sql/sql_type_fixedbin_storage.h b/sql/sql_type_fixedbin_storage.h new file mode 100644 index 00000000000..6e18335bd4c --- /dev/null +++ b/sql/sql_type_fixedbin_storage.h @@ -0,0 +1,173 @@ +#ifndef SQL_TYPE_FIXEDBIN_STORAGE +#define SQL_TYPE_FIXEDBIN_STORAGE +/* Copyright (c) 2019,2021 MariaDB Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + +/* + This is a common code for plugin (?) types that are generally + handled like strings, but have their own fixed size on-disk binary storage + format and their own (variable size) canonical string representation. + + Examples are INET6 and UUID types. 
+ + The MariaDB server uses three binary representations of a data type: + + 1. In-memory binary representation (user visible) + This representation: + - can be used in INSERT..VALUES (X'AABBCC') + - can be used in WHERE conditions: WHERE c1=X'AABBCC' + - is returned by CAST(x AS BINARY(N)) + - is returned by Field::val_native() and Item::val_native() + + 2. In-record binary representation (user invisible) + This representation: + - is used in records (is pointed by Field::ptr) + - must be comparable by memcmp() + + 3. Binlog binary (row) representation + Usually, for string data types the binlog representation + is based on the in-record representation with trailing byte compression: + - trailing space compression for text string data types + - trailing zero compression for binary string data types + + We have to have separate in-memory and in-record representations + because we use HA_KEYTYPE_BINARY for indexing. The engine API + does not have a way to pass a comparison function as a parameter. + + The default implementation below assumes that: + - the in-memory and in-record representations are equal + - the binlog representation is compatible with BINARY(N) + This is OK for simple data types, like INET6. + + Data type implementations that need different representations + can override the default implementation (like e.g. UUID does). 
+*/ + +/***********************************************************************/ + +template<size_t NATIVE_LEN, size_t MAX_CHAR_LEN> +class FixedBinTypeStorage +{ +protected: + // The buffer that stores the in-memory binary representation + char m_buffer[NATIVE_LEN]; + + // Non-initializing constructor + FixedBinTypeStorage() + { } + + FixedBinTypeStorage & set_zero() + { + bzero(&m_buffer, sizeof(m_buffer)); + return *this; + } +public: + + // Initialize from the in-memory binary representation + FixedBinTypeStorage(const char *str, size_t length) + { + if (length != binary_length()) + set_zero(); + else + memcpy(&m_buffer, str, sizeof(m_buffer)); + } + + // Return the buffer with the in-memory representation + Lex_cstring to_lex_cstring() const + { + return Lex_cstring(m_buffer, sizeof(m_buffer)); + } + + static constexpr uint binary_length() { return NATIVE_LEN; } + static constexpr uint max_char_length() { return MAX_CHAR_LEN; } + + // Compare the in-memory binary representations of two values + static int cmp(const LEX_CSTRING &a, const LEX_CSTRING &b) + { + DBUG_ASSERT(a.length == binary_length()); + DBUG_ASSERT(b.length == binary_length()); + return memcmp(a.str, b.str, b.length); + } + + /* + Convert from the in-memory to the in-record representation. + Used in Field::store_native(). + */ + static void memory_to_record(char *to, const char *from) + { + memcpy(to, from, NATIVE_LEN); + } + /* + Convert from the in-record to the in-memory representation + Used in Field::val_native(). + */ + static void record_to_memory(char *to, const char *from) + { + memcpy(to, from, NATIVE_LEN); + } + + /* + Hash the in-record representation + Used in Field::hash(). 
+ */ + static void hash_record(const uchar *ptr, ulong *nr, ulong *nr2) + { + my_charset_bin.hash_sort(ptr, binary_length(), nr, nr2); + } + + static bool only_zero_bytes(const char *ptr, size_t length) + { + for (uint i= 0 ; i < length; i++) + { + if (ptr[i] != 0) + return false; + } + return true; + } + + static ulong KEY_pack_flags(uint column_nr) + { + /* + Return zero by default. A particular data type can override + this method return some flags, e.g. HA_PACK_KEY to enable + key prefix compression. + */ + return 0; + } + + /* + Convert from the in-record to the binlog representation. + Used in Field::pack(), and in filesort to store the addon fields. + By default, do what BINARY(N) does. + */ + static uchar *pack(uchar *to, const uchar *from, uint max_length) + { + return StringPack(&my_charset_bin, binary_length()).pack(to, from, max_length); + } + + /* + Convert from the in-binary-log to the in-record representation. + Used in Field::unpack(). + By default, do what BINARY(N) does. 
+ */ + static const uchar *unpack(uchar *to, const uchar *from, const uchar *from_end, + uint param_data) + { + return StringPack(&my_charset_bin, binary_length()).unpack(to, from, from_end, + param_data); + } + +}; +#endif /* SQL_TYPE_FIXEDBIN_STORAGE */ diff --git a/sql/sql_type_geom.cc b/sql/sql_type_geom.cc index 9d6bc3846cc..5732ae47217 100644 --- a/sql/sql_type_geom.cc +++ b/sql/sql_type_geom.cc @@ -205,8 +205,9 @@ bool Type_collection_geometry::init(Type_handler_data *data) } -bool Type_handler_geometry::check_type_geom_or_binary(const char *opname, - const Item *item) +bool Type_handler_geometry:: +check_type_geom_or_binary(const LEX_CSTRING &opname, + const Item *item) { const Type_handler *handler= item->type_handler(); if (handler->type_handler_for_comparison() == &type_handler_geometry || @@ -214,14 +215,15 @@ bool Type_handler_geometry::check_type_geom_or_binary(const char *opname, item->collation.collation == &my_charset_bin)) return false; my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), - handler->name().ptr(), opname); + handler->name().ptr(), opname.str); return true; } -bool Type_handler_geometry::check_types_geom_or_binary(const char *opname, - Item* const *args, - uint start, uint end) +bool Type_handler_geometry:: +check_types_geom_or_binary(const LEX_CSTRING &opname, + Item* const *args, + uint start, uint end) { for (uint i= start; i < end ; i++) { @@ -491,7 +493,7 @@ Field *Type_handler_geometry::make_table_field(MEM_ROOT *root, bool Type_handler_geometry:: Item_hybrid_func_fix_attributes(THD *thd, - const char *func_name, + const LEX_CSTRING &func_name, Type_handler_hybrid_field_type *handler, Type_all_attributes *func, Item **items, uint nitems) const @@ -501,7 +503,7 @@ bool Type_handler_geometry:: func->unsigned_flag= false; func->decimals= 0; func->max_length= (uint32) UINT_MAX32; - func->set_maybe_null(true); + func->set_type_maybe_null(true); return false; } @@ -509,14 +511,16 @@ bool Type_handler_geometry:: bool 
Type_handler_geometry:: Item_sum_sum_fix_length_and_dec(Item_sum_sum *item) const { - return Item_func_or_sum_illegal_param("sum"); + LEX_CSTRING name= {STRING_WITH_LEN("sum") }; + return Item_func_or_sum_illegal_param(name); } bool Type_handler_geometry:: Item_sum_avg_fix_length_and_dec(Item_sum_avg *item) const { - return Item_func_or_sum_illegal_param("avg"); + LEX_CSTRING name= {STRING_WITH_LEN("avg") }; + return Item_func_or_sum_illegal_param(name); } @@ -841,8 +845,6 @@ int Field_geom::store(const char *from, size_t length, CHARSET_INFO *cs) bzero(ptr, Field_blob::pack_length()); else { - if (from == Geometry::bad_geometry_data.ptr()) - goto err; // Check given WKB uint32 wkb_type; if (length < SRID_SIZE + WKB_HEADER_SIZE + 4) diff --git a/sql/sql_type_geom.h b/sql/sql_type_geom.h index ff00beea598..3bc25808bc3 100644 --- a/sql/sql_type_geom.h +++ b/sql/sql_type_geom.h @@ -34,8 +34,9 @@ public: GEOM_MULTIPOINT = 4, GEOM_MULTILINESTRING = 5, GEOM_MULTIPOLYGON = 6, GEOM_GEOMETRYCOLLECTION = 7 }; - static bool check_type_geom_or_binary(const char *opname, const Item *item); - static bool check_types_geom_or_binary(const char *opname, + static bool check_type_geom_or_binary(const LEX_CSTRING &opname, + const Item *item); + static bool check_types_geom_or_binary(const LEX_CSTRING &opname, Item * const *args, uint start, uint end); static const Type_handler_geometry *type_handler_geom_by_type(uint type); @@ -155,7 +156,7 @@ public: bool Item_func_abs_fix_length_and_dec(Item_func_abs *) const override; bool Item_func_neg_fix_length_and_dec(Item_func_neg *) const override; bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *h, Type_all_attributes *attr, Item **items, uint nitems) const diff --git a/sql/sql_type_int.h b/sql/sql_type_int.h index fca00f25448..c22b1038890 100644 --- a/sql/sql_type_int.h +++ b/sql/sql_type_int.h @@ -77,23 +77,27 @@ public: } Longlong_null operator<<(const 
Longlong_null &llshift) const { + ulonglong res; + uint shift; if (is_null() || llshift.is_null()) return Longlong_null(); - uint shift= (uint) llshift.value(); - ulonglong res= (shift < sizeof(longlong) * 8) - ? ((ulonglong) value()) << shift - : 0; + shift= (uint) llshift.value(); + res= 0; + if (shift < sizeof(longlong) * 8) + res= ((ulonglong) value()) << shift; return Longlong_null((longlong) res); } Longlong_null operator>>(const Longlong_null &llshift) const { + ulonglong res; + uint shift; if (is_null() || llshift.is_null()) return Longlong_null(); - uint shift= (uint) llshift.value(); - ulonglong res= (shift < sizeof(longlong) * 8) - ? ((ulonglong) value()) >> shift - : 0; - return Longlong_null((longlong) res); + shift= (uint) llshift.value(); + res= 0; + if (shift < sizeof(longlong) * 8) + res= ((ulonglong) value()) >> shift; + return Longlong_null(res); } Longlong_null bit_count() const { diff --git a/sql/sql_type_json.h b/sql/sql_type_json.h index 4e1f0a50241..b7fe5c8aa64 100644 --- a/sql/sql_type_json.h +++ b/sql/sql_type_json.h @@ -87,7 +87,7 @@ public: } bool Item_hybrid_func_fix_attributes(THD *thd, - const char *name, + const LEX_CSTRING &name, Type_handler_hybrid_field_type *hybrid, Type_all_attributes *attr, Item **items, uint nitems) diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 150900a4c77..758edbdcbaa 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1096,7 +1096,9 @@ bool st_select_lex_unit::prepare_join(THD *thd_arg, SELECT_LEX *sl, thd_arg->lex->current_select= sl; - can_skip_order_by= is_union_select && !(sl->braces && sl->explicit_limit); + can_skip_order_by= (is_union_select && !(sl->braces && + sl->limit_params.explicit_limit) && + !thd->lex->with_rownum); saved_error= join->prepare(sl->table_list.first, (derived && derived->merged ? NULL : sl->where), @@ -1196,11 +1198,11 @@ bool st_select_lex_unit::join_union_type_attributes(THD *thd_arg, been fixed yet. 
An Item_type_holder must be created based on a fixed Item, so use the inner Item instead. */ - DBUG_ASSERT(item_tmp->is_fixed() || + DBUG_ASSERT(item_tmp->fixed() || (item_tmp->type() == Item::REF_ITEM && ((Item_ref *)(item_tmp))->ref_type() == Item_ref::OUTER_REF)); - if (!item_tmp->is_fixed()) + if (!item_tmp->fixed()) item_tmp= item_tmp->real_item(); holders[holder_pos].add_argument(item_tmp); } @@ -1352,7 +1354,7 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg, else { sl->join->result= result; - lim.set_unlimited(); + lim.clear(); if (!sl->join->procedure && result->prepare(sl->join->fields_list, this)) { @@ -1462,7 +1464,7 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg, if (fake_select_lex) { if (fake_select_lex->order_list.first || - fake_select_lex->explicit_limit) + fake_select_lex->limit_params.explicit_limit) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "global ORDER_BY/LIMIT in recursive CTE spec"); @@ -1522,7 +1524,7 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg, if (!unit->first_select()->next_select()) { if (!unit->fake_select_lex) - { + { Query_arena *arena, backup_arena; arena= thd->activate_stmt_arena_if_needed(&backup_arena); bool rc= unit->add_fake_select_lex(thd); @@ -1533,17 +1535,13 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg, } SELECT_LEX *fake= unit->fake_select_lex; fake->order_list= sl->order_list; - fake->explicit_limit= sl->explicit_limit; - fake->select_limit= sl->select_limit; - fake->offset_limit= sl->offset_limit; + fake->limit_params= sl->limit_params; sl->order_list.empty(); - sl->explicit_limit= 0; - sl->select_limit= 0; - sl->offset_limit= 0; + sl->limit_params.clear(); if (describe) fake->options|= SELECT_DESCRIBE; } - else if (!sl->explicit_limit) + else if (!sl->limit_params.explicit_limit) sl->order_list.empty(); } } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 244bd319205..d341206732f 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -384,7 +384,7 @@ int 
mysql_update(THD *thd, privilege_t want_privilege(NO_ACL); #endif uint table_count= 0; - ha_rows updated, found; + ha_rows updated, updated_or_same, found; key_map old_covering_keys; TABLE *table; SQL_SELECT *select= NULL; @@ -469,7 +469,7 @@ int mysql_update(THD *thd, want_privilege= (table_list->view ? UPDATE_ACL : table_list->grant.want_privilege); #endif - promote_select_describe_flag_if_needed(thd->lex); + thd->lex->promote_select_describe_flag_if_needed(); if (mysql_prepare_update(thd, table_list, &conds, order_num, order)) DBUG_RETURN(1); @@ -949,7 +949,7 @@ update_begin: if (init_read_record(&info, thd, table, select, file_sort, 0, 1, FALSE)) goto err; - updated= found= 0; + updated= updated_or_same= found= 0; /* Generate an error (in TRADITIONAL mode) or warning when trying to set a NOT NULL field to NULL. @@ -977,7 +977,8 @@ update_begin: } if ((table->file->ha_table_flags() & HA_CAN_FORCE_BULK_UPDATE) && - !table->prepare_triggers_for_update_stmt_or_event()) + !table->prepare_triggers_for_update_stmt_or_event() && + !thd->lex->with_rownum) will_batch= !table->file->start_bulk_update(); /* @@ -1001,6 +1002,7 @@ update_begin: DBUG_ASSERT(table->file->inited != handler::NONE); THD_STAGE_INFO(thd, stage_updating); + fix_rownum_pointers(thd, thd->lex->current_select, &updated_or_same); while (!(error=info.read_record()) && !thd->killed) { explain->tracker.on_record_read(); @@ -1090,12 +1092,14 @@ update_begin: if (unlikely(record_was_same)) { error= 0; + updated_or_same++; } else if (likely(!error)) { if (has_vers_fields && table->versioned(VERS_TRX_ID)) rows_inserted++; updated++; + updated_or_same++; } if (likely(!error) && !record_was_same && table_list->has_period()) @@ -1114,6 +1118,8 @@ update_begin: goto error; } } + else + updated_or_same++; if (likely(!error) && has_vers_fields && table->versioned(VERS_TIMESTAMP)) { @@ -2942,7 +2948,9 @@ int multi_update::do_updates() } } else + { local_error= 0; + } } if (table->triggers && diff --git 
a/sql/sql_view.cc b/sql/sql_view.cc index 0c1d0e13382..a2b744fc8be 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -16,32 +16,36 @@ */ #define MYSQL_LEX 1 -#include "mariadb.h" /* NO_EMBEDDED_ACCESS_CHECKS */ +#include "mariadb.h" /* NO_EMBEDDED_ACCESS_CHECKS */ #include "sql_priv.h" #include "unireg.h" #include "sql_view.h" -#include "sql_base.h" // find_table_in_global_list, lock_table_names -#include "sql_parse.h" // sql_parse -#include "sql_cache.h" // query_cache_* -#include "lock.h" // MYSQL_OPEN_SKIP_TEMPORARY -#include "sql_show.h" // append_identifier -#include "sql_table.h" // build_table_filename +#include "sql_base.h" // find_table_in_global_list, lock_table_names +#include "sql_parse.h" // sql_parse +#include "sql_cache.h" // query_cache_* +#include "lock.h" // MYSQL_OPEN_SKIP_TEMPORARY +#include "sql_show.h" // append_identifier +#include "sql_table.h" // build_table_filename #include "sql_db.h" // mysql_opt_change_db, mysql_change_db #include "sql_select.h" #include "parse_file.h" #include "sp_head.h" #include "sp.h" #include "sp_cache.h" -#include "datadict.h" // dd_frm_is_view() +#include "datadict.h" // dd_frm_is_view() #include "sql_derived.h" #include "opt_trace.h" +#include "ddl_log.h" +#include "debug.h" // debug_crash_here #include "wsrep_mysqld.h" #define MD5_BUFF_LENGTH 33 const LEX_CSTRING view_type= { STRING_WITH_LEN("VIEW") }; -static int mysql_register_view(THD *, TABLE_LIST *, enum_view_create_mode); +static int mysql_register_view(THD *thd, DDL_LOG_STATE *ddl_log_state, + TABLE_LIST *view, enum_view_create_mode mode, + char *backup_file_name); /* Make a unique name for an anonymous view column @@ -137,7 +141,7 @@ bool check_duplicate_names(THD *thd, List<Item> &item_list, bool gen_unique_view Item *check; /* treat underlying fields like set by user names */ if (item->real_item()->type() == Item::FIELD_ITEM) - item->common_flags&= ~IS_AUTO_GENERATED_NAME; + item->base_flags|= item_base_t::IS_EXPLICIT_NAME; itc.rewind(); while 
((check= itc++) && check != item) { @@ -145,9 +149,9 @@ bool check_duplicate_names(THD *thd, List<Item> &item_list, bool gen_unique_view { if (!gen_unique_view_name) goto err; - if (item->is_autogenerated_name()) + if (!item->is_explicit_name()) make_unique_view_field_name(thd, item, item_list, item); - else if (check->is_autogenerated_name()) + else if (!check->is_explicit_name()) make_unique_view_field_name(thd, check, item_list, item); else goto err; @@ -179,7 +183,7 @@ void make_valid_column_names(THD *thd, List<Item> &item_list) for (uint column_no= 1; (item= it++); column_no++) { - if (!item->is_autogenerated_name() || !check_column_name(item->name.str)) + if (item->is_explicit_name() || !check_column_name(item->name.str)) continue; name_len= my_snprintf(buff, NAME_LEN, "Name_exp_%u", column_no); item->orig_name= item->name.str; @@ -405,6 +409,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, SELECT_LEX *select_lex= lex->first_select_lex(); SELECT_LEX *sl; SELECT_LEX_UNIT *unit= &lex->unit; + DDL_LOG_STATE ddl_log_state, ddl_log_state_tmp_file; + char backup_file_name[FN_REFLEN+2]; bool res= FALSE; DBUG_ENTER("mysql_create_view"); @@ -422,6 +428,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, DBUG_ASSERT(!lex->proc_list.first && !lex->param_list.elements); + bzero(&ddl_log_state, sizeof(ddl_log_state)); + bzero(&ddl_log_state_tmp_file, sizeof(ddl_log_state_tmp_file)); + backup_file_name[0]= 0; /* We can't allow taking exclusive meta-data locks of unlocked view under LOCK TABLES since this might lead to deadlock. 
Since at the moment we @@ -449,7 +458,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, */ if (lex->current_select->lock_type != TL_READ_DEFAULT) { - lex->current_select->set_lock_for_tables(TL_READ_DEFAULT, false); + lex->current_select->set_lock_for_tables(TL_READ_DEFAULT, false, select_lex->skip_locked); view->mdl_request.set_type(MDL_EXCLUSIVE); } @@ -527,7 +536,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, { /* is this table temporary and is not view? */ if (tbl->table->s->tmp_table != NO_TMP_TABLE && !tbl->view && - !tbl->schema_table) + !tbl->schema_table && !tbl->table_function) { my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias.str); res= TRUE; @@ -571,7 +580,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, while ((item= it++, name= nm++)) { item->set_name(thd, *name); - item->common_flags&= ~IS_AUTO_GENERATED_NAME; + item->base_flags|= item_base_t::IS_EXPLICIT_NAME; } } @@ -652,7 +661,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, } #endif - res= mysql_register_view(thd, view, mode); + res= mysql_register_view(thd, &ddl_log_state, view, mode, backup_file_name); /* View TABLE_SHARE must be removed from the table definition cache in order to @@ -691,14 +700,14 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, if (lex->view_list.elements) { List_iterator_fast<LEX_CSTRING> names(lex->view_list); - LEX_CSTRING *name; - int i; - - for (i= 0; (name= names++); i++) + + buff.append('('); + while (LEX_CSTRING *name= names++) { - buff.append(i ? 
", " : "("); append_identifier(thd, &buff, name); + buff.append(", ", 2); } + buff.length(buff.length()-2); buff.append(')'); } buff.append(STRING_WITH_LEN(" AS ")); @@ -711,10 +720,31 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, with statement based replication */ thd->reset_unsafe_warnings(); + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); + if (backup_file_name[0]) + { + LEX_CSTRING cpath= {backup_file_name, strlen(backup_file_name) }; + ddl_log_delete_tmp_file(thd, &ddl_log_state_tmp_file, &cpath, + &ddl_log_state); + } + debug_crash_here("ddl_log_create_before_binlog"); if (thd->binlog_query(THD::STMT_QUERY_TYPE, buff.ptr(), buff.length(), FALSE, FALSE, FALSE, errcode) > 0) res= TRUE; + thd->binlog_xid= 0; + debug_crash_here("ddl_log_create_after_binlog"); + } + if (!res) + { + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("CREATE") }; + ddl_log.org_storage_engine_name= { C_STRING_WITH_LEN("VIEW") }; + ddl_log.org_database= view->db; + ddl_log.org_table= view->table_name; + backup_log_ddl(&ddl_log); } if (mode != VIEW_CREATE_NEW) @@ -722,8 +752,14 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, if (res) goto err; + if (backup_file_name[0] && + mysql_file_delete(key_file_fileparser, backup_file_name, MYF(MY_WME))) + goto err; // Should be impossible + my_ok(thd); lex->link_first_table_back(view, link_to_local); + ddl_log_complete(&ddl_log_state); + ddl_log_complete(&ddl_log_state_tmp_file); DBUG_RETURN(0); #ifdef WITH_WSREP @@ -736,6 +772,10 @@ err: lex->link_first_table_back(view, link_to_local); err_no_relink: unit->cleanup(); + if (backup_file_name[0]) + mysql_file_delete(key_file_fileparser, backup_file_name, MYF(MY_WME)); + ddl_log_complete(&ddl_log_state); + ddl_log_complete(&ddl_log_state_tmp_file); DBUG_RETURN(res || thd->is_error()); } @@ -901,6 +941,7 @@ int mariadb_fix_view(THD *thd, TABLE_LIST *view, bool wrong_checksum, thd - thread handler 
view - view description mode - VIEW_CREATE_NEW, VIEW_ALTER, VIEW_CREATE_OR_REPLACE + backup_file_name - Store name for backup of old view definition here RETURN 0 OK @@ -908,8 +949,9 @@ int mariadb_fix_view(THD *thd, TABLE_LIST *view, bool wrong_checksum, 1 Error and error message given */ -static int mysql_register_view(THD *thd, TABLE_LIST *view, - enum_view_create_mode mode) +static int mysql_register_view(THD *thd, DDL_LOG_STATE *ddl_log_state, + TABLE_LIST *view, enum_view_create_mode mode, + char *backup_file_name) { LEX *lex= thd->lex; @@ -953,11 +995,13 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, char dir_buff[FN_REFLEN + 1], path_buff[FN_REFLEN + 1]; LEX_CSTRING dir, file, path; int error= 0; + bool old_view_exists= 0; DBUG_ENTER("mysql_register_view"); /* Generate view definition and IS queries. */ view_query.length(0); is_query.length(0); + backup_file_name[0]= 0; { Sql_mode_instant_remove sms(thd, MODE_ANSI_QUOTES); @@ -1059,6 +1103,7 @@ loop_out: if (ha_table_exists(thd, &view->db, &view->table_name)) { + old_view_exists= 1; if (lex->create_info.if_not_exists()) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, @@ -1094,7 +1139,7 @@ loop_out: */ } else - { + { if (mode == VIEW_ALTER) { my_error(ER_NO_SUCH_TABLE, MYF(0), view->db.str, view->alias.str); @@ -1113,11 +1158,10 @@ loop_out: frm-file. */ - lex_string_set(&view->view_client_cs_name, - view->view_creation_ctx->get_client_cs()->csname); + view->view_client_cs_name= view->view_creation_ctx->get_client_cs()->cs_name; - lex_string_set(&view->view_connection_cl_name, - view->view_creation_ctx->get_connection_cl()->name); + view->view_connection_cl_name= + view->view_creation_ctx->get_connection_cl()->coll_name; if (!thd->make_lex_string(&view->view_body_utf8, is_query.ptr(), is_query.length())) @@ -1155,12 +1199,32 @@ loop_out: goto err; } + ddl_log_create_view(thd, ddl_log_state, &path, old_view_exists ? 
+ DDL_CREATE_VIEW_PHASE_DELETE_VIEW_COPY : + DDL_CREATE_VIEW_PHASE_NO_OLD_VIEW); + + debug_crash_here("ddl_log_create_before_copy_view"); + + if (old_view_exists) + { + LEX_CSTRING backup_name= { backup_file_name, 0 }; + if (sql_backup_definition_file(&path, &backup_name)) + { + error= 1; + goto err; + } + ddl_log_update_phase(ddl_log_state, DDL_CREATE_VIEW_PHASE_OLD_VIEW_COPIED); + } + + debug_crash_here("ddl_log_create_before_create_view"); if (sql_create_definition_file(&dir, &file, view_file_type, (uchar*)view, view_parameters)) { error= thd->is_error() ? -1 : 1; goto err; } + debug_crash_here("ddl_log_create_after_create_view"); + DBUG_RETURN(0); err: view->select_stmt.str= NULL; @@ -1742,7 +1806,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table, if (view_is_mergeable && (table->select_lex->master_unit() != &old_lex->unit || old_lex->can_use_merged()) && - !old_lex->can_not_use_merged()) + !old_lex->can_not_use_merged(0)) { /* lex should contain at least one table */ DBUG_ASSERT(view_main_select_tables != 0); @@ -1871,9 +1935,12 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) bool delete_error= FALSE, wrong_object_name= FALSE; bool some_views_deleted= FALSE; bool something_wrong= FALSE; - uint not_exists_count= 0; + uint not_exists_count= 0, view_count= 0; + DDL_LOG_STATE ddl_log_state; DBUG_ENTER("mysql_drop_view"); + bzero(&ddl_log_state, sizeof(ddl_log_state)); + /* We can't allow dropping of unlocked view under LOCK TABLES since this might lead to deadlock. 
But since we can't really lock view with LOCK @@ -1892,18 +1959,21 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) for (view= views; view; view= view->next_local) { + LEX_CSTRING cpath; bool not_exist; - build_table_filename(path, sizeof(path) - 1, - view->db.str, view->table_name.str, reg_ext, 0); + size_t length; + length= build_table_filename(path, sizeof(path) - 1, + view->db.str, view->table_name.str, reg_ext, 0); + lex_string_set3(&cpath, path, length); if ((not_exist= my_access(path, F_OK)) || !dd_frm_is_view(thd, path)) { char name[FN_REFLEN]; - my_snprintf(name, sizeof(name), "%s.%s", view->db.str, - view->table_name.str); + size_t length= my_snprintf(name, sizeof(name), "%s.%s", view->db.str, + view->table_name.str); if (non_existant_views.length()) non_existant_views.append(','); - non_existant_views.append(name); + non_existant_views.append(name, length); if (!not_exist) { @@ -1915,8 +1985,18 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) not_exists_count++; continue; } + if (!view_count++) + { + if (ddl_log_drop_view_init(thd, &ddl_log_state, &thd->db)) + DBUG_RETURN(TRUE); + } + if (ddl_log_drop_view(thd, &ddl_log_state, &cpath, &view->db, + &view->table_name)) + DBUG_RETURN(TRUE); + debug_crash_here("ddl_log_drop_before_delete_view"); if (unlikely(mysql_file_delete(key_file_frm, path, MYF(MY_WME)))) delete_error= TRUE; + debug_crash_here("ddl_log_drop_after_delete_view"); some_views_deleted= TRUE; @@ -1927,6 +2007,14 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) tdc_remove_table(thd, view->db.str, view->table_name.str); query_cache_invalidate3(thd, view, 0); sp_cache_invalidate(); + + backup_log_info ddl_log; + bzero(&ddl_log, sizeof(ddl_log)); + ddl_log.query= { C_STRING_WITH_LEN("DROP") }; + ddl_log.org_storage_engine_name= { C_STRING_WITH_LEN("VIEW") }; + ddl_log.org_database= view->db; + ddl_log.org_table= view->table_name; + backup_log_ddl(&ddl_log); 
} something_wrong= (delete_error || @@ -1944,10 +2032,16 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) /* if something goes wrong, bin-log with possible error code, otherwise bin-log with error code cleared. */ + debug_crash_here("ddl_log_drop_before_binlog"); + thd->binlog_xid= thd->query_id; + ddl_log_update_xid(&ddl_log_state, thd->binlog_xid); if (unlikely(write_bin_log(thd, !something_wrong, thd->query(), thd->query_length()))) something_wrong= 1; + thd->binlog_xid= 0; + debug_crash_here("ddl_log_drop_after_binlog"); } + ddl_log_complete(&ddl_log_state); if (unlikely(something_wrong)) { @@ -1992,7 +2086,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view) */ if ((!view->view && !view->belong_to_view) || thd->lex->sql_command == SQLCOM_INSERT || - thd->lex->first_select_lex()->select_limit == 0) + thd->lex->first_select_lex()->limit_params.select_limit == 0) DBUG_RETURN(FALSE); /* it is normal table or query without LIMIT */ table= view->table; view= view->top_table(); @@ -2230,7 +2324,8 @@ bool mysql_rename_view(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *new_name, - TABLE_LIST *view) + const LEX_CSTRING *old_db, + const LEX_CSTRING *old_name) { LEX_CSTRING pathstr; File_parser *parser; @@ -2240,7 +2335,7 @@ mysql_rename_view(THD *thd, pathstr.str= (char *) path_buff; pathstr.length= build_table_filename(path_buff, sizeof(path_buff) - 1, - view->db.str, view->table_name.str, + old_db->str, old_name->str, reg_ext, 0); if ((parser= sql_parse_prepare(&pathstr, thd->mem_root, 1)) && @@ -2267,9 +2362,10 @@ mysql_rename_view(THD *thd, goto err; /* rename view and it's backups */ - if (rename_in_schema_file(thd, view->db.str, view->table_name.str, + if (rename_in_schema_file(thd, old_db->str, old_name->str, new_db->str, new_name->str)) goto err; + debug_crash_here("rename_view_after_rename_schema_file"); dir.str= dir_buff; dir.length= build_table_filename(dir_buff, sizeof(dir_buff) - 1, @@ -2286,16 +2382,25 @@ 
mysql_rename_view(THD *thd, (uchar*)&view_def, view_parameters)) { /* restore renamed view in case of error */ - rename_in_schema_file(thd, new_db->str, new_name->str, view->db.str, - view->table_name.str); + rename_in_schema_file(thd, new_db->str, new_name->str, old_db->str, + old_name->str); goto err; } - } else + } + else DBUG_RETURN(1); /* remove cache entries */ - query_cache_invalidate3(thd, view, 0); - sp_cache_invalidate(); + { + char key[NAME_LEN*2+1], *ptr; + memcpy(key, old_db->str, old_db->length); + ptr= key+ old_db->length; + *ptr++= 0; + memcpy(key, old_name->str, old_name->length); + ptr= key+ old_db->length; + *ptr++= 0; + query_cache.invalidate(thd, key, (size_t) (ptr-key), 0); + } error= FALSE; err: diff --git a/sql/sql_view.h b/sql/sql_view.h index 0713b951de7..1b880e43eb1 100644 --- a/sql/sql_view.h +++ b/sql/sql_view.h @@ -53,8 +53,10 @@ extern TYPELIB updatable_views_with_limit_typelib; bool check_duplicate_names(THD *thd, List<Item>& item_list, bool gen_unique_view_names); -bool mysql_rename_view(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *new_name, - TABLE_LIST *view); +bool mysql_rename_view(THD *thd, const LEX_CSTRING *new_db, + const LEX_CSTRING *new_name, + const LEX_CSTRING *old_db, + const LEX_CSTRING *old_name); void make_valid_column_names(THD *thd, List<Item> &item_list); diff --git a/sql/sql_window.cc b/sql/sql_window.cc index 4bd993411e9..963535e2417 100644 --- a/sql/sql_window.cc +++ b/sql/sql_window.cc @@ -694,10 +694,6 @@ int compare_window_funcs_by_window_specs(Item_window_func *win_func1, } -#define SORTORDER_CHANGE_FLAG 1 -#define PARTITION_CHANGE_FLAG 2 -#define FRAME_CHANGE_FLAG 4 - typedef int (*Item_window_func_cmp)(Item_window_func *f1, Item_window_func *f2, void *arg); @@ -727,15 +723,15 @@ void order_window_funcs_by_window_specs(List<Item_window_func> *win_func_list) List_iterator_fast<Item_window_func> it(*win_func_list); Item_window_func *prev= it++; - prev->marker= SORTORDER_CHANGE_FLAG | - 
PARTITION_CHANGE_FLAG | - FRAME_CHANGE_FLAG; + prev->marker= (MARKER_SORTORDER_CHANGE | + MARKER_PARTITION_CHANGE | + MARKER_FRAME_CHANGE); Item_window_func *curr; while ((curr= it++)) { Window_spec *win_spec_prev= prev->window_spec; Window_spec *win_spec_curr= curr->window_spec; - curr->marker= 0; + curr->marker= MARKER_UNUSED; if (!(win_spec_prev->partition_list == win_spec_curr->partition_list && win_spec_prev->order_list == win_spec_curr->order_list)) { @@ -749,17 +745,17 @@ void order_window_funcs_by_window_specs(List<Item_window_func> *win_func_list) cmp= compare_window_spec_joined_lists(win_spec_prev, win_spec_curr); if (!(CMP_LT_C <= cmp && cmp <= CMP_GT_C)) { - curr->marker= SORTORDER_CHANGE_FLAG | - PARTITION_CHANGE_FLAG | - FRAME_CHANGE_FLAG; + curr->marker= (MARKER_SORTORDER_CHANGE | + MARKER_PARTITION_CHANGE | + MARKER_FRAME_CHANGE); } else if (win_spec_prev->partition_list != win_spec_curr->partition_list) { - curr->marker|= PARTITION_CHANGE_FLAG | FRAME_CHANGE_FLAG; + curr->marker|= MARKER_PARTITION_CHANGE | MARKER_FRAME_CHANGE; } } else if (win_spec_prev->window_frame != win_spec_curr->window_frame) - curr->marker|= FRAME_CHANGE_FLAG; + curr->marker|= MARKER_FRAME_CHANGE; prev= curr; } @@ -2553,7 +2549,7 @@ void add_special_frame_cursors(THD *thd, Cursor_manager *cursor_manager, cursor_manager->add_cursor(bottom_bound); cursor_manager->add_cursor(top_bound); cursor_manager->add_cursor(current_row_pos); - DBUG_ASSERT(item_sum->fixed); + DBUG_ASSERT(item_sum->fixed()); bool negative_offset= item_sum->sum_func() == Item_sum::LAG_FUNC; fc= new Frame_positional_cursor(*current_row_pos, *top_bound, *bottom_bound, @@ -2569,7 +2565,7 @@ void add_special_frame_cursors(THD *thd, Cursor_manager *cursor_manager, Frame_cursor *top_bound= get_frame_cursor(thd, spec, true); cursor_manager->add_cursor(bottom_bound); cursor_manager->add_cursor(top_bound); - DBUG_ASSERT(item_sum->fixed); + DBUG_ASSERT(item_sum->fixed()); Item *offset_item= new (thd->mem_root) 
Item_int(thd, 0); offset_item->fix_fields(thd, &offset_item); fc= new Frame_positional_cursor(*top_bound, @@ -2585,7 +2581,7 @@ void add_special_frame_cursors(THD *thd, Cursor_manager *cursor_manager, Frame_cursor *top_bound= get_frame_cursor(thd, spec, true); cursor_manager->add_cursor(bottom_bound); cursor_manager->add_cursor(top_bound); - DBUG_ASSERT(item_sum->fixed); + DBUG_ASSERT(item_sum->fixed()); Item *offset_item= new (thd->mem_root) Item_int(thd, 0); offset_item->fix_fields(thd, &offset_item); fc= new Frame_positional_cursor(*bottom_bound, @@ -2601,7 +2597,7 @@ void add_special_frame_cursors(THD *thd, Cursor_manager *cursor_manager, Frame_cursor *top_bound= get_frame_cursor(thd, spec, true); cursor_manager->add_cursor(bottom_bound); cursor_manager->add_cursor(top_bound); - DBUG_ASSERT(item_sum->fixed); + DBUG_ASSERT(item_sum->fixed()); Item *int_item= new (thd->mem_root) Item_int(thd, 1); Item *offset_func= new (thd->mem_root) Item_func_minus(thd, item_sum->get_arg(1), @@ -2790,7 +2786,7 @@ bool save_window_function_values(List<Item_window_func>& window_functions, Item *func; for (; (func = *func_ptr) ; func_ptr++) { - if (func->with_window_func && func->type() != Item::WINDOW_FUNC_ITEM) + if (func->with_window_func() && func->type() != Item::WINDOW_FUNC_ITEM) func->save_in_result_field(true); } @@ -3020,7 +3016,8 @@ bool Window_func_runner::exec(THD *thd, TABLE *tbl, SORT_INFO *filesort_result) win_func->set_phase_to_computation(); // TODO(cvicentiu) Setting the aggregator should probably be done during // setup of Window_funcs_sort. 
- win_func->window_func()->set_aggregator(Aggregator::SIMPLE_AGGREGATOR); + win_func->window_func()->set_aggregator(thd, + Aggregator::SIMPLE_AGGREGATOR); } it.rewind(); @@ -3093,11 +3090,11 @@ bool Window_funcs_sort::setup(THD *thd, SQL_SELECT *sel, return true; it++; win_func= it.peek(); - } while (win_func && !(win_func->marker & SORTORDER_CHANGE_FLAG)); + } while (win_func && !(win_func->marker & MARKER_SORTORDER_CHANGE)); /* The sort criteria must be taken from the last win_func in the group of - adjacent win_funcs that do not have SORTORDER_CHANGE_FLAG. This is + adjacent win_funcs that do not have MARKER_SORTORDER_CHANGE. This is because the sort order must be the most specific sorting criteria defined within the window function group. This ensures that we sort the table in a way that the result is valid for all window functions belonging to diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index dfbc89e411a..691fe7d9189 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -68,6 +68,7 @@ #include "sql_sequence.h" #include "my_base.h" #include "sql_type_json.h" +#include "json_table.h" /* this is to get the bison compilation windows warnings out */ #ifdef _MSC_VER @@ -75,7 +76,7 @@ /* warning C4102: 'yyexhaustedlab': unreferenced label */ #pragma warning (disable : 4065 4102) #endif -#ifdef __GNUC__ +#if defined (__GNUC__) || defined (__clang__) #pragma GCC diagnostic ignored "-Wunused-label" /* yyexhaustedlab: */ #endif @@ -199,6 +200,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)() MYSQL_YYABORT; \ } while(0) + %} %union { int num; @@ -223,6 +225,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)() Lex_for_loop_st for_loop; Lex_for_loop_bounds_st for_loop_bounds; Lex_trim_st trim; + Json_table_column::On_response json_on_response; vers_history_point_t vers_history_point; struct { @@ -338,7 +341,7 @@ static_assert(sizeof(YYSTYPE) == sizeof(void*)*2+8, "%union size check"); bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); 
%} -%pure-parser /* We have threads */ +%define api.pure /* We have threads */ %parse-param { THD *thd } %lex-param { THD *thd } /* @@ -507,6 +510,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> ELSEIF_MARIADB_SYM %token <kwd> ELSE /* SQL-2003-R */ %token <kwd> ELSIF_ORACLE_SYM /* PLSQL-R */ +%token <kwd> EMPTY_SYM /* SQL-2016-R */ %token <kwd> ENCLOSED %token <kwd> ESCAPED %token <kwd> EXCEPT_SYM /* SQL-2003-R */ @@ -525,6 +529,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> GROUP_CONCAT_SYM %token <rwd> JSON_ARRAYAGG_SYM %token <rwd> JSON_OBJECTAGG_SYM +%token <kwd> JSON_TABLE_SYM %token <kwd> GROUP_SYM /* SQL-2003-R */ %token <kwd> HAVING /* SQL-2003-R */ %token <kwd> HOUR_MICROSECOND_SYM @@ -533,6 +538,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> IF_SYM %token <kwd> IGNORE_DOMAIN_IDS_SYM %token <kwd> IGNORE_SYM +%token <kwd> IGNORED_SYM %token <kwd> INDEX_SYM %token <kwd> INFILE %token <kwd> INNER_SYM /* SQL-2003-R */ @@ -576,12 +582,14 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> MEDIUMINT %token <kwd> MEDIUMTEXT %token <kwd> MIN_SYM /* SQL-2003-N */ +%token <kwd> MINUS_ORACLE_SYM /* Oracle-R */ %token <kwd> MINUTE_MICROSECOND_SYM %token <kwd> MINUTE_SECOND_SYM %token <kwd> MODIFIES_SYM /* SQL-2003-R */ %token <kwd> MOD_SYM /* SQL-2003-N */ %token <kwd> NATURAL /* SQL-2003-R */ %token <kwd> NEG +%token <kwd> NESTED_SYM /* SQL-2003-N */ %token <kwd> NOT_SYM /* SQL-2003-R */ %token <kwd> NO_WRITE_TO_BINLOG %token <kwd> NOW_SYM @@ -593,6 +601,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> OPTIMIZE %token <kwd> OPTIONALLY %token <kwd> ORDER_SYM /* SQL-2003-R */ +%token <kwd> ORDINALITY_SYM /* SQL-2003-N */ %token <kwd> OR_SYM /* SQL-2003-R */ %token <kwd> OTHERS_ORACLE_SYM /* SQL-2011-N, PLSQL-R */ %token <kwd> OUTER @@ -603,6 +612,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t 
*yystacksize); %token <kwd> PAGE_CHECKSUM_SYM %token <kwd> PARSE_VCOL_EXPR_SYM %token <kwd> PARTITION_SYM /* SQL-2003-R */ +%token <kwd> PATH_SYM /* SQL-2003-N */ %token <kwd> PERCENTILE_CONT_SYM %token <kwd> PERCENTILE_DISC_SYM %token <kwd> PERCENT_RANK_SYM @@ -719,6 +729,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> PACKAGE_MARIADB_SYM // Oracle-R %token <kwd> RAISE_MARIADB_SYM // PLSQL-R %token <kwd> ROWTYPE_MARIADB_SYM // PLSQL-R +%token <kwd> ROWNUM_SYM /* Oracle-R */ /* Non-reserved keywords @@ -728,6 +739,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> ACTION /* SQL-2003-N */ %token <kwd> ADMIN_SYM /* SQL-2003-N */ %token <kwd> ADDDATE_SYM /* MYSQL-FUNC */ +%token <kwd> ADD_MONTHS_SYM /* Oracle FUNC*/ %token <kwd> AFTER_SYM /* SQL-2003-N */ %token <kwd> AGAINST %token <kwd> AGGREGATE_SYM @@ -902,6 +914,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> LEVEL_SYM %token <kwd> LIST_SYM %token <kwd> LOCAL_SYM /* SQL-2003-R */ +%token <kwd> LOCKED_SYM %token <kwd> LOCKS_SYM %token <kwd> LOGFILE_SYM %token <kwd> LOGS_SYM @@ -1062,6 +1075,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %token <kwd> SHUTDOWN %token <kwd> SIGNED_SYM %token <kwd> SIMPLE_SYM /* SQL-2003-N */ +%token <kwd> SKIP_SYM %token <kwd> SLAVE %token <kwd> SLAVES %token <kwd> SLAVE_POS_SYM @@ -1298,6 +1312,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type <lex_string_with_metadata> TEXT_STRING NCHAR_STRING + json_text_literal + json_text_literal_or_num %type <lex_str_ptr> opt_table_alias_clause @@ -1357,6 +1373,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type <sp_handler> sp_handler +%type <json_on_response> json_on_response + %type <Lex_field_type> field_type field_type_all qualified_field_type field_type_numeric @@ -1364,6 +1382,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); field_type_lob 
field_type_temporal field_type_misc + json_table_field_type %type <Lex_dyncol_type> opt_dyncol_type dyncol_type numeric_dyncol_type temporal_dyncol_type string_dyncol_type @@ -1389,6 +1408,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); case_stmt_body opt_bin_mod opt_for_system_time_clause opt_if_exists_table_element opt_if_not_exists_table_element opt_recursive opt_format_xid opt_for_portion_of_time_clause + ignorability %type <object_ddl_options> create_or_replace @@ -1526,7 +1546,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); table_primary_derived table_primary_derived_opt_parens derived_table_list table_reference_list_parens nested_table_reference_list join_table_parens - update_table_list + update_table_list table_function %type <date_time_type> date_time_type; %type <interval> interval @@ -1600,6 +1620,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); opt_lock_wait_timeout_new %type <select_limit> opt_limit_clause limit_clause limit_options + fetch_first_clause %type <order_limit_lock> query_expression_tail @@ -1682,6 +1703,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); opt_delete_gtid_domain asrow_attribute opt_constraint_no_id + json_table_columns_clause json_table_columns_list json_table_column + json_table_column_type json_opt_on_empty_or_error + json_on_error_response json_on_empty_response %type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt %type <NONE> sp_if_then_statements sp_case_then_statements @@ -1703,6 +1727,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type <num> view_algorithm view_check_option %type <view_suid> view_suid opt_view_suid +%type <num> only_or_with_ties %type <plsql_cursor_attr> plsql_cursor_attr %type <sp_suid> sp_suid @@ -4672,6 +4697,7 @@ opt_create_select: | opt_duplicate opt_as create_select_query_expression opt_versioning_option { + Lex->create_info.add(DDL_options_st::OPT_CREATE_SELECT); if 
(Lex->check_cte_dependencies_and_resolve_references()) MYSQL_YYABORT; } @@ -6590,7 +6616,7 @@ attribute: { if (unlikely(Lex->charset && !my_charset_same(Lex->charset,$2))) my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), - $2->name,Lex->charset->csname)); + $2->coll_name.str, Lex->charset->cs_name.str)); Lex->last_field->charset= $2; } | serial_attribute @@ -6718,7 +6744,9 @@ charset: charset_name: ident_or_text { - if (unlikely(!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))))) + myf utf8_flag= thd->get_utf8_flag(); + if (unlikely(!($$=get_charset_by_csname($1.str, MY_CS_PRIMARY, + MYF(utf8_flag))))) my_yyabort_error((ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str)); } | BINARY { $$= &my_charset_bin; } @@ -6737,8 +6765,10 @@ opt_load_data_charset: old_or_new_charset_name: ident_or_text { + myf utf8_flag= thd->get_utf8_flag(); if (unlikely(!($$=get_charset_by_csname($1.str, - MY_CS_PRIMARY,MYF(0))) && + MY_CS_PRIMARY, + MYF(utf8_flag))) && !($$=get_old_charset_by_name($1.str)))) my_yyabort_error((ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str)); } @@ -6753,7 +6783,8 @@ old_or_new_charset_name_or_default: collation_name: ident_or_text { - if (unlikely(!($$= mysqld_collation_get_by_name($1.str)))) + if (unlikely(!($$= mysqld_collation_get_by_name($1.str, + thd->get_utf8_flag())))) MYSQL_YYABORT; } ; @@ -6804,7 +6835,7 @@ binary: { if (!my_charset_same($2, $1)) my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), - $2->name, $1->csname)); + $2->coll_name.str, $1->cs_name.str)); Lex->charset= $2; } } @@ -7075,7 +7106,11 @@ all_key_opt: { Lex->last_key->key_create_info.comment= $2; } | VISIBLE_SYM { - /* This is mainly for MySQL 8.0 compatiblity */ + /* This is mainly for MySQL 8.0 compatibility */ + } + | ignorability + { + Lex->last_key->key_create_info.is_ignored= $1; } | IDENT_sys equal TEXT_STRING_sys { @@ -7133,6 +7168,11 @@ btree_or_rtree: | HASH_SYM { $$= HA_KEY_ALG_HASH; } ; +ignorability: + IGNORED_SYM { $$= true; } + | NOT_SYM IGNORED_SYM { $$= false; 
} + ; + key_list: key_list ',' key_part order_dir { @@ -7791,7 +7831,7 @@ alter_list_item: { LEX *lex=Lex; Alter_drop *ad= (new (thd->mem_root) - Alter_drop(Alter_drop::KEY, primary_key_name, + Alter_drop(Alter_drop::KEY, primary_key_name.str, FALSE)); if (unlikely(ad == NULL)) MYSQL_YYABORT; @@ -7827,6 +7867,16 @@ alter_list_item: if (unlikely(Lex->add_alter_list($4, $7, $3))) MYSQL_YYABORT; } + | ALTER key_or_index opt_if_exists_table_element ident ignorability + { + LEX *lex= Lex; + Alter_index_ignorability *ac= new (thd->mem_root) + Alter_index_ignorability($4.str, $5, $3); + if (ac == NULL) + MYSQL_YYABORT; + lex->alter_info.alter_index_ignorability_list.push_back(ac); + lex->alter_info.flags|= ALTER_INDEX_IGNORABILITY; + } | ALTER opt_column opt_if_exists_table_element field_ident DROP DEFAULT { if (unlikely(Lex->add_alter_list($4, (Virtual_column_info*) 0, $3))) @@ -7870,7 +7920,7 @@ alter_list_item: $5= $5 ? $5 : $4; if (unlikely(!my_charset_same($4,$5))) my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), - $5->name, $4->csname)); + $5->coll_name.str, $4->cs_name.str)); if (unlikely(Lex->create_info.add_alter_list_item_convert_to_charset($5))) MYSQL_YYABORT; Lex->alter_info.flags|= ALTER_CONVERT_TO; @@ -9153,7 +9203,6 @@ opt_select_lock_type: } ; - opt_lock_wait_timeout_new: /* empty */ { @@ -9161,14 +9210,22 @@ opt_lock_wait_timeout_new: } | WAIT_SYM ulong_num { + $$.empty(); $$.defined_timeout= TRUE; $$.timeout= $2; } | NOWAIT_SYM { + $$.empty(); $$.defined_timeout= TRUE; $$.timeout= 0; } + | SKIP_SYM LOCKED_SYM + { + $$.empty(); + $$.skip_locked= 1; + Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SKIP_LOCKED); + } ; select_item_list: @@ -9210,7 +9267,7 @@ select_item: if (unlikely(Lex->sql_command == SQLCOM_CREATE_VIEW && check_column_name($4.str))) my_yyabort_error((ER_WRONG_COLUMN_NAME, MYF(0), $4.str)); - $2->common_flags&= ~IS_AUTO_GENERATED_NAME; + $2->base_flags|= item_base_t::IS_EXPLICIT_NAME; $2->set_name(thd, $4); } else if (!$2->name.str 
|| $2->name.str == item_empty_name) @@ -9853,7 +9910,7 @@ column_default_non_parenthesized_expr: | variable | sum_expr { - if (!Lex->select_stack_top) + if (!Lex->select_stack_top || Lex->json_table) { my_error(ER_INVALID_GROUP_FUNC_USE, MYF(0)); MYSQL_YYABORT; @@ -10227,7 +10284,14 @@ function_call_keyword: discouraged. */ function_call_nonkeyword: - ADDDATE_SYM '(' expr ',' expr ')' + ADD_MONTHS_SYM '(' expr ',' expr ')' + { + $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $5, + INTERVAL_MONTH, 0); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } + | ADDDATE_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $5, INTERVAL_DAY, 0); @@ -10315,6 +10379,17 @@ function_call_nonkeyword: if (unlikely($$ == NULL)) MYSQL_YYABORT; } + | ROWNUM_SYM +%ifdef MARIADB + '(' ')' +%else + optional_braces +%endif ORACLE + { + $$= new (thd->mem_root) Item_func_rownum(thd); + if (unlikely($$ == NULL)) + MYSQL_YYABORT; + } | SUBDATE_SYM '(' expr ',' expr ')' { $$= new (thd->mem_root) Item_date_add_interval(thd, $3, $5, @@ -10348,23 +10423,22 @@ function_call_nonkeyword: if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5)))) MYSQL_YYABORT; } - | SYSDATE opt_time_precision +%ifdef ORACLE + | SYSDATE { - /* - Unlike other time-related functions, SYSDATE() is - replication-unsafe because it is not affected by the - TIMESTAMP variable. It is unsafe even if - sysdate_is_now=1, because the slave may have - sysdate_is_now=0. 
- */ - Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); - if (global_system_variables.sysdate_is_now == 0) - $$= new (thd->mem_root) Item_func_sysdate_local(thd, $2); - else - $$= new (thd->mem_root) Item_func_now_local(thd, $2); - if (unlikely($$ == NULL)) - MYSQL_YYABORT; - Lex->safe_to_cache_query=0; + if (unlikely(!($$= Lex->make_item_func_sysdate(thd, 0)))) + MYSQL_YYABORT; + } +%endif + | SYSDATE '(' ')' + { + if (unlikely(!($$= Lex->make_item_func_sysdate(thd, 0)))) + MYSQL_YYABORT; + } + | SYSDATE '(' real_ulong_num ')' + { + if (unlikely(!($$= Lex->make_item_func_sysdate(thd, (uint) $3)))) + MYSQL_YYABORT; } | TIMESTAMP_ADD '(' interval_time_stamp ',' expr ',' expr ')' { @@ -10793,7 +10867,7 @@ udf_expr: */ if ($4.str) { - $2->common_flags&= ~IS_AUTO_GENERATED_NAME; + $2->base_flags|= item_base_t::IS_EXPLICIT_NAME; $2->set_name(thd, $4); } /* @@ -10942,13 +11016,11 @@ sum_expr: Item_func_group_concat(thd, Lex->current_context(), $3, $5, sel->gorder_list, $7, $8, - sel->select_limit, - sel->offset_limit); + sel->limit_params.select_limit, + sel->limit_params.offset_limit); if (unlikely($$ == NULL)) MYSQL_YYABORT; - sel->select_limit= NULL; - sel->offset_limit= NULL; - sel->explicit_limit= 0; + sel->limit_params.clear(); $5->empty(); sel->gorder_list.empty(); } @@ -10974,13 +11046,11 @@ sum_expr: Item_func_json_arrayagg(thd, Lex->current_context(), $3, args, sel->gorder_list, s, $7, - sel->select_limit, - sel->offset_limit); + sel->limit_params.select_limit, + sel->limit_params.offset_limit); if (unlikely($$ == NULL)) MYSQL_YYABORT; - sel->select_limit= NULL; - sel->offset_limit= NULL; - sel->explicit_limit= 0; + sel->limit_params.clear(); $5->empty(); sel->gorder_list.empty(); } @@ -11302,38 +11372,18 @@ opt_glimit_clause: | glimit_clause { $$ = 1; } ; -glimit_clause_init: - LIMIT{} - ; glimit_clause: - glimit_clause_init glimit_options + LIMIT glimit_options { Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT); } ; glimit_options: - 
limit_option - { - SELECT_LEX *sel= Select; - sel->select_limit= $1; - sel->offset_limit= 0; - sel->explicit_limit= 1; - } - | limit_option ',' limit_option + limit_options { - SELECT_LEX *sel= Select; - sel->select_limit= $3; - sel->offset_limit= $1; - sel->explicit_limit= 1; - } - | limit_option OFFSET_SYM limit_option - { - SELECT_LEX *sel= Select; - sel->select_limit= $1; - sel->offset_limit= $3; - sel->explicit_limit= 1; + Select->limit_params= $1; } ; @@ -11517,10 +11567,225 @@ table_ref: } ; +json_text_literal: + TEXT_STRING + { + Lex->json_table->m_text_literal_cs= NULL; + } + | NCHAR_STRING + { + Lex->json_table->m_text_literal_cs= national_charset_info; + } + | UNDERSCORE_CHARSET TEXT_STRING + { + Lex->json_table->m_text_literal_cs= $1; + $$= $2; + } + ; + +json_text_literal_or_num: + json_text_literal + | NUM + { + Lex->json_table->m_text_literal_cs= NULL; + } + | LONG_NUM + { + Lex->json_table->m_text_literal_cs= NULL; + } + | DECIMAL_NUM + { + Lex->json_table->m_text_literal_cs= NULL; + } + | FLOAT_NUM + { + Lex->json_table->m_text_literal_cs= NULL; + } + ; + join_table_list: derived_table_list { MYSQL_YYABORT_UNLESS($$=$1); } ; +json_table_columns_clause: + COLUMNS '(' json_table_columns_list ')' + {} + ; + +json_table_columns_list: + json_table_column + | json_table_columns_list ',' json_table_column + {} + ; + +json_table_column: + ident + { + LEX *lex=Lex; + Create_field *f= new (thd->mem_root) Create_field(); + + if (unlikely(check_string_char_length(&$1, 0, NAME_CHAR_LEN, + system_charset_info, 1))) + my_yyabort_error((ER_TOO_LONG_IDENT, MYF(0), $1.str)); + + lex->json_table->m_cur_json_table_column= + new (thd->mem_root) Json_table_column(f, + lex->json_table->get_cur_nested_path()); + + if (unlikely(!f || + !lex->json_table->m_cur_json_table_column)) + MYSQL_YYABORT; + + lex->init_last_field(f, &$1, NULL); + } + json_table_column_type + { + LEX *lex=Lex; + if (unlikely(lex->json_table-> + m_cur_json_table_column->m_field->check(thd))) + 
MYSQL_YYABORT; + lex->json_table->m_columns.push_back( + lex->json_table->m_cur_json_table_column, thd->mem_root); + } + | NESTED_SYM PATH_SYM json_text_literal + { + LEX *lex=Lex; + Json_table_nested_path *np= new (thd->mem_root) + Json_table_nested_path(); + np->set_path(thd, $3); + lex->json_table->start_nested_path(np); + } + json_table_columns_clause + { + LEX *lex=Lex; + lex->json_table->end_nested_path(); + } + ; + +json_table_column_type: + FOR_SYM ORDINALITY_SYM + { + Lex_field_type_st type; + type.set_handler_length_flags(&type_handler_slong, 0, 0); + Lex->last_field->set_attributes(thd, type, Lex->charset, + COLUMN_DEFINITION_TABLE_FIELD); + Lex->json_table->m_cur_json_table_column-> + set(Json_table_column::FOR_ORDINALITY); + } + | json_table_field_type PATH_SYM json_text_literal + json_opt_on_empty_or_error + { + Lex->last_field->set_attributes(thd, $1, Lex->charset, + COLUMN_DEFINITION_TABLE_FIELD); + if (Lex->json_table->m_cur_json_table_column-> + set(thd, Json_table_column::PATH, $3, Lex->charset)) + { + MYSQL_YYABORT; + } + } + | json_table_field_type EXISTS PATH_SYM json_text_literal + { + Lex->last_field->set_attributes(thd, $1, Lex->charset, + COLUMN_DEFINITION_TABLE_FIELD); + Lex->json_table->m_cur_json_table_column-> + set(thd, Json_table_column::EXISTS_PATH, $4, Lex->charset); + } + ; + +json_table_field_type: + field_type_numeric + | field_type_temporal + | field_type_string + | field_type_lob + ; + +json_opt_on_empty_or_error: + /* none */ + {} + | json_on_error_response + | json_on_error_response json_on_empty_response + | json_on_empty_response + | json_on_empty_response json_on_error_response + ; + +json_on_response: + ERROR_SYM + { + $$.m_response= Json_table_column::RESPONSE_ERROR; + } + | NULL_SYM + { + $$.m_response= Json_table_column::RESPONSE_NULL; + } + | DEFAULT json_text_literal_or_num + { + $$.m_response= Json_table_column::RESPONSE_DEFAULT; + $$.m_default= $2; + Lex->json_table->m_cur_json_table_column->m_defaults_cs= + 
thd->variables.collation_connection; + } + ; + +json_on_error_response: + json_on_response ON ERROR_SYM + { + Lex->json_table->m_cur_json_table_column->m_on_error= $1; + } + ; + +json_on_empty_response: + json_on_response ON EMPTY_SYM + { + Lex->json_table->m_cur_json_table_column->m_on_empty= $1; + } + ; + +table_function: + JSON_TABLE_SYM '(' + { + push_table_function_arg_context(Lex, thd->mem_root); + //TODO: introduce IN_TABLE_FUNC_ARGUMENT? + Select->parsing_place= IN_ON; + } + expr ',' + { + Table_function_json_table *jt= + new (thd->mem_root) Table_function_json_table($4); + if (unlikely(!jt)) + MYSQL_YYABORT; + /* See comment for class Table_function_json_table: */ + Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); + Lex->json_table= jt; + + Select->parsing_place= NO_MATTER; + jt->set_name_resolution_context(Lex->pop_context()); + } + json_text_literal json_table_columns_clause ')' opt_table_alias_clause + { + SELECT_LEX *sel= Select; + if (unlikely($10 == NULL)) + { + /* Alias is not optional. 
*/ + my_error(ER_JSON_TABLE_ALIAS_REQUIRED, MYF(0)); + MYSQL_YYABORT; + } + if (unlikely(Lex->json_table->m_nested_path.set_path(thd, $7))) + MYSQL_YYABORT; + if (!($$= sel->add_table_to_list(thd, + new (thd->mem_root) Table_ident(thd, &any_db, + $10, TRUE), + NULL, + TL_OPTION_TABLE_FUNCTION, + YYPS->m_lock_type, + YYPS->m_mdl_type, + 0,0,0))) + MYSQL_YYABORT; + $$->table_function= Lex->json_table; + Lex->json_table= 0; + status_var_increment(thd->status_var.feature_json); + } + ; + /* The ODBC escape syntax for Outer Join is: '{' OJ join_table '}' The parser does not define OJ as a token, any ident is accepted @@ -11728,6 +11993,7 @@ table_factor: $$= $1; } | table_reference_list_parens { $$= $1; } + | table_function { $$= $1; } ; table_primary_ident_opt_parens: @@ -11783,10 +12049,8 @@ table_primary_ident: table_ident opt_use_partition opt_for_system_time_clause opt_table_alias_clause opt_key_definition { - SELECT_LEX *sel= Select; - sel->table_join_options= 0; if (!($$= Select->add_table_to_list(thd, $1, $4, - Select->get_table_join_options(), + 0, YYPS->m_lock_type, YYPS->m_mdl_type, Select->pop_index_hints(), @@ -11804,6 +12068,16 @@ table_primary_derived: if (!($$= Lex->parsed_derived_table($1->master_unit(), $2, $3))) MYSQL_YYABORT; } +%ifdef ORACLE + | subquery + opt_for_system_time_clause + { + LEX_CSTRING alias; + if ($1->make_unique_derived_name(thd, &alias) || + !($$= Lex->parsed_derived_table($1->master_unit(), $2, &alias))) + MYSQL_YYABORT; + } +%endif ; opt_outer: @@ -11876,18 +12150,18 @@ using_list: { if (unlikely(!($$= new (thd->mem_root) List<String>))) MYSQL_YYABORT; - String *s= new (thd->mem_root) String((const char *) $1.str, - $1.length, - system_charset_info); + String *s= new (thd->mem_root) String((const char*) $1.str, + $1.length, + system_charset_info); if (unlikely(unlikely(s == NULL))) MYSQL_YYABORT; $$->push_back(s, thd->mem_root); } | using_list ',' ident { - String *s= new (thd->mem_root) String((const char *) $3.str, - $3.length, 
- system_charset_info); + String *s= new (thd->mem_root) String((const char*) $3.str, + $3.length, + system_charset_info); if (unlikely(unlikely(s == NULL))) MYSQL_YYABORT; if (unlikely($1->push_back(s, thd->mem_root))) @@ -12258,14 +12532,15 @@ order_list: ; order_dir: - /* empty */ { $$ = 1; } - | ASC { $$ =1; } - | DESC { $$ =0; } + /* empty */ { $$= 1; } + | ASC { $$= 1; } + | DESC { $$= 0; } ; + opt_limit_clause: /* empty */ - { $$.empty(); } + { $$.clear(); } | limit_clause { $$= $1; } ; @@ -12286,19 +12561,85 @@ limit_clause: } | LIMIT ROWS_SYM EXAMINED_SYM limit_rows_option { - $$.select_limit= 0; - $$.offset_limit= 0; - $$.explicit_limit= 0; + $$.clear(); Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT); } + | fetch_first_clause + { + $$= $1; + if (!$$.select_limit || + !$$.select_limit->basic_const_item() || + $$.select_limit->val_int() > 0) + Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT); + } + ; + +fetch_first_clause: + FETCH_SYM first_or_next row_or_rows only_or_with_ties + { + Item *one= new (thd->mem_root) Item_int(thd, (int32) 1); + if (unlikely(one == NULL)) + MYSQL_YYABORT; + $$.select_limit= one; + $$.offset_limit= 0; + $$.explicit_limit= true; + $$.with_ties= $4; + } + | OFFSET_SYM limit_option row_or_rows + FETCH_SYM first_or_next row_or_rows only_or_with_ties + { + Item *one= new (thd->mem_root) Item_int(thd, (int32) 1); + if (unlikely(one == NULL)) + MYSQL_YYABORT; + $$.select_limit= one; + $$.offset_limit= $2; + $$.explicit_limit= true; + $$.with_ties= $7; + } + | FETCH_SYM first_or_next limit_option row_or_rows only_or_with_ties + { + $$.select_limit= $3; + $$.offset_limit= 0; + $$.explicit_limit= true; + $$.with_ties= $5; + } + | OFFSET_SYM limit_option row_or_rows + FETCH_SYM first_or_next limit_option row_or_rows only_or_with_ties + { + $$.select_limit= $6; + $$.offset_limit= $2; + $$.explicit_limit= true; + $$.with_ties= $8; + } + | OFFSET_SYM limit_option row_or_rows + { + $$.select_limit= 0; + $$.offset_limit= $2; + 
$$.explicit_limit= true; + $$.with_ties= false; + } + ; + +first_or_next: + FIRST_SYM + | NEXT_SYM + ; + +row_or_rows: + ROW_SYM + | ROWS_SYM + ; + +only_or_with_ties: + ONLY_SYM { $$= 0; } + | WITH TIES_SYM { $$= 1; } ; + opt_global_limit_clause: opt_limit_clause { - Select->explicit_limit= $1.explicit_limit; - Select->select_limit= $1.select_limit; - Select->offset_limit= $1.offset_limit; + Select->limit_params= $1; } ; @@ -12306,20 +12647,23 @@ limit_options: limit_option { $$.select_limit= $1; - $$.offset_limit= 0; - $$.explicit_limit= 1; + $$.offset_limit= NULL; + $$.explicit_limit= true; + $$.with_ties= false; } | limit_option ',' limit_option { $$.select_limit= $3; $$.offset_limit= $1; - $$.explicit_limit= 1; + $$.explicit_limit= true; + $$.with_ties= false; } | limit_option OFFSET_SYM limit_option { $$.select_limit= $1; $$.offset_limit= $3; - $$.explicit_limit= 1; + $$.explicit_limit= true; + $$.with_ties= false; } ; @@ -12361,8 +12705,7 @@ limit_option: limit_rows_option: limit_option { - LEX *lex=Lex; - lex->limit_rows_examined= $1; + Lex->limit_rows_examined= $1; } ; @@ -12370,14 +12713,14 @@ delete_limit_clause: /* empty */ { LEX *lex=Lex; - lex->current_select->select_limit= 0; + lex->current_select->limit_params.select_limit= 0; } | LIMIT limit_option { SELECT_LEX *sel= Select; - sel->select_limit= $2; + sel->limit_params.select_limit= $2; Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT); - sel->explicit_limit= 1; + sel->limit_params.explicit_limit= 1; } | LIMIT ROWS_SYM EXAMINED_SYM { thd->parse_error(); MYSQL_YYABORT; } | LIMIT limit_option ROWS_SYM EXAMINED_SYM { thd->parse_error(); MYSQL_YYABORT; } @@ -12400,7 +12743,7 @@ order_limit_lock: if (!$$) YYABORT; $$->order_list= NULL; - $$->limit.empty(); + $$->limit.clear(); $$->lock= $1; } ; @@ -12443,6 +12786,7 @@ opt_procedure_or_into: } ; + order_or_limit: order_clause opt_limit_clause { @@ -12454,11 +12798,9 @@ order_or_limit: } | limit_clause { - Lex_order_limit_lock *op= $$= 
new(thd->mem_root) Lex_order_limit_lock; + $$= new(thd->mem_root) Lex_order_limit_lock; if (!$$) YYABORT; - op->order_list= NULL; - op->limit= $1; $$->order_list= NULL; $$->limit= $1; } @@ -12892,7 +13234,7 @@ insert: } insert_start insert_lock_option opt_ignore opt_into insert_table { - Select->set_lock_for_tables($4, true); + Select->set_lock_for_tables($4, true, false); } insert_field_spec opt_insert_update opt_returning stmt_end @@ -12909,7 +13251,7 @@ replace: } insert_start replace_lock_option opt_into insert_table { - Select->set_lock_for_tables($4, true); + Select->set_lock_for_tables($4, true, false); } insert_field_spec opt_returning stmt_end @@ -13166,10 +13508,8 @@ update_table_list: table_ident opt_use_partition for_portion_of_time_clause opt_table_alias_clause opt_key_definition { - SELECT_LEX *sel= Select; - sel->table_join_options= 0; if (!($$= Select->add_table_to_list(thd, $1, $4, - Select->get_table_join_options(), + 0, YYPS->m_lock_type, YYPS->m_mdl_type, Select->pop_index_hints(), @@ -13210,7 +13550,7 @@ update: be too pessimistic. We will decrease lock level if possible in mysql_multi_update(). */ - slex->set_lock_for_tables($3, slex->table_list.elements == 1); + slex->set_lock_for_tables($3, slex->table_list.elements == 1, false); } opt_where_clause opt_order_clause delete_limit_clause { @@ -14019,8 +14359,9 @@ wild_and_where: /* empty */ { $$= 0; } | LIKE remember_tok_start TEXT_STRING_sys { - Lex->wild= new (thd->mem_root) String($3.str, $3.length, - system_charset_info); + Lex->wild= new (thd->mem_root) String((const char*) $3.str, + $3.length, + system_charset_info); if (unlikely(Lex->wild == NULL)) MYSQL_YYABORT; $$= $2; @@ -14158,8 +14499,6 @@ opt_flush_lock: for (; tables; tables= tables->next_global) { tables->mdl_request.set_type(MDL_SHARED_NO_WRITE); - /* Don't try to flush views. */ - tables->required_type= TABLE_TYPE_NORMAL; /* Ignore temporary tables. 
*/ tables->open_type= OT_BASE_ONLY; } @@ -14694,9 +15033,9 @@ text_literal: text_string: TEXT_STRING_literal { - $$= new (thd->mem_root) String($1.str, - $1.length, - thd->variables.collation_connection); + $$= new (thd->mem_root) String((const char*) $1.str, + $1.length, + thd->variables.collation_connection); if (unlikely($$ == NULL)) MYSQL_YYABORT; } @@ -15195,7 +15534,7 @@ table_ident_opt_wild: table_ident_nodb: ident { - LEX_CSTRING db={(char*) any_db,3}; + LEX_CSTRING db= any_db; $$= new (thd->mem_root) Table_ident(thd, &db, &$1, 0); if (unlikely($$ == NULL)) MYSQL_YYABORT; @@ -15371,6 +15710,7 @@ keyword_table_alias: | keyword_verb_clause | FUNCTION_SYM | EXCEPTION_ORACLE_SYM + | IGNORED_SYM ; /* Keyword that we allow for identifiers (except SP labels) */ @@ -15387,6 +15727,7 @@ keyword_ident: | FUNCTION_SYM | WINDOW_SYM | EXCEPTION_ORACLE_SYM + | IGNORED_SYM ; keyword_sysvar_name: @@ -15401,6 +15742,8 @@ keyword_sysvar_name: | FUNCTION_SYM | WINDOW_SYM | EXCEPTION_ORACLE_SYM + | IGNORED_SYM + | OFFSET_SYM ; keyword_set_usual_case: @@ -15415,6 +15758,8 @@ keyword_set_usual_case: | FUNCTION_SYM | WINDOW_SYM | EXCEPTION_ORACLE_SYM + | IGNORED_SYM + | OFFSET_SYM ; non_reserved_keyword_udt: @@ -15425,6 +15770,7 @@ non_reserved_keyword_udt: | keyword_sp_block_section | keyword_sysvar_type | keyword_sp_var_and_label + | OFFSET_SYM ; /* @@ -15618,6 +15964,7 @@ keyword_sp_var_and_label: ACTION | ACCOUNT_SYM | ADDDATE_SYM + | ADD_MONTHS_SYM | ADMIN_SYM | AFTER_SYM | AGAINST @@ -15688,6 +16035,7 @@ keyword_sp_var_and_label: | DYNAMIC_SYM | ELSEIF_ORACLE_SYM | ELSIF_MARIADB_SYM + | EMPTY_SYM | ENDS_SYM | ENGINE_SYM | ENGINES_SYM @@ -15738,6 +16086,7 @@ keyword_sp_var_and_label: | ISSUER_SYM | INSERT_METHOD | INVISIBLE_SYM + | JSON_TABLE_SYM | KEY_BLOCK_SIZE | LAST_VALUE | LAST_SYM @@ -15746,6 +16095,7 @@ keyword_sp_var_and_label: | LESS_SYM | LEVEL_SYM | LIST_SYM + | LOCKED_SYM | LOCKS_SYM | LOGFILE_SYM | LOGS_SYM @@ -15783,6 +16133,9 @@ keyword_sp_var_and_label: | 
MICROSECOND_SYM | MIGRATE_SYM | MINUTE_SYM +%ifdef MARIADB + | MINUS_ORACLE_SYM +%endif | MINVALUE_SYM | MIN_ROWS | MODIFY_SYM @@ -15793,6 +16146,7 @@ keyword_sp_var_and_label: | MYSQL_SYM | MYSQL_ERRNO_SYM | NAME_SYM + | NESTED_SYM | NEVER_SYM | NEXT_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2 | NEXTVAL_SYM @@ -15807,11 +16161,11 @@ keyword_sp_var_and_label: | NONE_SYM | NOTFOUND_SYM | OF_SYM - | OFFSET_SYM | OLD_PASSWORD_SYM | ONE_SYM | ONLINE_SYM | ONLY_SYM + | ORDINALITY_SYM | OVERLAPS_SYM | PACKAGE_MARIADB_SYM | PACK_KEYS_SYM @@ -15819,6 +16173,7 @@ keyword_sp_var_and_label: | PARTIAL | PARTITIONING_SYM | PARTITIONS_SYM + | PATH_SYM | PERSISTENT_SYM | PHASE_SYM | PLUGIN_SYM @@ -15865,6 +16220,9 @@ keyword_sp_var_and_label: | ROWTYPE_MARIADB_SYM | ROW_COUNT_SYM | ROW_FORMAT_SYM +%ifdef MARIADB + | ROWNUM_SYM +%endif | RTREE_SYM | SCHEDULE_SYM | SCHEMA_NAME_SYM @@ -15874,6 +16232,7 @@ keyword_sp_var_and_label: | SETVAL_SYM | SIMPLE_SYM | SHARE_SYM + | SKIP_SYM | SLAVE_POS_SYM | SLOW | SNAPSHOT_SYM @@ -15899,6 +16258,9 @@ keyword_sp_var_and_label: | SUSPEND_SYM | SWAPS_SYM | SWITCHES_SYM +%ifdef MARIADB + | SYSDATE +%endif | SYSTEM | SYSTEM_TIME_SYM | TABLE_NAME_SYM @@ -16031,6 +16393,7 @@ reserved_keyword_udt_not_param_type: | IF_SYM | IGNORE_DOMAIN_IDS_SYM | IGNORE_SYM + | IGNORED_SYM | INDEX_SYM | INFILE | INNER_SYM @@ -16065,6 +16428,9 @@ reserved_keyword_udt_not_param_type: | MINUTE_MICROSECOND_SYM | MINUTE_SECOND_SYM | MIN_SYM +%ifdef ORACLE + | MINUS_ORACLE_SYM +%endif | MODIFIES_SYM | MOD_SYM | NATURAL @@ -16147,7 +16513,6 @@ reserved_keyword_udt_not_param_type: | STRAIGHT_JOIN | SUBSTRING | SUM_SYM - | SYSDATE | TABLE_REF_PRIORITY | TABLE_SYM | TERMINATED @@ -16511,7 +16876,7 @@ option_value_no_option_type: if (unlikely(!my_charset_same(cs2, cs3))) { my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), - cs3->name, cs2->csname); + cs3->coll_name.str, cs2->cs_name.str); MYSQL_YYABORT; } set_var_collation_client *var; @@ -16754,7 +17119,7 @@ table_lock: table_ident 
opt_table_alias_clause lock_option { thr_lock_type lock_type= (thr_lock_type) $3; - bool lock_for_write= (lock_type >= TL_WRITE_ALLOW_WRITE); + bool lock_for_write= (lock_type >= TL_FIRST_WRITE); ulong table_options= lock_for_write ? TL_OPTION_UPDATING : 0; enum_mdl_type mdl_type= !lock_for_write ? MDL_SHARED_READ @@ -16832,6 +17197,7 @@ handler_tail: | table_ident_nodb READ_SYM { LEX *lex=Lex; + SELECT_LEX *select= Select; if (unlikely(lex->sphead)) my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER")); lex->clause_that_disallows_subselect= "HANDLER..READ"; @@ -16840,8 +17206,8 @@ handler_tail: Item *one= new (thd->mem_root) Item_int(thd, (int32) 1); if (unlikely(one == NULL)) MYSQL_YYABORT; - lex->current_select->select_limit= one; - lex->current_select->offset_limit= 0; + select->limit_params.select_limit= one; + select->limit_params.offset_limit= 0; lex->limit_rows_examined= 0; if (!lex->current_select->add_table_to_list(thd, $1, 0, 0)) MYSQL_YYABORT; @@ -16849,14 +17215,15 @@ handler_tail: handler_read_or_scan opt_where_clause opt_global_limit_clause { LEX *lex=Lex; + SELECT_LEX *select= Select; lex->clause_that_disallows_subselect= NULL; - if (!lex->current_select->explicit_limit) + if (!lex->current_select->limit_params.explicit_limit) { Item *one= new (thd->mem_root) Item_int(thd, (int32) 1); if (one == NULL) MYSQL_YYABORT; - lex->current_select->select_limit= one; - lex->current_select->offset_limit= 0; + select->limit_params.select_limit= one; + select->limit_params.offset_limit= 0; lex->limit_rows_examined= 0; } /* Stored functions are not supported for HANDLER READ. 
*/ @@ -17925,6 +18292,7 @@ keyword_label: | keyword_sysvar_type | FUNCTION_SYM | EXCEPTION_ORACLE_SYM + | IGNORED_SYM ; keyword_sp_decl: @@ -17939,6 +18307,7 @@ keyword_sp_decl: | keyword_verb_clause | FUNCTION_SYM | WINDOW_SYM + | IGNORED_SYM ; opt_truncate_table_storage_clause: @@ -18357,6 +18726,7 @@ keyword_label: | FUNCTION_SYM | COMPRESSED_SYM | EXCEPTION_ORACLE_SYM + | IGNORED_SYM ; keyword_sp_decl: @@ -18367,6 +18737,7 @@ keyword_sp_decl: | keyword_sysvar_type | keyword_verb_clause | WINDOW_SYM + | IGNORED_SYM ; opt_truncate_table_storage_clause: diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 660ba248597..416aea589dd 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -365,7 +365,7 @@ const char *set_to_string(THD *thd, LEX_CSTRING *result, ulonglong set, for (uint i= 0; set; i++, set >>= 1) if (set & 1) { - tmp.append(lib[i]); + tmp.append(lib[i], strlen(lib[i])); tmp.append(','); } @@ -396,8 +396,11 @@ const char *flagset_to_string(THD *thd, LEX_CSTRING *result, ulonglong set, // note that the last element is always "default", and it's ignored below for (uint i= 0; lib[i+1]; i++, set >>= 1) { - tmp.append(lib[i]); - tmp.append(set & 1 ? 
"=on," : "=off,"); + tmp.append(lib[i], strlen(lib[i])); + if (set & 1) + tmp.append(STRING_WITH_LEN("=on,")); + else + tmp.append(STRING_WITH_LEN("=off,")); } result->str= thd->strmake(tmp.ptr(), tmp.length()-1); diff --git a/sql/structs.h b/sql/structs.h index 17c6ea9d2c3..b49b5ffcdfd 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -36,6 +36,9 @@ class Index_statistics; class THD; +/* Array index type for table.field[] */ +typedef uint16 field_index_t; + typedef struct st_date_time_format { uchar positions[8]; char time_separator; /* Separator between hour and minute */ @@ -82,10 +85,10 @@ typedef struct st_key_part_info { /* Info about a key part */ */ uint store_length; uint16 key_type; - uint16 fieldnr; /* Fieldnr begins counting from 1 */ + field_index_t fieldnr; /* Fieldnr begins counting from 1 */ uint16 key_part_flag; /* 0 or HA_REVERSE_SORT */ uint8 type; - uint8 null_bit; /* Position to null_bit */ + uint8 null_bit; /* Position to null_bit */ } KEY_PART_INFO ; class engine_option_value; @@ -165,6 +168,10 @@ typedef struct st_key { double actual_rec_per_key(uint i); bool without_overlaps; + /* + TRUE if index needs to be ignored + */ + bool is_ignored; } KEY; @@ -173,6 +180,7 @@ struct st_join_table; typedef struct st_reginfo { /* Extra info about reg */ struct st_join_table *join_tab; /* Used by SELECT() */ enum thr_lock_type lock_type; /* How database is used */ + bool skip_locked; bool not_exists_optimize; /* TRUE <=> range optimizer found that there is no rows satisfying @@ -529,7 +537,8 @@ public: OPT_OR_REPLACE= 16, // CREATE OR REPLACE TABLE OPT_OR_REPLACE_SLAVE_GENERATED= 32,// REPLACE was added on slave, it was // not in the original query on master. - OPT_IF_EXISTS= 64 + OPT_IF_EXISTS= 64, + OPT_CREATE_SELECT= 128 // CREATE ... 
SELECT }; private: @@ -557,6 +566,8 @@ public: { return m_options & OPT_OR_REPLACE_SLAVE_GENERATED; } bool like() const { return m_options & OPT_LIKE; } bool if_exists() const { return m_options & OPT_IF_EXISTS; } + bool is_create_select() const { return m_options & OPT_CREATE_SELECT; } + void add(const DDL_options_st::Options other) { m_options= (Options) ((uint) m_options | (uint) other); @@ -803,13 +814,14 @@ public: uint defined_lock:1; uint update_lock:1; uint defined_timeout:1; + uint skip_locked:1; }; ulong timeout; void empty() { - defined_lock= update_lock= defined_timeout= FALSE; + defined_lock= update_lock= defined_timeout= skip_locked= FALSE; timeout= 0; } void set_to(st_select_lex *sel); @@ -818,13 +830,17 @@ public: class Lex_select_limit { public: + /* explicit LIMIT clause was used */ bool explicit_limit; + bool with_ties; Item *select_limit, *offset_limit; - void empty() + void clear() { - explicit_limit= FALSE; - select_limit= offset_limit= NULL; + explicit_limit= FALSE; // No explicit limit given by user + with_ties= FALSE; // No use of WITH TIES operator + select_limit= NULL; // denotes the default limit = HA_POS_ERROR + offset_limit= NULL; // denotes the default offset = 0 } }; diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 02e6f612f21..f844c8c6912 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -759,9 +759,10 @@ static bool check_charset(sys_var *self, THD *thd, set_var *var) else { ErrConvString err(res); /* Get utf8 '\0' terminated string */ + myf utf8_flag= thd->get_utf8_flag(); if (!(var->save_result.ptr= get_charset_by_csname(err.ptr(), - MY_CS_PRIMARY, - MYF(0))) && + MY_CS_PRIMARY, + MYF(utf8_flag))) && !(var->save_result.ptr= get_old_charset_by_name(err.ptr()))) { my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), err.ptr()); @@ -788,12 +789,12 @@ static Sys_var_struct Sys_character_set_system( "character_set_system", "The character set used by the server " "for storing identifiers", READ_ONLY GLOBAL_VAR(system_charset_info), 
NO_CMD_LINE, - offsetof(CHARSET_INFO, csname), DEFAULT(0)); + offsetof(CHARSET_INFO, cs_name.str), DEFAULT(0)); static Sys_var_struct Sys_character_set_server( "character_set_server", "The default character set", SESSION_VAR(collation_server), NO_CMD_LINE, - offsetof(CHARSET_INFO, csname), DEFAULT(&default_charset_info), + offsetof(CHARSET_INFO, cs_name.str), DEFAULT(&default_charset_info), NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_charset_not_null)); static bool check_charset_db(sys_var *self, THD *thd, set_var *var) @@ -808,7 +809,7 @@ static Sys_var_struct Sys_character_set_database( "character_set_database", "The character set used by the default database", SESSION_VAR(collation_database), NO_CMD_LINE, - offsetof(CHARSET_INFO, csname), DEFAULT(&default_charset_info), + offsetof(CHARSET_INFO, cs_name.str), DEFAULT(&default_charset_info), NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_charset_db)); static bool check_cs_client(sys_var *self, THD *thd, set_var *var) @@ -832,7 +833,7 @@ static Sys_var_struct Sys_character_set_client( "character_set_client", "The character set for statements " "that arrive from the client", NO_SET_STMT SESSION_VAR(character_set_client), NO_CMD_LINE, - offsetof(CHARSET_INFO, csname), DEFAULT(&default_charset_info), + offsetof(CHARSET_INFO, cs_name.str), DEFAULT(&default_charset_info), NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_cs_client), ON_UPDATE(fix_thd_charset)); // for check changing @@ -843,7 +844,7 @@ static Sys_var_struct Sys_character_set_connection( "literals that do not have a character set introducer and for " "number-to-string conversion", NO_SET_STMT SESSION_VAR(collation_connection), NO_CMD_LINE, - offsetof(CHARSET_INFO, csname), DEFAULT(&default_charset_info), + offsetof(CHARSET_INFO, cs_name.str), DEFAULT(&default_charset_info), NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_charset_not_null), ON_UPDATE(fix_thd_charset)); // for check changing @@ -853,7 +854,7 @@ static Sys_var_struct Sys_character_set_results( 
"character_set_results", "The character set used for returning " "query results to the client", SESSION_VAR(character_set_results), NO_CMD_LINE, - offsetof(CHARSET_INFO, csname), DEFAULT(&default_charset_info), + offsetof(CHARSET_INFO, cs_name.str), DEFAULT(&default_charset_info), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_charset)); // for check changing export sys_var *Sys_character_set_results_ptr= &Sys_character_set_results; @@ -861,7 +862,7 @@ export sys_var *Sys_character_set_results_ptr= &Sys_character_set_results; static Sys_var_struct Sys_character_set_filesystem( "character_set_filesystem", "The filesystem character set", NO_SET_STMT SESSION_VAR(character_set_filesystem), NO_CMD_LINE, - offsetof(CHARSET_INFO, csname), DEFAULT(&character_set_filesystem), + offsetof(CHARSET_INFO, cs_name.str), DEFAULT(&character_set_filesystem), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_charset_not_null), ON_UPDATE(fix_thd_charset)); @@ -875,7 +876,7 @@ static bool check_collation_not_null(sys_var *self, THD *thd, set_var *var) { if (!var->value) return false; - + myf utf8_flag= thd->get_utf8_flag(); char buff[STRING_BUFFER_USUAL_SIZE]; if (var->value->result_type() == STRING_RESULT) { @@ -885,7 +886,7 @@ static bool check_collation_not_null(sys_var *self, THD *thd, set_var *var) else { ErrConvString err(res); /* Get utf8 '\0'-terminated string */ - if (!(var->save_result.ptr= get_charset_by_name(err.ptr(), MYF(0)))) + if (!(var->save_result.ptr= get_charset_by_name(err.ptr(), MYF(utf8_flag)))) { my_error(ER_UNKNOWN_COLLATION, MYF(0), err.ptr()); return true; @@ -907,7 +908,7 @@ static Sys_var_struct Sys_collation_connection( "collation_connection", "The collation of the connection " "character set", NO_SET_STMT SESSION_VAR(collation_connection), NO_CMD_LINE, - offsetof(CHARSET_INFO, name), DEFAULT(&default_charset_info), + offsetof(CHARSET_INFO, coll_name.str), DEFAULT(&default_charset_info), NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_collation_not_null), 
ON_UPDATE(fix_thd_charset)); @@ -923,13 +924,13 @@ static Sys_var_struct Sys_collation_database( "collation_database", "The collation of the database " "character set", SESSION_VAR(collation_database), NO_CMD_LINE, - offsetof(CHARSET_INFO, name), DEFAULT(&default_charset_info), + offsetof(CHARSET_INFO, coll_name.str), DEFAULT(&default_charset_info), NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_collation_db)); static Sys_var_struct Sys_collation_server( "collation_server", "The server default collation", SESSION_VAR(collation_server), NO_CMD_LINE, - offsetof(CHARSET_INFO, name), DEFAULT(&default_charset_info), + offsetof(CHARSET_INFO, coll_name.str), DEFAULT(&default_charset_info), NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_collation_not_null)); static Sys_var_uint Sys_column_compression_threshold( @@ -1156,14 +1157,46 @@ static Sys_var_enum Sys_event_scheduler( ON_CHECK(event_scheduler_check), ON_UPDATE(event_scheduler_update)); #endif -static Sys_var_on_access_global<Sys_var_ulong, +static bool copy_to_expire_logs_days(sys_var *, THD *, + enum_var_type type) +{ + expire_logs_days= binlog_expire_logs_seconds / (double)(24 * 60 * 60); + return false; +} + +static bool copy_to_binlog_expire_logs_seconds(sys_var *, THD *, + enum_var_type type) +{ + binlog_expire_logs_seconds= (ulong)(expire_logs_days * 24 * 60 * 60); + return false; +} + +static Sys_var_on_access_global<Sys_var_double, PRIV_SET_SYSTEM_GLOBAL_VAR_EXPIRE_LOGS_DAYS> Sys_expire_logs_days( "expire_logs_days", "If non-zero, binary logs will be purged after expire_logs_days " - "days; possible purges happen at startup and at binary log rotation", + "days; It and binlog_expire_logs_seconds are linked, such that " + "changes in one are converted into the other, presentable as a " + "decimal value with 1/1000000 of the day precision; possible " + "purges happen at startup and at binary log rotation", GLOBAL_VAR(expire_logs_days), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 99), DEFAULT(0), BLOCK_SIZE(1)); + 
CMD_LINE(REQUIRED_ARG, OPT_EXPIRE_LOGS_DAYS), VALID_RANGE(0, 99), + DEFAULT(0), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), + ON_UPDATE(copy_to_binlog_expire_logs_seconds)); + +static Sys_var_on_access_global<Sys_var_ulong, + PRIV_SET_SYSTEM_GLOBAL_VAR_EXPIRE_LOGS_DAYS> +Sys_binlog_expire_logs_seconds( + "binlog_expire_logs_seconds", + "If non-zero, binary logs will be purged after " + "binlog_expire_logs_seconds seconds; It and expire_logs_days are " + "linked, such that changes in one are converted into the other. " + "Possible purges happen at startup and at binary log rotation.", + GLOBAL_VAR(binlog_expire_logs_seconds), + CMD_LINE(REQUIRED_ARG, OPT_BINLOG_EXPIRE_LOGS_SECONDS), + VALID_RANGE(0, 8553600), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD, + NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(copy_to_expire_logs_days)); static Sys_var_mybool Sys_flush( "flush", "Flush MyISAM tables to disk between SQL commands", @@ -1998,7 +2031,10 @@ Sys_gtid_strict_mode( "gtid_strict_mode", "Enforce strict seq_no ordering of events in the binary log. Slave " "stops with an error if it encounters an event that would cause it to " - "generate an out-of-order binlog if executed.", + "generate an out-of-order binlog if executed. 
" + "When ON the same server-id semisync-replicated transactions that " + "duplicate exising ones in binlog are ignored without error " + "and slave interruption.", GLOBAL_VAR(opt_gtid_strict_mode), CMD_LINE(OPT_ARG), DEFAULT(FALSE)); @@ -2464,7 +2500,7 @@ static Sys_var_ulong Sys_max_recursive_iterations( "max_recursive_iterations", "Maximum number of iterations when executing recursive queries", SESSION_VAR(max_recursive_iterations), CMD_LINE(OPT_ARG), - VALID_RANGE(0, UINT_MAX), DEFAULT(UINT_MAX), BLOCK_SIZE(1)); + VALID_RANGE(0, UINT_MAX), DEFAULT(1000), BLOCK_SIZE(1)); static Sys_var_ulong Sys_max_sort_length( "max_sort_length", @@ -3005,7 +3041,7 @@ static Sys_var_ulonglong Sys_thread_stack( static Sys_var_charptr_fscs Sys_tmpdir( "tmpdir", "Path for temporary files. Several paths may " "be specified, separated by a " -#if defined(__WIN__) +#if defined(_WIN32) "semicolon (;)" #else "colon (:)" @@ -3716,6 +3752,7 @@ static const char *old_mode_names[]= "NO_DUP_KEY_WARNINGS_WITH_IGNORE", "NO_PROGRESS_INFO", "ZERO_DATE_TIME_CAST", + "UTF8_IS_UTF8MB3", 0 }; @@ -3727,7 +3764,7 @@ static Sys_var_set Sys_old_behavior( "old_mode", "Used to emulate old behavior from earlier MariaDB or MySQL versions", SESSION_VAR(old_behavior), CMD_LINE(REQUIRED_ARG), - old_mode_names, DEFAULT(0)); + old_mode_names, DEFAULT(OLD_MODE_UTF8_IS_UTF8MB3)); #if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) #define SSL_OPT(X) CMD_LINE(REQUIRED_ARG,X) @@ -5968,6 +6005,25 @@ static Sys_var_uint Sys_wsrep_sync_wait( NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(wsrep_sync_wait_update)); +static const char *wsrep_mode_names[]= +{ + "STRICT_REPLICATION", + "BINLOG_ROW_FORMAT_ONLY", + "REQUIRED_PRIMARY_KEY", + "REPLICATE_MYISAM", + "REPLICATE_ARIA", + "DISALLOW_LOCAL_GTID", + NullS +}; +static Sys_var_set Sys_wsrep_mode( + "wsrep_mode", + "Set of WSREP features that are enabled.", + GLOBAL_VAR(wsrep_mode), CMD_LINE(REQUIRED_ARG), + wsrep_mode_names, + DEFAULT(0), + NO_MUTEX_GUARD, 
NOT_IN_BINLOG, + ON_CHECK(wsrep_mode_check)); + static const char *wsrep_OSU_method_names[]= { "TOI", "RSU", NullS }; static Sys_var_enum Sys_wsrep_OSU_method( "wsrep_OSU_method", "Method for Online Schema Upgrade", @@ -5984,12 +6040,14 @@ static Sys_var_mybool Sys_wsrep_desync ( ON_UPDATE(wsrep_desync_update)); static Sys_var_mybool Sys_wsrep_strict_ddl ( - "wsrep_strict_ddl", "If set, reject DDL on affected tables not supporting Galera replication", + "wsrep_strict_ddl", + "If set, reject DDL on affected tables not supporting Galera replication", GLOBAL_VAR(wsrep_strict_ddl), CMD_LINE(OPT_ARG), DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), - ON_UPDATE(0)); + ON_UPDATE(wsrep_strict_ddl_update), + DEPRECATED("'@@wsrep_mode=STRICT_REPLICATION'")); // since 10.6.0 static const char *wsrep_reject_queries_names[]= { "NONE", "ALL", "ALL_KILL", NullS }; static Sys_var_enum Sys_wsrep_reject_queries( @@ -6013,7 +6071,10 @@ static Sys_var_mybool Sys_wsrep_recover_datadir( static Sys_var_mybool Sys_wsrep_replicate_myisam( "wsrep_replicate_myisam", "To enable myisam replication", - GLOBAL_VAR(wsrep_replicate_myisam), CMD_LINE(OPT_ARG), DEFAULT(FALSE)); + GLOBAL_VAR(wsrep_replicate_myisam), CMD_LINE(OPT_ARG), DEFAULT(FALSE), + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), + ON_UPDATE(wsrep_replicate_myisam_update), + DEPRECATED("'@@wsrep_mode=REPLICATE_MYISAM'")); // since 10.6.0 static Sys_var_mybool Sys_wsrep_log_conflicts( "wsrep_log_conflicts", "To log multi-master conflicts", diff --git a/sql/table.cc b/sql/table.cc index 7334f0143e6..345b51646d8 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -50,6 +50,17 @@ #define MYSQL57_GENERATED_FIELD 128 #define MYSQL57_GCOL_HEADER_SIZE 4 +bool TABLE::init_expr_arena(MEM_ROOT *mem_root) +{ + /* + We need to use CONVENTIONAL_EXECUTION here to ensure that + any new items created by fix_fields() are not reverted. 
+ */ + expr_arena= new (alloc_root(mem_root, sizeof(Query_arena))) + Query_arena(mem_root, Query_arena::STMT_CONVENTIONAL_EXECUTION); + return expr_arena == NULL; +} + struct extra2_fields { LEX_CUSTRING version; @@ -61,6 +72,7 @@ struct extra2_fields LEX_CUSTRING application_period; LEX_CUSTRING field_data_type_info; LEX_CUSTRING without_overlaps; + LEX_CUSTRING index_flags; void reset() { bzero((void*)this, sizeof(*this)); } }; @@ -101,7 +113,8 @@ static bool fix_type_pointers(const char ***typelib_value_names, TYPELIB *point_to_type, uint types, char *names, size_t names_length); -static uint find_field(Field **fields, uchar *record, uint start, uint length); +static field_index_t find_field(Field **fields, uchar *record, uint start, + uint length); inline bool is_system_table_name(const char *name, size_t length); @@ -194,14 +207,14 @@ View_creation_ctx * View_creation_ctx::create(THD *thd, /* Resolve cs names. Throw a warning if there is unknown cs name. */ bool invalid_creation_ctx; - + myf utf8_flag= thd->get_utf8_flag(); invalid_creation_ctx= resolve_charset(view->view_client_cs_name.str, system_charset_info, - &ctx->m_client_cs); + &ctx->m_client_cs, MYF(utf8_flag)); invalid_creation_ctx= resolve_collation(view->view_connection_cl_name.str, system_charset_info, - &ctx->m_connection_cl) || + &ctx->m_connection_cl, MYF(utf8_flag)) || invalid_creation_ctx; if (invalid_creation_ctx) @@ -1168,14 +1181,8 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table, table->s->table_check_constraints * sizeof(Virtual_column_info*)); DBUG_ASSERT(table->expr_arena == NULL); - /* - We need to use CONVENTIONAL_EXECUTION here to ensure that - any new items created by fix_fields() are not reverted. 
- */ - table->expr_arena= new (alloc_root(mem_root, sizeof(Query_arena))) - Query_arena(mem_root, - Query_arena::STMT_CONVENTIONAL_EXECUTION); - if (!table->expr_arena) + + if (table->init_expr_arena(mem_root)) DBUG_RETURN(1); thd->set_n_backup_active_arena(table->expr_arena, &backup_arena); @@ -1440,6 +1447,35 @@ void TABLE_SHARE::set_overlapped_keys() } +/* + @brief + Set of indexes that are marked as IGNORE. +*/ + +void TABLE_SHARE::set_ignored_indexes() +{ + KEY *keyinfo= key_info; + for (uint i= 0; i < keys; i++, keyinfo++) + { + if (keyinfo->is_ignored) + ignored_indexes.set_bit(i); + } +} + + +/* + @brief + Set of indexes that the optimizer may use when creating an execution plan. +*/ + +key_map TABLE_SHARE::usable_indexes(THD *thd) +{ + key_map usable_indexes(keys_in_use); + usable_indexes.subtract(ignored_indexes); + return usable_indexes; +} + + bool Item_field::check_index_dependence(void *arg) { TABLE *table= (TABLE *)arg; @@ -1592,6 +1628,9 @@ bool read_extra2(const uchar *frm_image, size_t len, extra2_fields *fields) case EXTRA2_FIELD_DATA_TYPE_INFO: fail= read_extra2_section_once(extra2, length, &fields->field_data_type_info); break; + case EXTRA2_INDEX_FLAGS: + fail= read_extra2_section_once(extra2, length, &fields->index_flags); + break; default: /* abort frm parsing if it's an unknown but important extra2 value */ if (type >= EXTRA2_ENGINE_IMPORTANT) @@ -1751,6 +1790,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, Virtual_column_info **table_check_constraints; bool *interval_unescaped= NULL; extra2_fields extra2; + bool extra_index_flags_present= FALSE; DBUG_ENTER("TABLE_SHARE::init_from_binary_frm_image"); keyinfo= &first_keyinfo; @@ -1905,9 +1945,13 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, share->key_parts= key_parts= disk_buff[1]; } share->keys_for_keyread.init(0); + share->ignored_indexes.init(0); share->keys_in_use.init(keys); ext_key_parts= key_parts; + if (extra2.index_flags.str && 
extra2.index_flags.length != keys) + goto err; + len= (uint) uint2korr(disk_buff+4); share->reclength = uint2korr(frm_image+16); @@ -2103,9 +2147,26 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, } share->key_block_size= uint2korr(frm_image+62); keyinfo= share->key_info; + + + if (extra2.index_flags.str) + extra_index_flags_present= TRUE; + for (uint i= 0; i < share->keys; i++, keyinfo++) + { + if (extra_index_flags_present) + { + uchar flags= *extra2.index_flags.str++; + keyinfo->is_ignored= (flags & EXTRA2_IGNORED_KEY); + } + else + keyinfo->is_ignored= FALSE; + if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH) hash_fields++; + } + + share->set_ignored_indexes(); #ifdef WITH_PARTITION_STORAGE_ENGINE if (par_image && plugin_data(se_plugin, handlerton*) == partition_hton) @@ -2532,8 +2593,11 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (!f_is_blob(attr.pack_flag)) { // 3.23 or 4.0 string - if (!(attr.charset= get_charset_by_csname(share->table_charset->csname, - MY_CS_BINSORT, MYF(0)))) + myf utf8_flag= thd->get_utf8_flag(); + if (!(attr.charset= get_charset_by_csname(share->table_charset-> + cs_name.str, + MY_CS_BINSORT, + MYF(utf8_flag)))) attr.charset= &my_charset_bin; } } @@ -2766,8 +2830,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, uint add_first_key_parts= 0; longlong ha_option= handler_file->ha_table_flags(); keyinfo= share->key_info; - uint primary_key= my_strcasecmp(system_charset_info, share->keynames.type_names[0], - primary_key_name) ? MAX_KEY : 0; + uint primary_key= my_strcasecmp(system_charset_info, + share->keynames.type_names[0], + primary_key_name.str) ? 
MAX_KEY : 0; KEY* key_first_info= NULL; if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME && @@ -2805,6 +2870,33 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, } } + /* + Make sure that the primary key is not marked as IGNORE + This can happen in the case + 1) when IGNORE is mentioned in the Key specification + 2) When a unique NON-NULLABLE key is promted to a primary key. + The unqiue key could have been marked as IGNORE when there + was a primary key in the table. + + Eg: + CREATE TABLE t1(a INT NOT NULL, primary key(a), UNIQUE key1(a)) + so for this table when we try to IGNORE key1 + then we run: + ALTER TABLE t1 ALTER INDEX key1 IGNORE + this runs successsfully and key1 is marked as IGNORE. + + But lets say then we drop the primary key + ALTER TABLE t1 DROP PRIMARY + then the UNIQUE key will be promoted to become the primary key + but then the UNIQUE key cannot be marked as IGNORE, so an + error is thrown + */ + if (primary_key != MAX_KEY && keyinfo && keyinfo->is_ignored) + { + my_error(ER_PK_INDEX_CANT_BE_IGNORED, MYF(0)); + goto err; + } + if (share->use_ext_keys) { if (primary_key >= MAX_KEY) @@ -2942,10 +3034,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, { Field *field; if (new_field_pack_flag <= 1) - key_part->fieldnr= (uint16) find_field(share->field, - share->default_values, - (uint) key_part->offset, - (uint) key_part->length); + key_part->fieldnr= find_field(share->field, + share->default_values, + (uint) key_part->offset, + (uint) key_part->length); if (!key_part->fieldnr) goto err; @@ -3546,7 +3638,7 @@ bool Virtual_column_info::fix_session_expr(THD *thd) if (!need_refix()) return false; - DBUG_ASSERT(!expr->is_fixed()); + DBUG_ASSERT(!expr->fixed()); return fix_expr(thd); } @@ -3554,9 +3646,7 @@ bool Virtual_column_info::fix_session_expr(THD *thd) bool Virtual_column_info::cleanup_session_expr() { DBUG_ASSERT(need_refix()); - if (expr->walk(&Item::cleanup_excluding_fields_processor, 0, 0)) - 
return true; - return false; + return expr->walk(&Item::cleanup_excluding_fields_processor, 0, 0); } @@ -3621,7 +3711,7 @@ bool TABLE::vcol_fix_expr(THD *thd) return false; if (!thd->stmt_arena->is_conventional() && - vcol_refix_list.head()->expr->is_fixed()) + vcol_refix_list.head()->expr->fixed()) { /* NOTE: Under trigger we already have fixed expressions */ return false; @@ -3688,7 +3778,7 @@ bool Virtual_column_info::fix_and_check_expr(THD *thd, TABLE *table) DBUG_ASSERT(expr); /* NOTE: constants are fixed when constructed */ - if (expr->is_fixed()) + if (expr->fixed()) DBUG_RETURN(0); // nothing to do if (fix_expr(thd)) @@ -4067,6 +4157,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, } outparam->reginfo.lock_type= TL_UNLOCK; + outparam->reginfo.skip_locked= false; outparam->current_lock= F_UNLCK; records=0; if ((db_stat & HA_OPEN_KEYFILE) || (prgflag & DELAYED_OPEN)) @@ -4665,10 +4756,11 @@ fix_type_pointers(const char ***typelib_value_names, # field number +1 */ -static uint find_field(Field **fields, uchar *record, uint start, uint length) +static field_index_t find_field(Field **fields, uchar *record, uint start, + uint length) { Field **field; - uint i, pos; + field_index_t i, pos; pos= 0; for (field= fields, i=1 ; *field ; i++,field++) @@ -4876,7 +4968,7 @@ rename_file_ext(const char * from,const char * to,const char * ext) bool get_field(MEM_ROOT *mem, Field *field, String *res) { - char *to; + const char *to; StringBuffer<MAX_FIELD_WIDTH> str; bool rc; THD *thd= field->get_thd(); @@ -4989,7 +5081,7 @@ bool check_db_name(LEX_STRING *org_name) if (!name_length || name_length > NAME_LEN) return 1; - if (lower_case_table_names == 1 && name != any_db) + if (lower_case_table_names == 1 && name != any_db.str) { org_name->length= name_length= my_casedn_str(files_charset_info, name); if (check_for_path_chars) @@ -5240,7 +5332,7 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) error= TRUE; } else if 
(field_def->cset.str && - strcmp(field->charset()->csname, field_def->cset.str)) + strcmp(field->charset()->cs_name.str, field_def->cset.str)) { report_error(0, "Incorrect definition of table %s.%s: " "expected the type of column '%s' at position %d " @@ -5248,7 +5340,7 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) "character set '%s'.", table->s->db.str, table->alias.c_ptr(), field_def->name.str, i, field_def->cset.str, - field->charset()->csname); + field->charset()->cs_name.str); error= TRUE; } } @@ -5552,6 +5644,7 @@ void TABLE::init(THD *thd, TABLE_LIST *tl) reginfo.impossible_range= 0; reginfo.join_tab= NULL; reginfo.not_exists_optimize= FALSE; + reginfo.skip_locked= false; created= TRUE; cond_selectivity= 1.0; cond_selectivity_sampling_explain= NULL; @@ -5845,7 +5938,7 @@ bool TABLE_LIST::prep_where(THD *thd, Item **conds, if (where) { - if (where->is_fixed()) + if (where->fixed()) where->update_used_tables(); else if (where->fix_fields(thd, &where)) DBUG_RETURN(TRUE); @@ -6195,6 +6288,8 @@ int TABLE::verify_constraints(bool ignore_failure) { if (versioned() && !vers_end_field()->is_max()) return VIEW_CHECK_OK; + + StringBuffer<MAX_FIELD_WIDTH> field_error(system_charset_info); for (Virtual_column_info **chk= check_constraints ; *chk ; chk++) { /* @@ -6204,16 +6299,19 @@ int TABLE::verify_constraints(bool ignore_failure) if (((*chk)->expr->val_int() == 0 && !(*chk)->expr->null_value) || in_use->is_error()) { - StringBuffer<MAX_FIELD_WIDTH> field_error(system_charset_info); enum_vcol_info_type vcol_type= (*chk)->get_vcol_type(); DBUG_ASSERT(vcol_type == VCOL_CHECK_TABLE || vcol_type == VCOL_CHECK_FIELD); + + field_error.set_buffer_if_not_allocated(system_charset_info); + field_error.length(0); + if (vcol_type == VCOL_CHECK_FIELD) { - field_error.append(s->table_name.str); - field_error.append("."); + field_error.append(s->table_name); + field_error.append('.'); } - field_error.append((*chk)->name.str); + 
field_error.append((*chk)->name); my_error(ER_CONSTRAINT_FAILED, MYF(ignore_failure ? ME_WARNING : 0), field_error.c_ptr(), s->db.str, s->table_name.str); @@ -6791,8 +6889,8 @@ const char *Natural_join_column::safe_db_name() ensure consistency. An exception are I_S schema tables, which are inconsistent in this respect. */ - DBUG_ASSERT(!cmp(&table_ref->db, - &table_ref->table->s->db) || + DBUG_ASSERT(!cmp(&table_ref->db, &table_ref->table->s->db) || + table_ref->table_function || (table_ref->schema_table && is_infoschema_db(&table_ref->table->s->db)) || table_ref->is_materialized_derived()); @@ -6874,13 +6972,13 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref, ('mysql_schema_table' function). So we can return directly the field. This case happens only for 'show & where' commands. */ - DBUG_ASSERT(field && field->is_fixed()); + DBUG_ASSERT(field && field->fixed()); DBUG_RETURN(field); } DBUG_ASSERT(field); thd->lex->current_select->no_wrap_view_item= TRUE; - if (!field->is_fixed()) + if (!field->fixed()) { if (field->fix_fields(thd, field_ref)) { @@ -6907,7 +7005,7 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref, views/derived tables. */ if (view->table && view->table->maybe_null) - item->maybe_null= TRUE; + item->set_maybe_null(); /* Save item in case we will need to fall back to materialization. 
*/ view->used_items.push_front(item, thd->mem_root); /* @@ -7026,7 +7124,7 @@ const char *Field_iterator_table_ref::get_table_name() DBUG_ASSERT(!strcmp(table_ref->table_name.str, table_ref->table->s->table_name.str) || - table_ref->schema_table); + table_ref->schema_table || table_ref->table_function); return table_ref->table_name.str; } @@ -7045,7 +7143,8 @@ const char *Field_iterator_table_ref::get_db_name() */ DBUG_ASSERT(!cmp(&table_ref->db, &table_ref->table->s->db) || (table_ref->schema_table && - is_infoschema_db(&table_ref->table->s->db))); + is_infoschema_db(&table_ref->table->s->db)) || + table_ref->table_function); return table_ref->db.str; } @@ -8251,7 +8350,8 @@ bool TABLE::is_filled_at_execution() */ return MY_TEST(!pos_in_table_list || pos_in_table_list->jtbm_subselect || - pos_in_table_list->is_active_sjm()); + pos_in_table_list->is_active_sjm() || + pos_in_table_list->table_function); } @@ -8403,7 +8503,7 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl) { /* initialize the result variables */ tbl->keys_in_use_for_query= tbl->keys_in_use_for_group_by= - tbl->keys_in_use_for_order_by= tbl->s->keys_in_use; + tbl->keys_in_use_for_order_by= tbl->s->usable_indexes(tbl->in_use); /* index hint list processing */ if (index_hints) @@ -8457,7 +8557,8 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl) */ if (tbl->s->keynames.type_names == 0 || (pos= find_type(&tbl->s->keynames, hint->key_name.str, - hint->key_name.length, 1)) <= 0) + hint->key_name.length, 1)) <= 0 || + (tbl->s->key_info[pos - 1].is_ignored)) { my_error(ER_KEY_DOES_NOT_EXISTS, MYF(0), hint->key_name.str, alias.str); return 1; @@ -8566,7 +8667,7 @@ void init_mdl_requests(TABLE_LIST *table_list) for ( ; table_list ; table_list= table_list->next_global) MDL_REQUEST_INIT(&table_list->mdl_request, MDL_key::TABLE, table_list->db.str, table_list->table_name.str, - table_list->lock_type >= TL_WRITE_ALLOW_WRITE + table_list->lock_type >= TL_FIRST_WRITE ? 
MDL_SHARED_WRITE : MDL_SHARED_READ, MDL_TRANSACTION); } @@ -8625,25 +8726,6 @@ bool is_simple_order(ORDER *order) return TRUE; } -class Turn_errors_to_warnings_handler : public Internal_error_handler -{ -public: - Turn_errors_to_warnings_handler() {} - bool handle_condition(THD *thd, - uint sql_errno, - const char* sqlstate, - Sql_condition::enum_warning_level *level, - const char* msg, - Sql_condition ** cond_hdl) - { - *cond_hdl= NULL; - if (*level == Sql_condition::WARN_LEVEL_ERROR) - *level= Sql_condition::WARN_LEVEL_WARN; - return(0); - } -}; - - /* to satisfy marked_for_write_or_computed() Field's assert we temporarily mark field for write before storing the generated value in it @@ -9467,10 +9549,12 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view) { /* A subquery might be forced to be materialized due to a side-effect. */ if (!is_materialized_derived() && first_select->is_mergeable() && + (unit->outer_select() && !unit->outer_select()->with_rownum) && + (!thd->lex->with_rownum || + (!first_select->group_list.elements && + !first_select->order_list.elements)) && optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) && - !thd->lex->can_not_use_merged() && - !(thd->lex->sql_command == SQLCOM_UPDATE_MULTI || - thd->lex->sql_command == SQLCOM_DELETE_MULTI) && + !thd->lex->can_not_use_merged(1) && !is_recursive_with_table()) set_merged_derived(); else @@ -9622,6 +9706,8 @@ bool TABLE_LIST::change_refs_to_fields() */ thd->change_item_tree((Item **)&ref->ref, (Item*)(materialized_items + idx)); + /* Inform Item_direct_ref that what it points to has changed */ + ref->ref_changed(); } return FALSE; diff --git a/sql/table.h b/sql/table.h index d704f3ce05e..c3a6575ca14 100644 --- a/sql/table.h +++ b/sql/table.h @@ -76,6 +76,7 @@ class Range_rowid_filter_cost_info; class derived_handler; class Pushdown_derived; struct Name_resolution_context; +class Table_function_json_table; /* Used to identify NESTED_JOIN structures within a join (applicable only to @@ -380,7 
+381,7 @@ enum enum_vcol_update_mode /* Field visibility enums */ -enum field_visibility_t { +enum __attribute__((packed)) field_visibility_t { VISIBLE= 0, INVISIBLE_USER, /* automatically added by the server. Can be queried explicitly @@ -779,6 +780,10 @@ struct TABLE_SHARE Excludes keys disabled by ALTER TABLE ... DISABLE KEYS. */ key_map keys_in_use; + + /* The set of ignored indexes for a table. */ + key_map ignored_indexes; + key_map keys_for_keyread; ha_rows min_rows, max_rows; /* create information */ ulong avg_row_length; /* create information */ @@ -901,8 +906,8 @@ struct TABLE_SHARE */ struct period_info_t { - uint16 start_fieldno; - uint16 end_fieldno; + field_index_t start_fieldno; + field_index_t end_fieldno; Lex_ident name; Lex_ident constr_name; uint unique_keys; @@ -1151,7 +1156,7 @@ struct TABLE_SHARE bool write_frm_image(const uchar *frm_image, size_t frm_length); bool write_par_image(const uchar *par_image, size_t par_length); - /* Only used by tokudb */ + /* Only used by S3 */ bool write_frm_image(void) { return frm_image ? 
write_frm_image(frm_image->str, frm_image->length) : 0; } @@ -1165,6 +1170,8 @@ struct TABLE_SHARE void free_frm_image(const uchar *frm); void set_overlapped_keys(); + void set_ignored_indexes(); + key_map usable_indexes(THD *thd); }; /* not NULL, but cannot be dereferenced */ @@ -1627,6 +1634,8 @@ public: m_needs_reopen= value; } + bool init_expr_arena(MEM_ROOT *mem_root); + bool alloc_keys(uint key_count); bool check_tmp_key(uint key, uint key_parts, uint (*next_field_no) (uchar *), uchar *arg); @@ -1936,14 +1945,14 @@ class IS_table_read_plan; constexpr uint frm_fieldno_size= 2; /** number of bytes used by key position number in frm */ constexpr uint frm_keyno_size= 2; -static inline uint16 read_frm_fieldno(const uchar *data) +static inline field_index_t read_frm_fieldno(const uchar *data) { return uint2korr(data); } -static inline void store_frm_fieldno(uchar *data, uint16 fieldno) +static inline void store_frm_fieldno(uchar *data, field_index_t fieldno) { int2store(data, fieldno); } static inline uint16 read_frm_keyno(const uchar *data) { return uint2korr(data); } -static inline void store_frm_keyno(uchar *data, uint16 fieldno) -{ int2store(data, fieldno); } +static inline void store_frm_keyno(uchar *data, uint16 keyno) +{ int2store(data, keyno); } static inline size_t extra2_str_size(size_t len) { return (len > 255 ? 3 : 1) + len; } @@ -2186,7 +2195,7 @@ struct TABLE_LIST enum thr_lock_type lock_type_arg) { enum enum_mdl_type mdl_type; - if (lock_type_arg >= TL_WRITE_ALLOW_WRITE) + if (lock_type_arg >= TL_FIRST_WRITE) mdl_type= MDL_SHARED_WRITE; else if (lock_type_arg == TL_READ_NO_INSERT) mdl_type= MDL_SHARED_NO_WRITE; @@ -2201,7 +2210,7 @@ struct TABLE_LIST table_name= *table_name_arg; alias= (alias_arg ? 
*alias_arg : *table_name_arg); lock_type= lock_type_arg; - updating= lock_type >= TL_WRITE_ALLOW_WRITE; + updating= lock_type >= TL_FIRST_WRITE; MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db.str, table_name.str, mdl_type, MDL_TRANSACTION); } @@ -2235,7 +2244,7 @@ struct TABLE_LIST belong_to_view= belong_to_view_arg; trg_event_map= trg_event_map_arg; /* MDL is enough for read-only FK checks, we don't need the table */ - if (prelocking_type == PRELOCK_FK && lock_type < TL_WRITE_ALLOW_WRITE) + if (prelocking_type == PRELOCK_FK && lock_type < TL_FIRST_WRITE) open_strategy= OPEN_STUB; **last_ptr= this; @@ -2260,6 +2269,7 @@ struct TABLE_LIST const char *option; /* Used by cache index */ Item *on_expr; /* Used with outer join */ Name_resolution_context *on_context; /* For ON expressions */ + Table_function_json_table *table_function; /* If it's the table function. */ Item *sj_on_expr; /* @@ -2527,7 +2537,8 @@ struct TABLE_LIST bool updating; /* for replicate-do/ignore table */ bool force_index; /* prefer index over table scan */ bool ignore_leaves; /* preload only non-leaf nodes */ - bool crashed; /* Table was found crashed */ + bool crashed; /* Table was found crashed */ + bool skip_locked; /* Skip locked in view definition */ table_map dep_tables; /* tables the table depends on */ table_map on_expr_dep_tables; /* tables on expression depends on */ struct st_nested_join *nested_join; /* if the element is a nested join */ @@ -2671,7 +2682,7 @@ struct TABLE_LIST void cleanup_items(); bool placeholder() { return derived || view || schema_table || !table || table_function; } void print(THD *thd, table_map eliminated_tables, String *str, enum_query_type query_type); @@ -2866,6 +2877,7 @@ struct TABLE_LIST */ const char *get_table_name() const { return view != NULL ? 
view_name.str : table_name.str; } bool is_active_sjm(); + bool is_sjm_scan_table(); bool is_jtbm() { return MY_TEST(jtbm_subselect != NULL); } st_select_lex_unit *get_unit(); st_select_lex *get_single_select(); diff --git a/sql/table_cache.cc b/sql/table_cache.cc index 62ccfba7e7d..8fb3a559d86 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2012, Oracle and/or its affiliates. - Copyright (c) 2010, 2011 Monty Program Ab + Copyright (c) 2010, 2022, MariaDB Corporation. Copyright (C) 2013 Sergey Vojtovich and MariaDB Foundation This program is free software; you can redistribute it and/or modify @@ -50,6 +50,7 @@ #include "lf.h" #include "table.h" #include "sql_base.h" +#include "aligned.h" /** Configuration. */ @@ -122,6 +123,7 @@ struct Table_cache_instance records, Share_free_tables::List (TABLE::prev and TABLE::next), TABLE::in_use. */ + alignas(CPU_LEVEL1_DCACHE_LINESIZE) mysql_mutex_t LOCK_table_cache; I_P_List <TABLE, I_P_List_adapter<TABLE, &TABLE::global_free_next, &TABLE::global_free_prev>, @@ -130,11 +132,10 @@ struct Table_cache_instance ulong records; uint mutex_waits; uint mutex_nowaits; - /** Avoid false sharing between instances */ - char pad[CPU_LEVEL1_DCACHE_LINESIZE]; Table_cache_instance(): records(0), mutex_waits(0), mutex_nowaits(0) { + static_assert(!(sizeof(*this) % CPU_LEVEL1_DCACHE_LINESIZE), "alignment"); mysql_mutex_init(key_LOCK_table_cache, &LOCK_table_cache, MY_MUTEX_INIT_FAST); } @@ -146,6 +147,10 @@ struct Table_cache_instance DBUG_ASSERT(records == 0); } + static void *operator new[](size_t size) + { return aligned_malloc(size, CPU_LEVEL1_DCACHE_LINESIZE); } + static void operator delete[](void *ptr) { aligned_free(ptr); } + /** Lock table cache mutex and check contention. 
@@ -1001,8 +1006,9 @@ void tdc_remove_referenced_share(THD *thd, TABLE_SHARE *share) DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, share->db.str, share->table_name.str, MDL_EXCLUSIVE)); - share->tdc->flush_unused(false); + share->tdc->flush_unused(true); mysql_mutex_lock(&share->tdc->LOCK_table_share); + DEBUG_SYNC(thd, "before_wait_for_refs"); share->tdc->wait_for_refs(1); DBUG_ASSERT(share->tdc->all_tables.is_empty()); share->tdc->ref_count--; diff --git a/sql/threadpool.h b/sql/threadpool.h index 27da872c5cc..7737d056b4a 100644 --- a/sql/threadpool.h +++ b/sql/threadpool.h @@ -37,6 +37,8 @@ extern uint threadpool_mode; /* Thread pool implementation , windows or generic #define DEFAULT_THREADPOOL_STALL_LIMIT 500U struct TP_connection; +struct st_vio; + extern void tp_callback(TP_connection *c); extern void tp_timeout_handler(TP_connection *c); @@ -113,7 +115,7 @@ struct TP_connection virtual void wait_begin(int type)= 0; virtual void wait_end() = 0; - + IF_WIN(virtual,) void init_vio(st_vio *){}; }; @@ -131,9 +133,11 @@ struct TP_pool virtual int set_stall_limit(uint){ return 0; } virtual int get_thread_count() { return tp_stats.num_worker_threads; } virtual int get_idle_thread_count(){ return 0; } + virtual void resume(TP_connection* c)=0; }; #ifdef _WIN32 + struct TP_pool_win:TP_pool { TP_pool_win(); @@ -143,6 +147,7 @@ struct TP_pool_win:TP_pool virtual void add(TP_connection *); virtual int set_max_threads(uint); virtual int set_min_threads(uint); + void resume(TP_connection *c); }; #endif @@ -156,6 +161,7 @@ struct TP_pool_generic :TP_pool virtual int set_pool_size(uint); virtual int set_stall_limit(uint); virtual int get_idle_thread_count(); + void resume(TP_connection* c); }; #endif /* HAVE_POOL_OF_THREADS */ diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc index c4ab9697f8e..78f9eaf8792 100644 --- a/sql/threadpool_common.cc +++ b/sql/threadpool_common.cc @@ -23,11 +23,17 @@ #include <sql_audit.h> #include <debug_sync.h> 
#include <threadpool.h> +#include <sql_class.h> +#include <sql_parse.h> #ifdef WITH_WSREP #include "wsrep_trans_observer.h" #endif /* WITH_WSREP */ +#ifdef _WIN32 +#include "threadpool_winsockets.h" +#endif + /* Threadpool parameters */ uint threadpool_min_threads; @@ -47,8 +53,8 @@ TP_STATISTICS tp_stats; static void threadpool_remove_connection(THD *thd); -static int threadpool_process_request(THD *thd); -static THD* threadpool_add_connection(CONNECT *connect, void *scheduler_data); +static dispatch_command_return threadpool_process_request(THD *thd); +static THD* threadpool_add_connection(CONNECT *connect, TP_connection *c); extern bool do_command(THD*); @@ -86,13 +92,13 @@ struct Worker_thread_context PSI_thread *psi_thread; st_my_thread_var* mysys_var; - void save() + Worker_thread_context() { psi_thread= PSI_CALL_get_thread(); mysys_var= my_thread_var; } - void restore() + ~Worker_thread_context() { PSI_CALL_set_thread(psi_thread); set_mysys_var(mysys_var); @@ -137,6 +143,44 @@ static inline void set_thd_idle(THD *thd) } /* + Per OS thread info (ID and pthread_self) + stored as TLS, because of syscall overhead + (on Linux) +*/ +struct OS_thread_info +{ + pthread_t self; + ssize_t stack_size; + uint32_t thread_id; + + inline bool initialized() { return stack_size != 0; } + + void init(ssize_t ssize) + { +#if _WIN32 + self= thread_id= GetCurrentThreadId(); +#else +#ifdef __NR_gettid + thread_id= (uint32) syscall(__NR_gettid); +#else + thread_id= 0; +#endif + self= pthread_self(); +#endif + stack_size= ssize; + } +}; +static thread_local OS_thread_info os_thread_info; + +static const OS_thread_info *get_os_thread_info() +{ + auto *res= &os_thread_info; + if (!res->initialized()) + res->init((ssize_t) (my_thread_stack_size * STACK_DIRECTION)); + return res; +} + +/* Attach/associate the connection with the OS thread, */ static void thread_attach(THD* thd) @@ -148,7 +192,12 @@ static void thread_attach(THD* thd) #endif /* WITH_WSREP */ 
set_mysys_var(thd->mysys_var); thd->thread_stack=(char*)&thd; - thd->store_globals(); + set_current_thd(thd); + auto tinfo= get_os_thread_info(); + thd->real_id= tinfo->self; + thd->os_thread_id= tinfo->thread_id; + DBUG_ASSERT(thd->mysys_var == my_thread_var); + thd->mysys_var->stack_ends_here= thd->thread_stack + tinfo->stack_size; PSI_CALL_set_thread(thd->get_psi()); mysql_socket_set_thread_owner(thd->net.vio->mysql_socket); } @@ -173,7 +222,6 @@ void tp_callback(TP_connection *c) DBUG_ASSERT(c); Worker_thread_context worker_context; - worker_context.save(); THD *thd= c->thd; @@ -191,10 +239,29 @@ void tp_callback(TP_connection *c) } c->connect= 0; } - else if (threadpool_process_request(thd)) + else { - /* QUIT or an error occurred. */ - goto error; +retry: + switch(threadpool_process_request(thd)) + { + case DISPATCH_COMMAND_WOULDBLOCK: + if (!thd->async_state.try_suspend()) + { + /* + All async operations finished meanwhile, thus nobody will wake up + this THD. Therefore, we'll resume "manually" here. + */ + thd->async_state.m_state = thd_async_state::enum_async_state::RESUMED; + goto retry; + } + return; + case DISPATCH_COMMAND_CLOSE_CONNECTION: + /* QUIT or an error occurred. 
*/ + goto error; + case DISPATCH_COMMAND_SUCCESS: + break; + } + thd->async_state.m_state= thd_async_state::enum_async_state::NONE; } /* Set priority */ @@ -205,8 +272,6 @@ void tp_callback(TP_connection *c) c->state= TP_STATE_IDLE; if (c->start_io()) goto error; - - worker_context.restore(); return; error: @@ -216,11 +281,10 @@ error: threadpool_remove_connection(thd); } delete c; - worker_context.restore(); } -static THD* threadpool_add_connection(CONNECT *connect, void *scheduler_data) +static THD *threadpool_add_connection(CONNECT *connect, TP_connection *c) { THD *thd= NULL; @@ -241,12 +305,12 @@ static THD* threadpool_add_connection(CONNECT *connect, void *scheduler_data) my_thread_end(); return NULL; } - thd->event_scheduler.data = scheduler_data; + + thd->event_scheduler.data= c; server_threads.insert(thd); // Make THD visible in show processlist delete connect; // must be after server_threads.insert, see close_connections() thd->set_mysys_var(mysys_var); - /* Login. */ thread_attach(thd); re_init_net_server_extension(thd); @@ -260,6 +324,8 @@ static THD* threadpool_add_connection(CONNECT *connect, void *scheduler_data) if (thd_prepare_connection(thd)) goto end; + c->init_vio(thd->net.vio); + /* Check if THD is ok, as prepare_new_connection_state() can fail, for example if init command failed. @@ -325,10 +391,13 @@ static bool has_unread_data(THD* thd) /** Process a single client request or a single batch. */ -static int threadpool_process_request(THD *thd) +static dispatch_command_return threadpool_process_request(THD *thd) { - int retval= 0; + dispatch_command_return retval= DISPATCH_COMMAND_SUCCESS; + thread_attach(thd); + if(thd->async_state.m_state == thd_async_state::enum_async_state::RESUMED) + goto resume; if (thd->killed >= KILL_CONNECTION) { @@ -336,7 +405,7 @@ static int threadpool_process_request(THD *thd) killed flag was set by timeout handler or KILL command. Return error. 
*/ - retval= 1; + retval= DISPATCH_COMMAND_CLOSE_CONNECTION; if(thd->killed == KILL_WAIT_TIMEOUT) handle_wait_timeout(thd); goto end; @@ -359,19 +428,27 @@ static int threadpool_process_request(THD *thd) if (mysql_audit_release_required(thd)) mysql_audit_release(thd); - if ((retval= do_command(thd)) != 0) - goto end; +resume: + retval= do_command(thd, false); + switch(retval) + { + case DISPATCH_COMMAND_WOULDBLOCK: + case DISPATCH_COMMAND_CLOSE_CONNECTION: + goto end; + case DISPATCH_COMMAND_SUCCESS: + break; + } if (!thd_is_connection_alive(thd)) { - retval= 1; + retval=DISPATCH_COMMAND_CLOSE_CONNECTION; goto end; } set_thd_idle(thd); if (!has_unread_data(thd)) - { + { /* More info on this debug sync is in sql_parse.cc*/ DEBUG_SYNC(thd, "before_do_command_net_read"); goto end; @@ -404,6 +481,9 @@ static bool tp_init() pool= 0; return true; } +#ifdef _WIN32 + init_win_aio_buffers(max_connections); +#endif return false; } @@ -505,6 +585,9 @@ static void tp_wait_end(THD *thd) static void tp_end() { delete pool; +#ifdef _WIN32 + destroy_win_aio_buffers(); +#endif } static void tp_post_kill_notification(THD *thd) @@ -515,6 +598,15 @@ static void tp_post_kill_notification(THD *thd) post_kill_notification(thd); } +/* Resume previously suspended THD */ +static void tp_resume(THD* thd) +{ + DBUG_ASSERT(thd->async_state.m_state == thd_async_state::enum_async_state::SUSPENDED); + thd->async_state.m_state = thd_async_state::enum_async_state::RESUMED; + TP_connection* c = get_TP_connection(thd); + pool->resume(c); +} + static scheduler_functions tp_scheduler_functions= { 0, // max_threads @@ -525,7 +617,8 @@ static scheduler_functions tp_scheduler_functions= tp_wait_begin, // thd_wait_begin tp_wait_end, // thd_wait_end tp_post_kill_notification, // post kill notification - tp_end // end + tp_end, // end + tp_resume }; void pool_of_threads_scheduler(struct scheduler_functions *func, diff --git a/sql/threadpool_generic.cc b/sql/threadpool_generic.cc index 
3a5b68d3baf..eb08441a4d5 100644 --- a/sql/threadpool_generic.cc +++ b/sql/threadpool_generic.cc @@ -29,8 +29,8 @@ #include <sql_plist.h> #include <threadpool.h> #include <algorithm> - -#ifdef HAVE_IOCP +#ifdef _WIN32 +#include "threadpool_winsockets.h" #define OPTIONAL_IO_POLL_READ_PARAM this #else #define OPTIONAL_IO_POLL_READ_PARAM 0 @@ -352,7 +352,7 @@ static void* native_event_get_userdata(native_event *event) return event->portev_user; } -#elif defined(HAVE_IOCP) +#elif defined(_WIN32) static TP_file_handle io_poll_create() @@ -363,29 +363,8 @@ static TP_file_handle io_poll_create() int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *, void *opt) { - static char c; - TP_connection_generic *con= (TP_connection_generic *)opt; - OVERLAPPED *overlapped= &con->overlapped; - if (con->vio_type == VIO_TYPE_NAMEDPIPE) - { - if (ReadFile(fd, &c, 0, NULL, overlapped)) - return 0; - } - else - { - WSABUF buf; - buf.buf= &c; - buf.len= 0; - DWORD flags=0; - - if (WSARecv((SOCKET)fd, &buf, 1,NULL, &flags,overlapped, NULL) == 0) - return 0; - } - - if (GetLastError() == ERROR_IO_PENDING) - return 0; - - return 1; + auto c= (TP_connection_generic *) opt; + return (int) c->win_sock.begin_read(); } @@ -434,20 +413,33 @@ int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd) } -int io_poll_wait(TP_file_handle pollfd, native_event *events, int maxevents, int timeout_ms) +static void *native_event_get_userdata(native_event *event) { - ULONG n; - BOOL ok = GetQueuedCompletionStatusEx(pollfd, events, - maxevents, &n, timeout_ms, FALSE); - - return ok ? 
(int)n : -1; + return (void *) event->lpCompletionKey; } - -static void* native_event_get_userdata(native_event *event) +int io_poll_wait(TP_file_handle pollfd, native_event *events, int maxevents, + int timeout_ms) { - return (void *)event->lpCompletionKey; + ULONG n; + if (!GetQueuedCompletionStatusEx(pollfd, events, maxevents, &n, timeout_ms, FALSE)) + return -1; + + /* Update win_sock with number of bytes read.*/ + for (ULONG i= 0; i < n; i++) + { + auto ev= &events[i]; + auto c= (TP_connection_generic *) native_event_get_userdata(ev); + /* null userdata means shutdown (see PostQueuedCompletionStatus() usage) */ + if (c) + { + c->win_sock.end_read(ev->dwNumberOfBytesTransferred, 0); + } + } + + return (int) n; } + #endif @@ -1010,7 +1002,7 @@ void thread_group_destroy(thread_group_t *thread_group) io_poll_close(thread_group->pollfd); thread_group->pollfd= INVALID_HANDLE_VALUE; } -#ifndef HAVE_IOCP +#ifndef _WIN32 for(int i=0; i < 2; i++) { if(thread_group->shutdown_pipe[i] != -1) @@ -1057,7 +1049,7 @@ static int wake_thread(thread_group_t *thread_group,bool due_to_stall) */ static int wake_listener(thread_group_t *thread_group) { -#ifndef HAVE_IOCP +#ifndef _WIN32 if (pipe(thread_group->shutdown_pipe)) { return -1; @@ -1340,7 +1332,10 @@ void TP_pool_generic::add(TP_connection *c) DBUG_VOID_RETURN; } - +void TP_pool_generic::resume(TP_connection* c) +{ + add(c); +} /** MySQL scheduler callback: wait begin @@ -1403,12 +1398,6 @@ TP_connection_generic::TP_connection_generic(CONNECT *c): bound_to_poll_descriptor(false), waiting(false), fix_group(false) -#ifdef HAVE_IOCP -, overlapped() -#endif -#ifdef _WIN32 -, vio_type(c->vio_type) -#endif { DBUG_ASSERT(c->vio_type != VIO_CLOSED); diff --git a/sql/threadpool_generic.h b/sql/threadpool_generic.h index acf5ec6978b..b7a35b7cbf0 100644 --- a/sql/threadpool_generic.h +++ b/sql/threadpool_generic.h @@ -23,6 +23,7 @@ #ifdef _WIN32 #include <windows.h> +#include "threadpool_winsockets.h" /* AIX may define this, too 
?*/ #define HAVE_IOCP #endif @@ -75,11 +76,11 @@ struct TP_connection_generic :public TP_connection TP_connection_generic(CONNECT* c); ~TP_connection_generic(); - virtual int init() { return 0; }; - virtual void set_io_timeout(int sec); - virtual int start_io(); - virtual void wait_begin(int type); - virtual void wait_end(); + int init() override { return 0; } + void set_io_timeout(int sec) override; + int start_io() override; + void wait_begin(int type) override; + void wait_end() override; thread_group_t* thread_group; TP_connection_generic* next_in_queue; @@ -90,12 +91,12 @@ struct TP_connection_generic :public TP_connection bool bound_to_poll_descriptor; int waiting; bool fix_group; -#ifdef HAVE_IOCP - OVERLAPPED overlapped; -#endif #ifdef _WIN32 - enum_vio_type vio_type; + win_aiosocket win_sock{}; + void init_vio(st_vio *vio) override + { win_sock.init(vio);} #endif + }; diff --git a/sql/threadpool_win.cc b/sql/threadpool_win.cc index 6003b06bc7b..ed68e31c755 100644 --- a/sql/threadpool_win.cc +++ b/sql/threadpool_win.cc @@ -30,6 +30,9 @@ #include <debug_sync.h> #include <threadpool.h> #include <windows.h> +#include <set_var.h> + +#include "threadpool_winsockets.h" /* Log a warning */ static void tp_log_warning(const char *msg, const char *fct) @@ -43,8 +46,6 @@ static PTP_POOL pool; static TP_CALLBACK_ENVIRON callback_environ; static DWORD fls; -static bool skip_completion_port_on_success = false; - PTP_CALLBACK_ENVIRON get_threadpool_win_callback_environ() { return pool? 
&callback_environ: 0; @@ -83,22 +84,21 @@ struct TP_connection_win:public TP_connection public: TP_connection_win(CONNECT*); ~TP_connection_win(); - virtual int init(); - virtual int start_io(); - virtual void set_io_timeout(int sec); - virtual void wait_begin(int type); - virtual void wait_end(); - - ulonglong timeout; - enum_vio_type vio_type; - HANDLE handle; - OVERLAPPED overlapped; - PTP_CALLBACK_INSTANCE callback_instance; - PTP_IO io; - PTP_TIMER timer; - PTP_WORK work; - bool long_callback; - + int init() override; + void init_vio(st_vio *vio) override; + int start_io() override; + void set_io_timeout(int sec) override; + void wait_begin(int type) override; + void wait_end() override; + + ulonglong timeout=ULLONG_MAX; + OVERLAPPED overlapped{}; + PTP_CALLBACK_INSTANCE callback_instance{}; + PTP_IO io{}; + PTP_TIMER timer{}; + PTP_WORK work{}; + bool long_callback{}; + win_aiosocket sock; }; struct TP_connection *new_TP_connection(CONNECT *connect) @@ -125,120 +125,56 @@ void TP_pool_win::add(TP_connection *c) } } - -TP_connection_win::TP_connection_win(CONNECT *c) : - TP_connection(c), - timeout(ULONGLONG_MAX), - callback_instance(0), - io(0), - timer(0), - work(0) +void TP_pool_win::resume(TP_connection* c) { + DBUG_ASSERT(c->state == TP_STATE_RUNNING); + SubmitThreadpoolWork(((TP_connection_win*)c)->work); } -#define CHECK_ALLOC_ERROR(op) if (!(op)) {tp_log_warning("Allocation failed", #op); DBUG_ASSERT(0); return -1; } +#define CHECK_ALLOC_ERROR(op) \ + do \ + { \ + if (!(op)) \ + { \ + tp_log_warning("Allocation failed", #op); \ + } \ + } while (0) -int TP_connection_win::init() +TP_connection_win::TP_connection_win(CONNECT *c) : + TP_connection(c) { - - memset(&overlapped, 0, sizeof(OVERLAPPED)); - switch ((vio_type = connect->vio_type)) - { - case VIO_TYPE_SSL: - case VIO_TYPE_TCPIP: - handle= (HANDLE) mysql_socket_getfd(connect->sock); - break; - case VIO_TYPE_NAMEDPIPE: - handle= connect->pipe; - break; - default: - abort(); - } - - - /* Performance 
tweaks (s. MSDN documentation)*/ - UCHAR flags= FILE_SKIP_SET_EVENT_ON_HANDLE; - if (skip_completion_port_on_success) - { - flags |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS; - } - (void)SetFileCompletionNotificationModes(handle, flags); /* Assign io completion callback */ - CHECK_ALLOC_ERROR(io= CreateThreadpoolIo(handle, io_completion_callback, this, &callback_environ)); - CHECK_ALLOC_ERROR(timer= CreateThreadpoolTimer(timer_callback, this, &callback_environ)); + HANDLE h= c->vio_type == VIO_TYPE_NAMEDPIPE ? c->pipe + : (HANDLE)mysql_socket_getfd(c->sock); + + CHECK_ALLOC_ERROR(io=CreateThreadpoolIo(h, io_completion_callback, this, &callback_environ)); + CHECK_ALLOC_ERROR(timer= CreateThreadpoolTimer(timer_callback, this, &callback_environ)); CHECK_ALLOC_ERROR(work= CreateThreadpoolWork(work_callback, this, &callback_environ)); - return 0; } +int TP_connection_win::init() +{ + return !io || !timer || !work ; +} + +void TP_connection_win::init_vio(st_vio* vio) +{ + sock.init(vio); +} /* Start asynchronous read */ int TP_connection_win::start_io() { - DWORD num_bytes = 0; - static char c; - WSABUF buf; - buf.buf= &c; - buf.len= 0; - DWORD flags=0; - DWORD last_error= 0; - - int retval; StartThreadpoolIo(io); - - if (vio_type == VIO_TYPE_TCPIP || vio_type == VIO_TYPE_SSL) - { - /* Start async io (sockets). */ - if (WSARecv((SOCKET)handle , &buf, 1, &num_bytes, &flags, - &overlapped, NULL) == 0) - { - retval= last_error= 0; - } - else - { - retval= -1; - last_error= WSAGetLastError(); - } - } - else - { - /* Start async io (named pipe) */ - if (ReadFile(handle, &c, 0, &num_bytes,&overlapped)) - { - retval= last_error= 0; - } - else - { - retval= -1; - last_error= GetLastError(); - } - } - - if (retval == 0 || last_error == ERROR_MORE_DATA) + if (sock.begin_read()) { - /* - IO successfully finished (synchronously). - If skip_completion_port_on_success is set, we need to handle it right - here, because completion callback would not be executed by the pool. 
- */ - if (skip_completion_port_on_success) - { - CancelThreadpoolIo(io); - io_completion_callback(callback_instance, this, &overlapped, last_error, - num_bytes, io); - } - return 0; - } - - if (last_error == ERROR_IO_PENDING) - { - return 0; + /* Some error occurred */ + CancelThreadpoolIo(io); + return -1; } - - /* Some error occurred */ - CancelThreadpoolIo(io); - return -1; + return 0; } /* @@ -305,7 +241,7 @@ void tp_win_callback_prolog() { /* Running in new worker thread*/ FlsSetValue(fls, (void *)1); - statistic_increment(thread_created, &LOCK_status); + thread_created++; tp_stats.num_worker_threads++; my_thread_init(); } @@ -350,6 +286,10 @@ static VOID CALLBACK io_completion_callback(PTP_CALLBACK_INSTANCE instance, PVOID context, PVOID overlapped, ULONG io_result, ULONG_PTR nbytes, PTP_IO io) { TP_connection_win *c= (TP_connection_win *)context; + + /* How many bytes were preread into read buffer */ + c->sock.end_read((ULONG)nbytes, io_result); + /* Execute high priority connections immediately. 
'Yield' in case of low priority connections, i.e SubmitThreadpoolWork (with the same callback) @@ -412,12 +352,24 @@ int TP_pool_win::init() InitializeThreadpoolEnvironment(&callback_environ); SetThreadpoolCallbackPool(&callback_environ, pool); - if (threadpool_max_threads) + if (IS_SYSVAR_AUTOSIZE(&threadpool_max_threads)) + { + /* + Nr 500 comes from Microsoft documentation, + there is no API for GetThreadpoolThreadMaxThreads() + */ + SYSVAR_AUTOSIZE(threadpool_max_threads,500); + } + else { SetThreadpoolThreadMaximum(pool, threadpool_max_threads); } - if (threadpool_min_threads) + if (IS_SYSVAR_AUTOSIZE(&threadpool_min_threads)) + { + SYSVAR_AUTOSIZE(threadpool_min_threads,1); + } + else { if (!SetThreadpoolThreadMinimum(pool, threadpool_min_threads)) { @@ -426,6 +378,18 @@ int TP_pool_win::init() } } + + if (IS_SYSVAR_AUTOSIZE(&global_system_variables.threadpool_priority)) + { + /* + There is a notable overhead for "auto" priority implementation, + use "high" which handles socket IO callbacks as they come + without rescheduling to work queue. + */ + SYSVAR_AUTOSIZE(global_system_variables.threadpool_priority, + TP_PRIORITY_HIGH); + } + TP_POOL_STACK_INFORMATION stackinfo; stackinfo.StackCommit = 0; stackinfo.StackReserve = (SIZE_T)my_thread_stack_size; @@ -480,3 +444,4 @@ TP_connection *TP_pool_win::new_connection(CONNECT *connect) } return c; } + diff --git a/sql/threadpool_winsockets.cc b/sql/threadpool_winsockets.cc new file mode 100644 index 00000000000..a214cda2a5c --- /dev/null +++ b/sql/threadpool_winsockets.cc @@ -0,0 +1,268 @@ +/* Copyright (C) 2012 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + */ + +#include <winsock2.h> +#include <my_global.h> +#include <violite.h> +#include "threadpool_winsockets.h" +#include <algorithm> +#include <vector> +#include <mutex> + +/* + A cache for IO buffers for asynchronous socket (or named pipe) reads. + + Considerations on Windows: since Windows locks the AIO buffers in physical memory, + it is important that these buffers are compactly allocated. + We try to prevent any kind of memory fragmentation. + + A relatively small region (at most 1MB) is allocated, for equally sized, smallish (256 byte) + buffers. The region is pagesize-aligned (via VirtualAlloc allocation) + + We use smallish IO buffers, 256 bytes is probably large enough for most of + the queries. Larger buffers could have funny effects (thread hogging) + on threadpool scheduling in case client is using protocol pipelining. + + Also note, that even in an unlikely situation where cache runs out of buffers, + this does not lead to errors, zero sized reads will be used in WSARecv then. 
+*/ + +constexpr size_t READ_BUFSIZ= 256; +class AIO_buffer_cache +{ + const size_t ITEM_SIZE= READ_BUFSIZ; + + /** Limit the whole cache to 1MB*/ + const size_t MAX_SIZE= 1048576; + + /* Allocation base */ + char *m_base= 0; + + /* "Free list" with LIFO policy */ + std::vector<char *> m_cache; + std::mutex m_mtx; + size_t m_elements=0; + +public: + void set_size(size_t n_items); + char *acquire_buffer(); + void release_buffer(char *v); + void clear(); + ~AIO_buffer_cache(); +}; + + +void AIO_buffer_cache::set_size(size_t n_items) +{ + DBUG_ASSERT(!m_base); + m_elements= std::min(n_items, MAX_SIZE / ITEM_SIZE); + auto sz= m_elements * ITEM_SIZE; + + m_base= + (char *) VirtualAlloc(0, sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (!m_base) + { + m_elements= 0; + return; + } + + /* Try to help memory manager here, by prelocking region in memory*/ + (void) VirtualLock(m_base, sz); + + m_cache.reserve(m_elements); + for (ssize_t i= m_elements - 1; i >= 0 ; i--) + m_cache.push_back(m_base + i * ITEM_SIZE); +} + +/* + Returns a buffer, or NULL if no free buffers. 
+ + LIFO policy is implemented, so we do not touch too many + pages (no std::stack though) +*/ +char *AIO_buffer_cache::acquire_buffer() +{ + std::unique_lock<std::mutex> lk(m_mtx); + if (m_cache.empty()) + return nullptr; + auto p= m_cache.back(); + m_cache.pop_back(); + return p; +} + +void AIO_buffer_cache::release_buffer(char *v) +{ + std::unique_lock<std::mutex> lk(m_mtx); + m_cache.push_back(v); +} + +void AIO_buffer_cache::clear() +{ + if (!m_base) + return; + + std::unique_lock<std::mutex> lk(m_mtx, std::defer_lock); + for(;;) + { + if (lk.try_lock()) + { + if (m_cache.size() == m_elements) + break; + lk.unlock(); + } + Sleep(100); + } + VirtualFree(m_base, 0, MEM_RELEASE); + m_cache.clear(); + m_base= 0; + m_elements= 0; +} + +AIO_buffer_cache::~AIO_buffer_cache() { clear(); } + +/* Global variable for the cache buffers.*/ +AIO_buffer_cache read_buffers; + +win_aiosocket::~win_aiosocket() +{ + if (m_buf_ptr) + read_buffers.release_buffer(m_buf_ptr); +} + + +/** Return number of unread bytes.*/ +size_t win_aiosocket::buffer_remaining() +{ + return m_buf_datalen - m_buf_off; +} + +static my_bool my_vio_has_data(st_vio *vio) +{ + auto sock= (win_aiosocket *) vio->tp_ctx; + return sock->buffer_remaining() || sock->m_orig_vio_has_data(vio); +} + +/* + (Half-)buffered read. + + The buffer is filled once, by completion of the async IO. + + We do not refill the buffer once it is read off, + does not make sense. 
+*/ +static size_t my_vio_read(st_vio *vio, uchar *dest, size_t sz) +{ + auto sock= (win_aiosocket *) vio->tp_ctx; + DBUG_ASSERT(sock); + + auto nbytes= std::min(sock->buffer_remaining(), sz); + + if (nbytes > 0) + { + /* Copy to output, adjust the offset.*/ + memcpy(dest, sock->m_buf_ptr + sock->m_buf_off, nbytes); + sock->m_buf_off += nbytes; + return nbytes; + } + + return sock->m_orig_vio_read(vio, dest, sz); +} + +DWORD win_aiosocket::begin_read() +{ + DWORD err = ERROR_SUCCESS; + static char c; + WSABUF buf; + + DBUG_ASSERT(!buffer_remaining()); + + /* + If there is no internal buffer to store data, + we do zero size read, but still need a valid + pointer for the buffer parameter. + */ + if (m_buf_ptr) + buf= {(ULONG)READ_BUFSIZ, m_buf_ptr}; + else + buf= {0, &c}; + + + if (!m_is_pipe) + { + /* Do async io (sockets). */ + DWORD flags= 0; + if (WSARecv((SOCKET) m_handle, &buf, 1, 0, &flags, &m_overlapped, NULL)) + err= WSAGetLastError(); + } + else + { + /* Do async read (named pipe) */ + if (!ReadFile(m_handle, buf.buf, buf.len, 0, &m_overlapped)) + err= GetLastError(); + } + + if (!err || err == ERROR_IO_PENDING) + return 0; + return err; +} + +void win_aiosocket::end_read(ULONG nbytes, DWORD err) +{ + DBUG_ASSERT(!buffer_remaining()); + DBUG_ASSERT(!nbytes || m_buf_ptr); + m_buf_off= 0; + m_buf_datalen= nbytes; +} + +void win_aiosocket::init(Vio *vio) +{ + m_is_pipe= vio->type == VIO_TYPE_NAMEDPIPE; + m_handle= + m_is_pipe ? 
vio->hPipe : (HANDLE) mysql_socket_getfd(vio->mysql_socket); + + SetFileCompletionNotificationModes(m_handle, FILE_SKIP_SET_EVENT_ON_HANDLE); + if (vio->type == VIO_TYPE_SSL) + { + /* + TODO : This requires fixing viossl to call our manipulated VIO + */ + return; + } + + if (!(m_buf_ptr = read_buffers.acquire_buffer())) + { + /* Ran out of buffers, that's fine.*/ + return; + } + + vio->tp_ctx= this; + + m_orig_vio_has_data= vio->has_data; + vio->has_data= my_vio_has_data; + + m_orig_vio_read= vio->read; + vio->read= my_vio_read; +} + +void init_win_aio_buffers(unsigned int n_buffers) +{ + read_buffers.set_size(n_buffers); +} + +extern void destroy_win_aio_buffers() +{ + read_buffers.clear(); +} diff --git a/sql/threadpool_winsockets.h b/sql/threadpool_winsockets.h new file mode 100644 index 00000000000..ca2068b759d --- /dev/null +++ b/sql/threadpool_winsockets.h @@ -0,0 +1,80 @@ +/* Copyright (C) 2020 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + */ +#pragma once + +#include <WinSock2.h> +#include <windows.h> + +struct st_vio; + +struct win_aiosocket +{ + /** OVERLAPPED is needed by all Windows AIO*/ + OVERLAPPED m_overlapped{}; + /** Handle to pipe, or socket */ + HANDLE m_handle{}; + /** Whether the m_handle refers to pipe*/ + bool m_is_pipe{}; + + /* Read buffer handling */ + + /** Pointer to buffer of size READ_BUFSIZ. 
Can be NULL.*/ + char *m_buf_ptr{}; + /** Offset to current buffer position*/ + size_t m_buf_off{}; + /** Size of valid data in the buffer*/ + size_t m_buf_datalen{}; + + /* Vio handling */ + /** Pointer to original vio->vio_read/vio->has_data function */ + size_t (*m_orig_vio_read)(st_vio *, unsigned char *, size_t){}; + char (*m_orig_vio_has_data)(st_vio *){}; + + + + /** + Begins asynchronnous reading from socket/pipe. + On IO completion, pre-read some bytes into internal buffer + */ + DWORD begin_read(); + + /** + Update number of bytes returned, and IO error status + + Should be called right after IO is completed + GetQueuedCompletionStatus() , or threadpool IO completion + callback would return nbytes and the error. + + Sets the valid data length in the read buffer. + */ + void end_read(ULONG nbytes, DWORD err); + + /** + Override VIO routines with ours, accounting for + one-shot buffering. + */ + void init(st_vio *vio); + + /** Return number of unread bytes.*/ + size_t buffer_remaining(); + + /* Frees the read buffer.*/ + ~win_aiosocket(); +}; + +/* Functions related to IO buffers caches.*/ +extern void init_win_aio_buffers(unsigned int n_buffers); +extern void destroy_win_aio_buffers(); diff --git a/sql/tztime.cc b/sql/tztime.cc index 3a3abf1898a..dc148d3bf4c 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1769,7 +1769,8 @@ end_with_setting_default_tz: /* If we have default time zone try to load it */ if (default_tzname) { - String tmp_tzname2(default_tzname, &my_charset_latin1); + String tmp_tzname2(default_tzname, strlen(default_tzname), + &my_charset_latin1); /* Time zone tables may be open here, and my_tz_find() may open most of them once more, but this is OK for system tables open @@ -1797,7 +1798,7 @@ end: delete thd; if (org_thd) org_thd->store_globals(); /* purecov: inspected */ - + default_tz= default_tz_name ? 
global_system_variables.time_zone : my_tz_SYSTEM; @@ -1872,7 +1873,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) #ifdef ABBR_ARE_USED char chars[MY_MAX(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))]; #endif - /* + /* Used as a temporary tz_info until we decide that we actually want to allocate and keep the tz info and tz name in tz_storage. */ @@ -2025,7 +2026,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) mysql.time_zone_transition table. Here we additionally need records in ascending order by index scan also satisfies us. */ - table= tz_tables->table; + table= tz_tables->table; table->field[0]->store((longlong) tzid, TRUE); if (table->file->ha_index_init(0, 1)) goto end; @@ -2360,7 +2361,7 @@ my_tz_find(THD *thd, const String *name) /** Convert leap seconds into non-leap - This function will convert the leap seconds added by the OS to + This function will convert the leap seconds added by the OS to non-leap seconds, e.g. 23:59:59, 23:59:60 -> 23:59:59, 00:00:01 ... This check is not checking for years on purpose : although it's not a complete check this way it doesn't require looking (and having installed) @@ -2417,7 +2418,7 @@ print_tz_as_sql(const char* tz_name, const TIME_ZONE_INFO *sp) } printf("INSERT INTO time_zone_transition_type \ -(Time_zone_id, Transition_type_id, Offset, Is_DST, Abbreviation) VALUES\n"); +(Time_zone_id, Transition_type_id, `Offset`, Is_DST, Abbreviation) VALUES\n"); for (i= 0; i < sp->typecnt; i++) printf("%s(@time_zone_id, %u, %ld, %d, '%s')\n", (i == 0 ? " " : ","), i, @@ -2427,6 +2428,11 @@ print_tz_as_sql(const char* tz_name, const TIME_ZONE_INFO *sp) } +#define SAVE_ENGINE(e) \ + "\"select ENGINE into @" e "_engine" \ + " from information_schema.TABLES" \ + " where TABLE_SCHEMA=DATABASE() and TABLE_NAME='" e "'\"" + /* Print info about leap seconds in time zone as SQL statements populating mysql.time_zone_leap_second table. 
@@ -2445,12 +2451,11 @@ print_tz_leaps_as_sql(const TIME_ZONE_INFO *sp) For all timezones. */ if (!opt_skip_write_binlog) - printf("\\d |\n" - "IF (select count(*) from information_schema.global_variables where\n" - "variable_name='wsrep_on' and variable_value='ON') = 1 THEN\n" - "ALTER TABLE time_zone_leap_second ENGINE=InnoDB;\n" - "END IF|\n" - "\\d ;\n"); + printf( + "execute immediate if(@wsrep_cannot_replicate_tz, " + SAVE_ENGINE("time_zone_leap_second") ", 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, " + "'ALTER TABLE time_zone_leap_second ENGINE=InnoDB', 'do 0');\n"); printf("TRUNCATE TABLE time_zone_leap_second;\n"); @@ -2465,12 +2470,10 @@ print_tz_leaps_as_sql(const TIME_ZONE_INFO *sp) } if (!opt_skip_write_binlog) - printf("\\d |\n" - "IF (select count(*) from information_schema.global_variables where\n" - "variable_name='wsrep_on' and variable_value='ON') = 1 THEN\n" - "ALTER TABLE time_zone_leap_second ENGINE=Aria;\n" - "END IF|\n" - "\\d ;\n"); + printf( + "execute immediate if(@wsrep_cannot_replicate_tz, " + "concat('ALTER TABLE time_zone_leap_second ENGINE=', " + "@time_zone_leap_second_engine), 'do 0');\n"); printf("ALTER TABLE time_zone_leap_second ORDER BY Transition_time;\n"); } @@ -2639,7 +2642,7 @@ static struct my_option my_long_options[] = &opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"skip-write-binlog", 'S', "Do not replicate changes to time zone tables to the binary log, or to other nodes in a Galera cluster (if wsrep_on=ON).", + {"skip-write-binlog", 'S', "Do not replicate changes to time zone tables to the binary log, or to other nodes in a Galera cluster.", &opt_skip_write_binlog,&opt_skip_write_binlog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; @@ -2716,17 +2719,32 @@ static const char *lock_tables= " time_zone_leap_second 
WRITE,\n" " time_zone_name WRITE,\n" " time_zone_transition WRITE,\n" - " time_zone_transition_type WRITE;\n"; + " time_zone_transition_type WRITE"; static const char *trunc_tables_const= "TRUNCATE TABLE time_zone;\n" "TRUNCATE TABLE time_zone_name;\n" "TRUNCATE TABLE time_zone_transition;\n" "TRUNCATE TABLE time_zone_transition_type;\n"; +/* + These queries need to return FALSE/0 when the 'wsrep*' variables do not + exist at all. + Moving the WHERE clause into the sum(...) seems like the obvious solution + here, but it does not work in bootstrap mode (see MDEV-28782 and + 0e4cf497ca11a7298e2bd896cb594bd52085a1d4). + Thus we use coalesce(..., 0) instead, +*/ +static const char *wsrep_is_on= + "select coalesce(sum(SESSION_VALUE='ON'), 0)" + " from information_schema.SYSTEM_VARIABLES WHERE VARIABLE_NAME='wsrep_on'"; +static const char *wsrep_cannot_replicate_tz= + "select coalesce(sum(GLOBAL_VALUE NOT LIKE @replicate_opt), 0)" + " from information_schema.SYSTEM_VARIABLES WHERE VARIABLE_NAME='wsrep_mode'"; + int main(int argc, char **argv) { - const char *trunc_tables; + const char *trunc_tables= ""; MY_INIT(argv[0]); load_defaults_or_exit("my", load_default_groups, &argc, &argv); @@ -2742,38 +2760,48 @@ main(int argc, char **argv) return 1; } - if (!(argc == 1 && !opt_leap)) - trunc_tables= "SELECT 'skip truncate tables';\n"; // No-op - needed for ELSE clause - else + if (argc == 1 && !opt_leap) trunc_tables= trunc_tables_const; + printf("set @wsrep_is_on=(%s);\n", wsrep_is_on); + printf("SET STATEMENT SQL_MODE='' FOR " + "SELECT concat('%%', GROUP_CONCAT(OPTION), '%%') INTO @replicate_opt " + " FROM" + " (SELECT DISTINCT concat('REPLICATE_', UPPER(ENGINE)) AS OPTION" + " FROM information_schema.TABLES" + " WHERE TABLE_SCHEMA=DATABASE()" + " AND TABLE_NAME IN ('time_zone'," + " 'time_zone_name'," + " 'time_zone_transition'," + " 'time_zone_transition_type'," + " 'time_zone_leap_second')" + " AND ENGINE in ('MyISAM'," + " 'Aria')) AS o" + " ORDER BY OPTION DESC;\n"); 
+ printf("set @wsrep_cannot_replicate_tz=@wsrep_is_on AND (%s);\n", wsrep_cannot_replicate_tz); if (opt_skip_write_binlog) - /* If skip_write_binlog is set and wsrep is compiled in we disable - sql_log_bin and wsrep_on to avoid Galera replicating below - TRUNCATE TABLE clauses. This will allow user to set different - time zones to nodes in Galera cluster. */ - printf("set @prep1=if((select count(*) from information_schema.global_variables where variable_name='wsrep_on' and variable_value='ON'), 'SET SESSION WSREP_ON=OFF', 'do 0');\n" + /* We turn off session wsrep if we cannot replicate using galera. + Disable sql_log_bin as the name implies. */ + printf("execute immediate if(@wsrep_is_on, 'SET @save_wsrep_on=@@WSREP_ON, WSREP_ON=OFF', 'do 0');\n" + "SET @save_sql_log_bin=@@SQL_LOG_BIN;\n" "SET SESSION SQL_LOG_BIN=0;\n" - "execute immediate @prep1;\n" - "%s%s", trunc_tables, lock_tables); + "SET @wsrep_cannot_replicate_tz=0;\n" + "%s%s;\n", trunc_tables, lock_tables); else // Alter time zone tables to InnoDB if wsrep_on is enabled // to allow changes to them to replicate with Galera - printf("\\d |\n" - "IF (select count(*) from information_schema.global_variables where\n" - "variable_name='wsrep_on' and variable_value='ON') = 1 THEN\n" - "ALTER TABLE time_zone ENGINE=InnoDB;\n" - "ALTER TABLE time_zone_name ENGINE=InnoDB;\n" - "ALTER TABLE time_zone_transition ENGINE=InnoDB;\n" - "ALTER TABLE time_zone_transition_type ENGINE=InnoDB;\n" + printf( + "execute immediate if(@wsrep_cannot_replicate_tz, " SAVE_ENGINE("time_zone") ", 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, 'ALTER TABLE time_zone ENGINE=InnoDB', 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, " SAVE_ENGINE("time_zone_name") ", 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, 'ALTER TABLE time_zone_name ENGINE=InnoDB', 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, " SAVE_ENGINE("time_zone_transition") ", 'do 0');\n" + "execute 
immediate if(@wsrep_cannot_replicate_tz, 'ALTER TABLE time_zone_transition ENGINE=InnoDB', 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, " SAVE_ENGINE("time_zone_transition_type") ", 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, 'ALTER TABLE time_zone_transition_type ENGINE=InnoDB', 'do 0');\n" "%s" - "START TRANSACTION;\n" - "ELSE\n%s" - "END IF|\n" - "\\d ;\n", - trunc_tables, trunc_tables); - // Ideally we'd like to put lock_tables in the ELSE branch however - // "ERROR 1314 (0A000) at line 2: LOCK is not allowed in stored procedures" + "/*M!100602 execute immediate if(@wsrep_cannot_replicate_tz, 'start transaction', '%s')*/;\n" + , trunc_tables, lock_tables); if (argc == 1 && !opt_leap) { @@ -2793,10 +2821,13 @@ main(int argc, char **argv) printf("UNLOCK TABLES;\n" "COMMIT;\n"); - printf("ALTER TABLE time_zone_transition " - "ORDER BY Time_zone_id, Transition_time;\n"); - printf("ALTER TABLE time_zone_transition_type " - "ORDER BY Time_zone_id, Transition_type_id;\n"); + printf( + "execute immediate if(@wsrep_cannot_replicate_tz, 'do 0'," + "'ALTER TABLE time_zone_transition " + "ORDER BY Time_zone_id, Transition_time');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, 'do 0'," + "'ALTER TABLE time_zone_transition_type " + "ORDER BY Time_zone_id, Transition_type_id');\n"); } else { @@ -2821,17 +2852,22 @@ main(int argc, char **argv) free_root(&tz_storage, MYF(0)); } - if(!opt_skip_write_binlog) - // Fall back to Aria - printf("\\d |\n" - "IF (select count(*) from information_schema.global_variables where\n" - "variable_name='wsrep_on' and variable_value='ON') = 1 THEN\n" - "ALTER TABLE time_zone ENGINE=Aria;\n" - "ALTER TABLE time_zone_name ENGINE=Aria;\n" - "ALTER TABLE time_zone_transition ENGINE=Aria, ORDER BY Time_zone_id, Transition_time;\n" - "ALTER TABLE time_zone_transition_type ENGINE=Aria, ORDER BY Time_zone_id, Transition_type_id;\n" - "END IF|\n" - "\\d ;\n"); + if(opt_skip_write_binlog) + printf("SET 
SESSION SQL_LOG_BIN=@save_sql_log_bin;\n" + "execute immediate if(@wsrep_is_on, 'SET SESSION WSREP_ON=@save_wsrep_on', 'do 0');\n"); + else + // Change back to what it was before + printf( + "execute immediate if(@wsrep_cannot_replicate_tz, " + "concat('ALTER TABLE time_zone ENGINE=', @time_zone_engine), 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, " + "concat('ALTER TABLE time_zone_name ENGINE=', @time_zone_name_engine), 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, " + "concat('ALTER TABLE time_zone_transition ENGINE=', " + "@time_zone_transition_engine, ', ORDER BY Time_zone_id, Transition_time'), 'do 0');\n" + "execute immediate if(@wsrep_cannot_replicate_tz, " + "concat('ALTER TABLE time_zone_transition_type ENGINE=', " + "@time_zone_transition_type_engine, ', ORDER BY Time_zone_id, Transition_type_id'), 'do 0');\n"); free_allocated_data(); my_end(0); diff --git a/sql/udf_example.c b/sql/udf_example.c index cb3f3ad1c98..14c793ee98a 100644 --- a/sql/udf_example.c +++ b/sql/udf_example.c @@ -122,13 +122,13 @@ #include <stdlib.h> #include <stdio.h> #include <string.h> -#ifdef __WIN__ +#ifdef _WIN32 typedef unsigned __int64 ulonglong; /* Microsofts 64 bit types */ typedef __int64 longlong; #else typedef unsigned long long ulonglong; typedef long long longlong; -#endif /*__WIN__*/ +#endif /*_WIN32*/ #else #include "mariadb.h" #include <my_sys.h> @@ -700,7 +700,7 @@ longlong udf_sequence(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args, ** ****************************************************************************/ -#ifdef __WIN__ +#ifdef _WIN32 #include <winsock2.h> #else #include <sys/socket.h> diff --git a/sql/unireg.cc b/sql/unireg.cc index 904fda10599..7f89f4e78b1 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -119,8 +119,23 @@ static uchar *extra2_write_field_properties(uchar *pos, return pos; } -static uint16 -get_fieldno_by_name(HA_CREATE_INFO *create_info, List<Create_field> &create_fields, +static uchar 
*extra2_write_index_properties(uchar *pos, const KEY *keyinfo, + uint keys) +{ + *pos++= EXTRA2_INDEX_FLAGS; + pos= extra2_write_len(pos, keys); + for (uint i=0; i < keys; i++) + { + *pos++= keyinfo[i].is_ignored ? + EXTRA2_IGNORED_KEY : + EXTRA2_DEFAULT_INDEX_FLAGS; + } + return pos; +} + +static field_index_t +get_fieldno_by_name(HA_CREATE_INFO *create_info, + List<Create_field> &create_fields, const Lex_ident &field_name) { List_iterator<Create_field> it(create_fields); @@ -128,17 +143,17 @@ get_fieldno_by_name(HA_CREATE_INFO *create_info, List<Create_field> &create_fiel DBUG_ASSERT(field_name); - for (unsigned field_no = 0; (sql_field = it++); ++field_no) + for (field_index_t field_no= 0; (sql_field = it++); ++field_no) { if (field_name.streq(sql_field->field_name)) { - DBUG_ASSERT(field_no <= uint16(~0U)); - return uint16(field_no); + DBUG_ASSERT(field_no < NO_CACHED_FIELD_INDEX); + return field_no; } } DBUG_ASSERT(0); /* Not Reachable */ - return 0; + return NO_CACHED_FIELD_INDEX; } static inline @@ -403,6 +418,14 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table, extra2_size+= 1 + extra2_str_size(create_fields.elements); } + /* + To store the ignorability flag for each key. + Here 1 bytes is reserved to store the extra index flags for keys. + Currently only 1 bit is used, rest of the bits can be used in the future + */ + if (keys) + extra2_size+= 1 + extra2_str_size(keys); + for (i= 0; i < keys; i++) if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH) e_unique_hash_extra_parts++; @@ -519,6 +542,10 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table, if (has_extra2_field_flags_) pos= extra2_write_field_properties(pos, create_fields); + + if (keys) + pos= extra2_write_index_properties(pos, key_info, keys); + int4store(pos, filepos); // end of the extra2 segment pos+= 4; @@ -843,7 +870,7 @@ static bool pack_header(THD *thd, uchar *forminfo, as auto-update field. 
*/ if (field->real_field_type() == MYSQL_TYPE_TIMESTAMP && - MTYP_TYPENR(field->unireg_check) != Field::NONE && + field->unireg_check != Field::NONE && !time_stamp_pos) time_stamp_pos= (uint) field->offset+ (uint) data_offset + 1; length=field->pack_length; diff --git a/sql/unireg.h b/sql/unireg.h index 7810a4379ee..1eec3585acc 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -173,6 +173,7 @@ enum extra2_frm_value_type { EXTRA2_GIS=2, EXTRA2_APPLICATION_TIME_PERIOD=3, EXTRA2_PERIOD_FOR_SYSTEM_TIME=4, + EXTRA2_INDEX_FLAGS=5, #define EXTRA2_ENGINE_IMPORTANT 128 @@ -186,6 +187,12 @@ enum extra2_field_flags { VERS_OPTIMIZED_UPDATE= 1 << INVISIBLE_MAX_BITS, }; +enum extra2_index_flags { + EXTRA2_DEFAULT_INDEX_FLAGS, + EXTRA2_IGNORED_KEY +}; + + static inline size_t extra2_read_len(const uchar **extra2, const uchar *end) { size_t length= *(*extra2)++; diff --git a/sql/upgrade_conf_file.cc b/sql/upgrade_conf_file.cc index e41e4dfd857..543df7b9bdf 100644 --- a/sql/upgrade_conf_file.cc +++ b/sql/upgrade_conf_file.cc @@ -42,13 +42,19 @@ static const char *removed_variables[] = "have_partitioning", "innodb_adaptive_flushing_method", "innodb_adaptive_hash_index_partitions", +"innodb_adaptive_max_sleep_delay", "innodb_additional_mem_pool_size", "innodb_api_bk_commit_interval", "innodb_api_disable_rowlock", "innodb_api_enable_binlog", "innodb_api_enable_mdl", "innodb_api_trx_level", +"innodb_background_scrub_data_check_interval", +"innodb_background_scrub_data_compressed", +"innodb_background_scrub_data_interval", +"innodb_background_scrub_data_uncompressed", "innodb_blocking_buffer_pool_restore", +"innodb_buffer_pool_instances", "innodb_buffer_pool_populate", "innodb_buffer_pool_restore_at_startup", "innodb_buffer_pool_shm_checksum", @@ -62,6 +68,8 @@ static const char *removed_variables[] = "innodb_cleaner_lsn_age_factor", "innodb_cleaner_max_flush_time", "innodb_cleaner_max_lru_time", +"innodb_commit_concurrency", +"innodb_concurrency_tickets", "innodb_corrupt_table_action", 
"innodb_dict_size_limit", "innodb_doublewrite_file", @@ -72,6 +80,7 @@ static const char *removed_variables[] = "innodb_file_format_check", "innodb_file_format_max", "innodb_flush_neighbor_pages", +"innodb_force_load_corrupted", "innodb_foreground_preflush", "innodb_ibuf_accel_rate", "innodb_ibuf_active_contract", @@ -89,12 +98,16 @@ static const char *removed_variables[] = "innodb_log_archive", "innodb_log_block_size", "innodb_log_checksum_algorithm", -"innodb_rollback_segments", +"innodb_log_checksums", +"innodb_log_compressed_pages", +"innodb_log_files_in_group", +"innodb_log_optimize_ddl", "innodb_max_bitmap_file_size", "innodb_max_changed_pages", "innodb_merge_sort_block_size", "innodb_mirrored_log_groups", "innodb_mtflush_threads", +"innodb_page_cleaners", "innodb_persistent_stats_root_page", "innodb_print_lock_wait_timeout_info", "innodb_purge_run_now", @@ -102,15 +115,23 @@ static const char *removed_variables[] = "innodb_read_ahead", "innodb_recovery_stats", "innodb_recovery_update_relay_log", +"innodb_replication_delay", +"innodb_rollback_segments", +"innodb_scrub_log", +"innodb_scrub_log_speed", "innodb_show_locks_held", "innodb_show_verbose_locks", "innodb_stats_auto_update", "innodb_stats_sample_pages", "innodb_stats_update_need_lock", "innodb_support_xa", +"innodb_sync_array_size", +"innodb_thread_concurrency", "innodb_thread_concurrency_timer_based", +"innodb_thread_sleep_delay", "innodb_track_changed_pages", "innodb_track_redo_log_now", +"innodb_undo_logs", "innodb_use_fallocate", "innodb_use_global_flush_log_at_trx_commit", "innodb_use_mtflush", diff --git a/sql/winmain.cc b/sql/winmain.cc new file mode 100644 index 00000000000..7def0aed531 --- /dev/null +++ b/sql/winmain.cc @@ -0,0 +1,372 @@ +/* Copyright (C) 2020 MariaDB Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA +*/ + +/* + main() function for the server on Windows is implemented here. + The core functionality is implemented elsewhere, in mysqld_main(), and running as + service is done here. + + Main tasks of the service are + + 1. Report current status back to service control manager. Here we're + providing callbacks so code outside of winmain.cc can call it + (via mysqld_set_service_status_callback()) + + 2. React to notification, the only one we care about is the "stop" + notification. we initiate shutdown, when instructed. + + Note that our service might not be too Windows-friendly, as it might take + a while to startup (recovery), and a while to shut down(innodb cleanups). + + Most of the code more of less standard service stuff, taken from Microsoft + docs examples. + + Notable oddity in running services, is that we do not know for sure, + whether we should run as a service or not (there is no --service parameter that + would tell).Heuristics are used, and if the last command line argument is + valid service name, we try to run as service, but fallback to usual process + if this fails. + + As an example, even if mysqld.exe is started with command line like "mysqld.exe --help", + it is entirely possible that mysqld.exe run as service "--help". 
+ + Apart from that, now deprecated and obsolete service registration/removal functionality is + still provided (mysqld.exe --install/--remove) +*/ + +#include <my_global.h> +#include <mysqld.h> +#include <log.h> + +#include <stdio.h> +#include <windows.h> +#include <string> +#include <cassert> + +static SERVICE_STATUS svc_status{SERVICE_WIN32_OWN_PROCESS}; +static SERVICE_STATUS_HANDLE svc_status_handle; +static char *svc_name; + +static char **save_argv; +static int save_argc; + +static int install_service(int argc, char **argv, const char *name); +static int remove_service(const char *name); + +/* + Report service status to SCM. This function is indirectly invoked + by the server to report state transitions. + + 1. from START_PENDING to SERVICE_RUNNING, when we start accepting user connections + 2. from SERVICE_RUNNING to STOP_PENDING, when we start shutdown + 3. from STOP_PENDING to SERVICE_STOPPED, in mysqld_exit() + sometimes also START_PENDING to SERVICE_STOPPED, on startup errors +*/ +static void report_svc_status(DWORD current_state, DWORD exit_code, DWORD wait_hint) +{ + if (!svc_status_handle) + return; + + static DWORD check_point= 1; + if (current_state != (DWORD)-1) + svc_status.dwCurrentState= current_state; + svc_status.dwWaitHint= wait_hint; + + if (exit_code) + { + svc_status.dwWin32ExitCode= ERROR_SERVICE_SPECIFIC_ERROR; + svc_status.dwServiceSpecificExitCode= exit_code; + } + else + { + svc_status.dwWin32ExitCode= 0; + } + + if (current_state == SERVICE_START_PENDING) + svc_status.dwControlsAccepted= 0; + else + svc_status.dwControlsAccepted= SERVICE_ACCEPT_STOP|SERVICE_ACCEPT_SHUTDOWN; + + if ((current_state == SERVICE_RUNNING) || (current_state == SERVICE_STOPPED)) + svc_status.dwCheckPoint= 0; + else + svc_status.dwCheckPoint= check_point++; + + SetServiceStatus(svc_status_handle, &svc_status); +} + +/* Report unexpected errors. 
*/ +static void svc_report_event(const char *svc_name, const char *command) +{ + char buffer[80]; + sprintf_s(buffer, "mariadb service %s, %s failed with %d", + svc_name, command, GetLastError()); + OutputDebugString(buffer); +} + +/* + Service control function. + Reacts to service stop, initiates shutdown. +*/ +static void WINAPI svc_ctrl_handle(DWORD cntrl) +{ + switch (cntrl) + { + case SERVICE_CONTROL_SHUTDOWN: + case SERVICE_CONTROL_STOP: + sql_print_information( + "Windows service \"%s\": received %s", + svc_name, + cntrl == SERVICE_CONTROL_STOP? "SERVICE_CONTROL_STOP": "SERVICE_CONTROL_SHUTDOWN"); + + /* The below will also set the status to STOP_PENDING. */ + mysqld_win_initiate_shutdown(); + break; + + case SERVICE_CONTROL_INTERROGATE: + default: + break; + } +} + +/* Service main routine, mainly runs mysqld_main() */ +static void WINAPI svc_main(DWORD svc_argc, char **svc_argv) +{ + /* Register the handler function for the service */ + char *name= svc_argv[0]; + + svc_status_handle= RegisterServiceCtrlHandler(name, svc_ctrl_handle); + if (!svc_status_handle) + { + svc_report_event(name, "RegisterServiceCtrlHandler"); + return; + } + report_svc_status(SERVICE_START_PENDING, NO_ERROR, 0); + + /* Make server report service status via our callback.*/ + mysqld_set_service_status_callback(report_svc_status); + + /* This would add service name entry to load_defaults.*/ + mysqld_win_set_service_name(name); + + /* + Do not pass the service name parameter (last on the command line) + to mysqld_main(), it is unaware of it. + */ + save_argv[save_argc - 1]= 0; + mysqld_main(save_argc - 1, save_argv); +} + +/* + This start the service. Sometimes it will fail, because + currently we do not know for sure whether we run as service or not. + If this fails, the fallback is to run as normal process. 
+*/ +static int run_as_service(char *name) +{ + SERVICE_TABLE_ENTRY stb[]= {{name, svc_main}, {0, 0}}; + if (!StartServiceCtrlDispatcher(stb)) + { + assert(GetLastError() == ERROR_FAILED_SERVICE_CONTROLLER_CONNECT); + return -1; + } + return 0; +} + +/* + Check for valid existing service name. + Part of our guesswork, whether we run as service or not. +*/ +static bool is_existing_service(const char *name) +{ + if (strchr(name, '\\') || strchr(name, '/')) + { + /* Invalid characters in service name */ + return false; + } + + SC_HANDLE sc_service= 0, scm= 0; + bool ret= ((scm= OpenSCManager(0, 0, SC_MANAGER_ENUMERATE_SERVICE)) != 0) && + ((sc_service= OpenService(scm, name, SERVICE_QUERY_STATUS)) != 0); + + if (sc_service) + CloseServiceHandle(sc_service); + if (scm) + CloseServiceHandle(scm); + + return ret; +} + +/* + If service name is not given to --install/--remove + it is assumed to be "MySQL" (traditional handling) +*/ +static const char *get_svc_name(const char *arg) +{ + return arg ? arg : "MySQL"; +} + +/* + Main function on Windows. + Runs mysqld as normal process, or as a service. + + Plus, the obsolete functionality to register/remove services. +*/ +__declspec(dllexport) int mysqld_win_main(int argc, char **argv) +{ + save_argv= argv; + save_argc= argc; + + /* + If no special arguments are given, service name is nor present + run as normal program. 
+ */ + if (argc == 1) + return mysqld_main(argc, argv); + + auto cmd= argv[1]; + + /* Handle install/remove */ + if (!strcmp(cmd, "--install") || !strcmp(cmd, "--install-manual")) + return install_service(argc, argv, get_svc_name(argv[2])); + + if (!strcmp(cmd, "--remove")) + return remove_service(get_svc_name(argv[2])); + + /* Try to run as service, and fallback to mysqld_main(), if this fails */ + svc_name= argv[argc - 1]; + if (is_existing_service(svc_name) && !run_as_service(svc_name)) + return 0; + svc_name= 0; + + /* Run as normal program.*/ + return mysqld_main(argc, argv); +} + + +/* + Register/remove services functionality. + This is kept for backward compatibility only, and is + superseeded by much more versatile mysql_install_db.exe + + "mysqld --remove=svc" has no advantage over + OS own "sc delete svc" +*/ +static void ATTRIBUTE_NORETURN die(const char *func, const char *name) +{ + DWORD err= GetLastError(); + fprintf(stderr, "FATAL ERROR : %s failed (%lu)\n", func, err); + switch (err) + { + case ERROR_SERVICE_EXISTS: + fprintf(stderr, "Service %s already exists.\n", name); + break; + case ERROR_SERVICE_DOES_NOT_EXIST: + fprintf(stderr, "Service %s does not exist.\n", name); + break; + case ERROR_ACCESS_DENIED: + fprintf(stderr, "Access is denied. 
" + "Make sure to run as elevated admin user.\n"); + break; + case ERROR_INVALID_NAME: + fprintf(stderr, "Invalid service name '%s'\n", name); + default: + break; + } + exit(1); +} + +static inline std::string quoted(const char *src) +{ + std::string s; + s.append("\"").append(src).append("\""); + return s; +} + +static int install_service(int argc, char **argv, const char *name) +{ + std::string cmdline; + + char path[MAX_PATH]; + auto nSize = GetModuleFileName(0, path, sizeof(path)); + + if (nSize == (DWORD) sizeof(path) && GetLastError() == ERROR_INSUFFICIENT_BUFFER) + die("GetModuleName", name); + + cmdline.append(quoted(path)); + + const char *user= 0; + // mysqld --install[-manual] name ...[--local-service] + if (argc > 2) + { + for (int i= 3; argv[i]; i++) + { + if (!strcmp(argv[i], "--local-service")) + user= "NT AUTHORITY\\LocalService"; + else + { + cmdline.append(" ").append(quoted(argv[i])); + } + } + } + cmdline.append(" ").append(quoted(name)); + + DWORD start_type; + if (!strcmp(argv[1], "--install-manual")) + start_type= SERVICE_DEMAND_START; + else + start_type= SERVICE_AUTO_START; + + SC_HANDLE scm, sc_service; + if (!(scm= OpenSCManager(0, 0, SC_MANAGER_CREATE_SERVICE))) + die("OpenSCManager", name); + + if (!(sc_service= CreateService( + scm, name, name, SERVICE_ALL_ACCESS, + SERVICE_WIN32_OWN_PROCESS, start_type, SERVICE_ERROR_NORMAL, + cmdline.c_str(), 0, 0, 0, user, 0))) + die("CreateService", name); + + char description[]= "MariaDB database server"; + SERVICE_DESCRIPTION sd= {description}; + ChangeServiceConfig2(sc_service, SERVICE_CONFIG_DESCRIPTION, &sd); + + CloseServiceHandle(sc_service); + CloseServiceHandle(scm); + + printf("Service '%s' successfully installed.\n", name); + return 0; +} + +static int remove_service(const char *name) +{ + SC_HANDLE scm, sc_service; + + if (!(scm= OpenSCManager(0, 0, SC_MANAGER_CREATE_SERVICE))) + die("OpenSCManager", name); + + if (!(sc_service= OpenService(scm, name, DELETE))) + die("OpenService", 
name); + + if (!DeleteService(sc_service)) + die("DeleteService", name); + + CloseServiceHandle(sc_service); + CloseServiceHandle(scm); + + printf("Service '%s' successfully deleted.\n", name); + return 0; +} diff --git a/sql/winservice.c b/sql/winservice.c index d7cfd2f7584..a11087e5cd5 100644 --- a/sql/winservice.c +++ b/sql/winservice.c @@ -40,7 +40,7 @@ void get_file_version(const char *path, int *major, int *minor, int *patch) *major= *minor= *patch= 0; size= GetFileVersionInfoSize(path, &version_handle); - if (size == 0) + if (size == 0) return; ver= (char *)malloc(size); if(!GetFileVersionInfo(path, version_handle, size, ver)) @@ -65,7 +65,7 @@ void normalize_path(char *path, size_t size) char *p; strcpy_s(buf, MAX_PATH, path+1); p= strchr(buf, '"'); - if (p) + if (p) *p=0; } else @@ -136,15 +136,15 @@ static void get_datadir_from_ini(const char *ini, char *service_name, char *data /* Retrieve some properties from windows mysqld service binary path. - We're interested in ini file location and datadir, and also in version of + We're interested in ini file location and datadir, and also in version of the data. We tolerate missing mysqld.exe. - Note that this function carefully avoids using mysql libraries (e.g dbug), + Note that this function carefully avoids using mysql libraries (e.g dbug), since it is used in unusual environments (windows installer, MFC), where we - do not have much control over how threads are created and destroyed, so we + do not have much control over how threads are created and destroyed, so we cannot assume MySQL thread initilization here. 
*/ -int get_mysql_service_properties(const wchar_t *bin_path, +int get_mysql_service_properties(const wchar_t *bin_path, mysqld_service_properties *props) { int numargs; @@ -193,9 +193,10 @@ int get_mysql_service_properties(const wchar_t *bin_path, if(wcsstr(mysqld_path, L".exe") == NULL) wcscat(mysqld_path, L".exe"); - if(wcsicmp(file_part, L"mysqld.exe") != 0 && + if(wcsicmp(file_part, L"mysqld.exe") != 0 && wcsicmp(file_part, L"mysqld-debug.exe") != 0 && - wcsicmp(file_part, L"mysqld-nt.exe") != 0) + wcsicmp(file_part, L"mysqld-nt.exe") != 0 && + wcsicmp(file_part, L"mariadbd.exe") != 0) { /* The service executable is not mysqld. */ goto end; @@ -205,7 +206,7 @@ int get_mysql_service_properties(const wchar_t *bin_path, /* If mysqld.exe exists, try to get its version from executable */ if (GetFileAttributes(props->mysqld_exe) != INVALID_FILE_ATTRIBUTES) { - get_file_version(props->mysqld_exe, &props->version_major, + get_file_version(props->mysqld_exe, &props->version_major, &props->version_minor, &props->version_patch); } @@ -235,7 +236,7 @@ int get_mysql_service_properties(const wchar_t *bin_path, { /* Hard, although a rare case, we're guessing datadir and defaults-file. - On Windows, defaults-file is traditionally install-root\my.ini + On Windows, defaults-file is traditionally install-root\my.ini and datadir is install-root\data */ char install_root[MAX_PATH]; @@ -297,7 +298,7 @@ int get_mysql_service_properties(const wchar_t *bin_path, } /* - If version could not be determined so far, try mysql_upgrade_info in + If version could not be determined so far, try mysql_upgrade_info in database directory. 
*/ if(props->version_major == 0) diff --git a/sql/wsrep_applier.cc b/sql/wsrep_applier.cc index 4005de22e72..90ede81a06a 100644 --- a/sql/wsrep_applier.cc +++ b/sql/wsrep_applier.cc @@ -39,7 +39,7 @@ static Log_event* wsrep_read_log_event( char *head= (*arg_buf); uint data_len= uint4korr(head + EVENT_LEN_OFFSET); - char *buf= (*arg_buf); + uchar *buf= (uchar*) (*arg_buf); const char *error= 0; Log_event *res= 0; diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc index 5162f13458b..7ab5ba0c1e2 100644 --- a/sql/wsrep_client_service.cc +++ b/sql/wsrep_client_service.cc @@ -70,8 +70,6 @@ bool Wsrep_client_service::interrupted( wsrep::unique_lock<wsrep::mutex>& lock WSREP_UNUSED) const { DBUG_ASSERT(m_thd == current_thd); - /* Underlying mutex in lock object points to THD::LOCK_thd_data, which - protects m_thd->wsrep_trx() and protects us from thd delete. */ mysql_mutex_assert_owner(static_cast<mysql_mutex_t*>(lock.mutex()->native())); bool ret= (m_thd->killed != NOT_KILLED); if (ret) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 3205b2dfa21..7d684cef35d 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -24,6 +24,7 @@ #include <sql_class.h> #include <sql_parse.h> #include <sql_base.h> /* find_temporary_table() */ +#include <sql_statistics.h> /* is_stat_table() */ #include "slave.h" #include "rpl_mi.h" #include "sql_repl.h" @@ -98,7 +99,8 @@ my_bool wsrep_restart_slave; // Should mysql slave thread be // restarted, when node joins back? 
my_bool wsrep_desync; // De(re)synchronize the node from the // cluster -my_bool wsrep_strict_ddl; // Reject DDL to +ulonglong wsrep_mode; +my_bool wsrep_strict_ddl; // Deprecated: Reject DDL to // effected tables not // supporting Galera replication bool wsrep_service_started; // If Galera was initialized @@ -1093,7 +1095,6 @@ void wsrep_recover() { WSREP_INFO("Recovered position: %s", oss.str().c_str()); } - } @@ -1113,7 +1114,7 @@ void wsrep_stop_replication(THD *thd) */ if (thd && !thd->wsrep_applier) trans_rollback(thd); wsrep_close_client_connections(TRUE, thd); - + /* wait until appliers have stopped */ wsrep_wait_appliers_close(thd); @@ -1198,6 +1199,327 @@ bool wsrep_start_replication(const char *wsrep_cluster_address) return true; } +bool wsrep_check_mode (enum_wsrep_mode mask) +{ + return wsrep_mode & mask; +} + +//seconds after which the limit warnings suppression will be activated +#define WSREP_WARNING_ACTIVATION_TIMEOUT 5*60 +//number of limit warnings after which the suppression will be activated +#define WSREP_WARNING_ACTIVATION_THRESHOLD 10 + +enum wsrep_warning_type { + WSREP_DISABLED = 0, + WSREP_REQUIRE_PRIMARY_KEY= 1, + WSREP_REQUIRE_INNODB= 2, + WSREP_REQUIRE_MAX=3, +}; + +static ulonglong wsrep_warning_start_time=0; +static bool wsrep_warning_active[WSREP_REQUIRE_MAX+1]; +static ulonglong wsrep_warning_count[WSREP_REQUIRE_MAX+1]; +static ulonglong wsrep_total_warnings_count=0; + +/** + Auxiliary function to reset the limit of wsrep warnings. + This is done without mutex protection, but this should be good + enough as it doesn't matter if we loose a couple of suppressed + messages or if this is called multiple times. 
+*/ + +static void wsrep_reset_warnings(ulonglong now) +{ + uint i; + + wsrep_warning_start_time= now; + wsrep_total_warnings_count= 0; + + for (i= 0 ; i < WSREP_REQUIRE_MAX ; i++) + { + wsrep_warning_active[i]= false; + wsrep_warning_count[i]= 0; + } +} + +static const char* wsrep_warning_name(const enum wsrep_warning_type type) +{ + switch(type) + { + case WSREP_REQUIRE_PRIMARY_KEY: + return "WSREP_REQUIRE_PRIMARY_KEY"; break; + case WSREP_REQUIRE_INNODB: + return "WSREP_REQUIRE_INNODB"; break; + default: assert(0); return " "; break; // for compiler + } +} +/** + Auxiliary function to check if the warning statements should be + thrown or suppressed. + + Logic is: + - If we get more than WSREP_WARNING_ACTIVATION_THRESHOLD errors + of one type, that type of errors will be suppressed for + WSREP_WARNING_ACTIVATION_TIMEOUT. + - When the time limit has been reached, all suppressions are reset. + + This means that if one gets many different types of errors, some of them + may be reset less than WSREP_WARNING_ACTIVATION_TIMEOUT. However at + least one error is disabled for this time. + + SYNOPSIS: + @params + warning_type - The type of warning. + + RETURN: + 0 0k to log + 1 Message suppressed +*/ + +static bool wsrep_protect_against_warning_flood( + enum wsrep_warning_type warning_type) +{ + ulonglong count; + ulonglong now= my_interval_timer()/1000000000ULL; + + count= ++wsrep_warning_count[warning_type]; + wsrep_total_warnings_count++; + + /* + INITIALIZING: + If this is the first time this function is called with log warning + enabled, the monitoring the warnings should start. 
+ */ + if (wsrep_warning_start_time == 0) + { + wsrep_reset_warnings(now); + return false; + } + + /* + The following is true if we got too many errors or if the error was + already suppressed + */ + if (count >= WSREP_WARNING_ACTIVATION_THRESHOLD) + { + ulonglong diff_time= (now - wsrep_warning_start_time); + + if (!wsrep_warning_active[warning_type]) + { + /* + ACTIVATION: + We got WSREP_WARNING_ACTIVATION_THRESHOLD warnings in + less than WSREP_WARNING_ACTIVATION_TIMEOUT we activate the + suppression. + */ + if (diff_time <= WSREP_WARNING_ACTIVATION_TIMEOUT) + { + wsrep_warning_active[warning_type]= true; + WSREP_INFO("Suppressing warnings of type '%s' for up to %d seconds because of flooding", + wsrep_warning_name(warning_type), + WSREP_WARNING_ACTIVATION_TIMEOUT); + } + else + { + /* + There is no flooding till now, therefore we restart the monitoring + */ + wsrep_reset_warnings(now); + } + } + else + { + /* This type of warnings was suppressed */ + if (diff_time > WSREP_WARNING_ACTIVATION_TIMEOUT) + { + ulonglong save_count= wsrep_total_warnings_count; + /* Print a suppression note and remove the suppression */ + wsrep_reset_warnings(now); + WSREP_INFO("Suppressed %lu unsafe warnings during " + "the last %d seconds", + save_count, (int) diff_time); + } + } + } + + return wsrep_warning_active[warning_type]; +} + +/** + Auxiliary function to push warning to client and to the error log +*/ +static void wsrep_push_warning(THD *thd, + enum wsrep_warning_type type, + const handlerton *hton, + const TABLE_LIST *tables) +{ + switch(type) + { + case WSREP_REQUIRE_PRIMARY_KEY: + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_OPTION_PREVENTS_STATEMENT, + "WSREP: wsrep_mode = REQUIRED_PRIMARY_KEY enabled. " + "Table '%s'.'%s' should have PRIMARY KEY defined.", + tables->db.str, tables->table_name.str); + if (global_system_variables.log_warnings > 1 && + !wsrep_protect_against_warning_flood(type)) + WSREP_WARN("wsrep_mode = REQUIRED_PRIMARY_KEY enabled. 
" + "Table '%s'.'%s' should have PRIMARY KEY defined", + tables->db.str, tables->table_name.str); + break; + case WSREP_REQUIRE_INNODB: + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_OPTION_PREVENTS_STATEMENT, + "WSREP: wsrep_mode = STRICT_REPLICATION enabled. " + "Storage engine %s for table '%s'.'%s' is " + "not supported in Galera", + ha_resolve_storage_engine_name(hton), + tables->db.str, tables->table_name.str); + if (global_system_variables.log_warnings > 1 && + !wsrep_protect_against_warning_flood(type)) + WSREP_WARN("wsrep_mode = STRICT_REPLICATION enabled. " + "Storage engine %s for table '%s'.'%s' is " + "not supported in Galera", + ha_resolve_storage_engine_name(hton), + tables->db.str, tables->table_name.str); + break; + + default: assert(0); break; + } +} + +bool wsrep_check_mode_after_open_table (THD *thd, + const handlerton *hton, + TABLE_LIST *tables) +{ + enum_sql_command sql_command= thd->lex->sql_command; + bool is_dml_stmt= thd->get_command() != COM_STMT_PREPARE && + (sql_command == SQLCOM_INSERT || + sql_command == SQLCOM_INSERT_SELECT || + sql_command == SQLCOM_REPLACE || + sql_command == SQLCOM_REPLACE_SELECT || + sql_command == SQLCOM_UPDATE || + sql_command == SQLCOM_UPDATE_MULTI || + sql_command == SQLCOM_LOAD || + sql_command == SQLCOM_DELETE); + + if (!is_dml_stmt) + return true; + + const legacy_db_type db_type= hton->db_type; + bool replicate= ((db_type == DB_TYPE_MYISAM && wsrep_check_mode(WSREP_MODE_REPLICATE_MYISAM)) || + (db_type == DB_TYPE_ARIA && wsrep_check_mode(WSREP_MODE_REPLICATE_ARIA))); + TABLE *tbl= tables->table; + + if (replicate) + { + /* It is not recommended to replicate MyISAM as it lacks rollback feature + but if user demands then actions are replicated using TOI. + Following code will kick-start the TOI but this has to be done only once + per statement. + Note: kick-start will take-care of creating isolation key for all tables + involved in the list (provided all of them are MYISAM or Aria tables). 
*/ + if (!is_stat_table(&tables->db, &tables->alias)) + { + if (tbl->s->primary_key == MAX_KEY && + wsrep_check_mode(WSREP_MODE_REQUIRED_PRIMARY_KEY)) + { + /* Other replicated table doesn't have explicit primary-key defined. */ + wsrep_push_warning(thd, WSREP_REQUIRE_PRIMARY_KEY, hton, tables); + } + + wsrep_before_rollback(thd, true); + wsrep_after_rollback(thd, true); + wsrep_after_statement(thd); + WSREP_TO_ISOLATION_BEGIN(NULL, NULL, (tables)); + } + } else if (db_type != DB_TYPE_UNKNOWN && + db_type != DB_TYPE_PERFORMANCE_SCHEMA) + { + bool is_system_db= (tbl && + ((strcmp(tbl->s->db.str, "mysql") == 0) || + (strcmp(tbl->s->db.str, "information_schema") == 0))); + + if (!is_system_db && + !is_temporary_table(tables)) + { + + if (db_type != DB_TYPE_INNODB && + wsrep_check_mode(WSREP_MODE_STRICT_REPLICATION)) + { + /* Table is not an InnoDB table and strict replication is requested*/ + wsrep_push_warning(thd, WSREP_REQUIRE_INNODB, hton, tables); + } + + if (tbl->s->primary_key == MAX_KEY && + db_type == DB_TYPE_INNODB && + wsrep_check_mode(WSREP_MODE_REQUIRED_PRIMARY_KEY)) + { + /* InnoDB table doesn't have explicit primary-key defined. */ + wsrep_push_warning(thd, WSREP_REQUIRE_PRIMARY_KEY, hton, tables); + } + + if (db_type != DB_TYPE_INNODB && + thd->variables.sql_log_bin == 1 && + wsrep_check_mode(WSREP_MODE_DISALLOW_LOCAL_GTID)) + { + /* Table is not an InnoDB table and local GTIDs are disallowed */ + my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0)); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_OPTION_PREVENTS_STATEMENT, + "You can't execute statements that would generate local " + "GTIDs when wsrep_mode = DISALLOW_LOCAL_GTID is set. 
" + "Try disabling binary logging with SET sql_log_bin=0 " + "to execute this statement."); + goto wsrep_error_label; + } + } + } + + return true; + +wsrep_error_label: + return false; +} + +bool wsrep_check_mode_before_cmd_execute (THD *thd) +{ + bool ret= true; + if (wsrep_check_mode(WSREP_MODE_BINLOG_ROW_FORMAT_ONLY) && + !thd->is_current_stmt_binlog_format_row() && is_update_query(thd->lex->sql_command)) + { + my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0)); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_OPTION_PREVENTS_STATEMENT, + "WSREP: wsrep_mode = BINLOG_ROW_FORMAT_ONLY enabled. Only ROW binlog format is supported."); + ret= false; + } + if (wsrep_check_mode(WSREP_MODE_REQUIRED_PRIMARY_KEY) && + thd->lex->sql_command == SQLCOM_CREATE_TABLE) + { + Key *key; + List_iterator<Key> key_iterator(thd->lex->alter_info.key_list); + bool primary_key_found= false; + while ((key= key_iterator++)) + { + if (key->type == Key::PRIMARY) + { + primary_key_found= true; + break; + } + } + if (!primary_key_found) + { + my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0)); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_OPTION_PREVENTS_STATEMENT, + "WSREP: wsrep_mode = REQUIRED_PRIMARY_KEY enabled. Table should have PRIMARY KEY defined."); + ret= false; + } + } + return ret; +} + bool wsrep_must_sync_wait (THD* thd, uint mask) { bool ret= 0; @@ -1910,11 +2232,14 @@ create_view_query(THD *thd, uchar** buf, size_t* buf_len) LEX_CSTRING *name; int i; + buff.append('('); for (i= 0; (name= names++); i++) { - buff.append(i ? 
", " : "("); append_identifier(thd, &buff, name); + buff.append(", ", 2); } + if (i) + buff.length(buff.length()-2); buff.append(')'); } buff.append(STRING_WITH_LEN(" AS ")); @@ -1952,25 +2277,25 @@ static int wsrep_drop_table_query(THD* thd, uchar** buf, size_t* buf_len) if (found_temp_table) { - buff.append("DROP TABLE "); + buff.append(STRING_WITH_LEN("DROP TABLE ")); if (lex->check_exists) - buff.append("IF EXISTS "); + buff.append(STRING_WITH_LEN("IF EXISTS ")); for (TABLE_LIST* table= first_table; table; table= table->next_global) { if (!thd->find_temporary_table(table->db.str, table->table_name.str)) { append_identifier(thd, &buff, table->db.str, table->db.length); - buff.append("."); + buff.append('.'); append_identifier(thd, &buff, table->table_name.str, table->table_name.length); - buff.append(","); + buff.append(','); } } /* Chop the last comma */ buff.chop(); - buff.append(" /* generated by wsrep */"); + buff.append(STRING_WITH_LEN(" /* generated by wsrep */")); WSREP_DEBUG("Rewrote '%s' as '%s'", thd->query(), buff.ptr()); @@ -1994,43 +2319,50 @@ bool wsrep_should_replicate_ddl_iterate(THD* thd, const TABLE_LIST* table_list) for (const TABLE_LIST* it= table_list; it; it= it->next_global) { if (it->table && - !wsrep_should_replicate_ddl(thd, it->table->s->db_type()->db_type)) + !wsrep_should_replicate_ddl(thd, it->table->s->db_type())) return false; } } return true; } -bool wsrep_should_replicate_ddl(THD* thd, - const enum legacy_db_type db_type) +bool wsrep_should_replicate_ddl(THD* thd, const handlerton *hton) { - if (!wsrep_strict_ddl) + if (!wsrep_check_mode(WSREP_MODE_STRICT_REPLICATION)) return true; - switch (db_type) + if (!hton) + return true; + + switch (hton->db_type) { case DB_TYPE_INNODB: return true; break; case DB_TYPE_MYISAM: - if (wsrep_replicate_myisam) + if (wsrep_check_mode(WSREP_MODE_REPLICATE_MYISAM)) return true; else WSREP_DEBUG("wsrep OSU failed for %s", wsrep_thd_query(thd)); break; case DB_TYPE_ARIA: - /* if 
(wsrep_replicate_aria) */ - /* fallthrough */ + if (wsrep_check_mode(WSREP_MODE_REPLICATE_ARIA)) + return true; + else + WSREP_DEBUG("wsrep OSU failed for %s", wsrep_thd_query(thd)); + break; default: WSREP_DEBUG("wsrep OSU failed for %s", wsrep_thd_query(thd)); break; } - /* STRICT, treat as error */ + /* wsrep_mode = STRICT_REPLICATION, treat as error */ my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0)); push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA, - "WSREP: wsrep_strict_ddl=true and storage engine does not support Galera replication."); + ER_ILLEGAL_HA, + "WSREP: wsrep_mode = STRICT_REPLICATION enabled. " + "Storage engine %s not supported.", + ha_resolve_storage_engine_name(hton)); return false; } /* @@ -2061,7 +2393,7 @@ bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table, { return false; } - if (!wsrep_should_replicate_ddl(thd, create_info->db_type->db_type)) + if (!wsrep_should_replicate_ddl(thd, create_info->db_type)) { return false; } @@ -2137,23 +2469,16 @@ bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table, return true; break; case SQLCOM_ALTER_TABLE: - { if (create_info) { - enum legacy_db_type db_type; + const handlerton *hton= create_info->db_type; - if (create_info->db_type) - db_type= create_info->db_type->db_type; - else - { - const handlerton *hton= ha_default_handlerton(thd); - db_type= hton->db_type; - } - if (!wsrep_should_replicate_ddl(thd, db_type)) + if (!hton) + hton= ha_default_handlerton(thd); + if (!wsrep_should_replicate_ddl(thd, hton)) return false; } - } - /* fallthrough */ + /* fallthrough */ default: if (table && !thd->find_temporary_table(db, table)) { @@ -2233,11 +2558,6 @@ static int wsrep_TOI_event_buf(THD* thd, uchar** buf, size_t* buf_len) case SQLCOM_DROP_TABLE: err= wsrep_drop_table_query(thd, buf, buf_len); break; - case SQLCOM_KILL: - WSREP_DEBUG("KILL as TOI: %s", thd->query()); - err= wsrep_to_buf_helper(thd, thd->query(), thd->query_length(), - buf, 
buf_len); - break; case SQLCOM_CREATE_ROLE: if (sp_process_definer(thd)) { @@ -2463,6 +2783,23 @@ static int wsrep_RSU_begin(THD *thd, const char *db_, const char *table_) { WSREP_DEBUG("RSU BEGIN: %lld, : %s", wsrep_thd_trx_seqno(thd), wsrep_thd_query(thd)); + + if (thd->variables.wsrep_OSU_method == WSREP_OSU_RSU && + thd->variables.sql_log_bin == 1 && + wsrep_check_mode(WSREP_MODE_DISALLOW_LOCAL_GTID)) + { + /* wsrep_mode = WSREP_MODE_DISALLOW_LOCAL_GTID, treat as error */ + my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0)); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_OPTION_PREVENTS_STATEMENT, + "You can't execute statements that would generate local " + "GTIDs when wsrep_mode = DISALLOW_LOCAL_GTID is set. " + "Try disabling binary logging with SET sql_log_bin=0 " + "to execute this statement."); + + return -1; + } + if (thd->wsrep_cs().begin_rsu(5000)) { WSREP_WARN("RSU begin failed"); @@ -2485,6 +2822,17 @@ static void wsrep_RSU_end(THD *thd) thd->variables.wsrep_on= 1; } +static inline bool is_replaying_connection(THD *thd) +{ + bool ret; + + mysql_mutex_lock(&thd->LOCK_thd_data); + ret= (thd->wsrep_trx().state() == wsrep::transaction::s_replaying) ? true : false; + mysql_mutex_unlock(&thd->LOCK_thd_data); + + return ret; +} + int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_, const TABLE_LIST* table_list, const Alter_info *alter_info, @@ -2493,9 +2841,16 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_, { /* No isolation for applier or replaying threads. - */ + */ if (!wsrep_thd_is_local(thd)) + { + if (wsrep_OSU_method_get(thd) == WSREP_OSU_TOI) + WSREP_DEBUG("%s TOI Begin: %s", + is_replaying_connection(thd) ? 
"Replay" : "Apply", + wsrep_thd_query(thd)); + return 0; + } if (thd->wsrep_parallel_slave_wait_for_prior_commit()) { @@ -2504,6 +2859,7 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_, } int ret= 0; + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_trx().state() == wsrep::transaction::s_must_abort) @@ -2590,9 +2946,6 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_, void wsrep_to_isolation_end(THD *thd) { - DBUG_ASSERT(wsrep_thd_is_local_toi(thd) || - wsrep_thd_is_in_rsu(thd)); - if (wsrep_thd_is_local_toi(thd)) { thd->variables.lock_wait_timeout= thd->variables.saved_lock_wait_timeout; @@ -2601,13 +2954,24 @@ void wsrep_to_isolation_end(THD *thd) } else if (wsrep_thd_is_in_rsu(thd)) { + thd->variables.lock_wait_timeout= thd->variables.saved_lock_wait_timeout; DBUG_ASSERT(wsrep_OSU_method_get(thd) == WSREP_OSU_RSU); wsrep_RSU_end(thd); } else { - DBUG_ASSERT(0); + /* Applier or replaying threads just output TO END */ + if (wsrep_debug) + { + wsrep::client_state& client_state(thd->wsrep_cs()); + WSREP_DEBUG("%s TO END: %lld: %s", + is_replaying_connection(thd) ? "Replay" : "Apply", + client_state.toi_meta().seqno().get(), + wsrep_thd_query(thd)); + } + return; } + if (wsrep_emulate_bin_log) wsrep_thd_binlog_trx_reset(thd); } @@ -2657,11 +3021,7 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, request_thd, granted_thd); ticket->wsrep_report(wsrep_debug); - /* Here we will call wsrep_abort_transaction so we should hold - THD::LOCK_thd_data to protect victim from concurrent usage - and THD::LOCK_thd_kill to protect from disconnect or delete. 
*/ - wsrep_thd_LOCK(granted_thd); - + mysql_mutex_lock(&granted_thd->LOCK_thd_data); if (wsrep_thd_is_toi(granted_thd) || wsrep_thd_is_applying(granted_thd)) { @@ -2669,22 +3029,21 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, { WSREP_DEBUG("BF thread waiting for SR in aborting state"); ticket->wsrep_report(wsrep_debug); - wsrep_thd_UNLOCK(granted_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); } else if (wsrep_thd_is_SR(granted_thd) && !wsrep_thd_is_SR(request_thd)) { - WSREP_MDL_LOG(INFO, "MDL conflict, DDL vs SR", + WSREP_MDL_LOG(INFO, "MDL conflict, DDL vs SR", schema, schema_len, request_thd, granted_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); wsrep_abort_thd(request_thd, granted_thd, 1); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_kill); } else { WSREP_MDL_LOG(INFO, "MDL BF-BF conflict", schema, schema_len, request_thd, granted_thd); ticket->wsrep_report(true); - wsrep_thd_UNLOCK(granted_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); unireg_abort(1); } } @@ -2693,16 +3052,15 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, { WSREP_DEBUG("BF thread waiting for FLUSH"); ticket->wsrep_report(wsrep_debug); - wsrep_thd_UNLOCK(granted_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); } else if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE) { WSREP_DEBUG("DROP caused BF abort, conf %s", wsrep_thd_transaction_state_str(granted_thd)); ticket->wsrep_report(wsrep_debug); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); wsrep_abort_thd(request_thd, granted_thd, 1); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_kill); } else { @@ -2711,9 +3069,8 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, ticket->wsrep_report(wsrep_debug); if (granted_thd->wsrep_trx().active()) { - wsrep_abort_thd(request_thd, granted_thd, true); - 
mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_kill); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + wsrep_abort_thd(request_thd, granted_thd, 1); } else { @@ -2721,11 +3078,10 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, Granted_thd is likely executing with wsrep_on=0. If the requesting thd is BF, BF abort and wait. */ + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); if (wsrep_thd_is_BF(request_thd, FALSE)) { ha_abort_transaction(request_thd, granted_thd, TRUE); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_kill); } else { @@ -2747,7 +3103,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, static bool abort_replicated(THD *thd) { bool ret_code= false; - wsrep_thd_LOCK(thd); if (thd->wsrep_trx().state() == wsrep::transaction::s_committing) { WSREP_DEBUG("aborting replicated trx: %llu", (ulonglong)(thd->real_id)); @@ -2755,9 +3110,6 @@ static bool abort_replicated(THD *thd) (void)wsrep_abort_thd(thd, thd, TRUE); ret_code= true; } - else - wsrep_thd_UNLOCK(thd); - return ret_code; } @@ -2767,17 +3119,6 @@ static inline bool is_client_connection(THD *thd) return (thd->wsrep_client_thread && thd->variables.wsrep_on); } -static inline bool is_replaying_connection(THD *thd) -{ - bool ret; - - mysql_mutex_lock(&thd->LOCK_thd_data); - ret= (thd->wsrep_trx().state() == wsrep::transaction::s_replaying) ? 
true : false; - mysql_mutex_unlock(&thd->LOCK_thd_data); - - return ret; -} - static inline bool is_committing_connection(THD *thd) { bool ret; @@ -2795,10 +3136,8 @@ static my_bool have_client_connections(THD *thd, void*) (longlong) thd->thread_id)); if (is_client_connection(thd) && thd->killed == KILL_CONNECTION) { - WSREP_DEBUG("Informing thread %lld that it's time to die", - thd->thread_id); (void)abort_replicated(thd); - return true; + return 1; } return 0; } @@ -2835,8 +3174,6 @@ static my_bool kill_all_threads(THD *thd, THD *caller_thd) { DBUG_PRINT("quit", ("Informing thread %lld that it's time to die", (longlong) thd->thread_id)); - WSREP_DEBUG("Informing thread %lld that it's time to die", - thd->thread_id); /* We skip slave threads & scheduler on this first loop through. */ if (is_client_connection(thd) && thd != caller_thd) { @@ -2874,7 +3211,7 @@ void wsrep_close_client_connections(my_bool wait_to_end, THD* except_caller_thd) { /* Clear thread cache */ thread_cache.final_flush(); - + /* First signal all threads that it's time to die */ diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index 2061802afbd..6f9a2d127b4 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -88,9 +88,10 @@ extern ulong wsrep_running_applier_threads; extern ulong wsrep_running_rollbacker_threads; extern bool wsrep_new_cluster; extern bool wsrep_gtid_mode; -extern my_bool wsrep_strict_ddl; extern uint32 wsrep_gtid_domain_id; extern std::atomic <bool > wsrep_thread_create_failed; +extern ulonglong wsrep_mode; +extern my_bool wsrep_strict_ddl; enum enum_wsrep_reject_types { WSREP_REJECT_NONE, /* nothing rejected */ @@ -122,6 +123,16 @@ enum enum_wsrep_ignore_apply_error { WSREP_IGNORE_ERRORS_MAX= 0x7 }; +/* wsrep_mode features */ +enum enum_wsrep_mode { + WSREP_MODE_STRICT_REPLICATION= (1ULL << 0), + WSREP_MODE_BINLOG_ROW_FORMAT_ONLY= (1ULL << 1), + WSREP_MODE_REQUIRED_PRIMARY_KEY= (1ULL << 2), + WSREP_MODE_REPLICATE_MYISAM= (1ULL << 3), + WSREP_MODE_REPLICATE_ARIA= 
(1ULL << 4), + WSREP_MODE_DISALLOW_LOCAL_GTID= (1ULL << 5) +}; + // Streaming Replication #define WSREP_FRAG_BYTES 0 #define WSREP_FRAG_ROWS 1 @@ -198,6 +209,10 @@ extern void wsrep_close_applier_threads(int count); extern void wsrep_stop_replication(THD *thd); extern bool wsrep_start_replication(const char *wsrep_cluster_address); extern void wsrep_shutdown_replication(); +extern bool wsrep_check_mode (enum_wsrep_mode mask); +extern bool wsrep_check_mode_after_open_table (THD *thd, const handlerton *hton, + TABLE_LIST *tables); +extern bool wsrep_check_mode_before_cmd_execute (THD *thd); extern bool wsrep_must_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ); extern bool wsrep_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ); extern bool wsrep_sync_wait (THD* thd, enum enum_sql_command command); @@ -339,7 +354,7 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_, const wsrep::key_array *fk_tables= nullptr, const HA_CREATE_INFO* create_info= nullptr); -bool wsrep_should_replicate_ddl(THD* thd, const enum legacy_db_type db_type); +bool wsrep_should_replicate_ddl(THD* thd, const handlerton *db_type); bool wsrep_should_replicate_ddl_iterate(THD* thd, const TABLE_LIST* table_list); void wsrep_to_isolation_end(THD *thd); diff --git a/sql/wsrep_schema.cc b/sql/wsrep_schema.cc index 1943136722e..1cfdae2890b 100644 --- a/sql/wsrep_schema.cc +++ b/sql/wsrep_schema.cc @@ -235,7 +235,7 @@ static int execute_SQL(THD* thd, const char* sql, uint length) { thd->set_query((char*)sql, length); thd->set_query_id(next_query_id()); - mysql_parse(thd, (char*)sql, length, & parser_state, FALSE, FALSE); + mysql_parse(thd, (char*)sql, length, & parser_state); if (thd->is_error()) { WSREP_WARN("Wsrep_schema::execute_sql() failed, %d %s\nSQL: %s", @@ -1295,7 +1295,7 @@ int Wsrep_schema::replay_transaction(THD* orig_thd, { Wsrep_schema_impl::thd_context_switch thd_context_switch(&thd, orig_thd); - ret= wsrep_apply_events(orig_thd, rli, 
buf.c_ptr_quick(), buf.length()); + ret= wsrep_apply_events(orig_thd, rli, buf.ptr(), buf.length()); if (ret) { WSREP_WARN("Wsrep_schema::replay_transaction: failed to apply fragments"); @@ -1449,7 +1449,7 @@ int Wsrep_schema::recover_sr_transactions(THD *orig_thd) String data_str; (void)frag_table->field[4]->val_str(&data_str); - wsrep::const_buffer data(data_str.c_ptr_quick(), data_str.length()); + wsrep::const_buffer data(data_str.ptr(), data_str.length()); wsrep::ws_meta ws_meta(gtid, wsrep::stid(server_id, transaction_id, diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc index ac7226b9948..9be6af71c56 100644 --- a/sql/wsrep_server_service.cc +++ b/sql/wsrep_server_service.cc @@ -41,6 +41,7 @@ static void init_service_thd(THD* thd, char* thread_stack) thd->prior_thr_create_utime= thd->start_utime= microsecond_interval_timer(); thd->set_command(COM_SLEEP); thd->reset_for_next_command(true); + server_threads.insert(thd); // as wsrep_innobase_kill_one_trx() uses find_thread_by_id() } Wsrep_storage_service* @@ -80,6 +81,7 @@ void Wsrep_server_service::release_storage_service( static_cast<Wsrep_storage_service*>(storage_service); THD* thd= ss->m_thd; wsrep_reset_threadvars(thd); + server_threads.erase(thd); delete ss; delete thd; } @@ -93,7 +95,8 @@ wsrep_create_streaming_applier(THD *orig_thd, const char *ctx) streaming transaction is BF aborted and streaming applier is created from BF aborter context. */ Wsrep_threadvars saved_threadvars(wsrep_save_threadvars()); - wsrep_reset_threadvars(saved_threadvars.cur_thd); + if (saved_threadvars.cur_thd) + wsrep_reset_threadvars(saved_threadvars.cur_thd); THD *thd= 0; Wsrep_applier_service *ret= 0; if (!wsrep_create_threadvars() && @@ -110,7 +113,8 @@ wsrep_create_streaming_applier(THD *orig_thd, const char *ctx) } /* Restore original thread local storage state before returning. 
*/ wsrep_restore_threadvars(saved_threadvars); - wsrep_store_threadvars(saved_threadvars.cur_thd); + if (saved_threadvars.cur_thd) + wsrep_store_threadvars(saved_threadvars.cur_thd); return ret; } @@ -139,6 +143,7 @@ void Wsrep_server_service::release_high_priority_service(wsrep::high_priority_se THD* thd= hps->m_thd; delete hps; wsrep_store_threadvars(thd); + server_threads.erase(thd); delete thd; wsrep_delete_threadvars(); } diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index 09211a96e1e..339b138dbf2 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -731,7 +731,7 @@ static int sst_append_env_var(wsp::env& env, return -env.error(); } -#ifdef __WIN__ +#ifdef _WIN32 /* Space, single quote, ampersand, backquote, I/O redirection characters, caret, all brackets, plus, exclamation and comma @@ -805,7 +805,7 @@ static size_t estimate_cmd_len (bool* extra_args) else if (IS_REQ_ESCAPING(c)) { cmd_len++; -#ifdef __WIN__ +#ifdef _WIN32 quotation= true; #endif } @@ -834,7 +834,7 @@ static size_t estimate_cmd_len (bool* extra_args) else if (IS_REQ_ESCAPING(c)) { cmd_len++; -#ifdef __WIN__ +#ifdef _WIN32 quotation= true; #endif } @@ -890,7 +890,7 @@ static void copy_orig_argv (char* cmd_str) else if (IS_REQ_ESCAPING(c)) { plain= false; -#ifdef __WIN__ +#ifdef _WIN32 quotation= true; #endif } @@ -930,7 +930,7 @@ static void copy_orig_argv (char* cmd_str) c = *arg++; if (IS_REQ_ESCAPING(c)) { -#ifdef __WIN__ +#ifdef _WIN32 *cmd_str++ = c; #else *cmd_str++ = '\\'; @@ -970,7 +970,7 @@ static void copy_orig_argv (char* cmd_str) else if (IS_REQ_ESCAPING(c)) { plain= false; -#ifdef __WIN__ +#ifdef _WIN32 quotation= true; #endif } @@ -1001,7 +1001,7 @@ static void copy_orig_argv (char* cmd_str) { if (IS_REQ_ESCAPING(c)) { -#ifdef __WIN__ +#ifdef _WIN32 *cmd_str++ = c; #else *cmd_str++ = '\\'; @@ -1490,6 +1490,8 @@ static int sst_donate_mysqldump (const char* addr, wsrep::seqno::undefined())); Wsrep_server_state::instance().sst_sent(sst_sent_gtid, ret); + 
wsrep_donor_monitor_end(); + return ret; } @@ -1554,8 +1556,7 @@ static int run_sql_command(THD *thd, const char *query) return -1; } - mysql_parse(thd, thd->query(), thd->query_length(), &ps, FALSE, FALSE); - + mysql_parse(thd, thd->query(), thd->query_length(), &ps); if (thd->is_error()) { int const err= thd->get_stmt_da()->sql_errno(); @@ -1585,10 +1586,10 @@ static int sst_flush_tables(THD* thd) if (!is_supported_parser_charset(current_charset)) { /* Do not use non-supported parser character sets */ - WSREP_WARN("Current client character set is non-supported parser character set: %s", current_charset->csname); + WSREP_WARN("Current client character set is non-supported parser character set: %s", current_charset->cs_name.str); thd->variables.character_set_client= &my_charset_latin1; WSREP_WARN("For SST temporally setting character set to : %s", - my_charset_latin1.csname); + my_charset_latin1.cs_name.str); } if (run_sql_command(thd, "FLUSH TABLES WITH READ LOCK")) diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index fbca4a76b66..e610d3a6c2b 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -314,8 +314,7 @@ int wsrep_abort_thd(THD *bf_thd_ptr, THD *victim_thd_ptr, my_bool signal) THD *victim_thd= (THD *) victim_thd_ptr; THD *bf_thd= (THD *) bf_thd_ptr; - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); + mysql_mutex_lock(&victim_thd->LOCK_thd_data); /* Note that when you use RSU node is desynced from cluster, thus WSREP(thd) might not be true. @@ -328,14 +327,16 @@ int wsrep_abort_thd(THD *bf_thd_ptr, THD *victim_thd_ptr, my_bool signal) { WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", (bf_thd) ? 
(long long)bf_thd->real_id : 0, (long long)victim_thd->real_id); + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); ha_abort_transaction(bf_thd, victim_thd, signal); + mysql_mutex_lock(&victim_thd->LOCK_thd_data); } else { WSREP_DEBUG("wsrep_abort_thd not effective: %p %p", bf_thd, victim_thd); - wsrep_thd_UNLOCK(victim_thd); } + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); DBUG_RETURN(1); } @@ -344,9 +345,6 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) WSREP_LOG_THD(bf_thd, "BF aborter before"); WSREP_LOG_THD(victim_thd, "victim before"); - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); - #ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("sync.wsrep_bf_abort", { @@ -361,23 +359,23 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) if (WSREP(victim_thd) && !victim_thd->wsrep_trx().active()) { - WSREP_DEBUG("wsrep_bf_abort, BF abort for non active transaction"); - switch (victim_thd->wsrep_trx().state()) - { + WSREP_DEBUG("wsrep_bf_abort, BF abort for non active transaction." + " Victim state %s bf state %s", + wsrep::to_c_string(victim_thd->wsrep_trx().state()), + wsrep::to_c_string(bf_thd->wsrep_trx().state())); + + switch (victim_thd->wsrep_trx().state()) { case wsrep::transaction::s_aborting: /* fall through */ case wsrep::transaction::s_aborted: - WSREP_DEBUG("victim thd is already aborted or in aborting state."); - return false; - default: + WSREP_DEBUG("victim is aborting or has aborted"); break; + default: break; } - /* Test: galera_create_table_as_select. Here we enter wsrep-lib - were LOCK_thd_data will be acquired, thus we need to release it. - However, we can still hold LOCK_thd_kill to protect from - disconnect or delete. 
*/ - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); - wsrep_start_transaction(victim_thd, victim_thd->wsrep_next_trx_id()); - mysql_mutex_lock(&victim_thd->LOCK_thd_data); + /* victim may not have started transaction yet in wsrep context, but it may + have acquired MDL locks (due to DDL execution), and this has caused BF conflict. + such case does not require aborting in wsrep or replication provider state. + */ + return false; } bool ret; @@ -385,21 +383,12 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) if (wsrep_thd_is_toi(bf_thd)) { - /* Here we enter wsrep-lib were LOCK_thd_data will be acquired, - thus we need to release it. However, we can still hold - LOCK_thd_kill to protect from disconnect or delete. */ - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); ret= victim_thd->wsrep_cs().total_order_bf_abort(bf_seqno); - mysql_mutex_lock(&victim_thd->LOCK_thd_data); } else { - /* Test: mysql-wsrep-features#165. Here we enter wsrep-lib - were LOCK_thd_data will be acquired and later LOCK_thd_kill - thus we need to release them. */ - wsrep_thd_UNLOCK(victim_thd); + DBUG_ASSERT(WSREP(victim_thd) ? victim_thd->wsrep_trx().active() : 1); ret= victim_thd->wsrep_cs().bf_abort(bf_seqno); - wsrep_thd_LOCK(victim_thd); } if (ret) { diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index f6a7e25b945..5ec32d63626 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -93,19 +93,15 @@ static bool refresh_provider_options() } } -void wsrep_set_wsrep_on() +void wsrep_set_wsrep_on(THD* thd) { + if (thd) + thd->wsrep_was_on= WSREP_ON_; WSREP_PROVIDER_EXISTS_= wsrep_provider && strncasecmp(wsrep_provider, WSREP_NONE, FN_REFLEN); WSREP_ON_= global_system_variables.wsrep_on && WSREP_PROVIDER_EXISTS_; } -/* This is intentionally declared as a weak global symbol, so that -linking will succeed even if the server is built with a dynamically -linked InnoDB. 
*/ -ulong innodb_lock_schedule_algorithm __attribute__((weak)); -struct handlerton* innodb_hton_ptr __attribute__((weak)); - bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type) { if (var_type == OPT_GLOBAL) @@ -134,7 +130,7 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type) thd->variables.wsrep_on= global_system_variables.wsrep_on= saved_wsrep_on; } - wsrep_set_wsrep_on(); + wsrep_set_wsrep_on(thd); if (var_type == OPT_GLOBAL) { @@ -158,14 +154,6 @@ bool wsrep_on_check(sys_var *self, THD* thd, set_var* var) if (new_wsrep_on) { - if (innodb_hton_ptr && innodb_lock_schedule_algorithm != 0) - { - my_message(ER_WRONG_ARGUMENTS, " WSREP (galera) can't be enabled " - "if innodb_lock_schedule_algorithm=VATS. Please configure" - " innodb_lock_schedule_algorithm=FCFS and restart.", MYF(0)); - return true; - } - if (!WSREP_PROVIDER_EXISTS) { my_message(ER_WRONG_ARGUMENTS, "WSREP (galera) can't be enabled " @@ -520,7 +508,7 @@ bool wsrep_provider_update (sys_var *self, THD* thd, enum_var_type type) if (!rcode) refresh_provider_options(); - wsrep_set_wsrep_on(); + wsrep_set_wsrep_on(thd); mysql_mutex_lock(&LOCK_global_system_variables); return rcode; @@ -540,7 +528,7 @@ void wsrep_provider_init (const char* value) if (wsrep_provider) my_free((void *)wsrep_provider); wsrep_provider= my_strdup(PSI_INSTRUMENT_MEM, value, MYF(0)); - wsrep_set_wsrep_on(); + wsrep_set_wsrep_on(NULL); } bool wsrep_provider_options_check(sys_var *self, THD* thd, set_var* var) @@ -996,6 +984,11 @@ bool wsrep_max_ws_size_update(sys_var *self, THD *thd, enum_var_type) return refresh_provider_options(); } +bool wsrep_mode_check(sys_var *self, THD* thd, set_var* var) +{ + return false; +} + #if UNUSED /* eaec266eb16c (Sergei Golubchik 2014-09-28) */ static SHOW_VAR wsrep_status_vars[]= { @@ -1124,3 +1117,25 @@ bool wsrep_gtid_domain_id_update(sys_var* self, THD *thd, enum_var_type) wsrep_gtid_server.domain_id= wsrep_gtid_domain_id; return false; } + +bool 
wsrep_strict_ddl_update(sys_var *self, THD* thd, enum_var_type var_type) +{ + // In case user still sets wsrep_strict_ddl we set new + // option to wsrep_mode + if (wsrep_strict_ddl) + wsrep_mode|= WSREP_MODE_STRICT_REPLICATION; + else + wsrep_mode&= (~WSREP_MODE_STRICT_REPLICATION); + return false; +} + +bool wsrep_replicate_myisam_update(sys_var *self, THD* thd, enum_var_type var_type) +{ + // In case user still sets wsrep_replicate_myisam we set new + // option to wsrep_mode + if (wsrep_replicate_myisam) + wsrep_mode|= WSREP_MODE_REPLICATE_MYISAM; + else + wsrep_mode&= (~WSREP_MODE_REPLICATE_MYISAM); + return false; +} diff --git a/sql/wsrep_var.h b/sql/wsrep_var.h index 997784674dd..7908e873795 100644 --- a/sql/wsrep_var.h +++ b/sql/wsrep_var.h @@ -36,7 +36,7 @@ class set_var; class THD; int wsrep_init_vars(); -void wsrep_set_wsrep_on(); +void wsrep_set_wsrep_on(THD *thd); #define CHECK_ARGS (sys_var *self, THD* thd, set_var *var) #define UPDATE_ARGS (sys_var *self, THD* thd, enum_var_type type) @@ -107,6 +107,10 @@ extern bool wsrep_debug_update UPDATE_ARGS; extern bool wsrep_gtid_seq_no_check CHECK_ARGS; extern bool wsrep_gtid_domain_id_update UPDATE_ARGS; + +extern bool wsrep_mode_check CHECK_ARGS; +extern bool wsrep_strict_ddl_update UPDATE_ARGS; +extern bool wsrep_replicate_myisam_update UPDATE_ARGS; #else /* WITH_WSREP */ #define wsrep_provider_init(X) diff --git a/sql/xa.cc b/sql/xa.cc index e0defcb92ed..af7c7388c57 100644 --- a/sql/xa.cc +++ b/sql/xa.cc @@ -1001,23 +1001,29 @@ static my_bool xa_recover_callback_verbose(XID_cache_element *xs, } -bool mysql_xa_recover(THD *thd) +/** + Collect field names of result set that will be sent to a client in result of + handling XA RECOVER statement. 
+ + @param thd Thread data object + @param[out] fields List of fields whose metadata should be collected for + sending to client +*/ + +void xa_recover_get_fields(THD *thd, List<Item> *field_list, + my_hash_walk_action *action) { - List<Item> field_list; - Protocol *protocol= thd->protocol; MEM_ROOT *mem_root= thd->mem_root; - my_hash_walk_action action; - DBUG_ENTER("mysql_xa_recover"); - field_list.push_back(new (mem_root) - Item_int(thd, "formatID", 0, - MY_INT32_NUM_DECIMAL_DIGITS), mem_root); - field_list.push_back(new (mem_root) - Item_int(thd, "gtrid_length", 0, - MY_INT32_NUM_DECIMAL_DIGITS), mem_root); - field_list.push_back(new (mem_root) - Item_int(thd, "bqual_length", 0, - MY_INT32_NUM_DECIMAL_DIGITS), mem_root); + field_list->push_back(new (mem_root) + Item_int(thd, "formatID", 0, + MY_INT32_NUM_DECIMAL_DIGITS), mem_root); + field_list->push_back(new (mem_root) + Item_int(thd, "gtrid_length", 0, + MY_INT32_NUM_DECIMAL_DIGITS), mem_root); + field_list->push_back(new (mem_root) + Item_int(thd, "bqual_length", 0, + MY_INT32_NUM_DECIMAL_DIGITS), mem_root); { uint len; CHARSET_INFO *cs; @@ -1026,18 +1032,30 @@ bool mysql_xa_recover(THD *thd) { len= SQL_XIDSIZE; cs= &my_charset_utf8mb3_general_ci; - action= (my_hash_walk_action) xa_recover_callback_verbose; + if (action) + *action= (my_hash_walk_action) xa_recover_callback_verbose; } else { len= XIDDATASIZE; cs= &my_charset_bin; - action= (my_hash_walk_action) xa_recover_callback_short; + if (action) + *action= (my_hash_walk_action) xa_recover_callback_short; } - field_list.push_back(new (mem_root) - Item_empty_string(thd, "data", len, cs), mem_root); + field_list->push_back(new (mem_root) + Item_empty_string(thd, "data", len, cs), mem_root); } +} + +bool mysql_xa_recover(THD *thd) +{ + List<Item> field_list; + Protocol *protocol= thd->protocol; + my_hash_walk_action action; + DBUG_ENTER("mysql_xa_recover"); + + xa_recover_get_fields(thd, &field_list, &action); if 
(protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) @@ -53,4 +53,7 @@ bool trans_xa_rollback(THD *thd); bool trans_xa_detach(THD *thd); bool mysql_xa_recover(THD *thd); +void xa_recover_get_fields(THD *thd, List<Item> *field_list, + my_hash_walk_action *action); + #endif /* XA_INCLUDED */ |