summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
Diffstat (limited to 'sql')
-rw-r--r--sql/CMakeLists.txt60
-rw-r--r--sql/bounded_queue.h2
-rw-r--r--sql/client_settings.h2
-rw-r--r--sql/compat56.cc6
-rw-r--r--sql/create_options.cc13
-rw-r--r--sql/datadict.cc2
-rw-r--r--sql/debug_sync.cc65
-rw-r--r--sql/debug_sync.h3
-rw-r--r--sql/derror.cc255
-rw-r--r--sql/derror.h3
-rw-r--r--sql/discover.cc2
-rw-r--r--sql/event_data_objects.cc6
-rw-r--r--sql/event_data_objects.h4
-rw-r--r--sql/event_db_repository.cc15
-rw-r--r--sql/event_parse_data.cc12
-rw-r--r--sql/event_queue.cc18
-rw-r--r--sql/event_scheduler.cc37
-rw-r--r--sql/events.cc8
-rw-r--r--sql/field.cc1041
-rw-r--r--sql/field.h1010
-rw-r--r--sql/field_conv.cc404
-rw-r--r--sql/filesort.cc914
-rw-r--r--sql/filesort.h148
-rw-r--r--sql/filesort_utils.cc68
-rw-r--r--sql/filesort_utils.h34
-rw-r--r--sql/gcalc_slicescan.cc11
-rw-r--r--sql/gcalc_slicescan.h7
-rw-r--r--sql/gcalc_tools.cc11
-rw-r--r--sql/gcalc_tools.h1
-rw-r--r--sql/gen_lex_hash.cc30
-rw-r--r--sql/gen_lex_token.cc2
-rw-r--r--sql/ha_partition.cc163
-rw-r--r--sql/ha_partition.h19
-rw-r--r--sql/handler.cc482
-rw-r--r--sql/handler.h390
-rw-r--r--sql/hostname.cc4
-rw-r--r--sql/hostname.h2
-rw-r--r--sql/init.h2
-rw-r--r--sql/item.cc1989
-rw-r--r--sql/item.h1461
-rw-r--r--sql/item_buff.cc91
-rw-r--r--sql/item_cmpfunc.cc1015
-rw-r--r--sql/item_cmpfunc.h331
-rw-r--r--sql/item_create.cc1007
-rw-r--r--sql/item_func.cc326
-rw-r--r--sql/item_func.h579
-rw-r--r--sql/item_geofunc.cc154
-rw-r--r--sql/item_geofunc.h195
-rw-r--r--sql/item_inetfunc.cc26
-rw-r--r--sql/item_inetfunc.h29
-rw-r--r--sql/item_jsonfunc.cc3308
-rw-r--r--sql/item_jsonfunc.h454
-rw-r--r--sql/item_row.cc23
-rw-r--r--sql/item_row.h36
-rw-r--r--sql/item_strfunc.cc338
-rw-r--r--sql/item_strfunc.h371
-rw-r--r--sql/item_subselect.cc232
-rw-r--r--sql/item_subselect.h100
-rw-r--r--sql/item_sum.cc442
-rw-r--r--sql/item_sum.h260
-rw-r--r--sql/item_timefunc.cc241
-rw-r--r--sql/item_timefunc.h502
-rw-r--r--sql/item_windowfunc.cc451
-rw-r--r--sql/item_windowfunc.h975
-rw-r--r--sql/item_xmlfunc.cc103
-rw-r--r--sql/item_xmlfunc.h15
-rw-r--r--sql/key.cc50
-rw-r--r--sql/key.h2
-rw-r--r--sql/lex.h29
-rw-r--r--sql/lock.cc52
-rw-r--r--sql/log.cc227
-rw-r--r--sql/log.h23
-rw-r--r--sql/log_event.cc1409
-rw-r--r--sql/log_event.h285
-rw-r--r--sql/log_event_old.cc47
-rw-r--r--sql/log_slow.h6
-rw-r--r--sql/mdl.cc169
-rw-r--r--sql/mdl.h33
-rw-r--r--sql/mf_iocache.cc3
-rw-r--r--sql/mf_iocache_encr.cc12
-rw-r--r--sql/multi_range_read.cc13
-rw-r--r--sql/mysql_install_db.cc22
-rw-r--r--sql/mysqld.cc1051
-rw-r--r--sql/mysqld.h85
-rw-r--r--sql/net_serv.cc44
-rw-r--r--sql/opt_range.cc476
-rw-r--r--sql/opt_range.h22
-rw-r--r--sql/opt_range_mrr.cc66
-rw-r--r--sql/opt_subselect.cc56
-rw-r--r--sql/opt_sum.cc10
-rw-r--r--sql/opt_table_elimination.cc12
-rw-r--r--sql/parse_file.cc10
-rw-r--r--sql/partition_element.h2
-rw-r--r--sql/partition_info.cc558
-rw-r--r--sql/partition_info.h69
-rw-r--r--sql/password.c27
-rw-r--r--sql/plistsort.c10
-rw-r--r--sql/procedure.h11
-rw-r--r--sql/protocol.cc173
-rw-r--r--sql/protocol.h2
-rw-r--r--sql/records.cc83
-rw-r--r--sql/records.h14
-rw-r--r--sql/rpl_filter.cc2
-rw-r--r--sql/rpl_gtid.cc15
-rw-r--r--sql/rpl_gtid.h3
-rw-r--r--sql/rpl_mi.cc8
-rw-r--r--sql/rpl_parallel.cc84
-rw-r--r--sql/rpl_record.cc28
-rw-r--r--sql/rpl_record_old.cc6
-rw-r--r--sql/rpl_rli.cc339
-rw-r--r--sql/rpl_rli.h125
-rw-r--r--sql/rpl_tblmap.cc14
-rw-r--r--sql/rpl_utility.cc202
-rw-r--r--sql/scheduler.cc24
-rw-r--r--sql/scheduler.h2
-rw-r--r--sql/session_tracker.cc1712
-rw-r--r--sql/session_tracker.h304
-rw-r--r--sql/set_var.cc101
-rw-r--r--sql/set_var.h22
-rw-r--r--sql/share/CMakeLists.txt1
-rw-r--r--sql/share/charsets/Index.xml139
-rw-r--r--sql/share/errmsg-utf8.txt1331
-rw-r--r--sql/signal_handler.cc13
-rw-r--r--sql/slave.cc474
-rw-r--r--sql/slave.h25
-rw-r--r--sql/sp.cc50
-rw-r--r--sql/sp.h5
-rw-r--r--sql/sp_head.cc182
-rw-r--r--sql/sp_head.h14
-rw-r--r--sql/sp_pcontext.cc2
-rw-r--r--sql/sp_pcontext.h18
-rw-r--r--sql/sp_rcontext.cc16
-rw-r--r--sql/spatial.cc863
-rw-r--r--sql/spatial.h45
-rw-r--r--sql/sql_acl.cc2080
-rw-r--r--sql/sql_acl.h67
-rw-r--r--sql/sql_admin.cc351
-rw-r--r--sql/sql_alter.cc7
-rw-r--r--sql/sql_alter.h7
-rw-r--r--sql/sql_analyse.cc8
-rw-r--r--sql/sql_analyze_stmt.cc93
-rw-r--r--sql/sql_analyze_stmt.h171
-rw-r--r--sql/sql_array.h9
-rw-r--r--sql/sql_audit.cc162
-rw-r--r--sql/sql_audit.h364
-rw-r--r--sql/sql_base.cc1523
-rw-r--r--sql/sql_base.h62
-rw-r--r--sql/sql_basic_types.h25
-rw-r--r--sql/sql_binlog.cc156
-rw-r--r--sql/sql_bootstrap.cc10
-rw-r--r--sql/sql_cache.cc537
-rw-r--r--sql/sql_cache.h7
-rw-r--r--sql/sql_class.cc1079
-rw-r--r--sql/sql_class.h745
-rw-r--r--sql/sql_cmd.h3
-rw-r--r--sql/sql_connect.cc177
-rw-r--r--sql/sql_connect.h37
-rw-r--r--sql/sql_const.h4
-rw-r--r--sql/sql_cte.cc1447
-rw-r--r--sql/sql_cte.h450
-rw-r--r--sql/sql_cursor.cc4
-rw-r--r--sql/sql_db.cc35
-rw-r--r--sql/sql_delete.cc108
-rw-r--r--sql/sql_derived.cc392
-rw-r--r--sql/sql_derived.h4
-rw-r--r--sql/sql_digest.cc2
-rw-r--r--sql/sql_do.cc2
-rw-r--r--sql/sql_error.cc27
-rw-r--r--sql/sql_error.h44
-rw-r--r--sql/sql_explain.cc270
-rw-r--r--sql/sql_explain.h98
-rw-r--r--sql/sql_get_diagnostics.cc2
-rw-r--r--sql/sql_get_diagnostics.h2
-rw-r--r--sql/sql_handler.cc91
-rw-r--r--sql/sql_help.cc19
-rw-r--r--sql/sql_hset.h14
-rw-r--r--sql/sql_insert.cc535
-rw-r--r--sql/sql_join_cache.cc593
-rw-r--r--sql/sql_lex.cc493
-rw-r--r--sql/sql_lex.h301
-rw-r--r--sql/sql_list.cc18
-rw-r--r--sql/sql_list.h28
-rw-r--r--sql/sql_load.cc880
-rw-r--r--sql/sql_locale.cc5
-rw-r--r--sql/sql_locale.h2
-rw-r--r--sql/sql_manager.cc2
-rw-r--r--sql/sql_parse.cc952
-rw-r--r--sql/sql_parse.h8
-rw-r--r--sql/sql_partition.cc891
-rw-r--r--sql/sql_partition.h18
-rw-r--r--sql/sql_partition_admin.cc57
-rw-r--r--sql/sql_plugin.cc114
-rw-r--r--sql/sql_plugin.h25
-rw-r--r--sql/sql_plugin_services.ic17
-rw-r--r--sql/sql_prepare.cc1043
-rw-r--r--sql/sql_prepare.h9
-rw-r--r--sql/sql_priv.h10
-rw-r--r--sql/sql_profile.cc18
-rw-r--r--sql/sql_profile.h1
-rw-r--r--sql/sql_reload.cc4
-rw-r--r--sql/sql_rename.cc5
-rw-r--r--sql/sql_repl.cc71
-rw-r--r--sql/sql_repl.h1
-rw-r--r--sql/sql_select.cc4953
-rw-r--r--sql/sql_select.h610
-rw-r--r--sql/sql_servers.cc22
-rw-r--r--sql/sql_show.cc871
-rw-r--r--sql/sql_show.h6
-rw-r--r--sql/sql_sort.h7
-rw-r--r--sql/sql_statistics.cc67
-rw-r--r--sql/sql_statistics.h2
-rw-r--r--sql/sql_string.cc91
-rw-r--r--sql/sql_string.h138
-rw-r--r--sql/sql_table.cc1366
-rw-r--r--sql/sql_table.h9
-rw-r--r--sql/sql_test.cc78
-rw-r--r--sql/sql_test.h2
-rw-r--r--sql/sql_time.cc48
-rw-r--r--sql/sql_time.h14
-rw-r--r--sql/sql_trigger.cc1399
-rw-r--r--sql/sql_trigger.h179
-rw-r--r--sql/sql_type.cc476
-rw-r--r--sql/sql_type.h258
-rw-r--r--sql/sql_udf.cc5
-rw-r--r--sql/sql_union.cc474
-rw-r--r--sql/sql_update.cc199
-rw-r--r--sql/sql_view.cc48
-rw-r--r--sql/sql_view.h7
-rw-r--r--sql/sql_window.cc3069
-rw-r--r--sql/sql_window.h235
-rw-r--r--sql/sql_yacc.yy2387
-rw-r--r--sql/strfunc.cc6
-rw-r--r--sql/structs.h156
-rw-r--r--sql/sys_vars.cc388
-rw-r--r--sql/sys_vars.ic328
-rw-r--r--sql/sys_vars_shared.h1
-rw-r--r--sql/table.cc2236
-rw-r--r--sql/table.h307
-rw-r--r--sql/table_cache.cc632
-rw-r--r--sql/table_cache.h231
-rw-r--r--sql/temporary_tables.cc1513
-rw-r--r--sql/thr_malloc.cc53
-rw-r--r--sql/thr_malloc.h7
-rw-r--r--sql/threadpool.h122
-rw-r--r--sql/threadpool_common.cc296
-rw-r--r--sql/threadpool_generic.cc (renamed from sql/threadpool_unix.cc)663
-rw-r--r--sql/threadpool_win.cc659
-rw-r--r--sql/transaction.cc95
-rw-r--r--sql/transaction.h2
-rw-r--r--sql/tztime.cc4
-rw-r--r--sql/udf_example.c2
-rw-r--r--sql/uniques.cc78
-rw-r--r--sql/uniques.h100
-rw-r--r--sql/unireg.cc287
-rw-r--r--sql/unireg.h58
-rw-r--r--sql/winservice.c2
-rw-r--r--sql/wsrep_applier.cc15
-rw-r--r--sql/wsrep_binlog.cc25
-rw-r--r--sql/wsrep_check_opts.cc2
-rw-r--r--sql/wsrep_dummy.cc9
-rw-r--r--sql/wsrep_hton.cc29
-rw-r--r--sql/wsrep_mysqld.cc295
-rw-r--r--sql/wsrep_mysqld.h6
-rw-r--r--sql/wsrep_notify.cc1
-rw-r--r--sql/wsrep_priv.h11
-rw-r--r--sql/wsrep_sst.cc122
-rw-r--r--sql/wsrep_sst.h9
-rw-r--r--sql/wsrep_thd.cc63
-rw-r--r--sql/wsrep_utils.cc2
-rw-r--r--sql/wsrep_utils.h2
-rw-r--r--sql/wsrep_var.cc142
-rw-r--r--sql/wsrep_var.h4
-rw-r--r--sql/wsrep_xid.cc64
-rw-r--r--sql/wsrep_xid.h6
274 files changed, 52551 insertions, 21940 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index fa69d5c3066..1b52c26ce22 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -75,7 +75,7 @@ ENDIF()
SET (SQL_SOURCE
../sql-common/client.c compat56.cc derror.cc des_key_file.cc
- discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
+ discover.cc ../sql-common/errmsg.c field.cc field_conv.cc
filesort_utils.cc
filesort.cc gstream.cc
signal_handler.cc
@@ -86,12 +86,14 @@ SET (SQL_SOURCE
key.cc log.cc lock.cc
log_event.cc rpl_record.cc rpl_reporting.cc
log_event_old.cc rpl_record_old.cc
- mf_iocache.cc my_decimal.cc ../sql-common/my_time.c
+ mf_iocache.cc my_decimal.cc
mysqld.cc net_serv.cc keycaches.cc
../sql-common/client_plugin.c
opt_range.cc opt_sum.cc
../sql-common/pack.c parse_file.cc password.c procedure.cc
- protocol.cc records.cc repl_failsafe.cc rpl_filter.cc set_var.cc
+ protocol.cc records.cc repl_failsafe.cc rpl_filter.cc
+ session_tracker.cc
+ set_var.cc
slave.cc sp.cc sp_cache.cc sp_head.cc sp_pcontext.cc
sp_rcontext.cc spatial.cc sql_acl.cc sql_analyse.cc sql_base.cc
sql_cache.cc sql_class.cc sql_client.cc sql_crypt.cc
@@ -108,7 +110,8 @@ SET (SQL_SOURCE
sql_statistics.cc sql_string.cc
sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc
sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc
- sql_time.cc tztime.cc uniques.cc unireg.cc item_xmlfunc.cc
+ sql_time.cc tztime.cc unireg.cc item_xmlfunc.cc
+ uniques.cc
rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_data_objects.cc
event_queue.cc event_db_repository.cc
sql_tablespace.cc events.cc ../sql-common/my_user.c
@@ -129,12 +132,14 @@ SET (SQL_SOURCE
opt_table_elimination.cc sql_expression_cache.cc
gcalc_slicescan.cc gcalc_tools.cc
threadpool_common.cc ../sql-common/mysql_async.c
- my_apc.cc mf_iocache_encr.cc
+ my_apc.cc mf_iocache_encr.cc item_jsonfunc.cc
my_json_writer.cc
rpl_gtid.cc rpl_parallel.cc
sql_type.cc
+ item_windowfunc.cc sql_window.cc
+ sql_cte.cc
${WSREP_SOURCES}
- table_cache.cc encryption.cc
+ table_cache.cc encryption.cc temporary_tables.cc
${CMAKE_CURRENT_BINARY_DIR}/sql_builtin.cc
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
@@ -147,9 +152,9 @@ IF (CMAKE_SYSTEM_NAME MATCHES "Linux" OR
ADD_DEFINITIONS(-DHAVE_POOL_OF_THREADS)
IF(WIN32)
SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc)
- ELSE()
- SET(SQL_SOURCE ${SQL_SOURCE} threadpool_unix.cc)
ENDIF()
+ SET(SQL_SOURCE ${SQL_SOURCE} threadpool_generic.cc)
+
ENDIF()
MYSQL_ADD_PLUGIN(partition ha_partition.cc STORAGE_ENGINE DEFAULT STATIC_ONLY
@@ -159,7 +164,7 @@ ADD_LIBRARY(sql STATIC ${SQL_SOURCE})
ADD_DEPENDENCIES(sql GenServerSource)
DTRACE_INSTRUMENT(sql)
TARGET_LINK_LIBRARIES(sql ${MYSQLD_STATIC_PLUGIN_LIBS}
- mysys mysys_ssl dbug strings vio pcre ${LIBJEMALLOC}
+ mysys mysys_ssl dbug strings vio pcre
${LIBWRAP} ${LIBCRYPT} ${LIBDL} ${CMAKE_THREAD_LIBS_INIT}
${WSREP_LIB}
${SSL_LIBRARIES}
@@ -230,6 +235,26 @@ IF(MSVC AND NOT WITHOUT_DYNAMIC_PLUGINS)
SET(MYSQLD_LIB_BYPRODUCTS BYPRODUCTS ${MYSQLD_DEF} ${MYSQLD_LIB} ${MYSQLD_EXP})
ENDIF()
+ # Create a cmake script to generate import and export libs
+ # from a .def file
+ SET(CMAKE_CONFIGURABLE_FILE_CONTENT "
+ IF ((mysqld_lib.def IS_NEWER_THAN mysqld_lib.lib) OR
+ (mysqld_lib.def IS_NEWER_THAN mysqld_lib.exp))
+ FILE(REMOVE mysqld_lib.lib mysqld_lib.exp)
+ SET(ENV{VS_UNICODE_OUTPUT})
+ EXECUTE_PROCESS (
+ COMMAND \"${CMAKE_LINKER}\" /lib /NAME:mysqld.exe \"/DEF:${MYSQLD_DEF}\" /MACHINE:${_PLATFORM}
+ RESULT_VARIABLE ret)
+ IF(NOT ret EQUAL 0)
+ MESSAGE(FATAL_ERROR \"process failed ret=\${ret}\")
+ ENDIF()
+ ENDIF()
+ ")
+
+ CONFIGURE_FILE(
+ ${PROJECT_SOURCE_DIR}/cmake/configurable_file_content.in
+ make_mysqld_lib.cmake)
+
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/mysqld_lib.stamp
${MYSQLD_LIB_BYPRODUCTS}
@@ -355,13 +380,6 @@ IF(WIN32 OR HAVE_DLOPEN AND NOT DISABLE_SHARED)
TARGET_LINK_LIBRARIES(udf_example strings)
ENDIF()
-FOREACH(tool glibtoolize libtoolize aclocal autoconf autoheader automake gtar
- tar git)
- STRING(TOUPPER ${tool} TOOL)
- FIND_PROGRAM(${TOOL}_EXECUTABLE ${tool} DOC "path to the executable")
- MARK_AS_ADVANCED(${TOOL}_EXECUTABLE)
-ENDFOREACH()
-
CONFIGURE_FILE(
${CMAKE_SOURCE_DIR}/cmake/make_dist.cmake.in
${CMAKE_BINARY_DIR}/make_dist.cmake @ONLY)
@@ -400,15 +418,15 @@ IF(WIN32 AND MYSQLD_EXECUTABLE)
ENDIF()
MAKE_DIRECTORY(${CMAKE_CURRENT_BINARY_DIR}/data)
ADD_CUSTOM_COMMAND(
- OUTPUT initdb.dep
- COMMAND ${CMAKE_COMMAND}
- ${CONFIG_PARAM} -P ${CMAKE_CURRENT_BINARY_DIR}/create_initial_db.cmake
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/initdb.dep
+ COMMAND ${CMAKE_COMMAND} ${CONFIG_PARAM} -P ${CMAKE_CURRENT_BINARY_DIR}/create_initial_db.cmake
+ COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/initdb.dep
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/data
DEPENDS mysqld
)
ADD_CUSTOM_TARGET(initial_database
ALL
- DEPENDS initdb.dep
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/initdb.dep
)
INSTALL(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/data DESTINATION .
COMPONENT DataFiles
@@ -464,7 +482,7 @@ IF(WIN32)
TARGET_LINK_LIBRARIES(mysql_upgrade_service mysys winservice)
ENDIF(WIN32)
-INSTALL(DIRECTORY . DESTINATION ${INSTALL_INCLUDEDIR}/private COMPONENT Development
+INSTALL(DIRECTORY . DESTINATION ${INSTALL_INCLUDEDIR}/server/private COMPONENT Development
FILES_MATCHING PATTERN "*.h"
PATTERN share EXCLUDE
PATTERN CMakeFiles EXCLUDE)
diff --git a/sql/bounded_queue.h b/sql/bounded_queue.h
index 070ae46c347..3573c5ceb27 100644
--- a/sql/bounded_queue.h
+++ b/sql/bounded_queue.h
@@ -16,11 +16,11 @@
#ifndef BOUNDED_QUEUE_INCLUDED
#define BOUNDED_QUEUE_INCLUDED
-#include <string.h>
#include "my_global.h"
#include "my_base.h"
#include "my_sys.h"
#include "queues.h"
+#include <string.h>
class Sort_param;
diff --git a/sql/client_settings.h b/sql/client_settings.h
index f2ad1797b8e..486862b276d 100644
--- a/sql/client_settings.h
+++ b/sql/client_settings.h
@@ -28,7 +28,7 @@
When adding capabilities here, consider if they should be also added to
the libmysql version.
*/
-#define CLIENT_CAPABILITIES (CLIENT_LONG_PASSWORD | \
+#define CLIENT_CAPABILITIES (CLIENT_MYSQL | \
CLIENT_LONG_FLAG | \
CLIENT_TRANSACTIONS | \
CLIENT_PROTOCOL_41 | \
diff --git a/sql/compat56.cc b/sql/compat56.cc
index 357b4bcf78b..b2e67a50491 100644
--- a/sql/compat56.cc
+++ b/sql/compat56.cc
@@ -65,7 +65,7 @@ void TIME_from_longlong_time_packed(MYSQL_TIME *ltime, longlong tmp)
long hms;
if ((ltime->neg= (tmp < 0)))
tmp= -tmp;
- hms= MY_PACKED_TIME_GET_INT_PART(tmp);
+ hms= (long) MY_PACKED_TIME_GET_INT_PART(tmp);
ltime->year= (uint) 0;
ltime->month= (uint) 0;
ltime->day= (uint) 0;
@@ -267,11 +267,11 @@ void TIME_from_longlong_datetime_packed(MYSQL_TIME *ltime, longlong tmp)
ltime->day= ymd % (1 << 5);
ltime->month= ym % 13;
- ltime->year= ym / 13;
+ ltime->year= (uint) (ym / 13);
ltime->second= hms % (1 << 6);
ltime->minute= (hms >> 6) % (1 << 6);
- ltime->hour= (hms >> 12);
+ ltime->hour= (uint) (hms >> 12);
ltime->time_type= MYSQL_TIMESTAMP_DATETIME;
}
diff --git a/sql/create_options.cc b/sql/create_options.cc
index f6bf391e294..53258dac3fc 100644
--- a/sql/create_options.cc
+++ b/sql/create_options.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010 Monty Program Ab
+/* Copyright (C) 2010, 2017, MariaDB Corporation Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -23,7 +23,7 @@
#include <my_getopt.h>
#include "set_var.h"
-#define FRM_QUOTED_VALUE 0x8000
+#define FRM_QUOTED_VALUE 0x8000U
/**
Links this item to the given list end
@@ -124,8 +124,8 @@ static bool set_one_value(ha_create_table_option *opt,
MEM_ROOT *root)
{
DBUG_ENTER("set_one_value");
- DBUG_PRINT("enter", ("opt: 0x%lx type: %u name '%s' value: '%s'",
- (ulong) opt,
+ DBUG_PRINT("enter", ("opt: %p type: %u name '%s' value: '%s'",
+ opt,
opt->type, opt->name,
(value->str ? value->str : "<DEFAULT>")));
switch (opt->type)
@@ -184,7 +184,7 @@ static bool set_one_value(ha_create_table_option *opt,
{
for (end=start;
*end && *end != ',';
- end+= my_mbcharlen(system_charset_info, *end)) /* no-op */;
+ end++) /* no-op */;
if (!my_strnncoll(system_charset_info,
(uchar*)start, end-start,
(uchar*)value->str, value->length))
@@ -613,7 +613,8 @@ uchar *engine_option_value::frm_image(uchar *buff)
{
if (value.str)
{
- *buff++= name.length;
+ DBUG_ASSERT(name.length <= 0xff);
+ *buff++= (uchar)name.length;
memcpy(buff, name.str, name.length);
buff+= name.length;
int2store(buff, value.length | (quoted_value ? FRM_QUOTED_VALUE : 0));
diff --git a/sql/datadict.cc b/sql/datadict.cc
index f01d61f531b..ec3d65f0113 100644
--- a/sql/datadict.cc
+++ b/sql/datadict.cc
@@ -116,7 +116,7 @@ frm_type_enum dd_frm_type(THD *thd, char *path, LEX_STRING *engine_name)
if (mysql_file_seek(file, 0, SEEK_SET, MYF(MY_WME)))
goto err;
- if (read_string(file, &frm_image, state.st_size))
+ if (read_string(file, &frm_image, (size_t)state.st_size))
goto err;
if ((n_length= uint4korr(frm_image+55)))
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index 3dfbcdbcf81..2d34322c59e 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -549,7 +549,7 @@ static void debug_sync_reset(THD *thd)
static void debug_sync_remove_action(st_debug_sync_control *ds_control,
st_debug_sync_action *action)
{
- uint dsp_idx= action - ds_control->ds_action;
+ uint dsp_idx= (uint)(action - ds_control->ds_action);
DBUG_ENTER("debug_sync_remove_action");
DBUG_ASSERT(ds_control);
DBUG_ASSERT(ds_control == current_thd->debug_sync_control);
@@ -681,8 +681,8 @@ static st_debug_sync_action *debug_sync_get_action(THD *thd,
}
DBUG_ASSERT(action >= ds_control->ds_action);
DBUG_ASSERT(action < ds_control->ds_action + ds_control->ds_active);
- DBUG_PRINT("debug_sync", ("action: 0x%lx array: 0x%lx count: %u",
- (long) action, (long) ds_control->ds_action,
+ DBUG_PRINT("debug_sync", ("action: %p array: %p count: %u",
+ action, ds_control->ds_action,
ds_control->ds_active));
DBUG_RETURN(action);
@@ -847,16 +847,16 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action)
to the string terminator ASCII NUL ('\0').
*/
-static char *debug_sync_token(char **token_p, uint *token_length_p, char *ptr)
+static char *debug_sync_token(char **token_p, uint *token_length_p,
+ char *ptr, char *ptrend)
{
DBUG_ASSERT(token_p);
DBUG_ASSERT(token_length_p);
DBUG_ASSERT(ptr);
/* Skip leading space */
- while (my_isspace(system_charset_info, *ptr))
- ptr+= my_mbcharlen(system_charset_info, (uchar) *ptr);
-
+ ptr+= system_charset_info->cset->scan(system_charset_info,
+ ptr, ptrend, MY_SEQ_SPACES);
if (!*ptr)
{
ptr= NULL;
@@ -867,17 +867,18 @@ static char *debug_sync_token(char **token_p, uint *token_length_p, char *ptr)
*token_p= ptr;
/* Find token end. */
- while (*ptr && !my_isspace(system_charset_info, *ptr))
- ptr+= my_mbcharlen(system_charset_info, (uchar) *ptr);
+ ptr+= system_charset_info->cset->scan(system_charset_info,
+ ptr, ptrend, MY_SEQ_NONSPACES);
/* Get token length. */
- *token_length_p= ptr - *token_p;
+ *token_length_p= (uint)(ptr - *token_p);
/* If necessary, terminate token. */
if (*ptr)
{
+ DBUG_ASSERT(ptr < ptrend);
/* Get terminator character length. */
- uint mbspacelen= my_mbcharlen(system_charset_info, (uchar) *ptr);
+ uint mbspacelen= my_charlen_fix(system_charset_info, ptr, ptrend);
/* Terminate token. */
*ptr= '\0';
@@ -886,8 +887,8 @@ static char *debug_sync_token(char **token_p, uint *token_length_p, char *ptr)
ptr+= mbspacelen;
/* Skip trailing space */
- while (my_isspace(system_charset_info, *ptr))
- ptr+= my_mbcharlen(system_charset_info, (uchar) *ptr);
+ ptr+= system_charset_info->cset->scan(system_charset_info,
+ ptr, ptrend, MY_SEQ_SPACES);
}
end:
@@ -917,7 +918,8 @@ static char *debug_sync_token(char **token_p, uint *token_length_p, char *ptr)
undefined in this case.
*/
-static char *debug_sync_number(ulong *number_p, char *actstrptr)
+static char *debug_sync_number(ulong *number_p, char *actstrptr,
+ char *actstrend)
{
char *ptr;
char *ept;
@@ -927,7 +929,7 @@ static char *debug_sync_number(ulong *number_p, char *actstrptr)
DBUG_ASSERT(actstrptr);
/* Get token from string. */
- if (!(ptr= debug_sync_token(&token, &token_length, actstrptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, actstrptr, actstrend)))
goto end;
*number_p= strtoul(token, &ept, 10);
@@ -971,7 +973,7 @@ static char *debug_sync_number(ulong *number_p, char *actstrptr)
for the string.
*/
-static bool debug_sync_eval_action(THD *thd, char *action_str)
+static bool debug_sync_eval_action(THD *thd, char *action_str, char *action_end)
{
st_debug_sync_action *action= NULL;
const char *errmsg;
@@ -986,7 +988,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
/*
Get debug sync point name. Or a special command.
*/
- if (!(ptr= debug_sync_token(&token, &token_length, action_str)))
+ if (!(ptr= debug_sync_token(&token, &token_length, action_str, action_end)))
{
errmsg= "Missing synchronization point name";
goto err;
@@ -1009,7 +1011,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
/*
Get kind of action to be taken at sync point.
*/
- if (!(ptr= debug_sync_token(&token, &token_length, ptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, ptr, action_end)))
{
/* No action present. Try special commands. Token unchanged. */
@@ -1090,7 +1092,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
if (!my_strcasecmp(system_charset_info, token, "SIGNAL"))
{
/* It is SIGNAL. Signal name must follow. */
- if (!(ptr= debug_sync_token(&token, &token_length, ptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, ptr, action_end)))
{
errmsg= "Missing signal name after action SIGNAL";
goto err;
@@ -1108,7 +1110,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
action->execute= 1;
/* Get next token. If none follows, set action. */
- if (!(ptr= debug_sync_token(&token, &token_length, ptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, ptr, action_end)))
goto set_action;
}
@@ -1118,7 +1120,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
if (!my_strcasecmp(system_charset_info, token, "WAIT_FOR"))
{
/* It is WAIT_FOR. Wait_for signal name must follow. */
- if (!(ptr= debug_sync_token(&token, &token_length, ptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, ptr, action_end)))
{
errmsg= "Missing signal name after action WAIT_FOR";
goto err;
@@ -1137,7 +1139,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
action->timeout= opt_debug_sync_timeout;
/* Get next token. If none follows, set action. */
- if (!(ptr= debug_sync_token(&token, &token_length, ptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, ptr, action_end)))
goto set_action;
/*
@@ -1146,14 +1148,14 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
if (!my_strcasecmp(system_charset_info, token, "TIMEOUT"))
{
/* It is TIMEOUT. Number must follow. */
- if (!(ptr= debug_sync_number(&action->timeout, ptr)))
+ if (!(ptr= debug_sync_number(&action->timeout, ptr, action_end)))
{
errmsg= "Missing valid number after TIMEOUT";
goto err;
}
/* Get next token. If none follows, set action. */
- if (!(ptr= debug_sync_token(&token, &token_length, ptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, ptr, action_end)))
goto set_action;
}
}
@@ -1174,14 +1176,14 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
}
/* Number must follow. */
- if (!(ptr= debug_sync_number(&action->execute, ptr)))
+ if (!(ptr= debug_sync_number(&action->execute, ptr, action_end)))
{
errmsg= "Missing valid number after EXECUTE";
goto err;
}
/* Get next token. If none follows, set action. */
- if (!(ptr= debug_sync_token(&token, &token_length, ptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, ptr, action_end)))
goto set_action;
}
@@ -1191,14 +1193,14 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
if (!my_strcasecmp(system_charset_info, token, "HIT_LIMIT"))
{
/* Number must follow. */
- if (!(ptr= debug_sync_number(&action->hit_limit, ptr)))
+ if (!(ptr= debug_sync_number(&action->hit_limit, ptr, action_end)))
{
errmsg= "Missing valid number after HIT_LIMIT";
goto err;
}
/* Get next token. If none follows, set action. */
- if (!(ptr= debug_sync_token(&token, &token_length, ptr)))
+ if (!(ptr= debug_sync_token(&token, &token_length, ptr, action_end)))
goto set_action;
}
@@ -1246,7 +1248,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
terminators in the string. So we need to take a copy here.
*/
-bool debug_sync_update(THD *thd, char *val_str)
+bool debug_sync_update(THD *thd, char *val_str, size_t len)
{
DBUG_ENTER("debug_sync_update");
DBUG_PRINT("debug_sync", ("set action: '%s'", val_str));
@@ -1255,8 +1257,9 @@ bool debug_sync_update(THD *thd, char *val_str)
debug_sync_eval_action() places '\0' in the string, which itself
must be '\0' terminated.
*/
+ DBUG_ASSERT(val_str[len] == '\0');
DBUG_RETURN(opt_debug_sync_timeout ?
- debug_sync_eval_action(thd, val_str) :
+ debug_sync_eval_action(thd, val_str, val_str + len) :
FALSE);
}
@@ -1592,7 +1595,7 @@ bool debug_sync_set_action(THD *thd, const char *action_str, size_t len)
DBUG_ASSERT(action_str);
value= strmake_root(thd->mem_root, action_str, len);
- rc= debug_sync_eval_action(thd, value);
+ rc= debug_sync_eval_action(thd, value, value + len);
DBUG_RETURN(rc);
}
diff --git a/sql/debug_sync.h b/sql/debug_sync.h
index 25b379e5892..999667b9efc 100644
--- a/sql/debug_sync.h
+++ b/sql/debug_sync.h
@@ -44,7 +44,8 @@ extern void debug_sync_end(void);
extern void debug_sync_init_thread(THD *thd);
extern void debug_sync_end_thread(THD *thd);
extern bool debug_sync_set_action(THD *thd, const char *action_str, size_t len);
-extern bool debug_sync_update(THD *thd, char *val_str);
+extern bool debug_sync_update(THD *thd, char *val_str, size_t len);
+extern uchar *debug_sync_value_ptr(THD *thd);
#endif /* defined(ENABLED_DEBUG_SYNC) */
diff --git a/sql/derror.cc b/sql/derror.cc
index 5f0bc455caf..5a1bee23f4a 100644
--- a/sql/derror.cc
+++ b/sql/derror.cc
@@ -30,16 +30,19 @@
#include "derror.h" // read_texts
#include "sql_class.h" // THD
+uint errors_per_range[MAX_ERROR_RANGES+1];
+
static bool check_error_mesg(const char *file_name, const char **errmsg);
static void init_myfunc_errs(void);
C_MODE_START
-static const char **get_server_errmsgs(void)
+static const char **get_server_errmsgs(int nr)
{
+ int section= (nr-ER_ERROR_FIRST) / ERRORS_PER_RANGE;
if (!current_thd)
- return DEFAULT_ERRMSGS;
- return CURRENT_THD_ERRMSGS;
+ return DEFAULT_ERRMSGS[section];
+ return CURRENT_THD_ERRMSGS[section];
}
C_MODE_END
@@ -60,61 +63,88 @@ C_MODE_END
TRUE Error
*/
+static const char ***original_error_messages;
+
bool init_errmessage(void)
{
- const char **errmsgs, **ptr, **org_errmsgs;
+ const char **errmsgs;
bool error= FALSE;
DBUG_ENTER("init_errmessage");
- /*
- Get a pointer to the old error messages pointer array.
- read_texts() tries to free it.
- */
- org_errmsgs= my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST);
+ free_error_messages();
+ my_free(original_error_messages);
+ original_error_messages= 0;
+
+ error_message_charset_info= system_charset_info;
/* Read messages from file. */
if (read_texts(ERRMSG_FILE, my_default_lc_messages->errmsgs->language,
- &errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1) &&
- !errmsgs)
+ &original_error_messages))
{
- my_free(errmsgs);
-
- if (org_errmsgs)
- {
- /* Use old error messages */
- errmsgs= org_errmsgs;
- }
- else
+ /*
+ No error messages. Create a temporary empty error message so
+ that we don't get a crash if some code wrongly tries to access
+ a non existing error message.
+ */
+ if (!(original_error_messages= (const char***)
+ my_malloc(MAX_ERROR_RANGES * sizeof(char**) +
+ (ERRORS_PER_RANGE * sizeof(char*)),
+ MYF(0))))
+ DBUG_RETURN(TRUE);
+ errmsgs= (const char**) (original_error_messages + MAX_ERROR_RANGES);
+
+ for (uint i=0 ; i < MAX_ERROR_RANGES ; i++)
{
- /*
- No error messages. Create a temporary empty error message so
- that we don't get a crash if some code wrongly tries to access
- a non existing error message.
- */
- if (!(errmsgs= (const char**) my_malloc((ER_ERROR_LAST-ER_ERROR_FIRST+1)*
- sizeof(char*), MYF(0))))
- DBUG_RETURN(TRUE);
- for (ptr= errmsgs; ptr < errmsgs + ER_ERROR_LAST - ER_ERROR_FIRST; ptr++)
- *ptr= "";
- error= TRUE;
+ original_error_messages[i]= errmsgs;
+ errors_per_range[i]= ERRORS_PER_RANGE;
}
+ errors_per_range[2]= 0; // MYSYS error messages
+
+ for (const char **ptr= errmsgs;
+ ptr < errmsgs + ERRORS_PER_RANGE ;
+ ptr++)
+ *ptr= "";
+
+ error= TRUE;
}
- else
- my_free(org_errmsgs); // Free old language
/* Register messages for use with my_error(). */
- if (my_error_register(get_server_errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST))
+ for (uint i=0 ; i < MAX_ERROR_RANGES ; i++)
{
- my_free(errmsgs);
- DBUG_RETURN(TRUE);
+ if (errors_per_range[i])
+ {
+ if (my_error_register(get_server_errmsgs, (i+1)*ERRORS_PER_RANGE,
+ (i+1)*ERRORS_PER_RANGE +
+ errors_per_range[i]-1))
+ {
+ my_free(original_error_messages);
+ original_error_messages= 0;
+ DBUG_RETURN(TRUE);
+ }
+ }
}
-
- DEFAULT_ERRMSGS= errmsgs; /* Init global variable */
+ DEFAULT_ERRMSGS= original_error_messages;
init_myfunc_errs(); /* Init myfunc messages */
DBUG_RETURN(error);
}
+void free_error_messages()
+{
+ /* We don't need to free errmsg as it's done in cleanup_errmsg */
+ for (uint i= 0 ; i < MAX_ERROR_RANGES ; i++)
+ {
+ if (errors_per_range[i])
+ {
+ my_error_unregister((i+1)*ERRORS_PER_RANGE,
+ (i+1)*ERRORS_PER_RANGE +
+ errors_per_range[i]-1);
+ errors_per_range[i]= 0;
+ }
+ }
+}
+
+
/**
Check the error messages array contains all relevant error messages
*/
@@ -125,11 +155,17 @@ static bool check_error_mesg(const char *file_name, const char **errmsg)
The last MySQL error message can't be an empty string; If it is,
it means that the error file doesn't contain all MySQL messages
and is probably from an older version of MySQL / MariaDB.
+ We also check that each section has enough error messages.
*/
- if (errmsg[ER_LAST_MYSQL_ERROR_MESSAGE -1 - ER_ERROR_FIRST][0] == 0)
+ if (errmsg[ER_LAST_MYSQL_ERROR_MESSAGE -1 - ER_ERROR_FIRST][0] == 0 ||
+ (errors_per_range[0] < ER_ERROR_LAST_SECTION_2 - ER_ERROR_FIRST + 1) ||
+ errors_per_range[1] != 0 ||
+ (errors_per_range[2] < ER_ERROR_LAST_SECTION_4 -
+ ER_ERROR_FIRST_SECTION_4 +1) ||
+ (errors_per_range[3] < ER_ERROR_LAST - ER_ERROR_FIRST_SECTION_5 + 1))
{
sql_print_error("Error message file '%s' is probably from and older "
- "version of MariaDB / MYSQL as it doesn't contain all "
+ "version of MariaDB as it doesn't contain all "
"error messages", file_name);
return 1;
}
@@ -137,27 +173,28 @@ static bool check_error_mesg(const char *file_name, const char **errmsg)
}
-/**
- Read text from packed textfile in language-directory.
+struct st_msg_file
+{
+ uint sections;
+ uint max_error;
+ uint errors;
+ size_t text_length;
+};
- If we can't read messagefile then it's panic- we can't continue.
+/**
+ Open file for packed textfile in language-directory.
*/
-bool read_texts(const char *file_name, const char *language,
- const char ***point, uint error_messages)
+static File open_error_msg_file(const char *file_name, const char *language,
+ uint error_messages, struct st_msg_file *ret)
{
- register uint i;
- uint count,funktpos;
- size_t offset, length;
+ int error_pos= 0;
File file;
char name[FN_REFLEN];
char lang_path[FN_REFLEN];
- uchar *UNINIT_VAR(buff);
- uchar head[32],*pos;
- DBUG_ENTER("read_texts");
+ uchar head[32];
+ DBUG_ENTER("open_error_msg_file");
- *point= 0;
- funktpos=0;
convert_dirname(lang_path, language, NullS);
(void) my_load_path(lang_path, lang_path, lc_messages_dir);
if ((file= mysql_file_open(key_file_ERRMSG,
@@ -168,69 +205,121 @@ bool read_texts(const char *file_name, const char *language,
/*
Trying pre-5.4 sematics of the --language parameter.
It included the language-specific part, e.g.:
-
--language=/path/to/english/
*/
if ((file= mysql_file_open(key_file_ERRMSG,
- fn_format(name, file_name, lc_messages_dir, "", 4),
+ fn_format(name, file_name, lc_messages_dir, "",
+ 4),
O_RDONLY | O_SHARE | O_BINARY,
MYF(0))) < 0)
goto err;
sql_print_warning("An old style --language or -lc-message-dir value with language specific part detected: %s", lc_messages_dir);
sql_print_warning("Use --lc-messages-dir without language specific part instead.");
}
-
- funktpos=1;
+ error_pos=1;
if (mysql_file_read(file, (uchar*) head, 32, MYF(MY_NABP)))
goto err;
- funktpos=2;
+ error_pos=2;
if (head[0] != (uchar) 254 || head[1] != (uchar) 254 ||
- head[2] != 2 || head[3] != 3)
+ head[2] != 2 || head[3] != 4)
goto err; /* purecov: inspected */
- error_message_charset_info= system_charset_info;
- length=uint4korr(head+6); count=uint2korr(head+10);
+ ret->text_length= uint4korr(head+6);
+ ret->max_error= uint2korr(head+10);
+ ret->errors= uint2korr(head+12);
+ ret->sections= uint2korr(head+14);
- if (count < error_messages)
+ if (ret->max_error < error_messages || ret->sections != MAX_ERROR_RANGES)
{
sql_print_error("\
Error message file '%s' had only %d error messages, but it should contain at least %d error messages.\nCheck that the above file is the right version for this program!",
- name,count,error_messages);
+ name,ret->errors,error_messages);
(void) mysql_file_close(file, MYF(MY_WME));
- DBUG_RETURN(1);
+ DBUG_RETURN(FERR);
}
+ DBUG_RETURN(file);
- if (!(*point= (const char**)
- my_malloc((size_t) (MY_MAX(length,count*2)+count*sizeof(char*)),MYF(0))))
- {
- funktpos=3; /* purecov: inspected */
+err:
+ sql_print_error((error_pos == 2) ?
+ "Incompatible header in messagefile '%s'. Probably from "
+ "another version of MariaDB" :
+ ((error_pos == 1) ? "Can't read from messagefile '%s'" :
+ "Can't find messagefile '%s'"), name);
+ if (file != FERR)
+ (void) mysql_file_close(file, MYF(MY_WME));
+ DBUG_RETURN(FERR);
+}
+
+
+/*
+ Define the number of normal and extra error messages in the errmsg.sys
+ file
+*/
+
+static const uint error_messages= ER_ERROR_LAST - ER_ERROR_FIRST+1;
+
+/**
+ Read text from packed textfile in language-directory.
+*/
+
+bool read_texts(const char *file_name, const char *language,
+ const char ****data)
+{
+ uint i, range_size;
+ const char **point;
+ size_t offset;
+ File file;
+ uchar *buff, *pos;
+ struct st_msg_file msg_file;
+ DBUG_ENTER("read_texts");
+
+ if ((file= open_error_msg_file(file_name, language, error_messages,
+ &msg_file)) == FERR)
+ DBUG_RETURN(1);
+
+ if (!(*data= (const char***)
+ my_malloc((size_t) ((MAX_ERROR_RANGES+1) * sizeof(char**) +
+ MY_MAX(msg_file.text_length, msg_file.errors * 2)+
+ msg_file.errors * sizeof(char*)),
+ MYF(MY_WME))))
goto err; /* purecov: inspected */
- }
- buff= (uchar*) (*point + count);
- if (mysql_file_read(file, buff, (size_t) count*2, MYF(MY_NABP)))
+ point= (const char**) ((*data) + MAX_ERROR_RANGES);
+ buff= (uchar*) (point + msg_file.errors);
+
+ if (mysql_file_read(file, buff,
+ (size_t) (msg_file.errors + msg_file.sections) * 2,
+ MYF(MY_NABP | MY_WME)))
goto err;
- for (i=0, offset=0, pos= buff ; i< count ; i++)
+
+ pos= buff;
+ /* read in sections */
+ for (i= 0, offset= 0; i < msg_file.sections ; i++)
{
- (*point)[i]= (char*) buff+offset;
- offset+= uint2korr(pos);
+ (*data)[i]= point + offset;
+ errors_per_range[i]= range_size= uint2korr(pos);
+ offset+= range_size;
+ pos+= 2;
+ }
+
+ /* Calculate pointers to text data */
+ for (i=0, offset=0 ; i < msg_file.errors ; i++)
+ {
+ point[i]= (char*) buff+offset;
+ offset+=uint2korr(pos);
pos+=2;
}
- if (mysql_file_read(file, buff, length, MYF(MY_NABP)))
+
+ /* Read error message texts */
+ if (mysql_file_read(file, buff, msg_file.text_length, MYF(MY_NABP | MY_WME)))
goto err;
- (void) mysql_file_close(file, MYF(0));
+ (void) mysql_file_close(file, MYF(MY_WME));
- i= check_error_mesg(file_name, *point);
- DBUG_RETURN(i);
+ DBUG_RETURN(check_error_mesg(file_name, point));
err:
- sql_print_error((funktpos == 3) ? "Not enough memory for messagefile '%s'" :
- (funktpos == 2) ? "Incompatible header in messagefile '%s'. Probably from another version of MariaDB" :
- ((funktpos == 1) ? "Can't read from messagefile '%s'" :
- "Can't find messagefile '%s'"), name);
- if (file != FERR)
- (void) mysql_file_close(file, MYF(MY_WME));
+ (void) mysql_file_close(file, MYF(0));
DBUG_RETURN(1);
} /* read_texts */
diff --git a/sql/derror.h b/sql/derror.h
index b2f6331e048..9f2aee71c7e 100644
--- a/sql/derror.h
+++ b/sql/derror.h
@@ -19,7 +19,8 @@
#include "my_global.h" /* uint */
bool init_errmessage(void);
+void free_error_messages();
bool read_texts(const char *file_name, const char *language,
- const char ***point, uint error_messages);
+ const char ****data);
#endif /* DERROR_INCLUDED */
diff --git a/sql/discover.cc b/sql/discover.cc
index d8bf6ca79c5..62a0084e2e7 100644
--- a/sql/discover.cc
+++ b/sql/discover.cc
@@ -229,7 +229,7 @@ int extension_based_table_discovery(MY_DIR *dirp, const char *ext_meta,
cur++;
}
advance(from, to, cur, skip);
- dirp->number_of_files= to - dirp->dir_entry;
+ dirp->number_of_files= (uint)(to - dirp->dir_entry);
return 0;
}
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index 6ef9fa9f8ef..90e839debb5 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -561,7 +561,7 @@ Event_queue_element::load_from_row(THD *thd, TABLE *table)
}
if ((ptr= get_field(&mem_root, table->field[ET_FIELD_ORIGINATOR])) == NullS)
DBUG_RETURN(TRUE);
- originator = table->field[ET_FIELD_ORIGINATOR]->val_int();
+ originator = (uint32) table->field[ET_FIELD_ORIGINATOR]->val_int();
/* ToDo : Andrey . Find a way not to allocate ptr on event_mem_root */
if ((ptr= get_field(&mem_root,
@@ -911,9 +911,9 @@ Event_queue_element::compute_next_execution_time()
{
my_time_t time_now;
DBUG_ENTER("Event_queue_element::compute_next_execution_time");
- DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx",
+ DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: %p",
(long) starts, (long) ends, (long) last_executed,
- (long) this));
+ this));
if (status != Event_parse_data::ENABLED)
{
diff --git a/sql/event_data_objects.h b/sql/event_data_objects.h
index 8113fcb0e2e..7ad8191f4d2 100644
--- a/sql/event_data_objects.h
+++ b/sql/event_data_objects.h
@@ -132,7 +132,7 @@ public:
ulonglong created;
ulonglong modified;
- ulong sql_mode;
+ sql_mode_t sql_mode;
class Stored_program_creation_ctx *creation_ctx;
LEX_STRING body_utf8;
@@ -158,7 +158,7 @@ public:
LEX_STRING definer_user;
LEX_STRING definer_host;
- ulong sql_mode;
+ sql_mode_t sql_mode;
class Stored_program_creation_ctx *creation_ctx;
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 3afd2659a29..50bc84883a0 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -191,7 +191,7 @@ mysql_event_fill_row(THD *thd,
TABLE *table,
Event_parse_data *et,
sp_head *sp,
- ulonglong sql_mode,
+ sql_mode_t sql_mode,
my_bool is_update)
{
CHARSET_INFO *scs= system_charset_info;
@@ -488,7 +488,8 @@ Event_db_repository::table_scan_all_for_i_s(THD *thd, TABLE *schema_table,
READ_RECORD read_record_info;
DBUG_ENTER("Event_db_repository::table_scan_all_for_i_s");
- if (init_read_record(&read_record_info, thd, event_table, NULL, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, thd, event_table, NULL, NULL, 1, 0,
+ FALSE))
DBUG_RETURN(TRUE);
/*
@@ -646,7 +647,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data,
int ret= 1;
TABLE *table= NULL;
sp_head *sp= thd->lex->sphead;
- ulonglong saved_mode= thd->variables.sql_mode;
+ sql_mode_t saved_mode= thd->variables.sql_mode;
/*
Take a savepoint to release only the lock on mysql.event
table at the end but keep the global read lock and
@@ -773,7 +774,7 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data,
CHARSET_INFO *scs= system_charset_info;
TABLE *table= NULL;
sp_head *sp= thd->lex->sphead;
- ulonglong saved_mode= thd->variables.sql_mode;
+ sql_mode_t saved_mode= thd->variables.sql_mode;
/*
Take a savepoint to release only the lock on mysql.event
table at the end but keep the global read lock and
@@ -1002,7 +1003,7 @@ Event_db_repository::drop_schema_events(THD *thd, LEX_STRING schema)
DBUG_VOID_RETURN;
/* only enabled events are in memory, so we go now and delete the rest */
- if (init_read_record(&read_record_info, thd, table, NULL, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, thd, table, NULL, NULL, 1, 0, FALSE))
goto end;
while (!ret && !(read_record_info.read_record(&read_record_info)) )
@@ -1059,7 +1060,7 @@ Event_db_repository::load_named_event(THD *thd, LEX_STRING dbname,
TABLE_LIST event_table;
DBUG_ENTER("Event_db_repository::load_named_event");
- DBUG_PRINT("enter",("thd: 0x%lx name: %*s", (long) thd,
+ DBUG_PRINT("enter",("thd: %p name: %*s", thd,
(int) name.length, name.str));
event_table.init_one_table("mysql", 5, "event", 5, "event", TL_READ);
@@ -1182,7 +1183,7 @@ Event_db_repository::check_system_tables(THD *thd)
const unsigned int event_priv_column_position= 29;
DBUG_ENTER("Event_db_repository::check_system_tables");
- DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
+ DBUG_PRINT("enter", ("thd: %p", thd));
/* Check mysql.db */
tables.init_one_table("mysql", 5, "db", 2, "db", TL_READ);
diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc
index 6c123c8e641..6fa0c25508e 100644
--- a/sql/event_parse_data.cc
+++ b/sql/event_parse_data.cc
@@ -497,9 +497,9 @@ Event_parse_data::check_parse_data(THD *thd)
{
bool ret;
DBUG_ENTER("Event_parse_data::check_parse_data");
- DBUG_PRINT("info", ("execute_at: 0x%lx expr=0x%lx starts=0x%lx ends=0x%lx",
- (long) item_execute_at, (long) item_expression,
- (long) item_starts, (long) item_ends));
+ DBUG_PRINT("info", ("execute_at: %p expr=%p starts=%p ends=%p",
+ item_execute_at, item_expression,
+ item_starts, item_ends));
init_name(thd, identifier);
@@ -532,9 +532,9 @@ Event_parse_data::init_definer(THD *thd)
size_t definer_user_len= thd->lex->definer->user.length;
size_t definer_host_len= thd->lex->definer->host.length;
- DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx "
- "definer_user: 0x%lx", (long) thd->mem_root,
- (long) definer_user));
+ DBUG_PRINT("info",("init definer_user thd->mem_root: %p "
+ "definer_user: %p", thd->mem_root,
+ definer_user));
/* + 1 for @ */
DBUG_PRINT("info",("init definer as whole"));
diff --git a/sql/event_queue.cc b/sql/event_queue.cc
index ae8ba258717..01a1507f6f2 100644
--- a/sql/event_queue.cc
+++ b/sql/event_queue.cc
@@ -135,7 +135,7 @@ bool
Event_queue::init_queue(THD *thd)
{
DBUG_ENTER("Event_queue::init_queue");
- DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
+ DBUG_PRINT("enter", ("this: %p", this));
LOCK_QUEUE_DATA();
@@ -201,7 +201,7 @@ Event_queue::create_event(THD *thd, Event_queue_element *new_element,
bool *created)
{
DBUG_ENTER("Event_queue::create_event");
- DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd,
+ DBUG_PRINT("enter", ("thd: %p et=%s.%s", thd,
new_element->dbname.str, new_element->name.str));
/* Will do nothing if the event is disabled */
@@ -213,7 +213,7 @@ Event_queue::create_event(THD *thd, Event_queue_element *new_element,
DBUG_RETURN(FALSE);
}
- DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
+ DBUG_PRINT("info", ("new event in the queue: %p", new_element));
LOCK_QUEUE_DATA();
*created= (queue_insert_safe(&queue, (uchar *) new_element) == FALSE);
@@ -242,7 +242,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
Event_queue_element *new_element)
{
DBUG_ENTER("Event_queue::update_event");
- DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str));
+ DBUG_PRINT("enter", ("thd: %p et=[%s.%s]", thd, dbname.str, name.str));
if ((new_element->status == Event_parse_data::DISABLED) ||
(new_element->status == Event_parse_data::SLAVESIDE_DISABLED))
@@ -264,7 +264,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
/* If not disabled event */
if (new_element)
{
- DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
+ DBUG_PRINT("info", ("new event in the queue: %p", new_element));
queue_insert_safe(&queue, (uchar *) new_element);
mysql_cond_broadcast(&COND_queue_state);
}
@@ -290,7 +290,7 @@ void
Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
{
DBUG_ENTER("Event_queue::drop_event");
- DBUG_PRINT("enter", ("thd: 0x%lx db :%s name: %s", (long) thd,
+ DBUG_PRINT("enter", ("thd: %p db :%s name: %s", thd,
dbname.str, name.str));
LOCK_QUEUE_DATA();
@@ -545,7 +545,7 @@ Event_queue::dbug_dump_queue(my_time_t when)
i++)
{
et= ((Event_queue_element*)queue_element(&queue, i));
- DBUG_PRINT("info", ("et: 0x%lx name: %s.%s", (long) et,
+ DBUG_PRINT("info", ("et: %p name: %s.%s", et,
et->dbname.str, et->name.str));
DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u "
"expr: %ld et.exec_at: %ld now: %ld "
@@ -673,8 +673,8 @@ Event_queue::get_top_for_execution_if_time(THD *thd,
end:
UNLOCK_QUEUE_DATA();
- DBUG_PRINT("info", ("returning %d et_new: 0x%lx ",
- ret, (long) *event_name));
+ DBUG_PRINT("info", ("returning %d et_new: %p ",
+ ret, *event_name));
if (*event_name)
{
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index e02b618a80a..57bbf0e1eea 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -150,14 +150,8 @@ deinit_event_thread(THD *thd)
{
thd->proc_info= "Clearing";
DBUG_PRINT("exit", ("Event thread finishing"));
-
- mysql_mutex_lock(&LOCK_thread_count);
- thd->unlink();
- mysql_mutex_unlock(&LOCK_thread_count);
-
+ unlink_not_visible_thd(thd);
delete thd;
- thread_safe_decrement32(&thread_count);
- signal_thd_deleted();
}
@@ -191,11 +185,7 @@ pre_init_event_thread(THD* thd)
thd->net.read_timeout= slave_net_timeout;
thd->variables.option_bits|= OPTION_AUTO_IS_NULL;
thd->client_capabilities|= CLIENT_MULTI_RESULTS;
- thread_safe_increment32(&thread_count);
- mysql_mutex_lock(&LOCK_thread_count);
- thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
+ add_to_active_threads(thd);
/*
Guarantees that we will see the thread in SHOW PROCESSLIST though its
@@ -305,7 +295,7 @@ Event_worker_thread::run(THD *thd, Event_queue_element_for_exec *event)
res= post_init_event_thread(thd);
DBUG_ENTER("Event_worker_thread::run");
- DBUG_PRINT("info", ("Time is %ld, THD: 0x%lx", (long) my_time(0), (long) thd));
+ DBUG_PRINT("info", ("Time is %u, THD: %p", (uint)my_time(0), thd));
inc_thread_running();
if (res)
@@ -399,7 +389,7 @@ Event_scheduler::start(int *err_no)
if (state > INITIALIZED)
goto end;
- if (!(new_thd= new THD))
+ if (!(new_thd= new THD(next_thread_id())))
{
sql_print_error("Event Scheduler: Cannot initialize the scheduler thread");
ret= true;
@@ -430,7 +420,7 @@ Event_scheduler::start(int *err_no)
scheduler_thd= new_thd;
DBUG_PRINT("info", ("Setting state go RUNNING"));
state= RUNNING;
- DBUG_PRINT("info", ("Forking new thread for scheduler. THD: 0x%lx", (long) new_thd));
+ DBUG_PRINT("info", ("Forking new thread for scheduler. THD: %p", new_thd));
if ((*err_no= mysql_thread_create(key_thread_event_scheduler,
&th, &connection_attrib,
event_scheduler_thread,
@@ -474,7 +464,7 @@ Event_scheduler::run(THD *thd)
DBUG_ENTER("Event_scheduler::run");
sql_print_information("Event Scheduler: scheduler thread started with id %lu",
- thd->thread_id);
+ (ulong) thd->thread_id);
/*
Recalculate the values in the queue because there could have been stops
in executions of the scheduler and some times could have passed by.
@@ -495,7 +485,7 @@ Event_scheduler::run(THD *thd)
}
DBUG_PRINT("info", ("get_top_for_execution_if_time returned "
- "event_name=0x%lx", (long) event_name));
+ "event_name=%p", event_name));
if (event_name)
{
if ((res= execute_top(event_name)))
@@ -540,7 +530,7 @@ Event_scheduler::execute_top(Event_queue_element_for_exec *event_name)
int res= 0;
DBUG_ENTER("Event_scheduler::execute_top");
- if (!(new_thd= new THD()))
+ if (!(new_thd= new THD(next_thread_id())))
goto error;
pre_init_event_thread(new_thd);
@@ -576,7 +566,7 @@ Event_scheduler::execute_top(Event_queue_element_for_exec *event_name)
started_events++;
executed_events++; // For SHOW STATUS
- DBUG_PRINT("info", ("Event is in THD: 0x%lx", (long) new_thd));
+ DBUG_PRINT("info", ("Event is in THD: %p", new_thd));
DBUG_RETURN(FALSE);
error:
@@ -627,7 +617,7 @@ Event_scheduler::stop()
{
THD *thd= current_thd;
DBUG_ENTER("Event_scheduler::stop");
- DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
+ DBUG_PRINT("enter", ("thd: %p", thd));
LOCK_DATA();
DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
@@ -657,13 +647,13 @@ Event_scheduler::stop()
state= STOPPING;
DBUG_PRINT("info", ("Scheduler thread has id %lu",
- scheduler_thd->thread_id));
+ (ulong) scheduler_thd->thread_id));
/* Lock from delete */
mysql_mutex_lock(&scheduler_thd->LOCK_thd_data);
/* This will wake up the thread if it waits on Queue's conditional */
sql_print_information("Event Scheduler: Killing the scheduler thread, "
"thread id %lu",
- scheduler_thd->thread_id);
+ (ulong) scheduler_thd->thread_id);
scheduler_thd->awake(KILL_CONNECTION);
mysql_mutex_unlock(&scheduler_thd->LOCK_thd_data);
@@ -820,7 +810,8 @@ Event_scheduler::dump_internal_status()
puts("");
puts("Event scheduler status:");
printf("State : %s\n", scheduler_states_names[state].str);
- printf("Thread id : %lu\n", scheduler_thd? scheduler_thd->thread_id : 0);
+ printf("Thread id : %lu\n", scheduler_thd ?
+ (ulong) scheduler_thd->thread_id : (ulong) 0);
printf("LLA : %s:%u\n", mutex_last_locked_in_func,
mutex_last_locked_at_line);
printf("LUA : %s:%u\n", mutex_last_unlocked_in_func,
diff --git a/sql/events.cc b/sql/events.cc
index 187a3208d9f..cd0257f5317 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -103,7 +103,7 @@ ulong Events::inited;
int sortcmp_lex_string(LEX_STRING s, LEX_STRING t, CHARSET_INFO *cs)
{
return cs->coll->strnncollsp(cs, (uchar *) s.str,s.length,
- (uchar *) t.str,t.length, 0);
+ (uchar *) t.str,t.length);
}
@@ -873,7 +873,7 @@ Events::init(THD *thd, bool opt_noacl_or_bootstrap)
if (!thd)
{
- if (!(thd= new THD()))
+ if (!(thd= new THD(0)))
{
res= TRUE;
goto end;
@@ -1137,7 +1137,7 @@ Events::load_events_from_db(THD *thd)
uint count= 0;
ulong saved_master_access;
DBUG_ENTER("Events::load_events_from_db");
- DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
+ DBUG_PRINT("enter", ("thd: %p", thd));
/*
NOTE: even if we run in read-only mode, we should be able to lock the
@@ -1166,7 +1166,7 @@ Events::load_events_from_db(THD *thd)
DBUG_RETURN(TRUE);
}
- if (init_read_record(&read_record_info, thd, table, NULL, 0, 1, FALSE))
+ if (init_read_record(&read_record_info, thd, table, NULL, NULL, 0, 1, FALSE))
{
close_thread_tables(thd);
DBUG_RETURN(TRUE);
diff --git a/sql/field.cc b/sql/field.cc
index a4308a378b3..0621015c0e4 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -70,8 +70,21 @@ const char field_separator=',';
#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
((ulong) ((1LL << MY_MIN(arg, 4) * 8) - 1))
-#define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table || (!table->read_set || bitmap_is_set(table->read_set, field_index)))
-#define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED DBUG_ASSERT(is_stat_field || !table || (!table->write_set || bitmap_is_set(table->write_set, field_index) || (table->vcol_set && bitmap_is_set(table->vcol_set, field_index))))
+// Column marked for read or the field set to read out of record[0] or [1]
+#define ASSERT_COLUMN_MARKED_FOR_READ \
+ DBUG_ASSERT(!table || \
+ (!table->read_set || \
+ bitmap_is_set(table->read_set, field_index) || \
+ (!(ptr >= table->record[0] && \
+ ptr < table->record[0] + table->s->reclength))))
+
+#define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED \
+ DBUG_ASSERT(is_stat_field || !table || \
+ (!table->write_set || \
+ bitmap_is_set(table->write_set, field_index) || \
+ (!(ptr >= table->record[0] && \
+ ptr < table->record[0] + table->s->reclength))) || \
+ (table->vcol_set && bitmap_is_set(table->vcol_set, field_index)))
#define FLAGSTR(S,F) ((S) & (F) ? #F " " : "")
@@ -80,7 +93,7 @@ const char field_separator=',';
NOTE: to avoid 256*256 table, gap in table types numeration is skiped
following #defines describe that gap and how to canculate number of fields
- and index of field in thia array.
+ and index of field in this array.
*/
#define FIELDTYPE_TEAR_FROM (MYSQL_TYPE_BIT + 1)
#define FIELDTYPE_TEAR_TO (MYSQL_TYPE_NEWDECIMAL - 1)
@@ -231,7 +244,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_FLOAT, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_FLOAT, MYSQL_TYPE_FLOAT,
+ MYSQL_TYPE_DOUBLE, MYSQL_TYPE_FLOAT,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
@@ -1224,7 +1237,8 @@ bool Field::test_if_equality_guarantees_uniqueness(const Item *item) const
for temporal columns, so the query:
WHERE temporal_column='string'
cannot return multiple distinct temporal values.
- QQ: perhaps we could allow INT/DECIMAL/DOUBLE types for temporal items.
+
+ TODO: perhaps we could allow INT/DECIMAL/DOUBLE types for temporal items.
*/
return result_type() == item->result_type();
}
@@ -1322,6 +1336,98 @@ bool Field::can_optimize_range(const Item_bool_func *cond,
}
+int Field::store_hex_hybrid(const char *str, uint length)
+{
+ DBUG_ASSERT(result_type() != STRING_RESULT);
+ ulonglong nr;
+
+ if (length > 8)
+ {
+ nr= flags & UNSIGNED_FLAG ? ULONGLONG_MAX : LONGLONG_MAX;
+ goto warn;
+ }
+ nr= (ulonglong) longlong_from_hex_hybrid(str, length);
+ if ((length == 8) && !(flags & UNSIGNED_FLAG) && (nr > LONGLONG_MAX))
+ {
+ nr= LONGLONG_MAX;
+ goto warn;
+ }
+ return store((longlong) nr, true); // Assume hex numbers are unsigned
+
+warn:
+ if (!store((longlong) nr, true))
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ return 1;
+}
+
+
+/**
+ If a field does not have corresponding data, its behavior can vary:
+ - In case of the fixed file format
+ it's set to the default value for the data type,
+ such as 0 for numbers or '' for strings.
+ - In case of a non-fixed format
+ it's set to NULL for nullable fields, and
+ it's set to the default value for the data type for NOT NULL fields.
+ This seems to be by design.
+*/
+bool Field::load_data_set_no_data(THD *thd, bool fixed_format)
+{
+ reset(); // Do not use the DEFAULT value
+ if (fixed_format)
+ {
+ set_notnull();
+ /*
+ We're loading a fixed format file, e.g.:
+ LOAD DATA INFILE 't1.txt' INTO TABLE t1 FIELDS TERMINATED BY '';
+ Suppose the file ended unexpectedly and no data was provided for an
+ auto-increment column in the current row.
+ Historically, if sql_mode=NO_AUTO_VALUE_ON_ZERO, then the column value
+ is set to 0 in such case (the next auto_increment value is not used).
+ This behaviour was introduced by the fix for "bug#12053" in mysql-4.1.
+ Note, loading a delimited file works differently:
+ "no data" is not converted to 0 on NO_AUTO_VALUE_ON_ZERO:
+ it's considered as equal to setting the column to NULL,
+ which is then replaced to the next auto_increment value.
+ This difference seems to be intentional.
+ */
+ if (this == table->next_number_field)
+ table->auto_increment_field_not_null= true;
+ }
+ set_has_explicit_value(); // Do not auto-update this field
+ return false;
+}
+
+
+bool Field::load_data_set_null(THD *thd)
+{
+ reset();
+ set_null();
+ if (!maybe_null())
+ {
+ if (this != table->next_number_field)
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_NULL_TO_NOTNULL, 1);
+ }
+ set_has_explicit_value(); // Do not auto-update this field
+ return false;
+}
+
+
+void Field::load_data_set_value(const char *pos, uint length,
+ CHARSET_INFO *cs)
+{
+ /*
+ Mark field as not null, we should do this for each row because of
+ restore_record...
+ */
+ set_notnull();
+ if (this == table->next_number_field)
+ table->auto_increment_field_not_null= true;
+ store(pos, length, cs);
+ set_has_explicit_value(); // Do not auto-update this field
+}
+
+
/**
Numeric fields base class constructor.
*/
@@ -1664,9 +1770,8 @@ Field::Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
part_of_key_not_clustered(0), part_of_sortkey(0),
unireg_check(unireg_check_arg), field_length(length_arg),
null_bit(null_bit_arg), is_created_from_null_item(FALSE),
- read_stats(NULL), collected_stats(0),
- vcol_info(0),
- stored_in_db(TRUE)
+ read_stats(NULL), collected_stats(0), vcol_info(0), check_constraint(0),
+ default_value(0)
{
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
@@ -1783,6 +1888,33 @@ int Field::store(const char *to, uint length, CHARSET_INFO *cs,
}
+static int timestamp_to_TIME(THD *thd, MYSQL_TIME *ltime, my_time_t ts,
+ ulong sec_part, ulonglong fuzzydate)
+{
+ thd->time_zone_used= 1;
+ if (ts == 0 && sec_part == 0)
+ {
+ if (fuzzydate & TIME_NO_ZERO_DATE)
+ return 1;
+ set_zero_time(ltime, MYSQL_TIMESTAMP_DATETIME);
+ }
+ else
+ {
+ thd->variables.time_zone->gmt_sec_to_TIME(ltime, ts);
+ ltime->second_part= sec_part;
+ }
+ return 0;
+}
+
+
+int Field::store_timestamp(my_time_t ts, ulong sec_part)
+{
+ MYSQL_TIME ltime;
+ THD *thd= get_thd();
+ timestamp_to_TIME(thd, &ltime, ts, sec_part, 0);
+ return store_time_dec(&ltime, decimals());
+}
+
/**
Pack the field into a format suitable for storage and transfer.
@@ -2027,7 +2159,7 @@ bool Field_num::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
longlong nr= val_int();
bool neg= !(flags & UNSIGNED_FLAG) && nr < 0;
return int_to_datetime_with_warn(neg, neg ? -nr : nr, ltime, fuzzydate,
- field_name);
+ table->s, field_name);
}
@@ -2283,6 +2415,26 @@ Field *Field::clone(MEM_ROOT *root, my_ptrdiff_t diff)
return tmp;
}
+int Field::set_default()
+{
+ if (default_value)
+ {
+ Query_arena backup_arena;
+ table->in_use->set_n_backup_active_arena(table->expr_arena, &backup_arena);
+ int rc= default_value->expr->save_in_field(this, 0);
+ table->in_use->restore_active_arena(table->expr_arena, &backup_arena);
+ return rc;
+ }
+ /* Copy constant value stored in s->default_values */
+ my_ptrdiff_t l_offset= (my_ptrdiff_t) (table->s->default_values -
+ table->record[0]);
+ memcpy(ptr, ptr + l_offset, pack_length());
+ if (maybe_null_in_table())
+ *null_ptr= ((*null_ptr & (uchar) ~null_bit) |
+ (null_ptr[l_offset] & null_bit));
+ return 0;
+}
+
/****************************************************************************
Field_null, a field that always return NULL
@@ -2725,7 +2877,7 @@ int Field_decimal::store(double nr)
return 1;
}
- reg4 uint i;
+ uint i;
size_t length;
uchar fyllchar,*to;
char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
@@ -2901,6 +3053,23 @@ void Field_decimal::sql_type(String &res) const
}
+Field *Field_decimal::make_new_field(MEM_ROOT *root, TABLE *new_table,
+ bool keep_type)
+{
+ if (keep_type)
+ return Field_real::make_new_field(root, new_table, keep_type);
+
+ Field *field= new (root) Field_new_decimal(NULL, field_length,
+ maybe_null() ? (uchar*) "" : 0, 0,
+ NONE, field_name,
+ dec, flags & ZEROFILL_FLAG,
+ unsigned_flag);
+ if (field)
+ field->init_for_make_new_field(new_table, orig_table);
+ return field;
+}
+
+
/****************************************************************************
** Field_new_decimal
****************************************************************************/
@@ -3256,6 +3425,16 @@ longlong Field_new_decimal::val_int(void)
}
+ulonglong Field_new_decimal::val_uint(void)
+{
+ ASSERT_COLUMN_MARKED_FOR_READ;
+ longlong i;
+ my_decimal decimal_value;
+ my_decimal2int(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), true, &i);
+ return i;
+}
+
+
my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
{
ASSERT_COLUMN_MARKED_FOR_READ;
@@ -3285,7 +3464,7 @@ bool Field_new_decimal::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
my_decimal value;
return decimal_to_datetime_with_warn(val_decimal(&value),
- ltime, fuzzydate, field_name);
+ ltime, fuzzydate, table->s, field_name);
}
@@ -4248,16 +4427,13 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_longlong::store(double nr)
{
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
- bool error;
- longlong res;
-
- res= double_to_longlong(nr, unsigned_flag, &error);
+ Converter_double_to_longlong conv(nr, unsigned_flag);
- if (error)
+ if (conv.error())
set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
- int8store(ptr,res);
- return error;
+ int8store(ptr, conv.result());
+ return conv.error();
}
@@ -4430,8 +4606,7 @@ longlong Field_float::val_int(void)
{
float j;
float4get(j,ptr);
- bool error;
- return double_to_longlong(j, false, &error);
+ return Converter_double_to_longlong(j, false).result();
}
@@ -4453,13 +4628,13 @@ String *Field_float::val_str(String *val_buffer,
char *to=(char*) val_buffer->ptr();
size_t len;
- if (dec >= NOT_FIXED_DEC)
+ if (dec >= FLOATING_POINT_DECIMALS)
len= my_gcvt(nr, MY_GCVT_ARG_FLOAT, to_length - 1, to, NULL);
else
{
/*
We are safe here because the buffer length is 70, and
- fabs(float) < 10^39, dec < NOT_FIXED_DEC. So the resulting string
+ fabs(float) < 10^39, dec < FLOATING_POINT_DECIMALS. So the resulting string
will be not longer than 69 chars + terminating '\0'.
*/
len= my_fcvt(nr, dec, to, NULL);
@@ -4543,7 +4718,7 @@ int Field_float::do_save_field_metadata(uchar *metadata_ptr)
void Field_float::sql_type(String &res) const
{
- if (dec == NOT_FIXED_DEC)
+ if (dec >= FLOATING_POINT_DECIMALS)
{
res.set_ascii(STRING_WITH_LEN("float"));
}
@@ -4624,7 +4799,7 @@ int truncate_double(double *nr, uint field_length, uint dec,
return 1;
}
- if (dec < NOT_FIXED_DEC)
+ if (dec < FLOATING_POINT_DECIMALS)
{
uint order= field_length - dec;
uint step= array_elements(log_10) - 1;
@@ -4659,53 +4834,61 @@ int truncate_double(double *nr, uint field_length, uint dec,
/*
Convert double to longlong / ulonglong.
- If double is outside of range, adjust return value and set error.
+ If double is outside of the supported range,
+ adjust m_result and set m_error.
- SYNOPSIS
- double_to_longlong()
- nr Number to convert
- unsigned_flag 1 if result is unsigned
- error Will be set to 1 in case of overflow.
+ @param nr Number to convert
+ @param unsigned_flag true if result is unsigned
*/
-longlong double_to_longlong(double nr, bool unsigned_flag, bool *error)
+Value_source::
+Converter_double_to_longlong::Converter_double_to_longlong(double nr,
+ bool unsigned_flag)
+ :m_error(false)
{
- longlong res;
-
- *error= 0;
-
nr= rint(nr);
if (unsigned_flag)
{
if (nr < 0)
{
- res= 0;
- *error= 1;
+ m_result= 0;
+ m_error= true;
}
else if (nr >= (double) ULONGLONG_MAX)
{
- res= ~(longlong) 0;
- *error= 1;
+ m_result= ~(longlong) 0;
+ m_error= true;
}
else
- res= (longlong) double2ulonglong(nr);
+ m_result= (longlong) double2ulonglong(nr);
}
else
{
if (nr <= (double) LONGLONG_MIN)
{
- res= LONGLONG_MIN;
- *error= (nr < (double) LONGLONG_MIN);
+ m_result= LONGLONG_MIN;
+ m_error= (nr < (double) LONGLONG_MIN);
}
else if (nr >= (double) (ulonglong) LONGLONG_MAX)
{
- res= LONGLONG_MAX;
- *error= (nr > (double) LONGLONG_MAX);
+ m_result= LONGLONG_MAX;
+ m_error= (nr > (double) LONGLONG_MAX);
}
else
- res= (longlong) nr;
+ m_result= (longlong) nr;
}
- return res;
+}
+
+
+void Value_source::
+Converter_double_to_longlong::push_warning(THD *thd,
+ double nr,
+ bool unsigned_flag)
+{
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_DATA_OVERFLOW, ER_THD(thd, ER_DATA_OVERFLOW),
+ ErrConvDouble(nr).ptr(),
+ unsigned_flag ? "UNSIGNED INT" : "INT");
}
@@ -4730,32 +4913,13 @@ double Field_double::val_real(void)
return j;
}
+
longlong Field_double::val_int_from_real(bool want_unsigned_result)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
- double j;
- longlong res;
- bool error;
- float8get(j,ptr);
-
- res= double_to_longlong(j, want_unsigned_result, &error);
- /*
- Note, val_uint() is currently used for auto_increment purposes only,
- and we want to suppress all warnings in such cases.
- If we ever start using val_uint() for other purposes,
- val_int_from_real() will need a new separate parameter to
- suppress warnings.
- */
- if (error && !want_unsigned_result)
- {
- THD *thd= get_thd();
- ErrConvDouble err(j);
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_TRUNCATED_WRONG_VALUE,
- ER_THD(thd, ER_TRUNCATED_WRONG_VALUE), "INTEGER",
- err.ptr());
- }
- return res;
+ Converter_double_to_longlong conv(val_real(), want_unsigned_result);
+ if (!want_unsigned_result && conv.error())
+ conv.push_warning(get_thd(), Field_double::val_real(), false);
+ return conv.result();
}
@@ -4771,7 +4935,8 @@ bool Field_real::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
{
ASSERT_COLUMN_MARKED_FOR_READ;
double nr= val_real();
- return double_to_datetime_with_warn(nr, ltime, fuzzydate, field_name);
+ return double_to_datetime_with_warn(nr, ltime, fuzzydate,
+ table->s, field_name);
}
@@ -4813,7 +4978,7 @@ String *Field_double::val_str(String *val_buffer,
char *to=(char*) val_buffer->ptr();
size_t len;
- if (dec >= NOT_FIXED_DEC)
+ if (dec >= FLOATING_POINT_DECIMALS)
len= my_gcvt(nr, MY_GCVT_ARG_DOUBLE, to_length - 1, to, NULL);
else
len= my_fcvt(nr, dec, to, NULL);
@@ -4872,7 +5037,7 @@ int Field_double::do_save_field_metadata(uchar *metadata_ptr)
void Field_double::sql_type(String &res) const
{
CHARSET_INFO *cs=res.charset();
- if (dec == NOT_FIXED_DEC)
+ if (dec >= FLOATING_POINT_DECIMALS)
{
res.set_ascii(STRING_WITH_LEN("double"));
}
@@ -4904,12 +5069,12 @@ void Field_double::sql_type(String &res) const
field has NOW() as default and is updated when row changes, else it is
field which has 0 as default value and is not automatically updated.
TIMESTAMP_DN_FIELD - field with NOW() as default but not set on update
- automatically (TIMESTAMP DEFAULT NOW())
+ automatically (TIMESTAMP DEFAULT NOW()), not used in Field since 10.2.2
TIMESTAMP_UN_FIELD - field which is set on update automatically but has not
NOW() as default (but it may has 0 or some other const timestamp as
default) (TIMESTAMP ON UPDATE NOW()).
TIMESTAMP_DNUN_FIELD - field which has now() as default and is auto-set on
- update. (TIMESTAMP DEFAULT NOW() ON UPDATE NOW())
+ update. (TIMESTAMP DEFAULT NOW() ON UPDATE NOW()), not used in Field since 10.2.2
NONE - field which is not auto-set on update with some other than NOW()
default value (TIMESTAMP DEFAULT 0).
@@ -4946,6 +5111,13 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg,
}
+int Field_timestamp::save_in_field(Field *to)
+{
+ ulong sec_part;
+ my_time_t ts= get_timestamp(&sec_part);
+ return to->store_timestamp(ts, sec_part);
+}
+
my_time_t Field_timestamp::get_timestamp(const uchar *pos,
ulong *sec_part) const
{
@@ -5072,12 +5244,11 @@ int Field_timestamp::store(longlong nr, bool unsigned_val)
}
-int Field_timestamp::store_timestamp(Field_timestamp *from)
+int Field_timestamp::store_timestamp(my_time_t ts, ulong sec_part)
{
- ulong sec_part;
- my_time_t ts= from->get_timestamp(&sec_part);
store_TIME(ts, sec_part);
- if (!ts && !sec_part && get_thd()->variables.sql_mode & MODE_NO_ZERO_DATE)
+ if (ts == 0 && sec_part == 0 &&
+ get_thd()->variables.sql_mode & TIME_NO_ZERO_DATE)
{
ErrConvString s(
STRING_WITH_LEN("0000-00-00 00:00:00.000000") - (decimals() ? 6 - decimals() : 7),
@@ -5192,22 +5363,9 @@ Field_timestamp::validate_value_in_record(THD *thd, const uchar *record) const
bool Field_timestamp::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
- THD *thd= get_thd();
- thd->time_zone_used= 1;
ulong sec_part;
- my_time_t temp= get_timestamp(&sec_part);
- if (temp == 0 && sec_part == 0)
- { /* Zero time is "000000" */
- if (fuzzydate & TIME_NO_ZERO_DATE)
- return 1;
- set_zero_time(ltime, MYSQL_TIMESTAMP_DATETIME);
- }
- else
- {
- thd->variables.time_zone->gmt_sec_to_TIME(ltime, (my_time_t)temp);
- ltime->second_part= sec_part;
- }
- return 0;
+ my_time_t ts= get_timestamp(&sec_part);
+ return timestamp_to_TIME(get_thd(), ltime, ts, sec_part, fuzzydate);
}
@@ -5257,34 +5415,43 @@ int Field_timestamp::set_time()
return 0;
}
-/**
- Mark the field as having an explicit default value.
-
- @param value if available, the value that the field is being set to
- @note
- Fields that have an explicit default value should not be updated
- automatically via the DEFAULT or ON UPDATE functions. The functions
- that deal with data change functionality (INSERT/UPDATE/LOAD),
- determine if there is an explicit value for each field before performing
- the data change, and call this method to mark the field.
+bool Field_timestamp::load_data_set_no_data(THD *thd, bool fixed_format)
+{
+ if (!maybe_null())
+ {
+ /*
+ Timestamp fields that are NOT NULL are autoupdated if there is no
+ corresponding value in the data file.
+ */
+ set_time();
+ set_has_explicit_value();
+ return false;
+ }
+ return Field::load_data_set_no_data(thd, fixed_format);
+}
- For timestamp columns, the only case where a column is not marked
- as been given a value are:
- - It's explicitly assigned with DEFAULT
- - We assign NULL to a timestamp field that is defined as NOT NULL.
- This is how MySQL has worked since it's start.
-*/
-void Field_timestamp::set_explicit_default(Item *value)
+bool Field_timestamp::load_data_set_null(THD *thd)
{
- if (((value->type() == Item::DEFAULT_VALUE_ITEM &&
- !((Item_default_value*)value)->arg) ||
- (!maybe_null() && value->null_value)))
- return;
- set_has_explicit_value();
+ if (!maybe_null())
+ {
+ /*
+ Timestamp fields that are NOT NULL are autoupdated if there is no
+ corresponding value in the data file.
+ */
+ set_time();
+ }
+ else
+ {
+ reset();
+ set_null();
+ }
+ set_has_explicit_value(); // Do not auto-update this field
+ return false;
}
+
#ifdef NOT_USED
static void store_native(ulonglong num, uchar *to, uint bytes)
{
@@ -5583,7 +5750,7 @@ int Field_temporal_with_date::store(double nr)
ErrConvDouble str(nr);
longlong tmp= double_to_datetime(nr, &ltime,
- sql_mode_for_dates(thd), &error);
+ (uint) sql_mode_for_dates(thd), &error);
return store_TIME_with_warning(&ltime, &str, error, tmp != -1);
}
@@ -6289,7 +6456,7 @@ bool Field_year::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
if (tmp || field_length != 4)
tmp+= 1900;
return int_to_datetime_with_warn(false, tmp * 10000,
- ltime, fuzzydate, field_name);
+ ltime, fuzzydate, table->s, field_name);
}
@@ -6860,8 +7027,11 @@ Field_longstr::check_string_copy_error(const String_copier *copier,
if (!(pos= copier->most_important_error_pos()))
return FALSE;
- convert_to_printable(tmp, sizeof(tmp), pos, (end - pos), cs, 6);
- set_warning_truncated_wrong_value("string", tmp);
+ if (!is_stat_field)
+ {
+ convert_to_printable(tmp, sizeof(tmp), pos, (end - pos), cs, 6);
+ set_warning_truncated_wrong_value("string", tmp);
+ }
return TRUE;
}
@@ -7186,8 +7356,7 @@ int Field_string::cmp(const uchar *a_ptr, const uchar *b_ptr)
*/
return field_charset->coll->strnncollsp(field_charset,
a_ptr, a_len,
- b_ptr, b_len,
- 0);
+ b_ptr, b_len);
}
@@ -7408,15 +7577,7 @@ Field *Field_string::make_new_field(MEM_ROOT *root, TABLE *new_table,
This is done to ensure that ALTER TABLE will convert old VARCHAR fields
to now VARCHAR fields.
*/
- field->init(new_table);
- /*
- Normally orig_table is different from table only if field was
- created via ::make_new_field. Here we alter the type of field,
- so ::make_new_field is not applicable. But we still need to
- preserve the original field metadata for the client-server
- protocol.
- */
- field->orig_table= orig_table;
+ field->init_for_make_new_field(new_table, orig_table);
}
return field;
}
@@ -7561,7 +7722,7 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
a_length,
b_ptr+
length_bytes,
- b_length,0);
+ b_length);
return diff;
}
@@ -7584,7 +7745,7 @@ int Field_varstring::key_cmp(const uchar *key_ptr, uint max_key_length)
length,
key_ptr+
HA_KEY_BLOB_LENGTH,
- uint2korr(key_ptr), 0);
+ uint2korr(key_ptr));
}
@@ -7602,8 +7763,7 @@ int Field_varstring::key_cmp(const uchar *a,const uchar *b)
a + HA_KEY_BLOB_LENGTH,
uint2korr(a),
b + HA_KEY_BLOB_LENGTH,
- uint2korr(b),
- 0);
+ uint2korr(b));
}
@@ -7893,7 +8053,7 @@ void Field_blob::store_length(uchar *i_ptr, uint i_packlength, uint32 i_number)
}
-uint32 Field_blob::get_length(const uchar *pos, uint packlength_arg)
+uint32 Field_blob::get_length(const uchar *pos, uint packlength_arg) const
{
return (uint32)read_lowendian(pos, packlength_arg);
}
@@ -7908,16 +8068,12 @@ int Field_blob::copy_value(Field_blob *from)
DBUG_ASSERT(field_charset == from->charset());
int rc= 0;
uint32 length= from->get_length();
- uchar *data;
- from->get_ptr(&data);
+ uchar *data= from->get_ptr();
if (packlength < from->packlength)
{
- int well_formed_errors;
set_if_smaller(length, Field_blob::max_data_length());
- length= field_charset->cset->well_formed_len(field_charset,
- (const char *) data,
- (const char *) data + length,
- length, &well_formed_errors);
+ length= (uint32) Well_formed_prefix(field_charset,
+ (const char *) data, length).length();
rc= report_if_important_data((const char *) data + length,
(const char *) data + from->get_length(),
true);
@@ -7956,12 +8112,11 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
DBUG_ASSERT(length <= max_data_length());
new_length= length;
- copy_length= table->in_use->variables.group_concat_max_len;
+ copy_length= (uint)MY_MIN(UINT_MAX,table->in_use->variables.group_concat_max_len);
if (new_length > copy_length)
{
- int well_formed_error;
- new_length= cs->cset->well_formed_len(cs, from, from + copy_length,
- new_length, &well_formed_error);
+ new_length= Well_formed_prefix(cs,
+ from, copy_length, new_length).length();
table->blob_storage->set_truncated_value(true);
}
if (!(tmp= table->blob_storage->store(from, new_length)))
@@ -8109,8 +8264,7 @@ int Field_blob::cmp(const uchar *a,uint32 a_length, const uchar *b,
uint32 b_length)
{
return field_charset->coll->strnncollsp(field_charset,
- a, a_length, b, b_length,
- 0);
+ a, a_length, b, b_length);
}
@@ -8167,7 +8321,7 @@ uint Field_blob::get_key_image(uchar *buff,uint length, imagetype type_arg)
bzero(buff, image_length);
return image_length;
}
- get_ptr(&blob);
+ blob= get_ptr();
gobj= Geometry::construct(&buffer, (char*) blob, blob_length);
if (!gobj || gobj->get_mbr(&mbr, &dummy))
bzero(buff, image_length);
@@ -8182,7 +8336,7 @@ uint Field_blob::get_key_image(uchar *buff,uint length, imagetype type_arg)
}
#endif /*HAVE_SPATIAL*/
- get_ptr(&blob);
+ blob= get_ptr();
uint local_char_length= length / field_charset->mbmaxlen;
local_char_length= my_charpos(field_charset, blob, blob + blob_length,
local_char_length);
@@ -8276,7 +8430,7 @@ void Field_blob::sort_string(uchar *to,uint length)
uchar *blob;
uint blob_length=get_length();
- if (!blob_length)
+ if (!blob_length && field_charset->pad_char == 0)
bzero(to,length);
else
{
@@ -8341,7 +8495,7 @@ uchar *Field_blob::pack(uchar *to, const uchar *from, uint max_length)
*/
if (length > 0)
{
- get_ptr((uchar**) &from);
+ from= get_ptr();
memcpy(to+packlength, from,length);
}
ptr=save; // Restore org row pointer
@@ -8370,8 +8524,8 @@ const uchar *Field_blob::unpack(uchar *to, const uchar *from,
const uchar *from_end, uint param_data)
{
DBUG_ENTER("Field_blob::unpack");
- DBUG_PRINT("enter", ("to: 0x%lx; from: 0x%lx; param_data: %u",
- (ulong) to, (ulong) from, param_data));
+ DBUG_PRINT("enter", ("to: %p; from: %p; param_data: %u",
+ to, from, param_data));
uint const master_packlength=
param_data > 0 ? param_data & 0xFF : packlength;
if (from + master_packlength > from_end)
@@ -8505,7 +8659,7 @@ uint gis_field_options_read(const uchar *buf, uint buf_len,
}
end_of_record:
- return cbuf - buf;
+ return (uint)(cbuf - buf);
}
@@ -8587,14 +8741,20 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs)
geom_type != Field::GEOM_GEOMETRYCOLLECTION &&
(uint32) geom_type != wkb_type)
{
- my_printf_error(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
- ER_THD(get_thd(), ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- MYF(0),
- Geometry::ci_collection[geom_type]->m_name.str,
- Geometry::ci_collection[wkb_type]->m_name.str,
- field_name,
- (ulong) table->in_use->get_stmt_da()->
- current_row_for_warning());
+ const char *db= table->s->db.str;
+ const char *tab_name= table->s->table_name.str;
+
+ if (!db)
+ db= "";
+ if (!tab_name)
+ tab_name= "";
+
+ my_error(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, MYF(0),
+ Geometry::ci_collection[geom_type]->m_name.str,
+ Geometry::ci_collection[wkb_type]->m_name.str,
+ db, tab_name, field_name,
+ (ulong) table->in_use->get_stmt_da()->
+ current_row_for_warning());
goto err_exit;
}
@@ -8646,6 +8806,28 @@ bool Field_geom::can_optimize_range(const Item_bool_func *cond,
return item->cmp_type() == STRING_RESULT;
}
+
+bool Field_geom::load_data_set_no_data(THD *thd, bool fixed_format)
+{
+ return Field_geom::load_data_set_null(thd);
+}
+
+
+bool Field_geom::load_data_set_null(THD *thd)
+{
+ Field_blob::reset();
+ if (!maybe_null())
+ {
+ my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0), field_name,
+ thd->get_stmt_da()->current_row_for_warning());
+ return true;
+ }
+ set_null();
+ set_has_explicit_value(); // Do not auto-update this field
+ return false;
+}
+
+
#endif /*HAVE_SPATIAL*/
/****************************************************************************
@@ -8984,7 +9166,7 @@ void Field_set::sql_type(String &res) const
0 if the fields are unequally defined
*/
-bool Field::eq_def(Field *field)
+bool Field::eq_def(const Field *field) const
{
if (real_type() != field->real_type() || charset() != field->charset() ||
pack_length() != field->pack_length())
@@ -9016,7 +9198,7 @@ static bool compare_type_names(CHARSET_INFO *charset, TYPELIB *t1, TYPELIB *t2)
returns 1 if the fields are equally defined
*/
-bool Field_enum::eq_def(Field *field)
+bool Field_enum::eq_def(const Field *field) const
{
TYPELIB *values;
@@ -9093,7 +9275,7 @@ const uchar *Field_enum::unpack(uchar *to, const uchar *from,
@return
returns 1 if the fields are equally defined
*/
-bool Field_num::eq_def(Field *field)
+bool Field_num::eq_def(const Field *field) const
{
if (!Field::eq_def(field))
return 0;
@@ -9227,8 +9409,8 @@ Field_bit::do_last_null_byte() const
bits. On systems with CHAR_BIT > 8 (not very common), the storage
will lose the extra bits.
*/
- DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: 0x%lx",
- bit_ofs, bit_len, (long) bit_ptr));
+ DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: %p",
+ bit_ofs, bit_len, bit_ptr));
uchar *result;
if (bit_len == 0)
result= null_ptr;
@@ -9676,7 +9858,7 @@ Field_bit::unpack(uchar *to, const uchar *from, const uchar *from_end,
}
-void Field_bit::set_default()
+int Field_bit::set_default()
{
if (bit_len > 0)
{
@@ -9684,7 +9866,7 @@ void Field_bit::set_default()
uchar bits= get_rec_bits(bit_ptr + col_offset, bit_ofs, bit_len);
set_rec_bits(bits, bit_ptr, bit_ofs, bit_len);
}
- Field::set_default();
+ return Field::set_default();
}
/*
@@ -9749,7 +9931,7 @@ void Field_bit_as_char::sql_type(String &res) const
Convert create_field::length from number of characters to number of bytes.
*/
-void Create_field::create_length_to_internal_length(void)
+void Column_definition::create_length_to_internal_length(void)
{
switch (sql_type) {
case MYSQL_TYPE_TINY_BLOB:
@@ -9761,8 +9943,9 @@ void Create_field::create_length_to_internal_length(void)
case MYSQL_TYPE_STRING:
case MYSQL_TYPE_VARCHAR:
length*= charset->mbmaxlen;
- key_length= length;
- pack_length= calc_pack_length(sql_type, length);
+ set_if_smaller(length, UINT_MAX32);
+ key_length= (uint32)length;
+ pack_length= calc_pack_length(sql_type, key_length);
break;
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_SET:
@@ -9777,7 +9960,7 @@ void Create_field::create_length_to_internal_length(void)
}
else
{
- pack_length= length / 8;
+ pack_length= (uint)(length / 8);
/* We need one extra byte to store the bits we save among the null bits */
key_length= pack_length + MY_TEST(length & 7);
}
@@ -9789,246 +9972,135 @@ void Create_field::create_length_to_internal_length(void)
Field_new_decimal::Field_new_decimal as otherwise the record layout
gets out of sync.
*/
- uint precision= my_decimal_length_to_precision(length, decimals,
+ uint precision= my_decimal_length_to_precision((uint)length, decimals,
flags & UNSIGNED_FLAG);
set_if_smaller(precision, DECIMAL_MAX_PRECISION);
key_length= pack_length= my_decimal_get_binary_size(precision, decimals);
break;
}
default:
- key_length= pack_length= calc_pack_length(sql_type, length);
+ key_length= pack_length= calc_pack_length(sql_type, (uint)length);
break;
}
}
-/**
- Init for a tmp table field. To be extended if need be.
-*/
-void Create_field::init_for_tmp_table(enum_field_types sql_type_arg,
- uint32 length_arg, uint32 decimals_arg,
- bool maybe_null, bool is_unsigned,
- uint pack_length_arg)
-{
- DBUG_ENTER("Create_field::init_for_tmp_table");
+bool check_expression(Virtual_column_info *vcol, const char *name,
+ enum_vcol_info_type type)
- field_name= "";
- sql_type= sql_type_arg;
- char_length= length= length_arg;;
- unireg_check= Field::NONE;
- interval= 0;
- charset= &my_charset_bin;
- geom_type= Field::GEOM_GEOMETRY;
+{
+ bool ret;
+ Item::vcol_func_processor_result res;
- DBUG_PRINT("enter", ("sql_type: %d, length: %u, pack_length: %u",
- sql_type_arg, length_arg, pack_length_arg));
+ if (!vcol->name.length)
+ vcol->name.str= const_cast<char*>(name);
/*
- These pack flags are crafted to get it correctly through the
- branches of make_field().
- */
- switch (sql_type_arg)
- {
- case MYSQL_TYPE_VARCHAR:
- case MYSQL_TYPE_VAR_STRING:
- case MYSQL_TYPE_STRING:
- case MYSQL_TYPE_SET:
- pack_flag= 0;
- break;
-
- case MYSQL_TYPE_GEOMETRY:
- pack_flag= FIELDFLAG_GEOM;
- break;
-
- case MYSQL_TYPE_ENUM:
- pack_flag= FIELDFLAG_INTERVAL;
- break;
-
- case MYSQL_TYPE_NEWDECIMAL:
- DBUG_ASSERT(decimals_arg <= DECIMAL_MAX_SCALE);
- case MYSQL_TYPE_DECIMAL:
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- pack_flag= FIELDFLAG_NUMBER |
- (decimals_arg & FIELDFLAG_MAX_DEC) << FIELDFLAG_DEC_SHIFT;
- break;
-
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- pack_flag= FIELDFLAG_BLOB;
- break;
-
- case MYSQL_TYPE_BIT:
- pack_flag= FIELDFLAG_NUMBER | FIELDFLAG_TREAT_BIT_AS_CHAR;
- break;
+ Walk through the Item tree checking if all items are valid
+ to be part of the virtual column
+ */
+ res.errors= 0;
+ ret= vcol->expr->walk(&Item::check_vcol_func_processor, 0, &res);
+ vcol->flags= res.errors;
- default:
- pack_flag= FIELDFLAG_NUMBER;
- break;
- }
+ uint filter= VCOL_IMPOSSIBLE;
+ if (type != VCOL_GENERATED_VIRTUAL && type != VCOL_DEFAULT)
+ filter|= VCOL_NOT_STRICTLY_DETERMINISTIC;
- /*
- Set the pack flag correctly for the blob-like types. This sets the
- packtype to something that make_field can use. If the pack type is
- not set correctly, the packlength will be reeeeally wierd (like
- 129 or so).
- */
- switch (sql_type_arg)
+ if (ret || (res.errors & filter))
{
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_GEOMETRY:
- // If you are going to use the above types, you have to pass a
- // pack_length as parameter. Assert that is really done.
- DBUG_ASSERT(pack_length_arg != ~0U);
- pack_flag|= pack_length_to_packflag(pack_length_arg);
- break;
- default:
- /* Nothing */
- break;
+ my_error(ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name,
+ vcol_type_name(type), name);
+ return TRUE;
}
-
- pack_flag|=
- (maybe_null ? FIELDFLAG_MAYBE_NULL : 0) |
- (is_unsigned ? 0 : FIELDFLAG_DECIMAL);
-
- DBUG_PRINT("debug", ("pack_flag: %s%s%s%s%s%s, pack_type: %d",
- FLAGSTR(pack_flag, FIELDFLAG_BINARY),
- FLAGSTR(pack_flag, FIELDFLAG_NUMBER),
- FLAGSTR(pack_flag, FIELDFLAG_INTERVAL),
- FLAGSTR(pack_flag, FIELDFLAG_GEOM),
- FLAGSTR(pack_flag, FIELDFLAG_BLOB),
- FLAGSTR(pack_flag, FIELDFLAG_DECIMAL),
- f_packtype(pack_flag)));
- vcol_info= 0;
- create_if_not_exists= FALSE;
- stored_in_db= TRUE;
-
- DBUG_VOID_RETURN;
-}
-
-
-static inline bool is_item_func(Item* x)
-{
- return x != NULL && x->type() == Item::FUNC_ITEM;
+ /*
+ Safe to call before fix_fields as long as vcol's don't include sub
+ queries (which is now checked in check_vcol_func_processor)
+ */
+ if (vcol->expr->check_cols(1))
+ return TRUE;
+ return FALSE;
}
-bool Create_field::check(THD *thd)
+bool Column_definition::check(THD *thd)
{
const uint conditional_type_modifiers= AUTO_INCREMENT_FLAG;
uint sign_len, allowed_type_modifier= 0;
ulong max_field_charlength= MAX_FIELD_CHARLENGTH;
- DBUG_ENTER("Create_field::check");
+ DBUG_ENTER("Column_definition::check");
+ /* Initialize data for a computed field */
if (vcol_info)
{
+ DBUG_ASSERT(vcol_info->expr);
vcol_info->set_field_type(sql_type);
- sql_type= (enum enum_field_types)MYSQL_TYPE_VIRTUAL;
- }
-
- if (length > MAX_FIELD_BLOBLENGTH)
- {
- my_error(ER_TOO_BIG_DISPLAYWIDTH, MYF(0), field_name, MAX_FIELD_BLOBLENGTH);
- DBUG_RETURN(1);
+ if (check_expression(vcol_info, field_name, vcol_info->stored_in_db
+ ? VCOL_GENERATED_STORED : VCOL_GENERATED_VIRTUAL))
+ DBUG_RETURN(TRUE);
}
- if (decimals >= NOT_FIXED_DEC)
- {
- my_error(ER_TOO_BIG_SCALE, MYF(0), static_cast<ulonglong>(decimals),
- field_name, static_cast<ulong>(NOT_FIXED_DEC - 1));
- DBUG_RETURN(TRUE);
- }
+ if (check_constraint &&
+ check_expression(check_constraint, field_name, VCOL_CHECK_FIELD))
+ DBUG_RETURN(1);
- if (def)
+ if (default_value)
{
- /*
- Default value should be literal => basic constants =>
- no need fix_fields()
+ Item *def_expr= default_value->expr;
+ if (check_expression(default_value, field_name, VCOL_DEFAULT))
+ DBUG_RETURN(TRUE);
- We allow only one function as part of default value -
- NOW() as default for TIMESTAMP and DATETIME type.
- */
- if (def->type() == Item::FUNC_ITEM &&
- (static_cast<Item_func*>(def)->functype() != Item_func::NOW_FUNC ||
- (mysql_type_to_time_type(sql_type) != MYSQL_TIMESTAMP_DATETIME) ||
- def->decimals < length))
- {
- my_error(ER_INVALID_DEFAULT, MYF(0), field_name);
- DBUG_RETURN(1);
- }
- else if (def->type() == Item::NULL_ITEM)
+ /* Constant's are stored in the 'empty_record', except for blobs */
+ if (def_expr->basic_const_item())
{
- def= 0;
- if ((flags & (NOT_NULL_FLAG | AUTO_INCREMENT_FLAG)) == NOT_NULL_FLAG)
+ if (def_expr->type() == Item::NULL_ITEM)
{
- my_error(ER_INVALID_DEFAULT, MYF(0), field_name);
- DBUG_RETURN(1);
+ default_value= 0;
+ if ((flags & (NOT_NULL_FLAG | AUTO_INCREMENT_FLAG)) == NOT_NULL_FLAG)
+ {
+ my_error(ER_INVALID_DEFAULT, MYF(0), field_name);
+ DBUG_RETURN(1);
+ }
}
}
- else if (flags & AUTO_INCREMENT_FLAG)
- {
- my_error(ER_INVALID_DEFAULT, MYF(0), field_name);
- DBUG_RETURN(1);
- }
}
- if (is_item_func(def))
+ if (default_value && (flags & AUTO_INCREMENT_FLAG))
{
- /* There is a function default for insertions. */
- def= NULL;
- unireg_check= (is_item_func(on_update) ?
- Field::TIMESTAMP_DNUN_FIELD : // for insertions and for updates.
- Field::TIMESTAMP_DN_FIELD); // only for insertions.
- }
- else
- {
- /* No function default for insertions. Either NULL or a constant. */
- if (is_item_func(on_update))
- unireg_check= Field::TIMESTAMP_UN_FIELD; // function default for updates
- else
- unireg_check= ((flags & AUTO_INCREMENT_FLAG) ?
- Field::NEXT_NUMBER : // Automatic increment.
- Field::NONE);
- }
-
- if (on_update &&
- (mysql_type_to_time_type(sql_type) != MYSQL_TIMESTAMP_DATETIME ||
- on_update->decimals < length))
- {
- my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name);
+ my_error(ER_INVALID_DEFAULT, MYF(0), field_name);
DBUG_RETURN(1);
}
- /* Initialize data for a computed field */
- if (sql_type == MYSQL_TYPE_VIRTUAL)
+ if (default_value && !default_value->expr->basic_const_item() &&
+ mysql_type_to_time_type(sql_type) == MYSQL_TIMESTAMP_DATETIME &&
+ default_value->expr->type() == Item::FUNC_ITEM)
{
- DBUG_ASSERT(vcol_info && vcol_info->expr_item);
- stored_in_db= vcol_info->is_stored();
/*
- Walk through the Item tree checking if all items are valid
- to be part of the virtual column
+ Special case: NOW() for TIMESTAMP and DATETIME fields are handled
+ as in MariaDB 10.1 by marking them in unireg_check.
*/
- if (vcol_info->expr_item->walk(&Item::check_vcol_func_processor, 0, NULL))
+ Item_func *fn= static_cast<Item_func*>(default_value->expr);
+ if (fn->functype() == Item_func::NOW_FUNC &&
+ (fn->decimals == 0 || fn->decimals >= length))
{
- my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), field_name);
- DBUG_RETURN(TRUE);
+ default_value= 0;
+ unireg_check= Field::TIMESTAMP_DN_FIELD;
}
+ }
- /*
- Make a field created for the real type.
- Note that regular and computed fields differ from each other only by
- Field::vcol_info. It is is always NULL for a column that is not
- computed.
- */
- sql_type= vcol_info->get_real_type();
+ if (on_update)
+ {
+ if (mysql_type_to_time_type(sql_type) != MYSQL_TIMESTAMP_DATETIME ||
+ on_update->decimals < length)
+ {
+ my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name);
+ DBUG_RETURN(TRUE);
+ }
+ unireg_check= unireg_check == Field::NONE ? Field::TIMESTAMP_UN_FIELD
+ : Field::TIMESTAMP_DNUN_FIELD;
}
+ else if (flags & AUTO_INCREMENT_FLAG)
+ unireg_check= Field::NEXT_NUMBER;
sign_len= flags & UNSIGNED_FLAG ? 0 : 1;
@@ -10061,6 +10133,12 @@ bool Create_field::check(THD *thd)
case MYSQL_TYPE_NULL:
break;
case MYSQL_TYPE_NEWDECIMAL:
+ if (decimals >= NOT_FIXED_DEC)
+ {
+ my_error(ER_TOO_BIG_SCALE, MYF(0), static_cast<ulonglong>(decimals),
+ field_name, static_cast<uint>(NOT_FIXED_DEC - 1));
+ DBUG_RETURN(TRUE);
+ }
my_decimal_trim(&length, &decimals);
if (length > DECIMAL_MAX_PRECISION)
{
@@ -10074,9 +10152,9 @@ bool Create_field::check(THD *thd)
DBUG_RETURN(TRUE);
}
length=
- my_decimal_precision_to_length(length, decimals, flags & UNSIGNED_FLAG);
+ my_decimal_precision_to_length((uint)length, decimals, flags & UNSIGNED_FLAG);
pack_length=
- my_decimal_get_binary_size(length, decimals);
+ my_decimal_get_binary_size((uint)length, decimals);
break;
case MYSQL_TYPE_VARCHAR:
/*
@@ -10092,33 +10170,6 @@ bool Create_field::check(THD *thd)
case MYSQL_TYPE_LONG_BLOB:
case MYSQL_TYPE_MEDIUM_BLOB:
case MYSQL_TYPE_GEOMETRY:
- if (def)
- {
- /* Allow empty as default value. */
- String str,*res;
- res= def->val_str(&str);
- /*
- A default other than '' is always an error, and any non-NULL
- specified default is an error in strict mode.
- */
- if (res->length() || thd->is_strict_mode())
- {
- my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0),
- field_name); /* purecov: inspected */
- DBUG_RETURN(TRUE);
- }
- else
- {
- /*
- Otherwise a default of '' is just a warning.
- */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_BLOB_CANT_HAVE_DEFAULT,
- ER_THD(thd, ER_BLOB_CANT_HAVE_DEFAULT),
- field_name);
- }
- def= 0;
- }
flags|= BLOB_FLAG;
break;
case MYSQL_TYPE_YEAR:
@@ -10140,6 +10191,12 @@ bool Create_field::check(THD *thd)
my_error(ER_M_BIGGER_THAN_D, MYF(0), field_name);
DBUG_RETURN(TRUE);
}
+ if (decimals != NOT_FIXED_DEC && decimals >= FLOATING_POINT_DECIMALS)
+ {
+ my_error(ER_TOO_BIG_SCALE, MYF(0), static_cast<ulonglong>(decimals),
+ field_name, static_cast<uint>(FLOATING_POINT_DECIMALS-1));
+ DBUG_RETURN(TRUE);
+ }
break;
case MYSQL_TYPE_DOUBLE:
allowed_type_modifier= AUTO_INCREMENT_FLAG;
@@ -10154,6 +10211,12 @@ bool Create_field::check(THD *thd)
my_error(ER_M_BIGGER_THAN_D, MYF(0), field_name);
DBUG_RETURN(TRUE);
}
+ if (decimals != NOT_FIXED_DEC && decimals >= FLOATING_POINT_DECIMALS)
+ {
+ my_error(ER_TOO_BIG_SCALE, MYF(0), static_cast<ulonglong>(decimals),
+ field_name, static_cast<uint>(FLOATING_POINT_DECIMALS-1));
+ DBUG_RETURN(TRUE);
+ }
break;
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_TIMESTAMP2:
@@ -10213,14 +10276,14 @@ bool Create_field::check(THD *thd)
static_cast<ulong>(MAX_BIT_FIELD_LENGTH));
DBUG_RETURN(TRUE);
}
- pack_length= (length + 7) / 8;
+ pack_length= ((uint)length + 7) / 8;
break;
}
case MYSQL_TYPE_DECIMAL:
DBUG_ASSERT(0); /* Was obsolete */
}
/* Remember the value of length */
- char_length= length;
+ char_length= (uint)length;
/*
Set NO_DEFAULT_VALUE_FLAG if this field doesn't have a default value and
@@ -10228,7 +10291,7 @@ bool Create_field::check(THD *thd)
We need to do this check here and in mysql_create_prepare_table() as
sp_head::fill_field_definition() calls this function.
*/
- if (!def && unireg_check == Field::NONE && (flags & NOT_NULL_FLAG))
+ if (!default_value && unireg_check == Field::NONE && (flags & NOT_NULL_FLAG))
{
/*
TIMESTAMP columns get implicit DEFAULT value when
@@ -10243,7 +10306,7 @@ bool Create_field::check(THD *thd)
if (!(flags & BLOB_FLAG) &&
((length > max_field_charlength &&
- (sql_type != MYSQL_TYPE_VARCHAR || def)) ||
+ sql_type != MYSQL_TYPE_VARCHAR) ||
(length == 0 &&
sql_type != MYSQL_TYPE_ENUM && sql_type != MYSQL_TYPE_SET &&
sql_type != MYSQL_TYPE_STRING && sql_type != MYSQL_TYPE_VARCHAR &&
@@ -10257,6 +10320,12 @@ bool Create_field::check(THD *thd)
field_name, max_field_charlength); /* purecov: inspected */
DBUG_RETURN(TRUE);
}
+ else if (length > MAX_FIELD_BLOBLENGTH)
+ {
+ my_error(ER_TOO_BIG_DISPLAYWIDTH, MYF(0), field_name, MAX_FIELD_BLOBLENGTH);
+ DBUG_RETURN(1);
+ }
+
if ((~allowed_type_modifier) & flags & conditional_type_modifiers)
{
my_error(ER_WRONG_FIELD_SPEC, MYF(0), field_name);
@@ -10468,19 +10537,29 @@ Field *make_field(TABLE_SHARE *share,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case MYSQL_TYPE_FLOAT:
+ {
+ int decimals= f_decimals(pack_flag);
+ if (decimals == FLOATING_POINT_DECIMALS)
+ decimals= NOT_FIXED_DEC;
return new (mem_root)
Field_float(ptr,field_length,null_pos,null_bit,
unireg_check, field_name,
- f_decimals(pack_flag),
+ decimals,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag)== 0);
+ }
case MYSQL_TYPE_DOUBLE:
+ {
+ int decimals= f_decimals(pack_flag);
+ if (decimals == FLOATING_POINT_DECIMALS)
+ decimals= NOT_FIXED_DEC;
return new (mem_root)
Field_double(ptr,field_length,null_pos,null_bit,
unireg_check, field_name,
- f_decimals(pack_flag),
+ decimals,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag)== 0);
+ }
case MYSQL_TYPE_TINY:
return new (mem_root)
Field_tiny(ptr,field_length,null_pos,null_bit,
@@ -10590,10 +10669,10 @@ Field *make_field(TABLE_SHARE *share,
/** Create a field suitable for create of table. */
-Create_field::Create_field(THD *thd, Field *old_field, Field *orig_field)
+Column_definition::Column_definition(THD *thd, Field *old_field,
+ Field *orig_field)
{
- field= old_field;
- field_name=change=old_field->field_name;
+ field_name= old_field->field_name;
length= old_field->field_length;
flags= old_field->flags;
unireg_check=old_field->unireg_check;
@@ -10604,10 +10683,9 @@ Create_field::Create_field(THD *thd, Field *old_field, Field *orig_field)
comment= old_field->comment;
decimals= old_field->decimals();
vcol_info= old_field->vcol_info;
- create_if_not_exists= FALSE;
- stored_in_db= old_field->stored_in_db;
+ default_value= orig_field ? orig_field->default_value : 0;
+ check_constraint= orig_field ? orig_field->check_constraint : 0;
option_list= old_field->option_list;
- option_struct= old_field->option_struct;
switch (sql_type) {
case MYSQL_TYPE_BLOB:
@@ -10650,6 +10728,15 @@ Create_field::Create_field(THD *thd, Field *old_field, Field *orig_field)
buff, "YEAR(4)");
}
break;
+ case MYSQL_TYPE_FLOAT:
+ case MYSQL_TYPE_DOUBLE:
+ /*
+ Floating points are stored with FLOATING_POINT_DECIMALS but internally
+ in MariaDB used with NOT_FIXED_DEC, which is >= FLOATING_POINT_DECIMALS.
+ */
+ if (decimals >= FLOATING_POINT_DECIMALS)
+ decimals= NOT_FIXED_DEC;
+ break;
default:
break;
}
@@ -10658,8 +10745,7 @@ Create_field::Create_field(THD *thd, Field *old_field, Field *orig_field)
interval= ((Field_enum*) old_field)->typelib;
else
interval=0;
- def=0;
- char_length= length;
+ char_length= (uint)length;
/*
Copy the default (constant/function) from the column object orig_field, if
@@ -10667,40 +10753,31 @@ Create_field::Create_field(THD *thd, Field *old_field, Field *orig_field)
- The column allows a default.
- - The column type is not a BLOB type.
+ - The column type is not a BLOB type (as BLOB's doesn't have constant
+ defaults)
- The original column (old_field) was properly initialized with a record
buffer pointer.
+
+ - The column didn't have a default expression
*/
if (!(flags & (NO_DEFAULT_VALUE_FLAG | BLOB_FLAG)) &&
- old_field->ptr != NULL &&
- orig_field != NULL)
+ old_field->ptr != NULL && orig_field != NULL)
{
- bool default_now= false;
- if (real_type_with_now_as_default(sql_type))
- {
- // The SQL type of the new field allows a function default:
- default_now= orig_field->has_insert_default_function();
- bool update_now= orig_field->has_update_default_function();
-
- if (default_now && update_now)
- unireg_check= Field::TIMESTAMP_DNUN_FIELD;
- else if (default_now)
- unireg_check= Field::TIMESTAMP_DN_FIELD;
- else if (update_now)
- unireg_check= Field::TIMESTAMP_UN_FIELD;
- }
- if (!default_now) // Give a constant default
+ if (orig_field->unireg_check != Field::NEXT_NUMBER)
+ unireg_check= orig_field->unireg_check;
+
+ /* Get the value from default_values */
+ const uchar *dv= orig_field->table->s->default_values;
+ if (!default_value && !orig_field->is_null_in_record(dv))
{
- /* Get the value from default_values */
- const uchar *dv= orig_field->table->s->default_values;
- if (!orig_field->is_null_in_record(dv))
- {
- StringBuffer<MAX_FIELD_WIDTH> tmp(charset);
- String *res= orig_field->val_str(&tmp, orig_field->ptr_in_record(dv));
- char *pos= (char*) sql_strmake(res->ptr(), res->length());
- def= new (thd->mem_root) Item_string(thd, pos, res->length(), charset);
- }
+ StringBuffer<MAX_FIELD_WIDTH> tmp(charset);
+ String *res= orig_field->val_str(&tmp, orig_field->ptr_in_record(dv));
+ char *pos= (char*) thd->strmake(res->ptr(), res->length());
+ default_value= new (thd->mem_root) Virtual_column_info();
+ default_value->expr=
+ new (thd->mem_root) Item_string(thd, pos, res->length(), charset);
+ default_value->utf8= 0;
}
}
}
@@ -10720,7 +10797,7 @@ Create_field::Create_field(THD *thd, Field *old_field, Field *orig_field)
length
*/
-uint32 Field_blob::char_length()
+uint32 Field_blob::char_length() const
{
switch (packlength)
{
@@ -10751,6 +10828,20 @@ Create_field *Create_field::clone(MEM_ROOT *mem_root) const
return res;
}
+/**
+ Return true if default is an expression that must be saved explicitely
+
+ This is:
+ - Not basic constants
+ - If field is a BLOB (Which doesn't support normal DEFAULT)
+*/
+
+bool Column_definition::has_default_expression()
+{
+ return (default_value &&
+ (!default_value->expr->basic_const_item() ||
+ (flags & BLOB_FLAG)));
+}
/**
maximum possible display length for blob.
@@ -10847,7 +10938,8 @@ void Field::set_datetime_warning(Sql_condition::enum_warning_level level,
{
THD *thd= get_thd();
if (thd->really_abort_on_warning() && level >= Sql_condition::WARN_LEVEL_WARN)
- make_truncated_value_warning(thd, level, str, ts_type, field_name);
+ make_truncated_value_warning(thd, level, str, ts_type,
+ table->s, field_name);
else
set_warning(level, code, cuted_increment);
}
@@ -10857,10 +10949,19 @@ void Field::set_warning_truncated_wrong_value(const char *type_arg,
const char *value)
{
THD *thd= get_thd();
+ const char *db_name= table->s->db.str;
+ const char *table_name= table->s->table_name.str;
+
+ if (!db_name)
+ db_name= "";
+ if (!table_name)
+ table_name= "";
+
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER_THD(thd, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- type_arg, value, field_name,
+ type_arg, value,
+ db_name, table_name, field_name,
static_cast<ulong>(thd->get_stmt_da()->
current_row_for_warning()));
}
@@ -10907,12 +11008,13 @@ key_map Field::get_possible_keys()
analyzed to check if it really should count as a value.
*/
-void Field::set_explicit_default(Item *value)
+bool Field::set_explicit_default(Item *value)
{
if (value->type() == Item::DEFAULT_VALUE_ITEM &&
!((Item_default_value*)value)->arg)
- return;
+ return false;
set_has_explicit_value();
+ return true;
}
@@ -10933,3 +11035,66 @@ bool Field::validate_value_in_record_with_warn(THD *thd, const uchar *record)
dbug_tmp_restore_column_map(table->read_set, old_map);
return rc;
}
+
+
+bool Field::save_in_field_default_value(bool view_error_processing)
+{
+ THD *thd= table->in_use;
+
+ if (flags & NO_DEFAULT_VALUE_FLAG &&
+ real_type() != MYSQL_TYPE_ENUM)
+ {
+ if (reset())
+ {
+ my_message(ER_CANT_CREATE_GEOMETRY_OBJECT,
+ ER_THD(thd, ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0));
+ return true;
+ }
+
+ if (view_error_processing)
+ {
+ TABLE_LIST *view= table->pos_in_table_list->top_table();
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_NO_DEFAULT_FOR_VIEW_FIELD,
+ ER_THD(thd, ER_NO_DEFAULT_FOR_VIEW_FIELD),
+ view->view_db.str,
+ view->view_name.str);
+ }
+ else
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_NO_DEFAULT_FOR_FIELD,
+ ER_THD(thd, ER_NO_DEFAULT_FOR_FIELD),
+ field_name);
+ }
+ return true;
+ }
+ set_default();
+ return
+ !is_null() &&
+ validate_value_in_record_with_warn(thd, table->record[0]) &&
+ thd->is_error();
+}
+
+
+bool Field::save_in_field_ignore_value(bool view_error_processing)
+{
+ enum_sql_command com= table->in_use->lex->sql_command;
+ // All insert-like commands
+ if (com == SQLCOM_INSERT || com == SQLCOM_REPLACE ||
+ com == SQLCOM_INSERT_SELECT || com == SQLCOM_REPLACE_SELECT ||
+ com == SQLCOM_LOAD)
+ return save_in_field_default_value(view_error_processing);
+ return 0; // ignore
+}
+
+
+void Field::register_field_in_read_map()
+{
+ if (vcol_info)
+ {
+ Item *vcol_item= vcol_info->expr;
+ vcol_item->walk(&Item::register_field_in_read_map, 1, 0);
+ }
+ bitmap_set_bit(table->read_set, field_index);
+}
diff --git a/sql/field.h b/sql/field.h
index 86853f7d9d9..5a1ec2df8d0 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1,7 +1,7 @@
#ifndef FIELD_INCLUDED
#define FIELD_INCLUDED
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2015, MariaDB
+ Copyright (c) 2008, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -33,6 +33,7 @@
#include "compat56.h"
class Send_field;
+class Copy_field;
class Protocol;
class Create_field;
class Relay_log_info;
@@ -50,7 +51,6 @@ enum enum_check_fields
CHECK_FIELD_ERROR_FOR_NULL
};
-
/*
Common declarations for Field and Item
*/
@@ -80,6 +80,35 @@ protected:
Warn_filter_all() :Warn_filter(true, true) { }
};
+ class Converter_double_to_longlong
+ {
+ protected:
+ bool m_error;
+ longlong m_result;
+ public:
+ Converter_double_to_longlong(double nr, bool unsigned_flag);
+ longlong result() const { return m_result; }
+ bool error() const { return m_error; }
+ void push_warning(THD *thd, double nr, bool unsigned_flag);
+ };
+ class Converter_double_to_longlong_with_warn:
+ public Converter_double_to_longlong
+ {
+ public:
+ Converter_double_to_longlong_with_warn(THD *thd, double nr,
+ bool unsigned_flag)
+ :Converter_double_to_longlong(nr, unsigned_flag)
+ {
+ if (m_error)
+ push_warning(thd, nr, unsigned_flag);
+ }
+ Converter_double_to_longlong_with_warn(double nr, bool unsigned_flag)
+ :Converter_double_to_longlong(nr, unsigned_flag)
+ {
+ if (m_error)
+ push_warning(current_thd, nr, unsigned_flag);
+ }
+ };
// String-to-number converters
class Converter_string_to_number
@@ -182,7 +211,7 @@ protected:
CHARSET_INFO *cs, const char *str, size_t length,
my_decimal *buf)
{
- m_error= str2my_decimal(mask, str, length, cs,
+ m_error= str2my_decimal(mask, str,(uint) length, cs,
buf, (const char **) &m_end_of_num);
// E_DEC_TRUNCATED means a very minor truncation: '1e-100' -> 0
m_edom= m_error && m_error != E_DEC_TRUNCATED;
@@ -280,6 +309,16 @@ protected:
return decimal_value;
}
+ longlong longlong_from_hex_hybrid(const char *str, uint32 length)
+ {
+ const char *end= str + length;
+ const char *ptr= end - MY_MIN(length, sizeof(longlong));
+ ulonglong value= 0;
+ for ( ; ptr != end ; ptr++)
+ value= (value << 8) + (ulonglong) (uchar) *ptr;
+ return (longlong) value;
+ }
+
longlong longlong_from_string_with_check(const String *str) const
{
return longlong_from_string_with_check(str->charset(),
@@ -384,7 +423,9 @@ enum Derivation
#define MY_REPERTOIRE_NUMERIC MY_REPERTOIRE_ASCII
/* The length of the header part for each virtual column in the .frm file */
-#define FRM_VCOL_HEADER_SIZE(b) (3 + MY_TEST(b))
+#define FRM_VCOL_OLD_HEADER_SIZE(b) (3 + MY_TEST(b))
+#define FRM_VCOL_NEW_BASE_SIZE 16
+#define FRM_VCOL_NEW_HEADER_SIZE 6
class Count_distinct_field;
@@ -392,11 +433,8 @@ struct ha_field_option_struct;
struct st_cache_field;
int field_conv(Field *to,Field *from);
-int field_conv_incompatible(Field *to,Field *from);
-bool memcpy_field_possible(Field *to, Field *from);
int truncate_double(double *nr, uint field_length, uint dec,
bool unsigned_flag, double max_value);
-longlong double_to_longlong(double nr, bool unsigned_flag, bool *error);
inline uint get_enum_pack_length(int elements)
{
@@ -436,20 +474,6 @@ inline bool is_temporal_type_with_date(enum_field_types type)
/**
- Tests if a field real type can have "DEFAULT CURRENT_TIMESTAMP"
-
- @param type Field type, as returned by field->real_type().
- @retval true If field real type can have "DEFAULT CURRENT_TIMESTAMP".
- @retval false If field real type can not have "DEFAULT CURRENT_TIMESTAMP".
-*/
-inline bool real_type_with_now_as_default(enum_field_types type)
-{
- return type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_TIMESTAMP2 ||
- type == MYSQL_TYPE_DATETIME || type == MYSQL_TYPE_DATETIME2;
-}
-
-
-/**
Recognizer for concrete data type (called real_type for some reason),
returning true if it is one of the TIMESTAMP types.
*/
@@ -536,6 +560,47 @@ inline bool is_temporal_type_with_time(enum_field_types type)
}
}
+enum enum_vcol_info_type
+{
+ VCOL_GENERATED_VIRTUAL, VCOL_GENERATED_STORED,
+ VCOL_DEFAULT, VCOL_CHECK_FIELD, VCOL_CHECK_TABLE,
+ /* Additional types should be added here */
+ /* Following is the highest value last */
+ VCOL_TYPE_NONE = 127 // Since the 0 value is already in use
+};
+
+static inline const char *vcol_type_name(enum_vcol_info_type type)
+{
+ switch (type)
+ {
+ case VCOL_GENERATED_VIRTUAL:
+ case VCOL_GENERATED_STORED:
+ return "GENERATED ALWAYS AS";
+ case VCOL_DEFAULT:
+ return "DEFAULT";
+ case VCOL_CHECK_FIELD:
+ case VCOL_CHECK_TABLE:
+ return "CHECK";
+ case VCOL_TYPE_NONE:
+ return "UNTYPED";
+ }
+ return 0;
+}
+
+/*
+ Flags for Virtual_column_info. If none is set, the expression must be
+ a constant with no side-effects, so it's calculated at CREATE TABLE time,
+ stored in table->record[2], and not recalculated for every statement.
+*/
+#define VCOL_FIELD_REF 1
+#define VCOL_NON_DETERMINISTIC 2
+#define VCOL_SESSION_FUNC 4 /* uses session data, e.g. USER or DAYNAME */
+#define VCOL_TIME_FUNC 8
+#define VCOL_AUTO_INC 16
+#define VCOL_IMPOSSIBLE 32
+
+#define VCOL_NOT_STRICTLY_DETERMINISTIC \
+ (VCOL_NON_DETERMINISTIC | VCOL_TIME_FUNC | VCOL_SESSION_FUNC)
/*
Virtual_column_info is the class to contain additional
@@ -550,32 +615,48 @@ inline bool is_temporal_type_with_time(enum_field_types type)
class Virtual_column_info: public Sql_alloc
{
private:
+ enum_vcol_info_type vcol_type; /* Virtual column expression type */
/*
The following data is only updated by the parser and read
when a Create_field object is created/initialized.
*/
enum_field_types field_type; /* Real field type*/
- /* Flag indicating that the field is physically stored in the database */
- bool stored_in_db;
/* Flag indicating that the field used in a partitioning expression */
bool in_partitioning_expr;
public:
- /* The expression to compute the value of the virtual column */
- Item *expr_item;
- /* Text representation of the defining expression */
- LEX_STRING expr_str;
+ /* Flag indicating that the field is physically stored in the database */
+ bool stored_in_db;
+ bool utf8; /* Already in utf8 */
+ Item *expr;
+ LEX_STRING name; /* Name of constraint */
+ /* see VCOL_* (VCOL_FIELD_REF, ...) */
+ uint flags;
Virtual_column_info()
- : field_type((enum enum_field_types)MYSQL_TYPE_VIRTUAL),
- stored_in_db(FALSE), in_partitioning_expr(FALSE),
- expr_item(NULL)
+ : vcol_type((enum_vcol_info_type)VCOL_TYPE_NONE),
+ field_type((enum enum_field_types)MYSQL_TYPE_VIRTUAL),
+ in_partitioning_expr(FALSE), stored_in_db(FALSE),
+ utf8(TRUE), expr(NULL), flags(0)
{
- expr_str.str= NULL;
- expr_str.length= 0;
+ name.str= NULL;
+ name.length= 0;
};
~Virtual_column_info() {}
- enum_field_types get_real_type()
+ enum_vcol_info_type get_vcol_type() const
+ {
+ return vcol_type;
+ }
+ void set_vcol_type(enum_vcol_info_type v_type)
+ {
+ vcol_type= v_type;
+ }
+ const char *get_vcol_type_name() const
+ {
+ DBUG_ASSERT(vcol_type != VCOL_TYPE_NONE);
+ return vcol_type_name(vcol_type);
+ }
+ enum_field_types get_real_type() const
{
return field_type;
}
@@ -584,7 +665,7 @@ public:
/* Calling this function can only be done once. */
field_type= fld_type;
}
- bool is_stored()
+ bool is_stored() const
{
return stored_in_db;
}
@@ -592,7 +673,7 @@ public:
{
stored_in_db= stored;
}
- bool is_in_partitioning_expr()
+ bool is_in_partitioning_expr() const
{
return in_partitioning_expr;
}
@@ -600,24 +681,32 @@ public:
{
in_partitioning_expr= TRUE;
}
- bool is_equal(Virtual_column_info* vcol)
- {
- return field_type == vcol->get_real_type()
- && stored_in_db == vcol->is_stored()
- && expr_str.length == vcol->expr_str.length
- && memcmp(expr_str.str, vcol->expr_str.str, expr_str.length) == 0;
- }
+ inline bool is_equal(const Virtual_column_info* vcol) const;
+ inline void print(String*);
};
class Field: public Value_source
{
Field(const Item &); /* Prevent use of these */
void operator=(Field &);
+protected:
+ int save_in_field_str(Field *to)
+ {
+ StringBuffer<MAX_FIELD_WIDTH> result(charset());
+ val_str(&result);
+ return to->store(result.ptr(), result.length(), charset());
+ }
+ static void do_field_int(Copy_field *copy);
+ static void do_field_real(Copy_field *copy);
+ static void do_field_string(Copy_field *copy);
+ static void do_field_temporal(Copy_field *copy);
+ static void do_field_timestamp(Copy_field *copy);
+ static void do_field_decimal(Copy_field *copy);
public:
static void *operator new(size_t size, MEM_ROOT *mem_root) throw ()
{ return alloc_root(mem_root, size); }
static void *operator new(size_t size) throw ()
- { return sql_alloc(size); }
+ { return thd_alloc(current_thd, size); }
static void operator delete(void *ptr_arg, size_t size) { TRASH_FREE(ptr_arg, size); }
static void operator delete(void *ptr, MEM_ROOT *mem_root)
{ DBUG_ASSERT(0); }
@@ -657,10 +746,14 @@ public:
in more clean way with transition to new text based .frm format.
See also comment for Field_timestamp::Field_timestamp().
*/
- enum utype { NONE,DATE,SHIELD,NOEMPTY,CASEUP,PNR,BGNR,PGNR,YES,NO,REL,
- CHECK,EMPTY,UNKNOWN_FIELD,CASEDN,NEXT_NUMBER,INTERVAL_FIELD,
- BIT_FIELD, TIMESTAMP_OLD_FIELD, CAPITALIZE, BLOB_FIELD,
- TIMESTAMP_DN_FIELD, TIMESTAMP_UN_FIELD, TIMESTAMP_DNUN_FIELD};
+ enum utype {
+ NONE=0,
+ NEXT_NUMBER=15, // AUTO_INCREMENT
+ TIMESTAMP_OLD_FIELD=18, // TIMESTAMP created before 4.1.3
+ TIMESTAMP_DN_FIELD=21, // TIMESTAMP DEFAULT NOW()
+ TIMESTAMP_UN_FIELD=22, // TIMESTAMP ON UPDATE NOW()
+ TIMESTAMP_DNUN_FIELD=23 // TIMESTAMP DEFAULT NOW() ON UPDATE NOW()
+ };
enum geometry_type
{
GEOM_GEOMETRY = 0, GEOM_POINT = 1, GEOM_LINESTRING = 2, GEOM_POLYGON = 3,
@@ -715,39 +808,58 @@ public:
Column_statistics_collected *collected_stats;
/*
- This is additional data provided for any computed(virtual) field.
- In particular it includes a pointer to the item by which this field
+ This is additional data provided for any computed(virtual) field,
+ default function or check constraint.
+ In particular it includes a pointer to the item by which this field
can be computed from other fields.
*/
- Virtual_column_info *vcol_info;
- /*
- Flag indicating that the field is physically stored in tables
- rather than just computed from other fields.
- As of now, FALSE can be set only for computed virtual columns.
- */
- bool stored_in_db;
+ Virtual_column_info *vcol_info, *check_constraint, *default_value;
Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg);
virtual ~Field() {}
+ /**
+ Convenience definition of a copy function returned by
+ Field::get_copy_func()
+ */
+ typedef void Copy_func(Copy_field*);
+ virtual Copy_func *get_copy_func(const Field *from) const= 0;
/* Store functions returns 1 on overflow and -1 on fatal error */
+ virtual int store_field(Field *from) { return from->save_in_field(this); }
+ virtual int save_in_field(Field *to)= 0;
+ /**
+ Check if it is possible just copy the value
+ of the field 'from' to the field 'this', e.g. for
+ INSERT INTO t1 (field1) SELECT field2 FROM t2;
+ @param from - The field to copy from
+ @retval true - it is possible to just copy value of 'from' to 'this'
+ @retval false - conversion is needed
+ */
+ virtual bool memcpy_field_possible(const Field *from) const= 0;
virtual int store(const char *to, uint length,CHARSET_INFO *cs)=0;
+ virtual int store_hex_hybrid(const char *str, uint length);
virtual int store(double nr)=0;
virtual int store(longlong nr, bool unsigned_val)=0;
virtual int store_decimal(const my_decimal *d)=0;
virtual int store_time_dec(MYSQL_TIME *ltime, uint dec);
+ virtual int store_timestamp(my_time_t timestamp, ulong sec_part);
int store_time(MYSQL_TIME *ltime)
{ return store_time_dec(ltime, TIME_SECOND_PART_DIGITS); }
int store(const char *to, uint length, CHARSET_INFO *cs,
enum_check_fields check_level);
int store(const LEX_STRING *ls, CHARSET_INFO *cs)
- { return store(ls->str, ls->length, cs); }
+ { return store(ls->str, (uint32) ls->length, cs); }
virtual double val_real(void)=0;
virtual longlong val_int(void)=0;
+ /*
+ Get ulonglong representation.
+ Negative values are truncated to 0.
+ */
virtual ulonglong val_uint(void)
{
- return (ulonglong) val_int();
+ longlong nr= val_int();
+ return nr < 0 ? 0 : (ulonglong) nr;
}
virtual bool val_bool(void)= 0;
virtual my_decimal *val_decimal(my_decimal *);
@@ -766,6 +878,7 @@ public:
*/
virtual String *val_str(String*,String *)=0;
String *val_int_as_str(String *val_buffer, bool unsigned_flag);
+ fast_field_copier get_fast_field_copier(const Field *from);
/*
str_needs_quotes() returns TRUE if the value returned by val_str() needs
to be quoted when used in constructing an SQL query.
@@ -781,7 +894,7 @@ public:
return (ptr == field->ptr && null_ptr == field->null_ptr &&
null_bit == field->null_bit && field->type() == type());
}
- virtual bool eq_def(Field *field);
+ virtual bool eq_def(const Field *field) const;
/*
pack_length() returns size (in bytes) used to store field data in memory
@@ -850,26 +963,16 @@ public:
my_ptrdiff_t l_offset= (my_ptrdiff_t) (record - table->record[0]);
return ptr + l_offset;
}
- virtual void set_default()
- {
- my_ptrdiff_t l_offset= (my_ptrdiff_t) (table->s->default_values -
- table->record[0]);
- memcpy(ptr, ptr + l_offset, pack_length());
- if (maybe_null_in_table())
- *null_ptr= ((*null_ptr & (uchar) ~null_bit) |
- (null_ptr[l_offset] & null_bit));
- }
+ virtual int set_default();
- bool has_insert_default_function() const
+ bool has_update_default_function() const
{
- return unireg_check == TIMESTAMP_DN_FIELD ||
- unireg_check == TIMESTAMP_DNUN_FIELD;
+ return flags & ON_UPDATE_NOW_FLAG;
}
-
- bool has_update_default_function() const
+ bool has_default_now_unireg_check() const
{
- return unireg_check == TIMESTAMP_UN_FIELD ||
- unireg_check == TIMESTAMP_DNUN_FIELD;
+ return unireg_check == TIMESTAMP_DN_FIELD
+ || unireg_check == TIMESTAMP_DNUN_FIELD;
}
/*
@@ -878,18 +981,13 @@ public:
*/
void set_has_explicit_value()
{
- flags|= HAS_EXPLICIT_VALUE;
+ bitmap_set_bit(&table->has_value_set, field_index);
}
-
- virtual void set_explicit_default(Item *value);
-
- /**
- Evaluates the @c INSERT default function and stores the result in the
- field. If no such function exists for the column, or the function is not
- valid for the column's data type, invoking this function has no effect.
- */
- virtual int evaluate_insert_default_function() { return 0; }
-
+ bool has_explicit_value()
+ {
+ return bitmap_is_set(&table->has_value_set, field_index);
+ }
+ bool set_explicit_default(Item *value);
/**
Evaluates the @c UPDATE default function, if one exists, and stores the
@@ -957,7 +1055,7 @@ public:
virtual int cmp_max(const uchar *a, const uchar *b, uint max_len)
{ return cmp(a, b); }
virtual int cmp(const uchar *,const uchar *)=0;
- virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0L)
+ virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U)
{ return memcmp(a,b,pack_length()); }
virtual int cmp_offset(uint row_offset)
{ return cmp(ptr,ptr+row_offset); }
@@ -1045,6 +1143,11 @@ public:
{ if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; }
inline bool maybe_null(void) const
{ return null_ptr != 0 || table->maybe_null; }
+ // Set to NULL on LOAD DATA or LOAD XML
+ virtual bool load_data_set_null(THD *thd);
+ // Reset when a LOAD DATA file ended unexpectedly
+ virtual bool load_data_set_no_data(THD *thd, bool fixed_format);
+ void load_data_set_value(const char *pos, uint length, CHARSET_INFO *cs);
/* @return true if this field is NULL-able (even if temporarily) */
inline bool real_maybe_null(void) const { return null_ptr != 0; }
@@ -1068,6 +1171,8 @@ public:
null_bit= p_null_bit;
}
+ bool stored_in_db() const { return !vcol_info || vcol_info->stored_in_db; }
+
inline THD *get_thd() const
{ return likely(table) ? table->in_use : current_thd; }
@@ -1116,6 +1221,11 @@ public:
ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg;
}
inline void move_field(uchar *ptr_arg) { ptr=ptr_arg; }
+ inline uchar *record_ptr() // record[0] or wherever the field was moved to
+ {
+ my_ptrdiff_t offset= table->s->field[field_index]->ptr - table->s->default_values;
+ return ptr - offset;
+ }
virtual void move_field_offset(my_ptrdiff_t ptr_diff)
{
ptr=ADD_TO_PTR(ptr,ptr_diff, uchar*);
@@ -1206,7 +1316,7 @@ public:
virtual uint max_packed_col_length(uint max_length)
{ return max_length;}
- uint offset(uchar *record)
+ uint offset(uchar *record) const
{
return (uint) (ptr - record);
}
@@ -1253,6 +1363,7 @@ protected:
return (op_result == E_DEC_OVERFLOW);
}
int warn_if_overflow(int op_result);
+ Copy_func *get_identical_copy_func() const;
public:
void set_table_name(String *alias)
{
@@ -1263,6 +1374,18 @@ public:
orig_table= table= table_arg;
set_table_name(&table_arg->alias);
}
+ void init_for_make_new_field(TABLE *new_table_arg, TABLE *orig_table_arg)
+ {
+ init(new_table_arg);
+ /*
+ Normally orig_table is different from table only if field was
+ created via ::make_new_field. Here we alter the type of field,
+ so ::make_new_field is not applicable. But we still need to
+ preserve the original field metadata for the client-server
+ protocol.
+ */
+ orig_table= orig_table_arg;
+ }
/* maximum possible display length */
virtual uint32 max_display_length()= 0;
@@ -1279,7 +1402,7 @@ public:
longlong convert_decimal2longlong(const my_decimal *val, bool unsigned_flag,
int *err);
/* The max. number of characters */
- virtual uint32 char_length()
+ virtual uint32 char_length() const
{
return field_length / charset()->mbmaxlen;
}
@@ -1300,7 +1423,8 @@ public:
void set_storage_type(ha_storage_media storage_type_arg)
{
DBUG_ASSERT(field_storage_type() == HA_SM_DEFAULT);
- flags |= (storage_type_arg << FIELD_FLAGS_STORAGE_MEDIA);
+ flags |= static_cast<uint32>(storage_type_arg) <<
+ FIELD_FLAGS_STORAGE_MEDIA;
}
column_format_type column_format() const
@@ -1312,7 +1436,8 @@ public:
void set_column_format(column_format_type column_format_arg)
{
DBUG_ASSERT(column_format() == COLUMN_FORMAT_TYPE_DEFAULT);
- flags |= (column_format_arg << FIELD_FLAGS_COLUMN_FORMAT);
+ flags |= static_cast<uint32>(column_format_arg) <<
+ FIELD_FLAGS_COLUMN_FORMAT;
}
/*
@@ -1348,7 +1473,7 @@ public:
- If field is char/varchar/.. and is not part of write set.
TRUE - If field is char/varchar/.. and is part of write set.
*/
- virtual bool is_updatable() const { return FALSE; }
+ virtual bool is_varchar_and_in_write_set() const { return FALSE; }
/* Check whether the field can be used as a join attribute in hash join */
virtual bool hash_join_is_possible() { return TRUE; }
@@ -1406,7 +1531,14 @@ public:
// Exactly the same rules with REF access
return can_optimize_keypart_ref(cond, item);
}
- friend int cre_myisam(char * name, register TABLE *form, uint options,
+
+ bool save_in_field_default_value(bool view_eror_processing);
+ bool save_in_field_ignore_value(bool view_error_processing);
+
+ /* Mark field in read map. Updates also virtual fields */
+ void register_field_in_read_map();
+
+ friend int cre_myisam(char * name, TABLE *form, uint options,
ulonglong auto_increment_value);
friend class Copy_field;
friend class Item_avg_field;
@@ -1530,7 +1662,24 @@ public:
void make_field(Send_field *);
uint decimals() const { return (uint) dec; }
uint size_of() const { return sizeof(*this); }
- bool eq_def(Field *field);
+ bool eq_def(const Field *field) const;
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ if (unsigned_flag && from->cmp_type() == DECIMAL_RESULT)
+ return do_field_decimal;
+ return do_field_int;
+ }
+ int save_in_field(Field *to)
+ {
+ return to->store(val_int(), MY_TEST(flags & UNSIGNED_FLAG));
+ }
+ bool memcpy_field_possible(const Field *from) const
+ {
+ return real_type() == from->real_type() &&
+ pack_length() == from->pack_length() &&
+ !((flags & UNSIGNED_FLAG) && !(from->flags & UNSIGNED_FLAG)) &&
+ decimals() == from->decimals();
+ }
int store_decimal(const my_decimal *);
my_decimal *val_decimal(my_decimal *);
bool val_bool() { return val_int() != 0; }
@@ -1565,10 +1714,21 @@ public:
const char *field_name_arg, CHARSET_INFO *charset);
Item_result result_type () const { return STRING_RESULT; }
uint decimals() const { return NOT_FIXED_DEC; }
+ int save_in_field(Field *to) { return save_in_field_str(to); }
+ bool memcpy_field_possible(const Field *from) const
+ {
+ return real_type() == from->real_type() &&
+ pack_length() == from->pack_length() &&
+ charset() == from->charset();
+ }
int store(double nr);
int store(longlong nr, bool unsigned_val)=0;
int store_decimal(const my_decimal *);
int store(const char *to,uint length,CHARSET_INFO *cs)=0;
+ int store_hex_hybrid(const char *str, uint length)
+ {
+ return store(str, length, &my_charset_bin);
+ }
uint repertoire(void) const { return field_repertoire; }
CHARSET_INFO *charset(void) const { return field_charset; }
enum Derivation derivation(void) const { return field_derivation; }
@@ -1627,7 +1787,7 @@ public:
int store_decimal(const my_decimal *d);
uint32 max_data_length() const;
- bool is_updatable() const
+ bool is_varchar_and_in_write_set() const
{
DBUG_ASSERT(table && table->write_set);
return bitmap_is_set(table->write_set, field_index);
@@ -1658,9 +1818,24 @@ public:
uint8 dec_arg, bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
field_name_arg, dec_arg, zero_arg, unsigned_arg),
- not_fixed(dec_arg >= NOT_FIXED_DEC)
+ not_fixed(dec_arg >= FLOATING_POINT_DECIMALS)
{}
Item_result result_type () const { return REAL_RESULT; }
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ return do_field_real;
+ }
+ int save_in_field(Field *to) { return to->store(val_real()); }
+ bool memcpy_field_possible(const Field *from) const
+ {
+ /*
+ Cannot do memcpy from a longer field to a shorter field,
+ e.g. a DOUBLE(53,10) into a DOUBLE(10,10).
+ But it should be OK the other way around.
+ */
+ return Field_num::memcpy_field_possible(from) &&
+ field_length >= from->field_length;
+ }
int store_decimal(const my_decimal *);
int store_time_dec(MYSQL_TIME *ltime, uint dec);
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
@@ -1682,9 +1857,14 @@ public:
unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
{}
+ Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type);
enum_field_types type() const { return MYSQL_TYPE_DECIMAL;}
enum ha_base_keytype key_type() const
{ return zerofill ? HA_KEYTYPE_BINARY : HA_KEYTYPE_NUM; }
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ return eq_def(from) ? get_identical_copy_func() : do_field_string;
+ }
int reset(void);
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
@@ -1728,6 +1908,22 @@ public:
enum_field_types type() const { return MYSQL_TYPE_NEWDECIMAL;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
Item_result result_type () const { return DECIMAL_RESULT; }
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ // if (from->real_type() == MYSQL_TYPE_BIT) // QQ: why?
+ // return do_field_int;
+ return do_field_decimal;
+ }
+ int save_in_field(Field *to)
+ {
+ my_decimal buff;
+ return to->store_decimal(val_decimal(&buff));
+ }
+ bool memcpy_field_possible(const Field *from) const
+ {
+ return Field_num::memcpy_field_possible(from) &&
+ field_length == from->field_length;
+ }
int reset(void);
bool store_value(const my_decimal *decimal_value);
bool store_value(const my_decimal *decimal_value, int *native_error);
@@ -1739,6 +1935,7 @@ public:
int store_decimal(const my_decimal *);
double val_real(void);
longlong val_int(void);
+ ulonglong val_uint(void);
my_decimal *val_decimal(my_decimal *);
String *val_str(String*, String *);
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
@@ -1766,15 +1963,34 @@ public:
};
-class Field_tiny :public Field_num {
+class Field_integer: public Field_num
+{
+public:
+ Field_integer(uchar *ptr_arg, uint32 len_arg,
+ uchar *null_ptr_arg, uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ bool zero_arg, bool unsigned_arg)
+ :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg, 0,
+ zero_arg, unsigned_arg)
+ { }
+ ulonglong val_uint()
+ {
+ longlong nr= val_int();
+ return nr < 0 && !unsigned_flag ? 0 : (ulonglong) nr;
+ }
+};
+
+
+class Field_tiny :public Field_integer {
public:
Field_tiny(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
- uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- bool zero_arg, bool unsigned_arg)
- :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg,
- 0, zero_arg,unsigned_arg)
+ uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ bool zero_arg, bool unsigned_arg)
+ :Field_integer(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg,
+ zero_arg, unsigned_arg)
{}
enum_field_types type() const { return MYSQL_TYPE_TINY;}
enum ha_base_keytype key_type() const
@@ -1815,20 +2031,20 @@ public:
};
-class Field_short :public Field_num {
+class Field_short :public Field_integer {
public:
Field_short(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
- uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- bool zero_arg, bool unsigned_arg)
- :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg,
- 0, zero_arg,unsigned_arg)
+ uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ bool zero_arg, bool unsigned_arg)
+ :Field_integer(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg,
+ zero_arg, unsigned_arg)
{}
Field_short(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
- bool unsigned_arg)
- :Field_num((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg, 0, 0, unsigned_arg)
+ bool unsigned_arg)
+ :Field_integer((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "" : 0, 0,
+ NONE, field_name_arg, 0, unsigned_arg)
{}
enum_field_types type() const { return MYSQL_TYPE_SHORT;}
enum ha_base_keytype key_type() const
@@ -1860,15 +2076,15 @@ public:
}
};
-class Field_medium :public Field_num {
+class Field_medium :public Field_integer {
public:
Field_medium(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
- uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- bool zero_arg, bool unsigned_arg)
- :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg,
- 0, zero_arg,unsigned_arg)
+ uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ bool zero_arg, bool unsigned_arg)
+ :Field_integer(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg,
+ zero_arg, unsigned_arg)
{}
enum_field_types type() const { return MYSQL_TYPE_INT24;}
enum ha_base_keytype key_type() const
@@ -1899,20 +2115,20 @@ public:
};
-class Field_long :public Field_num {
+class Field_long :public Field_integer {
public:
Field_long(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
- uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- bool zero_arg, bool unsigned_arg)
- :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg,
- 0, zero_arg,unsigned_arg)
+ uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ bool zero_arg, bool unsigned_arg)
+ :Field_integer(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg,
+ zero_arg, unsigned_arg)
{}
Field_long(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
- bool unsigned_arg)
- :Field_num((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg,0,0,unsigned_arg)
+ bool unsigned_arg)
+ :Field_integer((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "" : 0, 0,
+ NONE, field_name_arg, 0, unsigned_arg)
{}
enum_field_types type() const { return MYSQL_TYPE_LONG;}
enum ha_base_keytype key_type() const
@@ -1949,21 +2165,21 @@ public:
};
-class Field_longlong :public Field_num {
+class Field_longlong :public Field_integer {
public:
Field_longlong(uchar *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
- uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- bool zero_arg, bool unsigned_arg)
- :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg,
- 0, zero_arg,unsigned_arg)
+ uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ bool zero_arg, bool unsigned_arg)
+ :Field_integer(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg,
+ zero_arg, unsigned_arg)
{}
Field_longlong(uint32 len_arg,bool maybe_null_arg,
- const char *field_name_arg,
- bool unsigned_arg)
- :Field_num((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
- NONE, field_name_arg,0,0,unsigned_arg)
+ const char *field_name_arg,
+ bool unsigned_arg)
+ :Field_integer((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "" : 0, 0,
+ NONE, field_name_arg,0, unsigned_arg)
{}
enum_field_types type() const { return MYSQL_TYPE_LONGLONG;}
enum ha_base_keytype key_type() const
@@ -2011,12 +2227,18 @@ public:
:Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
- {}
+ {
+ if (dec_arg >= FLOATING_POINT_DECIMALS)
+ dec_arg= NOT_FIXED_DEC;
+ }
Field_float(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg,
uint8 dec_arg)
:Field_real((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, (uint) 0,
NONE, field_name_arg, dec_arg, 0, 0)
- {}
+ {
+ if (dec_arg >= FLOATING_POINT_DECIMALS)
+ dec_arg= NOT_FIXED_DEC;
+ }
enum_field_types type() const { return MYSQL_TYPE_FLOAT;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_FLOAT; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -2054,17 +2276,27 @@ public:
:Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
- {}
+ {
+ if (dec_arg >= FLOATING_POINT_DECIMALS)
+ dec_arg= NOT_FIXED_DEC;
+ }
Field_double(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg,
uint8 dec_arg)
:Field_real((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "" : 0, (uint) 0,
NONE, field_name_arg, dec_arg, 0, 0)
- {}
+ {
+ if (dec_arg >= FLOATING_POINT_DECIMALS)
+ dec_arg= NOT_FIXED_DEC;
+ }
Field_double(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg,
uint8 dec_arg, bool not_fixed_arg)
:Field_real((uchar*) 0, len_arg, maybe_null_arg ? (uchar*) "" : 0, (uint) 0,
NONE, field_name_arg, dec_arg, 0, 0)
- {not_fixed= not_fixed_arg; }
+ {
+ not_fixed= not_fixed_arg;
+ if (dec_arg >= FLOATING_POINT_DECIMALS)
+ dec_arg= NOT_FIXED_DEC;
+ }
enum_field_types type() const { return MYSQL_TYPE_DOUBLE;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_DOUBLE; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -2105,6 +2337,10 @@ public:
unireg_check_arg, field_name_arg, cs)
{}
enum_field_types type() const { return MYSQL_TYPE_NULL;}
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ return do_field_string;
+ }
int store(const char *to, uint length, CHARSET_INFO *cs)
{ null[0]=1; return 0; }
int store(double nr) { null[0]=1; return 0; }
@@ -2151,6 +2387,19 @@ public:
field_name_arg)
{ flags|= BINARY_FLAG; }
Item_result result_type () const { return STRING_RESULT; }
+ int store_hex_hybrid(const char *str, uint length)
+ {
+ return store(str, length, &my_charset_bin);
+ }
+ Copy_func *get_copy_func(const Field *from) const;
+ int save_in_field(Field *to)
+ {
+ MYSQL_TIME ltime;
+ if (get_date(&ltime, 0))
+ return to->reset();
+ return to->store_time_dec(&ltime, decimals());
+ }
+ bool memcpy_field_possible(const Field *from) const;
uint32 max_display_length() { return field_length; }
bool str_needs_quotes() { return TRUE; }
enum Derivation derivation(void) const { return DERIVATION_NUMERIC; }
@@ -2161,7 +2410,7 @@ public:
enum Item_result cmp_type () const { return TIME_RESULT; }
bool val_bool() { return val_real() != 0e0; }
uint is_equal(Create_field *new_field);
- bool eq_def(Field *field)
+ bool eq_def(const Field *field) const
{
return (Field::eq_def(field) && decimals() == field->decimals());
}
@@ -2236,12 +2485,14 @@ public:
TABLE_SHARE *share);
enum_field_types type() const { return MYSQL_TYPE_TIMESTAMP;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
+ Copy_func *get_copy_func(const Field *from) const;
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
int store_time_dec(MYSQL_TIME *ltime, uint dec);
int store_decimal(const my_decimal *);
- int store_timestamp(Field_timestamp *from);
+ int store_timestamp(my_time_t timestamp, ulong sec_part);
+ int save_in_field(Field *to);
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -2251,23 +2502,8 @@ public:
uint32 pack_length() const { return 4; }
void sql_type(String &str) const;
bool zero_pack() const { return 0; }
- virtual int set_time();
- virtual void set_default()
- {
- if (has_insert_default_function())
- set_time();
- else
- Field::set_default();
- }
- virtual void set_explicit_default(Item *value);
- virtual int evaluate_insert_default_function()
- {
- int res= 0;
- if (has_insert_default_function())
- res= set_time();
- return res;
- }
- virtual int evaluate_update_default_function()
+ int set_time();
+ int evaluate_update_default_function()
{
int res= 0;
if (has_update_default_function())
@@ -2300,6 +2536,8 @@ public:
{
return get_equal_const_item_datetime(thd, ctx, const_item);
}
+ bool load_data_set_null(THD *thd);
+ bool load_data_set_no_data(THD *thd, bool fixed_format);
uint size_of() const { return sizeof(*this); }
};
@@ -2415,6 +2653,28 @@ public:
unireg_check_arg, field_name_arg, 1, 1)
{}
enum_field_types type() const { return MYSQL_TYPE_YEAR;}
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ if (eq_def(from))
+ return get_identical_copy_func();
+ switch (from->cmp_type()) {
+ case STRING_RESULT:
+ return do_field_string;
+ case TIME_RESULT:
+ return do_field_temporal;
+ case DECIMAL_RESULT:
+ return do_field_decimal;
+ case REAL_RESULT:
+ return do_field_real;
+ case INT_RESULT:
+ break;
+ case ROW_RESULT:
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ return do_field_int;
+ }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
@@ -2504,6 +2764,7 @@ protected:
int store_TIME_with_warning(MYSQL_TIME *ltime, const ErrConv *str,
int was_cut, int have_smth_to_conv);
bool check_zero_in_date_with_warn(ulonglong fuzzydate);
+ static void do_field_time(Copy_field *copy);
public:
Field_time(uchar *ptr_arg, uint length_arg, uchar *null_ptr_arg,
uchar null_bit_arg, enum utype unireg_check_arg,
@@ -2513,6 +2774,19 @@ public:
{}
enum_field_types type() const { return MYSQL_TYPE_TIME;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; }
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ return from->cmp_type() == REAL_RESULT ? do_field_string : // MDEV-9344
+ from->type() == MYSQL_TYPE_YEAR ? do_field_int :
+ from->type() == MYSQL_TYPE_BIT ? do_field_int :
+ eq_def(from) ? get_identical_copy_func() :
+ do_field_time;
+ }
+ bool memcpy_field_possible(const Field *from) const
+ {
+ return real_type() == from->real_type() &&
+ decimals() == from->decimals();
+ }
int store_time_dec(MYSQL_TIME *ltime, uint dec);
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
@@ -2646,7 +2920,11 @@ public:
const char *field_name_arg)
:Field_temporal_with_date(ptr_arg, length_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg)
- {}
+ {
+ if (unireg_check == TIMESTAMP_UN_FIELD ||
+ unireg_check == TIMESTAMP_DNUN_FIELD)
+ flags|= ON_UPDATE_NOW_FLAG;
+ }
enum_field_types type() const { return MYSQL_TYPE_DATETIME;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONGLONG; }
double val_real(void);
@@ -2659,22 +2937,8 @@ public:
void sql_type(String &str) const;
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{ return Field_datetime::get_TIME(ltime, ptr, fuzzydate); }
- virtual int set_time();
- virtual void set_default()
- {
- if (has_insert_default_function())
- set_time();
- else
- Field::set_default();
- }
- virtual int evaluate_insert_default_function()
- {
- int res= 0;
- if (has_insert_default_function())
- res= set_time();
- return res;
- }
- virtual int evaluate_update_default_function()
+ int set_time();
+ int evaluate_update_default_function()
{
int res= 0;
if (has_update_default_function())
@@ -2811,7 +3075,7 @@ new_Field_timestamp(MEM_ROOT *root,uchar *ptr, uchar *null_ptr, uchar null_bit,
return new (root)
Field_timestamp(ptr, MAX_DATETIME_WIDTH, null_ptr,
null_bit, unireg_check, field_name, share);
- if (dec == NOT_FIXED_DEC)
+ if (dec >= FLOATING_POINT_DECIMALS)
dec= MAX_DATETIME_PRECISION;
return new (root)
Field_timestamp_hires(ptr, null_ptr, null_bit, unireg_check,
@@ -2827,7 +3091,7 @@ new_Field_time(MEM_ROOT *root, uchar *ptr, uchar *null_ptr, uchar null_bit,
return new (root)
Field_time(ptr, MIN_TIME_WIDTH, null_ptr, null_bit, unireg_check,
field_name);
- if (dec == NOT_FIXED_DEC)
+ if (dec >= FLOATING_POINT_DECIMALS)
dec= MAX_DATETIME_PRECISION;
return new (root)
Field_time_hires(ptr, null_ptr, null_bit, unireg_check, field_name, dec);
@@ -2842,7 +3106,7 @@ new_Field_datetime(MEM_ROOT *root, uchar *ptr, uchar *null_ptr, uchar null_bit,
return new (root)
Field_datetime(ptr, MAX_DATETIME_WIDTH, null_ptr, null_bit,
unireg_check, field_name);
- if (dec == NOT_FIXED_DEC)
+ if (dec >= FLOATING_POINT_DECIMALS)
dec= MAX_DATETIME_PRECISION;
return new (root)
Field_datetime_hires(ptr, null_ptr, null_bit,
@@ -2881,6 +3145,7 @@ public:
enum ha_base_keytype key_type() const
{ return binary() ? HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT; }
bool zero_pack() const { return 0; }
+ Copy_func *get_copy_func(const Field *from) const;
int reset(void)
{
charset()->cset->fill(charset(),(char*) ptr, field_length,
@@ -2928,6 +3193,7 @@ private:
class Field_varstring :public Field_longstr {
+public:
uchar *get_data() const
{
return ptr + length_bytes;
@@ -2936,7 +3202,6 @@ class Field_varstring :public Field_longstr {
{
return length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
}
-public:
/*
The maximum space available in a Field_varstring, in bytes. See
length_bytes.
@@ -2977,6 +3242,12 @@ public:
return (uint32) field_length + (field_charset == &my_charset_bin ?
length_bytes : 0);
}
+ Copy_func *get_copy_func(const Field *from) const;
+ bool memcpy_field_possible(const Field *from) const
+ {
+ return Field_str::memcpy_field_possible(from) &&
+ length_bytes == ((Field_varstring*) from)->length_bytes;
+ }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(longlong nr, bool unsigned_val);
int store(double nr) { return Field_str::store(nr); } /* QQ: To be deleted */
@@ -2987,7 +3258,7 @@ public:
int cmp_max(const uchar *, const uchar *, uint max_length);
int cmp(const uchar *a,const uchar *b)
{
- return cmp_max(a, b, ~0L);
+ return cmp_max(a, b, ~0U);
}
void sort_string(uchar *buff,uint length);
uint get_key_image(uchar *buff,uint length, imagetype type);
@@ -2996,7 +3267,7 @@ public:
virtual uchar *pack(uchar *to, const uchar *from, uint max_length);
virtual const uchar *unpack(uchar* to, const uchar *from,
const uchar *from_end, uint param_data);
- int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0L);
+ int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U);
int key_cmp(const uchar *,const uchar*);
int key_cmp(const uchar *str, uint length);
uint packed_col_length(const uchar *to, uint length);
@@ -3029,7 +3300,15 @@ protected:
The 'value'-object is a cache fronting the storage engine.
*/
String value;
-
+ /**
+ Cache for blob values when reading a row with a virtual blob
+ field. This is needed to not destroy the old cached value when
+ updating the blob with a new value when creating the new row.
+ */
+ String read_value;
+
+ static void do_copy_blob(Copy_field *copy);
+ static void do_conv_blob(Copy_field *copy);
public:
Field_blob(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
@@ -3063,6 +3342,32 @@ public:
enum_field_types type() const { return MYSQL_TYPE_BLOB;}
enum ha_base_keytype key_type() const
{ return binary() ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; }
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ /*
+ TODO: MDEV-9331
+ if (from->type() == MYSQL_TYPE_BIT)
+ return do_field_int;
+ */
+ if (!(from->flags & BLOB_FLAG) || from->charset() != charset())
+ return do_conv_blob;
+ if (from->pack_length() != Field_blob::pack_length())
+ return do_copy_blob;
+ return get_identical_copy_func();
+ }
+ int store_field(Field *from)
+ { // Be sure the value is stored
+ from->val_str(&value);
+ if (table->copy_blobs ||
+ (!value.is_alloced() && from->is_varchar_and_in_write_set()))
+ value.copy();
+ return store(value.ptr(), value.length(), from->charset());
+ }
+ bool memcpy_field_possible(const Field *from) const
+ {
+ return Field_str::memcpy_field_possible(from) &&
+ !table->copy_blobs;
+ }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
@@ -3072,9 +3377,9 @@ public:
my_decimal *val_decimal(my_decimal *);
int cmp_max(const uchar *, const uchar *, uint max_length);
int cmp(const uchar *a,const uchar *b)
- { return cmp_max(a, b, ~0L); }
+ { return cmp_max(a, b, ~0U); }
int cmp(const uchar *a, uint32 a_length, const uchar *b, uint32 b_length);
- int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0L);
+ int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U);
int key_cmp(const uchar *,const uchar*);
int key_cmp(const uchar *str, uint length);
/* Never update the value of min_val for a blob field */
@@ -3104,37 +3409,36 @@ public:
return (uint32) (((ulonglong) 1 << (packlength*8)) -1);
}
int reset(void) { bzero(ptr, packlength+sizeof(uchar*)); return 0; }
- void reset_fields() { bzero((uchar*) &value,sizeof(value)); }
+ void reset_fields() { bzero((uchar*) &value,sizeof(value)); bzero((uchar*) &read_value,sizeof(read_value)); }
uint32 get_field_buffer_size(void) { return value.alloced_length(); }
void store_length(uchar *i_ptr, uint i_packlength, uint32 i_number);
inline void store_length(uint32 number)
{
store_length(ptr, packlength, number);
}
- inline uint32 get_length(uint row_offset= 0)
+ inline uint32 get_length(my_ptrdiff_t row_offset= 0) const
{ return get_length(ptr+row_offset, this->packlength); }
- uint32 get_length(const uchar *ptr, uint packlength);
- uint32 get_length(const uchar *ptr_arg)
+ uint32 get_length(const uchar *ptr, uint packlength) const;
+ uint32 get_length(const uchar *ptr_arg) const
{ return get_length(ptr_arg, this->packlength); }
- inline void get_ptr(uchar **str)
- {
- memcpy(str, ptr+packlength, sizeof(uchar*));
- }
- inline void get_ptr(uchar **str, uint row_offset)
- {
- memcpy(str, ptr+packlength+row_offset, sizeof(char*));
- }
+ inline uchar *get_ptr() const { return get_ptr(0); }
+ inline uchar *get_ptr(my_ptrdiff_t row_offset) const
+ {
+ uchar *s;
+ memcpy(&s, ptr + packlength + row_offset, sizeof(uchar*));
+ return s;
+ }
inline void set_ptr(uchar *length, uchar *data)
- {
- memcpy(ptr,length,packlength);
- memcpy(ptr+packlength, &data,sizeof(char*));
- }
- void set_ptr_offset(my_ptrdiff_t ptr_diff, uint32 length, uchar *data)
- {
- uchar *ptr_ofs= ADD_TO_PTR(ptr,ptr_diff,uchar*);
- store_length(ptr_ofs, packlength, length);
- memcpy(ptr_ofs+packlength, &data, sizeof(char*));
- }
+ {
+ memcpy(ptr,length,packlength);
+ memcpy(ptr+packlength, &data,sizeof(char*));
+ }
+ void set_ptr_offset(my_ptrdiff_t ptr_diff, uint32 length, const uchar *data)
+ {
+ uchar *ptr_ofs= ADD_TO_PTR(ptr,ptr_diff,uchar*);
+ store_length(ptr_ofs, packlength, length);
+ memcpy(ptr_ofs+packlength, &data, sizeof(char*));
+ }
inline void set_ptr(uint32 length, uchar *data)
{
set_ptr_offset(0, length, data);
@@ -3148,8 +3452,7 @@ public:
void sql_type(String &str) const;
inline bool copy()
{
- uchar *tmp;
- get_ptr(&tmp);
+ uchar *tmp= get_ptr();
if (value.copy((char*) tmp, get_length(), charset()))
{
Field_blob::reset();
@@ -3159,20 +3462,51 @@ public:
memcpy(ptr+packlength, &tmp, sizeof(char*));
return 0;
}
+ /* store value for the duration of the current read record */
+ inline void swap_value_and_read_value()
+ {
+ read_value.swap(value);
+ }
+ inline void set_value(uchar *data)
+ {
+ /* Set value pointer. Lengths are not important */
+ value.reset((char*) data, 1, 1, &my_charset_bin);
+ }
virtual uchar *pack(uchar *to, const uchar *from, uint max_length);
virtual const uchar *unpack(uchar *to, const uchar *from,
const uchar *from_end, uint param_data);
uint packed_col_length(const uchar *col_ptr, uint length);
uint max_packed_col_length(uint max_length);
- void free() { value.free(); }
- inline void clear_temporary() { bzero((uchar*) &value,sizeof(value)); }
- friend int field_conv_incompatible(Field *to,Field *from);
+ void free()
+ {
+ value.free();
+ read_value.free();
+ }
+ inline void clear_temporary()
+ {
+ uchar *tmp= get_ptr();
+ if (likely(value.ptr() == (char*) tmp))
+ bzero((uchar*) &value, sizeof(value));
+ else
+ {
+ /*
+ Currently read_value should never point to tmp, the following code
+ is mainly here to make things future proof.
+ */
+ if (unlikely(read_value.ptr() == (char*) tmp))
+ bzero((uchar*) &read_value, sizeof(read_value));
+ }
+ }
uint size_of() const { return sizeof(*this); }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
uint32 max_display_length();
- uint32 char_length();
+ uint32 char_length() const;
uint is_equal(Create_field *new_field);
+
+ friend void TABLE::remember_blob_values(String *blob_storage);
+ friend void TABLE::restore_blob_values(String *blob_storage);
+
private:
int do_save_field_metadata(uchar *first_byte);
};
@@ -3224,6 +3558,8 @@ public:
but the underlying blob must still be reset.
*/
int reset(void) { return Field_blob::reset() || !maybe_null(); }
+ bool load_data_set_null(THD *thd);
+ bool load_data_set_no_data(THD *thd, bool fixed_format);
geometry_type get_geometry_type() { return geom_type; };
static geometry_type geometry_type_merge(geometry_type, geometry_type);
@@ -3238,6 +3574,7 @@ uint gis_field_options_read(const uchar *buf, uint buf_len,
class Field_enum :public Field_str {
+ static void do_field_enum(Copy_field *copy_field);
protected:
uint packlength;
public:
@@ -3258,6 +3595,33 @@ public:
enum_field_types type() const { return MYSQL_TYPE_STRING; }
enum Item_result cmp_type () const { return INT_RESULT; }
enum ha_base_keytype key_type() const;
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ if (eq_def(from))
+ return get_identical_copy_func();
+ if (real_type() == MYSQL_TYPE_ENUM &&
+ from->real_type() == MYSQL_TYPE_ENUM)
+ return do_field_enum;
+ if (from->result_type() == STRING_RESULT)
+ return do_field_string;
+ return do_field_int;
+ }
+ int store_field(Field *from)
+ {
+ if (from->real_type() == MYSQL_TYPE_ENUM && from->val_int() == 0)
+ {
+ store_type(0);
+ return 0;
+ }
+ return from->save_in_field(this);
+ }
+ int save_in_field(Field *to)
+ {
+ if (to->result_type() != STRING_RESULT)
+ return to->store(val_int(), 0);
+ return save_in_field_str(to);
+ }
+ bool memcpy_field_possible(const Field *from) const { return false; }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
@@ -3276,7 +3640,7 @@ public:
uint row_pack_length() const { return pack_length(); }
virtual bool zero_pack() const { return 0; }
bool optimize_range(uint idx, uint part) { return 0; }
- bool eq_def(Field *field);
+ bool eq_def(const Field *field) const;
bool has_charset(void) const { return TRUE; }
/* enum and set are sorted as integers */
CHARSET_INFO *sort_charset(void) const { return &my_charset_bin; }
@@ -3321,6 +3685,7 @@ public:
{
flags=(flags & ~ENUM_FLAG) | SET_FLAG;
}
+ int store_field(Field *from) { return from->save_in_field(this); }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr) { return Field_set::store((longlong) nr, FALSE); }
int store(longlong nr, bool unsigned_val);
@@ -3372,6 +3737,14 @@ public:
clr_rec_bits(bit_ptr, bit_ofs, bit_len);
return 0;
}
+ Copy_func *get_copy_func(const Field *from) const
+ {
+ if (from->cmp_type() == DECIMAL_RESULT)
+ return do_field_decimal;
+ return do_field_int;
+ }
+ int save_in_field(Field *to) { return to->store(val_int(), true); }
+ bool memcpy_field_possible(const Field *from) const { return false; }
int store(const char *to, uint length, CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
@@ -3447,7 +3820,7 @@ public:
virtual uchar *pack(uchar *to, const uchar *from, uint max_length);
virtual const uchar *unpack(uchar *to, const uchar *from,
const uchar *from_end, uint param_data);
- virtual void set_default();
+ virtual int set_default();
Field *new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uint32 length,
@@ -3500,18 +3873,26 @@ public:
extern const LEX_STRING null_lex_str;
+
+
+Field *make_field(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ uchar *ptr, uint32 field_length,
+ uchar *null_pos, uchar null_bit,
+ uint pack_flag, enum_field_types field_type,
+ CHARSET_INFO *cs,
+ Field::geometry_type geom_type, uint srid,
+ Field::utype unireg_check,
+ TYPELIB *interval, const char *field_name);
+
/*
Create field class for CREATE TABLE
*/
-
-class Create_field :public Sql_alloc
+class Column_definition: public Sql_alloc
{
public:
const char *field_name;
- const char *change; // If done with alter table
- const char *after; // Put column after this one
LEX_STRING comment; // Comment for field
- Item *def, *on_update; // Default value
+ Item *on_update; // ON UPDATE NOW()
enum enum_field_types sql_type;
/*
At various stages in execution this can be length of field in bytes or
@@ -3526,58 +3907,42 @@ public:
uint decimals, flags, pack_length, key_length;
Field::utype unireg_check;
TYPELIB *interval; // Which interval to use
- TYPELIB *save_interval; // Temporary copy for the above
- // Used only for UCS2 intervals
List<String> interval_list;
CHARSET_INFO *charset;
uint32 srid;
Field::geometry_type geom_type;
- Field *field; // For alter table
engine_option_value *option_list;
- /** structure with parsed options (for comparing fields in ALTER TABLE) */
- ha_field_option_struct *option_struct;
- uint8 interval_id; // For rea_create_table
- uint offset,pack_flag;
- bool create_if_not_exists; // Used in ALTER TABLE IF NOT EXISTS
+ uint pack_flag;
- /*
+ /*
This is additinal data provided for any computed(virtual) field.
In particular it includes a pointer to the item by which this field
can be computed from other fields.
*/
- Virtual_column_info *vcol_info;
- /*
- Flag indicating that the field is physically stored in tables
- rather than just computed from other fields.
- As of now, FALSE can be set only for computed virtual columns.
- */
- bool stored_in_db;
-
- Create_field() :change(0), after(0), comment(null_lex_str),
- def(0), on_update(0), sql_type(MYSQL_TYPE_NULL),
- flags(0), pack_length(0), key_length(0), interval(0),
- srid(0), geom_type(Field::GEOM_GEOMETRY),
- field(0), option_list(NULL), option_struct(NULL),
- create_if_not_exists(false), vcol_info(0),
- stored_in_db(true)
+ Virtual_column_info
+ *vcol_info, // Virtual field
+ *default_value, // Default value
+ *check_constraint; // Check constraint
+
+ Column_definition():
+ comment(null_lex_str),
+ on_update(0), sql_type(MYSQL_TYPE_NULL),
+ flags(0), pack_length(0), key_length(0), unireg_check(Field::NONE),
+ interval(0), srid(0), geom_type(Field::GEOM_GEOMETRY),
+ option_list(NULL),
+ vcol_info(0), default_value(0), check_constraint(0)
{
interval_list.empty();
}
- Create_field(THD *thd, Field *field, Field *orig_field);
- /* Used to make a clone of this object for ALTER/CREATE TABLE */
- Create_field *clone(MEM_ROOT *mem_root) const;
+ Column_definition(THD *thd, Field *field, Field *orig_field);
void create_length_to_internal_length(void);
- /* Init for a tmp table field. To be extended if need be. */
- void init_for_tmp_table(enum_field_types sql_type_arg,
- uint32 max_length, uint32 decimals,
- bool maybe_null, bool is_unsigned,
- uint pack_length = ~0U);
-
bool check(THD *thd);
+ bool stored_in_db() const { return !vcol_info || vcol_info->stored_in_db; }
+
ha_storage_media field_storage_type() const
{
return (ha_storage_media)
@@ -3590,21 +3955,69 @@ public:
((flags >> FIELD_FLAGS_COLUMN_FORMAT) & 3);
}
- uint virtual_col_expr_maxlen()
+ bool has_default_function() const
{
- return 255 - FRM_VCOL_HEADER_SIZE(interval != NULL);
+ return unireg_check != Field::NONE;
}
- bool has_default_function() const
+ Field *make_field(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ uchar *ptr, uchar *null_pos, uchar null_bit,
+ const char *field_name_arg) const
+ {
+ return ::make_field(share, mem_root, ptr,
+ (uint32)length, null_pos, null_bit,
+ pack_flag, sql_type, charset,
+ geom_type, srid, unireg_check, interval,
+ field_name_arg);
+ }
+ Field *make_field(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const char *field_name_arg)
+ {
+ return make_field(share, mem_root, (uchar *) 0, (uchar *) "", 0,
+ field_name_arg);
+ }
+ /* Return true if default is an expression that must be saved explicitely */
+ bool has_default_expression();
+
+ bool has_default_now_unireg_check() const
{
- return (unireg_check == Field::TIMESTAMP_DN_FIELD ||
- unireg_check == Field::TIMESTAMP_DNUN_FIELD ||
- unireg_check == Field::TIMESTAMP_UN_FIELD ||
- unireg_check == Field::NEXT_NUMBER);
+ return unireg_check == Field::TIMESTAMP_DN_FIELD
+ || unireg_check == Field::TIMESTAMP_DNUN_FIELD;
}
};
+class Create_field :public Column_definition
+{
+public:
+ const char *change; // If done with alter table
+ const char *after; // Put column after this one
+ Field *field; // For alter table
+ TYPELIB *save_interval; // Temporary copy for the above
+ // Used only for UCS2 intervals
+
+ /** structure with parsed options (for comparing fields in ALTER TABLE) */
+ ha_field_option_struct *option_struct;
+ uint offset;
+ uint8 interval_id; // For rea_create_table
+ bool create_if_not_exists; // Used in ALTER TABLE IF NOT EXISTS
+
+ Create_field():
+ Column_definition(), change(0), after(0),
+ field(0), option_struct(NULL),
+ create_if_not_exists(false)
+ { }
+ Create_field(THD *thd, Field *old_field, Field *orig_field):
+ Column_definition(thd, old_field, orig_field),
+ change(old_field->field_name), after(0),
+ field(old_field), option_struct(old_field->option_struct),
+ create_if_not_exists(false)
+ { }
+ /* Used to make a clone of this object for ALTER/CREATE TABLE */
+ Create_field *clone(MEM_ROOT *mem_root) const;
+};
+
+
/*
A class for sending info to the client
*/
@@ -3626,12 +4039,6 @@ class Send_field :public Sql_alloc {
*/
class Copy_field :public Sql_alloc {
- /**
- Convenience definition of a copy function returned by
- get_copy_func.
- */
- typedef void Copy_func(Copy_field*);
- Copy_func *get_copy_func(Field *to, Field *from);
public:
uchar *from_ptr,*to_ptr;
uchar *from_null_ptr,*to_null_ptr;
@@ -3652,7 +4059,7 @@ public:
Note that for VARCHARs, do_copy() will be do_varstring*() which
only copies the length-bytes (1 or 2) + the actual length of the
- text instead of from/to_length bytes. @see get_copy_func()
+ text instead of from/to_length bytes.
*/
uint from_length,to_length;
Field *from_field,*to_field;
@@ -3667,51 +4074,39 @@ public:
};
-Field *make_field(TABLE_SHARE *share, MEM_ROOT *mem_root,
- uchar *ptr, uint32 field_length,
- uchar *null_pos, uchar null_bit,
- uint pack_flag, enum_field_types field_type,
- CHARSET_INFO *cs,
- Field::geometry_type geom_type, uint srid,
- Field::utype unireg_check,
- TYPELIB *interval, const char *field_name);
uint pack_length_to_packflag(uint type);
enum_field_types get_blob_type_from_length(ulong length);
uint32 calc_pack_length(enum_field_types type,uint32 length);
int set_field_to_null(Field *field);
int set_field_to_null_with_conversions(Field *field, bool no_conversions);
int convert_null_to_field_value_or_error(Field *field);
+bool check_expression(Virtual_column_info *vcol, const char *name,
+ enum_vcol_info_type type);
/*
The following are for the interface with the .frm file
*/
-#define FIELDFLAG_DECIMAL 1
-#define FIELDFLAG_BINARY 1 // Shares same flag
-#define FIELDFLAG_NUMBER 2
-#define FIELDFLAG_ZEROFILL 4
-#define FIELDFLAG_PACK 120 // Bits used for packing
-#define FIELDFLAG_INTERVAL 256 // mangled with decimals!
-#define FIELDFLAG_BITFIELD 512 // mangled with decimals!
-#define FIELDFLAG_BLOB 1024 // mangled with decimals!
-#define FIELDFLAG_GEOM 2048 // mangled with decimals!
-
-#define FIELDFLAG_TREAT_BIT_AS_CHAR 4096 /* use Field_bit_as_char */
-
-#define FIELDFLAG_LEFT_FULLSCREEN 8192
-#define FIELDFLAG_RIGHT_FULLSCREEN 16384
-#define FIELDFLAG_FORMAT_NUMBER 16384 // predit: ###,,## in output
-#define FIELDFLAG_NO_DEFAULT 16384 /* sql */
-#define FIELDFLAG_SUM ((uint) 32768)// predit: +#fieldflag
-#define FIELDFLAG_MAYBE_NULL ((uint) 32768)// sql
-#define FIELDFLAG_HEX_ESCAPE ((uint) 0x10000)
+#define FIELDFLAG_DECIMAL 1U
+#define FIELDFLAG_BINARY 1U // Shares same flag
+#define FIELDFLAG_NUMBER 2U
+#define FIELDFLAG_ZEROFILL 4U
+#define FIELDFLAG_PACK 120U // Bits used for packing
+#define FIELDFLAG_INTERVAL 256U // mangled with decimals!
+#define FIELDFLAG_BITFIELD 512U // mangled with decimals!
+#define FIELDFLAG_BLOB 1024U // mangled with decimals!
+#define FIELDFLAG_GEOM 2048U // mangled with decimals!
+
+#define FIELDFLAG_TREAT_BIT_AS_CHAR 4096U /* use Field_bit_as_char */
+#define FIELDFLAG_LONG_DECIMAL 8192U
+#define FIELDFLAG_NO_DEFAULT 16384U /* sql */
+#define FIELDFLAG_MAYBE_NULL 32768U // sql
+#define FIELDFLAG_HEX_ESCAPE 0x10000U
#define FIELDFLAG_PACK_SHIFT 3
#define FIELDFLAG_DEC_SHIFT 8
-#define FIELDFLAG_MAX_DEC 31
-#define FIELDFLAG_NUM_SCREEN_TYPE 0x7F01
-#define FIELDFLAG_ALFA_SCREEN_TYPE 0x7800
+#define FIELDFLAG_MAX_DEC 63U
-#define MTYP_TYPENR(type) (type & 127) /* Remove bits from type */
+#define MTYP_TYPENR(type) (type & 127U) /* Remove bits from type */
#define f_is_dec(x) ((x) & FIELDFLAG_DECIMAL)
#define f_is_num(x) ((x) & FIELDFLAG_NUMBER)
@@ -3725,10 +4120,9 @@ int convert_null_to_field_value_or_error(Field *field);
#define f_is_bitfield(x) (((x) & (FIELDFLAG_BITFIELD | FIELDFLAG_NUMBER)) == FIELDFLAG_BITFIELD)
#define f_is_blob(x) (((x) & (FIELDFLAG_BLOB | FIELDFLAG_NUMBER)) == FIELDFLAG_BLOB)
#define f_is_geom(x) (((x) & (FIELDFLAG_GEOM | FIELDFLAG_NUMBER)) == FIELDFLAG_GEOM)
-#define f_is_equ(x) ((x) & (1+2+FIELDFLAG_PACK+31*256))
-#define f_settype(x) (((int) x) << FIELDFLAG_PACK_SHIFT)
-#define f_maybe_null(x) (x & FIELDFLAG_MAYBE_NULL)
-#define f_no_default(x) (x & FIELDFLAG_NO_DEFAULT)
+#define f_settype(x) (((uint) (x)) << FIELDFLAG_PACK_SHIFT)
+#define f_maybe_null(x) ((x) & FIELDFLAG_MAYBE_NULL)
+#define f_no_default(x) ((x) & FIELDFLAG_NO_DEFAULT)
#define f_bit_as_char(x) ((x) & FIELDFLAG_TREAT_BIT_AS_CHAR)
#define f_is_hex_escape(x) ((x) & FIELDFLAG_HEX_ESCAPE)
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index cdef8e8d746..cec3d3a3e7d 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -219,13 +219,6 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions)
}
-static int copy_timestamp_fields(Field *from, Field *to)
-{
- DBUG_ASSERT(from->type() == MYSQL_TYPE_TIMESTAMP);
- DBUG_ASSERT(to->type() == MYSQL_TYPE_TIMESTAMP);
- return ((Field_timestamp*)to)->store_timestamp((Field_timestamp*)from);
-}
-
static void do_skip(Copy_field *copy __attribute__((unused)))
{
}
@@ -341,12 +334,12 @@ static void do_copy_next_number(Copy_field *copy)
}
-static void do_copy_blob(Copy_field *copy)
+void Field_blob::do_copy_blob(Copy_field *copy)
{
((Field_blob*) copy->to_field)->copy_value(((Field_blob*) copy->from_field));
}
-static void do_conv_blob(Copy_field *copy)
+void Field_blob::do_conv_blob(Copy_field *copy)
{
copy->from_field->val_str(&copy->tmp);
((Field_blob *) copy->to_field)->store(copy->tmp.ptr(),
@@ -368,7 +361,7 @@ static void do_save_blob(Copy_field *copy)
}
-static void do_field_string(Copy_field *copy)
+void Field::do_field_string(Copy_field *copy)
{
char buff[MAX_FIELD_WIDTH];
String res(buff, sizeof(buff), copy->from_field->charset());
@@ -379,7 +372,7 @@ static void do_field_string(Copy_field *copy)
}
-static void do_field_enum(Copy_field *copy)
+void Field_enum::do_field_enum(Copy_field *copy)
{
if (copy->from_field->val_int() == 0)
((Field_enum *) copy->to_field)->store_type((ulonglong) 0);
@@ -403,38 +396,52 @@ static void do_field_varbinary_pre50(Copy_field *copy)
}
-static void do_field_int(Copy_field *copy)
+void Field::do_field_int(Copy_field *copy)
{
longlong value= copy->from_field->val_int();
copy->to_field->store(value,
MY_TEST(copy->from_field->flags & UNSIGNED_FLAG));
}
-static void do_field_real(Copy_field *copy)
+void Field::do_field_real(Copy_field *copy)
{
double value=copy->from_field->val_real();
copy->to_field->store(value);
}
-static void do_field_decimal(Copy_field *copy)
+void Field::do_field_decimal(Copy_field *copy)
{
my_decimal value;
copy->to_field->store_decimal(copy->from_field->val_decimal(&value));
}
-static void do_field_timestamp(Copy_field *copy)
+void Field::do_field_timestamp(Copy_field *copy)
{
- copy_timestamp_fields(copy->from_field, copy->to_field);
+ // XXX why couldn't we do it everywhere?
+ copy->from_field->save_in_field(copy->to_field);
}
-static void do_field_temporal(Copy_field *copy)
+void Field::do_field_temporal(Copy_field *copy)
{
MYSQL_TIME ltime;
- copy->from_field->get_date(&ltime, 0);
- copy->to_field->store_time_dec(&ltime, copy->from_field->decimals());
+ // TODO: we now need to check result
+ if (copy->from_field->get_date(&ltime, 0))
+ copy->to_field->reset();
+ else
+ copy->to_field->store_time_dec(&ltime, copy->from_field->decimals());
+}
+
+
+void Field_time::do_field_time(Copy_field *copy)
+{
+ MYSQL_TIME ltime;
+ if (copy->from_field->get_date(&ltime, TIME_TIME_ONLY))
+ copy->to_field->reset();
+ else
+ copy->to_field->store_time_dec(&ltime, copy->from_field->decimals());
}
@@ -467,20 +474,19 @@ static void do_cut_string(Copy_field *copy)
static void do_cut_string_complex(Copy_field *copy)
{ // Shorter string field
- int well_formed_error;
CHARSET_INFO *cs= copy->from_field->charset();
const uchar *from_end= copy->from_ptr + copy->from_length;
- uint copy_length= cs->cset->well_formed_len(cs,
- (char*) copy->from_ptr,
- (char*) from_end,
- copy->to_length / cs->mbmaxlen,
- &well_formed_error);
+ Well_formed_prefix prefix(cs,
+ (char*) copy->from_ptr,
+ (char*) from_end,
+ copy->to_length / cs->mbmaxlen);
+ uint copy_length= prefix.length();
if (copy->to_length < copy_length)
copy_length= copy->to_length;
memcpy(copy->to_ptr, copy->from_ptr, copy_length);
/* Check if we lost any important characters */
- if (well_formed_error ||
+ if (prefix.well_formed_error_pos() ||
cs->cset->scan(cs, (char*) copy->from_ptr + copy_length,
(char*) from_end,
MY_SEQ_SPACES) < (copy->from_length - copy_length))
@@ -534,22 +540,19 @@ static void do_varstring1(Copy_field *copy)
static void do_varstring1_mb(Copy_field *copy)
{
- int well_formed_error;
CHARSET_INFO *cs= copy->from_field->charset();
uint from_length= (uint) *(uchar*) copy->from_ptr;
const uchar *from_ptr= copy->from_ptr + 1;
uint to_char_length= (copy->to_length - 1) / cs->mbmaxlen;
- uint length= cs->cset->well_formed_len(cs, (char*) from_ptr,
- (char*) from_ptr + from_length,
- to_char_length, &well_formed_error);
- if (length < from_length)
+ Well_formed_prefix prefix(cs, (char*) from_ptr, from_length, to_char_length);
+ if (prefix.length() < from_length)
{
if (current_thd->count_cuted_fields)
copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
- *copy->to_ptr= (uchar) length;
- memcpy(copy->to_ptr + 1, from_ptr, length);
+ *copy->to_ptr= (uchar) prefix.length();
+ memcpy(copy->to_ptr + 1, from_ptr, prefix.length());
}
@@ -572,22 +575,19 @@ static void do_varstring2(Copy_field *copy)
static void do_varstring2_mb(Copy_field *copy)
{
- int well_formed_error;
CHARSET_INFO *cs= copy->from_field->charset();
uint char_length= (copy->to_length - HA_KEY_BLOB_LENGTH) / cs->mbmaxlen;
uint from_length= uint2korr(copy->from_ptr);
const uchar *from_beg= copy->from_ptr + HA_KEY_BLOB_LENGTH;
- uint length= cs->cset->well_formed_len(cs, (char*) from_beg,
- (char*) from_beg + from_length,
- char_length, &well_formed_error);
- if (length < from_length)
+ Well_formed_prefix prefix(cs, (char*) from_beg, from_length, char_length);
+ if (prefix.length() < from_length)
{
if (current_thd->count_cuted_fields)
copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
- int2store(copy->to_ptr, length);
- memcpy(copy->to_ptr+HA_KEY_BLOB_LENGTH, from_beg, length);
+ int2store(copy->to_ptr, prefix.length());
+ memcpy(copy->to_ptr+HA_KEY_BLOB_LENGTH, from_beg, prefix.length());
}
@@ -707,124 +707,82 @@ void Copy_field::set(Field *to,Field *from,bool save)
if ((to->flags & BLOB_FLAG) && save)
do_copy2= do_save_blob;
else
- do_copy2= get_copy_func(to,from);
+ do_copy2= to->get_copy_func(from);
if (!do_copy) // Not null
do_copy=do_copy2;
}
-Copy_field::Copy_func *
-Copy_field::get_copy_func(Field *to,Field *from)
+Field::Copy_func *Field_timestamp::get_copy_func(const Field *from) const
{
- if (to->flags & BLOB_FLAG)
- {
- if (!(from->flags & BLOB_FLAG) || from->charset() != to->charset())
- return do_conv_blob;
- if (from_length != to_length)
- return do_copy_blob;
- }
+ Field::Copy_func *copy= Field_temporal::get_copy_func(from);
+ if (copy == do_field_temporal && from->type() == MYSQL_TYPE_TIMESTAMP)
+ return do_field_timestamp;
else
- {
- if (to->real_type() == MYSQL_TYPE_BIT ||
- from->real_type() == MYSQL_TYPE_BIT)
- return do_field_int;
- if (to->result_type() == DECIMAL_RESULT)
- return do_field_decimal;
- if (from->cmp_type() == TIME_RESULT)
- {
- /* If types are not 100 % identical then convert trough get_date() */
- if (!to->eq_def(from) ||
- ((to->table->in_use->variables.sql_mode &
- (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE)) &&
- mysql_type_to_time_type(to->type()) != MYSQL_TIMESTAMP_TIME))
- return (from->type() == MYSQL_TYPE_TIMESTAMP &&
- to->type() == MYSQL_TYPE_TIMESTAMP)
- ? do_field_timestamp : do_field_temporal;
- /* Do binary copy */
- }
- // Check if identical fields
- if (from->result_type() == STRING_RESULT)
- {
- /*
- Detect copy from pre 5.0 varbinary to varbinary as of 5.0 and
- use special copy function that removes trailing spaces and thus
- repairs data.
- */
- if (from->type() == MYSQL_TYPE_VAR_STRING && !from->has_charset() &&
- to->type() == MYSQL_TYPE_VARCHAR && !to->has_charset())
- return do_field_varbinary_pre50;
-
- if (to->real_type() != from->real_type())
- {
- if (from->real_type() == MYSQL_TYPE_ENUM ||
- from->real_type() == MYSQL_TYPE_SET)
- if (to->result_type() != STRING_RESULT)
- return do_field_int; // Convert SET to number
- return do_field_string;
- }
- if (to->real_type() == MYSQL_TYPE_ENUM ||
- to->real_type() == MYSQL_TYPE_SET)
- {
- if (!to->eq_def(from))
- {
- if (from->real_type() == MYSQL_TYPE_ENUM &&
- to->real_type() == MYSQL_TYPE_ENUM)
- return do_field_enum;
- return do_field_string;
- }
- }
- else if (to->charset() != from->charset())
- return do_field_string;
- else if (to->real_type() == MYSQL_TYPE_VARCHAR)
- {
- if (((Field_varstring*) to)->length_bytes !=
- ((Field_varstring*) from)->length_bytes)
- return do_field_string;
- return (((Field_varstring*) to)->length_bytes == 1 ?
- (from->charset()->mbmaxlen == 1 ? do_varstring1 :
- do_varstring1_mb) :
- (from->charset()->mbmaxlen == 1 ? do_varstring2 :
- do_varstring2_mb));
- }
- else if (to_length < from_length)
- return (from->charset()->mbmaxlen == 1 ?
- do_cut_string : do_cut_string_complex);
- else if (to_length > from_length)
- {
- if (to->charset() == &my_charset_bin)
- return do_expand_binary;
- return do_expand_string;
- }
- }
- else if (to->real_type() != from->real_type() ||
- to_length != from_length)
- {
- if ((to->real_type() == MYSQL_TYPE_ENUM ||
- to->real_type() == MYSQL_TYPE_SET) &&
- from->real_type() == MYSQL_TYPE_NEWDECIMAL)
- return do_field_decimal;
- if (to->real_type() == MYSQL_TYPE_DECIMAL ||
- to->result_type() == STRING_RESULT)
- return do_field_string;
- if (to->result_type() == INT_RESULT)
- return do_field_int;
- return do_field_real;
- }
- else
- {
- if (!to->eq_def(from))
- {
- if (to->real_type() == MYSQL_TYPE_DECIMAL)
- return do_field_string;
- if (to->result_type() == INT_RESULT)
- return do_field_int;
- else
- return do_field_real;
- }
- }
- }
+ return copy;
+}
+
+
+Field::Copy_func *Field_temporal::get_copy_func(const Field *from) const
+{
+ /* If types are not 100 % identical then convert trough get_date() */
+ if (from->cmp_type() == REAL_RESULT)
+ return do_field_string; // TODO: MDEV-9344
+ if (from->type() == MYSQL_TYPE_YEAR)
+ return do_field_string; // TODO: MDEV-9343
+ if (from->type() == MYSQL_TYPE_BIT)
+ return do_field_int;
+ if (!eq_def(from) ||
+ (table->in_use->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE)))
+ return do_field_temporal;
+ return get_identical_copy_func();
+}
+
+
+Field::Copy_func *Field_varstring::get_copy_func(const Field *from) const
+{
+ if (from->type() == MYSQL_TYPE_BIT)
+ return do_field_int;
+ /*
+ Detect copy from pre 5.0 varbinary to varbinary as of 5.0 and
+ use special copy function that removes trailing spaces and thus
+ repairs data.
+ */
+ if (from->type() == MYSQL_TYPE_VAR_STRING && !from->has_charset() &&
+ !Field_varstring::has_charset())
+ return do_field_varbinary_pre50;
+ if (Field_varstring::real_type() != from->real_type() ||
+ Field_varstring::charset() != from->charset() ||
+ length_bytes != ((const Field_varstring*) from)->length_bytes)
+ return do_field_string;
+ return length_bytes == 1 ?
+ (from->charset()->mbmaxlen == 1 ? do_varstring1 : do_varstring1_mb) :
+ (from->charset()->mbmaxlen == 1 ? do_varstring2 : do_varstring2_mb);
+}
+
+
+Field::Copy_func *Field_string::get_copy_func(const Field *from) const
+{
+ if (from->type() == MYSQL_TYPE_BIT)
+ return do_field_int;
+ if (Field_string::real_type() != from->real_type() ||
+ Field_string::charset() != from->charset())
+ return do_field_string;
+ if (Field_string::pack_length() < from->pack_length())
+ return (Field_string::charset()->mbmaxlen == 1 ?
+ do_cut_string : do_cut_string_complex);
+ if (Field_string::pack_length() > from->pack_length())
+ return Field_string::charset() == &my_charset_bin ? do_expand_binary :
+ do_expand_string;
+ return get_identical_copy_func();
+}
+
+
+Field::Copy_func *Field::get_identical_copy_func() const
+{
/* Identical field types */
- switch (to_length) {
+ switch (pack_length()) {
case 1: return do_field_1;
case 2: return do_field_2;
case 3: return do_field_3;
@@ -835,61 +793,25 @@ Copy_field::get_copy_func(Field *to,Field *from)
return do_field_eq;
}
-/**
- Check if it is possible just copy value of the fields
- @param to The field to copy to
- @param from The field to copy from
+bool Field_temporal::memcpy_field_possible(const Field *from) const
+{
+ return real_type() == from->real_type() &&
+ decimals() == from->decimals() &&
+ !sql_mode_for_dates(table->in_use);
+}
- @retval TRUE - it is possible to just copy value of 'from' to 'to'.
- @retval FALSE - conversion is needed
-*/
-bool memcpy_field_possible(Field *to,Field *from)
+static int field_conv_memcpy(Field *to, Field *from)
{
- const enum_field_types to_real_type= to->real_type();
- const enum_field_types from_real_type= from->real_type();
/*
- Warning: Calling from->type() may be unsafe in some (unclear) circumstances
- related to SPs. See MDEV-6799.
+ This may happen if one does 'UPDATE ... SET x=x'
+ The test is here mostly for valgrind, but can also be relevant
+ if memcpy() is implemented with prefetch-write
*/
- return (to_real_type == from_real_type &&
- !(to->flags & BLOB_FLAG && to->table->copy_blobs) &&
- to->pack_length() == from->pack_length() &&
- !(to->flags & UNSIGNED_FLAG && !(from->flags & UNSIGNED_FLAG)) &&
- to->decimals() == from->decimals() &&
- to_real_type != MYSQL_TYPE_ENUM &&
- to_real_type != MYSQL_TYPE_SET &&
- to_real_type != MYSQL_TYPE_BIT &&
- (to_real_type != MYSQL_TYPE_NEWDECIMAL ||
- to->field_length == from->field_length) &&
- from->charset() == to->charset() &&
- (!sql_mode_for_dates(to->table->in_use) ||
- (from->type()!= MYSQL_TYPE_DATE &&
- from->type()!= MYSQL_TYPE_DATETIME &&
- from->type()!= MYSQL_TYPE_TIMESTAMP)) &&
- (from_real_type != MYSQL_TYPE_VARCHAR ||
- ((Field_varstring*)from)->length_bytes ==
- ((Field_varstring*)to)->length_bytes));
-}
-
-
-/** Simple quick field convert that is called on insert. */
-
-int field_conv(Field *to,Field *from)
-{
- if (memcpy_field_possible(to, from))
- { // Identical fields
- /*
- This may happen if one does 'UPDATE ... SET x=x'
- The test is here mostly for valgrind, but can also be relevant
- if memcpy() is implemented with prefetch-write
- */
- if (to->ptr != from->ptr)
- memcpy(to->ptr, from->ptr, to->pack_length());
- return 0;
- }
- return field_conv_incompatible(to, from);
+ if (to->ptr != from->ptr)
+ memcpy(to->ptr,from->ptr, to->pack_length());
+ return 0;
}
@@ -899,74 +821,34 @@ int field_conv(Field *to,Field *from)
@note Impossibility of simple copy should be checked before this call.
@param to The field to copy to
- @param from The field to copy from
@retval TRUE ERROR
@retval FALSE OK
+
+*/
+static int field_conv_incompatible(Field *to, Field *from)
+{
+ return to->store_field(from);
+}
+
+
+/**
+ Simple quick field converter that is called on insert, e.g.:
+ INSERT INTO t1 (field1) SELECT field2 FROM t2;
*/
-int field_conv_incompatible(Field *to, Field *from)
+int field_conv(Field *to,Field *from)
{
- const enum_field_types to_real_type= to->real_type();
- const enum_field_types from_real_type= from->real_type();
- if (to->flags & BLOB_FLAG)
- { // Be sure the value is stored
- Field_blob *blob=(Field_blob*) to;
- from->val_str(&blob->value);
+ return to->memcpy_field_possible(from) ?
+ field_conv_memcpy(to, from) :
+ field_conv_incompatible(to, from);
+}
- /*
- Copy value if copy_blobs is set, or source is part of the table's
- writeset.
- */
- if (to->table->copy_blobs ||
- (!blob->value.is_alloced() && from->is_updatable()))
- blob->value.copy();
- return blob->store(blob->value.ptr(),blob->value.length(),from->charset());
- }
- if (from_real_type == MYSQL_TYPE_ENUM &&
- to_real_type == MYSQL_TYPE_ENUM &&
- from->val_int() == 0)
- {
- ((Field_enum *)(to))->store_type(0);
- return 0;
- }
- Item_result from_result_type= from->result_type();
- if (from_result_type == REAL_RESULT)
- return to->store(from->val_real());
- if (from_result_type == DECIMAL_RESULT)
- {
- my_decimal buff;
- return to->store_decimal(from->val_decimal(&buff));
- }
- if (from->type() == MYSQL_TYPE_TIMESTAMP && to->type() == MYSQL_TYPE_TIMESTAMP)
- {
- return copy_timestamp_fields(from, to);
- }
- if (from->cmp_type() == TIME_RESULT)
- {
- MYSQL_TIME ltime;
- if (from->get_date(&ltime, 0))
- return to->reset();
- else
- return to->store_time_dec(&ltime, from->decimals());
- }
- if ((from_result_type == STRING_RESULT &&
- (to->result_type() == STRING_RESULT ||
- (from_real_type != MYSQL_TYPE_ENUM &&
- from_real_type != MYSQL_TYPE_SET))) ||
- to->type() == MYSQL_TYPE_DECIMAL)
- {
- char buff[MAX_FIELD_WIDTH];
- String result(buff,sizeof(buff),from->charset());
- from->val_str(&result);
- /*
- We use c_ptr_quick() here to make it easier if to is a float/double
- as the conversion routines will do a copy of the result doesn't
- end with \0. Can be replaced with .ptr() when we have our own
- string->double conversion.
- */
- return to->store(result.c_ptr_quick(),result.length(),from->charset());
- }
- return to->store(from->val_int(), MY_TEST(from->flags & UNSIGNED_FLAG));
+fast_field_copier Field::get_fast_field_copier(const Field *from)
+{
+ DBUG_ENTER("Field::get_fast_field_copier");
+ DBUG_RETURN(memcpy_field_possible(from) ?
+ &field_conv_memcpy :
+ &field_conv_incompatible);
}
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 7f7407fc2dc..2bab4390309 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -31,7 +31,7 @@
#include <m_ctype.h>
#include "sql_sort.h"
#include "probes_mysql.h"
-#include "sql_base.h" // update_virtual_fields
+#include "sql_base.h"
#include "sql_test.h" // TEST_filesort
#include "opt_range.h" // SQL_SELECT
#include "bounded_queue.h"
@@ -50,34 +50,36 @@ if (my_b_write((file),(uchar*) (from),param->ref_length)) \
static uchar *read_buffpek_from_file(IO_CACHE *buffer_file, uint count,
uchar *buf);
static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
- Filesort_info *fs_info,
+ SORT_INFO *fs_info,
IO_CACHE *buffer_file,
IO_CACHE *tempfile,
Bounded_queue<uchar, uchar> *pq,
ha_rows *found_rows);
-static bool write_keys(Sort_param *param, Filesort_info *fs_info,
+static bool write_keys(Sort_param *param, SORT_INFO *fs_info,
uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos);
static void register_used_fields(Sort_param *param);
static bool save_index(Sort_param *param, uint count,
- Filesort_info *table_sort);
+ SORT_INFO *table_sort);
static uint suffix_length(ulong string_length);
static uint sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
bool *multi_byte_charset);
static SORT_ADDON_FIELD *get_addon_fields(ulong max_length_for_sort_data,
Field **ptabfield,
- uint sortlength, uint *plength);
+ uint sortlength,
+ LEX_STRING *addon_buf);
static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
uchar *buff, uchar *buff_end);
-static bool check_if_pq_applicable(Sort_param *param, Filesort_info *info,
+static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info,
TABLE *table,
ha_rows records, ulong memory_available);
-
void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
ulong max_length_for_sort_data,
ha_rows maxrows, bool sort_positions)
{
+ DBUG_ASSERT(addon_field == 0 && addon_buf.length == 0);
+
sort_length= sortlen;
ref_length= table->file->ref_length;
if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
@@ -85,13 +87,13 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
{
/*
Get the descriptors of all fields whose values are appended
- to sorted fields and get its total length in addon_length.
+ to sorted fields and get its total length in addon_buf.length
*/
addon_field= get_addon_fields(max_length_for_sort_data,
- table->field, sort_length, &addon_length);
+ table->field, sort_length, &addon_buf);
}
if (addon_field)
- res_length= addon_length;
+ res_length= addon_buf.length;
else
{
res_length= ref_length;
@@ -101,7 +103,7 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
*/
sort_length+= ref_length;
}
- rec_length= sort_length + addon_length;
+ rec_length= sort_length + addon_buf.length;
max_rows= maxrows;
}
@@ -115,40 +117,32 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
Before calling filesort, one must have done
table->file->info(HA_STATUS_VARIABLE)
- The result set is stored in table->io_cache or
- table->record_pointers.
+ The result set is stored in
+ filesort_info->io_cache or
+ filesort_info->record_pointers.
@param thd Current thread
@param table Table to sort
- @param sortorder How to sort the table
- @param s_length Number of elements in sortorder
- @param select Condition to apply to the rows
- @param max_rows Return only this many rows
- @param sort_positions Set to TRUE if we want to force sorting by position
- (Needed by UPDATE/INSERT or ALTER TABLE or
- when rowids are required by executor)
- @param[out] examined_rows Store number of examined rows here
- @param[out] found_rows Store the number of found rows here
-
+ @param filesort How to sort the table
+ @param[out] found_rows Store the number of found rows here.
+ This is the number of found rows after
+ applying WHERE condition.
@note
- If we sort by position (like if sort_positions is 1) filesort() will
- call table->prepare_for_position().
+ If we sort by position (like if filesort->sort_positions==true)
+ filesort() will call table->prepare_for_position().
@retval
- HA_POS_ERROR Error
- @retval
- \# Number of rows
+ 0 Error
+ # SORT_INFO
*/
-ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
- SQL_SELECT *select, ha_rows max_rows,
- bool sort_positions,
- ha_rows *examined_rows,
- ha_rows *found_rows,
- Filesort_tracker* tracker)
+SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
+ Filesort_tracker* tracker, JOIN *join,
+ table_map first_table_bit)
{
int error;
- size_t memory_available= thd->variables.sortbuff_size;
+ DBUG_ASSERT(thd->variables.sortbuff_size <= SIZE_T_MAX);
+ size_t memory_available= (size_t)thd->variables.sortbuff_size;
uint maxbuffer;
BUFFPEK *buffpek;
ha_rows num_rows= HA_POS_ERROR;
@@ -156,54 +150,57 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
Sort_param param;
bool multi_byte_charset;
Bounded_queue<uchar, uchar> pq;
+ SQL_SELECT *const select= filesort->select;
+ ha_rows max_rows= filesort->limit;
+ uint s_length= 0;
DBUG_ENTER("filesort");
- DBUG_EXECUTE("info",TEST_filesort(sortorder,s_length););
+
+ if (!(s_length= filesort->make_sortorder(thd, join, first_table_bit)))
+ DBUG_RETURN(NULL); /* purecov: inspected */
+
+ DBUG_EXECUTE("info",TEST_filesort(filesort->sortorder,s_length););
#ifdef SKIP_DBUG_IN_FILESORT
DBUG_PUSH(""); /* No DBUG here */
#endif
- Filesort_info table_sort= table->sort;
+ SORT_INFO *sort;
TABLE_LIST *tab= table->pos_in_table_list;
Item_subselect *subselect= tab ? tab->containing_subselect() : 0;
-
MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str);
DEBUG_SYNC(thd, "filesort_start");
- /*
- Release InnoDB's adaptive hash index latch (if holding) before
- running a sort.
- */
- ha_release_temporary_latches(thd);
+ if (!(sort= new SORT_INFO))
+ return 0;
+
+ if (subselect && subselect->filesort_buffer.is_allocated())
+ {
+ /* Reuse cache from last call */
+ sort->filesort_buffer= subselect->filesort_buffer;
+ sort->buffpek= subselect->sortbuffer;
+ subselect->filesort_buffer.reset();
+ subselect->sortbuffer.str=0;
+ }
+
+ outfile= &sort->io_cache;
- /*
- Don't use table->sort in filesort as it is also used by
- QUICK_INDEX_MERGE_SELECT. Work with a copy and put it back at the end
- when index_merge select has finished with it.
- */
- table->sort.io_cache= NULL;
- DBUG_ASSERT(table_sort.record_pointers == NULL);
-
- outfile= table_sort.io_cache;
my_b_clear(&tempfile);
my_b_clear(&buffpek_pointers);
buffpek=0;
error= 1;
- *found_rows= HA_POS_ERROR;
+ sort->found_rows= HA_POS_ERROR;
- param.init_for_filesort(sortlength(thd, sortorder, s_length,
+ param.init_for_filesort(sortlength(thd, filesort->sortorder, s_length,
&multi_byte_charset),
table,
thd->variables.max_length_for_sort_data,
- max_rows, sort_positions);
-
- table_sort.addon_buf= 0;
- table_sort.addon_length= param.addon_length;
- table_sort.addon_field= param.addon_field;
- table_sort.unpack= unpack_addon_fields;
- if (param.addon_field &&
- !(table_sort.addon_buf=
- (uchar *) my_malloc(param.addon_length, MYF(MY_WME |
- MY_THREAD_SPECIFIC))))
+ max_rows, filesort->sort_positions);
+
+ sort->addon_buf= param.addon_buf;
+ sort->addon_field= param.addon_field;
+ sort->unpack= unpack_addon_fields;
+ if (multi_byte_charset &&
+ !(param.tmp_buffer= (char*) my_malloc(param.sort_length,
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
goto err;
if (select && select->quick)
@@ -216,12 +213,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
// If number of rows is not known, use as much of sort buffer as possible.
num_rows= table->file->estimate_rows_upper_bound();
- if (multi_byte_charset &&
- !(param.tmp_buffer= (char*) my_malloc(param.sort_length,
- MYF(MY_WME | MY_THREAD_SPECIFIC))))
- goto err;
-
- if (check_if_pq_applicable(&param, &table_sort,
+ if (check_if_pq_applicable(&param, sort,
table, num_rows, memory_available))
{
DBUG_PRINT("info", ("filesort PQ is applicable"));
@@ -233,45 +225,31 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
true, // max_at_top
NULL, // compare_function
compare_length,
- &make_sortkey, &param, table_sort.get_sort_keys()))
+ &make_sortkey, &param, sort->get_sort_keys()))
{
/*
If we fail to init pq, we have to give up:
out of memory means my_malloc() will call my_error().
*/
DBUG_PRINT("info", ("failed to allocate PQ"));
- table_sort.free_sort_buffer();
DBUG_ASSERT(thd->is_error());
goto err;
}
// For PQ queries (with limit) we initialize all pointers.
- table_sort.init_record_pointers();
+ sort->init_record_pointers();
}
else
{
DBUG_PRINT("info", ("filesort PQ is not applicable"));
- size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
+ size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY,
+ param.sort_length*MERGEBUFF2);
set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2);
while (memory_available >= min_sort_memory)
{
ulonglong keys= memory_available / (param.rec_length + sizeof(char*));
param.max_keys_per_buffer= (uint) MY_MIN(num_rows, keys);
- if (table_sort.get_sort_keys())
- {
- // If we have already allocated a buffer, it better have same size!
- if (!table_sort.check_sort_buffer_properties(param.max_keys_per_buffer,
- param.rec_length))
- {
- /*
- table->sort will still have a pointer to the same buffer,
- but that will be overwritten by the assignment below.
- */
- table_sort.free_sort_buffer();
- }
- }
- table_sort.alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length);
- if (table_sort.get_sort_keys())
+ if (sort->alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length))
break;
size_t old_memory_available= memory_available;
memory_available= memory_available/4*3;
@@ -284,7 +262,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR + ME_FATALERROR));
goto err;
}
- tracker->report_sort_buffer_size(table_sort.sort_buffer_size());
+ tracker->report_sort_buffer_size(sort->sort_buffer_size());
}
if (open_cached_file(&buffpek_pointers,mysql_tmpdir,TEMP_PREFIX,
@@ -292,23 +270,23 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
goto err;
param.sort_form= table;
- param.end=(param.local_sortorder=sortorder)+s_length;
+ param.end=(param.local_sortorder=filesort->sortorder)+s_length;
num_rows= find_all_keys(thd, &param, select,
- &table_sort,
+ sort,
&buffpek_pointers,
&tempfile,
pq.is_initialized() ? &pq : NULL,
- found_rows);
+ &sort->found_rows);
if (num_rows == HA_POS_ERROR)
goto err;
maxbuffer= (uint) (my_b_tell(&buffpek_pointers)/sizeof(*buffpek));
tracker->report_merge_passes_at_start(thd->query_plan_fsort_passes);
- tracker->report_row_numbers(param.examined_rows, *found_rows, num_rows);
+ tracker->report_row_numbers(param.examined_rows, sort->found_rows, num_rows);
if (maxbuffer == 0) // The whole set is in memory
{
- if (save_index(&param, (uint) num_rows, &table_sort))
+ if (save_index(&param, (uint) num_rows, sort))
goto err;
}
else
@@ -316,17 +294,17 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
/* filesort cannot handle zero-length records during merge. */
DBUG_ASSERT(param.sort_length != 0);
- if (table_sort.buffpek && table_sort.buffpek_len < maxbuffer)
+ if (sort->buffpek.str && sort->buffpek.length < maxbuffer)
{
- my_free(table_sort.buffpek);
- table_sort.buffpek= 0;
+ my_free(sort->buffpek.str);
+ sort->buffpek.str= 0;
}
- if (!(table_sort.buffpek=
- (uchar *) read_buffpek_from_file(&buffpek_pointers, maxbuffer,
- table_sort.buffpek)))
+ if (!(sort->buffpek.str=
+ (char *) read_buffpek_from_file(&buffpek_pointers, maxbuffer,
+ (uchar*) sort->buffpek.str)))
goto err;
- buffpek= (BUFFPEK *) table_sort.buffpek;
- table_sort.buffpek_len= maxbuffer;
+ sort->buffpek.length= maxbuffer;
+ buffpek= (BUFFPEK *) sort->buffpek.str;
close_cached_file(&buffpek_pointers);
/* Open cached file if it isn't open */
if (! my_b_inited(outfile) &&
@@ -345,7 +323,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
param.rec_length - 1);
maxbuffer--; // Offset from 0
if (merge_many_buff(&param,
- (uchar*) table_sort.get_sort_keys(),
+ (uchar*) sort->get_sort_keys(),
buffpek,&maxbuffer,
&tempfile))
goto err;
@@ -353,7 +331,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
goto err;
if (merge_index(&param,
- (uchar*) table_sort.get_sort_keys(),
+ (uchar*) sort->get_sort_keys(),
buffpek,
maxbuffer,
&tempfile,
@@ -372,11 +350,18 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
my_free(param.tmp_buffer);
if (!subselect || !subselect->is_uncacheable())
{
- table_sort.free_sort_buffer();
- my_free(buffpek);
- table_sort.buffpek= 0;
- table_sort.buffpek_len= 0;
+ sort->free_sort_buffer();
+ my_free(sort->buffpek.str);
+ }
+ else
+ {
+ /* Remember sort buffers for next subquery call */
+ subselect->filesort_buffer= sort->filesort_buffer;
+ subselect->sortbuffer= sort->buffpek;
+ sort->filesort_buffer.reset(); // Don't free this
}
+ sort->buffpek.str= 0;
+
close_cached_file(&tempfile);
close_cached_file(&buffpek_pointers);
if (my_b_inited(outfile))
@@ -397,13 +382,6 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
int kill_errno= thd->killed_errno();
DBUG_ASSERT(thd->is_error() || kill_errno || thd->killed == ABORT_QUERY);
- /*
- We replace the table->sort at the end.
- Hence calling free_io_cache to make sure table->sort.io_cache
- used for QUICK_INDEX_MERGE_SELECT is free.
- */
- free_io_cache(table);
-
my_printf_error(ER_FILSORT_ABORT,
"%s: %s",
MYF(0),
@@ -424,49 +402,97 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
}
else
thd->inc_status_sort_rows(num_rows);
- *examined_rows= param.examined_rows;
+
+ sort->examined_rows= param.examined_rows;
+ sort->return_rows= num_rows;
#ifdef SKIP_DBUG_IN_FILESORT
DBUG_POP(); /* Ok to DBUG */
#endif
- /* table->sort.io_cache should be free by this time */
- DBUG_ASSERT(NULL == table->sort.io_cache);
-
- // Assign the copy back!
- table->sort= table_sort;
-
DBUG_PRINT("exit",
- ("num_rows: %ld examined_rows: %ld found_rows: %ld",
- (long) num_rows, (long) *examined_rows, (long) *found_rows));
+ ("num_rows: %lld examined_rows: %lld found_rows: %lld",
+ (longlong) sort->return_rows, (longlong) sort->examined_rows,
+ (longlong) sort->found_rows));
MYSQL_FILESORT_DONE(error, num_rows);
- DBUG_RETURN(error ? HA_POS_ERROR : num_rows);
+
+ if (error)
+ {
+ delete sort;
+ sort= 0;
+ }
+ DBUG_RETURN(sort);
} /* filesort */
-void filesort_free_buffers(TABLE *table, bool full)
+void Filesort::cleanup()
{
- DBUG_ENTER("filesort_free_buffers");
-
- my_free(table->sort.record_pointers);
- table->sort.record_pointers= NULL;
-
- if (unlikely(full))
+ if (select && own_select)
{
- table->sort.free_sort_buffer();
- my_free(table->sort.buffpek);
- table->sort.buffpek= NULL;
- table->sort.buffpek_len= 0;
+ select->cleanup();
+ select= NULL;
}
+}
+
+
+uint Filesort::make_sortorder(THD *thd, JOIN *join, table_map first_table_bit)
+{
+ uint count;
+ SORT_FIELD *sort,*pos;
+ ORDER *ord;
+ DBUG_ENTER("make_sortorder");
- /* addon_buf is only allocated if addon_field is set */
- if (unlikely(table->sort.addon_field))
+
+ count=0;
+ for (ord = order; ord; ord= ord->next)
+ count++;
+ if (!sortorder)
+ sortorder= (SORT_FIELD*) thd->alloc(sizeof(SORT_FIELD) * (count + 1));
+ pos= sort= sortorder;
+
+ if (!pos)
+ DBUG_RETURN(0);
+
+ for (ord= order; ord; ord= ord->next, pos++)
{
- my_free(table->sort.addon_field);
- my_free(table->sort.addon_buf);
- table->sort.addon_buf= NULL;
- table->sort.addon_field= NULL;
+ Item *first= ord->item[0];
+ /*
+ It is possible that the query plan is to read table t1, while the
+ sort criteria actually has "ORDER BY t2.col" and the WHERE clause has
+ a multi-equality(t1.col, t2.col, ...).
+ The optimizer detects such cases (grep for
+ UseMultipleEqualitiesToRemoveTempTable to see where), but doesn't
+ perform equality substitution in the order->item. We need to do the
+ substitution here ourselves.
+ */
+ table_map item_map= first->used_tables();
+ if (join && (item_map & ~join->const_table_map) &&
+ !(item_map & first_table_bit) && join->cond_equal &&
+ first->get_item_equal())
+ {
+ /*
+        Ok, this is the case described just above. Get the first element of the
+ multi-equality.
+ */
+ Item_equal *item_eq= first->get_item_equal();
+ first= item_eq->get_first(NO_PARTICULAR_TAB, NULL);
+ }
+
+ Item *item= first->real_item();
+ pos->field= 0; pos->item= 0;
+ if (item->type() == Item::FIELD_ITEM)
+ pos->field= ((Item_field*) item)->field;
+ else if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item())
+ pos->field= ((Item_sum*) item)->get_tmp_table_field();
+ else if (item->type() == Item::COPY_STR_ITEM)
+ { // Blob patch
+ pos->item= ((Item_copy*) item)->get_item();
+ }
+ else
+ pos->item= *ord->item;
+ pos->reverse= (ord->direction == ORDER::ORDER_DESC);
+ DBUG_ASSERT(pos->field != NULL || pos->item != NULL);
}
- DBUG_VOID_RETURN;
+ DBUG_RETURN(count);
}
@@ -672,7 +698,7 @@ static void dbug_print_record(TABLE *table, bool print_rowid)
*/
static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
- Filesort_info *fs_info,
+ SORT_INFO *fs_info,
IO_CACHE *buffpek_pointers,
IO_CACHE *tempfile,
Bounded_queue<uchar, uchar> *pq,
@@ -685,7 +711,8 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
TABLE *sort_form;
handler *file;
MY_BITMAP *save_read_set, *save_write_set, *save_vcol_set;
-
+ Item *sort_cond;
+ ha_rows retval;
DBUG_ENTER("find_all_keys");
DBUG_PRINT("info",("using: %s",
(select ? select->quick ? "ranges" : "where":
@@ -716,35 +743,34 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
DBUG_SET("+d,ha_rnd_init_fail"););
if (file->ha_rnd_init_with_error(1))
DBUG_RETURN(HA_POS_ERROR);
- file->extra_opt(HA_EXTRA_CACHE,
- current_thd->variables.read_buff_size);
+ file->extra_opt(HA_EXTRA_CACHE, thd->variables.read_buff_size);
}
/* Remember original bitmaps */
save_read_set= sort_form->read_set;
save_write_set= sort_form->write_set;
- save_vcol_set= sort_form->vcol_set;
+ save_vcol_set= sort_form->vcol_set;
+
/* Set up temporary column read map for columns used by sort */
+ DBUG_ASSERT(save_read_set != &sort_form->tmp_set);
bitmap_clear_all(&sort_form->tmp_set);
- /* Temporary set for register_used_fields and register_field_in_read_map */
- sort_form->read_set= &sort_form->tmp_set;
+ sort_form->column_bitmaps_set(&sort_form->tmp_set, &sort_form->tmp_set,
+ &sort_form->tmp_set);
register_used_fields(param);
if (quick_select)
- select->quick->add_used_key_part_to_set(sort_form->read_set);
+ select->quick->add_used_key_part_to_set();
- Item *sort_cond= !select ?
- 0 : !select->pre_idx_push_select_cond ?
- select->cond : select->pre_idx_push_select_cond;
+ sort_cond= (!select ? 0 :
+ (!select->pre_idx_push_select_cond ?
+ select->cond : select->pre_idx_push_select_cond));
if (sort_cond)
- sort_cond->walk(&Item::register_field_in_read_map, 1, (uchar*) sort_form);
- sort_form->column_bitmaps_set(&sort_form->tmp_set, &sort_form->tmp_set,
- &sort_form->tmp_set);
-
+ sort_cond->walk(&Item::register_field_in_read_map, 1, sort_form);
+ sort_form->file->column_bitmaps_signal();
if (quick_select)
{
if (select->quick->reset())
- DBUG_RETURN(HA_POS_ERROR);
+ goto err;
}
DEBUG_SYNC(thd, "after_index_merge_phase1");
@@ -754,8 +780,6 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
{
if ((error= select->quick->get_next()))
break;
- if (!error && sort_form->vfield)
- update_virtual_fields(thd, sort_form);
file->position(sort_form->record[0]);
DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE););
}
@@ -763,8 +787,6 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
{
{
error= file->ha_rnd_next(sort_form->record[0]);
- if (!error && sort_form->vfield)
- update_virtual_fields(thd, sort_form);
if (!flag)
{
my_store_ptr(ref_pos,ref_length,record); // Position to row
@@ -785,7 +807,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
(void) file->extra(HA_EXTRA_NO_CACHE);
file->ha_rnd_end();
}
- DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
+ goto err; /* purecov: inspected */
}
bool write_record= false;
@@ -833,7 +855,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
if (idx == param->max_keys_per_buffer)
{
if (write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
- DBUG_RETURN(HA_POS_ERROR);
+ goto err;
idx= 0;
indexpos++;
}
@@ -859,12 +881,12 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
file->ha_rnd_end();
}
- if (thd->is_error())
- DBUG_RETURN(HA_POS_ERROR);
-
  /* Signal we should use original column read and write maps */
sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
+ if (thd->is_error())
+ DBUG_RETURN(HA_POS_ERROR);
+
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
if (error != HA_ERR_END_OF_FILE)
{
@@ -874,11 +896,15 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
if (indexpos && idx &&
write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
- const ha_rows retval=
- my_b_inited(tempfile) ?
- (ha_rows) (my_b_tell(tempfile)/param->rec_length) : idx;
- DBUG_PRINT("info", ("find_all_keys return %u", (uint) retval));
+ retval= (my_b_inited(tempfile) ?
+ (ha_rows) (my_b_tell(tempfile)/param->rec_length) :
+ idx);
+ DBUG_PRINT("info", ("find_all_keys return %llu", (ulonglong) retval));
DBUG_RETURN(retval);
+
+err:
+ sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
+ DBUG_RETURN(HA_POS_ERROR);
} /* find_all_keys */
@@ -905,7 +931,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
*/
static bool
-write_keys(Sort_param *param, Filesort_info *fs_info, uint count,
+write_keys(Sort_param *param, SORT_INFO *fs_info, uint count,
IO_CACHE *buffpek_pointers, IO_CACHE *tempfile)
{
size_t rec_length;
@@ -965,14 +991,185 @@ static inline void store_length(uchar *to, uint length, uint pack_length)
}
+void
+Type_handler_string_result::make_sort_key(uchar *to, Item *item,
+ const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const
+{
+ CHARSET_INFO *cs= item->collation.collation;
+ bool maybe_null= item->maybe_null;
+
+ if (maybe_null)
+ *to++= 1;
+ char *tmp_buffer= param->tmp_buffer ? param->tmp_buffer : (char*) to;
+ String tmp(tmp_buffer, param->tmp_buffer ? param->sort_length :
+ sort_field->length, cs);
+ String *res= item->str_result(&tmp);
+ if (!res)
+ {
+ if (maybe_null)
+ memset(to - 1, 0, sort_field->length + 1);
+ else
+ {
+ /* purecov: begin deadcode */
+ /*
+ This should only happen during extreme conditions if we run out
+ of memory or have an item marked not null when it can be null.
+ This code is here mainly to avoid a hard crash in this case.
+ */
+ DBUG_ASSERT(0);
+ DBUG_PRINT("warning",
+ ("Got null on something that shouldn't be null"));
+ memset(to, 0, sort_field->length); // Avoid crash
+ /* purecov: end */
+ }
+ return;
+ }
+
+ if (use_strnxfrm(cs))
+ {
+ uint tmp_length __attribute__((unused));
+ tmp_length= cs->coll->strnxfrm(cs, to, sort_field->length,
+ item->max_char_length() *
+ cs->strxfrm_multiply,
+ (uchar*) res->ptr(), res->length(),
+ MY_STRXFRM_PAD_WITH_SPACE |
+ MY_STRXFRM_PAD_TO_MAXLEN);
+ DBUG_ASSERT(tmp_length == sort_field->length);
+ }
+ else
+ {
+ uint diff;
+ uint sort_field_length= sort_field->length - sort_field->suffix_length;
+ uint length= res->length();
+ if (sort_field_length < length)
+ {
+ diff= 0;
+ length= sort_field_length;
+ }
+ else
+ diff= sort_field_length - length;
+ if (sort_field->suffix_length)
+ {
+ /* Store length last in result_string */
+ store_length(to + sort_field_length, length, sort_field->suffix_length);
+ }
+ /* apply cs->sort_order for case-insensitive comparison if needed */
+ my_strnxfrm(cs,(uchar*)to,length,(const uchar*)res->ptr(),length);
+ char fill_char= ((cs->state & MY_CS_BINSORT) ? (char) 0 : ' ');
+ cs->cset->fill(cs, (char *)to+length,diff,fill_char);
+ }
+}
+
+
+void
+Type_handler_int_result::make_sort_key(uchar *to, Item *item,
+ const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const
+{
+ longlong value= item->val_int_result();
+ make_sort_key_longlong(to, item->maybe_null, item->null_value,
+ item->unsigned_flag, value);
+}
+
+
+void
+Type_handler_temporal_result::make_sort_key(uchar *to, Item *item,
+ const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const
+{
+ MYSQL_TIME buf;
+ if (item->get_date_result(&buf, TIME_INVALID_DATES))
+ {
+ DBUG_ASSERT(item->maybe_null);
+ DBUG_ASSERT(item->null_value);
+ make_sort_key_longlong(to, item->maybe_null, true,
+ item->unsigned_flag, 0);
+ }
+ else
+ make_sort_key_longlong(to, item->maybe_null, false,
+ item->unsigned_flag, pack_time(&buf));
+}
+
+
+void
+Type_handler::make_sort_key_longlong(uchar *to,
+ bool maybe_null,
+ bool null_value,
+ bool unsigned_flag,
+ longlong value) const
+
+{
+ if (maybe_null)
+ {
+ if (null_value)
+ {
+ memset(to, 0, 9);
+ return;
+ }
+ *to++= 1;
+ }
+ to[7]= (uchar) value;
+ to[6]= (uchar) (value >> 8);
+ to[5]= (uchar) (value >> 16);
+ to[4]= (uchar) (value >> 24);
+ to[3]= (uchar) (value >> 32);
+ to[2]= (uchar) (value >> 40);
+ to[1]= (uchar) (value >> 48);
+ if (unsigned_flag) /* Fix sign */
+ to[0]= (uchar) (value >> 56);
+ else
+ to[0]= (uchar) (value >> 56) ^ 128; /* Reverse signbit */
+}
+
+
+void
+Type_handler_decimal_result::make_sort_key(uchar *to, Item *item,
+ const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const
+{
+ my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf);
+ if (item->maybe_null)
+ {
+ if (item->null_value)
+ {
+ memset(to, 0, sort_field->length + 1);
+ return;
+ }
+ *to++= 1;
+ }
+ my_decimal2binary(E_DEC_FATAL_ERROR, dec_val, to,
+ item->max_length - (item->decimals ? 1 : 0),
+ item->decimals);
+}
+
+
+void
+Type_handler_real_result::make_sort_key(uchar *to, Item *item,
+ const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const
+{
+ double value= item->val_result();
+ if (item->maybe_null)
+ {
+ if (item->null_value)
+ {
+ memset(to, 0, sort_field->length + 1);
+ return;
+ }
+ *to++= 1;
+ }
+ change_double_for_sort(value, to);
+}
+
+
/** Make a sort-key from record. */
-static void make_sortkey(register Sort_param *param,
- register uchar *to, uchar *ref_pos)
+static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos)
{
- reg3 Field *field;
- reg1 SORT_FIELD *sort_field;
- reg5 uint length;
+ Field *field;
+ SORT_FIELD *sort_field;
+ uint length;
for (sort_field=param->local_sortorder ;
sort_field != param->end ;
@@ -987,162 +1184,9 @@ static void make_sortkey(register Sort_param *param,
}
else
{ // Item
- Item *item=sort_field->item;
- maybe_null= item->maybe_null;
- switch (sort_field->result_type) {
- case STRING_RESULT:
- {
- CHARSET_INFO *cs=item->collation.collation;
- char fill_char= ((cs->state & MY_CS_BINSORT) ? (char) 0 : ' ');
-
- if (maybe_null)
- *to++=1;
- char *tmp_buffer= param->tmp_buffer ? param->tmp_buffer : (char*)to;
- String tmp(tmp_buffer, param->tmp_buffer ? param->sort_length :
- sort_field->length, cs);
- String *res= item->str_result(&tmp);
- if (!res)
- {
- if (maybe_null)
- memset(to-1, 0, sort_field->length+1);
- else
- {
- /* purecov: begin deadcode */
- /*
- This should only happen during extreme conditions if we run out
- of memory or have an item marked not null when it can be null.
- This code is here mainly to avoid a hard crash in this case.
- */
- DBUG_ASSERT(0);
- DBUG_PRINT("warning",
- ("Got null on something that shouldn't be null"));
- memset(to, 0, sort_field->length); // Avoid crash
- /* purecov: end */
- }
- break;
- }
- length= res->length();
- if (sort_field->need_strxnfrm)
- {
- uint tmp_length __attribute__((unused));
- tmp_length= cs->coll->strnxfrm(cs, to, sort_field->length,
- item->max_char_length() *
- cs->strxfrm_multiply,
- (uchar*) res->ptr(), length,
- MY_STRXFRM_PAD_WITH_SPACE |
- MY_STRXFRM_PAD_TO_MAXLEN);
- DBUG_ASSERT(tmp_length == sort_field->length);
- }
- else
- {
- uint diff;
- uint sort_field_length= sort_field->length -
- sort_field->suffix_length;
- if (sort_field_length < length)
- {
- diff= 0;
- length= sort_field_length;
- }
- else
- diff= sort_field_length - length;
- if (sort_field->suffix_length)
- {
- /* Store length last in result_string */
- store_length(to + sort_field_length, length,
- sort_field->suffix_length);
- }
- /* apply cs->sort_order for case-insensitive comparison if needed */
- my_strnxfrm(cs,(uchar*)to,length,(const uchar*)res->ptr(),length);
- cs->cset->fill(cs, (char *)to+length,diff,fill_char);
- }
- break;
- }
- case INT_RESULT:
- case TIME_RESULT:
- {
- longlong UNINIT_VAR(value);
- if (sort_field->result_type == INT_RESULT)
- value= item->val_int_result();
- else
- {
- MYSQL_TIME buf;
- if (item->get_date_result(&buf, TIME_INVALID_DATES))
- {
- DBUG_ASSERT(maybe_null);
- DBUG_ASSERT(item->null_value);
- }
- else
- value= pack_time(&buf);
- }
- if (maybe_null)
- {
- *to++=1; /* purecov: inspected */
- if (item->null_value)
- {
- if (maybe_null)
- memset(to-1, 0, sort_field->length+1);
- else
- {
- DBUG_PRINT("warning",
- ("Got null on something that shouldn't be null"));
- memset(to, 0, sort_field->length);
- }
- break;
- }
- }
- to[7]= (uchar) value;
- to[6]= (uchar) (value >> 8);
- to[5]= (uchar) (value >> 16);
- to[4]= (uchar) (value >> 24);
- to[3]= (uchar) (value >> 32);
- to[2]= (uchar) (value >> 40);
- to[1]= (uchar) (value >> 48);
- if (item->unsigned_flag) /* Fix sign */
- to[0]= (uchar) (value >> 56);
- else
- to[0]= (uchar) (value >> 56) ^ 128; /* Reverse signbit */
- break;
- }
- case DECIMAL_RESULT:
- {
- my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf);
- if (maybe_null)
- {
- if (item->null_value)
- {
- memset(to, 0, sort_field->length+1);
- to++;
- break;
- }
- *to++=1;
- }
- my_decimal2binary(E_DEC_FATAL_ERROR, dec_val, to,
- item->max_length - (item->decimals ? 1:0),
- item->decimals);
- break;
- }
- case REAL_RESULT:
- {
- double value= item->val_result();
- if (maybe_null)
- {
- if (item->null_value)
- {
- memset(to, 0, sort_field->length+1);
- to++;
- break;
- }
- *to++=1;
- }
- change_double_for_sort(value,(uchar*) to);
- break;
- }
- case ROW_RESULT:
- default:
- // This case should never be choosen
- DBUG_ASSERT(0);
- break;
- }
+ sort_field->item->make_sort_key(to, sort_field->item, sort_field, param);
+ if ((maybe_null= sort_field->item->maybe_null))
+ to++;
}
if (sort_field->reverse)
    { /* Reverse key */
@@ -1214,9 +1258,8 @@ static void make_sortkey(register Sort_param *param,
static void register_used_fields(Sort_param *param)
{
- reg1 SORT_FIELD *sort_field;
+ SORT_FIELD *sort_field;
TABLE *table=param->sort_form;
- MY_BITMAP *bitmap= table->read_set;
for (sort_field= param->local_sortorder ;
sort_field != param->end ;
@@ -1226,19 +1269,11 @@ static void register_used_fields(Sort_param *param)
if ((field= sort_field->field))
{
if (field->table == table)
- {
- if (field->vcol_info)
- {
- Item *vcol_item= field->vcol_info->expr_item;
- vcol_item->walk(&Item::register_field_in_read_map, 1, (uchar *) 0);
- }
- bitmap_set_bit(bitmap, field->field_index);
- }
+ field->register_field_in_read_map();
}
else
{ // Item
- sort_field->item->walk(&Item::register_field_in_read_map, 1,
- (uchar *) table);
+ sort_field->item->walk(&Item::register_field_in_read_map, 1, table);
}
}
@@ -1247,7 +1282,7 @@ static void register_used_fields(Sort_param *param)
SORT_ADDON_FIELD *addonf= param->addon_field;
Field *field;
for ( ; (field= addonf->field) ; addonf++)
- bitmap_set_bit(bitmap, field->field_index);
+ field->register_field_in_read_map();
}
else
{
@@ -1257,11 +1292,13 @@ static void register_used_fields(Sort_param *param)
}
-static bool save_index(Sort_param *param, uint count, Filesort_info *table_sort)
+static bool save_index(Sort_param *param, uint count,
+ SORT_INFO *table_sort)
{
uint offset,res_length;
uchar *to;
DBUG_ENTER("save_index");
+ DBUG_ASSERT(table_sort->record_pointers == 0);
table_sort->sort_buffer(param, count);
res_length= param->res_length;
@@ -1310,7 +1347,7 @@ static bool save_index(Sort_param *param, uint count, Filesort_info *table_sort)
*/
bool check_if_pq_applicable(Sort_param *param,
- Filesort_info *filesort_info,
+ SORT_INFO *filesort_info,
TABLE *table, ha_rows num_rows,
ulong memory_available)
{
@@ -1344,9 +1381,8 @@ bool check_if_pq_applicable(Sort_param *param,
// The whole source set fits into memory.
if (param->max_rows < num_rows/PQ_slowness )
{
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->rec_length);
- DBUG_RETURN(filesort_info->get_sort_keys() != NULL);
+ DBUG_RETURN(filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->rec_length) != NULL);
}
else
{
@@ -1358,9 +1394,8 @@ bool check_if_pq_applicable(Sort_param *param,
// Do we have space for LIMIT rows in memory?
if (param->max_keys_per_buffer < num_available_keys)
{
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->rec_length);
- DBUG_RETURN(filesort_info->get_sort_keys() != NULL);
+ DBUG_RETURN(filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->rec_length) != NULL);
}
// Try to strip off addon fields.
@@ -1396,17 +1431,14 @@ bool check_if_pq_applicable(Sort_param *param,
if (sort_merge_cost < pq_cost)
DBUG_RETURN(false);
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->sort_length + param->ref_length);
- if (filesort_info->get_sort_keys())
+ if (filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->sort_length +
+ param->ref_length))
{
- // Make attached data to be references instead of fields.
- my_free(filesort_info->addon_buf);
+ /* Make attached data to be references instead of fields. */
my_free(filesort_info->addon_field);
- filesort_info->addon_buf= NULL;
filesort_info->addon_field= NULL;
param->addon_field= NULL;
- param->addon_length= 0;
param->res_length= param->ref_length;
param->sort_length+= param->ref_length;
@@ -1425,7 +1457,7 @@ bool check_if_pq_applicable(Sort_param *param,
int merge_many_buff(Sort_param *param, uchar *sort_buffer,
BUFFPEK *buffpek, uint *maxbuffer, IO_CACHE *t_file)
{
- register uint i;
+ uint i;
IO_CACHE t_file2,*from_file,*to_file,*temp;
BUFFPEK *lastbuff;
DBUG_ENTER("merge_many_buff");
@@ -1457,8 +1489,6 @@ int merge_many_buff(Sort_param *param, uchar *sort_buffer,
if (flush_io_cache(to_file))
break; /* purecov: inspected */
temp=from_file; from_file=to_file; to_file=temp;
- setup_io_cache(from_file);
- setup_io_cache(to_file);
*maxbuffer= (uint) (lastbuff-buffpek)-1;
}
cleanup:
@@ -1466,7 +1496,6 @@ cleanup:
if (to_file == t_file)
{
*t_file=t_file2; // Copy result file
- setup_io_cache(t_file);
}
DBUG_RETURN(*maxbuffer >= MERGEBUFF2); /* Return 1 if interrupted */
@@ -1483,7 +1512,7 @@ cleanup:
uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
uint rec_length)
{
- register uint count;
+ uint count;
uint length;
if ((count=(uint) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
@@ -1780,14 +1809,14 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
if (flag == 0)
{
if (my_b_write(to_file, (uchar*) buffpek->key,
- (rec_length*buffpek->mem_count)))
+ (size_t)(rec_length*buffpek->mem_count)))
{
error= 1; goto err; /* purecov: inspected */
}
}
else
{
- register uchar *end;
+ uchar *end;
src= buffpek->key+offset;
for (end= src+buffpek->mem_count*rec_length ;
src != end ;
@@ -1844,6 +1873,64 @@ static uint suffix_length(ulong string_length)
}
+void
+Type_handler_string_result::sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *sortorder) const
+{
+ CHARSET_INFO *cs;
+ sortorder->length= item->max_length;
+ set_if_smaller(sortorder->length, thd->variables.max_sort_length);
+ if (use_strnxfrm((cs= item->collation.collation)))
+ {
+ sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length);
+ }
+ else if (cs == &my_charset_bin)
+ {
+ /* Store length last to be able to sort blob/varbinary */
+ sortorder->suffix_length= suffix_length(sortorder->length);
+ sortorder->length+= sortorder->suffix_length;
+ }
+}
+
+
+void
+Type_handler_temporal_result::sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *sortorder) const
+{
+  sortorder->length= 8; // Size of intern longlong
+}
+
+
+void
+Type_handler_int_result::sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *sortorder) const
+{
+  sortorder->length= 8; // Size of intern longlong
+}
+
+
+void
+Type_handler_real_result::sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *sortorder) const
+{
+ sortorder->length= sizeof(double);
+}
+
+
+void
+Type_handler_decimal_result::sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *sortorder) const
+{
+ sortorder->length=
+ my_decimal_get_binary_size(item->max_length - (item->decimals ? 1 : 0),
+ item->decimals);
+}
+
/**
Calculate length of sort key.
@@ -1856,8 +1943,6 @@ static uint suffix_length(ulong string_length)
@note
sortorder->length is updated for each sort item.
- @n
- sortorder->need_strxnfrm is set 1 if we have to use strxnfrm
@return
Total length of sort buffer in bytes
@@ -1868,23 +1953,19 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
bool *multi_byte_charset)
{
uint length;
- CHARSET_INFO *cs;
*multi_byte_charset= 0;
length=0;
for (; s_length-- ; sortorder++)
{
- sortorder->need_strxnfrm= 0;
sortorder->suffix_length= 0;
if (sortorder->field)
{
- cs= sortorder->field->sort_charset();
+ CHARSET_INFO *cs= sortorder->field->sort_charset();
sortorder->length= sortorder->field->sort_length();
-
if (use_strnxfrm((cs=sortorder->field->sort_charset())))
{
- sortorder->need_strxnfrm= 1;
- *multi_byte_charset= 1;
+ *multi_byte_charset= true;
sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length);
}
if (sortorder->field->maybe_null())
@@ -1892,42 +1973,10 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
}
else
{
- sortorder->result_type= sortorder->item->cmp_type();
- switch (sortorder->result_type) {
- case STRING_RESULT:
- sortorder->length=sortorder->item->max_length;
- set_if_smaller(sortorder->length, thd->variables.max_sort_length);
- if (use_strnxfrm((cs=sortorder->item->collation.collation)))
- {
- sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length);
- sortorder->need_strxnfrm= 1;
- *multi_byte_charset= 1;
- }
- else if (cs == &my_charset_bin)
- {
- /* Store length last to be able to sort blob/varbinary */
- sortorder->suffix_length= suffix_length(sortorder->length);
- sortorder->length+= sortorder->suffix_length;
- }
- break;
- case TIME_RESULT:
- case INT_RESULT:
- sortorder->length=8; // Size of intern longlong
- break;
- case DECIMAL_RESULT:
- sortorder->length=
- my_decimal_get_binary_size(sortorder->item->max_length -
- (sortorder->item->decimals ? 1 : 0),
- sortorder->item->decimals);
- break;
- case REAL_RESULT:
- sortorder->length=sizeof(double);
- break;
- case ROW_RESULT:
- default:
- // This case should never be choosen
- DBUG_ASSERT(0);
- break;
+ sortorder->item->sortlength(thd, sortorder->item, sortorder);
+ if (use_strnxfrm(sortorder->item->collation.collation))
+ {
+ *multi_byte_charset= true;
}
if (sortorder->item->maybe_null)
length++; // Place for NULL marker
@@ -1956,7 +2005,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
@param thd Current thread
@param ptabfield Array of references to the table fields
@param sortlength Total length of sorted fields
- @param[out] plength Total length of appended fields
+  @param [out] addon_buf  Buffer to use for appended fields
@note
The null bits for the appended values are supposed to be put together
@@ -1970,7 +2019,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
static SORT_ADDON_FIELD *
get_addon_fields(ulong max_length_for_sort_data,
- Field **ptabfield, uint sortlength, uint *plength)
+ Field **ptabfield, uint sortlength, LEX_STRING *addon_buf)
{
Field **pfield;
Field *field;
@@ -1979,6 +2028,7 @@ get_addon_fields(ulong max_length_for_sort_data,
uint fields= 0;
uint null_fields= 0;
MY_BITMAP *read_set= (*ptabfield)->table->read_set;
+ DBUG_ENTER("get_addon_fields");
/*
If there is a reference to a field in the query add it
@@ -1990,31 +2040,33 @@ get_addon_fields(ulong max_length_for_sort_data,
the values directly from sorted fields.
But beware the case when item->cmp_type() != item->result_type()
*/
- *plength= 0;
+ addon_buf->str= 0;
+ addon_buf->length= 0;
for (pfield= ptabfield; (field= *pfield) ; pfield++)
{
if (!bitmap_is_set(read_set, field->field_index))
continue;
if (field->flags & BLOB_FLAG)
- return 0;
+ DBUG_RETURN(0);
length+= field->max_packed_col_length(field->pack_length());
if (field->maybe_null())
null_fields++;
fields++;
}
if (!fields)
- return 0;
+ DBUG_RETURN(0);
length+= (null_fields+7)/8;
if (length+sortlength > max_length_for_sort_data ||
- !(addonf= (SORT_ADDON_FIELD *) my_malloc(sizeof(SORT_ADDON_FIELD)*
- (fields+1),
- MYF(MY_WME |
- MY_THREAD_SPECIFIC))))
- return 0;
+ !my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC),
+ &addonf, sizeof(SORT_ADDON_FIELD) * (fields+1),
+ &addon_buf->str, length,
+ NullS))
+
+ DBUG_RETURN(0);
- *plength= length;
+ addon_buf->length= length;
length= (null_fields+7)/8;
null_fields= 0;
for (pfield= ptabfield; (field= *pfield) ; pfield++)
@@ -2041,7 +2093,7 @@ get_addon_fields(ulong max_length_for_sort_data,
addonf->field= 0; // Put end marker
DBUG_PRINT("info",("addon_length: %d",length));
- return (addonf-fields);
+ DBUG_RETURN(addonf-fields);
}
@@ -2127,3 +2179,13 @@ void change_double_for_sort(double nr,uchar *to)
}
}
+/**
+ Free SORT_INFO
+*/
+
+SORT_INFO::~SORT_INFO()
+{
+ DBUG_ENTER("~SORT_INFO::SORT_INFO()");
+ free_data();
+ DBUG_VOID_RETURN;
+}
diff --git a/sql/filesort.h b/sql/filesort.h
index 4c95f1202b2..2b4f7ac2654 100644
--- a/sql/filesort.h
+++ b/sql/filesort.h
@@ -16,23 +16,151 @@
#ifndef FILESORT_INCLUDED
#define FILESORT_INCLUDED
-class SQL_SELECT;
-
-#include "my_global.h" /* uint, uchar */
#include "my_base.h" /* ha_rows */
+#include "sql_list.h" /* Sql_alloc */
+#include "filesort_utils.h"
class SQL_SELECT;
class THD;
struct TABLE;
-typedef struct st_sort_field SORT_FIELD;
class Filesort_tracker;
+struct SORT_FIELD;
+typedef struct st_order ORDER;
+class JOIN;
+
+
+/**
+ Sorting related info.
+ To be extended by another WL to include complete filesort implementation.
+*/
+class Filesort: public Sql_alloc
+{
+public:
+ /** List of expressions to order the table by */
+ ORDER *order;
+ /** Number of records to return */
+ ha_rows limit;
+ /** ORDER BY list with some precalculated info for filesort */
+ SORT_FIELD *sortorder;
+ /** select to use for getting records */
+ SQL_SELECT *select;
+ /** TRUE <=> free select on destruction */
+ bool own_select;
+ /** true means we are using Priority Queue for order by with limit. */
+ bool using_pq;
+
+ /*
+ TRUE means sort operation must produce table rowids.
+    FALSE means that it also has an option of producing {sort_key,
+ addon_fields} pairs.
+ */
+ bool sort_positions;
+
+ Filesort_tracker *tracker;
+
+ Filesort(ORDER *order_arg, ha_rows limit_arg, bool sort_positions_arg,
+ SQL_SELECT *select_arg):
+ order(order_arg),
+ limit(limit_arg),
+ sortorder(NULL),
+ select(select_arg),
+ own_select(false),
+ using_pq(false),
+ sort_positions(sort_positions_arg)
+ {
+ DBUG_ASSERT(order);
+ };
+
+ ~Filesort() { cleanup(); }
+ /* Prepare ORDER BY list for sorting. */
+ uint make_sortorder(THD *thd, JOIN *join, table_map first_table_bit);
+
+private:
+ void cleanup();
+};
+
+
+class SORT_INFO
+{
+ /// Buffer for sorting keys.
+ Filesort_buffer filesort_buffer;
+
+public:
+ SORT_INFO()
+ :addon_field(0), record_pointers(0)
+ {
+ buffpek.str= 0;
+ my_b_clear(&io_cache);
+ }
+
+ ~SORT_INFO();
+
+ void free_data()
+ {
+ close_cached_file(&io_cache);
+ my_free(record_pointers);
+ my_free(buffpek.str);
+ my_free(addon_field);
+ }
+
+ void reset()
+ {
+ free_data();
+ record_pointers= 0;
+ buffpek.str= 0;
+ addon_field= 0;
+ }
+
+
+ IO_CACHE io_cache; /* If sorted through filesort */
+ LEX_STRING buffpek; /* Buffer for buffpek structures */
+ LEX_STRING addon_buf; /* Pointer to a buffer if sorted with fields */
+ struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
+ /* To unpack back */
+ void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *);
+ uchar *record_pointers; /* If sorted in memory */
+ /*
+ How many rows in final result.
+ Also how many rows in record_pointers, if used
+ */
+ ha_rows return_rows;
+ ha_rows examined_rows; /* How many rows read */
+ ha_rows found_rows; /* How many rows was accepted */
+
+ /** Sort filesort_buffer */
+ void sort_buffer(Sort_param *param, uint count)
+ { filesort_buffer.sort_buffer(param, count); }
+
+ /**
+    Accessors for Filesort_buffer (the underlying sort key buffer).
+ */
+ uchar *get_record_buffer(uint idx)
+ { return filesort_buffer.get_record_buffer(idx); }
+
+ uchar **get_sort_keys()
+ { return filesort_buffer.get_sort_keys(); }
+
+ uchar **alloc_sort_buffer(uint num_records, uint record_length)
+ { return filesort_buffer.alloc_sort_buffer(num_records, record_length); }
+
+ void free_sort_buffer()
+ { filesort_buffer.free_sort_buffer(); }
+
+ void init_record_pointers()
+ { filesort_buffer.init_record_pointers(); }
+
+ size_t sort_buffer_size() const
+ { return filesort_buffer.sort_buffer_size(); }
+
+ friend SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
+ Filesort_tracker* tracker, JOIN *join,
+ table_map first_table_bit);
+};
+
+SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
+ Filesort_tracker* tracker, JOIN *join=NULL,
+ table_map first_table_bit=0);
-ha_rows filesort(THD *thd, TABLE *table, st_sort_field *sortorder,
- uint s_length, SQL_SELECT *select,
- ha_rows max_rows, bool sort_positions,
- ha_rows *examined_rows, ha_rows *found_rows,
- Filesort_tracker* tracker);
-void filesort_free_buffers(TABLE *table, bool full);
void change_double_for_sort(double nr,uchar *to);
#endif /* FILESORT_INCLUDED */
diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc
index 1e0cf096145..cb0b2d52b6f 100644
--- a/sql/filesort_utils.cc
+++ b/sql/filesort_utils.cc
@@ -85,31 +85,66 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
return total_cost;
}
-uchar **Filesort_buffer::alloc_sort_buffer(uint num_records, uint record_length)
-{
- ulong sort_buff_sz;
+/*
+ alloc_sort_buffer()
- DBUG_ENTER("alloc_sort_buffer");
+ Allocate buffer for sorting keys.
+ Try to reuse old buffer if possible.
+ @return
+ 0 Error
+ # Pointer to allocated buffer
+*/
+
+uchar **Filesort_buffer::alloc_sort_buffer(uint num_records,
+ uint record_length)
+{
+ size_t buff_size;
+ uchar **sort_keys, **start_of_data;
+ DBUG_ENTER("alloc_sort_buffer");
DBUG_EXECUTE_IF("alloc_sort_buffer_fail",
DBUG_SET("+d,simulate_out_of_memory"););
- if (m_idx_array.is_null())
+ buff_size= ((size_t)num_records) * (record_length + sizeof(uchar*));
+ set_if_bigger(buff_size, record_length * MERGEBUFF2);
+
+ if (!m_idx_array.is_null())
{
- sort_buff_sz= ((size_t)num_records) * (record_length + sizeof(uchar*));
- set_if_bigger(sort_buff_sz, record_length * MERGEBUFF2);
- uchar **sort_keys=
- (uchar**) my_malloc(sort_buff_sz, MYF(MY_THREAD_SPECIFIC));
- m_idx_array= Idx_array(sort_keys, num_records);
- m_record_length= record_length;
- uchar **start_of_data= m_idx_array.array() + m_idx_array.size();
- m_start_of_data= reinterpret_cast<uchar*>(start_of_data);
+ /*
+      Reuse the old buffer if it exists and is large enough.
+ Note that we don't make the buffer smaller, as we want to be
+ prepared for next subquery iteration.
+ */
+
+ sort_keys= m_idx_array.array();
+ if (buff_size > allocated_size)
+ {
+ /*
+ Better to free and alloc than realloc as we don't have to remember
+ the old values
+ */
+ my_free(sort_keys);
+ if (!(sort_keys= (uchar**) my_malloc(buff_size,
+ MYF(MY_THREAD_SPECIFIC))))
+ {
+ reset();
+ DBUG_RETURN(0);
+ }
+ allocated_size= buff_size;
+ }
}
else
{
- DBUG_ASSERT(num_records == m_idx_array.size());
- DBUG_ASSERT(record_length == m_record_length);
+ if (!(sort_keys= (uchar**) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC))))
+ DBUG_RETURN(0);
+ allocated_size= buff_size;
}
+
+ m_idx_array= Idx_array(sort_keys, num_records);
+ m_record_length= record_length;
+ start_of_data= m_idx_array.array() + m_idx_array.size();
+ m_start_of_data= reinterpret_cast<uchar*>(start_of_data);
+
DBUG_RETURN(m_idx_array.array());
}
@@ -117,8 +152,7 @@ uchar **Filesort_buffer::alloc_sort_buffer(uint num_records, uint record_length)
void Filesort_buffer::free_sort_buffer()
{
my_free(m_idx_array.array());
- m_idx_array= Idx_array();
- m_record_length= 0;
+ m_idx_array.reset();
m_start_of_data= NULL;
}
diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h
index 00fa6f2566b..d537b602edf 100644
--- a/sql/filesort_utils.h
+++ b/sql/filesort_utils.h
@@ -60,9 +60,23 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
class Filesort_buffer
{
public:
- Filesort_buffer() :
- m_idx_array(), m_record_length(0), m_start_of_data(NULL)
+ Filesort_buffer()
+ : m_idx_array(), m_start_of_data(NULL), allocated_size(0)
{}
+
+ ~Filesort_buffer()
+ {
+ my_free(m_idx_array.array());
+ }
+
+ bool is_allocated()
+ {
+ return m_idx_array.array() != 0;
+ }
+ void reset()
+ {
+ m_idx_array.reset();
+ }
/** Sort me... */
void sort_buffer(const Sort_param *param, uint count);
@@ -84,20 +98,12 @@ public:
/// Returns total size: pointer array + record buffers.
size_t sort_buffer_size() const
{
- return m_idx_array.size() * (m_record_length + sizeof(uchar*));
+ return allocated_size;
}
/// Allocates the buffer, but does *not* initialize pointers.
uchar **alloc_sort_buffer(uint num_records, uint record_length);
-
- /// Check <num_records, record_length> for the buffer
- bool check_sort_buffer_properties(uint num_records, uint record_length)
- {
- return (static_cast<uint>(m_idx_array.size()) == num_records &&
- m_record_length == record_length);
- }
-
/// Frees the buffer.
void free_sort_buffer();
@@ -115,15 +121,17 @@ public:
m_idx_array= rhs.m_idx_array;
m_record_length= rhs.m_record_length;
m_start_of_data= rhs.m_start_of_data;
+ allocated_size= rhs.allocated_size;
return *this;
}
private:
typedef Bounds_checked_array<uchar*> Idx_array;
- Idx_array m_idx_array;
+ Idx_array m_idx_array; /* Pointers to key data */
uint m_record_length;
- uchar *m_start_of_data;
+ uchar *m_start_of_data; /* Start of key data */
+ size_t allocated_size;
};
#endif // FILESORT_UTILS_INCLUDED
diff --git a/sql/gcalc_slicescan.cc b/sql/gcalc_slicescan.cc
index 644ab4b8710..ba75a1ed827 100644
--- a/sql/gcalc_slicescan.cc
+++ b/sql/gcalc_slicescan.cc
@@ -177,6 +177,17 @@ Gcalc_dyn_list::Gcalc_dyn_list(size_t blk_size, size_t sizeof_item):
{}
+Gcalc_dyn_list::Gcalc_dyn_list(const Gcalc_dyn_list &dl)
+{
+ m_blk_size= dl.m_blk_size;
+ m_sizeof_item= dl.m_sizeof_item;
+ m_points_per_blk= dl.m_points_per_blk;
+ m_blk_hook= &m_first_blk;
+ m_free= NULL;
+ m_keep= NULL;
+}
+
+
void Gcalc_dyn_list::format_blk(void* block)
{
Item *pi_end, *cur_pi, *first_pi;
diff --git a/sql/gcalc_slicescan.h b/sql/gcalc_slicescan.h
index b9516fc8d8c..ebf173c1a57 100644
--- a/sql/gcalc_slicescan.h
+++ b/sql/gcalc_slicescan.h
@@ -63,6 +63,7 @@ public:
};
Gcalc_dyn_list(size_t blk_size, size_t sizeof_item);
+ Gcalc_dyn_list(const Gcalc_dyn_list &dl);
~Gcalc_dyn_list();
Item *new_item()
{
@@ -229,6 +230,12 @@ public:
Gcalc_dyn_list(blk_size, sizeof(Info)),
m_hook(&m_first), m_n_points(0)
{}
+
+ Gcalc_heap(const Gcalc_heap &gh) :
+ Gcalc_dyn_list(gh),
+ m_hook(&m_first), m_n_points(0)
+ {}
+
void set_extent(double xmin, double xmax, double ymin, double ymax);
Info *new_point_info(double x, double y, gcalc_shape_info shape);
void free_point_info(Info *i, Gcalc_dyn_list::Item **i_hook);
diff --git a/sql/gcalc_tools.cc b/sql/gcalc_tools.cc
index 71118ae1c9f..b472665d0d2 100644
--- a/sql/gcalc_tools.cc
+++ b/sql/gcalc_tools.cc
@@ -663,6 +663,17 @@ Gcalc_operation_reducer::Gcalc_operation_reducer(size_t blk_size) :
{}
+Gcalc_operation_reducer::Gcalc_operation_reducer(
+ const Gcalc_operation_reducer &gor) :
+ Gcalc_dyn_list(gor),
+#ifndef GCALC_DBUG_OFF
+ n_res_points(0),
+#endif /*GCALC_DBUG_OFF*/
+ m_res_hook((Gcalc_dyn_list::Item **)&m_result),
+ m_first_active_thread(NULL)
+{}
+
+
void Gcalc_operation_reducer::init(Gcalc_function *fn, modes mode)
{
m_fn= fn;
diff --git a/sql/gcalc_tools.h b/sql/gcalc_tools.h
index 8bda3c144a6..4d5aec0d443 100644
--- a/sql/gcalc_tools.h
+++ b/sql/gcalc_tools.h
@@ -224,6 +224,7 @@ public:
};
Gcalc_operation_reducer(size_t blk_size=8192);
+ Gcalc_operation_reducer(const Gcalc_operation_reducer &gor);
void init(Gcalc_function *fn, modes mode= default_mode);
Gcalc_operation_reducer(Gcalc_function *fn, modes mode= default_mode,
size_t blk_size=8192);
diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc
index 3a3273d279b..05ac7e4fa42 100644
--- a/sql/gen_lex_hash.cc
+++ b/sql/gen_lex_hash.cc
@@ -403,8 +403,8 @@ int main(int argc,char **argv)
static SYMBOL *get_hash_symbol(const char *s,\n\
unsigned int len,bool function)\n\
{\n\
- register uchar *hash_map;\n\
- register const char *cur_str= s;\n\
+ uchar *hash_map;\n\
+ const char *cur_str= s;\n\
\n\
if (len == 0) {\n\
DBUG_PRINT(\"warning\", (\"get_hash_symbol() received a request for a zero-length symbol, which is probably a mistake.\"));\
@@ -416,25 +416,25 @@ static SYMBOL *get_hash_symbol(const char *s,\n\
if (function){\n\
if (len>sql_functions_max_len) return 0;\n\
hash_map= sql_functions_map;\n\
- register uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\
+ uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\
\n\
for (;;){\n\
- register uchar first_char= (uchar)cur_struct;\n\
+ uchar first_char= (uchar)cur_struct;\n\
\n\
if (first_char == 0)\n\
{\n\
- register int16 ires= (int16)(cur_struct>>16);\n\
+ int16 ires= (int16)(cur_struct>>16);\n\
if (ires==array_elements(symbols)) return 0;\n\
- register SYMBOL *res;\n\
+ SYMBOL *res;\n\
if (ires>=0) \n\
res= symbols+ires;\n\
else\n\
res= sql_functions-ires-1;\n\
- register uint count= (uint) (cur_str - s);\n\
+ uint count= (uint) (cur_str - s);\n\
return lex_casecmp(cur_str,res->name+count,len-count) ? 0 : res;\n\
}\n\
\n\
- register uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\
+ uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\
if (cur_char<first_char) return 0;\n\
cur_struct>>=8;\n\
if (cur_char>(uchar)cur_struct) return 0;\n\
@@ -450,20 +450,20 @@ static SYMBOL *get_hash_symbol(const char *s,\n\
}else{\n\
if (len>symbols_max_len) return 0;\n\
hash_map= symbols_map;\n\
- register uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\
+ uint32 cur_struct= uint4korr(hash_map+((len-1)*4));\n\
\n\
for (;;){\n\
- register uchar first_char= (uchar)cur_struct;\n\
+ uchar first_char= (uchar)cur_struct;\n\
\n\
- if (first_char==0){\n\
- register int16 ires= (int16)(cur_struct>>16);\n\
+ if (first_char==0) {\n\
+ int16 ires= (int16)(cur_struct>>16);\n\
if (ires==array_elements(symbols)) return 0;\n\
- register SYMBOL *res= symbols+ires;\n\
- register uint count= (uint) (cur_str - s);\n\
+ SYMBOL *res= symbols+ires;\n\
+ uint count= (uint) (cur_str - s);\n\
return lex_casecmp(cur_str,res->name+count,len-count)!=0 ? 0 : res;\n\
}\n\
\n\
- register uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\
+ uchar cur_char= (uchar)to_upper_lex[(uchar)*cur_str];\n\
if (cur_char<first_char) return 0;\n\
cur_struct>>=8;\n\
if (cur_char>(uchar)cur_struct) return 0;\n\
diff --git a/sql/gen_lex_token.cc b/sql/gen_lex_token.cc
index eefe9163819..bd2b9728177 100644
--- a/sql/gen_lex_token.cc
+++ b/sql/gen_lex_token.cc
@@ -79,7 +79,7 @@ void set_token(int tok, const char *str)
}
compiled_token_array[tok].m_token_string= str;
- compiled_token_array[tok].m_token_length= strlen(str);
+ compiled_token_array[tok].m_token_length= (int)strlen(str);
compiled_token_array[tok].m_append_space= true;
compiled_token_array[tok].m_start_expr= false;
}
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 2167bea8d7c..8700610415c 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2005, 2017, Oracle and/or its affiliates.
- Copyright (c) 2009, 2017, MariaDB
+ Copyright (c) 2009, 2018, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -401,7 +401,7 @@ const char *ha_partition::table_type() const
ha_partition::~ha_partition()
{
- DBUG_ENTER("ha_partition::~ha_partition()");
+ DBUG_ENTER("ha_partition::~ha_partition");
if (m_new_partitions_share_refs.elements)
m_new_partitions_share_refs.delete_elements();
if (m_file != NULL)
@@ -413,8 +413,12 @@ ha_partition::~ha_partition()
destroy_record_priority_queue();
my_free(m_part_ids_sorted_by_num_of_records);
+ if (m_added_file)
+ {
+ for (handler **ph= m_added_file; *ph; ph++)
+ delete (*ph);
+ }
clear_handler_file();
-
free_root(&m_mem_root, MYF(0));
DBUG_VOID_RETURN;
@@ -612,7 +616,7 @@ int ha_partition::create_partitioning_metadata(const char *path,
const char *old_path,
int action_flag)
{
- DBUG_ENTER("ha_partition::create_partitioning_metadata()");
+ DBUG_ENTER("ha_partition::create_partitioning_metadata");
/*
We need to update total number of parts since we might write the handler
@@ -1300,8 +1304,8 @@ static bool print_admin_msg(THD* thd, uint len,
length=(uint) (strxmov(name, db_name, ".", table_name.c_ptr_safe(), NullS) - name);
/*
TODO: switch from protocol to push_warning here. The main reason we didn't
- it yet is parallel repair. Due to following trace:
- mi_check_print_msg/push_warning/sql_alloc/my_pthread_getspecific_ptr.
+    it yet is parallel repair, whose threads have no THD object accessible via
+ current_thd.
Also we likely need to lock mutex here (in both cases with protocol and
push_warning).
@@ -2485,7 +2489,7 @@ register_query_cache_dependant_tables(THD *thd,
part= i * num_subparts + j;
/* we store the end \0 as part of the key */
end= strmov(engine_pos, sub_elem->partition_name);
- length= end - engine_key;
+ length= (uint)(end - engine_key);
/* Copy the suffix also to query cache key */
memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end));
if (reg_query_cache_dependant_table(thd, engine_key, length,
@@ -2501,7 +2505,7 @@ register_query_cache_dependant_tables(THD *thd,
else
{
char *end= engine_pos+1; // copy end \0
- uint length= end - engine_key;
+ uint length= (uint)(end - engine_key);
/* Copy the suffix also to query cache key */
memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end));
if (reg_query_cache_dependant_table(thd, engine_key, length,
@@ -3447,7 +3451,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
}
m_start_key.length= 0;
m_rec0= table->record[0];
- m_rec_length= table_share->stored_rec_length;
+ m_rec_length= table_share->reclength;
if (!m_part_ids_sorted_by_num_of_records)
{
if (!(m_part_ids_sorted_by_num_of_records=
@@ -3808,6 +3812,8 @@ int ha_partition::external_lock(THD *thd, int lock_type)
(void) (*file)->ha_external_lock(thd, lock_type);
} while (*(++file));
}
+ if (lock_type == F_WRLCK && m_part_info->part_expr)
+ m_part_info->part_expr->walk(&Item::register_field_in_read_map, 1, 0);
DBUG_RETURN(0);
err_handler:
@@ -3947,6 +3953,8 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
/* Add partition to be called in reset(). */
bitmap_set_bit(&m_partitions_to_reset, i);
}
+ if (lock_type == F_WRLCK && m_part_info->part_expr)
+ m_part_info->part_expr->walk(&Item::register_field_in_read_map, 1, 0);
DBUG_RETURN(error);
}
@@ -4781,8 +4789,8 @@ int ha_partition::rnd_init(bool scan)
}
/* Now we see what the index of our first important partition is */
- DBUG_PRINT("info", ("m_part_info->read_partitions: 0x%lx",
- (long) m_part_info->read_partitions.bitmap));
+ DBUG_PRINT("info", ("m_part_info->read_partitions: %p",
+ m_part_info->read_partitions.bitmap));
part_id= bitmap_get_first_set(&(m_part_info->read_partitions));
DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id));
@@ -6717,7 +6725,7 @@ int ha_partition::info(uint flag)
/* Get variables if not already done */
if (!(flag & HA_STATUS_VARIABLE) ||
!bitmap_is_set(&(m_part_info->read_partitions),
- (file_array - m_file)))
+ (uint)(file_array - m_file)))
file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
if (file->stats.records > max_records)
{
@@ -6803,6 +6811,24 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info,
}
+static int extra_cb(handler *h, void *operation)
+{
+ return h->extra(*(enum ha_extra_function*)operation);
+}
+
+
+static int start_keyread_cb(handler* h, void *p)
+{
+ return h->ha_start_keyread(*(uint*)p);
+}
+
+
+static int end_keyread_cb(handler* h, void *unused)
+{
+ return h->ha_end_keyread();
+}
+
+
/**
General function to prepare handler for certain behavior.
@@ -7123,11 +7149,12 @@ int ha_partition::extra(enum ha_extra_function operation)
switch (operation) {
/* Category 1), used by most handlers */
- case HA_EXTRA_KEYREAD:
case HA_EXTRA_NO_KEYREAD:
+ DBUG_RETURN(loop_partitions(end_keyread_cb, NULL));
+ case HA_EXTRA_KEYREAD:
case HA_EXTRA_FLUSH:
case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE:
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
case HA_EXTRA_PREPARE_FOR_RENAME:
case HA_EXTRA_FORCE_REOPEN:
DBUG_RETURN(loop_extra_alter(operation));
@@ -7139,7 +7166,7 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
{
if (!m_myisam)
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
break;
}
@@ -7163,7 +7190,7 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_PREPARE_FOR_DROP:
case HA_EXTRA_FLUSH_CACHE:
{
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
}
case HA_EXTRA_NO_READCHECK:
{
@@ -7195,7 +7222,7 @@ int ha_partition::extra(enum ha_extra_function operation)
m_extra_cache_size= 0;
m_extra_prepare_for_update= FALSE;
m_extra_cache_part_id= NO_CURRENT_PART_ID;
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
}
case HA_EXTRA_IGNORE_NO_KEY:
case HA_EXTRA_NO_IGNORE_NO_KEY:
@@ -7229,7 +7256,7 @@ int ha_partition::extra(enum ha_extra_function operation)
}
/* Category 7), used by federated handlers */
case HA_EXTRA_INSERT_WITH_UPDATE:
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
/* Category 8) Operations only used by NDB */
case HA_EXTRA_DELETE_CANNOT_BATCH:
case HA_EXTRA_UPDATE_CANNOT_BATCH:
@@ -7253,6 +7280,10 @@ int ha_partition::extra(enum ha_extra_function operation)
*/
case HA_EXTRA_MARK_AS_LOG_TABLE:
DBUG_RETURN(ER_UNSUPORTED_LOG_ENGINE);
+ case HA_EXTRA_BEGIN_ALTER_COPY:
+ case HA_EXTRA_END_ALTER_COPY:
+ case HA_EXTRA_FAKE_START_STMT:
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
default:
{
/* Temporary crash to discover what is wrong */
@@ -7297,24 +7328,40 @@ int ha_partition::reset(void)
}
/*
- Special extra method for HA_EXTRA_CACHE with cachesize as extra parameter
+ Special extra method with additional parameter
+ See @ref ha_partition::extra
- SYNOPSIS
- extra_opt()
- operation Must be HA_EXTRA_CACHE
- cachesize Size of cache in full table scan
+ @param[in] operation operation to execute
+ @param[in] arg extra argument
- RETURN VALUE
- >0 Error code
- 0 Success
+ @return status
+ @retval 0 success
+ @retval >0 error code
+
+  @details
+ Operations supported by extra_opt:
+ HA_EXTRA_KEYREAD:
+ arg is interpreted as key index
+ HA_EXTRA_CACHE:
+ arg is interpreted as size of cache in full table scan
+
+ For detailed description refer to @ref ha_partition::extra
*/
-int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
+int ha_partition::extra_opt(enum ha_extra_function operation, ulong arg)
{
- DBUG_ENTER("ha_partition::extra_opt()");
+ DBUG_ENTER("ha_partition::extra_opt");
- DBUG_ASSERT(HA_EXTRA_CACHE == operation);
- prepare_extra_cache(cachesize);
+ switch (operation)
+ {
+ case HA_EXTRA_KEYREAD:
+ DBUG_RETURN(loop_partitions(start_keyread_cb, &arg));
+ case HA_EXTRA_CACHE:
+ prepare_extra_cache(arg);
+ DBUG_RETURN(0);
+ default:
+ DBUG_ASSERT(0);
+ }
DBUG_RETURN(0);
}
@@ -7332,7 +7379,7 @@ int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
void ha_partition::prepare_extra_cache(uint cachesize)
{
- DBUG_ENTER("ha_partition::prepare_extra_cache()");
+ DBUG_ENTER("ha_partition::prepare_extra_cache");
DBUG_PRINT("info", ("cachesize %u", cachesize));
m_extra_cache= TRUE;
@@ -7362,7 +7409,7 @@ int ha_partition::loop_extra_alter(enum ha_extra_function operation)
{
int result= 0, tmp;
handler **file;
- DBUG_ENTER("ha_partition::loop_extra_alter()");
+ DBUG_ENTER("ha_partition::loop_extra_alter");
DBUG_ASSERT(operation == HA_EXTRA_PREPARE_FOR_RENAME ||
operation == HA_EXTRA_FORCE_REOPEN);
@@ -7378,34 +7425,33 @@ int ha_partition::loop_extra_alter(enum ha_extra_function operation)
if ((tmp= (*file)->extra(operation)))
result= tmp;
}
- if ((tmp= loop_extra(operation)))
+ if ((tmp= loop_partitions(extra_cb, &operation)))
result= tmp;
DBUG_RETURN(result);
}
-/*
- Call extra on all partitions
+/**
+ Call callback(part, param) on all partitions
- SYNOPSIS
- loop_extra()
- operation extra operation type
+ @param callback a callback to call for each partition
+ @param param a void*-parameter passed to callback
- RETURN VALUE
- >0 Error code
- 0 Success
+ @return Operation status
+ @retval >0 Error code
+ @retval 0 Success
*/
-int ha_partition::loop_extra(enum ha_extra_function operation)
+int ha_partition::loop_partitions(handler_callback callback, void *param)
{
int result= 0, tmp;
uint i;
- DBUG_ENTER("ha_partition::loop_extra()");
+ DBUG_ENTER("ha_partition::loop_partitions");
for (i= bitmap_get_first_set(&m_part_info->lock_partitions);
i < m_tot_parts;
i= bitmap_get_next_set(&m_part_info->lock_partitions, i))
{
- if ((tmp= m_file[i]->extra(operation)))
+ if ((tmp= callback(m_file[i], param)))
result= tmp;
}
/* Add all used partitions to be called in reset(). */
@@ -7683,7 +7729,7 @@ ha_rows ha_partition::estimate_rows_upper_bound()
do
{
- if (bitmap_is_set(&(m_part_info->read_partitions), (file - m_file)))
+ if (bitmap_is_set(&(m_part_info->read_partitions), (uint)(file - m_file)))
{
rows= (*file)->estimate_rows_upper_bound();
if (rows == HA_POS_ERROR)
@@ -7977,7 +8023,7 @@ void ha_partition::append_row_to_str(String &str)
{
Field **field_ptr;
if (!is_rec0)
- set_field_ptr(m_part_info->full_part_field_array, rec,
+ table->move_fields(m_part_info->full_part_field_array, rec,
table->record[0]);
/* No primary key, use full partition field array. */
for (field_ptr= m_part_info->full_part_field_array;
@@ -7991,7 +8037,7 @@ void ha_partition::append_row_to_str(String &str)
field_unpack(&str, field, rec, 0, false);
}
if (!is_rec0)
- set_field_ptr(m_part_info->full_part_field_array, table->record[0],
+ table->move_fields(m_part_info->full_part_field_array, table->record[0],
rec);
}
}
@@ -8474,18 +8520,6 @@ uint ha_partition::max_supported_keys() const
}
-uint ha_partition::extra_rec_buf_length() const
-{
- handler **file;
- uint max= (*m_file)->extra_rec_buf_length();
-
- for (file= m_file, file++; *file; file++)
- if (max < (*file)->extra_rec_buf_length())
- max= (*file)->extra_rec_buf_length();
- return max;
-}
-
-
uint ha_partition::min_record_length(uint options) const
{
handler **file;
@@ -8497,7 +8531,6 @@ uint ha_partition::min_record_length(uint options) const
return max;
}
-
/****************************************************************************
MODULE compare records
****************************************************************************/
@@ -8890,6 +8923,8 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
{
/* Only need to read the partitioning fields. */
bitmap_union(table->read_set, &m_part_info->full_part_field_set);
+ if (table->vcol_set)
+ bitmap_union(table->vcol_set, &m_part_info->full_part_field_set);
}
if ((result= m_file[read_part_id]->ha_rnd_init(1)))
@@ -9102,13 +9137,9 @@ int ha_partition::check_for_upgrade(HA_CHECK_OPT *check_opt)
}
m_part_info->key_algorithm= partition_info::KEY_ALGORITHM_51;
if (skip_generation ||
- !(part_buf= generate_partition_syntax(m_part_info,
+ !(part_buf= generate_partition_syntax_for_frm(thd, m_part_info,
&part_buf_len,
- true,
- true,
- NULL,
- NULL,
- NULL)) ||
+ NULL, NULL)) ||
print_admin_msg(thd, SQL_ADMIN_MSG_TEXT_SIZE + 1, "error",
table_share->db.str,
table->alias,
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 23d02337359..9a73eeff817 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -21,13 +21,6 @@
#include "sql_partition.h" /* part_id_range, partition_element */
#include "queues.h" /* QUEUE */
-enum partition_keywords
-{
- PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR,
- PKW_COLUMNS, PKW_ALGORITHM
-};
-
-
#define PARTITION_BYTES_IN_POS 2
@@ -656,7 +649,7 @@ public:
void get_dynamic_partition_info(PARTITION_STATS *stat_info,
uint part_id);
virtual int extra(enum ha_extra_function operation);
- virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
+ virtual int extra_opt(enum ha_extra_function operation, ulong arg);
virtual int reset(void);
virtual uint count_query_cache_dependant_tables(uint8 *tables_type);
virtual my_bool
@@ -666,6 +659,8 @@ public:
uint *n);
private:
+ typedef int handler_callback(handler *, void *);
+
my_bool reg_query_cache_dependant_table(THD *thd,
char *engine_key,
uint engine_key_len,
@@ -676,7 +671,7 @@ private:
**block_table,
handler *file, uint *n);
static const uint NO_CURRENT_PART_ID;
- int loop_extra(enum ha_extra_function operation);
+ int loop_partitions(handler_callback callback, void *param);
int loop_extra_alter(enum ha_extra_function operations);
void late_extra_cache(uint partition_id);
void late_extra_no_cache(uint partition_id);
@@ -1012,12 +1007,6 @@ public:
virtual uint max_supported_key_parts() const;
virtual uint max_supported_key_length() const;
virtual uint max_supported_key_part_length() const;
-
- /*
- The extra record buffer length is the maximum needed by all handlers.
- The minimum record length is the maximum of all involved handlers.
- */
- virtual uint extra_rec_buf_length() const;
virtual uint min_record_length(uint options) const;
/*
diff --git a/sql/handler.cc b/sql/handler.cc
index 87331a41db6..ad1bad59efa 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -31,11 +31,10 @@
#include "sql_table.h" // build_table_filename
#include "sql_parse.h" // check_stack_overrun
#include "sql_acl.h" // SUPER_ACL
-#include "sql_base.h" // free_io_cache
+#include "sql_base.h" // TDC_element
#include "discover.h" // extension_based_table_discovery, etc
#include "log_event.h" // *_rows_log_event
#include "create_options.h"
-#include "rpl_filter.h"
#include <myisampack.h>
#include "transaction.h"
#include "myisam.h"
@@ -259,7 +258,7 @@ handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
{
handler *file;
DBUG_ENTER("get_new_handler");
- DBUG_PRINT("enter", ("alloc: 0x%lx", (long) alloc));
+ DBUG_PRINT("enter", ("alloc: %p", alloc));
if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create)
{
@@ -305,7 +304,7 @@ handler *get_ha_partition(partition_info *part_info)
static const char **handler_errmsgs;
C_MODE_START
-static const char **get_handler_errmsgs(void)
+static const char **get_handler_errmsgs(int nr)
{
return handler_errmsgs;
}
@@ -369,7 +368,7 @@ int ha_init_errors(void)
SETMSG(HA_ERR_NO_CONNECTION, "Could not connect to storage engine");
SETMSG(HA_ERR_TABLE_DEF_CHANGED, ER_DEFAULT(ER_TABLE_DEF_CHANGED));
SETMSG(HA_ERR_FOREIGN_DUPLICATE_KEY, "FK constraint would lead to duplicate key");
- SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, "Table upgrade required. Please do \"REPAIR TABLE %`\" or dump/reload to fix it");
+ SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER_DEFAULT(ER_TABLE_NEEDS_UPGRADE));
SETMSG(HA_ERR_TABLE_READONLY, ER_DEFAULT(ER_OPEN_AS_READONLY));
SETMSG(HA_ERR_AUTOINC_READ_FAILED, ER_DEFAULT(ER_AUTOINC_READ_FAILED));
SETMSG(HA_ERR_AUTOINC_ERANGE, ER_DEFAULT(ER_WARN_DATA_OUT_OF_RANGE));
@@ -380,6 +379,8 @@ int ha_init_errors(void)
SETMSG(HA_ERR_TABLE_IN_FK_CHECK, ER_DEFAULT(ER_TABLE_IN_FK_CHECK));
SETMSG(HA_ERR_DISK_FULL, ER_DEFAULT(ER_DISK_FULL));
SETMSG(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE, "Too many words in a FTS phrase or proximity search");
+ SETMSG(HA_ERR_FK_DEPTH_EXCEEDED, "Foreign key cascade delete/update exceeds");
+ SETMSG(HA_ERR_TABLESPACE_MISSING, ER_DEFAULT(ER_TABLESPACE_MISSING));
/* Register the error messages for use with my_error(). */
return my_error_register(get_handler_errmsgs, HA_ERR_FIRST, HA_ERR_LAST);
@@ -396,12 +397,10 @@ int ha_init_errors(void)
*/
static int ha_finish_errors(void)
{
- const char **errmsgs;
-
/* Allocate a pointer array for the error message strings. */
- if (! (errmsgs= my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST)))
- return 1;
- my_free(errmsgs);
+ my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST);
+ my_free(handler_errmsgs);
+ handler_errmsgs= 0;
return 0;
}
@@ -796,8 +795,8 @@ static my_bool closecon_handlerton(THD *thd, plugin_ref plugin,
void ha_close_connection(THD* thd)
{
plugin_foreach_with_mask(thd, closecon_handlerton,
- MYSQL_STORAGE_ENGINE_PLUGIN,
- PLUGIN_IS_DELETED|PLUGIN_IS_READY, 0);
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ PLUGIN_IS_DELETED|PLUGIN_IS_READY, 0);
}
static my_bool kill_handlerton(THD *thd, plugin_ref plugin,
@@ -1376,7 +1375,7 @@ int ha_commit_trans(THD *thd, bool all)
uint rw_ha_count= ha_check_and_coalesce_trx_read_only(thd, ha_info, all);
/* rw_trans is TRUE when we in a transaction changing data */
bool rw_trans= is_real_trans &&
- (rw_ha_count > !thd->is_current_stmt_binlog_disabled());
+ (rw_ha_count > (thd->is_current_stmt_binlog_disabled()?0U:1U));
MDL_request mdl_request;
DBUG_PRINT("info", ("is_real_trans: %d rw_trans: %d rw_ha_count: %d",
is_real_trans, rw_trans, rw_ha_count));
@@ -1676,7 +1675,7 @@ int ha_rollback_trans(THD *thd, bool all)
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
#ifdef WITH_WSREP
- WSREP_WARN("handlerton rollback failed, thd %lu %lld conf %d SQL %s",
+ WSREP_WARN("handlerton rollback failed, thd %llu %lld conf %d SQL %s",
thd->thread_id, thd->query_id, thd->wsrep_conflict_state,
thd->query());
#endif /* WITH_WSREP */
@@ -1828,6 +1827,35 @@ static char* xid_to_str(char *buf, XID *xid)
}
#endif
+#ifdef WITH_WSREP
+static my_xid wsrep_order_and_check_continuity(XID *list, int len)
+{
+ wsrep_sort_xid_array(list, len);
+ wsrep_uuid_t uuid;
+ wsrep_seqno_t seqno;
+ if (wsrep_get_SE_checkpoint(uuid, seqno))
+ {
+ WSREP_ERROR("Could not read wsrep SE checkpoint for recovery");
+ return 0;
+ }
+ long long cur_seqno= seqno;
+ for (int i= 0; i < len; ++i)
+ {
+ if (!wsrep_is_wsrep_xid(list + i) ||
+ wsrep_xid_seqno(*(list + i)) != cur_seqno + 1)
+ {
+ WSREP_WARN("Discovered discontinuity in recovered wsrep "
+ "transaction XIDs. Truncating the recovery list to "
+ "%d entries", i);
+ break;
+ }
+ ++cur_seqno;
+ }
+ WSREP_INFO("Last wsrep seqno to be recovered %lld", cur_seqno);
+ return (cur_seqno < 0 ? 0 : cur_seqno);
+}
+#endif /* WITH_WSREP */
+
/**
recover() step of xa.
@@ -1865,6 +1893,19 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin,
{
sql_print_information("Found %d prepared transaction(s) in %s",
got, hton_name(hton)->str);
+#ifdef WITH_WSREP
+ /* If wsrep_on=ON, XIDs are first ordered and then the range of
+ recovered XIDs is checked for continuity. All the XIDs which
+ are in continuous range can be safely committed if binlog
+     is off since they have already been ordered and certified in the
+ cluster. */
+ my_xid wsrep_limit= 0;
+ if (WSREP_ON)
+ {
+ wsrep_limit= wsrep_order_and_check_continuity(info->list, got);
+ }
+#endif /* WITH_WSREP */
+
for (int i=0; i < got; i ++)
{
my_xid x= WSREP_ON && wsrep_is_wsrep_xid(&info->list[i]) ?
@@ -1880,15 +1921,21 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin,
info->found_foreign_xids++;
continue;
}
- if (info->dry_run)
+ if (IF_WSREP(!(wsrep_emulate_bin_log &&
+ wsrep_is_wsrep_xid(info->list + i) &&
+ x <= wsrep_limit) && info->dry_run,
+ info->dry_run))
{
info->found_my_xids++;
continue;
}
// recovery mode
- if (info->commit_list ?
- my_hash_search(info->commit_list, (uchar *)&x, sizeof(x)) != 0 :
- tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT)
+ if (IF_WSREP((wsrep_emulate_bin_log &&
+ wsrep_is_wsrep_xid(info->list + i) &&
+ x <= wsrep_limit), false) ||
+ (info->commit_list ?
+ my_hash_search(info->commit_list, (uchar *)&x, sizeof(x)) != 0 :
+ tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT))
{
#ifndef DBUG_OFF
int rc=
@@ -2051,44 +2098,6 @@ commit_checkpoint_notify_ha(handlerton *hton, void *cookie)
/**
- @details
- This function should be called when MySQL sends rows of a SELECT result set
- or the EOF mark to the client. It releases a possible adaptive hash index
- S-latch held by thd in InnoDB and also releases a possible InnoDB query
- FIFO ticket to enter InnoDB. To save CPU time, InnoDB allows a thd to
- keep them over several calls of the InnoDB handler interface when a join
- is executed. But when we let the control to pass to the client they have
- to be released because if the application program uses mysql_use_result(),
- it may deadlock on the S-latch if the application on another connection
- performs another SQL query. In MySQL-4.1 this is even more important because
- there a connection can have several SELECT queries open at the same time.
-
- @param thd the thread handle of the current connection
-
- @return
- always 0
-*/
-
-int ha_release_temporary_latches(THD *thd)
-{
- Ha_trx_info *info;
-
- /*
- Note that below we assume that only transactional storage engines
- may need release_temporary_latches(). If this will ever become false,
- we could iterate on thd->open_tables instead (and remove duplicates
- as if (!seen[hton->slot]) { seen[hton->slot]=1; ... }).
- */
- for (info= thd->transaction.stmt.ha_list; info; info= info->next())
- {
- handlerton *hton= info->ht();
- if (hton && hton->release_temporary_latches)
- hton->release_temporary_latches(hton, thd);
- }
- return 0;
-}
-
-/**
Check if all storage engines used in transaction agree that after
rollback to savepoint it is safe to release MDL locks acquired after
savepoint creation.
@@ -2247,7 +2256,8 @@ static my_bool snapshot_handlerton(THD *thd, plugin_ref plugin,
if (hton->state == SHOW_OPTION_YES &&
hton->start_consistent_snapshot)
{
- hton->start_consistent_snapshot(hton, thd);
+ if (hton->start_consistent_snapshot(hton, thd))
+ return TRUE;
*((bool *)arg)= false;
}
return FALSE;
@@ -2255,7 +2265,7 @@ static my_bool snapshot_handlerton(THD *thd, plugin_ref plugin,
int ha_start_consistent_snapshot(THD *thd)
{
- bool warn= true;
+ bool err, warn= true;
/*
Holding the LOCK_commit_ordered mutex ensures that we get the same
@@ -2265,9 +2275,15 @@ int ha_start_consistent_snapshot(THD *thd)
have a consistent binlog position.
*/
mysql_mutex_lock(&LOCK_commit_ordered);
- plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn);
+ err= plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn);
mysql_mutex_unlock(&LOCK_commit_ordered);
+ if (err)
+ {
+ ha_rollback_trans(thd, true);
+ return 1;
+ }
+
/*
Same idea as when one wants to CREATE TABLE in one engine which does not
exist:
@@ -2614,6 +2630,8 @@ int handler::ha_rnd_next(uchar *buf)
if (!result)
{
update_rows_read();
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
increment_statistics(&SSV::ha_read_rnd_next_count);
}
else if (result == HA_ERR_RECORD_DELETED)
@@ -2638,7 +2656,11 @@ int handler::ha_rnd_pos(uchar *buf, uchar *pos)
{ result= rnd_pos(buf, pos); })
increment_statistics(&SSV::ha_read_rnd_count);
if (!result)
+ {
update_rows_read();
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
+ }
table->status=result ? STATUS_NOT_FOUND: 0;
DBUG_RETURN(result);
}
@@ -2657,7 +2679,11 @@ int handler::ha_index_read_map(uchar *buf, const uchar *key,
{ result= index_read_map(buf, key, keypart_map, find_flag); })
increment_statistics(&SSV::ha_read_key_count);
if (!result)
+ {
update_index_statistics();
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
+ }
table->status=result ? STATUS_NOT_FOUND: 0;
DBUG_RETURN(result);
}
@@ -2684,6 +2710,8 @@ int handler::ha_index_read_idx_map(uchar *buf, uint index, const uchar *key,
{
update_rows_read();
index_rows_read[index]++;
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
}
table->status=result ? STATUS_NOT_FOUND: 0;
return result;
@@ -2701,7 +2729,11 @@ int handler::ha_index_next(uchar * buf)
{ result= index_next(buf); })
increment_statistics(&SSV::ha_read_next_count);
if (!result)
+ {
update_index_statistics();
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
+ }
table->status=result ? STATUS_NOT_FOUND: 0;
DBUG_RETURN(result);
}
@@ -2718,7 +2750,11 @@ int handler::ha_index_prev(uchar * buf)
{ result= index_prev(buf); })
increment_statistics(&SSV::ha_read_prev_count);
if (!result)
+ {
update_index_statistics();
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
+ }
table->status=result ? STATUS_NOT_FOUND: 0;
DBUG_RETURN(result);
}
@@ -2734,7 +2770,11 @@ int handler::ha_index_first(uchar * buf)
{ result= index_first(buf); })
increment_statistics(&SSV::ha_read_first_count);
if (!result)
+ {
update_index_statistics();
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
+ }
table->status=result ? STATUS_NOT_FOUND: 0;
return result;
}
@@ -2750,7 +2790,11 @@ int handler::ha_index_last(uchar * buf)
{ result= index_last(buf); })
increment_statistics(&SSV::ha_read_last_count);
if (!result)
+ {
update_index_statistics();
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
+ }
table->status=result ? STATUS_NOT_FOUND: 0;
return result;
}
@@ -2766,7 +2810,11 @@ int handler::ha_index_next_same(uchar *buf, const uchar *key, uint keylen)
{ result= index_next_same(buf, key, keylen); })
increment_statistics(&SSV::ha_read_next_count);
if (!result)
+ {
update_index_statistics();
+ if (table->vfield && buf == table->record[0])
+ table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
+ }
table->status=result ? STATUS_NOT_FOUND: 0;
return result;
}
@@ -2800,7 +2848,7 @@ int handler::ha_rnd_init_with_error(bool scan)
*/
int handler::read_first_row(uchar * buf, uint primary_key)
{
- register int error;
+ int error;
DBUG_ENTER("handler::read_first_row");
/*
@@ -2879,7 +2927,7 @@ void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
{
/*
If we have set THD::next_insert_id previously and plan to insert an
- explicitely-specified value larger than this, we need to increase
+ explicitly-specified value larger than this, we need to increase
THD::next_insert_id to be greater than the explicit value.
*/
if ((next_insert_id > 0) && (nr >= next_insert_id))
@@ -3136,6 +3184,7 @@ int handler::update_auto_increment()
if (unlikely(nr == ULONGLONG_MAX))
DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
+ DBUG_ASSERT(nr != 0);
DBUG_PRINT("info",("auto_increment: %llu nb_reserved_values: %llu",
nr, append ? nb_reserved_values : 0));
@@ -3228,8 +3277,8 @@ void handler::column_bitmaps_signal()
{
DBUG_ENTER("column_bitmaps_signal");
if (table)
- DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx",
- (long) table->read_set, (long) table->write_set));
+ DBUG_PRINT("info", ("read_set: %p write_set: %p",
+ table->read_set, table->write_set));
DBUG_VOID_RETURN;
}
@@ -3258,11 +3307,9 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
{
ulonglong nr;
int error;
+ MY_BITMAP *old_read_set;
- (void) extra(HA_EXTRA_KEYREAD);
- table->mark_columns_used_by_index_no_reset(table->s->next_number_index,
- table->read_set);
- column_bitmaps_signal();
+ old_read_set= table->prepare_for_keyread(table->s->next_number_index);
if (ha_index_init(table->s->next_number_index, 1))
{
@@ -3314,7 +3361,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
nr= ((ulonglong) table->next_number_field->
val_int_offset(table->s->rec_buff_length)+1);
ha_index_end();
- (void) extra(HA_EXTRA_NO_KEYREAD);
+ table->restore_column_maps_after_keyread(old_read_set);
*first_value= nr;
return;
}
@@ -3416,6 +3463,12 @@ void handler::print_error(int error, myf errflag)
DBUG_ENTER("handler::print_error");
DBUG_PRINT("enter",("error: %d",error));
+ if (ha_thd()->transaction_rollback_request)
+ {
+ /* Ensure this becomes a true error */
+ errflag&= ~(ME_JUST_WARNING | ME_JUST_INFO);
+ }
+
int textno= -1; // impossible value
switch (error) {
case EACCES:
@@ -3559,10 +3612,14 @@ void handler::print_error(int error, myf errflag)
textno=ER_LOCK_TABLE_FULL;
break;
case HA_ERR_LOCK_DEADLOCK:
- textno=ER_LOCK_DEADLOCK;
- /* cannot continue. the statement was already aborted in the engine */
- SET_FATAL_ERROR;
- break;
+ {
+ String str, full_err_msg(ER_DEFAULT(ER_LOCK_DEADLOCK), system_charset_info);
+
+ get_error_message(error, &str);
+ full_err_msg.append(str);
+ my_printf_error(ER_LOCK_DEADLOCK, "%s", errflag, full_err_msg.c_ptr_safe());
+ DBUG_VOID_RETURN;
+ }
case HA_ERR_READ_ONLY_TRANSACTION:
textno=ER_READ_ONLY_TRANSACTION;
break;
@@ -3607,9 +3664,10 @@ void handler::print_error(int error, myf errflag)
DBUG_VOID_RETURN;
}
case HA_ERR_TABLE_NEEDS_UPGRADE:
+ textno= ER_TABLE_NEEDS_UPGRADE;
my_error(ER_TABLE_NEEDS_UPGRADE, errflag,
"TABLE", table_share->table_name.str);
- break;
+ DBUG_VOID_RETURN;
case HA_ERR_NO_PARTITION_FOUND:
textno=ER_WRONG_PARTITION_NAME;
break;
@@ -3792,7 +3850,7 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
}
}
}
- if (table->s->frm_version != FRM_VER_TRUE_VARCHAR)
+ if (table->s->frm_version < FRM_VER_TRUE_VARCHAR)
return HA_ADMIN_NEEDS_ALTER;
if ((error= check_collation_compatibility()))
@@ -4009,9 +4067,7 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt)
if it is started.
*/
-inline
-void
-handler::mark_trx_read_write()
+void handler::mark_trx_read_write_internal()
{
Ha_trx_info *ha_info= &ha_thd()->ha_data[ht->slot].ha_info[0];
/*
@@ -4279,6 +4335,7 @@ handler::check_if_supported_inplace_alter(TABLE *altered_table,
Alter_inplace_info::ALTER_COLUMN_OPTION |
Alter_inplace_info::CHANGE_CREATE_OPTION |
Alter_inplace_info::ALTER_PARTITIONED |
+ Alter_inplace_info::ALTER_VIRTUAL_GCOL_EXPR |
Alter_inplace_info::ALTER_RENAME;
/* Is there at least one operation that requires copy algorithm? */
@@ -4766,7 +4823,7 @@ int ha_create_table(THD *thd, const char *path,
share.table_name.str, share.table_name.length);
}
- (void) closefrm(&table, 0);
+ (void) closefrm(&table);
err:
free_table_share(&share);
@@ -5004,7 +5061,7 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
@@ -5017,7 +5074,7 @@ public:
return TRUE;
}
- if (level == Sql_condition::WARN_LEVEL_ERROR)
+ if (*level == Sql_condition::WARN_LEVEL_ERROR)
m_unhandled_errors++;
return FALSE;
}
@@ -5130,7 +5187,9 @@ bool ha_table_exists(THD *thd, const char *db, const char *table_name,
Table_exists_error_handler no_such_table_handler;
thd->push_internal_handler(&no_such_table_handler);
- TABLE_SHARE *share= tdc_acquire_share(thd, db, table_name, flags);
+ table.init_one_table(db, strlen(db), table_name, strlen(table_name),
+ table_name, TL_READ);
+ TABLE_SHARE *share= tdc_acquire_share(thd, &table, flags);
thd->pop_internal_handler();
if (hton && share)
@@ -5464,7 +5523,7 @@ int handler::compare_key(key_range *range)
This is used by index condition pushdown implementation.
*/
-int handler::compare_key2(key_range *range)
+int handler::compare_key2(key_range *range) const
{
int cmp;
if (!range)
@@ -5579,9 +5638,9 @@ TYPELIB *ha_known_exts(void)
}
-static bool stat_print(THD *thd, const char *type, uint type_len,
- const char *file, uint file_len,
- const char *status, uint status_len)
+static bool stat_print(THD *thd, const char *type, size_t type_len,
+ const char *file, size_t file_len,
+ const char *status, size_t status_len)
{
Protocol *protocol= thd->protocol;
protocol->prepare_for_resend();
@@ -5660,30 +5719,38 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat)
correct for the table.
A row in the given table should be replicated if:
+ - It's not called by partition engine
- Row-based replication is enabled in the current thread
- The binlog is enabled
- It is not a temporary table
- The binary log is open
- The database the table resides in shall be binlogged (binlog_*_db rules)
- table is not mysql.event
+
+ RETURN VALUE
+ 0 No binary logging in row format
+ 1 Row needs to be logged
*/
-static bool check_table_binlog_row_based(THD *thd, TABLE *table)
+inline bool handler::check_table_binlog_row_based(bool binlog_row)
{
- if (table->s->cached_row_logging_check == -1)
+ if (unlikely((table->in_use->variables.sql_log_bin_off)))
+ return 0; /* Called by partitioning engine */
+ if (unlikely((!check_table_binlog_row_based_done)))
{
- int const check(table->s->tmp_table == NO_TMP_TABLE &&
- ! table->no_replicate &&
- binlog_filter->db_ok(table->s->db.str));
- table->s->cached_row_logging_check= check;
+ check_table_binlog_row_based_done= 1;
+ check_table_binlog_row_based_result=
+ check_table_binlog_row_based_internal(binlog_row);
}
+ return check_table_binlog_row_based_result;
+}
- DBUG_ASSERT(table->s->cached_row_logging_check == 0 ||
- table->s->cached_row_logging_check == 1);
+bool handler::check_table_binlog_row_based_internal(bool binlog_row)
+{
+ THD *thd= table->in_use;
- return (thd->is_current_stmt_binlog_format_row() &&
- table->s->cached_row_logging_check &&
-#ifdef WITH_WSREP
+ return (table->s->cached_row_logging_check &&
+ thd->is_current_stmt_binlog_format_row() &&
/*
Wsrep partially enables binary logging if it have not been
explicitly turned on. As a result we return 'true' if we are in
@@ -5698,14 +5765,13 @@ static bool check_table_binlog_row_based(THD *thd, TABLE *table)
Otherwise, return 'true' if binary logging is on.
*/
- (thd->variables.sql_log_bin_off != 1) &&
- ((WSREP_EMULATE_BINLOG(thd) && (thd->wsrep_exec_mode != REPL_RECV)) ||
- ((WSREP(thd) || (thd->variables.option_bits & OPTION_BIN_LOG)) &&
- mysql_bin_log.is_open())));
-#else
- (thd->variables.option_bits & OPTION_BIN_LOG) &&
- mysql_bin_log.is_open());
-#endif
+ IF_WSREP(((WSREP_EMULATE_BINLOG(thd) &&
+ (thd->wsrep_exec_mode != REPL_RECV)) ||
+ ((WSREP(thd) ||
+ (thd->variables.option_bits & OPTION_BIN_LOG)) &&
+ mysql_bin_log.is_open())),
+ (thd->variables.option_bits & OPTION_BIN_LOG) &&
+ mysql_bin_log.is_open()));
}
@@ -5733,60 +5799,57 @@ static bool check_table_binlog_row_based(THD *thd, TABLE *table)
static int write_locked_table_maps(THD *thd)
{
DBUG_ENTER("write_locked_table_maps");
- DBUG_PRINT("enter", ("thd: 0x%lx thd->lock: 0x%lx "
- "thd->extra_lock: 0x%lx",
- (long) thd, (long) thd->lock, (long) thd->extra_lock));
+ DBUG_PRINT("enter", ("thd:%p thd->lock:%p "
+ "thd->extra_lock: %p",
+ thd, thd->lock, thd->extra_lock));
DBUG_PRINT("debug", ("get_binlog_table_maps(): %d", thd->get_binlog_table_maps()));
- if (thd->get_binlog_table_maps() == 0)
+ MYSQL_LOCK *locks[2];
+ locks[0]= thd->extra_lock;
+ locks[1]= thd->lock;
+ my_bool with_annotate= thd->variables.binlog_annotate_row_events &&
+ thd->query() && thd->query_length();
+
+ for (uint i= 0 ; i < sizeof(locks)/sizeof(*locks) ; ++i )
{
- MYSQL_LOCK *locks[2];
- locks[0]= thd->extra_lock;
- locks[1]= thd->lock;
- my_bool with_annotate= thd->variables.binlog_annotate_row_events &&
- thd->query() && thd->query_length();
+ MYSQL_LOCK const *const lock= locks[i];
+ if (lock == NULL)
+ continue;
- for (uint i= 0 ; i < sizeof(locks)/sizeof(*locks) ; ++i )
+ TABLE **const end_ptr= lock->table + lock->table_count;
+ for (TABLE **table_ptr= lock->table ;
+ table_ptr != end_ptr ;
+ ++table_ptr)
{
- MYSQL_LOCK const *const lock= locks[i];
- if (lock == NULL)
- continue;
-
- TABLE **const end_ptr= lock->table + lock->table_count;
- for (TABLE **table_ptr= lock->table ;
- table_ptr != end_ptr ;
- ++table_ptr)
+ TABLE *const table= *table_ptr;
+ DBUG_PRINT("info", ("Checking table %s", table->s->table_name.str));
+ if (table->current_lock == F_WRLCK &&
+ table->file->check_table_binlog_row_based(0))
{
- TABLE *const table= *table_ptr;
- DBUG_PRINT("info", ("Checking table %s", table->s->table_name.str));
- if (table->current_lock == F_WRLCK &&
- check_table_binlog_row_based(thd, table))
- {
- /*
- We need to have a transactional behavior for SQLCOM_CREATE_TABLE
- (e.g. CREATE TABLE... SELECT * FROM TABLE) in order to keep a
- compatible behavior with the STMT based replication even when
- the table is not transactional. In other words, if the operation
- fails while executing the insert phase nothing is written to the
- binlog.
-
- Note that at this point, we check the type of a set of tables to
- create the table map events. In the function binlog_log_row(),
- which calls the current function, we check the type of the table
- of the current row.
- */
- bool const has_trans= thd->lex->sql_command == SQLCOM_CREATE_TABLE ||
- table->file->has_transactions();
- int const error= thd->binlog_write_table_map(table, has_trans,
- &with_annotate);
- /*
- If an error occurs, it is the responsibility of the caller to
- roll back the transaction.
- */
- if (unlikely(error))
- DBUG_RETURN(1);
- }
+ /*
+ We need to have a transactional behavior for SQLCOM_CREATE_TABLE
+ (e.g. CREATE TABLE... SELECT * FROM TABLE) in order to keep a
+ compatible behavior with the STMT based replication even when
+ the table is not transactional. In other words, if the operation
+ fails while executing the insert phase nothing is written to the
+ binlog.
+
+ Note that at this point, we check the type of a set of tables to
+ create the table map events. In the function binlog_log_row(),
+ which calls the current function, we check the type of the table
+ of the current row.
+ */
+ bool const has_trans= thd->lex->sql_command == SQLCOM_CREATE_TABLE ||
+ table->file->has_transactions();
+ int const error= thd->binlog_write_table_map(table, has_trans,
+ &with_annotate);
+ /*
+ If an error occurs, it is the responsibility of the caller to
+ roll back the transaction.
+ */
+ if (unlikely(error))
+ DBUG_RETURN(1);
}
}
}
@@ -5796,22 +5859,49 @@ static int write_locked_table_maps(THD *thd)
typedef bool Log_func(THD*, TABLE*, bool, const uchar*, const uchar*);
-static int binlog_log_row(TABLE* table,
- const uchar *before_record,
- const uchar *after_record,
- Log_func *log_func)
+static int binlog_log_row_internal(TABLE* table,
+ const uchar *before_record,
+ const uchar *after_record,
+ Log_func *log_func)
{
bool error= 0;
THD *const thd= table->in_use;
-#ifdef WITH_WSREP
/*
- Only InnoDB tables will be replicated through binlog emulation. Also
- updates in mysql.gtid_slave_state table should not be binlogged.
+ If there are no table maps written to the binary log, this is
+ the first row handled in this statement. In that case, we need
+ to write table maps for all locked tables to the binary log.
*/
+ if (likely(!(error= ((thd->get_binlog_table_maps() == 0 &&
+ write_locked_table_maps(thd))))))
+ {
+ /*
+ We need to have a transactional behavior for SQLCOM_CREATE_TABLE
+ (i.e. CREATE TABLE... SELECT * FROM TABLE) in order to keep a
+ compatible behavior with the STMT based replication even when
+ the table is not transactional. In other words, if the operation
+ fails while executing the insert phase nothing is written to the
+ binlog.
+ */
+ bool const has_trans= thd->lex->sql_command == SQLCOM_CREATE_TABLE ||
+ table->file->has_transactions();
+ error= (*log_func)(thd, table, has_trans, before_record, after_record);
+ }
+ return error ? HA_ERR_RBR_LOGGING_FAILED : 0;
+}
+
+static inline int binlog_log_row(TABLE* table,
+ const uchar *before_record,
+ const uchar *after_record,
+ Log_func *log_func)
+{
+#ifdef WITH_WSREP
+ THD *const thd= table->in_use;
+
+ /* only InnoDB tables will be replicated through binlog emulation */
if ((WSREP_EMULATE_BINLOG(thd) &&
table->file->partition_ht()->db_type != DB_TYPE_INNODB) ||
- (thd->wsrep_ignore_table == true))
+ (thd->wsrep_ignore_table == true))
return 0;
/* enforce wsrep_max_ws_rows */
@@ -5827,33 +5917,14 @@ static int binlog_log_row(TABLE* table,
return ER_ERROR_DURING_COMMIT;
}
}
-#endif /* WITH_WSREP */
+#endif
- if (check_table_binlog_row_based(thd, table))
- {
- /*
- If there are no table maps written to the binary log, this is
- the first row handled in this statement. In that case, we need
- to write table maps for all locked tables to the binary log.
- */
- if (likely(!(error= write_locked_table_maps(thd))))
- {
- /*
- We need to have a transactional behavior for SQLCOM_CREATE_TABLE
- (i.e. CREATE TABLE... SELECT * FROM TABLE) in order to keep a
- compatible behavior with the STMT based replication even when
- the table is not transactional. In other words, if the operation
- fails while executing the insert phase nothing is written to the
- binlog.
- */
- bool const has_trans= thd->lex->sql_command == SQLCOM_CREATE_TABLE ||
- table->file->has_transactions();
- error= (*log_func)(thd, table, has_trans, before_record, after_record);
- }
- }
- return error ? HA_ERR_RBR_LOGGING_FAILED : 0;
+ if (!table->file->check_table_binlog_row_based(1))
+ return 0;
+ return binlog_log_row_internal(table, before_record, after_record, log_func);
}
+
int handler::ha_external_lock(THD *thd, int lock_type)
{
int error;
@@ -5892,8 +5963,6 @@ int handler::ha_external_lock(THD *thd, int lock_type)
}
}
- ha_statistic_increment(&SSV::ha_external_lock_count);
-
/*
We cache the table flags if the locking succeeded. Otherwise, we
keep them as they were when they were fetched in ha_open().
@@ -5943,15 +6012,15 @@ int handler::ha_reset()
table->s->column_bitmap_size ==
(uchar*) table->def_write_set.bitmap);
DBUG_ASSERT(bitmap_is_set_all(&table->s->all_set));
- DBUG_ASSERT(table->key_read == 0);
+ DBUG_ASSERT(!table->file->keyread_enabled());
/* ensure that ha_index_end / ha_rnd_end has been called */
DBUG_ASSERT(inited == NONE);
- /* Free cache used by filesort */
- free_io_cache(table);
/* reset the bitmaps to point to defaults */
table->default_column_bitmaps();
pushed_cond= NULL;
tracker= NULL;
+ mark_trx_read_write_done= check_table_binlog_row_based_done=
+ check_table_binlog_row_based_result= 0;
/* Reset information about pushed engine conditions */
cancel_pushed_idx_cond();
/* Reset information about pushed index conditions */
@@ -5976,14 +6045,13 @@ int handler::ha_write_row(uchar *buf)
{ error= write_row(buf); })
MYSQL_INSERT_ROW_DONE(error);
- if (unlikely(error))
- DBUG_RETURN(error);
- rows_changed++;
- if (unlikely(error= binlog_log_row(table, 0, buf, log_func)))
- DBUG_RETURN(error); /* purecov: inspected */
-
+ if (likely(!error))
+ {
+ rows_changed++;
+ error= binlog_log_row(table, 0, buf, log_func);
+ }
DEBUG_SYNC_C("ha_write_row_end");
- DBUG_RETURN(0);
+ DBUG_RETURN(error);
}
@@ -6009,12 +6077,12 @@ int handler::ha_update_row(const uchar *old_data, uchar *new_data)
{ error= update_row(old_data, new_data);})
MYSQL_UPDATE_ROW_DONE(error);
- if (unlikely(error))
- return error;
- rows_changed++;
- if (unlikely(error= binlog_log_row(table, old_data, new_data, log_func)))
- return error;
- return 0;
+ if (likely(!error))
+ {
+ rows_changed++;
+ error= binlog_log_row(table, old_data, new_data, log_func);
+ }
+ return error;
}
int handler::ha_delete_row(const uchar *buf)
@@ -6036,12 +6104,12 @@ int handler::ha_delete_row(const uchar *buf)
TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_DELETE_ROW, active_index, 0,
{ error= delete_row(buf);})
MYSQL_DELETE_ROW_DONE(error);
- if (unlikely(error))
- return error;
- rows_changed++;
- if (unlikely(error= binlog_log_row(table, buf, 0, log_func)))
- return error;
- return 0;
+ if (likely(!error))
+ {
+ rows_changed++;
+ error= binlog_log_row(table, buf, 0, log_func);
+ }
+ return error;
}
diff --git a/sql/handler.h b/sql/handler.h
index af492e75da6..848c9956868 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -25,7 +25,9 @@
#pragma interface /* gcc class implementation */
#endif
+#include <my_global.h> /* For handlers */
#include "sql_const.h"
+#include "sql_basic_types.h"
#include "mysqld.h" /* server_id */
#include "sql_plugin.h" /* plugin_ref, st_plugin_int, plugin */
#include "thr_lock.h" /* thr_lock_type, THR_LOCK_DATA */
@@ -42,6 +44,7 @@
#include <mysql/psi/mysql_table.h>
class Alter_info;
+class Virtual_column_info;
// the following is for checking tables
@@ -192,7 +195,7 @@ enum enum_alter_inplace_result {
#define HA_HAS_NEW_CHECKSUM (1ULL << 38)
#define HA_CAN_VIRTUAL_COLUMNS (1ULL << 39)
#define HA_MRR_CANT_SORT (1ULL << 40)
-#define HA_RECORD_MUST_BE_CLEAN_ON_WRITE (1ULL << 41)
+#define HA_RECORD_MUST_BE_CLEAN_ON_WRITE (1ULL << 41) /* unused */
/*
This storage engine supports condition pushdown
@@ -233,12 +236,12 @@ enum enum_alter_inplace_result {
@note This optimization in combination with batching may be used to
remove even more roundtrips.
*/
-#define HA_READ_BEFORE_WRITE_REMOVAL (1LL << 43)
+#define HA_READ_BEFORE_WRITE_REMOVAL (1ULL << 43)
/*
Engine supports extended fulltext API
*/
-#define HA_CAN_FULLTEXT_EXT (1LL << 44)
+#define HA_CAN_FULLTEXT_EXT (1ULL << 44)
/*
Storage engine supports table export using the
@@ -246,7 +249,7 @@ enum enum_alter_inplace_result {
(meaning, after this statement one can copy table files out of the
datadir and later "import" (somehow) in another MariaDB instance)
*/
-#define HA_CAN_EXPORT (1LL << 45)
+#define HA_CAN_EXPORT (1ULL << 45)
/*
Storage engine does not require an exclusive metadata lock
@@ -256,7 +259,7 @@ enum enum_alter_inplace_result {
read or modify the table - this is defined by THR locks and the
::store_lock() method).
*/
-#define HA_CONCURRENT_OPTIMIZE (1LL << 46)
+#define HA_CONCURRENT_OPTIMIZE (1ULL << 46)
/*
Set of all binlog flags. Currently only contain the capabilities
@@ -295,24 +298,24 @@ enum enum_alter_inplace_result {
Partitioning needs both ADD and DROP to be supported by its underlying
handlers, due to error handling, see bug#57778.
*/
-#define HA_INPLACE_ADD_INDEX_NO_READ_WRITE (1L << 0)
-#define HA_INPLACE_DROP_INDEX_NO_READ_WRITE (1L << 1)
-#define HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE (1L << 2)
-#define HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE (1L << 3)
-#define HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE (1L << 4)
-#define HA_INPLACE_DROP_PK_INDEX_NO_READ_WRITE (1L << 5)
+#define HA_INPLACE_ADD_INDEX_NO_READ_WRITE (1UL << 0)
+#define HA_INPLACE_DROP_INDEX_NO_READ_WRITE (1UL << 1)
+#define HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE (1UL << 2)
+#define HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE (1UL << 3)
+#define HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE (1UL << 4)
+#define HA_INPLACE_DROP_PK_INDEX_NO_READ_WRITE (1UL << 5)
/*
These are set if different kinds of indexes can be created or dropped
in-place while still allowing concurrent reads (but not writes) of table
data. If a handler is capable of one or more of these, it should also set
the corresponding *_NO_READ_WRITE bit(s).
*/
-#define HA_INPLACE_ADD_INDEX_NO_WRITE (1L << 6)
-#define HA_INPLACE_DROP_INDEX_NO_WRITE (1L << 7)
-#define HA_INPLACE_ADD_UNIQUE_INDEX_NO_WRITE (1L << 8)
-#define HA_INPLACE_DROP_UNIQUE_INDEX_NO_WRITE (1L << 9)
-#define HA_INPLACE_ADD_PK_INDEX_NO_WRITE (1L << 10)
-#define HA_INPLACE_DROP_PK_INDEX_NO_WRITE (1L << 11)
+#define HA_INPLACE_ADD_INDEX_NO_WRITE (1UL << 6)
+#define HA_INPLACE_DROP_INDEX_NO_WRITE (1UL << 7)
+#define HA_INPLACE_ADD_UNIQUE_INDEX_NO_WRITE (1UL << 8)
+#define HA_INPLACE_DROP_UNIQUE_INDEX_NO_WRITE (1UL << 9)
+#define HA_INPLACE_ADD_PK_INDEX_NO_WRITE (1UL << 10)
+#define HA_INPLACE_DROP_PK_INDEX_NO_WRITE (1UL << 11)
/*
HA_PARTITION_FUNCTION_SUPPORTED indicates that the function is
supported at all.
@@ -338,9 +341,9 @@ enum enum_alter_inplace_result {
the storage engine. A typical engine to support this is NDB (through
WL #2498).
*/
-#define HA_PARTITION_FUNCTION_SUPPORTED (1L << 12)
-#define HA_FAST_CHANGE_PARTITION (1L << 13)
-#define HA_PARTITION_ONE_PHASE (1L << 14)
+#define HA_PARTITION_FUNCTION_SUPPORTED (1UL << 12)
+#define HA_FAST_CHANGE_PARTITION (1UL << 13)
+#define HA_PARTITION_ONE_PHASE (1UL << 14)
/* operations for disable/enable indexes */
#define HA_KEY_SWITCH_NONUNIQ 0
@@ -366,17 +369,10 @@ enum enum_alter_inplace_result {
HA_GET_INFO does an implicit HA_ABORT_IF_LOCKED
*/
-#define HA_OPEN_KEYFILE 1
-#define HA_OPEN_RNDFILE 2
-#define HA_GET_INDEX 4
-#define HA_GET_INFO 8 /* do a ha_info() after open */
-#define HA_READ_ONLY 16 /* File opened as readonly */
+#define HA_OPEN_KEYFILE 1U
+#define HA_READ_ONLY 16U /* File opened as readonly */
/* Try readonly if can't open with read and write */
-#define HA_TRY_READ_ONLY 32
-#define HA_WAIT_IF_LOCKED 64 /* Wait if locked on open */
-#define HA_ABORT_IF_LOCKED 128 /* skip if locked on open.*/
-#define HA_BLOCK_LOCK 256 /* unlock when reading some records */
-#define HA_OPEN_TEMPORARY 512
+#define HA_TRY_READ_ONLY 32U
/* Some key definitions */
#define HA_KEY_NULL_LENGTH 1
@@ -388,16 +384,16 @@ enum enum_alter_inplace_result {
+(MAX_REF_PARTS \
*(HA_KEY_NULL_LENGTH + HA_KEY_BLOB_LENGTH)))
-#define HA_LEX_CREATE_TMP_TABLE 1
-#define HA_CREATE_TMP_ALTER 8
+#define HA_LEX_CREATE_TMP_TABLE 1U
+#define HA_CREATE_TMP_ALTER 8U
#define HA_MAX_REC_LENGTH 65535
/* Table caching type */
#define HA_CACHE_TBL_NONTRANSACT 0
-#define HA_CACHE_TBL_NOCACHE 1
-#define HA_CACHE_TBL_ASKTRANSACT 2
-#define HA_CACHE_TBL_TRANSACT 4
+#define HA_CACHE_TBL_NOCACHE 1U
+#define HA_CACHE_TBL_ASKTRANSACT 2U
+#define HA_CACHE_TBL_TRANSACT 4U
/**
Options for the START TRANSACTION statement.
@@ -419,9 +415,9 @@ static const uint MYSQL_START_TRANS_OPT_READ_ONLY = 2;
static const uint MYSQL_START_TRANS_OPT_READ_WRITE = 4;
/* Flags for method is_fatal_error */
-#define HA_CHECK_DUP_KEY 1
-#define HA_CHECK_DUP_UNIQUE 2
-#define HA_CHECK_FK_ERROR 4
+#define HA_CHECK_DUP_KEY 1U
+#define HA_CHECK_DUP_UNIQUE 2U
+#define HA_CHECK_FK_ERROR 4U
#define HA_CHECK_DUP (HA_CHECK_DUP_KEY + HA_CHECK_DUP_UNIQUE)
#define HA_CHECK_ALL (~0U)
@@ -488,49 +484,49 @@ enum enum_binlog_command {
/* struct to hold information about the table that should be created */
/* Bits in used_fields */
-#define HA_CREATE_USED_AUTO (1L << 0)
-#define HA_CREATE_USED_RAID (1L << 1) //RAID is no longer availble
-#define HA_CREATE_USED_UNION (1L << 2)
-#define HA_CREATE_USED_INSERT_METHOD (1L << 3)
-#define HA_CREATE_USED_MIN_ROWS (1L << 4)
-#define HA_CREATE_USED_MAX_ROWS (1L << 5)
-#define HA_CREATE_USED_AVG_ROW_LENGTH (1L << 6)
-#define HA_CREATE_USED_PACK_KEYS (1L << 7)
-#define HA_CREATE_USED_CHARSET (1L << 8)
-#define HA_CREATE_USED_DEFAULT_CHARSET (1L << 9)
-#define HA_CREATE_USED_DATADIR (1L << 10)
-#define HA_CREATE_USED_INDEXDIR (1L << 11)
-#define HA_CREATE_USED_ENGINE (1L << 12)
-#define HA_CREATE_USED_CHECKSUM (1L << 13)
-#define HA_CREATE_USED_DELAY_KEY_WRITE (1L << 14)
-#define HA_CREATE_USED_ROW_FORMAT (1L << 15)
-#define HA_CREATE_USED_COMMENT (1L << 16)
-#define HA_CREATE_USED_PASSWORD (1L << 17)
-#define HA_CREATE_USED_CONNECTION (1L << 18)
-#define HA_CREATE_USED_KEY_BLOCK_SIZE (1L << 19)
+#define HA_CREATE_USED_AUTO (1UL << 0)
+#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer availble
+#define HA_CREATE_USED_UNION (1UL << 2)
+#define HA_CREATE_USED_INSERT_METHOD (1UL << 3)
+#define HA_CREATE_USED_MIN_ROWS (1UL << 4)
+#define HA_CREATE_USED_MAX_ROWS (1UL << 5)
+#define HA_CREATE_USED_AVG_ROW_LENGTH (1UL << 6)
+#define HA_CREATE_USED_PACK_KEYS (1UL << 7)
+#define HA_CREATE_USED_CHARSET (1UL << 8)
+#define HA_CREATE_USED_DEFAULT_CHARSET (1UL << 9)
+#define HA_CREATE_USED_DATADIR (1UL << 10)
+#define HA_CREATE_USED_INDEXDIR (1UL << 11)
+#define HA_CREATE_USED_ENGINE (1UL << 12)
+#define HA_CREATE_USED_CHECKSUM (1UL << 13)
+#define HA_CREATE_USED_DELAY_KEY_WRITE (1UL << 14)
+#define HA_CREATE_USED_ROW_FORMAT (1UL << 15)
+#define HA_CREATE_USED_COMMENT (1UL << 16)
+#define HA_CREATE_USED_PASSWORD (1UL << 17)
+#define HA_CREATE_USED_CONNECTION (1UL << 18)
+#define HA_CREATE_USED_KEY_BLOCK_SIZE (1UL << 19)
/* The following two are used by Maria engine: */
-#define HA_CREATE_USED_TRANSACTIONAL (1L << 20)
-#define HA_CREATE_USED_PAGE_CHECKSUM (1L << 21)
+#define HA_CREATE_USED_TRANSACTIONAL (1UL << 20)
+#define HA_CREATE_USED_PAGE_CHECKSUM (1UL << 21)
/** This is set whenever STATS_PERSISTENT=0|1|default has been
specified in CREATE/ALTER TABLE. See also HA_OPTION_STATS_PERSISTENT in
include/my_base.h. It is possible to distinguish whether
STATS_PERSISTENT=default has been specified or no STATS_PERSISTENT= is
given at all. */
-#define HA_CREATE_USED_STATS_PERSISTENT (1L << 22)
+#define HA_CREATE_USED_STATS_PERSISTENT (1UL << 22)
/**
This is set whenever STATS_AUTO_RECALC=0|1|default has been
specified in CREATE/ALTER TABLE. See enum_stats_auto_recalc.
It is possible to distinguish whether STATS_AUTO_RECALC=default
has been specified or no STATS_AUTO_RECALC= is given at all.
*/
-#define HA_CREATE_USED_STATS_AUTO_RECALC (1L << 23)
+#define HA_CREATE_USED_STATS_AUTO_RECALC (1UL << 23)
/**
This is set whenever STATS_SAMPLE_PAGES=N|default has been
specified in CREATE/ALTER TABLE. It is possible to distinguish whether
STATS_SAMPLE_PAGES=default has been specified or no STATS_SAMPLE_PAGES= is
given at all.
*/
-#define HA_CREATE_USED_STATS_SAMPLE_PAGES (1L << 24)
+#define HA_CREATE_USED_STATS_SAMPLE_PAGES (1UL << 24)
/*
@@ -578,11 +574,11 @@ struct xid_t {
long bqual_length;
char data[XIDDATASIZE]; // not \0-terminated !
- xid_t() {} /* Remove gcc warning */
+ xid_t() {} /* Remove gcc warning */
bool eq(struct xid_t *xid)
- { return eq(xid->gtrid_length, xid->bqual_length, xid->data); }
+ { return !xid->is_null() && eq(xid->gtrid_length, xid->bqual_length, xid->data); }
bool eq(long g, long b, const char *d)
- { return g == gtrid_length && b == bqual_length && !memcmp(d, data, g+b); }
+ { return !is_null() && g == gtrid_length && b == bqual_length && !memcmp(d, data, g+b); }
void set(struct xid_t *xid)
{ memcpy(this, xid, xid->length()); }
void set(long f, const char *g, long gl, const char *b, long bl)
@@ -624,8 +620,7 @@ struct xid_t {
}
uint length()
{
- return sizeof(formatID)+sizeof(gtrid_length)+sizeof(bqual_length)+
- gtrid_length+bqual_length;
+ return static_cast<uint>(sizeof(formatID)) + key_length();
}
uchar *key() const
{
@@ -633,7 +628,8 @@ struct xid_t {
}
uint key_length() const
{
- return sizeof(gtrid_length)+sizeof(bqual_length)+gtrid_length+bqual_length;
+ return static_cast<uint>(sizeof(gtrid_length)+sizeof(bqual_length)+
+ gtrid_length+bqual_length);
}
};
typedef struct xid_t XID;
@@ -731,6 +727,7 @@ enum enum_schema_tables
SCH_ALL_PLUGINS,
SCH_APPLICABLE_ROLES,
SCH_CHARSETS,
+ SCH_CHECK_CONSTRAINTS,
SCH_COLLATIONS,
SCH_COLLATION_CHARACTER_SET_APPLICABILITY,
SCH_COLUMNS,
@@ -776,9 +773,9 @@ struct TABLE_SHARE;
struct HA_CREATE_INFO;
struct st_foreign_key_info;
typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
-typedef bool (stat_print_fn)(THD *thd, const char *type, uint type_len,
- const char *file, uint file_len,
- const char *status, uint status_len);
+typedef bool (stat_print_fn)(THD *thd, const char *type, size_t type_len,
+ const char *file, size_t file_len,
+ const char *status, size_t status_len);
enum ha_stat_type { HA_ENGINE_STATUS, HA_ENGINE_LOGS, HA_ENGINE_MUTEX };
extern st_plugin_int *hton2plugin[MAX_HA];
@@ -1221,7 +1218,6 @@ struct handlerton
enum_binlog_command binlog_command,
const char *query, uint query_length,
const char *db, const char *table_name);
- int (*release_temporary_latches)(handlerton *hton, THD *thd);
/*
Get log status.
@@ -1663,6 +1659,7 @@ struct Schema_specification_st
- LIKE another_table_name ... // Copy structure from another table
- [AS] SELECT ... // Copy structure from a subquery
*/
+
struct Table_scope_and_contents_source_st
{
CHARSET_INFO *table_charset;
@@ -1678,6 +1675,8 @@ struct Table_scope_and_contents_source_st
ulong avg_row_length;
ulong used_fields;
ulong key_block_size;
+ ulong expression_length;
+ ulong field_check_constraints;
/*
number of pages to sample during
stats estimation, if used, otherwise 0.
@@ -1705,6 +1704,8 @@ struct Table_scope_and_contents_source_st
enum_stats_auto_recalc stats_auto_recalc;
bool varchar; ///< 1 if table has a VARCHAR
+ List<Virtual_column_info> *check_constraint_list;
+
/* the following three are only for ALTER TABLE, check_if_incompatible_data() */
ha_table_option_struct *option_struct; ///< structure with parsed table options
ha_field_option_struct **fields_option_struct; ///< array of field option structures
@@ -1861,37 +1862,49 @@ public:
attribute has really changed we might choose to set flag
pessimistically, for example, relying on parser output only.
*/
- typedef ulong HA_ALTER_FLAGS;
+ typedef ulonglong HA_ALTER_FLAGS;
// Add non-unique, non-primary index
- static const HA_ALTER_FLAGS ADD_INDEX = 1L << 0;
+ static const HA_ALTER_FLAGS ADD_INDEX = 1ULL << 0;
// Drop non-unique, non-primary index
- static const HA_ALTER_FLAGS DROP_INDEX = 1L << 1;
+ static const HA_ALTER_FLAGS DROP_INDEX = 1ULL << 1;
// Add unique, non-primary index
- static const HA_ALTER_FLAGS ADD_UNIQUE_INDEX = 1L << 2;
+ static const HA_ALTER_FLAGS ADD_UNIQUE_INDEX = 1ULL << 2;
// Drop unique, non-primary index
- static const HA_ALTER_FLAGS DROP_UNIQUE_INDEX = 1L << 3;
+ static const HA_ALTER_FLAGS DROP_UNIQUE_INDEX = 1ULL << 3;
// Add primary index
- static const HA_ALTER_FLAGS ADD_PK_INDEX = 1L << 4;
+ static const HA_ALTER_FLAGS ADD_PK_INDEX = 1ULL << 4;
// Drop primary index
- static const HA_ALTER_FLAGS DROP_PK_INDEX = 1L << 5;
-
- // Add column
- static const HA_ALTER_FLAGS ADD_COLUMN = 1L << 6;
+ static const HA_ALTER_FLAGS DROP_PK_INDEX = 1ULL << 5;
+
+ // Virtual generated column
+ static const HA_ALTER_FLAGS ADD_VIRTUAL_COLUMN = 1ULL << 6;
+ // Stored base (non-generated) column
+ static const HA_ALTER_FLAGS ADD_STORED_BASE_COLUMN = 1ULL << 7;
+ // Stored generated column
+ static const HA_ALTER_FLAGS ADD_STORED_GENERATED_COLUMN= 1ULL << 8;
+ // Add generic column (convenience constant).
+ static const HA_ALTER_FLAGS ADD_COLUMN= ADD_VIRTUAL_COLUMN |
+ ADD_STORED_BASE_COLUMN |
+ ADD_STORED_GENERATED_COLUMN;
// Drop column
- static const HA_ALTER_FLAGS DROP_COLUMN = 1L << 7;
+ static const HA_ALTER_FLAGS DROP_VIRTUAL_COLUMN = 1ULL << 9;
+ static const HA_ALTER_FLAGS DROP_STORED_COLUMN = 1ULL << 10;
+ static const HA_ALTER_FLAGS DROP_COLUMN= DROP_VIRTUAL_COLUMN |
+ DROP_STORED_COLUMN;
// Rename column
- static const HA_ALTER_FLAGS ALTER_COLUMN_NAME = 1L << 8;
+ static const HA_ALTER_FLAGS ALTER_COLUMN_NAME = 1ULL << 11;
// Change column datatype
- static const HA_ALTER_FLAGS ALTER_COLUMN_TYPE = 1L << 9;
+ static const HA_ALTER_FLAGS ALTER_VIRTUAL_COLUMN_TYPE = 1ULL << 12;
+ static const HA_ALTER_FLAGS ALTER_STORED_COLUMN_TYPE = 1ULL << 13;
/**
Change column datatype in such way that new type has compatible
@@ -1899,79 +1912,99 @@ public:
possible to perform change by only updating data dictionary
without changing table rows.
*/
- static const HA_ALTER_FLAGS ALTER_COLUMN_EQUAL_PACK_LENGTH = 1L << 10;
+ static const HA_ALTER_FLAGS ALTER_COLUMN_EQUAL_PACK_LENGTH = 1ULL << 14;
// Reorder column
- static const HA_ALTER_FLAGS ALTER_COLUMN_ORDER = 1L << 11;
+ static const HA_ALTER_FLAGS ALTER_STORED_COLUMN_ORDER = 1ULL << 15;
+
+ // Reorder column
+ static const HA_ALTER_FLAGS ALTER_VIRTUAL_COLUMN_ORDER = 1ULL << 16;
// Change column from NOT NULL to NULL
- static const HA_ALTER_FLAGS ALTER_COLUMN_NULLABLE = 1L << 12;
+ static const HA_ALTER_FLAGS ALTER_COLUMN_NULLABLE = 1ULL << 17;
// Change column from NULL to NOT NULL
- static const HA_ALTER_FLAGS ALTER_COLUMN_NOT_NULLABLE = 1L << 13;
+ static const HA_ALTER_FLAGS ALTER_COLUMN_NOT_NULLABLE = 1ULL << 18;
// Set or remove default column value
- static const HA_ALTER_FLAGS ALTER_COLUMN_DEFAULT = 1L << 14;
+ static const HA_ALTER_FLAGS ALTER_COLUMN_DEFAULT = 1ULL << 19;
+ // Change column generation expression
+ static const HA_ALTER_FLAGS ALTER_VIRTUAL_GCOL_EXPR = 1ULL << 20;
+ static const HA_ALTER_FLAGS ALTER_STORED_GCOL_EXPR = 1ULL << 21;
+ //
// Add foreign key
- static const HA_ALTER_FLAGS ADD_FOREIGN_KEY = 1L << 15;
+ static const HA_ALTER_FLAGS ADD_FOREIGN_KEY = 1ULL << 22;
// Drop foreign key
- static const HA_ALTER_FLAGS DROP_FOREIGN_KEY = 1L << 16;
+ static const HA_ALTER_FLAGS DROP_FOREIGN_KEY = 1ULL << 23;
// table_options changed, see HA_CREATE_INFO::used_fields for details.
- static const HA_ALTER_FLAGS CHANGE_CREATE_OPTION = 1L << 17;
+ static const HA_ALTER_FLAGS CHANGE_CREATE_OPTION = 1ULL << 24;
// Table is renamed
- static const HA_ALTER_FLAGS ALTER_RENAME = 1L << 18;
+ static const HA_ALTER_FLAGS ALTER_RENAME = 1ULL << 25;
// column's engine options changed, something in field->option_struct
- static const HA_ALTER_FLAGS ALTER_COLUMN_OPTION = 1L << 19;
+ static const HA_ALTER_FLAGS ALTER_COLUMN_OPTION = 1ULL << 26;
// MySQL alias for the same thing:
- static const HA_ALTER_FLAGS ALTER_COLUMN_STORAGE_TYPE = 1L << 19;
+ static const HA_ALTER_FLAGS ALTER_COLUMN_STORAGE_TYPE = 1ULL << 26;
// Change the column format of column
- static const HA_ALTER_FLAGS ALTER_COLUMN_COLUMN_FORMAT = 1L << 20;
+ static const HA_ALTER_FLAGS ALTER_COLUMN_COLUMN_FORMAT = 1ULL << 27;
// Add partition
- static const HA_ALTER_FLAGS ADD_PARTITION = 1L << 21;
+ static const HA_ALTER_FLAGS ADD_PARTITION = 1ULL << 28;
// Drop partition
- static const HA_ALTER_FLAGS DROP_PARTITION = 1L << 22;
+ static const HA_ALTER_FLAGS DROP_PARTITION = 1ULL << 29;
// Changing partition options
- static const HA_ALTER_FLAGS ALTER_PARTITION = 1L << 23;
+ static const HA_ALTER_FLAGS ALTER_PARTITION = 1ULL << 30;
// Coalesce partition
- static const HA_ALTER_FLAGS COALESCE_PARTITION = 1L << 24;
+ static const HA_ALTER_FLAGS COALESCE_PARTITION = 1ULL << 31;
// Reorganize partition ... into
- static const HA_ALTER_FLAGS REORGANIZE_PARTITION = 1L << 25;
+ static const HA_ALTER_FLAGS REORGANIZE_PARTITION = 1ULL << 32;
// Reorganize partition
- static const HA_ALTER_FLAGS ALTER_TABLE_REORG = 1L << 26;
+ static const HA_ALTER_FLAGS ALTER_TABLE_REORG = 1ULL << 33;
// Remove partitioning
- static const HA_ALTER_FLAGS ALTER_REMOVE_PARTITIONING = 1L << 27;
+ static const HA_ALTER_FLAGS ALTER_REMOVE_PARTITIONING = 1ULL << 34;
// Partition operation with ALL keyword
- static const HA_ALTER_FLAGS ALTER_ALL_PARTITION = 1L << 28;
+ static const HA_ALTER_FLAGS ALTER_ALL_PARTITION = 1ULL << 35;
/**
Recreate the table for ALTER TABLE FORCE, ALTER TABLE ENGINE
and OPTIMIZE TABLE operations.
*/
- static const HA_ALTER_FLAGS RECREATE_TABLE = 1L << 29;
+ static const HA_ALTER_FLAGS RECREATE_TABLE = 1ULL << 36;
- // Virtual columns changed
- static const HA_ALTER_FLAGS ALTER_COLUMN_VCOL = 1L << 30;
+ /**
+ Changes in generated columns that affect storage,
+ for example, when a vcol type or expression changes
+ and this vcol is indexed or used in a partitioning expression
+ */
+ static const HA_ALTER_FLAGS ALTER_COLUMN_VCOL = 1ULL << 37;
/**
ALTER TABLE for a partitioned table. The engine needs to commit
online alter of all partitions atomically (using group_commit_ctx)
*/
- static const HA_ALTER_FLAGS ALTER_PARTITIONED = 1L << 31;
+ static const HA_ALTER_FLAGS ALTER_PARTITIONED = 1ULL << 38;
+
+ static const HA_ALTER_FLAGS ALTER_ADD_CHECK_CONSTRAINT = 1ULL << 39;
+
+ static const HA_ALTER_FLAGS ALTER_DROP_CHECK_CONSTRAINT= 1ULL << 40;
+
+ /**
+ Change in index length such that it doesn't require index rebuild.
+ */
+ static const HA_ALTER_FLAGS ALTER_COLUMN_INDEX_LENGTH= 1ULL << 41;
/**
Create options (like MAX_ROWS) for the new version of table.
@@ -2327,12 +2360,13 @@ public:
/**
Whether or not all costs in the object are zero
-
+
@return true if all costs are zero, false otherwise
*/
bool is_zero() const
- {
- return !(io_count || cpu_cost || import_cost || mem_cost);
+ {
+ return io_count == 0.0 && cpu_cost == 0.0 &&
+ import_cost == 0.0 && mem_cost == 0.0;
}
void reset()
@@ -2397,29 +2431,29 @@ void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
The ranges may not use the full key but all of them will use the same number
of key parts.
*/
-#define HA_MRR_SINGLE_POINT 1
-#define HA_MRR_FIXED_KEY 2
+#define HA_MRR_SINGLE_POINT 1U
+#define HA_MRR_FIXED_KEY 2U
/*
Indicates that RANGE_SEQ_IF::next(&range) doesn't need to fill in the
'range' parameter.
*/
-#define HA_MRR_NO_ASSOCIATION 4
+#define HA_MRR_NO_ASSOCIATION 4U
/*
The MRR user will provide ranges in key order, and MRR implementation
must return rows in key order.
*/
-#define HA_MRR_SORTED 8
+#define HA_MRR_SORTED 8U
/* MRR implementation doesn't have to retrieve full records */
-#define HA_MRR_INDEX_ONLY 16
+#define HA_MRR_INDEX_ONLY 16U
/*
The passed memory buffer is of maximum possible size, the caller can't
assume larger buffer.
*/
-#define HA_MRR_LIMITS 32
+#define HA_MRR_LIMITS 32U
/*
@@ -2428,14 +2462,14 @@ void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
flag. SQL layer remembers the flag value and then passes it to
multi_read_range_init().
*/
-#define HA_MRR_USE_DEFAULT_IMPL 64
+#define HA_MRR_USE_DEFAULT_IMPL 64U
/*
Used only as parameter to multi_range_read_info():
Flag set <=> the caller guarantees that the bounds of the scanned ranges
will not have NULL values.
*/
-#define HA_MRR_NO_NULL_ENDPOINTS 128
+#define HA_MRR_NO_NULL_ENDPOINTS 128U
/*
The MRR user has materialized range keys somewhere in the user's buffer.
@@ -2446,7 +2480,7 @@ void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
pointer in range->start_key.key will point to a key value that will remain
there until the end of the MRR scan.
*/
-#define HA_MRR_MATERIALIZED_KEYS 256
+#define HA_MRR_MATERIALIZED_KEYS 256U
/*
The following bits are reserved for use by MRR implementation. The intended
@@ -2464,15 +2498,15 @@ void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
handler->multi_range_read_explain_info(mrr_mode) to get a text description
of the picked MRR scan; the description will be a part of EXPLAIN output.
*/
-#define HA_MRR_IMPLEMENTATION_FLAG1 512
-#define HA_MRR_IMPLEMENTATION_FLAG2 1024
-#define HA_MRR_IMPLEMENTATION_FLAG3 2048
-#define HA_MRR_IMPLEMENTATION_FLAG4 4096
-#define HA_MRR_IMPLEMENTATION_FLAG5 8192
-#define HA_MRR_IMPLEMENTATION_FLAG6 16384
+#define HA_MRR_IMPLEMENTATION_FLAG1 512U
+#define HA_MRR_IMPLEMENTATION_FLAG2 1024U
+#define HA_MRR_IMPLEMENTATION_FLAG3 2048U
+#define HA_MRR_IMPLEMENTATION_FLAG4 4096U
+#define HA_MRR_IMPLEMENTATION_FLAG5 8192U
+#define HA_MRR_IMPLEMENTATION_FLAG6 16384U
#define HA_MRR_IMPLEMENTATION_FLAGS \
- (512 | 1024 | 2048 | 4096 | 8192 | 16384)
+ (512U | 1024U | 2048U | 4096U | 8192U | 16384U)
/*
This is a buffer area that the handler can use to store rows.
@@ -2622,11 +2656,6 @@ public:
RANGE_SEQ_IF mrr_funcs; /* Range sequence traversal functions */
HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */
uint ranges_in_seq; /* Total number of ranges in the traversed sequence */
- /* TRUE <=> source MRR ranges and the output are ordered */
- bool mrr_is_output_sorted;
-
- /** TRUE <=> we're currently traversing a range in mrr_cur_range. */
- bool mrr_have_range;
/** Current range (the one we're now returning rows from) */
KEY_MULTI_RANGE mrr_cur_range;
@@ -2634,23 +2663,32 @@ public:
key_range save_end_range, *end_range;
KEY_PART_INFO *range_key_part;
int key_compare_result_on_equal;
- bool eq_range;
- bool internal_tmp_table; /* If internal tmp table */
- uint errkey; /* Last dup key */
- uint key_used_on_scan;
- uint active_index;
+ /* TRUE <=> source MRR ranges and the output are ordered */
+ bool mrr_is_output_sorted;
+ /** TRUE <=> we're currently traversing a range in mrr_cur_range. */
+ bool mrr_have_range;
+ bool eq_range;
+ bool internal_tmp_table; /* If internal tmp table */
+ bool implicit_emptied; /* Can be !=0 only if HEAP */
+ bool mark_trx_read_write_done; /* mark_trx_read_write was called */
+ bool check_table_binlog_row_based_done; /* check_table_binlog.. was called */
+ bool check_table_binlog_row_based_result; /* cached check_table_binlog... */
/*
TRUE <=> the engine guarantees that returned records are within the range
being scanned.
*/
bool in_range_check_pushed_down;
+ uint errkey; /* Last dup key */
+ uint key_used_on_scan;
+ uint active_index, keyread;
+
/** Length of ref (1-8 or the clustered key length) */
uint ref_length;
FT_INFO *ft_handler;
enum {NONE=0, INDEX, RND} inited;
- bool implicit_emptied; /* Can be !=0 only if HEAP */
+
const COND *pushed_cond;
/**
next_insert_id is the next value which should be inserted into the
@@ -2686,7 +2724,6 @@ private:
public:
void set_time_tracker(Exec_time_tracker *tracker_arg) { tracker=tracker_arg;}
-
Item *pushed_idx_cond;
uint pushed_idx_cond_keyno; /* The index which the above condition is for */
@@ -2734,11 +2771,16 @@ public:
handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), table(0),
estimation_rows_to_insert(0), ht(ht_arg),
- ref(0), end_range(NULL), key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
+ ref(0), end_range(NULL),
+ implicit_emptied(0),
+ mark_trx_read_write_done(0),
+ check_table_binlog_row_based_done(0),
+ check_table_binlog_row_based_result(0),
in_range_check_pushed_down(FALSE),
+ key_used_on_scan(MAX_KEY),
+ active_index(MAX_KEY), keyread(MAX_KEY),
ref_length(sizeof(my_off_t)),
ft_handler(0), inited(NONE),
- implicit_emptied(0),
pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
tracker(NULL),
pushed_idx_cond(NULL),
@@ -2841,6 +2883,21 @@ public:
int ha_delete_row(const uchar * buf);
void ha_release_auto_increment();
+ bool keyread_enabled() { return keyread < MAX_KEY; }
+ int ha_start_keyread(uint idx)
+ {
+ int res= keyread_enabled() ? 0 : extra_opt(HA_EXTRA_KEYREAD, idx);
+ keyread= idx;
+ return res;
+ }
+ int ha_end_keyread()
+ {
+ if (!keyread_enabled())
+ return 0;
+ keyread= MAX_KEY;
+ return extra(HA_EXTRA_NO_KEYREAD);
+ }
+
int check_collation_compatibility();
int ha_check_for_upgrade(HA_CHECK_OPT *check_opt);
/** to be actually called to get 'check()' functionality*/
@@ -2957,7 +3014,6 @@ public:
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
bool has_transactions()
{ return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
- virtual uint extra_rec_buf_length() const { return 0; }
/**
This method is used to analyse the error to see whether the error
@@ -3170,7 +3226,7 @@ public:
virtual int read_range_next();
void set_end_range(const key_range *end_key);
int compare_key(key_range *range);
- int compare_key2(key_range *range);
+ int compare_key2(key_range *range) const;
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
void ft_end() { ft_handler=NULL; }
virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
@@ -3234,7 +3290,7 @@ public:
uint part_id);
virtual int extra(enum ha_extra_function operation)
{ return 0; }
- virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
+ virtual int extra_opt(enum ha_extra_function operation, ulong arg)
{ return extra(operation); }
/**
@@ -3885,25 +3941,10 @@ public:
}
LEX_STRING *engine_name() { return hton_name(ht); }
-
- /*
- @brief
- Check whether the engine supports virtual columns
-
- @retval
- FALSE if the engine does not support virtual columns
- @retval
- TRUE if the engine supports virtual columns
- */
-
- virtual bool check_if_supported_virtual_columns(void) { return FALSE;}
TABLE* get_table() { return table; }
TABLE_SHARE* get_table_share() { return table_share; }
protected:
- /* deprecated, don't use in new engines */
- inline void ha_statistic_increment(ulong SSV::*offset) const { }
-
/* Service methods for use by storage engines. */
void **ha_data(THD *) const;
THD *ha_thd(void) const;
@@ -3928,13 +3969,31 @@ protected:
*/
virtual int delete_table(const char *name);
+public:
+ inline bool check_table_binlog_row_based(bool binlog_row);
private:
- /* Private helpers */
- inline void mark_trx_read_write();
-private:
+ /* Cache result to avoid extra calls */
+ inline void mark_trx_read_write()
+ {
+ if (unlikely(!mark_trx_read_write_done))
+ {
+ mark_trx_read_write_done= 1;
+ mark_trx_read_write_internal();
+ }
+ }
+ void mark_trx_read_write_internal();
+ bool check_table_binlog_row_based_internal(bool binlog_row);
+
+protected:
+ /*
+ These are intended to be used only by handler::ha_xxxx() functions
+ However, engines that implement read_range_XXX() (like MariaRocks)
+ or embed other engines (like ha_partition) may need to call these also
+ */
inline void increment_statistics(ulong SSV::*offset) const;
inline void decrement_statistics(ulong SSV::*offset) const;
+private:
/*
Low-level primitives for storage engines. These should be
overridden by the storage engine class. To call these methods, use
@@ -4259,9 +4318,6 @@ int ha_change_key_cache_param(KEY_CACHE *key_cache);
int ha_repartition_key_cache(KEY_CACHE *key_cache);
int ha_change_key_cache(KEY_CACHE *old_key_cache, KEY_CACHE *new_key_cache);
-/* report to InnoDB that control passes to the client */
-int ha_release_temporary_latches(THD *thd);
-
/* transactions: interface to handlerton functions */
int ha_start_consistent_snapshot(THD *thd);
int ha_commit_or_rollback_by_xid(XID *xid, bool commit);
diff --git a/sql/hostname.cc b/sql/hostname.cc
index 39e4b34d615..e8d6780d095 100644
--- a/sql/hostname.cc
+++ b/sql/hostname.cc
@@ -411,7 +411,7 @@ static inline bool is_hostname_valid(const char *hostname)
int ip_to_hostname(struct sockaddr_storage *ip_storage,
const char *ip_string,
- char **hostname,
+ const char **hostname,
uint *connect_errors)
{
const struct sockaddr *ip= (const sockaddr *) ip_storage;
@@ -435,7 +435,7 @@ int ip_to_hostname(struct sockaddr_storage *ip_storage,
DBUG_PRINT("info", ("Loopback address detected."));
/* Do not count connect errors from localhost. */
- *hostname= (char *) my_localhost;
+ *hostname= my_localhost;
DBUG_RETURN(0);
}
diff --git a/sql/hostname.h b/sql/hostname.h
index 81a1d0de88d..d6137b7c260 100644
--- a/sql/hostname.h
+++ b/sql/hostname.h
@@ -168,7 +168,7 @@ extern ulong host_cache_size;
#define RC_BLOCKED_HOST 1
int ip_to_hostname(struct sockaddr_storage *ip_storage,
const char *ip_string,
- char **hostname, uint *connect_errors);
+ const char **hostname, uint *connect_errors);
void inc_host_errors(const char *ip_string, Host_errors *errors);
void reset_host_connect_errors(const char *ip_string);
diff --git a/sql/init.h b/sql/init.h
index 88cd8e6e178..af2621e5e70 100644
--- a/sql/init.h
+++ b/sql/init.h
@@ -19,6 +19,6 @@
#include "my_global.h" /* ulong */
void unireg_init(ulong options);
-void unireg_end(void) __attribute__((noreturn));
+ATTRIBUTE_NORETURN void unireg_end(void);
#endif /* INIT_INCLUDED */
diff --git a/sql/item.cc b/sql/item.cc
index 3bfbdb75c40..f7092eb6c86 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -43,6 +43,7 @@
#include "sql_expression_cache.h"
const String my_null_string("NULL", 4, default_charset_info);
+const String my_default_string("DEFAULT", 7, default_charset_info);
static int save_field_in_field(Field *, bool *, Field *, bool);
@@ -83,6 +84,22 @@ void item_init(void)
}
+void Item::push_note_converted_to_negative_complement(THD *thd)
+{
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR,
+ "Cast to signed converted positive out-of-range integer to "
+ "it's negative complement");
+}
+
+
+void Item::push_note_converted_to_positive_complement(THD *thd)
+{
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR,
+ "Cast to unsigned converted negative integer to it's "
+ "positive complement");
+}
+
+
/**
@todo
Make this functions class dependent
@@ -134,13 +151,9 @@ bool Item::get_date_with_conversion(MYSQL_TIME *ltime, ulonglong fuzzydate)
if (get_date(ltime, fuzzydate | time_flag))
return true;
if (ltime->time_type == MYSQL_TIMESTAMP_TIME &&
- !(fuzzydate & TIME_TIME_ONLY))
- {
- MYSQL_TIME tmp;
- if (time_to_datetime_with_warn(thd, ltime, &tmp, fuzzydate))
- return null_value= true;
- *ltime= tmp;
- }
+ !(fuzzydate & TIME_TIME_ONLY) &&
+ convert_time_to_datetime(thd, ltime, fuzzydate))
+ return true;
return false;
}
@@ -371,14 +384,6 @@ longlong Item::val_int_from_date()
}
-longlong Item::val_int_from_real()
-{
- DBUG_ASSERT(fixed == 1);
- bool error;
- return double_to_longlong(val_real(), false /*unsigned_flag*/, &error);
-}
-
-
double Item::val_real_from_date()
{
DBUG_ASSERT(fixed == 1);
@@ -425,7 +430,7 @@ int Item::save_time_in_field(Field *field)
int Item::save_date_in_field(Field *field)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, sql_mode_for_dates(current_thd)))
+ if (get_date(&ltime, sql_mode_for_dates(field->table->in_use)))
return set_field_to_null_with_conversions(field, 0);
field->set_notnull();
return field->store_time_dec(&ltime, decimals);
@@ -469,7 +474,7 @@ Item::Item(THD *thd):
{
DBUG_ASSERT(thd);
marker= 0;
- maybe_null=null_value=with_sum_func=with_field=0;
+ maybe_null=null_value=with_sum_func=with_window_func=with_field=0;
in_rollup= 0;
with_subselect= 0;
with_param= 0;
@@ -489,12 +494,21 @@ Item::Item(THD *thd):
{
enum_parsing_place place=
thd->lex->current_select->parsing_place;
- if (place == SELECT_LIST ||
- place == IN_HAVING)
+ if (place == SELECT_LIST || place == IN_HAVING)
thd->lex->current_select->select_n_having_items++;
}
}
+
+const TABLE_SHARE *Item::field_table_or_null()
+{
+ if (real_item()->type() != Item::FIELD_ITEM)
+ return NULL;
+
+ return ((Item_field *) this)->field->table->s;
+}
+
+
/**
Constructor used by Item_field, Item_ref & aggregate (sum)
functions.
@@ -517,6 +531,7 @@ Item::Item(THD *thd, Item *item):
null_value(item->null_value),
with_sum_func(item->with_sum_func),
with_param(item->with_param),
+ with_window_func(item->with_window_func),
with_field(item->with_field),
fixed(item->fixed),
is_autogenerated_name(item->is_autogenerated_name),
@@ -570,6 +585,24 @@ uint Item::temporal_precision(enum_field_types type_arg)
}
+void Item::print_parenthesised(String *str, enum_query_type query_type,
+ enum precedence parent_prec)
+{
+ bool need_parens= precedence() < parent_prec;
+ if (need_parens)
+ str->append('(');
+ print(str, query_type);
+ if (need_parens)
+ str->append(')');
+}
+
+
+void Item::print(String *str, enum_query_type query_type)
+{
+ str->append(full_name());
+}
+
+
void Item::print_item_w_name(String *str, enum_query_type query_type)
{
print(str, query_type);
@@ -592,8 +625,9 @@ void Item::print_value(String *str)
str->append("NULL");
else
{
- switch (result_type()) {
+ switch (cmp_type()) {
case STRING_RESULT:
+ case TIME_RESULT:
append_unescaped(str, ptr->ptr(), ptr->length());
break;
case DECIMAL_RESULT:
@@ -602,7 +636,6 @@ void Item::print_value(String *str)
str->append(*ptr);
break;
case ROW_RESULT:
- case TIME_RESULT:
DBUG_ASSERT(0);
}
}
@@ -628,7 +661,7 @@ void Item::cleanup()
@param arg a dummy parameter, is not used here
*/
-bool Item::cleanup_processor(uchar *arg)
+bool Item::cleanup_processor(void *arg)
{
if (fixed)
cleanup();
@@ -653,48 +686,6 @@ void Item::rename(char *new_name)
name= new_name;
}
-Item_result Item::cmp_type() const
-{
- switch (field_type()) {
- case MYSQL_TYPE_DECIMAL:
- case MYSQL_TYPE_NEWDECIMAL:
- return DECIMAL_RESULT;
- case MYSQL_TYPE_TINY:
- case MYSQL_TYPE_SHORT:
- case MYSQL_TYPE_LONG:
- case MYSQL_TYPE_LONGLONG:
- case MYSQL_TYPE_INT24:
- case MYSQL_TYPE_YEAR:
- case MYSQL_TYPE_BIT:
- return INT_RESULT;
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- return REAL_RESULT;
- case MYSQL_TYPE_NULL:
- case MYSQL_TYPE_VARCHAR:
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_VAR_STRING:
- case MYSQL_TYPE_STRING:
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- case MYSQL_TYPE_GEOMETRY:
- return STRING_RESULT;
- case MYSQL_TYPE_TIMESTAMP:
- case MYSQL_TYPE_TIMESTAMP2:
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_TIME2:
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_DATETIME2:
- case MYSQL_TYPE_NEWDATE:
- return TIME_RESULT;
- };
- DBUG_ASSERT(0);
- return STRING_RESULT;
-}
/**
Traverse item tree possibly transforming it (replacing items).
@@ -833,7 +824,7 @@ void Item_ident::cleanup()
DBUG_VOID_RETURN;
}
-bool Item_ident::remove_dependence_processor(uchar * arg)
+bool Item_ident::remove_dependence_processor(void * arg)
{
DBUG_ENTER("Item_ident::remove_dependence_processor");
if (get_depended_from() == (st_select_lex *) arg)
@@ -843,7 +834,7 @@ bool Item_ident::remove_dependence_processor(uchar * arg)
}
-bool Item_ident::collect_outer_ref_processor(uchar *param)
+bool Item_ident::collect_outer_ref_processor(void *param)
{
Collect_deps_prm *prm= (Collect_deps_prm *)param;
if (depended_from &&
@@ -877,7 +868,7 @@ bool Item_ident::collect_outer_ref_processor(uchar *param)
for the subsequent items.
*/
-bool Item_field::collect_item_field_processor(uchar *arg)
+bool Item_field::collect_item_field_processor(void *arg)
{
DBUG_ENTER("Item_field::collect_item_field_processor");
DBUG_PRINT("info", ("%s", field->field_name ? field->field_name : "noname"));
@@ -894,7 +885,7 @@ bool Item_field::collect_item_field_processor(uchar *arg)
}
-bool Item_field::add_field_to_set_processor(uchar *arg)
+bool Item_field::add_field_to_set_processor(void *arg)
{
DBUG_ENTER("Item_field::add_field_to_set_processor");
DBUG_PRINT("info", ("%s", field->field_name ? field->field_name : "noname"));
@@ -904,6 +895,34 @@ bool Item_field::add_field_to_set_processor(uchar *arg)
DBUG_RETURN(FALSE);
}
+
+/**
+ Rename fields in an expression to the new field name as specified by ALTER TABLE
+*/
+
+bool Item_field::rename_fields_processor(void *arg)
+{
+ Item::func_processor_rename *rename= (Item::func_processor_rename*) arg;
+ List_iterator<Create_field> def_it(rename->fields);
+ Create_field *def;
+
+ while ((def=def_it++))
+ {
+ if (def->change &&
+ (!db_name || !db_name[0] ||
+ !my_strcasecmp(table_alias_charset, db_name, rename->db_name.str)) &&
+ (!table_name || !table_name[0] ||
+ !my_strcasecmp(table_alias_charset, table_name, rename->table_name.str)) &&
+ !my_strcasecmp(system_charset_info, field_name, def->change))
+ {
+ field_name= def->field_name;
+ break;
+ }
+ }
+ return 0;
+}
+
+
/**
Check if an Item_field references some field from a list of fields.
@@ -920,7 +939,7 @@ bool Item_field::add_field_to_set_processor(uchar *arg)
FALSE otherwise
*/
-bool Item_field::find_item_in_field_list_processor(uchar *arg)
+bool Item_field::find_item_in_field_list_processor(void *arg)
{
KEY_PART_INFO *first_non_group_part= *((KEY_PART_INFO **) arg);
KEY_PART_INFO *last_part= *(((KEY_PART_INFO **) arg) + 1);
@@ -940,18 +959,21 @@ bool Item_field::find_item_in_field_list_processor(uchar *arg)
NOTES
This is used by filesort to register used fields in a a temporary
- column read set or to register used fields in a view
+ column read set or to register used fields in a view or check constraint
*/
-bool Item_field::register_field_in_read_map(uchar *arg)
+bool Item_field::register_field_in_read_map(void *arg)
{
TABLE *table= (TABLE *) arg;
+ int res= 0;
+ if (field->vcol_info &&
+ !bitmap_fast_test_and_set(field->table->vcol_set, field->field_index))
+ {
+ res= field->vcol_info->expr->walk(&Item::register_field_in_read_map,1,arg);
+ }
if (field->table == table || !table)
bitmap_set_bit(field->table->read_set, field->field_index);
- if (field->vcol_info && field->vcol_info->expr_item)
- return field->vcol_info->expr_item->walk(&Item::register_field_in_read_map,
- 1, arg);
- return 0;
+ return res;
}
/*
@@ -959,7 +981,7 @@ bool Item_field::register_field_in_read_map(uchar *arg)
Mark field in bitmap supplied as *arg
*/
-bool Item_field::register_field_in_bitmap(uchar *arg)
+bool Item_field::register_field_in_bitmap(void *arg)
{
MY_BITMAP *bitmap= (MY_BITMAP *) arg;
DBUG_ASSERT(bitmap);
@@ -975,7 +997,7 @@ bool Item_field::register_field_in_bitmap(uchar *arg)
This is used by UPDATE to register underlying fields of used view fields.
*/
-bool Item_field::register_field_in_write_map(uchar *arg)
+bool Item_field::register_field_in_write_map(void *arg)
{
TABLE *table= (TABLE *) arg;
if (field->table == table || !table)
@@ -983,6 +1005,64 @@ bool Item_field::register_field_in_write_map(uchar *arg)
return 0;
}
+/**
+ Check that we are not referring to any not yet initialized fields
+
+ Fields are initialized in this order:
+ - All fields that have default value as a constant are initialized first.
+ - Then user-specified values from the INSERT list
+ - Then all fields that has a default expression, in field_index order.
+ - Then all virtual fields, in field_index order.
+ - Then auto-increment values
+
+ This means:
+ - For default fields we can't access the same field or a field after
+ itself that doesn't have a non-constant default value.
+ - A virtual field can't access itself or a virtual field after itself.
+ - user-specified values will not see virtual fields or default expressions,
+ as in INSERT t1 (a) VALUES (b);
+ - no virtual fields can access auto-increment values
+
+ This is used by fix_vcol_expr() when a table is opened
+
+ We don't have to check fields that are marked as NO_DEFAULT_VALUE
+ as the upper level will ensure that all these will be given a value.
+*/
+
+bool Item_field::check_field_expression_processor(void *arg)
+{
+ Field *org_field= (Field*) arg;
+ if (field->flags & NO_DEFAULT_VALUE_FLAG)
+ return 0;
+ if ((field->default_value && field->default_value->flags) || field->vcol_info)
+ {
+ if (field == org_field ||
+ (!org_field->vcol_info && field->vcol_info) ||
+ (((field->vcol_info && org_field->vcol_info) ||
+ (!field->vcol_info && !org_field->vcol_info)) &&
+ field->field_index >= org_field->field_index))
+ {
+ my_error(ER_EXPRESSION_REFERS_TO_UNINIT_FIELD,
+ MYF(0),
+ org_field->field_name, field->field_name);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+bool Item_field::update_vcol_processor(void *arg)
+{
+ MY_BITMAP *map= (MY_BITMAP *) arg;
+ if (field->vcol_info &&
+ !bitmap_fast_test_and_set(map, field->field_index))
+ {
+ field->vcol_info->expr->walk(&Item::update_vcol_processor, 0, arg);
+ field->vcol_info->expr->save_in_field(field, 0);
+ }
+ return 0;
+}
+
bool Item::check_cols(uint c)
{
@@ -995,7 +1075,7 @@ bool Item::check_cols(uint c)
}
-void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
+void Item::set_name(THD *thd, const char *str, uint length, CHARSET_INFO *cs)
{
if (!length)
{
@@ -1009,7 +1089,7 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
if (!cs->ctype || cs->mbminlen > 1)
{
str+= cs->cset->scan(cs, str, str + length, MY_SEQ_SPACES);
- length-= str - str_start;
+ length-= (uint)(str - str_start);
}
else
{
@@ -1026,7 +1106,6 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
if (str != str_start && !is_autogenerated_name)
{
char buff[SAFE_NAME_LEN];
- THD *thd= current_thd;
strmake(buff, str_start,
MY_MIN(sizeof(buff)-1, length + (int) (str-str_start)));
@@ -1044,28 +1123,29 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
if (!my_charset_same(cs, system_charset_info))
{
size_t res_length;
- name= sql_strmake_with_convert(str, length, cs,
+ name= sql_strmake_with_convert(thd, str, length, cs,
MAX_ALIAS_NAME, system_charset_info,
&res_length);
name_length= res_length;
}
else
- name= sql_strmake(str, (name_length= MY_MIN(length,MAX_ALIAS_NAME)));
+ name= thd->strmake(str, (name_length= MY_MIN(length,MAX_ALIAS_NAME)));
}
-void Item::set_name_no_truncate(const char *str, uint length, CHARSET_INFO *cs)
+void Item::set_name_no_truncate(THD *thd, const char *str, uint length,
+ CHARSET_INFO *cs)
{
if (!my_charset_same(cs, system_charset_info))
{
size_t res_length;
- name= sql_strmake_with_convert(str, length, cs,
+ name= sql_strmake_with_convert(thd, str, length, cs,
UINT_MAX, system_charset_info,
&res_length);
name_length= res_length;
}
else
- name= sql_strmake(str, (name_length= length));
+ name= thd->strmake(str, (name_length= length));
}
@@ -1074,7 +1154,7 @@ void Item::set_name_for_rollback(THD *thd, const char *str, uint length,
{
char *old_name, *new_name;
old_name= name;
- set_name(str, length, cs);
+ set_name(thd, str, length, cs);
new_name= name;
if (old_name != new_name)
{
@@ -1132,6 +1212,7 @@ Item *Item::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
TODO: we should eventually check all other use cases of change_item_tree().
Perhaps some more potentially dangerous substitution examples exist.
*/
+
Item *Item_cache::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
{
if (!example)
@@ -1160,6 +1241,7 @@ Item *Item_cache::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
the latter returns a non-fixed Item, so val_str() crashes afterwards.
Override Item_num method, to return a fixed item.
*/
+
Item *Item_num::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
{
/*
@@ -1285,6 +1367,7 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
}
if (null_value || int_to_datetime_with_warn(neg, neg ? -value : value,
ltime, fuzzydate,
+ field_table_or_null(),
field_name_or_null()))
goto err;
return null_value= false;
@@ -1293,6 +1376,7 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
{
double value= val_real();
if (null_value || double_to_datetime_with_warn(value, ltime, fuzzydate,
+ field_table_or_null(),
field_name_or_null()))
goto err;
return null_value= false;
@@ -1302,6 +1386,7 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
my_decimal value, *res;
if (!(res= val_decimal(&value)) ||
decimal_to_datetime_with_warn(res, ltime, fuzzydate,
+ field_table_or_null(),
field_name_or_null()))
goto err;
return null_value= false;
@@ -1389,7 +1474,7 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions)
THD *thd= table->in_use;
enum_check_fields tmp= thd->count_cuted_fields;
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
- ulonglong sql_mode= thd->variables.sql_mode;
+ sql_mode_t sql_mode= thd->variables.sql_mode;
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
@@ -1402,6 +1487,43 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions)
return res;
}
+#ifndef DBUG_OFF
+static inline
+void mark_unsupported_func(const char *where, const char *processor_name)
+{
+ char buff[64];
+ my_snprintf(buff, sizeof(buff), "%s::%s", where ? where: "", processor_name);
+ DBUG_ENTER(buff);
+ my_snprintf(buff, sizeof(buff), "%s returns TRUE: unsupported function", processor_name);
+ DBUG_PRINT("info", ("%s", buff));
+ DBUG_VOID_RETURN;
+}
+#else
+#define mark_unsupported_func(X,Y) {}
+#endif
+
+bool mark_unsupported_function(const char *where, void *store, uint result)
+{
+ Item::vcol_func_processor_result *res=
+ (Item::vcol_func_processor_result*) store;
+ uint old_errors= res->errors;
+ mark_unsupported_func(where, "check_vcol_func_processor");
+ res->errors|= result; /* Store type of expression */
+ /* Store the name to the highest violation (normally VCOL_IMPOSSIBLE) */
+ if (result > old_errors)
+ res->name= where ? where : "";
+ return false;
+}
+
+/* convenience helper for mark_unsupported_function() above */
+bool mark_unsupported_function(const char *w1, const char *w2,
+ void *store, uint result)
+{
+ char *ptr= (char*)current_thd->alloc(strlen(w1) + strlen(w2) + 1);
+ if (ptr)
+ strxmov(ptr, w1, w2, NullS);
+ return mark_unsupported_function(ptr, store, result);
+}
/*****************************************************************************
Item_sp_variable methods
@@ -1432,7 +1554,7 @@ bool Item_sp_variable::fix_fields(THD *thd, Item **)
decimals= it->decimals;
unsigned_flag= it->unsigned_flag;
with_param= 1;
- if (thd->lex->current_select->master_unit()->item)
+ if (thd->lex->current_select && thd->lex->current_select->master_unit()->item)
thd->lex->current_select->master_unit()->item->with_param= 1;
fixed= 1;
collation.set(it->collation.collation, it->collation.derivation);
@@ -1529,8 +1651,7 @@ Item_splocal::Item_splocal(THD *thd, const LEX_STRING &sp_var_name,
sp_var_type= real_type_to_type(sp_var_type);
m_type= sp_map_item_type(sp_var_type);
- m_field_type= sp_var_type;
- m_result_type= sp_map_result_type(sp_var_type);
+ set_handler_by_field_type(sp_var_type);
}
@@ -1679,7 +1800,7 @@ Item_name_const::Item_name_const(THD *thd, Item *name_arg, Item *val):
if (!name_item->basic_const_item() ||
!(name_str= name_item->val_str(&name_buffer))) // Can't have a NULL name
goto err;
- set_name(name_str->ptr(), name_str->length(), name_str->charset());
+ set_name(thd, name_str->ptr(), name_str->length(), name_str->charset());
if (value_item->basic_const_item())
return; // ok
@@ -1741,8 +1862,10 @@ Item::Type Item_name_const::type() const
bool Item_name_const::fix_fields(THD *thd, Item **ref)
{
- if (value_item->fix_fields(thd, &value_item) ||
- name_item->fix_fields(thd, &name_item) ||
+ if ((!value_item->fixed &&
+ value_item->fix_fields(thd, &value_item)) ||
+ (!name_item->fixed &&
+ name_item->fix_fields(thd, &name_item)) ||
!value_item->const_item() ||
!name_item->const_item())
{
@@ -1823,7 +1946,7 @@ public:
thd->fatal_error() may be called if we are out of memory
*/
-void Item::split_sum_func2(THD *thd, Item **ref_pointer_array,
+void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, Item **ref,
uint split_flags)
{
@@ -1834,6 +1957,21 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array,
((Item_sum *) this)->ref_by)
return;
}
+ else if (type() == WINDOW_FUNC_ITEM || with_window_func)
+ {
+ /*
+ Skip the else part, window functions are very special functions:
+ they need to have their own fields in the temp. table, but they
+      need to be processed differently than regular aggregate functions
+
+ Call split_sum_func here so that each argument gets its fields to
+ point to the temporary table.
+ */
+ split_sum_func(thd, ref_pointer_array, fields, split_flags);
+ if (type() == FUNC_ITEM) {
+ return;
+ }
+ }
else
{
/* Not a SUM() function */
@@ -1858,7 +1996,6 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array,
}
if (unlikely((!(used_tables() & ~PARAM_TABLE_BIT) ||
- type() == SUBSELECT_ITEM ||
(type() == REF_ITEM &&
((Item_ref*)this)->ref_type() != Item_ref::VIEW_REF))))
return;
@@ -1874,7 +2011,7 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array,
Exception is Item_direct_view_ref which we need to convert to
Item_ref to allow fields from view being stored in tmp table.
*/
- Item_aggregate_ref *item_ref;
+ Item_ref *item_ref;
uint el= fields.elements;
/*
If this is an item_ref, get the original item
@@ -1884,13 +2021,24 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array,
Item *real_itm= real_item();
ref_pointer_array[el]= real_itm;
- if (!(item_ref= (new (thd->mem_root)
- Item_aggregate_ref(thd,
- &thd->lex->current_select->context,
- ref_pointer_array + el, 0, name))))
- return; // fatal_error is set
+ if (type() == WINDOW_FUNC_ITEM)
+ {
+ if (!(item_ref= (new (thd->mem_root)
+ Item_direct_ref(thd,
+ &thd->lex->current_select->context,
+ &ref_pointer_array[el], 0, name))))
+ return; // fatal_error is set
+ }
+ else
+ {
+ if (!(item_ref= (new (thd->mem_root)
+ Item_aggregate_ref(thd,
+ &thd->lex->current_select->context,
+ &ref_pointer_array[el], 0, name))))
+ return; // fatal_error is set
+ }
if (type() == SUM_FUNC_ITEM)
- item_ref->depended_from= ((Item_sum *) this)->depended_from();
+ item_ref->depended_from= ((Item_sum *) this)->depended_from();
fields.push_front(real_itm);
thd->change_item_tree(ref, item_ref);
}
@@ -2035,6 +2183,9 @@ bool DTCollation::aggregate(const DTCollation &dt, uint flags)
set(0, DERIVATION_NONE, 0);
return 1;
}
+ if (collation->state & MY_CS_BINSORT &&
+ dt.collation->state & MY_CS_BINSORT)
+ return 1;
if (collation->state & MY_CS_BINSORT)
return 0;
if (dt.collation->state & MY_CS_BINSORT)
@@ -2217,7 +2368,84 @@ bool Item_func_or_sum::agg_item_set_converter(const DTCollation &coll,
}
-void Item_ident_for_show::make_field(Send_field *tmp_field)
+/**
+ @brief
+ Building clone for Item_func_or_sum
+
+ @param thd thread handle
+ @param mem_root part of the memory for the clone
+
+ @details
+    This method gets a copy of the current item and also
+    builds clones for its references. For the references
+    build_clone is called again.
+
+ @retval
+ clone of the item
+    0 if an error occurred
+*/
+
+Item* Item_func_or_sum::build_clone(THD *thd, MEM_ROOT *mem_root)
+{
+ Item_func_or_sum *copy= (Item_func_or_sum *) get_copy(thd, mem_root);
+ if (!copy)
+ return 0;
+ if (arg_count > 2)
+ {
+ copy->args=
+ (Item**) alloc_root(mem_root, sizeof(Item*) * arg_count);
+ if (!copy->args)
+ return 0;
+ }
+ else if (arg_count > 0)
+ copy->args= copy->tmp_arg;
+
+
+ for (uint i= 0; i < arg_count; i++)
+ {
+ Item *arg_clone= args[i]->build_clone(thd, mem_root);
+ if (!arg_clone)
+ return 0;
+ copy->args[i]= arg_clone;
+ }
+ return copy;
+}
+
+
+/**
+ @brief
+ Building clone for Item_ref
+
+ @param thd thread handle
+ @param mem_root part of the memory for the clone
+
+ @details
+    This method gets a copy of the current item and also
+    builds a clone for its reference.
+
+ @retval
+ clone of the item
+    0 if an error occurred
+*/
+
+Item* Item_ref::build_clone(THD *thd, MEM_ROOT *mem_root)
+{
+ Item_ref *copy= (Item_ref *) get_copy(thd, mem_root);
+ if (!copy)
+ return 0;
+ copy->ref=
+ (Item**) alloc_root(mem_root, sizeof(Item*));
+ if (!copy->ref)
+ return 0;
+ Item *item_clone= (* ref)->build_clone(thd, mem_root);
+ if (!item_clone)
+ return 0;
+ *copy->ref= item_clone;
+ return copy;
+}
+
+
+void Item_ident_for_show::make_field(THD *thd, Send_field *tmp_field)
{
tmp_field->table_name= tmp_field->org_table_name= table_name;
tmp_field->db_name= db_name;
@@ -2416,14 +2644,39 @@ void Item_field::reset_field(Field *f)
}
-bool Item_field::enumerate_field_refs_processor(uchar *arg)
+void Item_field::load_data_print_for_log_event(THD *thd, String *to) const
+{
+ append_identifier(thd, to, name, (uint) strlen(name));
+}
+
+
+bool Item_field::load_data_set_no_data(THD *thd, const Load_data_param *param)
+{
+ if (field->load_data_set_no_data(thd, param->is_fixed_length()))
+ return true;
+ /*
+ TODO: We probably should not throw warning for each field.
+          But what about the intention to always have the same number
+ of warnings in THD::cuted_fields (and get rid of cuted_fields
+ in the end ?)
+ */
+ thd->cuted_fields++;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WARN_TOO_FEW_RECORDS,
+ ER_THD(thd, ER_WARN_TOO_FEW_RECORDS),
+ thd->get_stmt_da()->current_row_for_warning());
+ return false;
+}
+
+
+bool Item_field::enumerate_field_refs_processor(void *arg)
{
Field_enumerator *fe= (Field_enumerator*)arg;
fe->visit_field(this);
return FALSE;
}
-bool Item_field::update_table_bitmaps_processor(uchar *arg)
+bool Item_field::update_table_bitmaps_processor(void *arg)
{
update_table_bitmaps();
return FALSE;
@@ -2439,7 +2692,7 @@ static inline void set_field_to_new_field(Field **field, Field **new_field)
}
}
-bool Item_field::switch_to_nullable_fields_processor(uchar *arg)
+bool Item_field::switch_to_nullable_fields_processor(void *arg)
{
Field **new_fields= (Field **)arg;
set_field_to_new_field(&field, new_fields);
@@ -2481,16 +2734,49 @@ void Item_ident::print(String *str, enum_query_type query_type)
THD *thd= current_thd;
char d_name_buff[MAX_ALIAS_NAME], t_name_buff[MAX_ALIAS_NAME];
const char *d_name= db_name, *t_name= table_name;
+ bool use_table_name= table_name && table_name[0];
+ bool use_db_name= use_table_name && db_name && db_name[0] && !alias_name_used;
+
+ if (use_db_name && (query_type & QT_ITEM_IDENT_SKIP_DB_NAMES))
+ use_db_name= !thd->db || strcmp(thd->db, db_name);
+
+ if (use_db_name)
+ use_db_name= !(cached_table && cached_table->belong_to_view &&
+ cached_table->belong_to_view->compact_view_format);
+
+ if (use_table_name && (query_type & QT_ITEM_IDENT_SKIP_TABLE_NAMES))
+ {
+ /*
+ Don't print the table name if it's the only table in the context
+ XXX technically, that's a sufficient, but too strong condition
+ */
+ if (!context)
+ use_db_name= use_table_name= false;
+ else if (context->outer_context)
+ use_table_name= true;
+ else if (context->last_name_resolution_table == context->first_name_resolution_table)
+ use_db_name= use_table_name= false;
+ else if (!context->last_name_resolution_table &&
+ !context->first_name_resolution_table->next_name_resolution_table)
+ use_db_name= use_table_name= false;
+ }
+
+ if (!field_name || !field_name[0])
+ {
+ append_identifier(thd, str, STRING_WITH_LEN("tmp_field"));
+ return;
+ }
+
if (lower_case_table_names== 1 ||
(lower_case_table_names == 2 && !alias_name_used))
{
- if (table_name && table_name[0])
+ if (use_table_name)
{
strmov(t_name_buff, table_name);
my_casedn_str(files_charset_info, t_name_buff);
t_name= t_name_buff;
}
- if (db_name && db_name[0])
+ if (use_db_name)
{
strmov(d_name_buff, db_name);
my_casedn_str(files_charset_info, d_name_buff);
@@ -2498,43 +2784,18 @@ void Item_ident::print(String *str, enum_query_type query_type)
}
}
- if (!table_name || !field_name || !field_name[0])
+ if (use_db_name)
{
- const char *nm= (field_name && field_name[0]) ?
- field_name : name ? name : "tmp_field";
- append_identifier(thd, str, nm, (uint) strlen(nm));
- return;
- }
- if (db_name && db_name[0] && !alias_name_used)
- {
- /*
- When printing EXPLAIN, don't print database name when it's the same as
- current database.
- */
- bool skip_db= (query_type & QT_ITEM_IDENT_SKIP_CURRENT_DATABASE) &&
- thd->db && !strcmp(thd->db, db_name);
- if (!skip_db &&
- !(cached_table && cached_table->belong_to_view &&
- cached_table->belong_to_view->compact_view_format))
- {
- append_identifier(thd, str, d_name, (uint)strlen(d_name));
- str->append('.');
- }
- append_identifier(thd, str, t_name, (uint)strlen(t_name));
+ append_identifier(thd, str, d_name, (uint)strlen(d_name));
str->append('.');
- append_identifier(thd, str, field_name, (uint)strlen(field_name));
+ DBUG_ASSERT(use_table_name);
}
- else
+ if (use_table_name)
{
- if (table_name[0])
- {
- append_identifier(thd, str, t_name, (uint) strlen(t_name));
- str->append('.');
- append_identifier(thd, str, field_name, (uint) strlen(field_name));
- }
- else
- append_identifier(thd, str, field_name, (uint) strlen(field_name));
+ append_identifier(thd, str, t_name, (uint) strlen(t_name));
+ str->append('.');
}
+ append_identifier(thd, str, field_name, (uint) strlen(field_name));
}
/* ARGSUSED */
@@ -2834,6 +3095,14 @@ void Item_int::print(String *str, enum_query_type query_type)
}
+Item *Item_bool::neg_transformer(THD *thd)
+{
+ value= !value;
+ name= 0;
+ return this;
+}
+
+
Item_uint::Item_uint(THD *thd, const char *str_arg, uint length):
Item_int(thd, str_arg, length)
{
@@ -3169,13 +3438,22 @@ default_set_param_func(Item_param *param,
Item_param::Item_param(THD *thd, uint pos_in_query_arg):
Item_basic_value(thd),
Rewritable_query_parameter(pos_in_query_arg, 1),
+ Type_handler_hybrid_field_type(MYSQL_TYPE_VARCHAR),
state(NO_VALUE),
- item_result_type(STRING_RESULT),
/* Don't pretend to be a literal unless value for this item is set. */
item_type(PARAM_ITEM),
- param_type(MYSQL_TYPE_VARCHAR),
+ indicator(STMT_INDICATOR_NONE),
set_param_func(default_set_param_func),
- m_out_param_info(NULL)
+ m_out_param_info(NULL),
+ /*
+ Set m_is_settable_routine_parameter to "true" by default.
+ This is needed for client-server protocol,
+ whose parameters are always settable.
+ For dynamic SQL, settability depends on the type of Item passed
+ as an actual parameter. See Item_param::set_from_item().
+ */
+ m_is_settable_routine_parameter(true),
+ m_clones(thd->mem_root)
{
name= (char*) "?";
/*
@@ -3187,10 +3465,63 @@ Item_param::Item_param(THD *thd, uint pos_in_query_arg):
}
+/* Add reference to Item_param used in a copy of CTE to its master as a clone */
+
+bool Item_param::add_as_clone(THD *thd)
+{
+ LEX *lex= thd->lex;
+ uint master_pos= pos_in_query + lex->clone_spec_offset;
+ List_iterator_fast<Item_param> it(lex->param_list);
+ Item_param *master_param;
+ while ((master_param = it++))
+ {
+ if (master_pos == master_param->pos_in_query)
+ return master_param->register_clone(this);
+ }
+ DBUG_ASSERT(false);
+ return false;
+}
+
+
+/* Update all clones of Item_param to sync their values with the item's value */
+
+void Item_param::sync_clones()
+{
+ Item_param **c_ptr= m_clones.begin();
+ Item_param **end= m_clones.end();
+ for ( ; c_ptr < end; c_ptr++)
+ {
+ Item_param *c= *c_ptr;
+ /* Scalar-type members: */
+ c->maybe_null= maybe_null;
+ c->null_value= null_value;
+ c->max_length= max_length;
+ c->decimals= decimals;
+ c->state= state;
+ c->item_type= item_type;
+ c->set_param_func= set_param_func;
+ c->value= value;
+ c->unsigned_flag= unsigned_flag;
+ /* Class-type members: */
+ c->decimal_value= decimal_value;
+ /*
+ Note that String's assignment op properly sets m_is_alloced to 'false',
+ which is correct here: c->str_value doesn't own anything.
+ */
+ c->str_value= str_value;
+ c->str_value_ptr= str_value_ptr;
+ c->collation= collation;
+ }
+}
+
+
void Item_param::set_null()
{
DBUG_ENTER("Item_param::set_null");
- /* These are cleared after each execution by reset() method */
+ /*
+ These are cleared after each execution by reset() method or by setting
+ other value.
+ */
null_value= 1;
/*
Because of NULL and string values we need to set max_length for each new
@@ -3200,7 +3531,7 @@ void Item_param::set_null()
max_length= 0;
decimals= 0;
state= NULL_VALUE;
- item_type= Item::NULL_ITEM;
+ fix_type(Item::NULL_ITEM);
DBUG_VOID_RETURN;
}
@@ -3209,9 +3540,12 @@ void Item_param::set_int(longlong i, uint32 max_length_arg)
DBUG_ENTER("Item_param::set_int");
value.integer= (longlong) i;
state= INT_VALUE;
+ collation.set_numeric();
max_length= max_length_arg;
decimals= 0;
maybe_null= 0;
+ null_value= 0;
+ fix_type(Item::INT_ITEM);
DBUG_VOID_RETURN;
}
@@ -3220,9 +3554,12 @@ void Item_param::set_double(double d)
DBUG_ENTER("Item_param::set_double");
value.real= d;
state= REAL_VALUE;
+ collation.set_numeric();
max_length= DBL_DIG + 8;
decimals= NOT_FIXED_DEC;
maybe_null= 0;
+ null_value= 0;
+ fix_type(Item::REAL_ITEM);
DBUG_VOID_RETURN;
}
@@ -3248,25 +3585,55 @@ void Item_param::set_decimal(const char *str, ulong length)
str2my_decimal(E_DEC_FATAL_ERROR, str, &decimal_value, &end);
state= DECIMAL_VALUE;
decimals= decimal_value.frac;
+ collation.set_numeric();
max_length=
my_decimal_precision_to_length_no_truncation(decimal_value.precision(),
decimals, unsigned_flag);
maybe_null= 0;
+ null_value= 0;
+ fix_type(Item::DECIMAL_ITEM);
DBUG_VOID_RETURN;
}
-void Item_param::set_decimal(const my_decimal *dv)
+void Item_param::set_decimal(const my_decimal *dv, bool unsigned_arg)
{
state= DECIMAL_VALUE;
my_decimal2decimal(dv, &decimal_value);
decimals= (uint8) decimal_value.frac;
- unsigned_flag= !decimal_value.sign();
+ collation.set_numeric();
+ unsigned_flag= unsigned_arg;
max_length= my_decimal_precision_to_length(decimal_value.intg + decimals,
decimals, unsigned_flag);
+ maybe_null= 0;
+ null_value= 0;
+ fix_type(Item::DECIMAL_ITEM);
}
+
+void Item_param::fix_temporal(uint32 max_length_arg, uint decimals_arg)
+{
+ state= TIME_VALUE;
+ collation.set_numeric();
+ max_length= max_length_arg;
+ decimals= decimals_arg;
+ maybe_null= 0;
+ null_value= 0;
+ fix_type(Item::DATE_ITEM);
+}
+
+
+void Item_param::set_time(const MYSQL_TIME *tm,
+ uint32 max_length_arg, uint decimals_arg)
+{
+ value.time= *tm;
+ maybe_null= 0;
+ null_value= 0;
+ fix_temporal(max_length_arg, decimals_arg);
+}
+
+
/**
Set parameter value from MYSQL_TIME value.
@@ -3292,14 +3659,13 @@ void Item_param::set_time(MYSQL_TIME *tm, timestamp_type time_type,
{
ErrConvTime str(&value.time);
make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &str, time_type, 0);
- set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR);
+ &str, time_type, 0, 0);
+ set_zero_time(&value.time, time_type);
}
-
- state= TIME_VALUE;
maybe_null= 0;
- max_length= max_length_arg;
- decimals= tm->second_part > 0 ? TIME_SECOND_PART_DIGITS : 0;
+ null_value= 0;
+ fix_temporal(max_length_arg,
+ tm->second_part > 0 ? TIME_SECOND_PART_DIGITS : 0);
DBUG_VOID_RETURN;
}
@@ -3318,8 +3684,10 @@ bool Item_param::set_str(const char *str, ulong length)
state= STRING_VALUE;
max_length= length;
maybe_null= 0;
+ null_value= 0;
/* max_length and decimals are set after charset conversion */
/* sic: str may be not null-terminated, don't add DBUG_PRINT here */
+ fix_type(Item::STRING_ITEM);
DBUG_RETURN(FALSE);
}
@@ -3351,16 +3719,44 @@ bool Item_param::set_longdata(const char *str, ulong length)
DBUG_RETURN(TRUE);
state= LONG_DATA_VALUE;
maybe_null= 0;
+ null_value= 0;
+ fix_type(Item::STRING_ITEM);
DBUG_RETURN(FALSE);
}
+void Item_param::CONVERSION_INFO::set(THD *thd, CHARSET_INFO *fromcs)
+{
+ CHARSET_INFO *tocs= thd->variables.collation_connection;
+
+ character_set_of_placeholder= fromcs;
+ character_set_client= thd->variables.character_set_client;
+ /*
+ Setup source and destination character sets so that they
+ are different only if conversion is necessary: this will
+ make later checks easier.
+ */
+ uint32 dummy_offset;
+ final_character_set_of_str_value=
+ String::needs_conversion(0, fromcs, tocs, &dummy_offset) ?
+ tocs : fromcs;
+}
+
+
+bool Item_param::CONVERSION_INFO::convert(THD *thd, String *str)
+{
+ return thd->convert_string(str,
+ character_set_of_placeholder,
+ final_character_set_of_str_value);
+}
+
+
/**
- Set parameter value from user variable value.
+ Set parameter value from Item.
@param thd Current thread
- @param entry User variable structure (NULL means use NULL value)
+ @param item Item
@retval
0 OK
@@ -3368,73 +3764,65 @@ bool Item_param::set_longdata(const char *str, ulong length)
1 Out of memory
*/
-bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry)
+bool Item_param::set_from_item(THD *thd, Item *item)
{
- DBUG_ENTER("Item_param::set_from_user_var");
- if (entry && entry->value)
+ DBUG_ENTER("Item_param::set_from_item");
+ m_is_settable_routine_parameter= item->get_settable_routine_parameter();
+ if (limit_clause_param)
{
- item_result_type= entry->type;
- unsigned_flag= entry->unsigned_flag;
- if (limit_clause_param)
+ longlong val= item->val_int();
+ if (item->null_value)
+ {
+ set_null();
+ DBUG_RETURN(false);
+ }
+ else
{
- bool unused;
- set_int(entry->val_int(&unused), MY_INT64_NUM_DECIMAL_DIGITS);
- item_type= Item::INT_ITEM;
+ unsigned_flag= item->unsigned_flag;
+ set_int(val, MY_INT64_NUM_DECIMAL_DIGITS);
+ set_handler_by_result_type(item->result_type());
DBUG_RETURN(!unsigned_flag && value.integer < 0 ? 1 : 0);
}
- switch (item_result_type) {
+ }
+ struct st_value tmp;
+ if (!item->store(&tmp, 0))
+ {
+ unsigned_flag= item->unsigned_flag;
+ switch (item->cmp_type()) {
case REAL_RESULT:
- set_double(*(double*)entry->value);
- item_type= Item::REAL_ITEM;
- param_type= MYSQL_TYPE_DOUBLE;
+ set_double(tmp.value.m_double);
+ set_handler_by_field_type(MYSQL_TYPE_DOUBLE);
break;
case INT_RESULT:
- set_int(*(longlong*)entry->value, MY_INT64_NUM_DECIMAL_DIGITS);
- item_type= Item::INT_ITEM;
- param_type= MYSQL_TYPE_LONGLONG;
+ set_int(tmp.value.m_longlong, MY_INT64_NUM_DECIMAL_DIGITS);
+ set_handler_by_field_type(MYSQL_TYPE_LONGLONG);
break;
case STRING_RESULT:
{
- CHARSET_INFO *fromcs= entry->charset();
- CHARSET_INFO *tocs= thd->variables.collation_connection;
- uint32 dummy_offset;
-
- value.cs_info.character_set_of_placeholder= fromcs;
- value.cs_info.character_set_client= thd->variables.character_set_client;
- /*
- Setup source and destination character sets so that they
- are different only if conversion is necessary: this will
- make later checks easier.
- */
- value.cs_info.final_character_set_of_str_value=
- String::needs_conversion(0, fromcs, tocs, &dummy_offset) ?
- tocs : fromcs;
+ value.cs_info.set(thd, item->collation.collation);
/*
Exact value of max_length is not known unless data is converted to
charset of connection, so we have to set it later.
*/
- item_type= Item::STRING_ITEM;
- param_type= MYSQL_TYPE_VARCHAR;
+ set_handler_by_field_type(MYSQL_TYPE_VARCHAR);
- if (set_str((const char *)entry->value, entry->length))
+ if (set_str(tmp.m_string.ptr(), tmp.m_string.length()))
DBUG_RETURN(1);
break;
}
case DECIMAL_RESULT:
{
- const my_decimal *ent_value= (const my_decimal *)entry->value;
- my_decimal2decimal(ent_value, &decimal_value);
- state= DECIMAL_VALUE;
- decimals= ent_value->frac;
- max_length=
- my_decimal_precision_to_length_no_truncation(ent_value->precision(),
- decimals, unsigned_flag);
- item_type= Item::DECIMAL_ITEM;
- param_type= MYSQL_TYPE_NEWDECIMAL;
+ set_decimal(&tmp.m_decimal, unsigned_flag);
+ set_handler_by_field_type(MYSQL_TYPE_NEWDECIMAL);
break;
}
- case ROW_RESULT:
case TIME_RESULT:
+ {
+ set_time(&tmp.value.m_time, item->max_length, item->decimals);
+ set_handler(item->type_handler());
+ break;
+ }
+ case ROW_RESULT:
DBUG_ASSERT(0);
set_null();
}
@@ -3471,6 +3859,7 @@ void Item_param::reset()
state= NO_VALUE;
maybe_null= 1;
null_value= 0;
+ fixed= false;
/*
Don't reset item_type to PARAM_ITEM: it's only needed to guard
us from item optimizations at prepare stage, when item doesn't yet
@@ -3488,6 +3877,11 @@ int Item_param::save_in_field(Field *field, bool no_conversions)
{
field->set_notnull();
+ /*
+ There's no "default" intentionally, to make compiler complain
+ when adding a new XXX_VALUE value.
+ Garbage (e.g. in case of a memory overrun) is handled after the switch.
+ */
switch (state) {
case INT_VALUE:
return field->store(value.integer, unsigned_flag);
@@ -3504,14 +3898,30 @@ int Item_param::save_in_field(Field *field, bool no_conversions)
str_value.charset());
case NULL_VALUE:
return set_field_to_null_with_conversions(field, no_conversions);
+ case DEFAULT_VALUE:
+ return field->save_in_field_default_value(field->table->pos_in_table_list->
+ top_table() !=
+ field->table->pos_in_table_list);
+ case IGNORE_VALUE:
+ return field->save_in_field_ignore_value(field->table->pos_in_table_list->
+ top_table() !=
+ field->table->pos_in_table_list);
case NO_VALUE:
- default:
- DBUG_ASSERT(0);
+ DBUG_ASSERT(0); // Should not be possible
+ return true;
}
+ DBUG_ASSERT(0); // Garbage
return 1;
}
+void Item_param::invalid_default_param() const
+{
+ my_message(ER_INVALID_DEFAULT_PARAM,
+ ER_THD(current_thd, ER_INVALID_DEFAULT_PARAM), MYF(0));
+}
+
+
bool Item_param::get_date(MYSQL_TIME *res, ulonglong fuzzydate)
{
if (state == TIME_VALUE)
@@ -3525,6 +3935,7 @@ bool Item_param::get_date(MYSQL_TIME *res, ulonglong fuzzydate)
double Item_param::val_real()
{
+ // There's no "default". See comments in Item_param::save_in_field().
switch (state) {
case REAL_VALUE:
return value.real;
@@ -3547,23 +3958,27 @@ double Item_param::val_real()
time value for the placeholder.
*/
return TIME_to_double(&value.time);
+ case IGNORE_VALUE:
+ case DEFAULT_VALUE:
+ invalid_default_param();
+ // fall through
case NULL_VALUE:
return 0.0;
- default:
- DBUG_ASSERT(0);
+ case NO_VALUE:
+ DBUG_ASSERT(0); // Should not be possible
+ return 0.0;
}
+ DBUG_ASSERT(0); // Garbage
return 0.0;
}
longlong Item_param::val_int()
{
+ // There's no "default". See comments in Item_param::save_in_field().
switch (state) {
case REAL_VALUE:
- {
- bool error;
- return double_to_longlong(value.real, unsigned_flag, &error);
- }
+ return Converter_double_to_longlong(value.real, unsigned_flag).result();
case INT_VALUE:
return value.integer;
case DECIMAL_VALUE:
@@ -3579,17 +3994,24 @@ longlong Item_param::val_int()
}
case TIME_VALUE:
return (longlong) TIME_to_ulonglong(&value.time);
+ case IGNORE_VALUE:
+ case DEFAULT_VALUE:
+ invalid_default_param();
+ // fall through
case NULL_VALUE:
return 0;
- default:
- DBUG_ASSERT(0);
+ case NO_VALUE:
+ DBUG_ASSERT(0); // Should not be possible
+ return 0;
}
+ DBUG_ASSERT(0); // Garbage
return 0;
}
my_decimal *Item_param::val_decimal(my_decimal *dec)
{
+ // There's no "default". See comments in Item_param::save_in_field().
switch (state) {
case DECIMAL_VALUE:
return &decimal_value;
@@ -3606,17 +4028,24 @@ my_decimal *Item_param::val_decimal(my_decimal *dec)
{
return TIME_to_my_decimal(&value.time, dec);
}
+ case IGNORE_VALUE:
+ case DEFAULT_VALUE:
+ invalid_default_param();
+ // fall through
case NULL_VALUE:
- return 0;
- default:
- DBUG_ASSERT(0);
+ return 0;
+ case NO_VALUE:
+ DBUG_ASSERT(0); // Should not be possible
+ return 0;
}
+  DBUG_ASSERT(0); // Garbage
return 0;
}
String *Item_param::val_str(String* str)
{
+ // There's no "default". See comments in Item_param::save_in_field().
switch (state) {
case STRING_VALUE:
case LONG_DATA_VALUE:
@@ -3641,12 +4070,18 @@ String *Item_param::val_str(String* str)
str->set_charset(&my_charset_bin);
return str;
}
+ case IGNORE_VALUE:
+ case DEFAULT_VALUE:
+ invalid_default_param();
+ // fall through
case NULL_VALUE:
return NULL;
- default:
- DBUG_ASSERT(0);
+ case NO_VALUE:
+ DBUG_ASSERT(0); // Should not be possible
+ return NULL;
}
- return str;
+ DBUG_ASSERT(0); // Garbage
+ return NULL;
}
/**
@@ -3662,37 +4097,54 @@ String *Item_param::val_str(String* str)
const String *Item_param::query_val_str(THD *thd, String* str) const
{
+ // There's no "default". See comments in Item_param::save_in_field().
switch (state) {
case INT_VALUE:
str->set_int(value.integer, unsigned_flag, &my_charset_bin);
- break;
+ return str;
case REAL_VALUE:
str->set_real(value.real, NOT_FIXED_DEC, &my_charset_bin);
- break;
+ return str;
case DECIMAL_VALUE:
if (my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value,
0, 0, 0, str) > 1)
return &my_null_string;
- break;
+ return str;
case TIME_VALUE:
{
+ static const uint32 typelen= 9; // "TIMESTAMP" is the longest type name
char *buf, *ptr;
str->length(0);
/*
TODO: in case of error we need to notify replication
that binary log contains wrong statement
*/
- if (str->reserve(MAX_DATE_STRING_REP_LENGTH+3))
+ if (str->reserve(MAX_DATE_STRING_REP_LENGTH + 3 + typelen))
break;
/* Create date string inplace */
+ switch (value.time.time_type) {
+ case MYSQL_TIMESTAMP_DATE:
+ str->append(C_STRING_WITH_LEN("DATE"));
+ break;
+ case MYSQL_TIMESTAMP_TIME:
+ str->append(C_STRING_WITH_LEN("TIME"));
+ break;
+ case MYSQL_TIMESTAMP_DATETIME:
+ str->append(C_STRING_WITH_LEN("TIMESTAMP"));
+ break;
+ case MYSQL_TIMESTAMP_ERROR:
+ case MYSQL_TIMESTAMP_NONE:
+ break;
+ }
+ DBUG_ASSERT(str->length() <= typelen);
buf= str->c_ptr_quick();
- ptr= buf;
+ ptr= buf + str->length();
*ptr++= '\'';
ptr+= (uint) my_TIME_to_str(&value.time, ptr, decimals);
*ptr++= '\'';
str->length((uint32) (ptr - buf));
- break;
+ return str;
}
case STRING_VALUE:
case LONG_DATA_VALUE:
@@ -3701,14 +4153,19 @@ const String *Item_param::query_val_str(THD *thd, String* str) const
append_query_string(value.cs_info.character_set_client, str,
str_value.ptr(), str_value.length(),
thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES);
- break;
+ return str;
}
+ case IGNORE_VALUE:
+ case DEFAULT_VALUE:
+ return &my_default_string;
case NULL_VALUE:
return &my_null_string;
- default:
- DBUG_ASSERT(0);
+ case NO_VALUE:
+ DBUG_ASSERT(0); // Should not be possible
+ return NULL;
}
- return str;
+ DBUG_ASSERT(0); // Garbage
+ return NULL;
}
@@ -3722,21 +4179,7 @@ bool Item_param::convert_str_value(THD *thd)
bool rc= FALSE;
if (state == STRING_VALUE || state == LONG_DATA_VALUE)
{
- /*
- Check is so simple because all charsets were set up properly
- in setup_one_conversion_function, where typecode of
- placeholder was also taken into account: the variables are different
- here only if conversion is really necessary.
- */
- if (value.cs_info.final_character_set_of_str_value !=
- value.cs_info.character_set_of_placeholder)
- {
- rc= thd->convert_string(&str_value,
- value.cs_info.character_set_of_placeholder,
- value.cs_info.final_character_set_of_str_value);
- }
- else
- str_value.set_charset(value.cs_info.final_character_set_of_str_value);
+ rc= value.cs_info.convert_if_needed(thd, &str_value);
/* Here str_value is guaranteed to be in final_character_set_of_str_value */
/*
@@ -3754,6 +4197,7 @@ bool Item_param::convert_str_value(THD *thd)
bool Item_param::basic_const_item() const
{
+ DBUG_ASSERT(fixed || state == NO_VALUE);
if (state == NO_VALUE || state == TIME_VALUE)
return FALSE;
return TRUE;
@@ -3766,7 +4210,12 @@ Item *
Item_param::clone_item(THD *thd)
{
MEM_ROOT *mem_root= thd->mem_root;
+ // There's no "default". See comments in Item_param::save_in_field().
switch (state) {
+ case IGNORE_VALUE:
+ case DEFAULT_VALUE:
+ invalid_default_param();
+ // fall through
case NULL_VALUE:
return new (mem_root) Item_null(thd, name);
case INT_VALUE:
@@ -3776,6 +4225,8 @@ Item_param::clone_item(THD *thd)
case REAL_VALUE:
return new (mem_root) Item_float(thd, name, value.real, decimals,
max_length);
+ case DECIMAL_VALUE:
+ return 0; // Should create Item_decimal. See MDEV-11361.
case STRING_VALUE:
case LONG_DATA_VALUE:
return new (mem_root) Item_string(thd, name, str_value.c_ptr_quick(),
@@ -3783,11 +4234,11 @@ Item_param::clone_item(THD *thd)
collation.derivation,
collation.repertoire);
case TIME_VALUE:
- break;
+ return 0;
case NO_VALUE:
- default:
- DBUG_ASSERT(0);
- };
+ return 0;
+ }
+ DBUG_ASSERT(0); // Garbage
return 0;
}
@@ -3798,7 +4249,12 @@ Item_param::eq(const Item *item, bool binary_cmp) const
if (!basic_const_item())
return FALSE;
+ // There's no "default". See comments in Item_param::save_in_field().
switch (state) {
+ case IGNORE_VALUE:
+ case DEFAULT_VALUE:
+ invalid_default_param();
+ return false;
case NULL_VALUE:
return null_eq(item);
case INT_VALUE:
@@ -3808,9 +4264,12 @@ Item_param::eq(const Item *item, bool binary_cmp) const
case STRING_VALUE:
case LONG_DATA_VALUE:
return str_eq(&str_value, item, binary_cmp);
- default:
- break;
+ case DECIMAL_VALUE:
+ case TIME_VALUE:
+ case NO_VALUE:
+ return false;
}
+ DBUG_ASSERT(0); // Garbage
return FALSE;
}
@@ -3818,10 +4277,18 @@ Item_param::eq(const Item *item, bool binary_cmp) const
void Item_param::print(String *str, enum_query_type query_type)
{
- if (state == NO_VALUE || query_type & QT_NO_DATA_EXPANSION)
+ if (state == NO_VALUE)
{
str->append('?');
}
+ else if (state == DEFAULT_VALUE)
+ {
+ str->append("default");
+ }
+ else if (state == IGNORE_VALUE)
+ {
+ str->append("ignore");
+ }
else
{
char buffer[STRING_BUFFER_USUAL_SIZE];
@@ -3858,14 +4325,14 @@ void
Item_param::set_param_type_and_swap_value(Item_param *src)
{
Type_std_attributes::set(src);
- param_type= src->param_type;
+ set_handler(src->type_handler());
set_param_func= src->set_param_func;
item_type= src->item_type;
- item_result_type= src->item_result_type;
maybe_null= src->maybe_null;
null_value= src->null_value;
state= src->state;
+ fixed= src->fixed;
value= src->value;
decimal_value.swap(src->decimal_value);
@@ -3874,6 +4341,30 @@ Item_param::set_param_type_and_swap_value(Item_param *src)
}
+void Item_param::set_default()
+{
+ m_is_settable_routine_parameter= false;
+ state= DEFAULT_VALUE;
+ fixed= true;
+ /*
+ When Item_param is set to DEFAULT_VALUE:
+ - its val_str() and val_decimal() return NULL
+ - get_date() returns true
+ It's important also to have null_value==true for DEFAULT_VALUE.
+ Otherwise the callers of val_xxx() and get_date(), e.g. Item::send(),
+ can misbehave (e.g. crash on asserts).
+ */
+ null_value= true;
+}
+
+void Item_param::set_ignore()
+{
+ m_is_settable_routine_parameter= false;
+ state= IGNORE_VALUE;
+ fixed= true;
+ null_value= true;
+}
+
/**
This operation is intended to store some item value in Item_param to be
used later.
@@ -3899,6 +4390,7 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it)
}
null_value= FALSE;
+ unsigned_flag= arg->unsigned_flag;
switch (arg->result_type()) {
case STRING_RESULT:
@@ -3916,7 +4408,6 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it)
str_value.charset());
collation.set(str_value.charset(), DERIVATION_COERCIBLE);
decimals= 0;
-
break;
}
@@ -3936,7 +4427,7 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it)
if (!dv)
return TRUE;
- set_decimal(dv);
+ set_decimal(dv, !dv->sign());
break;
}
@@ -3949,8 +4440,7 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it)
return FALSE;
}
- item_result_type= arg->result_type();
- item_type= arg->type();
+ set_handler_by_result_type(arg->result_type());
return FALSE;
}
@@ -3968,7 +4458,7 @@ void
Item_param::set_out_param_info(Send_field *info)
{
m_out_param_info= info;
- param_type= m_out_param_info->type;
+ set_handler_by_field_type(m_out_param_info->type);
}
@@ -3997,9 +4487,9 @@ Item_param::get_out_param_info() const
@param field container for meta-data to be filled
*/
-void Item_param::make_field(Send_field *field)
+void Item_param::make_field(THD *thd, Send_field *field)
{
- Item::make_field(field);
+ Item::make_field(thd, field);
if (!m_out_param_info)
return;
@@ -4412,9 +4902,11 @@ static Item** find_field_in_group_list(Item *find_item, ORDER *group_list)
in the SELECT clause of Q.
- Search for a column named col_ref_i [in table T_j]
in the GROUP BY clause of Q.
- - If found different columns with the same name in GROUP BY and SELECT
- - issue a warning and return the GROUP BY column,
- - otherwise
+ - If found different columns with the same name in GROUP BY and SELECT:
+ - if the condition that uses this column name is pushed down into
+ the HAVING clause return the SELECT column
+ - else issue a warning and return the GROUP BY column.
+ - Otherwise
- if the MODE_ONLY_FULL_GROUP_BY mode is enabled return error
- else return the found SELECT column.
@@ -4453,7 +4945,8 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select)
/* Check if the fields found in SELECT and GROUP BY are the same field. */
if (group_by_ref && (select_ref != not_found_item) &&
- !((*group_by_ref)->eq(*select_ref, 0)))
+ !((*group_by_ref)->eq(*select_ref, 0)) &&
+ (!select->having_fix_field_for_pushed_cond))
{
ambiguous_fields= TRUE;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -4489,7 +4982,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select)
return NULL;
}
DBUG_ASSERT((*select_ref)->fixed);
- return (select->ref_pointer_array + counter);
+ return &select->ref_pointer_array[counter];
}
if (group_by_ref)
return group_by_ref;
@@ -4607,7 +5100,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
*/
Name_resolution_context *last_checked_context= context;
Item **ref= (Item **) not_found_item;
- SELECT_LEX *current_sel= (SELECT_LEX *) thd->lex->current_select;
+ SELECT_LEX *current_sel= thd->lex->current_select;
Name_resolution_context *outer_context= 0;
SELECT_LEX *select= 0;
/* Currently derived tables cannot be correlated */
@@ -4940,6 +5433,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
DBUG_ASSERT(fixed == 0);
Field *from_field= (Field *)not_found_field;
bool outer_fixed= false;
+ SELECT_LEX *select= thd->lex->current_select;
if (!field) // If field is not checked
{
@@ -4961,13 +5455,14 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
not_found_field)
{
int ret;
+
/* Look up in current select's item_list to find aliased fields */
- if (thd->lex->current_select->is_item_list_lookup)
+ if (select && select->is_item_list_lookup)
{
uint counter;
enum_resolution_type resolution;
Item** res= find_item_in_list(this,
- thd->lex->current_select->item_list,
+ select->item_list,
&counter, REPORT_EXCEPT_NOT_FOUND,
&resolution);
if (!res)
@@ -4999,7 +5494,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
We can not "move" aggregate function in the place where
its arguments are not defined.
*/
- set_max_sum_func_level(thd, thd->lex->current_select);
+ set_max_sum_func_level(thd, select);
set_field(new_field);
return 0;
}
@@ -5019,20 +5514,25 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
if (err)
return TRUE;
- SELECT_LEX *select= thd->lex->current_select;
thd->change_item_tree(reference,
- select->parsing_place == IN_GROUP_BY &&
+ select->context_analysis_place == IN_GROUP_BY &&
alias_name_used ? *rf->ref : rf);
/*
We can not "move" aggregate function in the place where
its arguments are not defined.
*/
- set_max_sum_func_level(thd, thd->lex->current_select);
+ set_max_sum_func_level(thd, select);
return FALSE;
}
}
}
+
+ if (!select)
+ {
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), full_name(), thd->where);
+ goto error;
+ }
if ((ret= fix_outer_field(thd, &from_field, reference)) < 0)
goto error;
outer_fixed= TRUE;
@@ -5061,9 +5561,9 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
if (thd->lex->in_sum_func &&
thd->lex->in_sum_func->nest_level ==
- thd->lex->current_select->nest_level)
+ select->nest_level)
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
- thd->lex->current_select->nest_level);
+ select->nest_level);
/*
if it is not expression from merged VIEW we will set this field.
@@ -5125,13 +5625,16 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
}
#endif
fixed= 1;
+ if (field->vcol_info)
+ fix_session_vcol_expr_for_read(thd, field, field->vcol_info);
if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY &&
!outer_fixed && !thd->lex->in_sum_func &&
- thd->lex->current_select->cur_pos_in_select_list != UNDEF_POS &&
- thd->lex->current_select->join)
+ select &&
+ select->cur_pos_in_select_list != UNDEF_POS &&
+ select->join)
{
- thd->lex->current_select->join->non_agg_fields.push_back(this, thd->mem_root);
- marker= thd->lex->current_select->cur_pos_in_select_list;
+ select->join->non_agg_fields.push_back(this, thd->mem_root);
+ marker= select->cur_pos_in_select_list;
}
mark_non_agg_field:
/*
@@ -5168,7 +5671,7 @@ mark_non_agg_field:
if (outer_fixed)
thd->lex->in_sum_func->outer_fields.push_back(this, thd->mem_root);
else if (thd->lex->in_sum_func->nest_level !=
- thd->lex->current_select->nest_level)
+ select->nest_level)
select_lex->set_non_agg_field_used(true);
}
}
@@ -5179,21 +5682,26 @@ error:
return TRUE;
}
-/*
- @brief
- Mark virtual columns as used in a partitioning expression
-*/
-
-bool Item_field::vcol_in_partition_func_processor(uchar *int_arg)
+bool Item_field::post_fix_fields_part_expr_processor(void *int_arg)
{
DBUG_ASSERT(fixed);
if (field->vcol_info)
- {
field->vcol_info->mark_as_in_partitioning_expr();
- }
+ /*
+ Update table_name to be the real table name, not the alias, because the
+ alias is reallocated for every statement while this item has a long lifetime */
+ table_name= field->table->s->table_name.str;
return FALSE;
}
+bool Item_field::check_valid_arguments_processor(void *bool_arg)
+{
+ Virtual_column_info *vcol= field->vcol_info;
+ if (!vcol)
+ return FALSE;
+ return vcol->expr->walk(&Item::check_partition_func_processor, 0, NULL)
+ || vcol->expr->walk(&Item::check_valid_arguments_processor, 0, NULL);
+}
void Item_field::cleanup()
{
@@ -5395,34 +5903,18 @@ void Item::init_make_field(Send_field *tmp_field,
tmp_field->flags |= UNSIGNED_FLAG;
}
-void Item::make_field(Send_field *tmp_field)
+void Item::make_field(THD *thd, Send_field *tmp_field)
{
init_make_field(tmp_field, field_type());
}
-void Item_empty_string::make_field(Send_field *tmp_field)
+void Item_empty_string::make_field(THD *thd, Send_field *tmp_field)
{
init_make_field(tmp_field, string_field_type());
}
-enum_field_types Item::field_type() const
-{
- switch (result_type()) {
- case STRING_RESULT: return string_field_type();
- case INT_RESULT: return MYSQL_TYPE_LONGLONG;
- case DECIMAL_RESULT: return MYSQL_TYPE_NEWDECIMAL;
- case REAL_RESULT: return MYSQL_TYPE_DOUBLE;
- case ROW_RESULT:
- case TIME_RESULT:
- DBUG_ASSERT(0);
- return MYSQL_TYPE_VARCHAR;
- }
- return MYSQL_TYPE_VARCHAR;
-}
-
-
/**
Verifies that the input string is well-formed according to its character set.
@param send_error If true, call my_error if string is not well-formed.
@@ -5496,8 +5988,7 @@ String_copier_for_item::copy_with_warn(CHARSET_INFO *dstcs, String *dst,
if (const char *pos= cannot_convert_error_pos())
{
char buf[16];
- int mblen= srccs->cset->charlen(srccs, (const uchar *) pos,
- (const uchar *) src + src_length);
+ int mblen= my_charlen(srccs, pos, src + src_length);
DBUG_ASSERT(mblen > 0 && mblen * 2 + 1 <= (int) sizeof(buf));
octet2hex(buf, pos, mblen);
push_warning_printf(m_thd, Sql_condition::WARN_LEVEL_WARN,
@@ -5722,7 +6213,7 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table,
/* ARGSUSED */
-void Item_field::make_field(Send_field *tmp_field)
+void Item_field::make_field(THD *thd, Send_field *tmp_field)
{
field->make_field(tmp_field);
DBUG_ASSERT(tmp_field->table_name != 0);
@@ -5786,19 +6277,9 @@ static int save_field_in_field(Field *from, bool *null_value,
}
-static int memcpy_field_value(Field *to, Field *from)
-{
- if (to->ptr != from->ptr)
- memcpy(to->ptr,from->ptr, to->pack_length());
- return 0;
-}
-
fast_field_copier Item_field::setup_fast_field_copier(Field *to)
{
- DBUG_ENTER("Item_field::setup_fast_field_copier");
- DBUG_RETURN(memcpy_field_possible(to, field) ?
- &memcpy_field_value :
- &field_conv_incompatible);
+ return to->get_fast_field_copier(field);
}
@@ -5810,8 +6291,8 @@ void Item_field::save_org_in_field(Field *to,
fast_field_copier fast_field_copier_func)
{
DBUG_ENTER("Item_field::save_org_in_field");
- DBUG_PRINT("enter", ("setup: 0x%lx data: 0x%lx",
- (ulong) to, (ulong) fast_field_copier_func));
+ DBUG_PRINT("enter", ("setup: %p data: %p",
+ to, fast_field_copier_func));
if (fast_field_copier_func)
{
if (field->is_null())
@@ -5937,6 +6418,12 @@ int Item::save_in_field(Field *field, bool no_conversions)
}
+bool Item::save_in_param(THD *thd, Item_param *param)
+{
+ return param->set_from_item(thd, this);
+}
+
+
int Item_string::save_in_field(Field *field, bool no_conversions)
{
String *result;
@@ -6013,9 +6500,64 @@ Item *Item_int_with_ref::clone_item(THD *thd)
}
-Item_num *Item_uint::neg(THD *thd)
+Item *Item::neg(THD *thd)
{
- Item_decimal *item= new (thd->mem_root) Item_decimal(thd, value, 1);
+ return new (thd->mem_root) Item_func_neg(thd, this);
+}
+
+Item *Item_int::neg(THD *thd)
+{
+ /*
+ The following if should never be true with code generated by
+ our parser as LONGLONG_MIN values will be stored as decimal.
+ The code is here in case someone generates an int from inside
+ MariaDB
+ */
+ if (unlikely(value == LONGLONG_MIN))
+ {
+ /* Precision for int not big enough; Convert constant to decimal */
+ Item_decimal *item= new (thd->mem_root) Item_decimal(thd, value, 0);
+ return item ? item->neg(thd) : item;
+ }
+ if (value > 0)
+ max_length++;
+ else if (value < 0 && max_length)
+ max_length--;
+ value= -value;
+ name= 0;
+ return this;
+}
+
+Item *Item_decimal::neg(THD *thd)
+{
+ my_decimal_neg(&decimal_value);
+ unsigned_flag= 0;
+ name= 0;
+ max_length= my_decimal_precision_to_length_no_truncation(
+ decimal_value.intg + decimals, decimals, unsigned_flag);
+ return this;
+}
+
+Item *Item_float::neg(THD *thd)
+{
+ if (value > 0)
+ max_length++;
+ else if (value < 0 && max_length)
+ max_length--;
+ value= -value;
+ name= presentation= 0 ;
+ return this;
+}
+
+Item *Item_uint::neg(THD *thd)
+{
+ Item_decimal *item;
+ if (((ulonglong)value) <= LONGLONG_MAX)
+ return new (thd->mem_root) Item_int(thd, -value, max_length+1);
+ if (value == LONGLONG_MIN)
+ return new (thd->mem_root) Item_int(thd, value, max_length+1);
+ if (!(item= new (thd->mem_root) Item_decimal(thd, value, 1)))
+ return 0;
return item->neg(thd);
}
@@ -6156,50 +6698,6 @@ void Item_hex_constant::hex_string_init(THD *thd, const char *str,
unsigned_flag= 1;
}
-longlong Item_hex_hybrid::val_int()
-{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
- char *end=(char*) str_value.ptr()+str_value.length(),
- *ptr=end-MY_MIN(str_value.length(),sizeof(longlong));
-
- ulonglong value=0;
- for (; ptr != end ; ptr++)
- value=(value << 8)+ (ulonglong) (uchar) *ptr;
- return (longlong) value;
-}
-
-
-int Item_hex_hybrid::save_in_field(Field *field, bool no_conversions)
-{
- field->set_notnull();
- if (field->result_type() == STRING_RESULT)
- return field->store(str_value.ptr(), str_value.length(),
- collation.collation);
-
- ulonglong nr;
- uint32 length= str_value.length();
-
- if (length > 8)
- {
- nr= field->flags & UNSIGNED_FLAG ? ULONGLONG_MAX : LONGLONG_MAX;
- goto warn;
- }
- nr= (ulonglong) val_int();
- if ((length == 8) && !(field->flags & UNSIGNED_FLAG) && (nr > LONGLONG_MAX))
- {
- nr= LONGLONG_MAX;
- goto warn;
- }
- return field->store((longlong) nr, TRUE); // Assume hex numbers are unsigned
-
-warn:
- if (!field->store((longlong) nr, TRUE))
- field->set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE,
- 1);
- return 1;
-}
-
void Item_hex_hybrid::print(String *str, enum_query_type query_type)
{
@@ -6545,7 +7043,7 @@ Item* Item::cache_const_expr_transformer(THD *thd, uchar *arg)
/**
Find Item by reference in the expression
*/
-bool Item::find_item_processor(uchar *arg)
+bool Item::find_item_processor(void *arg)
{
return (this == ((Item *) arg));
}
@@ -6615,15 +7113,14 @@ Item *Item_field::update_value_transformer(THD *thd, uchar *select_arg)
type() != Item::TRIGGER_FIELD_ITEM)
{
List<Item> *all_fields= &select->join->all_fields;
- Item **ref_pointer_array= select->ref_pointer_array;
- DBUG_ASSERT(all_fields->elements <= select->ref_pointer_array_size);
+ Ref_ptr_array &ref_pointer_array= select->ref_pointer_array;
int el= all_fields->elements;
Item_ref *ref;
ref_pointer_array[el]= (Item*)this;
all_fields->push_front((Item*)this, thd->mem_root);
ref= new (thd->mem_root)
- Item_ref(thd, &select->context, ref_pointer_array + el,
+ Item_ref(thd, &select->context, &ref_pointer_array[el],
table_name, field_name);
return ref;
}
@@ -6631,6 +7128,207 @@ Item *Item_field::update_value_transformer(THD *thd, uchar *select_arg)
}
+static
+Item *get_field_item_for_having(THD *thd, Item *item, st_select_lex *sel)
+{
+ DBUG_ASSERT(item->type() == Item::FIELD_ITEM ||
+ (item->type() == Item::REF_ITEM &&
+ ((Item_ref *) item)->ref_type() == Item_ref::VIEW_REF));
+ Item_field *field_item= NULL;
+ table_map map= sel->master_unit()->derived->table->map;
+ Item_equal *item_equal= item->get_item_equal();
+ if (!item_equal)
+ field_item= (Item_field *)(item->real_item());
+ else
+ {
+ Item_equal_fields_iterator li(*item_equal);
+ Item *equal_item;
+ while ((equal_item= li++))
+ {
+ if (equal_item->used_tables() == map)
+ {
+ field_item= (Item_field *)(equal_item->real_item());
+ break;
+ }
+ }
+ }
+ if (field_item)
+ {
+ Item_ref *ref= new (thd->mem_root) Item_ref(thd, &sel->context,
+ NullS, NullS,
+ field_item->field_name);
+ return ref;
+ }
+ DBUG_ASSERT(0);
+ return NULL;
+}
+
+
+Item *Item_field::derived_field_transformer_for_having(THD *thd, uchar *arg)
+{
+ st_select_lex *sel= (st_select_lex *)arg;
+ table_map tab_map= sel->master_unit()->derived->table->map;
+ if (item_equal && !(item_equal->used_tables() & tab_map))
+ return this;
+ if (!item_equal && used_tables() != tab_map)
+ return this;
+ Item *item= get_field_item_for_having(thd, this, sel);
+ if (item)
+ item->marker|= SUBSTITUTION_FL;
+ return item;
+}
+
+
+Item *Item_direct_view_ref::derived_field_transformer_for_having(THD *thd,
+ uchar *arg)
+{
+ if ((*ref)->marker & SUBSTITUTION_FL)
+ {
+ this->marker|= SUBSTITUTION_FL;
+ return this;
+ }
+ st_select_lex *sel= (st_select_lex *)arg;
+ table_map tab_map= sel->master_unit()->derived->table->map;
+ if ((item_equal && !(item_equal->used_tables() & tab_map)) ||
+ !item_equal)
+ return this;
+ return get_field_item_for_having(thd, this, sel);
+}
+
+
+static
+Item *find_producing_item(Item *item, st_select_lex *sel)
+{
+ DBUG_ASSERT(item->type() == Item::FIELD_ITEM ||
+ (item->type() == Item::REF_ITEM &&
+ ((Item_ref *) item)->ref_type() == Item_ref::VIEW_REF));
+ Item *producing_item;
+ Item_field *field_item= NULL;
+ Item_equal *item_equal= item->get_item_equal();
+ table_map tab_map= sel->master_unit()->derived->table->map;
+ if (item->used_tables() == tab_map)
+ field_item= (Item_field *) (item->real_item());
+ if (!field_item && item_equal)
+ {
+ Item_equal_fields_iterator it(*item_equal);
+ Item *equal_item;
+ while ((equal_item= it++))
+ {
+ if (equal_item->used_tables() == tab_map)
+ {
+ field_item= (Item_field *) (equal_item->real_item());
+ break;
+ }
+ }
+ }
+ List_iterator_fast<Item> li(sel->item_list);
+ if (field_item)
+ {
+ uint field_no= field_item->field->field_index;
+ for (uint i= 0; i <= field_no; i++)
+ producing_item= li++;
+ return producing_item;
+ }
+ return NULL;
+}
+
+Item *Item_field::derived_field_transformer_for_where(THD *thd, uchar *arg)
+{
+ st_select_lex *sel= (st_select_lex *)arg;
+ Item *producing_item= find_producing_item(this, sel);
+ if (producing_item)
+ {
+ Item *producing_clone= producing_item->build_clone(thd, thd->mem_root);
+ if (producing_clone)
+ producing_clone->marker|= SUBSTITUTION_FL;
+ return producing_clone;
+ }
+ return this;
+}
+
+Item *Item_direct_view_ref::derived_field_transformer_for_where(THD *thd,
+ uchar *arg)
+{
+ if ((*ref)->marker & SUBSTITUTION_FL)
+ return (*ref);
+ if (item_equal)
+ {
+ st_select_lex *sel= (st_select_lex *)arg;
+ Item *producing_item= find_producing_item(this, sel);
+ DBUG_ASSERT (producing_item != NULL);
+ return producing_item->build_clone(thd, thd->mem_root);
+ }
+ return (*ref);
+}
+
+static
+Grouping_tmp_field *find_matching_grouping_field(Item *item,
+ st_select_lex *sel)
+{
+ DBUG_ASSERT(item->type() == Item::FIELD_ITEM ||
+ (item->type() == Item::REF_ITEM &&
+ ((Item_ref *) item)->ref_type() == Item_ref::VIEW_REF));
+ List_iterator<Grouping_tmp_field> li(sel->grouping_tmp_fields);
+ Grouping_tmp_field *gr_field;
+ Item_field *field_item= (Item_field *) (item->real_item());
+ while ((gr_field= li++))
+ {
+ if (field_item->field == gr_field->tmp_field)
+ return gr_field;
+ }
+ Item_equal *item_equal= item->get_item_equal();
+ if (item_equal)
+ {
+ Item_equal_fields_iterator it(*item_equal);
+ Item *equal_item;
+ while ((equal_item= it++))
+ {
+ field_item= (Item_field *) (equal_item->real_item());
+ li.rewind();
+ while ((gr_field= li++))
+ {
+ if (field_item->field == gr_field->tmp_field)
+ return gr_field;
+ }
+ }
+ }
+ return NULL;
+}
+
+
+Item *Item_field::derived_grouping_field_transformer_for_where(THD *thd,
+ uchar *arg)
+{
+ st_select_lex *sel= (st_select_lex *)arg;
+ Grouping_tmp_field *gr_field= find_matching_grouping_field(this, sel);
+ if (gr_field)
+ {
+ Item *producing_clone=
+ gr_field->producing_item->build_clone(thd, thd->mem_root);
+ if (producing_clone)
+ producing_clone->marker|= SUBSTITUTION_FL;
+ return producing_clone;
+ }
+ return this;
+}
+
+
+Item *
+Item_direct_view_ref::derived_grouping_field_transformer_for_where(THD *thd,
+ uchar *arg)
+{
+ if ((*ref)->marker & SUBSTITUTION_FL)
+ {
+ this->marker|= SUBSTITUTION_FL;
+ return this;
+ }
+ if (!item_equal)
+ return this;
+ st_select_lex *sel= (st_select_lex *)arg;
+ Grouping_tmp_field *gr_field= find_matching_grouping_field(this, sel);
+ return gr_field->producing_item->build_clone(thd, thd->mem_root);
+}
+
void Item_field::print(String *str, enum_query_type query_type)
{
if (field && field->table->const_table &&
@@ -7020,6 +7718,7 @@ void Item_ref::set_properties()
*/
with_sum_func= (*ref)->with_sum_func;
with_param= (*ref)->with_param;
+ with_window_func= (*ref)->with_window_func;
with_field= (*ref)->with_field;
fixed= 1;
if (alias_name_used)
@@ -7133,7 +7832,9 @@ void Item_ref::print(String *str, enum_query_type query_type)
{
if (ref)
{
- if ((*ref)->type() != Item::CACHE_ITEM && ref_type() != VIEW_REF &&
+ if ((*ref)->type() != Item::CACHE_ITEM &&
+ (*ref)->type() != Item::WINDOW_FUNC_ITEM &&
+ ref_type() != VIEW_REF &&
!table_name && name && alias_name_used)
{
THD *thd= current_thd;
@@ -7331,9 +8032,9 @@ void Item_ref::save_org_in_field(Field *field, fast_field_copier optimizer_data)
}
-void Item_ref::make_field(Send_field *field)
+void Item_ref::make_field(THD *thd, Send_field *field)
{
- (*ref)->make_field(field);
+ (*ref)->make_field(thd, field);
/* Non-zero in case of a view */
if (name)
field->col_name= name;
@@ -7957,7 +8658,7 @@ void Item_ref::fix_after_pullout(st_select_lex *new_parent, Item **refptr,
FALSE always
*/
-bool Item_outer_ref::check_inner_refs_processor(uchar *arg)
+bool Item_outer_ref::check_inner_refs_processor(void *arg)
{
List_iterator_fast<Item_outer_ref> *it=
((List_iterator_fast<Item_outer_ref> *) arg);
@@ -8103,6 +8804,32 @@ Item *Item_direct_view_ref::replace_equal_field(THD *thd, uchar *arg)
}
+bool Item_direct_view_ref::excl_dep_on_table(table_map tab_map)
+{
+ table_map used= used_tables();
+ if (used & OUTER_REF_TABLE_BIT)
+ return false;
+ if (!(used & ~tab_map))
+ return true;
+ if (item_equal)
+ {
+ DBUG_ASSERT(real_item()->type() == Item::FIELD_ITEM);
+ return item_equal->used_tables() & tab_map;
+ }
+ return (*ref)->excl_dep_on_table(tab_map);
+}
+
+bool Item_direct_view_ref::excl_dep_on_grouping_fields(st_select_lex *sel)
+{
+ if (item_equal)
+ {
+ DBUG_ASSERT(real_item()->type() == Item::FIELD_ITEM);
+ return find_matching_grouping_field(this, sel) != NULL;
+ }
+ return (*ref)->excl_dep_on_grouping_fields(sel);
+}
+
+
bool Item_default_value::eq(const Item *item, bool binary_cmp) const
{
return item->type() == DEFAULT_VALUE_ITEM &&
@@ -8122,8 +8849,19 @@ bool Item_default_value::fix_fields(THD *thd, Item **items)
fixed= 1;
return FALSE;
}
+
+ /*
+ DEFAULT() does not need the table field, so it should not ask the handler
+ to bring in the field value (i.e. should not mark the column for read)
+ */
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
if (!arg->fixed && arg->fix_fields(thd, &arg))
+ {
+ thd->mark_used_columns= save_mark_used_columns;
goto error;
+ }
+ thd->mark_used_columns= save_mark_used_columns;
real_arg= arg->real_item();
@@ -8134,7 +8872,7 @@ bool Item_default_value::fix_fields(THD *thd, Item **items)
}
field_arg= (Item_field *)real_arg;
- if (field_arg->field->flags & NO_DEFAULT_VALUE_FLAG)
+ if ((field_arg->field->flags & NO_DEFAULT_VALUE_FLAG))
{
my_error(ER_NO_DEFAULT_FOR_FIELD, MYF(0), field_arg->field->field_name);
goto error;
@@ -8143,9 +8881,25 @@ bool Item_default_value::fix_fields(THD *thd, Item **items)
goto error;
memcpy((void *)def_field, (void *)field_arg->field,
field_arg->field->size_of());
- def_field->move_field_offset((my_ptrdiff_t)
- (def_field->table->s->default_values -
- def_field->table->record[0]));
+ // If non-constant default value expression
+ if (def_field->default_value && def_field->default_value->flags)
+ {
+ uchar *newptr= (uchar*) thd->alloc(1+def_field->pack_length());
+ if (!newptr)
+ goto error;
+ /*
+ Even if DEFAULT() itself does not read table fields, the default value
+ expression can do so.
+ */
+ fix_session_vcol_expr_for_read(thd, def_field, def_field->default_value);
+ if (thd->mark_used_columns != MARK_COLUMNS_NONE)
+ def_field->default_value->expr->update_used_tables();
+ def_field->move_field(newptr+1, def_field->maybe_null() ? newptr : 0, 1);
+ }
+ else
+ def_field->move_field_offset((my_ptrdiff_t)
+ (def_field->table->s->default_values -
+ def_field->table->record[0]));
set_field(def_field);
return FALSE;
@@ -8163,55 +8917,81 @@ void Item_default_value::print(String *str, enum_query_type query_type)
return;
}
str->append(STRING_WITH_LEN("default("));
+ /*
+ We take DEFAULT from a field, so in the case of const tables we do not
+ need its value, only its name; hence we set QT_NO_DATA_EXPANSION (as we
+ print for a table definition, we also do not need table and database names)
+ */
+ query_type= (enum_query_type) (query_type | QT_NO_DATA_EXPANSION);
arg->print(str, query_type);
str->append(')');
}
+void Item_default_value::calculate()
+{
+ if (field->default_value)
+ field->set_default();
+ DEBUG_SYNC(field->table->in_use, "after_Item_default_value_calculate");
+}
-int Item_default_value::save_in_field(Field *field_arg, bool no_conversions)
+String *Item_default_value::val_str(String *str)
{
- if (!arg)
- {
- TABLE *table= field_arg->table;
- THD *thd= table->in_use;
+ calculate();
+ return Item_field::val_str(str);
+}
- if (field_arg->flags & NO_DEFAULT_VALUE_FLAG &&
- field_arg->real_type() != MYSQL_TYPE_ENUM)
- {
- if (field_arg->reset())
- {
- my_message(ER_CANT_CREATE_GEOMETRY_OBJECT,
- ER_THD(thd, ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0));
- return -1;
- }
+double Item_default_value::val_real()
+{
+ calculate();
+ return Item_field::val_real();
+}
- if (context->error_processor == &view_error_processor)
- {
- TABLE_LIST *view= table->pos_in_table_list->top_table();
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_NO_DEFAULT_FOR_VIEW_FIELD,
- ER_THD(thd, ER_NO_DEFAULT_FOR_VIEW_FIELD),
- view->view_db.str,
- view->view_name.str);
- }
- else
- {
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_NO_DEFAULT_FOR_FIELD,
- ER_THD(thd, ER_NO_DEFAULT_FOR_FIELD),
- field_arg->field_name);
- }
- return 1;
- }
- field_arg->set_default();
- return
- !field_arg->is_null() &&
- field_arg->validate_value_in_record_with_warn(thd, table->record[0]) &&
- thd->is_error() ? -1 : 0;
+longlong Item_default_value::val_int()
+{
+ calculate();
+ return Item_field::val_int();
+}
+
+my_decimal *Item_default_value::val_decimal(my_decimal *decimal_value)
+{
+ calculate();
+ return Item_field::val_decimal(decimal_value);
+}
+
+bool Item_default_value::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+{
+ calculate();
+ return Item_field::get_date(ltime, fuzzydate);
+}
+
+bool Item_default_value::send(Protocol *protocol, String *buffer)
+{
+ calculate();
+ return Item_field::send(protocol, buffer);
+}
+
+int Item_default_value::save_in_field(Field *field_arg, bool no_conversions)
+{
+ if (arg)
+ {
+ calculate();
+ return Item_field::save_in_field(field_arg, no_conversions);
}
- return Item_field::save_in_field(field_arg, no_conversions);
+
+ if (field_arg->default_value && field_arg->default_value->flags)
+ return 0; // defaut fields will be set later, no need to do it twice
+ return field_arg->save_in_field_default_value(context->error_processor ==
+ &view_error_processor);
}
+table_map Item_default_value::used_tables() const
+{
+ if (!field || !field->default_value)
+ return static_cast<table_map>(0);
+ if (!field->default_value->expr) // not fully parsed field
+ return static_cast<table_map>(RAND_TABLE_BIT);
+ return field->default_value->expr->used_tables();
+}
/**
This method like the walk method traverses the item tree, but at the
@@ -8245,6 +9025,57 @@ Item *Item_default_value::transform(THD *thd, Item_transformer transformer,
return (this->*transformer)(thd, args);
}
+void Item_ignore_value::print(String *str, enum_query_type query_type)
+{
+ str->append(STRING_WITH_LEN("ignore"));
+}
+
+int Item_ignore_value::save_in_field(Field *field_arg, bool no_conversions)
+{
+ return field_arg->save_in_field_ignore_value(context->error_processor ==
+ &view_error_processor);
+}
+
+String *Item_ignore_value::val_str(String *str)
+{
+ DBUG_ASSERT(0); // never should be called
+ null_value= 1;
+ return 0;
+}
+
+double Item_ignore_value::val_real()
+{
+ DBUG_ASSERT(0); // never should be called
+ null_value= 1;
+ return 0.0;
+}
+
+longlong Item_ignore_value::val_int()
+{
+ DBUG_ASSERT(0); // never should be called
+ null_value= 1;
+ return 0;
+}
+
+my_decimal *Item_ignore_value::val_decimal(my_decimal *decimal_value)
+{
+ DBUG_ASSERT(0); // never should be called
+ null_value= 1;
+ return 0;
+}
+
+bool Item_ignore_value::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+{
+ DBUG_ASSERT(0); // never should be called
+ null_value= 1;
+ return TRUE;
+}
+
+bool Item_ignore_value::send(Protocol *protocol, String *buffer)
+{
+ DBUG_ASSERT(0); // never should be called
+ return TRUE;
+}
bool Item_insert_value::eq(const Item *item, bool binary_cmp) const
{
@@ -8329,7 +9160,7 @@ void Item_insert_value::print(String *str, enum_query_type query_type)
this stage we can't say exactly what Field object (corresponding
to TABLE::record[0] or TABLE::record[1]) should be bound to this
Item, we only find out index of the Field and then select concrete
- Field object in fix_fields() (by that time Table_trigger_list::old_field/
+ Field object in fix_fields() (by that time Table_triggers_list::old_field/
new_field should point to proper array of Fields).
It also binds Item_trigger_field to Table_triggers_list object for
table of trigger which uses this item.
@@ -8458,6 +9289,13 @@ void Item_trigger_field::print(String *str, enum_query_type query_type)
}
+bool Item_trigger_field::check_vcol_func_processor(void *arg)
+{
+ const char *ver= row_version == NEW_ROW ? "NEW." : "OLD.";
+ return mark_unsupported_function(ver, field_name, arg, VCOL_IMPOSSIBLE);
+}
+
+
void Item_trigger_field::cleanup()
{
want_privilege= original_privilege;
@@ -8492,19 +9330,22 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item)
Item *new_item= NULL;
Item_result res_type= item_cmp_type(comp_item, item);
- char *name=item->name; // Alloced by sql_alloc
+ char *name= item->name; // Alloced on THD::mem_root
MEM_ROOT *mem_root= thd->mem_root;
switch (res_type) {
case TIME_RESULT:
{
- bool is_null;
- Item **ref_copy= ref;
- /* the following call creates a constant and puts it in new_item */
enum_field_types type= item->field_type_for_temporal_comparison(comp_item);
- get_datetime_value(thd, &ref_copy, &new_item, type, &is_null);
- if (is_null)
+ longlong value= item->val_temporal_packed(type);
+ if (item->null_value)
new_item= new (mem_root) Item_null(thd, name);
+ else
+ {
+ Item_cache_temporal *cache= new (mem_root) Item_cache_temporal(thd, type);
+ cache->store_packed(value, item);
+ new_item= cache;
+ }
break;
}
case STRING_RESULT:
@@ -8517,7 +9358,7 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item)
else
{
uint length= result->length();
- char *tmp_str= sql_strmake(result->ptr(), length);
+ char *tmp_str= thd->strmake(result->ptr(), length);
new_item= new (mem_root) Item_string(thd, name, tmp_str, length, result->charset());
}
break;
@@ -8680,12 +9521,6 @@ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item)
return 0;
}
-Item_cache* Item_cache::get_cache(THD *thd, const Item *item)
-{
- return get_cache(thd, item, item->cmp_type());
-}
-
-
/**
Get a cache item of given type.
@@ -8696,12 +9531,12 @@ Item_cache* Item_cache::get_cache(THD *thd, const Item *item)
*/
Item_cache* Item_cache::get_cache(THD *thd, const Item *item,
- const Item_result type)
+ const Item_result type, const enum_field_types f_type)
{
MEM_ROOT *mem_root= thd->mem_root;
switch (type) {
case INT_RESULT:
- return new (mem_root) Item_cache_int(thd, item->field_type());
+ return new (mem_root) Item_cache_int(thd, f_type);
case REAL_RESULT:
return new (mem_root) Item_cache_real(thd);
case DECIMAL_RESULT:
@@ -8711,7 +9546,7 @@ Item_cache* Item_cache::get_cache(THD *thd, const Item *item,
case ROW_RESULT:
return new (mem_root) Item_cache_row(thd);
case TIME_RESULT:
- return new (mem_root) Item_cache_temporal(thd, item->field_type());
+ return new (mem_root) Item_cache_temporal(thd, f_type);
}
return 0; // Impossible
}
@@ -8726,6 +9561,14 @@ void Item_cache::store(Item *item)
void Item_cache::print(String *str, enum_query_type query_type)
{
+ if (example && // There is a cached item
+ (query_type & QT_NO_DATA_EXPANSION)) // Caller is show-create-table
+ {
+ // Instead of "cache" or the cached value, print the cached item name
+ example->print(str, query_type);
+ return;
+ }
+
if (value_cached)
{
print_value(str);
@@ -8813,20 +9656,32 @@ int Item_cache_int::save_in_field(Field *field, bool no_conversions)
}
+Item *Item_cache_int::convert_to_basic_const_item(THD *thd)
+{
+ Item *new_item;
+ DBUG_ASSERT(value_cached || example != 0);
+ if (!value_cached)
+ cache_value();
+ new_item= null_value ?
+ (Item*) new (thd->mem_root) Item_null(thd) :
+ (Item*) new (thd->mem_root) Item_int(thd, val_int(), max_length);
+ return new_item;
+}
+
+
Item_cache_temporal::Item_cache_temporal(THD *thd,
enum_field_types field_type_arg):
Item_cache_int(thd, field_type_arg)
{
- if (mysql_type_to_time_type(cached_field_type) == MYSQL_TIMESTAMP_ERROR)
- cached_field_type= MYSQL_TYPE_DATETIME;
+ if (mysql_type_to_time_type(Item_cache_temporal::field_type()) ==
+ MYSQL_TIMESTAMP_ERROR)
+ set_handler_by_field_type(MYSQL_TYPE_DATETIME);
}
longlong Item_cache_temporal::val_datetime_packed()
{
DBUG_ASSERT(fixed == 1);
- if (Item_cache_temporal::field_type() == MYSQL_TYPE_TIME)
- return Item::val_datetime_packed(); // TIME-to-DATETIME conversion needed
if ((!value_cached && !cache_value()) || null_value)
{
null_value= TRUE;
@@ -8839,8 +9694,7 @@ longlong Item_cache_temporal::val_datetime_packed()
longlong Item_cache_temporal::val_time_packed()
{
DBUG_ASSERT(fixed == 1);
- if (Item_cache_temporal::field_type() != MYSQL_TYPE_TIME)
- return Item::val_time_packed(); // DATETIME-to-TIME conversion needed
+ DBUG_ASSERT(Item_cache_temporal::field_type() == MYSQL_TYPE_TIME);
if ((!value_cached && !cache_value()) || null_value)
{
null_value= TRUE;
@@ -8898,18 +9752,26 @@ double Item_cache_temporal::val_real()
}
-bool Item_cache_temporal::cache_value()
+bool Item_cache_temporal::cache_value()
{
if (!example)
return false;
-
value_cached= true;
-
+
MYSQL_TIME ltime;
- if (example->get_date_result(&ltime, 0))
- value=0;
- else
+ uint fuzzydate= TIME_FUZZY_DATES | TIME_INVALID_DATES;
+ if (Item_cache_temporal::field_type() == MYSQL_TYPE_TIME)
+ fuzzydate|= TIME_TIME_ONLY;
+
+ value= 0;
+ if (!example->get_date_result(&ltime, fuzzydate))
+ {
+ if (ltime.time_type == MYSQL_TIMESTAMP_TIME &&
+ !(fuzzydate & TIME_TIME_ONLY) &&
+ convert_time_to_datetime(current_thd, &ltime, fuzzydate))
+ return true;
value= pack_time(&ltime);
+ }
null_value= example->null_value;
return true;
}
@@ -8922,18 +9784,22 @@ bool Item_cache_temporal::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
if (!has_value())
{
bzero((char*) ltime,sizeof(*ltime));
- return 1;
+ return null_value= true;
}
unpack_time(value, ltime);
ltime->time_type= mysql_type_to_time_type(field_type());
if (ltime->time_type == MYSQL_TIMESTAMP_TIME)
{
- ltime->hour+= (ltime->month*32+ltime->day)*24;
- ltime->month= ltime->day= 0;
+ if (fuzzydate & TIME_TIME_ONLY)
+ {
+ ltime->hour+= (ltime->month*32+ltime->day)*24;
+ ltime->month= ltime->day= 0;
+ }
+ else if (convert_time_to_datetime(current_thd, ltime, fuzzydate))
+ return true;
}
return 0;
-
}
@@ -8950,7 +9816,7 @@ int Item_cache_temporal::save_in_field(Field *field, bool no_conversions)
void Item_cache_temporal::store_packed(longlong val_arg, Item *example_arg)
{
- /* An explicit values is given, save it. */
+ /* An explicit value is given, save it. */
store(example_arg);
value_cached= true;
value= val_arg;
@@ -8961,12 +9827,40 @@ void Item_cache_temporal::store_packed(longlong val_arg, Item *example_arg)
Item *Item_cache_temporal::clone_item(THD *thd)
{
Item_cache_temporal *item= new (thd->mem_root)
- Item_cache_temporal(thd, cached_field_type);
+ Item_cache_temporal(thd, Item_cache_temporal::field_type());
item->store_packed(value, example);
return item;
}
+Item *Item_cache_temporal::convert_to_basic_const_item(THD *thd)
+{
+ Item *new_item;
+ DBUG_ASSERT(value_cached || example != 0);
+ if (!value_cached)
+ cache_value();
+ if (null_value)
+ new_item= (Item*) new (thd->mem_root) Item_null(thd);
+ else
+ {
+ MYSQL_TIME ltime;
+ if (Item_cache_temporal::field_type() == MYSQL_TYPE_TIME)
+ {
+ unpack_time(val_time_packed(), &ltime);
+ new_item= (Item*) new (thd->mem_root) Item_time_literal(thd, &ltime,
+ decimals);
+ }
+ else
+ {
+ unpack_time(val_datetime_packed(), &ltime);
+ new_item= (Item*) new (thd->mem_root) Item_datetime_literal(thd, &ltime,
+ decimals);
+ }
+ }
+ return new_item;
+}
+
+
bool Item_cache_real::cache_value()
{
if (!example)
@@ -8991,8 +9885,7 @@ longlong Item_cache_real::val_int()
DBUG_ASSERT(fixed == 1);
if (!has_value())
return 0;
- bool error;
- return double_to_longlong(value, unsigned_flag, &error);
+ return Converter_double_to_longlong(value, unsigned_flag).result();
}
@@ -9016,6 +9909,20 @@ my_decimal *Item_cache_real::val_decimal(my_decimal *decimal_val)
}
+Item *Item_cache_real::convert_to_basic_const_item(THD *thd)
+{
+ Item *new_item;
+ DBUG_ASSERT(value_cached || example != 0);
+ if (!value_cached)
+ cache_value();
+ new_item= null_value ?
+ (Item*) new (thd->mem_root) Item_null(thd) :
+ (Item*) new (thd->mem_root) Item_float(thd, val_real(),
+ decimals);
+ return new_item;
+}
+
+
bool Item_cache_decimal::cache_value()
{
if (!example)
@@ -9067,6 +9974,24 @@ my_decimal *Item_cache_decimal::val_decimal(my_decimal *val)
}
+Item *Item_cache_decimal::convert_to_basic_const_item(THD *thd)
+{
+ Item *new_item;
+ DBUG_ASSERT(value_cached || example != 0);
+ if (!value_cached)
+ cache_value();
+ if (null_value)
+ new_item= (Item*) new (thd->mem_root) Item_null(thd);
+ else
+ {
+ my_decimal decimal_value;
+ my_decimal *result= val_decimal(&decimal_value);
+ new_item= (Item*) new (thd->mem_root) Item_decimal(thd, result);
+ }
+ return new_item;
+}
+
+
bool Item_cache_str::cache_value()
{
if (!example)
@@ -9146,6 +10071,28 @@ bool Item_cache_row::allocate(THD *thd, uint num)
}
+Item *Item_cache_str::convert_to_basic_const_item(THD *thd)
+{
+ Item *new_item;
+ DBUG_ASSERT(value_cached || example != 0);
+ if (!value_cached)
+ cache_value();
+ if (null_value)
+ new_item= (Item*) new (thd->mem_root) Item_null(thd);
+ else
+ {
+ char buff[MAX_FIELD_WIDTH];
+ String tmp(buff, sizeof(buff), value->charset());
+ String *result= val_str(&tmp);
+ uint length= result->length();
+ char *tmp_str= thd->strmake(result->ptr(), length);
+ new_item= new (thd->mem_root) Item_string(thd, tmp_str, length,
+ result->charset());
+ }
+ return new_item;
+}
+
+
bool Item_cache_row::setup(THD *thd, Item *item)
{
example= item;
@@ -9260,16 +10207,28 @@ void Item_cache_row::set_null()
Item_type_holder::Item_type_holder(THD *thd, Item *item)
:Item(thd, item),
+ Type_handler_hybrid_real_field_type(get_real_type(item)),
enum_set_typelib(0),
- fld_type(get_real_type(item)),
geometry_type(Field::GEOM_GEOMETRY)
{
DBUG_ASSERT(item->fixed);
maybe_null= item->maybe_null;
collation.set(item->collation);
get_full_info(item);
+ /**
+ Field::result_merge_type(real_field_type()) should be equal to
+ result_type(), with one exception when "this" is a Item_field for
+ a BIT field:
+ - Field_bit::result_type() returns INT_RESULT, so does its Item_field.
+ - Field::result_merge_type(MYSQL_TYPE_BIT) returns STRING_RESULT.
+ Perhaps we need a new method in Type_handler to cover these type
+ merging rules for UNION.
+ */
+ DBUG_ASSERT(real_field_type() == MYSQL_TYPE_BIT ||
+ Item_type_holder::result_type() ==
+ Field::result_merge_type(Item_type_holder::real_field_type()));
/* fix variable decimals which always is NOT_FIXED_DEC */
- if (Field::result_merge_type(fld_type) == INT_RESULT)
+ if (Field::result_merge_type(real_field_type()) == INT_RESULT)
decimals= 0;
prev_decimal_int_part= item->decimal_int_part();
#ifdef HAVE_SPATIAL
@@ -9280,19 +10239,6 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item)
/**
- Return expression type of Item_type_holder.
-
- @return
- Item_result (type of internal MySQL expression result)
-*/
-
-Item_result Item_type_holder::result_type() const
-{
- return Field::result_merge_type(fld_type);
-}
-
-
-/**
Find real field type of item.
@return
@@ -9342,7 +10288,7 @@ enum_field_types Item_type_holder::get_real_type(Item *item)
*/
switch (item->result_type()) {
case STRING_RESULT:
- return MYSQL_TYPE_VAR_STRING;
+ return MYSQL_TYPE_VARCHAR;
case INT_RESULT:
return MYSQL_TYPE_LONGLONG;
case REAL_RESULT:
@@ -9352,10 +10298,21 @@ enum_field_types Item_type_holder::get_real_type(Item *item)
case ROW_RESULT:
case TIME_RESULT:
DBUG_ASSERT(0);
- return MYSQL_TYPE_VAR_STRING;
+ return MYSQL_TYPE_VARCHAR;
}
}
break;
+ case TYPE_HOLDER:
+ /*
+ Item_type_holder and Item_blob should not appear in this context.
+ In case they for some reasons do, returning field_type() is wrong anyway.
+ They must return Item_type_holder::real_field_type() instead, to make
+    the code in sql_type.cc and sql_type.h happy, as it expects
+ Field::real_type()-compatible rather than Field::field_type()-compatible
+    values in some places, and may in the future add some asserts preventing
+ use of field_type() instead of real_type() and the other way around.
+ */
+ DBUG_ASSERT(0);
default:
break;
}
@@ -9381,25 +10338,26 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
uint decimals_orig= decimals;
DBUG_ENTER("Item_type_holder::join_types");
DBUG_PRINT("info:", ("was type %d len %d, dec %d name %s",
- fld_type, max_length, decimals,
+ real_field_type(), max_length, decimals,
(name ? name : "<NULL>")));
DBUG_PRINT("info:", ("in type %d len %d, dec %d",
get_real_type(item),
item->max_length, item->decimals));
- fld_type= Field::field_type_merge(fld_type, get_real_type(item));
+ set_handler_by_real_type(Field::field_type_merge(real_field_type(),
+ get_real_type(item)));
{
uint item_decimals= item->decimals;
/* fix variable decimals which always is NOT_FIXED_DEC */
- if (Field::result_merge_type(fld_type) == INT_RESULT)
+ if (Field::result_merge_type(real_field_type()) == INT_RESULT)
item_decimals= 0;
decimals= MY_MAX(decimals, item_decimals);
}
- if (fld_type == FIELD_TYPE_GEOMETRY)
+ if (Item_type_holder::field_type() == FIELD_TYPE_GEOMETRY)
geometry_type=
Field_geom::geometry_type_merge(geometry_type, item->get_geometry_type());
- if (Field::result_merge_type(fld_type) == DECIMAL_RESULT)
+ if (Field::result_merge_type(real_field_type()) == DECIMAL_RESULT)
{
collation.set_numeric();
decimals= MY_MIN(MY_MAX(decimals, item->decimals), DECIMAL_MAX_SCALE);
@@ -9412,7 +10370,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
unsigned_flag);
}
- switch (Field::result_merge_type(fld_type))
+ switch (Field::result_merge_type(real_field_type()))
{
case STRING_RESULT:
{
@@ -9459,12 +10417,14 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
int delta1= max_length_orig - decimals_orig;
int delta2= item->max_length - item->decimals;
max_length= MY_MAX(delta1, delta2) + decimals;
- if (fld_type == MYSQL_TYPE_FLOAT && max_length > FLT_DIG + 2)
+ if (Item_type_holder::real_field_type() == MYSQL_TYPE_FLOAT &&
+ max_length > FLT_DIG + 2)
{
max_length= MAX_FLOAT_STR_LENGTH;
decimals= NOT_FIXED_DEC;
}
- else if (fld_type == MYSQL_TYPE_DOUBLE && max_length > DBL_DIG + 2)
+ else if (Item_type_holder::real_field_type() == MYSQL_TYPE_DOUBLE &&
+ max_length > DBL_DIG + 2)
{
max_length= MAX_DOUBLE_STR_LENGTH;
decimals= NOT_FIXED_DEC;
@@ -9472,11 +10432,12 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
}
}
else
- max_length= (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7;
+ max_length= (Item_type_holder::field_type() == MYSQL_TYPE_FLOAT) ?
+ FLT_DIG+6 : DBL_DIG+7;
break;
}
default:
- if (fld_type == MYSQL_TYPE_YEAR)
+ if (real_field_type() == MYSQL_TYPE_YEAR)
max_length= MY_MAX(max_length, item->max_length);
else
max_length= MY_MAX(max_length, display_length(item));
@@ -9487,7 +10448,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
/* Remember decimal integer part to be used in DECIMAL_RESULT handleng */
prev_decimal_int_part= decimal_int_part();
DBUG_PRINT("info", ("become type: %d len: %u dec: %u",
- (int) fld_type, max_length, (uint) decimals));
+ (int) real_field_type(), max_length, (uint) decimals));
DBUG_RETURN(FALSE);
}
@@ -9568,7 +10529,7 @@ Field *Item_type_holder::make_field_by_type(TABLE *table)
uchar *null_ptr= maybe_null ? (uchar*) "" : 0;
Field *field;
- switch (fld_type) {
+ switch (Item_type_holder::real_field_type()) {
case MYSQL_TYPE_ENUM:
DBUG_ASSERT(enum_set_typelib);
field= new Field_enum((uchar *) 0, max_length, null_ptr, 0,
@@ -9604,8 +10565,8 @@ Field *Item_type_holder::make_field_by_type(TABLE *table)
*/
void Item_type_holder::get_full_info(Item *item)
{
- if (fld_type == MYSQL_TYPE_ENUM ||
- fld_type == MYSQL_TYPE_SET)
+ if (Item_type_holder::real_field_type() == MYSQL_TYPE_ENUM ||
+ Item_type_holder::real_field_type() == MYSQL_TYPE_SET)
{
if (item->type() == Item::SUM_FUNC_ITEM &&
(((Item_sum*)item)->sum_func() == Item_sum::MAX_FUNC ||
@@ -9757,7 +10718,7 @@ table_map Item_ref_null_helper::used_tables() const
#ifndef DBUG_OFF
/* Debugger help function */
-static char dbug_item_print_buf[256];
+static char dbug_item_print_buf[2048];
const char *dbug_print_item(Item *item)
{
@@ -9781,4 +10742,70 @@ const char *dbug_print_item(Item *item)
return "Couldn't fit into buffer";
}
+const char *dbug_print_select(SELECT_LEX *sl)
+{
+ char *buf= dbug_item_print_buf;
+ String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin);
+ str.length(0);
+ if (!sl)
+ return "(SELECT_LEX*)NULL";
+
+ THD *thd= current_thd;
+ ulonglong save_option_bits= thd->variables.option_bits;
+ thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
+
+ sl->print(thd, &str, QT_EXPLAIN);
+
+ thd->variables.option_bits= save_option_bits;
+
+ if (str.c_ptr() == buf)
+ return buf;
+ else
+ return "Couldn't fit into buffer";
+}
+
+const char *dbug_print_unit(SELECT_LEX_UNIT *un)
+{
+ char *buf= dbug_item_print_buf;
+ String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin);
+ str.length(0);
+ if (!un)
+ return "(SELECT_LEX_UNIT*)NULL";
+
+ THD *thd= current_thd;
+ ulonglong save_option_bits= thd->variables.option_bits;
+ thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
+
+ un->print(&str, QT_EXPLAIN);
+
+ thd->variables.option_bits= save_option_bits;
+
+ if (str.c_ptr() == buf)
+ return buf;
+ else
+ return "Couldn't fit into buffer";
+}
+
+const char *dbug_print(Item *x) { return dbug_print_item(x); }
+const char *dbug_print(SELECT_LEX *x) { return dbug_print_select(x); }
+const char *dbug_print(SELECT_LEX_UNIT *x) { return dbug_print_unit(x); }
+
#endif /*DBUG_OFF*/
+
+bool Item_field::excl_dep_on_table(table_map tab_map)
+{
+ return used_tables() == tab_map ||
+ (item_equal && (item_equal->used_tables() & tab_map));
+}
+
+bool
+Item_field::excl_dep_on_grouping_fields(st_select_lex *sel)
+{
+ return find_matching_grouping_field(this, sel) != NULL;
+}
+
+void Item::register_in(THD *thd)
+{
+ next= thd->free_list;
+ thd->free_list= this;
+}
diff --git a/sql/item.h b/sql/item.h
index 5c432887ed9..bd72fed5300 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -25,45 +25,49 @@
#include "sql_priv.h" /* STRING_BUFFER_USUAL_SIZE */
#include "unireg.h"
#include "sql_const.h" /* RAND_TABLE_BIT, MAX_FIELD_NAME */
-#include "thr_malloc.h" /* sql_calloc */
#include "field.h" /* Derivation */
#include "sql_type.h"
+#include "sql_time.h"
+#include "mem_root_array.h"
C_MODE_START
#include <ma_dyncol.h>
-C_MODE_END
-#ifndef DBUG_OFF
-static inline
-bool trace_unsupported_func(const char *where, const char *processor_name)
+/*
+ A prototype for a C-compatible structure to store a value of any data type.
+ Currently it has to stay in /sql, as it depends on String and my_decimal.
+ We'll do the following changes:
+ 1. add pure C "struct st_string" and "struct st_my_decimal"
+ 2. change type of m_string to struct st_string and move inside the union
+ 3. change type of m_decimal to struct st_my_decimal and move inside the union
+ 4. move the definition to some file in /include
+*/
+struct st_value
{
- char buff[64];
- sprintf(buff, "%s::%s", where, processor_name);
- DBUG_ENTER(buff);
- sprintf(buff, "%s returns TRUE: unsupported function", processor_name);
- DBUG_PRINT("info", ("%s", buff));
- DBUG_RETURN(TRUE);
-}
-#else
-#define trace_unsupported_func(X,Y) TRUE
-#endif
+ enum enum_dynamic_column_type m_type;
+ union
+ {
+ longlong m_longlong;
+ double m_double;
+ MYSQL_TIME m_time;
+ } value;
+ String m_string;
+ my_decimal m_decimal;
+};
-static inline
-bool trace_unsupported_by_check_vcol_func_processor(const char *where)
-{
- return trace_unsupported_func(where, "check_vcol_func_processor");
-}
+C_MODE_END
#ifdef DBUG_OFF
static inline const char *dbug_print_item(Item *item) { return NULL; }
#else
-extern const char *dbug_print_item(Item *item);
+const char *dbug_print_item(Item *item);
#endif
class Protocol;
struct TABLE_LIST;
void item_init(void); /* Init item functions */
class Item_field;
+class Item_param;
class user_var_entry;
class JOIN;
struct KEY_FIELD;
@@ -71,14 +75,45 @@ struct SARGABLE_PARAM;
class RANGE_OPT_PARAM;
class SEL_TREE;
+enum precedence {
+ LOWEST_PRECEDENCE,
+ ASSIGN_PRECEDENCE, // :=
+ OR_PRECEDENCE, // OR, || (unless PIPES_AS_CONCAT)
+ XOR_PRECEDENCE, // XOR
+ AND_PRECEDENCE, // AND, &&
+ NOT_PRECEDENCE, // NOT (unless HIGH_NOT_PRECEDENCE)
+ BETWEEN_PRECEDENCE, // BETWEEN, CASE, WHEN, THEN, ELSE
+ CMP_PRECEDENCE, // =, <=>, >=, >, <=, <, <>, !=, IS, LIKE, REGEXP, IN
+ BITOR_PRECEDENCE, // |
+ BITAND_PRECEDENCE, // &
+ SHIFT_PRECEDENCE, // <<, >>
+ ADDINTERVAL_PRECEDENCE, // first argument in +INTERVAL
+ ADD_PRECEDENCE, // +, -
+ MUL_PRECEDENCE, // *, /, DIV, %, MOD
+ BITXOR_PRECEDENCE, // ^
+ PIPES_PRECEDENCE, // || (if PIPES_AS_CONCAT)
+ NEG_PRECEDENCE, // unary -, ~
+ BANG_PRECEDENCE, // !, NOT (if HIGH_NOT_PRECEDENCE)
+ COLLATE_PRECEDENCE, // BINARY, COLLATE
+ INTERVAL_PRECEDENCE, // INTERVAL
+ DEFAULT_PRECEDENCE,
+ HIGHEST_PRECEDENCE
+};
+
+typedef Bounds_checked_array<Item*> Ref_ptr_array;
static inline uint32
-char_to_byte_length_safe(uint32 char_length_arg, uint32 mbmaxlen_arg)
+char_to_byte_length_safe(size_t char_length_arg, uint32 mbmaxlen_arg)
{
- ulonglong tmp= ((ulonglong) char_length_arg) * mbmaxlen_arg;
- return (tmp > UINT_MAX32) ? (uint32) UINT_MAX32 : (uint32) tmp;
+ ulonglong tmp= ((ulonglong) char_length_arg) * mbmaxlen_arg;
+ return tmp > UINT_MAX32 ? UINT_MAX32 : static_cast<uint32>(tmp);
}
+bool mark_unsupported_function(const char *where, void *store, uint result);
+
+/* convenience helper for mark_unsupported_function() above */
+bool mark_unsupported_function(const char *w1, const char *w2,
+ void *store, uint result);
/* Bits for the split_sum_func() function */
#define SPLIT_SUM_SKIP_REGISTERED 1 /* Skip registered funcs */
@@ -110,6 +145,11 @@ char_to_byte_length_safe(uint32 char_length_arg, uint32 mbmaxlen_arg)
#define MY_COLL_ALLOW_CONV (MY_COLL_ALLOW_SUPERSET_CONV | MY_COLL_ALLOW_COERCIBLE_CONV)
#define MY_COLL_CMP_CONV (MY_COLL_ALLOW_CONV | MY_COLL_DISALLOW_NONE)
+#define NO_EXTRACTION_FL (1 << 6)
+#define FULL_EXTRACTION_FL (1 << 7)
+#define SUBSTITUTION_FL (1 << 8)
+#define EXTRACTION_MASK (NO_EXTRACTION_FL | FULL_EXTRACTION_FL)
+
class DTCollation {
public:
CHARSET_INFO *collation;
@@ -194,7 +234,7 @@ public:
{
return collation->coll->strnncollsp(collation,
(uchar *) s->ptr(), s->length(),
- (uchar *) t->ptr(), t->length(), 0);
+ (uchar *) t->ptr(), t->length());
}
};
@@ -490,7 +530,7 @@ class Copy_query_with_rewrite
bool copy_up_to(size_t bytes)
{
DBUG_ASSERT(bytes >= from);
- return dst->append(src + from, bytes - from);
+ return dst->append(src + from, uint32(bytes - from));
}
public:
@@ -521,7 +561,7 @@ struct st_dyncall_create_def
typedef struct st_dyncall_create_def DYNCALL_CREATE_DEF;
-typedef bool (Item::*Item_processor) (uchar *arg);
+typedef bool (Item::*Item_processor) (void *arg);
/*
Analyzer function
SYNOPSIS
@@ -611,7 +651,6 @@ class Item: public Value_source,
public Type_std_attributes,
public Type_handler
{
- Item(const Item &); /* Prevent use of these */
void operator=(Item &);
/**
The index in the JOIN::join_tab array of the JOIN_TAB this Item is attached
@@ -632,7 +671,8 @@ public:
static void operator delete(void *ptr,size_t size) { TRASH_FREE(ptr, size); }
static void operator delete(void *ptr, MEM_ROOT *mem_root) {}
- enum Type {FIELD_ITEM= 0, FUNC_ITEM, SUM_FUNC_ITEM, STRING_ITEM,
+ enum Type {FIELD_ITEM= 0, FUNC_ITEM, SUM_FUNC_ITEM,
+ WINDOW_FUNC_ITEM, STRING_ITEM,
INT_ITEM, REAL_ITEM, NULL_ITEM, VARBIN_ITEM,
COPY_STR_ITEM, FIELD_AVG_ITEM, DEFAULT_VALUE_ITEM,
PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM,
@@ -662,6 +702,10 @@ protected:
SEL_TREE *get_mm_tree_for_const(RANGE_OPT_PARAM *param);
+ virtual Field *make_string_field(TABLE *table);
+ Field *tmp_table_field_from_field_type(TABLE *table,
+ bool fixed_length,
+ bool set_blob_packlength);
Field *create_tmp_field(bool group, TABLE *table, uint convert_int_length);
/* Helper methods, to get an Item value from another Item */
double val_real_from_item(Item *item)
@@ -709,6 +753,8 @@ protected:
*/
bool make_zero_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ void push_note_converted_to_negative_complement(THD *thd);
+ void push_note_converted_to_positive_complement(THD *thd);
public:
/*
Cache val_str() into the own buffer, e.g. to evaluate constant
@@ -740,6 +786,7 @@ public:
bool null_value; /* if item is null */
bool with_sum_func; /* True if item contains a sum func */
bool with_param; /* True if contains an SP parameter */
+ bool with_window_func; /* True if item contains a window func */
/**
True if any item except Item_sum contains a field. Set during parsing.
*/
@@ -750,7 +797,7 @@ public:
bool with_subselect; /* If this item is a subselect or some
of its arguments is or contains a
subselect */
- // alloc & destruct is done as start of select using sql_alloc
+ // alloc & destruct is done as start of select on THD::mem_root
Item(THD *thd);
/*
Constructor used by Item_field, Item_ref & aggregate (sum) functions.
@@ -767,15 +814,15 @@ public:
name=0;
#endif
} /*lint -e1509 */
- void set_name(const char *str, uint length, CHARSET_INFO *cs);
- void set_name_no_truncate(const char *str, uint length, CHARSET_INFO *cs);
+ void set_name(THD *thd, const char *str, uint length, CHARSET_INFO *cs);
+ void set_name_no_truncate(THD *thd, const char *str, uint length,
+ CHARSET_INFO *cs);
void set_name_for_rollback(THD *thd, const char *str, uint length,
CHARSET_INFO *cs);
void rename(char *new_name);
void init_make_field(Send_field *tmp_field,enum enum_field_types type);
virtual void cleanup();
- virtual void make_field(Send_field *field);
- virtual Field *make_string_field(TABLE *table);
+ virtual void make_field(THD *thd, Send_field *field);
virtual bool fix_fields(THD *, Item **);
/*
Fix after some tables has been pulled out. Basically re-calculate all
@@ -797,9 +844,61 @@ public:
supposed to be applied recursively.
*/
virtual inline void quick_fix_field() { fixed= 1; }
+
+ bool store(struct st_value *value, ulonglong fuzzydate)
+ {
+ switch (cmp_type()) {
+ case INT_RESULT:
+ {
+ value->m_type= unsigned_flag ? DYN_COL_UINT : DYN_COL_INT;
+ value->value.m_longlong= val_int();
+ break;
+ }
+ case REAL_RESULT:
+ {
+ value->m_type= DYN_COL_DOUBLE;
+ value->value.m_double= val_real();
+ break;
+ }
+ case DECIMAL_RESULT:
+ {
+ value->m_type= DYN_COL_DECIMAL;
+ my_decimal *dec= val_decimal(&value->m_decimal);
+ if (dec != &value->m_decimal && !null_value)
+ my_decimal2decimal(dec, &value->m_decimal);
+ break;
+ }
+ case STRING_RESULT:
+ {
+ value->m_type= DYN_COL_STRING;
+ String *str= val_str(&value->m_string);
+ if (str != &value->m_string && !null_value)
+ value->m_string.set(str->ptr(), str->length(), str->charset());
+ break;
+ }
+ case TIME_RESULT:
+ {
+ value->m_type= DYN_COL_DATETIME;
+ get_date(&value->value.m_time, fuzzydate);
+ break;
+ }
+ case ROW_RESULT:
+ DBUG_ASSERT(false);
+ null_value= true;
+ break;
+ }
+ if (null_value)
+ {
+ value->m_type= DYN_COL_NULL;
+ return true;
+ }
+ return false;
+ }
+
/* Function returns 1 on overflow and -1 on fatal errors */
int save_in_field_no_warnings(Field *field, bool no_conversions);
virtual int save_in_field(Field *field, bool no_conversions);
+ virtual bool save_in_param(THD *thd, Item_param *param);
virtual void save_org_in_field(Field *field,
fast_field_copier data
__attribute__ ((__unused__)))
@@ -810,16 +909,41 @@ public:
{ return save_in_field(field, 1); }
virtual bool send(Protocol *protocol, String *str);
virtual bool eq(const Item *, bool binary_cmp) const;
+ const Type_handler *type_handler() const
+ {
+ return get_handler_by_field_type(field_type());
+ }
+ Field *make_num_distinct_aggregator_field(MEM_ROOT *mem_root,
+ const Item *item) const
+ {
+ return type_handler()->make_num_distinct_aggregator_field(mem_root, this);
+ }
+ Field *make_conversion_table_field(TABLE *table,
+ uint metadata, const Field *target) const
+ {
+ DBUG_ASSERT(0); // Should not be called in Item context
+ return NULL;
+ }
/* result_type() of an item specifies how the value should be returned */
- Item_result result_type() const { return REAL_RESULT; }
+ Item_result result_type() const { return type_handler()->result_type(); }
/* ... while cmp_type() specifies how it should be compared */
- Item_result cmp_type() const;
+ Item_result cmp_type() const { return type_handler()->cmp_type(); }
+ void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const
+ {
+ type_handler()->make_sort_key(to, item, sort_field, param);
+ }
+ void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const
+ {
+ type_handler()->sortlength(thd, item, attr);
+ }
virtual Item_result cast_to_int_type() const { return cmp_type(); }
enum_field_types string_field_type() const
{
return Type_handler::string_type_handler(max_length)->field_type();
}
- enum_field_types field_type() const;
virtual enum Type type() const =0;
/*
real_type() is the type of base item. This is same as type() for
@@ -896,6 +1020,20 @@ public:
If value is not null null_value flag will be reset to FALSE.
*/
virtual longlong val_int()=0;
+ /**
+ Get a value for CAST(x AS SIGNED).
+ Too large positive unsigned integer values are converted
+ to negative complements.
+ Values of non-integer data types are adjusted to the SIGNED range.
+ */
+ virtual longlong val_int_signed_typecast();
+ /**
+ Get a value for CAST(x AS UNSIGNED).
+ Negative signed integer values are converted
+ to positive complements.
+ Values of non-integer data types are adjusted to the UNSIGNED range.
+ */
+ virtual longlong val_int_unsigned_typecast();
Longlong_hybrid to_longlong_hybrid()
{
return Longlong_hybrid(val_int(), unsigned_flag);
@@ -1010,6 +1148,8 @@ public:
Returns the val_str() value converted to the given character set.
*/
String *val_str(String *str, String *converter, CHARSET_INFO *to);
+
+ virtual String *val_json(String *str) { return val_str(str); }
/*
Return decimal representation of item with fixed point.
@@ -1068,7 +1208,12 @@ public:
my_decimal *val_decimal_from_time(my_decimal *decimal_value);
longlong val_int_from_decimal();
longlong val_int_from_date();
- longlong val_int_from_real();
+ longlong val_int_from_real()
+ {
+ DBUG_ASSERT(fixed == 1);
+ return Converter_double_to_longlong_with_warn(val_real(), false).result();
+ }
+ longlong val_int_from_str(int *error);
double val_real_from_decimal();
double val_real_from_date();
@@ -1087,6 +1232,7 @@ public:
virtual const char *full_name() const { return name ? name : "???"; }
const char *field_name_or_null()
{ return real_item()->type() == Item::FIELD_ITEM ? name : NULL; }
+ const TABLE_SHARE *field_table_or_null();
/*
*result* family of methods is analog of *val* family (see above) but
@@ -1132,9 +1278,10 @@ public:
virtual bool basic_const_item() const { return 0; }
/* cloning of constant items (0 if it is not const) */
virtual Item *clone_item(THD *thd) { return 0; }
+ virtual Item* build_clone(THD *thd, MEM_ROOT *mem_root) { return get_copy(thd, mem_root); }
virtual cond_result eq_cmp_result() const { return COND_OK; }
inline uint float_length(uint decimals_par) const
- { return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;}
+ { return decimals < FLOATING_POINT_DECIMALS ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;}
/* Returns total number of decimal digits */
virtual uint decimal_precision() const;
/* Returns the number of integer part digits only */
@@ -1203,15 +1350,33 @@ public:
query and why they should be generated from the Item-tree, @see
mysql_register_view().
*/
- virtual inline void print(String *str, enum_query_type query_type)
+ virtual enum precedence precedence() const { return DEFAULT_PRECEDENCE; }
+ void print_parenthesised(String *str, enum_query_type query_type,
+ enum precedence parent_prec);
+ /**
+ This helper is used to print expressions as a part of a table definition,
+ in particular for
+ - generated columns
+ - check constraints
+ - default value expressions
+ - partitioning expressions
+ */
+ void print_for_table_def(String *str)
{
- str->append(full_name());
+ print_parenthesised(str,
+ (enum_query_type)(QT_ITEM_ORIGINAL_FUNC_NULLIF |
+ QT_ITEM_IDENT_SKIP_DB_NAMES |
+ QT_ITEM_IDENT_SKIP_TABLE_NAMES |
+ QT_NO_DATA_EXPANSION |
+ QT_TO_SYSTEM_CHARSET),
+ LOWEST_PRECEDENCE);
}
+ virtual void print(String *str, enum_query_type query_type);
+ void print_item_w_name(String *str, enum_query_type query_type);
+ void print_value(String *str);
- void print_item_w_name(String *, enum_query_type query_type);
- void print_value(String *);
virtual void update_used_tables() {}
- virtual COND *build_equal_items(THD *thd, COND_EQUAL *inherited,
+ virtual COND *build_equal_items(THD *thd, COND_EQUAL *inherited,
bool link_item_fields,
COND_EQUAL **cond_equal_ref)
{
@@ -1247,20 +1412,29 @@ public:
{
return false;
}
- virtual void split_sum_func(THD *thd, Item **ref_pointer_array,
+ virtual void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags) {}
/* Called for items that really have to be split */
- void split_sum_func2(THD *thd, Item **ref_pointer_array, List<Item> &fields,
+ void split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array,
+ List<Item> &fields,
Item **ref, uint flags);
virtual bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
bool get_time(MYSQL_TIME *ltime)
{ return get_date(ltime, TIME_TIME_ONLY | TIME_INVALID_DATES); }
// Get date with automatic TIME->DATETIME conversion
+ bool convert_time_to_datetime(THD *thd, MYSQL_TIME *ltime, ulonglong fuzzydate)
+ {
+ MYSQL_TIME tmp;
+ if (time_to_datetime_with_warn(thd, ltime, &tmp, fuzzydate))
+ return null_value= true;
+ *ltime= tmp;
+ return false;
+ }
bool get_date_with_conversion(MYSQL_TIME *ltime, ulonglong fuzzydate);
/*
Get time with automatic DATE/DATETIME to TIME conversion.
- Performce a reserve operation to get_date_with_conversion().
+ Performs a reverse operation to get_date_with_conversion().
Suppose:
- we have a set of items (typically with the native MYSQL_TYPE_TIME type)
whose item->get_date() return TIME1 value, and
@@ -1398,6 +1572,9 @@ public:
virtual void set_result_field(Field *field) {}
virtual bool is_result_field() { return 0; }
virtual bool is_bool_type() { return false; }
+ virtual bool is_json_type() { return false; }
+ /* This is to handle printing of default values */
+ virtual bool need_parentheses_in_default() { return false; }
virtual void save_in_result_field(bool no_conversions) {}
/*
set value of aggregate function in case of no rows for grouping were found
@@ -1421,12 +1598,7 @@ public:
&my_charset_bin;
};
- virtual bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
- {
- return (this->*processor)(arg);
- }
-
- virtual bool walk_top_and(Item_processor processor, uchar *arg)
+ virtual bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
return (this->*processor)(arg);
}
@@ -1463,73 +1635,59 @@ public:
(*traverser)(this, arg);
}
- /*
- This is used to get the most recent version of any function in
- an item tree. The version is the version where a MySQL function
- was introduced in. So any function which is added should use
- this function and set the int_arg to maximum of the input data
- and their own version info.
- */
- virtual bool intro_version(uchar *int_arg) { return 0; }
-
- virtual bool remove_dependence_processor(uchar * arg) { return 0; }
- virtual bool cleanup_processor(uchar *arg);
- virtual bool collect_item_field_processor(uchar * arg) { return 0; }
- virtual bool add_field_to_set_processor(uchar * arg) { return 0; }
- virtual bool find_item_in_field_list_processor(uchar *arg) { return 0; }
- virtual bool find_item_processor(uchar *arg);
- virtual bool change_context_processor(uchar *context) { return 0; }
- virtual bool reset_query_id_processor(uchar *query_id_arg) { return 0; }
- virtual bool is_expensive_processor(uchar *arg) { return 0; }
- virtual bool register_field_in_read_map(uchar *arg) { return 0; }
- virtual bool register_field_in_write_map(uchar *arg) { return 0; }
- virtual bool enumerate_field_refs_processor(uchar *arg) { return 0; }
- virtual bool mark_as_eliminated_processor(uchar *arg) { return 0; }
- virtual bool eliminate_subselect_processor(uchar *arg) { return 0; }
- virtual bool set_fake_select_as_master_processor(uchar *arg) { return 0; }
- virtual bool update_table_bitmaps_processor(uchar *arg) { return 0; }
- virtual bool view_used_tables_processor(uchar *arg) { return 0; }
- virtual bool eval_not_null_tables(uchar *opt_arg) { return 0; }
- virtual bool is_subquery_processor (uchar *opt_arg) { return 0; }
- virtual bool count_sargable_conds(uchar *arg) { return 0; }
- virtual bool limit_index_condition_pushdown_processor(uchar *opt_arg)
- {
- return FALSE;
- }
- virtual bool exists2in_processor(uchar *opt_arg) { return 0; }
- virtual bool find_selective_predicates_list_processor(uchar *opt_arg)
- { return 0; }
- bool cleanup_is_expensive_cache_processor(uchar *arg)
+ /*========= Item processors, to be used with Item::walk() ========*/
+ virtual bool remove_dependence_processor(void *arg) { return 0; }
+ virtual bool cleanup_processor(void *arg);
+ virtual bool cleanup_excluding_fields_processor(void *arg) { return cleanup_processor(arg); }
+ virtual bool cleanup_excluding_const_fields_processor(void *arg) { return cleanup_processor(arg); }
+ virtual bool collect_item_field_processor(void *arg) { return 0; }
+ virtual bool collect_outer_ref_processor(void *arg) {return 0; }
+ virtual bool check_inner_refs_processor(void *arg) { return 0; }
+ virtual bool find_item_in_field_list_processor(void *arg) { return 0; }
+ virtual bool find_item_processor(void *arg);
+ virtual bool change_context_processor(void *arg) { return 0; }
+ virtual bool reset_query_id_processor(void *arg) { return 0; }
+ virtual bool is_expensive_processor(void *arg) { return 0; }
+
+ // FIXME reduce the number of "add field to bitmap" processors
+ virtual bool add_field_to_set_processor(void *arg) { return 0; }
+ virtual bool register_field_in_read_map(void *arg) { return 0; }
+ virtual bool register_field_in_write_map(void *arg) { return 0; }
+ virtual bool register_field_in_bitmap(void *arg) { return 0; }
+ virtual bool update_table_bitmaps_processor(void *arg) { return 0; }
+
+ virtual bool enumerate_field_refs_processor(void *arg) { return 0; }
+ virtual bool mark_as_eliminated_processor(void *arg) { return 0; }
+ virtual bool eliminate_subselect_processor(void *arg) { return 0; }
+ virtual bool set_fake_select_as_master_processor(void *arg) { return 0; }
+ virtual bool view_used_tables_processor(void *arg) { return 0; }
+ virtual bool eval_not_null_tables(void *arg) { return 0; }
+ virtual bool is_subquery_processor(void *arg) { return 0; }
+ virtual bool count_sargable_conds(void *arg) { return 0; }
+ virtual bool limit_index_condition_pushdown_processor(void *arg) { return 0; }
+ virtual bool exists2in_processor(void *arg) { return 0; }
+ virtual bool find_selective_predicates_list_processor(void *arg) { return 0; }
+ bool cleanup_is_expensive_cache_processor(void *arg)
{
is_expensive_cache= (int8)(-1);
return 0;
}
- /* To call bool function for all arguments */
- struct bool_func_call_args
- {
- Item *original_func_item;
- void (Item::*bool_function)();
- };
- bool call_bool_func_processor(uchar *org_item)
- {
- bool_func_call_args *info= (bool_func_call_args*) org_item;
- /* Avoid recursion, as walk also calls for original item */
- if (info->original_func_item != this)
- (this->*(info->bool_function))();
- return FALSE;
- }
-
-
- /*
- The next function differs from the previous one that a bitmap to be updated
- is passed as uchar *arg.
+ /*
+ TRUE if the expression depends only on the table indicated by tab_map
+ or can be converted to such an expression using equalities.
+ Not to be used for AND/OR formulas.
*/
- virtual bool register_field_in_bitmap(uchar *arg) { return 0; }
-
- bool cache_const_expr_analyzer(uchar **arg);
- Item* cache_const_expr_transformer(THD *thd, uchar *arg);
+ virtual bool excl_dep_on_table(table_map tab_map) { return false; }
+ /*
+ TRUE if the expression depends only on grouping fields of sel
+ or can be converted to such an expression using equalities.
+ Not to be used for AND/OR formulas.
+ */
+ virtual bool excl_dep_on_grouping_fields(st_select_lex *sel) { return false; }
+ virtual bool switch_to_nullable_fields_processor(void *arg) { return 0; }
+ virtual bool find_function_processor (void *arg) { return 0; }
/*
Check if a partition function is allowed
SYNOPSIS
@@ -1581,21 +1739,47 @@ public:
assumes that there are no multi-byte collations amongst the partition
fields.
*/
- virtual bool check_partition_func_processor(uchar *bool_arg) { return TRUE;}
- /*
- @brief
- Processor used to mark virtual columns used in partitioning expression
+ virtual bool check_partition_func_processor(void *arg) { return 1;}
+ virtual bool post_fix_fields_part_expr_processor(void *arg) { return 0; }
+ virtual bool rename_fields_processor(void *arg) { return 0; }
+ /** Processor used to check acceptability of an item in the defining
+ expression for a virtual column
- @param
- arg always ignored
+ @param arg always ignored
- @retval
- FALSE always
+ @retval 0 the item is accepted in the definition of a virtual column
+ @retval 1 otherwise
*/
- virtual bool vcol_in_partition_func_processor(uchar *arg)
+ struct vcol_func_processor_result
{
- return FALSE;
+ uint errors; /* Bits of possible errors */
+ const char *name; /* Not supported function */
+ };
+ struct func_processor_rename
+ {
+ LEX_CSTRING db_name;
+ LEX_CSTRING table_name;
+ List<Create_field> fields;
+ };
+ virtual bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(full_name(), arg, VCOL_IMPOSSIBLE);
}
+ virtual bool check_field_expression_processor(void *arg) { return 0; }
+ virtual bool check_func_default_processor(void *arg) { return 0; }
+ /*
+ Check if an expression value has allowed arguments, like DATE/DATETIME
+ for date functions. Also used by partitioning code to reject
+ timezone-dependent expressions in a (sub)partitioning function.
+ */
+ virtual bool check_valid_arguments_processor(void *arg) { return 0; }
+ virtual bool update_vcol_processor(void *arg) { return 0; }
+ /*============== End of Item processor list ======================*/
+
+ virtual Item *get_copy(THD *thd, MEM_ROOT *mem_root)=0;
+
+ bool cache_const_expr_analyzer(uchar **arg);
+ Item* cache_const_expr_transformer(THD *thd, uchar *arg);
virtual Item* propagate_equal_fields(THD*, const Context &, COND_EQUAL *)
{
@@ -1607,35 +1791,9 @@ public:
COND_EQUAL *cond,
Item **place);
- /*
- @brief
- Processor used to check acceptability of an item in the defining
- expression for a virtual column
-
- @param
- arg always ignored
-
- @retval
- FALSE the item is accepted in the definition of a virtual column
- @retval
- TRUE otherwise
- */
- virtual bool check_vcol_func_processor(uchar *arg)
- {
- return trace_unsupported_by_check_vcol_func_processor(full_name());
- }
-
/* arg points to REPLACE_EQUAL_FIELD_ARG object */
virtual Item *replace_equal_field(THD *thd, uchar *arg) { return this; }
- /*
- Check if an expression value has allowed arguments, like DATE/DATETIME
- for date functions. Also used by partitioning code to reject
- timezone-dependent expressions in a (sub)partitioning function.
- */
- virtual bool check_valid_arguments_processor(uchar *bool_arg)
- {
- return FALSE;
- }
+
struct Collect_deps_prm
{
List<Item> *parameters;
@@ -1645,31 +1803,6 @@ public:
int nest_level;
bool collect;
};
- /**
- Collect outer references
- */
- virtual bool collect_outer_ref_processor(uchar *arg) {return FALSE; }
-
- /**
- Find a function of a given type
-
- @param arg the function type to search (enum Item_func::Functype)
- @return
- @retval TRUE the function type we're searching for is found
- @retval FALSE the function type wasn't found
-
- @description
- This function can be used (together with Item::walk()) to find functions
- in an item tree fragment.
- */
- virtual bool find_function_processor (uchar *arg)
- {
- return FALSE;
- }
-
- virtual bool check_inner_refs_processor(uchar *arg) { return FALSE; }
-
- virtual bool switch_to_nullable_fields_processor(uchar *arg) { return FALSE; }
/*
For SP local variable returns pointer to Item representing its
@@ -1703,9 +1836,6 @@ public:
return create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS - 2);
}
- Field *tmp_table_field_from_field_type(TABLE *table,
- bool fixed_length,
- bool set_blob_packlength);
virtual Item_field *field_for_view_update() { return 0; }
virtual Item *neg_transformer(THD *thd) { return NULL; }
@@ -1713,6 +1843,13 @@ public:
{ return this; }
virtual Item *expr_cache_insert_transformer(THD *thd, uchar *unused)
{ return this; }
+ virtual Item *derived_field_transformer_for_having(THD *thd, uchar *arg)
+ { return this; }
+ virtual Item *derived_field_transformer_for_where(THD *thd, uchar *arg)
+ { return this; }
+ virtual Item *derived_grouping_field_transformer_for_where(THD *thd,
+ uchar *arg)
+ { return this; }
virtual bool expr_cache_is_needed(THD *) { return FALSE; }
virtual Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
bool needs_charset_converter(uint32 length, CHARSET_INFO *tocs) const
@@ -1775,6 +1912,20 @@ public:
{
return 0;
}
+
+ virtual Load_data_outvar *get_load_data_outvar()
+ {
+ return 0;
+ }
+ Load_data_outvar *get_load_data_outvar_or_error()
+ {
+ Load_data_outvar *dst= get_load_data_outvar();
+ if (dst)
+ return dst;
+ my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), name);
+ return NULL;
+ }
+
/**
Test whether an expression is expensive to compute. Used during
optimization to avoid computing expensive expressions during this
@@ -1794,7 +1945,7 @@ public:
virtual bool is_expensive()
{
if (is_expensive_cache < 0)
- is_expensive_cache= walk(&Item::is_expensive_processor, 0, (uchar*)0);
+ is_expensive_cache= walk(&Item::is_expensive_processor, 0, NULL);
return MY_TEST(is_expensive_cache);
}
virtual Field::geometry_type get_geometry_type() const
@@ -1810,7 +1961,7 @@ public:
max_length= char_to_byte_length_safe(max_char_length_arg, cs->mbmaxlen);
collation.collation= cs;
}
- void fix_char_length(uint32 max_char_length_arg)
+ void fix_char_length(size_t max_char_length_arg)
{
max_length= char_to_byte_length_safe(max_char_length_arg,
collation.collation->mbmaxlen);
@@ -1845,7 +1996,7 @@ public:
table_map view_used_tables(TABLE_LIST *view)
{
view->view_used_tables= 0;
- walk(&Item::view_used_tables_processor, 0, (uchar *) view);
+ walk(&Item::view_used_tables_processor, 0, view);
return view->view_used_tables;
}
@@ -1863,14 +2014,42 @@ public:
/* how much position should be reserved for Exists2In transformation */
virtual uint exists2in_reserved_items() { return 0; };
+ virtual Item *neg(THD *thd);
+
/**
Inform the item that it is located under a NOT, which is a top-level item.
*/
virtual void under_not(Item_func_not * upper
__attribute__((unused))) {};
+
+
+ void register_in(THD *thd);
+
+ bool depends_only_on(table_map view_map)
+ { return marker & FULL_EXTRACTION_FL; }
+ int get_extraction_flag()
+ { return marker & EXTRACTION_MASK; }
+ void set_extraction_flag(int flags)
+ {
+ marker &= ~EXTRACTION_MASK;
+ marker|= flags;
+ }
+ void clear_extraction_flag()
+ {
+ marker &= ~EXTRACTION_MASK;
+ }
};
+template <class T>
+inline Item* get_item_copy (THD *thd, MEM_ROOT *mem_root, T* item)
+{
+ Item *copy= new (mem_root) T(*item);
+ copy->register_in(thd);
+ return copy;
+}
+
+
/**
Compare two Items for List<Item>::add_unique()
*/
@@ -1890,7 +2069,7 @@ bool cmp_items(Item *a, Item *b);
}
My_enumerator enumerator;
- item->walk(Item::enumerate_field_refs_processor, ...,(uchar*)&enumerator);
+ item->walk(Item::enumerate_field_refs_processor, ...,&enumerator);
This is similar to Visitor pattern.
*/
@@ -2002,6 +2181,7 @@ public:
Item_basic_constant(THD *thd): Item_basic_value(thd), used_table_map(0) {};
void set_used_tables(table_map map) { used_table_map= map; }
table_map used_tables() const { return used_table_map; }
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
/* to prevent drop fixed flag (no need parent cleanup call) */
void cleanup()
{
@@ -2057,27 +2237,31 @@ public:
bool is_null();
public:
- inline void make_field(Send_field *field);
+ inline void make_field(THD *thd, Send_field *field);
inline bool const_item() const;
inline int save_in_field(Field *field, bool no_conversions);
inline bool send(Protocol *protocol, String *str);
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(m_name.str, arg, VCOL_IMPOSSIBLE);
+ }
};
/*****************************************************************************
Item_sp_variable inline implementation.
*****************************************************************************/
-inline void Item_sp_variable::make_field(Send_field *field)
+inline void Item_sp_variable::make_field(THD *thd, Send_field *field)
{
Item *it= this_item();
if (name)
- it->set_name(name, (uint) strlen(name), system_charset_info);
+ it->set_name(thd, name, (uint) strlen(name), system_charset_info);
else
- it->set_name(m_name.str, (uint) m_name.length, system_charset_info);
- it->make_field(field);
+ it->set_name(thd, m_name.str, (uint) m_name.length, system_charset_info);
+ it->make_field(thd, field);
}
inline bool Item_sp_variable::const_item() const
@@ -2103,13 +2287,12 @@ inline bool Item_sp_variable::send(Protocol *protocol, String *str)
class Item_splocal :public Item_sp_variable,
private Settable_routine_parameter,
- public Rewritable_query_parameter
+ public Rewritable_query_parameter,
+ public Type_handler_hybrid_field_type
{
uint m_var_idx;
Type m_type;
- Item_result m_result_type;
- enum_field_types m_field_type;
public:
Item_splocal(THD *thd, const LEX_STRING &sp_var_name, uint sp_var_idx,
enum_field_types sp_var_type,
@@ -2127,8 +2310,12 @@ public:
inline uint get_var_idx() const;
inline enum Type type() const;
- inline Item_result result_type() const;
- inline enum_field_types field_type() const { return m_field_type; }
+ enum_field_types field_type() const
+ { return Type_handler_hybrid_field_type::field_type(); }
+ enum Item_result result_type () const
+ { return Type_handler_hybrid_field_type::result_type(); }
+ enum Item_result cmp_type () const
+ { return Type_handler_hybrid_field_type::cmp_type(); }
private:
bool set_value(THD *thd, sp_rcontext *ctx, Item **it);
@@ -2143,6 +2330,8 @@ public:
{ return this; }
bool append_for_log(THD *thd, String *str);
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
};
/*****************************************************************************
@@ -2164,12 +2353,6 @@ inline enum Item::Type Item_splocal::type() const
return m_type;
}
-inline Item_result Item_splocal::result_type() const
-{
- return m_result_type;
-}
-
-
/*****************************************************************************
A reference to case expression in SP, used in runtime.
*****************************************************************************/
@@ -2186,6 +2369,7 @@ public:
inline enum Type type() const;
inline Item_result result_type() const;
+ enum_field_types field_type() const { return this_item()->field_type(); }
public:
/*
@@ -2194,6 +2378,7 @@ public:
purposes.
*/
virtual void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
private:
uint m_case_expr_id;
@@ -2246,6 +2431,11 @@ public:
bool is_null();
virtual void print(String *str, enum_query_type query_type);
+ enum_field_types field_type() const
+ {
+ return value_item->field_type();
+ }
+
Item_result result_type() const
{
return value_item->result_type();
@@ -2265,20 +2455,20 @@ public:
{
return value_item->send(protocol, str);
}
- bool check_vcol_func_processor(uchar *arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("name_const");
+ return mark_unsupported_function("name_const()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_name_const>(thd, mem_root, this); }
};
class Item_num: public Item_basic_constant
{
public:
Item_num(THD *thd): Item_basic_constant(thd) { collation.set_numeric(); }
- virtual Item_num *neg(THD *thd)= 0;
Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
- bool check_partition_func_processor(uchar *int_arg) { return FALSE;}
- bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) { return FALSE;}
};
#define NO_CACHED_FIELD_INDEX ((uint)(-1))
@@ -2310,7 +2500,7 @@ public:
save_in_field(result_field, no_conversions);
}
void cleanup();
- bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
};
@@ -2367,14 +2557,14 @@ public:
const char *full_name() const;
void cleanup();
st_select_lex *get_depended_from() const;
- bool remove_dependence_processor(uchar * arg);
+ bool remove_dependence_processor(void * arg);
virtual void print(String *str, enum_query_type query_type);
- virtual bool change_context_processor(uchar *cntx)
+ virtual bool change_context_processor(void *cntx)
{ context= (Name_resolution_context *)cntx; return FALSE; }
/**
Collect outer references
*/
- virtual bool collect_outer_ref_processor(uchar *arg);
+ virtual bool collect_outer_ref_processor(void *arg);
friend bool insert_fields(THD *thd, Name_resolution_context *context,
const char *db_name,
const char *table_name, List_iterator<Item> *it,
@@ -2399,13 +2589,17 @@ public:
longlong val_int() { return field->val_int(); }
String *val_str(String *str) { return field->val_str(str); }
my_decimal *val_decimal(my_decimal *dec) { return field->val_decimal(dec); }
- void make_field(Send_field *tmp_field);
+ void make_field(THD *thd, Send_field *tmp_field);
CHARSET_INFO *charset_for_protocol(void) const
{ return field->charset_for_protocol(); }
+ enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+ Item* get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_ident_for_show>(thd, mem_root, this); }
};
-class Item_field :public Item_ident
+class Item_field :public Item_ident,
+ public Load_data_outvar
{
protected:
void set_field(Field *field);
@@ -2452,10 +2646,34 @@ public:
bool val_bool_result();
bool is_null_result();
bool send(Protocol *protocol, String *str_arg);
+ Load_data_outvar *get_load_data_outvar()
+ {
+ return this;
+ }
+ bool load_data_set_null(THD *thd, const Load_data_param *param)
+ {
+ return field->load_data_set_null(thd);
+ }
+ bool load_data_set_value(THD *thd, const char *pos, uint length,
+ const Load_data_param *param)
+ {
+ field->load_data_set_value(pos, length, param->charset());
+ return false;
+ }
+ bool load_data_set_no_data(THD *thd, const Load_data_param *param);
+ void load_data_print_for_log_event(THD *thd, String *to) const;
+ bool load_data_add_outvar(THD *thd, Load_data_param *param) const
+ {
+ return param->add_outvar_field(thd, field);
+ }
+ uint load_data_fixed_length() const
+ {
+ return field->field_length;
+ }
void reset_field(Field *f);
bool fix_fields(THD *, Item **);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
- void make_field(Send_field *tmp_field);
+ void make_field(THD *thd, Send_field *tmp_field);
int save_in_field(Field *field,bool no_conversions);
void save_org_in_field(Field *field, fast_field_copier optimizer_data);
fast_field_copier setup_fast_field_copier(Field *field);
@@ -2488,7 +2706,6 @@ public:
{
TABLE *tab= field->table;
tab->covering_keys.intersect(field->part_of_key);
- tab->merge_keys.merge(field->part_of_key);
if (tab->read_set)
bitmap_fast_test_and_set(tab->read_set, field->field_index);
/*
@@ -2528,17 +2745,31 @@ public:
void set_result_field(Field *field_arg) {}
void save_in_result_field(bool no_conversions) { }
Item *get_tmp_table_item(THD *thd);
- bool collect_item_field_processor(uchar * arg);
- bool add_field_to_set_processor(uchar * arg);
- bool find_item_in_field_list_processor(uchar *arg);
- bool register_field_in_read_map(uchar *arg);
- bool register_field_in_write_map(uchar *arg);
- bool register_field_in_bitmap(uchar *arg);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool vcol_in_partition_func_processor(uchar *bool_arg);
- bool enumerate_field_refs_processor(uchar *arg);
- bool update_table_bitmaps_processor(uchar *arg);
- bool switch_to_nullable_fields_processor(uchar *arg);
+ bool collect_item_field_processor(void * arg);
+ bool add_field_to_set_processor(void * arg);
+ bool find_item_in_field_list_processor(void *arg);
+ bool register_field_in_read_map(void *arg);
+ bool register_field_in_write_map(void *arg);
+ bool register_field_in_bitmap(void *arg);
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool post_fix_fields_part_expr_processor(void *bool_arg);
+ bool check_valid_arguments_processor(void *bool_arg);
+ bool check_field_expression_processor(void *arg);
+ bool enumerate_field_refs_processor(void *arg);
+ bool update_table_bitmaps_processor(void *arg);
+ bool switch_to_nullable_fields_processor(void *arg);
+ bool update_vcol_processor(void *arg);
+ bool rename_fields_processor(void *arg);
+ bool check_vcol_func_processor(void *arg)
+ {
+ context= 0;
+ if (field && (field->unireg_check == Field::NEXT_NUMBER))
+ {
+ // Auto increment fields are unsupported
+ return mark_unsupported_function(field_name, arg, VCOL_FIELD_REF | VCOL_AUTO_INC);
+ }
+ return mark_unsupported_function(field_name, arg, VCOL_FIELD_REF);
+ }
void cleanup();
Item_equal *get_item_equal() { return item_equal; }
void set_item_equal(Item_equal *item_eq) { item_equal= item_eq; }
@@ -2549,7 +2780,19 @@ public:
Item_field *field_for_view_update() { return this; }
int fix_outer_field(THD *thd, Field **field, Item **reference);
virtual Item *update_value_transformer(THD *thd, uchar *select_arg);
+ Item *derived_field_transformer_for_having(THD *thd, uchar *arg);
+ Item *derived_field_transformer_for_where(THD *thd, uchar *arg);
+ Item *derived_grouping_field_transformer_for_where(THD *thd, uchar *arg);
virtual void print(String *str, enum_query_type query_type);
+ bool excl_dep_on_table(table_map tab_map);
+ bool excl_dep_on_grouping_fields(st_select_lex *sel);
+ bool cleanup_excluding_fields_processor(void *arg)
+ { return field ? 0 : cleanup_processor(arg); }
+ bool cleanup_excluding_const_fields_processor(void *arg)
+ { return field && const_item() ? 0 : cleanup_processor(arg); }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_field>(thd, mem_root, this); }
bool is_outer_field() const
{
DBUG_ASSERT(fixed);
@@ -2642,8 +2885,9 @@ public:
}
Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_null>(thd, mem_root, this); }
};
class Item_null_result :public Item_null
@@ -2671,27 +2915,135 @@ public:
{
save_in_field(result_field, no_conversions);
}
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
- bool check_vcol_func_processor(uchar *arg)
+ bool check_partition_func_processor(void *int_arg) {return TRUE;}
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(full_name());
+ return mark_unsupported_function(full_name(), arg, VCOL_IMPOSSIBLE);
}
};
-/* Item represents one placeholder ('?') of prepared statement */
+/*
+ Item represents one placeholder ('?') of prepared statement
+
+ Notes:
+ Item_param::field_type() is used when this item is in a temporary table.
+ This is NOT placeholder metadata sent to client, as this value
+ is assigned after sending metadata (in setup_one_conversion_function).
+ For example in case of 'SELECT ?' you'll get MYSQL_TYPE_STRING both
+ in result set and placeholders metadata, no matter what type you will
+ supply for this placeholder in mysql_stmt_execute.
+*/
class Item_param :public Item_basic_value,
private Settable_routine_parameter,
- public Rewritable_query_parameter
+ public Rewritable_query_parameter,
+ public Type_handler_hybrid_field_type
{
-public:
+ /*
+ NO_VALUE is a special value meaning that the parameter has not been
+ assigned yet. Item_param::state is assigned to NO_VALUE in constructor
+ and is used at prepare time.
+
+ 1. At prepare time
+ Item_param::fix_fields() sets "fixed" to true,
+ but as Item_param::state is still NO_VALUE,
+ Item_param::basic_const_item() returns false. This prevents various
+ optimizations to happen at prepare time fix_fields().
+ For example, in this query:
+ PREPARE stmt FROM 'SELECT FORMAT(10000,2,?)';
+ Item_param::basic_const_item() is tested from
+ Item_func_format::fix_length_and_dec().
+
+ 2. At execute time:
+ When Item_param gets a value
+ (or a pseudo-value like DEFAULT_VALUE or IGNORE_VALUE):
+ - Item_param::state changes from NO_VALUE to something else
+ - Item_param::fixed is changed to true
+ All Item_param::set_xxx() make sure to do so.
+ In the state with an assigned value:
+ - Item_param::basic_const_item() returns true
+ - Item::type() returns NULL_ITEM, INT_ITEM, REAL_ITEM, DECIMAL_ITEM,
+ DATE_ITEM, STRING_ITEM, depending on the value assigned.
+ So in this state Item_param behaves in many cases like a literal.
+
+ When Item_param::cleanup() is called:
+ - Item_param::state does not change
+ - Item_param::fixed changes to false
+ Note, this puts Item_param into an inconsistent state:
+ - Item_param::basic_const_item() still returns "true"
+ - Item_param::type() still pretends to be a basic constant Item
+ Both are not expected in combination with fixed==false.
+ However, these methods are not really called in this state,
+ see asserts in Item_param::basic_const_item() and Item_param::type().
+
+ When Item_param::reset() is called:
+ - Item_param::state changes to NO_VALUE
+ - Item_param::fixed changes to false
+ */
enum enum_item_param_state
{
NO_VALUE, NULL_VALUE, INT_VALUE, REAL_VALUE,
STRING_VALUE, TIME_VALUE, LONG_DATA_VALUE,
- DECIMAL_VALUE
+ DECIMAL_VALUE, DEFAULT_VALUE, IGNORE_VALUE
} state;
+ enum Type item_type;
+
+ void fix_type(Type type)
+ {
+ item_type= type;
+ fixed= true;
+ }
+
+ void fix_temporal(uint32 max_length_arg, uint decimals_arg);
+
+public:
+ struct CONVERSION_INFO
+ {
+ /*
+ Character sets conversion info for string values.
+ Character sets of client and connection defined at bind time are used
+ for all conversions, even if one of them is later changed (i.e.
+ between subsequent calls to mysql_stmt_execute).
+ */
+ CHARSET_INFO *character_set_client;
+ CHARSET_INFO *character_set_of_placeholder;
+ /*
+ This points at character set of connection if conversion
+ to it is required (i. e. if placeholder typecode is not BLOB).
+ Otherwise it's equal to character_set_client (to simplify
+ check in convert_str_value()).
+ */
+ CHARSET_INFO *final_character_set_of_str_value;
+ private:
+ bool needs_conversion() const
+ {
+ return final_character_set_of_str_value !=
+ character_set_of_placeholder;
+ }
+ bool convert(THD *thd, String *str);
+ public:
+ void set(THD *thd, CHARSET_INFO *cs);
+ bool convert_if_needed(THD *thd, String *str)
+ {
+ /*
+ Check is so simple because all charsets were set up properly
+ in setup_one_conversion_function, where typecode of
+ placeholder was also taken into account: the variables are different
+ here only if conversion is really necessary.
+ */
+ if (needs_conversion())
+ return convert(thd, str);
+ str->set_charset(final_character_set_of_str_value);
+ return false;
+ }
+ };
+
+ /*
+ Used for bulk protocol only.
+ */
+ enum enum_indicator_type indicator;
+
/*
A buffer for string and long data values. Historically all allocated
values returned from val_str() were treated as eligible to
@@ -2708,46 +3060,24 @@ public:
{
longlong integer;
double real;
- /*
- Character sets conversion info for string values.
- Character sets of client and connection defined at bind time are used
- for all conversions, even if one of them is later changed (i.e.
- between subsequent calls to mysql_stmt_execute).
- */
- struct CONVERSION_INFO
- {
- CHARSET_INFO *character_set_client;
- CHARSET_INFO *character_set_of_placeholder;
- /*
- This points at character set of connection if conversion
- to it is required (i. e. if placeholder typecode is not BLOB).
- Otherwise it's equal to character_set_client (to simplify
- check in convert_str_value()).
- */
- CHARSET_INFO *final_character_set_of_str_value;
- } cs_info;
+ CONVERSION_INFO cs_info;
MYSQL_TIME time;
} value;
- /* Cached values for virtual methods to save us one switch. */
- enum Item_result item_result_type;
- enum Type item_type;
-
- /*
- Used when this item is used in a temporary table.
- This is NOT placeholder metadata sent to client, as this value
- is assigned after sending metadata (in setup_one_conversion_function).
- For example in case of 'SELECT ?' you'll get MYSQL_TYPE_STRING both
- in result set and placeholders metadata, no matter what type you will
- supply for this placeholder in mysql_stmt_execute.
- */
- enum enum_field_types param_type;
+ enum_field_types field_type() const
+ { return Type_handler_hybrid_field_type::field_type(); }
+ enum Item_result result_type () const
+ { return Type_handler_hybrid_field_type::result_type(); }
+ enum Item_result cmp_type () const
+ { return Type_handler_hybrid_field_type::cmp_type(); }
Item_param(THD *thd, uint pos_in_query_arg);
- enum Item_result result_type () const { return item_result_type; }
- enum Type type() const { return item_type; }
- enum_field_types field_type() const { return param_type; }
+ enum Type type() const
+ {
+ DBUG_ASSERT(fixed || state == NO_VALUE);
+ return item_type;
+ }
double val_real();
longlong val_int();
@@ -2756,15 +3086,18 @@ public:
bool get_date(MYSQL_TIME *tm, ulonglong fuzzydate);
int save_in_field(Field *field, bool no_conversions);
+ void set_default();
+ void set_ignore();
void set_null();
void set_int(longlong i, uint32 max_length_arg);
void set_double(double i);
void set_decimal(const char *str, ulong length);
- void set_decimal(const my_decimal *dv);
+ void set_decimal(const my_decimal *dv, bool unsigned_arg);
bool set_str(const char *str, ulong length);
bool set_longdata(const char *str, ulong length);
void set_time(MYSQL_TIME *tm, timestamp_type type, uint32 max_length_arg);
- bool set_from_user_var(THD *thd, const user_var_entry *entry);
+ void set_time(const MYSQL_TIME *tm, uint32 max_length_arg, uint decimals_arg);
+ bool set_from_item(THD *thd, Item *item);
void reset();
/*
Assign placeholder value from bind data.
@@ -2789,6 +3122,18 @@ public:
bool is_null()
{ DBUG_ASSERT(state != NO_VALUE); return state == NULL_VALUE; }
bool basic_const_item() const;
+ bool has_no_value() const
+ {
+ return state == NO_VALUE;
+ }
+ bool has_long_data_value() const
+ {
+ return state == LONG_DATA_VALUE;
+ }
+ bool has_int_value() const
+ {
+ return state == INT_VALUE;
+ }
/*
This method is used to make a copy of a basic constant item when
propagating constants in the optimizer. The reason to create a new
@@ -2812,11 +3157,19 @@ public:
Rewritable_query_parameter *get_rewritable_query_parameter()
{ return this; }
Settable_routine_parameter *get_settable_routine_parameter()
- { return this; }
+ { return m_is_settable_routine_parameter ? this : NULL; }
bool append_for_log(THD *thd, String *str);
+ bool check_vcol_func_processor(void *int_arg) {return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
+
+ bool add_as_clone(THD *thd);
+ void sync_clones();
+ bool register_clone(Item_param *i) { return m_clones.push_back(i); }
private:
+ void invalid_default_param() const;
+
virtual bool set_value(THD *thd, sp_rcontext *ctx, Item **it);
virtual void set_out_param_info(Send_field *info);
@@ -2824,10 +3177,17 @@ private:
public:
virtual const Send_field *get_out_param_info() const;
- virtual void make_field(Send_field *field);
+ virtual void make_field(THD *thd, Send_field *field);
private:
Send_field *m_out_param_info;
+ bool m_is_settable_routine_parameter;
+ /*
+ Array of all references of this parameter marker used in a CTE, pointing to
+ the clones created for the copies of this marker used in the CTE's copies.
+ It's used to synchronize the actual value of the parameter with the values
+ of the clones.
+ Mem_root_array<Item_param *, true> m_clones;
};
@@ -2859,13 +3219,28 @@ public:
bool basic_const_item() const { return 1; }
Item *clone_item(THD *thd);
virtual void print(String *str, enum_query_type query_type);
- Item_num *neg(THD *thd) { value= -value; return this; }
+ Item *neg(THD *thd);
uint decimal_precision() const
{ return (uint) (max_length - MY_TEST(value < 0)); }
bool eq(const Item *item, bool binary_cmp) const
{ return int_eq(value, item); }
- bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
- bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_int>(thd, mem_root, this); }
+};
+
+
+/*
+ We sometimes need to distinguish a number from a boolean:
+ a[1] and a[true] are different things in XPath.
+ Also in JSON boolean values should be treated differently.
+*/
+class Item_bool :public Item_int
+{
+public:
+ Item_bool(THD *thd, const char *str_arg, longlong i):
+ Item_int(thd, str_arg, i, 1) {}
+ bool is_bool_type() { return true; }
+ Item *neg_transformer(THD *thd);
};
@@ -2880,8 +3255,10 @@ public:
String *val_str(String*);
Item *clone_item(THD *thd);
virtual void print(String *str, enum_query_type query_type);
- Item_num *neg(THD *thd);
+ Item *neg(THD *thd);
uint decimal_precision() const { return max_length; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_uint>(thd, mem_root, this); }
};
@@ -2924,17 +3301,12 @@ public:
bool basic_const_item() const { return 1; }
Item *clone_item(THD *thd);
virtual void print(String *str, enum_query_type query_type);
- Item_num *neg(THD *thd)
- {
- my_decimal_neg(&decimal_value);
- unsigned_flag= !decimal_value.sign();
- return this;
- }
+ Item *neg(THD *thd);
uint decimal_precision() const { return decimal_value.precision(); }
bool eq(const Item *, bool binary_cmp) const;
void set_decimal_value(my_decimal *value_par);
- bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
- bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_decimal>(thd, mem_root, this); }
};
@@ -2979,10 +3351,12 @@ public:
my_decimal *val_decimal(my_decimal *);
bool basic_const_item() const { return 1; }
Item *clone_item(THD *thd);
- Item_num *neg(THD *thd) { value= -value; return this; }
+ Item *neg(THD *thd);
virtual void print(String *str, enum_query_type query_type);
bool eq(const Item *item, bool binary_cmp) const
{ return real_eq(value, item); }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_float>(thd, mem_root, this); }
};
@@ -3016,10 +3390,11 @@ protected:
// it is constant => can be used without fix_fields (and frequently used)
fixed= 1;
}
- void fix_and_set_name_from_value(Derivation dv, const Metadata metadata)
+ void fix_and_set_name_from_value(THD *thd, Derivation dv,
+ const Metadata metadata)
{
fix_from_value(dv, metadata);
- set_name(str_value.ptr(), str_value.length(), str_value.charset());
+ set_name(thd, str_value.ptr(), str_value.length(), str_value.charset());
}
protected:
/* Just create an item and do not fill string representation */
@@ -3028,7 +3403,7 @@ protected:
{
collation.set(cs, dv);
max_length= 0;
- set_name(NULL, 0, system_charset_info);
+ set_name(thd, NULL, 0, system_charset_info);
decimals= NOT_FIXED_DEC;
fixed= 1;
}
@@ -3037,7 +3412,7 @@ public:
Item_basic_constant(thd)
{
collation.set(csi, DERIVATION_COERCIBLE);
- set_name(NULL, 0, system_charset_info);
+ set_name(thd, NULL, 0, system_charset_info);
decimals= NOT_FIXED_DEC;
fixed= 1;
str_value.copy(str_arg, length_arg, csi);
@@ -3048,14 +3423,14 @@ public:
Derivation dv, uint repertoire): Item_basic_constant(thd)
{
str_value.set_or_copy_aligned(str, length, cs);
- fix_and_set_name_from_value(dv, Metadata(&str_value, repertoire));
+ fix_and_set_name_from_value(thd, dv, Metadata(&str_value, repertoire));
}
Item_string(THD *thd, const char *str, uint length,
CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE):
Item_basic_constant(thd)
{
str_value.set_or_copy_aligned(str, length, cs);
- fix_and_set_name_from_value(dv, Metadata(&str_value));
+ fix_and_set_name_from_value(thd, dv, Metadata(&str_value));
}
Item_string(THD *thd, const String *str, CHARSET_INFO *tocs, uint *conv_errors,
Derivation dv, uint repertoire): Item_basic_constant(thd)
@@ -3063,7 +3438,7 @@ public:
if (str_value.copy(str, tocs, conv_errors))
str_value.set("", 0, tocs); // EOM ?
str_value.mark_as_const();
- fix_and_set_name_from_value(dv, Metadata(&str_value, repertoire));
+ fix_and_set_name_from_value(thd, dv, Metadata(&str_value, repertoire));
}
// Constructors with an externally provided item name
Item_string(THD *thd, const char *name_par, const char *str, uint length,
@@ -3072,7 +3447,7 @@ public:
{
str_value.set_or_copy_aligned(str, length, cs);
fix_from_value(dv, Metadata(&str_value));
- set_name(name_par, 0, system_charset_info);
+ set_name(thd, name_par, 0, system_charset_info);
}
Item_string(THD *thd, const char *name_par, const char *str, uint length,
CHARSET_INFO *cs, Derivation dv, uint repertoire):
@@ -3080,7 +3455,7 @@ public:
{
str_value.set_or_copy_aligned(str, length, cs);
fix_from_value(dv, Metadata(&str_value, repertoire));
- set_name(name_par, 0, system_charset_info);
+ set_name(thd, name_par, 0, system_charset_info);
}
void print_value(String *to) const
{
@@ -3114,8 +3489,7 @@ public:
max_length= str_value.numchars() * collation.collation->mbmaxlen;
}
virtual void print(String *str, enum_query_type query_type);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
/**
Return TRUE if character-set-introducer was explicitly specified in the
@@ -3172,6 +3546,9 @@ public:
}
return MYSQL_TYPE_STRING; // Not a temporal literal
}
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_string>(thd, mem_root, this); }
};
@@ -3201,7 +3578,7 @@ public:
Item_string(thd, str, length, system_charset_info)
{ }
Item_string_sys(THD *thd, const char *str):
- Item_string(thd, str, strlen(str), system_charset_info)
+ Item_string(thd, str, (uint) strlen(str), system_charset_info)
{ }
};
@@ -3214,7 +3591,7 @@ public:
DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII)
{ }
Item_string_ascii(THD *thd, const char *str):
- Item_string(thd, str, strlen(str), &my_charset_latin1,
+ Item_string(thd, str, (uint)strlen(str), &my_charset_latin1,
DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII)
{ }
};
@@ -3246,10 +3623,12 @@ public:
str->append(func_name);
}
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
- bool check_vcol_func_processor(uchar *arg)
- {
- return trace_unsupported_by_check_vcol_func_processor(func_name);
+ bool check_partition_func_processor(void *int_arg) {return TRUE;}
+
+ bool check_vcol_func_processor(void *arg)
+ { // VCOL_TIME_FUNC because the value is not constant, but does not
+ // require fix_fields() to be re-run for every statement.
+ return mark_unsupported_function(func_name, arg, VCOL_TIME_FUNC);
}
};
@@ -3262,9 +3641,9 @@ public:
CHARSET_INFO *cs= NULL):
Item_string(thd, name_arg, length, cs)
{}
- bool check_vcol_func_processor(uchar *arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("safe_string");
+ return mark_unsupported_function("safe_string", arg, VCOL_IMPOSSIBLE);
}
};
@@ -3286,7 +3665,7 @@ class Item_blob :public Item_partition_func_safe_string
{
public:
Item_blob(THD *thd, const char *name_arg, uint length):
- Item_partition_func_safe_string(thd, name_arg, strlen(name_arg), &my_charset_bin)
+ Item_partition_func_safe_string(thd, name_arg, (uint) strlen(name_arg), &my_charset_bin)
{ max_length= length; }
enum Type type() const { return TYPE_HOLDER; }
enum_field_types field_type() const { return MYSQL_TYPE_BLOB; }
@@ -3309,7 +3688,7 @@ public:
Item_partition_func_safe_string(thd, "", 0,
cs ? cs : &my_charset_utf8_general_ci)
{ name=(char*) header; max_length= length * collation.collation->mbmaxlen; }
- void make_field(Send_field *field);
+ void make_field(THD *thd, Send_field *field);
};
@@ -3351,8 +3730,7 @@ public:
{
return const_charset_converter(thd, tocs, true);
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
bool basic_const_item() const { return 1; }
bool eq(const Item *item, bool binary_cmp) const
{
@@ -3380,7 +3758,12 @@ public:
DBUG_ASSERT(fixed == 1);
return (double) (ulonglong) Item_hex_hybrid::val_int();
}
- longlong val_int();
+ longlong val_int()
+ {
+ // The following assert is redundant, because fixed=1 is assigned in the constructor
+ DBUG_ASSERT(fixed == 1);
+ return longlong_from_hex_hybrid(str_value.ptr(), str_value.length());
+ }
my_decimal *val_decimal(my_decimal *decimal_value)
{
// The following assert is redundant, because fixed=1 is assigned in the constructor
@@ -3389,9 +3772,15 @@ public:
int2my_decimal(E_DEC_FATAL_ERROR, value, TRUE, decimal_value);
return decimal_value;
}
- int save_in_field(Field *field, bool no_conversions);
+ int save_in_field(Field *field, bool no_conversions)
+ {
+ field->set_notnull();
+ return field->store_hex_hybrid(str_value.ptr(), str_value.length());
+ }
enum Item_result cast_to_int_type() const { return INT_RESULT; }
void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_hex_hybrid>(thd, mem_root, this); }
};
@@ -3432,6 +3821,8 @@ public:
}
enum Item_result cast_to_int_type() const { return STRING_RESULT; }
void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_hex_string>(thd, mem_root, this); }
};
@@ -3471,8 +3862,7 @@ public:
enum Item_result result_type () const { return STRING_RESULT; }
Item_result cmp_type() const { return TIME_RESULT; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
bool is_null()
{ return is_null_from_temporal(); }
@@ -3514,6 +3904,8 @@ public:
void print(String *str, enum_query_type query_type);
Item *clone_item(THD *thd);
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_date_literal>(thd, mem_root, this); }
};
@@ -3533,6 +3925,8 @@ public:
void print(String *str, enum_query_type query_type);
Item *clone_item(THD *thd);
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_time_literal>(thd, mem_root, this); }
};
@@ -3554,6 +3948,8 @@ public:
void print(String *str, enum_query_type query_type);
Item *clone_item(THD *thd);
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_datetime_literal>(thd, mem_root, this); }
};
@@ -3574,7 +3970,7 @@ class Item_date_literal_for_invalid_dates: public Item_date_literal
Item_date_literal_for_invalid_dates::get_date()
(unlike the regular Item_date_literal::get_date())
- does not check the result for NO_ZERO_IN_DATE and NO_ZER_DATE,
+ does not check the result for NO_ZERO_IN_DATE and NO_ZERO_DATE,
always returns success (false), and does not produce error/warning messages.
We need these _for_invalid_dates classes to be able to rewrite:
@@ -3623,7 +4019,7 @@ protected:
Item **args, *tmp_arg[2];
uint arg_count;
void set_arguments(THD *thd, List<Item> &list);
- bool walk_args(Item_processor processor, bool walk_subquery, uchar *arg)
+ bool walk_args(Item_processor processor, bool walk_subquery, void *arg)
{
for (uint i= 0; i < arg_count; i++)
{
@@ -3634,6 +4030,28 @@ protected:
}
bool transform_args(THD *thd, Item_transformer transformer, uchar *arg);
void propagate_equal_fields(THD *, const Item::Context &, COND_EQUAL *);
+ bool excl_dep_on_table(table_map tab_map)
+ {
+ for (uint i= 0; i < arg_count; i++)
+ {
+ if (args[i]->const_item())
+ continue;
+ if (!args[i]->excl_dep_on_table(tab_map))
+ return false;
+ }
+ return true;
+ }
+ bool excl_dep_on_grouping_fields(st_select_lex *sel)
+ {
+ for (uint i= 0; i < arg_count; i++)
+ {
+ if (args[i]->const_item())
+ continue;
+ if (!args[i]->excl_dep_on_grouping_fields(sel))
+ return false;
+ }
+ return true;
+ }
public:
Item_args(void)
:args(NULL), arg_count(0)
@@ -3648,28 +4066,28 @@ public:
{
args[0]= a; args[1]= b;
}
- Item_args(Item *a, Item *b, Item *c)
+ Item_args(THD *thd, Item *a, Item *b, Item *c)
{
arg_count= 0;
- if ((args= (Item**) sql_alloc(sizeof(Item*) * 3)))
+ if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 3)))
{
arg_count= 3;
args[0]= a; args[1]= b; args[2]= c;
}
}
- Item_args(Item *a, Item *b, Item *c, Item *d)
+ Item_args(THD *thd, Item *a, Item *b, Item *c, Item *d)
{
arg_count= 0;
- if ((args= (Item**) sql_alloc(sizeof(Item*) * 4)))
+ if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 4)))
{
arg_count= 4;
args[0]= a; args[1]= b; args[2]= c; args[3]= d;
}
}
- Item_args(Item *a, Item *b, Item *c, Item *d, Item* e)
+ Item_args(THD *thd, Item *a, Item *b, Item *c, Item *d, Item* e)
{
arg_count= 5;
- if ((args= (Item**) sql_alloc(sizeof(Item*) * 5)))
+ if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 5)))
{
arg_count= 5;
args[0]= a; args[1]= b; args[2]= c; args[3]= d; args[4]= e;
@@ -3878,17 +4296,17 @@ public:
Item_func_or_sum(THD *thd, Item *a, Item *b):
Item_result_field(thd), Item_args(a, b) { }
Item_func_or_sum(THD *thd, Item *a, Item *b, Item *c):
- Item_result_field(thd), Item_args(a, b, c) { }
+ Item_result_field(thd), Item_args(thd, a, b, c) { }
Item_func_or_sum(THD *thd, Item *a, Item *b, Item *c, Item *d):
- Item_result_field(thd), Item_args(a, b, c, d) { }
+ Item_result_field(thd), Item_args(thd, a, b, c, d) { }
Item_func_or_sum(THD *thd, Item *a, Item *b, Item *c, Item *d, Item *e):
- Item_result_field(thd), Item_args(a, b, c, d, e) { }
+ Item_result_field(thd), Item_args(thd, a, b, c, d, e) { }
Item_func_or_sum(THD *thd, Item_func_or_sum *item):
Item_result_field(thd, item), Item_args(thd, item),
Used_tables_and_const_cache(item) { }
Item_func_or_sum(THD *thd, List<Item> &list):
Item_result_field(thd), Item_args(thd, list) { }
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
if (walk_args(processor, walk_subquery, arg))
return true;
@@ -3911,9 +4329,10 @@ public:
also to make printing of items inherited from Item_sum uniform.
*/
virtual const char *func_name() const= 0;
- virtual void fix_length_and_dec()= 0;
+ virtual bool fix_length_and_dec()= 0;
bool const_item() const { return const_item_cache; }
table_map used_tables() const { return used_tables_cache; }
+ Item* build_clone(THD *thd, MEM_ROOT *mem_root);
};
@@ -3978,7 +4397,7 @@ public:
bool val_bool_result();
bool is_null_result();
bool send(Protocol *prot, String *tmp);
- void make_field(Send_field *field);
+ void make_field(THD *thd, Send_field *field);
bool fix_fields(THD *, Item **);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
int save_in_field(Field *field, bool no_conversions);
@@ -4030,7 +4449,7 @@ public:
{
return ref ? (*ref)->real_item() : this;
}
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
if (ref && *ref)
return (*ref)->walk(processor, walk_subquery, arg) ||
@@ -4041,7 +4460,7 @@ public:
Item* transform(THD *thd, Item_transformer, uchar *arg);
Item* compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t);
- bool enumerate_field_refs_processor(uchar *arg)
+ bool enumerate_field_refs_processor(void *arg)
{ return (*ref)->enumerate_field_refs_processor(arg); }
void no_rows_in_result()
{
@@ -4055,6 +4474,10 @@ public:
void cleanup();
Item_field *field_for_view_update()
{ return (*ref)->field_for_view_update(); }
+ Load_data_outvar *get_load_data_outvar()
+ {
+ return (*ref)->get_load_data_outvar();
+ }
virtual Ref_Type ref_type() { return REF; }
// Row emulation: forwarding of ROW-related calls to ref
@@ -4084,9 +4507,9 @@ public:
if (ref && result_type() == ROW_RESULT)
(*ref)->bring_value();
}
- bool check_vcol_func_processor(uchar *arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("ref");
+ return mark_unsupported_function("ref", arg, VCOL_IMPOSSIBLE);
}
bool basic_const_item() const { return ref && (*ref)->basic_const_item(); }
bool is_outer_field() const
@@ -4095,6 +4518,8 @@ public:
DBUG_ASSERT(ref);
return (*ref)->is_outer_field();
}
+
+ Item* build_clone(THD *thd, MEM_ROOT *mem_root);
/**
Checks if the item tree that ref points to contains a subquery.
@@ -4103,6 +4528,33 @@ public:
{
return (*ref)->has_subquery();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_ref>(thd, mem_root, this); }
+ bool excl_dep_on_table(table_map tab_map)
+ {
+ table_map used= used_tables();
+ if (used & OUTER_REF_TABLE_BIT)
+ return false;
+ return (used == tab_map) || (*ref)->excl_dep_on_table(tab_map);
+ }
+ bool excl_dep_on_grouping_fields(st_select_lex *sel)
+ { return (*ref)->excl_dep_on_grouping_fields(sel); }
+ bool cleanup_excluding_fields_processor(void *arg)
+ {
+ Item *item= real_item();
+ if (item && item->type() == FIELD_ITEM &&
+ ((Item_field *)item)->field)
+ return 0;
+ return cleanup_processor(arg);
+ }
+ bool cleanup_excluding_const_fields_processor(void *arg)
+ {
+ Item *item= real_item();
+ if (item && item->type() == FIELD_ITEM &&
+ ((Item_field *) item)->field && item->const_item())
+ return 0;
+ return cleanup_processor(arg);
+ }
};
@@ -4145,6 +4597,8 @@ public:
bool is_null();
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
virtual Ref_Type ref_type() { return DIRECT_REF; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_direct_ref>(thd, mem_root, this); }
};
@@ -4250,7 +4704,8 @@ public:
virtual void print(String *str, enum_query_type query_type);
virtual const char *full_name() const { return orig_item->full_name(); }
- virtual void make_field(Send_field *field) { orig_item->make_field(field); }
+ virtual void make_field(THD *thd, Send_field *field)
+ { orig_item->make_field(thd, field); }
bool eq(const Item *item, bool binary_cmp) const
{
Item *it= ((Item *) item)->real_item();
@@ -4270,12 +4725,12 @@ public:
}
bool const_item() const { return orig_item->const_item(); }
table_map not_null_tables() const { return orig_item->not_null_tables(); }
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
return orig_item->walk(processor, walk_subquery, arg) ||
(this->*processor)(arg);
}
- bool enumerate_field_refs_processor(uchar *arg)
+ bool enumerate_field_refs_processor(void *arg)
{ return orig_item->enumerate_field_refs_processor(arg); }
Item_field *field_for_view_update()
{ return orig_item->field_for_view_update(); }
@@ -4301,12 +4756,15 @@ public:
orig_item->bring_value();
}
bool is_expensive() { return orig_item->is_expensive(); }
- bool is_expensive_processor(uchar *arg)
+ bool is_expensive_processor(void *arg)
{ return orig_item->is_expensive_processor(arg); }
- bool check_vcol_func_processor(uchar *arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("cache");
+ return mark_unsupported_function("cache", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cache_wrapper>(thd, mem_root, this); }
+ Item *build_clone(THD *thd, MEM_ROOT *mem_root) { return 0; }
};
@@ -4374,18 +4832,25 @@ public:
void update_used_tables();
table_map not_null_tables() const;
bool const_item() const { return used_tables() == 0; }
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
return (*ref)->walk(processor, walk_subquery, arg) ||
(this->*processor)(arg);
}
- bool view_used_tables_processor(uchar *arg)
+ bool view_used_tables_processor(void *arg)
{
TABLE_LIST *view_arg= (TABLE_LIST *) arg;
if (view_arg == view)
view_arg->view_used_tables|= (*ref)->used_tables();
return 0;
}
+ bool excl_dep_on_table(table_map tab_map);
+ bool excl_dep_on_grouping_fields(st_select_lex *sel);
+ Item *derived_field_transformer_for_having(THD *thd, uchar *arg);
+ Item *derived_field_transformer_for_where(THD *thd, uchar *arg);
+ Item *derived_grouping_field_transformer_for_where(THD *thd,
+ uchar *arg);
+
void save_val(Field *to)
{
if (check_null_ref())
@@ -4467,6 +4932,8 @@ public:
item_equal= NULL;
Item_direct_ref::cleanup();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_direct_view_ref>(thd, mem_root, this); }
};
@@ -4523,7 +4990,7 @@ public:
}
table_map not_null_tables() const { return 0; }
virtual Ref_Type ref_type() { return OUTER_REF; }
- bool check_inner_refs_processor(uchar * arg);
+ bool check_inner_refs_processor(void * arg);
};
@@ -4558,6 +5025,8 @@ public:
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
virtual void print(String *str, enum_query_type query_type);
table_map used_tables() const;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_ref_null_helper>(thd, mem_root, this); }
};
/*
@@ -4598,6 +5067,7 @@ public:
#include "item_timefunc.h"
#include "item_subselect.h"
#include "item_xmlfunc.h"
+#include "item_jsonfunc.h"
#include "item_create.h"
#endif
@@ -4621,26 +5091,21 @@ public:
from Item_).
*/
-class Item_copy :public Item
+class Item_copy :public Item,
+ public Type_handler_hybrid_field_type
{
protected:
/**
- Stores the type of the resulting field that would be used to store the data
+ Type_handler_hybrid_field_type is used to
+ store the type of the resulting field that would be used to store the data
in the cache. This is to avoid calls to the original item.
*/
- enum enum_field_types cached_field_type;
/** The original item that is copied */
Item *item;
/**
- Stores the result type of the original item, so it can be returned
- without calling the original item's method
- */
- Item_result cached_result_type;
-
- /**
Constructor of the Item_copy class
stores metadata information about the original class as well as a
@@ -4652,8 +5117,7 @@ protected:
null_value=maybe_null=item->maybe_null;
Type_std_attributes::set(item);
name=item->name;
- cached_field_type= item->field_type();
- cached_result_type= item->result_type();
+ set_handler_by_field_type(item->field_type());
fixed= item->fixed;
}
@@ -4671,16 +5135,21 @@ public:
Item *get_item() { return item; }
/** All of the subclasses should have the same type tag */
enum Type type() const { return COPY_STR_ITEM; }
- enum_field_types field_type() const { return cached_field_type; }
- enum Item_result result_type () const { return cached_result_type; }
- void make_field(Send_field *field) { item->make_field(field); }
+ enum_field_types field_type() const
+ { return Type_handler_hybrid_field_type::field_type(); }
+ enum Item_result result_type () const
+ { return Type_handler_hybrid_field_type::result_type(); }
+ enum Item_result cmp_type () const
+ { return Type_handler_hybrid_field_type::cmp_type(); }
+
+ void make_field(THD *thd, Send_field *field) { item->make_field(thd, field); }
table_map used_tables() const { return (table_map) 1L; }
bool const_item() const { return 0; }
bool is_null() { return null_value; }
- bool check_vcol_func_processor(uchar *arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("copy");
+ return mark_unsupported_function("copy", arg, VCOL_IMPOSSIBLE);
}
/*
@@ -4693,7 +5162,7 @@ public:
virtual double val_real() = 0;
virtual longlong val_int() = 0;
virtual int save_in_field(Field *field, bool no_conversions) = 0;
- bool walk(Item_processor processor, bool walk_subquery, uchar *args)
+ bool walk(Item_processor processor, bool walk_subquery, void *args)
{
return (item->walk(processor, walk_subquery, args)) ||
(this->*processor)(args);
@@ -4716,6 +5185,8 @@ public:
longlong val_int();
void copy();
int save_in_field(Field *field, bool no_conversions);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_copy_string>(thd, mem_root, this); }
};
@@ -4728,17 +5199,10 @@ public:
- cmp() method that compares the saved value with the current value of the
source item, and if they were not equal saves item's value into the saved
value.
-*/
-/*
- Cached_item_XXX objects are not exactly caches. They do the following:
-
- Each Cached_item_XXX object has
- - its source item
- - saved value of the source item
- - cmp() method that compares the saved value with the current value of the
- source item, and if they were not equal saves item's value into the saved
- value.
+ TODO: add here:
+ - a way to save the new value w/o comparison
+ - a way to do less/equal/greater comparison
*/
class Cached_item :public Sql_alloc
@@ -4746,48 +5210,75 @@ class Cached_item :public Sql_alloc
public:
bool null_value;
Cached_item() :null_value(0) {}
+ /*
+ Compare the cached value with the source value. If not equal, copy
+ the source value to the cache.
+ @return
+ true - Not equal
+ false - Equal
+ */
virtual bool cmp(void)=0;
+
+ /* Compare the cached value with the source value, without copying */
+ virtual int cmp_read_only()=0;
+
virtual ~Cached_item(); /*line -e1509 */
};
-class Cached_item_str :public Cached_item
+class Cached_item_item : public Cached_item
{
+protected:
Item *item;
+
+ Cached_item_item(Item *arg) : item(arg) {}
+public:
+ void fetch_value_from(Item *new_item)
+ {
+ Item *save= item;
+ item= new_item;
+ cmp();
+ item= save;
+ }
+};
+
+class Cached_item_str :public Cached_item_item
+{
uint32 value_max_length;
String value,tmp_value;
public:
Cached_item_str(THD *thd, Item *arg);
bool cmp(void);
+ int cmp_read_only();
~Cached_item_str(); // Deallocate String:s
};
-class Cached_item_real :public Cached_item
+class Cached_item_real :public Cached_item_item
{
- Item *item;
double value;
public:
- Cached_item_real(Item *item_par) :item(item_par),value(0.0) {}
+ Cached_item_real(Item *item_par) :Cached_item_item(item_par),value(0.0) {}
bool cmp(void);
+ int cmp_read_only();
};
-class Cached_item_int :public Cached_item
+class Cached_item_int :public Cached_item_item
{
- Item *item;
longlong value;
public:
- Cached_item_int(Item *item_par) :item(item_par),value(0) {}
+ Cached_item_int(Item *item_par) :Cached_item_item(item_par),value(0) {}
bool cmp(void);
+ int cmp_read_only();
};
-class Cached_item_decimal :public Cached_item
+class Cached_item_decimal :public Cached_item_item
{
- Item *item;
my_decimal value;
public:
Cached_item_decimal(Item *item_par);
bool cmp(void);
+ int cmp_read_only();
};
class Cached_item_field :public Cached_item
@@ -4797,17 +5288,19 @@ class Cached_item_field :public Cached_item
uint length;
public:
- Cached_item_field(Field *arg_field) : field(arg_field)
+ Cached_item_field(THD *thd, Field *arg_field): field(arg_field)
{
field= arg_field;
/* TODO: take the memory allocation below out of the constructor. */
- buff= (uchar*) sql_calloc(length=field->pack_length());
+ buff= (uchar*) thd_calloc(thd, length= field->pack_length());
}
bool cmp(void);
+ int cmp_read_only();
};
class Item_default_value : public Item_field
{
+ void calculate();
public:
Item *arg;
Item_default_value(THD *thd, Name_resolution_context *context_arg)
@@ -4818,18 +5311,41 @@ public:
:Item_field(thd, context_arg, (const char *)NULL, (const char *)NULL,
(const char *)NULL),
arg(a) {}
+ Item_default_value(THD *thd, Name_resolution_context *context_arg, Field *a)
+ :Item_field(thd, context_arg, (const char *)NULL, (const char *)NULL,
+ (const char *)NULL),
+ arg(NULL) {}
enum Type type() const { return DEFAULT_VALUE_ITEM; }
bool eq(const Item *item, bool binary_cmp) const;
bool fix_fields(THD *, Item **);
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
+ String *val_str(String *str);
+ double val_real();
+ longlong val_int();
+ my_decimal *val_decimal(my_decimal *decimal_value);
+ bool get_date(MYSQL_TIME *ltime,ulonglong fuzzydate);
+ bool send(Protocol *protocol, String *buffer);
int save_in_field(Field *field_arg, bool no_conversions);
- table_map used_tables() const { return (table_map)0L; }
-
+ bool save_in_param(THD *thd, Item_param *param)
+ {
+ // It should not be possible to have "EXECUTE .. USING DEFAULT(a)"
+ DBUG_ASSERT(arg == NULL);
+ param->set_default();
+ return false;
+ }
+ table_map used_tables() const;
+ virtual void update_used_tables()
+ {
+ if (field && field->default_value)
+ field->default_value->expr->update_used_tables();
+ }
Field *get_tmp_table_field() { return 0; }
Item *get_tmp_table_item(THD *thd) { return this; }
Item_field *field_for_view_update() { return 0; }
+ bool update_vcol_processor(void *arg) { return 0; }
+ bool check_func_default_processor(void *arg) { return true; }
- bool walk(Item_processor processor, bool walk_subquery, uchar *args)
+ bool walk(Item_processor processor, bool walk_subquery, void *args)
{
return (arg && arg->walk(processor, walk_subquery, args)) ||
(this->*processor)(args);
@@ -4838,6 +5354,37 @@ public:
Item *transform(THD *thd, Item_transformer transformer, uchar *args);
};
+/**
+ This class is used as the bulk parameter IGNORE representation.
+
+ It just does nothing when assigned to a field.
+
+*/
+
+class Item_ignore_value : public Item_default_value
+{
+public:
+ Item_ignore_value(THD *thd, Name_resolution_context *context_arg)
+ :Item_default_value(thd, context_arg)
+ {};
+
+ void print(String *str, enum_query_type query_type);
+ int save_in_field(Field *field_arg, bool no_conversions);
+ bool save_in_param(THD *thd, Item_param *param)
+ {
+ param->set_ignore();
+ return false;
+ }
+
+ String *val_str(String *str);
+ double val_real();
+ longlong val_int();
+ my_decimal *val_decimal(my_decimal *decimal_value);
+ bool get_date(MYSQL_TIME *ltime,ulonglong fuzzydate);
+ bool send(Protocol *protocol, String *buffer);
+};
+
+
/*
Item_insert_value -- an implementation of VALUES() function.
You can use the VALUES(col_name) function in the UPDATE clause
@@ -4872,15 +5419,16 @@ public:
Item_field *field_for_view_update() { return 0; }
- bool walk(Item_processor processor, bool walk_subquery, uchar *args)
+ bool walk(Item_processor processor, bool walk_subquery, void *args)
{
return arg->walk(processor, walk_subquery, args) ||
(this->*processor)(args);
}
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
- bool check_vcol_func_processor(uchar *arg_arg)
+ bool check_partition_func_processor(void *int_arg) {return TRUE;}
+ bool update_vcol_processor(void *arg) { return 0; }
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("values");
+ return mark_unsupported_function("values()", arg, VCOL_IMPOSSIBLE);
}
};
@@ -4967,10 +5515,7 @@ private:
*/
bool read_only;
public:
- bool check_vcol_func_processor(uchar *arg)
- {
- return trace_unsupported_by_check_vcol_func_processor("trigger");
- }
+ bool check_vcol_func_processor(void *arg);
};
@@ -4981,7 +5526,8 @@ public:
for any value.
*/
-class Item_cache: public Item_basic_constant
+class Item_cache: public Item_basic_constant,
+ public Type_handler_hybrid_field_type
{
protected:
Item *example;
@@ -4991,7 +5537,6 @@ protected:
by IN->EXISTS transformation.
*/
Field *cached_field;
- enum enum_field_types cached_field_type;
/*
TRUE <=> cache holds value of the last stored item (i.e actual value).
store() stores item to be cached and sets this flag to FALSE.
@@ -5003,18 +5548,19 @@ protected:
public:
Item_cache(THD *thd):
Item_basic_constant(thd),
+ Type_handler_hybrid_field_type(MYSQL_TYPE_STRING),
example(0), cached_field(0),
- cached_field_type(MYSQL_TYPE_STRING),
value_cached(0)
{
fixed= 1;
maybe_null= 1;
null_value= 1;
}
+protected:
Item_cache(THD *thd, enum_field_types field_type_arg):
Item_basic_constant(thd),
+ Type_handler_hybrid_field_type(field_type_arg),
example(0), cached_field(0),
- cached_field_type(field_type_arg),
value_cached(0)
{
fixed= 1;
@@ -5022,6 +5568,7 @@ public:
null_value= 1;
}
+public:
virtual bool allocate(THD *thd, uint i) { return 0; }
virtual bool setup(THD *thd, Item *item)
{
@@ -5032,12 +5579,28 @@ public:
return 0;
};
enum Type type() const { return CACHE_ITEM; }
- enum_field_types field_type() const { return cached_field_type; }
- static Item_cache* get_cache(THD *thd, const Item *item);
- static Item_cache* get_cache(THD *thd, const Item* item, const Item_result type);
+
+ enum_field_types field_type() const
+ { return Type_handler_hybrid_field_type::field_type(); }
+ enum Item_result result_type () const
+ { return Type_handler_hybrid_field_type::result_type(); }
+ enum Item_result cmp_type () const
+ { return Type_handler_hybrid_field_type::cmp_type(); }
+
+ static Item_cache* get_cache(THD *thd, const Item* item,
+ const Item_result type, const enum_field_types f_type);
+ static Item_cache* get_cache(THD *thd, const Item* item,
+ const Item_result type)
+ {
+ return get_cache(thd, item, type, item->field_type());
+ }
+ static Item_cache* get_cache(THD *thd, const Item *item)
+ {
+ return get_cache(thd, item, item->cmp_type());
+ }
virtual void keep_array() {}
virtual void print(String *str, enum_query_type query_type);
- bool eq_def(Field *field)
+ bool eq_def(const Field *field)
{
return cached_field ? cached_field->eq_def (field) : FALSE;
}
@@ -5045,9 +5608,26 @@ public:
{
return this == item;
}
- bool check_vcol_func_processor(uchar *arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("cache");
+ if (example)
+ {
+ Item::vcol_func_processor_result *res= (Item::vcol_func_processor_result*)arg;
+ example->check_vcol_func_processor(arg);
+ /*
+ Item_cache of a non-deterministic function requires re-fixing
+ even if the function itself doesn't (e.g. CURRENT_TIMESTAMP)
+ */
+ if (res->errors & VCOL_NOT_STRICTLY_DETERMINISTIC)
+ res->errors|= VCOL_SESSION_FUNC;
+ return false;
+ }
+ return mark_unsupported_function("cache", arg, VCOL_IMPOSSIBLE);
+ }
+ void cleanup()
+ {
+ clear();
+ Item_basic_constant::cleanup();
}
/**
Check if saved item has a non-NULL value.
@@ -5062,7 +5642,7 @@ public:
virtual void store(Item *item);
virtual bool cache_value()= 0;
bool basic_const_item() const
- { return MY_TEST(example && example->basic_const_item()); }
+ { return example && example->basic_const_item(); }
virtual void clear() { null_value= TRUE; value_cached= FALSE; }
bool is_null() { return !has_value(); }
virtual bool is_expensive()
@@ -5071,7 +5651,7 @@ public:
return false;
return example->is_expensive();
}
- bool is_expensive_processor(uchar *arg)
+ bool is_expensive_processor(void *arg)
{
DBUG_ASSERT(example);
if (value_cached)
@@ -5079,19 +5659,27 @@ public:
return example->is_expensive_processor(arg);
}
virtual void set_null();
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
if (example && example->walk(processor, walk_subquery, arg))
return TRUE;
return (this->*processor)(arg);
}
virtual Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
- void split_sum_func2_example(THD *thd, Item **ref_pointer_array,
+ void split_sum_func2_example(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
{
example->split_sum_func2(thd, ref_pointer_array, fields, &example, flags);
}
Item *get_example() const { return example; }
+
+ virtual Item *convert_to_basic_const_item(THD *thd) { return 0; };
+ Item *derived_field_transformer_for_having(THD *thd, uchar *arg)
+ { return convert_to_basic_const_item(thd); }
+ Item *derived_field_transformer_for_where(THD *thd, uchar *arg)
+ { return convert_to_basic_const_item(thd); }
+ Item *derived_grouping_field_transformer_for_where(THD *thd, uchar *arg)
+ { return convert_to_basic_const_item(thd); }
};
@@ -5112,6 +5700,9 @@ public:
enum Item_result result_type() const { return INT_RESULT; }
bool cache_value();
int save_in_field(Field *field, bool no_conversions);
+ Item *convert_to_basic_const_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cache_int>(thd, mem_root, this); }
};
@@ -5136,6 +5727,9 @@ public:
Important when storing packed datetime values.
*/
Item *clone_item(THD *thd);
+ Item *convert_to_basic_const_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cache_temporal>(thd, mem_root, this); }
};
@@ -5152,6 +5746,9 @@ public:
my_decimal *val_decimal(my_decimal *);
enum Item_result result_type() const { return REAL_RESULT; }
bool cache_value();
+ Item *convert_to_basic_const_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cache_real>(thd, mem_root, this); }
};
@@ -5168,6 +5765,9 @@ public:
my_decimal *val_decimal(my_decimal *);
enum Item_result result_type() const { return DECIMAL_RESULT; }
bool cache_value();
+ Item *convert_to_basic_const_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cache_decimal>(thd, mem_root, this); }
};
@@ -5181,7 +5781,7 @@ public:
Item_cache_str(THD *thd, const Item *item):
Item_cache(thd, item->field_type()), value(0),
is_varbinary(item->type() == FIELD_ITEM &&
- cached_field_type == MYSQL_TYPE_VARCHAR &&
+ Item_cache_str::field_type() == MYSQL_TYPE_VARCHAR &&
!((const Item_field *) item)->field->has_charset())
{
collation.set(const_cast<DTCollation&>(item->collation));
@@ -5194,6 +5794,9 @@ public:
CHARSET_INFO *charset() const { return value->charset(); };
int save_in_field(Field *field, bool no_conversions);
bool cache_value();
+ Item *convert_to_basic_const_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cache_str>(thd, mem_root, this); }
};
@@ -5217,6 +5820,8 @@ public:
*/
return Item::safe_charset_converter(thd, tocs);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cache_str_for_nullif>(thd, mem_root, this); }
};
@@ -5242,7 +5847,7 @@ public:
bool setup(THD *thd, Item *item);
void store(Item *item);
void illegal_method_call(const char *);
- void make_field(Send_field *)
+ void make_field(THD *thd, Send_field *)
{
illegal_method_call((const char*)"make_field");
};
@@ -5288,6 +5893,8 @@ public:
}
bool cache_value();
virtual void set_null();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cache_row>(thd, mem_root, this); }
};
@@ -5298,11 +5905,11 @@ public:
Item_type_holder do not need cleanup() because its time of live limited by
single SP/PS execution.
*/
-class Item_type_holder: public Item
+class Item_type_holder: public Item,
+ public Type_handler_hybrid_real_field_type
{
protected:
TYPELIB *enum_set_typelib;
- enum_field_types fld_type;
Field::geometry_type geometry_type;
void get_full_info(Item *item);
@@ -5312,8 +5919,27 @@ protected:
public:
Item_type_holder(THD*, Item*);
- Item_result result_type() const;
- enum_field_types field_type() const { return fld_type; };
+ enum_field_types field_type() const
+ { return Type_handler_hybrid_real_field_type::field_type(); }
+ enum_field_types real_field_type() const
+ { return Type_handler_hybrid_real_field_type::real_field_type(); }
+ enum Item_result result_type () const
+ {
+ /*
+ In 10.1 Item_type_holder::result_type() returned
+ Field::result_merge_type(field_type()), which returned STRING_RESULT
+ for the BIT data type. In 10.2 it returns INT_RESULT, similar
+ to what Field_bit::result_type() does. This should not be
+ important because Item_type_holder is a limited purpose Item
+ and its result_type() should not be called from outside of
+ Item_type_holder. It's called only internally from decimal_int_part()
+ from join_types(), to calculate "decimals" of the result data type.
+ As soon as we get BIT as one of the joined types, the result field
+ type cannot be numeric: it's either BIT, or VARBINARY.
+ */
+ return Type_handler_hybrid_real_field_type::result_type();
+ }
+
enum Type type() const { return TYPE_HOLDER; }
double val_real();
longlong val_int();
@@ -5324,6 +5950,7 @@ public:
static uint32 display_length(Item *item);
static enum_field_types get_real_type(Item *);
Field::geometry_type get_geometry_type() const { return geometry_type; };
+ Item* get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
};
@@ -5426,4 +6053,26 @@ public:
void close() {}
};
+
+/*
+ It's used in ::fix_fields() methods of LIKE and JSON_SEARCH
+ functions to handle the ESCAPE parameter.
+ This parameter is quite non-standard so the specific function.
+*/
+bool fix_escape_item(THD *thd, Item *escape_item, String *tmp_str,
+ bool escape_used_in_parsing, CHARSET_INFO *cmp_cs,
+ int *escape);
+
+inline bool Virtual_column_info::is_equal(const Virtual_column_info* vcol) const
+{
+ return field_type == vcol->get_real_type()
+ && stored_in_db == vcol->is_stored()
+ && expr->eq(vcol->expr, true);
+}
+
+inline void Virtual_column_info::print(String* str)
+{
+ expr->print_for_table_def(str);
+}
+
#endif /* SQL_ITEM_INCLUDED */
diff --git a/sql/item_buff.cc b/sql/item_buff.cc
index d1134525f7b..488eb52fb77 100644
--- a/sql/item_buff.cc
+++ b/sql/item_buff.cc
@@ -43,7 +43,7 @@ Cached_item *new_Cached_item(THD *thd, Item *item, bool pass_through_ref)
{
Item_field *real_item= (Item_field *) item->real_item();
Field *cached_field= real_item->field;
- return new Cached_item_field(cached_field);
+ return new (thd->mem_root) Cached_item_field(thd, cached_field);
}
switch (item->result_type()) {
case STRING_RESULT:
@@ -71,7 +71,7 @@ Cached_item::~Cached_item() {}
*/
Cached_item_str::Cached_item_str(THD *thd, Item *arg)
- :item(arg),
+ :Cached_item_item(arg),
value_max_length(MY_MIN(arg->max_length, thd->variables.max_sort_length)),
value(value_max_length)
{}
@@ -98,6 +98,25 @@ bool Cached_item_str::cmp(void)
return tmp;
}
+
+int Cached_item_str::cmp_read_only()
+{
+ String *res= item->val_str(&tmp_value);
+
+ if (null_value)
+ {
+ if (item->null_value)
+ return 0;
+ else
+ return -1;
+ }
+ if (item->null_value)
+ return 1;
+
+ return sortcmp(&value, res, item->collation.collation);
+}
+
+
Cached_item_str::~Cached_item_str()
{
item=0; // Safety
@@ -115,6 +134,23 @@ bool Cached_item_real::cmp(void)
return FALSE;
}
+
+int Cached_item_real::cmp_read_only()
+{
+ double nr= item->val_real();
+ if (null_value)
+ {
+ if (item->null_value)
+ return 0;
+ else
+ return -1;
+ }
+ if (item->null_value)
+ return 1;
+ return (nr == value)? 0 : ((nr < value)? 1: -1);
+}
+
+
bool Cached_item_int::cmp(void)
{
longlong nr=item->val_int();
@@ -128,6 +164,22 @@ bool Cached_item_int::cmp(void)
}
+int Cached_item_int::cmp_read_only()
+{
+ longlong nr= item->val_int();
+ if (null_value)
+ {
+ if (item->null_value)
+ return 0;
+ else
+ return -1;
+ }
+ if (item->null_value)
+ return 1;
+ return (nr == value)? 0 : ((nr < value)? 1: -1);
+}
+
+
bool Cached_item_field::cmp(void)
{
bool tmp= FALSE; // Value is identical
@@ -148,8 +200,24 @@ bool Cached_item_field::cmp(void)
}
+int Cached_item_field::cmp_read_only()
+{
+ if (null_value)
+ {
+ if (field->is_null())
+ return 0;
+ else
+ return -1;
+ }
+ if (field->is_null())
+ return 1;
+
+ return field->cmp(buff);
+}
+
+
Cached_item_decimal::Cached_item_decimal(Item *it)
- :item(it)
+ :Cached_item_item(it)
{
my_decimal_set_zero(&value);
}
@@ -174,3 +242,20 @@ bool Cached_item_decimal::cmp()
return FALSE;
}
+
+int Cached_item_decimal::cmp_read_only()
+{
+ my_decimal tmp;
+ my_decimal *ptmp= item->val_decimal(&tmp);
+ if (null_value)
+ {
+ if (item->null_value)
+ return 0;
+ else
+ return -1;
+ }
+ if (item->null_value)
+ return 1;
+ return my_decimal_cmp(&value, ptmp);
+}
+
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 4e572b95ea3..83716a0e924 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -34,15 +34,21 @@
#include "sql_time.h" // make_truncated_value_warning
#include "sql_base.h" // dynamic_column_error_message
+static Item** cache_converted_constant(THD *thd, Item **value,
+ Item **cache_item, Item_result type,enum_field_types f_type);
/**
find an temporal type (item) that others will be converted to
for the purpose of comparison.
+ for IN/CASE conversion only happens if the first item defines the
+ comparison context.
+
this is the type that will be used in warnings like
"Incorrect <<TYPE>> value".
*/
-Item *find_date_time_item(Item **args, uint nargs, uint col)
+static Item *find_date_time_item(THD *thd, Item **args, uint nargs, uint col,
+ bool in_case)
{
Item *date_arg= 0, **arg, **arg_end;
for (arg= args, arg_end= args + nargs; arg != arg_end ; arg++)
@@ -50,10 +56,22 @@ Item *find_date_time_item(Item **args, uint nargs, uint col)
Item *item= arg[0]->element_index(col);
if (item->cmp_type() != TIME_RESULT)
continue;
- if (item->field_type() == MYSQL_TYPE_DATETIME)
- return item;
if (!date_arg)
date_arg= item;
+ if (item->field_type() == MYSQL_TYPE_DATETIME)
+ break;
+ }
+ if (in_case ? date_arg == args[0]->element_index(col) : date_arg != NULL)
+ {
+ enum_field_types f_type= date_arg->field_type();
+ for (arg= args, arg_end= args + nargs; arg != arg_end ; arg++)
+ {
+ Item *cache, **a= arg[0]->addr(col);
+ if (!a)
+ a= arg;
+ if (cache_converted_constant(thd, a, &cache, TIME_RESULT, f_type) != a)
+ thd->change_item_tree(a, cache);
+ }
}
return date_arg;
}
@@ -291,20 +309,10 @@ longlong Item_func_not::val_int()
return ((!null_value && value == 0) ? 1 : 0);
}
-/*
- We put any NOT expression into parenthesis to avoid
- possible problems with internal view representations where
- any '!' is converted to NOT. It may cause a problem if
- '!' is used in an expression together with other operators
- whose precedence is lower than the precedence of '!' yet
- higher than the precedence of NOT.
-*/
-
void Item_func_not::print(String *str, enum_query_type query_type)
{
- str->append('(');
- Item_func::print(str, query_type);
- str->append(')');
+ str->append('!');
+ args[0]->print_parenthesised(str, query_type, precedence());
}
/**
@@ -418,7 +426,7 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item,
if ((*item)->const_item() && !(*item)->is_expensive())
{
TABLE *table= field->table;
- ulonglong orig_sql_mode= thd->variables.sql_mode;
+ sql_mode_t orig_sql_mode= thd->variables.sql_mode;
enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields;
my_bitmap_map *old_maps[2];
ulonglong UNINIT_VAR(orig_field_val); /* original field value if valid */
@@ -523,7 +531,7 @@ bool Item_func::setup_args_and_comparator(THD *thd, Arg_comparator *cmp)
}
-void Item_bool_rowready_func2::fix_length_and_dec()
+bool Item_bool_rowready_func2::fix_length_and_dec()
{
max_length= 1; // Function returns 0 or 1
@@ -532,16 +540,15 @@ void Item_bool_rowready_func2::fix_length_and_dec()
we have to check for out of memory conditions here
*/
if (!args[0] || !args[1])
- return;
- setup_args_and_comparator(current_thd, &cmp);
+ return FALSE;
+ return setup_args_and_comparator(current_thd, &cmp);
}
int Arg_comparator::set_compare_func(Item_func_or_sum *item, Item_result type)
{
owner= item;
- func= comparator_matrix[type]
- [is_owner_equal_func()];
+ func= comparator_matrix[type][is_owner_equal_func()];
switch (type) {
case TIME_RESULT:
@@ -642,6 +649,21 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg,
*/
if (owner->agg_arg_charsets_for_comparison(&m_compare_collation, a, b))
return 1;
+
+ if ((*a)->type() == Item::FUNC_ITEM &&
+ ((Item_func *) (*a))->functype() == Item_func::JSON_EXTRACT_FUNC)
+ {
+ func= is_owner_equal_func() ? &Arg_comparator::compare_e_json_str:
+ &Arg_comparator::compare_json_str;
+ return 0;
+ }
+ else if ((*b)->type() == Item::FUNC_ITEM &&
+ ((Item_func *) (*b))->functype() == Item_func::JSON_EXTRACT_FUNC)
+ {
+ func= is_owner_equal_func() ? &Arg_comparator::compare_e_json_str:
+ &Arg_comparator::compare_str_json;
+ return 0;
+ }
}
if (m_compare_type == TIME_RESULT)
@@ -657,6 +679,8 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg,
func= is_owner_equal_func() ? &Arg_comparator::compare_e_datetime :
&Arg_comparator::compare_datetime;
}
+ a= cache_converted_constant(thd, a, &a_cache, m_compare_type, f_type);
+ b= cache_converted_constant(thd, b, &b_cache, m_compare_type, f_type);
return 0;
}
@@ -684,9 +708,11 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg,
func= is_owner_equal_func() ? &Arg_comparator::compare_e_datetime :
&Arg_comparator::compare_datetime;
}
-
- a= cache_converted_constant(thd, a, &a_cache, m_compare_type);
- b= cache_converted_constant(thd, b, &b_cache, m_compare_type);
+ else
+ {
+ a= cache_converted_constant(thd, a, &a_cache, m_compare_type, (*a)->field_type());
+ b= cache_converted_constant(thd, b, &b_cache, m_compare_type, (*b)->field_type());
+ }
return set_compare_func(owner_arg, m_compare_type);
}
@@ -708,21 +734,15 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg,
@return cache item or original value.
*/
-Item** Arg_comparator::cache_converted_constant(THD *thd_arg, Item **value,
- Item **cache_item,
- Item_result type)
+static Item** cache_converted_constant(THD *thd, Item **value,
+ Item **cache_item, Item_result type, enum_field_types f_type)
{
- /*
- Don't need cache if doing context analysis only.
- Also, get_datetime_value creates Item_cache internally.
- Unless fixed, we should not do it here.
- */
- if (!thd_arg->lex->is_ps_or_view_context_analysis() &&
- (*value)->const_item() && type != (*value)->result_type() &&
- type != TIME_RESULT)
+ /* Don't need cache if doing context analysis only. */
+ if (!thd->lex->is_ps_or_view_context_analysis() &&
+ (*value)->const_item() && type != (*value)->result_type())
{
- Item_cache *cache= Item_cache::get_cache(thd_arg, *value, type);
- cache->setup(thd_arg, *value);
+ Item_cache *cache= Item_cache::get_cache(thd, *value, type, f_type);
+ cache->setup(thd, *value);
*cache_item= cache;
return cache_item;
}
@@ -730,61 +750,6 @@ Item** Arg_comparator::cache_converted_constant(THD *thd_arg, Item **value,
}
-/**
- Retrieves correct DATETIME value from given item.
-
- @param[in] thd thread handle
- @param[in,out] item_arg item to retrieve DATETIME value from
- @param[in,out] cache_arg pointer to place to store the caching item to
- @param[in] warn_item item for issuing the conversion warning
- @param[out] is_null TRUE <=> the item_arg is null
-
- @details
- Retrieves the correct DATETIME value from given item for comparison by the
- compare_datetime() function.
-
- If the value should be compared as time (TIME_RESULT), it's retrieved as
- MYSQL_TIME. Otherwise it's read as a number/string and converted to time.
- Constant items are cached, so the convertion is only done once for them.
-
- Note the f_type behavior: if the item can be compared as time, then
- f_type is this item's field_type(). Otherwise it's field_type() of
- warn_item (which is the other operand of the comparison operator).
- This logic provides correct string/number to date/time conversion
- depending on the other operand (when comparing a string with a date, it's
- parsed as a date, when comparing a string with a time it's parsed as a time)
-
- If the item is a constant it is replaced by the Item_cache_int, that
- holds the packed datetime value.
-
- @return
- MYSQL_TIME value, packed in a longlong, suitable for comparison.
-*/
-
-longlong
-get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
- enum_field_types f_type, bool *is_null)
-{
- longlong UNINIT_VAR(value);
- Item *item= **item_arg;
- value= item->val_temporal_packed(f_type);
- if ((*is_null= item->null_value))
- return ~(ulonglong) 0;
- if (cache_arg && item->const_item() &&
- !(item->type() == Item::CACHE_ITEM && item->cmp_type() == TIME_RESULT))
- {
- if (!thd)
- thd= current_thd;
-
- Item_cache_temporal *cache= new (thd->mem_root) Item_cache_temporal(thd, f_type);
- cache->store_packed(value, item);
- *cache_arg= cache;
- *item_arg= cache_arg;
- }
- return value;
-}
-
-
/*
Compare items values as dates.
@@ -793,8 +758,7 @@ get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
DESCRIPTION
Compare items values as DATE/DATETIME for both EQUAL_FUNC and from other
- comparison functions. The correct DATETIME values are obtained
- with help of the get_datetime_value() function.
+ comparison functions.
RETURN
-1 a < b or at least one item is null
@@ -804,20 +768,19 @@ get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
int Arg_comparator::compare_temporal(enum_field_types type)
{
- bool a_is_null, b_is_null;
longlong a_value, b_value;
if (set_null)
owner->null_value= 1;
/* Get DATE/DATETIME/TIME value of the 'a' item. */
- a_value= get_datetime_value(0, &a, &a_cache, type, &a_is_null);
- if (a_is_null)
+ a_value= (*a)->val_temporal_packed(type);
+ if ((*a)->null_value)
return -1;
/* Get DATE/DATETIME/TIME value of the 'b' item. */
- b_value= get_datetime_value(0, &b, &b_cache, type, &b_is_null);
- if (b_is_null)
+ b_value= (*b)->val_temporal_packed(type);
+ if ((*b)->null_value)
return -1;
/* Here we have two not-NULL values. */
@@ -830,16 +793,15 @@ int Arg_comparator::compare_temporal(enum_field_types type)
int Arg_comparator::compare_e_temporal(enum_field_types type)
{
- bool a_is_null, b_is_null;
longlong a_value, b_value;
/* Get DATE/DATETIME/TIME value of the 'a' item. */
- a_value= get_datetime_value(0, &a, &a_cache, type, &a_is_null);
+ a_value= (*a)->val_temporal_packed(type);
/* Get DATE/DATETIME/TIME value of the 'b' item. */
- b_value= get_datetime_value(0, &b, &b_cache, type, &b_is_null);
- return a_is_null || b_is_null ? a_is_null == b_is_null
- : a_value == b_value;
+ b_value= (*b)->val_temporal_packed(type);
+ return (*a)->null_value || (*b)->null_value ?
+ (*a)->null_value == (*b)->null_value : a_value == b_value;
}
int Arg_comparator::compare_string()
@@ -1175,19 +1137,43 @@ int Arg_comparator::compare_e_row()
}
-void Item_func_truth::fix_length_and_dec()
+int Arg_comparator::compare_json_str()
+{
+ return compare_json_str_basic(*a, *b);
+}
+
+
+int Arg_comparator::compare_str_json()
+{
+ return -compare_json_str_basic(*b, *a);
+}
+
+
+int Arg_comparator::compare_e_json_str()
+{
+ return compare_e_json_str_basic(*a, *b);
+}
+
+
+int Arg_comparator::compare_e_str_json()
+{
+ return compare_e_json_str_basic(*b, *a);
+}
+
+
+bool Item_func_truth::fix_length_and_dec()
{
maybe_null= 0;
null_value= 0;
decimals= 0;
max_length= 1;
+ return FALSE;
}
void Item_func_truth::print(String *str, enum_query_type query_type)
{
- str->append('(');
- args[0]->print(str, query_type);
+ args[0]->print_parenthesised(str, query_type, precedence());
str->append(STRING_WITH_LEN(" is "));
if (! affirmative)
str->append(STRING_WITH_LEN("not "));
@@ -1195,7 +1181,6 @@ void Item_func_truth::print(String *str, enum_query_type query_type)
str->append(STRING_WITH_LEN("true"));
else
str->append(STRING_WITH_LEN("false"));
- str->append(')');
}
@@ -1246,7 +1231,7 @@ void Item_in_optimizer::fix_after_pullout(st_select_lex *new_parent,
}
-bool Item_in_optimizer::eval_not_null_tables(uchar *opt_arg)
+bool Item_in_optimizer::eval_not_null_tables(void *opt_arg)
{
not_null_tables_cache= 0;
if (is_top_level_item())
@@ -1342,8 +1327,7 @@ bool Item_in_optimizer::fix_left(THD *thd)
for (uint i= 0; i < n; i++)
{
/* Check that the expression (part of row) do not contain a subquery */
- if (args[0]->element_index(i)->walk(&Item::is_subquery_processor,
- FALSE, NULL))
+ if (args[0]->element_index(i)->walk(&Item::is_subquery_processor, 0, 0))
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
"SUBQUERY in ROW in left expression of IN/ALL/ANY");
@@ -1777,7 +1761,7 @@ Item *Item_in_optimizer::transform(THD *thd, Item_transformer transformer,
}
-bool Item_in_optimizer::is_expensive_processor(uchar *arg)
+bool Item_in_optimizer::is_expensive_processor(void *arg)
{
DBUG_ASSERT(fixed);
return args[0]->is_expensive_processor(arg) ||
@@ -1802,10 +1786,11 @@ longlong Item_func_eq::val_int()
/** Same as Item_func_eq, but NULL = NULL. */
-void Item_func_equal::fix_length_and_dec()
+bool Item_func_equal::fix_length_and_dec()
{
- Item_bool_rowready_func2::fix_length_and_dec();
+ bool rc= Item_bool_rowready_func2::fix_length_and_dec();
maybe_null=null_value=0;
+ return rc;
}
longlong Item_func_equal::val_int()
@@ -1902,7 +1887,7 @@ bool Item_func_interval::fix_fields(THD *thd, Item **ref)
}
-void Item_func_interval::fix_length_and_dec()
+bool Item_func_interval::fix_length_and_dec()
{
uint rows= row->cols();
@@ -1920,10 +1905,13 @@ void Item_func_interval::fix_length_and_dec()
not_null_consts&= el->const_item() && !el->is_null();
}
- if (not_null_consts &&
- (intervals=
- (interval_range*) sql_alloc(sizeof(interval_range) * (rows - 1))))
+ if (not_null_consts)
{
+ intervals= (interval_range*) current_thd->alloc(sizeof(interval_range) *
+ (rows - 1));
+ if (!intervals)
+ return TRUE;
+
if (use_decimal_comparison)
{
for (uint i= 1; i < rows; i++)
@@ -1964,6 +1952,7 @@ void Item_func_interval::fix_length_and_dec()
with_sum_func= with_sum_func || row->with_sum_func;
with_param= with_param || row->with_param;
with_field= with_field || row->with_field;
+ return FALSE;
}
@@ -2089,7 +2078,7 @@ longlong Item_func_interval::val_int()
*/
-bool Item_func_between::eval_not_null_tables(uchar *opt_arg)
+bool Item_func_between::eval_not_null_tables(void *opt_arg)
{
if (Item_func_opt_neg::eval_not_null_tables(NULL))
return 1;
@@ -2106,7 +2095,7 @@ bool Item_func_between::eval_not_null_tables(uchar *opt_arg)
}
-bool Item_func_between::count_sargable_conds(uchar *arg)
+bool Item_func_between::count_sargable_conds(void *arg)
{
SELECT_LEX *sel= (SELECT_LEX *) arg;
sel->cond_count++;
@@ -2124,7 +2113,7 @@ void Item_func_between::fix_after_pullout(st_select_lex *new_parent,
eval_not_null_tables(NULL);
}
-void Item_func_between::fix_length_and_dec()
+bool Item_func_between::fix_length_and_dec()
{
THD *thd= current_thd;
max_length= 1;
@@ -2135,26 +2124,24 @@ void Item_func_between::fix_length_and_dec()
we have to check for out of memory conditions here
*/
if (!args[0] || !args[1] || !args[2])
- return;
+ return TRUE;
if (agg_cmp_type(&m_compare_type, args, 3, false))
- return;
+ return TRUE;
if (m_compare_type == STRING_RESULT &&
agg_arg_charsets_for_comparison(cmp_collation, args, 3))
- return;
+ return TRUE;
/*
When comparing as date/time, we need to convert non-temporal values
- (e.g. strings) to MYSQL_TIME. get_datetime_value() does it
- automatically when one of the operands is a date/time. But here we
- may need to compare two strings as dates (str1 BETWEEN str2 AND date).
+ (e.g. strings) to MYSQL_TIME.
For this to work, we need to know what date/time type we compare
strings as.
*/
if (m_compare_type == TIME_RESULT)
- compare_as_dates= find_date_time_item(args, 3, 0);
+ compare_as_dates= find_date_time_item(thd, args, 3, 0, false);
- /* See the comment about the similar block in Item_bool_func2 */
+ /* See the comment for Item_func::convert_const_compared_to_int_field */
if (args[0]->real_item()->type() == FIELD_ITEM &&
!thd->lex->is_ps_or_view_context_analysis())
{
@@ -2168,6 +2155,7 @@ void Item_func_between::fix_length_and_dec()
m_compare_type= INT_RESULT; // Works for all types.
}
}
+ return FALSE;
}
@@ -2269,36 +2257,23 @@ longlong Item_func_between::val_int()
switch (m_compare_type) {
case TIME_RESULT:
{
- THD *thd= current_thd;
longlong value, a, b;
- Item *cache, **ptr;
- bool value_is_null, a_is_null, b_is_null;
- ptr= &args[0];
enum_field_types f_type= field_type_for_temporal_comparison(compare_as_dates);
- value= get_datetime_value(thd, &ptr, &cache, f_type, &value_is_null);
- if (ptr != &args[0])
- thd->change_item_tree(&args[0], *ptr);
+ value= args[0]->val_temporal_packed(f_type);
- if ((null_value= value_is_null))
+ if ((null_value= args[0]->null_value))
return 0;
- ptr= &args[1];
- a= get_datetime_value(thd, &ptr, &cache, f_type, &a_is_null);
- if (ptr != &args[1])
- thd->change_item_tree(&args[1], *ptr);
-
- ptr= &args[2];
- b= get_datetime_value(thd, &ptr, &cache, f_type, &b_is_null);
- if (ptr != &args[2])
- thd->change_item_tree(&args[2], *ptr);
+ a= args[1]->val_temporal_packed(f_type);
+ b= args[2]->val_temporal_packed(f_type);
- if (!a_is_null && !b_is_null)
+ if (!args[1]->null_value && !args[2]->null_value)
return (longlong) ((value >= a && value <= b) != negated);
- if (a_is_null && b_is_null)
+ if (args[1]->null_value && args[2]->null_value)
null_value=1;
- else if (a_is_null)
- null_value= value <= b; // not null if false range.
+ else if (args[1]->null_value)
+ null_value= value <= b; // not null if false range.
else
null_value= value >= a;
break;
@@ -2322,15 +2297,13 @@ longlong Item_func_between::val_int()
void Item_func_between::print(String *str, enum_query_type query_type)
{
- str->append('(');
- args[0]->print(str, query_type);
+ args[0]->print_parenthesised(str, query_type, precedence());
if (negated)
str->append(STRING_WITH_LEN(" not"));
str->append(STRING_WITH_LEN(" between "));
- args[1]->print(str, query_type);
+ args[1]->print_parenthesised(str, query_type, precedence());
str->append(STRING_WITH_LEN(" and "));
- args[2]->print(str, query_type);
- str->append(')');
+ args[2]->print_parenthesised(str, query_type, precedence());
}
@@ -2461,7 +2434,7 @@ Item_func_if::fix_fields(THD *thd, Item **ref)
bool
-Item_func_if::eval_not_null_tables(uchar *opt_arg)
+Item_func_if::eval_not_null_tables(void *opt_arg)
{
if (Item_func::eval_not_null_tables(NULL))
return 1;
@@ -2491,7 +2464,7 @@ void Item_func_if::cache_type_info(Item *source)
}
-void
+bool
Item_func_if::fix_length_and_dec()
{
// Let IF(cond, expr, NULL) and IF(cond, NULL, expr) inherit type from expr.
@@ -2502,15 +2475,15 @@ Item_func_if::fix_length_and_dec()
// If both arguments are NULL, make resulting type BINARY(0).
if (args[2]->type() == NULL_ITEM)
set_handler_by_field_type(MYSQL_TYPE_STRING);
- return;
+ return FALSE;
}
if (args[2]->type() == NULL_ITEM)
{
cache_type_info(args[1]);
maybe_null= true;
- return;
+ return FALSE;
}
- Item_func_case_abbreviation2::fix_length_and_dec2(args + 1);
+ return Item_func_case_abbreviation2::fix_length_and_dec2(args + 1);
}
@@ -2568,7 +2541,7 @@ bool Item_func_if::date_op(MYSQL_TIME *ltime, uint fuzzydate)
}
-void Item_func_nullif::split_sum_func(THD *thd, Item **ref_pointer_array,
+void Item_func_nullif::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
{
if (m_cache)
@@ -2585,7 +2558,7 @@ void Item_func_nullif::split_sum_func(THD *thd, Item **ref_pointer_array,
bool Item_func_nullif::walk(Item_processor processor,
- bool walk_subquery, uchar *arg)
+ bool walk_subquery, void *arg)
{
/*
No needs to iterate through args[2] when it's just a copy of args[0].
@@ -2624,7 +2597,7 @@ void Item_func_nullif::update_used_tables()
-void
+bool
Item_func_nullif::fix_length_and_dec()
{
/*
@@ -2774,6 +2747,8 @@ Item_func_nullif::fix_length_and_dec()
m_cache= args[0]->cmp_type() == STRING_RESULT ?
new (thd->mem_root) Item_cache_str_for_nullif(thd, args[0]) :
Item_cache::get_cache(thd, args[0]);
+ if (!m_cache)
+ return TRUE;
m_cache->setup(thd, args[0]);
m_cache->store(args[0]);
m_cache->set_used_tables(args[0]->used_tables());
@@ -2787,7 +2762,8 @@ Item_func_nullif::fix_length_and_dec()
fix_char_length(args[2]->max_char_length());
maybe_null=1;
m_arg0= args[0];
- setup_args_and_comparator(thd, &cmp);
+ if (setup_args_and_comparator(thd, &cmp))
+ return TRUE;
/*
A special code for EXECUTE..PREPARE.
@@ -2827,6 +2803,7 @@ Item_func_nullif::fix_length_and_dec()
*/
if (args[0] == m_arg0)
m_arg0= NULL;
+ return FALSE;
}
@@ -2998,11 +2975,12 @@ Item_func_case::Item_func_case(THD *thd, List<Item> &list,
Item_func_hybrid_field_type(thd), first_expr_num(-1), else_expr_num(-1),
left_cmp_type(INT_RESULT), case_item(0), m_found_types(0)
{
- ncases= list.elements;
+ DBUG_ASSERT(list.elements % 2 == 0);
+ nwhens= list.elements / 2;
if (first_expr_arg)
{
- first_expr_num= list.elements;
- list.push_back(first_expr_arg, thd->mem_root);
+ first_expr_num= 0;
+ list.push_front(first_expr_arg, thd->mem_root);
}
if (else_expr_arg)
{
@@ -3010,6 +2988,22 @@ Item_func_case::Item_func_case(THD *thd, List<Item> &list,
list.push_back(else_expr_arg, thd->mem_root);
}
set_arguments(thd, list);
+
+ /*
+ Reorder args, to have at first the optional CASE expression, then all WHEN
+ expressions, then all THEN expressions. And the optional ELSE expression
+ at the end.
+ */
+ const size_t size= sizeof(Item*)*nwhens*2;
+ Item **arg_buffer= (Item **)my_safe_alloca(size);
+ memcpy(arg_buffer, args + first_expr_num + 1, size);
+ for (uint i= 0; i < nwhens ; i++)
+ {
+ args[first_expr_num + 1 + i]= arg_buffer[i*2];
+ args[first_expr_num + 1 + i + nwhens] = arg_buffer[i*2 + 1];
+ }
+ my_safe_afree(arg_buffer, size);
+
bzero(&cmp_items, sizeof(cmp_items));
}
@@ -3040,18 +3034,17 @@ Item *Item_func_case::find_item(String *str)
if (first_expr_num == -1)
{
- for (uint i=0 ; i < ncases ; i+=2)
+ for (uint i=0 ; i < nwhens ; i++)
{
// No expression between CASE and the first WHEN
if (args[i]->val_bool())
- return args[i+1];
- continue;
+ return args[i+nwhens];
}
}
else
{
/* Compare every WHEN argument with it and return the first match */
- for (uint i=0 ; i < ncases ; i+=2)
+ for (uint i=1 ; i <= nwhens; i++)
{
if (args[i]->real_item()->type() == NULL_ITEM)
continue;
@@ -3060,13 +3053,13 @@ Item *Item_func_case::find_item(String *str)
DBUG_ASSERT(cmp_items[(uint)cmp_type]);
if (!(value_added_map & (1U << (uint)cmp_type)))
{
- cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]);
- if ((null_value=args[first_expr_num]->null_value))
+ cmp_items[(uint)cmp_type]->store_value(args[0]);
+ if ((null_value= args[0]->null_value))
return else_expr_num != -1 ? args[else_expr_num] : 0;
value_added_map|= 1U << (uint)cmp_type;
}
if (cmp_items[(uint)cmp_type]->cmp(args[i]) == FALSE)
- return args[i + 1];
+ return args[i + nwhens];
}
}
// No, WHEN clauses all missed, return ELSE expression
@@ -3169,9 +3162,6 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref)
*/
uchar buff[MAX_FIELD_WIDTH*2+sizeof(String)*2+sizeof(String*)*2+sizeof(double)*2+sizeof(longlong)*2];
- if (!(arg_buffer= (Item**) thd->alloc(sizeof(Item*)*(ncases+1))))
- return TRUE;
-
bool res= Item_func::fix_fields(thd, ref);
/*
Call check_stack_overrun after fix_fields to be sure that stack variable
@@ -3186,31 +3176,17 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref)
/**
Check if (*place) and new_value points to different Items and call
THD::change_item_tree() if needed.
-
- This function is a workaround for implementation deficiency in
- Item_func_case. The problem there is that the 'args' attribute contains
- Items from different expressions.
-
- The function must not be used elsewhere and will be remove eventually.
*/
-static void change_item_tree_if_needed(THD *thd,
- Item **place,
- Item *new_value)
+static void change_item_tree_if_needed(THD *thd, Item **place, Item *new_value)
{
- if (*place == new_value)
- return;
-
- thd->change_item_tree(place, new_value);
+ if (new_value && *place != new_value)
+ thd->change_item_tree(place, new_value);
}
-void Item_func_case::fix_length_and_dec()
+bool Item_func_case::fix_length_and_dec()
{
- Item **agg= arg_buffer;
- uint nagg;
- THD *thd= current_thd;
-
m_found_types= 0;
if (else_expr_num == -1 || args[else_expr_num]->maybe_null)
maybe_null= 1;
@@ -3219,33 +3195,17 @@ void Item_func_case::fix_length_and_dec()
Aggregate all THEN and ELSE expression types
and collations when string result
*/
-
- for (nagg= 0 ; nagg < ncases/2 ; nagg++)
- agg[nagg]= args[nagg*2+1];
-
- if (else_expr_num != -1)
- agg[nagg++]= args[else_expr_num];
-
- set_handler_by_field_type(agg_field_type(agg, nagg, true));
+ Item **rets= args + first_expr_num + 1 + nwhens;
+ uint nrets= nwhens + (else_expr_num != -1);
+ set_handler_by_field_type(agg_field_type(rets, nrets, true));
if (Item_func_case::result_type() == STRING_RESULT)
{
- if (count_string_result_length(Item_func_case::field_type(), agg, nagg))
- return;
- /*
- Copy all THEN and ELSE items back to args[] array.
- Some of the items might have been changed to Item_func_conv_charset.
- */
- for (nagg= 0 ; nagg < ncases / 2 ; nagg++)
- change_item_tree_if_needed(thd, &args[nagg * 2 + 1], agg[nagg]);
-
- if (else_expr_num != -1)
- change_item_tree_if_needed(thd, &args[else_expr_num], agg[nagg++]);
+ if (count_string_result_length(Item_func_case::field_type(), rets, nrets))
+ return TRUE;
}
else
- {
- fix_attributes(agg, nagg);
- }
+ fix_attributes(rets, nrets);
/*
Aggregate first expression and all WHEN expression types
@@ -3253,25 +3213,14 @@ void Item_func_case::fix_length_and_dec()
*/
if (first_expr_num != -1)
{
- uint i;
- agg[0]= args[first_expr_num];
- left_cmp_type= agg[0]->cmp_type();
+ left_cmp_type= args[0]->cmp_type();
- /*
- As the first expression and WHEN expressions
- are intermixed in args[] array THEN and ELSE items,
- extract the first expression and all WHEN expressions into
- a temporary array, to process them easier.
- */
- for (nagg= 0; nagg < ncases/2 ; nagg++)
- agg[nagg+1]= args[nagg*2];
- nagg++;
- if (!(m_found_types= collect_cmp_types(agg, nagg)))
- return;
+ if (!(m_found_types= collect_cmp_types(args, nwhens + 1)))
+ return TRUE;
Item *date_arg= 0;
if (m_found_types & (1U << TIME_RESULT))
- date_arg= find_date_time_item(args, arg_count, 0);
+ date_arg= find_date_time_item(current_thd, args, nwhens + 1, 0, true);
if (m_found_types & (1U << STRING_RESULT))
{
@@ -3299,32 +3248,23 @@ void Item_func_case::fix_length_and_dec()
CASE utf16_item WHEN CONVERT(latin1_item USING utf16) THEN ... END
*/
- if (agg_arg_charsets_for_comparison(cmp_collation, agg, nagg))
- return;
- /*
- Now copy first expression and all WHEN expressions back to args[]
- arrray, because some of the items might have been changed to converters
- (e.g. Item_func_conv_charset, or Item_string for constants).
- */
- change_item_tree_if_needed(thd, &args[first_expr_num], agg[0]);
-
- for (nagg= 0; nagg < ncases / 2; nagg++)
- change_item_tree_if_needed(thd, &args[nagg * 2], agg[nagg + 1]);
+ if (agg_arg_charsets_for_comparison(cmp_collation, args, nwhens + 1))
+ return TRUE;
}
- for (i= 0; i <= (uint)TIME_RESULT; i++)
+ for (uint i= 0; i <= (uint)TIME_RESULT; i++)
{
if (m_found_types & (1U << i) && !cmp_items[i])
{
DBUG_ASSERT((Item_result)i != ROW_RESULT);
-
if (!(cmp_items[i]=
cmp_item::get_comparator((Item_result)i, date_arg,
cmp_collation.collation)))
- return;
+ return TRUE;
}
}
}
+ return FALSE;
}
@@ -3337,75 +3277,59 @@ Item* Item_func_case::propagate_equal_fields(THD *thd, const Context &ctx, COND_
return this;
}
- for (uint i= 0; i < arg_count; i++)
+ /*
+ First, replace CASE expression.
+ We cannot replace the CASE (the switch) argument if
+ there are multiple comparison types were found, or found a single
+ comparison type that is not equal to args[0]->cmp_type().
+
+ - Example: multiple comparison types, can't propagate:
+ WHERE CASE str_column
+ WHEN 'string' THEN TRUE
+ WHEN 1 THEN TRUE
+ ELSE FALSE END;
+
+ - Example: a single incompatible comparison type, can't propagate:
+ WHERE CASE str_column
+ WHEN DATE'2001-01-01' THEN TRUE
+ ELSE FALSE END;
+
+ - Example: a single incompatible comparison type, can't propagate:
+ WHERE CASE str_column
+ WHEN 1 THEN TRUE
+ ELSE FALSE END;
+
+ - Example: a single compatible comparison type, ok to propagate:
+ WHERE CASE str_column
+ WHEN 'str1' THEN TRUE
+ WHEN 'str2' THEN TRUE
+ ELSE FALSE END;
+ */
+ if (m_found_types == (1UL << left_cmp_type))
+ change_item_tree_if_needed(thd, args,
+ args[0]->propagate_equal_fields(thd, Context(ANY_SUBST, left_cmp_type,
+ cmp_collation.collation),
+ cond));
+ uint i= 1;
+ for (; i <= nwhens ; i++) // WHEN expressions
{
/*
- Even "i" values cover items that are in a comparison context:
- CASE x0 WHEN x1 .. WHEN x2 .. WHEN x3 ..
- Odd "i" values cover items that are not in comparison:
- CASE ... THEN y1 ... THEN y2 ... THEN y3 ... ELSE y4 END
+ These arguments are in comparison.
+ Allow invariants of the same value during propagation.
+ Note, as we pass ANY_SUBST, none of the WHEN arguments will be
+ replaced to zero-filled constants (only IDENTITY_SUBST allows this).
+ Such a change for WHEN arguments would require rebuilding cmp_items.
*/
- Item *new_item= 0;
- if ((int) i == first_expr_num) // Then CASE (the switch) argument
- {
- /*
- Cannot replace the CASE (the switch) argument if
- there are multiple comparison types were found, or found a single
- comparison type that is not equal to args[0]->cmp_type().
-
- - Example: multiple comparison types, can't propagate:
- WHERE CASE str_column
- WHEN 'string' THEN TRUE
- WHEN 1 THEN TRUE
- ELSE FALSE END;
-
- - Example: a single incompatible comparison type, can't propagate:
- WHERE CASE str_column
- WHEN DATE'2001-01-01' THEN TRUE
- ELSE FALSE END;
-
- - Example: a single incompatible comparison type, can't propagate:
- WHERE CASE str_column
- WHEN 1 THEN TRUE
- ELSE FALSE END;
-
- - Example: a single compatible comparison type, ok to propagate:
- WHERE CASE str_column
- WHEN 'str1' THEN TRUE
- WHEN 'str2' THEN TRUE
- ELSE FALSE END;
- */
- if (m_found_types == (1UL << left_cmp_type))
- new_item= args[i]->propagate_equal_fields(thd,
- Context(
- ANY_SUBST,
- left_cmp_type,
- cmp_collation.collation),
- cond);
- }
- else if ((i % 2) == 0) // WHEN arguments
- {
- /*
- These arguments are in comparison.
- Allow invariants of the same value during propagation.
- Note, as we pass ANY_SUBST, none of the WHEN arguments will be
- replaced to zero-filled constants (only IDENTITY_SUBST allows this).
- Such a change for WHEN arguments would require rebuilding cmp_items.
- */
- Item_result tmp_cmp_type= item_cmp_type(args[first_expr_num], args[i]);
- new_item= args[i]->propagate_equal_fields(thd,
- Context(
- ANY_SUBST,
- tmp_cmp_type,
- cmp_collation.collation),
- cond);
- }
- else // THEN and ELSE arguments (they are not in comparison)
- {
- new_item= args[i]->propagate_equal_fields(thd, Context_identity(), cond);
- }
- if (new_item && new_item != args[i])
- thd->change_item_tree(&args[i], new_item);
+ Item_result tmp_cmp_type= item_cmp_type(args[first_expr_num], args[i]);
+ change_item_tree_if_needed(thd, args + i,
+ args[i]->propagate_equal_fields(thd, Context(ANY_SUBST, tmp_cmp_type,
+ cmp_collation.collation),
+ cond));
+ }
+ for (; i < arg_count ; i++) // THEN expressions and optional ELSE expression
+ {
+ change_item_tree_if_needed(thd, args + i,
+ args[i]->propagate_equal_fields(thd, Context_identity(), cond));
}
return this;
}
@@ -3414,11 +3338,8 @@ Item* Item_func_case::propagate_equal_fields(THD *thd, const Context &ctx, COND_
uint Item_func_case::decimal_precision() const
{
int max_int_part=0;
- for (uint i=0 ; i < ncases ; i+=2)
- set_if_bigger(max_int_part, args[i+1]->decimal_int_part());
-
- if (else_expr_num != -1)
- set_if_bigger(max_int_part, args[else_expr_num]->decimal_int_part());
+ for (uint i=first_expr_num + 1 + nwhens ; i < arg_count; i++)
+ set_if_bigger(max_int_part, args[i]->decimal_int_part());
return MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
}
@@ -3430,27 +3351,27 @@ uint Item_func_case::decimal_precision() const
void Item_func_case::print(String *str, enum_query_type query_type)
{
- str->append(STRING_WITH_LEN("(case "));
+ str->append(STRING_WITH_LEN("case "));
if (first_expr_num != -1)
{
- args[first_expr_num]->print(str, query_type);
+ args[0]->print_parenthesised(str, query_type, precedence());
str->append(' ');
}
- for (uint i=0 ; i < ncases ; i+=2)
+ for (uint i= first_expr_num + 1 ; i < nwhens + first_expr_num + 1; i++)
{
str->append(STRING_WITH_LEN("when "));
- args[i]->print(str, query_type);
+ args[i]->print_parenthesised(str, query_type, precedence());
str->append(STRING_WITH_LEN(" then "));
- args[i+1]->print(str, query_type);
+ args[i+nwhens]->print_parenthesised(str, query_type, precedence());
str->append(' ');
}
if (else_expr_num != -1)
{
str->append(STRING_WITH_LEN("else "));
- args[else_expr_num]->print(str, query_type);
+ args[else_expr_num]->print_parenthesised(str, query_type, precedence());
str->append(' ');
}
- str->append(STRING_WITH_LEN("end)"));
+ str->append(STRING_WITH_LEN("end"));
}
@@ -3714,8 +3635,9 @@ bool in_vector::find(Item *item)
return ((*compare)(collation, base+start*size, result) == 0);
}
-in_string::in_string(uint elements,qsort2_cmp cmp_func, CHARSET_INFO *cs)
- :in_vector(elements, sizeof(String), cmp_func, cs),
+in_string::in_string(THD *thd, uint elements, qsort2_cmp cmp_func,
+ CHARSET_INFO *cs)
+ :in_vector(thd, elements, sizeof(String), cmp_func, cs),
tmp(buff, sizeof(buff), &my_charset_bin)
{}
@@ -3723,7 +3645,7 @@ in_string::~in_string()
{
if (base)
{
- // base was allocated with help of sql_alloc => following is OK
+ // base was allocated on THD::mem_root => following is OK
for (uint i=0 ; i < count ; i++)
((String*) base)[i].free();
}
@@ -3793,13 +3715,14 @@ uchar *in_row::get_value(Item *item)
void in_row::set(uint pos, Item *item)
{
DBUG_ENTER("in_row::set");
- DBUG_PRINT("enter", ("pos: %u item: 0x%lx", pos, (ulong) item));
+ DBUG_PRINT("enter", ("pos: %u item: %p", pos,item));
((cmp_item_row*) base)[pos].store_value_by_template(current_thd, &tmp, item);
DBUG_VOID_RETURN;
}
-in_longlong::in_longlong(uint elements)
- :in_vector(elements,sizeof(packed_longlong),(qsort2_cmp) cmp_longlong, 0)
+in_longlong::in_longlong(THD *thd, uint elements)
+ :in_vector(thd, elements, sizeof(packed_longlong),
+ (qsort2_cmp) cmp_longlong, 0)
{}
void in_longlong::set(uint pos,Item *item)
@@ -3839,11 +3762,8 @@ void in_datetime::set(uint pos,Item *item)
uchar *in_datetime::get_value(Item *item)
{
- bool is_null;
- Item **tmp_item= lval_cache ? &lval_cache : &item;
- enum_field_types f_type=
- tmp_item[0]->field_type_for_temporal_comparison(warn_item);
- tmp.val= get_datetime_value(0, &tmp_item, &lval_cache, f_type, &is_null);
+ enum_field_types f_type= item->field_type_for_temporal_comparison(warn_item);
+ tmp.val= item->val_temporal_packed(f_type);
if (item->null_value)
return 0;
tmp.unsigned_flag= 1L;
@@ -3856,8 +3776,8 @@ Item *in_datetime::create_item(THD *thd)
}
-in_double::in_double(uint elements)
- :in_vector(elements,sizeof(double),(qsort2_cmp) cmp_double, 0)
+in_double::in_double(THD *thd, uint elements)
+ :in_vector(thd, elements, sizeof(double), (qsort2_cmp) cmp_double, 0)
{}
void in_double::set(uint pos,Item *item)
@@ -3879,8 +3799,8 @@ Item *in_double::create_item(THD *thd)
}
-in_decimal::in_decimal(uint elements)
- :in_vector(elements, sizeof(my_decimal),(qsort2_cmp) cmp_decimal, 0)
+in_decimal::in_decimal(THD *thd, uint elements)
+ :in_vector(thd, elements, sizeof(my_decimal), (qsort2_cmp) cmp_decimal, 0)
{}
@@ -3957,7 +3877,7 @@ cmp_item* cmp_item_row::make_same()
cmp_item_row::~cmp_item_row()
{
DBUG_ENTER("~cmp_item_row");
- DBUG_PRINT("enter",("this: 0x%lx", (long) this));
+ DBUG_PRINT("enter",("this: %p", this));
if (comparators)
{
for (uint i= 0; i < n; i++)
@@ -4104,11 +4024,8 @@ cmp_item* cmp_item_decimal::make_same()
void cmp_item_datetime::store_value(Item *item)
{
- bool is_null;
- Item **tmp_item= lval_cache ? &lval_cache : &item;
- enum_field_types f_type=
- tmp_item[0]->field_type_for_temporal_comparison(warn_item);
- value= get_datetime_value(0, &tmp_item, &lval_cache, f_type, &is_null);
+ enum_field_types f_type= item->field_type_for_temporal_comparison(warn_item);
+ value= item->val_temporal_packed(f_type);
m_null_value= item->null_value;
}
@@ -4133,7 +4050,7 @@ cmp_item *cmp_item_datetime::make_same()
}
-bool Item_func_in::count_sargable_conds(uchar *arg)
+bool Item_func_in::count_sargable_conds(void *arg)
{
((SELECT_LEX*) arg)->cond_count++;
return 0;
@@ -4192,7 +4109,7 @@ Item_func_in::fix_fields(THD *thd, Item **ref)
bool
-Item_func_in::eval_not_null_tables(uchar *opt_arg)
+Item_func_in::eval_not_null_tables(void *opt_arg)
{
Item **arg, **arg_end;
@@ -4225,10 +4142,70 @@ static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y)
{
return cs->coll->strnncollsp(cs,
(uchar *) x->ptr(),x->length(),
- (uchar *) y->ptr(),y->length(), 0);
+ (uchar *) y->ptr(),y->length());
}
-void Item_func_in::fix_length_and_dec()
+/*
+ Create 'array' for this IN predicate with the respect to its result type
+ and put values from <in value list> in 'array'.
+*/
+
+bool Item_func_in::create_array(THD *thd)
+{
+ Item *date_arg= 0;
+
+ switch (m_compare_type) {
+ case STRING_RESULT:
+ array=new (thd->mem_root) in_string(thd, arg_count - 1,
+ (qsort2_cmp) srtcmp_in,
+ cmp_collation.collation);
+ break;
+ case INT_RESULT:
+ array= new (thd->mem_root) in_longlong(thd, arg_count - 1);
+ break;
+ case REAL_RESULT:
+ array= new (thd->mem_root) in_double(thd, arg_count - 1);
+ break;
+ case ROW_RESULT:
+ /*
+ The row comparator was created at the beginning but only DATETIME
+ items comparators were initialized. Call store_value() to setup
+ others.
+ */
+ ((in_row*)array)->tmp.store_value(args[0]);
+ break;
+ case DECIMAL_RESULT:
+ array= new (thd->mem_root) in_decimal(thd, arg_count - 1);
+ break;
+ case TIME_RESULT:
+ date_arg= find_date_time_item(thd, args, arg_count, 0, true);
+ array= new (thd->mem_root) in_datetime(thd, date_arg, arg_count - 1);
+ break;
+ }
+ if (!array || thd->is_fatal_error) // OOM
+ return true;
+ uint j=0;
+ for (uint i=1 ; i < arg_count ; i++)
+ {
+ array->set(j,args[i]);
+ if (!args[i]->null_value)
+ j++; // include this cell in the array.
+ else
+ {
+ /*
+ We don't put NULL values in array to avoid erronous matches in
+ bisection.
+ */
+ have_null= 1;
+ }
+ }
+ if ((array->used_count= j))
+ array->sort();
+ return false;
+}
+
+
+bool Item_func_in::fix_length_and_dec()
{
Item **arg, **arg_end;
bool const_itm= 1;
@@ -4240,7 +4217,7 @@ void Item_func_in::fix_length_and_dec()
m_compare_type= STRING_RESULT;
left_cmp_type= args[0]->cmp_type();
if (!(found_types= collect_cmp_types(args, arg_count, true)))
- return;
+ return TRUE;
for (arg= args + 1, arg_end= args + arg_count; arg != arg_end ; arg++)
{
@@ -4289,7 +4266,7 @@ void Item_func_in::fix_length_and_dec()
{
if (m_compare_type == STRING_RESULT &&
agg_arg_charsets_for_comparison(cmp_collation, args, arg_count))
- return;
+ return TRUE;
arg_types_compatible= TRUE;
if (m_compare_type == ROW_RESULT)
@@ -4300,12 +4277,14 @@ void Item_func_in::fix_length_and_dec()
if (bisection_possible)
{
array= new (thd->mem_root) in_row(thd, arg_count-1, 0);
+ if (!array)
+ return TRUE;
cmp= &((in_row*)array)->tmp;
}
else
{
if (!(cmp= new (thd->mem_root) cmp_item_row))
- return;
+ return TRUE;
cmp_items[ROW_RESULT]= cmp;
}
cmp->n= cols;
@@ -4313,7 +4292,7 @@ void Item_func_in::fix_length_and_dec()
for (uint col= 0; col < cols; col++)
{
- date_arg= find_date_time_item(args, arg_count, col);
+ date_arg= find_date_time_item(thd, args, arg_count, col, true);
if (date_arg)
{
cmp_item **cmp= 0;
@@ -4322,6 +4301,8 @@ void Item_func_in::fix_length_and_dec()
else
cmp= ((cmp_item_row*)cmp_items[ROW_RESULT])->comparators + col;
*cmp= new (thd->mem_root) cmp_item_datetime(date_arg);
+ if (!(*cmp))
+ return TRUE;
}
}
}
@@ -4336,7 +4317,7 @@ void Item_func_in::fix_length_and_dec()
values on the right can be compared as integers and adjust the
comparison type accordingly.
- See the comment about the similar block in Item_bool_func2
+ And see the comment for Item_func::convert_const_compared_to_int_field
*/
if (args[0]->real_item()->type() == FIELD_ITEM &&
!thd->lex->is_view_context_analysis() && m_compare_type != INT_RESULT)
@@ -4364,60 +4345,16 @@ void Item_func_in::fix_length_and_dec()
m_compare_type= INT_RESULT;
}
}
- switch (m_compare_type) {
- case STRING_RESULT:
- array=new (thd->mem_root) in_string(arg_count-1,(qsort2_cmp) srtcmp_in,
- cmp_collation.collation);
- break;
- case INT_RESULT:
- array= new (thd->mem_root) in_longlong(arg_count-1);
- break;
- case REAL_RESULT:
- array= new (thd->mem_root) in_double(arg_count-1);
- break;
- case ROW_RESULT:
- /*
- The row comparator was created at the beginning but only DATETIME
- items comparators were initialized. Call store_value() to setup
- others.
- */
- ((in_row*)array)->tmp.store_value(args[0]);
- break;
- case DECIMAL_RESULT:
- array= new (thd->mem_root) in_decimal(arg_count - 1);
- break;
- case TIME_RESULT:
- date_arg= find_date_time_item(args, arg_count, 0);
- array= new (thd->mem_root) in_datetime(date_arg, arg_count - 1);
- break;
- }
- if (!array || thd->is_fatal_error) // OOM
- return;
- uint j=0;
- for (uint i=1 ; i < arg_count ; i++)
- {
- array->set(j,args[i]);
- if (!args[i]->null_value)
- j++; // include this cell in the array.
- else
- {
- /*
- We don't put NULL values in array, to avoid erronous matches in
- bisection.
- */
- have_null= 1;
- }
- }
- if ((array->used_count= j))
- array->sort();
+ if (create_array(thd))
+ return TRUE;
}
else
{
if (found_types & (1U << TIME_RESULT))
- date_arg= find_date_time_item(args, arg_count, 0);
+ date_arg= find_date_time_item(thd, args, arg_count, 0, true);
if (found_types & (1U << STRING_RESULT) &&
agg_arg_charsets_for_comparison(cmp_collation, args, arg_count))
- return;
+ return TRUE;
for (i= 0; i <= (uint) TIME_RESULT; i++)
{
if (found_types & (1U << i) && !cmp_items[i])
@@ -4425,23 +4362,23 @@ void Item_func_in::fix_length_and_dec()
if (!cmp_items[i] && !(cmp_items[i]=
cmp_item::get_comparator((Item_result)i, date_arg,
cmp_collation.collation)))
- return;
+ return TRUE;
}
}
}
max_length= 1;
+ return FALSE;
}
void Item_func_in::print(String *str, enum_query_type query_type)
{
- str->append('(');
- args[0]->print(str, query_type);
+ args[0]->print_parenthesised(str, query_type, precedence());
if (negated)
str->append(STRING_WITH_LEN(" not"));
str->append(STRING_WITH_LEN(" in ("));
print_args(str, 1, query_type);
- str->append(STRING_WITH_LEN("))"));
+ str->append(STRING_WITH_LEN(")"));
}
@@ -4519,6 +4456,18 @@ longlong Item_func_in::val_int()
}
+Item *Item_func_in::build_clone(THD *thd, MEM_ROOT *mem_root)
+{
+ Item_func_in *clone= (Item_func_in *) Item_func::build_clone(thd, mem_root);
+ if (clone)
+ {
+ bzero(&clone->cmp_items, sizeof(cmp_items));
+ clone->fix_length_and_dec();
+ }
+ return clone;
+}
+
+
longlong Item_func_bit_or::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -4701,21 +4650,22 @@ Item_cond::fix_fields(THD *thd, Item **ref)
const_item_cache= FALSE;
}
- with_sum_func= with_sum_func || item->with_sum_func;
- with_param= with_param || item->with_param;
- with_field= with_field || item->with_field;
- with_subselect|= item->has_subquery();
- if (item->maybe_null)
- maybe_null=1;
- }
- fix_length_and_dec();
+ with_sum_func|= item->with_sum_func;
+ with_param|= item->with_param;
+ with_field|= item->with_field;
+ with_subselect|= item->has_subquery();
+ with_window_func|= item->with_window_func;
+ maybe_null|= item->maybe_null;
+ }
+ if (fix_length_and_dec())
+ return TRUE;
fixed= 1;
return FALSE;
}
bool
-Item_cond::eval_not_null_tables(uchar *opt_arg)
+Item_cond::eval_not_null_tables(void *opt_arg)
{
Item *item;
bool is_and_cond= functype() == Item_func::COND_AND_FUNC;
@@ -4793,7 +4743,7 @@ void Item_cond::fix_after_pullout(st_select_lex *new_parent, Item **ref,
}
-bool Item_cond::walk(Item_processor processor, bool walk_subquery, uchar *arg)
+bool Item_cond::walk(Item_processor processor, bool walk_subquery, void *arg)
{
List_iterator_fast<Item> li(list);
Item *item;
@@ -4803,17 +4753,6 @@ bool Item_cond::walk(Item_processor processor, bool walk_subquery, uchar *arg)
return Item_func::walk(processor, walk_subquery, arg);
}
-bool Item_cond_and::walk_top_and(Item_processor processor, uchar *arg)
-{
- List_iterator_fast<Item> li(list);
- Item *item;
- while ((item= li++))
- if (item->walk_top_and(processor, arg))
- return 1;
- return Item_cond::walk_top_and(processor, arg);
-}
-
-
/**
Transform an Item_cond object with a transformer callback function.
@@ -4967,7 +4906,7 @@ void Item_cond::traverse_cond(Cond_traverser traverser,
that have or refer (HAVING) to a SUM expression.
*/
-void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array,
+void Item_cond::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
{
List_iterator<Item> li(list);
@@ -4987,19 +4926,17 @@ Item_cond::used_tables() const
void Item_cond::print(String *str, enum_query_type query_type)
{
- str->append('(');
List_iterator_fast<Item> li(list);
Item *item;
if ((item=li++))
- item->print(str, query_type);
+ item->print_parenthesised(str, query_type, precedence());
while ((item=li++))
{
str->append(' ');
str->append(func_name());
str->append(' ');
- item->print(str, query_type);
+ item->print_parenthesised(str, query_type, precedence());
}
- str->append(')');
}
@@ -5020,6 +4957,73 @@ void Item_cond::neg_arguments(THD *thd)
}
+/**
+ @brief
+ Building clone for Item_cond
+
+ @param thd thread handle
+ @param mem_root part of the memory for the clone
+
+ @details
+ This method gets a copy of the current item and also
+ builds clones for its elements. For these elements
+ build_clone is called recursively.
+
+ @retval
+ clone of the item
+ 0 if an error occurred
+*/
+
+Item *Item_cond::build_clone(THD *thd, MEM_ROOT *mem_root)
+{
+ List_iterator_fast<Item> li(list);
+ Item *item;
+ Item_cond *copy= (Item_cond *) get_copy(thd, mem_root);
+ if (!copy)
+ return 0;
+ copy->list.empty();
+ while ((item= li++))
+ {
+ Item *arg_clone= item->build_clone(thd, mem_root);
+ if (!arg_clone)
+ return 0;
+ if (copy->list.push_back(arg_clone, mem_root))
+ return 0;
+ }
+ return copy;
+}
+
+
+bool Item_cond::excl_dep_on_table(table_map tab_map)
+{
+ if (used_tables() & OUTER_REF_TABLE_BIT)
+ return false;
+ if (!(used_tables() & ~tab_map))
+ return true;
+ List_iterator_fast<Item> li(list);
+ Item *item;
+ while ((item= li++))
+ {
+ if (!item->excl_dep_on_table(tab_map))
+ return false;
+ }
+ return true;
+}
+
+
+bool Item_cond::excl_dep_on_grouping_fields(st_select_lex *sel)
+{
+ List_iterator_fast<Item> li(list);
+ Item *item;
+ while ((item= li++))
+ {
+ if (!item->excl_dep_on_grouping_fields(sel))
+ return false;
+ }
+ return true;
+}
+
+
void Item_cond_and::mark_as_condition_AND_part(TABLE_LIST *embedding)
{
List_iterator<Item> li(list);
@@ -5138,26 +5142,13 @@ Item *and_expressions(THD *thd, Item *a, Item *b, Item **org_item)
}
-bool Item_func_null_predicate::count_sargable_conds(uchar *arg)
+bool Item_func_null_predicate::count_sargable_conds(void *arg)
{
((SELECT_LEX*) arg)->cond_count++;
return 0;
}
-void Item_func_isnull::print(String *str, enum_query_type query_type)
-{
- str->append(func_name());
- str->append('(');
- if (const_item() && !args[0]->maybe_null &&
- !(query_type & (QT_NO_DATA_EXPANSION | QT_VIEW_INTERNAL)))
- str->append("/*always not null*/ 1");
- else
- args[0]->print(str, query_type);
- str->append(')');
-}
-
-
longlong Item_func_isnull::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -5167,6 +5158,17 @@ longlong Item_func_isnull::val_int()
}
+void Item_func_isnull::print(String *str, enum_query_type query_type)
+{
+ if (const_item() && !args[0]->maybe_null &&
+ !(query_type & (QT_NO_DATA_EXPANSION | QT_VIEW_INTERNAL)))
+ str->append("/*always not null*/ 1");
+ else
+ args[0]->print_parenthesised(str, query_type, precedence());
+ str->append(STRING_WITH_LEN(" is null"));
+}
+
+
longlong Item_is_not_null_test::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -5204,18 +5206,33 @@ longlong Item_func_isnotnull::val_int()
void Item_func_isnotnull::print(String *str, enum_query_type query_type)
{
- str->append('(');
- args[0]->print(str, query_type);
- str->append(STRING_WITH_LEN(" is not null)"));
+ args[0]->print_parenthesised(str, query_type, precedence());
+ str->append(STRING_WITH_LEN(" is not null"));
}
-bool Item_bool_func2::count_sargable_conds(uchar *arg)
+bool Item_bool_func2::count_sargable_conds(void *arg)
{
((SELECT_LEX*) arg)->cond_count++;
return 0;
}
+void Item_func_like::print(String *str, enum_query_type query_type)
+{
+ args[0]->print_parenthesised(str, query_type, precedence());
+ str->append(' ');
+ if (negated)
+ str->append(STRING_WITH_LEN(" not "));
+ str->append(func_name());
+ str->append(' ');
+ args[1]->print_parenthesised(str, query_type, precedence());
+ if (escape_used_in_parsing)
+ {
+ str->append(STRING_WITH_LEN(" escape "));
+ escape_item->print(str, query_type);
+ }
+}
+
longlong Item_func_like::val_int()
{
@@ -5234,11 +5251,11 @@ longlong Item_func_like::val_int()
}
null_value=0;
if (canDoTurboBM)
- return turboBM_matches(res->ptr(), res->length()) ? 1 : 0;
+ return turboBM_matches(res->ptr(), res->length()) ? !negated : negated;
return my_wildcmp(cmp_collation.collation,
res->ptr(),res->ptr()+res->length(),
res2->ptr(),res2->ptr()+res2->length(),
- escape,wild_one,wild_many) ? 0 : 1;
+ escape,wild_one,wild_many) ? negated : !negated;
}
@@ -5248,6 +5265,9 @@ longlong Item_func_like::val_int()
bool Item_func_like::with_sargable_pattern() const
{
+ if (negated)
+ return false;
+
if (!args[1]->const_item() || args[1]->is_expensive())
return false;
@@ -5276,13 +5296,10 @@ SEL_TREE *Item_func_like::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr)
}
-bool Item_func_like::fix_fields(THD *thd, Item **ref)
+bool fix_escape_item(THD *thd, Item *escape_item, String *tmp_str,
+ bool escape_used_in_parsing, CHARSET_INFO *cmp_cs,
+ int *escape)
{
- DBUG_ASSERT(fixed == 0);
- if (Item_bool_func2::fix_fields(thd, ref) ||
- escape_item->fix_fields(thd, &escape_item))
- return TRUE;
-
if (!escape_item->const_during_execution())
{
my_error(ER_WRONG_ARGUMENTS,MYF(0),"ESCAPE");
@@ -5292,7 +5309,7 @@ bool Item_func_like::fix_fields(THD *thd, Item **ref)
if (escape_item->const_item())
{
/* If we are on execution stage */
- String *escape_str= escape_item->val_str(&cmp_value1);
+ String *escape_str= escape_item->val_str(tmp_str);
if (escape_str)
{
const char *escape_str_ptr= escape_str->ptr();
@@ -5305,7 +5322,7 @@ bool Item_func_like::fix_fields(THD *thd, Item **ref)
return TRUE;
}
- if (use_mb(cmp_collation.collation))
+ if (use_mb(cmp_cs))
{
CHARSET_INFO *cs= escape_str->charset();
my_wc_t wc;
@@ -5313,7 +5330,7 @@ bool Item_func_like::fix_fields(THD *thd, Item **ref)
(const uchar*) escape_str_ptr,
(const uchar*) escape_str_ptr +
escape_str->length());
- escape= (int) (rc > 0 ? wc : '\\');
+ *escape= (int) (rc > 0 ? wc : '\\');
}
else
{
@@ -5322,25 +5339,40 @@ bool Item_func_like::fix_fields(THD *thd, Item **ref)
code instead of Unicode code as "escape" argument.
Convert to "cs" if charset of escape differs.
*/
- CHARSET_INFO *cs= cmp_collation.collation;
uint32 unused;
if (escape_str->needs_conversion(escape_str->length(),
- escape_str->charset(), cs, &unused))
+ escape_str->charset(),cmp_cs,&unused))
{
char ch;
uint errors;
- uint32 cnvlen= copy_and_convert(&ch, 1, cs, escape_str_ptr,
+ uint32 cnvlen= copy_and_convert(&ch, 1, cmp_cs, escape_str_ptr,
escape_str->length(),
escape_str->charset(), &errors);
- escape= cnvlen ? ch : '\\';
+ *escape= cnvlen ? ch : '\\';
}
else
- escape= escape_str_ptr ? *escape_str_ptr : '\\';
+ *escape= escape_str_ptr ? *escape_str_ptr : '\\';
}
}
else
- escape= '\\';
+ *escape= '\\';
+ }
+
+ return FALSE;
+}
+
+bool Item_func_like::fix_fields(THD *thd, Item **ref)
+{
+ DBUG_ASSERT(fixed == 0);
+ if (Item_bool_func2::fix_fields(thd, ref) ||
+ escape_item->fix_fields(thd, &escape_item) ||
+ fix_escape_item(thd, escape_item, &cmp_value1, escape_used_in_parsing,
+ cmp_collation.collation, &escape))
+ return TRUE;
+
+ if (escape_item->const_item())
+ {
/*
We could also do boyer-more for non-const items, but as we would have to
recompute the tables for each row it's not worth it.
@@ -5396,7 +5428,7 @@ void Item_func_like::cleanup()
}
-bool Item_func_like::find_selective_predicates_list_processor(uchar *arg)
+bool Item_func_like::find_selective_predicates_list_processor(void *arg)
{
find_selective_predicates_list_processor_data *data=
(find_selective_predicates_list_processor_data *) arg;
@@ -5430,7 +5462,7 @@ void Regexp_processor_pcre::set_recursion_limit(THD *thd)
DBUG_ASSERT(thd == current_thd);
stack_used= available_stack_size(thd->thread_stack, &stack_used);
m_pcre_extra.match_limit_recursion=
- (my_thread_stack_size - STACK_MIN_SIZE - stack_used)/my_pcre_frame_size;
+ (ulong)((my_thread_stack_size - STACK_MIN_SIZE - stack_used)/my_pcre_frame_size);
}
@@ -5473,9 +5505,8 @@ bool Regexp_processor_pcre::compile(String *pattern, bool send_error)
{
if (!stringcmp(pattern, &m_prev_pattern))
return false;
+ cleanup();
m_prev_pattern.copy(*pattern);
- pcre_free(m_pcre);
- m_pcre= NULL;
}
if (!(pattern= convert_if_needed(pattern, &pattern_converter)))
@@ -5700,16 +5731,16 @@ bool Item_func_regex::fix_fields(THD *thd, Item **ref)
return Item_bool_func::fix_fields(thd, ref);
}
-void
+bool
Item_func_regex::fix_length_and_dec()
{
- Item_bool_func::fix_length_and_dec();
-
- if (agg_arg_charsets_for_comparison(cmp_collation, args, 2))
- return;
+ if (Item_bool_func::fix_length_and_dec() ||
+ agg_arg_charsets_for_comparison(cmp_collation, args, 2))
+ return TRUE;
re.init(cmp_collation.collation, 0);
re.fix_owner(this, args[0], args[1]);
+ return FALSE;
}
@@ -5733,14 +5764,15 @@ bool Item_func_regexp_instr::fix_fields(THD *thd, Item **ref)
}
-void
+bool
Item_func_regexp_instr::fix_length_and_dec()
{
if (agg_arg_charsets_for_comparison(cmp_collation, args, 2))
- return;
+ return TRUE;
re.init(cmp_collation.collation, 0);
re.fix_owner(this, args[0], args[1]);
+ return FALSE;
}
@@ -5901,8 +5933,8 @@ void Item_func_like::turboBM_compute_bad_character_shifts()
bool Item_func_like::turboBM_matches(const char* text, int text_len) const
{
- register int bcShift;
- register int turboShift;
+ int bcShift;
+ int turboShift;
int shift = pattern_len;
int j = 0;
int u = 0;
@@ -5916,7 +5948,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
{
while (j <= tlmpl)
{
- register int i= plm1;
+ int i= plm1;
while (i >= 0 && pattern[i] == text[i + j])
{
i--;
@@ -5926,7 +5958,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
if (i < 0)
return 1;
- register const int v = plm1 - i;
+ const int v= plm1 - i;
turboShift = u - v;
bcShift = bmBc[(uint) (uchar) text[i + j]] - plm1 + i;
shift = MY_MAX(turboShift, bcShift);
@@ -5947,7 +5979,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
{
while (j <= tlmpl)
{
- register int i = plm1;
+ int i= plm1;
while (i >= 0 && likeconv(cs,pattern[i]) == likeconv(cs,text[i + j]))
{
i--;
@@ -5957,7 +5989,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
if (i < 0)
return 1;
- register const int v = plm1 - i;
+ const int v= plm1 - i;
turboShift = u - v;
bcShift = bmBc[(uint) likeconv(cs, text[i + j])] - plm1 + i;
shift = MY_MAX(turboShift, bcShift);
@@ -6682,7 +6714,8 @@ bool Item_equal::fix_fields(THD *thd, Item **ref)
}
if (prev_equal_field && last_equal_field != first_equal_field)
last_equal_field->next_equal_field= first_equal_field;
- fix_length_and_dec();
+ if (fix_length_and_dec())
+ return TRUE;
fixed= 1;
return FALSE;
}
@@ -6710,7 +6743,7 @@ void Item_equal::update_used_tables()
}
-bool Item_equal::count_sargable_conds(uchar *arg)
+bool Item_equal::count_sargable_conds(void *arg)
{
SELECT_LEX *sel= (SELECT_LEX *) arg;
uint m= equal_items.elements;
@@ -6768,15 +6801,16 @@ longlong Item_equal::val_int()
}
-void Item_equal::fix_length_and_dec()
+bool Item_equal::fix_length_and_dec()
{
Item *item= get_first(NO_PARTICULAR_TAB, NULL);
eval_item= cmp_item::get_comparator(item->cmp_type(), item,
item->collation.collation);
+ return FALSE;
}
-bool Item_equal::walk(Item_processor processor, bool walk_subquery, uchar *arg)
+bool Item_equal::walk(Item_processor processor, bool walk_subquery, void *arg)
{
Item *item;
Item_equal_fields_iterator it(*this);
@@ -7005,10 +7039,9 @@ longlong Item_func_dyncol_exists::val_int()
}
else
{
- uint strlen;
+ uint strlen= nm->length() * DYNCOL_UTF->mbmaxlen + 1;
uint dummy_errors;
- buf.str= (char *)sql_alloc((strlen= nm->length() *
- DYNCOL_UTF->mbmaxlen + 1));
+ buf.str= (char *) current_thd->alloc(strlen);
if (buf.str)
{
buf.length=
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index eb1da504e7c..d083248f6cd 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -23,10 +23,10 @@
#pragma interface /* gcc class implementation */
#endif
-#include "thr_malloc.h" /* sql_calloc */
#include "item_func.h" /* Item_int_func, Item_bool_func */
#define PCRE_STATIC 1 /* Important on Windows */
#include "pcre.h" /* pcre header file */
+#include "item.h"
extern Item_result item_cmp_type(Item_result a,Item_result b);
inline Item_result item_cmp_type(const Item *a, const Item *b)
@@ -106,9 +106,13 @@ public:
int compare_e_datetime() { return compare_e_temporal(MYSQL_TYPE_DATETIME); }
int compare_time() { return compare_temporal(MYSQL_TYPE_TIME); }
int compare_e_time() { return compare_e_temporal(MYSQL_TYPE_TIME); }
+ int compare_json_str_basic(Item *j, Item *s);
+ int compare_json_str();
+ int compare_str_json();
+ int compare_e_json_str_basic(Item *j, Item *s);
+ int compare_e_json_str();
+ int compare_e_str_json();
- Item** cache_converted_constant(THD *thd, Item **value, Item **cache,
- Item_result type);
static arg_cmp_func comparator_matrix [6][2];
inline bool is_owner_equal_func()
{
@@ -124,6 +128,7 @@ public:
comparators= 0;
}
friend class Item_func;
+ friend class Item_bool_rowready_func2;
};
@@ -195,8 +200,9 @@ public:
Item_bool_func(THD *thd, Item_bool_func *item) :Item_int_func(thd, item) {}
bool is_bool_type() { return true; }
virtual CHARSET_INFO *compare_collation() const { return NULL; }
- void fix_length_and_dec() { decimals=0; max_length=1; }
+ bool fix_length_and_dec() { decimals=0; max_length=1; return FALSE; }
uint decimal_precision() const { return 1; }
+ bool need_parentheses_in_default() { return true; }
};
@@ -210,8 +216,9 @@ class Item_func_truth : public Item_bool_func
public:
virtual bool val_bool();
virtual longlong val_int();
- virtual void fix_length_and_dec();
+ virtual bool fix_length_and_dec();
virtual void print(String *str, enum_query_type query_type);
+ enum precedence precedence() const { return CMP_PRECEDENCE; }
protected:
Item_func_truth(THD *thd, Item *a, bool a_value, bool a_affirmative):
@@ -243,6 +250,8 @@ public:
Item_func_istrue(THD *thd, Item *a): Item_func_truth(thd, a, true, true) {}
~Item_func_istrue() {}
virtual const char* func_name() const { return "istrue"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_istrue>(thd, mem_root, this); }
};
@@ -257,6 +266,8 @@ public:
Item_func_truth(thd, a, true, false) {}
~Item_func_isnottrue() {}
virtual const char* func_name() const { return "isnottrue"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_isnottrue>(thd, mem_root, this); }
};
@@ -270,6 +281,8 @@ public:
Item_func_isfalse(THD *thd, Item *a): Item_func_truth(thd, a, false, true) {}
~Item_func_isfalse() {}
virtual const char* func_name() const { return "isfalse"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_isfalse>(thd, mem_root, this); }
};
@@ -284,6 +297,8 @@ public:
Item_func_truth(thd, a, false, false) {}
~Item_func_isnotfalse() {}
virtual const char* func_name() const { return "isnotfalse"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_isnotfalse>(thd, mem_root, this); }
};
@@ -336,22 +351,22 @@ public:
void keep_top_level_cache();
Item *transform(THD *thd, Item_transformer transformer, uchar *arg);
virtual Item *expr_cache_insert_transformer(THD *thd, uchar *unused);
- bool is_expensive_processor(uchar *arg);
+ bool is_expensive_processor(void *arg);
bool is_expensive();
void set_join_tab_idx(uint join_tab_idx_arg)
{ args[1]->set_join_tab_idx(join_tab_idx_arg); }
virtual void get_cache_parameters(List<Item> &parameters);
bool is_top_level_item();
- bool eval_not_null_tables(uchar *opt_arg);
+ bool eval_not_null_tables(void *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
bool invisible_mode();
void reset_cache() { cache= NULL; }
virtual void print(String *str, enum_query_type query_type);
void restore_first_argument();
Item* get_wrapped_in_subselect_item()
- {
- return args[1];
- }
+ { return args[1]; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_in_optimizer>(thd, mem_root, this); }
};
@@ -371,7 +386,7 @@ public:
bool is_null() { return MY_TEST(args[0]->is_null() || args[1]->is_null()); }
COND *remove_eq_conds(THD *thd, Item::cond_result *cond_value,
bool top_level);
- bool count_sargable_conds(uchar *arg);
+ bool count_sargable_conds(void *arg);
/*
Specifies which result type the function uses to compare its arguments.
This method is used in equal field propagation.
@@ -474,6 +489,7 @@ public:
{
Item_func::print_op(str, query_type);
}
+ enum precedence precedence() const { return CMP_PRECEDENCE; }
Item *neg_transformer(THD *thd);
virtual Item *negated_item(THD *thd);
Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
@@ -485,7 +501,7 @@ public:
cond);
return this;
}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
int set_cmp_func()
{
return cmp.set_cmp_func(this, tmp_arg, tmp_arg + 1, true);
@@ -505,12 +521,22 @@ public:
return add_key_fields_optimize_op(join, key_fields, and_level,
usable_tables, sargables, false);
}
+ Item *build_clone(THD *thd, MEM_ROOT *mem_root)
+ {
+ Item_bool_rowready_func2 *clone=
+ (Item_bool_rowready_func2 *) Item_func::build_clone(thd, mem_root);
+ if (clone)
+ {
+ clone->cmp.comparators= 0;
+ }
+ return clone;
+ }
};
/**
XOR inherits from Item_bool_func because it is not optimized yet.
Later, when XOR is optimized, it needs to inherit from
- Item_cond instead. See WL#5800.
+ Item_cond instead. See WL#5800.
*/
class Item_func_xor :public Item_bool_func
{
@@ -518,6 +544,7 @@ public:
Item_func_xor(THD *thd, Item *i1, Item *i2): Item_bool_func(thd, i1, i2) {}
enum Functype functype() const { return XOR_FUNC; }
const char *func_name() const { return "xor"; }
+ enum precedence precedence() const { return XOR_PRECEDENCE; }
void print(String *str, enum_query_type query_type)
{ Item_func::print_op(str, query_type); }
longlong val_int();
@@ -527,6 +554,8 @@ public:
Item_args::propagate_equal_fields(thd, Context_boolean(), cond);
return this;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_xor>(thd, mem_root, this); }
};
class Item_func_not :public Item_bool_func
@@ -540,9 +569,12 @@ public:
longlong val_int();
enum Functype functype() const { return NOT_FUNC; }
const char *func_name() const { return "not"; }
+ enum precedence precedence() const { return BANG_PRECEDENCE; }
Item *neg_transformer(THD *thd);
bool fix_fields(THD *, Item **);
virtual void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_not>(thd, mem_root, this); }
};
class Item_maxmin_subselect;
@@ -590,6 +622,8 @@ public:
void add_key_fields(JOIN *join, KEY_FIELD **key_fields,
uint *and_level, table_map usable_tables,
SARGABLE_PARAM **sargables);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_trig_cond>(thd, mem_root, this); }
};
class Item_func_not_all :public Item_func_not
@@ -626,6 +660,8 @@ public:
longlong val_int();
const char *func_name() const { return "<nop>"; }
Item *neg_transformer(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_nop_all>(thd, mem_root, this); }
};
@@ -665,6 +701,8 @@ public:
uint in_equality_no;
virtual uint exists2in_reserved_items() { return 1; };
friend class Arg_comparator;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_eq>(thd, mem_root, this); }
};
class Item_func_equal :public Item_bool_rowready_func2
@@ -673,7 +711,7 @@ public:
Item_func_equal(THD *thd, Item *a, Item *b):
Item_bool_rowready_func2(thd, a, b) {}
longlong val_int();
- void fix_length_and_dec();
+ bool fix_length_and_dec();
table_map not_null_tables() const { return 0; }
enum Functype functype() const { return EQUAL_FUNC; }
enum Functype rev_functype() const { return EQUAL_FUNC; }
@@ -687,6 +725,8 @@ public:
return add_key_fields_optimize_op(join, key_fields, and_level,
usable_tables, sargables, true);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_equal>(thd, mem_root, this); }
};
@@ -701,6 +741,8 @@ public:
cond_result eq_cmp_result() const { return COND_TRUE; }
const char *func_name() const { return ">="; }
Item *negated_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ge>(thd, mem_root, this); }
};
@@ -715,6 +757,8 @@ public:
cond_result eq_cmp_result() const { return COND_FALSE; }
const char *func_name() const { return ">"; }
Item *negated_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_gt>(thd, mem_root, this); }
};
@@ -729,6 +773,8 @@ public:
cond_result eq_cmp_result() const { return COND_TRUE; }
const char *func_name() const { return "<="; }
Item *negated_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_le>(thd, mem_root, this); }
};
@@ -743,6 +789,8 @@ public:
cond_result eq_cmp_result() const { return COND_FALSE; }
const char *func_name() const { return "<"; }
Item *negated_item(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_lt>(thd, mem_root, this); }
};
@@ -766,6 +814,8 @@ public:
Item *negated_item(THD *thd);
void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level,
table_map usable_tables, SARGABLE_PARAM **sargables);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ne>(thd, mem_root, this); }
};
@@ -800,7 +850,6 @@ public:
Item_func_opt_neg(THD *thd, List<Item> &list):
Item_bool_func(thd, list), negated(0), pred_level(0) {}
public:
- inline void negate() { negated= !negated; }
inline void top_level_item() { pred_level= 1; }
bool is_top_level_item() const { return pred_level; }
Item *neg_transformer(THD *thd)
@@ -828,11 +877,12 @@ public:
longlong val_int();
enum Functype functype() const { return BETWEEN; }
const char *func_name() const { return "between"; }
- void fix_length_and_dec();
+ enum precedence precedence() const { return BETWEEN_PRECEDENCE; }
+ bool fix_length_and_dec();
virtual void print(String *str, enum_query_type query_type);
- bool eval_not_null_tables(uchar *opt_arg);
+ bool eval_not_null_tables(void *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
- bool count_sargable_conds(uchar *arg);
+ bool count_sargable_conds(void *arg);
void add_key_fields(JOIN *join, KEY_FIELD **key_fields,
uint *and_level, table_map usable_tables,
SARGABLE_PARAM **sargables);
@@ -846,6 +896,8 @@ public:
cond);
return this;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_between>(thd, mem_root, this); }
longlong val_int_cmp_string();
longlong val_int_cmp_int();
@@ -864,11 +916,15 @@ public:
longlong val_int();
uint decimal_precision() const { return 1; }
const char *func_name() const { return "strcmp"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
- agg_arg_charsets_for_comparison(cmp_collation, args, 2);
- fix_char_length(2); // returns "1" or "0" or "-1"
+ if (agg_arg_charsets_for_comparison(cmp_collation, args, 2))
+ return TRUE;
+ fix_char_length(2);
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_strcmp>(thd, mem_root, this); }
};
@@ -892,7 +948,7 @@ public:
}
bool fix_fields(THD *, Item **);
longlong val_int();
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "interval"; }
uint decimal_precision() const { return 2; }
void print(String *str, enum_query_type query_type)
@@ -900,6 +956,8 @@ public:
str->append(func_name());
print_args(str, 0, query_type);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_interval>(thd, mem_root, this); }
};
@@ -915,13 +973,16 @@ public:
String *str_op(String *);
my_decimal *decimal_op(my_decimal *);
bool date_op(MYSQL_TIME *ltime,uint fuzzydate);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
set_handler_by_field_type(agg_field_type(args, arg_count, true));
fix_attributes(args, arg_count);
+ return FALSE;
}
const char *func_name() const { return "coalesce"; }
table_map not_null_tables() const { return 0; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_coalesce>(thd, mem_root, this); }
};
@@ -933,10 +994,11 @@ public:
class Item_func_case_abbreviation2 :public Item_func_hybrid_field_type
{
protected:
- void fix_length_and_dec2(Item **items)
+ bool fix_length_and_dec2(Item **items)
{
set_handler_by_field_type(agg_field_type(items, 2, true));
fix_attributes(items, 2);
+ return FALSE;
}
uint decimal_precision2(Item **args) const;
public:
@@ -957,10 +1019,12 @@ public:
String *str_op(String *str);
my_decimal *decimal_op(my_decimal *);
bool date_op(MYSQL_TIME *ltime,uint fuzzydate);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
- Item_func_case_abbreviation2::fix_length_and_dec2(args);
+ if (Item_func_case_abbreviation2::fix_length_and_dec2(args))
+ return TRUE;
maybe_null= args[1]->maybe_null;
+ return FALSE;
}
const char *func_name() const { return "ifnull"; }
Field *create_field_for_create_select(TABLE *table)
@@ -971,6 +1035,8 @@ public:
{
return Item_func_case_abbreviation2::decimal_precision2(args);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ifnull>(thd, mem_root, this); }
};
@@ -986,14 +1052,16 @@ public:
my_decimal *decimal_op(my_decimal *);
String *str_op(String *);
bool fix_fields(THD *, Item **);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
uint decimal_precision() const
{
return Item_func_case_abbreviation2::decimal_precision2(args + 1);
}
const char *func_name() const { return "if"; }
- bool eval_not_null_tables(uchar *opt_arg);
+ bool eval_not_null_tables(void *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_if>(thd, mem_root, this); }
private:
void cache_type_info(Item *source);
};
@@ -1013,7 +1081,7 @@ class Item_func_nullif :public Item_func_hybrid_field_type
The left "a" is in a comparison and can be replaced by:
- Item_func::convert_const_compared_to_int_field()
- agg_item_set_converter() in set_cmp_func()
- - Arg_comparator::cache_converted_constant() in set_cmp_func()
+ - cache_converted_constant() in set_cmp_func()
Both "a"s are subject to equal fields propagation and can be replaced by:
- Item_field::propagate_equal_fields(ANY_SUBST) for the left "a"
@@ -1021,6 +1089,11 @@ class Item_func_nullif :public Item_func_hybrid_field_type
*/
Item_cache *m_cache;
int compare();
+ void reset_first_arg_if_needed()
+ {
+ if (arg_count == 3 && args[0] != args[2])
+ args[0]= args[2];
+ }
Item *m_arg0;
public:
/*
@@ -1046,13 +1119,13 @@ public:
longlong int_op();
String *str_op(String *str);
my_decimal *decimal_op(my_decimal *);
- void fix_length_and_dec();
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg);
+ bool fix_length_and_dec();
+ bool walk(Item_processor processor, bool walk_subquery, void *arg);
uint decimal_precision() const { return args[2]->decimal_precision(); }
const char *func_name() const { return "nullif"; }
void print(String *str, enum_query_type query_type);
- void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields,
- uint flags);
+ void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
+ List<Item> &fields, uint flags);
void update_used_tables();
table_map not_null_tables() const { return 0; }
bool is_null();
@@ -1076,6 +1149,14 @@ public:
cond, &args[2]);
return this;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_nullif>(thd, mem_root, this); }
+ Item *derived_field_transformer_for_having(THD *thd, uchar *arg)
+ { reset_first_arg_if_needed(); return this; }
+ Item *derived_field_transformer_for_where(THD *thd, uchar *arg)
+ { reset_first_arg_if_needed(); return this; }
+ Item *derived_grouping_field_transformer_for_where(THD *thd, uchar *arg)
+ { reset_first_arg_if_needed(); return this; }
};
@@ -1094,9 +1175,9 @@ public:
uint count;
uint used_count;
in_vector() {}
- in_vector(uint elements,uint element_length,qsort2_cmp cmp_func,
+ in_vector(THD *thd, uint elements, uint element_length, qsort2_cmp cmp_func,
CHARSET_INFO *cmp_coll)
- :base((char*) sql_calloc(elements*element_length)),
+ :base((char*) thd_calloc(thd, elements * element_length)),
size(element_length), compare(cmp_func), collation(cmp_coll),
count(elements), used_count(elements) {}
virtual ~in_vector() {}
@@ -1153,7 +1234,7 @@ class in_string :public in_vector
}
};
public:
- in_string(uint elements,qsort2_cmp cmp_func, CHARSET_INFO *cs);
+ in_string(THD *thd, uint elements, qsort2_cmp cmp_func, CHARSET_INFO *cs);
~in_string();
void set(uint pos,Item *item);
uchar *get_value(Item *item);
@@ -1181,7 +1262,7 @@ protected:
longlong unsigned_flag; // Use longlong, not bool, to preserve alignment
} tmp;
public:
- in_longlong(uint elements);
+ in_longlong(THD *thd, uint elements);
void set(uint pos,Item *item);
uchar *get_value(Item *item);
Item* create_item(THD *thd);
@@ -1199,20 +1280,15 @@ public:
/*
Class to represent a vector of constant DATE/DATETIME values.
- Values are obtained with help of the get_datetime_value() function.
- If the left item is a constant one then its value is cached in the
- lval_cache variable.
*/
class in_datetime :public in_longlong
{
public:
/* An item used to issue warnings. */
Item *warn_item;
- /* Cache for the left item. */
- Item *lval_cache;
- in_datetime(Item *warn_item_arg, uint elements)
- :in_longlong(elements), warn_item(warn_item_arg), lval_cache(0) {};
+ in_datetime(THD *thd, Item *warn_item_arg, uint elements)
+ :in_longlong(thd, elements), warn_item(warn_item_arg) {}
void set(uint pos,Item *item);
uchar *get_value(Item *item);
Item *create_item(THD *thd);
@@ -1230,7 +1306,7 @@ class in_double :public in_vector
{
double tmp;
public:
- in_double(uint elements);
+ in_double(THD *thd, uint elements);
void set(uint pos,Item *item);
uchar *get_value(Item *item);
Item *create_item(THD *thd);
@@ -1246,7 +1322,7 @@ class in_decimal :public in_vector
{
my_decimal val;
public:
- in_decimal(uint elements);
+ in_decimal(THD *thd, uint elements);
void set(uint pos, Item *item);
uchar *get_value(Item *item);
Item *create_item(THD *thd);
@@ -1257,7 +1333,6 @@ public:
item_dec->set_decimal_value(dec);
}
Item_result result_type() { return DECIMAL_RESULT; }
-
};
@@ -1381,9 +1456,6 @@ public:
/*
Compare items in the DATETIME context.
- Values are obtained with help of the get_datetime_value() function.
- If the left item is a constant one then its value is cached in the
- lval_cache variable.
*/
class cmp_item_datetime : public cmp_item_scalar
{
@@ -1391,11 +1463,9 @@ class cmp_item_datetime : public cmp_item_scalar
public:
/* Item used for issuing warnings. */
Item *warn_item;
- /* Cache for the left item. */
- Item *lval_cache;
cmp_item_datetime(Item *warn_item_arg)
- : warn_item(warn_item_arg), lval_cache(0) {}
+ : warn_item(warn_item_arg) {}
void store_value(Item *item);
int cmp(Item *arg);
int compare(cmp_item *ci);
@@ -1496,12 +1566,11 @@ class Item_func_case :public Item_func_hybrid_field_type
int first_expr_num, else_expr_num;
enum Item_result left_cmp_type;
String tmp_value;
- uint ncases;
+ uint nwhens;
Item_result cmp_type;
DTCollation cmp_collation;
cmp_item *cmp_items[6]; /* For all result types */
cmp_item *case_item;
- Item **arg_buffer;
uint m_found_types;
public:
Item_func_case(THD *thd, List<Item> &list, Item *first_expr_arg,
@@ -1512,15 +1581,29 @@ public:
my_decimal *decimal_op(my_decimal *);
bool date_op(MYSQL_TIME *ltime, uint fuzzydate);
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
uint decimal_precision() const;
table_map not_null_tables() const { return 0; }
const char *func_name() const { return "case"; }
+ enum precedence precedence() const { return BETWEEN_PRECEDENCE; }
virtual void print(String *str, enum_query_type query_type);
Item *find_item(String *str);
CHARSET_INFO *compare_collation() const { return cmp_collation.collation; }
void cleanup();
Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond);
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_case>(thd, mem_root, this); }
+ Item *build_clone(THD *thd, MEM_ROOT *mem_root)
+ {
+ Item_func_case *clone= (Item_func_case *) Item_func::build_clone(thd, mem_root);
+ if (clone)
+ {
+ clone->case_item= 0;
+ bzero(&clone->cmp_items, sizeof(cmp_items));
+ }
+ return clone;
+ }
};
/*
@@ -1576,7 +1659,8 @@ public:
}
longlong val_int();
bool fix_fields(THD *, Item **);
- void fix_length_and_dec();
+ bool create_array(THD *thd);
+ bool fix_length_and_dec();
void cleanup()
{
uint i;
@@ -1594,6 +1678,7 @@ public:
void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level,
table_map usable_tables, SARGABLE_PARAM **sargables);
SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr);
+ SEL_TREE *get_func_row_mm_tree(RANGE_OPT_PARAM *param, Item_row *key_row);
Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
{
/*
@@ -1613,10 +1698,14 @@ public:
}
virtual void print(String *str, enum_query_type query_type);
enum Functype functype() const { return IN_FUNC; }
- const char *func_name() const { return " IN "; }
- bool eval_not_null_tables(uchar *opt_arg);
+ const char *func_name() const { return "in"; }
+ enum precedence precedence() const { return CMP_PRECEDENCE; }
+ bool eval_not_null_tables(void *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
- bool count_sargable_conds(uchar *arg);
+ bool count_sargable_conds(void *arg);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_in>(thd, mem_root, this); }
+ Item *build_clone(THD *thd, MEM_ROOT *mem_root);
};
class cmp_item_row :public cmp_item
@@ -1632,7 +1721,8 @@ public:
int compare(cmp_item *arg);
cmp_item *make_same();
void store_value_by_template(THD *thd, cmp_item *tmpl, Item *);
- friend void Item_func_in::fix_length_and_dec();
+ friend bool Item_func_in::fix_length_and_dec();
+ cmp_item *get_comparator(uint i) { return comparators[i]; }
};
@@ -1644,8 +1734,10 @@ public:
~in_row();
void set(uint pos,Item *item);
uchar *get_value(Item *item);
- friend void Item_func_in::fix_length_and_dec();
+ friend bool Item_func_in::create_array(THD *thd);
+ friend bool Item_func_in::fix_length_and_dec();
Item_result result_type() { return ROW_RESULT; }
+ cmp_item *get_cmp_item() { return &tmp; }
};
/* Functions used by where clause */
@@ -1675,8 +1767,12 @@ public:
}
CHARSET_INFO *compare_collation() const
{ return args[0]->collation.collation; }
- void fix_length_and_dec() { decimals=0; max_length=1; maybe_null=0; }
- bool count_sargable_conds(uchar *arg);
+ bool fix_length_and_dec()
+ {
+ decimals=0; max_length=1; maybe_null=0;
+ return FALSE;
+ }
+ bool count_sargable_conds(void *arg);
};
@@ -1686,12 +1782,9 @@ public:
Item_func_isnull(THD *thd, Item *a): Item_func_null_predicate(thd, a) {}
longlong val_int();
enum Functype functype() const { return ISNULL_FUNC; }
- void fix_length_and_dec()
- {
- Item_func_null_predicate::fix_length_and_dec();
- update_used_tables();
- }
const char *func_name() const { return "isnull"; }
+ void print(String *str, enum_query_type query_type);
+ enum precedence precedence() const { return CMP_PRECEDENCE; }
bool arg_is_datetime_notnull_field()
{
@@ -1725,9 +1818,10 @@ public:
}
COND *remove_eq_conds(THD *thd, Item::cond_result *cond_value,
bool top_level);
- virtual void print(String *str, enum_query_type query_type);
table_map not_null_tables() const { return 0; }
Item *neg_transformer(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_isnull>(thd, mem_root, this); }
};
/* Functions used by HAVING for rewriting IN subquery */
@@ -1768,11 +1862,14 @@ public:
longlong val_int();
enum Functype functype() const { return ISNOTNULL_FUNC; }
const char *func_name() const { return "isnotnull"; }
+ enum precedence precedence() const { return CMP_PRECEDENCE; }
table_map not_null_tables() const
{ return abort_on_null ? not_null_tables_cache : 0; }
Item *neg_transformer(THD *thd);
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
void top_level_item() { abort_on_null=1; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_isnotnull>(thd, mem_root, this); }
};
@@ -1797,6 +1894,7 @@ class Item_func_like :public Item_bool_func2
bool escape_used_in_parsing;
bool use_sampling;
+ bool negated;
DTCollation cmp_collation;
String cmp_value1, cmp_value2;
@@ -1817,13 +1915,10 @@ public:
Item_func_like(THD *thd, Item *a, Item *b, Item *escape_arg, bool escape_used):
Item_bool_func2(thd, a, b), canDoTurboBM(FALSE), pattern(0), pattern_len(0),
bmGs(0), bmBc(0), escape_item(escape_arg),
- escape_used_in_parsing(escape_used), use_sampling(0) {}
+ escape_used_in_parsing(escape_used), use_sampling(0), negated(0) {}
longlong val_int();
enum Functype functype() const { return LIKE_FUNC; }
- void print(String *str, enum_query_type query_type)
- {
- Item_func::print_op(str, query_type);
- }
+ void print(String *str, enum_query_type query_type);
CHARSET_INFO *compare_collation() const
{ return cmp_collation.collation; }
cond_result eq_cmp_result() const
@@ -1898,15 +1993,25 @@ public:
return this;
}
const char *func_name() const { return "like"; }
+ enum precedence precedence() const { return CMP_PRECEDENCE; }
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
max_length= 1;
- agg_arg_charsets_for_comparison(cmp_collation, args, 2);
+ return agg_arg_charsets_for_comparison(cmp_collation, args, 2);
}
void cleanup();
- bool find_selective_predicates_list_processor(uchar *arg);
+ Item *neg_transformer(THD *thd)
+ {
+ negated= !negated;
+ return this;
+ }
+
+ bool find_selective_predicates_list_processor(void *arg);
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_like>(thd, mem_root, this); }
};
@@ -1981,15 +2086,16 @@ public:
{
return subpattern_end(n) - subpattern_start(n);
}
- void cleanup()
+ void reset()
{
- if (m_pcre)
- {
- pcre_free(m_pcre);
- m_pcre= NULL;
- }
+ m_pcre= NULL;
m_prev_pattern.length(0);
}
+ void cleanup()
+ {
+ pcre_free(m_pcre);
+ reset();
+ }
bool is_compiled() const { return m_pcre != NULL; }
bool is_const() const { return m_is_const; }
void set_const(bool arg) { m_is_const= arg; }
@@ -2013,10 +2119,11 @@ public:
}
longlong val_int();
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "regexp"; }
-
- virtual inline void print(String *str, enum_query_type query_type)
+ enum precedence precedence() const { return CMP_PRECEDENCE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
+ void print(String *str, enum_query_type query_type)
{
print_op(str, query_type);
}
@@ -2041,8 +2148,9 @@ public:
}
longlong val_int();
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "regexp_instr"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
};
@@ -2104,14 +2212,14 @@ public:
SARGABLE_PARAM **sargables);
SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr);
virtual void print(String *str, enum_query_type query_type);
- void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields,
- uint flags);
+ void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
+ List<Item> &fields, uint flags);
friend int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
COND **conds);
void top_level_item() { abort_on_null=1; }
bool top_level() { return abort_on_null; }
void copy_andor_arguments(THD *thd, Item_cond *item);
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg);
+ bool walk(Item_processor processor, bool walk_subquery, void *arg);
Item *transform(THD *thd, Item_transformer transformer, uchar *arg);
void traverse_cond(Cond_traverser, void *arg, traverse_order order);
void neg_arguments(THD *thd);
@@ -2119,7 +2227,10 @@ public:
Item* propagate_equal_fields(THD *, const Context &, COND_EQUAL *);
Item *compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t);
- bool eval_not_null_tables(uchar *opt_arg);
+ bool eval_not_null_tables(void *opt_arg);
+ Item *build_clone(THD *thd, MEM_ROOT *mem_root);
+ bool excl_dep_on_table(table_map tab_map);
+ bool excl_dep_on_grouping_fields(st_select_lex *sel);
};
template <template<class> class LI, class T> class Item_equal_iterator;
@@ -2132,7 +2243,7 @@ template <template<class> class LI, class T> class Item_equal_iterator;
All equality predicates of the form field1=field2 contained in a
conjunction are substituted for a sequence of items of this class.
An item of this class Item_equal(f1,f2,...fk) represents a
- multiple equality f1=f2=...=fk.
+ multiple equality f1=f2=...=fk.l
If a conjunction contains predicates f1=f2 and f2=f3, a new item of
this class is created Item_equal(f1,f2,f3) representing the multiple
@@ -2274,7 +2385,7 @@ public:
longlong val_int();
const char *func_name() const { return "multiple equal"; }
void sort(Item_field_cmpfunc compare, void *arg);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool fix_fields(THD *thd, Item **ref);
void cleanup()
{
@@ -2289,7 +2400,7 @@ public:
uint *and_level, table_map usable_tables,
SARGABLE_PARAM **sargables);
SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr);
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg);
+ bool walk(Item_processor processor, bool walk_subquery, void *arg);
Item *transform(THD *thd, Item_transformer transformer, uchar *arg);
virtual void print(String *str, enum_query_type query_type);
Item_result compare_type() const { return m_compare_type; }
@@ -2297,8 +2408,17 @@ public:
void set_context_field(Item_field *ctx_field) { context_field= ctx_field; }
void set_link_equal_fields(bool flag) { link_equal_fields= flag; }
+ Item* get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
+ /*
+ This does not comply with the specification of the virtual method,
+ but Item_equal items are processed distinguishly anyway
+ */
+ bool excl_dep_on_table(table_map tab_map)
+ {
+ return used_tables() & tab_map;
+ }
friend class Item_equal_fields_iterator;
- bool count_sargable_conds(uchar *arg);
+ bool count_sargable_conds(void *arg);
friend class Item_equal_iterator<List_iterator_fast,Item>;
friend class Item_equal_iterator<List_iterator,Item>;
friend Item *eliminate_item_equal(THD *thd, COND *cond,
@@ -2429,19 +2549,21 @@ public:
enum Functype functype() const { return COND_AND_FUNC; }
longlong val_int();
const char *func_name() const { return "and"; }
+ enum precedence precedence() const { return AND_PRECEDENCE; }
table_map not_null_tables() const
{ return abort_on_null ? not_null_tables_cache: and_tables_cache; }
Item *copy_andor_structure(THD *thd);
Item *neg_transformer(THD *thd);
void mark_as_condition_AND_part(TABLE_LIST *embedding);
virtual uint exists2in_reserved_items() { return list.elements; };
- bool walk_top_and(Item_processor processor, uchar *arg);
COND *build_equal_items(THD *thd, COND_EQUAL *inherited,
bool link_item_fields,
COND_EQUAL **cond_equal_ref);
void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level,
table_map usable_tables, SARGABLE_PARAM **sargables);
SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cond_and>(thd, mem_root, this); }
};
inline bool is_cond_and(Item *item)
@@ -2463,9 +2585,12 @@ public:
enum Functype functype() const { return COND_OR_FUNC; }
longlong val_int();
const char *func_name() const { return "or"; }
+ enum precedence precedence() const { return OR_PRECEDENCE; }
table_map not_null_tables() const { return and_tables_cache; }
Item *copy_andor_structure(THD *thd);
Item *neg_transformer(THD *thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_cond_or>(thd, mem_root, this); }
};
class Item_func_dyncol_check :public Item_bool_func
@@ -2474,6 +2599,9 @@ public:
Item_func_dyncol_check(THD *thd, Item *str): Item_bool_func(thd, str) {}
longlong val_int();
const char *func_name() const { return "column_check"; }
+ bool need_parentheses_in_default() { return false; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dyncol_check>(thd, mem_root, this); }
};
class Item_func_dyncol_exists :public Item_bool_func
@@ -2483,6 +2611,9 @@ public:
Item_bool_func(thd, str, num) {}
longlong val_int();
const char *func_name() const { return "column_exists"; }
+ bool need_parentheses_in_default() { return false; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dyncol_exists>(thd, mem_root, this); }
};
inline bool is_cond_or(Item *item)
@@ -2496,10 +2627,6 @@ inline bool is_cond_or(Item *item)
Item *and_expressions(Item *a, Item *b, Item **org_item);
-longlong get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
- enum_field_types f_type, bool *is_null);
-
-
class Comp_creator
{
public:
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 82f6bbd3173..3218d4df844 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -71,7 +71,7 @@ static void wrong_precision_error(uint errcode, Item *a,
*/
bool get_length_and_scale(ulonglong length, ulonglong decimals,
- ulong *out_length, uint *out_decimals,
+ uint *out_length, uint *out_decimals,
uint max_precision, uint max_scale,
Item *a)
{
@@ -88,8 +88,9 @@ bool get_length_and_scale(ulonglong length, ulonglong decimals,
*out_decimals= (uint) decimals;
my_decimal_trim(&length, out_decimals);
- *out_length= (ulong) length;
-
+ *out_length= (uint) length;
+
+
if (*out_length < *out_decimals)
{
my_error(ER_M_BIGGER_THAN_D, MYF(0), "");
@@ -1262,6 +1263,34 @@ protected:
#ifdef HAVE_SPATIAL
+class Create_func_geometry_from_json : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_geometry_from_json s_singleton;
+
+protected:
+ Create_func_geometry_from_json() {}
+ virtual ~Create_func_geometry_from_json() {}
+};
+
+
+class Create_func_as_geojson : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_as_geojson s_singleton;
+
+protected:
+ Create_func_as_geojson() {}
+ virtual ~Create_func_as_geojson() {}
+};
+#endif /*HAVE_SPATIAL*/
+
+
+#ifdef HAVE_SPATIAL
class Create_func_geometry_type : public Create_func_arg1
{
public:
@@ -1708,6 +1737,344 @@ protected:
#endif
+class Create_func_json_exists : public Create_func_arg2
+{
+public:
+ virtual Item *create_2_arg(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_json_exists s_singleton;
+
+protected:
+ Create_func_json_exists() {}
+ virtual ~Create_func_json_exists() {}
+};
+
+
+class Create_func_json_valid : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_json_valid s_singleton;
+
+protected:
+ Create_func_json_valid() {}
+ virtual ~Create_func_json_valid() {}
+};
+
+
+class Create_func_json_compact : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_json_compact s_singleton;
+
+protected:
+ Create_func_json_compact() {}
+ virtual ~Create_func_json_compact() {}
+};
+
+
+class Create_func_json_loose : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_json_loose s_singleton;
+
+protected:
+ Create_func_json_loose() {}
+ virtual ~Create_func_json_loose() {}
+};
+
+
+class Create_func_json_detailed: public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_detailed s_singleton;
+
+protected:
+ Create_func_json_detailed() {}
+ virtual ~Create_func_json_detailed() {}
+};
+
+
+class Create_func_json_type : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_json_type s_singleton;
+
+protected:
+ Create_func_json_type() {}
+ virtual ~Create_func_json_type() {}
+};
+
+
+class Create_func_json_depth : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_json_depth s_singleton;
+
+protected:
+ Create_func_json_depth() {}
+ virtual ~Create_func_json_depth() {}
+};
+
+
+class Create_func_json_value : public Create_func_arg2
+{
+public:
+ virtual Item *create_2_arg(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_json_value s_singleton;
+
+protected:
+ Create_func_json_value() {}
+ virtual ~Create_func_json_value() {}
+};
+
+
+class Create_func_json_query : public Create_func_arg2
+{
+public:
+ virtual Item *create_2_arg(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_json_query s_singleton;
+
+protected:
+ Create_func_json_query() {}
+ virtual ~Create_func_json_query() {}
+};
+
+
+class Create_func_json_keys: public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_keys s_singleton;
+
+protected:
+ Create_func_json_keys() {}
+ virtual ~Create_func_json_keys() {}
+};
+
+
+class Create_func_json_contains: public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_contains s_singleton;
+
+protected:
+ Create_func_json_contains() {}
+ virtual ~Create_func_json_contains() {}
+};
+
+
+class Create_func_json_contains_path : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_contains_path s_singleton;
+
+protected:
+ Create_func_json_contains_path() {}
+ virtual ~Create_func_json_contains_path() {}
+};
+
+
+class Create_func_json_extract : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_extract s_singleton;
+
+protected:
+ Create_func_json_extract() {}
+ virtual ~Create_func_json_extract() {}
+};
+
+
+class Create_func_json_search : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_search s_singleton;
+
+protected:
+ Create_func_json_search() {}
+ virtual ~Create_func_json_search() {}
+};
+
+
+class Create_func_json_array : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_array s_singleton;
+
+protected:
+ Create_func_json_array() {}
+ virtual ~Create_func_json_array() {}
+};
+
+
+class Create_func_json_array_append : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_array_append s_singleton;
+
+protected:
+ Create_func_json_array_append() {}
+ virtual ~Create_func_json_array_append() {}
+};
+
+
+class Create_func_json_array_insert : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_array_insert s_singleton;
+
+protected:
+ Create_func_json_array_insert() {}
+ virtual ~Create_func_json_array_insert() {}
+};
+
+
+class Create_func_json_insert : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_insert s_singleton;
+
+protected:
+ Create_func_json_insert() {}
+ virtual ~Create_func_json_insert() {}
+};
+
+
+class Create_func_json_set : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_set s_singleton;
+
+protected:
+ Create_func_json_set() {}
+ virtual ~Create_func_json_set() {}
+};
+
+
+class Create_func_json_replace : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_replace s_singleton;
+
+protected:
+ Create_func_json_replace() {}
+ virtual ~Create_func_json_replace() {}
+};
+
+
+class Create_func_json_remove : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_remove s_singleton;
+
+protected:
+ Create_func_json_remove() {}
+ virtual ~Create_func_json_remove() {}
+};
+
+
+class Create_func_json_object : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_object s_singleton;
+
+protected:
+ Create_func_json_object() {}
+ virtual ~Create_func_json_object() {}
+};
+
+
+class Create_func_json_length : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_length s_singleton;
+
+protected:
+ Create_func_json_length() {}
+ virtual ~Create_func_json_length() {}
+};
+
+
+class Create_func_json_merge : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
+
+ static Create_func_json_merge s_singleton;
+
+protected:
+ Create_func_json_merge() {}
+ virtual ~Create_func_json_merge() {}
+};
+
+
+class Create_func_json_quote : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_json_quote s_singleton;
+
+protected:
+ Create_func_json_quote() {}
+ virtual ~Create_func_json_quote() {}
+};
+
+
+class Create_func_json_unquote : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_json_unquote s_singleton;
+
+protected:
+ Create_func_json_unquote() {}
+ virtual ~Create_func_json_unquote() {}
+};
+
+
class Create_func_last_day : public Create_func_arg1
{
public:
@@ -2943,7 +3310,7 @@ Create_udf_func Create_udf_func::s_singleton;
Item*
Create_udf_func::create_func(THD *thd, LEX_STRING name, List<Item> *item_list)
{
- udf_func *udf= find_udf(name.str, name.length);
+ udf_func *udf= find_udf(name.str, (uint)name.length);
DBUG_ASSERT(udf);
return create(thd, udf, item_list);
}
@@ -4234,6 +4601,101 @@ Create_func_geometry_from_wkb::create_native(THD *thd, LEX_STRING name,
#ifdef HAVE_SPATIAL
+Create_func_geometry_from_json Create_func_geometry_from_json::s_singleton;
+
+Item*
+Create_func_geometry_from_json::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *json= item_list->pop();
+ func= new (thd->mem_root) Item_func_geometry_from_json(thd, json);
+ thd->lex->uncacheable(UNCACHEABLE_RAND);
+ break;
+ }
+ case 2:
+ {
+ Item *json= item_list->pop();
+ Item *options= item_list->pop();
+ func= new (thd->mem_root) Item_func_geometry_from_json(thd, json, options);
+ break;
+ }
+ case 3:
+ {
+ Item *json= item_list->pop();
+ Item *options= item_list->pop();
+ Item *srid= item_list->pop();
+ func= new (thd->mem_root) Item_func_geometry_from_json(thd, json, options,
+ srid);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
+}
+
+
+Create_func_as_geojson Create_func_as_geojson::s_singleton;
+
+Item*
+Create_func_as_geojson::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count) {
+ case 1:
+ {
+ Item *geom= item_list->pop();
+ func= new (thd->mem_root) Item_func_as_geojson(thd, geom);
+ thd->lex->uncacheable(UNCACHEABLE_RAND);
+ break;
+ }
+ case 2:
+ {
+ Item *geom= item_list->pop();
+ Item *max_dec= item_list->pop();
+ func= new (thd->mem_root) Item_func_as_geojson(thd, geom, max_dec);
+ break;
+ }
+ case 3:
+ {
+ Item *geom= item_list->pop();
+ Item *max_dec= item_list->pop();
+ Item *options= item_list->pop();
+ func= new (thd->mem_root) Item_func_as_geojson(thd, geom, max_dec, options);
+ break;
+ }
+ default:
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+ }
+
+ return func;
+}
+#endif /*HAVE_SPATIAL*/
+
+
+#ifdef HAVE_SPATIAL
Create_func_geometry_type Create_func_geometry_type::s_singleton;
Item*
@@ -4572,6 +5034,123 @@ Create_func_issimple::create_1_arg(THD *thd, Item *arg1)
#endif
+Create_func_json_exists Create_func_json_exists::s_singleton;
+
+Item*
+Create_func_json_exists::create_2_arg(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_json_exists(thd, arg1, arg2);
+}
+
+
+Create_func_json_detailed Create_func_json_detailed::s_singleton;
+
+Item*
+Create_func_json_detailed::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_format(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_loose Create_func_json_loose::s_singleton;
+
+Item*
+Create_func_json_loose::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_json_format(thd, arg1,
+ Item_func_json_format::LOOSE);
+}
+
+
+Create_func_json_compact Create_func_json_compact::s_singleton;
+
+Item*
+Create_func_json_compact::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_json_format(thd, arg1,
+ Item_func_json_format::COMPACT);
+}
+
+
+Create_func_json_valid Create_func_json_valid::s_singleton;
+
+Item*
+Create_func_json_valid::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_json_valid(thd, arg1);
+}
+
+
+Create_func_json_type Create_func_json_type::s_singleton;
+
+Item*
+Create_func_json_type::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_json_type(thd, arg1);
+}
+
+
+Create_func_json_depth Create_func_json_depth::s_singleton;
+
+Item*
+Create_func_json_depth::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_json_depth(thd, arg1);
+}
+
+
+Create_func_json_value Create_func_json_value::s_singleton;
+
+Item*
+Create_func_json_value::create_2_arg(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_json_value(thd, arg1, arg2);
+}
+
+
+Create_func_json_query Create_func_json_query::s_singleton;
+
+Item*
+Create_func_json_query::create_2_arg(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_json_query(thd, arg1, arg2);
+}
+
+
+Create_func_json_quote Create_func_json_quote::s_singleton;
+
+Item*
+Create_func_json_quote::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_json_quote(thd, arg1);
+}
+
+
+Create_func_json_unquote Create_func_json_unquote::s_singleton;
+
+Item*
+Create_func_json_unquote::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_json_unquote(thd, arg1);
+}
+
+
Create_func_last_day Create_func_last_day::s_singleton;
Item*
@@ -4581,6 +5160,385 @@ Create_func_last_day::create_1_arg(THD *thd, Item *arg1)
}
+Create_func_json_array Create_func_json_array::s_singleton;
+
+Item*
+Create_func_json_array::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func;
+
+ if (item_list != NULL)
+ {
+ func= new (thd->mem_root) Item_func_json_array(thd, *item_list);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_array(thd);
+ }
+
+ return func;
+}
+
+
+Create_func_json_array_append Create_func_json_array_append::s_singleton;
+
+Item*
+Create_func_json_array_append::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_array_append(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_array_insert Create_func_json_array_insert::s_singleton;
+
+Item*
+Create_func_json_array_insert::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_array_insert(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_insert Create_func_json_insert::s_singleton;
+
+Item*
+Create_func_json_insert::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_insert(true, false,
+ thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_set Create_func_json_set::s_singleton;
+
+Item*
+Create_func_json_set::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_insert(true, true,
+ thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_replace Create_func_json_replace::s_singleton;
+
+Item*
+Create_func_json_replace::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_insert(false, true,
+ thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_remove Create_func_json_remove::s_singleton;
+
+Item*
+Create_func_json_remove::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 2 /*json_doc, path [,path]*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_remove(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_object Create_func_json_object::s_singleton;
+
+Item*
+Create_func_json_object::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func;
+ int arg_count;
+
+ if (item_list != NULL)
+ {
+ arg_count= item_list->elements;
+ if ((arg_count & 1) != 0 /*is odd*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ func= NULL;
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_object(thd, *item_list);
+ }
+ }
+ else
+ {
+ arg_count= 0;
+ func= new (thd->mem_root) Item_func_json_object(thd);
+ }
+
+ return func;
+}
+
+
+Create_func_json_length Create_func_json_length::s_singleton;
+
+Item*
+Create_func_json_length::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func;
+ int arg_count;
+
+ if (item_list == NULL ||
+ (arg_count= item_list->elements) == 0)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ func= NULL;
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_length(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_merge Create_func_json_merge::s_singleton;
+
+Item*
+Create_func_json_merge::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func;
+ int arg_count;
+
+ if (item_list == NULL ||
+ (arg_count= item_list->elements) < 2) // json, json
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ func= NULL;
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_merge(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_contains Create_func_json_contains::s_singleton;
+
+Item*
+Create_func_json_contains::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count == 2 || arg_count == 3/* json_doc, val, [path] */)
+ {
+ func= new (thd->mem_root) Item_func_json_contains(thd, *item_list);
+ }
+ else
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+
+ return func;
+}
+
+
+Create_func_json_keys Create_func_json_keys::s_singleton;
+
+Item*
+Create_func_json_keys::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_keys(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_contains_path Create_func_json_contains_path::s_singleton;
+
+Item*
+Create_func_json_contains_path::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 3 /* json_doc, one_or_all, path, [path]...*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_contains_path(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_extract Create_func_json_extract::s_singleton;
+
+Item*
+Create_func_json_extract::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 2 /* json_doc, path, [path]...*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_extract(thd, *item_list);
+ }
+
+ return func;
+}
+
+
+Create_func_json_search Create_func_json_search::s_singleton;
+
+Item*
+Create_func_json_search::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 3 /* json_doc, one_or_all, search_str, [escape_char[, path]...*/)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ }
+ else
+ {
+ func= new (thd->mem_root) Item_func_json_search(thd, *item_list);
+ }
+
+ return func;
+}
+
+
Create_func_last_insert_id Create_func_last_insert_id::s_singleton;
Item*
@@ -5615,7 +6573,7 @@ Create_func_weekofyear Create_func_weekofyear::s_singleton;
Item*
Create_func_weekofyear::create_1_arg(THD *thd, Item *arg1)
{
- Item *i1= new (thd->mem_root) Item_int(thd, (char*) "0", 3, 1);
+ Item *i1= new (thd->mem_root) Item_int(thd, (char*) "3", 3, 1);
return new (thd->mem_root) Item_func_week(thd, arg1, i1);
}
@@ -5852,6 +6810,32 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("ISSIMPLE") }, GEOM_BUILDER(Create_func_issimple)},
{ { C_STRING_WITH_LEN("IS_FREE_LOCK") }, BUILDER(Create_func_is_free_lock)},
{ { C_STRING_WITH_LEN("IS_USED_LOCK") }, BUILDER(Create_func_is_used_lock)},
+ { { C_STRING_WITH_LEN("JSON_ARRAY") }, BUILDER(Create_func_json_array)},
+ { { C_STRING_WITH_LEN("JSON_ARRAY_APPEND") }, BUILDER(Create_func_json_array_append)},
+ { { C_STRING_WITH_LEN("JSON_ARRAY_INSERT") }, BUILDER(Create_func_json_array_insert)},
+ { { C_STRING_WITH_LEN("JSON_COMPACT") }, BUILDER(Create_func_json_compact)},
+ { { C_STRING_WITH_LEN("JSON_CONTAINS") }, BUILDER(Create_func_json_contains)},
+ { { C_STRING_WITH_LEN("JSON_CONTAINS_PATH") }, BUILDER(Create_func_json_contains_path)},
+ { { C_STRING_WITH_LEN("JSON_DEPTH") }, BUILDER(Create_func_json_depth)},
+ { { C_STRING_WITH_LEN("JSON_DETAILED") }, BUILDER(Create_func_json_detailed)},
+ { { C_STRING_WITH_LEN("JSON_EXISTS") }, BUILDER(Create_func_json_exists)},
+ { { C_STRING_WITH_LEN("JSON_EXTRACT") }, BUILDER(Create_func_json_extract)},
+ { { C_STRING_WITH_LEN("JSON_INSERT") }, BUILDER(Create_func_json_insert)},
+ { { C_STRING_WITH_LEN("JSON_KEYS") }, BUILDER(Create_func_json_keys)},
+ { { C_STRING_WITH_LEN("JSON_LENGTH") }, BUILDER(Create_func_json_length)},
+ { { C_STRING_WITH_LEN("JSON_LOOSE") }, BUILDER(Create_func_json_loose)},
+ { { C_STRING_WITH_LEN("JSON_MERGE") }, BUILDER(Create_func_json_merge)},
+ { { C_STRING_WITH_LEN("JSON_QUERY") }, BUILDER(Create_func_json_query)},
+ { { C_STRING_WITH_LEN("JSON_QUOTE") }, BUILDER(Create_func_json_quote)},
+ { { C_STRING_WITH_LEN("JSON_OBJECT") }, BUILDER(Create_func_json_object)},
+ { { C_STRING_WITH_LEN("JSON_REMOVE") }, BUILDER(Create_func_json_remove)},
+ { { C_STRING_WITH_LEN("JSON_REPLACE") }, BUILDER(Create_func_json_replace)},
+ { { C_STRING_WITH_LEN("JSON_SET") }, BUILDER(Create_func_json_set)},
+ { { C_STRING_WITH_LEN("JSON_SEARCH") }, BUILDER(Create_func_json_search)},
+ { { C_STRING_WITH_LEN("JSON_TYPE") }, BUILDER(Create_func_json_type)},
+ { { C_STRING_WITH_LEN("JSON_UNQUOTE") }, BUILDER(Create_func_json_unquote)},
+ { { C_STRING_WITH_LEN("JSON_VALID") }, BUILDER(Create_func_json_valid)},
+ { { C_STRING_WITH_LEN("JSON_VALUE") }, BUILDER(Create_func_json_value)},
{ { C_STRING_WITH_LEN("LAST_DAY") }, BUILDER(Create_func_last_day)},
{ { C_STRING_WITH_LEN("LAST_INSERT_ID") }, BUILDER(Create_func_last_insert_id)},
{ { C_STRING_WITH_LEN("LCASE") }, BUILDER(Create_func_lcase)},
@@ -5882,6 +6866,7 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("MBRCONTAINS") }, GEOM_BUILDER(Create_func_mbr_contains)},
{ { C_STRING_WITH_LEN("MBRDISJOINT") }, GEOM_BUILDER(Create_func_mbr_disjoint)},
{ { C_STRING_WITH_LEN("MBREQUAL") }, GEOM_BUILDER(Create_func_mbr_equals)},
+ { { C_STRING_WITH_LEN("MBREQUALS") }, GEOM_BUILDER(Create_func_mbr_equals)},
{ { C_STRING_WITH_LEN("MBRINTERSECTS") }, GEOM_BUILDER(Create_func_mbr_intersects)},
{ { C_STRING_WITH_LEN("MBROVERLAPS") }, GEOM_BUILDER(Create_func_mbr_overlaps)},
{ { C_STRING_WITH_LEN("MBRTOUCHES") }, GEOM_BUILDER(Create_func_touches)},
@@ -5949,6 +6934,7 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("STR_TO_DATE") }, BUILDER(Create_func_str_to_date)},
{ { C_STRING_WITH_LEN("ST_AREA") }, GEOM_BUILDER(Create_func_area)},
{ { C_STRING_WITH_LEN("ST_ASBINARY") }, GEOM_BUILDER(Create_func_as_wkb)},
+ { { C_STRING_WITH_LEN("ST_ASGEOJSON") }, GEOM_BUILDER(Create_func_as_geojson)},
{ { C_STRING_WITH_LEN("ST_ASTEXT") }, GEOM_BUILDER(Create_func_as_wkt)},
{ { C_STRING_WITH_LEN("ST_ASWKB") }, GEOM_BUILDER(Create_func_as_wkb)},
{ { C_STRING_WITH_LEN("ST_ASWKT") }, GEOM_BUILDER(Create_func_as_wkt)},
@@ -5974,6 +6960,7 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("ST_GEOMETRYFROMWKB") }, GEOM_BUILDER(Create_func_geometry_from_wkb)},
{ { C_STRING_WITH_LEN("ST_GEOMETRYN") }, GEOM_BUILDER(Create_func_geometryn)},
{ { C_STRING_WITH_LEN("ST_GEOMETRYTYPE") }, GEOM_BUILDER(Create_func_geometry_type)},
+ { { C_STRING_WITH_LEN("ST_GEOMFROMGEOJSON") }, GEOM_BUILDER(Create_func_geometry_from_json)},
{ { C_STRING_WITH_LEN("ST_GEOMFROMTEXT") }, GEOM_BUILDER(Create_func_geometry_from_text)},
{ { C_STRING_WITH_LEN("ST_GEOMFROMWKB") }, GEOM_BUILDER(Create_func_geometry_from_wkb)},
#ifndef DBUG_OFF
@@ -6196,7 +7183,7 @@ create_func_cast(THD *thd, Item *a, Cast_target cast_type,
break;
case ITEM_CAST_DECIMAL:
{
- ulong len;
+ uint len;
uint dec;
if (get_length_and_scale(length, decimals, &len, &dec,
DECIMAL_MAX_PRECISION, DECIMAL_MAX_SCALE,
@@ -6207,9 +7194,7 @@ create_func_cast(THD *thd, Item *a, Cast_target cast_type,
}
case ITEM_CAST_DOUBLE:
{
- ulong len;
- uint dec;
-
+ uint len, dec;
if (!c_len)
{
length= DBL_DIG+7;
@@ -6279,7 +7264,7 @@ Item *create_temporal_literal(THD *thd,
MYSQL_TIME_STATUS status;
MYSQL_TIME ltime;
Item *item= NULL;
- ulonglong flags= sql_mode_for_dates(thd);
+ sql_mode_t flags= sql_mode_for_dates(thd);
switch(type)
{
@@ -6314,7 +7299,7 @@ Item *create_temporal_literal(THD *thd,
ErrConvString err(str, length, cs);
make_truncated_value_warning(thd,
Sql_condition::time_warn_level(status.warnings),
- &err, ltime.time_type, 0);
+ &err, ltime.time_type, 0, 0);
}
return item;
}
diff --git a/sql/item_func.cc b/sql/item_func.cc
index a1a2c3f1d1c..33f0b982445 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -53,6 +53,7 @@
#include "sp.h"
#include "set_var.h"
#include "debug_sync.h"
+#include "sql_cte.h"
#ifdef NO_EMBEDDED_ACCESS_CHECKS
#define sp_restore_security_context(A,B) while (0) {}
@@ -120,6 +121,7 @@ void Item_func::sync_with_sum_func_and_with_field(List<Item> &list)
while ((item= li++))
{
with_sum_func|= item->with_sum_func;
+ with_window_func|= item->with_window_func;
with_field|= item->with_field;
with_param|= item->with_param;
}
@@ -141,8 +143,9 @@ void Item_func::sync_with_sum_func_and_with_field(List<Item> &list)
is to allow all Item_field() objects to setup pointers to the table fields.
Sets as a side effect the following class variables:
- maybe_null Set if any argument may return NULL
- with_sum_func Set if any of the arguments contains a sum function
+ maybe_null Set if any argument may return NULL
+ with_sum_func Set if any of the arguments contains a sum function
+ with_window_func Set if any of the arguments contain a window function
with_field Set if any of the arguments contains or is a field
used_tables_cache Set to union of the tables used by arguments
@@ -216,13 +219,13 @@ Item_func::fix_fields(THD *thd, Item **ref)
with_sum_func= with_sum_func || item->with_sum_func;
with_param= with_param || item->with_param;
+ with_window_func= with_window_func || item->with_window_func;
with_field= with_field || item->with_field;
used_tables_and_const_cache_join(item);
with_subselect|= item->has_subquery();
}
}
- fix_length_and_dec();
- if (thd->is_error()) // An error inside fix_length_and_dec occurred
+ if (fix_length_and_dec())
return TRUE;
fixed= 1;
return FALSE;
@@ -245,7 +248,7 @@ Item_func::quick_fix_field()
bool
-Item_func::eval_not_null_tables(uchar *opt_arg)
+Item_func::eval_not_null_tables(void *opt_arg)
{
Item **arg,**arg_end;
not_null_tables_cache= 0;
@@ -422,7 +425,7 @@ void Item_args::propagate_equal_fields(THD *thd,
See comments in Item_cond::split_sum_func()
*/
-void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array,
+void Item_func::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
{
Item **arg, **arg_end;
@@ -460,16 +463,15 @@ void Item_func::print_args(String *str, uint from, enum_query_type query_type)
void Item_func::print_op(String *str, enum_query_type query_type)
{
- str->append('(');
for (uint i=0 ; i < arg_count-1 ; i++)
{
- args[i]->print(str, query_type);
+ args[i]->print_parenthesised(str, query_type, precedence());
str->append(' ');
str->append(func_name());
str->append(' ');
}
- args[arg_count-1]->print(str, query_type);
- str->append(')');
+ args[arg_count-1]->print_parenthesised(str, query_type,
+ (enum precedence)(precedence() + 1));
}
@@ -637,14 +639,15 @@ void Item_func::count_real_length(Item **items, uint nitems)
unsigned_flag= false;
for (uint i=0 ; i < nitems ; i++)
{
- if (decimals != NOT_FIXED_DEC)
+ if (decimals < FLOATING_POINT_DECIMALS)
{
set_if_bigger(decimals, items[i]->decimals);
+ /* Will be ignored if items[i]->decimals >= FLOATING_POINT_DECIMALS */
set_if_bigger(length, (items[i]->max_length - items[i]->decimals));
}
set_if_bigger(max_length, items[i]->max_length);
}
- if (decimals != NOT_FIXED_DEC)
+ if (decimals < FLOATING_POINT_DECIMALS)
{
max_length= length;
length+= decimals;
@@ -717,10 +720,12 @@ String *Item_int_func::val_str(String *str)
}
-void Item_func_connection_id::fix_length_and_dec()
+bool Item_func_connection_id::fix_length_and_dec()
{
- Item_int_func::fix_length_and_dec();
+ if (Item_int_func::fix_length_and_dec())
+ return TRUE;
max_length= 10;
+ return FALSE;
}
@@ -739,7 +744,7 @@ bool Item_func_connection_id::fix_fields(THD *thd, Item **ref)
function of two arguments.
*/
-void Item_num_op::fix_length_and_dec(void)
+bool Item_num_op::fix_length_and_dec(void)
{
DBUG_ENTER("Item_num_op::fix_length_and_dec");
DBUG_PRINT("info", ("name %s", func_name()));
@@ -775,7 +780,7 @@ void Item_num_op::fix_length_and_dec(void)
result_type() == DECIMAL_RESULT ? "DECIMAL_RESULT" :
result_type() == INT_RESULT ? "INT_RESULT" :
"--ILLEGAL!!!--")));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -785,7 +790,7 @@ void Item_num_op::fix_length_and_dec(void)
type depends only on the first argument)
*/
-void Item_func_num1::fix_length_and_dec()
+bool Item_func_num1::fix_length_and_dec()
{
DBUG_ENTER("Item_func_num1::fix_length_and_dec");
DBUG_PRINT("info", ("name %s", func_name()));
@@ -816,7 +821,7 @@ void Item_func_num1::fix_length_and_dec()
result_type() == DECIMAL_RESULT ? "DECIMAL_RESULT" :
result_type() == INT_RESULT ? "INT_RESULT" :
"--ILLEGAL!!!--")));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -928,10 +933,7 @@ longlong Item_func_hybrid_field_type::val_int()
case INT_RESULT:
return int_op();
case REAL_RESULT:
- {
- bool error;
- return double_to_longlong(real_op(), unsigned_flag, &error);
- }
+ return Converter_double_to_longlong(real_op(), unsigned_flag).result();
case TIME_RESULT:
{
MYSQL_TIME ltime;
@@ -1009,6 +1011,7 @@ bool Item_func_hybrid_field_type::get_date(MYSQL_TIME *ltime,
my_decimal value, *res;
if (!(res= decimal_op_with_null_check(&value)) ||
decimal_to_datetime_with_warn(res, ltime, fuzzydate,
+ field_table_or_null(),
field_name_or_null()))
goto err;
break;
@@ -1019,6 +1022,7 @@ bool Item_func_hybrid_field_type::get_date(MYSQL_TIME *ltime,
bool neg= !unsigned_flag && value < 0;
if (null_value || int_to_datetime_with_warn(neg, neg ? -value : value,
ltime, fuzzydate,
+ field_table_or_null(),
field_name_or_null()))
goto err;
break;
@@ -1027,14 +1031,15 @@ bool Item_func_hybrid_field_type::get_date(MYSQL_TIME *ltime,
{
double value= real_op();
if (null_value || double_to_datetime_with_warn(value, ltime, fuzzydate,
+ field_table_or_null(),
field_name_or_null()))
goto err;
break;
}
case TIME_RESULT:
return date_op(ltime,
- fuzzydate |
- (field_type() == MYSQL_TYPE_TIME ? TIME_TIME_ONLY : 0));
+ (uint)(fuzzydate |
+ (field_type() == MYSQL_TYPE_TIME ? TIME_TIME_ONLY : 0)));
case STRING_RESULT:
{
char buff[40];
@@ -1066,7 +1071,7 @@ void Item_func_signed::print(String *str, enum_query_type query_type)
}
-longlong Item_func_signed::val_int_from_str(int *error)
+longlong Item::val_int_from_str(int *error)
{
char buff[MAX_FIELD_WIDTH];
String tmp(buff,sizeof(buff), &my_charset_bin), *res;
@@ -1076,13 +1081,11 @@ longlong Item_func_signed::val_int_from_str(int *error)
to a longlong
*/
- if (!(res= args[0]->val_str(&tmp)))
+ if (!(res= val_str(&tmp)))
{
- null_value= 1;
*error= 0;
return 0;
}
- null_value= 0;
Converter_strtoll10_with_warn cnv(NULL, Warn_filter_all(),
res->charset(), res->ptr(), res->length());
*error= cnv.error();
@@ -1090,37 +1093,15 @@ longlong Item_func_signed::val_int_from_str(int *error)
}
-longlong Item_func_signed::val_int()
+longlong Item::val_int_signed_typecast()
{
- longlong value;
- int error;
-
- if (args[0]->cast_to_int_type() != STRING_RESULT)
- {
- value= args[0]->val_int();
- null_value= args[0]->null_value;
- return value;
- }
- else if (args[0]->dynamic_result())
- {
- /* We come here when argument has an unknown type */
- args[0]->unsigned_flag= 0; // Mark that we want to have a signed value
- value= args[0]->val_int();
- null_value= args[0]->null_value;
- if (!null_value && args[0]->unsigned_flag && value < 0)
- goto err; // Warn about overflow
- return value;
- }
-
- value= val_int_from_str(&error);
- if (value < 0 && error == 0)
- goto err;
- return value;
+ if (cast_to_int_type() != STRING_RESULT)
+ return val_int();
-err:
- push_warning(current_thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR,
- "Cast to signed converted positive out-of-range integer to "
- "it's negative complement");
+ int error;
+ longlong value= val_int_from_str(&error);
+ if (!null_value && value < 0 && error == 0)
+ push_note_converted_to_negative_complement(current_thd);
return value;
}
@@ -1134,49 +1115,30 @@ void Item_func_unsigned::print(String *str, enum_query_type query_type)
}
-longlong Item_func_unsigned::val_int()
+longlong Item::val_int_unsigned_typecast()
{
- longlong value;
- int error;
-
- if (args[0]->cast_to_int_type() == DECIMAL_RESULT)
+ if (cast_to_int_type() == DECIMAL_RESULT)
{
- my_decimal tmp, *dec= args[0]->val_decimal(&tmp);
- if (!(null_value= args[0]->null_value))
+ longlong value;
+ my_decimal tmp, *dec= val_decimal(&tmp);
+ if (!null_value)
my_decimal2int(E_DEC_FATAL_ERROR, dec, 1, &value);
else
value= 0;
return value;
}
- else if (args[0]->dynamic_result())
- {
- /* We come here when argument has an unknown type */
- args[0]->unsigned_flag= 1; // Mark that we want to have an unsigned value
- value= args[0]->val_int();
- null_value= args[0]->null_value;
- if (!null_value && args[0]->unsigned_flag == 0 && value < 0)
- goto err; // Warn about overflow
- return value;
- }
- else if (args[0]->cast_to_int_type() != STRING_RESULT)
+ else if (cast_to_int_type() != STRING_RESULT)
{
- value= args[0]->val_int();
- null_value= args[0]->null_value;
- if (!null_value && args[0]->unsigned_flag == 0 && value < 0)
- goto err; // Warn about overflow
+ longlong value= val_int();
+ if (!null_value && unsigned_flag == 0 && value < 0)
+ push_note_converted_to_positive_complement(current_thd);
return value;
}
- value= val_int_from_str(&error);
- if (error < 0)
- goto err;
-
- return value;
-
-err:
- push_warning(current_thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR,
- "Cast to unsigned converted negative integer to it's "
- "positive complement");
+ int error;
+ longlong value= val_int_from_str(&error);
+ if (!null_value && error < 0)
+ push_note_converted_to_positive_complement(current_thd);
return value;
}
@@ -1447,12 +1409,14 @@ void Item_func_additive_op::result_precision()
subtraction of UNSIGNED BIGINT to return negative values.
*/
-void Item_func_minus::fix_length_and_dec()
+bool Item_func_minus::fix_length_and_dec()
{
- Item_num_op::fix_length_and_dec();
+ if (Item_num_op::fix_length_and_dec())
+ return TRUE;
if (unsigned_flag &&
(current_thd->variables.sql_mode & MODE_NO_UNSIGNED_SUBTRACTION))
unsigned_flag=0;
+ return FALSE;
}
@@ -1750,11 +1714,12 @@ void Item_func_div::result_precision()
}
-void Item_func_div::fix_length_and_dec()
+bool Item_func_div::fix_length_and_dec()
{
DBUG_ENTER("Item_func_div::fix_length_and_dec");
prec_increment= current_thd->variables.div_precincrement;
- Item_num_op::fix_length_and_dec();
+ if (Item_num_op::fix_length_and_dec())
+ DBUG_RETURN(TRUE);
switch (Item_func_div::result_type()) {
case REAL_RESULT:
{
@@ -1785,7 +1750,7 @@ void Item_func_div::fix_length_and_dec()
DBUG_ASSERT(0);
}
maybe_null= 1; // devision by zero
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -1861,7 +1826,7 @@ longlong Item_func_int_div::val_int()
}
-void Item_func_int_div::fix_length_and_dec()
+bool Item_func_int_div::fix_length_and_dec()
{
Item_result argtype= args[0]->result_type();
/* use precision ony for the data type it is applicable for and valid */
@@ -1872,6 +1837,7 @@ void Item_func_int_div::fix_length_and_dec()
MY_INT64_NUM_DECIMAL_DIGITS : char_length);
maybe_null=1;
unsigned_flag=args[0]->unsigned_flag | args[1]->unsigned_flag;
+ return false;
}
@@ -1955,11 +1921,13 @@ void Item_func_mod::result_precision()
}
-void Item_func_mod::fix_length_and_dec()
+bool Item_func_mod::fix_length_and_dec()
{
- Item_num_op::fix_length_and_dec();
+ if (Item_num_op::fix_length_and_dec())
+ return true;
maybe_null= 1;
unsigned_flag= args[0]->unsigned_flag;
+ return false;
}
@@ -2006,10 +1974,11 @@ my_decimal *Item_func_neg::decimal_op(my_decimal *decimal_value)
}
-void Item_func_neg::fix_length_and_dec()
+bool Item_func_neg::fix_length_and_dec()
{
DBUG_ENTER("Item_func_neg::fix_length_and_dec");
- Item_func_num1::fix_length_and_dec();
+ if (Item_func_num1::fix_length_and_dec())
+ DBUG_RETURN(TRUE);
/* 1 add because sign can appear */
max_length= args[0]->max_length + 1;
@@ -2035,7 +2004,7 @@ void Item_func_neg::fix_length_and_dec()
}
}
unsigned_flag= 0;
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -2075,10 +2044,12 @@ my_decimal *Item_func_abs::decimal_op(my_decimal *decimal_value)
}
-void Item_func_abs::fix_length_and_dec()
+bool Item_func_abs::fix_length_and_dec()
{
- Item_func_num1::fix_length_and_dec();
+ if (Item_func_num1::fix_length_and_dec())
+ return TRUE;
unsigned_flag= args[0]->unsigned_flag;
+ return FALSE;
}
@@ -2310,7 +2281,7 @@ longlong Item_func_bit_neg::val_int()
// Conversion functions
-void Item_func_int_val::fix_length_and_dec()
+bool Item_func_int_val::fix_length_and_dec()
{
DBUG_ENTER("Item_func_int_val::fix_length_and_dec");
DBUG_PRINT("info", ("name %s", func_name()));
@@ -2358,7 +2329,7 @@ void Item_func_int_val::fix_length_and_dec()
result_type() == INT_RESULT ? "INT_RESULT" :
"--ILLEGAL!!!--")));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -2456,7 +2427,7 @@ my_decimal *Item_func_floor::decimal_op(my_decimal *decimal_value)
}
-void Item_func_round::fix_length_and_dec()
+bool Item_func_round::fix_length_and_dec()
{
int decimals_to_set;
longlong val1;
@@ -2474,12 +2445,12 @@ void Item_func_round::fix_length_and_dec()
}
else
set_handler_by_result_type(REAL_RESULT);
- return;
+ return FALSE;
}
val1= args[1]->val_int();
if ((null_value= args[1]->null_value))
- return;
+ return FALSE;
val1_unsigned= args[1]->unsigned_flag;
if (val1 < 0)
@@ -2492,7 +2463,7 @@ void Item_func_round::fix_length_and_dec()
decimals= MY_MIN(decimals_to_set, NOT_FIXED_DEC);
max_length= float_length(decimals);
set_handler_by_result_type(REAL_RESULT);
- return;
+ return FALSE;
}
switch (args[0]->result_type()) {
@@ -2533,6 +2504,7 @@ void Item_func_round::fix_length_and_dec()
case TIME_RESULT:
DBUG_ASSERT(0); /* This result type isn't handled */
}
+ return FALSE;
}
double my_double_round(double value, longlong dec, bool dec_unsigned,
@@ -2757,7 +2729,7 @@ double Item_func_units::val_real()
}
-void Item_func_min_max::fix_length_and_dec()
+bool Item_func_min_max::fix_length_and_dec()
{
uint unsigned_count= 0;
int max_int_part=0;
@@ -2870,6 +2842,7 @@ void Item_func_min_max::fix_length_and_dec()
set_handler_by_field_type(MYSQL_TYPE_DOUBLE);
break;
}
+ return FALSE;
}
@@ -2926,7 +2899,7 @@ bool Item_func_min_max::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
ltime->hour+= (ltime->month * 32 + ltime->day) * 24;
ltime->year= ltime->month= ltime->day= 0;
if (adjust_time_range_with_warn(ltime,
- std::min<uint>(decimals, TIME_SECOND_PART_DIGITS)))
+ MY_MIN(decimals, TIME_SECOND_PART_DIGITS)))
return (null_value= true);
}
@@ -3120,10 +3093,10 @@ longlong Item_func_coercibility::val_int()
}
-void Item_func_locate::fix_length_and_dec()
+bool Item_func_locate::fix_length_and_dec()
{
max_length= MY_INT32_NUM_DECIMAL_DIGITS;
- agg_arg_charsets_for_comparison(cmp_collation, args, 2);
+ return agg_arg_charsets_for_comparison(cmp_collation, args, 2);
}
@@ -3240,14 +3213,15 @@ longlong Item_func_field::val_int()
}
-void Item_func_field::fix_length_and_dec()
+bool Item_func_field::fix_length_and_dec()
{
maybe_null=0; max_length=3;
cmp_type= args[0]->result_type();
for (uint i=1; i < arg_count ; i++)
cmp_type= item_cmp_type(cmp_type, args[i]->result_type());
if (cmp_type == STRING_RESULT)
- agg_arg_charsets_for_comparison(cmp_collation, args, arg_count);
+ return agg_arg_charsets_for_comparison(cmp_collation, args, arg_count);
+ return FALSE;
}
@@ -3278,8 +3252,8 @@ longlong Item_func_ord::val_int()
#ifdef USE_MB
if (use_mb(res->charset()))
{
- register const char *str=res->ptr();
- register uint32 n=0, l=my_ismbchar(res->charset(),str,str+res->length());
+ const char *str=res->ptr();
+ uint32 n=0, l=my_ismbchar(res->charset(),str,str+res->length());
if (!l)
return (longlong)((uchar) *str);
while (l--)
@@ -3294,7 +3268,7 @@ longlong Item_func_ord::val_int()
/* Returns number of found type >= 1 or 0 if not found */
/* This optimizes searching in enums to bit testing! */
-void Item_func_find_in_set::fix_length_and_dec()
+bool Item_func_find_in_set::fix_length_and_dec()
{
decimals=0;
max_length=3; // 1-999
@@ -3316,7 +3290,7 @@ void Item_func_find_in_set::fix_length_and_dec()
}
}
}
- agg_arg_charsets_for_comparison(cmp_collation, args, 2);
+ return agg_arg_charsets_for_comparison(cmp_collation, args, 2);
}
static const char separator=',';
@@ -3500,13 +3474,15 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
if (item->maybe_null)
func->maybe_null=1;
func->with_sum_func= func->with_sum_func || item->with_sum_func;
+ func->with_window_func= func->with_window_func ||
+ item->with_window_func;
func->with_field= func->with_field || item->with_field;
func->with_param= func->with_param || item->with_param;
func->with_subselect|= item->with_subselect;
func->used_tables_and_const_cache_join(item);
f_args.arg_type[i]=item->result_type();
}
- //TODO: why all following memory is not allocated with 1 call of sql_alloc?
+ //TODO: why all following memory is not allocated with 1 thd->alloc() call?
if (!(buffers=new String[arg_count]) ||
!(f_args.args= (char**) thd->alloc(arg_count * sizeof(char *))) ||
!(f_args.lengths= (ulong*) thd->alloc(arg_count * sizeof(long))) ||
@@ -3522,7 +3498,8 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
DBUG_RETURN(TRUE);
}
}
- func->fix_length_and_dec();
+ if (func->fix_length_and_dec())
+ DBUG_RETURN(TRUE);
initid.max_length=func->max_length;
initid.maybe_null=func->maybe_null;
initid.const_item=func->const_item_cache;
@@ -3860,13 +3837,13 @@ String *Item_func_udf_decimal::val_str(String *str)
/* Default max_length is max argument length */
-void Item_func_udf_str::fix_length_and_dec()
+bool Item_func_udf_str::fix_length_and_dec()
{
DBUG_ENTER("Item_func_udf_str::fix_length_and_dec");
max_length=0;
for (uint i = 0; i < arg_count; i++)
set_if_bigger(max_length,args[i]->max_length);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
String *Item_func_udf_str::val_str(String *str)
@@ -4162,7 +4139,7 @@ public:
bool handle_condition(THD * /* thd */, uint sql_errno,
const char * /* sqlstate */,
- Sql_condition::enum_warning_level /* level */,
+ Sql_condition::enum_warning_level* /* level */,
const char *message,
Sql_condition ** /* cond_hdl */);
};
@@ -4171,7 +4148,7 @@ bool
Lock_wait_timeout_handler::
handle_condition(THD *thd, uint sql_errno,
const char * /* sqlstate */,
- Sql_condition::enum_warning_level /* level */,
+ Sql_condition::enum_warning_level* /* level */,
const char *message,
Sql_condition ** /* cond_hdl */)
{
@@ -4337,7 +4314,8 @@ longlong Item_func_release_lock::val_int()
User_level_lock *ull;
- if (!(ull=
+ if (!my_hash_inited(&thd->ull_hash) ||
+ !(ull=
(User_level_lock*) my_hash_search(&thd->ull_hash,
ull_key.ptr(), ull_key.length())))
{
@@ -4612,6 +4590,11 @@ longlong Item_func_sleep::val_int()
}
+bool Item_func_user_var::check_vcol_func_processor(void *arg)
+{
+ return mark_unsupported_function("@", name.str, arg, VCOL_NON_DETERMINISTIC);
+}
+
#define extra_size sizeof(double)
user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
@@ -4733,15 +4716,21 @@ bool Item_func_set_user_var::fix_fields(THD *thd, Item **ref)
TABLE_LIST *derived;
for (derived= unit->derived;
derived;
- derived= derived->select_lex->master_unit()->derived)
+ derived= unit->derived)
+ {
derived->set_materialized_derived();
+ derived->prohibit_cond_pushdown= true;
+ if (unit->with_element && unit->with_element->is_recursive)
+ break;
+ unit= derived->select_lex->master_unit();
+ }
}
return FALSE;
}
-void
+bool
Item_func_set_user_var::fix_length_and_dec()
{
maybe_null=args[0]->maybe_null;
@@ -4755,6 +4744,7 @@ Item_func_set_user_var::fix_length_and_dec()
args[0]->collation.collation);
}
unsigned_flag= args[0]->unsigned_flag;
+ return FALSE;
}
@@ -4766,7 +4756,7 @@ Item_func_set_user_var::fix_length_and_dec()
column read set or to register used fields in a view
*/
-bool Item_func_set_user_var::register_field_in_read_map(uchar *arg)
+bool Item_func_set_user_var::register_field_in_read_map(void *arg)
{
if (result_field)
{
@@ -4775,7 +4765,7 @@ bool Item_func_set_user_var::register_field_in_read_map(uchar *arg)
bitmap_set_bit(result_field->table->read_set, result_field->field_index);
if (result_field->vcol_info)
return result_field->vcol_info->
- expr_item->walk(&Item::register_field_in_read_map, 1, arg);
+ expr->walk(&Item::register_field_in_read_map, 1, arg);
}
return 0;
}
@@ -4785,7 +4775,7 @@ bool Item_func_set_user_var::register_field_in_read_map(uchar *arg)
*/
-bool Item_func_set_user_var::register_field_in_bitmap(uchar *arg)
+bool Item_func_set_user_var::register_field_in_bitmap(void *arg)
{
MY_BITMAP *bitmap = (MY_BITMAP *) arg;
DBUG_ASSERT(bitmap);
@@ -4886,12 +4876,19 @@ Item_func_set_user_var::update_hash(void *ptr, uint length,
bool unsigned_arg)
{
/*
- If we set a variable explicitely to NULL then keep the old
+ If we set a variable explicitly to NULL then keep the old
result type of the variable
*/
- if ((null_value= args[0]->null_value) && null_item)
+ if (args[0]->type() == Item::FIELD_ITEM)
+ {
+ /* args[0]->null_value may be outdated */
+ null_value= ((Item_field*)args[0])->field->is_null();
+ }
+ else
+ null_value= args[0]->null_value;
+ if (null_value && null_item)
res_type= m_var_entry->type; // Don't change type of item
- if (::update_hash(m_var_entry, (null_value= args[0]->null_value),
+ if (::update_hash(m_var_entry, null_value,
ptr, length, res_type, cs, unsigned_arg))
{
null_value= 1;
@@ -5266,11 +5263,10 @@ bool Item_func_set_user_var::is_null_result()
void Item_func_set_user_var::print(String *str, enum_query_type query_type)
{
- str->append(STRING_WITH_LEN("(@"));
+ str->append(STRING_WITH_LEN("@"));
str->append(name.str, name.length);
str->append(STRING_WITH_LEN(":="));
- args[0]->print(str, query_type);
- str->append(')');
+ args[0]->print_parenthesised(str, query_type, precedence());
}
@@ -5280,8 +5276,7 @@ void Item_func_set_user_var::print_as_stmt(String *str,
str->append(STRING_WITH_LEN("set @"));
str->append(name.str, name.length);
str->append(STRING_WITH_LEN(":="));
- args[0]->print(str, query_type);
- str->append(')');
+ args[0]->print_parenthesised(str, query_type, precedence());
}
bool Item_func_set_user_var::send(Protocol *protocol, String *str_arg)
@@ -5295,7 +5290,7 @@ bool Item_func_set_user_var::send(Protocol *protocol, String *str_arg)
return Item::send(protocol, str_arg);
}
-void Item_func_set_user_var::make_field(Send_field *tmp_field)
+void Item_func_set_user_var::make_field(THD *thd, Send_field *tmp_field)
{
if (result_field)
{
@@ -5305,7 +5300,7 @@ void Item_func_set_user_var::make_field(Send_field *tmp_field)
tmp_field->col_name=Item::name; // Use user supplied name
}
else
- Item::make_field(tmp_field);
+ Item::make_field(thd, tmp_field);
}
@@ -5588,7 +5583,7 @@ err:
return 1;
}
-void Item_func_get_user_var::fix_length_and_dec()
+bool Item_func_get_user_var::fix_length_and_dec()
{
THD *thd=current_thd;
int error;
@@ -5638,6 +5633,7 @@ void Item_func_get_user_var::fix_length_and_dec()
set_handler_by_field_type(MYSQL_TYPE_LONG_BLOB);
max_length= MAX_BLOB_WIDTH;
}
+ return false;
}
@@ -5650,9 +5646,8 @@ bool Item_func_get_user_var::const_item() const
void Item_func_get_user_var::print(String *str, enum_query_type query_type)
{
- str->append(STRING_WITH_LEN("(@"));
+ str->append(STRING_WITH_LEN("@"));
append_identifier(current_thd, str, name.str, name.length);
- str->append(')');
}
@@ -5746,7 +5741,9 @@ my_decimal* Item_user_var_as_out_param::val_decimal(my_decimal *decimal_buffer)
}
-void Item_user_var_as_out_param::print_for_load(THD *thd, String *str)
+void Item_user_var_as_out_param::load_data_print_for_log_event(THD *thd,
+ String *str)
+ const
{
str->append('@');
append_identifier(thd, str, name.str, name.length);
@@ -5761,7 +5758,7 @@ Item_func_get_system_var(THD *thd, sys_var *var_arg, enum_var_type var_type_arg,
orig_var_type(var_type_arg), component(*component_arg), cache_present(0)
{
/* set_name() will allocate the name */
- set_name(name_arg, (uint) name_len_arg, system_charset_info);
+ set_name(thd, name_arg, (uint) name_len_arg, system_charset_info);
}
@@ -5781,7 +5778,7 @@ void Item_func_get_system_var::update_null_value()
}
-void Item_func_get_system_var::fix_length_and_dec()
+bool Item_func_get_system_var::fix_length_and_dec()
{
char *cptr;
maybe_null= TRUE;
@@ -5793,7 +5790,7 @@ void Item_func_get_system_var::fix_length_and_dec()
{
my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0),
var->name.str, var_type == OPT_GLOBAL ? "SESSION" : "GLOBAL");
- return;
+ return TRUE;
}
/* As there was no local variable, return the global value */
var_type= OPT_GLOBAL;
@@ -5857,14 +5854,34 @@ void Item_func_get_system_var::fix_length_and_dec()
my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name.str);
break;
}
+ return FALSE;
}
void Item_func_get_system_var::print(String *str, enum_query_type query_type)
{
- str->append(name, name_length);
+ if (name_length)
+ str->append(name, name_length);
+ else
+ {
+ str->append(STRING_WITH_LEN("@@"));
+ if (component.length)
+ {
+ str->append(&component);
+ str->append('.');
+ }
+ else if (var_type == SHOW_OPT_GLOBAL && var->scope() != sys_var::GLOBAL)
+ {
+ str->append(STRING_WITH_LEN("global."));
+ }
+ str->append(&var->name);
+ }
}
+bool Item_func_get_system_var::check_vcol_func_processor(void *arg)
+{
+ return mark_unsupported_function("@@", var->name.str, arg, VCOL_SESSION_FUNC);
+}
enum Item_result Item_func_get_system_var::result_type() const
{
@@ -6614,7 +6631,7 @@ bool Item_func_sp::is_expensive()
@note called from Item::fix_fields.
*/
-void Item_func_sp::fix_length_and_dec()
+bool Item_func_sp::fix_length_and_dec()
{
DBUG_ENTER("Item_func_sp::fix_length_and_dec");
@@ -6622,7 +6639,7 @@ void Item_func_sp::fix_length_and_dec()
Type_std_attributes::set(sp_result_field);
maybe_null= 1;
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -6719,7 +6736,7 @@ error:
void
-Item_func_sp::make_field(Send_field *tmp_field)
+Item_func_sp::make_field(THD *thd, Send_field *tmp_field)
{
DBUG_ENTER("Item_func_sp::make_field");
DBUG_ASSERT(sp_result_field);
@@ -6871,6 +6888,10 @@ void Item_func_sp::update_used_tables()
}
}
+bool Item_func_sp::check_vcol_func_processor(void *arg)
+{
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
+}
/*
uuid_short handling.
@@ -6957,9 +6978,10 @@ my_decimal *Item_func_last_value::val_decimal(my_decimal *decimal_value)
}
-void Item_func_last_value::fix_length_and_dec()
+bool Item_func_last_value::fix_length_and_dec()
{
last_value= args[arg_count -1];
Type_std_attributes::set(last_value);
maybe_null= last_value->maybe_null;
+ return FALSE;
}
diff --git a/sql/item_func.h b/sql/item_func.h
index 36a2f94b31d..cd8e4c08168 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -64,10 +64,11 @@ public:
SP_STARTPOINT,SP_ENDPOINT,SP_EXTERIORRING,
SP_POINTN,SP_GEOMETRYN,SP_INTERIORRINGN, SP_RELATE_FUNC,
NOT_FUNC, NOT_ALL_FUNC,
- NOW_FUNC, TRIG_COND_FUNC,
+ NOW_FUNC, NOW_UTC_FUNC, SYSDATE_FUNC, TRIG_COND_FUNC,
SUSERVAR_FUNC, GUSERVAR_FUNC, COLLATE_FUNC,
EXTRACT_FUNC, CHAR_TYPECAST_FUNC, FUNC_SP, UDF_FUNC,
- NEG_FUNC, GSYSVAR_FUNC, IN_OPTIMIZER_FUNC, DYNCOL_FUNC };
+ NEG_FUNC, GSYSVAR_FUNC, IN_OPTIMIZER_FUNC, DYNCOL_FUNC,
+ JSON_EXTRACT_FUNC };
enum Type type() const { return FUNC_ITEM; }
virtual enum Functype functype() const { return UNKNOWN_FUNC; }
Item_func(THD *thd): Item_func_or_sum(thd), allowed_arg_cols(1)
@@ -159,8 +160,8 @@ public:
sync_with_sum_func_and_with_field(list);
list.empty(); // Fields are used
}
- void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields,
- uint flags);
+ void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
+ List<Item> &fields, uint flags);
virtual void print(String *str, enum_query_type query_type);
void print_op(String *str, enum_query_type query_type);
void print_args(String *str, uint from, enum_query_type query_type);
@@ -208,8 +209,8 @@ public:
Item_transformer transformer, uchar *arg_t);
void traverse_cond(Cond_traverser traverser,
void * arg, traverse_order order);
- bool eval_not_null_tables(uchar *opt_arg);
- // bool is_expensive_processor(uchar *arg);
+ bool eval_not_null_tables(void *opt_arg);
+ // bool is_expensive_processor(void *arg);
// virtual bool is_expensive() { return 0; }
inline void raise_numeric_overflow(const char *type_name)
{
@@ -324,6 +325,19 @@ public:
return this;
}
+ bool excl_dep_on_table(table_map tab_map)
+ {
+ if (used_tables() & OUTER_REF_TABLE_BIT)
+ return false;
+ return !(used_tables() & ~tab_map) ||
+ Item_args::excl_dep_on_table(tab_map);
+ }
+
+ bool excl_dep_on_grouping_fields(st_select_lex *sel)
+ {
+ return Item_args::excl_dep_on_grouping_fields(sel);
+ }
+
/*
We assume the result of any function that has a TIMESTAMP argument to be
timezone-dependent, since a TIMESTAMP value in both numeric and string
@@ -332,12 +346,12 @@ public:
representation of a TIMESTAMP argument verbatim, and thus does not depend on
the timezone.
*/
- virtual bool check_valid_arguments_processor(uchar *bool_arg)
+ virtual bool check_valid_arguments_processor(void *bool_arg)
{
return has_timestamp_args();
}
- virtual bool find_function_processor (uchar *arg)
+ virtual bool find_function_processor (void *arg)
{
return functype() == *(Functype *) arg;
}
@@ -384,12 +398,12 @@ public:
longlong val_int()
{
DBUG_ASSERT(fixed == 1);
- bool error;
- return double_to_longlong(val_real(), unsigned_flag, &error);
+ return Converter_double_to_longlong(val_real(), unsigned_flag).result();
}
enum Item_result result_type () const { return REAL_RESULT; }
- void fix_length_and_dec()
- { decimals= NOT_FIXED_DEC; max_length= float_length(decimals); }
+ enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+ bool fix_length_and_dec()
+ { decimals= NOT_FIXED_DEC; max_length= float_length(decimals); return FALSE; }
};
@@ -460,8 +474,7 @@ class Item_func_hybrid_field_type: public Item_hybrid_func
DBUG_ASSERT((res != NULL) ^ null_value);
return res;
}
-protected:
- Item_result cached_result_type;
+
public:
Item_func_hybrid_field_type(THD *thd):
Item_hybrid_func(thd)
@@ -567,7 +580,7 @@ class Item_func_num1: public Item_func_numhybrid
public:
Item_func_num1(THD *thd, Item *a): Item_func_numhybrid(thd, a) {}
Item_func_num1(THD *thd, Item *a, Item *b): Item_func_numhybrid(thd, a, b) {}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
};
@@ -582,8 +595,8 @@ class Item_num_op :public Item_func_numhybrid
{
print_op(str, query_type);
}
-
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ bool need_parentheses_in_default() { return true; }
};
@@ -608,7 +621,8 @@ public:
double val_real();
String *val_str(String*str);
enum Item_result result_type () const { return INT_RESULT; }
- void fix_length_and_dec() {}
+ enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+ bool fix_length_and_dec() { return FALSE; }
};
@@ -619,10 +633,15 @@ class Item_func_connection_id :public Item_int_func
public:
Item_func_connection_id(THD *thd): Item_int_func(thd) {}
const char *func_name() const { return "connection_id"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool fix_fields(THD *thd, Item **ref);
longlong val_int() { DBUG_ASSERT(fixed == 1); return value; }
- bool check_vcol_func_processor(uchar *int_arg) { return TRUE;}
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC);
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_connection_id>(thd, mem_root, this); }
};
@@ -634,9 +653,13 @@ public:
unsigned_flag= 0;
}
const char *func_name() const { return "cast_as_signed"; }
- longlong val_int();
- longlong val_int_from_str(int *error);
- void fix_length_and_dec()
+ longlong val_int()
+ {
+ longlong value= args[0]->val_int_signed_typecast();
+ null_value= args[0]->null_value;
+ return value;
+ }
+ bool fix_length_and_dec()
{
uint32 char_length= MY_MIN(args[0]->max_char_length(),
MY_INT64_NUM_DECIMAL_DIGITS);
@@ -647,9 +670,13 @@ public:
*/
set_if_bigger(char_length, 1U + (unsigned_flag ? 0 : 1));
fix_char_length(char_length);
+ return FALSE;
}
virtual void print(String *str, enum_query_type query_type);
uint decimal_precision() const { return args[0]->decimal_precision(); }
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_signed>(thd, mem_root, this); }
};
@@ -661,8 +688,15 @@ public:
unsigned_flag= 1;
}
const char *func_name() const { return "cast_as_unsigned"; }
- longlong val_int();
+ longlong val_int()
+ {
+ longlong value= args[0]->val_int_unsigned_typecast();
+ null_value= args[0]->null_value;
+ return value;
+ }
virtual void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_unsigned>(thd, mem_root, this); }
};
@@ -683,9 +717,12 @@ public:
my_decimal *val_decimal(my_decimal*);
enum Item_result result_type () const { return DECIMAL_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_NEWDECIMAL; }
- void fix_length_and_dec() {}
+ bool fix_length_and_dec() { return FALSE; }
const char *func_name() const { return "decimal_typecast"; }
virtual void print(String *str, enum_query_type query_type);
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_decimal_typecast>(thd, mem_root, this); }
};
@@ -700,9 +737,12 @@ public:
}
double val_real();
enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
- void fix_length_and_dec() { maybe_null= 1; }
+ bool fix_length_and_dec() { maybe_null= 1; return FALSE; }
const char *func_name() const { return "double_typecast"; }
virtual void print(String *str, enum_query_type query_type);
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_double_typecast>(thd, mem_root, this); }
};
@@ -712,8 +752,8 @@ class Item_func_additive_op :public Item_num_op
public:
Item_func_additive_op(THD *thd, Item *a, Item *b): Item_num_op(thd, a, b) {}
void result_precision();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
};
@@ -723,9 +763,12 @@ public:
Item_func_plus(THD *thd, Item *a, Item *b):
Item_func_additive_op(thd, a, b) {}
const char *func_name() const { return "+"; }
+ enum precedence precedence() const { return ADD_PRECEDENCE; }
longlong int_op();
double real_op();
my_decimal *decimal_op(my_decimal *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_plus>(thd, mem_root, this); }
};
class Item_func_minus :public Item_func_additive_op
@@ -734,10 +777,13 @@ public:
Item_func_minus(THD *thd, Item *a, Item *b):
Item_func_additive_op(thd, a, b) {}
const char *func_name() const { return "-"; }
+ enum precedence precedence() const { return ADD_PRECEDENCE; }
longlong int_op();
double real_op();
my_decimal *decimal_op(my_decimal *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_minus>(thd, mem_root, this); }
};
@@ -747,12 +793,15 @@ public:
Item_func_mul(THD *thd, Item *a, Item *b):
Item_num_op(thd, a, b) {}
const char *func_name() const { return "*"; }
+ enum precedence precedence() const { return MUL_PRECEDENCE; }
longlong int_op();
double real_op();
my_decimal *decimal_op(my_decimal *);
void result_precision();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_mul>(thd, mem_root, this); }
};
@@ -765,8 +814,11 @@ public:
double real_op();
my_decimal *decimal_op(my_decimal *);
const char *func_name() const { return "/"; }
- void fix_length_and_dec();
+ enum precedence precedence() const { return MUL_PRECEDENCE; }
+ bool fix_length_and_dec();
void result_precision();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_div>(thd, mem_root, this); }
};
@@ -777,15 +829,18 @@ public:
{}
longlong val_int();
const char *func_name() const { return "DIV"; }
- void fix_length_and_dec();
-
- virtual inline void print(String *str, enum_query_type query_type)
+ enum precedence precedence() const { return MUL_PRECEDENCE; }
+ bool fix_length_and_dec();
+ void print(String *str, enum_query_type query_type)
{
print_op(str, query_type);
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_int_div>(thd, mem_root, this); }
};
@@ -797,10 +852,13 @@ public:
double real_op();
my_decimal *decimal_op(my_decimal *);
const char *func_name() const { return "%"; }
+ enum precedence precedence() const { return MUL_PRECEDENCE; }
void result_precision();
- void fix_length_and_dec();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ bool fix_length_and_dec();
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_mod>(thd, mem_root, this); }
};
@@ -813,10 +871,19 @@ public:
my_decimal *decimal_op(my_decimal *);
const char *func_name() const { return "-"; }
enum Functype functype() const { return NEG_FUNC; }
- void fix_length_and_dec();
+ enum precedence precedence() const { return NEG_PRECEDENCE; }
+ void print(String *str, enum_query_type query_type)
+ {
+ str->append(func_name());
+ args[0]->print_parenthesised(str, query_type, precedence());
+ }
+ bool fix_length_and_dec();
uint decimal_precision() const { return args[0]->decimal_precision(); }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_neg>(thd, mem_root, this); }
};
@@ -828,9 +895,11 @@ public:
longlong int_op();
my_decimal *decimal_op(my_decimal *);
const char *func_name() const { return "abs"; }
- void fix_length_and_dec();
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ bool fix_length_and_dec();
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_abs>(thd, mem_root, this); }
};
// A class to handle logarithmic and trigonometric functions
@@ -840,10 +909,11 @@ class Item_dec_func :public Item_real_func
public:
Item_dec_func(THD *thd, Item *a): Item_real_func(thd, a) {}
Item_dec_func(THD *thd, Item *a, Item *b): Item_real_func(thd, a, b) {}
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals=NOT_FIXED_DEC; max_length=float_length(decimals);
maybe_null=1;
+ return FALSE;
}
};
@@ -853,6 +923,8 @@ public:
Item_func_exp(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "exp"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_exp>(thd, mem_root, this); }
};
@@ -862,6 +934,8 @@ public:
Item_func_ln(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "ln"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ln>(thd, mem_root, this); }
};
@@ -872,6 +946,8 @@ public:
Item_func_log(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {}
double val_real();
const char *func_name() const { return "log"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_log>(thd, mem_root, this); }
};
@@ -881,6 +957,8 @@ public:
Item_func_log2(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "log2"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_log2>(thd, mem_root, this); }
};
@@ -890,6 +968,8 @@ public:
Item_func_log10(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "log10"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_log10>(thd, mem_root, this); }
};
@@ -899,6 +979,8 @@ public:
Item_func_sqrt(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "sqrt"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_sqrt>(thd, mem_root, this); }
};
@@ -908,6 +990,8 @@ public:
Item_func_pow(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {}
double val_real();
const char *func_name() const { return "pow"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_pow>(thd, mem_root, this); }
};
@@ -917,6 +1001,8 @@ public:
Item_func_acos(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "acos"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_acos>(thd, mem_root, this); }
};
class Item_func_asin :public Item_dec_func
@@ -925,6 +1011,8 @@ public:
Item_func_asin(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "asin"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_asin>(thd, mem_root, this); }
};
class Item_func_atan :public Item_dec_func
@@ -934,6 +1022,8 @@ public:
Item_func_atan(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {}
double val_real();
const char *func_name() const { return "atan"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_atan>(thd, mem_root, this); }
};
class Item_func_cos :public Item_dec_func
@@ -942,6 +1032,8 @@ public:
Item_func_cos(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "cos"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_cos>(thd, mem_root, this); }
};
class Item_func_sin :public Item_dec_func
@@ -950,6 +1042,8 @@ public:
Item_func_sin(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "sin"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_sin>(thd, mem_root, this); }
};
class Item_func_tan :public Item_dec_func
@@ -958,6 +1052,8 @@ public:
Item_func_tan(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "tan"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_tan>(thd, mem_root, this); }
};
class Item_func_cot :public Item_dec_func
@@ -966,6 +1062,8 @@ public:
Item_func_cot(THD *thd, Item *a): Item_dec_func(thd, a) {}
double val_real();
const char *func_name() const { return "cot"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_cot>(thd, mem_root, this); }
};
@@ -973,7 +1071,7 @@ class Item_func_int_val :public Item_func_num1
{
public:
Item_func_int_val(THD *thd, Item *a): Item_func_num1(thd, a) {}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
};
@@ -985,8 +1083,10 @@ public:
longlong int_op();
double real_op();
my_decimal *decimal_op(my_decimal *);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ceiling>(thd, mem_root, this); }
};
@@ -998,8 +1098,10 @@ public:
longlong int_op();
double real_op();
my_decimal *decimal_op(my_decimal *);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_floor>(thd, mem_root, this); }
};
/* This handles round and truncate */
@@ -1014,7 +1116,9 @@ public:
double real_op();
longlong int_op();
my_decimal *decimal_op(my_decimal *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_round>(thd, mem_root, this); }
};
@@ -1032,10 +1136,12 @@ public:
void update_used_tables();
bool fix_fields(THD *thd, Item **ref);
void cleanup() { first_eval= TRUE; Item_real_func::cleanup(); }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_rand>(thd, mem_root, this); }
private:
void seed_random (Item * val);
};
@@ -1047,6 +1153,8 @@ public:
Item_func_sign(THD *thd, Item *a): Item_int_func(thd, a) {}
const char *func_name() const { return "sign"; }
longlong val_int();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_sign>(thd, mem_root, this); }
};
@@ -1060,8 +1168,10 @@ public:
Item_real_func(thd, a), name(name_arg), mul(mul_arg), add(add_arg) {}
double val_real();
const char *func_name() const { return name; }
- void fix_length_and_dec()
- { decimals= NOT_FIXED_DEC; max_length= float_length(decimals); }
+ bool fix_length_and_dec()
+ { decimals= NOT_FIXED_DEC; max_length= float_length(decimals); return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_units>(thd, mem_root, this); }
};
@@ -1089,7 +1199,7 @@ public:
String *val_str(String *);
my_decimal *val_decimal(my_decimal *);
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
};
class Item_func_min :public Item_func_min_max
@@ -1097,6 +1207,8 @@ class Item_func_min :public Item_func_min_max
public:
Item_func_min(THD *thd, List<Item> &list): Item_func_min_max(thd, list, 1) {}
const char *func_name() const { return "least"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_min>(thd, mem_root, this); }
};
class Item_func_max :public Item_func_min_max
@@ -1104,6 +1216,8 @@ class Item_func_max :public Item_func_min_max
public:
Item_func_max(THD *thd, List<Item> &list): Item_func_min_max(thd, list, -1) {}
const char *func_name() const { return "greatest"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_max>(thd, mem_root, this); }
};
@@ -1130,14 +1244,18 @@ public:
const char *func_name() const { return "rollup_const"; }
bool const_item() const { return 0; }
Item_result result_type() const { return args[0]->result_type(); }
- void fix_length_and_dec()
+ enum_field_types field_type() const { return args[0]->field_type(); }
+ bool fix_length_and_dec()
{
collation= args[0]->collation;
max_length= args[0]->max_length;
decimals=args[0]->decimals;
/* The item could be a NULL constant. */
null_value= args[0]->is_null();
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_rollup_const>(thd, mem_root, this); }
};
@@ -1148,7 +1266,9 @@ public:
Item_func_length(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "length"; }
- void fix_length_and_dec() { max_length=10; }
+ bool fix_length_and_dec() { max_length=10; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_length>(thd, mem_root, this); }
};
class Item_func_bit_length :public Item_func_length
@@ -1158,6 +1278,8 @@ public:
longlong val_int()
{ DBUG_ASSERT(fixed == 1); return Item_func_length::val_int()*8; }
const char *func_name() const { return "bit_length"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_bit_length>(thd, mem_root, this); }
};
class Item_func_char_length :public Item_int_func
@@ -1167,7 +1289,9 @@ public:
Item_func_char_length(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "char_length"; }
- void fix_length_and_dec() { max_length=10; }
+ bool fix_length_and_dec() { max_length=10; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_char_length>(thd, mem_root, this); }
};
class Item_func_coercibility :public Item_int_func
@@ -1176,8 +1300,8 @@ public:
Item_func_coercibility(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "coercibility"; }
- void fix_length_and_dec() { max_length=10; maybe_null= 0; }
- bool eval_not_null_tables(uchar *)
+ bool fix_length_and_dec() { max_length=10; maybe_null= 0; return FALSE; }
+ bool eval_not_null_tables(void *)
{
not_null_tables_cache= 0;
return false;
@@ -1185,6 +1309,8 @@ public:
Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
{ return this; }
bool const_item() const { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_coercibility>(thd, mem_root, this); }
};
class Item_func_locate :public Item_int_func
@@ -1196,8 +1322,10 @@ public:
Item_func_locate(THD *thd, Item *a, Item *b, Item *c): Item_int_func(thd, a, b, c) {}
const char *func_name() const { return "locate"; }
longlong val_int();
- void fix_length_and_dec();
+ bool fix_length_and_dec();
virtual void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_locate>(thd, mem_root, this); }
};
@@ -1210,7 +1338,9 @@ public:
Item_func_field(THD *thd, List<Item> &list): Item_int_func(thd, list) {}
longlong val_int();
const char *func_name() const { return "field"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_field>(thd, mem_root, this); }
};
@@ -1221,7 +1351,9 @@ public:
Item_func_ascii(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "ascii"; }
- void fix_length_and_dec() { max_length=3; }
+ bool fix_length_and_dec() { max_length=3; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ascii>(thd, mem_root, this); }
};
class Item_func_ord :public Item_int_func
@@ -1231,6 +1363,8 @@ public:
Item_func_ord(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "ord"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ord>(thd, mem_root, this); }
};
class Item_func_find_in_set :public Item_int_func
@@ -1244,7 +1378,9 @@ public:
Item_int_func(thd, a, b), enum_value(0) {}
longlong val_int();
const char *func_name() const { return "find_in_set"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_find_in_set>(thd, mem_root, this); }
};
/* Base class for all bit functions: '~', '|', '^', '&', '>>', '<<' */
@@ -1254,12 +1390,13 @@ class Item_func_bit: public Item_int_func
public:
Item_func_bit(THD *thd, Item *a, Item *b): Item_int_func(thd, a, b) {}
Item_func_bit(THD *thd, Item *a): Item_int_func(thd, a) {}
- void fix_length_and_dec() { unsigned_flag= 1; }
+ bool fix_length_and_dec() { unsigned_flag= 1; return FALSE; }
virtual inline void print(String *str, enum_query_type query_type)
{
print_op(str, query_type);
}
+ bool need_parentheses_in_default() { return true; }
};
class Item_func_bit_or :public Item_func_bit
@@ -1268,6 +1405,9 @@ public:
Item_func_bit_or(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "|"; }
+ enum precedence precedence() const { return BITOR_PRECEDENCE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_bit_or>(thd, mem_root, this); }
};
class Item_func_bit_and :public Item_func_bit
@@ -1276,6 +1416,9 @@ public:
Item_func_bit_and(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "&"; }
+ enum precedence precedence() const { return BITAND_PRECEDENCE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_bit_and>(thd, mem_root, this); }
};
class Item_func_bit_count :public Item_int_func
@@ -1284,7 +1427,9 @@ public:
Item_func_bit_count(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "bit_count"; }
- void fix_length_and_dec() { max_length=2; }
+ bool fix_length_and_dec() { max_length=2; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_bit_count>(thd, mem_root, this); }
};
class Item_func_shift_left :public Item_func_bit
@@ -1293,6 +1438,9 @@ public:
Item_func_shift_left(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "<<"; }
+ enum precedence precedence() const { return SHIFT_PRECEDENCE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_shift_left>(thd, mem_root, this); }
};
class Item_func_shift_right :public Item_func_bit
@@ -1301,6 +1449,9 @@ public:
Item_func_shift_right(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {}
longlong val_int();
const char *func_name() const { return ">>"; }
+ enum precedence precedence() const { return SHIFT_PRECEDENCE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_shift_right>(thd, mem_root, this); }
};
class Item_func_bit_neg :public Item_func_bit
@@ -1309,11 +1460,14 @@ public:
Item_func_bit_neg(THD *thd, Item *a): Item_func_bit(thd, a) {}
longlong val_int();
const char *func_name() const { return "~"; }
-
- virtual inline void print(String *str, enum_query_type query_type)
+ enum precedence precedence() const { return NEG_PRECEDENCE; }
+ void print(String *str, enum_query_type query_type)
{
- Item_func::print(str, query_type);
+ str->append(func_name());
+ args[0]->print_parenthesised(str, query_type, precedence());
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_bit_neg>(thd, mem_root, this); }
};
@@ -1324,18 +1478,21 @@ public:
Item_func_last_insert_id(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "last_insert_id"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
unsigned_flag= TRUE;
if (arg_count)
max_length= args[0]->max_length;
unsigned_flag=1;
+ return FALSE;
}
bool fix_fields(THD *thd, Item **ref);
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_last_insert_id>(thd, mem_root, this); }
};
@@ -1347,12 +1504,14 @@ public:
{}
longlong val_int();
const char *func_name() const { return "benchmark"; }
- void fix_length_and_dec() { max_length=1; maybe_null=0; }
+ bool fix_length_and_dec() { max_length=1; maybe_null=0; return FALSE; }
virtual void print(String *str, enum_query_type query_type);
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_benchmark>(thd, mem_root, this); }
};
@@ -1371,10 +1530,12 @@ public:
}
bool is_expensive() { return 1; }
longlong val_int();
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_sleep>(thd, mem_root, this); }
};
@@ -1394,7 +1555,7 @@ class Item_udf_func :public Item_func
}
protected:
udf_handler udf;
- bool is_expensive_processor(uchar *arg) { return TRUE; }
+ bool is_expensive_processor(void *arg) { return TRUE; }
public:
Item_udf_func(THD *thd, udf_func *udf_arg):
@@ -1412,7 +1573,7 @@ public:
return res;
}
void fix_num_length_and_dec();
- void update_used_tables()
+ void update_used_tables()
{
/*
TODO: Make a member in UDF_INIT and return if a UDF is deterministic or
@@ -1463,13 +1624,17 @@ public:
}
void cleanup();
Item_result result_type () const { return udf.result_type(); }
- bool eval_not_null_tables(uchar *opt_arg)
+ bool eval_not_null_tables(void *opt_arg)
{
not_null_tables_cache= 0;
return 0;
}
bool is_expensive() { return 1; }
virtual void print(String *str, enum_query_type query_type);
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC);
+ }
};
@@ -1484,9 +1649,8 @@ class Item_func_udf_float :public Item_udf_func
longlong val_int()
{
DBUG_ASSERT(fixed == 1);
- bool error;
- return double_to_longlong(Item_func_udf_float::val_real(),
- unsigned_flag, &error);
+ return Converter_double_to_longlong(Item_func_udf_float::val_real(),
+ unsigned_flag).result();
}
my_decimal *val_decimal(my_decimal *dec_buf)
{
@@ -1498,7 +1662,10 @@ class Item_func_udf_float :public Item_udf_func
}
double val_real();
String *val_str(String *str);
- void fix_length_and_dec() { fix_num_length_and_dec(); }
+ enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+ bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_udf_float>(thd, mem_root, this); }
};
@@ -1514,7 +1681,10 @@ public:
double val_real() { return (double) Item_func_udf_int::val_int(); }
String *val_str(String *str);
enum Item_result result_type () const { return INT_RESULT; }
- void fix_length_and_dec() { decimals= 0; max_length= 21; }
+ enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+ bool fix_length_and_dec() { decimals= 0; max_length= 21; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_udf_int>(thd, mem_root, this); }
};
@@ -1530,7 +1700,10 @@ public:
my_decimal *val_decimal(my_decimal *);
String *val_str(String *str);
enum Item_result result_type () const { return DECIMAL_RESULT; }
- void fix_length_and_dec() { fix_num_length_and_dec(); }
+ enum_field_types field_type() const { return MYSQL_TYPE_NEWDECIMAL; }
+ bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_udf_decimal>(thd, mem_root, this); }
};
@@ -1567,7 +1740,10 @@ public:
return dec_buf;
}
enum Item_result result_type () const { return STRING_RESULT; }
- void fix_length_and_dec();
+ enum_field_types field_type() const { return string_field_type(); }
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_udf_str>(thd, mem_root, this); }
};
#else /* Dummy functions to get sql_yacc.cc compiled */
@@ -1617,7 +1793,7 @@ public:
double val_real() { DBUG_ASSERT(fixed == 1); null_value= 1; return 0.0; }
longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; }
enum Item_result result_type () const { return STRING_RESULT; }
- void fix_length_and_dec() { maybe_null=1; max_length=0; }
+ bool fix_length_and_dec() { maybe_null=1; max_length=0; return FALSE; }
};
#endif /* HAVE_DLOPEN */
@@ -1632,17 +1808,19 @@ class Item_func_get_lock :public Item_int_func
Item_func_get_lock(THD *thd, Item *a, Item *b) :Item_int_func(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "get_lock"; }
- void fix_length_and_dec() { max_length=1; maybe_null=1;}
+ bool fix_length_and_dec() { max_length=1; maybe_null=1; return FALSE; }
table_map used_tables() const
{
return used_tables_cache | RAND_TABLE_BIT;
}
bool const_item() const { return 0; }
bool is_expensive() { return 1; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_get_lock>(thd, mem_root, this); }
};
class Item_func_release_lock :public Item_int_func
@@ -1652,17 +1830,19 @@ public:
Item_func_release_lock(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "release_lock"; }
- void fix_length_and_dec() { max_length= 1; maybe_null= 1;}
+ bool fix_length_and_dec() { max_length= 1; maybe_null= 1; return FALSE; }
table_map used_tables() const
{
return used_tables_cache | RAND_TABLE_BIT;
}
bool const_item() const { return 0; }
bool is_expensive() { return 1; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_release_lock>(thd, mem_root, this); }
};
/* replication functions */
@@ -1678,11 +1858,13 @@ public:
Item_int_func(thd, a, b, c, d) {}
longlong val_int();
const char *func_name() const { return "master_pos_wait"; }
- void fix_length_and_dec() { max_length=21; maybe_null=1;}
- bool check_vcol_func_processor(uchar *int_arg)
+ bool fix_length_and_dec() { max_length=21; maybe_null=1; return FALSE; }
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_master_pos_wait>(thd, mem_root, this); }
};
@@ -1694,11 +1876,13 @@ public:
Item_master_gtid_wait(THD *thd, Item *a, Item *b): Item_int_func(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "master_gtid_wait"; }
- void fix_length_and_dec() { max_length=2; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool fix_length_and_dec() { max_length=2; return FALSE; }
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_master_gtid_wait>(thd, mem_root, this); }
};
@@ -1723,7 +1907,7 @@ public:
Item_func_user_var(THD *thd, Item_func_user_var *item)
:Item_hybrid_func(thd, item),
m_var_entry(item->m_var_entry), name(item->name) { }
- bool check_vcol_func_processor(uchar *int_arg) { return true; }
+ bool check_vcol_func_processor(void *arg);
};
@@ -1777,19 +1961,20 @@ public:
bool update_hash(void *ptr, uint length, enum Item_result type,
CHARSET_INFO *cs, bool unsigned_arg);
bool send(Protocol *protocol, String *str_arg);
- void make_field(Send_field *tmp_field);
+ void make_field(THD *thd, Send_field *tmp_field);
bool check(bool use_result_field);
void save_item_result(Item *item);
bool update();
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
Field *create_field_for_create_select(TABLE *table)
{
return result_type() != STRING_RESULT ?
create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS) :
tmp_table_field_from_field_type(table, false, true);
}
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
+ enum precedence precedence() const { return ASSIGN_PRECEDENCE; }
void print_as_stmt(String *str, enum_query_type query_type);
const char *func_name() const { return "set_user_var"; }
int save_in_field(Field *field, bool no_conversions,
@@ -1801,10 +1986,12 @@ public:
void save_org_in_field(Field *field,
fast_field_copier data __attribute__ ((__unused__)))
{ (void)save_in_field(field, 1, 0); }
- bool register_field_in_read_map(uchar *arg);
- bool register_field_in_bitmap(uchar *arg);
+ bool register_field_in_read_map(void *arg);
+ bool register_field_in_bitmap(void *arg);
bool set_entry(THD *thd, bool create_if_not_exists);
void cleanup();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_set_user_var>(thd, mem_root, this); }
};
@@ -1820,7 +2007,7 @@ public:
longlong val_int();
my_decimal *val_decimal(my_decimal*);
String *val_str(String* str);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
virtual void print(String *str, enum_query_type query_type);
/*
We must always return variables as strings to guard against selects of type
@@ -1831,6 +2018,8 @@ public:
table_map used_tables() const
{ return const_item() ? 0 : RAND_TABLE_BIT; }
bool eq(const Item *item, bool binary_cmp) const;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_get_user_var>(thd, mem_root, this); }
private:
bool set_value(THD *thd, sp_rcontext *ctx, Item **it);
@@ -1851,13 +2040,43 @@ public:
in List<Item> and desire to place this code somewhere near other functions
working with user variables.
*/
-class Item_user_var_as_out_param :public Item
+class Item_user_var_as_out_param :public Item,
+ public Load_data_outvar
{
LEX_STRING name;
user_var_entry *entry;
public:
Item_user_var_as_out_param(THD *thd, LEX_STRING a): Item(thd), name(a)
- { set_name(a.str, 0, system_charset_info); }
+ { set_name(thd, a.str, 0, system_charset_info); }
+ Load_data_outvar *get_load_data_outvar()
+ {
+ return this;
+ }
+ bool load_data_set_null(THD *thd, const Load_data_param *param)
+ {
+ set_null_value(param->charset());
+ return false;
+ }
+ bool load_data_set_no_data(THD *thd, const Load_data_param *param)
+ {
+ set_null_value(param->charset());
+ return false;
+ }
+ bool load_data_set_value(THD *thd, const char *pos, uint length,
+ const Load_data_param *param)
+ {
+ set_value(pos, length, param->charset());
+ return false;
+ }
+ void load_data_print_for_log_event(THD *thd, String *to) const;
+ bool load_data_add_outvar(THD *thd, Load_data_param *param) const
+ {
+ return param->add_outvar_user_var(thd);
+ }
+ uint load_data_fixed_length() const
+ {
+ return 0;
+ }
/* We should return something different from FIELD_ITEM here */
enum Type type() const { return STRING_ITEM;}
double val_real();
@@ -1866,9 +2085,11 @@ public:
my_decimal *val_decimal(my_decimal *decimal_buffer);
/* fix_fields() binds variable name with its entry structure */
bool fix_fields(THD *thd, Item **ref);
- void print_for_load(THD *thd, String *str);
void set_null_value(CHARSET_INFO* cs);
void set_value(const char *str, uint length, CHARSET_INFO* cs);
+ enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_user_var_as_out_param>(thd, mem_root, this); }
};
@@ -1897,7 +2118,7 @@ public:
size_t name_len_arg);
enum Functype functype() const { return GSYSVAR_FUNC; }
void update_null_value();
- void fix_length_and_dec();
+ bool fix_length_and_dec();
void print(String *str, enum_query_type query_type);
bool const_item() const { return true; }
table_map used_tables() const { return 0; }
@@ -1922,7 +2143,9 @@ public:
bool eq(const Item *item, bool binary_cmp) const;
void cleanup();
- bool check_vcol_func_processor(uchar *int_arg) { return TRUE;}
+ bool check_vcol_func_processor(void *arg);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_get_system_var>(thd, mem_root, this); }
};
@@ -1955,10 +2178,10 @@ public:
table= 0; // required by Item_func_match::eq()
DBUG_VOID_RETURN;
}
- bool is_expensive_processor(uchar *arg) { return TRUE; }
+ bool is_expensive_processor(void *arg) { return TRUE; }
enum Functype functype() const { return FT_FUNC; }
const char *func_name() const { return "match"; }
- bool eval_not_null_tables(uchar *opt_arg)
+ bool eval_not_null_tables(void *opt_arg)
{
not_null_tables_cache= 0;
return 0;
@@ -1972,11 +2195,13 @@ public:
bool fix_index();
void init_search(THD *thd, bool no_order);
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- /* TODO: consider adding in support for the MATCH-based virtual columns */
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function("match ... against()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_match>(thd, mem_root, this); }
+ Item *build_clone(THD *thd, MEM_ROOT *mem_root) { return 0; }
private:
/**
Check whether storage engine for given table,
@@ -2011,7 +2236,6 @@ private:
return false;
}
-
};
@@ -2021,6 +2245,9 @@ public:
Item_func_bit_xor(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "^"; }
+ enum precedence precedence() const { return BITXOR_PRECEDENCE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_bit_xor>(thd, mem_root, this); }
};
class Item_func_is_free_lock :public Item_int_func
@@ -2030,11 +2257,17 @@ public:
Item_func_is_free_lock(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "is_free_lock"; }
- void fix_length_and_dec() { decimals=0; max_length=1; maybe_null=1;}
- bool check_vcol_func_processor(uchar *int_arg)
+ bool fix_length_and_dec()
+ {
+ decimals=0; max_length=1; maybe_null=1;
+ return FALSE;
+ }
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_is_free_lock>(thd, mem_root, this); }
};
class Item_func_is_used_lock :public Item_int_func
@@ -2044,11 +2277,17 @@ public:
Item_func_is_used_lock(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "is_used_lock"; }
- void fix_length_and_dec() { decimals=0; max_length=10; maybe_null=1;}
- bool check_vcol_func_processor(uchar *int_arg)
+ bool fix_length_and_dec()
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ decimals=0; max_length=10; maybe_null=1;
+ return FALSE;
+ }
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_is_used_lock>(thd, mem_root, this); }
};
/* For type casts */
@@ -2061,18 +2300,46 @@ enum Cast_target
};
+struct Lex_cast_type_st: public Lex_length_and_dec_st
+{
+private:
+ Cast_target m_type;
+public:
+ void set(Cast_target type, const char *length, const char *dec)
+ {
+ m_type= type;
+ Lex_length_and_dec_st::set(length, dec);
+ }
+ void set(Cast_target type, Lex_length_and_dec_st length_and_dec)
+ {
+ m_type= type;
+ Lex_length_and_dec_st::operator=(length_and_dec);
+ }
+ void set(Cast_target type, const char *length)
+ {
+ set(type, length, 0);
+ }
+ void set(Cast_target type)
+ {
+ set(type, 0, 0);
+ }
+ Cast_target type() const { return m_type; }
+};
+
+
class Item_func_row_count :public Item_int_func
{
public:
Item_func_row_count(THD *thd): Item_int_func(thd) {}
longlong val_int();
const char *func_name() const { return "row_count"; }
- void fix_length_and_dec() { decimals= 0; maybe_null=0; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool fix_length_and_dec() { decimals= 0; maybe_null=0; return FALSE; }
+ bool check_vcol_func_processor(void *arg)
{
-
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_row_count>(thd, mem_root, this); }
};
@@ -2104,7 +2371,7 @@ private:
bool init_result_field(THD *thd);
protected:
- bool is_expensive_processor(uchar *arg)
+ bool is_expensive_processor(void *arg)
{ return is_expensive(); }
public:
@@ -2131,7 +2398,7 @@ public:
sp_result_field :
tmp_table_field_from_field_type(table, false, false);
}
- void make_field(Send_field *tmp_field);
+ void make_field(THD *thd, Send_field *tmp_field);
Item_result result_type() const;
@@ -2156,6 +2423,13 @@ public:
return sp_result_field->val_decimal(dec_buf);
}
+ bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ {
+ if (execute())
+ return true;
+ return sp_result_field->get_date(ltime, fuzzydate);
+ }
+
String *val_str(String *str)
{
String buf;
@@ -2180,14 +2454,14 @@ public:
execute();
}
- virtual bool change_context_processor(uchar *cntx)
+ virtual bool change_context_processor(void *cntx)
{ context= (Name_resolution_context *)cntx; return FALSE; }
bool sp_check_access(THD * thd);
virtual enum Functype functype() const { return FUNC_SP; }
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec(void);
+ bool fix_length_and_dec(void);
bool is_expensive();
inline Field *get_sp_result_field()
@@ -2195,15 +2469,13 @@ public:
return sp_result_field;
}
- bool check_vcol_func_processor(uchar *int_arg)
- {
- return trace_unsupported_by_check_vcol_func_processor(func_name());
- }
- bool limit_index_condition_pushdown_processor(uchar *opt_arg)
+ bool check_vcol_func_processor(void *arg);
+ bool limit_index_condition_pushdown_processor(void *opt_arg)
{
return TRUE;
}
- bool eval_not_null_tables(uchar *opt_arg)
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
+ bool eval_not_null_tables(void *opt_arg)
{
not_null_tables_cache= 0;
return 0;
@@ -2217,11 +2489,13 @@ public:
Item_func_found_rows(THD *thd): Item_int_func(thd) {}
longlong val_int();
const char *func_name() const { return "found_rows"; }
- void fix_length_and_dec() { decimals= 0; maybe_null=0; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool fix_length_and_dec() { decimals= 0; maybe_null=0; return FALSE; }
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_found_rows>(thd, mem_root, this); }
};
@@ -2234,13 +2508,15 @@ public:
const char *func_name() const { return "uuid_short"; }
longlong val_int();
bool const_item() const { return false; }
+ bool fix_length_and_dec()
+ { max_length= 21; unsigned_flag=1; return FALSE; }
table_map used_tables() const { return RAND_TABLE_BIT; }
- void fix_length_and_dec()
- { max_length= 21; unsigned_flag=1; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_uuid_short>(thd, mem_root, this); }
};
@@ -2254,10 +2530,10 @@ public:
longlong val_int();
String *val_str(String *);
my_decimal *val_decimal(my_decimal *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
enum Item_result result_type () const { return last_value->result_type(); }
const char *func_name() const { return "last_value"; }
- bool eval_not_null_tables(uchar *opt_arg)
+ bool eval_not_null_tables(void *)
{
not_null_tables_cache= 0;
return 0;
@@ -2270,6 +2546,8 @@ public:
Item_func::update_used_tables();
maybe_null= last_value->maybe_null;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_last_value>(thd, mem_root, this); }
};
@@ -2278,7 +2556,6 @@ Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name,
extern bool check_reserved_words(LEX_STRING *name);
extern enum_field_types agg_field_type(Item **items, uint nitems,
bool treat_bit_as_number);
-Item *find_date_time_item(Item **args, uint nargs, uint col);
double my_double_round(double value, longlong dec, bool dec_unsigned,
bool truncate);
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index 51a4636df1f..246be438e36 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -49,12 +49,13 @@ Field *Item_geometry_func::create_field_for_create_select(TABLE *t_arg)
return result;
}
-void Item_geometry_func::fix_length_and_dec()
+bool Item_geometry_func::fix_length_and_dec()
{
collation.set(&my_charset_bin);
decimals=0;
max_length= (uint32) 4294967295U;
maybe_null= 1;
+ return FALSE;
}
@@ -75,9 +76,9 @@ String *Item_func_geometry_from_text::val_str(String *str)
srid= (uint32)args[1]->val_int();
str->set_charset(&my_charset_bin);
+ str->length(0);
if (str->reserve(SRID_SIZE, 512))
return 0;
- str->length(0);
str->q_append(srid);
if ((null_value= !Geometry::create_from_wkt(&buffer, &trs, str, 0)))
return 0;
@@ -121,6 +122,85 @@ String *Item_func_geometry_from_wkb::val_str(String *str)
}
+void report_json_error_ex(String *js, json_engine_t *je,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv);
+
+String *Item_func_geometry_from_json::val_str(String *str)
+{
+ DBUG_ASSERT(fixed == 1);
+ Geometry_buffer buffer;
+ String *js= args[0]->val_str_ascii(&tmp_js);
+ uint32 srid= 0;
+ longlong options= 0;
+ json_engine_t je;
+
+ if ((null_value= args[0]->null_value))
+ return 0;
+
+ if (arg_count > 1 && !args[1]->null_value)
+ {
+ options= args[1]->val_int();
+ if (options > 4 || options < 1)
+ {
+ String *sv= args[1]->val_str(&tmp_js);
+ my_error(ER_WRONG_VALUE_FOR_TYPE, MYF(0),
+ "option", sv->c_ptr_safe(), "ST_GeometryFromJSON");
+ null_value= 1;
+ return 0;
+ }
+ }
+
+ if ((arg_count == 3) && !args[2]->null_value)
+ srid= (uint32)args[2]->val_int();
+
+ str->set_charset(&my_charset_bin);
+ if (str->reserve(SRID_SIZE, 512))
+ return 0;
+ str->length(0);
+ str->q_append(srid);
+
+ json_scan_start(&je, js->charset(), (const uchar *) js->ptr(),
+ (const uchar *) js->end());
+
+ if ((null_value= !Geometry::create_from_json(&buffer, &je, options==1, str)))
+ {
+ int code= 0;
+
+ switch (je.s.error)
+ {
+ case Geometry::GEOJ_INCORRECT_GEOJSON:
+ code= ER_GEOJSON_INCORRECT;
+ break;
+ case Geometry::GEOJ_TOO_FEW_POINTS:
+ code= ER_GEOJSON_TOO_FEW_POINTS;
+ break;
+ case Geometry::GEOJ_EMPTY_COORDINATES:
+ code= ER_GEOJSON_EMPTY_COORDINATES;
+ break;
+ case Geometry::GEOJ_POLYGON_NOT_CLOSED:
+ code= ER_GEOJSON_NOT_CLOSED;
+ break;
+ case Geometry::GEOJ_DIMENSION_NOT_SUPPORTED:
+ my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeometryFromJSON");
+ break;
+ default:
+ report_json_error_ex(js, &je, func_name(), 0, Sql_condition::WARN_LEVEL_WARN);
+ return NULL;
+ }
+
+ if (code)
+ {
+ THD *thd= current_thd;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, code,
+ ER_THD(thd, code));
+ }
+ return 0;
+ }
+ return str;
+}
+
+
String *Item_func_as_wkt::val_str_ascii(String *str)
{
DBUG_ASSERT(fixed == 1);
@@ -144,11 +224,12 @@ String *Item_func_as_wkt::val_str_ascii(String *str)
}
-void Item_func_as_wkt::fix_length_and_dec()
+bool Item_func_as_wkt::fix_length_and_dec()
{
collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
max_length=MAX_BLOB_WIDTH;
maybe_null= 1;
+ return FALSE;
}
@@ -170,6 +251,69 @@ String *Item_func_as_wkb::val_str(String *str)
}
+bool Item_func_as_geojson::fix_length_and_dec()
+{
+ collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
+ max_length=MAX_BLOB_WIDTH;
+ maybe_null= 1;
+ return FALSE;
+}
+
+
+String *Item_func_as_geojson::val_str_ascii(String *str)
+{
+ DBUG_ASSERT(fixed == 1);
+ String arg_val;
+ String *swkb= args[0]->val_str(&arg_val);
+ uint max_dec= FLOATING_POINT_DECIMALS;
+ longlong options= 0;
+ Geometry_buffer buffer;
+ Geometry *geom= NULL;
+ const char *dummy;
+
+ if ((null_value=
+ (args[0]->null_value ||
+ !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length())))))
+ return 0;
+
+ if (arg_count > 1)
+ {
+ max_dec= (uint) args[1]->val_int();
+ if (args[1]->null_value)
+ max_dec= FLOATING_POINT_DECIMALS;
+ if (arg_count > 2)
+ {
+ options= args[2]->val_int();
+ if (args[2]->null_value)
+ options= 0;
+ }
+ }
+
+ str->length(0);
+ str->set_charset(&my_charset_latin1);
+
+ if (str->reserve(1, 512))
+ return 0;
+
+ str->qs_append('{');
+
+ if (options & 1)
+ {
+ if (geom->bbox_as_json(str) || str->append(", ", 2))
+ goto error;
+ }
+
+ if ((geom->as_json(str, max_dec, &dummy) || str->append("}", 1)))
+ goto error;
+
+ return str;
+
+error:
+ null_value= 1;
+ return 0;
+}
+
+
String *Item_func_geometry_type::val_str_ascii(String *str)
{
DBUG_ASSERT(fixed == 1);
@@ -982,11 +1126,11 @@ Item_func_spatial_rel::get_mm_leaf(RANGE_OPT_PARAM *param,
tree->max_flag= NO_MAX_RANGE;
break;
case SP_WITHIN_FUNC:
- tree->min_flag= GEOM_FLAG | HA_READ_MBR_WITHIN;// NEAR_MIN;//512;
+ tree->min_flag= GEOM_FLAG | HA_READ_MBR_CONTAIN;// NEAR_MIN;//512;
tree->max_flag= NO_MAX_RANGE;
break;
case SP_CONTAINS_FUNC:
- tree->min_flag= GEOM_FLAG | HA_READ_MBR_CONTAIN;// NEAR_MIN;//512;
+ tree->min_flag= GEOM_FLAG | HA_READ_MBR_WITHIN;// NEAR_MIN;//512;
tree->max_flag= NO_MAX_RANGE;
break;
case SP_OVERLAPS_FUNC:
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index 251cae1121e..acc94183d47 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -38,7 +38,7 @@ public:
Item_geometry_func(THD *thd, Item *a, Item *b, Item *c):
Item_str_func(thd, a, b, c) {}
Item_geometry_func(THD *thd, List<Item> &list): Item_str_func(thd, list) {}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; }
Field *create_field_for_create_select(TABLE *table);
};
@@ -51,6 +51,8 @@ public:
Item_geometry_func(thd, a, srid) {}
const char *func_name() const { return "st_geometryfromtext"; }
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_geometry_from_text>(thd, mem_root, this); }
};
class Item_func_geometry_from_wkb: public Item_geometry_func
@@ -61,15 +63,36 @@ public:
Item_geometry_func(thd, a, srid) {}
const char *func_name() const { return "st_geometryfromwkb"; }
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_geometry_from_wkb>(thd, mem_root, this); }
};
+
+class Item_func_geometry_from_json: public Item_geometry_func
+{
+ String tmp_js;
+public:
+ Item_func_geometry_from_json(THD *thd, Item *js): Item_geometry_func(thd, js) {}
+ Item_func_geometry_from_json(THD *thd, Item *js, Item *opt):
+ Item_geometry_func(thd, js, opt) {}
+ Item_func_geometry_from_json(THD *thd, Item *js, Item *opt, Item *srid):
+ Item_geometry_func(thd, js, opt, srid) {}
+ const char *func_name() const { return "st_geomfromgeojson"; }
+ String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_geometry_from_json>(thd, mem_root, this); }
+};
+
+
class Item_func_as_wkt: public Item_str_ascii_func
{
public:
Item_func_as_wkt(THD *thd, Item *a): Item_str_ascii_func(thd, a) {}
const char *func_name() const { return "st_astext"; }
String *val_str_ascii(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_as_wkt>(thd, mem_root, this); }
};
class Item_func_as_wkb: public Item_geometry_func
@@ -79,20 +102,42 @@ public:
const char *func_name() const { return "st_aswkb"; }
String *val_str(String *);
enum_field_types field_type() const { return MYSQL_TYPE_BLOB; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_as_wkb>(thd, mem_root, this); }
};
+
+class Item_func_as_geojson: public Item_str_ascii_func
+{
+public:
+ Item_func_as_geojson(THD *thd, Item *js): Item_str_ascii_func(thd, js) {}
+ Item_func_as_geojson(THD *thd, Item *js, Item *max_dec_digits):
+ Item_str_ascii_func(thd, js, max_dec_digits) {}
+ Item_func_as_geojson(THD *thd, Item *js, Item *max_dec_digits, Item *opt):
+ Item_str_ascii_func(thd, js, max_dec_digits, opt) {}
+ const char *func_name() const { return "st_asgeojson"; }
+ bool fix_length_and_dec();
+ String *val_str_ascii(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_as_geojson>(thd, mem_root, this); }
+};
+
+
class Item_func_geometry_type: public Item_str_ascii_func
{
public:
Item_func_geometry_type(THD *thd, Item *a): Item_str_ascii_func(thd, a) {}
String *val_str_ascii(String *);
const char *func_name() const { return "st_geometrytype"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
// "GeometryCollection" is the longest
fix_length_and_charset(20, default_charset());
maybe_null= 1;
+ return FALSE;
};
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_geometry_type>(thd, mem_root, this); }
};
@@ -125,6 +170,8 @@ public:
{}
const char *func_name() const { return "st_convexhull"; }
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_convexhull>(thd, mem_root, this); }
};
@@ -135,6 +182,8 @@ public:
const char *func_name() const { return "st_centroid"; }
String *val_str(String *);
Field::geometry_type get_geometry_type() const;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_centroid>(thd, mem_root, this); }
};
class Item_func_envelope: public Item_geometry_func
@@ -144,6 +193,8 @@ public:
const char *func_name() const { return "st_envelope"; }
String *val_str(String *);
Field::geometry_type get_geometry_type() const;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_envelope>(thd, mem_root, this); }
};
@@ -175,6 +226,8 @@ public:
Item_func_boundary(THD *thd, Item *a): Item_geometry_func(thd, a) {}
const char *func_name() const { return "st_boundary"; }
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_boundary>(thd, mem_root, this); }
};
@@ -187,6 +240,8 @@ public:
const char *func_name() const { return "point"; }
String *val_str(String *);
Field::geometry_type get_geometry_type() const;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_point>(thd, mem_root, this); }
};
class Item_func_spatial_decomp: public Item_geometry_func
@@ -211,6 +266,8 @@ public:
}
}
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_spatial_decomp>(thd, mem_root, this); }
};
class Item_func_spatial_decomp_n: public Item_geometry_func
@@ -235,6 +292,8 @@ public:
}
}
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_spatial_decomp_n>(thd, mem_root, this); }
};
class Item_func_spatial_collection: public Item_geometry_func
@@ -250,9 +309,10 @@ public:
item_type=it;
}
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
- Item_geometry_func::fix_length_and_dec();
+ if (Item_geometry_func::fix_length_and_dec())
+ return TRUE;
for (unsigned int i= 0; i < arg_count; ++i)
{
if (args[i]->fixed && args[i]->field_type() != MYSQL_TYPE_GEOMETRY)
@@ -262,11 +322,15 @@ public:
str.append('\0');
my_error(ER_ILLEGAL_VALUE_FOR_TYPE, MYF(0), "non geometric",
str.ptr());
+ return TRUE;
}
}
+ return FALSE;
}
- const char *func_name() const { return "st_multipoint"; }
+ const char *func_name() const { return "geometrycollection"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_spatial_collection>(thd, mem_root, this); }
};
@@ -289,7 +353,18 @@ public:
maybe_null= true;
}
enum Functype functype() const { return spatial_rel; }
- enum Functype rev_functype() const { return spatial_rel; }
+ enum Functype rev_functype() const
+ {
+ switch (spatial_rel)
+ {
+ case SP_CONTAINS_FUNC:
+ return SP_WITHIN_FUNC;
+ case SP_WITHIN_FUNC:
+ return SP_CONTAINS_FUNC;
+ default:
+ return spatial_rel;
+ }
+ }
bool is_null() { (void) val_int(); return null_value; }
void add_key_fields(JOIN *join, KEY_FIELD **key_fields,
uint *and_level, table_map usable_tables,
@@ -298,6 +373,8 @@ public:
return add_key_fields_optimize_op(join, key_fields, and_level,
usable_tables, sargables, false);
}
+ bool need_parentheses_in_default() { return false; }
+ Item *build_clone(THD *thd, MEM_ROOT *mem_root) { return 0; }
};
@@ -309,6 +386,8 @@ public:
{ }
longlong val_int();
const char *func_name() const;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_spatial_mbr_rel>(thd, mem_root, this); }
};
@@ -323,6 +402,8 @@ public:
{ }
longlong val_int();
const char *func_name() const;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_spatial_precise_rel>(thd, mem_root, this); }
};
@@ -338,6 +419,9 @@ public:
{ }
longlong val_int();
const char *func_name() const { return "st_relate"; }
+ bool need_parentheses_in_default() { return false; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_spatial_relate>(thd, mem_root, this); }
};
@@ -367,6 +451,8 @@ public:
{
Item_func::print(str, query_type);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_spatial_operation>(thd, mem_root, this); }
};
@@ -417,6 +503,8 @@ public:
Item_geometry_func(thd, obj, distance) {}
const char *func_name() const { return "st_buffer"; }
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_buffer>(thd, mem_root, this); }
};
@@ -426,7 +514,10 @@ public:
Item_func_isempty(THD *thd, Item *a): Item_bool_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "st_isempty"; }
- void fix_length_and_dec() { maybe_null= 1; }
+ bool fix_length_and_dec() { maybe_null= 1; return FALSE; }
+ bool need_parentheses_in_default() { return false; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_isempty>(thd, mem_root, this); }
};
class Item_func_issimple: public Item_int_func
@@ -439,8 +530,10 @@ public:
Item_func_issimple(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "st_issimple"; }
- void fix_length_and_dec() { decimals=0; max_length=2; }
+ bool fix_length_and_dec() { decimals=0; max_length=2; return FALSE; }
uint decimal_precision() const { return 1; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_issimple>(thd, mem_root, this); }
};
class Item_func_isclosed: public Item_int_func
@@ -449,8 +542,10 @@ public:
Item_func_isclosed(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "st_isclosed"; }
- void fix_length_and_dec() { decimals=0; max_length=2; }
+ bool fix_length_and_dec() { decimals=0; max_length=2; return FALSE; }
uint decimal_precision() const { return 1; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_isclosed>(thd, mem_root, this); }
};
class Item_func_isring: public Item_func_issimple
@@ -459,6 +554,8 @@ public:
Item_func_isring(THD *thd, Item *a): Item_func_issimple(thd, a) {}
longlong val_int();
const char *func_name() const { return "st_isring"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_isring>(thd, mem_root, this); }
};
class Item_func_dimension: public Item_int_func
@@ -468,7 +565,9 @@ public:
Item_func_dimension(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "st_dimension"; }
- void fix_length_and_dec() { max_length= 10; maybe_null= 1; }
+ bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dimension>(thd, mem_root, this); }
};
class Item_func_x: public Item_real_func
@@ -478,11 +577,15 @@ public:
Item_func_x(THD *thd, Item *a): Item_real_func(thd, a) {}
double val_real();
const char *func_name() const { return "st_x"; }
- void fix_length_and_dec()
- {
- Item_real_func::fix_length_and_dec();
- maybe_null= 1;
+ bool fix_length_and_dec()
+ {
+ if (Item_real_func::fix_length_and_dec())
+ return TRUE;
+ maybe_null= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_x>(thd, mem_root, this); }
};
@@ -493,11 +596,15 @@ public:
Item_func_y(THD *thd, Item *a): Item_real_func(thd, a) {}
double val_real();
const char *func_name() const { return "st_y"; }
- void fix_length_and_dec()
- {
- Item_real_func::fix_length_and_dec();
- maybe_null= 1;
+ bool fix_length_and_dec()
+ {
+ if (Item_real_func::fix_length_and_dec())
+ return TRUE;
+ maybe_null= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_y>(thd, mem_root, this); }
};
@@ -508,7 +615,9 @@ public:
Item_func_numgeometries(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "st_numgeometries"; }
- void fix_length_and_dec() { max_length= 10; maybe_null= 1; }
+ bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_numgeometries>(thd, mem_root, this); }
};
@@ -519,7 +628,9 @@ public:
Item_func_numinteriorring(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "st_numinteriorrings"; }
- void fix_length_and_dec() { max_length= 10; maybe_null= 1; }
+ bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_numinteriorring>(thd, mem_root, this); }
};
@@ -530,7 +641,9 @@ public:
Item_func_numpoints(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "st_numpoints"; }
- void fix_length_and_dec() { max_length= 10; maybe_null= 1; }
+ bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_numpoints>(thd, mem_root, this); }
};
@@ -541,11 +654,15 @@ public:
Item_func_area(THD *thd, Item *a): Item_real_func(thd, a) {}
double val_real();
const char *func_name() const { return "st_area"; }
- void fix_length_and_dec()
- {
- Item_real_func::fix_length_and_dec();
- maybe_null= 1;
+ bool fix_length_and_dec()
+ {
+ if (Item_real_func::fix_length_and_dec())
+ return TRUE;
+ maybe_null= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_area>(thd, mem_root, this); }
};
@@ -556,11 +673,15 @@ public:
Item_func_glength(THD *thd, Item *a): Item_real_func(thd, a) {}
double val_real();
const char *func_name() const { return "st_length"; }
- void fix_length_and_dec()
- {
- Item_real_func::fix_length_and_dec();
- maybe_null= 1;
+ bool fix_length_and_dec()
+ {
+ if (Item_real_func::fix_length_and_dec())
+ return TRUE;
+ maybe_null= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_glength>(thd, mem_root, this); }
};
@@ -571,7 +692,9 @@ public:
Item_func_srid(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "srid"; }
- void fix_length_and_dec() { max_length= 10; maybe_null= 1; }
+ bool fix_length_and_dec() { max_length= 10; maybe_null= 1; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_srid>(thd, mem_root, this); }
};
@@ -586,6 +709,8 @@ public:
Item_func_distance(THD *thd, Item *a, Item *b): Item_real_func(thd, a, b) {}
double val_real();
const char *func_name() const { return "st_distance"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_distance>(thd, mem_root, this); }
};
@@ -600,6 +725,8 @@ public:
const char *func_name() const { return "st_pointonsurface"; }
String *val_str(String *);
Field::geometry_type get_geometry_type() const;
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_pointonsurface>(thd, mem_root, this); }
};
@@ -611,6 +738,12 @@ class Item_func_gis_debug: public Item_int_func
{ null_value= false; }
const char *func_name() const { return "st_gis_debug"; }
longlong val_int();
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_gis_debug>(thd, mem_root, this); }
};
#endif
diff --git a/sql/item_inetfunc.cc b/sql/item_inetfunc.cc
index 4c4dfa4497b..4ea0f544819 100644
--- a/sql/item_inetfunc.cc
+++ b/sql/item_inetfunc.cc
@@ -212,13 +212,13 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer)
IPv4-part differently on different platforms.
*/
-static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
+static bool str_to_ipv4(const char *str, size_t str_length, in_addr *ipv4_address)
{
if (str_length < 7)
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): "
"invalid IPv4 address: too short.",
- str_length, str));
+ (int)str_length, str));
return false;
}
@@ -226,7 +226,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): "
"invalid IPv4 address: too long.",
- str_length, str));
+ (int)str_length, str));
return false;
}
@@ -237,7 +237,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
int dot_count= 0;
char c= 0;
- while (((p - str) < str_length) && *p)
+ while (((p - str) < (int)str_length) && *p)
{
c= *p++;
@@ -249,7 +249,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
"too many characters in a group.",
- str_length, str));
+ (int)str_length, str));
return false;
}
@@ -259,7 +259,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
"invalid byte value.",
- str_length, str));
+ (int)str_length, str));
return false;
}
}
@@ -269,7 +269,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
"too few characters in a group.",
- str_length, str));
+ (int)str_length, str));
return false;
}
@@ -282,7 +282,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
if (dot_count > 3)
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
- "too many dots.", str_length, str));
+ "too many dots.", (int)str_length, str));
return false;
}
}
@@ -290,7 +290,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
"invalid character at pos %d.",
- str_length, str, (int) (p - str)));
+ (int)str_length, str, (int) (p - str)));
return false;
}
}
@@ -298,7 +298,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
if (c == '.')
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
- "ending at '.'.", str_length, str));
+ "ending at '.'.",(int)str_length, str));
return false;
}
@@ -306,14 +306,14 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address)
{
DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
"too few groups.",
- str_length, str));
+ (int)str_length, str));
return false;
}
ipv4_bytes[3]= (unsigned char) byte_value;
DBUG_PRINT("info", ("str_to_ipv4(%.*s): valid IPv4 address: %d.%d.%d.%d",
- str_length, str,
+ (int)str_length, str,
ipv4_bytes[0], ipv4_bytes[1],
ipv4_bytes[2], ipv4_bytes[3]));
return true;
@@ -494,7 +494,7 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
return false;
}
- int bytes_to_move= dst - gap_ptr;
+ int bytes_to_move= (int)(dst - gap_ptr);
for (int i= 1; i <= bytes_to_move; ++i)
{
diff --git a/sql/item_inetfunc.h b/sql/item_inetfunc.h
index eaafd005f91..670dce3da9f 100644
--- a/sql/item_inetfunc.h
+++ b/sql/item_inetfunc.h
@@ -30,13 +30,16 @@ public:
Item_func_inet_aton(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "inet_aton"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals= 0;
max_length= 21;
maybe_null= 1;
unsigned_flag= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_inet_aton>(thd, mem_root, this); }
};
@@ -51,12 +54,15 @@ public:
{ }
String* val_str(String* str);
const char *func_name() const { return "inet_ntoa"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals= 0;
fix_length_and_charset(3 * 8 + 7, default_charset());
maybe_null= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_inet_ntoa>(thd, mem_root, this); }
};
@@ -76,6 +82,7 @@ public:
public:
virtual longlong val_int();
+ bool need_parentheses_in_default() { return false; }
protected:
virtual bool calc_value(const String *arg) = 0;
@@ -117,12 +124,15 @@ public:
virtual const char *func_name() const
{ return "inet6_aton"; }
- virtual void fix_length_and_dec()
+ virtual bool fix_length_and_dec()
{
decimals= 0;
fix_length_and_charset(16, &my_charset_bin);
maybe_null= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_inet6_aton>(thd, mem_root, this); }
protected:
virtual bool calc_value(const String *arg, String *buffer);
@@ -144,7 +154,7 @@ public:
virtual const char *func_name() const
{ return "inet6_ntoa"; }
- virtual void fix_length_and_dec()
+ virtual bool fix_length_and_dec()
{
decimals= 0;
@@ -154,7 +164,10 @@ public:
fix_length_and_charset(8 * 4 + 7, default_charset());
maybe_null= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_inet6_ntoa>(thd, mem_root, this); }
protected:
virtual bool calc_value(const String *arg, String *buffer);
@@ -175,6 +188,8 @@ public:
public:
virtual const char *func_name() const
{ return "is_ipv4"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_is_ipv4>(thd, mem_root, this); }
protected:
virtual bool calc_value(const String *arg);
@@ -195,6 +210,8 @@ public:
public:
virtual const char *func_name() const
{ return "is_ipv6"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_is_ipv6>(thd, mem_root, this); }
protected:
virtual bool calc_value(const String *arg);
@@ -215,6 +232,8 @@ public:
public:
virtual const char *func_name() const
{ return "is_ipv4_compat"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_is_ipv4_compat>(thd, mem_root, this); }
protected:
virtual bool calc_value(const String *arg);
@@ -235,6 +254,8 @@ public:
public:
virtual const char *func_name() const
{ return "is_ipv4_mapped"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_is_ipv4_mapped>(thd, mem_root, this); }
protected:
virtual bool calc_value(const String *arg);
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
new file mode 100644
index 00000000000..e236010c459
--- /dev/null
+++ b/sql/item_jsonfunc.cc
@@ -0,0 +1,3308 @@
+/* Copyright (c) 2016, Monty Program Ab.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+
+#include <my_global.h>
+#include "sql_priv.h"
+#include "sql_class.h"
+#include "item.h"
+
+
+/*
+ Compare ASCII string against the string with the specified
+ character set.
+ Only compares the equality, case insencitive.
+*/
+static bool eq_ascii_string(const CHARSET_INFO *cs,
+ const char *ascii,
+ const char *s, uint32 s_len)
+{
+ const char *s_end= s + s_len;
+
+ while (*ascii && s < s_end)
+ {
+ my_wc_t wc;
+ int wc_len;
+
+ wc_len= cs->cset->mb_wc(cs, &wc, (uchar *) s, (uchar *) s_end);
+ if (wc_len <= 0 || (wc | 0x20) != (my_wc_t) *ascii)
+ return 0;
+
+ ascii++;
+ s+= wc_len;
+ }
+
+ return *ascii == 0 && s >= s_end;
+}
+
+
+static bool append_simple(String *s, const char *a, size_t a_len)
+{
+ if (!s->realloc_with_extra_if_needed(s->length() + a_len))
+ {
+ s->q_append(a, a_len);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+static inline bool append_simple(String *s, const uchar *a, size_t a_len)
+{
+ return append_simple(s, (const char *) a, a_len);
+}
+
+
+/*
+ Appends JSON string to the String object taking charsets in
+ consideration.
+*/
+static int st_append_json(String *s,
+ CHARSET_INFO *json_cs, const uchar *js, uint js_len)
+{
+ int str_len= js_len * s->charset()->mbmaxlen;
+
+ if (!s->reserve(str_len, 1024) &&
+ (str_len= json_unescape(json_cs, js, js + js_len,
+ s->charset(), (uchar *) s->end(), (uchar *) s->end() + str_len)) > 0)
+ {
+ s->length(s->length() + str_len);
+ return 0;
+ }
+
+ return str_len;
+}
+
+
+/*
+ Appends arbitrary String to the JSON string taking charsets in
+ consideration.
+*/
+static int st_append_escaped(String *s, const String *a)
+{
+ /*
+ In the worst case one character from the 'a' string
+ turns into '\uXXXX\uXXXX' which is 12.
+ */
+ int str_len= a->length() * 12 * s->charset()->mbmaxlen /
+ a->charset()->mbminlen;
+ if (!s->reserve(str_len, 1024) &&
+ (str_len=
+ json_escape(a->charset(), (uchar *) a->ptr(), (uchar *)a->end(),
+ s->charset(),
+ (uchar *) s->end(), (uchar *)s->end() + str_len)) > 0)
+ {
+ s->length(s->length() + str_len);
+ return 0;
+ }
+
+ return a->length();
+}
+
+
+static const int TAB_SIZE_LIMIT= 8;
+static const char tab_arr[TAB_SIZE_LIMIT+1]= " ";
+
+static int append_tab(String *js, int depth, int tab_size)
+{
+ if (js->append("\n", 1))
+ return 1;
+ for (int i=0; i<depth; i++)
+ {
+ if (js->append(tab_arr, tab_size))
+ return 1;
+ }
+ return 0;
+}
+
+
+static int json_nice(json_engine_t *je, String *nice_js,
+ Item_func_json_format::formats mode, int tab_size=4)
+{
+ int depth= 0;
+ const char *comma, *colon;
+ uint comma_len, colon_len;
+ int first_value= 1;
+
+ DBUG_ASSERT(je->s.cs == nice_js->charset());
+ DBUG_ASSERT(mode != Item_func_json_format::DETAILED ||
+ (tab_size >= 0 && tab_size <= TAB_SIZE_LIMIT));
+
+ comma= ", ";
+ colon= "\": ";
+ if (mode == Item_func_json_format::LOOSE)
+ {
+ comma_len= 2;
+ colon_len= 3;
+ }
+ else if (mode == Item_func_json_format::DETAILED)
+ {
+ comma_len= 1;
+ colon_len= 3;
+ }
+ else
+ {
+ comma_len= 1;
+ colon_len= 2;
+ }
+
+ do
+ {
+ switch (je->state)
+ {
+ case JST_KEY:
+ {
+ const uchar *key_start= je->s.c_str;
+ const uchar *key_end;
+
+ do
+ {
+ key_end= je->s.c_str;
+ } while (json_read_keyname_chr(je) == 0);
+
+ if (je->s.error)
+ goto error;
+
+ if (!first_value)
+ nice_js->append(comma, comma_len);
+
+ if (mode == Item_func_json_format::DETAILED &&
+ append_tab(nice_js, depth, tab_size))
+ goto error;
+
+ nice_js->append("\"", 1);
+ append_simple(nice_js, key_start, key_end - key_start);
+ nice_js->append(colon, colon_len);
+ }
+ /* now we have key value to handle, so no 'break'. */
+ DBUG_ASSERT(je->state == JST_VALUE);
+ goto handle_value;
+
+ case JST_VALUE:
+ if (!first_value)
+ nice_js->append(comma, comma_len);
+
+ if (mode == Item_func_json_format::DETAILED &&
+ depth > 0 &&
+ append_tab(nice_js, depth, tab_size))
+ goto error;
+
+handle_value:
+ if (json_read_value(je))
+ goto error;
+ if (json_value_scalar(je))
+ {
+ if (append_simple(nice_js, je->value_begin,
+ je->value_end - je->value_begin))
+ goto error;
+
+ first_value= 0;
+ }
+ else
+ {
+ if (mode == Item_func_json_format::DETAILED &&
+ depth > 0 &&
+ append_tab(nice_js, depth, tab_size))
+ goto error;
+ nice_js->append((je->value_type == JSON_VALUE_OBJECT) ? "{" : "[", 1);
+ first_value= 1;
+ depth++;
+ }
+
+ break;
+
+ case JST_OBJ_END:
+ case JST_ARRAY_END:
+ depth--;
+ if (mode == Item_func_json_format::DETAILED &&
+ append_tab(nice_js, depth, tab_size))
+ goto error;
+ nice_js->append((je->state == JST_OBJ_END) ? "}": "]", 1);
+ first_value= 0;
+ break;
+
+ default:
+ break;
+ };
+ } while (json_scan_next(je) == 0);
+
+ return je->s.error;
+
+error:
+ return 1;
+}
+
+
+#define report_json_error(js, je, n_param) \
+ report_json_error_ex(js, je, func_name(), n_param, \
+ Sql_condition::WARN_LEVEL_WARN)
+
+void report_json_error_ex(String *js, json_engine_t *je,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv)
+{
+ THD *thd= current_thd;
+ int position= (int)((const char *) je->s.c_str - js->ptr());
+ uint code;
+
+ n_param++;
+
+ switch (je->s.error)
+ {
+ case JE_BAD_CHR:
+ code= ER_JSON_BAD_CHR;
+ break;
+
+ case JE_NOT_JSON_CHR:
+ code= ER_JSON_NOT_JSON_CHR;
+ break;
+
+ case JE_EOS:
+ code= ER_JSON_EOS;
+ break;
+
+ case JE_SYN:
+ case JE_STRING_CONST:
+ code= ER_JSON_SYNTAX;
+ break;
+
+ case JE_ESCAPING:
+ code= ER_JSON_ESCAPING;
+ break;
+
+ case JE_DEPTH:
+ code= ER_JSON_DEPTH;
+ push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT,
+ n_param, fname, position);
+ return;
+
+ default:
+ return;
+ }
+
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ n_param, fname, position);
+}
+
+
+
+#define NO_WILDCARD_ALLOWED 1
+#define SHOULD_END_WITH_ARRAY 2
+#define TRIVIAL_PATH_NOT_ALLOWED 3
+
+#define report_path_error(js, je, n_param) \
+ report_path_error_ex(js, je, func_name(), n_param,\
+ Sql_condition::WARN_LEVEL_WARN)
+
+static void report_path_error_ex(String *ps, json_path_t *p,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv)
+{
+ THD *thd= current_thd;
+ int position= (int)((const char *) p->s.c_str - ps->ptr() + 1);
+ uint code;
+
+ n_param++;
+
+ switch (p->s.error)
+ {
+ case JE_BAD_CHR:
+ case JE_NOT_JSON_CHR:
+ case JE_SYN:
+ code= ER_JSON_PATH_SYNTAX;
+ break;
+
+ case JE_EOS:
+ code= ER_JSON_PATH_EOS;
+ break;
+
+ case JE_DEPTH:
+ code= ER_JSON_PATH_DEPTH;
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ JSON_DEPTH_LIMIT, n_param, fname, position);
+ return;
+
+ case NO_WILDCARD_ALLOWED:
+ code= ER_JSON_PATH_NO_WILDCARD;
+ break;
+
+ case TRIVIAL_PATH_NOT_ALLOWED:
+ code= ER_JSON_PATH_EMPTY;
+ break;
+
+
+ default:
+ return;
+ }
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ n_param, fname, position);
+}
+
+
+
+/*
+ Checks if the path has '.*' '[*]' or '**' constructions
+ and sets the NO_WILDCARD_ALLOWED error if the case.
+*/
+static int path_setup_nwc(json_path_t *p, CHARSET_INFO *i_cs,
+ const uchar *str, const uchar *end)
+{
+ if (!json_path_setup(p, i_cs, str, end))
+ {
+ if ((p->types_used & (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD)) == 0)
+ return 0;
+ p->s.error= NO_WILDCARD_ALLOWED;
+ }
+
+ return 1;
+}
+
+
+longlong Item_func_json_valid::val_int()
+{
+ String *js= args[0]->val_json(&tmp_value);
+ json_engine_t je;
+
+ if ((null_value= args[0]->null_value))
+ return 0;
+
+ json_scan_start(&je, js->charset(), (const uchar *) js->ptr(),
+ (const uchar *) js->ptr()+js->length());
+
+ while (json_scan_next(&je) == 0) {}
+
+ return je.s.error == 0;
+}
+
+
+bool Item_func_json_exists::fix_length_and_dec()
+{
+ if (Item_int_func::fix_length_and_dec())
+ return TRUE;
+ maybe_null= 1;
+ path.set_constant_flag(args[1]->const_item());
+ return FALSE;
+}
+
+
+longlong Item_func_json_exists::val_int()
+{
+ json_engine_t je;
+ uint array_counters[JSON_DEPTH_LIMIT];
+
+ String *js= args[0]->val_json(&tmp_js);
+
+ if (!path.parsed)
+ {
+ String *s_p= args[1]->val_str(&tmp_path);
+ if (s_p &&
+ json_path_setup(&path.p, s_p->charset(), (const uchar *) s_p->ptr(),
+ (const uchar *) s_p->ptr() + s_p->length()))
+ goto err_return;
+ path.parsed= path.constant;
+ }
+
+ if ((null_value= args[0]->null_value || args[1]->null_value))
+ {
+ null_value= 1;
+ return 0;
+ }
+
+ null_value= 0;
+ json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
+ (const uchar *) js->ptr() + js->length());
+
+ path.cur_step= path.p.steps;
+ if (json_find_path(&je, &path.p, &path.cur_step, array_counters))
+ {
+ if (je.s.error)
+ goto err_return;
+ return 0;
+ }
+
+ return 1;
+
+err_return:
+ null_value= 1;
+ return 0;
+}
+
+
+bool Item_func_json_value::fix_length_and_dec()
+{
+ collation.set(args[0]->collation);
+ max_length= args[0]->max_length;
+ path.set_constant_flag(args[1]->const_item());
+ maybe_null= 1;
+ return FALSE;
+}
+
+
+/*
+ Returns NULL, not an error if the found value
+ is not a scalar.
+*/
+String *Item_func_json_value::val_str(String *str)
+{
+ json_engine_t je;
+ String *js= args[0]->val_json(&tmp_js);
+ int error= 0;
+ uint array_counters[JSON_DEPTH_LIMIT];
+
+ if (!path.parsed)
+ {
+ String *s_p= args[1]->val_str(&tmp_path);
+ if (s_p &&
+ json_path_setup(&path.p, s_p->charset(), (const uchar *) s_p->ptr(),
+ (const uchar *) s_p->ptr() + s_p->length()))
+ goto err_return;
+ path.parsed= path.constant;
+ }
+
+ if ((null_value= args[0]->null_value || args[1]->null_value))
+ return NULL;
+
+ json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
+ (const uchar *) js->ptr() + js->length());
+
+ str->length(0);
+ str->set_charset(collation.collation);
+
+ path.cur_step= path.p.steps;
+continue_search:
+ if (json_find_path(&je, &path.p, &path.cur_step, array_counters))
+ {
+ if (je.s.error)
+ goto err_return;
+
+ null_value= 1;
+ return 0;
+ }
+
+ if (json_read_value(&je))
+ goto err_return;
+
+ if (check_and_get_value(&je, str, &error))
+ {
+ if (error)
+ goto err_return;
+ goto continue_search;
+ }
+
+ return str;
+
+err_return:
+ null_value= 1;
+ return 0;
+}
+
+
+bool Item_func_json_value::check_and_get_value(json_engine_t *je, String *res,
+ int *error)
+{
+ CHARSET_INFO *json_cs;
+ const uchar *js;
+ uint js_len;
+
+ if (!json_value_scalar(je))
+ {
+ /* We only look for scalar values! */
+ if (json_skip_level(je) || json_scan_next(je))
+ *error= 1;
+ return true;
+ }
+
+ if (je->value_type == JSON_VALUE_TRUE ||
+ je->value_type == JSON_VALUE_FALSE)
+ {
+ json_cs= &my_charset_utf8mb4_bin;
+ js= (const uchar *) ((je->value_type == JSON_VALUE_TRUE) ? "1" : "0");
+ js_len= 1;
+ }
+ else
+ {
+ json_cs= je->s.cs;
+ js= je->value;
+ js_len= je->value_len;
+ }
+
+
+ return st_append_json(res, json_cs, js, js_len);
+}
+
+
+bool Item_func_json_query::check_and_get_value(json_engine_t *je, String *res,
+ int *error)
+{
+ const uchar *value;
+ if (json_value_scalar(je))
+ {
+ /* We skip scalar values. */
+ if (json_scan_next(je))
+ *error= 1;
+ return true;
+ }
+
+ value= je->value;
+ if (json_skip_level(je))
+ {
+ *error= 1;
+ return true;
+ }
+
+ res->set((const char *) je->value, (uint32)(je->s.c_str - value), je->s.cs);
+ return false;
+}
+
+
+bool Item_func_json_quote::fix_length_and_dec()
+{
+ collation.set(&my_charset_utf8mb4_bin);
+ /*
+ Odd but realistic worst case is when all characters
+ of the argument turn into '\uXXXX\uXXXX', which is 12.
+ */
+ fix_char_length_ulonglong((ulonglong) args[0]->max_char_length() * 12 + 2);
+ return FALSE;
+}
+
+
+String *Item_func_json_quote::val_str(String *str)
+{
+ String *s= args[0]->val_str(&tmp_s);
+
+ if ((null_value= (args[0]->null_value ||
+ args[0]->result_type() != STRING_RESULT)))
+ return NULL;
+
+ str->length(0);
+ str->set_charset(&my_charset_utf8mb4_bin);
+
+ if (str->append("\"", 1) ||
+ st_append_escaped(str, s) ||
+ str->append("\"", 1))
+ {
+ /* Report an error. */
+ null_value= 1;
+ return 0;
+ }
+
+ return str;
+}
+
+
+bool Item_func_json_unquote::fix_length_and_dec()
+{
+ collation.set(&my_charset_utf8_general_ci,
+ DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
+ max_length= args[0]->max_length;
+ maybe_null= 1;
+ return FALSE;
+}
+
+
+String *Item_func_json_unquote::read_json(json_engine_t *je)
+{
+ String *js= args[0]->val_json(&tmp_s);
+
+ if ((null_value= args[0]->null_value))
+ return 0;
+
+ json_scan_start(je, js->charset(),(const uchar *) js->ptr(),
+ (const uchar *) js->ptr() + js->length());
+
+ je->value_type= (enum json_value_types) -1; /* To report errors right. */
+
+ if (json_read_value(je))
+ goto error;
+
+ return js;
+
+error:
+ if (je->value_type == JSON_VALUE_STRING)
+ report_json_error(js, je, 0);
+ return js;
+}
+
+
+String *Item_func_json_unquote::val_str(String *str)
+{
+ json_engine_t je;
+ int c_len;
+ String *js;
+
+ if (!(js= read_json(&je)))
+ return NULL;
+
+ if (je.s.error || je.value_type != JSON_VALUE_STRING)
+ return js;
+
+ str->length(0);
+ str->set_charset(&my_charset_utf8_general_ci);
+
+ if (str->realloc_with_extra_if_needed(je.value_len) ||
+ (c_len= json_unescape(js->charset(),
+ je.value, je.value + je.value_len,
+ &my_charset_utf8_general_ci,
+ (uchar *) str->ptr(), (uchar *) (str->ptr() + je.value_len))) < 0)
+ goto error;
+
+ str->length(c_len);
+ return str;
+
+error:
+ report_json_error(js, &je, 0);
+ return js;
+}
+
+
+static int alloc_tmp_paths(THD *thd, uint n_paths,
+ json_path_with_flags **paths, String **tmp_paths)
+{
+ if (n_paths > 0)
+ {
+ if (*tmp_paths == 0)
+ {
+ MEM_ROOT *root= thd->stmt_arena->mem_root;
+
+ *paths= (json_path_with_flags *) alloc_root(root,
+ sizeof(json_path_with_flags) * n_paths);
+
+ *tmp_paths= new (root) String[n_paths];
+ if (*paths == 0 || *tmp_paths == 0)
+ return 1;
+
+ for (uint c_path=0; c_path < n_paths; c_path++)
+ (*tmp_paths)[c_path].set_charset(&my_charset_utf8_general_ci);
+ }
+
+ return 0;
+ }
+
+ /* n_paths == 0 */
+ *paths= 0;
+ *tmp_paths= 0;
+ return 0;
+}
+
+
+static void mark_constant_paths(json_path_with_flags *p,
+ Item** args, uint n_args)
+{
+ uint n;
+ for (n= 0; n < n_args; n++)
+ p[n].set_constant_flag(args[n]->const_item());
+}
+
+
+bool Item_json_str_multipath::fix_fields(THD *thd, Item **ref)
+{
+ return alloc_tmp_paths(thd, get_n_paths(), &paths, &tmp_paths) ||
+ Item_str_func::fix_fields(thd, ref);
+}
+
+
+void Item_json_str_multipath::cleanup()
+{
+ if (tmp_paths)
+ {
+ for (uint i= get_n_paths(); i>0; i--)
+ tmp_paths[i-1].free();
+ }
+ Item_str_func::cleanup();
+}
+
+
+bool Item_func_json_extract::fix_length_and_dec()
+{
+ collation.set(args[0]->collation);
+ max_length= args[0]->max_length * (arg_count - 1);
+
+ mark_constant_paths(paths, args+1, arg_count-1);
+ maybe_null= 1;
+ return FALSE;
+}
+
+
+static bool path_exact(const json_path_with_flags *paths_list, int n_paths,
+ const json_path_t *p, json_value_types vt)
+{
+ for (; n_paths > 0; n_paths--, paths_list++)
+ {
+ if (json_path_compare(&paths_list->p, p, vt) == 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+static bool path_ok(const json_path_with_flags *paths_list, int n_paths,
+ const json_path_t *p, json_value_types vt)
+{
+ for (; n_paths > 0; n_paths--, paths_list++)
+ {
+ if (json_path_compare(&paths_list->p, p, vt) >= 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
/*
  Core of JSON_EXTRACT(): scan the JSON document in args[0] and collect
  the values matching any of the paths given in args[1..].

  @param str        output buffer for the (possibly multi-valued) result;
                    if NULL, only the first matching value is located and
                    returned through out_val/value_len
  @param type [out] type of the result; forced to JSON_VALUE_ARRAY when
                    more than one value can match (several paths, or a
                    path with wildcards)
  @param out_val [out]    pointer to the first matched value in the document
  @param value_len [out]  length of that first matched value

  @return  the extracted JSON text (stored in tmp_js),
           or NULL (with null_value set) on error or when nothing matched
*/
String *Item_func_json_extract::read_json(String *str,
                                          json_value_types *type,
                                          char **out_val, int *value_len)
{
  String *js= args[0]->val_json(&tmp_js);
  json_engine_t je, sav_je;
  json_path_t p;
  const uchar *value;
  int not_first_value= 0;
  uint n_arg;
  size_t v_len;
  int possible_multiple_values;

  if ((null_value= args[0]->null_value))
    return 0;

  /* Parse the path arguments; constant paths are parsed only once. */
  for (n_arg=1; n_arg < arg_count; n_arg++)
  {
    json_path_with_flags *c_path= paths + n_arg - 1;
    if (!c_path->parsed)
    {
      String *s_p= args[n_arg]->val_str(tmp_paths + (n_arg-1));
      if (s_p &&
          json_path_setup(&c_path->p,s_p->charset(),(const uchar *) s_p->ptr(),
                          (const uchar *) s_p->ptr() + s_p->length()))
      {
        report_path_error(s_p, &c_path->p, n_arg);
        goto return_null;
      }
      c_path->parsed= c_path->constant;
    }

    if (args[n_arg]->null_value)
      goto return_null;
  }

  /* Several paths, or a wildcard path, can produce multiple values. */
  possible_multiple_values= arg_count > 2 ||
    (paths[0].p.types_used & (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD));

  *type= possible_multiple_values ? JSON_VALUE_ARRAY : JSON_VALUE_NULL;

  if (str)
  {
    str->set_charset(js->charset());
    str->length(0);

    if (possible_multiple_values && str->append("[", 1))
      goto error;
  }

  json_get_path_start(&je, js->charset(),(const uchar *) js->ptr(),
                      (const uchar *) js->ptr() + js->length(), &p);

  while (json_get_path_next(&je, &p) == 0)
  {
    if (!path_exact(paths, arg_count-1, &p, je.value_type))
      continue;

    value= je.value_begin;

    /* Remember the first match - it determines the scalar result type. */
    if (*type == JSON_VALUE_NULL)
    {
      *type= je.value_type;
      *out_val= (char *) je.value;
      *value_len= je.value_len;
    }
    if (!str)
    {
      /* If str is NULL, we only care about the first found value. */
      goto return_ok;
    }

    if (json_value_scalar(&je))
      v_len= je.value_end - value;
    else
    {
      /*
        A structured value: skip to its end to measure it, but save and
        restore the engine state so path scanning can continue from here
        when more matches are possible.
      */
      if (possible_multiple_values)
        sav_je= je;
      if (json_skip_level(&je))
        goto error;
      v_len= je.s.c_str - value;
      if (possible_multiple_values)
        je= sav_je;
    }

    if ((not_first_value && str->append(", ", 2)) ||
        str->append((const char *) value, v_len))
      goto error; /* Out of memory. */

    not_first_value= 1;

    if (!possible_multiple_values)
    {
      /* Loop to the end of the JSON just to make sure it's valid. */
      while (json_get_path_next(&je, &p) == 0) {}
      break;
    }
  }

  if (je.s.error)
    goto error;

  if (!not_first_value)
  {
    /* Nothing was found. */
    goto return_null;
  }

  if (possible_multiple_values && str->append("]", 1))
    goto error; /* Out of memory. */

  /* Reformat the collected result into canonical ("nice") form. */
  js= str;
  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                  (const uchar *) js->ptr() + js->length());
  tmp_js.length(0);
  tmp_js.set_charset(js->charset());
  if (json_nice(&je, &tmp_js, Item_func_json_format::LOOSE))
    goto error;

return_ok:
  return &tmp_js;

error:
  report_json_error(js, &je, 0);
return_null:
  null_value= 1;
  return 0;
}
+
+
+String *Item_func_json_extract::val_str(String *str)
+{
+ json_value_types type;
+ char *value;
+ int value_len;
+ return read_json(str, &type, &value, &value_len);
+}
+
+
+longlong Item_func_json_extract::val_int()
+{
+ json_value_types type;
+ char *value;
+ int value_len;
+ longlong i= 0;
+
+ if (read_json(NULL, &type, &value, &value_len) != NULL)
+ {
+ switch (type)
+ {
+ case JSON_VALUE_NUMBER:
+ case JSON_VALUE_STRING:
+ {
+ char *end;
+ int err;
+ i= my_strntoll(collation.collation, value, value_len, 10, &end, &err);
+ break;
+ }
+ case JSON_VALUE_TRUE:
+ i= 1;
+ break;
+ default:
+ i= 0;
+ break;
+ };
+ }
+ return i;
+}
+
+
+double Item_func_json_extract::val_real()
+{
+ json_value_types type;
+ char *value;
+ int value_len;
+ double d= 0.0;
+
+ if (read_json(NULL, &type, &value, &value_len) != NULL)
+ {
+ switch (type)
+ {
+ case JSON_VALUE_STRING:
+ case JSON_VALUE_NUMBER:
+ {
+ char *end;
+ int err;
+ d= my_strntod(collation.collation, value, value_len, &end, &err);
+ break;
+ }
+ case JSON_VALUE_TRUE:
+ d= 1.0;
+ break;
+ default:
+ break;
+ };
+ }
+
+ return d;
+}
+
+
+bool Item_func_json_contains::fix_length_and_dec()
+{
+ a2_constant= args[1]->const_item();
+ a2_parsed= FALSE;
+ maybe_null= 1;
+ if (arg_count > 2)
+ path.set_constant_flag(args[2]->const_item());
+ return Item_int_func::fix_length_and_dec();
+}
+
+
+static int find_key_in_object(json_engine_t *j, json_string_t *key)
+{
+ const uchar *c_str= key->c_str;
+
+ while (json_scan_next(j) == 0 && j->state != JST_OBJ_END)
+ {
+ DBUG_ASSERT(j->state == JST_KEY);
+ if (json_key_matches(j, key))
+ return TRUE;
+ if (json_skip_key(j))
+ return FALSE;
+ key->c_str= c_str;
+ }
+
+ return FALSE;
+}
+
+
/*
  Recursive worker for JSON_CONTAINS(): check whether the JSON value at
  *value is contained in the JSON value at *js.

  Containment rules implemented below:
  - object contains object when every key of 'value' exists in 'js' and
    the corresponding values recursively contain each other;
  - array contains a non-array when some element of the array contains it;
  - array contains array when every element of 'value' is contained;
  - scalars compare by type (strings byte-wise, numbers numerically).

  Both engines are advanced as a side effect; saved copies (loc_js) are
  used to rescan one side while iterating the other.
*/
static int check_contains(json_engine_t *js, json_engine_t *value)
{
  json_engine_t loc_js;
  bool set_js;

  switch (js->value_type)
  {
  case JSON_VALUE_OBJECT:
    {
      json_string_t key_name;

      if (value->value_type != JSON_VALUE_OBJECT)
        return FALSE;

      loc_js= *js;
      set_js= FALSE;
      json_string_set_cs(&key_name, value->s.cs);
      while (json_scan_next(value) == 0 && value->state != JST_OBJ_END)
      {
        const uchar *k_start, *k_end;

        DBUG_ASSERT(value->state == JST_KEY);
        /* Collect the raw key name of the candidate object. */
        k_start= value->s.c_str;
        do
        {
          k_end= value->s.c_str;
        } while (json_read_keyname_chr(value) == 0);

        if (value->s.error || json_read_value(value))
          return FALSE;

        /* Rescan js from its saved start for every key after the first. */
        if (set_js)
          *js= loc_js;
        else
          set_js= TRUE;

        json_string_set_str(&key_name, k_start, k_end);
        if (!find_key_in_object(js, &key_name) ||
            json_read_value(js) ||
            !check_contains(js, value))
          return FALSE;
      }

      /* Skip the rest of the js object to keep the engine consistent. */
      return value->state == JST_OBJ_END && !json_skip_level(js);
    }
  case JSON_VALUE_ARRAY:
    if (value->value_type != JSON_VALUE_ARRAY)
    {
      /* Array contains the value if any of its elements contains it. */
      loc_js= *value;
      set_js= FALSE;
      while (json_scan_next(js) == 0 && js->state != JST_ARRAY_END)
      {
        int c_level, v_scalar;
        DBUG_ASSERT(js->state == JST_VALUE);
        if (json_read_value(js))
          return FALSE;

        /* Remember the level so a failed match can skip this element. */
        if (!(v_scalar= json_value_scalar(js)))
          c_level= json_get_level(js);

        if (set_js)
          *value= loc_js;
        else
          set_js= TRUE;

        if (check_contains(js, value))
        {
          if (json_skip_level(js))
            return FALSE;
          return TRUE;
        }
        if (value->s.error || js->s.error ||
            (!v_scalar && json_skip_to_level(js, c_level)))
          return FALSE;
      }
      return FALSE;
    }
    /* else */
    /* Array contains array if it contains every element of it. */
    loc_js= *js;
    set_js= FALSE;
    while (json_scan_next(value) == 0 && value->state != JST_ARRAY_END)
    {
      DBUG_ASSERT(value->state == JST_VALUE);
      if (json_read_value(value))
        return FALSE;

      if (set_js)
        *js= loc_js;
      else
        set_js= TRUE;
      if (!check_contains(js, value))
        return FALSE;
    }

    return value->state == JST_ARRAY_END;

  case JSON_VALUE_STRING:
    if (value->value_type != JSON_VALUE_STRING)
      return FALSE;
    /*
      TODO: make proper json-json comparison here that takes escaping
      into account.
    */
    return value->value_len == js->value_len &&
           memcmp(value->value, js->value, value->value_len) == 0;
  case JSON_VALUE_NUMBER:
    if (value->value_type == JSON_VALUE_NUMBER)
    {
      double d_j, d_v;
      char *end;
      int err;

      d_j= my_strntod(js->s.cs, (char *) js->value, js->value_len,
                      &end, &err);;
      d_v= my_strntod(value->s.cs, (char *) value->value, value->value_len,
                      &end, &err);;

      /* Numbers compare with a fixed epsilon tolerance. */
      return (fabs(d_j - d_v) < 1e-12);
    }
    else
      return FALSE;

  default:
    break;
  }

  /*
    We have these not mentioned in the 'switch' above:

    case JSON_VALUE_TRUE:
    case JSON_VALUE_FALSE:
    case JSON_VALUE_NULL:
  */
  return value->value_type == js->value_type;
}
+
+
/*
  JSON_CONTAINS(json, candidate[, path]): return 1 if the candidate value
  is contained in the document (optionally at the given path), 0 if not,
  NULL on parse/path errors or NULL arguments.
*/
longlong Item_func_json_contains::val_int()
{
  String *js= args[0]->val_json(&tmp_js);
  json_engine_t je, ve;
  int result;

  if ((null_value= args[0]->null_value))
    return 0;

  /* The candidate argument is cached when it is constant. */
  if (!a2_parsed)
  {
    val= args[1]->val_json(&tmp_val);
    a2_parsed= a2_constant;
  }

  if (val == 0)
  {
    null_value= 1;
    return 0;
  }

  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                  (const uchar *) js->ptr() + js->length());

  if (arg_count>2) /* Path specified. */
  {
    uint array_counters[JSON_DEPTH_LIMIT];
    if (!path.parsed)
    {
      String *s_p= args[2]->val_str(&tmp_path);
      if (s_p &&
          path_setup_nwc(&path.p,s_p->charset(),(const uchar *) s_p->ptr(),
                         (const uchar *) s_p->end()))
      {
        report_path_error(s_p, &path.p, 2);
        goto return_null;
      }
      path.parsed= path.constant;
    }
    if (args[2]->null_value)
      goto return_null;

    path.cur_step= path.p.steps;
    if (json_find_path(&je, &path.p, &path.cur_step, array_counters))
    {
      if (je.s.error)
      {
        /* ve was never scanned - clear its flag before the shared handler. */
        ve.s.error= 0;
        goto error;
      }

      /* Path not found in the document - not an error, just 'no'. */
      return FALSE;
    }
  }

  json_scan_start(&ve, val->charset(),(const uchar *) val->ptr(),
                  (const uchar *) val->end());

  if (json_read_value(&je) || json_read_value(&ve))
    goto error;

  result= check_contains(&je, &ve);
  if (je.s.error || ve.s.error)
    goto error;

  return result;

error:
  if (je.s.error)
    report_json_error(js, &je, 0);
  if (ve.s.error)
    report_json_error(val, &ve, 1);
return_null:
  null_value= 1;
  return 0;
}
+
+
+bool Item_func_json_contains_path::fix_fields(THD *thd, Item **ref)
+{
+ return alloc_tmp_paths(thd, arg_count-2, &paths, &tmp_paths) ||
+ (p_found= (bool *) alloc_root(thd->mem_root,
+ (arg_count-2)*sizeof(bool))) == NULL ||
+ Item_int_func::fix_fields(thd, ref);
+}
+
+
+bool Item_func_json_contains_path::fix_length_and_dec()
+{
+ ooa_constant= args[1]->const_item();
+ ooa_parsed= FALSE;
+ maybe_null= 1;
+ mark_constant_paths(paths, args+2, arg_count-2);
+ return Item_int_func::fix_length_and_dec();
+}
+
+
+void Item_func_json_contains_path::cleanup()
+{
+ if (tmp_paths)
+ {
+ for (uint i= arg_count-2; i>0; i--)
+ tmp_paths[i-1].free();
+ tmp_paths= 0;
+ }
+ Item_int_func::cleanup();
+}
+
+
/*
  Parse the 'one_or_all' argument of JSON_CONTAINS_PATH()/JSON_SEARCH().

  Accepts only "one" or "all" (case-insensitive ASCII); any other value
  raises an ER_JSON_ONE_OR_ALL warning and is treated as an error.
  The result is cached (*ooa_parsed) when the argument is constant.

  @param f            calling function - used only for the warning text
  @param ooa_arg      the argument item to evaluate
  @param ooa_parsed   [in/out] cache-valid flag
  @param ooa_constant whether the argument is constant
  @param mode_one     [out] TRUE for "one", FALSE for "all"

  @return TRUE on NULL/invalid argument, FALSE on success.
*/
static int parse_one_or_all(const Item_func *f, Item *ooa_arg,
                            bool *ooa_parsed, bool ooa_constant, bool *mode_one)
{
  if (!*ooa_parsed)
  {
    char buff[20];
    String *res, tmp(buff, sizeof(buff), &my_charset_bin);
    if ((res= ooa_arg->val_str(&tmp)) == NULL)
      return TRUE;

    *mode_one=eq_ascii_string(res->charset(), "one",
                              res->ptr(), res->length());
    if (!*mode_one)
    {
      if (!eq_ascii_string(res->charset(), "all", res->ptr(), res->length()))
      {
        THD *thd= current_thd;
        push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
                            ER_JSON_ONE_OR_ALL, ER_THD(thd, ER_JSON_ONE_OR_ALL),
                            f->func_name());
        *mode_one= TRUE;
        return TRUE;
      }
    }
    *ooa_parsed= ooa_constant;
  }
  return FALSE;
}
+
+
+#ifdef DUMMY
/*
  NOTE: this whole definition is compiled out (guarded by #ifdef DUMMY);
  it is an earlier implementation that ran json_find_path() once per
  path argument, superseded by the single-scan version below.

  NOTE(review): this copy passes n_arg-2 to report_path_error() while
  the live version passes n_arg - harmless while dead, but worth
  aligning if this code is ever revived.
*/
longlong Item_func_json_contains_path::val_int()
{
  String *js= args[0]->val_json(&tmp_js);
  json_engine_t je;
  uint n_arg;
  longlong result;

  if ((null_value= args[0]->null_value))
    return 0;

  if (parse_one_or_all(this, args[1], &ooa_parsed, ooa_constant, &mode_one))
    goto return_null;

  /* 'all' starts optimistic (1), 'one' starts pessimistic (0). */
  result= !mode_one;
  for (n_arg=2; n_arg < arg_count; n_arg++)
  {
    uint array_counters[JSON_DEPTH_LIMIT];
    json_path_with_flags *c_path= paths + n_arg - 2;
    if (!c_path->parsed)
    {
      String *s_p= args[n_arg]->val_str(tmp_paths+(n_arg-2));
      if (s_p &&
          json_path_setup(&c_path->p,s_p->charset(),(const uchar *) s_p->ptr(),
                          (const uchar *) s_p->ptr() + s_p->length()))
      {
        report_path_error(s_p, &c_path->p, n_arg-2);
        goto return_null;
      }
      c_path->parsed= c_path->constant;
    }

    if (args[n_arg]->null_value)
      goto return_null;

    json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                    (const uchar *) js->ptr() + js->length());

    c_path->cur_step= c_path->p.steps;
    if (json_find_path(&je, &c_path->p, &c_path->cur_step, array_counters))
    {
      /* Path wasn't found. */
      if (je.s.error)
        goto js_error;

      if (!mode_one)
      {
        result= 0;
        break;
      }
    }
    else if (mode_one)
    {
      result= 1;
      break;
    }
  }


  return result;

js_error:
  report_json_error(js, &je, 0);
return_null:
  null_value= 1;
  return 0;
}
+#endif /*DUMMY*/
+
/*
  JSON_CONTAINS_PATH(json, one_or_all, path[, path...]): single scan of
  the document, comparing every encountered path against all path
  arguments.  In 'one' mode, any match makes the result 1; in 'all'
  mode, every path must be matched at least once (tracked in p_found).
*/
longlong Item_func_json_contains_path::val_int()
{
  String *js= args[0]->val_json(&tmp_js);
  json_engine_t je;
  uint n_arg;
  longlong result;
  json_path_t p;
  int n_found;

  if ((null_value= args[0]->null_value))
    return 0;

  if (parse_one_or_all(this, args[1], &ooa_parsed, ooa_constant, &mode_one))
    goto null_return;;

  /* Parse the path arguments; constant paths are parsed only once. */
  for (n_arg=2; n_arg < arg_count; n_arg++)
  {
    json_path_with_flags *c_path= paths + n_arg - 2;
    if (!c_path->parsed)
    {
      String *s_p= args[n_arg]->val_str(tmp_paths + (n_arg-2));
      if (s_p &&
          json_path_setup(&c_path->p,s_p->charset(),(const uchar *) s_p->ptr(),
                          (const uchar *) s_p->ptr() + s_p->length()))
      {
        report_path_error(s_p, &c_path->p, n_arg);
        goto null_return;
      }
      c_path->parsed= c_path->constant;
    }
    if (args[n_arg]->null_value)
      goto null_return;
  }

  json_get_path_start(&je, js->charset(),(const uchar *) js->ptr(),
                      (const uchar *) js->ptr() + js->length(), &p);


  if (!mode_one)
  {
    /* 'all' mode: count down the paths still missing. */
    bzero(p_found, (arg_count-2) * sizeof(bool));
    n_found= arg_count - 2;
  }
  else
    n_found= 0; /* Just to prevent 'uninitialized value' warnings */

  result= 0;
  while (json_get_path_next(&je, &p) == 0)
  {
    int n_path= arg_count - 2;
    json_path_with_flags *c_path= paths;
    for (; n_path > 0; n_path--, c_path++)
    {
      if (json_path_compare(&c_path->p, &p, je.value_type) >= 0)
      {
        if (mode_one)
        {
          result= 1;
          break;
        }
        /* mode_all */
        if (p_found[n_path-1])
          continue; /* already found */
        if (--n_found == 0)
        {
          result= 1;
          break;
        }
        p_found[n_path-1]= TRUE;
      }
    }
  }

  if (je.s.error == 0)
    return result;

  report_json_error(js, &je, 0);
null_return:
  null_value= 1;
  return 0;
}
+
+
+static int append_json_value(String *str, Item *item, String *tmp_val)
+{
+ if (item->is_bool_type())
+ {
+ longlong v_int= item->val_int();
+ const char *t_f;
+ int t_f_len;
+
+ if (item->null_value)
+ goto append_null;
+
+ if (v_int)
+ {
+ t_f= "true";
+ t_f_len= 4;
+ }
+ else
+ {
+ t_f= "false";
+ t_f_len= 5;
+ }
+
+ return str->append(t_f, t_f_len);
+ }
+ {
+ String *sv= item->val_json(tmp_val);
+ if (item->null_value)
+ goto append_null;
+ if (item->is_json_type())
+ return str->append(sv->ptr(), sv->length());
+
+ if (item->result_type() == STRING_RESULT)
+ {
+ return str->append("\"", 1) ||
+ st_append_escaped(str, sv) ||
+ str->append("\"", 1);
+ }
+ return st_append_escaped(str, sv);
+ }
+
+append_null:
+ return str->append("null", 4);
+}
+
+
+static int append_json_keyname(String *str, Item *item, String *tmp_val)
+{
+ String *sv= item->val_str(tmp_val);
+ if (item->null_value)
+ goto append_null;
+
+ return str->append("\"", 1) ||
+ st_append_escaped(str, sv) ||
+ str->append("\": ", 3);
+
+append_null:
+ return str->append("\"\": ", 4);
+}
+
+
+bool Item_func_json_array::fix_length_and_dec()
+{
+ ulonglong char_length= 2;
+ uint n_arg;
+
+ result_limit= 0;
+
+ if (arg_count == 0)
+ {
+ THD* thd= current_thd;
+ collation.set(thd->variables.collation_connection,
+ DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
+ tmp_val.set_charset(thd->variables.collation_connection);
+ max_length= 2;
+ return FALSE;
+ }
+
+ if (agg_arg_charsets_for_string_result(collation, args, arg_count))
+ return TRUE;
+
+ for (n_arg=0 ; n_arg < arg_count ; n_arg++)
+ char_length+= args[n_arg]->max_char_length() + 4;
+
+ fix_char_length_ulonglong(char_length);
+ tmp_val.set_charset(collation.collation);
+ return FALSE;
+}
+
+
/*
  JSON_ARRAY(...): build "[v1, v2, ...]" from the arguments.
  Returns NULL (null_value set) on out-of-memory or when the result
  exceeds max_allowed_packet (after emitting a warning - note the
  deliberate fallthrough into err_return below).
*/
String *Item_func_json_array::val_str(String *str)
{
  DBUG_ASSERT(fixed == 1);
  uint n_arg;

  str->length(0);
  str->set_charset(collation.collation);

  if (str->append("[", 1) ||
      ((arg_count > 0) && append_json_value(str, args[0], &tmp_val)))
    goto err_return;

  for (n_arg=1; n_arg < arg_count; n_arg++)
  {
    if (str->append(", ", 2) ||
        append_json_value(str, args[n_arg], &tmp_val))
      goto err_return;
  }

  if (str->append("]", 1))
    goto err_return;

  if (result_limit == 0)
    result_limit= current_thd->variables.max_allowed_packet;

  if (str->length() <= result_limit)
    return str;

  /* Result too big: warn, then fall through to the NULL return. */
  push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
                      ER_WARN_ALLOWED_PACKET_OVERFLOWED,
                      ER_THD(current_thd, ER_WARN_ALLOWED_PACKET_OVERFLOWED),
                      func_name(), result_limit);

err_return:
  /*TODO: Launch out of memory error. */
  null_value= 1;
  return NULL;
}
+
+
+bool Item_func_json_array_append::fix_length_and_dec()
+{
+ uint n_arg;
+ ulonglong char_length;
+
+ collation.set(args[0]->collation);
+ char_length= args[0]->max_char_length();
+
+ for (n_arg= 1; n_arg < arg_count; n_arg+= 2)
+ {
+ paths[n_arg/2].set_constant_flag(args[n_arg]->const_item());
+ char_length+= args[n_arg/2+1]->max_char_length() + 4;
+ }
+
+ fix_char_length_ulonglong(char_length);
+ maybe_null= 1;
+ return FALSE;
+}
+
+
/*
  JSON_ARRAY_APPEND(json, path, value[, path, value...]): for each pair,
  find the path, and append the value to the array found there; a
  non-array value at the path is auto-wrapped into an array first.
  After each pair the roles of 'str' and tmp_js are swapped so the next
  iteration reads the just-built document.  The final document is
  reformatted with json_nice().
*/
String *Item_func_json_array_append::val_str(String *str)
{
  json_engine_t je;
  String *js= args[0]->val_json(&tmp_js);
  uint n_arg, n_path;
  size_t str_rest_len;
  const uchar *ar_end;

  DBUG_ASSERT(fixed == 1);

  if ((null_value= args[0]->null_value))
    return 0;

  for (n_arg=1, n_path=0; n_arg < arg_count; n_arg+=2, n_path++)
  {
    uint array_counters[JSON_DEPTH_LIMIT];
    json_path_with_flags *c_path= paths + n_path;
    if (!c_path->parsed)
    {
      String *s_p= args[n_arg]->val_str(tmp_paths+n_path);
      if (s_p &&
          path_setup_nwc(&c_path->p,s_p->charset(),(const uchar *) s_p->ptr(),
                         (const uchar *) s_p->ptr() + s_p->length()))
      {
        report_path_error(s_p, &c_path->p, n_arg);
        goto return_null;
      }
      c_path->parsed= c_path->constant;
    }
    if (args[n_arg]->null_value)
      goto return_null;

    json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                    (const uchar *) js->ptr() + js->length());

    c_path->cur_step= c_path->p.steps;

    if (json_find_path(&je, &c_path->p, &c_path->cur_step, array_counters))
    {
      if (je.s.error)
        goto js_error;

      /* Path not found in the document - result is NULL. */
      goto return_null;
    }

    if (json_read_value(&je))
      goto js_error;

    str->length(0);
    str->set_charset(js->charset());
    if (str->reserve(js->length() + 8, 1024))
      goto return_null; /* Out of memory. */

    if (je.value_type == JSON_VALUE_ARRAY)
    {
      int n_items;
      if (json_skip_level_and_count(&je, &n_items))
        goto js_error;

      /* ar_end points just before the array's closing bracket. */
      ar_end= je.s.c_str - je.sav_c_len;
      str_rest_len= js->length() - (ar_end - (const uchar *) js->ptr());
      str->q_append(js->ptr(), ar_end-(const uchar *) js->ptr());
      if (n_items)
        str->append(", ", 2);
      if (append_json_value(str, args[n_arg+1], &tmp_val))
        goto return_null; /* Out of memory. */

      if (str->reserve(str_rest_len, 1024))
        goto return_null; /* Out of memory. */
      str->q_append((const char *) ar_end, str_rest_len);
    }
    else
    {
      const uchar *c_from, *c_to;

      /* Wrap as an array. */
      str->q_append(js->ptr(), (const char *) je.value_begin - js->ptr());
      c_from= je.value_begin;

      if (je.value_type == JSON_VALUE_OBJECT)
      {
        if (json_skip_level(&je))
          goto js_error;
        c_to= je.s.c_str;
      }
      else
        c_to= je.value_end;

      if (str->append("[", 1) ||
          str->append((const char *) c_from, c_to - c_from) ||
          str->append(", ", 2) ||
          append_json_value(str, args[n_arg+1], &tmp_val) ||
          str->append("]", 1) ||
          str->append((const char *) je.s.c_str,
                      js->end() - (const char *) je.s.c_str))
        goto return_null; /* Out of memory. */
    }
    {
      /* Swap str and js. */
      if (str == &tmp_js)
      {
        str= js;
        js= &tmp_js;
      }
      else
      {
        js= str;
        str= &tmp_js;
      }
    }
  }

  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                  (const uchar *) js->ptr() + js->length());
  str->length(0);
  str->set_charset(js->charset());
  if (json_nice(&je, str, Item_func_json_format::LOOSE))
    goto js_error;

  return str;

js_error:
  report_json_error(js, &je, 0);

return_null:
  null_value= 1;
  return 0;
}
+
+
/*
  JSON_ARRAY_INSERT(json, path, value[, path, value...]): insert each
  value before the array element named by the last (array) step of its
  path.  Paths that don't end with an array step are an error; paths
  that don't resolve, or resolve to a non-array, are silently skipped.
  As in JSON_ARRAY_APPEND, 'str' and tmp_js are swapped between pairs,
  and the final document is reformatted with json_nice().
*/
String *Item_func_json_array_insert::val_str(String *str)
{
  json_engine_t je;
  String *js= args[0]->val_json(&tmp_js);
  uint n_arg, n_path;

  DBUG_ASSERT(fixed == 1);

  if ((null_value= args[0]->null_value))
    return 0;

  for (n_arg=1, n_path=0; n_arg < arg_count; n_arg+=2, n_path++)
  {
    uint array_counters[JSON_DEPTH_LIMIT];
    json_path_with_flags *c_path= paths + n_path;
    const char *item_pos;
    uint n_item;

    if (!c_path->parsed)
    {
      String *s_p= args[n_arg]->val_str(tmp_paths+n_path);
      /* The path must have steps and end with an array step. */
      if (s_p &&
          (path_setup_nwc(&c_path->p,s_p->charset(),(const uchar *) s_p->ptr(),
                          (const uchar *) s_p->ptr() + s_p->length()) ||
           c_path->p.last_step - 1 < c_path->p.steps ||
           c_path->p.last_step->type != JSON_PATH_ARRAY))
      {
        if (c_path->p.s.error == 0)
          c_path->p.s.error= SHOULD_END_WITH_ARRAY;

        report_path_error(s_p, &c_path->p, n_arg);

        goto return_null;
      }
      c_path->parsed= c_path->constant;
      /* Search up to the parent array; the last step gives the index. */
      c_path->p.last_step--;
    }
    if (args[n_arg]->null_value)
      goto return_null;

    json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                    (const uchar *) js->ptr() + js->length());

    c_path->cur_step= c_path->p.steps;

    if (json_find_path(&je, &c_path->p, &c_path->cur_step, array_counters))
    {
      if (je.s.error)
        goto js_error;

      /* Can't find the array to insert. */
      continue;
    }

    if (json_read_value(&je))
      goto js_error;

    if (je.value_type != JSON_VALUE_ARRAY)
    {
      /* Must be an array. */
      continue;
    }

    /* Locate the element at the requested index (if present). */
    item_pos= 0;
    n_item= 0;

    while (json_scan_next(&je) == 0 && je.state != JST_ARRAY_END)
    {
      DBUG_ASSERT(je.state == JST_VALUE);
      if (n_item == c_path->p.last_step[1].n_item)
      {
        item_pos= (const char *) je.s.c_str;
        break;
      }
      n_item++;

      if (json_read_value(&je) ||
          (!json_value_scalar(&je) && json_skip_level(&je)))
        goto js_error;
    }

    if (je.s.error)
      goto js_error;

    str->length(0);
    str->set_charset(js->charset());
    if (item_pos)
    {
      /* Insert before the found element. */
      if (append_simple(str, js->ptr(), item_pos - js->ptr()) ||
          (n_item > 0 && str->append(" ", 1)) ||
          append_json_value(str, args[n_arg+1], &tmp_val) ||
          str->append(",", 1) ||
          (n_item == 0 && str->append(" ", 1)) ||
          append_simple(str, item_pos, js->end() - item_pos))
        goto return_null; /* Out of memory. */
    }
    else
    {
      /* Insert position wasn't found - append to the array. */
      DBUG_ASSERT(je.state == JST_ARRAY_END);
      item_pos= (const char *) (je.s.c_str - je.sav_c_len);
      if (append_simple(str, js->ptr(), item_pos - js->ptr()) ||
          (n_item > 0 && str->append(", ", 2)) ||
          append_json_value(str, args[n_arg+1], &tmp_val) ||
          append_simple(str, item_pos, js->end() - item_pos))
        goto return_null; /* Out of memory. */
    }

    {
      /* Swap str and js. */
      if (str == &tmp_js)
      {
        str= js;
        js= &tmp_js;
      }
      else
      {
        js= str;
        str= &tmp_js;
      }
    }
  }

  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                  (const uchar *) js->ptr() + js->length());
  str->length(0);
  str->set_charset(js->charset());
  if (json_nice(&je, str, Item_func_json_format::LOOSE))
    goto js_error;

  return str;

js_error:
  report_json_error(js, &je, 0);
return_null:
  null_value= 1;
  return 0;
}
+
+
/*
  JSON_OBJECT(k1, v1, k2, v2, ...): build "{"k1": v1, ...}" from the
  key/value argument pairs (arg_count is expected to be even).
  Returns NULL (null_value set) on out-of-memory or when the result
  exceeds max_allowed_packet (after emitting a warning - note the
  deliberate fallthrough into err_return).
*/
String *Item_func_json_object::val_str(String *str)
{
  DBUG_ASSERT(fixed == 1);
  uint n_arg;

  str->length(0);
  str->set_charset(collation.collation);

  if (str->append("{", 1) ||
      (arg_count > 0 &&
       (append_json_keyname(str, args[0], &tmp_val) ||
        append_json_value(str, args[1], &tmp_val))))
    goto err_return;

  for (n_arg=2; n_arg < arg_count; n_arg+=2)
  {
    if (str->append(", ", 2) ||
        append_json_keyname(str, args[n_arg], &tmp_val) ||
        append_json_value(str, args[n_arg+1], &tmp_val))
      goto err_return;
  }

  if (str->append("}", 1))
    goto err_return;

  if (result_limit == 0)
    result_limit= current_thd->variables.max_allowed_packet;

  if (str->length() <= result_limit)
    return str;

  /* Result too big: warn, then fall through to the NULL return. */
  push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
                      ER_WARN_ALLOWED_PACKET_OVERFLOWED,
                      ER_THD(current_thd, ER_WARN_ALLOWED_PACKET_OVERFLOWED),
                      func_name(), result_limit);

err_return:
  /*TODO: Launch out of memory error. */
  null_value= 1;
  return NULL;
}
+
+
/*
  Recursive worker for JSON_MERGE(): merge the next value of je1 with
  the next value of je2, appending the merged text to str.

  Rules: object + object merges key-wise (same key -> recursive merge);
  anything else is merged as a single array, auto-wrapping non-array
  operands.

  @return 0  success
          1  parse error in je1
          2  parse error in je2
          3  out of memory appending to str
*/
static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2)
{
  if (json_read_value(je1) || json_read_value(je2))
    return 1;

  if (je1->value_type == JSON_VALUE_OBJECT &&
      je2->value_type == JSON_VALUE_OBJECT)
  {
    /* Saved states let us rescan each object multiple times. */
    json_engine_t sav_je1= *je1;
    json_engine_t sav_je2= *je2;

    int first_key= 1;
    json_string_t key_name;

    json_string_set_cs(&key_name, je1->s.cs);

    if (str->append("{", 1))
      return 3;
    while (json_scan_next(je1) == 0 &&
           je1->state != JST_OBJ_END)
    {
      const uchar *key_start, *key_end;
      /* Loop through the Json_1 keys and compare with the Json_2 keys. */
      DBUG_ASSERT(je1->state == JST_KEY);
      key_start= je1->s.c_str;
      do
      {
        key_end= je1->s.c_str;
      } while (json_read_keyname_chr(je1) == 0);

      if (je1->s.error)
        return 1;

      if (first_key)
        first_key= 0;
      else
      {
        if (str->append(", ", 2))
          return 3;
        *je2= sav_je2;
      }

      if (str->append("\"", 1) ||
          append_simple(str, key_start, key_end - key_start) ||
          str->append("\":", 2))
        return 3;

      while (json_scan_next(je2) == 0 &&
             je2->state != JST_OBJ_END)
      {
        int ires;
        DBUG_ASSERT(je2->state == JST_KEY);
        json_string_set_str(&key_name, key_start, key_end);
        if (!json_key_matches(je2, &key_name))
        {
          if (je2->s.error || json_skip_key(je2))
            return 2;
          continue;
        }

        /* Json_2 has same key as Json_1. Merge them. */
        if ((ires= do_merge(str, je1, je2)))
          return ires;
        goto merged_j1;
      }
      if (je2->s.error)
        return 2;

      key_start= je1->s.c_str;
      /* Just append the Json_1 key value. */
      if (json_skip_key(je1))
        return 1;
      if (append_simple(str, key_start, je1->s.c_str - key_start))
        return 3;

merged_j1:
      continue;
    }

    *je2= sav_je2;
    /*
      Now loop through the Json_2 keys.
      Skip if there is same key in Json_1
    */
    while (json_scan_next(je2) == 0 &&
           je2->state != JST_OBJ_END)
    {
      const uchar *key_start, *key_end;
      DBUG_ASSERT(je2->state == JST_KEY);
      key_start= je2->s.c_str;
      do
      {
        key_end= je2->s.c_str;
      } while (json_read_keyname_chr(je2) == 0);

      if (je2->s.error)
        return 1;

      *je1= sav_je1;
      while (json_scan_next(je1) == 0 &&
             je1->state != JST_OBJ_END)
      {
        DBUG_ASSERT(je1->state == JST_KEY);
        json_string_set_str(&key_name, key_start, key_end);
        if (!json_key_matches(je1, &key_name))
        {
          if (je1->s.error || json_skip_key(je1))
            return 2;
          continue;
        }
        /* Key exists in Json_1 - already merged above; skip it here. */
        if (json_skip_key(je2) ||
            json_skip_level(je1))
          return 1;
        goto continue_j2;
      }

      if (je1->s.error)
        return 2;

      if (first_key)
        first_key= 0;
      else if (str->append(", ", 2))
        return 3;

      if (json_skip_key(je2))
        return 1;

      if (str->append("\"", 1) ||
          append_simple(str, key_start, je2->s.c_str - key_start))
        return 3;

continue_j2:
      continue;
    }

    if (str->append("}", 1))
      return 3;
  }
  else
  {
    const uchar *end1, *beg1, *end2, *beg2;
    int n_items1=1, n_items2= 1;

    beg1= je1->value_begin;

    /* Merge as a single array. */
    if (je1->value_type == JSON_VALUE_ARRAY)
    {
      if (json_skip_level_and_count(je1, &n_items1))
        return 1;

      /* Copy the array without its closing bracket. */
      end1= je1->s.c_str - je1->sav_c_len;
    }
    else
    {
      if (str->append("[", 1))
        return 3;
      if (je1->value_type == JSON_VALUE_OBJECT)
      {
        if (json_skip_level(je1))
          return 1;
        end1= je1->s.c_str;
      }
      else
        end1= je1->value_end;
    }

    if (str->append((const char*) beg1, end1 - beg1))
      return 3;

    if (json_value_scalar(je2))
    {
      beg2= je2->value_begin;
      end2= je2->value_end;
    }
    else
    {
      if (je2->value_type == JSON_VALUE_OBJECT)
      {
        beg2= je2->value_begin;
        if (json_skip_level(je2))
          return 2;
      }
      else
      {
        /* Array: copy its contents (including the closing bracket). */
        beg2= je2->s.c_str;
        if (json_skip_level_and_count(je2, &n_items2))
          return 2;
      }
      end2= je2->s.c_str;
    }

    if ((n_items1 && n_items2 && str->append(", ", 2)) ||
        str->append((const char*) beg2, end2 - beg2))
      return 3;

    /* An array je2 already supplied the closing bracket above. */
    if (je2->value_type != JSON_VALUE_ARRAY &&
        str->append("]", 1))
      return 3;
  }

  return 0;
}
+
+
/*
  JSON_MERGE(json, json[, json...]): fold do_merge() left to right over
  the arguments, swapping str and tmp_js1 between iterations so each
  step reads the previous result, then reformat with json_nice().
  Returns NULL on any NULL argument or parse error.
*/
String *Item_func_json_merge::val_str(String *str)
{
  DBUG_ASSERT(fixed == 1);
  json_engine_t je1, je2;
  String *js1= args[0]->val_json(&tmp_js1), *js2=NULL;
  uint n_arg;

  if (args[0]->null_value)
    goto null_return;

  for (n_arg=1; n_arg < arg_count; n_arg++)
  {
    str->set_charset(js1->charset());
    str->length(0);

    js2= args[n_arg]->val_json(&tmp_js2);
    if (args[n_arg]->null_value)
      goto null_return;

    json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(),
                    (const uchar *) js1->ptr() + js1->length());

    json_scan_start(&je2, js2->charset(),(const uchar *) js2->ptr(),
                    (const uchar *) js2->ptr() + js2->length());

    if (do_merge(str, &je1, &je2))
      goto error_return;

    {
      /* Swap str and js1. */
      if (str == &tmp_js1)
      {
        str= js1;
        js1= &tmp_js1;
      }
      else
      {
        js1= str;
        str= &tmp_js1;
      }
    }
  }

  json_scan_start(&je1, js1->charset(),(const uchar *) js1->ptr(),
                  (const uchar *) js1->ptr() + js1->length());
  str->length(0);
  str->set_charset(js1->charset());
  if (json_nice(&je1, str, Item_func_json_format::LOOSE))
    goto error_return;

  null_value= 0;
  return str;

error_return:
  if (je1.s.error)
    report_json_error(js1, &je1, 0);
  if (je2.s.error)
    report_json_error(js2, &je2, n_arg);
null_return:
  null_value= 1;
  return NULL;
}
+
+
+bool Item_func_json_length::fix_length_and_dec()
+{
+ if (arg_count > 1)
+ path.set_constant_flag(args[1]->const_item());
+ maybe_null= 1;
+ return FALSE;
+}
+
+
/*
  JSON_LENGTH(json[, path]): number of members of the value at the
  (optional) path - 1 for a scalar, element count for arrays, key count
  for objects (nested containers count as one).  NULL on errors, NULL
  arguments, or a path that doesn't resolve.
*/
longlong Item_func_json_length::val_int()
{
  String *js= args[0]->val_json(&tmp_js);
  json_engine_t je;
  uint length= 0;
  uint array_counters[JSON_DEPTH_LIMIT];
  int err;

  if ((null_value= args[0]->null_value))
    return 0;

  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                  (const uchar *) js->ptr() + js->length());

  if (arg_count > 1)
  {
    /* Path specified - let's apply it. */
    if (!path.parsed)
    {
      String *s_p= args[1]->val_str(&tmp_path);
      if (s_p &&
          path_setup_nwc(&path.p, s_p->charset(), (const uchar *) s_p->ptr(),
                         (const uchar *) s_p->ptr() + s_p->length()))
      {
        report_path_error(s_p, &path.p, 1);
        goto null_return;
      }
      path.parsed= path.constant;
    }
    if (args[1]->null_value)
      goto null_return;

    path.cur_step= path.p.steps;
    if (json_find_path(&je, &path.p, &path.cur_step, array_counters))
    {
      if (je.s.error)
        goto err_return;
      goto null_return;
    }
  }


  if (json_read_value(&je))
    goto err_return;

  if (json_value_scalar(&je))
    return 1;

  /* Count direct members; skip over nested containers. */
  while (!(err= json_scan_next(&je)) &&
         je.state != JST_OBJ_END && je.state != JST_ARRAY_END)
  {
    switch (je.state)
    {
    case JST_VALUE:
    case JST_KEY:
      length++;
      break;
    case JST_OBJ_START:
    case JST_ARRAY_START:
      if (json_skip_level(&je))
        goto err_return;
      break;
    default:
      break;
    };
  }

  if (!err)
  {
    /* Parse to the end of the JSON just to check it's valid. */
    while (json_scan_next(&je) == 0) {}
  }

  if (!je.s.error)
    return length;

err_return:
  report_json_error(js, &je, 0);
null_return:
  null_value= 1;
  return 0;
}
+
+
/*
  JSON_DEPTH(json): maximum nesting depth of the document.  c_depth
  tracks the current depth; inc_depth marks that a container was just
  opened, so the next value/key (or an immediate close of an empty
  container) decides whether the depth actually grows.
*/
longlong Item_func_json_depth::val_int()
{
  String *js= args[0]->val_json(&tmp_js);
  json_engine_t je;
  uint depth= 0, c_depth= 0;
  bool inc_depth= TRUE;

  if ((null_value= args[0]->null_value))
    return 0;


  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                  (const uchar *) js->ptr() + js->length());

  do
  {
    switch (je.state)
    {
    case JST_VALUE:
    case JST_KEY:
      if (inc_depth)
      {
        c_depth++;
        inc_depth= FALSE;
        if (c_depth > depth)
          depth= c_depth;
      }
      break;
    case JST_OBJ_START:
    case JST_ARRAY_START:
      inc_depth= TRUE;
      break;
    case JST_OBJ_END:
    case JST_ARRAY_END:
      /* An empty container (inc_depth still set) never grew the depth. */
      if (!inc_depth)
        c_depth--;
      inc_depth= FALSE;
      break;
    default:
      break;
    }
  } while (json_scan_next(&je) == 0);

  if (!je.s.error)
    return depth;

  report_json_error(js, &je, 0);
  null_value= 1;
  return 0;
}
+
+
+bool Item_func_json_type::fix_length_and_dec()
+{
+ collation.set(&my_charset_utf8_general_ci);
+ max_length= 12;
+ maybe_null= 1;
+ return FALSE;
+}
+
+
/*
  JSON_TYPE(json): name of the top-level value's type - OBJECT, ARRAY,
  STRING, DOUBLE/INTEGER (depending on a fractional part), BOOLEAN, or
  NULL.  Returns SQL NULL on a parse error.
*/
String *Item_func_json_type::val_str(String *str)
{
  String *js= args[0]->val_json(&tmp_js);
  json_engine_t je;
  const char *type;

  if ((null_value= args[0]->null_value))
    return 0;


  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
                  (const uchar *) js->ptr() + js->length());

  if (json_read_value(&je))
    goto error;

  switch (je.value_type)
  {
  case JSON_VALUE_OBJECT:
    type= "OBJECT";
    break;
  case JSON_VALUE_ARRAY:
    type= "ARRAY";
    break;
  case JSON_VALUE_STRING:
    type= "STRING";
    break;
  case JSON_VALUE_NUMBER:
    /* A fractional part distinguishes DOUBLE from INTEGER. */
    type= (je.num_flags & JSON_NUM_FRAC_PART) ? "DOUBLE" : "INTEGER";
    break;
  case JSON_VALUE_TRUE:
  case JSON_VALUE_FALSE:
    type= "BOOLEAN";
    break;
  default:
    type= "NULL";
    break;
  }

  str->set(type, strlen(type), &my_charset_utf8_general_ci);
  return str;

error:
  report_json_error(js, &je, 0);
  null_value= 1;
  return 0;
}
+
+
+bool Item_func_json_insert::fix_length_and_dec()
+{
+ uint n_arg;
+ ulonglong char_length;
+
+ collation.set(args[0]->collation);
+ char_length= args[0]->max_char_length();
+
+ for (n_arg= 1; n_arg < arg_count; n_arg+= 2)
+ {
+ paths[n_arg/2].set_constant_flag(args[n_arg]->const_item());
+ char_length+= args[n_arg/2+1]->max_char_length() + 4;
+ }
+
+ fix_char_length_ulonglong(char_length);
+ maybe_null= 1;
+ return FALSE;
+}
+
+
+/*
+  Shared implementation of JSON_INSERT / JSON_REPLACE / JSON_SET,
+  selected by the mode_insert / mode_replace flags.
+
+  For each (path, value) argument pair the current JSON text is
+  re-scanned; a modified copy is written to the "other" buffer, and
+  str / tmp_js are swapped (see continue_point) so the result becomes
+  the input for the next pair.  A final json_nice() pass normalizes
+  the formatting of the accumulated result.
+
+  Returns NULL (with null_value set) on NULL input, path errors or
+  JSON parse errors.
+*/
+String *Item_func_json_insert::val_str(String *str)
+{
+  json_engine_t je;
+  String *js= args[0]->val_json(&tmp_js);
+  uint n_arg, n_path;
+  json_string_t key_name;
+
+  DBUG_ASSERT(fixed == 1);
+
+  if ((null_value= args[0]->null_value))
+    return 0;
+
+  str->set_charset(collation.collation);
+  tmp_js.set_charset(collation.collation);
+  json_string_set_cs(&key_name, collation.collation);
+
+  for (n_arg=1, n_path=0; n_arg < arg_count; n_arg+=2, n_path++)
+  {
+    uint array_counters[JSON_DEPTH_LIMIT];
+    json_path_with_flags *c_path= paths + n_path;
+    const char *v_to;
+    const json_path_step_t *lp;
+
+    /* Parse the path argument once if constant, on every row otherwise. */
+    if (!c_path->parsed)
+    {
+      String *s_p= args[n_arg]->val_str(tmp_paths+n_path);
+      if (s_p)
+      {
+        if (path_setup_nwc(&c_path->p,s_p->charset(),
+                           (const uchar *) s_p->ptr(),
+                           (const uchar *) s_p->ptr() + s_p->length()))
+        {
+          report_path_error(s_p, &c_path->p, n_arg);
+          goto return_null;
+        }
+
+        /* We search to the last step. */
+        c_path->p.last_step--;
+      }
+      c_path->parsed= c_path->constant;
+    }
+    if (args[n_arg]->null_value)
+      goto return_null;
+
+    json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
+                    (const uchar *) js->ptr() + js->length());
+
+    /* Trivial path "$": the whole document is the target value. */
+    if (c_path->p.last_step < c_path->p.steps)
+      goto v_found;
+
+    c_path->cur_step= c_path->p.steps;
+
+    /* Find the parent of the target; if absent, this pair is a no-op. */
+    if (c_path->p.last_step >= c_path->p.steps &&
+        json_find_path(&je, &c_path->p, &c_path->cur_step, array_counters))
+    {
+      if (je.s.error)
+        goto js_error;
+      continue;
+    }
+
+    if (json_read_value(&je))
+      goto js_error;
+
+    /* lp is the final step: the array index or object key to insert at. */
+    lp= c_path->p.last_step+1;
+    if (lp->type & JSON_PATH_ARRAY)
+    {
+      uint n_item= 0;
+
+      if (je.value_type != JSON_VALUE_ARRAY)
+      {
+        /*
+          The parent is a scalar/object but the path expects an array:
+          decide whether to auto-wrap it as a one-element array.
+        */
+        const uchar *v_from= je.value_begin;
+        int do_array_autowrap;
+
+        if (mode_insert)
+        {
+          if (mode_replace)
+            do_array_autowrap= lp->n_item > 0;
+          else
+          {
+            if (lp->n_item == 0)
+              continue;
+            do_array_autowrap= 1;
+          }
+        }
+        else
+        {
+          if (lp->n_item)
+            continue;
+          do_array_autowrap= 0;
+        }
+
+
+        str->length(0);
+        /* Wrap the value as an array. */
+        if (append_simple(str, js->ptr(), (const char *) v_from - js->ptr()) ||
+            (do_array_autowrap && str->append("[", 1)))
+          goto js_error; /* Out of memory. */
+
+        if (je.value_type == JSON_VALUE_OBJECT)
+        {
+          if (json_skip_level(&je))
+            goto js_error;
+        }
+
+        if ((do_array_autowrap &&
+             (append_simple(str, v_from, je.s.c_str - v_from) ||
+              str->append(", ", 2))) ||
+            append_json_value(str, args[n_arg+1], &tmp_val) ||
+            (do_array_autowrap && str->append("]", 1)) ||
+            append_simple(str, je.s.c_str, js->end()-(const char *) je.s.c_str))
+          goto js_error; /* Out of memory. */
+
+        goto continue_point;
+      }
+
+      /* Scan the array for the element lp->n_item. */
+      while (json_scan_next(&je) == 0 && je.state != JST_ARRAY_END)
+      {
+        switch (je.state)
+        {
+        case JST_VALUE:
+          if (n_item == lp->n_item)
+            goto v_found;
+          n_item++;
+          if (json_skip_array_item(&je))
+            goto js_error;
+          break;
+        default:
+          break;
+        }
+      }
+
+      if (je.s.error)
+        goto js_error;
+
+      /* Index past the end: only INSERT/SET may append a new element. */
+      if (!mode_insert)
+        continue;
+
+      v_to= (const char *) (je.s.c_str - je.sav_c_len);
+      str->length(0);
+      if (append_simple(str, js->ptr(), v_to - js->ptr()) ||
+          (n_item > 0 && str->append(", ", 2)) ||
+          append_json_value(str, args[n_arg+1], &tmp_val) ||
+          append_simple(str, v_to, js->end() - v_to))
+        goto js_error; /* Out of memory. */
+    }
+    else /*JSON_PATH_KEY*/
+    {
+      uint n_key= 0;
+
+      if (je.value_type != JSON_VALUE_OBJECT)
+        continue;
+
+      /* Scan the object for the key named in the last path step. */
+      while (json_scan_next(&je) == 0 && je.state != JST_OBJ_END)
+      {
+        switch (je.state)
+        {
+        case JST_KEY:
+          json_string_set_str(&key_name, lp->key, lp->key_end);
+          if (json_key_matches(&je, &key_name))
+            goto v_found;
+          n_key++;
+          if (json_skip_key(&je))
+            goto js_error;
+          break;
+        default:
+          break;
+        }
+      }
+
+      if (je.s.error)
+        goto js_error;
+
+      /* Key absent: only INSERT/SET may add a new member. */
+      if (!mode_insert)
+        continue;
+
+      v_to= (const char *) (je.s.c_str - je.sav_c_len);
+      str->length(0);
+      if (append_simple(str, js->ptr(), v_to - js->ptr()) ||
+          (n_key > 0 && str->append(", ", 2)) ||
+          str->append("\"", 1) ||
+          append_simple(str, lp->key, lp->key_end - lp->key) ||
+          str->append("\":", 2) ||
+          append_json_value(str, args[n_arg+1], &tmp_val) ||
+          append_simple(str, v_to, js->end() - v_to))
+        goto js_error; /* Out of memory. */
+    }
+
+    goto continue_point;
+
+v_found:
+
+    /* The path target exists: only REPLACE/SET may overwrite it. */
+    if (!mode_replace)
+      continue;
+
+    if (json_read_value(&je))
+      goto js_error;
+
+    v_to= (const char *) je.value_begin;
+    str->length(0);
+    if (!json_value_scalar(&je))
+    {
+      /* Skip the whole nested value being replaced. */
+      if (json_skip_level(&je))
+        goto js_error;
+    }
+
+    if (append_simple(str, js->ptr(), v_to - js->ptr()) ||
+        append_json_value(str, args[n_arg+1], &tmp_val) ||
+        append_simple(str, je.s.c_str, js->end()-(const char *) je.s.c_str))
+      goto js_error; /* Out of memory. */
+continue_point:
+    {
+      /* Swap str and js. */
+      if (str == &tmp_js)
+      {
+        str= js;
+        js= &tmp_js;
+      }
+      else
+      {
+        js= str;
+        str= &tmp_js;
+      }
+    }
+  }
+
+  /* Re-parse the accumulated result and pretty-print it into str. */
+  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
+                  (const uchar *) js->ptr() + js->length());
+  str->length(0);
+  if (json_nice(&je, str, Item_func_json_format::LOOSE))
+    goto js_error;
+
+  return str;
+
+js_error:
+  report_json_error(js, &je, 0);
+return_null:
+  null_value= 1;
+  return 0;
+}
+
+
+/*
+  JSON_REMOVE only deletes content, so the result can never be longer
+  than the input; args[1..] are the path arguments.
+*/
+bool Item_func_json_remove::fix_length_and_dec()
+{
+  collation.set(args[0]->collation);
+  max_length= args[0]->max_length;
+
+  /* Remember which paths are constant so they are parsed only once. */
+  mark_constant_paths(paths, args+1, arg_count-1);
+  maybe_null= 1;
+  return FALSE;
+}
+
+
+/*
+  Implements JSON_REMOVE(js, path, ...).
+
+  For each path argument the current JSON text is re-scanned, the
+  addressed element/member is located and the text around it is copied
+  to the "other" buffer (str/tmp_js are swapped after each removal so
+  the result feeds the next path).  A trivial path "$" is rejected as
+  an error.  The final json_nice() pass normalizes formatting.
+*/
+String *Item_func_json_remove::val_str(String *str)
+{
+  json_engine_t je;
+  String *js= args[0]->val_json(&tmp_js);
+  uint n_arg, n_path;
+  json_string_t key_name;
+
+  DBUG_ASSERT(fixed == 1);
+
+  if (args[0]->null_value)
+    goto null_return;
+
+  str->set_charset(js->charset());
+  json_string_set_cs(&key_name, js->charset());
+
+  for (n_arg=1, n_path=0; n_arg < arg_count; n_arg++, n_path++)
+  {
+    uint array_counters[JSON_DEPTH_LIMIT];
+    json_path_with_flags *c_path= paths + n_path;
+    /* rem_start/rem_end delimit the text span to drop from js. */
+    const char *rem_start, *rem_end;
+    const json_path_step_t *lp;
+    uint n_item= 0;
+
+    if (!c_path->parsed)
+    {
+      String *s_p= args[n_arg]->val_str(tmp_paths+n_path);
+      if (s_p)
+      {
+        if (path_setup_nwc(&c_path->p,s_p->charset(),
+                           (const uchar *) s_p->ptr(),
+                           (const uchar *) s_p->ptr() + s_p->length()))
+        {
+          report_path_error(s_p, &c_path->p, n_arg);
+          goto null_return;
+        }
+
+        /* We search to the last step. */
+        c_path->p.last_step--;
+        /* Removing the whole document ("$") is not allowed. */
+        if (c_path->p.last_step < c_path->p.steps)
+        {
+          c_path->p.s.error= TRIVIAL_PATH_NOT_ALLOWED;
+          report_path_error(s_p, &c_path->p, n_arg);
+          goto null_return;
+        }
+      }
+      c_path->parsed= c_path->constant;
+    }
+    if (args[n_arg]->null_value)
+      goto null_return;
+
+    json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
+                    (const uchar *) js->ptr() + js->length());
+
+    c_path->cur_step= c_path->p.steps;
+
+    /* Locate the parent of the element to remove. */
+    if (json_find_path(&je, &c_path->p, &c_path->cur_step, array_counters))
+    {
+      if (je.s.error)
+        goto js_error;
+    }
+
+    if (json_read_value(&je))
+      goto js_error;
+
+    /* lp is the final step: which array item or object key to remove. */
+    lp= c_path->p.last_step+1;
+    if (lp->type & JSON_PATH_ARRAY)
+    {
+      if (je.value_type != JSON_VALUE_ARRAY)
+        continue;
+
+      while (json_scan_next(&je) == 0 && je.state != JST_ARRAY_END)
+      {
+        switch (je.state)
+        {
+        case JST_VALUE:
+          if (n_item == lp->n_item)
+          {
+            /* Include the preceding separator unless it's item 0. */
+            rem_start= (const char *) (je.s.c_str -
+                         (n_item ? je.sav_c_len : 0));
+            goto v_found;
+          }
+          n_item++;
+          if (json_skip_array_item(&je))
+            goto js_error;
+          break;
+        default:
+          break;
+        }
+      }
+
+      if (je.s.error)
+        goto js_error;
+
+      /* Index not present: nothing to remove for this path. */
+      continue;
+    }
+    else /*JSON_PATH_KEY*/
+    {
+      if (je.value_type != JSON_VALUE_OBJECT)
+        continue;
+
+      while (json_scan_next(&je) == 0 && je.state != JST_OBJ_END)
+      {
+        switch (je.state)
+        {
+        case JST_KEY:
+          if (n_item == 0)
+            rem_start= (const char *) (je.s.c_str - je.sav_c_len);
+          json_string_set_str(&key_name, lp->key, lp->key_end);
+          if (json_key_matches(&je, &key_name))
+            goto v_found;
+
+          if (json_skip_key(&je))
+            goto js_error;
+
+          rem_start= (const char *) je.s.c_str;
+          n_item++;
+          break;
+        default:
+          break;
+        }
+      }
+
+      if (je.s.error)
+        goto js_error;
+
+      /* Key not present: nothing to remove for this path. */
+      continue;
+    }
+
+v_found:
+
+    /*
+      Skip past the value being removed to find where the kept text
+      resumes.  NOTE(review): json_skip_key() is used for both the key
+      and the array-item case — presumably it skips the current value
+      in both states; confirm against json_lib.
+    */
+    if (json_skip_key(&je) || json_scan_next(&je))
+      goto js_error;
+
+    rem_end= (je.state == JST_VALUE && n_item == 0) ?
+      (const char *) je.s.c_str : (const char *) (je.s.c_str - je.sav_c_len);
+
+    str->length(0);
+
+    /* Copy everything before and after the removed span. */
+    if (append_simple(str, js->ptr(), rem_start - js->ptr()) ||
+        (je.state == JST_KEY && n_item > 0 && str->append(",", 1)) ||
+        append_simple(str, rem_end, js->end() - rem_end))
+      goto js_error; /* Out of memory. */
+
+    {
+      /* Swap str and js. */
+      if (str == &tmp_js)
+      {
+        str= js;
+        js= &tmp_js;
+      }
+      else
+      {
+        js= str;
+        str= &tmp_js;
+      }
+    }
+  }
+
+  /* Re-parse the accumulated result and pretty-print it into str. */
+  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
+                  (const uchar *) js->ptr() + js->length());
+  str->length(0);
+  str->set_charset(js->charset());
+  if (json_nice(&je, str, Item_func_json_format::LOOSE))
+    goto js_error;
+
+  null_value= 0;
+  return str;
+
+js_error:
+  report_json_error(js, &je, 0);
+null_return:
+  null_value= 1;
+  return 0;
+}
+
+
+/*
+  JSON_KEYS result can only list keys already present in the input,
+  so the input's max_length is a safe upper bound.
+*/
+bool Item_func_json_keys::fix_length_and_dec()
+{
+  collation.set(args[0]->collation);
+  max_length= args[0]->max_length;
+  maybe_null= 1;
+  /* The optional second argument is a path; parse it once if constant. */
+  if (arg_count > 1)
+    path.set_constant_flag(args[1]->const_item());
+  return FALSE;
+}
+
+
+/*
+  That function is for Item_func_json_keys::val_str exclusively.
+  It utilizes the fact the resulting string is in specific format:
+  ["key1", "key2"...]
+
+  Returns 1 if the key (key..key+key_len) is already present in the
+  partially-built result, 0 otherwise; used to de-duplicate keys.
+  NOTE(review): assumes no key in the list contains an escaped '"' —
+  the inner scan stops at the first raw quote character; confirm that
+  JSON_KEYS never emits such keys.
+*/
+static int check_key_in_list(String *res,
+                             const uchar *key, int key_len)
+{
+  const uchar *c= (const uchar *) res->ptr() + 2; /* beginning '["' */
+  const uchar *end= (const uchar *) res->end() - 1; /* ending '"' */
+
+  while (c < end)
+  {
+    int n_char;
+    /* Compare the current list entry with the key, char by char. */
+    for (n_char=0; c[n_char] != '"' && n_char < key_len; n_char++)
+    {
+      if (c[n_char] != key[n_char])
+        break;
+    }
+    if (c[n_char] == '"')
+    {
+      /* Entry ended exactly where the key ends: full match. */
+      if (n_char == key_len)
+        return 1;
+    }
+    else
+    {
+      /* Mismatch or key exhausted: skip to the end of this entry. */
+      while (c[n_char] != '"')
+        n_char++;
+    }
+    c+= n_char + 4; /* skip ', "' */
+  }
+  return 0;
+}
+
+
+/*
+  Implements JSON_KEYS(js[, path]): returns the member names of the
+  object addressed by the path (or of the top-level object) as a JSON
+  array of strings, with duplicates suppressed via check_key_in_list().
+  Returns NULL for non-object targets, missing paths or parse errors.
+*/
+String *Item_func_json_keys::val_str(String *str)
+{
+  json_engine_t je;
+  String *js= args[0]->val_json(&tmp_js);
+  uint n_keys= 0;
+  uint array_counters[JSON_DEPTH_LIMIT];
+
+  if ((args[0]->null_value))
+    goto null_return;
+
+  json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
+                  (const uchar *) js->ptr() + js->length());
+
+  /* Without a path argument the whole document is the target. */
+  if (arg_count < 2)
+    goto skip_search;
+
+  if (!path.parsed)
+  {
+    String *s_p= args[1]->val_str(&tmp_path);
+    if (s_p &&
+        path_setup_nwc(&path.p, s_p->charset(), (const uchar *) s_p->ptr(),
+                       (const uchar *) s_p->ptr() + s_p->length()))
+    {
+      report_path_error(s_p, &path.p, 1);
+      goto null_return;
+    }
+    path.parsed= path.constant;
+  }
+
+  if (args[1]->null_value)
+    goto null_return;
+
+  path.cur_step= path.p.steps;
+
+  if (json_find_path(&je, &path.p, &path.cur_step, array_counters))
+  {
+    if (je.s.error)
+      goto err_return;
+
+    /* Path not found in the document. */
+    goto null_return;
+  }
+
+skip_search:
+  if (json_read_value(&je))
+    goto err_return;
+
+  /* Only objects have keys. */
+  if (je.value_type != JSON_VALUE_OBJECT)
+    goto null_return;
+
+  str->length(0);
+  if (str->append("[", 1))
+    goto err_return; /* Out of memory. */
+  /* Parse the OBJECT collecting the keys. */
+  while (json_scan_next(&je) == 0 && je.state != JST_OBJ_END)
+  {
+    const uchar *key_start, *key_end;
+    int key_len;
+
+    switch (je.state)
+    {
+    case JST_KEY:
+      /* Read the key name character by character to find its end. */
+      key_start= je.s.c_str;
+      do
+      {
+        key_end= je.s.c_str;
+      } while (json_read_keyname_chr(&je) == 0);
+      if (je.s.error)
+        goto err_return;
+      key_len= (int)(key_end - key_start);
+
+      /* Append the key only if it was not already collected. */
+      if (!check_key_in_list(str, key_start, key_len))
+      {
+        if ((n_keys > 0 && str->append(", ", 2)) ||
+            str->append("\"", 1) ||
+            append_simple(str, key_start, key_len) ||
+            str->append("\"", 1))
+          goto err_return;
+        n_keys++;
+      }
+      break;
+    case JST_OBJ_START:
+    case JST_ARRAY_START:
+      /* Nested containers: only top-level keys are reported. */
+      if (json_skip_level(&je))
+        break;
+      break;
+    default:
+      break;
+    }
+  }
+
+  if (je.s.error || str->append("]", 1))
+    goto err_return;
+
+  null_value= 0;
+  return str;
+
+err_return:
+  report_json_error(js, &je, 0);
+null_return:
+  null_value= 1;
+  return 0;
+}
+
+
+/*
+  Resolves the optional 4th argument (escape character for the LIKE-style
+  search string); defaults to '\\' when fewer than 4 arguments are given.
+*/
+bool Item_func_json_search::fix_fields(THD *thd, Item **ref)
+{
+  if (Item_json_str_multipath::fix_fields(thd, ref))
+    return TRUE;
+
+  if (arg_count < 4)
+  {
+    escape= '\\';
+    return FALSE;
+  }
+
+  return fix_escape_item(thd, args[3], &tmp_js, true,
+                         args[0]->collation.collation, &escape);
+}
+
+
+/* Threshold above which arglen^2 would overflow MAX_BLOB_WIDTH. */
+static const uint SQR_MAX_BLOB_WIDTH= (uint) sqrt(MAX_BLOB_WIDTH);
+
+bool Item_func_json_search::fix_length_and_dec()
+{
+  collation.set(args[0]->collation);
+
+  /*
+    It's rather difficult to estimate the length of the result.
+    I believe arglen^2 is a reasonable upper limit, capped at
+    MAX_BLOB_WIDTH to avoid overflow.
+  */
+  if (args[0]->max_length > SQR_MAX_BLOB_WIDTH)
+    max_length= MAX_BLOB_WIDTH;
+  else
+  {
+    max_length= args[0]->max_length;
+    max_length*= max_length;
+  }
+
+  /* one_or_all argument (args[1]): parse once if constant. */
+  ooa_constant= args[1]->const_item();
+  ooa_parsed= FALSE;
+
+  /* Optional path arguments start at args[4]. */
+  if (arg_count > 4)
+    mark_constant_paths(paths, args+4, arg_count-4);
+  maybe_null= 1;
+  return FALSE;
+}
+
+
+/*
+  Wildcard-compare the current JSON value against the search string.
+  Returns 1 on match, 0 otherwise (including on allocation/unescape
+  failure).  Escaped JSON strings are unescaped into esc_value first
+  so the comparison sees the actual characters.
+*/
+int Item_func_json_search::compare_json_value_wild(json_engine_t *je,
+                                                   const String *cmp_str)
+{
+  /* Unescaped strings and non-strings can be compared in place. */
+  if (je->value_type != JSON_VALUE_STRING || !je->value_escaped)
+    return my_wildcmp(collation.collation,
+        (const char *) je->value, (const char *) (je->value + je->value_len),
+        cmp_str->ptr(), cmp_str->end(), escape, wild_one, wild_many) ? 0 : 1;
+
+  {
+    int esc_len;
+    /* Grow esc_value in 1K steps to limit reallocations. */
+    if (esc_value.alloced_length() < (uint) je->value_len &&
+        esc_value.alloc((je->value_len / 1024 + 1) * 1024))
+      return 0;
+
+    esc_len= json_unescape(je->s.cs, je->value, je->value + je->value_len,
+                           je->s.cs, (uchar *) esc_value.ptr(),
+                           (uchar *) (esc_value.ptr() + 
+                                      esc_value.alloced_length()));
+    if (esc_len <= 0)
+      return 0;
+
+    return my_wildcmp(collation.collation,
+        esc_value.ptr(), esc_value.ptr() + esc_len,
+        cmp_str->ptr(), cmp_str->end(), escape, wild_one, wild_many) ? 0 : 1;
+  }
+}
+
+
+/*
+  Append a JSON path as a quoted string ("$.key[n]...") to str.
+  Returns TRUE on out-of-memory.  The first step (the '$' root) is
+  implicit, hence iteration starts at p->steps+1.
+*/
+static int append_json_path(String *str, const json_path_t *p)
+{
+  const json_path_step_t *c;
+
+  if (str->append("\"$", 2))
+    return TRUE;
+
+  for (c= p->steps+1; c <= p->last_step; c++)
+  {
+    if (c->type & JSON_PATH_KEY)
+    {
+      /* Object member: ".key" */
+      if (str->append(".", 1) ||
+          append_simple(str, c->key, c->key_end-c->key))
+        return TRUE;
+    }
+    else /*JSON_PATH_ARRAY*/
+    {
+      /* Array element: "[n]" */
+      if (str->append("[", 1) ||
+          str->append_ulonglong(c->n_item) ||
+          str->append("]", 1))
+        return TRUE;
+    }
+  }
+
+  return str->append("\"", 1);
+}
+
+
+/*
+  Implements JSON_SEARCH(js, one_or_all, search_str[, escape[, path...]]).
+  Walks every scalar in the document, wildcard-compares it with the
+  search string, and collects the matching paths: a single path string
+  when one match is found, a JSON array of paths otherwise.  In 'one'
+  mode the scan stops at the first match.
+*/
+String *Item_func_json_search::val_str(String *str)
+{
+  String *js= args[0]->val_json(&tmp_js);
+  String *s_str= args[2]->val_str(&tmp_path);
+  json_engine_t je;
+  json_path_t p, sav_path;
+  uint n_arg;
+
+  if (args[0]->null_value || args[2]->null_value)
+    goto null_return;
+
+  if (parse_one_or_all(this, args[1], &ooa_parsed, ooa_constant, &mode_one))
+    goto null_return;
+
+  n_path_found= 0;
+  str->set_charset(js->charset());
+  str->length(0);
+
+  /* Parse all filtering path arguments up front. */
+  for (n_arg=4; n_arg < arg_count; n_arg++)
+  {
+    json_path_with_flags *c_path= paths + n_arg - 4;
+    if (!c_path->parsed)
+    {
+      String *s_p= args[n_arg]->val_str(tmp_paths + (n_arg-4));
+      if (s_p &&
+          json_path_setup(&c_path->p,s_p->charset(),(const uchar *) s_p->ptr(),
+                          (const uchar *) s_p->ptr() + s_p->length()))
+      {
+        report_path_error(s_p, &c_path->p, n_arg);
+        goto null_return;
+      }
+      c_path->parsed= c_path->constant;
+    }
+    if (args[n_arg]->null_value)
+      goto null_return;
+  }
+
+  json_get_path_start(&je, js->charset(),(const uchar *) js->ptr(),
+                      (const uchar *) js->ptr() + js->length(), &p);
+
+  while (json_get_path_next(&je, &p) == 0)
+  {
+    if (json_value_scalar(&je))
+    {
+      /* Only scalars under one of the filter paths (if any) qualify. */
+      if ((arg_count < 5 || path_ok(paths, arg_count - 4, &p, je.value_type)) &&
+          compare_json_value_wild(&je, s_str) != 0)
+      {
+        ++n_path_found;
+        if (n_path_found == 1)
+        {
+          /* First match: save the path; format is decided later. */
+          sav_path= p;
+          sav_path.last_step= sav_path.steps + (p.last_step - p.steps);
+        }
+        else
+        {
+          /* Second match: switch to array output, emit saved path. */
+          if (n_path_found == 2)
+          {
+            if (str->append("[", 1) ||
+                append_json_path(str, &sav_path))
+              goto js_error;
+          }
+          if (str->append(", ", 2) || append_json_path(str, &p))
+            goto js_error;
+        }
+        if (mode_one)
+          goto end;
+      }
+    }
+  }
+
+  if (je.s.error)
+    goto js_error;
+
+end:
+  if (n_path_found == 0)
+    goto null_return;
+  if (n_path_found == 1)
+  {
+    /* Single match: the result is just the path string. */
+    if (append_json_path(str, &sav_path))
+      goto js_error;
+  }
+  else
+  {
+    if (str->append("]", 1))
+      goto js_error;
+  }
+
+  null_value= 0;
+  return str;
+
+
+js_error:
+  report_json_error(js, &je, 0);
+null_return:
+  null_value= 1;
+  return 0;
+}
+
+
+/*
+  SQL-visible name depends on the formatting mode chosen at construction.
+  The NONE mode is not expected here (asserted in debug builds).
+*/
+const char *Item_func_json_format::func_name() const
+{
+  switch (fmt)
+  {
+  case COMPACT:
+    return "json_compact";
+  case LOOSE:
+    return "json_loose";
+  case DETAILED:
+    return "json_detailed";
+  default:
+    DBUG_ASSERT(0);
+  };
+
+  /* Unreachable in debug; keeps release builds well-defined. */
+  return "";
+}
+
+
+/*
+  NOTE(review): reformatting can add whitespace/newlines, so
+  args[0]->max_length is only an approximate bound here — confirm this
+  underestimate is acceptable for DETAILED mode.
+*/
+bool Item_func_json_format::fix_length_and_dec()
+{
+  decimals= 0;
+  max_length= args[0]->max_length;
+  maybe_null= 1;
+  return FALSE;
+}
+
+
+/*
+  Implements JSON_COMPACT/JSON_LOOSE/JSON_DETAILED: re-serializes the
+  JSON argument through json_nice() in the selected format.  DETAILED
+  mode accepts an optional tab-size argument, clamped to
+  [0, TAB_SIZE_LIMIT].
+*/
+String *Item_func_json_format::val_str(String *str)
+{
+  String *js= args[0]->val_json(&tmp_js);
+  json_engine_t je;
+  int tab_size= 4;
+
+  if ((null_value= args[0]->null_value))
+    return 0;
+
+  if (fmt == DETAILED)
+  {
+    if (arg_count > 1)
+    {
+      tab_size= (int)args[1]->val_int();
+      if (args[1]->null_value)
+      {
+        null_value= 1;
+        return 0;
+      }
+    }
+    /* Clamp the indentation width to a sane range. */
+    if (tab_size < 0)
+      tab_size= 0;
+    else if (tab_size > TAB_SIZE_LIMIT)
+      tab_size= TAB_SIZE_LIMIT;
+  }
+
+  json_scan_start(&je, js->charset(), (const uchar *) js->ptr(),
+                  (const uchar *) js->ptr()+js->length());
+
+  str->length(0);
+  str->set_charset(js->charset());
+  if (json_nice(&je, str, fmt, tab_size))
+  {
+    null_value= 1;
+    report_json_error(js, &je, 0);
+    return 0;
+  }
+
+  return str;
+}
+
+
+/*
+  When the value is requested as JSON, pass the argument through
+  unchanged — reformatting only matters for textual output.
+*/
+String *Item_func_json_format::val_json(String *str)
+{
+  String *js= args[0]->val_json(&tmp_js);
+  if ((null_value= args[0]->null_value))
+    return 0;
+  return js;
+}
+
+/*
+  Compare a JSON-typed item (j) with a plain string item (s).
+  If the JSON value is a string, it is unescaped into value2 so the
+  comparison sees the raw characters; otherwise the JSON text itself
+  is compared.  Returns the sortcmp() result, or -1 with the owner's
+  null_value set on NULL input or JSON parse error.
+*/
+int Arg_comparator::compare_json_str_basic(Item *j, Item *s)
+{
+  String *js,*str;
+  int c_len;
+  json_engine_t je;
+
+  if ((js= j->val_str(&value1)))
+  {
+    json_scan_start(&je, js->charset(), (const uchar *) js->ptr(),
+                    (const uchar *) js->ptr()+js->length());
+    if (json_read_value(&je))
+      goto error;
+    if (je.value_type == JSON_VALUE_STRING)
+    {
+      /* Unescape into value2; value1 becomes free for s->val_str(). */
+      if (value2.realloc_with_extra_if_needed(je.value_len) ||
+          (c_len= json_unescape(js->charset(), je.value,
+                                je.value + je.value_len,
+                                &my_charset_utf8_general_ci,
+                                (uchar *) value2.ptr(),
+                                (uchar *) (value2.ptr() + je.value_len))) < 0)
+        goto error;
+
+      value2.length(c_len);
+      js= &value2;
+      str= &value1;
+    }
+    else
+    {
+      str= &value2;
+    }
+
+
+    if ((str= s->val_str(str)))
+    {
+      if (set_null)
+        owner->null_value= 0;
+      return sortcmp(js, str, compare_collation());
+    }
+  }
+
+error:
+  if (set_null)
+    owner->null_value= 1;
+  return -1;
+}
+
+
+/*
+  NULL-safe equality (<=>) between a JSON_EXTRACT result and a string.
+  Two NULLs compare equal; a string JSON value is unescaped in place
+  (value1) before comparison.  Returns 1 for equal, 0 otherwise.
+*/
+int Arg_comparator::compare_e_json_str_basic(Item *j, Item *s)
+{
+  String *res1,*res2;
+  json_value_types type;
+  char *value;
+  int value_len, c_len;
+  Item_func_json_extract *e= (Item_func_json_extract *) j;
+
+  res1= e->read_json(&value1, &type, &value, &value_len);
+  res2= s->val_str(&value2);
+
+  /* NULL <=> NULL is true; NULL <=> non-NULL is false. */
+  if (!res1 || !res2)
+    return MY_TEST(res1 == res2);
+
+  if (type == JSON_VALUE_STRING)
+  {
+    /* NOTE(review): unescape failure is reported as "equal" (1) here,
+       unlike compare_json_str_basic which treats it as error — confirm
+       this asymmetry is intended. */
+    if (value1.realloc_with_extra_if_needed(value_len) ||
+        (c_len= json_unescape(value1.charset(), (uchar *) value,
+                              (uchar *) value+value_len,
+                              &my_charset_utf8_general_ci,
+                              (uchar *) value1.ptr(),
+                              (uchar *) (value1.ptr() + value_len))) < 0)
+      return 1;
+    value1.length(c_len);
+    res1= &value1;
+  }
+
+  return MY_TEST(sortcmp(res1, res2, compare_collation()) == 0);
+}
+
+
diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h
new file mode 100644
index 00000000000..f2358168080
--- /dev/null
+++ b/sql/item_jsonfunc.h
@@ -0,0 +1,454 @@
+#ifndef ITEM_JSONFUNC_INCLUDED
+#define ITEM_JSONFUNC_INCLUDED
+
+/* Copyright (c) 2016, MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+
+/* This file defines all JSON functions */
+
+
+#include <json_lib.h>
+#include "item_cmpfunc.h" // Item_bool_func
+#include "item_strfunc.h" // Item_str_func
+
+
+/*
+  A parsed JSON path plus caching flags: 'constant' marks paths whose
+  source argument is constant (parse once), 'parsed' tracks whether the
+  cached parse in 'p' is valid for the current row.
+*/
+class json_path_with_flags
+{
+public:
+  json_path_t p;
+  bool constant;
+  bool parsed;
+  json_path_step_t *cur_step;  /* current step during path search */
+  void set_constant_flag(bool s_constant)
+  {
+    constant= s_constant;
+    parsed= FALSE;
+  }
+};
+
+
+/* JSON_VALID(js): boolean — does the argument parse as JSON? */
+class Item_func_json_valid: public Item_int_func
+{
+protected:
+  String tmp_value;
+
+public:
+  Item_func_json_valid(THD *thd, Item *json) : Item_int_func(thd, json) {}
+  longlong val_int();
+  const char *func_name() const { return "json_valid"; }
+  bool fix_length_and_dec()
+  {
+    if (Item_int_func::fix_length_and_dec())
+      return TRUE;
+    maybe_null= 1;
+    return FALSE;
+  }
+  bool is_bool_type() { return true; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_valid>(thd, mem_root, this); }
+};
+
+
+/* JSON_EXISTS(js, path): boolean — does the path match in the document? */
+class Item_func_json_exists: public Item_int_func
+{
+protected:
+  json_path_with_flags path;
+  String tmp_js, tmp_path;
+
+public:
+  Item_func_json_exists(THD *thd, Item *js, Item *i_path):
+    Item_int_func(thd, js, i_path) {}
+  const char *func_name() const { return "json_exists"; }
+  bool is_bool_type() { return true; }
+  bool fix_length_and_dec();
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_exists>(thd, mem_root, this); }
+  longlong val_int();
+};
+
+
+/*
+  JSON_VALUE(js, path): extract a scalar at the path.
+  check_and_get_value() is virtual so JSON_QUERY can reuse val_str()
+  with structured-value semantics instead.
+*/
+class Item_func_json_value: public Item_str_func
+{
+protected:
+  json_path_with_flags path;
+  String tmp_js, tmp_path;
+
+public:
+  Item_func_json_value(THD *thd, Item *js, Item *i_path):
+    Item_str_func(thd, js, i_path) {}
+  const char *func_name() const { return "json_value"; }
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  virtual bool check_and_get_value(json_engine_t *je, String *res, int *error);
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_value>(thd, mem_root, this); }
+};
+
+
+/* JSON_QUERY(js, path): like JSON_VALUE but returns objects/arrays. */
+class Item_func_json_query: public Item_func_json_value
+{
+public:
+  Item_func_json_query(THD *thd, Item *js, Item *i_path):
+    Item_func_json_value(thd, js, i_path) {}
+  bool is_json_type() { return true; }
+  const char *func_name() const { return "json_query"; }
+  bool check_and_get_value(json_engine_t *je, String *res, int *error);
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_query>(thd, mem_root, this); }
+};
+
+
+/* JSON_QUOTE(s): wrap a string as a quoted, escaped JSON string literal. */
+class Item_func_json_quote: public Item_str_func
+{
+protected:
+  String tmp_s;
+
+public:
+  Item_func_json_quote(THD *thd, Item *s): Item_str_func(thd, s) {}
+  const char *func_name() const { return "json_quote"; }
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_quote>(thd, mem_root, this); }
+};
+
+
+/* JSON_UNQUOTE(s): strip quotes and unescape a JSON string literal. */
+class Item_func_json_unquote: public Item_str_func
+{
+protected:
+  String tmp_s;
+  String *read_json(json_engine_t *je);
+public:
+  Item_func_json_unquote(THD *thd, Item *s): Item_str_func(thd, s) {}
+  const char *func_name() const { return "json_unquote"; }
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_unquote>(thd, mem_root, this); }
+};
+
+
+/*
+  Common base for string JSON functions that take multiple path
+  arguments: owns the per-path parse cache (paths/tmp_paths), whose
+  size each subclass reports via get_n_paths().
+*/
+class Item_json_str_multipath: public Item_str_func
+{
+protected:
+  json_path_with_flags *paths;
+  String *tmp_paths;
+public:
+  Item_json_str_multipath(THD *thd, List<Item> &list):
+    Item_str_func(thd, list), tmp_paths(0) {}
+  bool fix_fields(THD *thd, Item **ref);
+  void cleanup();
+  virtual uint get_n_paths() const = 0;
+  bool is_json_type() { return true; }
+};
+
+
+/*
+  JSON_EXTRACT(js, path, ...): extract the value(s) at the paths.
+  read_json() also exposes the raw value for Arg_comparator's
+  JSON-aware comparisons.
+*/
+class Item_func_json_extract: public Item_json_str_multipath
+{
+protected:
+  String tmp_js;
+public:
+  String *read_json(String *str, json_value_types *type,
+                    char **out_val, int *value_len);
+  Item_func_json_extract(THD *thd, List<Item> &list):
+    Item_json_str_multipath(thd, list) {}
+  const char *func_name() const { return "json_extract"; }
+  enum Functype functype() const { return JSON_EXTRACT_FUNC; }
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  longlong val_int();
+  double val_real();
+  uint get_n_paths() const { return arg_count - 1; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_extract>(thd, mem_root, this); }
+};
+
+
+/* JSON_CONTAINS(js, value[, path]): boolean containment test. */
+class Item_func_json_contains: public Item_int_func
+{
+protected:
+  String tmp_js;
+  json_path_with_flags path;
+  String tmp_path;
+  /* a2_*: parse-once caching for a constant second argument. */
+  bool a2_constant, a2_parsed;
+  String tmp_val, *val;
+public:
+  Item_func_json_contains(THD *thd, List<Item> &list):
+    Item_int_func(thd, list) {}
+  const char *func_name() const { return "json_contains"; }
+  bool fix_length_and_dec();
+  longlong val_int();
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_contains>(thd, mem_root, this); }
+};
+
+
+/*
+  JSON_CONTAINS_PATH(js, one_or_all, path, ...): boolean — do the paths
+  exist?  mode_one selects 'one' vs 'all'; p_found tracks per-path hits.
+*/
+class Item_func_json_contains_path: public Item_int_func
+{
+protected:
+  String tmp_js;
+  json_path_with_flags *paths;
+  String *tmp_paths;
+  bool mode_one;
+  /* ooa_*: parse-once caching for a constant one_or_all argument. */
+  bool ooa_constant, ooa_parsed;
+  bool *p_found;
+
+public:
+  Item_func_json_contains_path(THD *thd, List<Item> &list):
+    Item_int_func(thd, list), tmp_paths(0) {}
+  const char *func_name() const { return "json_contains_path"; }
+  bool fix_fields(THD *thd, Item **ref);
+  bool fix_length_and_dec();
+  void cleanup();
+  longlong val_int();
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_contains_path>(thd, mem_root, this); }
+};
+
+
+/* JSON_ARRAY(val, ...): build a JSON array from the arguments. */
+class Item_func_json_array: public Item_str_func
+{
+protected:
+  String tmp_val;
+  ulong result_limit;  /* cap on the produced string length */
+public:
+  Item_func_json_array(THD *thd):
+    Item_str_func(thd) {}
+  Item_func_json_array(THD *thd, List<Item> &list):
+    Item_str_func(thd, list) {}
+  String *val_str(String *);
+  bool is_json_type() { return true; }
+  bool fix_length_and_dec();
+  const char *func_name() const { return "json_array"; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_array>(thd, mem_root, this); }
+};
+
+
+/* JSON_ARRAY_APPEND(js, path, value, ...): append values to arrays. */
+class Item_func_json_array_append: public Item_json_str_multipath
+{
+protected:
+  String tmp_js;
+  String tmp_val;
+public:
+  Item_func_json_array_append(THD *thd, List<Item> &list):
+    Item_json_str_multipath(thd, list) {}
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  /* Arguments are (path, value) pairs after args[0]. */
+  uint get_n_paths() const { return arg_count/2; }
+  const char *func_name() const { return "json_array_append"; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_array_append>(thd, mem_root, this); }
+};
+
+
+/* JSON_ARRAY_INSERT(js, path, value, ...): insert at array positions. */
+class Item_func_json_array_insert: public Item_func_json_array_append
+{
+public:
+  Item_func_json_array_insert(THD *thd, List<Item> &list):
+    Item_func_json_array_append(thd, list) {}
+  String *val_str(String *);
+  const char *func_name() const { return "json_array_insert"; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_array_insert>(thd, mem_root, this); }
+};
+
+
+/* JSON_OBJECT(key, value, ...): build a JSON object from pairs. */
+class Item_func_json_object: public Item_func_json_array
+{
+public:
+  Item_func_json_object(THD *thd):
+    Item_func_json_array(thd) {}
+  Item_func_json_object(THD *thd, List<Item> &list):
+    Item_func_json_array(thd, list) {}
+  String *val_str(String *);
+  bool is_json_type() { return true; }
+  const char *func_name() const { return "json_object"; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_object>(thd, mem_root, this); }
+};
+
+
+/* JSON_MERGE(js, js, ...): merge the JSON documents pairwise. */
+class Item_func_json_merge: public Item_func_json_array
+{
+protected:
+  String tmp_js1, tmp_js2;
+public:
+  Item_func_json_merge(THD *thd, List<Item> &list):
+    Item_func_json_array(thd, list) {}
+  String *val_str(String *);
+  bool is_json_type() { return true; }
+  const char *func_name() const { return "json_merge"; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_merge>(thd, mem_root, this); }
+};
+
+
+/* JSON_LENGTH(js[, path]): number of elements/members at the target. */
+class Item_func_json_length: public Item_int_func
+{
+protected:
+  json_path_with_flags path;
+  String tmp_js;
+  String tmp_path;
+public:
+  Item_func_json_length(THD *thd, List<Item> &list):
+    Item_int_func(thd, list) {}
+  const char *func_name() const { return "json_length"; }
+  bool fix_length_and_dec();
+  longlong val_int();
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_length>(thd, mem_root, this); }
+};
+
+
+/* JSON_DEPTH(js): maximum nesting depth of the document. */
+class Item_func_json_depth: public Item_int_func
+{
+protected:
+  String tmp_js;
+public:
+  Item_func_json_depth(THD *thd, Item *js): Item_int_func(thd, js) {}
+  const char *func_name() const { return "json_depth"; }
+  longlong val_int();
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_depth>(thd, mem_root, this); }
+};
+
+
+/* JSON_TYPE(js): the type name of the top-level JSON value. */
+class Item_func_json_type: public Item_str_func
+{
+protected:
+  String tmp_js;
+public:
+  Item_func_json_type(THD *thd, Item *js): Item_str_func(thd, js) {}
+  const char *func_name() const { return "json_type"; }
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_type>(thd, mem_root, this); }
+};
+
+
+/*
+  Shared item for JSON_INSERT / JSON_REPLACE / JSON_SET:
+    mode_insert && mode_replace  -> JSON_SET
+    mode_insert && !mode_replace -> JSON_INSERT
+    !mode_insert                 -> replace-only mode
+  NOTE(review): the replace-only name is "json_update" here; presumably
+  this should be "json_replace" to match the SQL function — confirm.
+*/
+class Item_func_json_insert: public Item_json_str_multipath
+{
+protected:
+  String tmp_js;
+  String tmp_val;
+  bool mode_insert, mode_replace;
+public:
+  Item_func_json_insert(bool i_mode, bool r_mode, THD *thd, List<Item> &list):
+    Item_json_str_multipath(thd, list),
+      mode_insert(i_mode), mode_replace(r_mode) {}
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  /* Arguments are (path, value) pairs after args[0]. */
+  uint get_n_paths() const { return arg_count/2; }
+  const char *func_name() const
+  {
+    return mode_insert ?
+             (mode_replace ? "json_set" : "json_insert") : "json_update";
+  }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_insert>(thd, mem_root, this); }
+};
+
+
+/* JSON_REMOVE(js, path, ...): delete the values at the given paths. */
+class Item_func_json_remove: public Item_json_str_multipath
+{
+protected:
+  String tmp_js;
+public:
+  Item_func_json_remove(THD *thd, List<Item> &list):
+    Item_json_str_multipath(thd, list) {}
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  uint get_n_paths() const { return arg_count - 1; }
+  const char *func_name() const { return "json_remove"; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_remove>(thd, mem_root, this); }
+};
+
+
+/* JSON_KEYS(js[, path]): array of member names of the target object. */
+class Item_func_json_keys: public Item_str_func
+{
+protected:
+  json_path_with_flags path;
+  String tmp_js, tmp_path;
+
+public:
+  Item_func_json_keys(THD *thd, List<Item> &list):
+    Item_str_func(thd, list) {}
+  const char *func_name() const { return "json_keys"; }
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_keys>(thd, mem_root, this); }
+};
+
+
+/*
+  JSON_SEARCH(js, one_or_all, search_str[, escape[, path...]]):
+  find paths of scalars matching the LIKE-style pattern.
+  NOTE(review): the sav_path member appears unused — val_str() declares
+  its own local sav_path; confirm before relying on it.
+*/
+class Item_func_json_search: public Item_json_str_multipath
+{
+protected:
+  String tmp_js, tmp_path, esc_value;
+  bool mode_one;
+  /* ooa_*: parse-once caching for a constant one_or_all argument. */
+  bool ooa_constant, ooa_parsed;
+  int escape;
+  int n_path_found;
+  json_path_t sav_path;
+
+  int compare_json_value_wild(json_engine_t *je, const String *cmp_str);
+
+public:
+  Item_func_json_search(THD *thd, List<Item> &list):
+    Item_json_str_multipath(thd, list) {}
+  const char *func_name() const { return "json_search"; }
+  bool fix_fields(THD *thd, Item **ref);
+  bool fix_length_and_dec();
+  String *val_str(String *);
+  uint get_n_paths() const { return arg_count > 4 ? arg_count - 4 : 0; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_search>(thd, mem_root, this); }
+};
+
+
+/*
+  JSON_COMPACT / JSON_LOOSE / JSON_DETAILED: re-serialize JSON text in
+  the chosen format.  The list constructor (used for JSON_DETAILED with
+  an optional tab-size argument) defaults to DETAILED.
+*/
+class Item_func_json_format: public Item_str_func
+{
+public:
+  enum formats
+  {
+    NONE,
+    COMPACT,
+    LOOSE,
+    DETAILED
+  };
+protected:
+  formats fmt;
+  String tmp_js;
+public:
+  Item_func_json_format(THD *thd, Item *js, formats format):
+    Item_str_func(thd, js), fmt(format) {}
+  Item_func_json_format(THD *thd, List<Item> &list):
+    Item_str_func(thd, list), fmt(DETAILED) {}
+
+  const char *func_name() const;
+  bool fix_length_and_dec();
+  String *val_str(String *str);
+  String *val_json(String *str);
+  bool is_json_type() { return true; }
+  Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+  { return get_item_copy<Item_func_json_format>(thd, mem_root, this); }
+};
+
+
+#endif /* ITEM_JSONFUNC_INCLUDED */
diff --git a/sql/item_row.cc b/sql/item_row.cc
index 8c6edacad7f..24320d884dc 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -62,6 +62,7 @@ bool Item_row::fix_fields(THD *thd, Item **ref)
}
maybe_null|= item->maybe_null;
with_sum_func= with_sum_func || item->with_sum_func;
+ with_window_func = with_window_func || item->with_window_func;
with_field= with_field || item->with_field;
with_subselect|= item->with_subselect;
with_param|= item->with_param;
@@ -72,7 +73,7 @@ bool Item_row::fix_fields(THD *thd, Item **ref)
bool
-Item_row::eval_not_null_tables(uchar *opt_arg)
+Item_row::eval_not_null_tables(void *opt_arg)
{
Item **arg,**arg_end;
not_null_tables_cache= 0;
@@ -100,7 +101,7 @@ void Item_row::cleanup()
}
-void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array,
+void Item_row::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
{
Item **arg, **arg_end;
@@ -161,3 +162,21 @@ void Item_row::bring_value()
for (uint i= 0; i < arg_count; i++)
args[i]->bring_value();
}
+
+
+Item* Item_row::build_clone(THD *thd, MEM_ROOT *mem_root)
+{
+ Item_row *copy= (Item_row *) get_copy(thd, mem_root);
+ if (!copy)
+ return 0;
+ copy->args= (Item**) alloc_root(mem_root, sizeof(Item*) * arg_count);
+ for (uint i= 0; i < arg_count; i++)
+ {
+ Item *arg_clone= args[i]->build_clone(thd, mem_root);
+ if (!arg_clone)
+ return 0;
+ copy->args[i]= arg_clone;
+ }
+ return copy;
+}
+
diff --git a/sql/item_row.h b/sql/item_row.h
index 5e8071ec495..bc82b31f9e2 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -55,7 +55,7 @@ public:
enum Type type() const { return ROW_ITEM; };
void illegal_method_call(const char *);
bool is_null() { return null_value; }
- void make_field(Send_field *)
+ void make_field(THD *thd, Send_field *)
{
illegal_method_call((const char*)"make_field");
};
@@ -82,12 +82,16 @@ public:
bool fix_fields(THD *thd, Item **ref);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
void cleanup();
- void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields,
- uint flags);
+ void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
+ List<Item> &fields, uint flags);
table_map used_tables() const { return used_tables_cache; };
bool const_item() const { return const_item_cache; };
enum Item_result result_type() const { return ROW_RESULT; }
Item_result cmp_type() const { return ROW_RESULT; }
+ enum_field_types field_type() const
+ {
+ return MYSQL_TYPE_NULL;
+ }
void update_used_tables()
{
used_tables_and_const_cache_init();
@@ -96,14 +100,14 @@ public:
table_map not_null_tables() const { return not_null_tables_cache; }
virtual void print(String *str, enum_query_type query_type);
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
if (walk_args(processor, walk_subquery, arg))
return true;
return (this->*processor)(arg);
}
Item *transform(THD *thd, Item_transformer transformer, uchar *arg);
- bool eval_not_null_tables(uchar *opt_arg);
+ bool eval_not_null_tables(void *opt_arg);
uint cols() { return arg_count; }
Item* element_index(uint i) { return args[i]; }
@@ -111,7 +115,27 @@ public:
bool check_cols(uint c);
bool null_inside() { return with_null; };
void bring_value();
- bool check_vcol_func_processor(uchar *int_arg) {return FALSE; }
+
+ Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
+ {
+ Item_args::propagate_equal_fields(thd, Context_identity(), cond);
+ return this;
+ }
+
+ bool excl_dep_on_table(table_map tab_map)
+ {
+ return Item_args::excl_dep_on_table(tab_map);
+ }
+
+ bool excl_dep_on_grouping_fields(st_select_lex *sel)
+ {
+ return Item_args::excl_dep_on_grouping_fields(sel);
+ }
+
+ bool check_vcol_func_processor(void *arg) {return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_row>(thd, mem_root, this); }
+ Item *build_clone(THD *thd, MEM_ROOT *mem_root);
};
#endif /* ITEM_ROW_INCLUDED */
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index de13999bab8..b88f376e8dd 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2009, 2018, MariaDB Corporation
+ Copyright (c) 2009, 2019, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -49,7 +49,6 @@
// my_make_scrambled_password_323
#include <m_ctype.h>
#include <my_md5.h>
-#include <zlib.h>
C_MODE_START
#include "../mysys/my_static.h" // For soundex_map
C_MODE_END
@@ -189,10 +188,11 @@ String *Item_func_sha::val_str_ascii(String *str)
return 0;
}
-void Item_func_sha::fix_length_and_dec()
+bool Item_func_sha::fix_length_and_dec()
{
// size of hex representation of hash
fix_length_and_charset(MY_SHA1_HASH_SIZE * 2, default_charset());
+ return FALSE;
}
String *Item_func_sha2::val_str_ascii(String *str)
@@ -258,7 +258,7 @@ String *Item_func_sha2::val_str_ascii(String *str)
str->realloc((uint) digest_length*2 + 1); /* Each byte as two nybbles */
/* Convert the large number to a string-hex representation. */
- array_to_hex((char *) str->ptr(), digest_buf, digest_length);
+ array_to_hex((char *) str->ptr(), digest_buf, (uint)digest_length);
/* We poked raw bytes in. We must inform the the String of its length. */
str->length((uint) digest_length*2); /* Each byte as two nybbles */
@@ -268,12 +268,12 @@ String *Item_func_sha2::val_str_ascii(String *str)
}
-void Item_func_sha2::fix_length_and_dec()
+bool Item_func_sha2::fix_length_and_dec()
{
maybe_null= 1;
max_length = 0;
- int sha_variant= args[1]->const_item() ? args[1]->val_int() : 512;
+ int sha_variant= (int)(args[1]->const_item() ? args[1]->val_int() : 512);
switch (sha_variant) {
case 0: // SHA-256 is the default
@@ -293,6 +293,7 @@ void Item_func_sha2::fix_length_and_dec()
ER_THD(thd, ER_WRONG_PARAMETERS_TO_NATIVE_FCT),
"sha2");
}
+ return FALSE;
}
/* Implementation of AES encryption routines */
@@ -345,37 +346,40 @@ String *Item_aes_crypt::val_str(String *str2)
return 0;
}
-void Item_func_aes_encrypt::fix_length_and_dec()
+bool Item_func_aes_encrypt::fix_length_and_dec()
{
max_length=my_aes_get_size(MY_AES_ECB, args[0]->max_length);
what= ENCRYPTION_FLAG_ENCRYPT;
+ return FALSE;
}
-void Item_func_aes_decrypt::fix_length_and_dec()
+bool Item_func_aes_decrypt::fix_length_and_dec()
{
max_length=args[0]->max_length;
maybe_null= 1;
what= ENCRYPTION_FLAG_DECRYPT;
+ return FALSE;
}
-void Item_func_to_base64::fix_length_and_dec()
+bool Item_func_to_base64::fix_length_and_dec()
{
maybe_null= args[0]->maybe_null;
collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
- if (args[0]->max_length > (uint) base64_encode_max_arg_length())
+ if (args[0]->max_length > (uint) my_base64_encode_max_arg_length())
{
maybe_null= 1;
- fix_char_length_ulonglong((ulonglong) base64_encode_max_arg_length());
+ fix_char_length_ulonglong((ulonglong) my_base64_encode_max_arg_length());
}
else
{
- int length= base64_needed_encoded_length((int) args[0]->max_length);
+ int length= my_base64_needed_encoded_length((int) args[0]->max_length);
DBUG_ASSERT(length > 0);
fix_char_length_ulonglong((ulonglong) length - 1);
}
+ return FALSE;
}
@@ -385,9 +389,9 @@ String *Item_func_to_base64::val_str_ascii(String *str)
bool too_long= false;
int length;
if (!res ||
- res->length() > (uint) base64_encode_max_arg_length() ||
+ res->length() > (uint) my_base64_encode_max_arg_length() ||
(too_long=
- ((uint) (length= base64_needed_encoded_length((int) res->length())) >
+ ((uint) (length= my_base64_needed_encoded_length((int) res->length())) >
current_thd->variables.max_allowed_packet)) ||
str->alloc((uint) length))
{
@@ -403,7 +407,7 @@ String *Item_func_to_base64::val_str_ascii(String *str)
}
return 0;
}
- base64_encode(res->ptr(), (int) res->length(), (char*) str->ptr());
+ my_base64_encode(res->ptr(), (int) res->length(), (char*) str->ptr());
DBUG_ASSERT(length > 0);
str->length((uint) length - 1); // Without trailing '\0'
null_value= 0;
@@ -411,18 +415,19 @@ String *Item_func_to_base64::val_str_ascii(String *str)
}
-void Item_func_from_base64::fix_length_and_dec()
+bool Item_func_from_base64::fix_length_and_dec()
{
- if (args[0]->max_length > (uint) base64_decode_max_arg_length())
+ if (args[0]->max_length > (uint) my_base64_decode_max_arg_length())
{
- fix_char_length_ulonglong((ulonglong) base64_decode_max_arg_length());
+ fix_char_length_ulonglong((ulonglong) my_base64_decode_max_arg_length());
}
else
{
- int length= base64_needed_decoded_length((int) args[0]->max_length);
+ int length= my_base64_needed_decoded_length((int) args[0]->max_length);
fix_char_length_ulonglong((ulonglong) length);
}
maybe_null= 1; // Can be NULL, e.g. in case of badly formed input string
+ return FALSE;
}
@@ -435,8 +440,8 @@ String *Item_func_from_base64::val_str(String *str)
if (!res)
goto err;
- if (res->length() > (uint) base64_decode_max_arg_length() ||
- ((uint) (length= base64_needed_decoded_length((int) res->length())) >
+ if (res->length() > (uint) my_base64_decode_max_arg_length() ||
+ ((uint) (length= my_base64_needed_decoded_length((int) res->length())) >
current_thd->variables.max_allowed_packet))
{
THD *thd= current_thd;
@@ -451,8 +456,8 @@ String *Item_func_from_base64::val_str(String *str)
if (str->alloc((uint) length))
goto err;
- if ((length= base64_decode(res->ptr(), (int) res->length(),
- (char *) str->ptr(), &end_ptr, 0)) < 0 ||
+ if ((length= my_base64_decode(res->ptr(), (int) res->length(),
+ (char *) str->ptr(), &end_ptr, 0)) < 0 ||
end_ptr < res->ptr() + res->length())
{
THD *thd= current_thd;
@@ -510,7 +515,7 @@ String *Item_func_decode_histogram::val_str(String *str)
uint i;
str->length(0);
char numbuf[32];
- const uchar *p= (uchar*)res->c_ptr();
+ const uchar *p= (uchar*)res->c_ptr_safe();
for (i= 0; i < res->length(); i++)
{
double val;
@@ -598,7 +603,7 @@ String *Item_func_concat::val_str(String *str)
goto null;
if (res != str)
- str->copy(res->ptr(), res->length(), res->charset());
+ str->copy_or_move(res->ptr(), res->length(), res->charset());
for (uint i= 1 ; i < arg_count ; i++)
{
@@ -631,17 +636,18 @@ null:
}
-void Item_func_concat::fix_length_and_dec()
+bool Item_func_concat::fix_length_and_dec()
{
ulonglong char_length= 0;
if (agg_arg_charsets_for_string_result(collation, args, arg_count))
- return;
+ return TRUE;
for (uint i=0 ; i < arg_count ; i++)
char_length+= args[i]->max_char_length();
fix_char_length_ulonglong(char_length);
+ return FALSE;
}
/**
@@ -993,12 +999,12 @@ null:
}
-void Item_func_concat_ws::fix_length_and_dec()
+bool Item_func_concat_ws::fix_length_and_dec()
{
ulonglong char_length;
if (agg_arg_charsets_for_string_result(collation, args, arg_count))
- return;
+ return TRUE;
/*
arg_count cannot be less than 2,
@@ -1010,6 +1016,7 @@ void Item_func_concat_ws::fix_length_and_dec()
char_length+= args[i]->max_char_length();
fix_char_length_ulonglong(char_length);
+ return FALSE;
}
@@ -1039,7 +1046,7 @@ String *Item_func_reverse::val_str(String *str)
#ifdef USE_MB
if (use_mb(res->charset()))
{
- register uint32 l;
+ uint32 l;
while (ptr < end)
{
if ((l= my_ismbchar(res->charset(),ptr,end)))
@@ -1063,11 +1070,12 @@ String *Item_func_reverse::val_str(String *str)
}
-void Item_func_reverse::fix_length_and_dec()
+bool Item_func_reverse::fix_length_and_dec()
{
agg_arg_charsets_for_string_result(collation, args, 1);
DBUG_ASSERT(collation.collation != NULL);
fix_char_length(args[0]->max_char_length());
+ return FALSE;
}
/**
@@ -1088,7 +1096,7 @@ String *Item_func_replace::val_str(String *str)
bool alloced=0;
#ifdef USE_MB
const char *ptr,*end,*strend,*search,*search_end;
- register uint32 l;
+ uint32 l;
bool binary_cmp;
#endif
THD *thd= 0;
@@ -1141,7 +1149,7 @@ redo:
{
if (*ptr == *search)
{
- register char *i,*j;
+ char *i,*j;
i=(char*) ptr+1; j=(char*) search+1;
while (j != search_end)
if (*i++ != *j++) goto skip;
@@ -1209,7 +1217,7 @@ null:
}
-void Item_func_replace::fix_length_and_dec()
+bool Item_func_replace::fix_length_and_dec()
{
ulonglong char_length= (ulonglong) args[0]->max_char_length();
int diff=(int) (args[2]->max_char_length() - args[1]->max_char_length());
@@ -1220,8 +1228,9 @@ void Item_func_replace::fix_length_and_dec()
}
if (agg_arg_charsets_for_string_result_with_comparison(collation, args, 3))
- return;
+ return TRUE;
fix_char_length_ulonglong(char_length);
+ return FALSE;
}
@@ -1233,13 +1242,14 @@ bool Item_func_regexp_replace::fix_fields(THD *thd, Item **ref)
}
-void Item_func_regexp_replace::fix_length_and_dec()
+bool Item_func_regexp_replace::fix_length_and_dec()
{
if (agg_arg_charsets_for_string_result_with_comparison(collation, args, 3))
- return;
+ return TRUE;
max_length= MAX_BLOB_WIDTH;
re.init(collation.collation, 0);
re.fix_owner(this, args[0], args[1]);
+ return FALSE;
}
@@ -1375,13 +1385,14 @@ bool Item_func_regexp_substr::fix_fields(THD *thd, Item **ref)
}
-void Item_func_regexp_substr::fix_length_and_dec()
+bool Item_func_regexp_substr::fix_length_and_dec()
{
if (agg_arg_charsets_for_string_result_with_comparison(collation, args, 2))
- return;
+ return TRUE;
fix_char_length(args[0]->max_char_length());
re.init(collation.collation, 0);
re.fix_owner(this, args[0], args[1]);
+ return FALSE;
}
@@ -1489,16 +1500,17 @@ null:
}
-void Item_func_insert::fix_length_and_dec()
+bool Item_func_insert::fix_length_and_dec()
{
ulonglong char_length;
// Handle character set for args[0] and args[3].
if (agg_arg_charsets_for_string_result(collation, args, 2, 3))
- return;
+ return TRUE;
char_length= ((ulonglong) args[0]->max_char_length() +
(ulonglong) args[3]->max_char_length());
fix_char_length_ulonglong(char_length);
+ return FALSE;
}
@@ -1521,22 +1533,26 @@ String *Item_str_conv::val_str(String *str)
}
-void Item_func_lcase::fix_length_and_dec()
+bool Item_func_lcase::fix_length_and_dec()
{
- agg_arg_charsets_for_string_result(collation, args, 1);
+ if (agg_arg_charsets_for_string_result(collation, args, 1))
+ return TRUE;
DBUG_ASSERT(collation.collation != NULL);
multiply= collation.collation->casedn_multiply;
converter= collation.collation->cset->casedn;
fix_char_length_ulonglong((ulonglong) args[0]->max_char_length() * multiply);
+ return FALSE;
}
-void Item_func_ucase::fix_length_and_dec()
+bool Item_func_ucase::fix_length_and_dec()
{
- agg_arg_charsets_for_string_result(collation, args, 1);
+ if (agg_arg_charsets_for_string_result(collation, args, 1))
+ return TRUE;
DBUG_ASSERT(collation.collation != NULL);
multiply= collation.collation->caseup_multiply;
converter= collation.collation->cset->caseup;
fix_char_length_ulonglong((ulonglong) args[0]->max_char_length() * multiply);
+ return FALSE;
}
@@ -1579,11 +1595,13 @@ void Item_str_func::left_right_max_length()
}
-void Item_func_left::fix_length_and_dec()
+bool Item_func_left::fix_length_and_dec()
{
- agg_arg_charsets_for_string_result(collation, args, 1);
+ if (agg_arg_charsets_for_string_result(collation, args, 1))
+ return TRUE;
DBUG_ASSERT(collation.collation != NULL);
left_right_max_length();
+ return FALSE;
}
@@ -1613,11 +1631,13 @@ String *Item_func_right::val_str(String *str)
}
-void Item_func_right::fix_length_and_dec()
+bool Item_func_right::fix_length_and_dec()
{
- agg_arg_charsets_for_string_result(collation, args, 1);
+ if (agg_arg_charsets_for_string_result(collation, args, 1))
+ return TRUE;
DBUG_ASSERT(collation.collation != NULL);
left_right_max_length();
+ return FALSE;
}
@@ -1668,11 +1688,12 @@ String *Item_func_substr::val_str(String *str)
}
-void Item_func_substr::fix_length_and_dec()
+bool Item_func_substr::fix_length_and_dec()
{
max_length=args[0]->max_length;
- agg_arg_charsets_for_string_result(collation, args, 1);
+ if (agg_arg_charsets_for_string_result(collation, args, 1))
+ return TRUE;
DBUG_ASSERT(collation.collation != NULL);
if (args[1]->const_item())
{
@@ -1693,14 +1714,16 @@ void Item_func_substr::fix_length_and_dec()
set_if_smaller(max_length,(uint) length);
}
max_length*= collation.collation->mbmaxlen;
+ return FALSE;
}
-void Item_func_substr_index::fix_length_and_dec()
-{
+bool Item_func_substr_index::fix_length_and_dec()
+{
if (agg_arg_charsets_for_string_result_with_comparison(collation, args, 2))
- return;
+ return TRUE;
fix_char_length(args[0]->max_char_length());
+ return FALSE;
}
@@ -1735,14 +1758,14 @@ String *Item_func_substr_index::val_str(String *str)
const char *search= delimiter->ptr();
const char *search_end= search+delimiter_length;
int32 n=0,c=count,pass;
- register uint32 l;
+ uint32 l;
for (pass=(count>0);pass<2;++pass)
{
while (ptr < end)
{
if (*ptr == *search)
{
- register char *i,*j;
+ char *i,*j;
i=(char*) ptr+1; j=(char*) search+1;
while (j != search_end)
if (*i++ != *j++) goto skip;
@@ -1926,7 +1949,7 @@ String *Item_func_rtrim::val_str(String *str)
end= ptr+res->length();
#ifdef USE_MB
char *p=ptr;
- register uint32 l;
+ uint32 l;
#endif
if (remove_length == 1)
{
@@ -2011,7 +2034,7 @@ String *Item_func_trim::val_str(String *str)
if (use_mb(collation.collation))
{
char *p=ptr;
- register uint32 l;
+ uint32 l;
loop:
while (ptr + remove_length < end)
{
@@ -2040,11 +2063,12 @@ String *Item_func_trim::val_str(String *str)
return trimmed_value(res, (uint32) (ptr - res->ptr()), (uint32) (end - ptr));
}
-void Item_func_trim::fix_length_and_dec()
+bool Item_func_trim::fix_length_and_dec()
{
if (arg_count == 1)
{
- agg_arg_charsets_for_string_result(collation, args, 1);
+ if (agg_arg_charsets_for_string_result(collation, args, 1))
+ return TRUE;
DBUG_ASSERT(collation.collation != NULL);
remove.set_charset(collation.collation);
remove.set_ascii(" ",1);
@@ -2055,9 +2079,10 @@ void Item_func_trim::fix_length_and_dec()
// Note that we pass args[1] as the first item, and args[0] as the second.
if (agg_arg_charsets_for_string_result_with_comparison(collation,
&args[1], 2, -1))
- return;
+ return TRUE;
}
fix_char_length(args[0]->max_char_length());
+ return FALSE;
}
void Item_func_trim::print(String *str, enum_query_type query_type)
@@ -2197,7 +2222,7 @@ bool Item_func_encode::seed()
return FALSE;
}
-void Item_func_encode::fix_length_and_dec()
+bool Item_func_encode::fix_length_and_dec()
{
max_length=args[0]->max_length;
maybe_null=args[0]->maybe_null || args[1]->maybe_null;
@@ -2205,6 +2230,7 @@ void Item_func_encode::fix_length_and_dec()
/* Precompute the seed state if the item is constant. */
seeded= args[1]->const_item() &&
(args[1]->result_type() == STRING_RESULT) && !seed();
+ return FALSE;
}
String *Item_func_encode::val_str(String *str)
@@ -2325,7 +2351,7 @@ bool Item_func_current_user::fix_fields(THD *thd, Item **ref)
if (Item_func_sysconst::fix_fields(thd, ref))
return TRUE;
- Security_context *ctx= context->security_ctx
+ Security_context *ctx= context && context->security_ctx
? context->security_ctx : thd->security_ctx;
return init(ctx->priv_user, ctx->priv_host);
}
@@ -2335,7 +2361,7 @@ bool Item_func_current_role::fix_fields(THD *thd, Item **ref)
if (Item_func_sysconst::fix_fields(thd, ref))
return 1;
- Security_context *ctx= context->security_ctx
+ Security_context *ctx= context && context->security_ctx
? context->security_ctx : thd->security_ctx;
if (ctx->priv_role[0])
{
@@ -2350,13 +2376,14 @@ bool Item_func_current_role::fix_fields(THD *thd, Item **ref)
return 0;
}
-void Item_func_soundex::fix_length_and_dec()
+bool Item_func_soundex::fix_length_and_dec()
{
uint32 char_length= args[0]->max_char_length();
agg_arg_charsets_for_string_result(collation, args, 1);
DBUG_ASSERT(collation.collation != NULL);
set_if_bigger(char_length, 4);
fix_char_length(char_length);
+ return FALSE;
}
@@ -2532,7 +2559,7 @@ MY_LOCALE *Item_func_format::get_locale(Item *item)
return lc;
}
-void Item_func_format::fix_length_and_dec()
+bool Item_func_format::fix_length_and_dec()
{
uint32 char_length= args[0]->max_char_length();
uint32 max_sep_count= (char_length / 3) + (decimals ? 1 : 0) + /*sign*/1;
@@ -2542,6 +2569,7 @@ void Item_func_format::fix_length_and_dec()
locale= args[2]->basic_const_item() ? get_locale(args[2]) : NULL;
else
locale= &my_locale_en_US; /* Two arguments */
+ return FALSE;
}
@@ -2592,7 +2620,7 @@ String *Item_func_format::val_str_ascii(String *str)
return 0; /* purecov: inspected */
nr= my_double_round(nr, (longlong) dec, FALSE, FALSE);
str->set_real(nr, dec, &my_charset_numeric);
- if (isnan(nr) || my_isinf(nr))
+ if (!isfinite(nr))
return str;
str_length=str->length();
}
@@ -2655,27 +2683,13 @@ String *Item_func_format::val_str_ascii(String *str)
}
-void Item_func_format::print(String *str, enum_query_type query_type)
-{
- str->append(STRING_WITH_LEN("format("));
- args[0]->print(str, query_type);
- str->append(',');
- args[1]->print(str, query_type);
- if(arg_count > 2)
- {
- str->append(',');
- args[2]->print(str,query_type);
- }
- str->append(')');
-}
-
-void Item_func_elt::fix_length_and_dec()
+bool Item_func_elt::fix_length_and_dec()
{
uint32 char_length= 0;
decimals=0;
if (agg_arg_charsets_for_string_result(collation, args + 1, arg_count - 1))
- return;
+ return TRUE;
for (uint i= 1 ; i < arg_count ; i++)
{
@@ -2684,6 +2698,7 @@ void Item_func_elt::fix_length_and_dec()
}
fix_char_length(char_length);
maybe_null=1; // NULL if wrong first arg
+ return FALSE;
}
@@ -2730,16 +2745,17 @@ String *Item_func_elt::val_str(String *str)
}
-void Item_func_make_set::fix_length_and_dec()
+bool Item_func_make_set::fix_length_and_dec()
{
uint32 char_length= arg_count - 2; /* Separators */
if (agg_arg_charsets_for_string_result(collation, args + 1, arg_count - 1))
- return;
+ return TRUE;
for (uint i=1 ; i < arg_count ; i++)
char_length+= args[i]->max_char_length();
fix_char_length(char_length);
+ return FALSE;
}
@@ -2870,9 +2886,10 @@ inline String* alloc_buffer(String *res,String *str,String *tmp_value,
}
-void Item_func_repeat::fix_length_and_dec()
+bool Item_func_repeat::fix_length_and_dec()
{
- agg_arg_charsets_for_string_result(collation, args, 1);
+ if (agg_arg_charsets_for_string_result(collation, args, 1))
+ return TRUE;
DBUG_ASSERT(collation.collation != NULL);
if (args[1]->const_item())
{
@@ -2894,6 +2911,7 @@ void Item_func_repeat::fix_length_and_dec()
max_length= MAX_BLOB_WIDTH;
maybe_null= 1;
}
+ return FALSE;
}
/**
@@ -2955,7 +2973,7 @@ err:
}
-void Item_func_space::fix_length_and_dec()
+bool Item_func_space::fix_length_and_dec()
{
collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
if (args[0]->const_item())
@@ -2971,12 +2989,13 @@ void Item_func_space::fix_length_and_dec()
if (count > INT_MAX32)
count= INT_MAX32;
fix_char_length_ulonglong(count);
- return;
+ return FALSE;
}
end:
max_length= MAX_BLOB_WIDTH;
maybe_null= 1;
+ return FALSE;
}
@@ -3026,11 +3045,12 @@ err:
}
-void Item_func_binlog_gtid_pos::fix_length_and_dec()
+bool Item_func_binlog_gtid_pos::fix_length_and_dec()
{
collation.set(system_charset_info);
max_length= MAX_BLOB_WIDTH;
maybe_null= 1;
+ return FALSE;
}
@@ -3066,7 +3086,7 @@ err:
}
-void Item_func_pad::fix_length_and_dec()
+bool Item_func_pad::fix_length_and_dec()
{
String *str;
if (!args[2]->basic_const_item() || !(str= args[2]->val_str(&pad_str)) || !str->length())
@@ -3074,7 +3094,7 @@ void Item_func_pad::fix_length_and_dec()
// Handle character set for args[0] and args[2].
if (agg_arg_charsets_for_string_result(collation, &args[0], 2, 2))
- return;
+ return TRUE;
if (args[1]->const_item())
{
ulonglong char_length= (ulonglong) args[1]->val_int();
@@ -3092,6 +3112,7 @@ void Item_func_pad::fix_length_and_dec()
max_length= MAX_BLOB_WIDTH;
maybe_null= 1;
}
+ return FALSE;
}
@@ -3324,10 +3345,11 @@ String *Item_func_conv_charset::val_str(String *str)
0 : str;
}
-void Item_func_conv_charset::fix_length_and_dec()
+bool Item_func_conv_charset::fix_length_and_dec()
{
DBUG_ASSERT(collation.derivation == DERIVATION_IMPLICIT);
fix_char_length(args[0]->max_char_length());
+ return FALSE;
}
void Item_func_conv_charset::print(String *str, enum_query_type query_type)
@@ -3349,7 +3371,7 @@ String *Item_func_set_collation::val_str(String *str)
return str;
}
-void Item_func_set_collation::fix_length_and_dec()
+bool Item_func_set_collation::fix_length_and_dec()
{
CHARSET_INFO *set_collation;
const char *colname;
@@ -3360,8 +3382,8 @@ void Item_func_set_collation::fix_length_and_dec()
MY_CS_BINSORT,MYF(0));
else
{
- if (!(set_collation= mysqld_collation_get_by_name(colname)))
- return;
+ if (!(set_collation= mysqld_collation_get_by_name(colname)))
+ return TRUE;
}
if (!set_collation ||
@@ -3369,11 +3391,12 @@ void Item_func_set_collation::fix_length_and_dec()
{
my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0),
colname, args[0]->collation.collation->csname);
- return;
+ return TRUE;
}
collation.set(set_collation, DERIVATION_EXPLICIT,
args[0]->collation.repertoire);
max_length= args[0]->max_length;
+ return FALSE;
}
@@ -3400,13 +3423,11 @@ bool Item_func_set_collation::eq(const Item *item, bool binary_cmp) const
void Item_func_set_collation::print(String *str, enum_query_type query_type)
{
- str->append('(');
- args[0]->print(str, query_type);
+ args[0]->print_parenthesised(str, query_type, precedence());
str->append(STRING_WITH_LEN(" collate "));
DBUG_ASSERT(args[1]->basic_const_item() &&
args[1]->type() == Item::STRING_ITEM);
((Item_string *)args[1])->print_value(str);
- str->append(')');
}
String *Item_func_charset::val_str(String *str)
@@ -3434,7 +3455,7 @@ String *Item_func_collation::val_str(String *str)
}
-void Item_func_weight_string::fix_length_and_dec()
+bool Item_func_weight_string::fix_length_and_dec()
{
CHARSET_INFO *cs= args[0]->collation.collation;
collation.set(&my_charset_bin, args[0]->collation.derivation);
@@ -3452,6 +3473,7 @@ void Item_func_weight_string::fix_length_and_dec()
max_length= cs->coll->strnxfrmlen(cs, char_length * cs->mbmaxlen);
}
maybe_null= 1;
+ return FALSE;
}
@@ -3535,6 +3557,21 @@ nl:
}
+void Item_func_weight_string::print(String *str, enum_query_type query_type)
+{
+ str->append(func_name());
+ str->append('(');
+ args[0]->print(str, query_type);
+ str->append(',');
+ str->append_ulonglong(result_length);
+ str->append(',');
+ str->append_ulonglong(nweights);
+ str->append(',');
+ str->append_ulonglong(flags);
+ str->append(')');
+}
+
+
String *Item_func_hex::val_str_ascii(String *str)
{
String *res;
@@ -3635,12 +3672,12 @@ String *Item_func_like_range::val_str(String *str)
if (!res || args[0]->null_value || args[1]->null_value ||
nbytes < 0 || nbytes > MAX_BLOB_WIDTH ||
- min_str.alloc(nbytes) || max_str.alloc(nbytes))
+ min_str.alloc((size_t)nbytes) || max_str.alloc((size_t)nbytes))
goto err;
null_value=0;
if (cs->coll->like_range(cs, res->ptr(), res->length(),
- '\\', '_', '%', nbytes,
+ '\\', '_', '%', (size_t)nbytes,
(char*) min_str.ptr(), (char*) max_str.ptr(),
&min_len, &max_len))
goto err;
@@ -3717,7 +3754,7 @@ String *Item_load_file::val_str(String *str)
if ((file= mysql_file_open(key_file_loadfile,
file_name->ptr(), O_RDONLY, MYF(0))) < 0)
goto err;
- if (mysql_file_read(file, (uchar*) tmp_value.ptr(), stat_info.st_size,
+ if (mysql_file_read(file, (uchar*) tmp_value.ptr(), (size_t)stat_info.st_size,
MYF(MY_NABP)))
{
mysql_file_close(file, MYF(0));
@@ -3820,15 +3857,16 @@ String* Item_func_export_set::val_str(String* str)
return str;
}
-void Item_func_export_set::fix_length_and_dec()
+bool Item_func_export_set::fix_length_and_dec()
{
uint32 length= MY_MAX(args[1]->max_char_length(), args[2]->max_char_length());
uint32 sep_length= (arg_count > 3 ? args[3]->max_char_length() : 1);
if (agg_arg_charsets_for_string_result(collation,
args + 1, MY_MIN(4, arg_count) - 1))
- return;
+ return TRUE;
fix_char_length(length * 64 + sep_length * 63);
+ return FALSE;
}
@@ -3943,7 +3981,7 @@ String *Item_func_quote::val_str(String *str)
if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0)
goto toolong;
to+= mblen;
- new_length= to - str->ptr();
+ new_length= (uint)(to - str->ptr());
goto ret;
}
@@ -4042,7 +4080,7 @@ longlong Item_func_crc32::val_int()
return 0; /* purecov: inspected */
}
null_value=0;
- return (longlong) crc32(0L, (uchar*)res->ptr(), res->length());
+ return (longlong) my_checksum(0L, (uchar*)res->ptr(), res->length());
}
#ifdef HAVE_COMPRESS
@@ -4228,15 +4266,16 @@ bool Item_func_dyncol_create::fix_fields(THD *thd, Item **ref)
}
-void Item_func_dyncol_create::fix_length_and_dec()
+bool Item_func_dyncol_create::fix_length_and_dec()
{
max_length= MAX_BLOB_WIDTH;
maybe_null= TRUE;
collation.set(&my_charset_bin);
decimals= 0;
+ return FALSE;
}
-bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
+bool Item_func_dyncol_create::prepare_arguments(THD *thd, bool force_names_arg)
{
char buff[STRING_BUFFER_USUAL_SIZE];
String *res, tmp(buff, sizeof(buff), &my_charset_bin);
@@ -4356,16 +4395,13 @@ bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
if (my_charset_same(res->charset(), DYNCOL_UTF))
{
keys_str[i].length= res->length();
- keys_str[i].str= sql_strmake(res->ptr(), res->length());
+ keys_str[i].str= thd->strmake(res->ptr(), res->length());
}
else
{
- uint strlen;
+ uint strlen= res->length() * DYNCOL_UTF->mbmaxlen + 1;
uint dummy_errors;
- char *str=
- (char *)sql_alloc((strlen= res->length() *
- DYNCOL_UTF->mbmaxlen + 1));
- if (str)
+ if (char *str= (char *) thd->alloc(strlen))
{
keys_str[i].length=
copy_and_convert(str, strlen, DYNCOL_UTF,
@@ -4413,7 +4449,7 @@ bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
if (res && defs[i].cs)
res->set_charset(defs[i].cs);
if (res &&
- (vals[i].x.string.value.str= sql_strmake(res->ptr(), res->length())))
+ (vals[i].x.string.value.str= thd->strmake(res->ptr(), res->length())))
{
vals[i].x.string.value.length= res->length();
vals[i].x.string.charset= res->charset();
@@ -4445,7 +4481,7 @@ bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
case DYN_COL_DATETIME:
case DYN_COL_DATE:
args[valpos]->get_date(&vals[i].x.time_value,
- sql_mode_for_dates(current_thd));
+ sql_mode_for_dates(thd));
break;
case DYN_COL_TIME:
args[valpos]->get_time(&vals[i].x.time_value);
@@ -4471,7 +4507,8 @@ String *Item_func_dyncol_create::val_str(String *str)
enum enum_dyncol_func_result rc;
DBUG_ASSERT((arg_count & 0x1) == 0); // even number of arguments
- if (prepare_arguments(FALSE))
+ /* FIXME: add thd argument to Item::val_str() */
+ if (prepare_arguments(current_thd, FALSE))
{
res= NULL;
null_value= 1;
@@ -4617,7 +4654,8 @@ String *Item_func_dyncol_add::val_str(String *str)
col.length= res->length();
memcpy(col.str, res->ptr(), col.length);
- if (prepare_arguments(mariadb_dyncol_has_names(&col)))
+ /* FIXME: add thd argument to Item::val_str() */
+ if (prepare_arguments(current_thd, mariadb_dyncol_has_names(&col)))
goto null;
if ((rc= ((names || force_names) ?
@@ -4652,7 +4690,7 @@ void Item_func_dyncol_add::print(String *str,
enum_query_type query_type)
{
DBUG_ASSERT((arg_count & 0x1) == 1); // odd number of arguments
- str->append(STRING_WITH_LEN("column_create("));
+ str->append(STRING_WITH_LEN("column_add("));
args[arg_count - 1]->print(str, query_type);
str->append(',');
print_arguments(str, query_type);
@@ -4667,7 +4705,8 @@ void Item_func_dyncol_add::print(String *str,
This function ensures that null_value is set correctly
*/
-bool Item_dyncol_get::get_dyn_value(DYNAMIC_COLUMN_VALUE *val, String *tmp)
+bool Item_dyncol_get::get_dyn_value(THD *thd, DYNAMIC_COLUMN_VALUE *val,
+ String *tmp)
{
DYNAMIC_COLUMN dyn_str;
String *res;
@@ -4695,10 +4734,9 @@ bool Item_dyncol_get::get_dyn_value(DYNAMIC_COLUMN_VALUE *val, String *tmp)
}
else
{
- uint strlen;
+ uint strlen= nm->length() * DYNCOL_UTF->mbmaxlen + 1;
uint dummy_errors;
- buf.str= (char *)sql_alloc((strlen= nm->length() *
- DYNCOL_UTF->mbmaxlen + 1));
+ buf.str= (char *) thd->alloc(strlen);
if (buf.str)
{
buf.length=
@@ -4748,7 +4786,7 @@ String *Item_dyncol_get::val_str(String *str_result)
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff, sizeof(buff), &my_charset_bin);
- if (get_dyn_value(&val, &tmp))
+ if (get_dyn_value(current_thd, &val, &tmp))
return NULL;
switch (val.type) {
@@ -4830,11 +4868,12 @@ null:
longlong Item_dyncol_get::val_int()
{
+ THD *thd= current_thd;
DYNAMIC_COLUMN_VALUE val;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff, sizeof(buff), &my_charset_bin);
- if (get_dyn_value(&val, &tmp))
+ if (get_dyn_value(thd, &val, &tmp))
return 0;
switch (val.type) {
@@ -4848,24 +4887,8 @@ longlong Item_dyncol_get::val_int()
unsigned_flag= 0; // Make it possible for caller to detect sign
return val.x.long_value;
case DYN_COL_DOUBLE:
- {
- bool error;
- longlong num;
-
- num= double_to_longlong(val.x.double_value, unsigned_flag, &error);
- if (error)
- {
- THD *thd= current_thd;
- char buff[30];
- sprintf(buff, "%lg", val.x.double_value);
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_DATA_OVERFLOW,
- ER_THD(thd, ER_DATA_OVERFLOW),
- buff,
- unsigned_flag ? "UNSIGNED INT" : "INT");
- }
- return num;
- }
+ return Converter_double_to_longlong_with_warn(thd, val.x.double_value,
+ unsigned_flag).result();
case DYN_COL_STRING:
{
int error;
@@ -4875,7 +4898,6 @@ longlong Item_dyncol_get::val_int()
num= my_strtoll10(val.x.string.value.str, &end, &error);
if (end != org_end || error > 0)
{
- THD *thd= current_thd;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_BAD_DATA,
ER_THD(thd, ER_BAD_DATA),
@@ -4912,11 +4934,12 @@ null:
double Item_dyncol_get::val_real()
{
+ THD *thd= current_thd;
DYNAMIC_COLUMN_VALUE val;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff, sizeof(buff), &my_charset_bin);
- if (get_dyn_value(&val, &tmp))
+ if (get_dyn_value(thd, &val, &tmp))
return 0.0;
switch (val.type) {
@@ -4939,7 +4962,6 @@ double Item_dyncol_get::val_real()
if (end != (char*) val.x.string.value.str + val.x.string.value.length ||
error)
{
- THD *thd= current_thd;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_BAD_DATA,
ER_THD(thd, ER_BAD_DATA),
@@ -4971,11 +4993,12 @@ null:
my_decimal *Item_dyncol_get::val_decimal(my_decimal *decimal_value)
{
+ THD *thd= current_thd;
DYNAMIC_COLUMN_VALUE val;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff, sizeof(buff), &my_charset_bin);
- if (get_dyn_value(&val, &tmp))
+ if (get_dyn_value(thd, &val, &tmp))
return NULL;
switch (val.type) {
@@ -5000,7 +5023,6 @@ my_decimal *Item_dyncol_get::val_decimal(my_decimal *decimal_value)
if (rc != E_DEC_OK ||
end != val.x.string.value.str + val.x.string.value.length)
{
- THD *thd= current_thd;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_BAD_DATA,
ER_THD(thd, ER_BAD_DATA),
@@ -5035,7 +5057,7 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
String tmp(buff, sizeof(buff), &my_charset_bin);
bool signed_value= 0;
- if (get_dyn_value(&val, &tmp))
+ if (get_dyn_value(current_thd, &val, &tmp))
return 1; // Error
switch (val.type) {
@@ -5048,10 +5070,11 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
case DYN_COL_UINT:
if (signed_value || val.x.ulong_value <= LONGLONG_MAX)
{
- bool neg= val.x.ulong_value > LONGLONG_MAX;
- if (int_to_datetime_with_warn(neg, neg ? -val.x.ulong_value :
- val.x.ulong_value,
- ltime, fuzzy_date, 0 /* TODO */))
+ longlong llval = (longlong)val.x.ulong_value;
+ bool neg = llval < 0;
+ if (int_to_datetime_with_warn(neg, (ulonglong)(neg ? -llval :
+ llval),
+ ltime, fuzzy_date, 0, 0 /* TODO */))
goto null;
return 0;
}
@@ -5060,12 +5083,12 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
/* fall through */
case DYN_COL_DOUBLE:
if (double_to_datetime_with_warn(val.x.double_value, ltime, fuzzy_date,
- 0 /* TODO */))
+ 0, 0 /* TODO */))
goto null;
return 0;
case DYN_COL_DECIMAL:
if (decimal_to_datetime_with_warn((my_decimal*)&val.x.decimal.value, ltime,
- fuzzy_date, 0 /* TODO */))
+ fuzzy_date, 0, 0 /* TODO */))
goto null;
return 0;
case DYN_COL_STRING:
@@ -5159,4 +5182,3 @@ null:
my_free(names);
return NULL;
}
-
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index ace246bc271..9a78a7f34f5 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -3,7 +3,7 @@
/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2009, 2015, MariaDB
+ Copyright (c) 2009, 2019, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -65,6 +65,7 @@ public:
double val_real();
my_decimal *val_decimal(my_decimal *);
enum Item_result result_type () const { return STRING_RESULT; }
+ enum_field_types field_type() const { return string_field_type(); }
void left_right_max_length();
bool fix_fields(THD *thd, Item **ref);
void update_null_value()
@@ -92,7 +93,7 @@ public:
{
return val_str_from_val_str_ascii(str, &ascii_buf);
}
- virtual String *val_str_ascii(String *)= 0;
+ String *val_str_ascii(String *)= 0;
};
@@ -144,11 +145,14 @@ class Item_func_md5 :public Item_str_ascii_checksum_func
public:
Item_func_md5(THD *thd, Item *a): Item_str_ascii_checksum_func(thd, a) {}
String *val_str_ascii(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
fix_length_and_charset(32, default_charset());
+ return FALSE;
}
const char *func_name() const { return "md5"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_md5>(thd, mem_root, this); }
};
@@ -157,8 +161,10 @@ class Item_func_sha :public Item_str_ascii_checksum_func
public:
Item_func_sha(THD *thd, Item *a): Item_str_ascii_checksum_func(thd, a) {}
String *val_str_ascii(String *);
- void fix_length_and_dec();
- const char *func_name() const { return "sha"; }
+ bool fix_length_and_dec();
+ const char *func_name() const { return "sha"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_sha>(thd, mem_root, this); }
};
class Item_func_sha2 :public Item_str_ascii_checksum_func
@@ -167,8 +173,10 @@ public:
Item_func_sha2(THD *thd, Item *a, Item *b)
:Item_str_ascii_checksum_func(thd, a, b) {}
String *val_str_ascii(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "sha2"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_sha2>(thd, mem_root, this); }
};
class Item_func_to_base64 :public Item_str_ascii_checksum_func
@@ -178,8 +186,10 @@ public:
Item_func_to_base64(THD *thd, Item *a)
:Item_str_ascii_checksum_func(thd, a) {}
String *val_str_ascii(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "to_base64"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_to_base64>(thd, mem_root, this); }
};
class Item_func_from_base64 :public Item_str_binary_checksum_func
@@ -189,8 +199,10 @@ public:
Item_func_from_base64(THD *thd, Item *a)
:Item_str_binary_checksum_func(thd, a) { }
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "from_base64"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_from_base64>(thd, mem_root, this); }
};
#include <my_crypt.h>
@@ -214,8 +226,10 @@ class Item_func_aes_encrypt :public Item_aes_crypt
public:
Item_func_aes_encrypt(THD *thd, Item *a, Item *b)
:Item_aes_crypt(thd, a, b) {}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "aes_encrypt"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_aes_encrypt>(thd, mem_root, this); }
};
class Item_func_aes_decrypt :public Item_aes_crypt
@@ -223,8 +237,10 @@ class Item_func_aes_decrypt :public Item_aes_crypt
public:
Item_func_aes_decrypt(THD *thd, Item *a, Item *b):
Item_aes_crypt(thd, a, b) {}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "aes_decrypt"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_aes_decrypt>(thd, mem_root, this); }
};
@@ -236,8 +252,10 @@ public:
Item_func_concat(THD *thd, List<Item> &list): Item_str_func(thd, list) {}
Item_func_concat(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "concat"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_concat>(thd, mem_root, this); }
};
class Item_func_decode_histogram :public Item_str_func
@@ -246,13 +264,16 @@ public:
Item_func_decode_histogram(THD *thd, Item *a, Item *b):
Item_str_func(thd, a, b) {}
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(system_charset_info);
max_length= MAX_BLOB_WIDTH;
maybe_null= 1;
+ return FALSE;
}
const char *func_name() const { return "decode_histogram"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_decode_histogram>(thd, mem_root, this); }
};
class Item_func_concat_ws :public Item_str_func
@@ -261,9 +282,11 @@ class Item_func_concat_ws :public Item_str_func
public:
Item_func_concat_ws(THD *thd, List<Item> &list): Item_str_func(thd, list) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "concat_ws"; }
table_map not_null_tables() const { return 0; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_concat_ws>(thd, mem_root, this); }
};
class Item_func_reverse :public Item_str_func
@@ -272,8 +295,10 @@ class Item_func_reverse :public Item_str_func
public:
Item_func_reverse(THD *thd, Item *a): Item_str_func(thd, a) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "reverse"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_reverse>(thd, mem_root, this); }
};
@@ -284,8 +309,10 @@ public:
Item_func_replace(THD *thd, Item *org, Item *find, Item *replace):
Item_str_func(thd, org, find, replace) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "replace"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_replace>(thd, mem_root, this); }
};
@@ -308,8 +335,9 @@ public:
}
String *val_str(String *str);
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "regexp_replace"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0;}
};
@@ -329,8 +357,9 @@ public:
}
String *val_str(String *str);
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "regexp_substr"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
};
@@ -342,8 +371,10 @@ public:
Item *new_str):
Item_str_func(thd, org, start, length, new_str) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "insert"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_insert>(thd, mem_root, this); }
};
@@ -364,7 +395,9 @@ class Item_func_lcase :public Item_str_conv
public:
Item_func_lcase(THD *thd, Item *item): Item_str_conv(thd, item) {}
const char *func_name() const { return "lcase"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_lcase>(thd, mem_root, this); }
};
class Item_func_ucase :public Item_str_conv
@@ -372,7 +405,9 @@ class Item_func_ucase :public Item_str_conv
public:
Item_func_ucase(THD *thd, Item *item): Item_str_conv(thd, item) {}
const char *func_name() const { return "ucase"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ucase>(thd, mem_root, this); }
};
@@ -382,8 +417,10 @@ class Item_func_left :public Item_str_func
public:
Item_func_left(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "left"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_left>(thd, mem_root, this); }
};
@@ -393,8 +430,10 @@ class Item_func_right :public Item_str_func
public:
Item_func_right(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "right"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_right>(thd, mem_root, this); }
};
@@ -405,8 +444,10 @@ public:
Item_func_substr(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {}
Item_func_substr(THD *thd, Item *a, Item *b, Item *c): Item_str_func(thd, a, b, c) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "substr"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_substr>(thd, mem_root, this); }
};
@@ -417,8 +458,11 @@ public:
Item_func_substr_index(THD *thd, Item *a,Item *b,Item *c):
Item_str_func(thd, a, b, c) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "substring_index"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_substr_index>(thd, mem_root, this); }
+
};
@@ -446,10 +490,12 @@ public:
Item_func_trim(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {}
Item_func_trim(THD *thd, Item *a): Item_str_func(thd, a) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "trim"; }
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
virtual const char *mode_name() const { return "both"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_trim>(thd, mem_root, this); }
};
@@ -461,6 +507,8 @@ public:
String *val_str(String *);
const char *func_name() const { return "ltrim"; }
const char *mode_name() const { return "leading"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_ltrim>(thd, mem_root, this); }
};
@@ -472,6 +520,8 @@ public:
String *val_str(String *);
const char *func_name() const { return "rtrim"; }
const char *mode_name() const { return "trailing"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_rtrim>(thd, mem_root, this); }
};
@@ -498,17 +548,20 @@ public:
Item_str_ascii_checksum_func(thd, a), alg(al), deflt(0) {}
String *val_str_ascii(String *str);
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
fix_length_and_charset((alg == 1 ?
SCRAMBLED_PASSWORD_CHAR_LENGTH :
SCRAMBLED_PASSWORD_CHAR_LENGTH_323),
default_charset());
+ return FALSE;
}
const char *func_name() const { return ((deflt || alg == 1) ?
"password" : "old_password"); }
static char *alloc(THD *thd, const char *password, size_t pass_len,
enum PW_Alg al);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_password>(thd, mem_root, this); }
};
@@ -522,13 +575,16 @@ public:
Item_func_des_encrypt(THD *thd, Item *a, Item *b)
:Item_str_binary_checksum_func(thd, a, b) {}
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
maybe_null=1;
/* 9 = MAX ((8- (arg_len % 8)) + 1) */
max_length = args[0]->max_length + 9;
+ return FALSE;
}
const char *func_name() const { return "des_encrypt"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_des_encrypt>(thd, mem_root, this); }
};
class Item_func_des_decrypt :public Item_str_binary_checksum_func
@@ -540,15 +596,18 @@ public:
Item_func_des_decrypt(THD *thd, Item *a, Item *b)
:Item_str_binary_checksum_func(thd, a, b) {}
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
maybe_null=1;
/* 9 = MAX ((8- (arg_len % 8)) + 1) */
max_length= args[0]->max_length;
if (max_length >= 9U)
max_length-= 9U;
+ return FALSE;
}
const char *func_name() const { return "des_decrypt"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_des_decrypt>(thd, mem_root, this); }
};
@@ -577,12 +636,14 @@ public:
constructor_helper();
}
String *val_str(String *);
- void fix_length_and_dec() { maybe_null=1; max_length = 13; }
+ bool fix_length_and_dec() { maybe_null=1; max_length = 13; return FALSE; }
const char *func_name() const { return "encrypt"; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_encrypt>(thd, mem_root, this); }
};
#include "sql_crypt.h"
@@ -599,8 +660,10 @@ public:
Item_func_encode(THD *thd, Item *a, Item *seed_arg):
Item_str_binary_checksum_func(thd, a, seed_arg) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "encode"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_encode>(thd, mem_root, this); }
protected:
virtual void crypto_transform(String *);
private:
@@ -614,6 +677,8 @@ class Item_func_decode :public Item_func_encode
public:
Item_func_decode(THD *thd, Item *a, Item *seed_arg): Item_func_encode(thd, a, seed_arg) {}
const char *func_name() const { return "decode"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_decode>(thd, mem_root, this); }
protected:
void crypto_transform(String *);
};
@@ -631,10 +696,10 @@ public:
call
*/
virtual const char *fully_qualified_func_name() const = 0;
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(
- fully_qualified_func_name());
+ return mark_unsupported_function(fully_qualified_func_name(), arg,
+ VCOL_SESSION_FUNC);
}
bool const_item() const;
};
@@ -645,13 +710,16 @@ class Item_func_database :public Item_func_sysconst
public:
Item_func_database(THD *thd): Item_func_sysconst(thd) {}
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
max_length= MAX_FIELD_NAME * system_charset_info->mbmaxlen;
maybe_null=1;
+ return FALSE;
}
const char *func_name() const { return "database"; }
const char *fully_qualified_func_name() const { return "database()"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_database>(thd, mem_root, this); }
};
@@ -671,10 +739,11 @@ public:
return (null_value ? 0 : &str_value);
}
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
- max_length= (username_char_length +
+ max_length= (uint32) (username_char_length +
HOSTNAME_LENGTH + 1) * SYSTEM_CHARSET_MBMAXLEN;
+ return FALSE;
}
const char *func_name() const { return "user"; }
const char *fully_qualified_func_name() const { return "user()"; }
@@ -682,6 +751,8 @@ public:
{
return save_str_value_in_field(field, &str_value);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_user>(thd, mem_root, this); }
};
@@ -695,6 +766,12 @@ public:
bool fix_fields(THD *thd, Item **ref);
const char *func_name() const { return "current_user"; }
const char *fully_qualified_func_name() const { return "current_user()"; }
+ bool check_vcol_func_processor(void *arg)
+ {
+ context= 0;
+ return mark_unsupported_function(fully_qualified_func_name(), arg,
+ VCOL_SESSION_FUNC);
+ }
};
@@ -706,8 +783,11 @@ public:
Item_func_current_role(THD *thd, Name_resolution_context *context_arg):
Item_func_sysconst(thd), context(context_arg) {}
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec()
- { max_length= username_char_length * SYSTEM_CHARSET_MBMAXLEN; }
+ bool fix_length_and_dec()
+ {
+ max_length= (uint32) username_char_length * SYSTEM_CHARSET_MBMAXLEN;
+ return FALSE;
+ }
int save_in_field(Field *field, bool no_conversions)
{ return save_str_value_in_field(field, &str_value); }
const char *func_name() const { return "current_role"; }
@@ -717,6 +797,15 @@ public:
DBUG_ASSERT(fixed == 1);
return null_value ? NULL : &str_value;
}
+ bool check_vcol_func_processor(void *arg)
+ {
+
+ context= 0;
+ return mark_unsupported_function(fully_qualified_func_name(), arg,
+ VCOL_SESSION_FUNC);
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_current_role>(thd, mem_root, this); }
};
@@ -726,8 +815,10 @@ class Item_func_soundex :public Item_str_func
public:
Item_func_soundex(THD *thd, Item *a): Item_str_func(thd, a) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "soundex"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_soundex>(thd, mem_root, this); }
};
@@ -738,8 +829,10 @@ public:
double val_real();
longlong val_int();
String *val_str(String *str);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "elt"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_elt>(thd, mem_root, this); }
};
@@ -750,8 +843,10 @@ class Item_func_make_set :public Item_str_func
public:
Item_func_make_set(THD *thd, List<Item> &list): Item_str_func(thd, list) {}
String *val_str(String *str);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "make_set"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_make_set>(thd, mem_root, this); }
};
@@ -766,9 +861,10 @@ public:
MY_LOCALE *get_locale(Item *item);
String *val_str_ascii(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "format"; }
- virtual void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_format>(thd, mem_root, this); }
};
@@ -781,12 +877,15 @@ public:
Item_str_func(thd, list)
{ collation.set(cs); }
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
max_length= arg_count * 4;
+ return FALSE;
}
const char *func_name() const { return "char"; }
void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_char>(thd, mem_root, this); }
};
@@ -797,8 +896,10 @@ public:
Item_func_repeat(THD *thd, Item *arg1, Item *arg2):
Item_str_func(thd, arg1, arg2) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "repeat"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_repeat>(thd, mem_root, this); }
};
@@ -807,8 +908,10 @@ class Item_func_space :public Item_str_func
public:
Item_func_space(THD *thd, Item *arg1): Item_str_func(thd, arg1) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "space"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_space>(thd, mem_root, this); }
};
@@ -818,8 +921,14 @@ public:
Item_func_binlog_gtid_pos(THD *thd, Item *arg1, Item *arg2):
Item_str_func(thd, arg1, arg2) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "binlog_gtid_pos"; }
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_binlog_gtid_pos>(thd, mem_root, this); }
};
@@ -830,7 +939,7 @@ protected:
public:
Item_func_pad(THD *thd, Item *arg1, Item *arg2, Item *arg3):
Item_str_func(thd, arg1, arg2, arg3) {}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
};
@@ -841,6 +950,8 @@ public:
Item_func_pad(thd, arg1, arg2, arg3) {}
String *val_str(String *);
const char *func_name() const { return "rpad"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_rpad>(thd, mem_root, this); }
};
@@ -851,6 +962,8 @@ public:
Item_func_pad(thd, arg1, arg2, arg3) {}
String *val_str(String *);
const char *func_name() const { return "lpad"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_lpad>(thd, mem_root, this); }
};
@@ -861,12 +974,15 @@ public:
Item_str_func(thd, a, b, c) {}
const char *func_name() const { return "conv"; }
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(default_charset());
max_length=64;
maybe_null= 1;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_conv>(thd, mem_root, this); }
};
@@ -878,12 +994,15 @@ public:
Item_str_ascii_checksum_func(thd, a) {}
const char *func_name() const { return "hex"; }
String *val_str_ascii(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(default_charset());
decimals=0;
fix_char_length(args[0]->max_length * 2);
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_hex>(thd, mem_root, this); }
};
class Item_func_unhex :public Item_str_func
@@ -897,12 +1016,15 @@ public:
}
const char *func_name() const { return "unhex"; }
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(&my_charset_bin);
decimals=0;
max_length=(1+args[0]->max_length)/2;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_unhex>(thd, mem_root, this); }
};
@@ -918,11 +1040,12 @@ public:
Item_str_func(thd, a, b), is_min(is_min_arg)
{ maybe_null= 1; }
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(args[0]->collation);
decimals=0;
max_length= MAX_BLOB_WIDTH;
+ return FALSE;
}
};
@@ -933,6 +1056,8 @@ public:
Item_func_like_range_min(THD *thd, Item *a, Item *b):
Item_func_like_range(thd, a, b, true) { }
const char *func_name() const { return "like_range_min"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_like_range_min>(thd, mem_root, this); }
};
@@ -942,6 +1067,8 @@ public:
Item_func_like_range_max(THD *thd, Item *a, Item *b):
Item_func_like_range(thd, a, b, false) { }
const char *func_name() const { return "like_range_max"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_like_range_max>(thd, mem_root, this); }
};
#endif
@@ -959,13 +1086,17 @@ public:
tmp->set_charset(&my_charset_bin);
return tmp;
}
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(&my_charset_bin);
max_length=args[0]->max_length;
+ return FALSE;
}
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
const char *func_name() const { return "cast_as_binary"; }
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_binary>(thd, mem_root, this); }
};
@@ -976,16 +1107,19 @@ public:
Item_load_file(THD *thd, Item *a): Item_str_func(thd, a) {}
String *val_str(String *);
const char *func_name() const { return "load_file"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(&my_charset_bin, DERIVATION_COERCIBLE);
maybe_null=1;
max_length=MAX_BLOB_WIDTH;
+ return FALSE;
}
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_load_file>(thd, mem_root, this); }
};
@@ -999,8 +1133,10 @@ class Item_func_export_set: public Item_str_func
Item_func_export_set(THD *thd, Item *a, Item *b, Item* c, Item* d, Item* e):
Item_str_func(thd, a, b, c, d, e) {}
String *val_str(String *str);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "export_set"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_export_set>(thd, mem_root, this); }
};
@@ -1011,13 +1147,16 @@ public:
Item_func_quote(THD *thd, Item *a): Item_str_func(thd, a) {}
const char *func_name() const { return "quote"; }
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(args[0]->collation);
ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 +
2 * collation.collation->mbmaxlen;
max_length= (uint32) MY_MIN(max_result_length, MAX_BLOB_WIDTH);
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_quote>(thd, mem_root, this); }
};
class Item_func_conv_charset :public Item_str_func
@@ -1060,6 +1199,7 @@ public:
(cs->state & MY_CS_UNICODE));
}
}
+ bool is_json_type() { return args[0]->is_json_type(); }
String *val_str(String *);
longlong val_int()
{
@@ -1097,9 +1237,11 @@ public:
return 1;
return res;
}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const { return "convert"; }
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_conv_charset>(thd, mem_root, this); }
};
class Item_func_set_collation :public Item_str_func
@@ -1108,16 +1250,20 @@ public:
Item_func_set_collation(THD *thd, Item *a, Item *b):
Item_str_func(thd, a, b) {}
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool eq(const Item *item, bool binary_cmp) const;
const char *func_name() const { return "collate"; }
+ enum precedence precedence() const { return COLLATE_PRECEDENCE; }
enum Functype functype() const { return COLLATE_FUNC; }
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
Item_field *field_for_view_update()
{
/* this function is transparent for view updating */
return args[0]->field_for_view_update();
}
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_set_collation>(thd, mem_root, this); }
};
@@ -1125,11 +1271,12 @@ class Item_func_expr_str_metadata :public Item_str_func
{
public:
Item_func_expr_str_metadata(THD *thd, Item *a): Item_str_func(thd, a) { }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(system_charset_info);
max_length= 64 * collation.collation->mbmaxlen; // should be enough
maybe_null= 0;
+ return FALSE;
};
table_map not_null_tables() const { return 0; }
Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
@@ -1145,6 +1292,8 @@ public:
:Item_func_expr_str_metadata(thd, a) { }
String *val_str(String *);
const char *func_name() const { return "charset"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_charset>(thd, mem_root, this); }
};
@@ -1155,6 +1304,8 @@ public:
:Item_func_expr_str_metadata(thd, a) {}
String *val_str(String *);
const char *func_name() const { return "collation"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_collation>(thd, mem_root, this); }
};
@@ -1175,7 +1326,7 @@ public:
}
const char *func_name() const { return "weight_string"; }
String *val_str(String *);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool eq(const Item *item, bool binary_cmp) const
{
if (!Item_str_func::eq(item, binary_cmp))
@@ -1187,6 +1338,9 @@ public:
}
Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
{ return this; }
+ void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_weight_string>(thd, mem_root, this); }
};
class Item_func_crc32 :public Item_int_func
@@ -1196,8 +1350,10 @@ public:
Item_func_crc32(THD *thd, Item *a): Item_int_func(thd, a)
{ unsigned_flag= 1; }
const char *func_name() const { return "crc32"; }
- void fix_length_and_dec() { max_length=10; }
+ bool fix_length_and_dec() { max_length=10; return FALSE; }
longlong val_int();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_crc32>(thd, mem_root, this); }
};
class Item_func_uncompressed_length : public Item_int_func
@@ -1206,8 +1362,10 @@ class Item_func_uncompressed_length : public Item_int_func
public:
Item_func_uncompressed_length(THD *thd, Item *a): Item_int_func(thd, a) {}
const char *func_name() const{return "uncompressed_length";}
- void fix_length_and_dec() { max_length=10; maybe_null= true; }
+ bool fix_length_and_dec() { max_length=10; maybe_null= true; return FALSE; }
longlong val_int();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_uncompressed_length>(thd, mem_root, this); }
};
#ifdef HAVE_COMPRESS
@@ -1222,9 +1380,15 @@ class Item_func_compress: public Item_str_binary_checksum_func
public:
Item_func_compress(THD *thd, Item *a)
:Item_str_binary_checksum_func(thd, a) {}
- void fix_length_and_dec(){max_length= (args[0]->max_length*120)/100+12;}
+ bool fix_length_and_dec()
+ {
+ max_length= (args[0]->max_length * 120) / 100 + 12;
+ return FALSE;
+ }
const char *func_name() const{return "compress";}
String *val_str(String *) ZLIB_DEPENDED_FUNCTION
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_compress>(thd, mem_root, this); }
};
class Item_func_uncompress: public Item_str_binary_checksum_func
@@ -1233,9 +1397,15 @@ class Item_func_uncompress: public Item_str_binary_checksum_func
public:
Item_func_uncompress(THD *thd, Item *a)
:Item_str_binary_checksum_func(thd, a) {}
- void fix_length_and_dec(){ maybe_null= 1; max_length= MAX_BLOB_WIDTH; }
+ bool fix_length_and_dec()
+ {
+ maybe_null= 1; max_length= MAX_BLOB_WIDTH;
+ return FALSE;
+ }
const char *func_name() const{return "uncompress";}
String *val_str(String *) ZLIB_DEPENDED_FUNCTION
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_uncompress>(thd, mem_root, this); }
};
@@ -1243,20 +1413,23 @@ class Item_func_uuid: public Item_str_func
{
public:
Item_func_uuid(THD *thd): Item_str_func(thd) {}
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
collation.set(system_charset_info,
DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
fix_char_length(MY_UUID_STRING_LENGTH);
+ return FALSE;
}
bool const_item() const { return false; }
table_map used_tables() const { return RAND_TABLE_BIT; }
const char *func_name() const{ return "uuid"; }
String *val_str(String *);
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_uuid>(thd, mem_root, this); }
};
@@ -1268,16 +1441,18 @@ protected:
uint *keys_num;
LEX_STRING *keys_str;
bool names, force_names;
- bool prepare_arguments(bool force_names);
+ bool prepare_arguments(THD *thd, bool force_names);
void print_arguments(String *str, enum_query_type query_type);
public:
Item_func_dyncol_create(THD *thd, List<Item> &args, DYNCALL_CREATE_DEF *dfs);
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
const char *func_name() const{ return "column_create"; }
String *val_str(String *);
- virtual void print(String *str, enum_query_type query_type);
- virtual enum Functype functype() const { return DYNCOL_FUNC; }
+ void print(String *str, enum_query_type query_type);
+ enum Functype functype() const { return DYNCOL_FUNC; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dyncol_create>(thd, mem_root, this); }
};
@@ -1289,7 +1464,9 @@ public:
{}
const char *func_name() const{ return "column_add"; }
String *val_str(String *);
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dyncol_add>(thd, mem_root, this); }
};
class Item_func_dyncol_json: public Item_str_func
@@ -1299,12 +1476,15 @@ public:
{collation.set(DYNCOL_UTF);}
const char *func_name() const{ return "column_json"; }
String *val_str(String *);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
max_length= MAX_BLOB_WIDTH;
maybe_null= 1;
decimals= 0;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dyncol_json>(thd, mem_root, this); }
};
/*
@@ -1316,19 +1496,37 @@ class Item_dyncol_get: public Item_str_func
public:
Item_dyncol_get(THD *thd, Item *str, Item *num): Item_str_func(thd, str, num)
{}
- void fix_length_and_dec()
- { maybe_null= 1;; max_length= MAX_BLOB_WIDTH; }
+ bool fix_length_and_dec()
+ { maybe_null= 1;; max_length= MAX_BLOB_WIDTH; return FALSE; }
/* Mark that collation can change between calls */
bool dynamic_result() { return 1; }
const char *func_name() const { return "column_get"; }
String *val_str(String *);
longlong val_int();
+ longlong val_int_signed_typecast()
+ {
+ unsigned_flag= false; // Mark that we want to have a signed value
+ longlong value= val_int(); // val_int() can change unsigned_flag
+ if (!null_value && unsigned_flag && value < 0)
+ push_note_converted_to_negative_complement(current_thd);
+ return value;
+ }
+ longlong val_int_unsigned_typecast()
+ {
+ unsigned_flag= true; // Mark that we want to have an unsigned value
+ longlong value= val_int(); // val_int() can change unsigned_flag
+ if (!null_value && unsigned_flag == 0 && value < 0)
+ push_note_converted_to_positive_complement(current_thd);
+ return value;
+ }
double val_real();
my_decimal *val_decimal(my_decimal *);
- bool get_dyn_value(DYNAMIC_COLUMN_VALUE *val, String *tmp);
+ bool get_dyn_value(THD *thd, DYNAMIC_COLUMN_VALUE *val, String *tmp);
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_dyncol_get>(thd, mem_root, this); }
};
@@ -1337,9 +1535,12 @@ class Item_func_dyncol_list: public Item_str_func
public:
Item_func_dyncol_list(THD *thd, Item *str): Item_str_func(thd, str)
{collation.set(DYNCOL_UTF);}
- void fix_length_and_dec() { maybe_null= 1; max_length= MAX_BLOB_WIDTH; };
+ bool fix_length_and_dec()
+ { maybe_null= 1; max_length= MAX_BLOB_WIDTH; return FALSE; };
const char *func_name() const{ return "column_list"; }
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dyncol_list>(thd, mem_root, this); }
};
#endif /* ITEM_STRFUNC_INCLUDED */
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index b275f749f25..8a9dd083911 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -40,6 +40,7 @@
#include "set_var.h"
#include "sql_select.h"
#include "sql_parse.h" // check_stack_overrun
+#include "sql_cte.h"
#include "sql_test.h"
double get_post_group_estimate(JOIN* join, double join_op_rows);
@@ -54,10 +55,12 @@ Item_subselect::Item_subselect(THD *thd_arg):
have_to_be_excluded(0),
inside_first_fix_fields(0), done_first_fix_fields(FALSE),
expr_cache(0), forced_const(FALSE), substitution(0), engine(0), eliminated(FALSE),
- changed(0), is_correlated(FALSE)
+ changed(0), is_correlated(FALSE), with_recursive_reference(0)
{
DBUG_ENTER("Item_subselect::Item_subselect");
- DBUG_PRINT("enter", ("this: 0x%lx", (ulong) this));
+ DBUG_PRINT("enter", ("this: %p", this));
+ sortbuffer.str= 0;
+
#ifndef DBUG_OFF
exec_counter= 0;
#endif
@@ -81,8 +84,8 @@ void Item_subselect::init(st_select_lex *select_lex,
*/
DBUG_ENTER("Item_subselect::init");
- DBUG_PRINT("enter", ("select_lex: 0x%lx this: 0x%lx",
- (ulong) select_lex, (ulong) this));
+ DBUG_PRINT("enter", ("select_lex: %p this: %p",
+ select_lex, this));
unit= select_lex->master_unit();
if (unit->item)
@@ -113,9 +116,8 @@ void Item_subselect::init(st_select_lex *select_lex,
do not take into account expression inside aggregate functions because
they can access original table fields
*/
- parsing_place= (outer_select->in_sum_expr ?
- NO_MATTER :
- outer_select->parsing_place);
+ parsing_place= (outer_select->in_sum_expr ? NO_MATTER
+ : outer_select->parsing_place);
if (unit->is_union())
engine= new subselect_union_engine(unit, result, this);
else
@@ -128,7 +130,7 @@ void Item_subselect::init(st_select_lex *select_lex,
/* The subquery is an expression cache candidate */
upper->expr_cache_may_be_used[upper->parsing_place]= TRUE;
}
- DBUG_PRINT("info", ("engine: 0x%lx", (ulong)engine));
+ DBUG_PRINT("info", ("engine: %p", engine));
DBUG_VOID_RETURN;
}
@@ -152,6 +154,10 @@ void Item_subselect::cleanup()
if (engine)
engine->cleanup();
reset();
+ filesort_buffer.free_sort_buffer();
+ my_free(sortbuffer.str);
+ sortbuffer= null_lex_str;
+
value_assigned= 0;
expr_cache= 0;
forced_const= FALSE;
@@ -210,7 +216,7 @@ void Item_allany_subselect::cleanup()
Item_subselect::~Item_subselect()
{
DBUG_ENTER("Item_subselect::~Item_subselect");
- DBUG_PRINT("enter", ("this: 0x%lx", (ulong) this));
+ DBUG_PRINT("enter", ("this: %p", this));
if (own_engine)
delete engine;
else
@@ -299,15 +305,20 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
if (engine->cols() > max_columns)
{
my_error(ER_OPERAND_COLUMNS, MYF(0), 1);
-
+ res= TRUE;
+ goto end;
+ }
+ if (fix_length_and_dec())
+ {
+ res= TRUE;
goto end;
}
- fix_length_and_dec();
}
else
goto end;
- if ((uncacheable= engine->uncacheable() & ~UNCACHEABLE_EXPLAIN))
+ if ((uncacheable= engine->uncacheable() & ~UNCACHEABLE_EXPLAIN) ||
+ with_recursive_reference)
{
const_item_cache= 0;
if (uncacheable & UNCACHEABLE_RAND)
@@ -323,7 +334,7 @@ end:
}
-bool Item_subselect::enumerate_field_refs_processor(uchar *arg)
+bool Item_subselect::enumerate_field_refs_processor(void *arg)
{
List_iterator<Ref_to_outside> it(upper_refs);
Ref_to_outside *upper;
@@ -337,7 +348,7 @@ bool Item_subselect::enumerate_field_refs_processor(uchar *arg)
return FALSE;
}
-bool Item_subselect::mark_as_eliminated_processor(uchar *arg)
+bool Item_subselect::mark_as_eliminated_processor(void *arg)
{
eliminated= TRUE;
return FALSE;
@@ -354,7 +365,7 @@ bool Item_subselect::mark_as_eliminated_processor(uchar *arg)
FALSE to force the evaluation of the processor for the subsequent items.
*/
-bool Item_subselect::eliminate_subselect_processor(uchar *arg)
+bool Item_subselect::eliminate_subselect_processor(void *arg)
{
unit->item= NULL;
unit->exclude_from_tree();
@@ -374,7 +385,7 @@ bool Item_subselect::eliminate_subselect_processor(uchar *arg)
FALSE to force the evaluation of the processor for the subsequent items.
*/
-bool Item_subselect::set_fake_select_as_master_processor(uchar *arg)
+bool Item_subselect::set_fake_select_as_master_processor(void *arg)
{
SELECT_LEX *fake_select= (SELECT_LEX*) arg;
/*
@@ -523,8 +534,7 @@ void Item_subselect::recalc_used_tables(st_select_lex *new_parent,
Field_fixer fixer;
fixer.used_tables= 0;
fixer.new_parent= new_parent;
- upper->item->walk(&Item::enumerate_field_refs_processor, FALSE,
- (uchar*)&fixer);
+ upper->item->walk(&Item::enumerate_field_refs_processor, 0, &fixer);
used_tables_cache |= fixer.used_tables;
upper->item->walk(&Item::update_table_bitmaps_processor, FALSE, NULL);
/*
@@ -635,7 +645,7 @@ bool Item_subselect::is_expensive()
bool Item_subselect::walk(Item_processor processor, bool walk_subquery,
- uchar *argument)
+ void *argument)
{
if (!(unit->uncacheable & ~UNCACHEABLE_DEPENDENT) && engine->is_executed() &&
!unit->describe)
@@ -735,7 +745,7 @@ void Item_subselect::get_cache_parameters(List<Item> &parameters)
unit->first_select()->nest_level, // nest_level
TRUE // collect
};
- walk(&Item::collect_outer_ref_processor, TRUE, (uchar*)&prm);
+ walk(&Item::collect_outer_ref_processor, TRUE, &prm);
}
int Item_in_subselect::optimize(double *out_rows, double *cost)
@@ -775,7 +785,7 @@ int Item_in_subselect::optimize(double *out_rows, double *cost)
}
/* Now with grouping */
- if (join->group_list)
+ if (join->group_list_for_estimates)
{
DBUG_PRINT("info",("Materialized join has grouping, trying to estimate it"));
double output_rows= get_post_group_estimate(join, *out_rows);
@@ -807,7 +817,8 @@ bool Item_subselect::expr_cache_is_needed(THD *thd)
engine->cols() == 1 &&
optimizer_flag(thd, OPTIMIZER_SWITCH_SUBQUERY_CACHE) &&
!(engine->uncacheable() & (UNCACHEABLE_RAND |
- UNCACHEABLE_SIDEEFFECT)));
+ UNCACHEABLE_SIDEEFFECT)) &&
+ !with_recursive_reference);
}
@@ -846,7 +857,8 @@ bool Item_in_subselect::expr_cache_is_needed(THD *thd)
{
return (optimizer_flag(thd, OPTIMIZER_SWITCH_SUBQUERY_CACHE) &&
!(engine->uncacheable() & (UNCACHEABLE_RAND |
- UNCACHEABLE_SIDEEFFECT)));
+ UNCACHEABLE_SIDEEFFECT)) &&
+ !with_recursive_reference);
}
@@ -897,9 +909,11 @@ Item::Type Item_subselect::type() const
}
-void Item_subselect::fix_length_and_dec()
+bool Item_subselect::fix_length_and_dec()
{
- engine->fix_length_and_dec(0);
+ if (engine->fix_length_and_dec(0))
+ return TRUE;
+ return FALSE;
}
@@ -913,7 +927,7 @@ table_map Item_subselect::used_tables() const
bool Item_subselect::const_item() const
{
DBUG_ASSERT(thd);
- return (thd->lex->context_analysis_only ?
+ return (thd->lex->context_analysis_only || with_recursive_reference ?
FALSE :
forced_const || const_item_cache);
}
@@ -933,7 +947,8 @@ void Item_subselect::update_used_tables()
if (!(engine->uncacheable() & ~UNCACHEABLE_EXPLAIN))
{
// did all used tables become static?
- if (!(used_tables_cache & ~engine->upper_select_const_tables()))
+ if (!(used_tables_cache & ~engine->upper_select_const_tables()) &&
+ ! with_recursive_reference)
const_item_cache= 1;
}
}
@@ -1186,17 +1201,19 @@ enum_field_types Item_singlerow_subselect::field_type() const
return engine->field_type();
}
-void Item_singlerow_subselect::fix_length_and_dec()
+bool Item_singlerow_subselect::fix_length_and_dec()
{
if ((max_columns= engine->cols()) == 1)
{
- engine->fix_length_and_dec(row= &value);
+ if (engine->fix_length_and_dec(row= &value))
+ return TRUE;
}
else
{
- if (!(row= (Item_cache**) sql_alloc(sizeof(Item_cache*)*max_columns)))
- return;
- engine->fix_length_and_dec(row);
+ if (!(row= (Item_cache**) current_thd->alloc(sizeof(Item_cache*) *
+ max_columns)) ||
+ engine->fix_length_and_dec(row))
+ return TRUE;
value= *row;
}
unsigned_flag= value->unsigned_flag;
@@ -1212,6 +1229,7 @@ void Item_singlerow_subselect::fix_length_and_dec()
for (uint i= 0; i < max_columns; i++)
row[i]->maybe_null= TRUE;
}
+ return FALSE;
}
@@ -1496,7 +1514,7 @@ void Item_exists_subselect::init_length_and_dec()
}
-void Item_exists_subselect::fix_length_and_dec()
+bool Item_exists_subselect::fix_length_and_dec()
{
DBUG_ENTER("Item_exists_subselect::fix_length_and_dec");
init_length_and_dec();
@@ -1504,14 +1522,17 @@ void Item_exists_subselect::fix_length_and_dec()
We need only 1 row to determine existence (i.e. any EXISTS that is not
an IN always requires LIMIT 1)
*/
+ Item *item= new (thd->mem_root) Item_int(thd, (int32) 1);
+ if (!item)
+ DBUG_RETURN(TRUE);
thd->change_item_tree(&unit->global_parameters()->select_limit,
- new (thd->mem_root) Item_int(thd, (int32) 1));
+ item);
DBUG_PRINT("info", ("Set limit to 1"));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
-void Item_in_subselect::fix_length_and_dec()
+bool Item_in_subselect::fix_length_and_dec()
{
DBUG_ENTER("Item_in_subselect::fix_length_and_dec");
init_length_and_dec();
@@ -1519,7 +1540,7 @@ void Item_in_subselect::fix_length_and_dec()
Unlike Item_exists_subselect, LIMIT 1 is set later for
Item_in_subselect, depending on the chosen strategy.
*/
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -1740,7 +1761,7 @@ bool Item_in_subselect::val_bool()
if (forced_const)
return value;
DBUG_ASSERT((engine->uncacheable() & ~UNCACHEABLE_EXPLAIN) ||
- ! engine->is_executed());
+ ! engine->is_executed() || with_recursive_reference);
null_value= was_null= FALSE;
if (exec())
{
@@ -1828,7 +1849,7 @@ Item_in_subselect::single_value_transformer(JOIN *join)
select and is not outer anymore.
*/
where_item->walk(&Item::remove_dependence_processor, 0,
- (uchar *) select_lex->outer_select());
+ select_lex->outer_select());
/*
fix_field of substitution item will be done in time of
substituting.
@@ -1946,7 +1967,8 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join)
(ALL && (> || =>)) || (ANY && (< || =<))
for ALL condition is inverted
*/
- item= new (thd->mem_root) Item_sum_max(thd, *select_lex->ref_pointer_array);
+ item= new (thd->mem_root) Item_sum_max(thd,
+ select_lex->ref_pointer_array[0]);
}
else
{
@@ -1954,11 +1976,12 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join)
(ALL && (< || =<)) || (ANY && (> || =>))
for ALL condition is inverted
*/
- item= new (thd->mem_root) Item_sum_min(thd, *select_lex->ref_pointer_array);
+ item= new (thd->mem_root) Item_sum_min(thd,
+ select_lex->ref_pointer_array[0]);
}
if (upper_item)
upper_item->set_sum_test(item);
- thd->change_item_tree(select_lex->ref_pointer_array, item);
+ thd->change_item_tree(&select_lex->ref_pointer_array[0], item);
{
List_iterator<Item> it(select_lex->item_list);
it++;
@@ -2106,8 +2129,8 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join,
thd,
&select_lex->context,
this,
- select_lex->
- ref_pointer_array,
+ &select_lex->
+ ref_pointer_array[0],
(char *)"<ref>",
this->full_name()));
if (!abort_on_null && left_expr->maybe_null)
@@ -2188,7 +2211,7 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join,
new (thd->mem_root) Item_ref_null_helper(thd,
&select_lex->context,
this,
- select_lex->ref_pointer_array,
+ &select_lex->ref_pointer_array[0],
(char *)"<no matter>",
(char *)"<result>"));
if (!abort_on_null && left_expr->maybe_null)
@@ -2270,7 +2293,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
/*
The uncacheable property controls a number of actions, e.g. whether to
save/restore (via init_save_join_tab/restore_tmp) the original JOIN for
- plans with a temp table where the original JOIN was overriden by
+ plans with a temp table where the original JOIN was overridden by
make_simple_join. The UNCACHEABLE_EXPLAIN is ignored by EXPLAIN, thus
non-correlated subqueries will not appear as such to EXPLAIN.
*/
@@ -2378,7 +2401,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
(char *)in_left_expr_name),
new (thd->mem_root)
Item_ref(thd, &select_lex->context,
- select_lex->ref_pointer_array + i,
+ &select_lex->ref_pointer_array[i],
(char *)"<no matter>",
(char *)"<list ref>"));
Item *item_isnull=
@@ -2386,12 +2409,13 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
Item_func_isnull(thd,
new (thd->mem_root)
Item_ref(thd, &select_lex->context,
- select_lex->ref_pointer_array+i,
+ &select_lex->ref_pointer_array[i],
(char *)"<no matter>",
(char *)"<list ref>"));
Item *col_item= new (thd->mem_root)
Item_cond_or(thd, item_eq, item_isnull);
- if (!abort_on_null && left_expr->element_index(i)->maybe_null)
+ if (!abort_on_null && left_expr->element_index(i)->maybe_null &&
+ get_cond_guard(i))
{
disable_cond_guard_for_const_null_left_expr(i);
if (!(col_item= new (thd->mem_root)
@@ -2405,11 +2429,12 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
Item_is_not_null_test(thd, this,
new (thd->mem_root)
Item_ref(thd, &select_lex->context,
- select_lex->
- ref_pointer_array + i,
+ &select_lex->
+ ref_pointer_array[i],
(char *)"<no matter>",
(char *)"<list ref>"));
- if (!abort_on_null && left_expr->element_index(i)->maybe_null)
+ if (!abort_on_null && left_expr->element_index(i)->maybe_null &&
+ get_cond_guard(i) )
{
disable_cond_guard_for_const_null_left_expr(i);
if (!(item_nnull_test=
@@ -2445,8 +2470,8 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
(char *)in_left_expr_name),
new (thd->mem_root)
Item_direct_ref(thd, &select_lex->context,
- select_lex->
- ref_pointer_array+i,
+ &select_lex->
+ ref_pointer_array[i],
(char *)"<no matter>",
(char *)"<list ref>"));
if (!abort_on_null && select_lex->ref_pointer_array[i]->maybe_null)
@@ -2456,7 +2481,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
Item_is_not_null_test(thd, this,
new (thd->mem_root)
Item_ref(thd, &select_lex->context,
- select_lex->ref_pointer_array + i,
+ &select_lex->ref_pointer_array[i],
(char *)"<no matter>",
(char *)"<list ref>"));
@@ -2465,12 +2490,12 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
Item_func_isnull(thd,
new (thd->mem_root)
Item_direct_ref(thd, &select_lex->context,
- select_lex->
- ref_pointer_array+i,
+ &select_lex->
+ ref_pointer_array[i],
(char *)"<no matter>",
(char *)"<list ref>"));
item= new (thd->mem_root) Item_cond_or(thd, item, item_isnull);
- if (left_expr->element_index(i)->maybe_null)
+ if (left_expr->element_index(i)->maybe_null && get_cond_guard(i))
{
disable_cond_guard_for_const_null_left_expr(i);
if (!(item= new (thd->mem_root)
@@ -2482,7 +2507,8 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
}
*having_item= and_items(thd, *having_item, having_col_item);
}
- if (!abort_on_null && left_expr->element_index(i)->maybe_null)
+ if (!abort_on_null && left_expr->element_index(i)->maybe_null &&
+ get_cond_guard(i))
{
if (!(item= new (thd->mem_root)
Item_func_trig_cond(thd, item, get_cond_guard(i))))
@@ -2570,7 +2596,7 @@ bool Item_in_subselect::create_in_to_exists_cond(JOIN *join_arg)
/*
The uncacheable property controls a number of actions, e.g. whether to
save/restore (via init_save_join_tab/restore_tmp) the original JOIN for
- plans with a temp table where the original JOIN was overriden by
+ plans with a temp table where the original JOIN was overridden by
make_simple_join. The UNCACHEABLE_EXPLAIN is ignored by EXPLAIN, thus
non-correlated subqueries will not appear as such to EXPLAIN.
*/
@@ -2829,7 +2855,7 @@ alloc_err:
@return TRUE in case of error and FALSE otherwise.
*/
-bool Item_exists_subselect::exists2in_processor(uchar *opt_arg)
+bool Item_exists_subselect::exists2in_processor(void *opt_arg)
{
THD *thd= (THD *)opt_arg;
SELECT_LEX *first_select=unit->first_select(), *save_select;
@@ -2855,7 +2881,8 @@ bool Item_exists_subselect::exists2in_processor(uchar *opt_arg)
join->having ||
first_select->with_sum_func ||
!first_select->leaf_tables.elements||
- !join->conds)
+ !join->conds ||
+ with_recursive_reference)
DBUG_RETURN(FALSE);
DBUG_ASSERT(first_select->order_list.elements == 0 &&
@@ -2879,7 +2906,7 @@ bool Item_exists_subselect::exists2in_processor(uchar *opt_arg)
unit->first_select()->nest_level, // nest_level
FALSE // collect
};
- walk(&Item::collect_outer_ref_processor, TRUE, (uchar*)&prm);
+ walk(&Item::collect_outer_ref_processor, TRUE, &prm);
DBUG_ASSERT(prm.count > 0);
DBUG_ASSERT(prm.count >= (uint)eqs.elements());
will_be_correlated= prm.count > (uint)eqs.elements();
@@ -3017,7 +3044,7 @@ bool Item_exists_subselect::exists2in_processor(uchar *opt_arg)
uint i;
for (i= 0; i < (uint)eqs.elements(); i++)
if (eqs.at(i).outer_exp->
- walk(&Item::find_item_processor, TRUE, (uchar*)upper->item))
+ walk(&Item::find_item_processor, TRUE, upper->item))
break;
if (i == (uint)eqs.elements() &&
(in_subs->upper_refs.push_back(upper, thd->stmt_arena->mem_root)))
@@ -3509,6 +3536,11 @@ int subselect_single_select_engine::get_identifier()
return select_lex->select_number;
}
+void subselect_single_select_engine::force_reexecution()
+{
+ executed= false;
+}
+
void subselect_single_select_engine::cleanup()
{
DBUG_ENTER("subselect_single_select_engine::cleanup");
@@ -3537,6 +3569,11 @@ bool subselect_union_engine::is_executed() const
return unit->executed;
}
+void subselect_union_engine::force_reexecution()
+{
+ unit->executed= false;
+}
+
/*
Check if last execution of the subquery engine produced any rows
@@ -3635,8 +3672,7 @@ int subselect_single_select_engine::prepare(THD *thd)
prepared= 1;
SELECT_LEX *save_select= thd->lex->current_select;
thd->lex->current_select= select_lex;
- if (join->prepare(&select_lex->ref_pointer_array,
- select_lex->table_list.first,
+ if (join->prepare(select_lex->table_list.first,
select_lex->with_wild,
select_lex->where,
select_lex->order_list.elements +
@@ -3687,11 +3723,11 @@ bool subselect_single_select_engine::no_rows()
}
-/*
- makes storage for the output values for the subquery and calcuates
+/**
+ Makes storage for the output values for the subquery and calculates
their data and column types and their nullability.
-*/
-void subselect_engine::set_row(List<Item> &item_list, Item_cache **row)
+*/
+bool subselect_engine::set_row(List<Item> &item_list, Item_cache **row)
{
Item *sel_item;
List_iterator_fast<Item> li(item_list);
@@ -3707,44 +3743,51 @@ void subselect_engine::set_row(List<Item> &item_list, Item_cache **row)
item->unsigned_flag= sel_item->unsigned_flag;
maybe_null= sel_item->maybe_null;
if (!(row[i]= Item_cache::get_cache(thd, sel_item, sel_item->cmp_type())))
- return;
+ return TRUE;
row[i]->setup(thd, sel_item);
//psergey-backport-timours: row[i]->store(sel_item);
}
if (item_list.elements > 1)
cmp_type= res_type= ROW_RESULT;
+ return FALSE;
}
-void subselect_single_select_engine::fix_length_and_dec(Item_cache **row)
+bool subselect_single_select_engine::fix_length_and_dec(Item_cache **row)
{
DBUG_ASSERT(row || select_lex->item_list.elements==1);
- set_row(select_lex->item_list, row);
+ if (set_row(select_lex->item_list, row))
+ return TRUE;
item->collation.set(row[0]->collation);
if (cols() != 1)
maybe_null= 0;
+ return FALSE;
}
-void subselect_union_engine::fix_length_and_dec(Item_cache **row)
+bool subselect_union_engine::fix_length_and_dec(Item_cache **row)
{
DBUG_ASSERT(row || unit->first_select()->item_list.elements==1);
if (unit->first_select()->item_list.elements == 1)
{
- set_row(unit->types, row);
+ if (set_row(unit->types, row))
+ return TRUE;
item->collation.set(row[0]->collation);
}
else
{
bool maybe_null_saved= maybe_null;
- set_row(unit->types, row);
+ if (set_row(unit->types, row))
+ return TRUE;
maybe_null= maybe_null_saved;
}
+ return FALSE;
}
-void subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row)
+bool subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row)
{
//this never should be called
DBUG_ASSERT(0);
+ return FALSE;
}
int read_first_record_seq(JOIN_TAB *tab);
@@ -3786,18 +3829,13 @@ int subselect_single_select_engine::exec()
*/
select_lex->uncacheable|= UNCACHEABLE_EXPLAIN;
select_lex->master_unit()->uncacheable|= UNCACHEABLE_EXPLAIN;
- /*
- Force join->join_tmp creation, because this subquery will be replaced
- by a simple select from the materialization temp table by optimize()
- called by EXPLAIN and we need to preserve the initial query structure
- so we can display it.
- */
- if (join->need_tmp && join->init_save_join_tab())
- DBUG_RETURN(1); /* purecov: inspected */
}
}
if (item->engine_changed(this))
+ {
+ thd->lex->current_select= save_select;
DBUG_RETURN(1);
+ }
}
if (select_lex->uncacheable &&
select_lex->uncacheable != UNCACHEABLE_EXPLAIN
@@ -3865,7 +3903,8 @@ int subselect_single_select_engine::exec()
tab->read_record.read_record= tab->save_read_record;
}
executed= 1;
- if (!(uncacheable() & ~UNCACHEABLE_EXPLAIN))
+ if (!(uncacheable() & ~UNCACHEABLE_EXPLAIN) &&
+ !item->with_recursive_reference)
item->make_const();
thd->where= save_where;
thd->lex->current_select= save_select;
@@ -4356,6 +4395,9 @@ table_map subselect_union_engine::upper_select_const_tables()
void subselect_single_select_engine::print(String *str,
enum_query_type query_type)
{
+ With_clause* with_clause= select_lex->get_with_clause();
+ if (with_clause)
+ with_clause->print(str, query_type);
select_lex->print(get_thd(), str, query_type);
}
@@ -4640,7 +4682,7 @@ subselect_hash_sj_engine::get_strategy_using_schema()
return COMPLETE_MATCH;
else
{
- List_iterator<Item> inner_col_it(*item_in->unit->get_unit_column_types());
+ List_iterator<Item> inner_col_it(*item_in->unit->get_column_types(false));
Item *outer_col, *inner_col;
for (uint i= 0; i < item_in->left_expr->cols(); i++)
@@ -4732,7 +4774,7 @@ subselect_hash_sj_engine::choose_partial_match_strategy(
/*
Choose according to global optimizer switch. If only one of the switches is
'ON', then the remaining strategy is the only possible one. The only cases
- when this will be overriden is when the total size of all buffers for the
+ when this will be overridden is when the total size of all buffers for the
merge strategy is bigger than the 'rowid_merge_buff_size' system variable,
or if there isn't enough physical memory to allocate the buffers.
*/
@@ -5336,7 +5378,7 @@ double get_post_group_estimate(JOIN* join, double join_op_rows)
table_map tables_in_group_list= table_map(0);
/* Find out which tables are used in GROUP BY list */
- for (ORDER *order= join->group_list; order; order= order->next)
+ for (ORDER *order= join->group_list_for_estimates; order; order= order->next)
{
Item *item= order->item[0];
table_map item_used_tables= item->used_tables();
@@ -5578,9 +5620,10 @@ void subselect_hash_sj_engine::print(String *str, enum_query_type query_type)
));
}
-void subselect_hash_sj_engine::fix_length_and_dec(Item_cache** row)
+bool subselect_hash_sj_engine::fix_length_and_dec(Item_cache** row)
{
DBUG_ASSERT(FALSE);
+ return FALSE;
}
void subselect_hash_sj_engine::exclude()
@@ -6254,7 +6297,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
Check if the first and only indexed column contains NULL in the curent
row, and add the row number to the corresponding key.
*/
- if (tmp_table->field[merge_keys[i]->get_field_idx(0)]->is_null())
+ if (merge_keys[i]->get_field(0)->is_null())
merge_keys[i]->set_null(cur_rownum);
else
merge_keys[i]->add_key(cur_rownum);
@@ -6712,6 +6755,13 @@ void subselect_table_scan_engine::cleanup()
}
+void Item_subselect::register_as_with_rec_ref(With_element *with_elem)
+{
+ with_elem->sq_with_rec_ref.link_in_list(this, &this->next_with_rec_ref);
+ with_recursive_reference= true;
+}
+
+
/*
Create an execution tracker for the expression cache we're using for this
subselect; add the tracker to the query plan.
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 424ea6f0512..bd6a1bdc498 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -32,6 +32,7 @@ class subselect_engine;
class subselect_hash_sj_engine;
class Item_bool_func2;
class Comp_creator;
+class With_element;
typedef class st_select_lex SELECT_LEX;
@@ -95,6 +96,9 @@ public:
subselect_engine *engine;
/* unit of subquery */
st_select_lex_unit *unit;
+ /* Cached buffers used when calling filesort in sub queries */
+ Filesort_buffer filesort_buffer;
+ LEX_STRING sortbuffer;
/* A reference from inside subquery predicate to somewhere outside of it */
class Ref_to_outside : public Sql_alloc
{
@@ -123,7 +127,17 @@ public:
bool changed;
/* TRUE <=> The underlying SELECT is correlated w.r.t some ancestor select */
- bool is_correlated;
+ bool is_correlated;
+
+ /*
+ TRUE <=> the subquery contains a recursive reference in the FROM list
+ of one of its selects. In this case some of the subquery optimization
+ strategies cannot be applied to the subquery.
+ */
+ bool with_recursive_reference;
+
+ /* To link Item_subselects containing references to the same recursive CTE */
+ Item_subselect *next_with_rec_ref;
enum subs_type {UNKNOWN_SUBS, SINGLEROW_SUBS,
EXISTS_SUBS, IN_SUBS, ALL_SUBS, ANY_SUBS};
@@ -182,7 +196,7 @@ public:
const_item_cache= 0;
forced_const= TRUE;
}
- virtual void fix_length_and_dec();
+ virtual bool fix_length_and_dec();
table_map used_tables() const;
table_map not_null_tables() const { return 0; }
bool const_item() const;
@@ -212,14 +226,14 @@ public:
*/
virtual void reset_value_registration() {}
enum_parsing_place place() { return parsing_place; }
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg);
- bool mark_as_eliminated_processor(uchar *arg);
- bool eliminate_subselect_processor(uchar *arg);
- bool set_fake_select_as_master_processor(uchar *arg);
- bool enumerate_field_refs_processor(uchar *arg);
- bool check_vcol_func_processor(uchar *int_arg)
+ bool walk(Item_processor processor, bool walk_subquery, void *arg);
+ bool mark_as_eliminated_processor(void *arg);
+ bool eliminate_subselect_processor(void *arg);
+ bool set_fake_select_as_master_processor(void *arg);
+ bool enumerate_field_refs_processor(void *arg);
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("subselect");
+ return mark_unsupported_function("select ...", arg, VCOL_IMPOSSIBLE);
}
/**
Callback to test if an IN predicate is expensive.
@@ -230,7 +244,7 @@ public:
@retval TRUE if the predicate is expensive
@retval FALSE otherwise
*/
- bool is_expensive_processor(uchar *arg) { return is_expensive(); }
+ bool is_expensive_processor(void *arg) { return is_expensive(); }
/**
Get the SELECT_LEX structure associated with this Item.
@@ -239,14 +253,18 @@ public:
st_select_lex* get_select_lex();
virtual bool expr_cache_is_needed(THD *);
virtual void get_cache_parameters(List<Item> &parameters);
- virtual bool is_subquery_processor (uchar *opt_arg) { return 1; }
- bool exists2in_processor(uchar *opt_arg) { return 0; }
- bool limit_index_condition_pushdown_processor(uchar *opt_arg)
+ virtual bool is_subquery_processor (void *opt_arg) { return 1; }
+ bool exists2in_processor(void *opt_arg) { return 0; }
+ bool limit_index_condition_pushdown_processor(void *opt_arg)
{
return TRUE;
}
+ void register_as_with_rec_ref(With_element *with_elem);
void init_expr_cache_tracker(THD *thd);
+
+ Item* build_clone(THD *thd, MEM_ROOT *mem_root) { return 0; }
+ Item* get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
friend class select_result_interceptor;
@@ -288,7 +306,7 @@ public:
enum Item_result result_type() const;
enum Item_result cmp_type() const;
enum_field_types field_type() const;
- void fix_length_and_dec();
+ bool fix_length_and_dec();
uint cols();
Item* element_index(uint i) { return reinterpret_cast<Item*>(row[i]); }
@@ -378,18 +396,19 @@ public:
void no_rows_in_result();
enum Item_result result_type() const { return INT_RESULT;}
+ enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
longlong val_int();
double val_real();
String *val_str(String*);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
- virtual void print(String *str, enum_query_type query_type);
+ bool fix_length_and_dec();
+ void print(String *str, enum_query_type query_type);
bool select_transformer(JOIN *join);
void top_level_item() { abort_on_null=1; }
inline bool is_top_level_item() { return abort_on_null; }
- bool exists2in_processor(uchar *opt_arg);
+ bool exists2in_processor(void *opt_arg);
Item* expr_cache_insert_transformer(THD *thd, uchar *unused);
@@ -604,9 +623,10 @@ public:
void update_null_value () { (void) val_bool(); }
bool val_bool();
bool test_limit(st_select_lex_unit *unit);
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
+ enum precedence precedence() const { return CMP_PRECEDENCE; }
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
bool const_item() const
{
@@ -709,13 +729,13 @@ public:
DBUG_VOID_RETURN;
}
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
return left_expr->walk(processor, walk_subquery, arg) ||
Item_subselect::walk(processor, walk_subquery, arg);
}
- bool exists2in_processor(uchar *opt_arg __attribute__((unused)))
+ bool exists2in_processor(void *opt_arg __attribute__((unused)))
{
return 0;
};
@@ -746,7 +766,7 @@ public:
subs_type substype() { return all?ALL_SUBS:ANY_SUBS; }
bool select_transformer(JOIN *join);
void create_comp_func(bool invert) { func= func_creator(invert); }
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
bool is_maxmin_applicable(JOIN *join);
bool transform_into_max_min(JOIN *join);
void no_rows_in_result();
@@ -790,7 +810,7 @@ public:
void set_thd(THD *thd_arg);
THD * get_thd() { return thd ? thd : current_thd; }
virtual int prepare(THD *)= 0;
- virtual void fix_length_and_dec(Item_cache** row)= 0;
+ virtual bool fix_length_and_dec(Item_cache** row)= 0;
/*
Execute the engine
@@ -832,8 +852,9 @@ public:
virtual bool no_rows() = 0;
virtual enum_engine_type engine_type() { return ABSTRACT_ENGINE; }
virtual int get_identifier() { DBUG_ASSERT(0); return 0; }
+ virtual void force_reexecution() {}
protected:
- void set_row(List<Item> &item_list, Item_cache **row);
+ bool set_row(List<Item> &item_list, Item_cache **row);
};
@@ -849,13 +870,13 @@ public:
Item_subselect *item);
void cleanup();
int prepare(THD *thd);
- void fix_length_and_dec(Item_cache** row);
+ bool fix_length_and_dec(Item_cache** row);
int exec();
uint cols();
uint8 uncacheable();
void exclude();
table_map upper_select_const_tables();
- virtual void print (String *str, enum_query_type query_type);
+ void print (String *str, enum_query_type query_type);
bool change_result(Item_subselect *si,
select_result_interceptor *result,
bool temp);
@@ -865,6 +886,7 @@ public:
bool no_rows();
virtual enum_engine_type engine_type() { return SINGLE_SELECT_ENGINE; }
int get_identifier();
+ void force_reexecution();
friend class subselect_hash_sj_engine;
friend class Item_in_subselect;
@@ -883,18 +905,19 @@ public:
Item_subselect *item);
void cleanup();
int prepare(THD *);
- void fix_length_and_dec(Item_cache** row);
+ bool fix_length_and_dec(Item_cache** row);
int exec();
uint cols();
uint8 uncacheable();
void exclude();
table_map upper_select_const_tables();
- virtual void print (String *str, enum_query_type query_type);
+ void print (String *str, enum_query_type query_type);
bool change_result(Item_subselect *si,
select_result_interceptor *result,
bool temp= FALSE);
bool no_tables();
bool is_executed() const;
+ void force_reexecution();
bool no_rows();
virtual enum_engine_type engine_type() { return UNION_ENGINE; }
};
@@ -940,13 +963,13 @@ public:
~subselect_uniquesubquery_engine();
void cleanup();
int prepare(THD *);
- void fix_length_and_dec(Item_cache** row);
+ bool fix_length_and_dec(Item_cache** row);
int exec();
uint cols() { return 1; }
uint8 uncacheable() { return UNCACHEABLE_DEPENDENT_INJECTED; }
void exclude();
table_map upper_select_const_tables() { return 0; }
- virtual void print (String *str, enum_query_type query_type);
+ void print (String *str, enum_query_type query_type);
bool change_result(Item_subselect *si,
select_result_interceptor *result,
bool temp= FALSE);
@@ -1004,7 +1027,7 @@ public:
having(having_arg)
{}
int exec();
- virtual void print (String *str, enum_query_type query_type);
+ void print (String *str, enum_query_type query_type);
virtual enum_engine_type engine_type() { return INDEXSUBQUERY_ENGINE; }
};
@@ -1079,11 +1102,8 @@ public:
void cleanup();
int prepare(THD *);
int exec();
- virtual void print(String *str, enum_query_type query_type);
- uint cols()
- {
- return materialize_engine->cols();
- }
+ void print(String *str, enum_query_type query_type);
+ uint cols() { return materialize_engine->cols(); }
uint8 uncacheable() { return materialize_engine->uncacheable(); }
table_map upper_select_const_tables() { return 0; }
bool no_rows() { return !tmp_table->file->stats.records; }
@@ -1092,7 +1112,7 @@ public:
TODO: factor out all these methods in a base subselect_index_engine class
because all of them have dummy implementations and should never be called.
*/
- void fix_length_and_dec(Item_cache** row);//=>base class
+ bool fix_length_and_dec(Item_cache** row);//=>base class
void exclude(); //=>base class
//=>base class
bool change_result(Item_subselect *si,
@@ -1242,10 +1262,10 @@ public:
uint get_column_count() { return key_column_count; }
uint get_keyid() { return keyid; }
- uint get_field_idx(uint i)
+ Field *get_field(uint i)
{
DBUG_ASSERT(i < key_column_count);
- return key_columns[i]->field->field_index;
+ return key_columns[i]->field;
}
rownum_t get_min_null_row() { return min_null_row; }
rownum_t get_max_null_row() { return max_null_row; }
@@ -1365,7 +1385,7 @@ public:
uint count_columns_with_nulls_arg);
int prepare(THD *thd_arg) { set_thd(thd_arg); return 0; }
int exec();
- void fix_length_and_dec(Item_cache**) {}
+ bool fix_length_and_dec(Item_cache**) { return FALSE; }
uint cols() { /* TODO: what is the correct value? */ return 1; }
uint8 uncacheable() { return UNCACHEABLE_DEPENDENT; }
void exclude() {}
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 8ba5579646d..4405477b6a1 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -29,16 +29,17 @@
#include <my_global.h>
#include "sql_priv.h"
#include "sql_select.h"
+#include "uniques.h"
/**
Calculate the affordable RAM limit for structures like TREE or Unique
used in Item_sum_*
*/
-ulonglong Item_sum::ram_limitation(THD *thd)
+size_t Item_sum::ram_limitation(THD *thd)
{
- return MY_MIN(thd->variables.tmp_table_size,
- thd->variables.max_heap_table_size);
+ return (size_t)MY_MIN(thd->variables.tmp_memory_table_size,
+ thd->variables.max_heap_table_size);
}
@@ -67,14 +68,14 @@ ulonglong Item_sum::ram_limitation(THD *thd)
bool Item_sum::init_sum_func_check(THD *thd)
{
SELECT_LEX *curr_sel= thd->lex->current_select;
- if (!curr_sel->name_visibility_map)
+ if (curr_sel && !curr_sel->name_visibility_map)
{
for (SELECT_LEX *sl= curr_sel; sl; sl= sl->context.outer_select())
{
curr_sel->name_visibility_map|= (1 << sl-> nest_level);
}
}
- if (!(thd->lex->allow_sum_func & curr_sel->name_visibility_map))
+ if (!curr_sel || !(thd->lex->allow_sum_func & curr_sel->name_visibility_map))
{
my_message(ER_INVALID_GROUP_FUNC_USE, ER_THD(thd, ER_INVALID_GROUP_FUNC_USE),
MYF(0));
@@ -99,7 +100,11 @@ bool Item_sum::init_sum_func_check(THD *thd)
The method verifies whether context conditions imposed on a usage
of any set function are met for this occurrence.
- It checks whether the set function occurs in the position where it
+
+ The function first checks if we are using any window functions as
+ arguments to the set function. In that case it returns an error.
+
+ Afterwards, it checks whether the set function occurs in the position where it
can be aggregated and, when it happens to occur in argument of another
set function, the method checks that these two functions are aggregated in
different subqueries.
@@ -150,6 +155,22 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref)
curr_sel->name_visibility_map);
bool invalid= FALSE;
DBUG_ASSERT(curr_sel->name_visibility_map); // should be set already
+
+ /*
+ Window functions can not be used as arguments to sum functions.
+ Aggregation happes before window function computation, so there
+ are no values to aggregate over.
+ */
+ if (with_window_func)
+ {
+ my_message(ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG,
+ ER_THD(thd, ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG),
+ MYF(0));
+ return TRUE;
+ }
+
+ if (window_func_sum_expr_flag)
+ return false;
/*
The value of max_arg_level is updated if an argument of the set function
contains a column reference resolved against a subquery whose level is
@@ -394,7 +415,7 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref)
}
-bool Item_sum::collect_outer_ref_processor(uchar *param)
+bool Item_sum::collect_outer_ref_processor(void *param)
{
Collect_deps_prm *prm= (Collect_deps_prm *)param;
SELECT_LEX *ds;
@@ -459,6 +480,7 @@ void Item_sum::mark_as_sum_func()
const_item_cache= false;
with_sum_func= 1;
with_field= 0;
+ window_func_sum_expr_flag= false;
}
@@ -467,6 +489,13 @@ void Item_sum::print(String *str, enum_query_type query_type)
/* orig_args is not filled with valid values until fix_fields() */
Item **pargs= fixed ? orig_args : args;
str->append(func_name());
+ /*
+ TODO:
+ The fact that func_name() may return a name with an extra '('
+ is really annoying. This should be fixed.
+ */
+ if (!is_aggr_sum_func())
+ str->append('(');
for (uint i=0 ; i < arg_count ; i++)
{
if (i)
@@ -506,37 +535,6 @@ Item *Item_sum::get_tmp_table_item(THD *thd)
}
-Field *Item_sum::create_tmp_field(bool group, TABLE *table)
-{
- Field *UNINIT_VAR(field);
- MEM_ROOT *mem_root= table->in_use->mem_root;
-
- switch (result_type()) {
- case REAL_RESULT:
- field= new (mem_root)
- Field_double(max_length, maybe_null, name, decimals, TRUE);
- break;
- case INT_RESULT:
- field= new (mem_root)
- Field_longlong(max_length, maybe_null, name, unsigned_flag);
- break;
- case STRING_RESULT:
- return make_string_field(table);
- case DECIMAL_RESULT:
- field= Field_new_decimal::create_from_item(mem_root, this);
- break;
- case ROW_RESULT:
- case TIME_RESULT:
- // This case should never be choosen
- DBUG_ASSERT(0);
- return 0;
- }
- if (field)
- field->init(table);
- return field;
-}
-
-
void Item_sum::update_used_tables ()
{
if (!Item_sum::const_item())
@@ -619,6 +617,14 @@ Item *Item_sum::result_item(THD *thd, Field *field)
return new (thd->mem_root) Item_field(thd, field);
}
+bool Item_sum::check_vcol_func_processor(void *arg)
+{
+ return mark_unsupported_function(func_name(),
+ is_aggr_sum_func() ? ")" : "()",
+ arg, VCOL_IMPOSSIBLE);
+}
+
+
/**
Compare keys consisting of single field that cannot be compared as binary.
@@ -687,32 +693,6 @@ int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2)
}
-static enum enum_field_types
-calc_tmp_field_type(enum enum_field_types table_field_type,
- Item_result result_type)
-{
- /* Adjust tmp table type according to the chosen aggregation type */
- switch (result_type) {
- case STRING_RESULT:
- case REAL_RESULT:
- if (table_field_type != MYSQL_TYPE_FLOAT)
- table_field_type= MYSQL_TYPE_DOUBLE;
- break;
- case INT_RESULT:
- table_field_type= MYSQL_TYPE_LONGLONG;
- /* fallthrough */
- case DECIMAL_RESULT:
- if (table_field_type != MYSQL_TYPE_LONGLONG)
- table_field_type= MYSQL_TYPE_NEWDECIMAL;
- break;
- case ROW_RESULT:
- default:
- DBUG_ASSERT(0);
- }
- return table_field_type;
-}
-
-
/***************************************************************************/
C_MODE_START
@@ -889,8 +869,6 @@ bool Aggregator_distinct::setup(THD *thd)
}
else
{
- List<Create_field> field_list;
- Create_field field_def; /* field definition */
Item *arg;
DBUG_ENTER("Aggregator_distinct::setup");
/* It's legal to call setup() more than once when in a subquery */
@@ -902,8 +880,6 @@ bool Aggregator_distinct::setup(THD *thd)
PS/SP. Hence all further allocations are performed in the runtime
mem_root.
*/
- if (field_list.push_back(&field_def, thd->mem_root))
- DBUG_RETURN(TRUE);
item_sum->null_value= item_sum->maybe_null= 1;
item_sum->quick_group= 0;
@@ -921,17 +897,8 @@ bool Aggregator_distinct::setup(THD *thd)
if (always_null)
DBUG_RETURN(FALSE);
- enum enum_field_types field_type;
-
- field_type= calc_tmp_field_type(arg->field_type(),
- arg->result_type());
- field_def.init_for_tmp_table(field_type,
- arg->max_length,
- arg->decimals,
- arg->maybe_null,
- arg->unsigned_flag);
-
- if (! (table= create_virtual_tmp_table(thd, field_list)))
+ Field *field= arg->make_num_distinct_aggregator_field(thd->mem_root, arg);
+ if (!field || !(table= create_virtual_tmp_table(thd, field)))
DBUG_RETURN(TRUE);
/* XXX: check that the case of CHAR(0) works OK */
@@ -1160,13 +1127,13 @@ Item_sum_num::fix_fields(THD *thd, Item **ref)
set_if_bigger(decimals, args[i]->decimals);
with_subselect|= args[i]->with_subselect;
with_param|= args[i]->with_param;
+ with_window_func|= args[i]->with_window_func;
}
result_field=0;
max_length=float_length(decimals);
null_value=1;
- fix_length_and_dec();
-
- if (check_sum_func(thd, ref))
+ if (fix_length_and_dec() ||
+ check_sum_func(thd, ref))
return TRUE;
memcpy (orig_args, args, sizeof (Item *) * arg_count);
@@ -1192,6 +1159,7 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
Type_std_attributes::set(args[0]);
with_subselect= args[0]->with_subselect;
with_param= args[0]->with_param;
+ with_window_func|= args[0]->with_window_func;
Item *item2= item->real_item();
if (item2->type() == Item::FIELD_ITEM)
@@ -1214,14 +1182,14 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
case TIME_RESULT:
DBUG_ASSERT(0);
};
- setup_hybrid(thd, args[0], NULL);
+ if (!is_window_func_sum_expr())
+ setup_hybrid(thd, args[0], NULL);
/* MIN/MAX can return NULL for empty set indepedent of the used column */
maybe_null= 1;
result_field=0;
null_value=1;
- fix_length_and_dec();
-
- if (check_sum_func(thd, ref))
+ if (fix_length_and_dec() ||
+ check_sum_func(thd, ref))
return TRUE;
orig_args[0]= args[0];
@@ -1322,11 +1290,13 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table)
check if the following assignments are really needed
*/
Item_sum_sum::Item_sum_sum(THD *thd, Item_sum_sum *item)
- :Item_sum_num(thd, item), hybrid_type(item->hybrid_type),
- curr_dec_buff(item->curr_dec_buff)
+ :Item_sum_num(thd, item),
+ Type_handler_hybrid_field_type(item),
+ curr_dec_buff(item->curr_dec_buff),
+ count(item->count)
{
/* TODO: check if the following assignments are really needed */
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
{
my_decimal2decimal(item->dec_buffs, dec_buffs);
my_decimal2decimal(item->dec_buffs + 1, dec_buffs + 1);
@@ -1345,7 +1315,8 @@ void Item_sum_sum::clear()
{
DBUG_ENTER("Item_sum_sum::clear");
null_value=1;
- if (hybrid_type == DECIMAL_RESULT)
+ count= 0;
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
{
curr_dec_buff= 0;
my_decimal_set_zero(dec_buffs);
@@ -1356,7 +1327,7 @@ void Item_sum_sum::clear()
}
-void Item_sum_sum::fix_length_and_dec()
+bool Item_sum_sum::fix_length_and_dec()
{
DBUG_ENTER("Item_sum_sum::fix_length_and_dec");
maybe_null=null_value=1;
@@ -1364,7 +1335,7 @@ void Item_sum_sum::fix_length_and_dec()
switch (args[0]->cast_to_int_type()) {
case REAL_RESULT:
case STRING_RESULT:
- hybrid_type= REAL_RESULT;
+ set_handler_by_field_type(MYSQL_TYPE_DOUBLE);
sum= 0.0;
break;
case INT_RESULT:
@@ -1377,7 +1348,7 @@ void Item_sum_sum::fix_length_and_dec()
decimals,
unsigned_flag);
curr_dec_buff= 0;
- hybrid_type= DECIMAL_RESULT;
+ set_handler_by_field_type(MYSQL_TYPE_NEWDECIMAL);
my_decimal_set_zero(dec_buffs);
break;
}
@@ -1385,38 +1356,76 @@ void Item_sum_sum::fix_length_and_dec()
DBUG_ASSERT(0);
}
DBUG_PRINT("info", ("Type: %s (%d, %d)",
- (hybrid_type == REAL_RESULT ? "REAL_RESULT" :
- hybrid_type == DECIMAL_RESULT ? "DECIMAL_RESULT" :
- hybrid_type == INT_RESULT ? "INT_RESULT" :
+ (result_type() == REAL_RESULT ? "REAL_RESULT" :
+ result_type() == DECIMAL_RESULT ? "DECIMAL_RESULT" :
+ result_type() == INT_RESULT ? "INT_RESULT" :
"--ILLEGAL!!!--"),
max_length,
(int)decimals));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
bool Item_sum_sum::add()
{
DBUG_ENTER("Item_sum_sum::add");
- if (hybrid_type == DECIMAL_RESULT)
+ add_helper(false);
+ DBUG_RETURN(0);
+}
+
+void Item_sum_sum::add_helper(bool perform_removal)
+{
+ DBUG_ENTER("Item_sum_sum::add_helper");
+
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
{
my_decimal value;
const my_decimal *val= aggr->arg_val_decimal(&value);
if (!aggr->arg_is_null(true))
{
- my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs + (curr_dec_buff^1),
- val, dec_buffs + curr_dec_buff);
+ if (perform_removal)
+ {
+ if (count > 0)
+ {
+ my_decimal_sub(E_DEC_FATAL_ERROR, dec_buffs + (curr_dec_buff ^ 1),
+ dec_buffs + curr_dec_buff, val);
+ count--;
+ }
+ else
+ DBUG_VOID_RETURN;
+ }
+ else
+ {
+ count++;
+ my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs + (curr_dec_buff ^ 1),
+ val, dec_buffs + curr_dec_buff);
+ }
curr_dec_buff^= 1;
- null_value= 0;
+ null_value= (count > 0) ? 0 : 1;
}
}
else
{
- sum+= aggr->arg_val_real();
+ if (perform_removal && count > 0)
+ sum-= aggr->arg_val_real();
+ else
+ sum+= aggr->arg_val_real();
if (!aggr->arg_is_null(true))
- null_value= 0;
+ {
+ if (perform_removal)
+ {
+ if (count > 0)
+ {
+ count--;
+ }
+ }
+ else
+ count++;
+
+ null_value= (count > 0) ? 0 : 1;
+ }
}
- DBUG_RETURN(0);
+ DBUG_VOID_RETURN;
}
@@ -1425,7 +1434,7 @@ longlong Item_sum_sum::val_int()
DBUG_ASSERT(fixed == 1);
if (aggr)
aggr->endup();
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
{
longlong result;
my_decimal2int(E_DEC_FATAL_ERROR, dec_buffs + curr_dec_buff, unsigned_flag,
@@ -1441,7 +1450,7 @@ double Item_sum_sum::val_real()
DBUG_ASSERT(fixed == 1);
if (aggr)
aggr->endup();
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
my_decimal2double(E_DEC_FATAL_ERROR, dec_buffs + curr_dec_buff, &sum);
return sum;
}
@@ -1451,7 +1460,7 @@ String *Item_sum_sum::val_str(String *str)
{
if (aggr)
aggr->endup();
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
return val_string_from_decimal(str);
return val_string_from_real(str);
}
@@ -1461,11 +1470,18 @@ my_decimal *Item_sum_sum::val_decimal(my_decimal *val)
{
if (aggr)
aggr->endup();
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
return null_value ? NULL : (dec_buffs + curr_dec_buff);
return val_decimal_from_real(val);
}
+void Item_sum_sum::remove()
+{
+ DBUG_ENTER("Item_sum_sum::remove");
+ add_helper(true);
+ DBUG_VOID_RETURN;
+}
+
/**
Aggregate a distinct row from the distinct hash table.
@@ -1611,6 +1627,20 @@ bool Item_sum_count::add()
return 0;
}
+
+/*
+ Remove a row. This is used by window functions.
+*/
+
+void Item_sum_count::remove()
+{
+ DBUG_ASSERT(aggr->Aggrtype() == Aggregator::SIMPLE_AGGREGATOR);
+ if (aggr->arg_is_null(false))
+ return;
+ if (count > 0)
+ count--;
+}
+
longlong Item_sum_count::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -1632,12 +1662,13 @@ void Item_sum_count::cleanup()
/*
Avgerage
*/
-void Item_sum_avg::fix_length_and_dec()
+bool Item_sum_avg::fix_length_and_dec()
{
- Item_sum_sum::fix_length_and_dec();
+ if (Item_sum_sum::fix_length_and_dec())
+ return TRUE;
maybe_null=null_value=1;
prec_increment= current_thd->variables.div_precincrement;
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_avg::result_type() == DECIMAL_RESULT)
{
int precision= args[0]->decimal_precision() + prec_increment;
decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
@@ -1650,9 +1681,11 @@ void Item_sum_avg::fix_length_and_dec()
}
else
{
- decimals= MY_MIN(args[0]->decimals + prec_increment, NOT_FIXED_DEC);
+ decimals= MY_MIN(args[0]->decimals + prec_increment,
+ FLOATING_POINT_DECIMALS);
max_length= MY_MIN(args[0]->max_length + prec_increment, float_length(decimals));
}
+ return FALSE;
}
@@ -1675,11 +1708,11 @@ Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table)
and unpack on access.
*/
field= new (mem_root)
- Field_string(((hybrid_type == DECIMAL_RESULT) ?
+ Field_string(((Item_sum_avg::result_type() == DECIMAL_RESULT) ?
dec_bin_size : sizeof(double)) + sizeof(longlong),
0, name, &my_charset_bin);
}
- else if (hybrid_type == DECIMAL_RESULT)
+ else if (Item_sum_avg::result_type() == DECIMAL_RESULT)
field= Field_new_decimal::create_from_item(mem_root, this);
else
field= new (mem_root) Field_double(max_length, maybe_null, name, decimals,
@@ -1706,6 +1739,16 @@ bool Item_sum_avg::add()
return FALSE;
}
+void Item_sum_avg::remove()
+{
+ Item_sum_sum::remove();
+ if (!aggr->arg_is_null(true))
+ {
+ if (count > 0)
+ count--;
+ }
+}
+
double Item_sum_avg::val_real()
{
DBUG_ASSERT(fixed == 1);
@@ -1734,10 +1777,10 @@ my_decimal *Item_sum_avg::val_decimal(my_decimal *val)
}
/*
- For non-DECIMAL hybrid_type the division will be done in
+ For non-DECIMAL result_type() the division will be done in
Item_sum_avg::val_real().
*/
- if (hybrid_type != DECIMAL_RESULT)
+ if (Item_sum_avg::result_type() != DECIMAL_RESULT)
return val_decimal_from_real(val);
sum_dec= dec_buffs + curr_dec_buff;
@@ -1751,7 +1794,7 @@ String *Item_sum_avg::val_str(String *str)
{
if (aggr)
aggr->endup();
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_avg::result_type() == DECIMAL_RESULT)
return val_string_from_decimal(str);
return val_string_from_real(str);
}
@@ -1854,7 +1897,7 @@ Item_sum_variance::Item_sum_variance(THD *thd, Item_sum_variance *item):
}
-void Item_sum_variance::fix_length_and_dec()
+bool Item_sum_variance::fix_length_and_dec()
{
DBUG_ENTER("Item_sum_variance::fix_length_and_dec");
maybe_null= null_value= 1;
@@ -1870,13 +1913,14 @@ void Item_sum_variance::fix_length_and_dec()
switch (args[0]->result_type()) {
case REAL_RESULT:
case STRING_RESULT:
- decimals= MY_MIN(args[0]->decimals + 4, NOT_FIXED_DEC);
+ decimals= MY_MIN(args[0]->decimals + 4, FLOATING_POINT_DECIMALS);
break;
case INT_RESULT:
case DECIMAL_RESULT:
{
int precision= args[0]->decimal_precision()*2 + prec_increment;
- decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(args[0]->decimals + prec_increment,
+ FLOATING_POINT_DECIMALS-1);
max_length= my_decimal_precision_to_length_no_truncation(precision,
decimals,
unsigned_flag);
@@ -1888,7 +1932,7 @@ void Item_sum_variance::fix_length_and_dec()
DBUG_ASSERT(0);
}
DBUG_PRINT("info", ("Type: REAL_RESULT (%d, %d)", max_length, (int)decimals));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -2036,6 +2080,18 @@ void Item_sum_hybrid::clear()
null_value= 1;
}
+bool
+Item_sum_hybrid::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+{
+ DBUG_ASSERT(fixed == 1);
+ if (null_value)
+ return true;
+ bool retval= value->get_date(ltime, fuzzydate);
+ if ((null_value= value->null_value))
+ DBUG_ASSERT(retval == true);
+ return retval;
+}
+
double Item_sum_hybrid::val_real()
{
DBUG_ASSERT(fixed == 1);
@@ -2181,6 +2237,8 @@ longlong Item_sum_bit::val_int()
void Item_sum_bit::clear()
{
bits= reset_bits;
+ if (as_window_function)
+ clear_as_window();
}
Item *Item_sum_or::copy_or_same(THD* thd)
@@ -2188,15 +2246,82 @@ Item *Item_sum_or::copy_or_same(THD* thd)
return new (thd->mem_root) Item_sum_or(thd, this);
}
+bool Item_sum_bit::clear_as_window()
+{
+ memset(bit_counters, 0, sizeof(bit_counters));
+ num_values_added= 0;
+ set_bits_from_counters();
+ return 0;
+}
+
+bool Item_sum_bit::remove_as_window(ulonglong value)
+{
+ DBUG_ASSERT(as_window_function);
+ if (num_values_added == 0)
+ return 0; // Nothing to remove.
+
+ for (int i= 0; i < NUM_BIT_COUNTERS; i++)
+ {
+ if (!bit_counters[i])
+ {
+ // Don't attempt to remove values that were never added.
+ DBUG_ASSERT((value & (1ULL << i)) == 0);
+ continue;
+ }
+ bit_counters[i]-= (value & (1ULL << i)) ? 1 : 0;
+ }
+
+ // Prevent overflow;
+ num_values_added = MY_MIN(num_values_added, num_values_added - 1);
+ set_bits_from_counters();
+ return 0;
+}
+
+bool Item_sum_bit::add_as_window(ulonglong value)
+{
+ DBUG_ASSERT(as_window_function);
+ for (int i= 0; i < NUM_BIT_COUNTERS; i++)
+ {
+ bit_counters[i]+= (value & (1ULL << i)) ? 1 : 0;
+ }
+ // Prevent overflow;
+ num_values_added = MY_MAX(num_values_added, num_values_added + 1);
+ set_bits_from_counters();
+ return 0;
+}
+
+void Item_sum_or::set_bits_from_counters()
+{
+  ulonglong value= 0;
+  for (int i= 0; i < NUM_BIT_COUNTERS; i++)
+  {
+    /* 1ULL: counters cover 64 bits; plain (1 << i) is UB for i >= 31 */
+    value|= bit_counters[i] > 0 ? (1ULL << i) : 0;
+  }
+  bits= value | reset_bits;
+}
bool Item_sum_or::add()
{
ulonglong value= (ulonglong) args[0]->val_int();
if (!args[0]->null_value)
+ {
+ if (as_window_function)
+ return add_as_window(value);
bits|=value;
+ }
return 0;
}
+void Item_sum_xor::set_bits_from_counters()
+{
+  ulonglong value= 0;
+  for (int i= 0; i < NUM_BIT_COUNTERS; i++)
+  {
+    /* 1ULL: counters cover 64 bits; plain (1 << i) is UB for i >= 31 */
+    value|= (bit_counters[i] % 2) ? (1ULL << i) : 0;
+  }
+  bits= value ^ reset_bits;
+}
+
Item *Item_sum_xor::copy_or_same(THD* thd)
{
return new (thd->mem_root) Item_sum_xor(thd, this);
@@ -2207,10 +2332,31 @@ bool Item_sum_xor::add()
{
ulonglong value= (ulonglong) args[0]->val_int();
if (!args[0]->null_value)
+ {
+ if (as_window_function)
+ return add_as_window(value);
bits^=value;
+ }
return 0;
}
+void Item_sum_and::set_bits_from_counters()
+{
+ ulonglong value= 0;
+ if (!num_values_added)
+ {
+ bits= reset_bits;
+ return;
+ }
+
+ for (int i= 0; i < NUM_BIT_COUNTERS; i++)
+ {
+ // We've only added values of 1 for this bit.
+ if (bit_counters[i] == num_values_added)
+ value|= (1ULL << i);
+ }
+ bits= value & reset_bits;
+}
Item *Item_sum_and::copy_or_same(THD* thd)
{
return new (thd->mem_root) Item_sum_and(thd, this);
@@ -2221,7 +2367,11 @@ bool Item_sum_and::add()
{
ulonglong value= (ulonglong) args[0]->val_int();
if (!args[0]->null_value)
+ {
+ if (as_window_function)
+ return add_as_window(value);
bits&=value;
+ }
return 0;
}
@@ -2333,7 +2483,7 @@ void Item_sum_hybrid::reset_field()
void Item_sum_sum::reset_field()
{
DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
{
my_decimal value, *arg_val= args[0]->val_decimal(&value);
if (!arg_val) // Null
@@ -2342,7 +2492,7 @@ void Item_sum_sum::reset_field()
}
else
{
- DBUG_ASSERT(hybrid_type == REAL_RESULT);
+ DBUG_ASSERT(result_type() == REAL_RESULT);
double nr= args[0]->val_real(); // Nulls also return 0
float8store(result_field->ptr, nr);
}
@@ -2369,7 +2519,7 @@ void Item_sum_avg::reset_field()
{
uchar *res=result_field->ptr;
DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_avg::result_type() == DECIMAL_RESULT)
{
longlong tmp;
my_decimal value, *arg_dec= args[0]->val_decimal(&value);
@@ -2409,6 +2559,10 @@ void Item_sum_bit::reset_field()
void Item_sum_bit::update_field()
{
+ // We never call update_field when computing the function as a window
+ // function. Setting bits to a random value invalidates the bits counters and
+ // the result of the bit function becomes erroneous.
+ DBUG_ASSERT(!as_window_function);
uchar *res=result_field->ptr;
bits= uint8korr(res);
add();
@@ -2423,7 +2577,7 @@ void Item_sum_bit::update_field()
void Item_sum_sum::update_field()
{
DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_sum::result_type() == DECIMAL_RESULT)
{
my_decimal value, *arg_val= args[0]->val_decimal(&value);
if (!args[0]->null_value)
@@ -2478,7 +2632,7 @@ void Item_sum_avg::update_field()
DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
- if (hybrid_type == DECIMAL_RESULT)
+ if (Item_sum_avg::result_type() == DECIMAL_RESULT)
{
my_decimal value, *arg_val= args[0]->val_decimal(&value);
if (!args[0]->null_value)
@@ -2517,7 +2671,7 @@ void Item_sum_avg::update_field()
Item *Item_sum_avg::result_item(THD *thd, Field *field)
{
return
- hybrid_type == DECIMAL_RESULT ?
+ Item_sum_avg::result_type() == DECIMAL_RESULT ?
(Item_avg_field*) new (thd->mem_root) Item_avg_field_decimal(thd, this) :
(Item_avg_field*) new (thd->mem_root) Item_avg_field_double(thd, this);
}
@@ -2848,13 +3002,13 @@ my_decimal *Item_sum_udf_int::val_decimal(my_decimal *dec)
/** Default max_length is max argument length. */
-void Item_sum_udf_str::fix_length_and_dec()
+bool Item_sum_udf_str::fix_length_and_dec()
{
DBUG_ENTER("Item_sum_udf_str::fix_length_and_dec");
max_length=0;
for (uint i = 0; i < arg_count; i++)
set_if_bigger(max_length,args[i]->max_length);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(FALSE);
}
@@ -2991,7 +3145,7 @@ int group_concat_key_cmp_with_order(void* arg, const void* key1,
field->table->s->null_bytes);
int res= field->cmp((uchar*)key1 + offset, (uchar*)key2 + offset);
if (res)
- return (*order_item)->asc ? res : -res;
+ return ((*order_item)->direction == ORDER::ORDER_ASC) ? res : -res;
}
/*
We can't return 0 because in that case the tree class would remove this
@@ -3012,7 +3166,7 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
{
Item_func_group_concat *item= (Item_func_group_concat *) item_arg;
TABLE *table= item->table;
- uint max_length= table->in_use->variables.group_concat_max_len;
+ uint max_length= (uint)table->in_use->variables.group_concat_max_len;
String tmp((char *)table->record[1], table->s->reclength,
default_charset_info);
String tmp2;
@@ -3062,21 +3216,18 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
/* stop if length of result more than max_length */
if (result->length() > max_length)
{
- int well_formed_error;
CHARSET_INFO *cs= item->collation.collation;
const char *ptr= result->ptr();
- uint add_length;
THD *thd= current_thd;
/*
It's ok to use item->result.length() as the fourth argument
as this is never used to limit the length of the data.
Cut is done with the third argument.
*/
- add_length= cs->cset->well_formed_len(cs,
- ptr + old_length,
- ptr + max_length,
- result->length(),
- &well_formed_error);
+ uint add_length= Well_formed_prefix(cs,
+ ptr + old_length,
+ ptr + max_length,
+ result->length()).length();
result->length(old_length + add_length);
item->warning_for_row= TRUE;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -3377,6 +3528,7 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
return TRUE;
with_subselect|= args[i]->with_subselect;
with_param|= args[i]->with_param;
+ with_window_func|= args[i]->with_window_func;
}
/* skip charset aggregation for order columns */
@@ -3387,9 +3539,9 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
result.set_charset(collation.collation);
result_field= 0;
null_value= 1;
- max_length= thd->variables.group_concat_max_len
+ max_length= (uint32)(thd->variables.group_concat_max_len
/ collation.collation->mbminlen
- * collation.collation->mbmaxlen;
+ * collation.collation->mbmaxlen);
uint32 offset;
if (separator->needs_conversion(separator->length(), separator->charset(),
@@ -3468,8 +3620,8 @@ bool Item_func_group_concat::setup(THD *thd)
if (!ref_pointer_array)
DBUG_RETURN(TRUE);
memcpy(ref_pointer_array, args, arg_count * sizeof(Item*));
- if (setup_order(thd, ref_pointer_array, context->table_list, list,
- all_fields, *order))
+ if (setup_order(thd, Ref_ptr_array(ref_pointer_array, n_elems),
+ context->table_list, list, all_fields, *order))
DBUG_RETURN(TRUE);
}
@@ -3533,7 +3685,7 @@ bool Item_func_group_concat::setup(THD *thd)
syntax of this function). If there is no ORDER BY clause, we don't
create this tree.
*/
- init_tree(tree, (uint) MY_MIN(thd->variables.max_heap_table_size,
+ init_tree(tree, (size_t)MY_MIN(thd->variables.max_heap_table_size,
thd->variables.sortbuff_size/16), 0,
tree_key_length,
group_concat_key_cmp_with_order, NULL, (void*) this,
@@ -3603,9 +3755,9 @@ void Item_func_group_concat::print(String *str, enum_query_type query_type)
if (i)
str->append(',');
orig_args[i + arg_count_field]->print(str, query_type);
- if (order[i]->asc)
+ if (order[i]->direction == ORDER::ORDER_ASC)
str->append(STRING_WITH_LEN(" ASC"));
- else
+ else
str->append(STRING_WITH_LEN(" DESC"));
}
}
diff --git a/sql/item_sum.h b/sql/item_sum.h
index d6ccfeb8529..5c8ff520259 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -61,7 +61,7 @@ public:
Aggregator (Item_sum *arg): item_sum(arg) {}
virtual ~Aggregator () {} /* Keep gcc happy */
- enum Aggregator_type { SIMPLE_AGGREGATOR, DISTINCT_AGGREGATOR };
+ enum Aggregator_type { SIMPLE_AGGREGATOR, DISTINCT_AGGREGATOR };
virtual Aggregator_type Aggrtype() = 0;
/**
@@ -109,6 +109,7 @@ public:
class st_select_lex;
+class Window_spec;
/**
Class Item_sum is the base class used for special expressions that SQL calls
@@ -339,6 +340,9 @@ private:
*/
bool with_distinct;
+ /* TRUE if this is aggregate function of a window function */
+ bool window_func_sum_expr_flag;
+
public:
bool has_force_copy_fields() const { return force_copy_fields; }
@@ -347,7 +351,10 @@ public:
enum Sumfunctype
{ COUNT_FUNC, COUNT_DISTINCT_FUNC, SUM_FUNC, SUM_DISTINCT_FUNC, AVG_FUNC,
AVG_DISTINCT_FUNC, MIN_FUNC, MAX_FUNC, STD_FUNC,
- VARIANCE_FUNC, SUM_BIT_FUNC, UDF_SUM_FUNC, GROUP_CONCAT_FUNC
+ VARIANCE_FUNC, SUM_BIT_FUNC, UDF_SUM_FUNC, GROUP_CONCAT_FUNC,
+ ROW_NUMBER_FUNC, RANK_FUNC, DENSE_RANK_FUNC, PERCENT_RANK_FUNC,
+ CUME_DIST_FUNC, NTILE_FUNC, FIRST_VALUE_FUNC, LAST_VALUE_FUNC,
+ NTH_VALUE_FUNC, LEAD_FUNC, LAG_FUNC
};
Item **ref_by; /* pointer to a ref to the object used to register it */
@@ -375,7 +382,7 @@ protected:
*/
Item **orig_args, *tmp_orig_args[2];
- static ulonglong ram_limitation(THD *thd);
+ static size_t ram_limitation(THD *thd);
public:
@@ -402,14 +409,35 @@ public:
Item_sum(THD *thd, Item_sum *item);
enum Type type() const { return SUM_FUNC_ITEM; }
virtual enum Sumfunctype sum_func () const=0;
+ bool is_aggr_sum_func()
+ {
+ switch (sum_func()) {
+ case COUNT_FUNC:
+ case COUNT_DISTINCT_FUNC:
+ case SUM_FUNC:
+ case SUM_DISTINCT_FUNC:
+ case AVG_FUNC:
+ case AVG_DISTINCT_FUNC:
+ case MIN_FUNC:
+ case MAX_FUNC:
+ case STD_FUNC:
+ case VARIANCE_FUNC:
+ case SUM_BIT_FUNC:
+ case UDF_SUM_FUNC:
+ case GROUP_CONCAT_FUNC:
+ return true;
+ default:
+ return false;
+ }
+ }
/**
Resets the aggregate value to its default and aggregates the current
value of its attribute(s).
- */
+ */
inline bool reset_and_add()
{
- aggregator_clear();
- return aggregator_add();
+ aggregator_clear();
+ return aggregator_add();
};
/*
@@ -429,7 +457,8 @@ public:
*/
virtual void update_field()=0;
virtual bool keep_field_type(void) const { return 0; }
- virtual void fix_length_and_dec() { maybe_null=1; null_value=1; }
+ virtual bool fix_length_and_dec()
+ { maybe_null=1; null_value=1; return FALSE; }
virtual Item *result_item(THD *thd, Field *field);
void update_used_tables ();
@@ -455,7 +484,7 @@ public:
*/
void make_const ()
{
- used_tables_cache= 0;
+ used_tables_cache= 0;
const_item_cache= true;
}
void reset_forced_const() { const_item_cache= false; }
@@ -482,8 +511,11 @@ public:
}
virtual void make_unique() { force_copy_fields= TRUE; }
Item *get_tmp_table_item(THD *thd);
- Field *create_tmp_field(bool group, TABLE *table);
- virtual bool collect_outer_ref_processor(uchar *param);
+ Field *create_tmp_field(bool group, TABLE *table)
+ {
+ return Item::create_tmp_field(group, table, MY_INT32_NUM_DECIMAL_DIGITS);
+ }
+ virtual bool collect_outer_ref_processor(void *param);
bool init_sum_func_check(THD *thd);
bool check_sum_func(THD *thd, Item **ref);
bool register_sum_func(THD *thd, Item **ref);
@@ -539,11 +571,15 @@ public:
virtual bool add()= 0;
virtual bool setup(THD *thd) { return false; }
+ virtual bool supports_removal() const { return false; }
+ virtual void remove() { DBUG_ASSERT(0); }
+
virtual void cleanup();
- bool check_vcol_func_processor(uchar *int_arg)
- {
- return trace_unsupported_by_check_vcol_func_processor(func_name());
- }
+ bool check_vcol_func_processor(void *arg);
+ virtual void setup_window_func(THD *thd, Window_spec *window_spec) {}
+ void mark_as_window_func_sum_expr() { window_func_sum_expr_flag= true; }
+ bool is_window_func_sum_expr() { return window_func_sum_expr_flag; }
+ virtual void setup_caches(THD *thd) {};
};
@@ -708,6 +744,7 @@ public:
class Item_sum_int :public Item_sum_num
{
public:
+ Item_sum_int(THD *thd): Item_sum_num(thd) {}
Item_sum_int(THD *thd, Item *item_par): Item_sum_num(thd, item_par) {}
Item_sum_int(THD *thd, List<Item> &list): Item_sum_num(thd, list) {}
Item_sum_int(THD *thd, Item_sum_int *item) :Item_sum_num(thd, item) {}
@@ -715,19 +752,20 @@ public:
String *val_str(String*str);
my_decimal *val_decimal(my_decimal *);
enum Item_result result_type () const { return INT_RESULT; }
- void fix_length_and_dec()
- { decimals=0; max_length=21; maybe_null=null_value=0; }
+ enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+ bool fix_length_and_dec()
+ { decimals=0; max_length=21; maybe_null=null_value=0; return FALSE; }
};
-class Item_sum_sum :public Item_sum_num
+class Item_sum_sum :public Item_sum_num,
+ public Type_handler_hybrid_field_type
{
protected:
- Item_result hybrid_type;
double sum;
my_decimal dec_buffs[2];
uint curr_dec_buff;
- void fix_length_and_dec();
+ bool fix_length_and_dec();
public:
Item_sum_sum(THD *thd, Item *item_par, bool distinct):
@@ -746,7 +784,12 @@ public:
longlong val_int();
String *val_str(String*str);
my_decimal *val_decimal(my_decimal *);
- enum Item_result result_type () const { return hybrid_type; }
+ enum_field_types field_type() const
+ { return Type_handler_hybrid_field_type::field_type(); }
+ enum Item_result result_type () const
+ { return Type_handler_hybrid_field_type::result_type(); }
+ enum Item_result cmp_type () const
+ { return Type_handler_hybrid_field_type::cmp_type(); }
void reset_field();
void update_field();
void no_rows_in_result() {}
@@ -755,6 +798,18 @@ public:
return has_with_distinct() ? "sum(distinct " : "sum(";
}
Item *copy_or_same(THD* thd);
+ void remove();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_sum>(thd, mem_root, this); }
+
+ bool supports_removal() const
+ {
+ return true;
+ }
+
+private:
+ void add_helper(bool perform_removal);
+ ulonglong count;
};
@@ -767,6 +822,7 @@ class Item_sum_count :public Item_sum_int
void clear();
bool add();
void cleanup();
+ void remove();
public:
Item_sum_count(THD *thd, Item *item_par):
@@ -807,12 +863,21 @@ class Item_sum_count :public Item_sum_int
return has_with_distinct() ? "count(distinct " : "count(";
}
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_count>(thd, mem_root, this); }
+
+ bool supports_removal() const
+ {
+ return true;
+ }
};
class Item_sum_avg :public Item_sum_sum
{
public:
+ // TODO-cvicentiu given that Item_sum_sum now uses a counter of its own, in
+ // order to implement remove(), it is possible to remove this member.
ulonglong count;
uint prec_increment;
uint f_precision, f_scale, dec_bin_size;
@@ -824,13 +889,14 @@ public:
:Item_sum_sum(thd, item), count(item->count),
prec_increment(item->prec_increment) {}
- void fix_length_and_dec();
+ bool fix_length_and_dec();
enum Sumfunctype sum_func () const
{
return has_with_distinct() ? AVG_DISTINCT_FUNC : AVG_FUNC;
}
void clear();
bool add();
+ void remove();
double val_real();
// In SPs we might force the "wrong" type with select into a declare variable
longlong val_int() { return val_int_from_real(); }
@@ -851,6 +917,13 @@ public:
count= 0;
Item_sum_sum::cleanup();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_avg>(thd, mem_root, this); }
+
+ bool supports_removal() const
+ {
+ return true;
+ }
};
@@ -876,7 +949,7 @@ But, this falls prey to catastrophic cancellation. Instead, use the recurrence
class Item_sum_variance : public Item_sum_num
{
- void fix_length_and_dec();
+ bool fix_length_and_dec();
public:
double recurrence_m, recurrence_s; /* Used in recurrence relation. */
@@ -909,6 +982,8 @@ public:
count= 0;
Item_sum_num::cleanup();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_variance>(thd, mem_root, this); }
};
/*
@@ -928,6 +1003,8 @@ class Item_sum_std :public Item_sum_variance
Item *result_item(THD *thd, Field *field);
const char *func_name() const { return "std("; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_std>(thd, mem_root, this); }
};
// This class is a string or number function depending on num_func
@@ -961,6 +1038,7 @@ protected:
double val_real();
longlong val_int();
my_decimal *val_decimal(my_decimal *);
+ bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
void reset_field();
String *val_str(String *);
bool keep_field_type(void) const { return 1; }
@@ -980,6 +1058,7 @@ protected:
void no_rows_in_result();
void restore_to_before_no_rows_in_result();
Field *create_tmp_field(bool group, TABLE *table);
+ void setup_caches(THD *thd) { setup_hybrid(thd, arguments()[0], NULL); }
};
@@ -993,6 +1072,8 @@ public:
bool add();
const char *func_name() const { return "min("; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_min>(thd, mem_root, this); }
};
@@ -1006,31 +1087,79 @@ public:
bool add();
const char *func_name() const { return "max("; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_max>(thd, mem_root, this); }
};
class Item_sum_bit :public Item_sum_int
{
-protected:
- ulonglong reset_bits,bits;
-
public:
Item_sum_bit(THD *thd, Item *item_par, ulonglong reset_arg):
- Item_sum_int(thd, item_par), reset_bits(reset_arg), bits(reset_arg) {}
+ Item_sum_int(thd, item_par), reset_bits(reset_arg), bits(reset_arg),
+ as_window_function(FALSE), num_values_added(0) {}
Item_sum_bit(THD *thd, Item_sum_bit *item):
- Item_sum_int(thd, item), reset_bits(item->reset_bits), bits(item->bits) {}
+ Item_sum_int(thd, item), reset_bits(item->reset_bits), bits(item->bits),
+ as_window_function(item->as_window_function),
+ num_values_added(item->num_values_added)
+ {
+ if (as_window_function)
+ memcpy(bit_counters, item->bit_counters, sizeof(bit_counters));
+ }
enum Sumfunctype sum_func () const {return SUM_BIT_FUNC;}
void clear();
longlong val_int();
void reset_field();
void update_field();
- void fix_length_and_dec()
- { decimals= 0; max_length=21; unsigned_flag= 1; maybe_null= null_value= 0; }
+ bool fix_length_and_dec()
+ {
+ decimals= 0; max_length=21; unsigned_flag= 1; maybe_null= null_value= 0;
+ return FALSE;
+ }
void cleanup()
{
bits= reset_bits;
+ if (as_window_function)
+ clear_as_window();
Item_sum_int::cleanup();
}
+ void setup_window_func(THD *thd __attribute__((unused)),
+ Window_spec *window_spec __attribute__((unused)))
+ {
+ as_window_function= TRUE;
+ clear_as_window();
+ }
+ void remove()
+ {
+ if (as_window_function)
+ {
+ remove_as_window(args[0]->val_int());
+ return;
+ }
+ // Unless we're counting bits, we can not remove anything.
+ DBUG_ASSERT(0);
+ }
+
+ bool supports_removal() const
+ {
+ return true;
+ }
+
+protected:
+ static const int NUM_BIT_COUNTERS= 64;
+ ulonglong reset_bits,bits;
+ /*
+ Marks whether the function is to be computed as a window function.
+ */
+ bool as_window_function;
+ // When used as an aggregate window function, we need to store
+ // this additional information.
+ ulonglong num_values_added;
+ ulonglong bit_counters[NUM_BIT_COUNTERS];
+ bool add_as_window(ulonglong value);
+ bool remove_as_window(ulonglong value);
+ bool clear_as_window();
+ virtual void set_bits_from_counters()= 0;
};
@@ -1042,28 +1171,43 @@ public:
bool add();
const char *func_name() const { return "bit_or("; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_or>(thd, mem_root, this); }
+
+private:
+ void set_bits_from_counters();
};
class Item_sum_and :public Item_sum_bit
{
- public:
+public:
Item_sum_and(THD *thd, Item *item_par):
Item_sum_bit(thd, item_par, ULONGLONG_MAX) {}
Item_sum_and(THD *thd, Item_sum_and *item) :Item_sum_bit(thd, item) {}
bool add();
const char *func_name() const { return "bit_and("; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_and>(thd, mem_root, this); }
+
+private:
+ void set_bits_from_counters();
};
class Item_sum_xor :public Item_sum_bit
{
- public:
+public:
Item_sum_xor(THD *thd, Item *item_par): Item_sum_bit(thd, item_par, 0) {}
Item_sum_xor(THD *thd, Item_sum_xor *item) :Item_sum_bit(thd, item) {}
bool add();
const char *func_name() const { return "bit_xor("; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_xor>(thd, mem_root, this); }
+
+private:
+ void set_bits_from_counters();
};
@@ -1086,6 +1230,10 @@ public:
}
table_map used_tables() const { return (table_map) 1L; }
void save_in_result_field(bool no_conversions) { DBUG_ASSERT(0); }
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(name, arg, VCOL_IMPOSSIBLE);
+ }
};
@@ -1099,10 +1247,6 @@ public:
{ }
enum Type type() const { return FIELD_AVG_ITEM; }
bool is_null() { update_null_value(); return null_value; }
- bool check_vcol_func_processor(uchar *int_arg)
- {
- return trace_unsupported_by_check_vcol_func_processor("avg_field");
- }
};
@@ -1118,6 +1262,8 @@ public:
my_decimal *val_decimal(my_decimal *dec) { return val_decimal_from_real(dec); }
String *val_str(String *str) { return val_string_from_real(str); }
double val_real();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_avg_field_double>(thd, mem_root, this); }
};
@@ -1137,6 +1283,8 @@ public:
longlong val_int() { return val_int_from_decimal(); }
String *val_str(String *str) { return val_string_from_decimal(str); }
my_decimal *val_decimal(my_decimal *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_avg_field_decimal>(thd, mem_root, this); }
};
@@ -1157,10 +1305,8 @@ public:
bool is_null() { update_null_value(); return null_value; }
enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
enum Item_result result_type () const { return REAL_RESULT; }
- bool check_vcol_func_processor(uchar *int_arg)
- {
- return trace_unsupported_by_check_vcol_func_processor("var_field");
- }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_variance_field>(thd, mem_root, this); }
};
@@ -1172,6 +1318,8 @@ public:
{ }
enum Type type() const { return FIELD_STD_ITEM; }
double val_real();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_std_field>(thd, mem_root, this); }
};
@@ -1254,8 +1402,13 @@ class Item_sum_udf_float :public Item_udf_sum
double val_real();
String *val_str(String*str);
my_decimal *val_decimal(my_decimal *);
- void fix_length_and_dec() { fix_num_length_and_dec(); }
+ enum Item_result result_type () const { return REAL_RESULT; }
+ enum Item_result cmp_type () const { return REAL_RESULT; }
+ enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+ bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_udf_float>(thd, mem_root, this); }
};
@@ -1274,8 +1427,11 @@ public:
String *val_str(String*str);
my_decimal *val_decimal(my_decimal *);
enum Item_result result_type () const { return INT_RESULT; }
- void fix_length_and_dec() { decimals=0; max_length=21; }
+ enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+ bool fix_length_and_dec() { decimals=0; max_length=21; return FALSE; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_udf_int>(thd, mem_root, this); }
};
@@ -1313,8 +1469,11 @@ public:
}
my_decimal *val_decimal(my_decimal *dec);
enum Item_result result_type () const { return STRING_RESULT; }
- void fix_length_and_dec();
+ enum_field_types field_type() const { return string_field_type(); }
+ bool fix_length_and_dec();
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_udf_str>(thd, mem_root, this); }
};
@@ -1332,8 +1491,11 @@ public:
longlong val_int();
my_decimal *val_decimal(my_decimal *);
enum Item_result result_type () const { return DECIMAL_RESULT; }
- void fix_length_and_dec() { fix_num_length_and_dec(); }
+ enum_field_types field_type() const { return MYSQL_TYPE_NEWDECIMAL; }
+ bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; }
Item *copy_or_same(THD* thd);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_udf_decimal>(thd, mem_root, this); }
};
#else /* Dummy functions to get sql_yacc.cc compiled */
@@ -1405,7 +1567,7 @@ public:
double val_real() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; }
longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; }
enum Item_result result_type () const { return STRING_RESULT; }
- void fix_length_and_dec() { maybe_null=1; max_length=0; }
+ bool fix_length_and_dec() { maybe_null=1; max_length=0; return FALSE; }
enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; }
void clear() {}
bool add() { return 0; }
@@ -1467,6 +1629,8 @@ class Item_func_group_concat : public Item_sum
friend int dump_leaf_key(void* key_arg,
element_count count __attribute__((unused)),
void* item_arg);
+protected:
+ virtual Field *make_string_field(TABLE *table);
public:
Item_func_group_concat(THD *thd, Name_resolution_context *context_arg,
@@ -1478,9 +1642,9 @@ public:
void cleanup();
enum Sumfunctype sum_func () const {return GROUP_CONCAT_FUNC;}
- const char *func_name() const { return "group_concat"; }
+ const char *func_name() const { return "group_concat("; }
virtual Item_result result_type () const { return STRING_RESULT; }
- virtual Field *make_string_field(TABLE *table);
+ virtual Item_result cmp_type () const { return STRING_RESULT; }
enum_field_types field_type() const
{
if (too_big_for_varchar())
@@ -1523,8 +1687,10 @@ public:
Item *copy_or_same(THD* thd);
void no_rows_in_result() {}
virtual void print(String *str, enum_query_type query_type);
- virtual bool change_context_processor(uchar *cntx)
+ virtual bool change_context_processor(void *cntx)
{ context= (Name_resolution_context *)cntx; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_group_concat>(thd, mem_root, this); }
};
#endif /* ITEM_SUM_INCLUDED */
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index e8440803295..23d9c3d2f04 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -430,7 +430,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
make_truncated_value_warning(current_thd,
Sql_condition::WARN_LEVEL_WARN,
val_begin, length,
- cached_timestamp_type, NullS);
+ cached_timestamp_type, 0, NullS);
break;
}
} while (++val != val_end);
@@ -478,14 +478,14 @@ static bool make_date_time(const LEX_CSTRING &format, MYSQL_TIME *l_time,
{
switch (*++ptr) {
case 'M':
- if (!l_time->month)
+ if (type == MYSQL_TIMESTAMP_TIME || !l_time->month)
return 1;
str->append(locale->month_names->type_names[l_time->month-1],
(uint) strlen(locale->month_names->type_names[l_time->month-1]),
system_charset_info);
break;
case 'b':
- if (!l_time->month)
+ if (type == MYSQL_TIMESTAMP_TIME || !l_time->month)
return 1;
str->append(locale->ab_month_names->type_names[l_time->month-1],
(uint) strlen(locale->ab_month_names->type_names[l_time->month-1]),
@@ -535,26 +535,38 @@ static bool make_date_time(const LEX_CSTRING &format, MYSQL_TIME *l_time,
}
break;
case 'Y':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->year, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 4, '0');
break;
case 'y':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->year%100, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 2, '0');
break;
case 'm':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->month, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 2, '0');
break;
case 'c':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->month, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 1, '0');
break;
case 'd':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->day, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 2, '0');
break;
case 'e':
+ if (type == MYSQL_TIMESTAMP_TIME)
+ return 1;
length= (uint) (int10_to_str(l_time->day, intbuff, 10) - intbuff);
str->append_with_prefill(intbuff, length, 1, '0');
break;
@@ -703,7 +715,7 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs,
{
const char *end=str+length;
uint i;
- long field_length= 0;
+ size_t field_length= 0;
while (str != end && !my_isdigit(cs,*str))
str++;
@@ -714,7 +726,7 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs,
const char *start= str;
for (value= 0; str != end && my_isdigit(cs, *str); str++)
value= value*10 + *str - '0';
- if ((field_length= str - start) >= 20)
+ if ((field_length= (size_t)(str - start)) >= 20)
return true;
values[i]= value;
while (str != end && !my_isdigit(cs,*str))
@@ -942,7 +954,7 @@ longlong Item_func_month::val_int()
}
-void Item_func_monthname::fix_length_and_dec()
+bool Item_func_monthname::fix_length_and_dec()
{
THD* thd= current_thd;
CHARSET_INFO *cs= thd->variables.collation_connection;
@@ -950,7 +962,8 @@ void Item_func_monthname::fix_length_and_dec()
collation.set(cs, DERIVATION_COERCIBLE, locale->repertoire());
decimals=0;
max_length= locale->max_month_name_length * collation.collation->mbmaxlen;
- maybe_null=1;
+ maybe_null=1;
+ return FALSE;
}
@@ -1051,13 +1064,15 @@ uint week_mode(uint mode)
longlong Item_func_week::val_int()
{
DBUG_ASSERT(fixed == 1);
- uint year;
+ uint year, week_format;
MYSQL_TIME ltime;
if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
return 0;
- return (longlong) calc_week(&ltime,
- week_mode((uint) args[1]->val_int()),
- &year);
+ if (arg_count > 1)
+ week_format= (uint)args[1]->val_int();
+ else
+ week_format= current_thd->variables.default_week_format;
+ return (longlong) calc_week(&ltime, week_mode(week_format), &year);
}
@@ -1088,7 +1103,7 @@ longlong Item_func_weekday::val_int()
odbc_type) + MY_TEST(odbc_type);
}
-void Item_func_dayname::fix_length_and_dec()
+bool Item_func_dayname::fix_length_and_dec()
{
THD* thd= current_thd;
CHARSET_INFO *cs= thd->variables.collation_connection;
@@ -1096,7 +1111,8 @@ void Item_func_dayname::fix_length_and_dec()
collation.set(cs, DERIVATION_COERCIBLE, locale->repertoire());
decimals=0;
max_length= locale->max_day_name_length * collation.collation->mbmaxlen;
- maybe_null=1;
+ maybe_null=1;
+ return FALSE;
}
@@ -1463,15 +1479,14 @@ bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval)
}
-void Item_temporal_func::fix_length_and_dec()
+bool Item_temporal_func::fix_length_and_dec()
{
uint char_length= mysql_temporal_int_part_length(field_type());
/*
We set maybe_null to 1 as default as any bad argument with date or
time can get us to return NULL.
*/
- maybe_null= 1;
-
+ maybe_null= (arg_count > 0);
if (decimals)
{
if (decimals == NOT_FIXED_DEC)
@@ -1490,6 +1505,7 @@ void Item_temporal_func::fix_length_and_dec()
DERIVATION_COERCIBLE : DERIVATION_NUMERIC,
MY_REPERTOIRE_ASCII);
fix_char_length(char_length);
+ return FALSE;
}
String *Item_temporal_func::val_str(String *str)
@@ -1564,9 +1580,9 @@ String *Item_temporal_hybrid_func::val_str_ascii(String *str)
return (String *) 0;
/* Check that the returned timestamp type matches to the function type */
- DBUG_ASSERT(cached_field_type == MYSQL_TYPE_STRING ||
+ DBUG_ASSERT(field_type() == MYSQL_TYPE_STRING ||
ltime.time_type == MYSQL_TIMESTAMP_NONE ||
- mysql_type_to_time_type(cached_field_type) == ltime.time_type);
+ mysql_type_to_time_type(field_type()) == ltime.time_type);
return str;
}
@@ -1587,24 +1603,12 @@ bool Item_func_from_days::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
}
-void Item_func_curdate::fix_length_and_dec()
-{
- store_now_in_TIME(&ltime);
-
- /* We don't need to set second_part and neg because they already 0 */
- ltime.hour= ltime.minute= ltime.second= 0;
- ltime.time_type= MYSQL_TIMESTAMP_DATE;
- Item_datefunc::fix_length_and_dec();
- maybe_null= false;
-}
-
/**
Converts current time in my_time_t to MYSQL_TIME represenatation for local
time zone. Defines time zone (local) used for whole CURDATE function.
*/
-void Item_func_curdate_local::store_now_in_TIME(MYSQL_TIME *now_time)
+void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
- THD *thd= current_thd;
thd->variables.time_zone->gmt_sec_to_TIME(now_time, thd->query_start());
thd->time_zone_used= 1;
}
@@ -1614,9 +1618,8 @@ void Item_func_curdate_local::store_now_in_TIME(MYSQL_TIME *now_time)
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
time zone. Defines time zone (UTC) used for whole UTC_DATE function.
*/
-void Item_func_curdate_utc::store_now_in_TIME(MYSQL_TIME *now_time)
+void Item_func_curdate_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
- THD *thd= current_thd;
my_tz_UTC->gmt_sec_to_TIME(now_time, thd->query_start());
/*
We are not flagging this query as using time zone, since it uses fixed
@@ -1628,6 +1631,17 @@ void Item_func_curdate_utc::store_now_in_TIME(MYSQL_TIME *now_time)
bool Item_func_curdate::get_date(MYSQL_TIME *res,
ulonglong fuzzy_date __attribute__((unused)))
{
+ THD *thd= current_thd;
+ query_id_t query_id= thd->query_id;
+ /* Cache value for this query */
+ if (last_query_id != query_id)
+ {
+ last_query_id= query_id;
+ store_now_in_TIME(thd, &ltime);
+ /* We don't need to set second_part and neg because they already 0 */
+ ltime.hour= ltime.minute= ltime.second= 0;
+ ltime.time_type= MYSQL_TIMESTAMP_DATE;
+ }
*res=ltime;
return 0;
}
@@ -1647,10 +1661,27 @@ bool Item_func_curtime::fix_fields(THD *thd, Item **items)
bool Item_func_curtime::get_date(MYSQL_TIME *res,
ulonglong fuzzy_date __attribute__((unused)))
{
+ THD *thd= current_thd;
+ query_id_t query_id= thd->query_id;
+ /* Cache value for this query */
+ if (last_query_id != query_id)
+ {
+ last_query_id= query_id;
+ store_now_in_TIME(thd, &ltime);
+ }
*res= ltime;
return 0;
}
+void Item_func_curtime::print(String *str, enum_query_type query_type)
+{
+ str->append(func_name());
+ str->append('(');
+ if (decimals)
+ str->append_ulonglong(decimals);
+ str->append(')');
+}
+
static void set_sec_part(ulong sec_part, MYSQL_TIME *ltime, Item *item)
{
DBUG_ASSERT(item->decimals == AUTO_SEC_PART_DIGITS ||
@@ -1667,9 +1698,8 @@ static void set_sec_part(ulong sec_part, MYSQL_TIME *ltime, Item *item)
Converts current time in my_time_t to MYSQL_TIME represenatation for local
time zone. Defines time zone (local) used for whole CURTIME function.
*/
-void Item_func_curtime_local::store_now_in_TIME(MYSQL_TIME *now_time)
+void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
- THD *thd= current_thd;
thd->variables.time_zone->gmt_sec_to_TIME(now_time, thd->query_start());
now_time->year= now_time->month= now_time->day= 0;
now_time->time_type= MYSQL_TIMESTAMP_TIME;
@@ -1682,9 +1712,8 @@ void Item_func_curtime_local::store_now_in_TIME(MYSQL_TIME *now_time)
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
time zone. Defines time zone (UTC) used for whole UTC_TIME function.
*/
-void Item_func_curtime_utc::store_now_in_TIME(MYSQL_TIME *now_time)
+void Item_func_curtime_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
- THD *thd= current_thd;
my_tz_UTC->gmt_sec_to_TIME(now_time, thd->query_start());
now_time->year= now_time->month= now_time->day= 0;
now_time->time_type= MYSQL_TIMESTAMP_TIME;
@@ -1706,13 +1735,40 @@ bool Item_func_now::fix_fields(THD *thd, Item **items)
return Item_temporal_func::fix_fields(thd, items);
}
+void Item_func_now::print(String *str, enum_query_type query_type)
+{
+ str->append(func_name());
+ str->append('(');
+ if (decimals)
+ str->append_ulonglong(decimals);
+ str->append(')');
+}
+
+
+int Item_func_now_local::save_in_field(Field *field, bool no_conversions)
+{
+ if (field->type() == MYSQL_TYPE_TIMESTAMP)
+ {
+ THD *thd= field->get_thd();
+ my_time_t ts= thd->query_start();
+ uint dec= MY_MIN(decimals, field->decimals());
+ ulong sec_part= dec ? thd->query_start_sec_part() : 0;
+ sec_part-= my_time_fraction_remainder(sec_part, dec);
+ field->set_notnull();
+ ((Field_timestamp*)field)->store_TIME(ts, sec_part);
+ return 0;
+ }
+ else
+ return Item_temporal_func::save_in_field(field, no_conversions);
+}
+
+
/**
Converts current time in my_time_t to MYSQL_TIME represenatation for local
time zone. Defines time zone (local) used for whole NOW function.
*/
-void Item_func_now_local::store_now_in_TIME(MYSQL_TIME *now_time)
+void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
- THD *thd= current_thd;
thd->variables.time_zone->gmt_sec_to_TIME(now_time, thd->query_start());
set_sec_part(thd->query_start_sec_part(), now_time, this);
thd->time_zone_used= 1;
@@ -1723,9 +1779,8 @@ void Item_func_now_local::store_now_in_TIME(MYSQL_TIME *now_time)
Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
time zone. Defines time zone (UTC) used for whole UTC_TIMESTAMP function.
*/
-void Item_func_now_utc::store_now_in_TIME(MYSQL_TIME *now_time)
+void Item_func_now_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
- THD *thd= current_thd;
my_tz_UTC->gmt_sec_to_TIME(now_time, thd->query_start());
set_sec_part(thd->query_start_sec_part(), now_time, this);
/*
@@ -1738,6 +1793,14 @@ void Item_func_now_utc::store_now_in_TIME(MYSQL_TIME *now_time)
bool Item_func_now::get_date(MYSQL_TIME *res,
ulonglong fuzzy_date __attribute__((unused)))
{
+ THD *thd= current_thd;
+ query_id_t query_id= thd->query_id;
+ /* Cache value for this query */
+ if (last_query_id != query_id)
+ {
+ last_query_id= query_id;
+ store_now_in_TIME(thd, &ltime);
+ }
*res= ltime;
return 0;
}
@@ -1747,9 +1810,8 @@ bool Item_func_now::get_date(MYSQL_TIME *res,
Converts current time in my_time_t to MYSQL_TIME represenatation for local
time zone. Defines time zone (local) used for whole SYSDATE function.
*/
-void Item_func_sysdate_local::store_now_in_TIME(MYSQL_TIME *now_time)
+void Item_func_sysdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
- THD *thd= current_thd;
my_hrtime_t now= my_hrtime();
thd->variables.time_zone->gmt_sec_to_TIME(now_time, hrtime_to_my_time(now));
set_sec_part(hrtime_sec_part(now), now_time, this);
@@ -1760,7 +1822,7 @@ void Item_func_sysdate_local::store_now_in_TIME(MYSQL_TIME *now_time)
bool Item_func_sysdate_local::get_date(MYSQL_TIME *res,
ulonglong fuzzy_date __attribute__((unused)))
{
- store_now_in_TIME(res);
+ store_now_in_TIME(current_thd, res);
return 0;
}
@@ -1804,18 +1866,18 @@ overflow:
{
ErrConvInteger err2(sec, unsigned_flag);
make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &err2, MYSQL_TIMESTAMP_TIME, NullS);
+ &err2, MYSQL_TIMESTAMP_TIME, 0, NullS);
}
else
{
ErrConvString err2(err);
make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &err2, MYSQL_TIMESTAMP_TIME, NullS);
+ &err2, MYSQL_TIMESTAMP_TIME, 0, NullS);
}
return 0;
}
-void Item_func_date_format::fix_length_and_dec()
+bool Item_func_date_format::fix_length_and_dec()
{
THD* thd= current_thd;
locale= thd->variables.lc_time_names;
@@ -1846,6 +1908,7 @@ void Item_func_date_format::fix_length_and_dec()
set_if_smaller(max_length,MAX_BLOB_WIDTH);
}
maybe_null=1; // If wrong date
+ return FALSE;
}
@@ -1988,13 +2051,13 @@ null_date:
}
-void Item_func_from_unixtime::fix_length_and_dec()
-{
+bool Item_func_from_unixtime::fix_length_and_dec()
+{
THD *thd= current_thd;
thd->time_zone_used= 1;
tz= thd->variables.time_zone;
decimals= args[0]->decimals;
- Item_temporal_func::fix_length_and_dec();
+ return Item_temporal_func::fix_length_and_dec();
}
@@ -2021,10 +2084,10 @@ bool Item_func_from_unixtime::get_date(MYSQL_TIME *ltime,
}
-void Item_func_convert_tz::fix_length_and_dec()
+bool Item_func_convert_tz::fix_length_and_dec()
{
decimals= args[0]->temporal_precision(MYSQL_TYPE_DATETIME);
- Item_temporal_func::fix_length_and_dec();
+ return Item_temporal_func::fix_length_and_dec();
}
@@ -2073,7 +2136,7 @@ void Item_func_convert_tz::cleanup()
}
-void Item_date_add_interval::fix_length_and_dec()
+bool Item_date_add_interval::fix_length_and_dec()
{
enum_field_types arg0_field_type;
@@ -2092,7 +2155,7 @@ void Item_date_add_interval::fix_length_and_dec()
(This is because you can't know if the string contains a DATE,
MYSQL_TIME or DATETIME argument)
*/
- cached_field_type= MYSQL_TYPE_STRING;
+ set_handler_by_field_type(MYSQL_TYPE_STRING);
arg0_field_type= args[0]->field_type();
uint interval_dec= 0;
if (int_type == INTERVAL_MICROSECOND ||
@@ -2106,29 +2169,29 @@ void Item_date_add_interval::fix_length_and_dec()
arg0_field_type == MYSQL_TYPE_TIMESTAMP)
{
decimals= MY_MAX(args[0]->temporal_precision(MYSQL_TYPE_DATETIME), interval_dec);
- cached_field_type= MYSQL_TYPE_DATETIME;
+ set_handler_by_field_type(MYSQL_TYPE_DATETIME);
}
else if (arg0_field_type == MYSQL_TYPE_DATE)
{
if (int_type <= INTERVAL_DAY || int_type == INTERVAL_YEAR_MONTH)
- cached_field_type= arg0_field_type;
+ set_handler_by_field_type(arg0_field_type);
else
{
decimals= interval_dec;
- cached_field_type= MYSQL_TYPE_DATETIME;
+ set_handler_by_field_type(MYSQL_TYPE_DATETIME);
}
}
else if (arg0_field_type == MYSQL_TYPE_TIME)
{
decimals= MY_MAX(args[0]->temporal_precision(MYSQL_TYPE_TIME), interval_dec);
if (int_type >= INTERVAL_DAY && int_type != INTERVAL_YEAR_MONTH)
- cached_field_type= arg0_field_type;
+ set_handler_by_field_type(arg0_field_type);
else
- cached_field_type= MYSQL_TYPE_DATETIME;
+ set_handler_by_field_type(MYSQL_TYPE_DATETIME);
}
else
decimals= MY_MAX(args[0]->temporal_precision(MYSQL_TYPE_DATETIME), interval_dec);
- Item_temporal_func::fix_length_and_dec();
+ return Item_temporal_func::fix_length_and_dec();
}
@@ -2137,7 +2200,7 @@ bool Item_date_add_interval::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
INTERVAL interval;
if (args[0]->get_date(ltime,
- cached_field_type == MYSQL_TYPE_TIME ?
+ field_type() == MYSQL_TYPE_TIME ?
TIME_TIME_ONLY : 0) ||
get_interval_value(args[1], int_type, &interval))
return (null_value=1);
@@ -2183,13 +2246,11 @@ static const char *interval_names[]=
void Item_date_add_interval::print(String *str, enum_query_type query_type)
{
- str->append('(');
- args[0]->print(str, query_type);
+ args[0]->print_parenthesised(str, query_type, ADDINTERVAL_PRECEDENCE);
str->append(date_sub_interval?" - interval ":" + interval ");
- args[1]->print(str, query_type);
+ args[1]->print_parenthesised(str, query_type, INTERVAL_PRECEDENCE);
str->append(' ');
str->append(interval_names[int_type]);
- str->append(')');
}
void Item_extract::print(String *str, enum_query_type query_type)
@@ -2201,7 +2262,7 @@ void Item_extract::print(String *str, enum_query_type query_type)
str->append(')');
}
-void Item_extract::fix_length_and_dec()
+bool Item_extract::fix_length_and_dec()
{
maybe_null=1; // If wrong date
switch (int_type) {
@@ -2227,6 +2288,7 @@ void Item_extract::fix_length_and_dec()
case INTERVAL_SECOND_MICROSECOND: set_time_length(8); break; // ssffffff
case INTERVAL_LAST: DBUG_ASSERT(0); break; /* purecov: deadcode */
}
+ return FALSE;
}
@@ -2338,7 +2400,7 @@ void Item_temporal_typecast::print(String *str, enum_query_type query_type)
args[0]->print(str, query_type);
str->append(STRING_WITH_LEN(" as "));
str->append(cast_type());
- if (decimals)
+ if (decimals && decimals != NOT_FIXED_DEC)
{
str->append('(');
str->append(llstr(decimals, buf));
@@ -2411,7 +2473,7 @@ String *Item_char_typecast::copy(String *str, CHARSET_INFO *strcs)
null_value= 1; // EOM
return 0;
}
- check_truncation_with_warn(str, copier.source_end_pos() - str->ptr());
+ check_truncation_with_warn(str, (uint)(copier.source_end_pos() - str->ptr()));
return &tmp_value;
}
@@ -2472,13 +2534,9 @@ String *Item_char_typecast::val_str(String *str)
if (!charset_conversion)
{
// Try to reuse the original string (if well formed).
- MY_STRCOPY_STATUS status;
- cs->cset->well_formed_char_length(cs, res->ptr(), res->end(),
- cast_length, &status);
- if (!status.m_well_formed_error_pos)
- {
- res= reuse(res, status.m_source_end_pos - res->ptr());
- }
+ Well_formed_prefix prefix(cs, res->ptr(), res->end(), cast_length);
+ if (!prefix.well_formed_error_pos())
+ res= reuse(res, prefix.length());
goto end;
}
// Character set conversion, or bad bytes were found.
@@ -2492,7 +2550,7 @@ end:
}
-void Item_char_typecast::fix_length_and_dec()
+bool Item_char_typecast::fix_length_and_dec()
{
uint32 char_length;
/*
@@ -2538,6 +2596,7 @@ void Item_char_typecast::fix_length_and_dec()
(cast_cs == &my_charset_bin ? 1 :
args[0]->collation.collation->mbmaxlen));
max_length= char_length * cast_cs->mbmaxlen;
+ return FALSE;
}
@@ -2626,7 +2685,7 @@ err:
}
-void Item_func_add_time::fix_length_and_dec()
+bool Item_func_add_time::fix_length_and_dec()
{
enum_field_types arg0_field_type;
decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
@@ -2641,24 +2700,24 @@ void Item_func_add_time::fix_length_and_dec()
- Otherwise the result is MYSQL_TYPE_STRING
*/
- cached_field_type= MYSQL_TYPE_STRING;
+ set_handler_by_field_type(MYSQL_TYPE_STRING);
arg0_field_type= args[0]->field_type();
if (arg0_field_type == MYSQL_TYPE_DATE ||
arg0_field_type == MYSQL_TYPE_DATETIME ||
arg0_field_type == MYSQL_TYPE_TIMESTAMP ||
is_date)
{
- cached_field_type= MYSQL_TYPE_DATETIME;
+ set_handler_by_field_type(MYSQL_TYPE_DATETIME);
decimals= MY_MAX(args[0]->temporal_precision(MYSQL_TYPE_DATETIME),
args[1]->temporal_precision(MYSQL_TYPE_TIME));
}
else if (arg0_field_type == MYSQL_TYPE_TIME)
{
- cached_field_type= MYSQL_TYPE_TIME;
+ set_handler_by_field_type(MYSQL_TYPE_TIME);
decimals= MY_MAX(args[0]->temporal_precision(MYSQL_TYPE_TIME),
args[1]->temporal_precision(MYSQL_TYPE_TIME));
}
- Item_temporal_func::fix_length_and_dec();
+ return Item_temporal_func::fix_length_and_dec();
}
/**
@@ -2680,7 +2739,7 @@ bool Item_func_add_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
longlong seconds;
int l_sign= sign;
- if (cached_field_type == MYSQL_TYPE_DATETIME)
+ if (Item_func_add_time::field_type() == MYSQL_TYPE_DATETIME)
{
// TIMESTAMP function OR the first argument is DATE/DATETIME/TIMESTAMP
if (get_arg0_date(&l_time1, 0) ||
@@ -2831,7 +2890,7 @@ bool Item_func_maketime::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
int len = (int)(ptr - buf) + sprintf(ptr, ":%02u:%02u", (uint)minute, (uint)second);
make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
buf, len, MYSQL_TIMESTAMP_TIME,
- NullS);
+ 0, NullS);
}
return (null_value= 0);
@@ -3137,10 +3196,10 @@ get_date_time_result_type(const char *format, uint length)
}
-void Item_func_str_to_date::fix_length_and_dec()
+bool Item_func_str_to_date::fix_length_and_dec()
{
if (agg_arg_charsets(collation, args, 2, MY_COLL_ALLOW_CONV, 1))
- return;
+ return TRUE;
if (collation.collation->mbminlen > 1)
{
#if MYSQL_VERSION_ID > 50500
@@ -3150,7 +3209,7 @@ void Item_func_str_to_date::fix_length_and_dec()
#endif
}
- cached_field_type= MYSQL_TYPE_DATETIME;
+ set_handler_by_field_type(MYSQL_TYPE_DATETIME);
decimals= TIME_SECOND_PART_DIGITS;
if ((const_item= args[1]->const_item()))
{
@@ -3165,25 +3224,25 @@ void Item_func_str_to_date::fix_length_and_dec()
get_date_time_result_type(format->ptr(), format->length());
switch (cached_format_type) {
case DATE_ONLY:
- cached_field_type= MYSQL_TYPE_DATE;
+ set_handler_by_field_type(MYSQL_TYPE_DATE);
break;
case TIME_MICROSECOND:
decimals= 6;
/* fall through */
case TIME_ONLY:
- cached_field_type= MYSQL_TYPE_TIME;
+ set_handler_by_field_type(MYSQL_TYPE_TIME);
break;
case DATE_TIME_MICROSECOND:
decimals= 6;
/* fall through */
case DATE_TIME:
- cached_field_type= MYSQL_TYPE_DATETIME;
+ set_handler_by_field_type(MYSQL_TYPE_DATETIME);
break;
}
}
}
- cached_timestamp_type= mysql_type_to_time_type(cached_field_type);
- Item_temporal_func::fix_length_and_dec();
+ cached_timestamp_type= mysql_type_to_time_type(field_type());
+ return Item_temporal_func::fix_length_and_dec();
}
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 927ce12f079..c983e1a6f8a 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -49,10 +49,13 @@ public:
Item_func_period_add(THD *thd, Item *a, Item *b): Item_int_func(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "period_add"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_period_add>(thd, mem_root, this); }
};
@@ -62,11 +65,14 @@ public:
Item_func_period_diff(THD *thd, Item *a, Item *b): Item_int_func(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "period_diff"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
+ return FALSE;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_period_diff>(thd, mem_root, this); }
};
@@ -76,20 +82,23 @@ public:
Item_func_to_days(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "to_days"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
- maybe_null=1;
+ maybe_null=1;
+ return FALSE;
}
enum_monotonicity_info get_monotonicity_info() const;
longlong val_int_endpoint(bool left_endp, bool *incl_endp);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_to_days>(thd, mem_root, this); }
};
@@ -99,30 +108,24 @@ public:
Item_func_to_seconds(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "to_seconds"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null= 1;
+ return FALSE;
}
enum_monotonicity_info get_monotonicity_info() const;
longlong val_int_endpoint(bool left_endp, bool *incl_endp);
- bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
-
- bool intro_version(uchar *int_arg)
- {
- int *input_version= (int*)int_arg;
- /* This function was introduced in 5.5 */
- int output_version= MY_MAX(*input_version, 50500);
- *input_version= output_version;
- return 0;
- }
+ bool check_partition_func_processor(void *bool_arg) { return FALSE;}
/* Only meaningful with date part and optional time part */
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_to_seconds>(thd, mem_root, this); }
};
@@ -132,18 +135,21 @@ public:
Item_func_dayofmonth(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "dayofmonth"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
- maybe_null=1;
+ maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dayofmonth>(thd, mem_root, this); }
};
@@ -155,7 +161,7 @@ public:
longlong val_int();
double val_real()
{ DBUG_ASSERT(fixed == 1); return (double) Item_func_month::val_int(); }
- String *val_str(String *str)
+ String *val_str(String *str)
{
longlong nr= val_int();
if (null_value)
@@ -165,18 +171,22 @@ public:
}
const char *func_name() const { return "month"; }
enum Item_result result_type () const { return INT_RESULT; }
- void fix_length_and_dec()
- {
+ enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+ bool fix_length_and_dec()
+ {
decimals= 0;
fix_char_length(2);
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_month>(thd, mem_root, this); }
};
@@ -187,13 +197,18 @@ public:
Item_func_monthname(THD *thd, Item *a): Item_str_func(thd, a) {}
const char *func_name() const { return "monthname"; }
String *val_str(String *str);
- void fix_length_and_dec();
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
- bool check_vcol_func_processor(uchar *int_arg) {return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool fix_length_and_dec();
+ bool check_partition_func_processor(void *int_arg) {return TRUE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC);
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_monthname>(thd, mem_root, this); }
};
@@ -203,18 +218,21 @@ public:
Item_func_dayofyear(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "dayofyear"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals= 0;
fix_char_length(3);
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_dayofyear>(thd, mem_root, this); }
};
@@ -224,18 +242,21 @@ public:
Item_func_hour(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "hour"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals=0;
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_time_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_hour>(thd, mem_root, this); }
};
@@ -245,18 +266,21 @@ public:
Item_func_minute(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "minute"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals=0;
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_time_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_minute>(thd, mem_root, this); }
};
@@ -266,18 +290,21 @@ public:
Item_func_quarter(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "quarter"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=1*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_quarter>(thd, mem_root, this); }
};
@@ -287,33 +314,50 @@ public:
Item_func_second(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "second"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_time_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_second>(thd, mem_root, this); }
};
class Item_func_week :public Item_int_func
{
public:
+ Item_func_week(THD *thd, Item *a): Item_int_func(thd, a) {}
Item_func_week(THD *thd, Item *a, Item *b): Item_int_func(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "week"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=2*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
+ return FALSE;
+ }
+ bool check_vcol_func_processor(void *arg)
+ {
+ if (arg_count == 2)
+ return FALSE;
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC);
+ }
+ bool check_valid_arguments_processor(void *int_arg)
+ {
+ return arg_count == 2;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_week>(thd, mem_root, this); }
};
class Item_func_yearweek :public Item_int_func
@@ -322,18 +366,21 @@ public:
Item_func_yearweek(THD *thd, Item *a, Item *b): Item_int_func(thd, a, b) {}
longlong val_int();
const char *func_name() const { return "yearweek"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_yearweek>(thd, mem_root, this); }
};
@@ -345,18 +392,21 @@ public:
const char *func_name() const { return "year"; }
enum_monotonicity_info get_monotonicity_info() const;
longlong val_int_endpoint(bool left_endp, bool *incl_endp);
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
max_length=4*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_year>(thd, mem_root, this); }
};
@@ -379,18 +429,22 @@ public:
return (odbc_type ? "dayofweek" : "weekday");
}
enum Item_result result_type () const { return INT_RESULT; }
- void fix_length_and_dec()
+ enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+ bool fix_length_and_dec()
{
decimals= 0;
fix_char_length(1);
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_date_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_weekday>(thd, mem_root, this); }
};
class Item_func_dayname :public Item_func_weekday
@@ -401,9 +455,13 @@ class Item_func_dayname :public Item_func_weekday
const char *func_name() const { return "dayname"; }
String *val_str(String *str);
enum Item_result result_type () const { return STRING_RESULT; }
- void fix_length_and_dec();
- bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
+ enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
+ bool fix_length_and_dec();
+ bool check_partition_func_processor(void *int_arg) {return TRUE;}
+ bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC);
+ }
};
@@ -414,7 +472,7 @@ protected:
public:
Item_func_seconds_hybrid(THD *thd): Item_func_numhybrid(thd) {}
Item_func_seconds_hybrid(THD *thd, Item *a): Item_func_numhybrid(thd, a) {}
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
if (arg_count)
decimals= args[0]->temporal_precision(arg0_expected_type());
@@ -422,6 +480,7 @@ public:
max_length=17 + (decimals ? decimals + 1 : 0);
maybe_null= true;
set_handler_by_result_type(decimals ? DECIMAL_RESULT : INT_RESULT);
+ return FALSE;
}
double real_op() { DBUG_ASSERT(0); return 0; }
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
@@ -441,26 +500,26 @@ public:
const char *func_name() const { return "unix_timestamp"; }
enum_monotonicity_info get_monotonicity_info() const;
longlong val_int_endpoint(bool left_endp, bool *incl_endp);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
/*
UNIX_TIMESTAMP() depends on the current timezone
(and thus may not be used as a partitioning function)
when its argument is NOT of the TIMESTAMP type.
*/
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_timestamp_args();
}
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- /*
- TODO: Allow UNIX_TIMESTAMP called with an argument to be a part
- of the expression for a virtual column
- */
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ if (arg_count)
+ return FALSE;
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC);
}
longlong int_op();
my_decimal *decimal_op(my_decimal* buf);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_unix_timestamp>(thd, mem_root, this); }
};
@@ -472,27 +531,28 @@ public:
Item_func_time_to_sec(THD *thd, Item *item):
Item_func_seconds_hybrid(thd, item) {}
const char *func_name() const { return "time_to_sec"; }
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_time_args();
}
longlong int_op();
my_decimal *decimal_op(my_decimal* buf);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_time_to_sec>(thd, mem_root, this); }
};
class Item_temporal_func: public Item_func
{
- ulonglong sql_mode;
+ sql_mode_t sql_mode;
public:
Item_temporal_func(THD *thd): Item_func(thd) {}
Item_temporal_func(THD *thd, Item *a): Item_func(thd, a) {}
Item_temporal_func(THD *thd, Item *a, Item *b): Item_func(thd, a, b) {}
Item_temporal_func(THD *thd, Item *a, Item *b, Item *c): Item_func(thd, a, b, c) {}
enum Item_result result_type () const { return STRING_RESULT; }
- enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
Item_result cmp_type() const { return TIME_RESULT; }
String *val_str(String *str);
longlong val_int() { return val_int_from_date(); }
@@ -512,7 +572,7 @@ public:
save_date_in_field(field);
}
#endif
- void fix_length_and_dec();
+ bool fix_length_and_dec();
};
@@ -520,20 +580,20 @@ public:
Abstract class for functions returning TIME, DATE, DATETIME or string values,
whose data type depends on parameters and is set at fix_fields time.
*/
-class Item_temporal_hybrid_func: public Item_temporal_func
+class Item_temporal_hybrid_func: public Item_temporal_func,
+ public Type_handler_hybrid_field_type
{
protected:
- enum_field_types cached_field_type; // TIME, DATE, DATETIME or STRING
String ascii_buf; // Conversion buffer
public:
Item_temporal_hybrid_func(THD *thd, Item *a, Item *b):
Item_temporal_func(thd, a, b) {}
- enum_field_types field_type() const { return cached_field_type; }
- Item_result cmp_type() const
- {
- return cached_field_type == MYSQL_TYPE_STRING ?
- STRING_RESULT : TIME_RESULT;
- }
+ enum_field_types field_type() const
+ { return Type_handler_hybrid_field_type::field_type(); }
+ enum Item_result result_type () const
+ { return Type_handler_hybrid_field_type::result_type(); }
+ enum Item_result cmp_type () const
+ { return Type_handler_hybrid_field_type::cmp_type(); }
CHARSET_INFO *charset_for_protocol() const
{
/*
@@ -543,7 +603,7 @@ public:
(which is fixed from @@collation_connection in fix_length_and_dec).
*/
DBUG_ASSERT(fixed == 1);
- return cached_field_type == MYSQL_TYPE_STRING ?
+ return Item_temporal_hybrid_func::field_type() == MYSQL_TYPE_STRING ?
collation.collation : &my_charset_bin;
}
/**
@@ -586,31 +646,39 @@ public:
};
+class Item_datetimefunc :public Item_temporal_func
+{
+public:
+ Item_datetimefunc(THD *thd): Item_temporal_func(thd) {}
+ Item_datetimefunc(THD *thd, Item *a): Item_temporal_func(thd, a) {}
+ Item_datetimefunc(THD *thd, Item *a, Item *b, Item *c):
+ Item_temporal_func(thd, a, b ,c) {}
+ enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
+};
+
+
/* Abstract CURTIME function. Children should define what time zone is used */
class Item_func_curtime :public Item_timefunc
{
MYSQL_TIME ltime;
+ query_id_t last_query_id;
public:
- Item_func_curtime(THD *thd, uint dec): Item_timefunc(thd) { decimals= dec; }
+ Item_func_curtime(THD *thd, uint dec): Item_timefunc(thd), last_query_id(0)
+ { decimals= dec; }
bool fix_fields(THD *, Item **);
- void fix_length_and_dec()
- {
- store_now_in_TIME(&ltime);
- Item_timefunc::fix_length_and_dec();
- maybe_null= false;
- }
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
/*
Abstract method that defines which time zone is used for conversion.
Converts time current time in my_time_t representation to broken-down
MYSQL_TIME representation using UTC-SYSTEM or per-thread time zone.
*/
- virtual void store_now_in_TIME(MYSQL_TIME *now_time)=0;
- bool check_vcol_func_processor(uchar *int_arg)
+ virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0;
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC);
}
+ void print(String *str, enum_query_type query_type);
};
@@ -619,7 +687,9 @@ class Item_func_curtime_local :public Item_func_curtime
public:
Item_func_curtime_local(THD *thd, uint dec): Item_func_curtime(thd, dec) {}
const char *func_name() const { return "curtime"; }
- virtual void store_now_in_TIME(MYSQL_TIME *now_time);
+ virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_curtime_local>(thd, mem_root, this); }
};
@@ -628,7 +698,9 @@ class Item_func_curtime_utc :public Item_func_curtime
public:
Item_func_curtime_utc(THD *thd, uint dec): Item_func_curtime(thd, dec) {}
const char *func_name() const { return "utc_time"; }
- virtual void store_now_in_TIME(MYSQL_TIME *now_time);
+ virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_curtime_utc>(thd, mem_root, this); }
};
@@ -636,15 +708,15 @@ public:
class Item_func_curdate :public Item_datefunc
{
+ query_id_t last_query_id;
MYSQL_TIME ltime;
public:
- Item_func_curdate(THD *thd): Item_datefunc(thd) {}
- void fix_length_and_dec();
+ Item_func_curdate(THD *thd): Item_datefunc(thd), last_query_id(0) {}
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
- virtual void store_now_in_TIME(MYSQL_TIME *now_time)=0;
- bool check_vcol_func_processor(uchar *int_arg)
+ virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0;
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC);
}
};
@@ -654,7 +726,9 @@ class Item_func_curdate_local :public Item_func_curdate
public:
Item_func_curdate_local(THD *thd): Item_func_curdate(thd) {}
const char *func_name() const { return "curdate"; }
- void store_now_in_TIME(MYSQL_TIME *now_time);
+ void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_curdate_local>(thd, mem_root, this); }
};
@@ -663,31 +737,33 @@ class Item_func_curdate_utc :public Item_func_curdate
public:
Item_func_curdate_utc(THD *thd): Item_func_curdate(thd) {}
const char *func_name() const { return "utc_date"; }
- void store_now_in_TIME(MYSQL_TIME *now_time);
+ void store_now_in_TIME(THD* thd, MYSQL_TIME *now_time);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_curdate_utc>(thd, mem_root, this); }
};
/* Abstract CURRENT_TIMESTAMP function. See also Item_func_curtime */
-
-class Item_func_now :public Item_temporal_func
+class Item_func_now :public Item_datetimefunc
{
MYSQL_TIME ltime;
+ query_id_t last_query_id;
public:
- Item_func_now(THD *thd, uint dec): Item_temporal_func(thd) { decimals= dec; }
+ Item_func_now(THD *thd, uint dec): Item_datetimefunc(thd), last_query_id(0)
+ { decimals= dec; }
bool fix_fields(THD *, Item **);
- void fix_length_and_dec()
- {
- store_now_in_TIME(&ltime);
- Item_temporal_func::fix_length_and_dec();
- maybe_null= false;
- }
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
- virtual void store_now_in_TIME(MYSQL_TIME *now_time)=0;
- bool check_vcol_func_processor(uchar *int_arg)
+ virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0;
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ /*
+ NOW is safe for replication as slaves will run with same time as
+ master
+ */
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_TIME_FUNC);
}
+ void print(String *str, enum_query_type query_type);
};
@@ -695,9 +771,12 @@ class Item_func_now_local :public Item_func_now
{
public:
Item_func_now_local(THD *thd, uint dec): Item_func_now(thd, dec) {}
- const char *func_name() const { return "now"; }
- virtual void store_now_in_TIME(MYSQL_TIME *now_time);
+ const char *func_name() const { return "current_timestamp"; }
+ int save_in_field(Field *field, bool no_conversions);
+ virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time);
virtual enum Functype functype() const { return NOW_FUNC; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_now_local>(thd, mem_root, this); }
};
@@ -706,7 +785,15 @@ class Item_func_now_utc :public Item_func_now
public:
Item_func_now_utc(THD *thd, uint dec): Item_func_now(thd, dec) {}
const char *func_name() const { return "utc_timestamp"; }
- virtual void store_now_in_TIME(MYSQL_TIME *now_time);
+ virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time);
+ virtual enum Functype functype() const { return NOW_UTC_FUNC; }
+ virtual bool check_vcol_func_processor(void *arg)
+ {
+ return mark_unsupported_function(func_name(), "()", arg,
+ VCOL_TIME_FUNC | VCOL_NON_DETERMINISTIC);
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_now_utc>(thd, mem_root, this); }
};
@@ -720,14 +807,17 @@ public:
Item_func_sysdate_local(THD *thd, uint dec): Item_func_now(thd, dec) {}
bool const_item() const { return 0; }
const char *func_name() const { return "sysdate"; }
- void store_now_in_TIME(MYSQL_TIME *now_time);
+ void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time);
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
- void update_used_tables()
+ table_map used_tables() const { return RAND_TABLE_BIT; }
+ bool check_vcol_func_processor(void *arg)
{
- Item_func_now::update_used_tables();
- maybe_null= 0;
- used_tables_cache|= RAND_TABLE_BIT;
+ return mark_unsupported_function(func_name(), "()", arg,
+ VCOL_TIME_FUNC | VCOL_NON_DETERMINISTIC);
}
+ virtual enum Functype functype() const { return SYSDATE_FUNC; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_sysdate_local>(thd, mem_root, this); }
};
@@ -737,12 +827,14 @@ public:
Item_func_from_days(THD *thd, Item *a): Item_datefunc(thd, a) {}
const char *func_name() const { return "from_days"; }
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return has_date_args() || has_time_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_from_days>(thd, mem_root, this); }
};
@@ -758,20 +850,30 @@ public:
String *val_str(String *str);
const char *func_name() const
{ return is_time_format ? "time_format" : "date_format"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
uint format_length(const String *format);
bool eq(const Item *item, bool binary_cmp) const;
+ bool check_vcol_func_processor(void *arg)
+ {
+ if (is_time_format)
+ return false;
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC);
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_date_format>(thd, mem_root, this); }
};
-class Item_func_from_unixtime :public Item_temporal_func
+class Item_func_from_unixtime :public Item_datetimefunc
{
Time_zone *tz;
public:
- Item_func_from_unixtime(THD *thd, Item *a): Item_temporal_func(thd, a) {}
+ Item_func_from_unixtime(THD *thd, Item *a): Item_datetimefunc(thd, a) {}
const char *func_name() const { return "from_unixtime"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_from_unixtime>(thd, mem_root, this); }
};
@@ -789,7 +891,7 @@ class Time_zone;
tables can be used during this function calculation for loading time zone
descriptions.
*/
-class Item_func_convert_tz :public Item_temporal_func
+class Item_func_convert_tz :public Item_datetimefunc
{
/*
If time zone parameters are constants we are caching objects that
@@ -801,11 +903,13 @@ class Item_func_convert_tz :public Item_temporal_func
Time_zone *from_tz, *to_tz;
public:
Item_func_convert_tz(THD *thd, Item *a, Item *b, Item *c):
- Item_temporal_func(thd, a, b, c), from_tz_cached(0), to_tz_cached(0) {}
+ Item_datetimefunc(thd, a, b, c), from_tz_cached(0), to_tz_cached(0) {}
const char *func_name() const { return "convert_tz"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
void cleanup();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_convert_tz>(thd, mem_root, this); }
};
@@ -814,12 +918,14 @@ class Item_func_sec_to_time :public Item_timefunc
public:
Item_func_sec_to_time(THD *thd, Item *item): Item_timefunc(thd, item) {}
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals= MY_MIN(args[0]->decimals, TIME_SECOND_PART_DIGITS);
- Item_timefunc::fix_length_and_dec();
+ return Item_timefunc::fix_length_and_dec();
}
const char *func_name() const { return "sec_to_time"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_sec_to_time>(thd, mem_root, this); }
};
@@ -833,10 +939,14 @@ public:
Item_temporal_hybrid_func(thd, a, b),int_type(type_arg),
date_sub_interval(neg_arg) {}
const char *func_name() const { return "date_add_interval"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
bool eq(const Item *item, bool binary_cmp) const;
void print(String *str, enum_query_type query_type);
+ enum precedence precedence() const { return ADDINTERVAL_PRECEDENCE; }
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_date_add_interval>(thd, mem_root, this); }
};
@@ -897,12 +1007,17 @@ class Item_extract :public Item_int_func
longlong val_int();
enum Functype functype() const { return EXTRACT_FUNC; }
const char *func_name() const { return "extract"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool eq(const Item *item, bool binary_cmp) const;
void print(String *str, enum_query_type query_type);
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg)
+ {
+ if (int_type != INTERVAL_WEEK)
+ return FALSE;
+ return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC);
+ }
+ bool check_valid_arguments_processor(void *int_arg)
{
switch (int_type) {
case INTERVAL_YEAR:
@@ -940,6 +1055,9 @@ class Item_extract :public Item_int_func
}
Field *create_field_for_create_select(TABLE *table)
{ return tmp_table_field_from_field_type(table, false, false); }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_extract>(thd, mem_root, this); }
};
@@ -961,8 +1079,11 @@ public:
bool eq(const Item *item, bool binary_cmp) const;
const char *func_name() const { return "cast_as_char"; }
String *val_str(String *a);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
void print(String *str, enum_query_type query_type);
+ bool need_parentheses_in_default() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_char_typecast>(thd, mem_root, this); }
};
@@ -972,11 +1093,11 @@ public:
Item_temporal_typecast(THD *thd, Item *a): Item_temporal_func(thd, a) {}
virtual const char *cast_type() const = 0;
void print(String *str, enum_query_type query_type);
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
if (decimals == NOT_FIXED_DEC)
decimals= args[0]->temporal_precision(field_type());
- Item_temporal_func::fix_length_and_dec();
+ return Item_temporal_func::fix_length_and_dec();
}
};
@@ -988,6 +1109,8 @@ public:
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
const char *cast_type() const { return "date"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_date_typecast>(thd, mem_root, this); }
};
@@ -1000,6 +1123,8 @@ public:
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
const char *cast_type() const { return "time"; }
enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_time_typecast>(thd, mem_root, this); }
};
@@ -1012,6 +1137,8 @@ public:
const char *cast_type() const { return "datetime"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_datetime_typecast>(thd, mem_root, this); }
};
@@ -1023,6 +1150,8 @@ public:
const char *func_name() const { return "makedate"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_makedate>(thd, mem_root, this); }
};
@@ -1035,10 +1164,12 @@ public:
Item_func_add_time(THD *thd, Item *a, Item *b, bool type_arg, bool neg_arg):
Item_temporal_hybrid_func(thd, a, b), is_date(type_arg)
{ sign= neg_arg ? -1 : 1; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
void print(String *str, enum_query_type query_type);
const char *func_name() const { return "add_time"; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_add_time>(thd, mem_root, this); }
};
class Item_func_timediff :public Item_timefunc
@@ -1046,13 +1177,15 @@ class Item_func_timediff :public Item_timefunc
public:
Item_func_timediff(THD *thd, Item *a, Item *b): Item_timefunc(thd, a, b) {}
const char *func_name() const { return "timediff"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals= MY_MAX(args[0]->temporal_precision(MYSQL_TYPE_TIME),
args[1]->temporal_precision(MYSQL_TYPE_TIME));
- Item_timefunc::fix_length_and_dec();
+ return Item_timefunc::fix_length_and_dec();
}
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_timediff>(thd, mem_root, this); }
};
class Item_func_maketime :public Item_timefunc
@@ -1061,13 +1194,15 @@ public:
Item_func_maketime(THD *thd, Item *a, Item *b, Item *c):
Item_timefunc(thd, a, b, c)
{}
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals= MY_MIN(args[2]->decimals, TIME_SECOND_PART_DIGITS);
- Item_timefunc::fix_length_and_dec();
+ return Item_timefunc::fix_length_and_dec();
}
const char *func_name() const { return "maketime"; }
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_maketime>(thd, mem_root, this); }
};
@@ -1077,17 +1212,20 @@ public:
Item_func_microsecond(THD *thd, Item *a): Item_int_func(thd, a) {}
longlong val_int();
const char *func_name() const { return "microsecond"; }
- void fix_length_and_dec()
- {
+ bool fix_length_and_dec()
+ {
decimals=0;
maybe_null=1;
+ return FALSE;
}
- bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
- bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
- bool check_valid_arguments_processor(uchar *int_arg)
+ bool check_partition_func_processor(void *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(void *arg) { return FALSE;}
+ bool check_valid_arguments_processor(void *int_arg)
{
return !has_time_args();
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_microsecond>(thd, mem_root, this); }
};
@@ -1099,12 +1237,15 @@ public:
Item_int_func(thd, a, b), int_type(type_arg) {}
const char *func_name() const { return "timestampdiff"; }
longlong val_int();
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
decimals=0;
maybe_null=1;
+ return FALSE;
}
virtual void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_timestamp_diff>(thd, mem_root, this); }
};
@@ -1122,13 +1263,16 @@ public:
{}
String *val_str_ascii(String *str);
const char *func_name() const { return "get_format"; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
maybe_null= 1;
decimals=0;
fix_length_and_charset(17, default_charset());
+ return FALSE;
}
virtual void print(String *str, enum_query_type query_type);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_get_format>(thd, mem_root, this); }
};
@@ -1146,7 +1290,9 @@ public:
{}
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
const char *func_name() const { return "str_to_date"; }
- void fix_length_and_dec();
+ bool fix_length_and_dec();
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_str_to_date>(thd, mem_root, this); }
};
@@ -1156,6 +1302,8 @@ public:
Item_func_last_day(THD *thd, Item *a): Item_datefunc(thd, a) {}
const char *func_name() const { return "last_day"; }
bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_last_day>(thd, mem_root, this); }
};
#endif /* ITEM_TIMEFUNC_INCLUDED */
diff --git a/sql/item_windowfunc.cc b/sql/item_windowfunc.cc
new file mode 100644
index 00000000000..7cb07af440b
--- /dev/null
+++ b/sql/item_windowfunc.cc
@@ -0,0 +1,451 @@
+#include "item_windowfunc.h"
+#include "my_dbug.h"
+#include "my_global.h"
+#include "sql_select.h" // test if group changed
+
+
+bool
+Item_window_func::resolve_window_name(THD *thd)
+{
+ if (window_spec)
+ {
+ /* The window name has been already resolved */
+ return false;
+ }
+ DBUG_ASSERT(window_name != NULL && window_spec == NULL);
+ char *ref_name= window_name->str;
+
+ /* !TODO: Add the code to resolve ref_name in outer queries */
+ /*
+ First look for the definition of the window with 'window_name'
+ in the current select
+ */
+ List<Window_spec> curr_window_specs=
+ List<Window_spec> (thd->lex->current_select->window_specs);
+ List_iterator_fast<Window_spec> it(curr_window_specs);
+ Window_spec *win_spec;
+ while((win_spec= it++))
+ {
+ char *win_spec_name= win_spec->name();
+ if (win_spec_name &&
+ my_strcasecmp(system_charset_info, ref_name, win_spec_name) == 0)
+ {
+ window_spec= win_spec;
+ break;
+ }
+ }
+
+ if (!window_spec)
+ {
+ my_error(ER_WRONG_WINDOW_SPEC_NAME, MYF(0), ref_name);
+ return true;
+ }
+
+ return false;
+}
+
+
+void
+Item_window_func::update_used_tables()
+{
+ used_tables_cache= 0;
+ window_func()->update_used_tables();
+ used_tables_cache|= window_func()->used_tables();
+ for (ORDER *ord= window_spec->partition_list->first; ord; ord=ord->next)
+ {
+ Item *item= *ord->item;
+ item->update_used_tables();
+ used_tables_cache|= item->used_tables();
+ }
+ for (ORDER *ord= window_spec->order_list->first; ord; ord=ord->next)
+ {
+ Item *item= *ord->item;
+ item->update_used_tables();
+ used_tables_cache|= item->used_tables();
+ }
+}
+
+
+bool
+Item_window_func::fix_fields(THD *thd, Item **ref)
+{
+ DBUG_ASSERT(fixed == 0);
+
+ if (!thd->lex->current_select ||
+ (thd->lex->current_select->context_analysis_place != SELECT_LIST &&
+ thd->lex->current_select->context_analysis_place != IN_ORDER_BY))
+ {
+ my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0));
+ return true;
+ }
+
+ if (window_name && resolve_window_name(thd))
+ return true;
+
+ if (window_spec->window_frame && is_frame_prohibited())
+ {
+ my_error(ER_NOT_ALLOWED_WINDOW_FRAME, MYF(0), window_func()->func_name());
+ return true;
+ }
+
+ if (window_spec->order_list->elements == 0 && is_order_list_mandatory())
+ {
+ my_error(ER_NO_ORDER_LIST_IN_WINDOW_SPEC, MYF(0), window_func()->func_name());
+ return true;
+ }
+ /*
+ TODO: why the last parameter is 'ref' in this call? What if window_func
+ decides to substitute itself for something else and does *ref=.... ?
+ This will substitute *this (an Item_window_func object) with Item_sum
+ object. Is this the intent?
+ */
+ if (window_func()->fix_fields(thd, ref))
+ return true;
+
+ const_item_cache= false;
+ with_window_func= true;
+ with_sum_func= false;
+
+ if (fix_length_and_dec())
+ return TRUE;
+
+ max_length= window_func()->max_length;
+ maybe_null= window_func()->maybe_null;
+
+ fixed= 1;
+ set_phase_to_initial();
+ return false;
+}
+
+
+/*
+ @detail
+ Window function evaluates its arguments when it is scanning the temporary
+ table in partition/order-by order. That is, arguments should be read from
+ the temporary table, not from the original base columns.
+
+ In order for this to work, we need to call "split_sum_func" for each
+ argument. The effect of the call is:
+ 1. the argument is added into ref_pointer_array. This will cause the
+ argument to be saved in the temp.table
+ 2. argument item is replaced with an Item_ref object. this object refers
+ the argument through the ref_pointer_array.
+
+ then, change_to_use_tmp_fields() will replace ref_pointer_array with an
+ array that points to the temp.table fields.
+ This way, when window_func attempts to evaluate its arguments, it will use
+ Item_ref objects which will read data from the temp.table.
+
+ Note: Before window functions, aggregate functions never needed to do such
+ transformations on their arguments. This is because grouping operation
+ does not need to read from the temp.table.
+ (Q: what happens when we first sort and then do grouping in a
+ group-after-group mode? don't group by items read from temp.table, then?)
+*/
+
+void Item_window_func::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
+ List<Item> &fields, uint flags)
+{
+ for (uint i=0; i < window_func()->argument_count(); i++)
+ {
+ Item **p_item= &window_func()->arguments()[i];
+ (*p_item)->split_sum_func2(thd, ref_pointer_array, fields, p_item, flags);
+ }
+ window_func()->setup_caches(thd);
+}
+
+
+/*
+ This must be called before attempting to compute the window function values.
+
+ @detail
+ If we attempt to do it in fix_fields(), partition_fields will refer
+ to the original window function arguments.
+ We need it to refer to temp.table columns.
+*/
+
+void Item_sum_rank::setup_window_func(THD *thd, Window_spec *window_spec)
+{
+ /* TODO: move this into Item_window_func? */
+ peer_tracker = new Group_bound_tracker(thd, window_spec->order_list);
+ peer_tracker->init();
+ clear();
+}
+
+void Item_sum_dense_rank::setup_window_func(THD *thd, Window_spec *window_spec)
+{
+ /* TODO: consider moving this && Item_sum_rank's implementation */
+ peer_tracker = new Group_bound_tracker(thd, window_spec->order_list);
+ peer_tracker->init();
+ clear();
+}
+
+bool Item_sum_dense_rank::add()
+{
+ if (peer_tracker->check_if_next_group() || first_add)
+ {
+ first_add= false;
+ dense_rank++;
+ }
+
+ return false;
+}
+
+
+bool Item_sum_rank::add()
+{
+ row_number++;
+ if (peer_tracker->check_if_next_group())
+ {
+ /* Row value changed */
+ cur_rank= row_number;
+ }
+ return false;
+}
+
+bool Item_sum_percent_rank::add()
+{
+ row_number++;
+ if (peer_tracker->check_if_next_group())
+ {
+ /* Row value changed. */
+ cur_rank= row_number;
+ }
+ return false;
+}
+
+void Item_sum_percent_rank::setup_window_func(THD *thd, Window_spec *window_spec)
+{
+ /* TODO: move this into Item_window_func? */
+ peer_tracker = new Group_bound_tracker(thd, window_spec->order_list);
+ peer_tracker->init();
+ clear();
+}
+
+
+bool Item_sum_hybrid_simple::fix_fields(THD *thd, Item **ref)
+{
+ DBUG_ASSERT(fixed == 0);
+
+ if (init_sum_func_check(thd))
+ return TRUE;
+
+ for (uint i= 0; i < arg_count; i++)
+ {
+ Item *item= args[i];
+ // 'item' can be changed during fix_fields
+ if ((!item->fixed && item->fix_fields(thd, args)) ||
+ (item= args[i])->check_cols(1))
+ return TRUE;
+ }
+ Type_std_attributes::set(args[0]);
+ for (uint i= 0; i < arg_count && !with_subselect; i++)
+ with_subselect= with_subselect || args[i]->with_subselect;
+
+ Item *item2= args[0]->real_item();
+ if (item2->type() == Item::FIELD_ITEM)
+ set_handler_by_field_type(((Item_field*) item2)->field->type());
+ else if (args[0]->cmp_type() == TIME_RESULT)
+ set_handler_by_field_type(item2->field_type());
+ else
+ set_handler_by_result_type(item2->result_type(),
+ max_length, collation.collation);
+
+ switch (Item_sum_hybrid_simple::result_type()) {
+ case INT_RESULT:
+ case DECIMAL_RESULT:
+ case STRING_RESULT:
+ break;
+ case REAL_RESULT:
+ max_length= float_length(decimals);
+ break;
+ case ROW_RESULT:
+ case TIME_RESULT:
+ DBUG_ASSERT(0); // XXX(cvicentiu) Should this never happen?
+ return TRUE;
+ };
+ setup_hybrid(thd, args[0]);
+ /* MIN/MAX can return NULL for empty set independent of the used column */
+ maybe_null= 1;
+ result_field=0;
+ null_value=1;
+ if (fix_length_and_dec())
+ return TRUE;
+
+ if (check_sum_func(thd, ref))
+ return TRUE;
+ for (uint i= 0; i < arg_count; i++)
+ {
+ orig_args[i]= args[i];
+ }
+ fixed= 1;
+ return FALSE;
+}
+
+bool Item_sum_hybrid_simple::add()
+{
+ value->store(args[0]);
+ value->cache_value();
+ null_value= value->null_value;
+ return false;
+}
+
+void Item_sum_hybrid_simple::setup_hybrid(THD *thd, Item *item)
+{
+ if (!(value= Item_cache::get_cache(thd, item, item->cmp_type())))
+ return;
+ value->setup(thd, item);
+ value->store(item);
+ if (!item->const_item())
+ value->set_used_tables(RAND_TABLE_BIT);
+ collation.set(item->collation);
+}
+
+double Item_sum_hybrid_simple::val_real()
+{
+ DBUG_ASSERT(fixed == 1);
+ if (null_value)
+ return 0.0;
+ double retval= value->val_real();
+ if ((null_value= value->null_value))
+ DBUG_ASSERT(retval == 0.0);
+ return retval;
+}
+
+longlong Item_sum_hybrid_simple::val_int()
+{
+ DBUG_ASSERT(fixed == 1);
+ if (null_value)
+ return 0;
+ longlong retval= value->val_int();
+ if ((null_value= value->null_value))
+ DBUG_ASSERT(retval == 0);
+ return retval;
+}
+
+my_decimal *Item_sum_hybrid_simple::val_decimal(my_decimal *val)
+{
+ DBUG_ASSERT(fixed == 1);
+ if (null_value)
+ return 0;
+ my_decimal *retval= value->val_decimal(val);
+ if ((null_value= value->null_value))
+ DBUG_ASSERT(retval == NULL);
+ return retval;
+}
+
+String *
+Item_sum_hybrid_simple::val_str(String *str)
+{
+ DBUG_ASSERT(fixed == 1);
+ if (null_value)
+ return 0;
+ String *retval= value->val_str(str);
+ if ((null_value= value->null_value))
+ DBUG_ASSERT(retval == NULL);
+ return retval;
+}
+
+Field *Item_sum_hybrid_simple::create_tmp_field(bool group, TABLE *table)
+{
+ DBUG_ASSERT(0);
+ return NULL;
+}
+
+void Item_sum_hybrid_simple::reset_field()
+{
+ switch(Item_sum_hybrid_simple::result_type()) {
+ case STRING_RESULT:
+ {
+ char buff[MAX_FIELD_WIDTH];
+ String tmp(buff,sizeof(buff),result_field->charset()),*res;
+
+ res=args[0]->val_str(&tmp);
+ if (args[0]->null_value)
+ {
+ result_field->set_null();
+ result_field->reset();
+ }
+ else
+ {
+ result_field->set_notnull();
+ result_field->store(res->ptr(),res->length(),tmp.charset());
+ }
+ break;
+ }
+ case INT_RESULT:
+ {
+ longlong nr=args[0]->val_int();
+
+ if (maybe_null)
+ {
+ if (args[0]->null_value)
+ {
+ nr=0;
+ result_field->set_null();
+ }
+ else
+ result_field->set_notnull();
+ }
+ result_field->store(nr, unsigned_flag);
+ break;
+ }
+ case REAL_RESULT:
+ {
+ double nr= args[0]->val_real();
+
+ if (maybe_null)
+ {
+ if (args[0]->null_value)
+ {
+ nr=0.0;
+ result_field->set_null();
+ }
+ else
+ result_field->set_notnull();
+ }
+ result_field->store(nr);
+ break;
+ }
+ case DECIMAL_RESULT:
+ {
+ my_decimal value_buff, *arg_dec= args[0]->val_decimal(&value_buff);
+
+ if (maybe_null)
+ {
+ if (args[0]->null_value)
+ result_field->set_null();
+ else
+ result_field->set_notnull();
+ }
+ /*
+ We must store zero in the field as we will use the field value in
+ add()
+ */
+ if (!arg_dec) // Null
+ arg_dec= &decimal_zero;
+ result_field->store_decimal(arg_dec);
+ break;
+ }
+ case ROW_RESULT:
+ case TIME_RESULT:
+ DBUG_ASSERT(0);
+ }
+}
+
+void Item_sum_hybrid_simple::update_field()
+{
+ DBUG_ASSERT(0);
+}
+
+void Item_window_func::print(String *str, enum_query_type query_type)
+{
+ window_func()->print(str, query_type);
+ str->append(" over ");
+#ifndef DBUG_OFF
+ if (!window_spec) // one can call dbug_print_item() anytime in gdb
+ str->append(window_name);
+ else
+#endif
+ window_spec->print(str, query_type);
+}
diff --git a/sql/item_windowfunc.h b/sql/item_windowfunc.h
new file mode 100644
index 00000000000..21270733051
--- /dev/null
+++ b/sql/item_windowfunc.h
@@ -0,0 +1,975 @@
+#ifndef ITEM_WINDOWFUNC_INCLUDED
+#define ITEM_WINDOWFUNC_INCLUDED
+
+#include "my_global.h"
+#include "item.h"
+
+class Window_spec;
+
+
+int test_if_group_changed(List<Cached_item> &list);
+
+/* A wrapper around test_if_group_changed */
+class Group_bound_tracker
+{
+public:
+
+ // Caches one Cached_item per ORDER BY element of the partition/order list.
+ // NOTE(review): the result of new_Cached_item() is pushed without a NULL
+ // check — presumably OOM is handled by the mem_root machinery; confirm.
+ Group_bound_tracker(THD *thd, SQL_I_List<ORDER> *list)
+ {
+ for (ORDER *curr = list->first; curr; curr=curr->next)
+ {
+ Cached_item *tmp= new_Cached_item(thd, curr->item[0], TRUE);
+ group_fields.push_back(tmp);
+ }
+ }
+
+ // Must be called before the first check_if_next_group(): the constructor
+ // leaves first_check uninitialized.
+ void init()
+ {
+ first_check= true;
+ }
+
+ void cleanup()
+ {
+ group_fields.empty();
+ }
+
+ /*
+ Check if the current row is in a different group than the previous row
+ this function was called for.
+ XXX: Side-effect: The new row's group becomes the current row's group.
+
+ Returns true if there is a change between the current_group and the cached
+ value, or if it is the first check after a call to init.
+ */
+ bool check_if_next_group()
+ {
+ if (test_if_group_changed(group_fields) > -1 || first_check)
+ {
+ first_check= false;
+ return true;
+ }
+ return false;
+ }
+
+ /*
+ Check if the current row is in a different group than the previous row
+ check_if_next_group was called for.
+
+ Compares the groups without the additional side effect of updating the
+ current cached values.
+ */
+ int compare_with_cache()
+ {
+ List_iterator<Cached_item> li(group_fields);
+ Cached_item *ptr;
+ int res;
+ while ((ptr= li++))
+ {
+ // First non-zero comparison decides; 0 means all columns matched.
+ if ((res= ptr->cmp_read_only()))
+ return res;
+ }
+ return 0;
+ }
+
+private:
+ List<Cached_item> group_fields;
+ /*
+ During the first check_if_next_group, the list of cached_items is not
+ initialized. The compare function will return that the items match if
+ the field's value is the same as the Cached_item's default value (0).
+ This flag makes sure that we always return true during the first check.
+
+ XXX This is better to be implemented within test_if_group_changed, but
+ since it is used in other parts of the codebase, we keep it here for now.
+ */
+ bool first_check;
+};
+
+/*
+ ROW_NUMBER() OVER (...)
+
+ @detail
+ - This is a Window function (not just an aggregate)
+ - It can be computed by doing one pass over select output, provided
+ the output is sorted according to the window definition.
+*/
+
+// ROW_NUMBER(): a simple per-partition counter. clear() is invoked at
+// partition start, add() once per row; val_int() returns the running count.
+class Item_sum_row_number: public Item_sum_int
+{
+ longlong count;
+
+public:
+
+ Item_sum_row_number(THD *thd)
+ : Item_sum_int(thd), count(0) {}
+
+ void clear()
+ {
+ count= 0;
+ }
+
+ bool add()
+ {
+ count++;
+ return false;
+ }
+
+ // No-op: value is never accumulated through a tmp-table field.
+ void update_field() {}
+
+ enum Sumfunctype sum_func() const
+ {
+ return ROW_NUMBER_FUNC;
+ }
+
+ longlong val_int()
+ {
+ return count;
+ }
+ const char*func_name() const
+ {
+ return "row_number";
+ }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_row_number>(thd, mem_root, this); }
+};
+
+
+/*
+ RANK() OVER (...) Windowing function
+
+ @detail
+ - This is a Window function (not just an aggregate)
+ - It can be computed by doing one pass over select output, provided
+ the output is sorted according to the window definition.
+
+ The function is defined as:
+
+ "The rank of row R is defined as 1 (one) plus the number of rows that
+ precede R and are not peers of R"
+
+ "This implies that if two or more rows are not distinct with respect to
+ the window ordering, then there will be one or more rows with the same rank."
+*/
+
+class Item_sum_rank: public Item_sum_int
+{
+protected:
+ longlong row_number; // just ROW_NUMBER()
+ longlong cur_rank; // current value
+
+ // Detects peer-group boundaries in the ORDER BY列... set up lazily by
+ // setup_window_func(); owned and destroyed in cleanup().
+ Group_bound_tracker *peer_tracker;
+public:
+
+ Item_sum_rank(THD *thd) : Item_sum_int(thd), peer_tracker(NULL) {}
+
+ void clear()
+ {
+ /* This is called on partition start */
+ cur_rank= 1;
+ row_number= 0;
+ }
+
+ bool add();
+
+ longlong val_int()
+ {
+ return cur_rank;
+ }
+
+ void update_field() {}
+ /*
+ void reset_field();
+ TODO: ^^ what does this do ? It is not called ever?
+ */
+
+ enum Sumfunctype sum_func () const
+ {
+ return RANK_FUNC;
+ }
+
+ const char*func_name() const
+ {
+ return "rank";
+ }
+
+ void setup_window_func(THD *thd, Window_spec *window_spec);
+
+ void cleanup()
+ {
+ if (peer_tracker)
+ {
+ peer_tracker->cleanup();
+ delete peer_tracker;
+ peer_tracker= NULL;
+ }
+ Item_sum_int::cleanup();
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_rank>(thd, mem_root, this); }
+};
+
+
+/*
+ DENSE_RANK() OVER (...) Windowing function
+
+ @detail
+ - This is a Window function (not just an aggregate)
+ - It can be computed by doing one pass over select output, provided
+ the output is sorted according to the window definition.
+
+ The function is defined as:
+
+ "If DENSE_RANK is specified, then the rank of row R is defined as the
+ number of rows preceding and including R that are distinct with respect
+ to the window ordering"
+
+ "This implies that there are no gaps in the sequential rank numbering of
+ rows in each window partition."
+*/
+
+
+class Item_sum_dense_rank: public Item_sum_int
+{
+ longlong dense_rank;
+ // True until the first add() of a partition; lets add() treat the first
+ // row as a new peer group.
+ bool first_add;
+ Group_bound_tracker *peer_tracker;
+ public:
+ /*
+ XXX(cvicentiu) This class could potentially be implemented in the rank
+ class, with a switch for the DENSE case.
+ */
+ void clear()
+ {
+ dense_rank= 0;
+ first_add= true;
+ }
+ bool add();
+ void update_field() {}
+ longlong val_int()
+ {
+ return dense_rank;
+ }
+
+ Item_sum_dense_rank(THD *thd)
+ : Item_sum_int(thd), dense_rank(0), first_add(true), peer_tracker(NULL) {}
+ enum Sumfunctype sum_func () const
+ {
+ return DENSE_RANK_FUNC;
+ }
+
+ const char*func_name() const
+ {
+ return "dense_rank";
+ }
+
+ void setup_window_func(THD *thd, Window_spec *window_spec);
+
+ void cleanup()
+ {
+ if (peer_tracker)
+ {
+ peer_tracker->cleanup();
+ delete peer_tracker;
+ peer_tracker= NULL;
+ }
+ Item_sum_int::cleanup();
+ }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_dense_rank>(thd, mem_root, this); }
+};
+
+/*
+  Common base for value-picking window functions (FIRST_VALUE, LAST_VALUE,
+  NTH_VALUE, LEAD, LAG): remembers a single argument value in an Item_cache
+  ("value") rather than aggregating. The concrete result type is resolved at
+  fix time via Type_handler_hybrid_field_type.
+*/
+class Item_sum_hybrid_simple : public Item_sum,
+ public Type_handler_hybrid_field_type
+{
+ public:
+ Item_sum_hybrid_simple(THD *thd, Item *arg):
+ Item_sum(thd, arg),
+ Type_handler_hybrid_field_type(MYSQL_TYPE_LONGLONG),
+ value(NULL)
+ { collation.set(&my_charset_bin); }
+
+ // Two-argument form for functions taking an offset (NTH_VALUE/LEAD/LAG).
+ Item_sum_hybrid_simple(THD *thd, Item *arg1, Item *arg2):
+ Item_sum(thd, arg1, arg2),
+ Type_handler_hybrid_field_type(MYSQL_TYPE_LONGLONG),
+ value(NULL)
+ { collation.set(&my_charset_bin); }
+
+ bool add();
+ bool fix_fields(THD *, Item **);
+ void setup_hybrid(THD *thd, Item *item);
+ double val_real();
+ longlong val_int();
+ my_decimal *val_decimal(my_decimal *);
+ void reset_field();
+ String *val_str(String *);
+ /* TODO(cvicentiu) copied from Item_sum_hybrid, what does it do? */
+ bool keep_field_type(void) const { return 1; }
+ enum Item_result result_type() const
+ { return Type_handler_hybrid_field_type::result_type(); }
+ enum Item_result cmp_type() const
+ { return Type_handler_hybrid_field_type::cmp_type(); }
+ enum enum_field_types field_type() const
+ { return Type_handler_hybrid_field_type::field_type(); }
+ void update_field();
+ Field *create_tmp_field(bool group, TABLE *table);
+ // Called on partition start: forget the remembered value.
+ void clear()
+ {
+ value->clear();
+ null_value= 1;
+ }
+
+ private:
+ // Cache holding the currently remembered argument value; allocated in
+ // setup_hybrid()/fix_fields() (NULL until then).
+ Item_cache *value;
+};
+
+/*
+ This item will remember the first value added to it. It will not update
+ the value unless it is cleared.
+*/
+// FIRST_VALUE(expr): keeps the first value added after a clear().
+class Item_sum_first_value : public Item_sum_hybrid_simple
+{
+ public:
+ Item_sum_first_value(THD* thd, Item* arg_expr) :
+ Item_sum_hybrid_simple(thd, arg_expr) {}
+
+
+ enum Sumfunctype sum_func () const
+ {
+ return FIRST_VALUE_FUNC;
+ }
+
+ const char*func_name() const
+ {
+ return "first_value";
+ }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_first_value>(thd, mem_root, this); }
+};
+
+/*
+ This item will remember the last value added to it.
+
+ This item does not support removal, and can be cleared only by calling
+ clear().
+*/
+// LAST_VALUE(expr): keeps the most recent value added; reset via clear().
+class Item_sum_last_value : public Item_sum_hybrid_simple
+{
+ public:
+ Item_sum_last_value(THD* thd, Item* arg_expr) :
+ Item_sum_hybrid_simple(thd, arg_expr) {}
+
+ enum Sumfunctype sum_func() const
+ {
+ return LAST_VALUE_FUNC;
+ }
+
+ const char*func_name() const
+ {
+ return "last_value";
+ }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_last_value>(thd, mem_root, this); }
+};
+
+// NTH_VALUE(expr, n): args[0] is the expression, args[1] the row offset.
+class Item_sum_nth_value : public Item_sum_hybrid_simple
+{
+ public:
+ Item_sum_nth_value(THD *thd, Item *arg_expr, Item* offset_expr) :
+ Item_sum_hybrid_simple(thd, arg_expr, offset_expr) {}
+
+ enum Sumfunctype sum_func() const
+ {
+ return NTH_VALUE_FUNC;
+ }
+
+ const char*func_name() const
+ {
+ return "nth_value";
+ }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_nth_value>(thd, mem_root, this); }
+};
+
+// LEAD(expr, offset): value of expr at a following row within the partition.
+class Item_sum_lead : public Item_sum_hybrid_simple
+{
+ public:
+ Item_sum_lead(THD *thd, Item *arg_expr, Item* offset_expr) :
+ Item_sum_hybrid_simple(thd, arg_expr, offset_expr) {}
+
+ enum Sumfunctype sum_func() const
+ {
+ return LEAD_FUNC;
+ }
+
+ const char*func_name() const
+ {
+ return "lead";
+ }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_lead>(thd, mem_root, this); }
+};
+
+// LAG(expr, offset): value of expr at a preceding row within the partition.
+class Item_sum_lag : public Item_sum_hybrid_simple
+{
+ public:
+ Item_sum_lag(THD *thd, Item *arg_expr, Item* offset_expr) :
+ Item_sum_hybrid_simple(thd, arg_expr, offset_expr) {}
+
+ enum Sumfunctype sum_func() const
+ {
+ return LAG_FUNC;
+ }
+
+ const char*func_name() const
+ {
+ return "lag";
+ }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_lag>(thd, mem_root, this); }
+};
+
+/*
+ A base window function (aggregate) that also holds a counter for the number
+ of rows.
+*/
+class Item_sum_window_with_row_count : public Item_sum_num
+{
+ public:
+ Item_sum_window_with_row_count(THD *thd) : Item_sum_num(thd),
+ partition_row_count_(0) {}
+
+ Item_sum_window_with_row_count(THD *thd, Item *arg) :
+ Item_sum_num(thd, arg), partition_row_count_(0) {};
+
+ // Set by the window execution code after the first (counting) pass.
+ void set_row_count(ulonglong count) { partition_row_count_ = count; }
+
+ protected:
+ // NOTE(review): returns longlong while the member is ulonglong — values
+ // above LLONG_MAX would wrap; presumably unreachable in practice. Confirm.
+ longlong get_row_count() { return partition_row_count_; }
+ private:
+ ulonglong partition_row_count_;
+};
+
+/*
+ @detail
+ "The relative rank of a row R is defined as (RK-1)/(NR-1), where RK is
+ defined to be the RANK of R and NR is defined to be the number of rows in
+ the window partition of R."
+
+ Computation of this function requires two passes:
+ - First pass to find #rows in the partition
+ This is held within the row_count context.
+ - Second pass to compute rank of current row and the value of the function
+*/
+class Item_sum_percent_rank: public Item_sum_window_with_row_count
+{
+ public:
+ Item_sum_percent_rank(THD *thd)
+ : Item_sum_window_with_row_count(thd), cur_rank(1), peer_tracker(NULL) {}
+
+ longlong val_int()
+ {
+ /*
+ Percent rank is a real value so calling the integer value should never
+ happen. It makes no sense as it gets truncated to either 0 or 1.
+ */
+ DBUG_ASSERT(0);
+ return 0;
+ }
+
+ // (rank - 1) / (partition rows - 1); 0 for a single-row partition,
+ // NULL (null_value=true) for an empty one.
+ double val_real()
+ {
+ /*
+ We can not get the real value without knowing the number of rows
+ in the partition. Don't divide by 0.
+ */
+ ulonglong partition_rows = get_row_count();
+ null_value= partition_rows > 0 ? false : true;
+
+ return partition_rows > 1 ?
+ static_cast<double>(cur_rank - 1) / (partition_rows - 1) : 0;
+ }
+
+ enum Sumfunctype sum_func () const
+ {
+ return PERCENT_RANK_FUNC;
+ }
+
+ const char*func_name() const
+ {
+ return "percent_rank";
+ }
+
+ void update_field() {}
+
+ void clear()
+ {
+ cur_rank= 1;
+ row_number= 0;
+ }
+ bool add();
+ enum Item_result result_type () const { return REAL_RESULT; }
+ enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+
+ bool fix_length_and_dec()
+ {
+ decimals = 10; // TODO-cvicentiu find out how many decimals the standard
+ // requires.
+ return FALSE;
+ }
+
+ void setup_window_func(THD *thd, Window_spec *window_spec);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_percent_rank>(thd, mem_root, this); }
+
+ private:
+ longlong cur_rank; // Current rank of the current row.
+ longlong row_number; // Value if this were ROW_NUMBER() function.
+
+ // Peer-group boundary detector; owned here, released in cleanup().
+ Group_bound_tracker *peer_tracker;
+
+ void cleanup()
+ {
+ if (peer_tracker)
+ {
+ peer_tracker->cleanup();
+ delete peer_tracker;
+ peer_tracker= NULL;
+ }
+ Item_sum_num::cleanup();
+ }
+};
+
+
+
+
+/*
+ @detail
+ "The relative rank of a row R is defined as NP/NR, where
+ - NP is defined to be the number of rows preceding or peer with R in the
+ window ordering of the window partition of R
+ - NR is defined to be the number of rows in the window partition of R.
+
+ Just like with Item_sum_percent_rank, computation of this function requires
+ two passes.
+*/
+
+class Item_sum_cume_dist: public Item_sum_window_with_row_count
+{
+ public:
+ Item_sum_cume_dist(THD *thd) : Item_sum_window_with_row_count(thd),
+ current_row_count_(0) {}
+
+ // current row count / partition row count; NULL for an empty partition.
+ double val_real()
+ {
+ if (get_row_count() == 0)
+ {
+ null_value= true;
+ return 0;
+ }
+ ulonglong partition_row_count= get_row_count();
+ null_value= false;
+ return static_cast<double>(current_row_count_) / partition_row_count;
+ }
+
+ bool add()
+ {
+ current_row_count_++;
+ return false;
+ }
+
+ enum Sumfunctype sum_func() const
+ {
+ return CUME_DIST_FUNC;
+ }
+
+ void clear()
+ {
+ current_row_count_= 0;
+ set_row_count(0);
+ }
+
+ const char*func_name() const
+ {
+ return "cume_dist";
+ }
+
+ void update_field() {}
+ enum Item_result result_type () const { return REAL_RESULT; }
+ enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+
+ bool fix_length_and_dec()
+ {
+ decimals = 10; // TODO-cvicentiu find out how many decimals the standard
+ // requires.
+ return FALSE;
+ }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_cume_dist>(thd, mem_root, this); }
+
+ private:
+ ulonglong current_row_count_;
+};
+
+/*
+  NTILE(n): distributes partition rows into n buckets; the first
+  (rows mod n) buckets get one extra row each. Requires two passes
+  (partition size must be known before values can be produced).
+*/
+class Item_sum_ntile : public Item_sum_window_with_row_count
+{
+ public:
+ Item_sum_ntile(THD* thd, Item* num_quantiles_expr) :
+ Item_sum_window_with_row_count(thd, num_quantiles_expr),
+ current_row_count_(0) {};
+
+ double val_real()
+ {
+ return (double) val_int();
+ }
+
+ longlong val_int()
+ {
+ if (get_row_count() == 0)
+ {
+ null_value= true;
+ return 0;
+ }
+
+ longlong num_quantiles= get_num_quantiles();
+
+ if (num_quantiles <= 0) {
+ my_error(ER_INVALID_NTILE_ARGUMENT, MYF(0));
+ // NOTE(review): returns 1 ('true' as longlong) after raising the error
+ // and leaves null_value unset — confirm callers only see the raised
+ // error, not this value.
+ return true;
+ }
+
+ null_value= false;
+ ulonglong quantile_size = get_row_count() / num_quantiles;
+ ulonglong extra_rows = get_row_count() - quantile_size * num_quantiles;
+
+ // Rows in the first 'extra_rows' buckets (one extra row each) ...
+ if (current_row_count_ <= extra_rows * (quantile_size + 1))
+ return (current_row_count_ - 1) / (quantile_size + 1) + 1;
+
+ // ... remaining rows fall into the regular-sized buckets.
+ return (current_row_count_ - 1 - extra_rows) / quantile_size + 1;
+ }
+
+ bool add()
+ {
+ current_row_count_++;
+ return false;
+ }
+
+ enum Sumfunctype sum_func() const
+ {
+ return NTILE_FUNC;
+ }
+
+ void clear()
+ {
+ current_row_count_= 0;
+ set_row_count(0);
+ }
+
+ const char*func_name() const
+ {
+ return "ntile";
+ }
+
+ void update_field() {}
+
+ enum Item_result result_type () const { return INT_RESULT; }
+ enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_sum_ntile>(thd, mem_root, this); }
+
+ private:
+ longlong get_num_quantiles() { return args[0]->val_int(); }
+ // NOTE(review): 'ulong' here vs 'ulonglong' in Item_sum_cume_dist — would
+ // truncate on partitions above ULONG_MAX rows on LLP64 platforms; confirm
+ // whether the narrower type is intentional.
+ ulong current_row_count_;
+};
+
+
+/*
+  Wrapper item representing "<aggregate> OVER (<window>)" in the select list.
+  Holds either a named window reference (window_name, resolved later) or a
+  direct Window_spec. args[0] is the wrapped Item_sum. val_*() behaviour
+  depends on the current execution phase (see the phase comment below).
+*/
+class Item_window_func : public Item_func_or_sum
+{
+ /* Window function parameters as we've got them from the parser */
+public:
+ LEX_STRING *window_name;
+public:
+ Window_spec *window_spec;
+
+public:
+ // Parsed form "fn() OVER name": spec is resolved by resolve_window_name().
+ Item_window_func(THD *thd, Item_sum *win_func, LEX_STRING *win_name)
+ : Item_func_or_sum(thd, (Item *) win_func),
+ window_name(win_name), window_spec(NULL),
+ force_return_blank(true),
+ read_value_from_result_field(false) {}
+
+ // Parsed form "fn() OVER (...)": spec known immediately.
+ Item_window_func(THD *thd, Item_sum *win_func, Window_spec *win_spec)
+ : Item_func_or_sum(thd, (Item *) win_func),
+ window_name(NULL), window_spec(win_spec),
+ force_return_blank(true),
+ read_value_from_result_field(false) {}
+
+ Item_sum *window_func() const { return (Item_sum *) args[0]; }
+
+ void update_used_tables();
+
+ /*
+ This is used by filesort to mark the columns it needs to read (because they
+ participate in the sort criteria and/or row retrieval. Window functions can
+ only be used in sort criteria).
+
+ Sorting by window function value is only done after the window functions
+ have been computed. In that case, window function will need to read its
+ temp.table field. In order to allow that, mark that field in the read_set.
+ */
+ bool register_field_in_read_map(void *arg)
+ {
+ TABLE *table= (TABLE*) arg;
+ if (result_field && (result_field->table == table || !table))
+ {
+ bitmap_set_bit(result_field->table->read_set, result_field->field_index);
+ }
+ return 0;
+ }
+
+ // True for ranking functions whose semantics forbid an explicit frame.
+ bool is_frame_prohibited() const
+ {
+ switch (window_func()->sum_func()) {
+ case Item_sum::ROW_NUMBER_FUNC:
+ case Item_sum::RANK_FUNC:
+ case Item_sum::DENSE_RANK_FUNC:
+ case Item_sum::PERCENT_RANK_FUNC:
+ case Item_sum::CUME_DIST_FUNC:
+ case Item_sum::NTILE_FUNC:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // True for value-picking functions that need non-sequential row access.
+ bool requires_special_cursors() const
+ {
+ switch (window_func()->sum_func()) {
+ case Item_sum::FIRST_VALUE_FUNC:
+ case Item_sum::LAST_VALUE_FUNC:
+ case Item_sum::NTH_VALUE_FUNC:
+ case Item_sum::LAG_FUNC:
+ case Item_sum::LEAD_FUNC:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // True when the partition row count must be known before evaluation
+ // (forces a separate counting pass).
+ bool requires_partition_size() const
+ {
+ switch (window_func()->sum_func()) {
+ case Item_sum::PERCENT_RANK_FUNC:
+ case Item_sum::CUME_DIST_FUNC:
+ case Item_sum::NTILE_FUNC:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool requires_peer_size() const
+ {
+ switch (window_func()->sum_func()) {
+ case Item_sum::CUME_DIST_FUNC:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // Functions whose window specification must include ORDER BY.
+ bool is_order_list_mandatory() const
+ {
+ switch (window_func()->sum_func()) {
+ case Item_sum::RANK_FUNC:
+ case Item_sum::DENSE_RANK_FUNC:
+ case Item_sum::PERCENT_RANK_FUNC:
+ case Item_sum::CUME_DIST_FUNC:
+ case Item_sum::LAG_FUNC:
+ case Item_sum::LEAD_FUNC:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /*
+ Computation functions.
+ TODO: consider merging these with class Group_bound_tracker.
+ */
+ void setup_partition_border_check(THD *thd);
+
+ enum_field_types field_type() const
+ {
+ return ((Item_sum *) args[0])->field_type();
+ }
+ enum Item::Type type() const { return Item::WINDOW_FUNC_ITEM; }
+
+private:
+ /*
+ Window functions are very special functions, so val_() methods have
+ special meaning for them:
+
+ - Phase#1, "Initial" we run the join and put its result into temporary
+ table. For window functions, we write the default value (NULL?) as
+ a placeholder.
+
+ - Phase#2: "Computation": executor does the scan in {PARTITION, ORDER BY}
+ order of this window function. It calls appropriate methods to inform
+ the window function about rows entering/leaving the window.
+ It calls window_func()->val_int() so that current window function value
+ can be saved and stored in the temp.table.
+
+ - Phase#3: "Retrieval" the temporary table is read and passed to query
+ output. However, Item_window_func still remains in the select list,
+ so item_windowfunc->val_int() will be called.
+ During Phase#3, read_value_from_result_field= true.
+ */
+ bool force_return_blank;
+ bool read_value_from_result_field;
+
+public:
+ void set_phase_to_initial()
+ {
+ force_return_blank= true;
+ read_value_from_result_field= false;
+ }
+ void set_phase_to_computation()
+ {
+ force_return_blank= false;
+ read_value_from_result_field= false;
+ }
+ void set_phase_to_retrieval()
+ {
+ force_return_blank= false;
+ read_value_from_result_field= true;
+ }
+
+ bool is_null()
+ {
+ if (force_return_blank)
+ return true;
+
+ if (read_value_from_result_field)
+ return result_field->is_null();
+
+ return window_func()->is_null();
+ }
+
+ // The val_*() methods below all follow the same three-phase dispatch:
+ // blank placeholder / temp-table field / live window function value.
+ double val_real()
+ {
+ double res;
+ if (force_return_blank)
+ {
+ res= 0.0;
+ null_value= true;
+ }
+ else if (read_value_from_result_field)
+ {
+ res= result_field->val_real();
+ null_value= result_field->is_null();
+ }
+ else
+ {
+ res= window_func()->val_real();
+ null_value= window_func()->null_value;
+ }
+ return res;
+ }
+
+ longlong val_int()
+ {
+ longlong res;
+ if (force_return_blank)
+ {
+ res= 0;
+ null_value= true;
+ }
+ else if (read_value_from_result_field)
+ {
+ res= result_field->val_int();
+ null_value= result_field->is_null();
+ }
+ else
+ {
+ res= window_func()->val_int();
+ null_value= window_func()->null_value;
+ }
+ return res;
+ }
+
+ String* val_str(String* str)
+ {
+ String *res;
+ if (force_return_blank)
+ {
+ null_value= true;
+ res= NULL;
+ }
+ else if (read_value_from_result_field)
+ {
+ if ((null_value= result_field->is_null()))
+ res= NULL;
+ else
+ res= result_field->val_str(str);
+ }
+ else
+ {
+ res= window_func()->val_str(str);
+ null_value= window_func()->null_value;
+ }
+ return res;
+ }
+
+ my_decimal* val_decimal(my_decimal* dec)
+ {
+ my_decimal *res;
+ if (force_return_blank)
+ {
+ null_value= true;
+ res= NULL;
+ }
+ else if (read_value_from_result_field)
+ {
+ if ((null_value= result_field->is_null()))
+ res= NULL;
+ else
+ res= result_field->val_decimal(dec);
+ }
+ else
+ {
+ res= window_func()->val_decimal(dec);
+ null_value= window_func()->null_value;
+ }
+ return res;
+ }
+
+ void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
+ List<Item> &fields, uint flags);
+
+ bool fix_length_and_dec()
+ {
+ decimals = window_func()->decimals;
+ return FALSE;
+ }
+
+ const char* func_name() const { return "WF"; }
+
+ bool fix_fields(THD *thd, Item **ref);
+
+ bool resolve_window_name(THD *thd);
+
+ void print(String *str, enum_query_type query_type);
+
+ // NOTE(review): returns 0 (no copy) — presumably window function items are
+ // never cloned for condition pushdown; confirm this is intentional.
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
+
+};
+
+#endif /* ITEM_WINDOWFUNC_INCLUDED */
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index c9e6df52de9..738047fea9b 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -14,10 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#ifdef __GNUC__
-#pragma implementation
-#endif
-
#include <my_global.h>
#include "sql_priv.h"
/*
@@ -180,7 +176,7 @@ public:
{
nodebeg= (MY_XML_NODE*) pxml->ptr();
nodeend= (MY_XML_NODE*) (pxml->ptr() + pxml->length());
- numnodes= nodeend - nodebeg;
+ numnodes= (uint)(nodeend - nodebeg);
}
void prepare(String *nodeset)
{
@@ -226,18 +222,19 @@ public:
return str;
}
enum Item_result result_type () const { return STRING_RESULT; }
- void fix_length_and_dec()
+ bool fix_length_and_dec()
{
max_length= MAX_BLOB_WIDTH;
collation.collation= pxml->charset();
// To avoid premature evaluation, mark all nodeset functions as non-const.
used_tables_cache= RAND_TABLE_BIT;
const_item_cache= false;
+ return FALSE;
}
const char *func_name() const { return "nodeset"; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), arg, VCOL_IMPOSSIBLE);
}
};
@@ -251,6 +248,8 @@ public:
Item_nodeset_func(thd, pxml) {}
const char *func_name() const { return "xpath_rootelement"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_rootelement>(thd, mem_root, this); }
};
@@ -262,6 +261,8 @@ public:
Item_nodeset_func(thd, a, b, pxml) {}
const char *func_name() const { return "xpath_union"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_union>(thd, mem_root, this); }
};
@@ -294,6 +295,8 @@ public:
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {}
const char *func_name() const { return "xpath_selfbyname"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_selfbyname>(thd, mem_root, this); }
};
@@ -306,6 +309,8 @@ public:
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {}
const char *func_name() const { return "xpath_childbyname"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_childbyname>(thd, mem_root, this); }
};
@@ -320,6 +325,8 @@ public:
need_self(need_self_arg) {}
const char *func_name() const { return "xpath_descendantbyname"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_descendantbyname>(thd, mem_root, this); }
};
@@ -334,6 +341,8 @@ public:
need_self(need_self_arg) {}
const char *func_name() const { return "xpath_ancestorbyname"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_ancestorbyname>(thd, mem_root, this); }
};
@@ -346,6 +355,8 @@ public:
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {}
const char *func_name() const { return "xpath_parentbyname"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_parentbyname>(thd, mem_root, this); }
};
@@ -358,6 +369,8 @@ public:
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {}
const char *func_name() const { return "xpath_attributebyname"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_attributebyname>(thd, mem_root, this); }
};
@@ -373,6 +386,8 @@ public:
Item_nodeset_func(thd, a, b, pxml) {}
const char *func_name() const { return "xpath_predicate"; }
String *val_nodeset(String *nodeset);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_predicate>(thd, mem_root, this); }
};
@@ -384,19 +399,8 @@ public:
Item_nodeset_func(thd, a, b, pxml) { }
const char *func_name() const { return "xpath_elementbyindex"; }
String *val_nodeset(String *nodeset);
-};
-
-
-/*
- We need to distinguish a number from a boolean:
- a[1] and a[true] are different things in XPath.
-*/
-class Item_bool :public Item_int
-{
-public:
- Item_bool(THD *thd, int32 i): Item_int(thd, i) {}
- const char *func_name() const { return "xpath_bool"; }
- bool is_bool_type() { return true; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_func_elementbyindex>(thd, mem_root, this); }
};
@@ -423,6 +427,8 @@ public:
}
return args[0]->val_real() ? 1 : 0;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_xpath_cast_bool>(thd, mem_root, this); }
};
@@ -435,6 +441,8 @@ public:
Item_xpath_cast_number(THD *thd, Item *a): Item_real_func(thd, a) {}
const char *func_name() const { return "xpath_cast_number"; }
virtual double val_real() { return args[0]->val_real(); }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_xpath_cast_number>(thd, mem_root, this); }
};
@@ -449,7 +457,9 @@ public:
Item_nodeset_func(thd, pxml), string_cache(str_arg) { }
String *val_nodeset(String *res)
{ return string_cache; }
- void fix_length_and_dec() { max_length= MAX_BLOB_WIDTH; }
+ bool fix_length_and_dec() { max_length= MAX_BLOB_WIDTH; return FALSE; }
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_context_cache>(thd, mem_root, this); }
};
@@ -461,7 +471,7 @@ public:
Item_func_xpath_position(THD *thd, Item *a, String *p):
Item_int_func(thd, a), pxml(p) {}
const char *func_name() const { return "xpath_position"; }
- void fix_length_and_dec() { max_length=10; }
+ bool fix_length_and_dec() { max_length=10; return FALSE; }
longlong val_int()
{
String *flt= args[0]->val_nodeset(&tmp_value);
@@ -469,6 +479,8 @@ public:
return ((MY_XPATH_FLT*)flt->ptr())->pos + 1;
return 0;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_xpath_position>(thd, mem_root, this); }
};
@@ -480,7 +492,7 @@ public:
Item_func_xpath_count(THD *thd, Item *a, String *p):
Item_int_func(thd, a), pxml(p) {}
const char *func_name() const { return "xpath_count"; }
- void fix_length_and_dec() { max_length=10; }
+ bool fix_length_and_dec() { max_length=10; return FALSE; }
longlong val_int()
{
uint predicate_supplied_context_size;
@@ -490,6 +502,8 @@ public:
return predicate_supplied_context_size;
return res->length() / sizeof(MY_XPATH_FLT);
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_xpath_count>(thd, mem_root, this); }
};
@@ -533,6 +547,8 @@ public:
}
return sum;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_xpath_sum>(thd, mem_root, this); }
};
@@ -573,9 +589,9 @@ public:
Item_bool_func(thd, nodeset, cmpfunc), pxml(p) {}
enum Type type() const { return XPATH_NODESET_CMP; };
const char *func_name() const { return "xpath_nodeset_to_const_comparator"; }
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor(func_name());
+ return mark_unsupported_function(func_name(), arg, VCOL_IMPOSSIBLE);
}
longlong val_int()
@@ -600,7 +616,7 @@ public:
if ((node->parent == flt->num) &&
(node->type == MY_XML_NODE_TEXT))
{
- fake->set_value(node->beg, node->end - node->beg,
+ fake->set_value(node->beg, (uint)(node->end - node->beg),
collation.collation);
if (args[1]->val_int())
return 1;
@@ -609,6 +625,8 @@ public:
}
return 0;
}
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_nodeset_to_const_comparator>(thd, mem_root, this); }
};
@@ -800,7 +818,7 @@ String *Item_nodeset_func_predicate::val_nodeset(String *str)
Item_func *comp_func= (Item_func*)args[1];
uint pos= 0, size;
prepare(str);
- size= fltend - fltbeg;
+ size= (uint)(fltend - fltbeg);
for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
{
nodeset_func->context_cache.length(0);
@@ -819,7 +837,7 @@ String *Item_nodeset_func_elementbyindex::val_nodeset(String *nodeset)
Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
prepare(nodeset);
MY_XPATH_FLT *flt;
- uint pos, size= fltend - fltbeg;
+ uint pos, size= (uint)(fltend - fltbeg);
for (pos= 0, flt= fltbeg; flt < fltend; flt++)
{
nodeset_func->context_cache.length(0);
@@ -978,7 +996,7 @@ static Item *create_comparator(MY_XPATH *xpath,
else if (a->type() == Item::XPATH_NODESET &&
b->type() == Item::XPATH_NODESET)
{
- uint len= xpath->query.end - context->beg;
+ uint len= (uint)(xpath->query.end - context->beg);
set_if_smaller(len, 32);
my_printf_error(ER_UNKNOWN_ERROR,
"XPATH error: "
@@ -1181,13 +1199,13 @@ my_xpath_keyword(MY_XPATH *x,
static Item *create_func_true(MY_XPATH *xpath, Item **args, uint nargs)
{
- return new (xpath->thd->mem_root) Item_bool(xpath->thd, 1);
+ return new (xpath->thd->mem_root) Item_bool(xpath->thd, "xpath_bool", 1);
}
static Item *create_func_false(MY_XPATH *xpath, Item **args, uint nargs)
{
- return new (xpath->thd->mem_root) Item_bool(xpath->thd, 0);
+ return new (xpath->thd->mem_root) Item_bool(xpath->thd, "xpath_bool", 0);
}
@@ -1382,7 +1400,7 @@ MY_XPATH_FUNC *
my_xpath_function(const char *beg, const char *end)
{
MY_XPATH_FUNC *k, *function_names;
- uint length= end-beg;
+ uint length= (uint)(end-beg);
switch (length)
{
case 1: return 0;
@@ -1944,7 +1962,7 @@ static int my_xpath_parse_PrimaryExpr_literal(MY_XPATH *xpath)
return 0;
xpath->item= new (xpath->thd->mem_root)
Item_string(xpath->thd, xpath->prevtok.beg + 1,
- xpath->prevtok.end - xpath->prevtok.beg - 2,
+ (uint)(xpath->prevtok.end - xpath->prevtok.beg - 2),
xpath->cs);
return 1;
}
@@ -2482,13 +2500,13 @@ static int my_xpath_parse_Number(MY_XPATH *xpath)
if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_DOT))
{
xpath->item= new (thd->mem_root) Item_int(thd, xpath->prevtok.beg,
- xpath->prevtok.end - xpath->prevtok.beg);
+ (uint)(xpath->prevtok.end - xpath->prevtok.beg));
return 1;
}
my_xpath_parse_term(xpath, MY_XPATH_LEX_DIGITS);
xpath->item= new (thd->mem_root) Item_float(thd, beg,
- xpath->prevtok.end - beg);
+ (uint)(xpath->prevtok.end - beg));
return 1;
}
@@ -2604,8 +2622,7 @@ my_xpath_parse_VariableReference(MY_XPATH *xpath)
(spv= spc->find_variable(name, false)))
{
Item_splocal *splocal= new (thd->mem_root)
- Item_splocal(thd, name, spv->offset,
- spv->type, 0);
+ Item_splocal(thd, name, spv->offset, spv->sql_type(), 0);
#ifndef DBUG_OFF
if (splocal)
splocal->m_sp= lex->sphead;
@@ -2616,7 +2633,7 @@ my_xpath_parse_VariableReference(MY_XPATH *xpath)
{
xpath->item= NULL;
DBUG_ASSERT(xpath->query.end > dollar_pos);
- uint len= xpath->query.end - dollar_pos;
+ uint len= (uint)(xpath->query.end - dollar_pos);
set_if_smaller(len, 32);
my_printf_error(ER_UNKNOWN_ERROR, "Unknown XPATH variable at: '%.*s'",
MYF(0), len, dollar_pos);
@@ -2644,7 +2661,7 @@ my_xpath_parse_NodeTest_QName(MY_XPATH *xpath)
if (!my_xpath_parse_QName(xpath))
return 0;
DBUG_ASSERT(xpath->context);
- uint len= xpath->prevtok.end - xpath->prevtok.beg;
+ uint len= (uint)(xpath->prevtok.end - xpath->prevtok.beg);
xpath->context= nametestfunc(xpath, xpath->axis, xpath->context,
xpath->prevtok.beg, len);
return 1;
@@ -2693,10 +2710,10 @@ my_xpath_parse(MY_XPATH *xpath, const char *str, const char *strend)
}
-void Item_xml_str_func::fix_length_and_dec()
+bool Item_xml_str_func::fix_length_and_dec()
{
max_length= MAX_BLOB_WIDTH;
- agg_arg_charsets_for_comparison(collation, args, arg_count);
+ return agg_arg_charsets_for_comparison(collation, args, arg_count);
}
@@ -2749,7 +2766,7 @@ bool Item_xml_str_func::fix_fields(THD *thd, Item **ref)
if (!rc)
{
- uint clen= xpath.query.end - xpath.lasttok.beg;
+ uint clen= (uint)(xpath.query.end - xpath.lasttok.beg);
set_if_smaller(clen, 32);
my_printf_error(ER_UNKNOWN_ERROR, "XPATH syntax error: '%.*s'",
MYF(0), clen, xpath.lasttok.beg);
diff --git a/sql/item_xmlfunc.h b/sql/item_xmlfunc.h
index 92a8f757822..c46365ee5f0 100644
--- a/sql/item_xmlfunc.h
+++ b/sql/item_xmlfunc.h
@@ -21,11 +21,6 @@
/* This file defines all XML functions */
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-
typedef struct my_xml_node_st MY_XML_NODE;
@@ -87,15 +82,11 @@ public:
maybe_null= TRUE;
}
bool fix_fields(THD *thd, Item **ref);
- void fix_length_and_dec();
+ bool fix_length_and_dec();
bool const_item() const
{
return const_item_cache && (!nodeset_func || nodeset_func->const_item());
}
- bool check_vcol_func_processor(uchar *int_arg)
- {
- return trace_unsupported_by_check_vcol_func_processor(func_name());
- }
};
@@ -106,6 +97,8 @@ public:
Item_xml_str_func(thd, a, b) {}
const char *func_name() const { return "extractvalue"; }
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_xml_extractvalue>(thd, mem_root, this); }
};
@@ -120,6 +113,8 @@ public:
Item_xml_str_func(thd, a, b, c) {}
const char *func_name() const { return "updatexml"; }
String *val_str(String *);
+ Item *get_copy(THD *thd, MEM_ROOT *mem_root)
+ { return get_item_copy<Item_func_xml_update>(thd, mem_root, this); }
};
#endif /* ITEM_XMLFUNC_INCLUDED */
diff --git a/sql/key.cc b/sql/key.cc
index 19b96522b2c..116f2954d9e 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -1,4 +1,5 @@
/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2018, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -21,9 +22,6 @@
#include "key.h" // key_rec_cmp
#include "field.h" // Field
-using std::min;
-using std::max;
-
/*
Search after a key that starts with 'field'
@@ -54,8 +52,8 @@ using std::max;
int find_ref_key(KEY *key, uint key_count, uchar *record, Field *field,
uint *key_length, uint *keypart)
{
- reg2 int i;
- reg3 KEY *key_info;
+ int i;
+ KEY *key_info;
uint fieldpos;
fieldpos= field->offset(record);
@@ -137,7 +135,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
Don't copy data for null values
The -1 below is to subtract the null byte which is already handled
*/
- length= min<uint>(key_length, key_part->store_length-1);
+ length= MY_MIN(key_length, uint(key_part->store_length)-1);
if (with_zerofill)
bzero((char*) to_key, length);
continue;
@@ -147,7 +145,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
key_part->key_part_flag & HA_VAR_LENGTH_PART)
{
key_length-= HA_KEY_BLOB_LENGTH;
- length= min<uint>(key_length, key_part->length);
+ length= MY_MIN(key_length, key_part->length);
uint bytes= key_part->field->get_key_image(to_key, length,
key_info->flags & HA_SPATIAL ? Field::itMBR : Field::itRAW);
if (with_zerofill && bytes < length)
@@ -156,7 +154,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
}
else
{
- length= min<uint>(key_length, key_part->length);
+ length= MY_MIN(key_length, key_part->length);
Field *field= key_part->field;
CHARSET_INFO *cs= field->charset();
uint bytes= field->get_key_image(to_key, length, Field::itRAW);
@@ -179,7 +177,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
@param key_length specifies length of all keyparts that will be restored
*/
-void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
+void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info,
uint key_length)
{
uint length;
@@ -208,7 +206,7 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
Don't copy data for null bytes
The -1 below is to subtract the null byte which is already handled
*/
- length= min<uint>(key_length, key_part->store_length-1);
+ length= MY_MIN(key_length, uint(key_part->store_length)-1);
continue;
}
}
@@ -250,7 +248,7 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
my_ptrdiff_t ptrdiff= to_record - field->table->record[0];
field->move_field_offset(ptrdiff);
key_length-= HA_KEY_BLOB_LENGTH;
- length= min<uint>(key_length, key_part->length);
+ length= MY_MIN(key_length, key_part->length);
old_map= dbug_tmp_use_all_columns(field->table, field->table->write_set);
field->set_key_image(from_key, length);
dbug_tmp_restore_column_map(field->table->write_set, old_map);
@@ -259,7 +257,7 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
}
else
{
- length= min<uint>(key_length, key_part->length);
+ length= MY_MIN(key_length, key_part->length);
/* skip the byte with 'uneven' bits, if used */
memcpy(to_record + key_part->offset, from_key + used_uneven_bits
, (size_t) length - used_uneven_bits);
@@ -317,7 +315,7 @@ bool key_cmp_if_same(TABLE *table,const uchar *key,uint idx,uint key_length)
return 1;
continue;
}
- length= min((uint) (key_end-key), store_length);
+ length= MY_MIN((uint) (key_end-key), store_length);
if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+
FIELDFLAG_PACK)))
{
@@ -331,7 +329,7 @@ bool key_cmp_if_same(TABLE *table,const uchar *key,uint idx,uint key_length)
}
if (cs->coll->strnncollsp(cs,
(const uchar*) key, length,
- (const uchar*) pos, char_length, 0))
+ (const uchar*) pos, char_length))
return 1;
continue;
}
@@ -377,7 +375,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
{
const char *tmp_end= tmp.ptr() + tmp.length();
while (tmp_end > tmp.ptr() && !*--tmp_end) ;
- tmp.length(tmp_end - tmp.ptr() + 1);
+ tmp.length((uint32)(tmp_end - tmp.ptr() + 1));
}
if (cs->mbmaxlen > 1 && prefix_key)
{
@@ -395,7 +393,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
tmp.length(charpos);
}
if (max_length < field->pack_length())
- tmp.length(min(tmp.length(),max_length));
+ tmp.length(MY_MIN(tmp.length(),max_length));
ErrConvString err(&tmp);
to->append(err.ptr());
}
@@ -468,19 +466,8 @@ void key_unpack(String *to, TABLE *table, KEY *key)
bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields)
{
- bitmap_clear_all(&table->tmp_set);
- table->mark_columns_used_by_index_no_reset(idx, &table->tmp_set);
- if (bitmap_is_overlapping(&table->tmp_set, fields))
- return 1;
-
- /*
- If table handler has primary key as part of the index, check that primary
- key is not updated
- */
- if (idx != table->s->primary_key && table->s->primary_key < MAX_KEY &&
- (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
- return is_key_used(table, table->s->primary_key, fields);
- return 0;
+ table->mark_columns_used_by_index(idx, &table->tmp_set);
+ return bitmap_is_overlapping(&table->tmp_set, fields);
}
@@ -511,7 +498,7 @@ int key_cmp(KEY_PART_INFO *key_part, const uchar *key, uint key_length)
if (key_part->null_bit)
{
/* This key part allows null values; NULL is lower than everything */
- register bool field_is_null= key_part->field->is_null();
+ bool field_is_null= key_part->field->is_null();
if (*key) // If range key is null
{
/* the range is expecting a null value */
@@ -894,8 +881,7 @@ bool key_buf_cmp(KEY *key_info, uint used_key_parts,
if (length1 != length2 ||
cs->coll->strnncollsp(cs,
pos1 + pack_length, byte_len1,
- pos2 + pack_length, byte_len2,
- 1))
+ pos2 + pack_length, byte_len2))
return TRUE;
key1+= pack_length; key2+= pack_length;
}
diff --git a/sql/key.h b/sql/key.h
index 47b981f5298..f2521e4a665 100644
--- a/sql/key.h
+++ b/sql/key.h
@@ -29,7 +29,7 @@ int find_ref_key(KEY *key, uint key_count, uchar *record, Field *field,
uint *key_length, uint *keypart);
void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, uint key_length,
bool with_zerofill= FALSE);
-void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
+void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info,
uint key_length);
bool key_cmp_if_same(TABLE *form,const uchar *key,uint index,uint key_length);
void key_unpack(String *to, TABLE *table, KEY *key);
diff --git a/sql/lex.h b/sql/lex.h
index 87c87d03fb3..cd42672b6eb 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -219,6 +219,7 @@ static SYMBOL symbols[] = {
{ "EVERY", SYM(EVERY_SYM)},
{ "EXAMINED", SYM(EXAMINED_SYM)},
{ "EXCHANGE", SYM(EXCHANGE_SYM)},
+ { "EXCLUDE", SYM(EXCLUDE_SYM)},
{ "EXECUTE", SYM(EXECUTE_SYM)},
{ "EXISTS", SYM(EXISTS)},
{ "EXIT", SYM(EXIT_SYM)},
@@ -239,6 +240,8 @@ static SYMBOL symbols[] = {
{ "FLOAT4", SYM(FLOAT_SYM)},
{ "FLOAT8", SYM(DOUBLE_SYM)},
{ "FLUSH", SYM(FLUSH_SYM)},
+ { "FOLLOWING", SYM(FOLLOWING_SYM)},
+ { "FOLLOWS", SYM(FOLLOWS_SYM)},
{ "FOR", SYM(FOR_SYM)},
{ "FORCE", SYM(FORCE_SYM)},
{ "FOREIGN", SYM(FOREIGN)},
@@ -276,6 +279,7 @@ static SYMBOL symbols[] = {
{ "IGNORE", SYM(IGNORE_SYM)},
{ "IGNORE_DOMAIN_IDS", SYM(IGNORE_DOMAIN_IDS_SYM)},
{ "IGNORE_SERVER_IDS", SYM(IGNORE_SERVER_IDS_SYM)},
+ { "IMMEDIATE", SYM(IMMEDIATE_SYM)},
{ "IMPORT", SYM(IMPORT)},
{ "IN", SYM(IN_SYM)},
{ "INDEX", SYM(INDEX_SYM)},
@@ -306,6 +310,7 @@ static SYMBOL symbols[] = {
{ "ITERATE", SYM(ITERATE_SYM)},
{ "INVOKER", SYM(INVOKER_SYM)},
{ "JOIN", SYM(JOIN_SYM)},
+ { "JSON", SYM(JSON_SYM)},
{ "KEY", SYM(KEY_SYM)},
{ "KEYS", SYM(KEYS)},
{ "KEY_BLOCK_SIZE", SYM(KEY_BLOCK_SIZE)},
@@ -340,6 +345,7 @@ static SYMBOL symbols[] = {
{ "LOW_PRIORITY", SYM(LOW_PRIORITY)},
{ "MASTER", SYM(MASTER_SYM)},
{ "MASTER_CONNECT_RETRY", SYM(MASTER_CONNECT_RETRY_SYM)},
+ { "MASTER_DELAY", SYM(MASTER_DELAY_SYM)},
{ "MASTER_GTID_POS", SYM(MASTER_GTID_POS_SYM)},
{ "MASTER_HOST", SYM(MASTER_HOST_SYM)},
{ "MASTER_LOG_FILE", SYM(MASTER_LOG_FILE_SYM)},
@@ -423,9 +429,11 @@ static SYMBOL symbols[] = {
{ "OPTIONALLY", SYM(OPTIONALLY)},
{ "OR", SYM(OR_SYM)},
{ "ORDER", SYM(ORDER_SYM)},
+ { "OTHERS", SYM(OTHERS_SYM)},
{ "OUT", SYM(OUT_SYM)},
{ "OUTER", SYM(OUTER)},
{ "OUTFILE", SYM(OUTFILE)},
+ { "OVER", SYM(OVER_SYM)},
{ "OWNER", SYM(OWNER_SYM)},
{ "PACK_KEYS", SYM(PACK_KEYS_SYM)},
{ "PAGE", SYM(PAGE_SYM)},
@@ -444,6 +452,8 @@ static SYMBOL symbols[] = {
{ "POINT", SYM(POINT_SYM)},
{ "POLYGON", SYM(POLYGON)},
{ "PORT", SYM(PORT_SYM)},
+ { "PRECEDES", SYM(PRECEDES_SYM)},
+ { "PRECEDING", SYM(PRECEDING_SYM)},
{ "PRECISION", SYM(PRECISION)},
{ "PREPARE", SYM(PREPARE_SYM)},
{ "PRESERVE", SYM(PRESERVE_SYM)},
@@ -468,6 +478,7 @@ static SYMBOL symbols[] = {
{ "REAL", SYM(REAL)},
{ "REBUILD", SYM(REBUILD_SYM)},
{ "RECOVER", SYM(RECOVER_SYM)},
+ { "RECURSIVE", SYM(RECURSIVE_SYM)},
{ "REDO_BUFFER_SIZE", SYM(REDO_BUFFER_SIZE_SYM)},
{ "REDOFILE", SYM(REDOFILE_SYM)},
{ "REDUNDANT", SYM(REDUNDANT_SYM)},
@@ -507,8 +518,8 @@ static SYMBOL symbols[] = {
{ "ROLLUP", SYM(ROLLUP_SYM)},
{ "ROUTINE", SYM(ROUTINE_SYM)},
{ "ROW", SYM(ROW_SYM)},
- { "ROW_COUNT", SYM(ROW_COUNT_SYM)},
{ "ROWS", SYM(ROWS_SYM)},
+ { "ROW_COUNT", SYM(ROW_COUNT_SYM)},
{ "ROW_FORMAT", SYM(ROW_FORMAT_SYM)},
{ "RTREE", SYM(RTREE_SYM)},
{ "SAVEPOINT", SYM(SAVEPOINT_SYM)},
@@ -544,7 +555,8 @@ static SYMBOL symbols[] = {
{ "SOME", SYM(ANY_SYM)},
{ "SONAME", SYM(SONAME_SYM)},
{ "SOUNDS", SYM(SOUNDS_SYM)},
- { "SOURCE", SYM(SOURCE_SYM)},
+ { "SOURCE", SYM(SOURCE_SYM)},
+ { "STORED", SYM(STORED_SYM)},
{ "SPATIAL", SYM(SPATIAL_SYM)},
{ "SPECIFIC", SYM(SPECIFIC_SYM)},
{ "REF_SYSTEM_ID", SYM(REF_SYSTEM_ID_SYM)},
@@ -599,6 +611,7 @@ static SYMBOL symbols[] = {
{ "TEXT", SYM(TEXT_SYM)},
{ "THAN", SYM(THAN_SYM)},
{ "THEN", SYM(THEN_SYM)},
+ { "TIES", SYM(TIES_SYM)},
{ "TIME", SYM(TIME_SYM)},
{ "TIMESTAMP", SYM(TIMESTAMP)},
{ "TIMESTAMPADD", SYM(TIMESTAMP_ADD)},
@@ -616,6 +629,7 @@ static SYMBOL symbols[] = {
{ "TRUNCATE", SYM(TRUNCATE_SYM)},
{ "TYPE", SYM(TYPE_SYM)},
{ "TYPES", SYM(TYPES_SYM)},
+ { "UNBOUNDED", SYM(UNBOUNDED_SYM)},
{ "UNCOMMITTED", SYM(UNCOMMITTED_SYM)},
{ "UNDEFINED", SYM(UNDEFINED_SYM)},
{ "UNDO_BUFFER_SIZE", SYM(UNDO_BUFFER_SIZE_SYM)},
@@ -657,6 +671,7 @@ static SYMBOL symbols[] = {
{ "WHEN", SYM(WHEN_SYM)},
{ "WHERE", SYM(WHERE)},
{ "WHILE", SYM(WHILE_SYM)},
+ { "WINDOW", SYM(WINDOW_SYM)},
{ "WITH", SYM(WITH)},
{ "WORK", SYM(WORK_SYM)},
{ "WRAPPER", SYM(WRAPPER_SYM)},
@@ -679,17 +694,27 @@ static SYMBOL sql_functions[] = {
{ "BIT_XOR", SYM(BIT_XOR)},
{ "CAST", SYM(CAST_SYM)},
{ "COUNT", SYM(COUNT_SYM)},
+ { "CUME_DIST", SYM(CUME_DIST_SYM)},
{ "CURDATE", SYM(CURDATE)},
{ "CURTIME", SYM(CURTIME)},
{ "DATE_ADD", SYM(DATE_ADD_INTERVAL)},
{ "DATE_SUB", SYM(DATE_SUB_INTERVAL)},
+ { "DENSE_RANK", SYM(DENSE_RANK_SYM)},
{ "EXTRACT", SYM(EXTRACT_SYM)},
+ { "FIRST_VALUE", SYM(FIRST_VALUE_SYM)},
{ "GROUP_CONCAT", SYM(GROUP_CONCAT_SYM)},
+ { "LAG", SYM(LAG_SYM)},
+ { "LEAD", SYM(LEAD_SYM)},
{ "MAX", SYM(MAX_SYM)},
{ "MID", SYM(SUBSTRING)}, /* unireg function */
{ "MIN", SYM(MIN_SYM)},
{ "NOW", SYM(NOW_SYM)},
+ { "NTH_VALUE", SYM(NTH_VALUE_SYM)},
+ { "NTILE", SYM(NTILE_SYM)},
{ "POSITION", SYM(POSITION_SYM)},
+ { "PERCENT_RANK", SYM(PERCENT_RANK_SYM)},
+ { "RANK", SYM(RANK_SYM)},
+ { "ROW_NUMBER", SYM(ROW_NUMBER_SYM)},
{ "SESSION_USER", SYM(USER_SYM)},
{ "STD", SYM(STD_SYM)},
{ "STDDEV", SYM(STD_SYM)},
diff --git a/sql/lock.cc b/sql/lock.cc
index f3445e3b38a..aba22ebb73d 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -90,6 +90,7 @@ extern HASH open_cache;
static int lock_external(THD *thd, TABLE **table,uint count);
static int unlock_external(THD *thd, TABLE **table,uint count);
+
/* Map the return value of thr_lock to an error from errmsg.txt */
static int thr_lock_errno_to_mysql[]=
{ 0, ER_LOCK_ABORTED, ER_LOCK_WAIT_TIMEOUT, ER_LOCK_DEADLOCK };
@@ -163,18 +164,12 @@ lock_tables_check(THD *thd, TABLE **tables, uint count, uint flags)
write we must own metadata lock of MDL_SHARED_WRITE or stronger
type. For table to be locked for read we must own metadata lock
of MDL_SHARED_READ or stronger type).
- The only exception are HANDLER statements which are allowed to
- lock table for read while having only MDL_SHARED lock on it.
*/
DBUG_ASSERT(t->s->tmp_table ||
thd->mdl_context.is_lock_owner(MDL_key::TABLE,
t->s->db.str, t->s->table_name.str,
t->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE ?
- MDL_SHARED_WRITE : MDL_SHARED_READ) ||
- (t->open_by_handler &&
- thd->mdl_context.is_lock_owner(MDL_key::TABLE,
- t->s->db.str, t->s->table_name.str,
- MDL_SHARED)));
+ MDL_SHARED_WRITE : MDL_SHARED_READ));
/*
Prevent modifications to base tables if READ_ONLY is activated.
@@ -244,6 +239,39 @@ void reset_lock_data(MYSQL_LOCK *sql_lock, bool unlock)
/**
+ Scan array of tables for access types; update transaction tracker
+ accordingly.
+
+ @param thd The current thread.
+ @param tables An array of pointers to the tables to lock.
+ @param count The number of tables to lock.
+*/
+
+#ifndef EMBEDDED_LIBRARY
+static void track_table_access(THD *thd, TABLE **tables, size_t count)
+{
+ if (thd->variables.session_track_transaction_info > TX_TRACK_NONE)
+ {
+ Transaction_state_tracker *tst= (Transaction_state_tracker *)
+ thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER);
+
+ while (count--)
+ {
+ TABLE *t= tables[count];
+
+ if (t)
+ tst->add_trx_state(thd, t->reginfo.lock_type,
+ t->file->has_transactions());
+ }
+ }
+}
+#else
+#define track_table_access(A,B,C)
+#endif //EMBEDDED_LIBRARY
+
+
+
+/**
Lock tables.
@param thd The current thread.
@@ -280,6 +308,9 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags)
my_free(sql_lock);
sql_lock= 0;
}
+
+ track_table_access(thd, tables, count);
+
DBUG_RETURN(sql_lock);
}
@@ -347,7 +378,7 @@ end:
static int lock_external(THD *thd, TABLE **tables, uint count)
{
- reg1 uint i;
+ uint i;
int lock_type,error;
DBUG_ENTER("lock_external");
@@ -374,7 +405,6 @@ static int lock_external(THD *thd, TABLE **tables, uint count)
}
else
{
- (*tables)->db_stat &= ~ HA_BLOCK_LOCK;
(*tables)->current_lock= lock_type;
}
}
@@ -500,7 +530,7 @@ void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table)
{
if (locked)
{
- reg1 uint i;
+ uint i;
for (i=0; i < locked->table_count; i++)
{
if (locked->table[i] == table)
@@ -778,7 +808,7 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
we may allocate too much, but better safe than memory overrun.
And in the FLUSH case, the memory is released quickly anyway.
*/
- sql_lock->lock_count= locks - locks_buf;
+ sql_lock->lock_count= (uint)(locks - locks_buf);
DBUG_ASSERT(sql_lock->lock_count <= lock_count);
DBUG_PRINT("info", ("sql_lock->table_count %d sql_lock->lock_count %d",
sql_lock->table_count, sql_lock->lock_count));
diff --git a/sql/log.cc b/sql/log.cc
index bab250223f3..2152fa4f11f 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -46,6 +46,8 @@
#include <stdarg.h>
#include <m_ctype.h> // For test_if_number
+#include <set_var.h> // for Sys_last_gtid_ptr
+
#ifdef _WIN32
#include "message.h"
#endif
@@ -139,6 +141,12 @@ static bool start_binlog_background_thread();
static rpl_binlog_state rpl_global_gtid_binlog_state;
+void setup_log_handling()
+{
+ rpl_global_gtid_binlog_state.init();
+}
+
+
/**
purge logs, master and slave sides both, related error code
convertor.
@@ -189,7 +197,7 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sql_state,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl);
const char *message() const { return m_message; }
@@ -199,7 +207,7 @@ bool
Silence_log_table_errors::handle_condition(THD *,
uint,
const char*,
- Sql_condition::enum_warning_level,
+ Sql_condition::enum_warning_level*,
const char* msg,
Sql_condition ** cond_hdl)
{
@@ -640,7 +648,7 @@ void Log_to_csv_event_handler::cleanup()
bool Log_to_csv_event_handler::
log_general(THD *thd, my_hrtime_t event_time, const char *user_host,
- uint user_host_len, int thread_id_arg,
+ uint user_host_len, my_thread_id thread_id_arg,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len,
CHARSET_INFO *client_cs)
@@ -1050,7 +1058,7 @@ bool Log_to_file_event_handler::
bool Log_to_file_event_handler::
log_general(THD *thd, my_hrtime_t event_time, const char *user_host,
- uint user_host_len, int thread_id_arg,
+ uint user_host_len, my_thread_id thread_id_arg,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len,
CHARSET_INFO *client_cs)
@@ -1191,23 +1199,6 @@ void LOGGER::init_log_tables()
}
-bool LOGGER::flush_logs(THD *thd)
-{
- /*
- Now we lock logger, as nobody should be able to use logging routines while
- log tables are closed
- */
- logger.lock_exclusive();
-
- /* reopen log files */
- file_log_handler->flush();
-
- /* end of log flush */
- logger.unlock();
- return 0;
-}
-
-
/**
Close and reopen the slow log (with locks).
@@ -1303,7 +1294,7 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
}
/* fill in user_host value: the format is "%s[%s] @ %s [%s]" */
- user_host_len= (strxnmov(user_host_buff, MAX_USER_HOST_SIZE,
+ user_host_len= (uint)(strxnmov(user_host_buff, MAX_USER_HOST_SIZE,
sctx->priv_user, "[",
sctx->user ? sctx->user : (thd->slave_thread ? "SQL_SLAVE" : ""), "] @ ",
sctx->host ? sctx->host : "", " [",
@@ -1677,14 +1668,14 @@ static int binlog_close_connection(handlerton *hton, THD *thd)
uchar *buf;
size_t len=0;
wsrep_write_cache_buf(cache, &buf, &len);
- WSREP_WARN("binlog trx cache not empty (%zu bytes) @ connection close %lu",
- len, thd->thread_id);
+ WSREP_WARN("binlog trx cache not empty (%zu bytes) @ connection close %lld",
+ len, (longlong) thd->thread_id);
if (len > 0) wsrep_dump_rbr_buf(thd, buf, len);
cache = cache_mngr->get_binlog_cache_log(false);
wsrep_write_cache_buf(cache, &buf, &len);
- WSREP_WARN("binlog stmt cache not empty (%zu bytes) @ connection close %lu",
- len, thd->thread_id);
+ WSREP_WARN("binlog stmt cache not empty (%zu bytes) @ connection close %lld",
+ len, (longlong) thd->thread_id);
if (len > 0) wsrep_dump_rbr_buf(thd, buf, len);
}
#endif /* WITH_WSREP */
@@ -2430,8 +2421,8 @@ static int find_uniq_filename(char *name, ulong next_log_number)
uint i;
char buff[FN_REFLEN], ext_buf[FN_REFLEN];
struct st_my_dir *dir_info;
- reg1 struct fileinfo *file_info;
- ulong max_found, next, number;
+ struct fileinfo *file_info;
+ ulong max_found, next, UNINIT_VAR(number);
size_t buf_length, length;
char *start, *end;
int error= 0;
@@ -2762,9 +2753,7 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name,
{
THD *thd= current_thd;
if (thd)
- my_printf_error(ER_NO_UNIQUE_LOGFILE,
- ER_THD(thd, ER_NO_UNIQUE_LOGFILE),
- MYF(ME_FATALERROR), log_name);
+ my_error(ER_NO_UNIQUE_LOGFILE, MYF(ME_FATALERROR), log_name);
sql_print_error(ER_DEFAULT(ER_NO_UNIQUE_LOGFILE), log_name);
return 1;
}
@@ -2846,12 +2835,11 @@ void MYSQL_QUERY_LOG::reopen_file()
*/
bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host,
- uint user_host_len, int thread_id_arg,
+ uint user_host_len, my_thread_id thread_id_arg,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len)
{
char buff[32];
- uint length= 0;
char local_time_buff[MAX_TIME_SIZE];
struct tm start;
uint time_buff_len= 0;
@@ -2885,7 +2873,7 @@ bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host,
goto err;
/* command_type, thread_id */
- length= my_snprintf(buff, 32, "%5ld ", (long) thread_id_arg);
+ size_t length= my_snprintf(buff, 32, "%5llu ", thread_id_arg);
if (my_b_write(&log_file, (uchar*) buff, length))
goto err;
@@ -2962,7 +2950,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
int tmp_errno= 0;
char buff[80], *end;
char query_time_buff[22+7], lock_time_buff[22+7];
- uint buff_len;
+ size_t buff_len;
end= buff;
if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
@@ -3177,6 +3165,21 @@ MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period)
bzero((char*) &purge_index_file, sizeof(purge_index_file));
}
+void MYSQL_BIN_LOG::stop_background_thread()
+{
+ if (binlog_background_thread_started)
+ {
+ mysql_mutex_lock(&LOCK_binlog_background_thread);
+ binlog_background_thread_stop= true;
+ mysql_cond_signal(&COND_binlog_background_thread);
+ while (binlog_background_thread_stop)
+ mysql_cond_wait(&COND_binlog_background_thread_end,
+ &LOCK_binlog_background_thread);
+ mysql_mutex_unlock(&LOCK_binlog_background_thread);
+ binlog_background_thread_started= false;
+ }
+}
+
/* this is called only once */
void MYSQL_BIN_LOG::cleanup()
@@ -3187,17 +3190,8 @@ void MYSQL_BIN_LOG::cleanup()
xid_count_per_binlog *b;
/* Wait for the binlog background thread to stop. */
- if (!is_relay_log && binlog_background_thread_started)
- {
- mysql_mutex_lock(&LOCK_binlog_background_thread);
- binlog_background_thread_stop= true;
- mysql_cond_signal(&COND_binlog_background_thread);
- while (binlog_background_thread_stop)
- mysql_cond_wait(&COND_binlog_background_thread_end,
- &LOCK_binlog_background_thread);
- mysql_mutex_unlock(&LOCK_binlog_background_thread);
- binlog_background_thread_started= false;
- }
+ if (!is_relay_log)
+ stop_background_thread();
inited= 0;
mysql_mutex_lock(&LOCK_log);
@@ -3591,6 +3585,7 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
new_xid_list_entry->binlog_name= name_mem;
new_xid_list_entry->binlog_name_len= len;
new_xid_list_entry->xid_count= 0;
+ new_xid_list_entry->notify_count= 0;
/*
Find the name for the Initial binlog checkpoint.
@@ -4292,6 +4287,10 @@ void MYSQL_BIN_LOG::wait_for_last_checkpoint_event()
relay log.
IMPLEMENTATION
+
+ - You must hold rli->data_lock before calling this function, since
+ it writes group_relay_log_pos and similar fields of
+ Relay_log_info.
- Protects index file with LOCK_index
- Delete relevant relay log files
- Copy all file names after these ones to the front of the index file
@@ -4305,7 +4304,7 @@ void MYSQL_BIN_LOG::wait_for_last_checkpoint_event()
read by the SQL slave thread are deleted).
@note
- - This is only called from the slave-execute thread when it has read
+ - This is only called from the slave SQL thread when it has read
all commands from a relay log and want to switch to a new relay log.
- When this happens, we can be in an active transaction as
a transaction can span over two relay logs
@@ -4336,6 +4335,8 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
DBUG_ASSERT(rli->slave_running == MYSQL_SLAVE_RUN_NOT_CONNECT);
DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->event_relay_log_name));
+ mysql_mutex_assert_owner(&rli->data_lock);
+
mysql_mutex_lock(&LOCK_index);
ir= rli->inuse_relaylog_list;
@@ -4394,7 +4395,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
}
/* Store where we are in the new file for the execution thread */
- if (flush_relay_log_info(rli))
+ if (rli->flush())
error= LOG_INFO_IO;
DBUG_EXECUTE_IF("crash_before_purge_logs", DBUG_SUICIDE(););
@@ -4409,7 +4410,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
/*
* Need to update the log pos because purge logs has been called
- * after fetching initially the log pos at the begining of the method.
+ * after fetching initially the log pos at the beginning of the method.
*/
if ((errcode= find_log_pos(&rli->linfo, rli->event_relay_log_name, 0)))
{
@@ -5196,9 +5197,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
/* handle reopening errors */
if (error)
{
- my_printf_error(ER_CANT_OPEN_FILE,
- ER_THD_OR_DEFAULT(current_thd, ER_CANT_OPEN_FILE),
- MYF(ME_FATALERROR), file_to_open, error);
+ my_error(ER_CANT_OPEN_FILE, MYF(ME_FATALERROR), file_to_open, error);
close_on_error= TRUE;
}
@@ -5306,7 +5305,7 @@ bool MYSQL_BIN_LOG::write_event_buffer(uchar* buf, uint len)
if (!ebuf)
goto err;
- crypto.set_iv(iv, my_b_append_tell(&log_file));
+ crypto.set_iv(iv, (uint32)my_b_append_tell(&log_file));
/*
we want to encrypt everything, excluding the event length:
@@ -5532,9 +5531,9 @@ binlog_cache_mngr *THD::binlog_setup_trx_data()
cache_mngr= (binlog_cache_mngr*) my_malloc(sizeof(binlog_cache_mngr), MYF(MY_ZEROFILL));
if (!cache_mngr ||
open_cached_file(&cache_mngr->stmt_cache.cache_log, mysql_tmpdir,
- LOG_PREFIX, binlog_stmt_cache_size, MYF(MY_WME)) ||
+ LOG_PREFIX, (size_t)binlog_stmt_cache_size, MYF(MY_WME)) ||
open_cached_file(&cache_mngr->trx_cache.cache_log, mysql_tmpdir,
- LOG_PREFIX, binlog_cache_size, MYF(MY_WME)))
+ LOG_PREFIX, (size_t)binlog_cache_size, MYF(MY_WME)))
{
my_free(cache_mngr);
DBUG_RETURN(0); // Didn't manage to set it up
@@ -5693,8 +5692,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_transactional,
{
int error;
DBUG_ENTER("THD::binlog_write_table_map");
- DBUG_PRINT("enter", ("table: 0x%lx (%s: #%lu)",
- (long) table, table->s->table_name.str,
+ DBUG_PRINT("enter", ("table: %p (%s: #%lu)",
+ table, table->s->table_name.str,
table->s->table_map_id));
/* Ensure that all events in a GTID group are in the same cache */
@@ -5845,7 +5844,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
{
DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)");
DBUG_ASSERT(WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open());
- DBUG_PRINT("enter", ("event: 0x%lx", (long) event));
+ DBUG_PRINT("enter", ("event: %p", event));
int error= 0;
binlog_cache_mngr *const cache_mngr=
@@ -5856,7 +5855,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
binlog_cache_data *cache_data=
cache_mngr->get_binlog_cache_data(use_trans_cache(thd, is_transactional));
- DBUG_PRINT("info", ("cache_mngr->pending(): 0x%lx", (long) cache_data->pending()));
+ DBUG_PRINT("info", ("cache_mngr->pending(): %p", cache_data->pending()));
if (Rows_log_event* pending= cache_data->pending())
{
@@ -5948,7 +5947,8 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone,
}
if (err)
DBUG_RETURN(true);
- thd->last_commit_gtid= gtid;
+
+ thd->set_last_commit_gtid(gtid);
Gtid_log_event gtid_event(thd, seq_no, domain_id, standalone,
LOG_EVENT_SUPPRESS_USE_F, is_transactional,
@@ -6413,8 +6413,25 @@ err:
update_binlog_end_pos(offset);
signal_update();
+ /*
+ If a transaction with the LOAD DATA statement is divided
+ into logical mini-transactions (of the 10K rows) and binlog
+ is rotated, then the last portion of data may be lost due to
+ wsrep handler re-registration at the boundary of the split.
+ Since splitting of the LOAD DATA into mini-transactions is
+ logical, we should not allow these mini-transactions to fall
+ into separate binlogs. Therefore, it is necessary to prohibit
+ the rotation of binlog in the middle of processing LOAD DATA:
+ */
+#ifdef WITH_WSREP
+ if (!thd->wsrep_split_flag)
+ {
+#endif /* WITH_WSREP */
if ((error= rotate(false, &check_purge)))
check_purge= false;
+#ifdef WITH_WSREP
+ }
+#endif /* WITH_WSREP */
}
}
}
@@ -6837,7 +6854,6 @@ int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate,
DBUG_ENTER("MYSQL_BIN_LOG::rotate_and_purge");
bool check_purge= false;
- //todo: fix the macro def and restore safe_mutex_assert_not_owner(&LOCK_log);
mysql_mutex_lock(&LOCK_log);
prev_binlog_id= current_binlog_id;
@@ -7048,7 +7064,7 @@ int MYSQL_BIN_LOG::write_cache(THD *thd, IO_CACHE *cache)
int4store(ev + EVENT_LEN_OFFSET, ev_len + writer.checksum_len);
writer.remains= ev_len;
- if (writer.write(ev, std::min<uint>(ev_len, length - hdr_offs)))
+ if (writer.write(ev, MY_MIN(ev_len, length - hdr_offs)))
DBUG_RETURN(ER_ERROR_ON_WRITE);
/* next event header at ... */
@@ -7140,8 +7156,25 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
!(error= flush_and_sync(0)))
{
signal_update();
+ /*
+ If a transaction with the LOAD DATA statement is divided
+ into logical mini-transactions (of the 10K rows) and binlog
+ is rotated, then the last portion of data may be lost due to
+ wsrep handler re-registration at the boundary of the split.
+ Since splitting of the LOAD DATA into mini-transactions is
+ logical, we should not allow these mini-transactions to fall
+ into separate binlogs. Therefore, it is necessary to prohibit
+ the rotation of binlog in the middle of processing LOAD DATA:
+ */
+#ifdef WITH_WSREP
+ if (!thd->wsrep_split_flag)
+ {
+#endif /* WITH_WSREP */
if ((error= rotate(false, &check_purge)))
check_purge= false;
+#ifdef WITH_WSREP
+ }
+#endif /* WITH_WSREP */
}
offset= my_b_tell(&log_file);
@@ -7907,6 +7940,20 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
mark_xids_active(binlog_id, xid_count);
}
+ /*
+ If a transaction with the LOAD DATA statement is divided
+ into logical mini-transactions (of the 10K rows) and binlog
+ is rotated, then the last portion of data may be lost due to
+ wsrep handler re-registration at the boundary of the split.
+ Since splitting of the LOAD DATA into mini-transactions is
+ logical, we should not allow these mini-transactions to fall
+ into separate binlogs. Therefore, it is necessary to prohibit
+ the rotation of binlog in the middle of processing LOAD DATA:
+ */
+#ifdef WITH_WSREP
+ if (!leader->thd->wsrep_split_flag)
+ {
+#endif /* WITH_WSREP */
if (rotate(false, &check_purge))
{
/*
@@ -7926,6 +7973,9 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
my_error(ER_ERROR_ON_WRITE, MYF(ME_NOREFRESH), name, errno);
check_purge= false;
}
+#ifdef WITH_WSREP
+ }
+#endif /* WITH_WSREP */
/* In case of binlog rotate, update the correct current binlog offset. */
commit_offset= my_b_write_tell(&log_file);
}
@@ -8483,10 +8533,9 @@ void MYSQL_BIN_LOG::set_max_size(ulong max_size_arg)
0 String is not a number
*/
-static bool test_if_number(register const char *str,
- ulong *res, bool allow_wildcards)
+static bool test_if_number(const char *str, ulong *res, bool allow_wildcards)
{
- reg2 int flag;
+ int flag;
const char *start;
DBUG_ENTER("test_if_number");
@@ -8750,16 +8799,20 @@ void sql_print_information(const char *format, ...)
va_list args;
DBUG_ENTER("sql_print_information");
- if (disable_log_notes)
- DBUG_VOID_RETURN; // Skip notes during start/shutdown
-
va_start(args, format);
- error_log_print(INFORMATION_LEVEL, format, args);
+ sql_print_information_v(format, args);
va_end(args);
DBUG_VOID_RETURN;
}
+void sql_print_information_v(const char *format, va_list ap)
+{
+ if (disable_log_notes)
+ return; // Skip notes during start/shutdown
+
+ error_log_print(INFORMATION_LEVEL, format, ap);
+}
void
TC_LOG::run_prepare_ordered(THD *thd, bool all)
@@ -9766,9 +9819,20 @@ void
TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
{
xid_count_per_binlog *entry= static_cast<xid_count_per_binlog *>(cookie);
+ bool found_entry= false;
mysql_mutex_lock(&LOCK_binlog_background_thread);
- entry->next_in_queue= binlog_background_thread_queue;
- binlog_background_thread_queue= entry;
+ /* count the same notification kind from different engines */
+ for (xid_count_per_binlog *link= binlog_background_thread_queue;
+ link && !found_entry; link= link->next_in_queue)
+ {
+ if ((found_entry= (entry == link)))
+ entry->notify_count++;
+ }
+ if (!found_entry)
+ {
+ entry->next_in_queue= binlog_background_thread_queue;
+ binlog_background_thread_queue= entry;
+ }
mysql_cond_signal(&COND_binlog_background_thread);
mysql_mutex_unlock(&LOCK_binlog_background_thread);
}
@@ -9794,12 +9858,9 @@ binlog_background_thread(void *arg __attribute__((unused)))
my_thread_init();
DBUG_ENTER("binlog_background_thread");
- thd= new THD;
+ thd= new THD(next_thread_id());
thd->system_thread= SYSTEM_THREAD_BINLOG_BACKGROUND;
thd->thread_stack= (char*) &thd; /* Set approximate stack start */
- mysql_mutex_lock(&LOCK_thread_count);
- thd->thread_id= thread_id++;
- mysql_mutex_unlock(&LOCK_thread_count);
thd->store_globals();
thd->security_ctx->skip_grants();
thd->set_command(COM_DAEMON);
@@ -9866,13 +9927,16 @@ binlog_background_thread(void *arg __attribute__((unused)))
);
while (queue)
{
+ long count= queue->notify_count;
THD_STAGE_INFO(thd, stage_binlog_processing_checkpoint_notify);
DEBUG_SYNC(thd, "binlog_background_thread_before_mark_xid_done");
/* Set the thread start time */
thd->set_time();
/* Grab next pointer first, as mark_xid_done() may free the element. */
next= queue->next_in_queue;
- mysql_bin_log.mark_xid_done(queue->binlog_id, true);
+ queue->notify_count= 0;
+ for (long i= 0; i <= count; i++)
+ mysql_bin_log.mark_xid_done(queue->binlog_id, true);
queue= next;
DBUG_EXECUTE_IF("binlog_background_checkpoint_processed",
@@ -9888,6 +9952,7 @@ binlog_background_thread(void *arg __attribute__((unused)))
THD_STAGE_INFO(thd, stage_binlog_stopping_background_thread);
+ /* No need to use mutex as thd is not linked into other threads */
delete thd;
my_thread_end();
@@ -10065,7 +10130,7 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name,
((last_gtid_standalone && !ev->is_part_of_group(typ)) ||
(!last_gtid_standalone &&
(typ == XID_EVENT ||
- (typ == QUERY_EVENT &&
+ (LOG_EVENT_IS_QUERY(typ) &&
(((Query_log_event *)ev)->is_commit() ||
((Query_log_event *)ev)->is_rollback()))))))
{
@@ -10380,7 +10445,7 @@ static MYSQL_SYSVAR_ENUM(
"log events in the binary log",
NULL,
binlog_checksum_update,
- BINLOG_CHECKSUM_ALG_OFF,
+ BINLOG_CHECKSUM_ALG_CRC32,
&binlog_checksum_typelib);
static struct st_mysql_sys_var *binlog_sys_vars[]=
@@ -10539,7 +10604,8 @@ IO_CACHE * get_trans_log(THD * thd)
if (cache_mngr)
return cache_mngr->get_binlog_cache_log(true);
- WSREP_DEBUG("binlog cache not initialized, conn :%ld", thd->thread_id);
+ WSREP_DEBUG("binlog cache not initialized, conn: %llu",
+ thd->thread_id);
return NULL;
}
@@ -10577,7 +10643,8 @@ void thd_binlog_trx_reset(THD * thd)
void thd_binlog_rollback_stmt(THD * thd)
{
- WSREP_DEBUG("thd_binlog_rollback_stmt :%ld", thd->thread_id);
+ WSREP_DEBUG("thd_binlog_rollback_stmt connection: %llu",
+ thd->thread_id);
binlog_cache_mngr *const cache_mngr=
(binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton);
if (cache_mngr)
diff --git a/sql/log.h b/sql/log.h
index 2118bd7a059..2a3912a52d3 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2005, 2016, Oracle and/or its affiliates.
- Copyright (c) 2009, 2016, Monty Program Ab
+ Copyright (c) 2009, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -26,6 +26,7 @@ class Relay_log_info;
class Format_description_log_event;
+void setup_log_handling();
bool trans_has_updated_trans_table(const THD* thd);
bool stmt_has_updated_trans_table(const THD *thd);
bool use_trans_cache(const THD* thd, bool is_transactional);
@@ -286,9 +287,9 @@ typedef struct st_log_info
#define MAX_LOG_HANDLERS_NUM 3
/* log event handler flags */
-#define LOG_NONE 1
-#define LOG_FILE 2
-#define LOG_TABLE 4
+#define LOG_NONE 1U
+#define LOG_FILE 2U
+#define LOG_TABLE 4U
class Log_event;
class Rows_log_event;
@@ -354,7 +355,7 @@ public:
MYSQL_QUERY_LOG() : last_time(0) {}
void reopen_file();
bool write(time_t event_time, const char *user_host,
- uint user_host_len, int thread_id,
+ uint user_host_len, my_thread_id thread_id,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len);
bool write(THD *thd, time_t current_time,
@@ -585,6 +586,7 @@ public:
ulong binlog_id;
/* Total prepared XIDs and pending checkpoint requests in this binlog. */
long xid_count;
+ long notify_count;
/* For linking in requests to the binlog background thread. */
xid_count_per_binlog *next_in_queue;
xid_count_per_binlog(); /* Give link error if constructor used. */
@@ -594,6 +596,8 @@ public:
mysql_cond_t COND_binlog_background_thread;
mysql_cond_t COND_binlog_background_thread_end;
+ void stop_background_thread();
+
using MYSQL_LOG::generate_name;
using MYSQL_LOG::is_open;
@@ -910,7 +914,7 @@ public:
virtual bool log_error(enum loglevel level, const char *format,
va_list args)= 0;
virtual bool log_general(THD *thd, my_hrtime_t event_time, const char *user_host,
- uint user_host_len, int thread_id,
+ uint user_host_len, my_thread_id thread_id,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len,
CHARSET_INFO *client_cs)= 0;
@@ -939,7 +943,7 @@ public:
virtual bool log_error(enum loglevel level, const char *format,
va_list args);
virtual bool log_general(THD *thd, my_hrtime_t event_time, const char *user_host,
- uint user_host_len, int thread_id,
+ uint user_host_len, my_thread_id thread_id,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len,
CHARSET_INFO *client_cs);
@@ -971,7 +975,7 @@ public:
virtual bool log_error(enum loglevel level, const char *format,
va_list args);
virtual bool log_general(THD *thd, my_hrtime_t event_time, const char *user_host,
- uint user_host_len, int thread_id,
+ uint user_host_len, my_thread_id thread_id,
const char *command_type, uint command_type_len,
const char *sql_text, uint sql_text_len,
CHARSET_INFO *client_cs);
@@ -1019,7 +1023,6 @@ public:
*/
void init_base();
void init_log_tables();
- bool flush_logs(THD *thd);
bool flush_slow_log();
bool flush_general_log();
/* Perform basic logger cleanup. this will leave e.g. error log open. */
@@ -1072,6 +1075,7 @@ int vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
void sql_print_error(const char *format, ...);
void sql_print_warning(const char *format, ...);
void sql_print_information(const char *format, ...);
+void sql_print_information_v(const char *format, va_list ap);
typedef void (*sql_print_message_func)(const char *format, ...);
extern sql_print_message_func sql_print_message_handlers[];
@@ -1098,6 +1102,7 @@ void make_default_log_name(char **out, const char* log_ext, bool once);
void binlog_reset_cache(THD *thd);
extern MYSQL_PLUGIN_IMPORT MYSQL_BIN_LOG mysql_bin_log;
+extern handlerton *binlog_hton;
extern LOGGER logger;
extern const char *log_bin_index;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index bb373a4ed84..8990e1953b6 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2018, Oracle and/or its affiliates.
- Copyright (c) 2009, 2018, MariaDB
+ Copyright (c) 2009, 2019, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -18,7 +18,6 @@
#include <my_global.h>
#include "sql_priv.h"
-#include "mysqld_error.h"
#ifndef MYSQL_CLIENT
#include "unireg.h"
@@ -44,17 +43,18 @@
#include <strfunc.h>
#include "compat56.h"
#include "wsrep_mysqld.h"
+#else
+#include "mysqld_error.h"
#endif /* MYSQL_CLIENT */
#include <my_bitmap.h>
#include "rpl_utility.h"
#include "rpl_constants.h"
#include "sql_digest.h"
+#include "zlib.h"
#define my_b_write_string(A, B) my_b_write((A), (uchar*)(B), (uint) (sizeof(B) - 1))
-using std::max;
-
/**
BINLOG_CHECKSUM variable.
*/
@@ -319,17 +319,34 @@ public:
constructor, but it would be possible to create a subclass
holding the IO_CACHE itself.
*/
- Write_on_release_cache(IO_CACHE *cache, FILE *file, flag_set flags = 0)
- : m_cache(cache), m_file(file), m_flags(flags)
+ Write_on_release_cache(IO_CACHE *cache, FILE *file, flag_set flags = 0, Log_event *ev = NULL)
+ : m_cache(cache), m_file(file), m_flags(flags), m_ev(ev)
{
reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE);
}
~Write_on_release_cache()
{
+#ifdef MYSQL_CLIENT
+ if(m_ev == NULL)
+ {
+ copy_event_cache_to_file_and_reinit(m_cache, m_file);
+ if (m_flags & FLUSH_F)
+ fflush(m_file);
+ }
+ else // if m_ev<>NULL, then storing the output in output_buf
+ {
+ LEX_STRING tmp_str;
+ if (copy_event_cache_to_string_and_reinit(m_cache, &tmp_str))
+ exit(1);
+ m_ev->output_buf.append(tmp_str.str, tmp_str.length);
+ my_free(tmp_str.str);
+ }
+#else /* MySQL_SERVER */
copy_event_cache_to_file_and_reinit(m_cache, m_file);
if (m_flags & FLUSH_F)
fflush(m_file);
+#endif
}
/*
@@ -359,6 +376,7 @@ private:
IO_CACHE *m_cache;
FILE *m_file;
flag_set m_flags;
+ Log_event *m_ev; // Used for Flashback
};
/*
@@ -716,6 +734,380 @@ char *str_to_hex(char *to, const char *from, uint len)
return to; // pointer to end 0 of 'to'
}
+#define BINLOG_COMPRESSED_HEADER_LEN 1
+#define BINLOG_COMPRESSED_ORIGINAL_LENGTH_MAX_BYTES 4
+/**
+ Compressed Record
+ Record Header: 1 Byte
+ 7 Bit: Always 1, mean compressed;
+ 4-6 Bit: Compressed algorithm - Always 0, means zlib
+ It maybe support other compression algorithm in the future.
+ 0-3 Bit: Bytes of "Record Original Length"
+ Record Original Length: 1-4 Bytes
+ Compressed Buf:
+*/
+
+/**
+ Get the length of compress content.
+*/
+
+uint32 binlog_get_compress_len(uint32 len)
+{
+ /* 5 for the begin content, 1 reserved for a '\0'*/
+ return ALIGN_SIZE((BINLOG_COMPRESSED_HEADER_LEN + BINLOG_COMPRESSED_ORIGINAL_LENGTH_MAX_BYTES)
+ + compressBound(len) + 1);
+}
+
+/**
+ Compress buf from 'src' to 'dst'.
+
+  Note: 1) The caller should guarantee that the length of 'dst', which
+           can be got by binlog_get_compress_len, is enough to hold
+           the content compressed.
+        2) The 'comlen' should store the length of 'dst', and it will
+ be set as the size of compressed content after return.
+
+ return zero if successful, others otherwise.
+*/
+int binlog_buf_compress(const char *src, char *dst, uint32 len, uint32 *comlen)
+{
+ uchar lenlen;
+ if (len & 0xFF000000)
+ {
+ dst[1] = uchar(len >> 24);
+ dst[2] = uchar(len >> 16);
+ dst[3] = uchar(len >> 8);
+ dst[4] = uchar(len);
+ lenlen = 4;
+ }
+ else if (len & 0x00FF0000)
+ {
+ dst[1] = uchar(len >> 16);
+ dst[2] = uchar(len >> 8);
+ dst[3] = uchar(len);
+ lenlen = 3;
+ }
+ else if (len & 0x0000FF00)
+ {
+ dst[1] = uchar(len >> 8);
+ dst[2] = uchar(len);
+ lenlen = 2;
+ }
+ else
+ {
+ dst[1] = uchar(len);
+ lenlen = 1;
+ }
+ dst[0] = 0x80 | (lenlen & 0x07);
+
+ uLongf tmplen = (uLongf)*comlen - BINLOG_COMPRESSED_HEADER_LEN - lenlen - 1;
+ if (compress((Bytef *)dst + BINLOG_COMPRESSED_HEADER_LEN + lenlen, &tmplen,
+ (const Bytef *)src, (uLongf)len) != Z_OK)
+ {
+ return 1;
+ }
+ *comlen = (uint32)tmplen + BINLOG_COMPRESSED_HEADER_LEN + lenlen;
+ return 0;
+}
+
+/**
+ Convert a query_compressed_log_event to query_log_event
+  from 'src' to 'dst', the size after decompression stored in 'newlen'.
+
+ @Note:
+ 1) The caller should call my_free to release 'dst' if *is_malloc is
+ returned as true.
+  2) If *is_malloc is returned as false, then 'dst' reuses the passed-in
+ 'buf'.
+
+ return zero if successful, non-zero otherwise.
+*/
+
+int
+query_event_uncompress(const Format_description_log_event *description_event,
+ bool contain_checksum, const char *src, ulong src_len,
+ char* buf, ulong buf_size, bool* is_malloc, char **dst,
+ ulong *newlen)
+{
+ ulong len = uint4korr(src + EVENT_LEN_OFFSET);
+ const char *tmp = src;
+ const char *end = src + len;
+
+ // bad event
+ if (src_len < len )
+ return 1;
+
+ DBUG_ASSERT((uchar)src[EVENT_TYPE_OFFSET] == QUERY_COMPRESSED_EVENT);
+
+ uint8 common_header_len= description_event->common_header_len;
+ uint8 post_header_len=
+ description_event->post_header_len[QUERY_COMPRESSED_EVENT-1];
+
+ *is_malloc = false;
+
+ tmp += common_header_len;
+ // bad event
+ if (end <= tmp)
+ return 1;
+
+ uint db_len = (uint)tmp[Q_DB_LEN_OFFSET];
+ uint16 status_vars_len= uint2korr(tmp + Q_STATUS_VARS_LEN_OFFSET);
+
+ tmp += post_header_len + status_vars_len + db_len + 1;
+ // bad event
+ if (end <= tmp)
+ return 1;
+
+ int32 comp_len = (int32)(len - (tmp - src) -
+ (contain_checksum ? BINLOG_CHECKSUM_LEN : 0));
+ uint32 un_len = binlog_get_uncompress_len(tmp);
+
+ // bad event
+ if (comp_len < 0 || un_len == 0)
+ return 1;
+
+ *newlen = (ulong)(tmp - src) + un_len;
+ if(contain_checksum)
+ *newlen += BINLOG_CHECKSUM_LEN;
+
+ uint32 alloc_size = (uint32)ALIGN_SIZE(*newlen);
+ char *new_dst = NULL;
+
+
+ if (alloc_size <= buf_size)
+ {
+ new_dst = buf;
+ }
+ else
+ {
+ new_dst = (char *)my_malloc(alloc_size, MYF(MY_WME));
+ if (!new_dst)
+ return 1;
+
+ *is_malloc = true;
+ }
+
+ /* copy the head*/
+ memcpy(new_dst, src , tmp - src);
+ if (binlog_buf_uncompress(tmp, new_dst + (tmp - src),
+ comp_len, &un_len))
+ {
+ if (*is_malloc)
+ my_free(new_dst);
+
+ *is_malloc = false;
+
+ return 1;
+ }
+
+ new_dst[EVENT_TYPE_OFFSET] = QUERY_EVENT;
+ int4store(new_dst + EVENT_LEN_OFFSET, *newlen);
+ if(contain_checksum)
+ {
+ ulong clear_len = *newlen - BINLOG_CHECKSUM_LEN;
+ int4store(new_dst + clear_len,
+ my_checksum(0L, (uchar *)new_dst, clear_len));
+ }
+ *dst = new_dst;
+ return 0;
+}
+
+int
+row_log_event_uncompress(const Format_description_log_event *description_event,
+ bool contain_checksum, const char *src, ulong src_len,
+ char* buf, ulong buf_size, bool* is_malloc, char **dst,
+ ulong *newlen)
+{
+ Log_event_type type = (Log_event_type)(uchar)src[EVENT_TYPE_OFFSET];
+ ulong len = uint4korr(src + EVENT_LEN_OFFSET);
+ const char *tmp = src;
+ char *new_dst = NULL;
+ const char *end = tmp + len;
+
+ // bad event
+ if (src_len < len)
+ return 1;
+
+ DBUG_ASSERT(LOG_EVENT_IS_ROW_COMPRESSED(type));
+
+ uint8 common_header_len= description_event->common_header_len;
+ uint8 post_header_len= description_event->post_header_len[type-1];
+
+ tmp += common_header_len + ROWS_HEADER_LEN_V1;
+ if (post_header_len == ROWS_HEADER_LEN_V2)
+ {
+ /*
+ Have variable length header, check length,
+ which includes length bytes
+ */
+
+ // bad event
+ if (end - tmp <= 2)
+ return 1;
+
+ uint16 var_header_len= uint2korr(tmp);
+ DBUG_ASSERT(var_header_len >= 2);
+
+ /* skip over var-len header, extracting 'chunks' */
+ tmp += var_header_len;
+
+ /* get the uncompressed event type */
+ type=
+ (Log_event_type)(type - WRITE_ROWS_COMPRESSED_EVENT + WRITE_ROWS_EVENT);
+ }
+ else
+ {
+ /* get the uncompressed event type */
+ type= (Log_event_type)
+ (type - WRITE_ROWS_COMPRESSED_EVENT_V1 + WRITE_ROWS_EVENT_V1);
+ }
+
+ //bad event
+ if (end <= tmp)
+ return 1;
+
+ ulong m_width = net_field_length((uchar **)&tmp);
+ tmp += (m_width + 7) / 8;
+
+ if (type == UPDATE_ROWS_EVENT_V1 || type == UPDATE_ROWS_EVENT)
+ {
+ tmp += (m_width + 7) / 8;
+ }
+
+ //bad event
+ if (end <= tmp)
+ return 1;
+
+ uint32 un_len = binlog_get_uncompress_len(tmp);
+ //bad event
+ if (un_len == 0)
+ return 1;
+
+ int32 comp_len = (int32)(len - (tmp - src) -
+ (contain_checksum ? BINLOG_CHECKSUM_LEN : 0));
+ //bad event
+ if (comp_len <=0)
+ return 1;
+
+ *newlen = ulong(tmp - src) + un_len;
+ if(contain_checksum)
+ *newlen += BINLOG_CHECKSUM_LEN;
+
+ size_t alloc_size = ALIGN_SIZE(*newlen);
+
+ *is_malloc = false;
+ if (alloc_size <= buf_size)
+ {
+ new_dst = buf;
+ }
+ else
+ {
+ new_dst = (char *)my_malloc(alloc_size, MYF(MY_WME));
+ if (!new_dst)
+ return 1;
+
+ *is_malloc = true;
+ }
+
+ /* Copy the head. */
+ memcpy(new_dst, src , tmp - src);
+ /* Uncompress the body. */
+ if (binlog_buf_uncompress(tmp, new_dst + (tmp - src),
+ comp_len, &un_len))
+ {
+ if (*is_malloc)
+ my_free(new_dst);
+
+ return 1;
+ }
+
+ new_dst[EVENT_TYPE_OFFSET] = type;
+ int4store(new_dst + EVENT_LEN_OFFSET, *newlen);
+ if(contain_checksum){
+ ulong clear_len = *newlen - BINLOG_CHECKSUM_LEN;
+ int4store(new_dst + clear_len,
+ my_checksum(0L, (uchar *)new_dst, clear_len));
+ }
+ *dst = new_dst;
+ return 0;
+}
+
+/**
+ Get the length of uncompress content.
+ return 0 means error.
+*/
+
+uint32 binlog_get_uncompress_len(const char *buf)
+{
+ DBUG_ASSERT((buf[0] & 0xe0) == 0x80);
+ uint32 lenlen = buf[0] & 0x07;
+ uint32 len = 0;
+ switch(lenlen)
+ {
+ case 1:
+ len = uchar(buf[1]);
+ break;
+ case 2:
+ len = uchar(buf[1]) << 8 | uchar(buf[2]);
+ break;
+ case 3:
+ len = uchar(buf[1]) << 16 | uchar(buf[2]) << 8 | uchar(buf[3]);
+ break;
+ case 4:
+ len = uchar(buf[1]) << 24 | uchar(buf[2]) << 16 |
+ uchar(buf[3]) << 8 | uchar(buf[4]);
+ break;
+ default:
+ DBUG_ASSERT(lenlen >= 1 && lenlen <= 4);
+ break;
+ }
+ return len;
+}
+
+/**
+ Uncompress the content in 'src' with length of 'len' to 'dst'.
+
+  Note: 1) The caller should guarantee that the length of 'dst' (which
+           can be got by binlog_get_uncompress_len) is enough to hold
+           the content uncompressed.
+        2) The 'newlen' should store the length of 'dst', and it will
+ be set as the size of uncompressed content after return.
+
+ return zero if successful, others otherwise.
+*/
+int binlog_buf_uncompress(const char *src, char *dst, uint32 len,
+ uint32 *newlen)
+{
+ if((src[0] & 0x80) == 0)
+ {
+ return 1;
+ }
+
+ uint32 lenlen= src[0] & 0x07;
+ uLongf buflen= *newlen;
+
+ uint32 alg = (src[0] & 0x70) >> 4;
+ switch(alg)
+ {
+ case 0:
+ // zlib
+ if(uncompress((Bytef *)dst, &buflen,
+ (const Bytef*)src + 1 + lenlen, len - 1 - lenlen) != Z_OK)
+ {
+ return 1;
+ }
+ break;
+ default:
+ //TODO
+ //bad algorithm
+ return 1;
+ }
+
+ DBUG_ASSERT(*newlen == (uint32)buflen);
+ *newlen = (uint32)buflen;
+ return 0;
+}
+
#ifndef MYSQL_CLIENT
/**
@@ -759,7 +1151,7 @@ int append_query_string(CHARSET_INFO *csinfo, String *to,
*ptr++= '\'';
}
- to->length(orig_len + ptr - beg);
+ to->length((uint32)(orig_len + ptr - beg));
return 0;
}
#endif
@@ -842,6 +1234,13 @@ const char* Log_event::get_type_str(Log_event_type type)
case TRANSACTION_CONTEXT_EVENT: return "Transaction_context";
case VIEW_CHANGE_EVENT: return "View_change";
case XA_PREPARE_LOG_EVENT: return "XA_prepare";
+ case QUERY_COMPRESSED_EVENT: return "Query_compressed";
+ case WRITE_ROWS_COMPRESSED_EVENT: return "Write_rows_compressed";
+ case UPDATE_ROWS_COMPRESSED_EVENT: return "Update_rows_compressed";
+ case DELETE_ROWS_COMPRESSED_EVENT: return "Delete_rows_compressed";
+ case WRITE_ROWS_COMPRESSED_EVENT_V1: return "Write_rows_compressed_v1";
+ case UPDATE_ROWS_COMPRESSED_EVENT_V1: return "Update_rows_compressed_v1";
+ case DELETE_ROWS_COMPRESSED_EVENT_V1: return "Delete_rows_compressed_v1";
default: return "Unknown"; /* impossible */
}
@@ -980,6 +1379,7 @@ int Log_event::do_update_pos(rpl_group_info *rgi)
Relay_log_info *rli= rgi->rli;
DBUG_ENTER("Log_event::do_update_pos");
+ DBUG_ASSERT(!rli->belongs_to_client());
/*
rli is null when (as far as I (Guilhem) know) the caller is
Load_log_event::do_apply_event *and* that one is called from
@@ -1246,7 +1646,7 @@ int Log_event_writer::write_header(uchar *pos, size_t len)
if (ctx)
{
uchar iv[BINLOG_IV_LENGTH];
- crypto->set_iv(iv, my_b_safe_tell(file));
+ crypto->set_iv(iv, (uint32)my_b_safe_tell(file));
if (encryption_ctx_init(ctx, crypto->key, crypto->key_length,
iv, sizeof(iv), ENCRYPTION_FLAG_ENCRYPT | ENCRYPTION_FLAG_NOPAD,
ENCRYPTION_KEY_SYSTEM_DATA, crypto->key_version))
@@ -1370,7 +1770,7 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet,
ulong data_len;
char buf[LOG_EVENT_MINIMAL_HEADER_LEN];
uchar ev_offset= packet->length();
-#ifndef max_allowed_packet
+#if !defined(MYSQL_CLIENT)
THD *thd=current_thd;
ulong max_allowed_packet= thd ? thd->slave_thread ? slave_max_allowed_packet
: thd->variables.max_allowed_packet
@@ -1398,8 +1798,8 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet,
if (data_len < LOG_EVENT_MINIMAL_HEADER_LEN)
DBUG_RETURN(LOG_READ_BOGUS);
- if (data_len > max(max_allowed_packet,
- opt_binlog_rows_event_max_size + MAX_LOG_EVENT_HEADER))
+ if (data_len > MY_MAX(max_allowed_packet,
+ opt_binlog_rows_event_max_size + MAX_LOG_EVENT_HEADER))
DBUG_RETURN(LOG_READ_TOO_LARGE);
if (likely(data_len > LOG_EVENT_MINIMAL_HEADER_LEN))
@@ -1427,7 +1827,7 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet,
if (fdle->crypto_data.scheme)
{
uchar iv[BINLOG_IV_LENGTH];
- fdle->crypto_data.set_iv(iv, my_b_tell(file) - data_len);
+ fdle->crypto_data.set_iv(iv, (uint32) (my_b_tell(file) - data_len));
char *newpkt= (char*)my_malloc(data_len + ev_offset + 1, MYF(MY_WME));
if (!newpkt)
@@ -1536,9 +1936,9 @@ err:
#endif
if (event.length() >= OLD_HEADER_LEN)
sql_print_error("Error in Log_event::read_log_event(): '%s',"
- " data_len: %lu, event_type: %d", error,
- uint4korr(&event[EVENT_LEN_OFFSET]),
- (uchar)event[EVENT_TYPE_OFFSET]);
+ " data_len: %lu, event_type: %u", error,
+ (ulong) uint4korr(&event[EVENT_LEN_OFFSET]),
+ (uint) (uchar)event[EVENT_TYPE_OFFSET]);
else
sql_print_error("Error in Log_event::read_log_event(): '%s'", error);
/*
@@ -1680,6 +2080,10 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
case QUERY_EVENT:
ev = new Query_log_event(buf, event_len, fdle, QUERY_EVENT);
break;
+ case QUERY_COMPRESSED_EVENT:
+ ev = new Query_compressed_log_event(buf, event_len, fdle,
+ QUERY_COMPRESSED_EVENT);
+ break;
case LOAD_EVENT:
ev = new Load_log_event(buf, event_len, fdle);
break;
@@ -1754,6 +2158,19 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
ev = new Delete_rows_log_event(buf, event_len, fdle);
break;
+ case WRITE_ROWS_COMPRESSED_EVENT:
+ case WRITE_ROWS_COMPRESSED_EVENT_V1:
+ ev = new Write_rows_compressed_log_event(buf, event_len, fdle);
+ break;
+ case UPDATE_ROWS_COMPRESSED_EVENT:
+ case UPDATE_ROWS_COMPRESSED_EVENT_V1:
+ ev = new Update_rows_compressed_log_event(buf, event_len, fdle);
+ break;
+ case DELETE_ROWS_COMPRESSED_EVENT:
+ case DELETE_ROWS_COMPRESSED_EVENT_V1:
+ ev = new Delete_rows_compressed_log_event(buf, event_len, fdle);
+ break;
+
/* MySQL GTID events are ignored */
case GTID_LOG_EVENT:
case ANONYMOUS_GTID_LOG_EVENT:
@@ -1797,7 +2214,7 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
else
{
DBUG_PRINT("error",("Unknown event code: %d",
- (int) buf[EVENT_TYPE_OFFSET]));
+ (uchar) buf[EVENT_TYPE_OFFSET]));
ev= NULL;
break;
}
@@ -2032,9 +2449,9 @@ void Log_event::print_header(IO_CACHE* file,
if (checksum_alg != BINLOG_CHECKSUM_ALG_OFF &&
checksum_alg != BINLOG_CHECKSUM_ALG_UNDEF)
{
- char checksum_buf[BINLOG_CHECKSUM_LEN * 2 + 4]; // to fit to "0x%lx "
+ char checksum_buf[BINLOG_CHECKSUM_LEN * 2 + 4]; // to fit to "%p "
size_t const bytes_written=
- my_snprintf(checksum_buf, sizeof(checksum_buf), "0x%08lx ", (ulong) crc);
+ my_snprintf(checksum_buf, sizeof(checksum_buf), "0x%08x ", crc);
my_b_printf(file, "%s ", get_type(&binlog_checksum_typelib, checksum_alg));
my_b_printf(file, checksum_buf, bytes_written);
}
@@ -2376,7 +2793,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
d= (ulong) (i64 / 1000000);
t= (ulong) (i64 % 1000000);
- my_b_printf(file, "%04d-%02d-%02d %02d:%02d:%02d",
+ my_b_printf(file, "'%04d-%02d-%02d %02d:%02d:%02d'",
(int) (d / 10000), (int) (d % 10000) / 100, (int) (d % 100),
(int) (t / 10000), (int) (t % 10000) / 100, (int) t % 100);
return 8;
@@ -2610,22 +3027,30 @@ size_t
Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
PRINT_EVENT_INFO *print_event_info,
MY_BITMAP *cols_bitmap,
- const uchar *value, const uchar *prefix)
+ const uchar *value, const uchar *prefix,
+ const my_bool no_fill_output)
{
const uchar *value0= value;
const uchar *null_bits= value;
uint null_bit_index= 0;
char typestr[64]= "";
-
+
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ /* Storing the review SQL */
+ IO_CACHE *review_sql= &print_event_info->review_sql_cache;
+ LEX_STRING review_str;
+#endif
+
/*
Skip metadata bytes which gives the information about nullabity of master
columns. Master writes one bit for each affected column.
*/
value+= (bitmap_bits_set(cols_bitmap) + 7) / 8;
-
- my_b_printf(file, "%s", prefix);
-
+
+ if (!no_fill_output)
+ my_b_printf(file, "%s", prefix);
+
for (size_t i= 0; i < td->size(); i ++)
{
size_t size;
@@ -2634,41 +3059,102 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
if (bitmap_is_set(cols_bitmap, i) == 0)
continue;
-
- my_b_printf(file, "### @%d=", static_cast<int>(i + 1));
+
+ if (!no_fill_output)
+ my_b_printf(file, "### @%d=", static_cast<int>(i + 1));
+
if (!is_null)
{
size_t fsize= td->calc_field_size((uint)i, (uchar*) value);
if (value + fsize > m_rows_end)
{
- my_b_printf(file, "***Corrupted replication event was detected."
- " Not printing the value***\n");
+ if (!no_fill_output)
+ my_b_printf(file, "***Corrupted replication event was detected."
+ " Not printing the value***\n");
value+= fsize;
return 0;
}
}
- if (!(size= log_event_print_value(file,is_null? NULL: value,
- td->type(i), td->field_metadata(i),
- typestr, sizeof(typestr))))
+
+ if (!no_fill_output)
+ {
+ size= log_event_print_value(file,is_null? NULL: value,
+ td->type(i), td->field_metadata(i),
+ typestr, sizeof(typestr));
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ if (need_flashback_review)
+ {
+ String tmp_str, hex_str;
+ IO_CACHE tmp_cache;
+
+ // Using a tmp IO_CACHE to get the value output
+ open_cached_file(&tmp_cache, NULL, NULL, 0, MYF(MY_WME | MY_NABP));
+ size= log_event_print_value(&tmp_cache, is_null? NULL: value,
+ td->type(i), td->field_metadata(i),
+ typestr, sizeof(typestr));
+ if (copy_event_cache_to_string_and_reinit(&tmp_cache, &review_str))
+ exit(1);
+ close_cached_file(&tmp_cache);
+
+ switch (td->type(i)) // Converting a string to HEX format
+ {
+ case MYSQL_TYPE_VARCHAR:
+ case MYSQL_TYPE_VAR_STRING:
+ case MYSQL_TYPE_STRING:
+ case MYSQL_TYPE_BLOB:
+ // Avoid write_pos changed to a new area
+ // tmp_str.free();
+ tmp_str.append(review_str.str + 1, review_str.length - 2); // Removing quotation marks
+ if (hex_str.alloc(tmp_str.length()*2+1)) // If out of memory
+ {
+ fprintf(stderr, "\nError: Out of memory. "
+ "Could not print correct binlog event.\n");
+ exit(1);
+ }
+ octet2hex((char*) hex_str.ptr(), tmp_str.ptr(), tmp_str.length());
+ my_b_printf(review_sql, ", UNHEX('%s')", hex_str.ptr());
+ break;
+ default:
+ tmp_str.free();
+ tmp_str.append(review_str.str, review_str.length);
+ my_b_printf(review_sql, ", %s", tmp_str.ptr());
+ break;
+ }
+      my_free(review_str.str);
+ }
+#endif
+ }
+ else
+ {
+ IO_CACHE tmp_cache;
+ open_cached_file(&tmp_cache, NULL, NULL, 0, MYF(MY_WME | MY_NABP));
+ size= log_event_print_value(&tmp_cache,is_null? NULL: value,
+ td->type(i), td->field_metadata(i),
+ typestr, sizeof(typestr));
+ close_cached_file(&tmp_cache);
+ }
+
+ if (!size)
return 0;
if (!is_null)
value+= size;
- if (print_event_info->verbose > 1)
+ if (print_event_info->verbose > 1 && !no_fill_output)
{
my_b_write(file, (uchar*)" /* ", 4);
my_b_printf(file, "%s ", typestr);
-
+
my_b_printf(file, "meta=%d nullable=%d is_null=%d ",
td->field_metadata(i),
td->maybe_null(i), is_null);
my_b_write(file, (uchar*)"*/", 2);
}
-
- my_b_write_byte(file, '\n');
-
+
+ if (!no_fill_output)
+ my_b_write_byte(file, '\n');
+
null_bit_index++;
}
return value - value0;
@@ -2676,6 +3162,124 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
/**
+ Exchange the SET part and WHERE part for the Update events.
+ Revert the operations order for the Write and Delete events.
+ And then revert the events order from the last one to the first one.
+
+ @param[in] print_event_info PRINT_EVENT_INFO
+ @param[in] rows_buff Packed event buff
+*/
+
+void Rows_log_event::change_to_flashback_event(PRINT_EVENT_INFO *print_event_info,
+ uchar *rows_buff, Log_event_type ev_type)
+{
+ Table_map_log_event *map;
+ table_def *td;
+ DYNAMIC_ARRAY rows_arr;
+ uchar *swap_buff1, *swap_buff2;
+ uchar *rows_pos= rows_buff + m_rows_before_size;
+
+ if (!(map= print_event_info->m_table_map.get_table(m_table_id)) ||
+ !(td= map->create_table_def()))
+ return;
+
+ /* If the write rows event contained no values for the AI */
+ if (((get_general_type_code() == WRITE_ROWS_EVENT) && (m_rows_buf==m_rows_end)))
+ goto end;
+
+ (void) my_init_dynamic_array(&rows_arr, sizeof(LEX_STRING), 8, 8, MYF(0));
+
+ for (uchar *value= m_rows_buf; value < m_rows_end; )
+ {
+ uchar *start_pos= value;
+ size_t length1= 0;
+ if (!(length1= print_verbose_one_row(NULL, td, print_event_info,
+ &m_cols, value,
+ (const uchar*) "", TRUE)))
+ {
+ fprintf(stderr, "\nError row length: %zu\n", length1);
+ exit(1);
+ }
+ value+= length1;
+
+ swap_buff1= (uchar *) my_malloc(length1, MYF(0));
+ if (!swap_buff1)
+ {
+ fprintf(stderr, "\nError: Out of memory. "
+ "Could not exchange to flashback event.\n");
+ exit(1);
+ }
+ memcpy(swap_buff1, start_pos, length1);
+
+ // For Update_event, we have the second part
+ size_t length2= 0;
+ if (ev_type == UPDATE_ROWS_EVENT ||
+ ev_type == UPDATE_ROWS_EVENT_V1)
+ {
+ if (!(length2= print_verbose_one_row(NULL, td, print_event_info,
+ &m_cols, value,
+ (const uchar*) "", TRUE)))
+ {
+ fprintf(stderr, "\nError row length: %zu\n", length2);
+ exit(1);
+ }
+ value+= length2;
+
+ swap_buff2= (uchar *) my_malloc(length2, MYF(0));
+ if (!swap_buff2)
+ {
+ fprintf(stderr, "\nError: Out of memory. "
+ "Could not exchange to flashback event.\n");
+ exit(1);
+ }
+ memcpy(swap_buff2, start_pos + length1, length2); // WHERE part
+ }
+
+ if (ev_type == UPDATE_ROWS_EVENT ||
+ ev_type == UPDATE_ROWS_EVENT_V1)
+ {
+ /* Swap SET and WHERE part */
+ memcpy(start_pos, swap_buff2, length2);
+ memcpy(start_pos + length2, swap_buff1, length1);
+ }
+
+ /* Free tmp buffers */
+ my_free(swap_buff1);
+ if (ev_type == UPDATE_ROWS_EVENT ||
+ ev_type == UPDATE_ROWS_EVENT_V1)
+ my_free(swap_buff2);
+
+ /* Copying one row into a buff, and pushing into the array */
+ LEX_STRING one_row;
+
+ one_row.length= length1 + length2;
+ one_row.str= (char *) my_malloc(one_row.length, MYF(0));
+ memcpy(one_row.str, start_pos, one_row.length);
+ if (one_row.str == NULL || push_dynamic(&rows_arr, (uchar *) &one_row))
+ {
+ fprintf(stderr, "\nError: Out of memory. "
+ "Could not push flashback event into array.\n");
+ exit(1);
+ }
+ }
+
+  /* Copying rows from the end to the beginning into event */
+ for (uint i= rows_arr.elements; i > 0; --i)
+ {
+ LEX_STRING *one_row= dynamic_element(&rows_arr, i - 1, LEX_STRING*);
+
+ memcpy(rows_pos, (uchar *)one_row->str, one_row->length);
+ rows_pos+= one_row->length;
+ my_free(one_row->str);
+ }
+ delete_dynamic(&rows_arr);
+
+end:
+ delete td;
+}
+
+
+/**
Print a row event into IO cache in human readable form (in SQL format)
@param[in] file IO cache
@@ -2687,8 +3291,12 @@ void Rows_log_event::print_verbose(IO_CACHE *file,
Table_map_log_event *map;
table_def *td;
const char *sql_command, *sql_clause1, *sql_clause2;
+ const char *sql_command_short __attribute__((unused));
Log_event_type general_type_code= get_general_type_code();
-
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ IO_CACHE *review_sql= &print_event_info->review_sql_cache;
+#endif
+
if (m_extra_row_data)
{
uint8 extra_data_len= m_extra_row_data[EXTRA_ROW_INFO_LEN_OFFSET];
@@ -2718,19 +3326,23 @@ void Rows_log_event::print_verbose(IO_CACHE *file,
sql_command= "INSERT INTO";
sql_clause1= "### SET\n";
sql_clause2= NULL;
+ sql_command_short= "I";
break;
case DELETE_ROWS_EVENT:
sql_command= "DELETE FROM";
sql_clause1= "### WHERE\n";
sql_clause2= NULL;
+ sql_command_short= "D";
break;
case UPDATE_ROWS_EVENT:
sql_command= "UPDATE";
sql_clause1= "### WHERE\n";
sql_clause2= "### SET\n";
+ sql_command_short= "U";
break;
default:
sql_command= sql_clause1= sql_clause2= NULL;
+ sql_command_short= "";
DBUG_ASSERT(0); /* Not possible */
}
@@ -2756,6 +3368,13 @@ void Rows_log_event::print_verbose(IO_CACHE *file,
my_b_printf(file, "### %s %`s.%`s\n",
sql_command,
map->get_db_name(), map->get_table_name());
+
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ if (need_flashback_review)
+ my_b_printf(review_sql, "\nINSERT INTO `%s`.`%s` VALUES ('%s'",
+ map->get_review_dbname(), map->get_review_tablename(), sql_command_short);
+#endif
+
/* Print the first image */
if (!(length= print_verbose_one_row(file, td, print_event_info,
&m_cols, value,
@@ -2772,6 +3391,17 @@ void Rows_log_event::print_verbose(IO_CACHE *file,
goto end;
value+= length;
}
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ else
+ {
+ if (need_flashback_review)
+ for (size_t i= 0; i < td->size(); i ++)
+ my_b_printf(review_sql, ", NULL");
+ }
+
+ if (need_flashback_review)
+ my_b_printf(review_sql, ")%s\n", print_event_info->delimiter);
+#endif
}
end:
@@ -2801,11 +3431,11 @@ void Log_event::print_base64(IO_CACHE* file,
PRINT_EVENT_INFO* print_event_info,
bool do_print_encoded)
{
- const uchar *ptr= (const uchar *)temp_buf;
+ uchar *ptr= (uchar *)temp_buf;
uint32 size= uint4korr(ptr + EVENT_LEN_OFFSET);
DBUG_ENTER("Log_event::print_base64");
- size_t const tmp_str_sz= base64_needed_encoded_length((int) size);
+ size_t const tmp_str_sz= my_base64_needed_encoded_length((int) size);
char *const tmp_str= (char *) my_malloc(tmp_str_sz, MYF(MY_WME));
if (!tmp_str) {
fprintf(stderr, "\nError: Out of memory. "
@@ -2813,7 +3443,52 @@ void Log_event::print_base64(IO_CACHE* file,
DBUG_VOID_RETURN;
}
- if (base64_encode(ptr, (size_t) size, tmp_str))
+ if (is_flashback)
+ {
+ uint tmp_size= size;
+ Rows_log_event *ev= NULL;
+ Log_event_type ev_type = (enum Log_event_type) ptr[EVENT_TYPE_OFFSET];
+ if (checksum_alg != BINLOG_CHECKSUM_ALG_UNDEF &&
+ checksum_alg != BINLOG_CHECKSUM_ALG_OFF)
+ tmp_size-= BINLOG_CHECKSUM_LEN; // checksum is displayed through the header
+ switch (ev_type) {
+ case WRITE_ROWS_EVENT:
+ ptr[EVENT_TYPE_OFFSET]= DELETE_ROWS_EVENT;
+ ev= new Delete_rows_log_event((const char*) ptr, tmp_size,
+ glob_description_event);
+ ev->change_to_flashback_event(print_event_info, ptr, ev_type);
+ break;
+ case WRITE_ROWS_EVENT_V1:
+ ptr[EVENT_TYPE_OFFSET]= DELETE_ROWS_EVENT_V1;
+ ev= new Delete_rows_log_event((const char*) ptr, tmp_size,
+ glob_description_event);
+ ev->change_to_flashback_event(print_event_info, ptr, ev_type);
+ break;
+ case DELETE_ROWS_EVENT:
+ ptr[EVENT_TYPE_OFFSET]= WRITE_ROWS_EVENT;
+ ev= new Write_rows_log_event((const char*) ptr, tmp_size,
+ glob_description_event);
+ ev->change_to_flashback_event(print_event_info, ptr, ev_type);
+ break;
+ case DELETE_ROWS_EVENT_V1:
+ ptr[EVENT_TYPE_OFFSET]= WRITE_ROWS_EVENT_V1;
+ ev= new Write_rows_log_event((const char*) ptr, tmp_size,
+ glob_description_event);
+ ev->change_to_flashback_event(print_event_info, ptr, ev_type);
+ break;
+ case UPDATE_ROWS_EVENT:
+ case UPDATE_ROWS_EVENT_V1:
+ ev= new Update_rows_log_event((const char*) ptr, tmp_size,
+ glob_description_event);
+ ev->change_to_flashback_event(print_event_info, ptr, ev_type);
+ break;
+ default:
+ break;
+ }
+ delete ev;
+ }
+
+ if (my_base64_encode(ptr, (size_t) size, tmp_str))
{
DBUG_ASSERT(0);
}
@@ -2821,7 +3496,12 @@ void Log_event::print_base64(IO_CACHE* file,
if (do_print_encoded)
my_b_printf(file, "%s\n", tmp_str);
- if (print_event_info->verbose)
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ if (print_event_info->verbose || need_flashback_review)
+#else
+ // Flashback need the table_map to parse the event
+ if (print_event_info->verbose || is_flashback)
+#endif
{
Rows_log_event *ev= NULL;
Log_event_type et= (Log_event_type) ptr[EVENT_TYPE_OFFSET];
@@ -2829,7 +3509,7 @@ void Log_event::print_base64(IO_CACHE* file,
if (checksum_alg != BINLOG_CHECKSUM_ALG_UNDEF &&
checksum_alg != BINLOG_CHECKSUM_ALG_OFF)
size-= BINLOG_CHECKSUM_LEN; // checksum is displayed through the header
-
+
switch (et)
{
case TABLE_MAP_EVENT:
@@ -2837,6 +3517,13 @@ void Log_event::print_base64(IO_CACHE* file,
Table_map_log_event *map;
map= new Table_map_log_event((const char*) ptr, size,
glob_description_event);
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ if (need_flashback_review)
+ {
+ map->set_review_dbname(m_review_dbname.ptr());
+ map->set_review_tablename(m_review_tablename.ptr());
+ }
+#endif
print_event_info->m_table_map.set_table(map->get_table_id(), map);
break;
}
@@ -2861,17 +3548,63 @@ void Log_event::print_base64(IO_CACHE* file,
glob_description_event);
break;
}
+ case WRITE_ROWS_COMPRESSED_EVENT:
+ case WRITE_ROWS_COMPRESSED_EVENT_V1:
+ {
+ ev= new Write_rows_compressed_log_event((const char*) ptr, size,
+ glob_description_event);
+ break;
+ }
+ case UPDATE_ROWS_COMPRESSED_EVENT:
+ case UPDATE_ROWS_COMPRESSED_EVENT_V1:
+ {
+ ev= new Update_rows_compressed_log_event((const char*) ptr, size,
+ glob_description_event);
+ break;
+ }
+ case DELETE_ROWS_COMPRESSED_EVENT:
+ case DELETE_ROWS_COMPRESSED_EVENT_V1:
+ {
+ ev= new Delete_rows_compressed_log_event((const char*) ptr, size,
+ glob_description_event);
+ break;
+ }
default:
break;
}
-
+
if (ev)
{
- ev->print_verbose(file, print_event_info);
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ ev->need_flashback_review= need_flashback_review;
+ if (print_event_info->verbose)
+ ev->print_verbose(file, print_event_info);
+ else
+ {
+ IO_CACHE tmp_cache;
+ open_cached_file(&tmp_cache, NULL, NULL, 0, MYF(MY_WME | MY_NABP));
+ ev->print_verbose(&tmp_cache, print_event_info);
+ close_cached_file(&tmp_cache);
+ }
+#else
+ if (print_event_info->verbose)
+ {
+ /*
+ Verbose event printout can't start before encoded data
+ got enquoted. This is done at this point though multi-row
+ statements remain vulnerable.
+ TODO: fix MDEV-10362 to remove this workaround.
+ */
+ if (print_event_info->base64_output_mode !=
+ BASE64_OUTPUT_DECODE_ROWS)
+ my_b_printf(file, "'%s\n", print_event_info->delimiter);
+ ev->print_verbose(file, print_event_info);
+ }
+#endif
delete ev;
}
}
-
+
my_free(tmp_str);
DBUG_VOID_RETURN;
}
@@ -3215,6 +3948,24 @@ bool Query_log_event::write()
write_footer();
}
+bool Query_compressed_log_event::write()
+{
+ const char *query_tmp = query;
+ uint32 q_len_tmp = q_len;
+ uint32 alloc_size;
+ bool ret = true;
+ q_len = alloc_size = binlog_get_compress_len(q_len);
+ query = (char *)my_safe_alloca(alloc_size);
+ if(query && !binlog_buf_compress(query_tmp, (char *)query, q_len_tmp, &q_len))
+ {
+ ret = Query_log_event::write();
+ }
+ my_safe_afree((void *)query, alloc_size);
+ query = query_tmp;
+ q_len = q_len_tmp;
+ return ret;
+}
+
/**
The simplest constructor that could possibly work. This is used for
creating static objects that have a special meaning and are invisible
@@ -3256,7 +4007,7 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
db(thd_arg->db), q_len((uint32) query_length),
thread_id(thd_arg->thread_id),
/* save the original thread id; we already know the server id */
- slave_proxy_id(thd_arg->variables.pseudo_thread_id),
+ slave_proxy_id((ulong)thd_arg->variables.pseudo_thread_id),
flags2_inited(1), sql_mode_inited(1), charset_inited(1),
sql_mode(thd_arg->variables.sql_mode),
auto_increment_increment(thd_arg->variables.auto_increment_increment),
@@ -3407,6 +4158,16 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
DBUG_PRINT("info",("Query_log_event has flags2: %lu sql_mode: %llu cache_tye: %d",
(ulong) flags2, sql_mode, cache_type));
}
+
+Query_compressed_log_event::Query_compressed_log_event(THD* thd_arg, const char* query_arg,
+ ulong query_length, bool using_trans,
+ bool direct, bool suppress_use, int errcode)
+ :Query_log_event(thd_arg, query_arg, query_length, using_trans, direct,
+ suppress_use, errcode),
+ query_buf(0)
+{
+
+}
#endif /* MYSQL_CLIENT */
@@ -3448,7 +4209,7 @@ get_str_len_and_pointer(const Log_event::Byte **src,
if (length > 0)
{
if (*src + length >= end)
- return *src + length - end + 1; // Number of bytes missing
+ return (int)(*src + length - end + 1); // Number of bytes missing
*dst= (char *)*src + 1; // Will be copied later
}
*len= length;
@@ -3549,7 +4310,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
data_len = event_len - (common_header_len + post_header_len);
buf+= common_header_len;
- slave_proxy_id= thread_id = uint4korr(buf + Q_THREAD_ID_OFFSET);
+ thread_id = slave_proxy_id = uint4korr(buf + Q_THREAD_ID_OFFSET);
exec_time = uint4korr(buf + Q_EXEC_TIME_OFFSET);
db_len = (uchar)buf[Q_DB_LEN_OFFSET]; // TODO: add a check of all *_len vars
error_code = uint2korr(buf + Q_ERR_CODE_OFFSET);
@@ -3617,14 +4378,14 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
{
CHECK_SPACE(pos, end, 8);
sql_mode_inited= 1;
- sql_mode= (ulong) uint8korr(pos); // QQ: Fix when sql_mode is ulonglong
+ sql_mode= (sql_mode_t) uint8korr(pos);
DBUG_PRINT("info",("In Query_log_event, read sql_mode: %llu", sql_mode));
pos+= 8;
break;
}
case Q_CATALOG_NZ_CODE:
- DBUG_PRINT("info", ("case Q_CATALOG_NZ_CODE; pos: 0x%lx; end: 0x%lx",
- (ulong) pos, (ulong) end));
+ DBUG_PRINT("info", ("case Q_CATALOG_NZ_CODE; pos:%p; end:%p",
+ pos, end));
if (get_str_len_and_pointer(&pos, &catalog, &catalog_len, end))
{
DBUG_PRINT("info", ("query= 0"));
@@ -3831,6 +4592,39 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
DBUG_VOID_RETURN;
}
+Query_compressed_log_event::Query_compressed_log_event(const char *buf,
+ uint event_len,
+ const Format_description_log_event
+ *description_event,
+ Log_event_type event_type)
+ :Query_log_event(buf, event_len, description_event, event_type),
+ query_buf(NULL)
+{
+ if(query)
+ {
+ uint32 un_len=binlog_get_uncompress_len(query);
+ if (!un_len)
+ {
+ query = 0;
+ return;
+ }
+
+ /* Reserve one byte for '\0' */
+ query_buf = (Log_event::Byte*)my_malloc(ALIGN_SIZE(un_len + 1),
+ MYF(MY_WME));
+ if(query_buf &&
+ !binlog_buf_uncompress(query, (char *)query_buf, q_len, &un_len))
+ {
+ query_buf[un_len] = 0;
+ query = (const char *)query_buf;
+ q_len = un_len;
+ }
+ else
+ {
+ query= 0;
+ }
+ }
+}
/*
Replace a binlog event read into a packet with a dummy event. Either a
@@ -4093,6 +4887,8 @@ void Query_log_event::print_query_header(IO_CACHE* file,
"@@session.unique_checks", &need_comma);
print_set_option(file, tmp, OPTION_NOT_AUTOCOMMIT, ~flags2,
"@@session.autocommit", &need_comma);
+ print_set_option(file, tmp, OPTION_NO_CHECK_CONSTRAINT_CHECKS, ~flags2,
+ "@@session.check_constraint_checks", &need_comma);
my_b_printf(file,"%s\n", print_event_info->delimiter);
print_event_info->flags2= flags2;
}
@@ -4115,8 +4911,9 @@ void Query_log_event::print_query_header(IO_CACHE* file,
(unlikely(print_event_info->sql_mode != sql_mode ||
!print_event_info->sql_mode_inited)))
{
- my_b_printf(file,"SET @@session.sql_mode=%lu%s\n",
- (ulong)sql_mode, print_event_info->delimiter);
+ char llbuff[22];
+ my_b_printf(file,"SET @@session.sql_mode=%s%s\n",
+ ullstr(sql_mode, llbuff), print_event_info->delimiter);
print_event_info->sql_mode= sql_mode;
print_event_info->sql_mode_inited= 1;
}
@@ -4186,7 +4983,7 @@ void Query_log_event::print_query_header(IO_CACHE* file,
void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
- Write_on_release_cache cache(&print_event_info->head_cache, file);
+ Write_on_release_cache cache(&print_event_info->head_cache, file, 0, this);
/**
reduce the size of io cache so that the write function is called
@@ -4195,8 +4992,24 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
DBUG_EXECUTE_IF ("simulate_file_write_error",
{(&cache)->write_pos= (&cache)->write_end- 500;});
print_query_header(&cache, print_event_info);
- my_b_write(&cache, (uchar*) query, q_len);
- my_b_printf(&cache, "\n%s\n", print_event_info->delimiter);
+ if (!is_flashback)
+ {
+ my_b_write(&cache, (uchar*) query, q_len);
+ my_b_printf(&cache, "\n%s\n", print_event_info->delimiter);
+ }
+ else // is_flashback == 1
+ {
+ if (strcmp("BEGIN", query) == 0)
+ {
+ my_b_write(&cache, (uchar*) "COMMIT", 6);
+ my_b_printf(&cache, "\n%s\n", print_event_info->delimiter);
+ }
+ else if (strcmp("COMMIT", query) == 0)
+ {
+ my_b_write(&cache, (uchar*) "BEGIN", 5);
+ my_b_printf(&cache, "\n%s\n", print_event_info->delimiter);
+ }
+ }
}
#endif /* MYSQL_CLIENT */
@@ -4286,9 +5099,8 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
*/
thd->catalog= catalog_len ? (char *) catalog : (char *)"";
- int len_error;
- size_t valid_len= system_charset_info->cset->well_formed_len(system_charset_info,
- db, db + db_len, db_len, &len_error);
+ size_t valid_len= Well_formed_prefix(system_charset_info,
+ db, db_len, NAME_LEN).length();
if (valid_len != db_len)
{
@@ -4362,8 +5174,8 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
*/
if (sql_mode_inited)
thd->variables.sql_mode=
- (ulong) ((thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE) |
- (sql_mode & ~(ulong) MODE_NO_DIR_IN_CREATE));
+ (sql_mode_t) ((thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE) |
+ (sql_mode & ~(ulong) MODE_NO_DIR_IN_CREATE));
if (charset_inited)
{
rpl_sql_thread_info *sql_info= thd->system_thread_info.rpl_sql_info;
@@ -4529,7 +5341,8 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
}
thd->enable_slow_log= true;
- mysql_parse(thd, thd->query(), thd->query_length(), &parser_state);
+ mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
+ FALSE, FALSE);
/* Finalize server status flags after executing a statement. */
thd->update_server_status();
log_slow_statement(thd);
@@ -4613,7 +5426,7 @@ compare_errors:
"Error on master: message (format)='%s' error code=%d ; "
"Error on slave: actual message='%s', error code=%d. "
"Default database: '%s'. Query: '%s'",
- ER_SAFE_THD(thd, expected_error),
+ ER_THD(thd, expected_error),
expected_error,
actual_error ? thd->get_stmt_da()->message() : "no error",
actual_error,
@@ -5141,6 +5954,15 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
post_header_len[GTID_LIST_EVENT-1]= GTID_LIST_HEADER_LEN;
post_header_len[START_ENCRYPTION_EVENT-1]= START_ENCRYPTION_HEADER_LEN;
+ //compressed event
+ post_header_len[QUERY_COMPRESSED_EVENT-1]= QUERY_HEADER_LEN;
+ post_header_len[WRITE_ROWS_COMPRESSED_EVENT-1]= ROWS_HEADER_LEN_V2;
+ post_header_len[UPDATE_ROWS_COMPRESSED_EVENT-1]= ROWS_HEADER_LEN_V2;
+ post_header_len[DELETE_ROWS_COMPRESSED_EVENT-1]= ROWS_HEADER_LEN_V2;
+ post_header_len[WRITE_ROWS_COMPRESSED_EVENT_V1-1]= ROWS_HEADER_LEN_V1;
+ post_header_len[UPDATE_ROWS_COMPRESSED_EVENT_V1-1]= ROWS_HEADER_LEN_V1;
+ post_header_len[DELETE_ROWS_COMPRESSED_EVENT_V1-1]= ROWS_HEADER_LEN_V1;
+
// Sanity-check that all post header lengths are initialized.
int i;
for (i=0; i<number_of_event_types; i++)
@@ -5767,7 +6589,7 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex,
thd_arg->thread_specific_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0,
using_trans),
thread_id(thd_arg->thread_id),
- slave_proxy_id(thd_arg->variables.pseudo_thread_id),
+ slave_proxy_id((ulong)thd_arg->variables.pseudo_thread_id),
num_fields(0),fields(0),
field_lens(0),field_block_len(0),
table_name(table_name_arg ? table_name_arg : ""),
@@ -5891,7 +6713,7 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len,
char* buf_end = (char*)buf + event_len;
/* this is the beginning of the post-header */
const char* data_head = buf + description_event->common_header_len;
- slave_proxy_id= thread_id= uint4korr(data_head + L_THREAD_ID_OFFSET);
+ thread_id= slave_proxy_id= uint4korr(data_head + L_THREAD_ID_OFFSET);
exec_time = uint4korr(data_head + L_EXEC_TIME_OFFSET);
skip_lines = uint4korr(data_head + L_SKIP_LINES_OFFSET);
table_name_len = (uint)data_head[L_TBL_LEN_OFFSET];
@@ -6263,7 +7085,7 @@ int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi,
update it inside mysql_load().
*/
List<Item> tmp_list;
- if (open_temporary_tables(thd, &tables) ||
+ if (thd->open_temporary_tables(&tables) ||
mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list,
handle_dup, ignore, net != 0))
thd->is_slave_error= 1;
@@ -6490,6 +7312,9 @@ bool Rotate_log_event::write()
in a A -> B -> A setup.
The NOTES below is a wrong comment which will disappear when 4.1 is merged.
+ This must only be called from the Slave SQL thread, since it calls
+ Relay_log_info::flush().
+
@retval
0 ok
1 error
@@ -6545,7 +7370,7 @@ int Rotate_log_event::do_update_pos(rpl_group_info *rgi)
(ulong) rli->group_master_log_pos));
mysql_mutex_unlock(&rli->data_lock);
rpl_global_gtid_slave_state->record_and_update_gtid(thd, rgi);
- error= flush_relay_log_info(rli);
+ error= rli->flush();
/*
Reset thd->variables.option_bits and sql_mode etc, because this could
@@ -6648,7 +7473,7 @@ Binlog_checkpoint_log_event::Binlog_checkpoint_log_event(
uint8 header_size= description_event->common_header_len;
uint8 post_header_len=
description_event->post_header_len[BINLOG_CHECKPOINT_EVENT-1];
- if (event_len < header_size + post_header_len ||
+ if (event_len < (uint) header_size + (uint) post_header_len ||
post_header_len < BINLOG_CHECKPOINT_HEADER_LEN)
return;
buf+= header_size;
@@ -6686,7 +7511,7 @@ Gtid_log_event::Gtid_log_event(const char *buf, uint event_len,
{
uint8 header_size= description_event->common_header_len;
uint8 post_header_len= description_event->post_header_len[GTID_EVENT-1];
- if (event_len < header_size + post_header_len ||
+ if (event_len < (uint) header_size + (uint) post_header_len ||
post_header_len < GTID_HEADER_LEN)
return;
@@ -6943,11 +7768,11 @@ void
Gtid_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
{
Write_on_release_cache cache(&print_event_info->head_cache, file,
- Write_on_release_cache::FLUSH_F);
+ Write_on_release_cache::FLUSH_F, this);
char buf[21];
char buf2[21];
- if (!print_event_info->short_form)
+ if (!print_event_info->short_form && !is_flashback)
{
print_header(&cache, print_event_info, FALSE);
longlong10_to_str(seq_no, buf, 10);
@@ -6993,11 +7818,12 @@ Gtid_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
print_event_info->server_id_printed= true;
}
- my_b_printf(&cache, "/*!100001 SET @@session.gtid_seq_no=%s*/%s\n",
- buf, print_event_info->delimiter);
+ if (!is_flashback)
+ my_b_printf(&cache, "/*!100001 SET @@session.gtid_seq_no=%s*/%s\n",
+ buf, print_event_info->delimiter);
}
if (!(flags2 & FL_STANDALONE))
- my_b_printf(&cache, "BEGIN\n%s\n", print_event_info->delimiter);
+ my_b_printf(&cache, is_flashback ? "COMMIT\n%s\n" : "BEGIN\n%s\n", print_event_info->delimiter);
}
#endif /* MYSQL_SERVER */
@@ -7013,7 +7839,7 @@ Gtid_list_log_event::Gtid_list_log_event(const char *buf, uint event_len,
uint32 val;
uint8 header_size= description_event->common_header_len;
uint8 post_header_len= description_event->post_header_len[GTID_LIST_EVENT-1];
- if (event_len < header_size + post_header_len ||
+ if (event_len < (uint) header_size + (uint) post_header_len ||
post_header_len < GTID_LIST_HEADER_LEN)
return;
@@ -7640,7 +8466,7 @@ bool Xid_log_event::write()
void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
Write_on_release_cache cache(&print_event_info->head_cache, file,
- Write_on_release_cache::FLUSH_F);
+ Write_on_release_cache::FLUSH_F, this);
if (!print_event_info->short_form)
{
@@ -7650,7 +8476,7 @@ void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
print_header(&cache, print_event_info, FALSE);
my_b_printf(&cache, "\tXid = %s\n", buf);
}
- my_b_printf(&cache, "COMMIT%s\n", print_event_info->delimiter);
+ my_b_printf(&cache, is_flashback ? "BEGIN%s\n" : "COMMIT%s\n", print_event_info->delimiter);
}
#endif /* MYSQL_CLIENT */
@@ -7941,7 +8767,6 @@ User_var_log_event(const char* buf, uint event_len,
val_len= uint4korr(buf + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE +
UV_CHARSET_NUMBER_SIZE);
-
/**
We need to check if this is from an old server
that did not pack information for flags.
@@ -7953,7 +8778,7 @@ User_var_log_event(const char* buf, uint event_len,
Old events will not have this extra byte, thence,
we keep the flags set to UNDEF_F.
*/
- uint bytes_read= ((val + val_len) - buf_start);
+ size_t bytes_read= (val + val_len) - buf_start;
if ((data_written - bytes_read) > 0)
{
flags= (uint) *(buf + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE +
@@ -8116,7 +8941,7 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
return;
str_to_hex(hex_str, val, val_len);
/*
- For proper behaviour when mysqlbinlog|mysql, we need to explicitely
+ For proper behaviour when mysqlbinlog|mysql, we need to explicitly
specify the variable's collation. It will however cause problems when
people want to mysqlbinlog|mysql into another server not supporting the
character set. But there's not much to do about this and it's unlikely.
@@ -8316,7 +9141,7 @@ void Unknown_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info
void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
Write_on_release_cache cache(&print_event_info->head_cache, file,
- Write_on_release_cache::FLUSH_F);
+ Write_on_release_cache::FLUSH_F, this);
if (print_event_info->short_form)
return;
@@ -8338,6 +9163,9 @@ void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
were we must do this cleaning is in
Start_log_event_v3::do_apply_event(), not here. Because if we come
here, the master was sane.
+
+ This must only be called from the Slave SQL thread, since it calls
+ Relay_log_info::flush().
*/
int Stop_log_event::do_update_pos(rpl_group_info *rgi)
@@ -8358,7 +9186,7 @@ int Stop_log_event::do_update_pos(rpl_group_info *rgi)
{
rpl_global_gtid_slave_state->record_and_update_gtid(thd, rgi);
rli->inc_group_relay_log_pos(0, rgi);
- if (flush_relay_log_info(rli))
+ if (rli->flush())
error= 1;
}
DBUG_RETURN(error);
@@ -9322,7 +10150,7 @@ Execute_load_query_log_event::do_apply_event(rpl_group_info *rgi)
p= strmake(p, STRING_WITH_LEN(" INTO "));
p= strmake(p, query+fn_pos_end, q_len-fn_pos_end);
- error= Query_log_event::do_apply_event(rgi, buf, p-buf);
+ error= Query_log_event::do_apply_event(rgi, buf, (uint32)(p-buf));
/* Forging file name for deletion in same buffer */
*fname_end= 0;
@@ -9468,9 +10296,11 @@ Rows_log_event::Rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid,
(!tbl_arg && !cols && tid == ~0UL));
if (thd_arg->variables.option_bits & OPTION_NO_FOREIGN_KEY_CHECKS)
- set_flags(NO_FOREIGN_KEY_CHECKS_F);
+ set_flags(NO_FOREIGN_KEY_CHECKS_F);
if (thd_arg->variables.option_bits & OPTION_RELAXED_UNIQUE_CHECKS)
- set_flags(RELAXED_UNIQUE_CHECKS_F);
+ set_flags(RELAXED_UNIQUE_CHECKS_F);
+ if (thd_arg->variables.option_bits & OPTION_NO_CHECK_CONSTRAINT_CHECKS)
+ set_flags(NO_CHECK_CONSTRAINT_CHECKS_F);
/* if my_bitmap_init fails, caught in is_valid() */
if (likely(!my_bitmap_init(&m_cols,
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
@@ -9510,7 +10340,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
{
DBUG_ENTER("Rows_log_event::Rows_log_event(const char*,...)");
uint8 const common_header_len= description_event->common_header_len;
- Log_event_type event_type= (Log_event_type) buf[EVENT_TYPE_OFFSET];
+ Log_event_type event_type= (Log_event_type)(uchar)buf[EVENT_TYPE_OFFSET];
m_type= event_type;
uint8 const post_header_len= description_event->post_header_len[event_type-1];
@@ -9534,6 +10364,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
post_start+= RW_FLAGS_OFFSET;
}
+ m_flags_pos= post_start - buf;
m_flags= uint2korr(post_start);
post_start+= 2;
@@ -9617,8 +10448,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
m_cols_ai.bitmap= m_cols.bitmap; /* See explanation in is_valid() */
- if ((event_type == UPDATE_ROWS_EVENT) ||
- (event_type == UPDATE_ROWS_EVENT_V1))
+ if (LOG_EVENT_IS_UPDATE_ROW(event_type))
{
DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
@@ -9651,7 +10481,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
DBUG_VOID_RETURN;
}
size_t const data_size= event_len - read_size;
- DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu",
+ DBUG_PRINT("info",("m_table_id: %llu m_flags: %d m_width: %lu data_size: %lu",
m_table_id, m_flags, m_width, (ulong) data_size));
m_rows_buf= (uchar*) my_malloc(data_size, MYF(MY_WME));
@@ -9663,6 +10493,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
m_rows_end= m_rows_buf + data_size;
m_rows_cur= m_rows_end;
memcpy(m_rows_buf, ptr_rows_data, data_size);
+ m_rows_before_size= ptr_rows_data - (const uchar *) buf; // Size of the part that precedes the SET part
}
else
m_cols.bitmap= 0; // to not free it
@@ -9670,6 +10501,35 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
DBUG_VOID_RETURN;
}
+void Rows_log_event::uncompress_buf()
+{
+ uint32 un_len = binlog_get_uncompress_len((char *)m_rows_buf);
+ if (!un_len)
+ return;
+
+ uchar *new_buf= (uchar*) my_malloc(ALIGN_SIZE(un_len), MYF(MY_WME));
+ if (new_buf)
+ {
+ if(!binlog_buf_uncompress((char *)m_rows_buf, (char *)new_buf,
+ (uint32)(m_rows_cur - m_rows_buf), &un_len))
+ {
+ my_free(m_rows_buf);
+ m_rows_buf = new_buf;
+#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
+ m_curr_row= m_rows_buf;
+#endif
+ m_rows_end= m_rows_buf + un_len;
+ m_rows_cur= m_rows_end;
+ return;
+ }
+ else
+ {
+ my_free(new_buf);
+ }
+ }
+ m_cols.bitmap= 0; // catch it in is_valid
+}
+
Rows_log_event::~Rows_log_event()
{
if (m_cols.bitmap == m_bitbuf) // no my_malloc happened
@@ -9687,12 +10547,13 @@ int Rows_log_event::get_data_size()
uchar *end= net_store_length(buf, m_width);
DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
- return 6 + no_bytes_in_map(&m_cols) + (end - buf) +
+ return (int)(6 + no_bytes_in_map(&m_cols) + (end - buf) +
(general_type_code == UPDATE_ROWS_EVENT ? no_bytes_in_map(&m_cols_ai) : 0) +
- (m_rows_cur - m_rows_buf););
+ m_rows_cur - m_rows_buf););
int data_size= 0;
- bool is_v2_event= get_type_code() > DELETE_ROWS_EVENT_V1;
+ Log_event_type type = get_type_code();
+ bool is_v2_event= LOG_EVENT_IS_ROW_V2(type);
if (is_v2_event)
{
data_size= ROWS_HEADER_LEN_V2 +
@@ -9724,7 +10585,7 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length)
would save binlog space. TODO
*/
DBUG_ENTER("Rows_log_event::do_add_row_data");
- DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
+ DBUG_PRINT("enter", ("row_data:%p length: %lu", row_data,
(ulong) length));
/*
@@ -9754,7 +10615,7 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length)
if (static_cast<size_t>(m_rows_end - m_rows_cur) <= length)
{
size_t const block_size= 1024;
- ulong cur_size= m_rows_cur - m_rows_buf;
+ size_t cur_size= m_rows_cur - m_rows_buf;
DBUG_EXECUTE_IF("simulate_too_big_row_case1",
cur_size= UINT_MAX32 - (block_size * 10);
length= UINT_MAX32 - (block_size * 10););
@@ -9767,21 +10628,21 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length)
DBUG_EXECUTE_IF("simulate_too_big_row_case4",
cur_size= UINT_MAX32 - (block_size * 10);
length= (block_size * 10) - block_size + 1;);
- ulong remaining_space= UINT_MAX32 - cur_size;
+ size_t remaining_space= UINT_MAX32 - cur_size;
/* Check that the new data fits within remaining space and we can add
block_size without wrapping.
*/
- if (length > remaining_space ||
+ if (cur_size > UINT_MAX32 || length > remaining_space ||
((length + block_size) > remaining_space))
{
sql_print_error("The row data is greater than 4GB, which is too big to "
"write to the binary log.");
DBUG_RETURN(ER_BINLOG_ROW_LOGGING_FAILED);
}
- ulong const new_alloc=
+ size_t const new_alloc=
block_size * ((cur_size + length + block_size - 1) / block_size);
- uchar* const new_buf= (uchar*)my_realloc((uchar*)m_rows_buf, (uint) new_alloc,
+ uchar* const new_buf= (uchar*)my_realloc((uchar*)m_rows_buf, new_alloc,
MYF(MY_ALLOW_ZERO_PTR|MY_WME));
if (unlikely(!new_buf))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -9833,12 +10694,12 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
DBUG_ENTER("Rows_log_event::do_apply_event(Relay_log_info*)");
int error= 0;
/*
- If m_table_id == ~0UL, then we have a dummy event that does not
+ If m_table_id == ~0ULL, then we have a dummy event that does not
contain any data. In that case, we just remove all tables in the
tables_to_lock list, close the thread tables, and return with
success.
*/
- if (m_table_id == ~0UL)
+ if (m_table_id == ~0ULL)
{
/*
This one is supposed to be set: just an extra check so that
@@ -9907,6 +10768,12 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
thd->variables.option_bits|= OPTION_RELAXED_UNIQUE_CHECKS;
else
thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
+
+ if (get_flags(NO_CHECK_CONSTRAINT_CHECKS_F))
+ thd->variables.option_bits|= OPTION_NO_CHECK_CONSTRAINT_CHECKS;
+ else
+ thd->variables.option_bits&= ~OPTION_NO_CHECK_CONSTRAINT_CHECKS;
+
/* A small test to verify that objects have consistent types */
DBUG_ASSERT(sizeof(thd->variables.option_bits) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
@@ -10098,8 +10965,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
table= m_table= rgi->m_table_map.get_table(m_table_id);
- DBUG_PRINT("debug", ("m_table: 0x%lx, m_table_id: %lu%s",
- (ulong) m_table, m_table_id,
+ DBUG_PRINT("debug", ("m_table:%p, m_table_id: %llu%s",
+ m_table, m_table_id,
table && master_had_triggers ?
" (master had triggers)" : ""));
if (table)
@@ -10163,7 +11030,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
extra columns on the slave. In that case, do not force
MODE_NO_AUTO_VALUE_ON_ZERO.
*/
- ulonglong saved_sql_mode= thd->variables.sql_mode;
+ sql_mode_t saved_sql_mode= thd->variables.sql_mode;
if (!is_auto_inc_in_extra_columns())
thd->variables.sql_mode= MODE_NO_AUTO_VALUE_ON_ZERO;
@@ -10219,8 +11086,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
m_curr_row_end.
*/
- DBUG_PRINT("info", ("curr_row: 0x%lu; curr_row_end: 0x%lu; rows_end: 0x%lu",
- (ulong) m_curr_row, (ulong) m_curr_row_end, (ulong) m_rows_end));
+ DBUG_PRINT("info", ("curr_row: %p; curr_row_end: %p; rows_end:%p",
+ m_curr_row, m_curr_row_end, m_rows_end));
if (!m_curr_row_end && !error)
error= unpack_current_row(rgi);
@@ -10456,14 +11323,14 @@ Rows_log_event::do_update_pos(rpl_group_info *rgi)
bool Rows_log_event::write_data_header()
{
uchar buf[ROWS_HEADER_LEN_V2]; // No need to init the buffer
- DBUG_ASSERT(m_table_id != ~0UL);
+ DBUG_ASSERT(m_table_id != ~0ULL);
DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
{
int4store(buf + 0, m_table_id);
int2store(buf + 4, m_flags);
return (write_data(buf, 6));
});
- int6store(buf + RW_MAPID_OFFSET, (ulonglong)m_table_id);
+ int6store(buf + RW_MAPID_OFFSET, m_table_id);
int2store(buf + RW_FLAGS_OFFSET, m_flags);
return write_data(buf, ROWS_HEADER_LEN);
}
@@ -10501,6 +11368,27 @@ bool Rows_log_event::write_data_body()
return res;
}
+
+bool Rows_log_event::write_compressed()
+{
+ uchar *m_rows_buf_tmp = m_rows_buf;
+ uchar *m_rows_cur_tmp = m_rows_cur;
+ bool ret = true;
+ uint32 comlen, alloc_size;
+ comlen= alloc_size= binlog_get_compress_len((uint32)(m_rows_cur_tmp - m_rows_buf_tmp));
+ m_rows_buf = (uchar *)my_safe_alloca(alloc_size);
+ if(m_rows_buf &&
+ !binlog_buf_compress((const char *)m_rows_buf_tmp, (char *)m_rows_buf,
+ (uint32)(m_rows_cur_tmp - m_rows_buf_tmp), &comlen))
+ {
+ m_rows_cur= comlen + m_rows_buf;
+ ret= Log_event::write();
+ }
+ my_safe_afree(m_rows_buf, alloc_size);
+ m_rows_buf= m_rows_buf_tmp;
+ m_rows_cur= m_rows_cur_tmp;
+ return ret;
+}
#endif
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
@@ -10510,12 +11398,28 @@ void Rows_log_event::pack_info(Protocol *protocol)
char const *const flagstr=
get_flags(STMT_END_F) ? " flags: STMT_END_F" : "";
size_t bytes= my_snprintf(buf, sizeof(buf),
- "table_id: %lu%s", m_table_id, flagstr);
+ "table_id: %llu%s", m_table_id, flagstr);
protocol->store(buf, bytes, &my_charset_bin);
}
#endif
#ifdef MYSQL_CLIENT
+class my_String : public String
+{
+public:
+ my_String() : String(), error(false) {};
+ bool error;
+
+ bool append(const LEX_STRING *ls)
+ {
+ return error= error || String::append(ls);
+ }
+ bool append(IO_CACHE* file, uint32 arg_length)
+ {
+ return error= error || String::append(file, arg_length);
+ }
+};
+
/**
Print an event "body" cache to @c file possibly in two fragments.
Each fragment is optionally wrapped, per @c do_wrap, to produce an SQL statement.
@@ -10528,25 +11432,40 @@ void Rows_log_event::pack_info(Protocol *protocol)
The function signals any error by setting @c cache->error to -1.
*/
-void copy_cache_to_file_wrapped(FILE *file,
- IO_CACHE *body,
- bool do_wrap,
- const char *delimiter)
+void copy_cache_to_string_wrapped(IO_CACHE *cache,
+ LEX_STRING *to,
+ bool do_wrap,
+ const char *delimiter,
+ bool is_verbose)
{
const char str_binlog[]= "\nBINLOG '\n";
const char fmt_delim[]= "'%s\n";
const char fmt_n_delim[]= "\n'%s";
- const my_off_t cache_size= my_b_tell(body);
+ const char fmt_frag[]= "\nSET @binlog_fragment_%d ='\n";
+ const my_off_t cache_size= my_b_tell(cache);
+ my_String ret;
+ /*
+ Substring buffer to hold parts of the encoded, possibly defragmented,
+ event; its size is a rough upper-bound estimate.
+ */
+ char tmp[sizeof(str_binlog) + 2*(sizeof(fmt_frag) + 2 /* %d */) +
+ sizeof(fmt_delim) + sizeof(fmt_n_delim) +
+ PRINT_EVENT_INFO::max_delimiter_size];
+ LEX_STRING str_tmp= { tmp, 0 };
- if (reinit_io_cache(body, READ_CACHE, 0L, FALSE, FALSE))
+ if (reinit_io_cache(cache, READ_CACHE, 0L, FALSE, FALSE))
{
- body->error= -1;
+ cache->error= -1;
goto end;
}
if (!do_wrap)
{
- my_b_copy_to_file(body, file, SIZE_T_MAX);
+ if (ret.append(cache, (uint32) cache->end_of_file))
+ {
+ cache->error= -1;
+ goto end;
+ }
}
else if (4 + sizeof(str_binlog) + cache_size + sizeof(fmt_delim) >
opt_binlog_rows_event_max_encoded_size)
@@ -10562,39 +11481,43 @@ void copy_cache_to_file_wrapped(FILE *file,
limit. The estimate includes the maximum packet header
contribution of non-compressed packet.
*/
- const char fmt_frag[]= "\nSET @binlog_fragment_%d ='\n";
+ str_tmp.length= sprintf(str_tmp.str, fmt_frag, 0);
+ ret.append(&str_tmp);
+ ret.append(cache, (uint32) cache_size/2 + 1);
+ str_tmp.length= sprintf(str_tmp.str, fmt_n_delim, delimiter);
+ ret.append(&str_tmp);
- my_fprintf(file, fmt_frag, 0);
- if (my_b_copy_to_file(body, file, cache_size/2 + 1))
+ str_tmp.length= sprintf(str_tmp.str, fmt_frag, 1);
+ ret.append(&str_tmp);
+ ret.append(cache, uint32(cache->end_of_file - (cache_size/2 + 1)));
+ if (!is_verbose)
{
- body->error= -1;
- goto end;
+ str_tmp.length= sprintf(str_tmp.str, fmt_delim, delimiter);
+ ret.append(&str_tmp);
}
- my_fprintf(file, fmt_n_delim, delimiter);
-
- my_fprintf(file, fmt_frag, 1);
- if (my_b_copy_to_file(body, file, SIZE_T_MAX))
- {
- body->error= -1;
- goto end;
- }
- my_fprintf(file, fmt_delim, delimiter);
-
- my_fprintf(file, "BINLOG @binlog_fragment_0, @binlog_fragment_1%s\n",
- delimiter);
+ str_tmp.length= sprintf(str_tmp.str, "BINLOG @binlog_fragment_0, @binlog_fragment_1%s\n",
+ delimiter);
+ ret.append(&str_tmp);
}
else
{
- my_fprintf(file, str_binlog);
- if (my_b_copy_to_file(body, file, SIZE_T_MAX))
+ str_tmp.length= sprintf(str_tmp.str, str_binlog);
+ ret.append(&str_tmp);
+ ret.append(cache, (uint32) cache->end_of_file);
+ if (!is_verbose)
{
- body->error= -1;
- goto end;
+ str_tmp.length= sprintf(str_tmp.str, fmt_delim, delimiter);
+ ret.append(&str_tmp);
}
- my_fprintf(file, fmt_delim, delimiter);
}
- reinit_io_cache(body, WRITE_CACHE, 0, FALSE, TRUE);
+ to->length= ret.length();
+ to->str= ret.release();
+
+ reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE);
+
+ if (ret.error)
+ cache->error= -1;
end:
return;
}
@@ -10634,29 +11557,47 @@ void Rows_log_event::print_helper(FILE *file,
{
IO_CACHE *const head= &print_event_info->head_cache;
IO_CACHE *const body= &print_event_info->body_cache;
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ IO_CACHE *const sql= &print_event_info->review_sql_cache;
+#endif
bool do_print_encoded=
print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS &&
!print_event_info->short_form;
if (!print_event_info->short_form)
{
+
bool const last_stmt_event= get_flags(STMT_END_F);
+ char llbuff[22];
+
print_header(head, print_event_info, !last_stmt_event);
- my_b_printf(head, "\t%s: table id %lu%s\n",
- name, m_table_id,
+ my_b_printf(head, "\t%s: table id %s%s\n",
+ name, ullstr(m_table_id, llbuff),
last_stmt_event ? " flags: STMT_END_F" : "");
print_base64(body, print_event_info, do_print_encoded);
}
if (get_flags(STMT_END_F))
{
- if (copy_event_cache_to_file_and_reinit(head, file))
+ LEX_STRING tmp_str;
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ copy_event_cache_to_string_and_reinit(sql, &tmp_str);
+ output_buf.append(&tmp_str);
+ my_free(tmp_str.str);
+#endif
+ if (copy_event_cache_to_string_and_reinit(head, &tmp_str))
{
head->error= -1;
return;
}
- copy_cache_to_file_wrapped(file, body, do_print_encoded,
- print_event_info->delimiter);
+ output_buf.append(&tmp_str);
+ my_free(tmp_str.str);
+
+ copy_cache_to_string_wrapped(body, &tmp_str, do_print_encoded,
+ print_event_info->delimiter,
+ print_event_info->verbose);
+ output_buf.append(&tmp_str);
+ my_free(tmp_str.str);
}
}
#endif
@@ -10671,7 +11612,7 @@ Annotate_rows_log_event::Annotate_rows_log_event(THD *thd,
bool direct)
: Log_event(thd, 0, using_trans),
m_save_thd_query_txt(0),
- m_save_thd_query_len(0)
+ m_save_thd_query_len(0), m_saved_thd_query(false)
{
m_query_txt= thd->query();
m_query_len= thd->query_length();
@@ -10685,7 +11626,7 @@ Annotate_rows_log_event::Annotate_rows_log_event(const char *buf,
const Format_description_log_event *desc)
: Log_event(buf, desc),
m_save_thd_query_txt(0),
- m_save_thd_query_len(0)
+ m_save_thd_query_len(0), m_saved_thd_query(false)
{
m_query_len= event_len - desc->common_header_len;
m_query_txt= (char*) buf + desc->common_header_len;
@@ -10694,7 +11635,7 @@ Annotate_rows_log_event::Annotate_rows_log_event(const char *buf,
Annotate_rows_log_event::~Annotate_rows_log_event()
{
#ifndef MYSQL_CLIENT
- if (m_save_thd_query_txt)
+ if (m_saved_thd_query)
thd->set_query(m_save_thd_query_txt, m_save_thd_query_len);
#endif
}
@@ -10778,8 +11719,10 @@ void Annotate_rows_log_event::print(FILE *file, PRINT_EVENT_INFO *pinfo)
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
int Annotate_rows_log_event::do_apply_event(rpl_group_info *rgi)
{
+ rgi->free_annotate_event();
m_save_thd_query_txt= thd->query();
m_save_thd_query_len= thd->query_length();
+ m_saved_thd_query= true;
thd->set_query(m_query_txt, m_query_len);
return 0;
}
@@ -10902,7 +11845,7 @@ Table_map_log_event::Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
uchar cbuf[MAX_INT_WIDTH];
uchar *cbuf_end;
DBUG_ENTER("Table_map_log_event::Table_map_log_event(TABLE)");
- DBUG_ASSERT(m_table_id != ~0UL);
+ DBUG_ASSERT(m_table_id != ~0ULL);
/*
In TABLE_SHARE, "db" and "table_name" are 0-terminated (see this comment in
table.cc / alloc_table_share():
@@ -10987,7 +11930,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
#endif
m_dbnam(NULL), m_dblen(0), m_tblnam(NULL), m_tbllen(0),
m_colcnt(0), m_coltype(0),
- m_memory(NULL), m_table_id(ULONG_MAX), m_flags(0),
+ m_memory(NULL), m_table_id(ULONGLONG_MAX), m_flags(0),
m_data_size(0), m_field_metadata(0), m_field_metadata_size(0),
m_null_bits(0), m_meta_memory(NULL)
{
@@ -11024,7 +11967,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
post_start+= TM_FLAGS_OFFSET;
}
- DBUG_ASSERT(m_table_id != ~0UL);
+ DBUG_ASSERT(m_table_id != ~0ULL);
m_flags= uint2korr(post_start);
@@ -11152,7 +12095,8 @@ int Table_map_log_event::rewrite_db(const char* new_db, size_t new_len,
cnt += header_len;
// Write new db name length and new name
- *ptr++ = new_len;
+ DBUG_ASSERT(new_len < 0xff);
+ *ptr++ = (char)new_len;
memcpy(ptr, new_db, new_len + 1);
ptr += new_len + 1;
cnt += m_dblen + 2;
@@ -11414,9 +12358,7 @@ int Table_map_log_event::do_apply_event(rpl_group_info *rgi)
For the cases in which a 'BINLOG' statement is set to
execute in a user session
*/
- my_printf_error(ER_SLAVE_FATAL_ERROR,
- ER_THD(thd, ER_SLAVE_FATAL_ERROR),
- MYF(0), buf);
+ my_error(ER_SLAVE_FATAL_ERROR, MYF(0), buf);
}
my_free(memory);
@@ -11446,7 +12388,7 @@ int Table_map_log_event::do_update_pos(rpl_group_info *rgi)
#ifndef MYSQL_CLIENT
bool Table_map_log_event::write_data_header()
{
- DBUG_ASSERT(m_table_id != ~0UL);
+ DBUG_ASSERT(m_table_id != ~0ULL);
uchar buf[TABLE_MAP_HEADER_LEN];
DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
{
@@ -11454,7 +12396,7 @@ bool Table_map_log_event::write_data_header()
int2store(buf + 4, m_flags);
return (write_data(buf, 6));
});
- int6store(buf + TM_MAPID_OFFSET, (ulonglong)m_table_id);
+ int6store(buf + TM_MAPID_OFFSET, m_table_id);
int2store(buf + TM_FLAGS_OFFSET, m_flags);
return write_data(buf, TABLE_MAP_HEADER_LEN);
}
@@ -11504,7 +12446,7 @@ void Table_map_log_event::pack_info(Protocol *protocol)
{
char buf[256];
size_t bytes= my_snprintf(buf, sizeof(buf),
- "table_id: %lu (%s.%s)",
+ "table_id: %llu (%s.%s)",
m_table_id, m_dbnam, m_tblnam);
protocol->store(buf, bytes, &my_charset_bin);
}
@@ -11519,10 +12461,12 @@ void Table_map_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
{
if (!print_event_info->short_form)
{
+ char llbuff[22];
+
print_header(&print_event_info->head_cache, print_event_info, TRUE);
my_b_printf(&print_event_info->head_cache,
- "\tTable_map: %`s.%`s mapped to number %lu%s\n",
- m_dbnam, m_tblnam, m_table_id,
+ "\tTable_map: %`s.%`s mapped to number %s%s\n",
+ m_dbnam, m_tblnam, ullstr(m_table_id, llbuff),
((m_flags & TM_BIT_HAS_TRIGGERS_F) ?
" (has triggers)" : ""));
print_base64(&print_event_info->body_cache, print_event_info,
@@ -11548,6 +12492,21 @@ Write_rows_log_event::Write_rows_log_event(THD *thd_arg, TABLE *tbl_arg,
is_transactional, WRITE_ROWS_EVENT_V1)
{
}
+
+Write_rows_compressed_log_event::Write_rows_compressed_log_event(
+ THD *thd_arg,
+ TABLE *tbl_arg,
+ ulong tid_arg,
+ bool is_transactional)
+ : Write_rows_log_event(thd_arg, tbl_arg, tid_arg, is_transactional)
+{
+ m_type = WRITE_ROWS_COMPRESSED_EVENT_V1;
+}
+
+bool Write_rows_compressed_log_event::write()
+{
+ return Rows_log_event::write_compressed();
+}
#endif
/*
@@ -11560,6 +12519,15 @@ Write_rows_log_event::Write_rows_log_event(const char *buf, uint event_len,
: Rows_log_event(buf, event_len, description_event)
{
}
+
+Write_rows_compressed_log_event::Write_rows_compressed_log_event(
+ const char *buf, uint event_len,
+ const Format_description_log_event
+ *description_event)
+: Write_rows_log_event(buf, event_len, description_event)
+{
+ uncompress_buf();
+}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
@@ -11812,7 +12780,7 @@ Rows_log_event::write_row(rpl_group_info *rgi,
the size of the first row and use that value to initialize
storage engine for bulk insertion */
DBUG_ASSERT(!(m_curr_row > m_curr_row_end));
- ulong estimated_rows= 0;
+ ha_rows estimated_rows= 0;
if (m_curr_row < m_curr_row_end)
estimated_rows= (m_rows_end - m_curr_row) / (m_curr_row_end - m_curr_row);
else if (m_curr_row == m_curr_row_end)
@@ -12049,7 +13017,29 @@ void Write_rows_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info)
{
DBUG_EXECUTE_IF("simulate_cache_read_error",
{DBUG_SET("+d,simulate_my_b_fill_error");});
- Rows_log_event::print_helper(file, print_event_info, "Write_rows");
+ Rows_log_event::print_helper(file, print_event_info, is_flashback ? "Delete_rows" : "Write_rows");
+}
+
+void Write_rows_compressed_log_event::print(FILE *file,
+ PRINT_EVENT_INFO* print_event_info)
+{
+ char *new_buf;
+ ulong len;
+ bool is_malloc = false;
+ if(!row_log_event_uncompress(glob_description_event,
+ checksum_alg == BINLOG_CHECKSUM_ALG_CRC32,
+ temp_buf, UINT_MAX32, NULL, 0, &is_malloc, &new_buf, &len))
+ {
+ free_temp_buf();
+ register_temp_buf(new_buf, true);
+ Rows_log_event::print_helper(file, print_event_info,
+ "Write_compressed_rows");
+ }
+ else
+ {
+ my_b_printf(&print_event_info->head_cache,
+ "ERROR: uncompress write_compressed_rows failed\n");
+ }
}
#endif
@@ -12236,7 +13226,7 @@ void issue_long_find_row_warning(Log_event_type type,
if (delta > LONG_FIND_ROW_THRESHOLD)
{
rgi->set_long_find_row_note_printed();
- const char* evt_type= type == DELETE_ROWS_EVENT ? " DELETE" : "n UPDATE";
+ const char* evt_type= LOG_EVENT_IS_DELETE_ROW(type) ? " DELETE" : "n UPDATE";
const char* scan_type= is_index_scan ? "scanning an index" : "scanning the table";
sql_print_information("The slave is applying a ROW event on behalf of a%s statement "
@@ -12577,6 +13567,20 @@ Delete_rows_log_event::Delete_rows_log_event(THD *thd_arg, TABLE *tbl_arg,
DELETE_ROWS_EVENT_V1)
{
}
+
+Delete_rows_compressed_log_event::Delete_rows_compressed_log_event(
+ THD *thd_arg, TABLE *tbl_arg,
+ ulong tid_arg,
+ bool is_transactional)
+ : Delete_rows_log_event(thd_arg, tbl_arg, tid_arg, is_transactional)
+{
+ m_type= DELETE_ROWS_COMPRESSED_EVENT_V1;
+}
+
+bool Delete_rows_compressed_log_event::write()
+{
+ return Rows_log_event::write_compressed();
+}
#endif /* #if !defined(MYSQL_CLIENT) */
/*
@@ -12589,6 +13593,15 @@ Delete_rows_log_event::Delete_rows_log_event(const char *buf, uint event_len,
: Rows_log_event(buf, event_len, description_event)
{
}
+
+Delete_rows_compressed_log_event::Delete_rows_compressed_log_event(
+ const char *buf, uint event_len,
+ const Format_description_log_event
+ *description_event)
+ : Delete_rows_log_event(buf, event_len, description_event)
+{
+ uncompress_buf();
+}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
@@ -12684,7 +13697,29 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi)
void Delete_rows_log_event::print(FILE *file,
PRINT_EVENT_INFO* print_event_info)
{
- Rows_log_event::print_helper(file, print_event_info, "Delete_rows");
+ Rows_log_event::print_helper(file, print_event_info, is_flashback ? "Write_rows" : "Delete_rows");
+}
+
+void Delete_rows_compressed_log_event::print(FILE *file,
+ PRINT_EVENT_INFO* print_event_info)
+{
+ char *new_buf;
+ ulong len;
+ bool is_malloc = false;
+ if(!row_log_event_uncompress(glob_description_event,
+ checksum_alg == BINLOG_CHECKSUM_ALG_CRC32,
+ temp_buf, UINT_MAX32, NULL, 0, &is_malloc, &new_buf, &len))
+ {
+ free_temp_buf();
+ register_temp_buf(new_buf, true);
+ Rows_log_event::print_helper(file, print_event_info,
+ "Delete_compressed_rows");
+ }
+ else
+ {
+ my_b_printf(&print_event_info->head_cache,
+ "ERROR: uncompress delete_compressed_rows failed\n");
+ }
}
#endif
@@ -12713,6 +13748,19 @@ Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg,
init(tbl_arg->rpl_write_set);
}
+Update_rows_compressed_log_event::Update_rows_compressed_log_event(THD *thd_arg, TABLE *tbl_arg,
+ ulong tid,
+ bool is_transactional)
+: Update_rows_log_event(thd_arg, tbl_arg, tid, is_transactional)
+{
+ m_type = UPDATE_ROWS_COMPRESSED_EVENT_V1;
+}
+
+bool Update_rows_compressed_log_event::write()
+{
+ return Rows_log_event::write_compressed();
+}
+
void Update_rows_log_event::init(MY_BITMAP const *cols)
{
/* if my_bitmap_init fails, caught in is_valid() */
@@ -12751,6 +13799,15 @@ Update_rows_log_event::Update_rows_log_event(const char *buf, uint event_len,
: Rows_log_event(buf, event_len, description_event)
{
}
+
+Update_rows_compressed_log_event::Update_rows_compressed_log_event(
+ const char *buf, uint event_len,
+ const Format_description_log_event
+ *description_event)
+ : Update_rows_log_event(buf, event_len, description_event)
+{
+ uncompress_buf();
+}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
@@ -12903,6 +13960,27 @@ void Update_rows_log_event::print(FILE *file,
{
Rows_log_event::print_helper(file, print_event_info, "Update_rows");
}
+
+void Update_rows_compressed_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
+{
+ char *new_buf;
+ ulong len;
+ bool is_malloc= false;
+ if(!row_log_event_uncompress(glob_description_event,
+ checksum_alg == BINLOG_CHECKSUM_ALG_CRC32,
+ temp_buf, UINT_MAX32, NULL, 0, &is_malloc, &new_buf, &len))
+ {
+ free_temp_buf();
+ register_temp_buf(new_buf, true);
+ Rows_log_event::print_helper(file, print_event_info,
+ "Update_compressed_rows");
+ }
+ else
+ {
+ my_b_printf(&print_event_info->head_cache,
+ "ERROR: uncompress update_compressed_rows failed\n");
+ }
+}
#endif
#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
@@ -13030,7 +14108,7 @@ err:
DBUG_ASSERT(error != 0);
sql_print_error("Error in Log_event::read_log_event(): "
"'%s', data_len: %d, event_type: %d",
- error,data_len,head[EVENT_TYPE_OFFSET]);
+ error,data_len,(uchar)head[EVENT_TYPE_OFFSET]);
}
(*arg_buf)+= data_len;
(*arg_buf_len)-= data_len;
@@ -13171,6 +14249,9 @@ st_print_event_info::st_print_event_info()
myf const flags = MYF(MY_WME | MY_NABP);
open_cached_file(&head_cache, NULL, NULL, 0, flags);
open_cached_file(&body_cache, NULL, NULL, 0, flags);
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ open_cached_file(&review_sql_cache, NULL, NULL, 0, flags);
+#endif
}
#endif
diff --git a/sql/log_event.h b/sql/log_event.h
index 446bd8cb827..4ecb3d49e63 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2014, Oracle and/or its affiliates.
- Copyright (c) 2009, 2014, Monty Program Ab.
+ Copyright (c) 2009, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -41,6 +41,7 @@
#include "rpl_utility.h"
#include "hash.h"
#include "rpl_tblmap.h"
+#include "sql_string.h"
#endif
#ifdef MYSQL_SERVER
@@ -52,7 +53,9 @@
#include "rpl_gtid.h"
/* Forward declarations */
+#ifndef MYSQL_CLIENT
class String;
+#endif
#define PREFIX_SQL_LOAD "SQL_LOAD-"
#define LONG_FIND_ROW_THRESHOLD 60 /* seconds */
@@ -692,11 +695,75 @@ enum Log_event_type
START_ENCRYPTION_EVENT= 164,
+ /*
+ Compressed binlog event.
+
+ Note that the order between WRITE/UPDATE/DELETE events is significant;
+ this is so that we can convert from the compressed to the uncompressed
+ event type with (type-WRITE_ROWS_COMPRESSED_EVENT + WRITE_ROWS_EVENT)
+ and similar for _V1.
+ */
+ QUERY_COMPRESSED_EVENT = 165,
+ WRITE_ROWS_COMPRESSED_EVENT_V1 = 166,
+ UPDATE_ROWS_COMPRESSED_EVENT_V1 = 167,
+ DELETE_ROWS_COMPRESSED_EVENT_V1 = 168,
+ WRITE_ROWS_COMPRESSED_EVENT = 169,
+ UPDATE_ROWS_COMPRESSED_EVENT = 170,
+ DELETE_ROWS_COMPRESSED_EVENT = 171,
+
/* Add new MariaDB events here - right above this comment! */
ENUM_END_EVENT /* end marker */
};
+static inline bool LOG_EVENT_IS_QUERY(enum Log_event_type type)
+{
+ return type == QUERY_EVENT || type == QUERY_COMPRESSED_EVENT;
+}
+
+
+static inline bool LOG_EVENT_IS_WRITE_ROW(enum Log_event_type type)
+{
+ return type == WRITE_ROWS_EVENT || type == WRITE_ROWS_EVENT_V1 ||
+ type == WRITE_ROWS_COMPRESSED_EVENT ||
+ type == WRITE_ROWS_COMPRESSED_EVENT_V1;
+}
+
+
+static inline bool LOG_EVENT_IS_UPDATE_ROW(enum Log_event_type type)
+{
+ return type == UPDATE_ROWS_EVENT || type == UPDATE_ROWS_EVENT_V1 ||
+ type == UPDATE_ROWS_COMPRESSED_EVENT ||
+ type == UPDATE_ROWS_COMPRESSED_EVENT_V1;
+}
+
+
+static inline bool LOG_EVENT_IS_DELETE_ROW(enum Log_event_type type)
+{
+ return type == DELETE_ROWS_EVENT || type == DELETE_ROWS_EVENT_V1 ||
+ type == DELETE_ROWS_COMPRESSED_EVENT ||
+ type == DELETE_ROWS_COMPRESSED_EVENT_V1;
+}
+
+
+static inline bool LOG_EVENT_IS_ROW_COMPRESSED(enum Log_event_type type)
+{
+ return type == WRITE_ROWS_COMPRESSED_EVENT ||
+ type == WRITE_ROWS_COMPRESSED_EVENT_V1 ||
+ type == UPDATE_ROWS_COMPRESSED_EVENT ||
+ type == UPDATE_ROWS_COMPRESSED_EVENT_V1 ||
+ type == DELETE_ROWS_COMPRESSED_EVENT ||
+ type == DELETE_ROWS_COMPRESSED_EVENT_V1;
+}
+
+
+static inline bool LOG_EVENT_IS_ROW_V2(enum Log_event_type type)
+{
+ return (type >= WRITE_ROWS_EVENT && type <= DELETE_ROWS_EVENT) ||
+ (type >= WRITE_ROWS_COMPRESSED_EVENT && type <= DELETE_ROWS_COMPRESSED_EVENT);
+}
+
+
/*
The number of types we handle in Format_description_log_event (UNKNOWN_EVENT
is not to be handled, it does not exist in binlogs, it does not have a
@@ -754,14 +821,14 @@ typedef struct st_print_event_info
bool flags2_inited;
uint32 flags2;
bool sql_mode_inited;
- ulonglong sql_mode; /* must be same as THD.variables.sql_mode */
+ sql_mode_t sql_mode; /* must be same as THD.variables.sql_mode */
ulong auto_increment_increment, auto_increment_offset;
bool charset_inited;
char charset[6]; // 3 variables, each of them storable in 2 bytes
char time_zone_str[MAX_TIME_ZONE_NAME_LENGTH];
uint lc_time_names_number;
uint charset_database_number;
- uint thread_id;
+ my_thread_id thread_id;
bool thread_id_printed;
uint32 server_id;
bool server_id_printed;
@@ -769,7 +836,7 @@ typedef struct st_print_event_info
bool domain_id_printed;
bool allow_parallel;
bool allow_parallel_printed;
-
+ static const uint max_delimiter_size= 16;
/*
Track when @@skip_replication changes so we need to output a SET
statement for it.
@@ -781,9 +848,16 @@ typedef struct st_print_event_info
~st_print_event_info() {
close_cached_file(&head_cache);
close_cached_file(&body_cache);
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ close_cached_file(&review_sql_cache);
+#endif
}
bool init_ok() /* tells if construction was successful */
- { return my_b_inited(&head_cache) && my_b_inited(&body_cache); }
+ { return my_b_inited(&head_cache) && my_b_inited(&body_cache)
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ && my_b_inited(&review_sql_cache)
+#endif
+ ; }
/* Settings on how to print the events */
@@ -798,7 +872,7 @@ typedef struct st_print_event_info
bool printed_fd_event;
my_off_t hexdump_from;
uint8 common_header_len;
- char delimiter[16];
+ char delimiter[max_delimiter_size];
uint verbose;
table_mapping m_table_map;
@@ -811,6 +885,10 @@ typedef struct st_print_event_info
*/
IO_CACHE head_cache;
IO_CACHE body_cache;
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ /* Storing the SQL for reviewing */
+ IO_CACHE review_sql_cache;
+#endif
} PRINT_EVENT_INFO;
#endif
@@ -1159,6 +1237,37 @@ public:
void print_base64(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info,
bool do_print_encoded);
#endif
+
+ /* The following code used for Flashback */
+#ifdef MYSQL_CLIENT
+ my_bool is_flashback;
+ my_bool need_flashback_review;
+ String output_buf; // Storing the event output
+#ifdef WHEN_FLASHBACK_REVIEW_READY
+ String m_review_dbname;
+ String m_review_tablename;
+
+ void set_review_dbname(const char *name)
+ {
+ if (name)
+ {
+ m_review_dbname.free();
+ m_review_dbname.append(name);
+ }
+ }
+ void set_review_tablename(const char *name)
+ {
+ if (name)
+ {
+ m_review_tablename.free();
+ m_review_tablename.append(name);
+ }
+ }
+ const char *get_review_dbname() const { return m_review_dbname.ptr(); }
+ const char *get_review_tablename() const { return m_review_tablename.ptr(); }
+#endif
+#endif
+
/*
read_log_event() functions read an event from a binlog or relay
log; used by SHOW BINLOG EVENTS, the binlog_dump thread on the
@@ -1899,7 +2008,7 @@ public:
uint32 q_len;
uint32 db_len;
uint16 error_code;
- ulong thread_id;
+ my_thread_id thread_id;
/*
For events created by Query_log_event::do_apply_event (and
Load_log_event::do_apply_event()) we need the *original* thread
@@ -1951,8 +2060,7 @@ public:
bool charset_inited;
uint32 flags2;
- /* In connections sql_mode is 32 bits now but will be 64 bits soon */
- ulonglong sql_mode;
+ sql_mode_t sql_mode;
ulong auto_increment_increment, auto_increment_offset;
char charset[6];
uint time_zone_len; /* 0 means uninited */
@@ -2046,9 +2154,37 @@ public: /* !!! Public in this patch to allow old usage */
!strncasecmp(query, "SAVEPOINT", 9) ||
!strncasecmp(query, "ROLLBACK", 8);
}
- bool is_begin() { return !strcmp(query, "BEGIN"); }
- bool is_commit() { return !strcmp(query, "COMMIT"); }
- bool is_rollback() { return !strcmp(query, "ROLLBACK"); }
+ virtual bool is_begin() { return !strcmp(query, "BEGIN"); }
+ virtual bool is_commit() { return !strcmp(query, "COMMIT"); }
+ virtual bool is_rollback() { return !strcmp(query, "ROLLBACK"); }
+};
+
+class Query_compressed_log_event:public Query_log_event{
+protected:
+ Log_event::Byte* query_buf; // point to the uncompressed query
+public:
+ Query_compressed_log_event(const char* buf, uint event_len,
+ const Format_description_log_event *description_event,
+ Log_event_type event_type);
+ ~Query_compressed_log_event()
+ {
+ if (query_buf)
+ my_free(query_buf);
+ }
+ Log_event_type get_type_code() { return QUERY_COMPRESSED_EVENT; }
+
+ /*
+ The minimum value of log_bin_compress_min_len is 10, which means
+ that Begin/Commit/Rollback statements are never compressed!
+ */
+ virtual bool is_begin() { return false; }
+ virtual bool is_commit() { return false; }
+ virtual bool is_rollback() { return false; }
+#ifdef MYSQL_SERVER
+ Query_compressed_log_event(THD* thd_arg, const char* query_arg, ulong query_length,
+ bool using_trans, bool direct, bool suppress_use, int error);
+ virtual bool write();
+#endif
};
@@ -2299,7 +2435,7 @@ public:
void print_query(THD *thd, bool need_db, const char *cs, String *buf,
my_off_t *fn_start, my_off_t *fn_end,
const char *qualify_db);
- ulong thread_id;
+ my_thread_id thread_id;
ulong slave_proxy_id;
uint32 table_name_len;
/*
@@ -3749,6 +3885,7 @@ private:
uint m_query_len;
char *m_save_thd_query_txt;
uint m_save_thd_query_len;
+ bool m_saved_thd_query;
};
/**
@@ -4130,7 +4267,7 @@ public:
int rewrite_db(const char* new_name, size_t new_name_len,
const Format_description_log_event*);
#endif
- ulong get_table_id() const { return m_table_id; }
+ ulonglong get_table_id() const { return m_table_id; }
const char *get_table_name() const { return m_tblnam; }
const char *get_db_name() const { return m_dbnam; }
@@ -4173,7 +4310,7 @@ private:
uchar *m_coltype;
uchar *m_memory;
- ulong m_table_id;
+ ulonglong m_table_id;
flag_set m_flags;
size_t m_data_size;
@@ -4241,7 +4378,10 @@ public:
Indicates that rows in this event are complete, that is contain
values for all columns of the table.
*/
- COMPLETE_ROWS_F = (1U << 3)
+ COMPLETE_ROWS_F = (1U << 3),
+
+ /* Value of the OPTION_NO_CHECK_CONSTRAINT_CHECKS flag in thd->options */
+ NO_CHECK_CONSTRAINT_CHECKS_F = (1U << 7)
};
typedef uint16 flag_set;
@@ -4257,6 +4397,7 @@ public:
void set_flags(flag_set flags_arg) { m_flags |= flags_arg; }
void clear_flags(flag_set flags_arg) { m_flags &= ~flags_arg; }
flag_set get_flags(flag_set flags_arg) const { return m_flags & flags_arg; }
+ void update_flags() { int2store(temp_buf + m_flags_pos, m_flags); }
Log_event_type get_type_code() { return m_type; } /* Specific type (_V1 etc) */
virtual Log_event_type get_general_type_code() = 0; /* General rows op type, no version */
@@ -4268,12 +4409,14 @@ public:
#ifdef MYSQL_CLIENT
/* not for direct call, each derived has its own ::print() */
virtual void print(FILE *file, PRINT_EVENT_INFO *print_event_info)= 0;
+ void change_to_flashback_event(PRINT_EVENT_INFO *print_event_info, uchar *rows_buff, Log_event_type ev_type);
void print_verbose(IO_CACHE *file,
PRINT_EVENT_INFO *print_event_info);
size_t print_verbose_one_row(IO_CACHE *file, table_def *td,
PRINT_EVENT_INFO *print_event_info,
MY_BITMAP *cols_bitmap,
- const uchar *ptr, const uchar *prefix);
+ const uchar *ptr, const uchar *prefix,
+ const my_bool no_fill_output= 0); // if no_fill_output=1, nothing is printed
#endif
#ifdef MYSQL_SERVER
@@ -4289,7 +4432,7 @@ public:
MY_BITMAP const *get_cols() const { return &m_cols; }
MY_BITMAP const *get_cols_ai() const { return &m_cols_ai; }
size_t get_width() const { return m_width; }
- ulong get_table_id() const { return m_table_id; }
+ ulonglong get_table_id() const { return m_table_id; }
#if defined(MYSQL_SERVER)
/*
@@ -4342,6 +4485,7 @@ public:
#ifdef MYSQL_SERVER
virtual bool write_data_header();
virtual bool write_data_body();
+ virtual bool write_compressed();
virtual const char *get_db() { return m_table->s->db.str; }
#endif
/*
@@ -4376,6 +4520,7 @@ protected:
#endif
Rows_log_event(const char *row_data, uint event_len,
const Format_description_log_event *description_event);
+ void uncompress_buf();
#ifdef MYSQL_CLIENT
void print_helper(FILE *, PRINT_EVENT_INFO *, char const *const name);
@@ -4388,7 +4533,7 @@ protected:
#ifdef MYSQL_SERVER
TABLE *m_table; /* The table the rows belong to */
#endif
- ulong m_table_id; /* Table ID */
+ ulonglong m_table_id; /* Table ID */
MY_BITMAP m_cols; /* Bitmap denoting columns available */
ulong m_width; /* The width of the columns bitmap */
/*
@@ -4410,6 +4555,9 @@ protected:
uchar *m_rows_cur; /* One-after the end of the data */
uchar *m_rows_end; /* One-after the end of the allocated space */
+ size_t m_rows_before_size; /* The length before m_rows_buf */
+ size_t m_flags_pos; /* The position of the m_flags */
+
flag_set m_flags; /* Flags for row-level events */
Log_event_type m_type; /* Actual event type */
@@ -4588,6 +4736,23 @@ private:
#endif
};
+class Write_rows_compressed_log_event : public Write_rows_log_event
+{
+public:
+#if defined(MYSQL_SERVER)
+ Write_rows_compressed_log_event(THD*, TABLE*, ulong table_id,
+ bool is_transactional);
+ virtual bool write();
+#endif
+#ifdef HAVE_REPLICATION
+ Write_rows_compressed_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+#endif
+private:
+#if defined(MYSQL_CLIENT)
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+};
/**
@class Update_rows_log_event
@@ -4658,6 +4823,24 @@ protected:
#endif /* defined(MYSQL_SERVER) && defined(HAVE_REPLICATION) */
};
+class Update_rows_compressed_log_event : public Update_rows_log_event
+{
+public:
+#if defined(MYSQL_SERVER)
+ Update_rows_compressed_log_event(THD*, TABLE*, ulong table_id,
+ bool is_transactional);
+ virtual bool write();
+#endif
+#ifdef HAVE_REPLICATION
+ Update_rows_compressed_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+#endif
+private:
+#if defined(MYSQL_CLIENT)
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+};
+
/**
@class Delete_rows_log_event
@@ -4724,6 +4907,23 @@ protected:
#endif
};
+class Delete_rows_compressed_log_event : public Delete_rows_log_event
+{
+public:
+#if defined(MYSQL_SERVER)
+ Delete_rows_compressed_log_event(THD*, TABLE*, ulong, bool is_transactional);
+ virtual bool write();
+#endif
+#ifdef HAVE_REPLICATION
+ Delete_rows_compressed_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+#endif
+private:
+#if defined(MYSQL_CLIENT)
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+};
+
#include "log_event_old.h"
@@ -4892,12 +5092,36 @@ public:
};
#ifdef MYSQL_CLIENT
-void copy_cache_to_file_wrapped(FILE *file,
- PRINT_EVENT_INFO *print_event_info,
- IO_CACHE *body,
- bool do_wrap);
+void copy_cache_to_string_wrapped(IO_CACHE *body,
+ LEX_STRING *to,
+ bool do_wrap,
+ const char *delimiter,
+ bool is_verbose);
#endif
+static inline bool copy_event_cache_to_string_and_reinit(IO_CACHE *cache, LEX_STRING *to)
+{
+ String tmp;
+
+ reinit_io_cache(cache, READ_CACHE, 0L, FALSE, FALSE);
+ if (tmp.append(cache, (uint32)cache->end_of_file))
+ goto err;
+ reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE);
+
+ /*
+ Can't change the order, because the String::release() will clear the
+ length.
+ */
+ to->length= tmp.length();
+ to->str= tmp.release();
+
+ return false;
+
+err:
+ perror("Out of memory: can't allocate memory in copy_event_cache_to_string_and_reinit().");
+ return true;
+}
+
static inline bool copy_event_cache_to_file_and_reinit(IO_CACHE *cache,
FILE *file)
{
@@ -4972,4 +5196,19 @@ extern TYPELIB binlog_checksum_typelib;
@} (end of group Replication)
*/
+
+int binlog_buf_compress(const char *src, char *dst, uint32 len, uint32 *comlen);
+int binlog_buf_uncompress(const char *src, char *dst, uint32 len, uint32 *newlen);
+uint32 binlog_get_compress_len(uint32 len);
+uint32 binlog_get_uncompress_len(const char *buf);
+
+int query_event_uncompress(const Format_description_log_event *description_event, bool contain_checksum,
+ const char *src, ulong src_len, char* buf, ulong buf_size, bool* is_malloc,
+ char **dst, ulong *newlen);
+
+int row_log_event_uncompress(const Format_description_log_event *description_event, bool contain_checksum,
+ const char *src, ulong src_len, char* buf, ulong buf_size, bool* is_malloc,
+ char **dst, ulong *newlen);
+
+
#endif /* _log_event_h */
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index a6f2ed3f416..2b6509048ba 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -222,6 +222,8 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
/* A small test to verify that objects have consistent types */
DBUG_ASSERT(sizeof(ev_thd->variables.option_bits) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
+ table->rpl_write_set= table->write_set;
+
error= do_before_row_operations(table);
while (error == 0 && row_start < ev->m_rows_end)
{
@@ -363,10 +365,10 @@ copy_extra_record_fields(TABLE *table,
my_ptrdiff_t master_fields)
{
DBUG_ENTER("copy_extra_record_fields(table, master_reclen, master_fields)");
- DBUG_PRINT("info", ("Copying to 0x%lx "
+ DBUG_PRINT("info", ("Copying to %p "
"from field %lu at offset %lu "
"to field %d at offset %lu",
- (long) table->record[0],
+ table->record[0],
(ulong) master_fields, (ulong) master_reclength,
table->s->fields, table->s->reclength));
/*
@@ -624,8 +626,8 @@ replace_record(THD *thd, TABLE *table,
static int find_and_fetch_row(TABLE *table, uchar *key)
{
DBUG_ENTER("find_and_fetch_row(TABLE *table, uchar *key, uchar *record)");
- DBUG_PRINT("enter", ("table: 0x%lx, key: 0x%lx record: 0x%lx",
- (long) table, (long) key, (long) table->record[1]));
+ DBUG_PRINT("enter", ("table: %p, key: %p record: %p",
+ table, key, table->record[1]));
DBUG_ASSERT(table->in_use != NULL);
@@ -1260,8 +1262,8 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len,
const uchar* const ptr_rows_data= (const uchar*) ptr_after_width;
size_t const data_size= event_len - (ptr_rows_data - (const uchar *) buf);
- DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu",
- m_table_id, m_flags, m_width, (ulong) data_size));
+ DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %zu",
+ m_table_id, m_flags, m_width, data_size));
DBUG_DUMP("rows_data", (uchar*) ptr_rows_data, data_size);
m_rows_buf= (uchar*) my_malloc(data_size, MYF(MY_WME));
@@ -1296,8 +1298,8 @@ int Old_rows_log_event::get_data_size()
uchar *end= net_store_length(buf, (m_width + 7) / 8);
DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
- return 6 + no_bytes_in_map(&m_cols) + (end - buf) +
- (m_rows_cur - m_rows_buf););
+ return (int)(6 + no_bytes_in_map(&m_cols) + (end - buf) +
+ m_rows_cur - m_rows_buf););
int data_size= ROWS_HEADER_LEN;
data_size+= no_bytes_in_map(&m_cols);
data_size+= (uint) (end - buf);
@@ -1316,8 +1318,8 @@ int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length)
would save binlog space. TODO
*/
DBUG_ENTER("Old_rows_log_event::do_add_row_data");
- DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
- (ulong) length));
+ DBUG_PRINT("enter", ("row_data: %p length: %zu",row_data,
+ length));
/*
Don't print debug messages when running valgrind since they can
trigger false warnings.
@@ -1590,10 +1592,10 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
break;
default:
- rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
+ rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
"Error in %s event: row application failed. %s",
get_type_str(), thd->net.last_error);
- thd->is_slave_error= 1;
+ thd->is_slave_error= 1;
break;
}
@@ -1605,8 +1607,8 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
*/
DBUG_PRINT("info", ("error: %d", error));
- DBUG_PRINT("info", ("curr_row: 0x%lu; curr_row_end: 0x%lu; rows_end: 0x%lu",
- (ulong) m_curr_row, (ulong) m_curr_row_end, (ulong) m_rows_end));
+ DBUG_PRINT("info", ("curr_row: %p; curr_row_end:%p; rows_end: %p",
+ m_curr_row, m_curr_row_end, m_rows_end));
if (!m_curr_row_end && !error)
unpack_current_row(rgi);
@@ -1631,7 +1633,8 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
get_type_str(), table->s->db.str,
- table->s->table_name.str, thd->net.last_error);
+ table->s->table_name.str,
+ thd->net.last_error);
/*
If one day we honour --skip-slave-errors in row-based replication, and
@@ -1873,13 +1876,21 @@ void Old_rows_log_event::print_helper(FILE *file,
if (get_flags(STMT_END_F))
{
- if (copy_event_cache_to_file_and_reinit(head, file))
+ LEX_STRING tmp_str;
+
+ if (copy_event_cache_to_string_and_reinit(head, &tmp_str))
{
head->error= -1;
return;
}
- copy_cache_to_file_wrapped(file, body, do_print_encoded,
- print_event_info->delimiter);
+ output_buf.append(&tmp_str);
+ my_free(tmp_str.str);
+
+ copy_cache_to_string_wrapped(body, &tmp_str, do_print_encoded,
+ print_event_info->delimiter,
+ print_event_info->verbose);
+ output_buf.append(&tmp_str);
+ my_free(tmp_str.str);
}
}
#endif
diff --git a/sql/log_slow.h b/sql/log_slow.h
index 5092e8332ed..9811d298335 100644
--- a/sql/log_slow.h
+++ b/sql/log_slow.h
@@ -32,7 +32,7 @@
#define QPLAN_TMP_DISK (1U << 7)
#define QPLAN_TMP_TABLE (1U << 8)
#define QPLAN_FILESORT_PRIORITY_QUEUE (1U << 9)
-
+
/* ... */
-#define QPLAN_STATUS (1U << 31) /* not in the slow_log_filter */
-#define QPLAN_MAX (1U << 31) /* reserved as placeholder */
+#define QPLAN_STATUS (1UL << 31) /* not in the slow_log_filter */
+#define QPLAN_MAX (1UL << 31) /* reserved as placeholder */
diff --git a/sql/mdl.cc b/sql/mdl.cc
index 86fc5fa39fc..f5c5d06328d 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -19,7 +19,7 @@
#include "sql_array.h"
#include "rpl_rli.h"
#include <lf.h>
-#include <mysqld_error.h>
+#include "unireg.h"
#include <mysql/plugin.h>
#include <mysql/service_thd_wait.h>
#include <mysql/psi/mysql_stage.h>
@@ -391,7 +391,11 @@ public:
virtual const bitmap_t *incompatible_waiting_types_bitmap() const
{ return m_waiting_incompatible; }
virtual bool needs_notification(const MDL_ticket *ticket) const
- { return (ticket->get_type() >= MDL_SHARED_NO_WRITE); }
+ {
+ return ticket->get_type() == MDL_SHARED_NO_WRITE ||
+ ticket->get_type() == MDL_SHARED_NO_READ_WRITE ||
+ ticket->get_type() == MDL_EXCLUSIVE;
+ }
/**
Notify threads holding a shared metadata locks on object which
@@ -1413,7 +1417,8 @@ const MDL_lock::bitmap_t
MDL_lock::MDL_scoped_lock::m_granted_incompatible[MDL_TYPE_END]=
{
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED),
- MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE), 0, 0, 0, 0, 0, 0,
+ MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE),
+ 0, 0, 0, 0, 0, 0, 0,
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED) | MDL_BIT(MDL_INTENTION_EXCLUSIVE)
};
@@ -1421,7 +1426,7 @@ const MDL_lock::bitmap_t
MDL_lock::MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END]=
{
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED),
- MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0, 0
+ MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0, 0, 0
};
@@ -1433,39 +1438,41 @@ MDL_lock::MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END]=
The first array specifies if particular type of request can be satisfied
if there is granted lock of certain type.
- Request | Granted requests for lock |
- type | S SH SR SW SU SNW SNRW X |
- ----------+----------------------------------+
- S | + + + + + + + - |
- SH | + + + + + + + - |
- SR | + + + + + + - - |
- SW | + + + + + - - - |
- SU | + + + + - - - - |
- SNW | + + + - - - - - |
- SNRW | + + - - - - - - |
- X | - - - - - - - - |
- SU -> X | - - - - 0 0 0 0 |
- SNW -> X | - - - 0 0 0 0 0 |
- SNRW -> X | - - 0 0 0 0 0 0 |
+ Request | Granted requests for lock |
+ type | S SH SR SW SU SRO SNW SNRW X |
+ ----------+---------------------------------------+
+ S | + + + + + + + + - |
+ SH | + + + + + + + + - |
+ SR | + + + + + + + - - |
+ SW | + + + + + - - - - |
+ SU | + + + + - + - - - |
+ SRO | + + + - + + + - - |
+ SNW | + + + - - + - - - |
+ SNRW | + + - - - - - - - |
+ X | - - - - - - - - - |
+ SU -> X | - - - - 0 - 0 0 0 |
+ SNW -> X | - - - 0 0 - 0 0 0 |
+ SNRW -> X | - - 0 0 0 0 0 0 0 |
The second array specifies if particular type of request can be satisfied
if there is waiting request for the same lock of certain type. In other
words it specifies what is the priority of different lock types.
- Request | Pending requests for lock |
- type | S SH SR SW SU SNW SNRW X |
- ----------+---------------------------------+
- S | + + + + + + + - |
- SH | + + + + + + + + |
- SR | + + + + + + - - |
- SW | + + + + + - - - |
- SU | + + + + + + + - |
- SNW | + + + + + + + - |
- SNRW | + + + + + + + - |
- X | + + + + + + + + |
- SU -> X | + + + + + + + + |
- SNW -> X | + + + + + + + + |
- SNRW -> X | + + + + + + + + |
+ Request | Pending requests for lock |
+ type | S SH SR SW SU SRO SNW SNRW X |
+ ----------+--------------------------------------+
+ S | + + + + + + + + - |
+ SH | + + + + + + + + + |
+ SR | + + + + + + + - - |
+ SW | + + + + + + - - - |
+ SU | + + + + + + + + - |
+ SRO | + + + - + + + - - |
+ SNW | + + + + + + + + - |
+ SNRW | + + + + + + + + - |
+ X | + + + + + + + + + |
+ SU -> X | + + + + + + + + + |
+ SNW -> X | + + + + + + + + + |
+ SNRW -> X | + + + + + + + + + |
Here: "+" -- means that request can be satisfied
"-" -- means that request can't be satisfied and should wait
@@ -1487,19 +1494,23 @@ MDL_lock::MDL_object_lock::m_granted_incompatible[MDL_TYPE_END]=
MDL_BIT(MDL_EXCLUSIVE),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
- MDL_BIT(MDL_SHARED_NO_WRITE),
+ MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
- MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
MDL_BIT(MDL_SHARED_WRITE),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
- MDL_BIT(MDL_SHARED_WRITE) | MDL_BIT(MDL_SHARED_READ),
+ MDL_BIT(MDL_SHARED_WRITE),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
- MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
- MDL_BIT(MDL_SHARED_WRITE) | MDL_BIT(MDL_SHARED_READ) |
- MDL_BIT(MDL_SHARED_HIGH_PRIO) | MDL_BIT(MDL_SHARED)
+ MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY) |
+ MDL_BIT(MDL_SHARED_UPGRADABLE) | MDL_BIT(MDL_SHARED_WRITE) |
+ MDL_BIT(MDL_SHARED_READ),
+ MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
+ MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY) |
+ MDL_BIT(MDL_SHARED_UPGRADABLE) | MDL_BIT(MDL_SHARED_WRITE) |
+ MDL_BIT(MDL_SHARED_READ) | MDL_BIT(MDL_SHARED_HIGH_PRIO) |
+ MDL_BIT(MDL_SHARED)
};
@@ -1513,6 +1524,8 @@ MDL_lock::MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END]=
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
MDL_BIT(MDL_SHARED_NO_WRITE),
MDL_BIT(MDL_EXCLUSIVE),
+ MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
+ MDL_BIT(MDL_SHARED_WRITE),
MDL_BIT(MDL_EXCLUSIVE),
MDL_BIT(MDL_EXCLUSIVE),
0
@@ -2307,10 +2320,12 @@ MDL_context::upgrade_shared_lock(MDL_ticket *mdl_ticket,
if (mdl_ticket->has_stronger_or_equal_type(new_type))
DBUG_RETURN(FALSE);
- /* Only allow upgrades from SHARED_UPGRADABLE/NO_WRITE/NO_READ_WRITE */
+ /* Only allow upgrades from UPGRADABLE/NO_WRITE/NO_READ_WRITE/READ/WRITE */
DBUG_ASSERT(mdl_ticket->m_type == MDL_SHARED_UPGRADABLE ||
mdl_ticket->m_type == MDL_SHARED_NO_WRITE ||
- mdl_ticket->m_type == MDL_SHARED_NO_READ_WRITE);
+ mdl_ticket->m_type == MDL_SHARED_NO_READ_WRITE ||
+ mdl_ticket->m_type == MDL_SHARED_READ ||
+ mdl_ticket->m_type == MDL_SHARED_WRITE);
mdl_xlock_request.init(&mdl_ticket->m_lock->key, new_type,
MDL_TRANSACTION);
@@ -2997,34 +3012,54 @@ bool MDL_context::has_explicit_locks()
}
#ifdef WITH_WSREP
-void MDL_ticket::wsrep_report(bool debug)
+static
+const char *wsrep_get_mdl_type_name(enum_mdl_type type)
{
- if (debug)
+ switch (type)
{
- const PSI_stage_info *psi_stage = m_lock->key.get_wait_state_name();
-
- WSREP_DEBUG("MDL ticket: type: %s space: %s db: %s name: %s (%s)",
- (get_type() == MDL_INTENTION_EXCLUSIVE) ? "intention exclusive" :
- ((get_type() == MDL_SHARED) ? "shared" :
- ((get_type() == MDL_SHARED_HIGH_PRIO ? "shared high prio" :
- ((get_type() == MDL_SHARED_READ) ? "shared read" :
- ((get_type() == MDL_SHARED_WRITE) ? "shared write" :
- ((get_type() == MDL_SHARED_NO_WRITE) ? "shared no write" :
- ((get_type() == MDL_SHARED_NO_READ_WRITE) ? "shared no read write" :
- ((get_type() == MDL_EXCLUSIVE) ? "exclusive" :
- "UNKNOWN")))))))),
- (m_lock->key.mdl_namespace() == MDL_key::GLOBAL) ? "GLOBAL" :
- ((m_lock->key.mdl_namespace() == MDL_key::SCHEMA) ? "SCHEMA" :
- ((m_lock->key.mdl_namespace() == MDL_key::TABLE) ? "TABLE" :
- ((m_lock->key.mdl_namespace() == MDL_key::TABLE) ? "FUNCTION" :
- ((m_lock->key.mdl_namespace() == MDL_key::TABLE) ? "PROCEDURE" :
- ((m_lock->key.mdl_namespace() == MDL_key::TABLE) ? "TRIGGER" :
- ((m_lock->key.mdl_namespace() == MDL_key::TABLE) ? "EVENT" :
- ((m_lock->key.mdl_namespace() == MDL_key::COMMIT) ? "COMMIT" :
- (char *)"UNKNOWN"))))))),
- m_lock->key.db_name(),
- m_lock->key.name(),
- psi_stage->m_name);
- }
+ case MDL_INTENTION_EXCLUSIVE : return "intention exclusive";
+ case MDL_SHARED : return "shared";
+ case MDL_SHARED_HIGH_PRIO : return "shared high prio";
+ case MDL_SHARED_READ : return "shared read";
+ case MDL_SHARED_WRITE : return "shared write";
+ case MDL_SHARED_UPGRADABLE : return "shared upgradable";
+ case MDL_SHARED_NO_WRITE : return "shared no write";
+ case MDL_SHARED_NO_READ_WRITE : return "shared no read write";
+ case MDL_EXCLUSIVE : return "exclusive";
+ default: break;
+ }
+ return "UNKNOWN";
+}
+
+static
+const char *wsrep_get_mdl_namespace_name(MDL_key::enum_mdl_namespace ns)
+{
+ switch (ns)
+ {
+ case MDL_key::GLOBAL : return "GLOBAL";
+ case MDL_key::SCHEMA : return "SCHEMA";
+ case MDL_key::TABLE : return "TABLE";
+ case MDL_key::FUNCTION : return "FUNCTION";
+ case MDL_key::PROCEDURE : return "PROCEDURE";
+ case MDL_key::TRIGGER : return "TRIGGER";
+ case MDL_key::EVENT : return "EVENT";
+ case MDL_key::COMMIT : return "COMMIT";
+ case MDL_key::USER_LOCK : return "USER_LOCK";
+ default: break;
+ }
+ return "UNKNOWN";
+}
+
+void MDL_ticket::wsrep_report(bool debug)
+{
+ if (!debug) return;
+
+ const PSI_stage_info *psi_stage= m_lock->key.get_wait_state_name();
+ WSREP_DEBUG("MDL ticket: type: %s space: %s db: %s name: %s (%s)",
+ wsrep_get_mdl_type_name(get_type()),
+ wsrep_get_mdl_namespace_name(m_lock->key.mdl_namespace()),
+ m_lock->key.db_name(),
+ m_lock->key.name(),
+ psi_stage->m_name);
}
#endif /* WITH_WSREP */
diff --git a/sql/mdl.h b/sql/mdl.h
index 15a1976876b..b76d70763e7 100644
--- a/sql/mdl.h
+++ b/sql/mdl.h
@@ -15,23 +15,12 @@
along with this program; if not, write to the Free Software Foundation,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
-#if defined(__IBMC__) || defined(__IBMCPP__)
-/* Further down, "next_in_lock" and "next_in_context" have the same type,
- and in "sql_plist.h" this leads to an identical signature, which causes
- problems in function overloading.
-*/
-#pragma namemangling(v5)
-#endif
-
-
#include "sql_plist.h"
#include <my_sys.h>
#include <m_string.h>
#include <mysql_com.h>
#include <lf.h>
-#include <algorithm>
-
class THD;
class MDL_context;
@@ -205,6 +194,12 @@ enum enum_mdl_type {
*/
MDL_SHARED_UPGRADABLE,
/*
+ A shared metadata lock for cases when we need to read data from table
+ and block all concurrent modifications to it (for both data and metadata).
+ Used by LOCK TABLES READ statement.
+ */
+ MDL_SHARED_READ_ONLY,
+ /*
An upgradable shared metadata lock which blocks all attempts to update
table data, allowing reads.
A connection holding this kind of lock can read table metadata and read
@@ -376,8 +371,7 @@ public:
character set is utf-8, we can safely assume that no
character starts with a zero byte.
*/
- using std::min;
- return memcmp(m_ptr, rhs->m_ptr, min(m_length, rhs->m_length));
+ return memcmp(m_ptr, rhs->m_ptr, MY_MIN(m_length, rhs->m_length));
}
MDL_key(const MDL_key *rhs)
@@ -486,6 +480,19 @@ public:
from.ticket= NULL; // that's what "move" means
}
+ /**
+ Is this a request for a lock which allow data to be updated?
+
+ @note This method returns true for MDL_SHARED_UPGRADABLE type of
+ lock. Even though this type of lock doesn't allow updates
+ it will always be upgraded to one that does.
+ */
+ bool is_write_lock_request() const
+ {
+ return (type >= MDL_SHARED_WRITE &&
+ type != MDL_SHARED_READ_ONLY);
+ }
+
/*
This is to work around the ugliness of TABLE_LIST
compiler-generated assignment operator. It is currently used
diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc
index 6535f16445b..8337c93fe70 100644
--- a/sql/mf_iocache.cc
+++ b/sql/mf_iocache.cc
@@ -49,8 +49,7 @@ extern "C" {
*/
-int _my_b_net_read(register IO_CACHE *info, uchar *Buffer,
- size_t Count __attribute__((unused)))
+int _my_b_net_read(IO_CACHE *info, uchar *Buffer, size_t)
{
ulong read_length;
NET *net= &(current_thd)->net;
diff --git a/sql/mf_iocache_encr.cc b/sql/mf_iocache_encr.cc
index f26a437a25a..879da12faa4 100644
--- a/sql/mf_iocache_encr.cc
+++ b/sql/mf_iocache_encr.cc
@@ -57,7 +57,7 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count)
if (info->seek_not_done)
{
- size_t wpos;
+ my_off_t wpos;
pos_offset= pos_in_file % info->buffer_length;
pos_in_file-= pos_offset;
@@ -92,7 +92,7 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count)
DBUG_RETURN(1);
}
- elength= wlength - (ebuffer - wbuffer);
+ elength= wlength - (uint)(ebuffer - wbuffer);
set_iv(iv, pos_in_file, crypt_data->inbuf_counter);
if (encryption_crypt(ebuffer, elength, info->buffer, &length,
@@ -106,7 +106,7 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count)
DBUG_ASSERT(length <= info->buffer_length);
- copied= MY_MIN(Count, length - pos_offset);
+ copied= MY_MIN(Count, (size_t)(length - pos_offset));
memcpy(Buffer, info->buffer + pos_offset, copied);
Count-= copied;
@@ -120,7 +120,7 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count)
if (wlength < crypt_data->block_length && pos_in_file < info->end_of_file)
{
- info->error= pos_in_file - old_pos_in_file;
+ info->error= (int)(pos_in_file - old_pos_in_file);
DBUG_RETURN(1);
}
} while (Count);
@@ -146,7 +146,7 @@ static int my_b_encr_write(IO_CACHE *info, const uchar *Buffer, size_t Count)
if (info->seek_not_done)
{
DBUG_ASSERT(info->pos_in_file % info->buffer_length == 0);
- size_t wpos= info->pos_in_file / info->buffer_length * crypt_data->block_length;
+ my_off_t wpos= info->pos_in_file / info->buffer_length * crypt_data->block_length;
if ((mysql_file_seek(info->file, wpos, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR))
{
@@ -185,7 +185,7 @@ static int my_b_encr_write(IO_CACHE *info, const uchar *Buffer, size_t Count)
my_errno= 1;
DBUG_RETURN(info->error= -1);
}
- wlength= elength + ebuffer - wbuffer;
+ wlength= elength + (uint)(ebuffer - wbuffer);
if (length == info->buffer_length)
{
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 50918d8dcf2..9b6d0e86f90 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -17,6 +17,7 @@
#include <my_bit.h>
#include "sql_select.h"
#include "key.h"
+#include "sql_statistics.h"
/****************************************************************************
* Default MRR implementation (MRR to non-MRR converter)
@@ -63,7 +64,12 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
ha_rows rows, total_rows= 0;
uint n_ranges=0;
THD *thd= table->in_use;
+ uint limit= thd->variables.eq_range_index_dive_limit;
+ bool use_statistics_for_eq_range= eq_ranges_exceeds_limit(seq,
+ seq_init_param,
+ limit);
+
/* Default MRR implementation doesn't need buffer */
*bufsz= 0;
@@ -87,8 +93,15 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
min_endp= range.start_key.length? &range.start_key : NULL;
max_endp= range.end_key.length? &range.end_key : NULL;
}
+ int keyparts_used= my_count_bits(range.start_key.keypart_map);
if ((range.range_flag & UNIQUE_RANGE) && !(range.range_flag & NULL_RANGE))
rows= 1; /* there can be at most one row */
+ else if (use_statistics_for_eq_range &&
+ !(range.range_flag & NULL_RANGE) &&
+ (range.range_flag & EQ_RANGE) &&
+ table->key_info[keyno].actual_rec_per_key(keyparts_used - 1) > 0.5)
+ rows=
+ (ha_rows) table->key_info[keyno].actual_rec_per_key(keyparts_used - 1);
else
{
if (HA_POS_ERROR == (rows= this->records_in_range(keyno, min_endp,
diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc
index 01d00855aed..73281161335 100644
--- a/sql/mysql_install_db.cc
+++ b/sql/mysql_install_db.cc
@@ -49,6 +49,7 @@ static char *opt_datadir;
static char *opt_service;
static char *opt_password;
static int opt_port;
+static int opt_innodb_page_size;
static char *opt_socket;
static char *opt_os_user;
static char *opt_os_password;
@@ -58,6 +59,7 @@ static my_bool opt_skip_networking;
static my_bool opt_verbose_bootstrap;
static my_bool verbose_errors;
+#define DEFAULT_INNODB_PAGE_SIZE 16*1024
static struct my_option my_long_options[]=
{
@@ -83,6 +85,8 @@ static struct my_option my_long_options[]=
{"skip-networking", 'N', "Do not use TCP connections, use pipe instead",
&opt_skip_networking, &opt_skip_networking, 0 , GET_BOOL, OPT_ARG, 0, 0, 0, 0,
0, 0},
+ { "innodb-page-size", 'i', "Page size for innodb",
+ &opt_innodb_page_size, &opt_innodb_page_size, 0, GET_INT, REQUIRED_ARG, DEFAULT_INNODB_PAGE_SIZE, 1*1024, 64*1024, 0, 0, 0 },
{"silent", 's', "Print less information", &opt_silent,
&opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"verbose-bootstrap", 'o', "Include mysqld bootstrap output",&opt_verbose_bootstrap,
@@ -261,13 +265,13 @@ static char *init_bootstrap_command_line(char *cmdline, size_t size)
char basedir[MAX_PATH];
get_basedir(basedir, sizeof(basedir), mysqld_path);
- my_snprintf(cmdline, size-1,
- "\"\"%s\" --no-defaults %s --bootstrap"
+ my_snprintf(cmdline, size - 1,
+ "\"\"%s\" --no-defaults %s --innodb-page-size=%d --bootstrap"
" \"--lc-messages-dir=%s/share\""
" --basedir=. --datadir=. --default-storage-engine=myisam"
" --max_allowed_packet=9M "
" --net-buffer-length=16k\"", mysqld_path,
- opt_verbose_bootstrap?"--console":"", basedir );
+ opt_verbose_bootstrap ? "--console" : "", opt_innodb_page_size, basedir);
return cmdline;
}
@@ -318,7 +322,10 @@ static int create_myini()
{
fprintf(myini,"port=%d\n", opt_port);
}
-
+ if (opt_innodb_page_size != DEFAULT_INNODB_PAGE_SIZE)
+ {
+ fprintf(myini, "innodb-page-size=%d\n", opt_innodb_page_size);
+ }
/* Write out client settings. */
fprintf(myini, "[client]\n");
@@ -712,13 +719,6 @@ static int create_db_instance()
goto end;
}
- /*
- Remove innodb log files if they exist (this works around "different size logs"
- error in MSI installation). TODO : remove this with the next Innodb, where
- different size is handled gracefully.
- */
- DeleteFile("ib_logfile0");
- DeleteFile("ib_logfile1");
/* Create my.ini file in data directory.*/
ret= create_myini();
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 8d464ed75e6..2d872afaeb8 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2018, MariaDB
+ Copyright (c) 2008, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -79,6 +79,10 @@
#include "sql_callback.h"
#include "threadpool.h"
+#ifdef HAVE_OPENSSL
+#include <ssl_compat.h>
+#endif
+
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
#include "../storage/perfschema/pfs_server.h"
#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
@@ -338,7 +342,7 @@ static PSI_thread_key key_thread_handle_con_sockets;
static PSI_thread_key key_thread_handle_shutdown;
#endif /* __WIN__ */
-#if defined (HAVE_OPENSSL) && !defined(HAVE_YASSL)
+#ifdef HAVE_OPENSSL10
static PSI_rwlock_key key_rwlock_openssl;
#endif
#endif /* HAVE_PSI_INTERFACE */
@@ -361,10 +365,12 @@ static bool volatile select_thread_in_use, signal_thread_in_use;
static volatile bool ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_short_log_format= 0, opt_silent_startup= 0;
+bool my_disable_leak_check= false;
+
uint kill_cached_threads;
static uint wake_thread;
ulong max_used_connections;
-static volatile ulong cached_thread_count= 0;
+volatile ulong cached_thread_count= 0;
static char *mysqld_user, *mysqld_chroot;
static char *default_character_set_name;
static char *character_set_filesystem_name;
@@ -375,7 +381,7 @@ static char *default_collation_name;
char *default_storage_engine, *default_tmp_storage_engine;
char *enforced_storage_engine=NULL;
static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME;
-static I_List<THD> thread_cache;
+static I_List<CONNECT> thread_cache;
static bool binlog_format_used= false;
LEX_STRING opt_init_connect, opt_init_slave;
mysql_cond_t COND_thread_cache;
@@ -386,8 +392,11 @@ static DYNAMIC_ARRAY all_options;
/* Global variables */
bool opt_bin_log, opt_bin_log_used=0, opt_ignore_builtin_innodb= 0;
+bool opt_bin_log_compress;
+uint opt_bin_log_compress_min_len;
my_bool opt_log, debug_assert_if_crashed_table= 0, opt_help= 0;
-my_bool disable_log_notes;
+my_bool debug_assert_on_not_freed_memory= 0;
+my_bool disable_log_notes, opt_support_flashback= 0;
static my_bool opt_abort;
ulonglong log_output_options;
my_bool opt_userstat_running;
@@ -396,7 +405,6 @@ bool opt_error_log= IF_WIN(1,0);
bool opt_disable_networking=0, opt_skip_show_db=0;
bool opt_skip_name_resolve=0;
my_bool opt_character_set_client_handshake= 1;
-bool server_id_supplied = 0;
bool opt_endinfo, using_udf_functions;
my_bool locked_in_memory;
bool opt_using_transactions;
@@ -407,8 +415,7 @@ uint volatile global_disable_checkpoint;
ulong slow_start_timeout;
#endif
/*
- True if the bootstrap thread is running. Protected by LOCK_thread_count,
- just like thread_count.
+ True if the bootstrap thread is running. Protected by LOCK_start_thread.
Used in bootstrap() function to determine if the bootstrap thread
has completed. Note, that we can't use 'thread_count' instead,
since in 5.1, in presence of the Event Scheduler, there may be
@@ -420,7 +427,7 @@ ulong slow_start_timeout;
bootstrap either, since we want to be able to process event-related
SQL commands in the init file and in --bootstrap mode.
*/
-bool in_bootstrap= FALSE;
+bool volatile in_bootstrap= FALSE;
/**
@brief 'grant_option' is used to indicate if privileges needs
to be checked, in which case the lock, LOCK_grant, is used
@@ -526,7 +533,7 @@ ulong extra_max_connections;
uint max_digest_length= 0;
ulong slave_retried_transactions;
ulonglong slave_skipped_errors;
-ulong feature_files_opened_with_delayed_keys;
+ulong feature_files_opened_with_delayed_keys= 0, feature_check_constraint= 0;
ulonglong denied_connections;
my_decimal decimal_zero;
@@ -553,7 +560,8 @@ uint max_prepared_stmt_count;
statements.
*/
uint prepared_stmt_count=0;
-ulong thread_id=1L,current_pid;
+my_thread_id global_thread_id= 0;
+ulong current_pid;
ulong slow_launch_threads = 0;
uint sync_binlog_period= 0, sync_relaylog_period= 0,
sync_relayloginfo_period= 0, sync_masterinfo_period= 0;
@@ -628,7 +636,8 @@ DATE_TIME_FORMAT global_date_format, global_datetime_format, global_time_format;
Time_zone *default_tz;
const char *mysql_real_data_home_ptr= mysql_real_data_home;
-char server_version[SERVER_VERSION_LENGTH];
+char server_version[SERVER_VERSION_LENGTH], *server_version_ptr;
+bool using_custom_server_version= false;
char *mysqld_unix_port, *opt_mysql_tmpdir;
ulong thread_handling;
@@ -686,6 +695,14 @@ THD *next_global_thread(THD *thd)
}
struct system_variables global_system_variables;
+/**
+ Following is just for options parsing, used with a difference against
+ global_system_variables.
+
+ TODO: something should be done to get rid of following variables
+*/
+const char *current_dbug_option="";
+
struct system_variables max_system_variables;
struct system_status_var global_status_var;
@@ -708,9 +725,34 @@ SHOW_COMP_OPTION have_openssl;
/* Thread specific variables */
-pthread_key(MEM_ROOT**,THR_MALLOC);
pthread_key(THD*, THR_THD);
-mysql_mutex_t LOCK_thread_count, LOCK_thread_cache;
+
+/*
+ LOCK_thread_count protects the following variables:
+ thread_count Number of threads with THD that servers queries.
+ threads Linked list of active THD's.
+ The effect of this is that one can't unlink and
+ delete a THD as long as one has locked
+ LOCK_thread_count.
+ ready_to_exit
+ delayed_insert_threads
+*/
+mysql_mutex_t LOCK_thread_count;
+
+/*
+ LOCK_start_thread is used to syncronize thread start and stop with
+ other threads.
+
+ It also protects these variables:
+ handler_count
+ in_bootstrap
+ select_thread_in_use
+ slave_init_thread_running
+ check_temp_dir() call
+*/
+mysql_mutex_t LOCK_start_thread;
+
+mysql_mutex_t LOCK_thread_cache;
mysql_mutex_t
LOCK_status, LOCK_show_status, LOCK_error_log, LOCK_short_uuid_generator,
LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
@@ -725,6 +767,9 @@ mysql_mutex_t LOCK_stats, LOCK_global_user_client_stats,
/* This protects against changes in master_info_index */
mysql_mutex_t LOCK_active_mi;
+/* This protects connection id.*/
+mysql_mutex_t LOCK_thread_id;
+
/**
The below lock protects access to two global server variables:
max_prepared_stmt_count and prepared_stmt_count. These variables
@@ -737,8 +782,8 @@ mysql_mutex_t LOCK_prepared_stmt_count;
mysql_mutex_t LOCK_des_key_file;
#endif
mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
-mysql_rwlock_t LOCK_system_variables_hash;
-mysql_cond_t COND_thread_count;
+mysql_prlock_t LOCK_system_variables_hash;
+mysql_cond_t COND_thread_count, COND_start_thread;
pthread_t signal_thread;
pthread_attr_t connection_attrib;
mysql_mutex_t LOCK_server_started;
@@ -888,9 +933,11 @@ PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_relay_log_info_log_space_lock, key_relay_log_info_run_lock,
key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data,
key_LOCK_error_messages, key_LOG_INFO_lock,
+ key_LOCK_start_thread,
key_LOCK_thread_count, key_LOCK_thread_cache,
key_PARTITION_LOCK_auto_inc;
PSI_mutex_key key_RELAYLOG_LOCK_index;
+PSI_mutex_key key_LOCK_thread_id;
PSI_mutex_key key_LOCK_slave_state, key_LOCK_binlog_state,
key_LOCK_rpl_thread, key_LOCK_rpl_thread_pool, key_LOCK_parallel_entry;
@@ -928,6 +975,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_hash_filo_lock, "hash_filo::lock", 0},
{ &key_LOCK_active_mi, "LOCK_active_mi", PSI_FLAG_GLOBAL},
{ &key_LOCK_connection_count, "LOCK_connection_count", PSI_FLAG_GLOBAL},
+ { &key_LOCK_thread_id, "LOCK_thread_id", PSI_FLAG_GLOBAL},
{ &key_LOCK_crypt, "LOCK_crypt", PSI_FLAG_GLOBAL},
{ &key_LOCK_delayed_create, "LOCK_delayed_create", PSI_FLAG_GLOBAL},
{ &key_LOCK_delayed_insert, "LOCK_delayed_insert", PSI_FLAG_GLOBAL},
@@ -976,6 +1024,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_thread_cache, "LOCK_thread_cache", PSI_FLAG_GLOBAL},
{ &key_PARTITION_LOCK_auto_inc, "HA_DATA_PARTITION::LOCK_auto_inc", 0},
{ &key_LOCK_slave_state, "LOCK_slave_state", 0},
+ { &key_LOCK_start_thread, "LOCK_start_thread", PSI_FLAG_GLOBAL},
{ &key_LOCK_binlog_state, "LOCK_binlog_state", 0},
{ &key_LOCK_rpl_thread, "LOCK_rpl_thread", 0},
{ &key_LOCK_rpl_thread_pool, "LOCK_rpl_thread_pool", 0},
@@ -988,7 +1037,7 @@ PSI_rwlock_key key_rwlock_LOCK_grant, key_rwlock_LOCK_logger,
static PSI_rwlock_info all_server_rwlocks[]=
{
-#if defined (HAVE_OPENSSL) && !defined(HAVE_YASSL)
+#ifdef HAVE_OPENSSL10
{ &key_rwlock_openssl, "CRYPTO_dynlock_value::lock", 0},
#endif
{ &key_rwlock_LOCK_grant, "LOCK_grant", PSI_FLAG_GLOBAL},
@@ -1017,6 +1066,7 @@ PSI_cond_key key_BINLOG_COND_xid_list, key_BINLOG_update_cond,
key_rpl_group_info_sleep_cond,
key_TABLE_SHARE_cond, key_user_level_lock_cond,
key_COND_thread_count, key_COND_thread_cache, key_COND_flush_thread_cache,
+ key_COND_start_thread,
key_BINLOG_COND_queue_busy;
PSI_cond_key key_RELAYLOG_update_cond, key_COND_wakeup_ready,
key_COND_wait_commit;
@@ -1076,6 +1126,7 @@ static PSI_cond_info all_server_conds[]=
{ &key_COND_group_commit_orderer, "COND_group_commit_orderer", 0},
{ &key_COND_prepare_ordered, "COND_prepare_ordered", 0},
{ &key_COND_slave_background, "COND_slave_background", 0},
+ { &key_COND_start_thread, "COND_start_thread", PSI_FLAG_GLOBAL},
{ &key_COND_wait_gtid, "COND_wait_gtid", 0},
{ &key_COND_gtid_ignore_duplicates, "COND_gtid_ignore_duplicates", 0}
};
@@ -1202,8 +1253,13 @@ void init_net_server_extension(THD *thd)
/* Activate this private extension for the mysqld server. */
thd->net.extension= & thd->m_net_server_extension;
}
+#else
+void init_net_server_extension(THD *thd)
+{
+}
#endif /* EMBEDDED_LIBRARY */
+
/**
A log message for the error log, buffered in memory.
Log messages are temporarily buffered when generated before the error log
@@ -1430,7 +1486,6 @@ my_bool plugins_are_initialized= FALSE;
#ifndef DBUG_OFF
static const char* default_dbug_option;
#endif
-const char *current_dbug_option="";
#ifdef HAVE_LIBWRAP
const char *libwrapName= NULL;
int allow_severity = LOG_INFO;
@@ -1458,7 +1513,7 @@ scheduler_functions *thread_scheduler= &thread_scheduler_struct,
#ifdef HAVE_OPENSSL
#include <openssl/crypto.h>
-#ifndef HAVE_YASSL
+#ifdef HAVE_OPENSSL10
typedef struct CRYPTO_dynlock_value
{
mysql_rwlock_t lock;
@@ -1469,7 +1524,7 @@ static openssl_lock_t *openssl_dynlock_create(const char *, int);
static void openssl_dynlock_destroy(openssl_lock_t *, const char *, int);
static void openssl_lock_function(int, int, const char *, int);
static void openssl_lock(int, openssl_lock_t *, const char *, int);
-#endif
+#endif /* HAVE_OPENSSL10 */
char *des_key_file;
#ifndef EMBEDDED_LIBRARY
struct st_VioSSLFd *ssl_acceptor_fd;
@@ -1521,7 +1576,7 @@ static void close_server_sock();
static void clean_up_mutexes(void);
static void wait_for_signal_thread_to_end(void);
static void create_pid_file();
-static void mysqld_exit(int exit_code) __attribute__((noreturn));
+ATTRIBUTE_NORETURN static void mysqld_exit(int exit_code);
#endif
static void delete_pid_file(myf flags);
static void end_ssl();
@@ -1545,10 +1600,10 @@ static void close_connections(void)
/* kill connection thread */
#if !defined(__WIN__)
- DBUG_PRINT("quit", ("waiting for select thread: 0x%lx",
- (ulong) select_thread));
- mysql_mutex_lock(&LOCK_thread_count);
+ DBUG_PRINT("quit", ("waiting for select thread: %lu",
+ (ulong)select_thread));
+ mysql_mutex_lock(&LOCK_start_thread);
while (select_thread_in_use)
{
struct timespec abstime;
@@ -1562,7 +1617,7 @@ static void close_connections(void)
set_timespec(abstime, 2);
for (uint tmp=0 ; tmp < 10 && select_thread_in_use; tmp++)
{
- error= mysql_cond_timedwait(&COND_thread_count, &LOCK_thread_count,
+ error= mysql_cond_timedwait(&COND_start_thread, &LOCK_start_thread,
&abstime);
if (error != EINTR)
break;
@@ -1573,7 +1628,7 @@ static void close_connections(void)
#endif
close_server_sock();
}
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_mutex_unlock(&LOCK_start_thread);
#endif /* __WIN__ */
@@ -1642,11 +1697,15 @@ static void close_connections(void)
while ((tmp=it++))
{
DBUG_PRINT("quit",("Informing thread %ld that it's time to die",
- tmp->thread_id));
- /* We skip slave threads & scheduler on this first loop through. */
+ (ulong) tmp->thread_id));
+ /* We skip slave threads on this first loop through. */
if (tmp->slave_thread)
continue;
+ /* cannot use 'continue' inside DBUG_EXECUTE_IF()... */
+ if (DBUG_EVALUATE_IF("only_kill_system_threads", !tmp->system_thread, 0))
+ continue;
+
#ifdef WITH_WSREP
/* skip wsrep system threads as well */
if (WSREP(tmp) && (tmp->wsrep_exec_mode==REPL_RECV || tmp->wsrep_applier))
@@ -1683,6 +1742,7 @@ static void close_connections(void)
Events::deinit();
slave_prepare_for_shutdown();
+ mysql_bin_log.stop_background_thread();
/*
Give threads time to die.
@@ -1698,6 +1758,8 @@ static void close_connections(void)
much smaller than even 2 seconds, this is only a safety fallback against
stuck threads so server shutdown is not held up forever.
*/
+ DBUG_PRINT("info", ("thread_count: %d", thread_count));
+
for (int i= 0; *(volatile int32*) &thread_count && i < 1000; i++)
my_sleep(20000);
@@ -1709,11 +1771,9 @@ static void close_connections(void)
for (;;)
{
- DBUG_PRINT("quit",("Locking LOCK_thread_count"));
mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
if (!(tmp=threads.get()))
{
- DBUG_PRINT("quit",("Unlocking LOCK_thread_count"));
mysql_mutex_unlock(&LOCK_thread_count);
break;
}
@@ -1722,7 +1782,7 @@ static void close_connections(void)
{
if (global_system_variables.log_warnings)
sql_print_warning(ER_DEFAULT(ER_FORCING_CLOSE),my_progname,
- tmp->thread_id,
+ (ulong) tmp->thread_id,
(tmp->main_security_ctx.user ?
tmp->main_security_ctx.user : ""));
/*
@@ -1731,10 +1791,11 @@ static void close_connections(void)
*/
THD* save_thd= current_thd;
set_current_thd(tmp);
- close_connection(tmp,ER_SERVER_SHUTDOWN);
+ close_connection(tmp);
set_current_thd(save_thd);
}
#endif
+
#ifdef WITH_WSREP
/*
* WSREP_TODO:
@@ -1817,10 +1878,35 @@ static void close_server_sock()
#endif /*EMBEDDED_LIBRARY*/
-void kill_mysql(void)
+/**
+ Set shutdown user
+
+ @note this function may be called by multiple threads concurrently, thus
+ it performs safe update of shutdown_user (first thread wins).
+*/
+
+static volatile char *shutdown_user;
+static void set_shutdown_user(THD *thd)
+{
+ char user_host_buff[MAX_USER_HOST_SIZE + 1];
+ char *user, *expected_shutdown_user= 0;
+
+ make_user_name(thd, user_host_buff);
+
+ if ((user= my_strdup(user_host_buff, MYF(0))) &&
+ !my_atomic_casptr((void **) &shutdown_user,
+ (void **) &expected_shutdown_user, user))
+ my_free(user);
+}
+
+
+void kill_mysql(THD *thd)
{
DBUG_ENTER("kill_mysql");
+ if (thd)
+ set_shutdown_user(thd);
+
#if defined(SIGNALS_DONT_BREAK_READ) && !defined(EMBEDDED_LIBRARY)
abort_loop=1; // Break connection loops
close_server_sock(); // Force accept to wake up
@@ -1899,7 +1985,13 @@ static void __cdecl kill_server(int sig_ptr)
if (sig != 0) // 0 is not a valid signal number
my_sigset(sig, SIG_IGN); /* purify inspected */
if (sig == MYSQL_KILL_SIGNAL || sig == 0)
- sql_print_information(ER_DEFAULT(ER_NORMAL_SHUTDOWN),my_progname);
+ {
+ char *user= (char *) my_atomic_loadptr((void**) &shutdown_user);
+ sql_print_information(ER_DEFAULT(ER_NORMAL_SHUTDOWN), my_progname,
+ user ? user : "unknown");
+ if (user)
+ my_free(user);
+ }
else
sql_print_error(ER_DEFAULT(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */
@@ -1929,8 +2021,6 @@ static void __cdecl kill_server(int sig_ptr)
if (wsrep_inited == 1)
wsrep_deinit(true);
- wsrep_thr_deinit();
-
if (sig != MYSQL_KILL_SIGNAL &&
sig != 0)
unireg_abort(1); /* purecov: inspected */
@@ -1971,7 +2061,8 @@ pthread_handler_t kill_server_thread(void *arg __attribute__((unused)))
extern "C" sig_handler print_signal_warning(int sig)
{
if (global_system_variables.log_warnings)
- sql_print_warning("Got signal %d from thread %ld", sig,my_thread_id());
+ sql_print_warning("Got signal %d from thread %u", sig,
+ (uint)my_thread_id());
#ifdef SIGNAL_HANDLER_RESET_ON_DELIVERY
my_sigset(sig,print_signal_warning); /* int. thread system calls */
#endif
@@ -2061,8 +2152,6 @@ static void cleanup_tls()
{
if (THR_THD)
(void)pthread_key_delete(THR_THD);
- if (THR_MALLOC)
- (void)pthread_key_delete(THR_MALLOC);
}
@@ -2084,6 +2173,15 @@ static void mysqld_exit(int exit_code)
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
shutdown_performance_schema(); // we do it as late as possible
#endif
+ set_malloc_size_cb(NULL);
+ if (opt_endinfo && global_status_var.global_memory_used)
+ fprintf(stderr, "Warning: Memory not freed: %ld\n",
+ (long) global_status_var.global_memory_used);
+ if (!opt_debugging && !my_disable_leak_check && exit_code == 0 &&
+ debug_assert_on_not_freed_memory)
+ {
+ DBUG_ASSERT(global_status_var.global_memory_used == 0);
+ }
cleanup_tls();
DBUG_LEAVE;
sd_notify(0, "STATUS=MariaDB server is down");
@@ -2155,11 +2253,12 @@ void clean_up(bool print_message)
free_global_client_stats();
free_global_table_stats();
free_global_index_stats();
- delete_dynamic(&all_options);
+ delete_dynamic(&all_options); // This should be empty
free_all_rpl_filters();
#ifdef HAVE_REPLICATION
end_slave_list();
#endif
+ wsrep_thr_deinit();
my_uuid_end();
delete binlog_filter;
delete global_rpl_filter;
@@ -2176,21 +2275,24 @@ void clean_up(bool print_message)
if (print_message && my_default_lc_messages && server_start_time)
sql_print_information(ER_DEFAULT(ER_SHUTDOWN_COMPLETE),my_progname);
- cleanup_errmsgs();
MYSQL_CALLBACK(thread_scheduler, end, ());
thread_scheduler= 0;
mysql_library_end();
finish_client_errs();
- (void) my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST); // finish server errs
- DBUG_PRINT("quit", ("Error messages freed"));
+ cleanup_errmsgs();
+ free_error_messages();
/* Tell main we are ready */
logger.cleanup_end();
sys_var_end();
free_charsets();
+
+ /*
+ Signal mysqld_main() that it can exit
+ do the broadcast inside the lock to ensure that my_end() is not called
+ during broadcast()
+ */
mysql_mutex_lock(&LOCK_thread_count);
- DBUG_PRINT("quit", ("got thread count lock"));
ready_to_exit=1;
- /* do the broadcast inside the lock to ensure that my_end() is not called */
mysql_cond_broadcast(&COND_thread_count);
mysql_mutex_unlock(&LOCK_thread_count);
@@ -2238,6 +2340,7 @@ static void clean_up_mutexes()
mysql_rwlock_destroy(&LOCK_grant);
mysql_mutex_destroy(&LOCK_thread_count);
mysql_mutex_destroy(&LOCK_thread_cache);
+ mysql_mutex_destroy(&LOCK_start_thread);
mysql_mutex_destroy(&LOCK_status);
mysql_mutex_destroy(&LOCK_show_status);
mysql_mutex_destroy(&LOCK_delayed_insert);
@@ -2246,17 +2349,18 @@ static void clean_up_mutexes()
mysql_mutex_destroy(&LOCK_crypt);
mysql_mutex_destroy(&LOCK_user_conn);
mysql_mutex_destroy(&LOCK_connection_count);
+ mysql_mutex_destroy(&LOCK_thread_id);
mysql_mutex_destroy(&LOCK_stats);
mysql_mutex_destroy(&LOCK_global_user_client_stats);
mysql_mutex_destroy(&LOCK_global_table_stats);
mysql_mutex_destroy(&LOCK_global_index_stats);
#ifdef HAVE_OPENSSL
mysql_mutex_destroy(&LOCK_des_key_file);
-#ifndef HAVE_YASSL
+#ifdef HAVE_OPENSSL10
for (int i= 0; i < CRYPTO_num_locks(); ++i)
mysql_rwlock_destroy(&openssl_stdlocks[i].lock);
OPENSSL_free(openssl_stdlocks);
-#endif /* HAVE_YASSL */
+#endif /* HAVE_OPENSSL10 */
#endif /* HAVE_OPENSSL */
#ifdef HAVE_REPLICATION
mysql_mutex_destroy(&LOCK_rpl_status);
@@ -2265,12 +2369,13 @@ static void clean_up_mutexes()
mysql_rwlock_destroy(&LOCK_sys_init_connect);
mysql_rwlock_destroy(&LOCK_sys_init_slave);
mysql_mutex_destroy(&LOCK_global_system_variables);
- mysql_rwlock_destroy(&LOCK_system_variables_hash);
+ mysql_prlock_destroy(&LOCK_system_variables_hash);
mysql_mutex_destroy(&LOCK_short_uuid_generator);
mysql_mutex_destroy(&LOCK_prepared_stmt_count);
mysql_mutex_destroy(&LOCK_error_messages);
mysql_cond_destroy(&COND_thread_count);
mysql_cond_destroy(&COND_thread_cache);
+ mysql_cond_destroy(&COND_start_thread);
mysql_cond_destroy(&COND_flush_thread_cache);
mysql_mutex_destroy(&LOCK_server_started);
mysql_cond_destroy(&COND_server_started);
@@ -2292,7 +2397,9 @@ static void clean_up_mutexes()
static void set_ports()
{
}
-
+void close_connection(THD *thd, uint sql_errno)
+{
+}
#else
static void set_ports()
{
@@ -2714,6 +2821,7 @@ static void network_init(void)
@note
For the connection that is doing shutdown, this is called twice
*/
+
void close_connection(THD *thd, uint sql_errno)
{
DBUG_ENTER("close_connection");
@@ -2749,20 +2857,6 @@ extern "C" sig_handler end_mysqld_signal(int sig __attribute__((unused)))
DBUG_VOID_RETURN; /* purecov: deadcode */
}
-
-/*
- Cleanup THD object
-
- SYNOPSIS
- thd_cleanup()
- thd Thread handler
-*/
-
-void thd_cleanup(THD *thd)
-{
- thd->cleanup();
-}
-
/*
Decrease number of connections
@@ -2770,20 +2864,10 @@ void thd_cleanup(THD *thd)
dec_connection_count()
*/
-void dec_connection_count(THD *thd)
+void dec_connection_count(scheduler_functions *scheduler)
{
-#ifdef WITH_WSREP
- /*
- Do not decrement when its wsrep system thread. wsrep_applier is set for
- applier as well as rollbacker threads.
- */
- if (thd->wsrep_applier)
- return;
-#endif /* WITH_WSREP */
-
- DBUG_ASSERT(*thd->scheduler->connection_count > 0);
mysql_mutex_lock(&LOCK_connection_count);
- (*thd->scheduler->connection_count)--;
+ (*scheduler->connection_count)--;
mysql_mutex_unlock(&LOCK_connection_count);
}
@@ -2802,7 +2886,7 @@ void dec_connection_count(THD *thd)
void signal_thd_deleted()
{
- if (!thread_count && ! service_thread_count)
+ if (!thread_count && !service_thread_count)
{
/* Signal close_connections() that all THD's are freed */
mysql_mutex_lock(&LOCK_thread_count);
@@ -2813,38 +2897,29 @@ void signal_thd_deleted()
/*
- Unlink thd from global list of available connections and free thd
+ Unlink thd from global list of available connections
SYNOPSIS
unlink_thd()
thd Thread handler
-
- NOTES
- LOCK_thread_count is locked and left locked
*/
void unlink_thd(THD *thd)
{
DBUG_ENTER("unlink_thd");
- DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
+ DBUG_PRINT("enter", ("thd: %p", thd));
- thd_cleanup(thd);
- dec_connection_count(thd);
-
- thd->add_status_to_global();
-
- mysql_mutex_lock(&LOCK_thread_count);
- thd->unlink();
/*
- Used by binlog_reset_master. It would be cleaner to use
- DEBUG_SYNC here, but that's not possible because the THD's debug
- sync feature has been shut down at this point.
+ Do not decrement when it's a wsrep system thread. wsrep_applier is set for
+ applier as well as rollbacker threads.
*/
- DBUG_EXECUTE_IF("sleep_after_lock_thread_count_before_delete_thd", sleep(5););
- mysql_mutex_unlock(&LOCK_thread_count);
+ if (IF_WSREP(!thd->wsrep_applier, 1))
+ dec_connection_count(thd->scheduler);
+ thd->cleanup();
+ thd->add_status_to_global();
- delete thd;
- thread_safe_decrement32(&thread_count);
+ unlink_not_visible_thd(thd);
+ thd->free_connection();
DBUG_VOID_RETURN;
}
@@ -2855,6 +2930,7 @@ void unlink_thd(THD *thd)
SYNOPSIS
cache_thread()
+ thd Thread handler
NOTES
LOCK_thread_cache is used to protect the cache variables
@@ -2866,9 +2942,11 @@ void unlink_thd(THD *thd)
*/
-static bool cache_thread()
+static bool cache_thread(THD *thd)
{
+ struct timespec abstime;
DBUG_ENTER("cache_thread");
+ DBUG_ASSERT(thd);
mysql_mutex_lock(&LOCK_thread_cache);
if (cached_thread_count < thread_cache_size &&
@@ -2886,20 +2964,50 @@ static bool cache_thread()
PSI_THREAD_CALL(delete_current_thread)();
#endif
+#ifndef DBUG_OFF
+ while (_db_is_pushed_())
+ _db_pop_();
+#endif
+
+ set_timespec(abstime, THREAD_CACHE_TIMEOUT);
while (!abort_loop && ! wake_thread && ! kill_cached_threads)
- mysql_cond_wait(&COND_thread_cache, &LOCK_thread_cache);
+ {
+ int error= mysql_cond_timedwait(&COND_thread_cache, &LOCK_thread_cache,
+ &abstime);
+ if (error == ETIMEDOUT || error == ETIME)
+ {
+ /*
+ If timeout, end thread.
+ If a new thread is requested (wake_thread is set), we will handle
+ the call, even if we got a timeout (as we are already awake and free)
+ */
+ break;
+ }
+ }
cached_thread_count--;
if (kill_cached_threads)
mysql_cond_signal(&COND_flush_thread_cache);
if (wake_thread)
{
- THD *thd;
+ CONNECT *connect;
+
wake_thread--;
- thd= thread_cache.get();
+ connect= thread_cache.get();
mysql_mutex_unlock(&LOCK_thread_cache);
- thd->thread_stack= (char*) &thd; // For store_globals
- (void) thd->store_globals();
+ if (!(connect->create_thd(thd)))
+ {
+ /* Out of resources. Free thread to get more resources */
+ connect->close_and_delete();
+ DBUG_RETURN(0);
+ }
+ delete connect;
+
+ /*
+ We have to call store_globals to update mysys_var->id and lock_info
+ with the new thread_id
+ */
+ thd->store_globals();
#ifdef HAVE_PSI_THREAD_INTERFACE
/*
@@ -2911,19 +3019,12 @@ static bool cache_thread()
PSI_THREAD_CALL(set_thread)(psi);
#endif
- /*
- THD::mysys_var::abort is associated with physical thread rather
- than with THD object. So we need to reset this flag before using
- this thread for handling of new THD object/connection.
- */
+ /* reset abort flag for the thread */
thd->mysys_var->abort= 0;
thd->thr_create_utime= microsecond_interval_timer();
thd->start_utime= thd->thr_create_utime;
- /* Link thd into list of all active threads (THD's) */
- mysql_mutex_lock(&LOCK_thread_count);
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
+ add_to_active_threads(thd);
DBUG_RETURN(1);
}
}
@@ -2937,7 +3038,7 @@ static bool cache_thread()
SYNOPSIS
one_thread_per_connection_end()
- thd Thread handler
+ thd Thread handler. This may be null if we run out of resources.
put_in_cache Store thread in cache, if there is room in it
Normally this is true in all cases except when we got
out of resources initializing the current thread
@@ -2954,14 +3055,18 @@ static bool cache_thread()
bool one_thread_per_connection_end(THD *thd, bool put_in_cache)
{
DBUG_ENTER("one_thread_per_connection_end");
- const bool wsrep_applier= IF_WSREP(thd->wsrep_applier, false);
- unlink_thd(thd);
+ if (thd)
+ {
+ const bool wsrep_applier= IF_WSREP(thd->wsrep_applier, false);
- if (!wsrep_applier && put_in_cache && cache_thread())
- DBUG_RETURN(0); // Thread is reused
+ unlink_thd(thd);
+ if (!wsrep_applier && put_in_cache && cache_thread(thd))
+ DBUG_RETURN(0); // Thread is reused
+ delete thd;
+ }
- signal_thd_deleted();
+ DBUG_PRINT("info", ("killing thread"));
DBUG_LEAVE; // Must match DBUG_ENTER()
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
ERR_remove_state(0);
@@ -3329,7 +3434,7 @@ static void start_signal_handler(void)
(void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);
(void) my_setstacksize(&thr_attr,my_thread_stack_size);
- mysql_mutex_lock(&LOCK_thread_count);
+ mysql_mutex_lock(&LOCK_start_thread);
if ((error= mysql_thread_create(key_thread_signal_hand,
&signal_thread, &thr_attr, signal_hand, 0)))
{
@@ -3337,8 +3442,8 @@ static void start_signal_handler(void)
error,errno);
exit(1);
}
- mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_cond_wait(&COND_start_thread, &LOCK_start_thread);
+ mysql_mutex_unlock(&LOCK_start_thread);
(void) pthread_attr_destroy(&thr_attr);
DBUG_VOID_RETURN;
@@ -3388,12 +3493,12 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
signal to start_signal_handler that we are ready
This works by waiting for start_signal_handler to free mutex,
after which we signal it that we are ready.
- At this pointer there is no other threads running, so there
+ At this point there are no other threads running, so there
should not be any other mysql_cond_signal() calls.
*/
- mysql_mutex_lock(&LOCK_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
- mysql_cond_broadcast(&COND_thread_count);
+ mysql_mutex_lock(&LOCK_start_thread);
+ mysql_cond_broadcast(&COND_start_thread);
+ mysql_mutex_unlock(&LOCK_start_thread);
(void) pthread_sigmask(SIG_BLOCK,&set,NULL);
for (;;)
@@ -3676,6 +3781,7 @@ SHOW_VAR com_status_vars[]= {
{"alter_server", STMT_STATUS(SQLCOM_ALTER_SERVER)},
{"alter_table", STMT_STATUS(SQLCOM_ALTER_TABLE)},
{"alter_tablespace", STMT_STATUS(SQLCOM_ALTER_TABLESPACE)},
+ {"alter_user", STMT_STATUS(SQLCOM_ALTER_USER)},
{"analyze", STMT_STATUS(SQLCOM_ANALYZE)},
{"assign_to_keycache", STMT_STATUS(SQLCOM_ASSIGN_TO_KEYCACHE)},
{"begin", STMT_STATUS(SQLCOM_BEGIN)},
@@ -3717,6 +3823,7 @@ SHOW_VAR com_status_vars[]= {
{"drop_user", STMT_STATUS(SQLCOM_DROP_USER)},
{"drop_view", STMT_STATUS(SQLCOM_DROP_VIEW)},
{"empty_query", STMT_STATUS(SQLCOM_EMPTY_QUERY)},
+ {"execute_immediate", STMT_STATUS(SQLCOM_EXECUTE_IMMEDIATE)},
{"execute_sql", STMT_STATUS(SQLCOM_EXECUTE)},
{"flush", STMT_STATUS(SQLCOM_FLUSH)},
{"get_diagnostics", STMT_STATUS(SQLCOM_GET_DIAGNOSTICS)},
@@ -3732,6 +3839,7 @@ SHOW_VAR com_status_vars[]= {
{"kill", STMT_STATUS(SQLCOM_KILL)},
{"load", STMT_STATUS(SQLCOM_LOAD)},
{"lock_tables", STMT_STATUS(SQLCOM_LOCK_TABLES)},
+ {"multi", COM_STATUS(com_multi)},
{"optimize", STMT_STATUS(SQLCOM_OPTIMIZE)},
{"preload_keys", STMT_STATUS(SQLCOM_PRELOAD_KEYS)},
{"prepare_sql", STMT_STATUS(SQLCOM_PREPARE)},
@@ -3765,6 +3873,7 @@ SHOW_VAR com_status_vars[]= {
{"show_create_proc", STMT_STATUS(SQLCOM_SHOW_CREATE_PROC)},
{"show_create_table", STMT_STATUS(SQLCOM_SHOW_CREATE)},
{"show_create_trigger", STMT_STATUS(SQLCOM_SHOW_CREATE_TRIGGER)},
+ {"show_create_user", STMT_STATUS(SQLCOM_SHOW_CREATE_USER)},
{"show_databases", STMT_STATUS(SQLCOM_SHOW_DATABASES)},
{"show_engine_logs", STMT_STATUS(SQLCOM_SHOW_ENGINE_LOGS)},
{"show_engine_mutex", STMT_STATUS(SQLCOM_SHOW_ENGINE_MUTEX)},
@@ -3900,9 +4009,9 @@ void init_com_statement_info()
extern "C" my_thread_id mariadb_dbug_id()
{
THD *thd;
- if ((thd= current_thd))
+ if ((thd= current_thd) && thd->thread_dbug_id)
{
- return thd->thread_id;
+ return thd->thread_dbug_id;
}
return my_thread_dbug_id();
}
@@ -3943,7 +4052,8 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, buf2);
}
}
- DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0);
+ DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0 ||
+ !debug_assert_on_not_freed_memory);
}
else if (likely(thd))
{
@@ -3952,12 +4062,11 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
thd->status_var.global_memory_used+= size;
}
else
- {
update_global_memory_status(size);
- }
}
}
+
/**
Create a replication file name or base for file names.
@@ -4021,21 +4130,14 @@ static int init_common_variables()
connection_errors_peer_addr= 0;
my_decimal_set_zero(&decimal_zero); // set decimal_zero constant;
- if (pthread_key_create(&THR_MALLOC,NULL))
- {
- sql_print_error("Can't create thread-keys");
- return 1;
- }
-
init_libstrings();
tzset(); // Set tzname
- sf_leaking_memory= 0; // no memory leaks from now on
#ifdef SAFEMALLOC
sf_malloc_dbug_id= mariadb_dbug_id;
#endif
- max_system_variables.pseudo_thread_id= (ulong)~0;
+ max_system_variables.pseudo_thread_id= ~(my_thread_id) 0;
server_start_time= flush_status_time= my_time(0);
my_disable_copystat_in_redel= 1;
@@ -4044,15 +4146,22 @@ static int init_common_variables()
if (!global_rpl_filter || !binlog_filter)
{
sql_perror("Could not allocate replication and binlog filters");
- return 1;
+ exit(1);
}
- if (init_thread_environment() ||
- mysql_init_variables())
- return 1;
+#ifdef HAVE_OPENSSL
+ if (check_openssl_compatibility())
+ {
+ sql_print_error("Incompatible OpenSSL version. Cannot continue...");
+ exit(1);
+ }
+#endif
+
+ if (init_thread_environment() || mysql_init_variables())
+ exit(1);
if (ignore_db_dirs_init())
- return 1;
+ exit(1);
#ifdef HAVE_TZNAME
struct tm tm_tmp;
@@ -4106,7 +4215,7 @@ static int init_common_variables()
if (!IS_TIME_T_VALID_FOR_TIMESTAMP(server_start_time))
{
sql_print_error("This MySQL server doesn't support dates later than 2038");
- return 1;
+ exit(1);
}
opt_log_basename= const_cast<char *>("mysql");
@@ -4155,7 +4264,7 @@ static int init_common_variables()
new entries could be added to that list.
*/
if (add_status_vars(status_vars))
- return 1; // an error was already reported
+ exit(1); // an error was already reported
#ifndef DBUG_OFF
/*
@@ -4182,16 +4291,32 @@ static int init_common_variables()
of SQLCOM_ constants.
*/
compile_time_assert(sizeof(com_status_vars)/sizeof(com_status_vars[0]) - 1 ==
- SQLCOM_END + 10);
+ SQLCOM_END + 11);
#endif
if (get_options(&remaining_argc, &remaining_argv))
- return 1;
- set_server_version();
+ exit(1);
+ if (IS_SYSVAR_AUTOSIZE(&server_version_ptr))
+ set_server_version(server_version, sizeof(server_version));
+
+ mysql_real_data_home_len= uint(strlen(mysql_real_data_home));
if (!opt_abort)
- sql_print_information("%s (mysqld %s) starting as process %lu ...",
- my_progname, server_version, (ulong) getpid());
+ {
+ if (IS_SYSVAR_AUTOSIZE(&server_version_ptr))
+ sql_print_information("%s (mysqld %s) starting as process %lu ...",
+ my_progname, server_version, (ulong) getpid());
+ else
+ {
+ char real_server_version[SERVER_VERSION_LENGTH];
+ set_server_version(real_server_version, sizeof(real_server_version));
+ sql_print_information("%s (mysqld %s as %s) starting as process %lu ...",
+ my_progname, real_server_version, server_version,
+ (ulong) getpid());
+ }
+ }
+
+ sf_leaking_memory= 0; // no memory leaks from now on
#ifndef EMBEDDED_LIBRARY
if (opt_abort && !opt_verbose)
@@ -4201,7 +4326,7 @@ static int init_common_variables()
DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname,
server_version, SYSTEM_TYPE,MACHINE_TYPE));
-#ifdef HAVE_LARGE_PAGES
+#ifdef HAVE_LINUX_LARGE_PAGES
/* Initialize large page size */
if (opt_large_pages)
{
@@ -4216,7 +4341,7 @@ static int init_common_variables()
else
SYSVAR_AUTOSIZE(opt_large_pages, 0);
}
-#endif /* HAVE_LARGE_PAGES */
+#endif /* HAVE_LINUX_LARGE_PAGES */
#ifdef HAVE_SOLARIS_LARGE_PAGES
#define LARGE_PAGESIZE (4*1024*1024) /* 4MB */
#define SUPER_LARGE_PAGESIZE (256*1024*1024) /* 256MB */
@@ -4269,31 +4394,11 @@ static int init_common_variables()
#endif /* HAVE_SOLARIS_LARGE_PAGES */
-#if defined(HAVE_POOL_OF_THREADS) && !defined(_WIN32)
+#if defined(HAVE_POOL_OF_THREADS)
if (IS_SYSVAR_AUTOSIZE(&threadpool_size))
SYSVAR_AUTOSIZE(threadpool_size, my_getncpus());
#endif
- /* Fix host_cache_size. */
- if (IS_SYSVAR_AUTOSIZE(&host_cache_size))
- {
- if (max_connections <= 628 - 128)
- SYSVAR_AUTOSIZE(host_cache_size, 128 + max_connections);
- else if (max_connections <= ((ulong)(2000 - 628)) * 20 + 500)
- SYSVAR_AUTOSIZE(host_cache_size, 628 + ((max_connections - 500) / 20));
- else
- SYSVAR_AUTOSIZE(host_cache_size, 2000);
- }
-
- /* Fix back_log (back_log == 0 added for MySQL compatibility) */
- if (back_log == 0 || IS_SYSVAR_AUTOSIZE(&back_log))
- {
- if ((900 - 50) * 5 >= max_connections)
- SYSVAR_AUTOSIZE(back_log, (50 + max_connections / 5));
- else
- SYSVAR_AUTOSIZE(back_log, 900);
- }
-
/* connections and databases needs lots of files */
{
uint files, wanted_files, max_open_files, min_tc_size, extra_files,
@@ -4372,6 +4477,30 @@ static int init_common_variables()
Now we can fix other variables depending on this variable.
*/
+ /* Fix host_cache_size */
+ if (IS_SYSVAR_AUTOSIZE(&host_cache_size))
+ {
+ /*
+ The default value is 128.
+ The autoset value is 128, plus 1 for a value of max_connections
+ up to 500, plus 1 for every increment of 20 over 500 in the
+ max_connections value, capped at 2000.
+ */
+ uint size= (HOST_CACHE_SIZE + MY_MIN(max_connections, 500) +
+ MY_MAX(((long) max_connections)-500,0)/20);
+ SYSVAR_AUTOSIZE(host_cache_size, size);
+ }
+
+ /* Fix back_log (back_log == 0 added for MySQL compatibility) */
+ if (back_log == 0 || IS_SYSVAR_AUTOSIZE(&back_log))
+ {
+ /*
+ The default value is 150.
+ The autoset value is 50 + max_connections / 5 capped at 900
+ */
+ SYSVAR_AUTOSIZE(back_log, MY_MIN(900, (50 + max_connections / 5)));
+ }
+
unireg_init(opt_specialflag); /* Set up extern variabels */
if (!(my_default_lc_messages=
my_locale_by_name(lc_messages)))
@@ -4586,6 +4715,7 @@ static int init_thread_environment()
DBUG_ENTER("init_thread_environment");
mysql_mutex_init(key_LOCK_thread_count, &LOCK_thread_count, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_thread_cache, &LOCK_thread_cache, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_start_thread, &LOCK_start_thread, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_status, &LOCK_status, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_show_status, &LOCK_show_status, MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_LOCK_delayed_insert,
@@ -4601,7 +4731,7 @@ static int init_thread_environment()
&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
mysql_mutex_record_order(&LOCK_active_mi, &LOCK_global_system_variables);
mysql_mutex_record_order(&LOCK_status, &LOCK_thread_count);
- mysql_rwlock_init(key_rwlock_LOCK_system_variables_hash,
+ mysql_prlock_init(key_rwlock_LOCK_system_variables_hash,
&LOCK_system_variables_hash);
mysql_mutex_init(key_LOCK_prepared_stmt_count,
&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST);
@@ -4611,6 +4741,8 @@ static int init_thread_environment()
&LOCK_short_uuid_generator, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_connection_count,
&LOCK_connection_count, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_thread_id,
+ &LOCK_thread_id, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_stats, &LOCK_stats, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_global_user_client_stats,
&LOCK_global_user_client_stats, MY_MUTEX_INIT_FAST);
@@ -4632,7 +4764,7 @@ static int init_thread_environment()
#ifdef HAVE_OPENSSL
mysql_mutex_init(key_LOCK_des_key_file,
&LOCK_des_key_file, MY_MUTEX_INIT_FAST);
-#ifndef HAVE_YASSL
+#ifdef HAVE_OPENSSL10
openssl_stdlocks= (openssl_lock_t*) OPENSSL_malloc(CRYPTO_num_locks() *
sizeof(openssl_lock_t));
for (int i= 0; i < CRYPTO_num_locks(); ++i)
@@ -4641,13 +4773,14 @@ static int init_thread_environment()
CRYPTO_set_dynlock_destroy_callback(openssl_dynlock_destroy);
CRYPTO_set_dynlock_lock_callback(openssl_lock);
CRYPTO_set_locking_callback(openssl_lock_function);
-#endif
-#endif
+#endif /* HAVE_OPENSSL10 */
+#endif /* HAVE_OPENSSL */
mysql_rwlock_init(key_rwlock_LOCK_sys_init_connect, &LOCK_sys_init_connect);
mysql_rwlock_init(key_rwlock_LOCK_sys_init_slave, &LOCK_sys_init_slave);
mysql_rwlock_init(key_rwlock_LOCK_grant, &LOCK_grant);
mysql_cond_init(key_COND_thread_count, &COND_thread_count, NULL);
mysql_cond_init(key_COND_thread_cache, &COND_thread_cache, NULL);
+ mysql_cond_init(key_COND_start_thread, &COND_start_thread, NULL);
mysql_cond_init(key_COND_flush_thread_cache, &COND_flush_thread_cache, NULL);
#ifdef HAVE_REPLICATION
mysql_mutex_init(key_LOCK_rpl_status, &LOCK_rpl_status, MY_MUTEX_INIT_FAST);
@@ -4675,7 +4808,7 @@ static int init_thread_environment()
}
-#if defined(HAVE_OPENSSL) && !defined(HAVE_YASSL)
+#ifdef HAVE_OPENSSL10
static openssl_lock_t *openssl_dynlock_create(const char *file, int line)
{
openssl_lock_t *lock= new openssl_lock_t;
@@ -4735,8 +4868,7 @@ static void openssl_lock(int mode, openssl_lock_t *lock, const char *file,
abort();
}
}
-#endif /* HAVE_OPENSSL */
-
+#endif /* HAVE_OPENSSL10 */
static void init_ssl()
{
@@ -4750,7 +4882,7 @@ static void init_ssl()
opt_ssl_ca, opt_ssl_capath,
opt_ssl_cipher, &error,
opt_ssl_crl, opt_ssl_crlpath);
- DBUG_PRINT("info",("ssl_acceptor_fd: 0x%lx", (long) ssl_acceptor_fd));
+ DBUG_PRINT("info",("ssl_acceptor_fd: %p", ssl_acceptor_fd));
if (!ssl_acceptor_fd)
{
sql_print_warning("Failed to setup SSL");
@@ -4794,25 +4926,14 @@ static void end_ssl()
/**
Registers a file to be collected when Windows Error Reporting creates a crash
report.
-
- @note only works on Vista and later, since WerRegisterFile() is not available
- on earlier Windows.
*/
#include <werapi.h>
static void add_file_to_crash_report(char *file)
{
- /* Load WerRegisterFile function dynamically.*/
- HRESULT (WINAPI *pWerRegisterFile)(PCWSTR, WER_REGISTER_FILE_TYPE, DWORD)
- =(HRESULT (WINAPI *) (PCWSTR, WER_REGISTER_FILE_TYPE, DWORD))
- GetProcAddress(GetModuleHandle("kernel32"),"WerRegisterFile");
-
- if (pWerRegisterFile)
+ wchar_t wfile[MAX_PATH+1]= {0};
+ if (mbstowcs(wfile, file, MAX_PATH) != (size_t)-1)
{
- wchar_t wfile[MAX_PATH+1]= {0};
- if (mbstowcs(wfile, file, MAX_PATH) != (size_t)-1)
- {
- pWerRegisterFile(wfile, WerRegFileTypeOther, WER_FILE_ANONYMOUS_DATA);
- }
+ WerRegisterFile(wfile, WerRegFileTypeOther, WER_FILE_ANONYMOUS_DATA);
}
}
#endif
@@ -4871,8 +4992,7 @@ static int init_server_components()
all things are initialized so that unireg_abort() doesn't fail
*/
mdl_init();
- tdc_init();
- if (hostname_cache_init())
+ if (tdc_init() || hostname_cache_init())
unireg_abort(1);
query_cache_set_min_res_unit(query_cache_min_res_unit);
@@ -4885,7 +5005,8 @@ static int init_server_components()
global_system_variables.query_cache_type= 1;
}
query_cache_init();
- query_cache_resize(query_cache_size);
+ DBUG_ASSERT(query_cache_size < ULONG_MAX);
+ query_cache_resize((ulong)query_cache_size);
my_rnd_init(&sql_rand,(ulong) server_start_time,(ulong) server_start_time/2);
setup_fpu();
init_thr_lock();
@@ -4906,11 +5027,18 @@ static int init_server_components()
/* Setup logs */
+ setup_log_handling();
+
/*
Enable old-fashioned error log, except when the user has requested
help information. Since the implementation of plugin server
variables the help output is now written much later.
*/
+#ifdef _WIN32
+ if (opt_console)
+ opt_error_log= false;
+#endif
+
if (opt_error_log && !opt_abort)
{
if (!log_error_file_ptr[0])
@@ -5082,7 +5210,9 @@ static int init_server_components()
/* It's now safe to use thread specific memory */
mysqld_server_initialized= 1;
+#ifndef EMBEDDED_LIBRARY
wsrep_thr_init();
+#endif
if (WSREP_ON && !wsrep_recovery && !opt_abort) /* WSREP BEFORE SE */
{
@@ -5187,6 +5317,17 @@ static int init_server_components()
}
plugins_are_initialized= TRUE; /* Don't separate from init function */
+#ifndef EMBEDDED_LIBRARY
+ {
+ if (Session_tracker::server_boot_verify(system_charset_info))
+ {
+ sql_print_error("The variable session_track_system_variables has "
+ "invalid values.");
+ unireg_abort(1);
+ }
+ }
+#endif //EMBEDDED_LIBRARY
+
/* we do want to exit if there are any other unknown options */
if (remaining_argc > 1)
{
@@ -5426,7 +5567,7 @@ static void handle_connections_methods()
unireg_abort(1); // Will not return
}
- mysql_mutex_lock(&LOCK_thread_count);
+ mysql_mutex_lock(&LOCK_start_thread);
mysql_cond_init(key_COND_handler_count, &COND_handler_count, NULL);
handler_count=0;
if (hPipe != INVALID_HANDLE_VALUE)
@@ -5469,17 +5610,17 @@ static void handle_connections_methods()
#endif
while (handler_count > 0)
- mysql_cond_wait(&COND_handler_count, &LOCK_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_cond_wait(&COND_handler_count, &LOCK_start_thread);
+ mysql_mutex_unlock(&LOCK_start_thread);
DBUG_VOID_RETURN;
}
void decrement_handler_count()
{
- mysql_mutex_lock(&LOCK_thread_count);
- handler_count--;
- mysql_cond_signal(&COND_handler_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_mutex_lock(&LOCK_start_thread);
+ if (--handler_count == 0)
+ mysql_cond_signal(&COND_handler_count);
+ mysql_mutex_unlock(&LOCK_start_thread);
my_thread_end();
}
#else
@@ -5713,7 +5854,7 @@ int mysqld_main(int argc, char **argv)
ulonglong new_thread_stack_size;
new_thread_stack_size= my_setstacksize(&connection_attrib,
- my_thread_stack_size);
+ (size_t)my_thread_stack_size);
if (new_thread_stack_size != my_thread_stack_size)
SYSVAR_AUTOSIZE(my_thread_stack_size, new_thread_stack_size);
@@ -5739,6 +5880,9 @@ int mysqld_main(int argc, char **argv)
if (my_setwd(mysql_real_data_home, opt_abort ? 0 : MYF(MY_WME)) && !opt_abort)
unireg_abort(1); /* purecov: inspected */
+ /* Atomic write initialization must be done as root */
+ my_init_atomic_write();
+
if ((user_info= check_user(mysqld_user)))
{
#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT)
@@ -5752,17 +5896,6 @@ int mysqld_main(int argc, char **argv)
if (WSREP_ON && wsrep_check_opts())
global_system_variables.wsrep_on= 0;
- if (opt_bin_log && !global_system_variables.server_id)
- {
- SYSVAR_AUTOSIZE(global_system_variables.server_id, ::server_id= 1);
-#ifdef EXTRA_DEBUG
- sql_print_warning("You have enabled the binary log, but you haven't set "
- "server-id to a non-zero value: we force server id to 1; "
- "updates will be logged to the binary log, but "
- "connections from slaves will not be accepted.");
-#endif
- }
-
/*
The subsequent calls may take a long time : e.g. innodb log read.
Thus set the long running service control manager timeout
@@ -5869,7 +6002,10 @@ int mysqld_main(int argc, char **argv)
wsrep_SE_init_grab();
wsrep_SE_init_done();
/*! in case of SST wsrep waits for wsrep->sst_received */
- wsrep_sst_continue();
+ if (wsrep_sst_continue())
+ {
+ WSREP_ERROR("Failed to signal the wsrep provider to continue.");
+ }
}
else
{
@@ -5917,11 +6053,26 @@ int mysqld_main(int argc, char **argv)
}
disable_log_notes= 0; /* Startup done, now we can give notes again */
- sql_print_information(ER_DEFAULT(ER_STARTUP),my_progname,server_version,
- ((mysql_socket_getfd(unix_sock) == INVALID_SOCKET) ?
- (char*) "" : mysqld_unix_port),
- mysqld_port,
- MYSQL_COMPILATION_COMMENT);
+
+ if (IS_SYSVAR_AUTOSIZE(&server_version_ptr))
+ sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname, server_version,
+ ((mysql_socket_getfd(unix_sock) == INVALID_SOCKET) ?
+ (char*) "" : mysqld_unix_port),
+ mysqld_port, MYSQL_COMPILATION_COMMENT);
+ else
+ {
+ char real_server_version[2 * SERVER_VERSION_LENGTH + 10];
+
+ set_server_version(real_server_version, sizeof(real_server_version));
+ strcat(real_server_version, "' as '");
+ strcat(real_server_version, server_version);
+
+ sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname,
+ real_server_version,
+ ((mysql_socket_getfd(unix_sock) == INVALID_SOCKET) ?
+ (char*) "" : mysqld_unix_port),
+ mysqld_port, MYSQL_COMPILATION_COMMENT);
+ }
#ifndef _WIN32
// try to keep fd=0 busy
@@ -5955,18 +6106,10 @@ int mysqld_main(int argc, char **argv)
DBUG_PRINT("quit",("Exiting main thread"));
#ifndef __WIN__
-#ifdef EXTRA_DEBUG2
- sql_print_error("Before Lock_thread_count");
-#endif
- WSREP_DEBUG("Before Lock_thread_count");
- mysql_mutex_lock(&LOCK_thread_count);
- DBUG_PRINT("quit", ("Got thread_count mutex"));
+ mysql_mutex_lock(&LOCK_start_thread);
select_thread_in_use=0; // For close_connections
- mysql_mutex_unlock(&LOCK_thread_count);
- mysql_cond_broadcast(&COND_thread_count);
-#ifdef EXTRA_DEBUG2
- sql_print_error("After lock_thread_count");
-#endif
+ mysql_cond_broadcast(&COND_start_thread);
+ mysql_mutex_unlock(&LOCK_start_thread);
#endif /* __WIN__ */
#ifdef HAVE_PSI_THREAD_INTERFACE
@@ -5993,6 +6136,9 @@ int mysqld_main(int argc, char **argv)
CloseHandle(hEventShutdown);
}
#endif
+#if (defined(HAVE_OPENSSL) && !defined(HAVE_YASSL)) && !defined(EMBEDDED_LIBRARY)
+ ERR_remove_state(0);
+#endif
mysqld_exit(0);
return 0;
}
@@ -6216,14 +6362,15 @@ int mysqld_main(int argc, char **argv)
/**
Execute all commands from a file. Used by the mysql_install_db script to
- create MySQL privilege tables without having to start a full MySQL server.
+ create MySQL privilege tables without having to start a full MySQL server
+ and by read_init_file() if mysqld was started with the option --init-file.
*/
static void bootstrap(MYSQL_FILE *file)
{
DBUG_ENTER("bootstrap");
- THD *thd= new THD;
+ THD *thd= new THD(next_thread_id());
#ifdef WITH_WSREP
thd->variables.wsrep_on= 0;
#endif
@@ -6231,8 +6378,6 @@ static void bootstrap(MYSQL_FILE *file)
my_net_init(&thd->net,(st_vio*) 0, thd, MYF(0));
thd->max_client_packet_length= thd->net.max_packet;
thd->security_ctx->master_access= ~(ulong)0;
- thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
- thread_count++; // Safe as only one thread running
in_bootstrap= TRUE;
bootstrap_file=file;
@@ -6251,10 +6396,7 @@ static void bootstrap(MYSQL_FILE *file)
/* Wait for thread to die */
mysql_mutex_lock(&LOCK_thread_count);
while (in_bootstrap)
- {
mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
- DBUG_PRINT("quit",("One thread died (count=%u)",thread_count));
- }
mysql_mutex_unlock(&LOCK_thread_count);
#else
thd->mysql= 0;
@@ -6284,7 +6426,7 @@ static bool read_init_file(char *file_name)
*/
void inc_thread_created(void)
{
- thread_created++;
+ statistic_increment(thread_created, &LOCK_status);
}
#ifndef EMBEDDED_LIBRARY
@@ -6295,18 +6437,12 @@ void inc_thread_created(void)
NOTES
This is only used for debugging, when starting mysqld with
--thread-handling=no-threads or --one-thread
-
- When we enter this function, LOCK_thread_count is hold!
*/
-void handle_connection_in_main_thread(THD *thd)
+void handle_connection_in_main_thread(CONNECT *connect)
{
- mysql_mutex_assert_owner(&LOCK_thread_count);
- thread_cache_size=0; // Safety
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
- thd->start_utime= microsecond_interval_timer();
- do_handle_one_connection(thd);
+ thread_cache_size= 0; // Safety
+ do_handle_one_connection(connect);
}
@@ -6314,10 +6450,11 @@ void handle_connection_in_main_thread(THD *thd)
Scheduler that uses one thread per connection
*/
-void create_thread_to_handle_connection(THD *thd)
+void create_thread_to_handle_connection(CONNECT *connect)
{
+ char error_message_buff[MYSQL_ERRMSG_SIZE];
+ int error;
DBUG_ENTER("create_thread_to_handle_connection");
- mysql_mutex_assert_owner(&LOCK_thread_count);
/* Check if we can get thread from the cache */
if (cached_thread_count > wake_thread)
@@ -6326,9 +6463,8 @@ void create_thread_to_handle_connection(THD *thd)
/* Recheck condition when we have the lock */
if (cached_thread_count > wake_thread)
{
- mysql_mutex_unlock(&LOCK_thread_count);
/* Get thread from cache */
- thread_cache.push_back(thd);
+ thread_cache.push_back(connect);
wake_thread++;
mysql_cond_signal(&COND_thread_cache);
mysql_mutex_unlock(&LOCK_thread_cache);
@@ -6338,46 +6474,25 @@ void create_thread_to_handle_connection(THD *thd)
mysql_mutex_unlock(&LOCK_thread_cache);
}
- char error_message_buff[MYSQL_ERRMSG_SIZE];
/* Create new thread to handle connection */
- int error;
- thread_created++;
- threads.append(thd);
- DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id));
- thd->prior_thr_create_utime= microsecond_interval_timer();
+ inc_thread_created();
+ DBUG_PRINT("info",(("creating thread %lu"), (ulong) connect->thread_id));
+ connect->prior_thr_create_utime= microsecond_interval_timer();
+
if ((error= mysql_thread_create(key_thread_one_connection,
- &thd->real_id, &connection_attrib,
- handle_one_connection,
- (void*) thd)))
+ &connect->real_id, &connection_attrib,
+ handle_one_connection, (void*) connect)))
{
/* purecov: begin inspected */
- DBUG_PRINT("error",
- ("Can't create thread to handle request (error %d)",
+ DBUG_PRINT("error", ("Can't create thread to handle request (error %d)",
error));
- thd->set_killed(KILL_CONNECTION); // Safety
- mysql_mutex_unlock(&LOCK_thread_count);
-
- mysql_mutex_lock(&LOCK_connection_count);
- (*thd->scheduler->connection_count)--;
- mysql_mutex_unlock(&LOCK_connection_count);
-
- statistic_increment(aborted_connects,&LOCK_status);
- statistic_increment(connection_errors_internal, &LOCK_status);
- /* Can't use my_error() since store_globals has not been called. */
my_snprintf(error_message_buff, sizeof(error_message_buff),
- ER_THD(thd, ER_CANT_CREATE_THREAD), error);
- net_send_error(thd, ER_CANT_CREATE_THREAD, error_message_buff, NULL);
- close_connection(thd, ER_OUT_OF_RESOURCES);
-
- mysql_mutex_lock(&LOCK_thread_count);
- thd->unlink();
- mysql_mutex_unlock(&LOCK_thread_count);
- delete thd;
- thread_safe_decrement32(&thread_count);
- return;
+ ER_DEFAULT(ER_CANT_CREATE_THREAD), error);
+ connect->close_with_error(ER_CANT_CREATE_THREAD, error_message_buff,
+ ER_OUT_OF_RESOURCES);
+ DBUG_VOID_RETURN;
/* purecov: end */
}
- mysql_mutex_unlock(&LOCK_thread_count);
DBUG_PRINT("info",("Thread created"));
DBUG_VOID_RETURN;
}
@@ -6396,7 +6511,7 @@ void create_thread_to_handle_connection(THD *thd)
@param[in,out] thd Thread handle of future thread.
*/
-static void create_new_thread(THD *thd)
+static void create_new_thread(CONNECT *connect)
{
DBUG_ENTER("create_new_thread");
@@ -6407,38 +6522,34 @@ static void create_new_thread(THD *thd)
mysql_mutex_lock(&LOCK_connection_count);
- if (*thd->scheduler->connection_count >=
- *thd->scheduler->max_connections + 1|| abort_loop)
+ if (*connect->scheduler->connection_count >=
+ *connect->scheduler->max_connections + 1|| abort_loop)
{
- mysql_mutex_unlock(&LOCK_connection_count);
-
DBUG_PRINT("error",("Too many connections"));
- close_connection(thd, ER_CON_COUNT_ERROR);
+
+ mysql_mutex_unlock(&LOCK_connection_count);
statistic_increment(denied_connections, &LOCK_status);
- delete thd;
statistic_increment(connection_errors_max_connection, &LOCK_status);
+ connect->close_with_error(0, NullS, abort_loop ? ER_SERVER_SHUTDOWN : ER_CON_COUNT_ERROR);
DBUG_VOID_RETURN;
}
- ++*thd->scheduler->connection_count;
+ ++*connect->scheduler->connection_count;
if (connection_count + extra_connection_count > max_used_connections)
max_used_connections= connection_count + extra_connection_count;
mysql_mutex_unlock(&LOCK_connection_count);
- thread_safe_increment32(&thread_count);
+ connect->thread_count_incremented= 1;
- /* Start a new thread to handle connection. */
- mysql_mutex_lock(&LOCK_thread_count);
/*
The initialization of thread_id is done in create_embedded_thd() for
the embedded library.
TODO: refactor this to avoid code duplication there
*/
- thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
-
- MYSQL_CALLBACK(thd->scheduler, add_connection, (thd));
+ connect->thread_id= next_thread_id();
+ connect->scheduler->add_connection(connect);
DBUG_VOID_RETURN;
}
@@ -6473,13 +6584,12 @@ void handle_connections_sockets()
MYSQL_SOCKET sock= mysql_socket_invalid();
MYSQL_SOCKET new_sock= mysql_socket_invalid();
uint error_count=0;
- THD *thd;
+ CONNECT *connect;
struct sockaddr_storage cAddr;
int ip_flags __attribute__((unused))=0;
int socket_flags __attribute__((unused))= 0;
int extra_ip_flags __attribute__((unused))=0;
int flags=0,retval;
- st_vio *vio_tmp;
bool is_unix_sock;
#ifdef HAVE_POLL
int socket_count= 0;
@@ -6678,59 +6788,43 @@ void handle_connections_sockets()
}
#endif /* HAVE_LIBWRAP */
- /*
- ** Don't allow too many connections
- */
+ DBUG_PRINT("info", ("Creating CONNECT for new connection"));
- DBUG_PRINT("info", ("Creating THD for new connection"));
- if (!(thd= new THD) || thd->is_error())
+ if ((connect= new CONNECT()))
{
- (void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
- (void) mysql_socket_close(new_sock);
- statistic_increment(connection_errors_internal, &LOCK_status);
- delete thd;
- continue;
- }
- /* Set to get io buffers to be part of THD */
- set_current_thd(thd);
-
- is_unix_sock= (mysql_socket_getfd(sock) ==
- mysql_socket_getfd(unix_sock));
+ is_unix_sock= (mysql_socket_getfd(sock) ==
+ mysql_socket_getfd(unix_sock));
- if (!(vio_tmp=
- mysql_socket_vio_new(new_sock,
- is_unix_sock ? VIO_TYPE_SOCKET : VIO_TYPE_TCPIP,
- is_unix_sock ? VIO_LOCALHOST: 0)) ||
- my_net_init(&thd->net, vio_tmp, thd, MYF(MY_THREAD_SPECIFIC)))
- {
- /*
- Only delete the temporary vio if we didn't already attach it to the
- NET object. The destructor in THD will delete any initialized net
- structure.
- */
- if (vio_tmp && thd->net.vio != vio_tmp)
- vio_delete(vio_tmp);
- else
+ if (!(connect->vio=
+ mysql_socket_vio_new(new_sock,
+ is_unix_sock ? VIO_TYPE_SOCKET :
+ VIO_TYPE_TCPIP,
+ is_unix_sock ? VIO_LOCALHOST: 0)))
{
- (void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
- (void) mysql_socket_close(new_sock);
+ delete connect;
+ connect= 0; // Error handling below
}
- delete thd;
+ }
+
+ if (!connect)
+ {
+ /* Connect failure */
+ (void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
+ (void) mysql_socket_close(new_sock);
+ statistic_increment(aborted_connects,&LOCK_status);
statistic_increment(connection_errors_internal, &LOCK_status);
continue;
}
- init_net_server_extension(thd);
if (is_unix_sock)
- thd->security_ctx->host=(char*) my_localhost;
+ connect->host= my_localhost;
if (mysql_socket_getfd(sock) == mysql_socket_getfd(extra_ip_sock))
{
- thd->extra_port= 1;
- thd->scheduler= extra_thread_scheduler;
+ connect->extra_port= 1;
+ connect->scheduler= extra_thread_scheduler;
}
- create_new_thread(thd);
- set_current_thd(0);
+ create_new_thread(connect);
}
sd_notify(0, "STOPPING=1\n"
"STATUS=Shutdown in progress\n");
@@ -6751,7 +6845,6 @@ pthread_handler_t handle_connections_namedpipes(void *arg)
{
HANDLE hConnectedPipe;
OVERLAPPED connectOverlapped= {0};
- THD *thd;
my_thread_init();
DBUG_ENTER("handle_connections_namedpipes");
connectOverlapped.hEvent= CreateEvent(NULL, TRUE, FALSE, NULL);
@@ -6819,25 +6912,19 @@ pthread_handler_t handle_connections_namedpipes(void *arg)
hPipe=hConnectedPipe;
continue; // We have to try again
}
-
- if (!(thd = new THD))
+ CONNECT *connect;
+ if (!(connect= new CONNECT) ||
+ !(connect->vio= vio_new_win32pipe(hConnectedPipe)))
{
DisconnectNamedPipe(hConnectedPipe);
CloseHandle(hConnectedPipe);
+ delete connect;
+ statistic_increment(aborted_connects,&LOCK_status);
+ statistic_increment(connection_errors_internal, &LOCK_status);
continue;
}
- set_current_thd(thd);
- if (!(thd->net.vio= vio_new_win32pipe(hConnectedPipe)) ||
- my_net_init(&thd->net, thd->net.vio, thd, MYF(MY_THREAD_SPECIFIC)))
- {
- close_connection(thd, ER_OUT_OF_RESOURCES);
- delete thd;
- continue;
- }
- /* Host is unknown */
- thd->security_ctx->host= my_strdup(my_localhost, MYF(0));
- create_new_thread(thd);
- set_current_thd(0);
+ connect->host= my_localhost;
+ create_new_thread(connect);
}
LocalFree(saPipeSecurity.lpSecurityDescriptor);
CloseHandle(connectOverlapped.hEvent);
@@ -6875,7 +6962,8 @@ pthread_handler_t handle_connections_shared_memory(void *arg)
/*
get enough space base-name + '_' + longest suffix we might ever send
*/
- if (!(tmp= (char *)my_malloc(strlen(shared_memory_base_name) + 32L, MYF(MY_FAE))))
+ if (!(tmp= (char *)my_malloc(strlen(shared_memory_base_name) + 32L,
+ MYF(MY_FAE))))
goto error;
if (my_security_attr_create(&sa_event, &errmsg,
@@ -6941,7 +7029,7 @@ pthread_handler_t handle_connections_shared_memory(void *arg)
HANDLE event_server_wrote= 0;
HANDLE event_server_read= 0;
HANDLE event_conn_closed= 0;
- THD *thd= 0;
+ CONNECT *connect= 0;
p= int10_to_str(connect_number, connect_number_char, 10);
/*
@@ -7003,8 +7091,13 @@ pthread_handler_t handle_connections_shared_memory(void *arg)
}
if (abort_loop)
goto errorconn;
- if (!(thd= new THD))
+
+ if (!(connect= new CONNECT))
+ {
+ errmsg= "Could not create CONNECT object";
goto errorconn;
+ }
+
/* Send number of connection to client */
int4store(handle_connect_map, connect_number);
if (!SetEvent(event_connect_answer))
@@ -7018,24 +7111,20 @@ pthread_handler_t handle_connections_shared_memory(void *arg)
errmsg= "Could not set client to read mode";
goto errorconn;
}
- set_current_thd(thd);
- if (!(thd->net.vio= vio_new_win32shared_memory(handle_client_file_map,
+ if (!(connect->vio= vio_new_win32shared_memory(handle_client_file_map,
handle_client_map,
event_client_wrote,
event_client_read,
event_server_wrote,
event_server_read,
- event_conn_closed)) ||
- my_net_init(&thd->net, thd->net.vio, thd, MYF(MY_THREAD_SPECIFIC)))
+ event_conn_closed)))
{
- close_connection(thd, ER_OUT_OF_RESOURCES);
- errmsg= 0;
+ errmsg= "Could not create VIO object";
goto errorconn;
}
- thd->security_ctx->host= my_strdup(my_localhost, MYF(0)); /* Host is unknown */
- create_new_thread(thd);
+ connect->host= my_localhost; /* Host is unknown */
+ create_new_thread(connect);
connect_number++;
- set_current_thd(0);
continue;
errorconn:
@@ -7061,9 +7150,11 @@ errorconn:
CloseHandle(event_client_read);
if (event_conn_closed)
CloseHandle(event_conn_closed);
- delete thd;
+
+ delete connect;
+ statistic_increment(aborted_connects,&LOCK_status);
+ statistic_increment(connection_errors_internal, &LOCK_status);
}
- set_current_thd(0);
/* End shared memory handling */
error:
@@ -7175,7 +7266,7 @@ struct my_option my_long_options[]=
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
/*
Because Sys_var_bit does not support command-line options, we need to
- explicitely add one for --autocommit
+ explicitly add one for --autocommit
*/
{"autocommit", 0, "Set default value for autocommit (0 or 1)",
&opt_autocommit, &opt_autocommit, 0,
@@ -7196,8 +7287,8 @@ struct my_option my_long_options[]=
"The value has to be a multiple of 256.",
&opt_binlog_rows_event_max_size, &opt_binlog_rows_event_max_size,
0, GET_ULONG, REQUIRED_ARG,
- /* def_value */ 1024, /* min_value */ 256, /* max_value */ ULONG_MAX,
- /* sub_size */ 0, /* block_size */ 256,
+ /* def_value */ 8192, /* min_value */ 256, /* max_value */ ULONG_MAX,
+ /* sub_size */ 0, /* block_size */ 256,
/* app_type */ 0
},
#ifndef DISABLE_GRANT_OPTIONS
@@ -7284,6 +7375,13 @@ struct my_option my_long_options[]=
&opt_sporadic_binlog_dump_fail, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
0},
#endif /* HAVE_REPLICATION */
+#ifndef DBUG_OFF
+ {"debug-assert-on-not-freed-memory", 0,
+ "Assert if we found problems with memory allocation",
+ &debug_assert_on_not_freed_memory,
+ &debug_assert_on_not_freed_memory, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0,
+ 0},
+#endif /* DBUG_OFF */
/* default-storage-engine should have "MyISAM" as def_value. Instead
of initializing it here it is done in init_common_variables() due
to a compiler bug in Sun Studio compiler. */
@@ -7325,6 +7423,10 @@ struct my_option my_long_options[]=
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
/* We must always support the next option to make scripts like mysqltest
easier to do */
+ {"flashback", 0,
+ "Setup the server to use flashback. This enables binary log in row mode and will enable extra logging for DDL's needed by flashback feature",
+ &opt_support_flashback, &opt_support_flashback,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"gdb", 0,
"Set up signals usable for debugging. Deprecated, use --debug-gdb instead.",
&opt_debugging, &opt_debugging,
@@ -7567,8 +7669,8 @@ struct my_option my_long_options[]=
0, 0, 0, 0, 0, 0},
{"verbose", 'v', "Used with --help option for detailed help.",
&opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG,
- NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_STR,
+ OPT_ARG, 0, 0, 0, 0, 0, 0},
{"plugin-load", OPT_PLUGIN_LOAD,
"Semicolon-separated list of plugins to load, where each plugin is "
"specified as ether a plugin_name=library_file pair or only a library_file. "
@@ -7611,11 +7713,9 @@ struct my_option my_long_options[]=
MYSQL_TO_BE_IMPLEMENTED_OPTION("eq-range-index-dive-limit"),
MYSQL_COMPATIBILITY_OPTION("server-id-bits"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-rows-search-algorithms"), // HAVE_REPLICATION
- MYSQL_COMPATIBILITY_OPTION("table-open-cache-instances"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-allow-batching"), // HAVE_REPLICATION
MYSQL_COMPATIBILITY_OPTION("slave-checkpoint-period"), // HAVE_REPLICATION
MYSQL_COMPATIBILITY_OPTION("slave-checkpoint-group"), // HAVE_REPLICATION
- MYSQL_SUGGEST_ANALOG_OPTION("slave-parallel-workers", "--slave-parallel-threads"), // HAVE_REPLICATION
MYSQL_SUGGEST_ANALOG_OPTION("slave-pending-jobs-size-max", "--slave-parallel-max-queued"), // HAVE_REPLICATION
MYSQL_TO_BE_IMPLEMENTED_OPTION("disconnect-on-expired-password"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("sha256-password-private-key-path"), // HAVE_OPENSSL && !HAVE_YASSL
@@ -7809,9 +7909,9 @@ static int show_table_definitions(THD *thd, SHOW_VAR *var, char *buff,
static int show_flush_commands(THD *thd, SHOW_VAR *var, char *buff,
enum enum_var_type scope)
{
- var->type= SHOW_LONG;
+ var->type= SHOW_LONGLONG;
var->value= buff;
- *((long *) buff)= (long) tdc_refresh_version();
+ *((longlong *) buff)= (longlong)tdc_refresh_version();
return 0;
}
@@ -8104,7 +8204,7 @@ static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff,
#ifdef HAVE_YASSL
static char *
-my_asn1_time_to_string(ASN1_TIME *time, char *buf, size_t len)
+my_asn1_time_to_string(const ASN1_TIME *time, char *buf, size_t len)
{
return yaSSL_ASN1_TIME_to_string(time, buf, len);
}
@@ -8112,7 +8212,7 @@ my_asn1_time_to_string(ASN1_TIME *time, char *buf, size_t len)
#else /* openssl */
static char *
-my_asn1_time_to_string(ASN1_TIME *time, char *buf, size_t len)
+my_asn1_time_to_string(const ASN1_TIME *time, char *buf, size_t len)
{
int n_read;
char *res= NULL;
@@ -8121,7 +8221,7 @@ my_asn1_time_to_string(ASN1_TIME *time, char *buf, size_t len)
if (bio == NULL)
return NULL;
- if (!ASN1_TIME_print(bio, time))
+ if (!ASN1_TIME_print(bio, const_cast<ASN1_TIME*>(time)))
goto end;
n_read= BIO_read(bio, buf, (int) (len - 1));
@@ -8160,7 +8260,7 @@ show_ssl_get_server_not_before(THD *thd, SHOW_VAR *var, char *buff,
{
SSL *ssl= (SSL*) thd->net.vio->ssl_arg;
X509 *cert= SSL_get_certificate(ssl);
- ASN1_TIME *not_before= X509_get_notBefore(cert);
+ const ASN1_TIME *not_before= X509_get0_notBefore(cert);
var->value= my_asn1_time_to_string(not_before, buff,
SHOW_VAR_FUNC_BUFF_SIZE);
@@ -8194,7 +8294,7 @@ show_ssl_get_server_not_after(THD *thd, SHOW_VAR *var, char *buff,
{
SSL *ssl= (SSL*) thd->net.vio->ssl_arg;
X509 *cert= SSL_get_certificate(ssl);
- ASN1_TIME *not_after= X509_get_notAfter(cert);
+ const ASN1_TIME *not_after= X509_get0_notAfter(cert);
var->value= my_asn1_time_to_string(not_after, buff,
SHOW_VAR_FUNC_BUFF_SIZE);
@@ -8332,7 +8432,7 @@ SHOW_VAR status_vars[]= {
{"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), SHOW_LONGLONG_STATUS},
{"Com", (char*) com_status_vars, SHOW_ARRAY},
{"Compression", (char*) &show_net_compression, SHOW_SIMPLE_FUNC},
- {"Connections", (char*) &thread_id, SHOW_LONG_NOFLUSH},
+ {"Connections", (char*) &global_thread_id, SHOW_LONG_NOFLUSH},
{"Connection_errors_accept", (char*) &connection_errors_accept, SHOW_LONG},
{"Connection_errors_internal", (char*) &connection_errors_internal, SHOW_LONG},
{"Connection_errors_max_connections", (char*) &connection_errors_max_connection, SHOW_LONG},
@@ -8353,6 +8453,7 @@ SHOW_VAR status_vars[]= {
{"Empty_queries", (char*) offsetof(STATUS_VAR, empty_queries), SHOW_LONG_STATUS},
{"Executed_events", (char*) &executed_events, SHOW_LONG_NOFLUSH },
{"Executed_triggers", (char*) offsetof(STATUS_VAR, executed_triggers), SHOW_LONG_STATUS},
+ {"Feature_check_constraint", (char*) &feature_check_constraint, SHOW_LONG },
{"Feature_delay_key_write", (char*) &feature_files_opened_with_delayed_keys, SHOW_LONG },
{"Feature_dynamic_columns", (char*) offsetof(STATUS_VAR, feature_dynamic_columns), SHOW_LONG_STATUS},
{"Feature_fulltext", (char*) offsetof(STATUS_VAR, feature_fulltext), SHOW_LONG_STATUS},
@@ -8361,6 +8462,7 @@ SHOW_VAR status_vars[]= {
{"Feature_subquery", (char*) offsetof(STATUS_VAR, feature_subquery), SHOW_LONG_STATUS},
{"Feature_timezone", (char*) offsetof(STATUS_VAR, feature_timezone), SHOW_LONG_STATUS},
{"Feature_trigger", (char*) offsetof(STATUS_VAR, feature_trigger), SHOW_LONG_STATUS},
+ {"Feature_window_functions", (char*) offsetof(STATUS_VAR, feature_window_functions), SHOW_LONG_STATUS},
{"Feature_xml", (char*) offsetof(STATUS_VAR, feature_xml), SHOW_LONG_STATUS},
{"Flush_commands", (char*) &show_flush_commands, SHOW_SIMPLE_FUNC},
{"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), SHOW_LONG_STATUS},
@@ -8529,7 +8631,8 @@ static bool add_many_options(DYNAMIC_ARRAY *options, my_option *list,
#ifndef EMBEDDED_LIBRARY
static void print_version(void)
{
- set_server_version();
+ if (IS_SYSVAR_AUTOSIZE(&server_version_ptr))
+ set_server_version(server_version, sizeof(server_version));
printf("%s Ver %s for %s on %s (%s)\n",my_progname,
server_version,SYSTEM_TYPE,MACHINE_TYPE, MYSQL_COMPILATION_COMMENT);
@@ -8671,7 +8774,6 @@ static int mysql_init_variables(void)
mqh_used= 0;
kill_in_progress= 0;
cleanup_done= 0;
- server_id_supplied= 0;
test_flags= select_errors= dropping_tables= ha_open_options=0;
thread_count= thread_running= kill_cached_threads= wake_thread= 0;
service_thread_count= 0;
@@ -8714,10 +8816,11 @@ static int mysql_init_variables(void)
mysql_home_ptr= mysql_home;
log_error_file_ptr= log_error_file;
protocol_version= PROTOCOL_VERSION;
- what_to_log= ~ (1L << (uint) COM_TIME);
+ what_to_log= ~(1UL << COM_TIME);
denied_connections= 0;
executed_events= 0;
- global_query_id= thread_id= 1L;
+ global_query_id= 1;
+ global_thread_id= 0;
strnmov(server_version, MYSQL_SERVER_VERSION, sizeof(server_version)-1);
threads.empty();
thread_cache.empty();
@@ -8734,8 +8837,8 @@ static int mysql_init_variables(void)
/* Set directory paths */
mysql_real_data_home_len=
- strmake_buf(mysql_real_data_home,
- get_relative_path(MYSQL_DATADIR)) - mysql_real_data_home;
+ (uint)(strmake_buf(mysql_real_data_home,
+ get_relative_path(MYSQL_DATADIR)) - mysql_real_data_home);
/* Replication parameters */
master_info_file= (char*) "master.info",
relay_log_info_file= (char*) "relay-log.info";
@@ -8932,12 +9035,21 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
binlog_format_used= true;
break;
#include <sslopt-case.h>
-#ifndef EMBEDDED_LIBRARY
case 'V':
- print_version();
- opt_abort= 1; // Abort after parsing all options
- break;
+ if (argument)
+ {
+ strmake(server_version, argument, sizeof(server_version) - 1);
+ set_sys_var_value_origin(&server_version_ptr, sys_var::CONFIG);
+ using_custom_server_version= true;
+ }
+#ifndef EMBEDDED_LIBRARY
+ else
+ {
+ print_version();
+ opt_abort= 1; // Abort after parsing all options
+ }
#endif /*EMBEDDED_LIBRARY*/
+ break;
case 'W':
if (!argument)
global_system_variables.log_warnings++;
@@ -9133,7 +9245,6 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
opt_noacl=opt_bootstrap=1;
break;
case OPT_SERVER_ID:
- server_id_supplied = 1;
::server_id= global_system_variables.server_id;
break;
case OPT_LOWER_CASE_TABLE_NAMES:
@@ -9403,7 +9514,8 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
/* prepare all_options array */
my_init_dynamic_array(&all_options, sizeof(my_option),
- array_elements(my_long_options),
+ array_elements(my_long_options) +
+ sys_var_elements(),
array_elements(my_long_options)/4, MYF(0));
add_many_options(&all_options, my_long_options, array_elements(my_long_options));
sys_var_add_options(&all_options, 0);
@@ -9433,11 +9545,6 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
between options, setting of multiple variables, etc.
Do them here.
*/
-
- if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes ||
- opt_log_slow_slave_statements) &&
- !global_system_variables.sql_log_slow)
- sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log_slow_queries is not set");
if (global_system_variables.net_buffer_length >
global_system_variables.max_allowed_packet)
{
@@ -9495,6 +9602,18 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
else
global_system_variables.option_bits&= ~OPTION_BIG_SELECTS;
+ if (opt_support_flashback)
+ {
+ /* Force binary logging */
+ if (!opt_bin_logname)
+ opt_bin_logname= (char*) ""; // Use default name
+ opt_bin_log= opt_bin_log_used= 1;
+
+ /* Force format to row */
+ binlog_format_used= 1;
+ global_system_variables.binlog_format= BINLOG_FORMAT_ROW;
+ }
+
if (!opt_bootstrap && WSREP_PROVIDER_EXISTS && WSREP_ON &&
global_system_variables.binlog_format != BINLOG_FORMAT_ROW)
{
@@ -9587,13 +9706,6 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
one_thread_scheduler(extra_thread_scheduler);
#else
-#ifdef _WIN32
- /* workaround: disable thread pool on XP */
- if (GetProcAddress(GetModuleHandle("kernel32"),"CreateThreadpool") == 0 &&
- thread_handling > SCHEDULER_NO_THREADS)
- SYSVAR_AUTOSIZE(thread_handling, SCHEDULER_ONE_THREAD_PER_CONNECTION);
-#endif
-
if (thread_handling <= SCHEDULER_ONE_THREAD_PER_CONNECTION)
one_thread_per_connection_scheduler(thread_scheduler, &max_connections,
&connection_count);
@@ -9647,8 +9759,6 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
#endif
/* Ensure that some variables are not set higher than needed */
- if (back_log > max_connections)
- SYSVAR_AUTOSIZE(back_log, max_connections);
if (thread_cache_size > max_connections)
SYSVAR_AUTOSIZE(thread_cache_size, max_connections);
@@ -9663,22 +9773,17 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
(MYSQL_SERVER_SUFFIX is set by the compilation environment)
*/
-void set_server_version(void)
+void set_server_version(char *buf, size_t size)
{
- char *version_end= server_version+sizeof(server_version)-1;
- char *end= strxnmov(server_version, sizeof(server_version)-1,
- MYSQL_SERVER_VERSION,
- MYSQL_SERVER_SUFFIX_STR, NullS);
-#ifdef EMBEDDED_LIBRARY
- end= strnmov(end, "-embedded", (version_end-end));
-#endif
-#ifndef DBUG_OFF
- if (!strstr(MYSQL_SERVER_SUFFIX_STR, "-debug"))
- end= strnmov(end, "-debug", (version_end-end));
-#endif
- if (opt_log || global_system_variables.sql_log_slow || opt_bin_log)
- strnmov(end, "-log", (version_end-end)); // This may slow down system
- *end= 0;
+ bool is_log= opt_log || global_system_variables.sql_log_slow || opt_bin_log;
+ bool is_debug= IF_DBUG(!strstr(MYSQL_SERVER_SUFFIX_STR, "-debug"), 0);
+ strxnmov(buf, size - 1,
+ MYSQL_SERVER_VERSION,
+ MYSQL_SERVER_SUFFIX_STR,
+ IF_EMBEDDED("-embedded", ""),
+ is_debug ? "-debug" : "",
+ is_log ? "-log" : "",
+ NullS);
}
@@ -10279,7 +10384,8 @@ PSI_stage_info *all_server_stages[]=
& stage_master_gtid_wait,
& stage_gtid_wait_other_connection,
& stage_slave_background_process_request,
- & stage_slave_background_wait_request
+ & stage_slave_background_wait_request,
+ & stage_waiting_for_deadlock_kill
};
PSI_socket_key key_socket_tcpip, key_socket_unix, key_socket_client_connection;
@@ -10375,3 +10481,96 @@ void init_server_psi_keys(void)
}
#endif /* HAVE_PSI_INTERFACE */
+
+
+/*
+ Connection ID allocation.
+
+ We need to maintain thread_ids in the 32bit range,
+ because this is how it is passed to the client in the protocol.
+
+ The idea is to maintain a id range, initially set to
+ (0,UINT32_MAX). Whenever new id is needed, we increment the
+ lower limit and return its new value.
+
+ On "overflow", if an id can no longer be generated (i.e. lower == upper - 1),
+ we recalculate the range boundaries.
+ To do that, we first collect thread ids that are in use, by traversing
+ THD list, and find largest region within (0,UINT32_MAX), that is still free.
+
+*/
+
+static my_thread_id thread_id_max= UINT_MAX32;
+
+#include <vector>
+#include <algorithm>
+
+/*
+ Find largest unused thread_id range.
+
+ i.e for every number N within the returned range,
+ there is no existing connection with thread_id equal to N.
+
+ The range is exclusive, lower bound is always >=0 and
+ upper bound <=MAX_UINT32.
+
+ @param[out] low - lower bound for the range
+ @param[out] high - upper bound for the range
+*/
+static void recalculate_thread_id_range(my_thread_id *low, my_thread_id *high)
+{
+ std::vector<my_thread_id> ids;
+
+ // Add sentinels
+ ids.push_back(0);
+ ids.push_back(UINT_MAX32);
+
+ mysql_mutex_lock(&LOCK_thread_count);
+
+ I_List_iterator<THD> it(threads);
+ THD *thd;
+ while ((thd=it++))
+ ids.push_back(thd->thread_id);
+
+ mysql_mutex_unlock(&LOCK_thread_count);
+
+ std::sort(ids.begin(), ids.end());
+ my_thread_id max_gap= 0;
+ for (size_t i= 0; i < ids.size() - 1; i++)
+ {
+ my_thread_id gap= ids[i+1] - ids[i];
+ if (gap > max_gap)
+ {
+ *low= ids[i];
+ *high= ids[i+1];
+ max_gap= gap;
+ }
+ }
+
+ if (max_gap < 2)
+ {
+ /* Can't find a free id. This is not really possible;
+ we'd need 2^32 connections for this to happen. */
+ sql_print_error("Cannot find free connection id.");
+ abort();
+ }
+}
+
+
+my_thread_id next_thread_id(void)
+{
+ my_thread_id retval;
+ DBUG_EXECUTE_IF("thread_id_overflow", global_thread_id= thread_id_max-2;);
+
+ mysql_mutex_lock(&LOCK_thread_id);
+
+ if (unlikely(global_thread_id == thread_id_max - 1))
+ {
+ recalculate_thread_id_range(&global_thread_id, &thread_id_max);
+ }
+
+ retval= ++global_thread_id;
+
+ mysql_mutex_unlock(&LOCK_thread_id);
+ return retval;
+}
diff --git a/sql/mysqld.h b/sql/mysqld.h
index af519622d97..b02bd9fb1f6 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2006, 2016, Oracle and/or its affiliates.
- Copyright (c) 2010, 2016, MariaDB
+ Copyright (c) 2010, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,7 +17,8 @@
#ifndef MYSQLD_INCLUDED
#define MYSQLD_INCLUDED
-#include "my_global.h" /* MYSQL_PLUGIN_IMPORT, FN_REFLEN, FN_EXTLEN */
+#include <my_global.h> /* MYSQL_PLUGIN_IMPORT, FN_REFLEN, FN_EXTLEN */
+#include "sql_basic_types.h" /* query_id_t */
#include "sql_bitmap.h" /* Bitmap */
#include "my_decimal.h" /* my_decimal */
#include "mysql_com.h" /* SERVER_VERSION_LENGTH */
@@ -30,6 +31,7 @@
#include "my_rdtsc.h"
class THD;
+class CONNECT;
struct handlerton;
class Time_zone;
@@ -47,16 +49,16 @@ typedef Bitmap<((MAX_INDEXES+7)/8*8)> key_map; /* Used for finding keys */
#endif
/* Bits from testflag */
-#define TEST_PRINT_CACHED_TABLES 1
-#define TEST_NO_KEY_GROUP 2
-#define TEST_MIT_THREAD 4
-#define TEST_BLOCKING 8
-#define TEST_KEEP_TMP_TABLES 16
-#define TEST_READCHECK 64 /**< Force use of readcheck */
-#define TEST_NO_EXTRA 128
-#define TEST_CORE_ON_SIGNAL 256 /**< Give core if signal */
-#define TEST_SIGINT 1024 /**< Allow sigint on threads */
-#define TEST_SYNCHRONIZATION 2048 /**< get server to do sleep in
+#define TEST_PRINT_CACHED_TABLES 1U
+#define TEST_NO_KEY_GROUP 2U
+#define TEST_MIT_THREAD 4U
+#define TEST_BLOCKING 8U
+#define TEST_KEEP_TMP_TABLES 16U
+#define TEST_READCHECK 64U /**< Force use of readcheck */
+#define TEST_NO_EXTRA 128U
+#define TEST_CORE_ON_SIGNAL 256U /**< Give core if signal */
+#define TEST_SIGINT 1024U /**< Allow sigint on threads */
+#define TEST_SYNCHRONIZATION 2048U /**< get server to do sleep in
some places */
/* Keep things compatible */
@@ -79,16 +81,18 @@ enum enum_slave_parallel_mode {
};
/* Function prototypes */
-void kill_mysql(void);
+void kill_mysql(THD *thd= 0);
void close_connection(THD *thd, uint sql_errno= 0);
-void handle_connection_in_main_thread(THD *thd);
-void create_thread_to_handle_connection(THD *thd);
+void handle_connection_in_main_thread(CONNECT *thd);
+void create_thread_to_handle_connection(CONNECT *connect);
void signal_thd_deleted();
void unlink_thd(THD *thd);
bool one_thread_per_connection_end(THD *thd, bool put_in_cache);
void flush_thread_cache();
void refresh_status(THD *thd);
bool is_secure_file_path(char *path);
+void dec_connection_count(scheduler_functions *scheduler);
+extern void init_net_server_extension(THD *thd);
extern "C" MYSQL_PLUGIN_IMPORT CHARSET_INFO *system_charset_info;
extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *files_charset_info ;
@@ -103,11 +107,13 @@ extern CHARSET_INFO *error_message_charset_info;
extern CHARSET_INFO *character_set_filesystem;
extern MY_BITMAP temp_pool;
-extern bool opt_large_files, server_id_supplied;
-extern bool opt_update_log, opt_bin_log, opt_error_log;
+extern bool opt_large_files;
+extern bool opt_update_log, opt_bin_log, opt_error_log, opt_bin_log_compress;
+extern uint opt_bin_log_compress_min_len;
extern my_bool opt_log, opt_bootstrap;
extern my_bool opt_backup_history_log;
extern my_bool opt_backup_progress_log;
+extern my_bool opt_support_flashback;
extern ulonglong log_output_options;
extern ulong log_backup_output_options;
extern my_bool opt_log_queries_not_using_indexes;
@@ -115,8 +121,9 @@ extern bool opt_disable_networking, opt_skip_show_db;
extern bool opt_skip_name_resolve;
extern bool opt_ignore_builtin_innodb;
extern my_bool opt_character_set_client_handshake;
+extern my_bool debug_assert_on_not_freed_memory;
extern bool volatile abort_loop;
-extern bool in_bootstrap;
+extern bool volatile in_bootstrap;
extern uint connection_count;
extern my_bool opt_safe_user_create;
extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap;
@@ -179,7 +186,7 @@ extern char log_error_file[FN_REFLEN], *opt_tc_log_file;
extern const double log_10[309];
extern ulonglong keybuff_size;
extern ulonglong thd_startup_options;
-extern ulong thread_id;
+extern my_thread_id global_thread_id;
extern ulong binlog_cache_use, binlog_cache_disk_use;
extern ulong binlog_stmt_cache_use, binlog_stmt_cache_disk_use;
extern ulong aborted_threads,aborted_connects;
@@ -201,9 +208,10 @@ extern LEX_CSTRING reason_slave_blocked;
extern ulong slave_trans_retries;
extern uint slave_net_timeout;
extern int max_user_connections;
+extern volatile ulong cached_thread_count;
extern ulong what_to_log,flush_time;
extern uint max_prepared_stmt_count, prepared_stmt_count;
-extern ulong open_files_limit;
+extern MYSQL_PLUGIN_IMPORT ulong open_files_limit;
extern ulonglong binlog_cache_size, binlog_stmt_cache_size;
extern ulonglong max_binlog_cache_size, max_binlog_stmt_cache_size;
extern ulong max_binlog_size;
@@ -265,12 +273,6 @@ extern my_bool encrypt_tmp_disk_tables, encrypt_tmp_files;
extern ulong encryption_algorithm;
extern const char *encryption_algorithm_names[];
-/*
- THR_MALLOC is a key which will be used to set/get MEM_ROOT** for a thread,
- using my_pthread_setspecific_ptr()/my_thread_getspecific_ptr().
-*/
-extern pthread_key(MEM_ROOT**,THR_MALLOC);
-
#ifdef HAVE_PSI_INTERFACE
#ifdef HAVE_MMAP
extern PSI_mutex_key key_PAGE_lock, key_LOCK_sync, key_LOCK_active,
@@ -300,6 +302,7 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_relay_log_info_log_space_lock, key_relay_log_info_run_lock,
key_rpl_group_info_sleep_lock,
key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data,
+ key_LOCK_start_thread,
key_LOCK_error_messages, key_LOCK_thread_count, key_PARTITION_LOCK_auto_inc;
extern PSI_mutex_key key_RELAYLOG_LOCK_index;
extern PSI_mutex_key key_LOCK_slave_state, key_LOCK_binlog_state,
@@ -331,6 +334,7 @@ extern PSI_cond_key key_BINLOG_COND_xid_list, key_BINLOG_update_cond,
key_relay_log_info_start_cond, key_relay_log_info_stop_cond,
key_rpl_group_info_sleep_cond,
key_TABLE_SHARE_cond, key_user_level_lock_cond,
+ key_COND_start_thread,
key_COND_thread_count, key_COND_thread_cache, key_COND_flush_thread_cache;
extern PSI_cond_key key_RELAYLOG_update_cond, key_COND_wakeup_ready,
key_COND_wait_commit;
@@ -543,6 +547,8 @@ extern uint mysql_real_data_home_len;
extern const char *mysql_real_data_home_ptr;
extern ulong thread_handling;
extern "C" MYSQL_PLUGIN_IMPORT char server_version[SERVER_VERSION_LENGTH];
+extern char *server_version_ptr;
+extern bool using_custom_server_version;
extern MYSQL_PLUGIN_IMPORT char mysql_real_data_home[];
extern char mysql_unpacked_real_data_home[];
extern MYSQL_PLUGIN_IMPORT struct system_variables global_system_variables;
@@ -565,6 +571,7 @@ extern mysql_mutex_t
LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count,
LOCK_slave_background;
extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_thread_count;
+extern mysql_mutex_t LOCK_start_thread;
#ifdef HAVE_OPENSSL
extern char* des_key_file;
extern mysql_mutex_t LOCK_des_key_file;
@@ -572,8 +579,8 @@ extern mysql_mutex_t LOCK_des_key_file;
extern mysql_mutex_t LOCK_server_started;
extern mysql_cond_t COND_server_started;
extern mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
-extern mysql_rwlock_t LOCK_system_variables_hash;
-extern mysql_cond_t COND_thread_count;
+extern mysql_prlock_t LOCK_system_variables_hash;
+extern mysql_cond_t COND_thread_count, COND_start_thread;
extern mysql_cond_t COND_manager;
extern mysql_cond_t COND_slave_background;
extern int32 thread_running;
@@ -661,13 +668,15 @@ enum enum_query_type
QT_WITHOUT_INTRODUCERS= (1 << 1),
/// view internal representation (like QT_ORDINARY except ORDER BY clause)
QT_VIEW_INTERNAL= (1 << 2),
- /// If identifiers should not include database names for the current database
- QT_ITEM_IDENT_SKIP_CURRENT_DATABASE= (1 << 3),
+ /// If identifiers should not include database names, where unambiguous
+ QT_ITEM_IDENT_SKIP_DB_NAMES= (1 << 3),
+ /// If identifiers should not include table names, where unambiguous
+ QT_ITEM_IDENT_SKIP_TABLE_NAMES= (1 << 4),
/// If Item_cache_wrapper should not print <expr_cache>
- QT_ITEM_CACHE_WRAPPER_SKIP_DETAILS= (1 << 4),
+ QT_ITEM_CACHE_WRAPPER_SKIP_DETAILS= (1 << 5),
/// If Item_subselect should print as just "(subquery#1)"
/// rather than display the subquery body
- QT_ITEM_SUBSELECT_ID_ONLY= (1 << 5),
+ QT_ITEM_SUBSELECT_ID_ONLY= (1 << 6),
/// If NULLIF(a,b) should print itself as
/// CASE WHEN a_for_comparison=b THEN NULL ELSE a_for_return_value END
/// when "a" was replaced to two different items
@@ -677,11 +686,11 @@ enum enum_query_type
/// a_for_return_value is not the same as a_for_comparison.
/// SHOW CREATE {VIEW|PROCEDURE|FUNCTION} and other cases where the
/// original representation is required, should set this flag.
- QT_ITEM_ORIGINAL_FUNC_NULLIF= (1 <<6),
+ QT_ITEM_ORIGINAL_FUNC_NULLIF= (1 << 7),
/// This value means focus on readability, not on ability to parse back, etc.
QT_EXPLAIN= QT_TO_SYSTEM_CHARSET |
- QT_ITEM_IDENT_SKIP_CURRENT_DATABASE |
+ QT_ITEM_IDENT_SKIP_DB_NAMES |
QT_ITEM_CACHE_WRAPPER_SKIP_DETAILS |
QT_ITEM_SUBSELECT_ID_ONLY,
@@ -699,10 +708,9 @@ enum enum_query_type
/* query_id */
-typedef int64 query_id_t;
extern query_id_t global_query_id;
-void unireg_end(void) __attribute__((noreturn));
+ATTRIBUTE_NORETURN void unireg_end(void);
/* increment query_id and return it. */
inline __attribute__((warn_unused_result)) query_id_t next_query_id()
@@ -715,6 +723,8 @@ inline query_id_t get_query_id()
return my_atomic_load64_explicit(&global_query_id, MY_MEMORY_ORDER_RELAXED);
}
+/* increment global_thread_id and return it. */
+extern __attribute__((warn_unused_result)) my_thread_id next_thread_id(void);
/*
TODO: Replace this with an inline function.
@@ -763,7 +773,7 @@ inline void dec_thread_running()
thread_safe_decrement32(&thread_running);
}
-void set_server_version(void);
+extern void set_server_version(char *buf, size_t size);
#define current_thd _current_thd()
inline int set_current_thd(THD *thd)
@@ -771,6 +781,7 @@ inline int set_current_thd(THD *thd)
return my_pthread_setspecific_ptr(THR_THD, thd);
}
+
/*
@todo remove, make it static in ha_maria.cc
currently it's needed for sql_select.cc
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index 6d200f55655..7b988fd369b 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2012, 2017, MariaDB Corporation
+ Copyright (c) 2012, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -61,8 +61,9 @@
#define EXTRA_DEBUG_ASSERT DBUG_ASSERT
#else
static void inline EXTRA_DEBUG_fprintf(...) {}
+#ifndef MYSQL_SERVER
static int inline EXTRA_DEBUG_fflush(...) { return 0; }
-#define EXTRA_DEBUG_ASSERT(X) do {} while(0)
+#endif
#endif
#ifdef MYSQL_SERVER
#define MYSQL_SERVER_my_error my_error
@@ -70,6 +71,9 @@ static int inline EXTRA_DEBUG_fflush(...) { return 0; }
static void inline MYSQL_SERVER_my_error(...) {}
#endif
+#ifndef EXTRA_DEBUG_ASSERT
+# define EXTRA_DEBUG_ASSERT(X) do {} while(0)
+#endif
/*
The following handles the differences when this is linked between the
@@ -119,10 +123,11 @@ extern my_bool thd_net_is_killed();
#endif
#define TEST_BLOCKING 8
-#define MAX_PACKET_LENGTH (256L*256L*256L-1)
static my_bool net_write_buff(NET *, const uchar *, ulong);
+my_bool net_allocate_new_packet(NET *net, void *thd, uint my_flags);
+
/** Init with packet info. */
my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags)
@@ -131,14 +136,12 @@ my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags)
DBUG_PRINT("enter", ("my_flags: %u", my_flags));
net->vio = vio;
my_net_local_init(net); /* Set some limits */
- if (!(net->buff=(uchar*) my_malloc((size_t) net->max_packet+
- NET_HEADER_SIZE + COMP_HEADER_SIZE +1,
- MYF(MY_WME | my_flags))))
+
+ if (net_allocate_new_packet(net, thd, my_flags))
DBUG_RETURN(1);
- net->buff_end=net->buff+net->max_packet;
+
net->error=0; net->return_status=0;
net->pkt_nr=net->compress_pkt_nr=0;
- net->write_pos=net->read_pos = net->buff;
net->last_error[0]=0;
net->compress=0; net->reading_or_writing=0;
net->where_b = net->remain_in_buf=0;
@@ -167,6 +170,18 @@ my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags)
DBUG_RETURN(0);
}
+my_bool net_allocate_new_packet(NET *net, void *thd, uint my_flags)
+{
+ DBUG_ENTER("net_allocate_new_packet");
+ if (!(net->buff=(uchar*) my_malloc((size_t) net->max_packet+
+ NET_HEADER_SIZE + COMP_HEADER_SIZE +1,
+ MYF(MY_WME | my_flags))))
+ DBUG_RETURN(1);
+ net->buff_end=net->buff+net->max_packet;
+ net->write_pos=net->read_pos = net->buff;
+ DBUG_RETURN(0);
+}
+
void net_end(NET *net)
{
@@ -1124,15 +1139,22 @@ ulong my_net_read(NET *net)
The function returns the length of the found packet or packet_error.
net->read_pos points to the read data.
*/
+ulong
+my_net_read_packet(NET *net, my_bool read_from_server)
+{
+ ulong reallen = 0;
+ return my_net_read_packet_reallen(net, read_from_server, &reallen);
+}
ulong
-my_net_read_packet(NET *net, my_bool read_from_server)
+my_net_read_packet_reallen(NET *net, my_bool read_from_server, ulong* reallen)
{
size_t len, complen;
MYSQL_NET_READ_START();
+ *reallen = 0;
#ifdef HAVE_COMPRESS
if (!net->compress)
{
@@ -1155,7 +1177,10 @@ my_net_read_packet(NET *net, my_bool read_from_server)
}
net->read_pos = net->buff + net->where_b;
if (len != packet_error)
+ {
net->read_pos[len]=0; /* Safeguard for mysql_use_result */
+ *reallen = len;
+ }
MYSQL_NET_READ_DONE(0, len);
return len;
#ifdef HAVE_COMPRESS
@@ -1256,6 +1281,7 @@ my_net_read_packet(NET *net, my_bool read_from_server)
return packet_error;
}
buf_length+= complen;
+ *reallen += packet_len;
}
net->read_pos= net->buff+ first_packet_offset + NET_HEADER_SIZE;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 2f76f918e34..d7bbdeb82a4 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -114,12 +114,11 @@
#include "sql_parse.h" // check_stack_overrun
#include "sql_partition.h" // get_part_id_func, PARTITION_ITERATOR,
// struct partition_info, NOT_A_PARTITION_ID
-#include "sql_base.h" // free_io_cache
#include "records.h" // init_read_record, end_read_record
#include <m_ctype.h>
#include "sql_select.h"
#include "sql_statistics.h"
-#include "filesort.h" // filesort_free_buffers
+#include "uniques.h"
#ifndef EXTRA_DEBUG
#define test_rb_tree(A,B) {}
@@ -500,9 +499,9 @@ int SEL_IMERGE::or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree)
if (trees_next == trees_end)
{
const int realloc_ratio= 2; /* Double size for next round */
- uint old_elements= (trees_end - trees);
- uint old_size= sizeof(SEL_TREE**) * old_elements;
- uint new_size= old_size * realloc_ratio;
+ size_t old_elements= (trees_end - trees);
+ size_t old_size= sizeof(SEL_TREE**) * old_elements;
+ size_t new_size= old_size * realloc_ratio;
SEL_TREE **new_trees;
if (!(new_trees= (SEL_TREE**)alloc_root(param->mem_root, new_size)))
return -1;
@@ -847,10 +846,10 @@ SEL_TREE::SEL_TREE(SEL_TREE *arg, bool without_merges,
SEL_IMERGE::SEL_IMERGE(SEL_IMERGE *arg, uint cnt,
RANGE_OPT_PARAM *param) : Sql_alloc()
{
- uint elements= (arg->trees_end - arg->trees);
+ size_t elements= (arg->trees_end - arg->trees);
if (elements > PREALLOCED_TREES)
{
- uint size= elements * sizeof (SEL_TREE **);
+ size_t size= elements * sizeof (SEL_TREE **);
if (!(trees= (SEL_TREE **)alloc_root(param->mem_root, size)))
goto mem_err;
}
@@ -952,7 +951,7 @@ int imerge_list_or_list(RANGE_OPT_PARAM *param,
uint rc;
bool is_last_check_pass= FALSE;
SEL_IMERGE *imerge= im1->head();
- uint elems= imerge->trees_next-imerge->trees;
+ uint elems= (uint)(imerge->trees_next-imerge->trees);
MEM_ROOT *mem_root= current_thd->mem_root;
im1->empty();
@@ -1052,7 +1051,7 @@ int imerge_list_or_tree(RANGE_OPT_PARAM *param,
SEL_TREE *or_tree= new (mem_root) SEL_TREE (tree, FALSE, param);
if (or_tree)
{
- uint elems= imerge->trees_next-imerge->trees;
+ uint elems= (uint)(imerge->trees_next-imerge->trees);
rc= imerge->or_sel_tree_with_checks(param, elems, or_tree,
TRUE, &is_last_check_pass);
if (!is_last_check_pass)
@@ -1165,6 +1164,7 @@ int imerge_list_and_tree(RANGE_OPT_PARAM *param,
SQL_SELECT *make_select(TABLE *head, table_map const_tables,
table_map read_tables, COND *conds,
+ SORT_INFO *filesort,
bool allow_null_cond,
int *error)
{
@@ -1185,13 +1185,16 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables,
select->head=head;
select->cond= conds;
- if (head->sort.io_cache)
+ if (filesort && my_b_inited(&filesort->io_cache))
{
- select->file= *head->sort.io_cache;
+ /*
+ Hijack the filesort io_cache for make_select
+ SQL_SELECT will be responsible for ensuring that it's properly freed.
+ */
+ select->file= filesort->io_cache;
select->records=(ha_rows) (select->file.end_of_file/
head->file->ref_length);
- my_free(head->sort.io_cache);
- head->sort.io_cache=0;
+ my_b_clear(&filesort->io_cache);
}
DBUG_RETURN(select);
}
@@ -1233,7 +1236,7 @@ QUICK_SELECT_I::QUICK_SELECT_I()
QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bool no_alloc, MEM_ROOT *parent_alloc,
bool *create_error)
- :doing_key_read(0),free_file(0),cur_range(NULL),last_range(0),dont_free(0)
+ :free_file(0),cur_range(NULL),last_range(0),dont_free(0)
{
my_bitmap_map *bitmap;
DBUG_ENTER("QUICK_RANGE_SELECT::QUICK_RANGE_SELECT");
@@ -1315,11 +1318,10 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
if (file)
{
range_end();
- if (doing_key_read)
- file->extra(HA_EXTRA_NO_KEYREAD);
+ file->ha_end_keyread();
if (free_file)
{
- DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file,
+ DBUG_PRINT("info", ("Freeing separate handler %p (free: %d)", file,
free_file));
file->ha_external_lock(current_thd, F_UNLCK);
file->ha_close();
@@ -1346,7 +1348,6 @@ QUICK_INDEX_SORT_SELECT::QUICK_INDEX_SORT_SELECT(THD *thd_param, TABLE *table)
DBUG_ENTER("QUICK_INDEX_SORT_SELECT::QUICK_INDEX_SORT_SELECT");
index= MAX_KEY;
head= table;
- bzero(&read_record, sizeof(read_record));
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0,
MYF(MY_THREAD_SPECIFIC));
DBUG_VOID_RETURN;
@@ -1403,7 +1404,6 @@ QUICK_INDEX_SORT_SELECT::~QUICK_INDEX_SORT_SELECT()
delete pk_quick_select;
/* It's ok to call the next two even if they are already deinitialized */
end_read_record(&read_record);
- free_io_cache(head);
free_root(&alloc,MYF(0));
DBUG_VOID_RETURN;
}
@@ -1472,8 +1472,8 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler,
MEM_ROOT *local_alloc)
{
handler *save_file= file, *org_file;
- my_bool org_key_read;
THD *thd= head->in_use;
+ MY_BITMAP * const save_vcol_set= head->vcol_set;
MY_BITMAP * const save_read_set= head->read_set;
MY_BITMAP * const save_write_set= head->write_set;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");
@@ -1481,12 +1481,11 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler,
in_ror_merged_scan= 1;
if (reuse_handler)
{
- DBUG_PRINT("info", ("Reusing handler 0x%lx", (long) file));
+ DBUG_PRINT("info", ("Reusing handler %p", file));
if (init())
{
DBUG_RETURN(1);
}
- head->column_bitmaps_set(&column_bitmap, &column_bitmap);
goto end;
}
@@ -1511,8 +1510,6 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler,
goto failure; /* purecov: inspected */
}
- head->column_bitmaps_set(&column_bitmap, &column_bitmap);
-
if (file->ha_external_lock(thd, F_RDLCK))
goto failure;
@@ -1526,31 +1523,22 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler,
last_rowid= file->ref;
end:
- DBUG_ASSERT(head->read_set == &column_bitmap);
/*
We are only going to read key fields and call position() on 'file'
The following sets head->read_set (== column_bitmap) to only use this
key. The 'column_bitmap' is used in ::get_next()
*/
org_file= head->file;
- org_key_read= head->key_read;
head->file= file;
- head->key_read= 0;
- head->mark_columns_used_by_index_no_reset(index, &column_bitmap);
-
- if (!head->no_keyread)
- {
- doing_key_read= 1;
- head->enable_keyread();
- }
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap, &column_bitmap);
+ head->prepare_for_keyread(index, &column_bitmap);
head->prepare_for_position();
head->file= org_file;
- head->key_read= org_key_read;
/* Restore head->read_set (and write_set) to what they had before the call */
- head->column_bitmaps_set(save_read_set, save_write_set);
+ head->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
if (reset())
{
@@ -1565,7 +1553,7 @@ end:
DBUG_RETURN(0);
failure:
- head->column_bitmaps_set(save_read_set, save_write_set);
+ head->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
delete file;
file= save_file;
free_file= false;
@@ -2846,7 +2834,7 @@ double records_in_column_ranges(PARAM *param, uint idx,
/* Handle cases when we don't have a valid non-empty list of range */
if (!tree)
- return HA_POS_ERROR;
+ return DBL_MAX;
if (tree->type == SEL_ARG::IMPOSSIBLE)
return (0L);
@@ -2866,9 +2854,9 @@ double records_in_column_ranges(PARAM *param, uint idx,
max_endp= range.end_key.length? &range.end_key : NULL;
rows= get_column_range_cardinality(field, min_endp, max_endp,
range.range_flag);
- if (HA_POS_ERROR == rows)
+ if (DBL_MAX == rows)
{
- total_rows= HA_POS_ERROR;
+ total_rows= DBL_MAX;
break;
}
total_rows += rows;
@@ -2916,7 +2904,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
uint keynr;
uint max_quick_key_parts= 0;
MY_BITMAP *used_fields= &table->cond_set;
- double table_records= table->stat_records();
+ double table_records= (double)table->stat_records();
DBUG_ENTER("calculate_cond_selectivity_for_table");
table->cond_selectivity= 1.0;
@@ -3114,7 +3102,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
else
{
rows= records_in_column_ranges(&param, idx, key);
- if (rows != HA_POS_ERROR)
+ if (rows != DBL_MAX)
key->field->cond_selectivity= rows/table_records;
}
}
@@ -3161,8 +3149,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
DBUG_RETURN(TRUE);
dt->list.empty();
dt->table= table;
- if ((*cond)->walk(&Item::find_selective_predicates_list_processor, 0,
- (uchar*) dt))
+ if ((*cond)->walk(&Item::find_selective_predicates_list_processor, 0, dt))
DBUG_RETURN(TRUE);
if (dt->list.elements > 0)
{
@@ -4032,8 +4019,8 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
store_length_array,
range_par->min_key,
range_par->max_key,
- tmp_min_key - range_par->min_key,
- tmp_max_key - range_par->max_key,
+ (uint)(tmp_min_key - range_par->min_key),
+ (uint)(tmp_max_key - range_par->max_key),
flag,
&ppar->part_iter);
if (!res)
@@ -4681,6 +4668,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
double roru_index_costs;
ha_rows roru_total_records;
double roru_intersect_part= 1.0;
+ size_t n_child_scans;
DBUG_ENTER("get_best_disjunct_quick");
DBUG_PRINT("info", ("Full table scan cost: %g", read_time));
@@ -4697,7 +4685,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
}
}
- uint n_child_scans= imerge->trees_next - imerge->trees;
+ n_child_scans= imerge->trees_next - imerge->trees;
if (!n_child_scans)
DBUG_RETURN(NULL);
@@ -4794,7 +4782,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
unique_calc_buff_size=
Unique::get_cost_calc_buff_size((ulong)non_cpk_scan_records,
param->table->file->ref_length,
- param->thd->variables.sortbuff_size);
+ (size_t)param->thd->variables.sortbuff_size);
if (param->imerge_cost_buff_size < unique_calc_buff_size)
{
if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root,
@@ -4806,7 +4794,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
imerge_cost +=
Unique::get_use_cost(param->imerge_cost_buff, (uint)non_cpk_scan_records,
param->table->file->ref_length,
- param->thd->variables.sortbuff_size,
+ (size_t)param->thd->variables.sortbuff_size,
TIME_FOR_COMPARE_ROWID,
FALSE, NULL);
DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)",
@@ -4915,8 +4903,8 @@ skip_to_ror_scan:
(TIME_FOR_COMPARE_ROWID * M_LN2) +
get_sweep_read_cost(param, roru_total_records);
- DBUG_PRINT("info", ("ROR-union: cost %g, %d members", roru_total_cost,
- n_child_scans));
+ DBUG_PRINT("info", ("ROR-union: cost %g, %zu members",
+ roru_total_cost, n_child_scans));
TRP_ROR_UNION* roru;
if (roru_total_cost < read_time)
{
@@ -5059,7 +5047,7 @@ typedef struct st_common_index_intersect_info
PARAM *param; /* context info for range optimizations */
uint key_size; /* size of a ROWID element stored in Unique object */
uint compare_factor; /* 1/compare - cost to compare two ROWIDs */
- ulonglong max_memory_size; /* maximum space allowed for Unique objects */
+ size_t max_memory_size; /* maximum space allowed for Unique objects */
ha_rows table_cardinality; /* estimate of the number of records in table */
double cutoff_cost; /* discard index intersects with greater costs */
INDEX_SCAN_INFO *cpk_scan; /* clustered primary key used in intersection */
@@ -5251,7 +5239,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
INDEX_SCAN_INFO **scan_ptr;
INDEX_SCAN_INFO *cpk_scan= NULL;
TABLE *table= param->table;
- uint n_index_scans= tree->index_scans_end - tree->index_scans;
+ uint n_index_scans= (uint)(tree->index_scans_end - tree->index_scans);
if (!n_index_scans)
return 1;
@@ -5263,7 +5251,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
common->param= param;
common->key_size= table->file->ref_length;
common->compare_factor= TIME_FOR_COMPARE_ROWID;
- common->max_memory_size= param->thd->variables.sortbuff_size;
+ common->max_memory_size= (size_t)param->thd->variables.sortbuff_size;
common->cutoff_cost= cutoff_cost;
common->cpk_scan= NULL;
common->table_cardinality=
@@ -5699,7 +5687,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
uint *buff_elems= common_info->buff_elems;
uint key_size= common_info->key_size;
uint compare_factor= common_info->compare_factor;
- ulonglong max_memory_size= common_info->max_memory_size;
+ size_t max_memory_size= common_info->max_memory_size;
records_sent_to_unique+= ext_index_scan_records;
cost= Unique::get_use_cost(buff_elems, (size_t) records_sent_to_unique, key_size,
@@ -5893,7 +5881,7 @@ TRP_INDEX_INTERSECT *get_best_index_intersect(PARAM *param, SEL_TREE *tree,
}
}
- count= tree->index_scans_end - tree->index_scans;
+ count= (uint)(tree->index_scans_end - tree->index_scans);
for (i= 0; i < count; i++)
{
index_scan= tree->index_scans[i];
@@ -6553,7 +6541,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
intersect_scans_best););
*are_all_covering= intersect->is_covering;
- uint best_num= intersect_scans_best - intersect_scans;
+ uint best_num= (uint)(intersect_scans_best - intersect_scans);
ror_intersect_cpy(intersect, intersect_best);
/*
@@ -6737,7 +6725,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
TRP_ROR_INTERSECT *trp;
if (!(trp= new (param->mem_root) TRP_ROR_INTERSECT))
DBUG_RETURN(trp);
- uint best_num= (ror_scan_mark - tree->ror_scans);
+ uint best_num= (uint)(ror_scan_mark - tree->ror_scans);
if (!(trp->first_scan= (ROR_SCAN_INFO**)alloc_root(param->mem_root,
sizeof(ROR_SCAN_INFO*)*
best_num)))
@@ -6777,7 +6765,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool update_tbl_stats,
double read_time)
{
- uint idx, best_idx;
+ uint idx, UNINIT_VAR(best_idx);
SEL_ARG *key_to_read= NULL;
ha_rows UNINIT_VAR(best_records); /* protected by key_to_read */
uint UNINIT_VAR(best_mrr_flags), /* protected by key_to_read */
@@ -7263,6 +7251,205 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param,
/*
+ The structure Key_col_info is purely auxiliary and is used
+ only in the method Item_func_in::get_func_row_mm_tree
+*/
+struct Key_col_info {
+ Field *field; /* If != NULL the column can be used for keys */
+ cmp_item *comparator; /* If != 0 the column can be evaluated */
+};
+
+/**
+ Build SEL_TREE for the IN predicate whose arguments are rows
+
+ @param param PARAM from SQL_SELECT::test_quick_select
+ @param key_row First operand of the IN predicate
+
+ @note
+ The function builds a SEL_TREE for an IN predicate in the case
+ when the predicate uses row arguments. First the function
+ detects among the components of the key_row (c[1],...,c[n]) taken
+ from the left part of the predicate those that can be usable
+ for building SEL_TREE (c[i1],...,c[ik]). They have to contain
+ items whose real items are field items referring to the current
+ table or equal to the items referring to the current table.
+ For the remaining components of the row it checks whether they
+ can be evaluated. The result of the analysis is put into the
+ array of structures of the type Key_col_info.
+
+ After this the function builds the SEL_TREE for the following
+ formula that can be inferred from the given IN predicate:
+ c[i11]=a[1][i11] AND ... AND c[i1k1]=a[1][i1k1]
+ OR
+ ...
+ OR
+ c[im1]=a[m][im1] AND ... AND c[imkm]=a[m][imkm].
+ Here a[1],...,a[m] are all arguments of the IN predicate from
+ the right part and for each j ij1,...,ijkj is a subset of
+ i1,...,ik such that a[j][ij1],...,a[j][ijkj] can be evaluated.
+
+ If for some j none of a[j][i1],...,a[j][ik] can be evaluated
+ then no SEL_TREE can be built for this predicate and the
+ function immediately returns 0.
+
+ If for some j by using evaluated values of key_row it can be
+ proven that c[ij1]=a[j][ij1] AND ... AND c[ijkj]=a[j][ijkj]
+ is always FALSE then this disjunct is omitted.
+
+ @returns
+ the built SEL_TREE if it can be constructed
+ 0 - otherwise.
+*/
+
+SEL_TREE *Item_func_in::get_func_row_mm_tree(RANGE_OPT_PARAM *param,
+ Item_row *key_row)
+{
+ DBUG_ENTER("Item_func_in::get_func_row_mm_tree");
+
+ if (negated)
+ DBUG_RETURN(0);
+
+ SEL_TREE *res_tree= 0;
+ uint used_key_cols= 0;
+ uint col_comparators= 0;
+ table_map param_comp= ~(param->prev_tables | param->read_tables |
+ param->current_table);
+ uint row_cols= key_row->cols();
+ Dynamic_array <Key_col_info> key_cols_info(row_cols);
+ cmp_item_row *row_cmp_item= (cmp_item_row *)
+ (array ? ((in_row *) array)->get_cmp_item() :
+ cmp_items[(uint) ROW_RESULT]);
+
+ Item **key_col_ptr= key_row->addr(0);
+ for(uint i= 0; i < row_cols; i++, key_col_ptr++)
+ {
+ Key_col_info key_col_info= {0, NULL};
+ Item *key_col= *key_col_ptr;
+ if (key_col->real_item()->type() == Item::FIELD_ITEM)
+ {
+ /*
+ The i-th component of key_row can be used for key access if
+ key_col->real_item() points to a field of the current table or
+ if it is equal to a field item pointing to such a field.
+ */
+ Item_field *col_field_item= (Item_field *) (key_col->real_item());
+ Field *key_col_field= col_field_item->field;
+ if (key_col_field->table->map != param->current_table)
+ {
+ Item_equal *item_equal= col_field_item->item_equal;
+ if (item_equal)
+ {
+ Item_equal_fields_iterator it(*item_equal);
+ while (it++)
+ {
+ key_col_field= it.get_curr_field();
+ if (key_col_field->table->map == param->current_table)
+ break;
+ }
+ }
+ }
+ if (key_col_field->table->map == param->current_table)
+ {
+ key_col_info.field= key_col_field;
+ used_key_cols++;
+ }
+ }
+ else if (!(key_col->used_tables() & (param_comp | param->current_table))
+ && !key_col->is_expensive())
+ {
+ /* The i-th component of key_row can be evaluated */
+
+ /* See the comment in Item::get_mm_tree_for_const */
+ MEM_ROOT *tmp_root= param->mem_root;
+ param->thd->mem_root= param->old_root;
+
+ key_col->bring_value();
+ key_col_info.comparator= row_cmp_item->get_comparator(i);
+ key_col_info.comparator->store_value(key_col);
+ col_comparators++;
+
+ param->thd->mem_root= tmp_root;
+ }
+ key_cols_info.push(key_col_info);
+ }
+
+ if (!used_key_cols)
+ DBUG_RETURN(0);
+
+ uint omitted_tuples= 0;
+ Item **arg_start= arguments() + 1;
+ Item **arg_end= arg_start + argument_count() - 1;
+ for (Item **arg= arg_start ; arg < arg_end; arg++)
+ {
+ uint i;
+
+ /*
+ First check whether the disjunct constructed for *arg
+ is really needed
+ */
+ Item_row *arg_tuple= (Item_row *) (*arg);
+ if (col_comparators)
+ {
+ MEM_ROOT *tmp_root= param->mem_root;
+ param->thd->mem_root= param->old_root;
+ for (i= 0; i < row_cols; i++)
+ {
+ Key_col_info *key_col_info= &key_cols_info.at(i);
+ if (key_col_info->comparator)
+ {
+ Item *arg_col= arg_tuple->element_index(i);
+ if (!(arg_col->used_tables() & (param_comp | param->current_table)) &&
+ !arg_col->is_expensive() &&
+ key_col_info->comparator->cmp(arg_col))
+ {
+ omitted_tuples++;
+ break;
+ }
+ }
+ }
+ param->thd->mem_root= tmp_root;
+ if (i < row_cols)
+ continue;
+ }
+
+ /* The disjunct for *arg is needed: build it. */
+ SEL_TREE *and_tree= 0;
+ Item **arg_col_ptr= arg_tuple->addr(0);
+ for (uint i= 0; i < row_cols; i++, arg_col_ptr++)
+ {
+ Key_col_info *key_col_info= &key_cols_info.at(i);
+ if (!key_col_info->field)
+ continue;
+ Item *arg_col= *arg_col_ptr;
+ if (!(arg_col->used_tables() & (param_comp | param->current_table)) &&
+ !arg_col->is_expensive())
+ {
+ and_tree= tree_and(param, and_tree,
+ get_mm_parts(param,
+ key_col_info->field,
+ Item_func::EQ_FUNC,
+ arg_col->real_item()));
+ }
+ }
+ if (!and_tree)
+ {
+ res_tree= 0;
+ break;
+ }
+ /* Join the disjunct to the OR tree that is being constructed */
+ res_tree= !res_tree ? and_tree : tree_or(param, res_tree, and_tree);
+ }
+ if (omitted_tuples == argument_count() - 1)
+ {
+ /* It's turned out that all disjuncts are always FALSE */
+ res_tree= new (param->mem_root) SEL_TREE(SEL_TREE::IMPOSSIBLE,
+ param->mem_root, param->keys);
+ }
+ DBUG_RETURN(res_tree);
+}
+
+
+/*
Build conjunction of all SEL_TREEs for a simple predicate applying equalities
SYNOPSIS
@@ -7597,12 +7784,22 @@ SEL_TREE *Item_func_in::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr)
if (const_item())
DBUG_RETURN(get_mm_tree_for_const(param));
- if (key_item()->real_item()->type() != Item::FIELD_ITEM)
+ SEL_TREE *tree= 0;
+ switch (key_item()->real_item()->type()) {
+ case Item::FIELD_ITEM:
+ tree= get_full_func_mm_tree(param,
+ (Item_field*) (key_item()->real_item()),
+ NULL);
+ break;
+ case Item::ROW_ITEM:
+ tree= get_func_row_mm_tree(param,
+ (Item_row *) (key_item()->real_item()));
+ break;
+ default:
DBUG_RETURN(0);
- Item_field *field= (Item_field*) (key_item()->real_item());
- SEL_TREE *tree= get_full_func_mm_tree(param, field, NULL);
+ }
DBUG_RETURN(tree);
-}
+}
SEL_TREE *Item_equal::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr)
@@ -10081,8 +10278,8 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
ulong count=count_key_part_usage(root,pos->next_key_part);
if (count > pos->next_key_part->use_count)
{
- sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu "
- "should be %lu", (long unsigned int)pos,
+ sql_print_information("Use_count: Wrong count for key at %p: %lu "
+ "should be %lu", pos,
pos->next_key_part->use_count, count);
return;
}
@@ -10090,8 +10287,8 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
}
}
if (e_count != elements)
- sql_print_warning("Wrong use count: %u (should be %u) for tree at 0x%lx",
- e_count, elements, (long unsigned int) this);
+ sql_print_warning("Wrong use count: %u (should be %u) for tree at %p",
+ e_count, elements, this);
}
#endif
@@ -10700,7 +10897,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
/* Call multi_range_read_info() to get the MRR flags and buffer size */
quick->mrr_flags= HA_MRR_NO_ASSOCIATION |
- (table->key_read ? HA_MRR_INDEX_ONLY : 0);
+ (table->file->keyread_enabled() ? HA_MRR_INDEX_ONLY : 0);
if (thd->lex->sql_command != SQLCOM_SELECT)
quick->mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;
@@ -10750,21 +10947,16 @@ int read_keys_and_merge_scans(THD *thd,
Unique *unique= *unique_ptr;
handler *file= head->file;
bool with_cpk_filter= pk_quick_select != NULL;
- bool enabled_keyread= 0;
DBUG_ENTER("read_keys_and_merge");
/* We're going to just read rowids. */
- if (!head->key_read)
- {
- enabled_keyread= 1;
- head->enable_keyread();
- }
head->prepare_for_position();
cur_quick_it.rewind();
cur_quick= cur_quick_it++;
bool first_quick= TRUE;
DBUG_ASSERT(cur_quick != 0);
+ head->file->ha_start_keyread(cur_quick->index);
/*
We reuse the same instance of handler so we need to call both init and
@@ -10781,7 +10973,7 @@ int read_keys_and_merge_scans(THD *thd,
unique= new Unique(refpos_order_cmp, (void *)file,
file->ref_length,
- thd->variables.sortbuff_size,
+ (size_t)thd->variables.sortbuff_size,
intersection ? quick_selects.elements : 0);
if (!unique)
goto err;
@@ -10790,7 +10982,6 @@ int read_keys_and_merge_scans(THD *thd,
else
{
unique->reset();
- filesort_free_buffers(head, false);
}
DBUG_ASSERT(file->ref_length == unique->get_size());
@@ -10843,22 +11034,21 @@ int read_keys_and_merge_scans(THD *thd,
/*
Ok all rowids are in the Unique now. The next call will initialize
- head->sort structure so it can be used to iterate through the rowids
+ the unique structure so it can be used to iterate through the rowids
sequence.
*/
result= unique->get(head);
/*
index merge currently doesn't support "using index" at all
*/
- if (enabled_keyread)
- head->disable_keyread();
- if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE))
+ head->file->ha_end_keyread();
+ if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0,
+ &unique->sort, 1 , 1, TRUE))
result= 1;
DBUG_RETURN(result);
err:
- if (enabled_keyread)
- head->disable_keyread();
+ head->file->ha_end_keyread();
DBUG_RETURN(1);
}
@@ -10895,7 +11085,8 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
- free_io_cache(head);
+ // Free things used by sort early. Shouldn't be strictly necessary
+ unique->sort.reset();
/* All rows from Unique have been retrieved, do a clustered PK scan */
if (pk_quick_select)
{
@@ -10930,7 +11121,7 @@ int QUICK_INDEX_INTERSECT_SELECT::get_next()
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
- free_io_cache(head);
+ unique->sort.reset(); // Free things early
}
DBUG_RETURN(result);
@@ -11169,6 +11360,7 @@ int QUICK_RANGE_SELECT::reset()
HANDLER_BUFFER empty_buf;
MY_BITMAP * const save_read_set= head->read_set;
MY_BITMAP * const save_write_set= head->write_set;
+ MY_BITMAP * const save_vcol_set= head->vcol_set;
DBUG_ENTER("QUICK_RANGE_SELECT::reset");
last_range= NULL;
cur_range= (QUICK_RANGE**) ranges.buffer;
@@ -11182,7 +11374,8 @@ int QUICK_RANGE_SELECT::reset()
}
if (in_ror_merged_scan)
- head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap,
+ &column_bitmap);
if (file->inited == handler::NONE)
{
@@ -11208,7 +11401,10 @@ int QUICK_RANGE_SELECT::reset()
buf_size/= 2;
}
if (!mrr_buf_desc)
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ {
+ error= HA_ERR_OUT_OF_MEM;
+ goto err;
+ }
/* Initialize the handler buffer. */
mrr_buf_desc->buffer= mrange_buff;
@@ -11225,8 +11421,8 @@ int QUICK_RANGE_SELECT::reset()
err:
/* Restore bitmaps set on entry */
if (in_ror_merged_scan)
- head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
-
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set,
+ save_vcol_set);
DBUG_RETURN(error);
}
@@ -11257,13 +11453,16 @@ int QUICK_RANGE_SELECT::get_next()
MY_BITMAP * const save_read_set= head->read_set;
MY_BITMAP * const save_write_set= head->write_set;
+ MY_BITMAP * const save_vcol_set= head->vcol_set;
/*
We don't need to signal the bitmap change as the bitmap is always the
same for this head->file
*/
- head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap,
+ &column_bitmap);
result= file->multi_range_read_next(&dummy);
- head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set,
+ save_vcol_set);
DBUG_RETURN(result);
}
@@ -11319,7 +11518,7 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length,
DBUG_RETURN(0);
}
- uint count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer);
+ uint count= ranges.elements - (uint)(cur_range - (QUICK_RANGE**) ranges.buffer);
if (count == 0)
{
/* Ranges have already been used up before. None is left for read. */
@@ -11364,7 +11563,7 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
DBUG_RETURN(result);
}
- uint count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer);
+ uint count= ranges.elements - (uint)(cur_range - (QUICK_RANGE**) ranges.buffer);
if (count == 0)
{
/* Ranges have already been used up before. None is left for read. */
@@ -11440,7 +11639,7 @@ QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q,
used_key_parts (used_key_parts_arg)
{
QUICK_RANGE *r;
- /*
+ /*
Use default MRR implementation for reverse scans. No table engine
currently can do an MRR scan with output in reverse index order.
*/
@@ -11818,7 +12017,7 @@ void QUICK_SELECT_I::add_key_and_length(String *key_names,
bool *first)
{
char buf[64];
- uint length;
+ size_t length;
KEY *key_info= head->key_info + index;
if (*first)
@@ -11915,62 +12114,76 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names,
}
-void QUICK_RANGE_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+void QUICK_RANGE_SELECT::add_used_key_part_to_set()
{
uint key_len;
KEY_PART *part= key_parts;
for (key_len=0; key_len < max_used_key_length;
key_len += (part++)->store_length)
{
- bitmap_set_bit(col_set, part->field->field_index);
+ /*
+ We have to use field_index instead of part->field
+ as for partial fields, part->field points to
+ a temporary field that is only part of the original
+ field. field_index always points to the original field
+ */
+ Field *field= head->field[part->field->field_index];
+ field->register_field_in_read_map();
}
}
-void QUICK_GROUP_MIN_MAX_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+void QUICK_GROUP_MIN_MAX_SELECT::add_used_key_part_to_set()
{
uint key_len;
KEY_PART_INFO *part= index_info->key_part;
for (key_len=0; key_len < max_used_key_length;
key_len += (part++)->store_length)
{
- bitmap_set_bit(col_set, part->field->field_index);
+ /*
+ We have to use field_index instead of part->field
+ as for partial fields, part->field points to
+ a temporary field that is only part of the original
+ field. field_index always points to the original field
+ */
+ Field *field= head->field[part->field->field_index];
+ field->register_field_in_read_map();
}
}
-void QUICK_ROR_INTERSECT_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+void QUICK_ROR_INTERSECT_SELECT::add_used_key_part_to_set()
{
List_iterator_fast<QUICK_SELECT_WITH_RECORD> it(quick_selects);
QUICK_SELECT_WITH_RECORD *quick;
while ((quick= it++))
{
- quick->quick->add_used_key_part_to_set(col_set);
+ quick->quick->add_used_key_part_to_set();
}
}
-void QUICK_INDEX_SORT_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+void QUICK_INDEX_SORT_SELECT::add_used_key_part_to_set()
{
QUICK_RANGE_SELECT *quick;
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
while ((quick= it++))
{
- quick->add_used_key_part_to_set(col_set);
+ quick->add_used_key_part_to_set();
}
if (pk_quick_select)
- pk_quick_select->add_used_key_part_to_set(col_set);
+ pk_quick_select->add_used_key_part_to_set();
}
-void QUICK_ROR_UNION_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+void QUICK_ROR_UNION_SELECT::add_used_key_part_to_set()
{
QUICK_SELECT_I *quick;
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
while ((quick= it++))
{
- quick->add_used_key_part_to_set(col_set);
+ quick->add_used_key_part_to_set();
}
}
@@ -12182,9 +12395,6 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
DBUG_RETURN(NULL); /* Cannot execute with correlated conditions. */
/* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/
- if (join->make_sum_func_list(join->all_fields, join->fields_list, 1))
- DBUG_RETURN(NULL);
-
List_iterator<Item> select_items_it(join->fields_list);
is_agg_distinct = is_indexed_agg_distinct(join, &agg_distinct_flds);
@@ -12361,7 +12571,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
{
cur_group_prefix_len+= cur_part->store_length;
++cur_group_key_parts;
- max_key_part= cur_part - cur_index_info->key_part + 1;
+ max_key_part= (uint)(cur_part - cur_index_info->key_part) + 1;
used_key_parts_map.set_bit(max_key_part);
}
else
@@ -12512,7 +12722,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
/* Check if cur_part is referenced in the WHERE clause. */
if (join->conds->walk(&Item::find_item_in_field_list_processor, 0,
- (uchar*) key_part_range))
+ key_part_range))
goto next_index;
}
}
@@ -13084,7 +13294,7 @@ get_field_keypart(KEY *index, Field *field)
part < end; part++)
{
if (field->eq(part->field))
- return part - index->key_part + 1;
+ return (uint)(part - index->key_part + 1);
}
return 0;
}
@@ -13392,7 +13602,7 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg,
group_prefix_len(group_prefix_len_arg),
group_key_parts(group_key_parts_arg), have_min(have_min_arg),
have_max(have_max_arg), have_agg_distinct(have_agg_distinct_arg),
- seen_first_key(FALSE), doing_key_read(FALSE), min_max_arg_part(min_max_arg_part_arg),
+ seen_first_key(FALSE), min_max_arg_part(min_max_arg_part_arg),
key_infix(key_infix_arg), key_infix_len(key_infix_len_arg),
min_functions_it(NULL), max_functions_it(NULL),
is_index_scan(is_index_scan_arg)
@@ -13532,8 +13742,7 @@ QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT()
if (file->inited != handler::NONE)
{
DBUG_ASSERT(file == head->file);
- if (doing_key_read)
- head->disable_keyread();
+ head->file->ha_end_keyread();
/*
There may be a code path when the same table was first accessed by index,
then the index is closed, and the table is scanned (order by + loose scan).
@@ -13723,11 +13932,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset");
seen_first_key= FALSE;
- if (!head->key_read)
- {
- doing_key_read= 1;
- head->enable_keyread(); /* We need only the key attributes */
- }
+ head->file->ha_start_keyread(index); /* We need only the key attributes */
+
if ((result= file->ha_index_init(index,1)))
{
head->file->print_error(result, MYF(0));
@@ -13984,7 +14190,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max()
SELECT [SUM|COUNT|AVG](DISTINCT a,...) FROM t
This method comes to replace the index scan + Unique class
(distinct selection) for loose index scan that visits all the rows of a
- covering index instead of jumping in the begining of each group.
+ covering index instead of jumping in the beginning of each group.
TODO: Placeholder function. To be replaced by a handler API call
@param is_index_scan hint to use index scan instead of random index read
@@ -14447,6 +14653,32 @@ void QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths(String *key_names,
}
+/* Check whether the number for equality ranges exceeds the set threshold */
+
+bool eq_ranges_exceeds_limit(RANGE_SEQ_IF *seq, void *seq_init_param,
+ uint limit)
+{
+ KEY_MULTI_RANGE range;
+ range_seq_t seq_it;
+ uint count = 0;
+
+ if (limit == 0)
+ {
+ /* 'Statistics instead of index dives' feature is turned off */
+ return false;
+ }
+ seq_it= seq->init(seq_init_param, 0, 0);
+ while (!seq->next(seq_it, &range))
+ {
+ if ((range.range_flag & EQ_RANGE) && !(range.range_flag & NULL_RANGE))
+ {
+ if (++count >= limit)
+ return true;
+ }
+ }
+ return false;
+}
+
#ifndef DBUG_OFF
static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
@@ -14470,7 +14702,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
if (!tmp.length())
tmp.append(STRING_WITH_LEN("(empty)"));
- DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg,
+ DBUG_PRINT("info", ("SEL_TREE: %p (%s) scans: %s", tree, msg,
tmp.c_ptr_safe()));
DBUG_VOID_RETURN;
@@ -14694,6 +14926,4 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
}
}
-
#endif /* !DBUG_OFF */
-
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 0c495639db6..9e0bd3ae9ff 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -24,9 +24,10 @@
#pragma interface /* gcc class implementation */
#endif
-#include "thr_malloc.h" /* sql_memdup */
#include "records.h" /* READ_RECORD */
#include "queues.h" /* QUEUE */
+#include "filesort.h" /* SORT_INFO */
+
/*
It is necessary to include set_var.h instead of item.h because there
are dependencies on include order for set_var.h and item.h. This
@@ -241,7 +242,7 @@ public:
Number of children of this element in the RB-tree, plus 1 for this
element itself.
*/
- uint16 elements;
+ uint32 elements;
/*
Valid only for elements which are RB-tree roots: Number of times this
RB-tree is referred to (it is referred by SEL_ARG::next_key_part or by
@@ -1005,7 +1006,7 @@ public:
This is used by an optimization in filesort.
*/
- virtual void add_used_key_part_to_set(MY_BITMAP *col_set)=0;
+ virtual void add_used_key_part_to_set()=0;
};
@@ -1036,7 +1037,6 @@ class QUICK_RANGE_SELECT : public QUICK_SELECT_I
{
protected:
/* true if we enabled key only reads */
- bool doing_key_read;
handler *file;
/* Members to deal with case when this quick select is a ROR-merged scan */
@@ -1096,7 +1096,7 @@ public:
virtual void replace_handler(handler *new_file) { file= new_file; }
QUICK_SELECT_I *make_reverse(uint used_key_parts_arg);
- virtual void add_used_key_part_to_set(MY_BITMAP *col_set);
+ virtual void add_used_key_part_to_set();
private:
/* Default copy ctor used by QUICK_SELECT_DESC */
@@ -1260,7 +1260,7 @@ public:
/* used to get rows collected in Unique */
READ_RECORD read_record;
- virtual void add_used_key_part_to_set(MY_BITMAP *col_set);
+ virtual void add_used_key_part_to_set();
};
@@ -1335,7 +1335,7 @@ public:
void add_keys_and_lengths(String *key_names, String *used_lengths);
Explain_quick_select *get_explain(MEM_ROOT *alloc);
bool is_keys_used(const MY_BITMAP *fields);
- void add_used_key_part_to_set(MY_BITMAP *col_set);
+ void add_used_key_part_to_set();
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -1415,7 +1415,7 @@ public:
void add_keys_and_lengths(String *key_names, String *used_lengths);
Explain_quick_select *get_explain(MEM_ROOT *alloc);
bool is_keys_used(const MY_BITMAP *fields);
- void add_used_key_part_to_set(MY_BITMAP *col_set);
+ void add_used_key_part_to_set();
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -1559,7 +1559,7 @@ public:
bool unique_key_range() { return false; }
int get_type() { return QS_TYPE_GROUP_MIN_MAX; }
void add_keys_and_lengths(String *key_names, String *used_lengths);
- void add_used_key_part_to_set(MY_BITMAP *col_set);
+ void add_used_key_part_to_set();
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -1659,10 +1659,14 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
ha_rows records);
SQL_SELECT *make_select(TABLE *head, table_map const_tables,
table_map read_tables, COND *conds,
+ SORT_INFO* filesort,
bool allow_null_cond, int *error);
bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond);
+bool eq_ranges_exceeds_limit(RANGE_SEQ_IF *seq, void *seq_init_param,
+ uint limit);
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
#endif
diff --git a/sql/opt_range_mrr.cc b/sql/opt_range_mrr.cc
index b3350191d13..c7e4496f2c1 100644
--- a/sql/opt_range_mrr.cc
+++ b/sql/opt_range_mrr.cc
@@ -72,6 +72,7 @@ typedef struct st_sel_arg_range_seq
range_seq_t sel_arg_range_seq_init(void *init_param, uint n_ranges, uint flags)
{
SEL_ARG_RANGE_SEQ *seq= (SEL_ARG_RANGE_SEQ*)init_param;
+ seq->param->range_count=0;
seq->at_start= TRUE;
seq->stack[0].key_tree= NULL;
seq->stack[0].min_key= seq->param->min_key;
@@ -199,9 +200,9 @@ walk_right_n_up:
{
{
RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i];
- uint min_key_length= cur->min_key - seq->param->min_key;
- uint max_key_length= cur->max_key - seq->param->max_key;
- uint len= cur->min_key - cur[-1].min_key;
+ size_t min_key_length= cur->min_key - seq->param->min_key;
+ size_t max_key_length= cur->max_key - seq->param->max_key;
+ size_t len= cur->min_key - cur[-1].min_key;
if (!(min_key_length == max_key_length &&
!memcmp(cur[-1].min_key, cur[-1].max_key, len) &&
!key_tree->min_flag && !key_tree->max_flag))
@@ -238,7 +239,7 @@ walk_up_n_right:
/* Ok got a tuple */
RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i];
- uint min_key_length= cur->min_key - seq->param->min_key;
+ uint min_key_length= (uint)(cur->min_key - seq->param->min_key);
range->ptr= (char*)(intptr)(key_tree->part);
if (cur->min_key_flag & GEOM_FLAG)
@@ -256,13 +257,13 @@ walk_up_n_right:
range->range_flag= cur->min_key_flag | cur->max_key_flag;
range->start_key.key= seq->param->min_key;
- range->start_key.length= cur->min_key - seq->param->min_key;
+ range->start_key.length= (uint)(cur->min_key - seq->param->min_key);
range->start_key.keypart_map= make_prev_keypart_map(cur->min_key_parts);
range->start_key.flag= (cur->min_key_flag & NEAR_MIN ? HA_READ_AFTER_KEY :
HA_READ_KEY_EXACT);
range->end_key.key= seq->param->max_key;
- range->end_key.length= cur->max_key - seq->param->max_key;
+ range->end_key.length= (uint)(cur->max_key - seq->param->max_key);
range->end_key.flag= (cur->max_key_flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
HA_READ_AFTER_KEY);
range->end_key.keypart_map= make_prev_keypart_map(cur->max_key_parts);
@@ -272,25 +273,44 @@ walk_up_n_right:
key_info= NULL;
else
key_info= &seq->param->table->key_info[seq->real_keyno];
-
+
/*
- Conditions below:
- (1) - range analysis is used for estimating condition selectivity
- (2) - This is a unique key, and we have conditions for all its
- user-defined key parts.
- (3) - The table uses extended keys, this key covers all components,
- and we have conditions for all key parts.
+ This is an equality range (keypart_0=X and ... and keypart_n=Z) if
+ (1) - There are no flags indicating open range (e.g.,
+ "keypart_x > y") or GIS.
+ (2) - The lower bound and the upper bound of the range has the
+ same value (min_key == max_key).
*/
- if (!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag &&
- (!key_info || // (1)
- ((uint)key_tree->part+1 == key_info->user_defined_key_parts && // (2)
- key_info->flags & HA_NOSAME) || // (2)
- ((key_info->flags & HA_EXT_NOSAME) && // (3)
- (uint)key_tree->part+1 == key_info->ext_key_parts) // (3)
- ) &&
- range->start_key.length == range->end_key.length &&
- !memcmp(seq->param->min_key,seq->param->max_key,range->start_key.length))
- range->range_flag= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);
+ const uint is_open_range =
+ (NO_MIN_RANGE | NO_MAX_RANGE | NEAR_MIN | NEAR_MAX | GEOM_FLAG);
+ const bool is_eq_range_pred =
+ !(cur->min_key_flag & is_open_range) && // (1)
+ !(cur->max_key_flag & is_open_range) && // (1)
+ range->start_key.length == range->end_key.length && // (2)
+ !memcmp(seq->param->min_key, seq->param->max_key, // (2)
+ range->start_key.length);
+
+ if (is_eq_range_pred)
+ {
+ range->range_flag = EQ_RANGE;
+
+ /*
+ Conditions below:
+ (1) - Range analysis is used for estimating condition selectivity
+ (2) - This is a unique key, and we have conditions for all its
+ user-defined key parts.
+ (3) - The table uses extended keys, this key covers all components,
+ and we have conditions for all key parts.
+ */
+ if (
+ !key_info || // (1)
+ ((uint)key_tree->part+1 == key_info->user_defined_key_parts && // (2)
+ key_info->flags & HA_NOSAME) || // (2)
+ ((key_info->flags & HA_EXT_NOSAME) && // (3)
+ (uint)key_tree->part+1 == key_info->ext_key_parts) // (3)
+ )
+ range->range_flag |= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);
+ }
if (seq->param->is_ror_scan)
{
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 852f91efc00..e7bd7e88af3 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -446,7 +446,7 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred);
static bool convert_subq_to_jtbm(JOIN *parent_join,
Item_in_subselect *subq_pred, bool *remove);
static TABLE_LIST *alloc_join_nest(THD *thd);
-static uint get_tmp_table_rec_length(Item **p_list, uint elements);
+static uint get_tmp_table_rec_length(Ref_ptr_array p_list, uint elements);
static double get_tmp_table_lookup_cost(THD *thd, double row_count,
uint row_size);
static double get_tmp_table_write_cost(THD *thd, double row_count,
@@ -513,6 +513,7 @@ bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs,
(Subquery is correlated to the immediate outer query &&
Subquery !contains {GROUP BY, ORDER BY [LIMIT],
aggregate functions}) && subquery predicate is not under "NOT IN"))
+ 5. Subquery does not contain recursive references
A note about prepared statements: we want the if-branch to be taken on
PREPARE and each EXECUTE. The rewrites are only done once, but we need
@@ -529,7 +530,8 @@ bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs,
OPTIMIZER_SWITCH_PARTIAL_MATCH_ROWID_MERGE) || //3
optimizer_flag(thd,
OPTIMIZER_SWITCH_PARTIAL_MATCH_TABLE_SCAN)) && //3
- !in_subs->is_correlated) //4
+ !in_subs->is_correlated && //4
+ !in_subs->with_recursive_reference) //5
{
return TRUE;
}
@@ -1094,8 +1096,6 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
while ((in_subq= li++))
{
SELECT_LEX *subq_sel= in_subq->get_select_lex();
- if (subq_sel->handle_derived(thd->lex, DT_OPTIMIZE))
- DBUG_RETURN(1);
if (subq_sel->handle_derived(thd->lex, DT_MERGE))
DBUG_RETURN(TRUE);
subq_sel->update_used_tables();
@@ -1455,8 +1455,8 @@ static bool replace_where_subcondition(JOIN *join, Item **expr,
static int subq_sj_candidate_cmp(Item_in_subselect* el1, Item_in_subselect* el2,
void *arg)
{
- return (el1->sj_convert_priority > el2->sj_convert_priority) ? 1 :
- ( (el1->sj_convert_priority == el2->sj_convert_priority)? 0 : -1);
+ return (el1->sj_convert_priority > el2->sj_convert_priority) ? -1 :
+ ( (el1->sj_convert_priority == el2->sj_convert_priority)? 0 : 1);
}
@@ -2455,13 +2455,9 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
JOIN_TAB *tab= join->best_positions[i].table;
join->map2table[tab->table->tablenr]= tab;
}
- //List_iterator<Item> it(right_expr_list);
- Item **ref_array= subq_select->ref_pointer_array;
- Item **ref_array_end= ref_array + subq_select->item_list.elements;
table_map map= 0;
- //while ((item= it++))
- for (;ref_array < ref_array_end; ref_array++)
- map |= (*ref_array)->used_tables();
+ for (uint i=0; i < subq_select->item_list.elements; i++)
+ map|= subq_select->ref_pointer_array[i]->used_tables();
map= map & ~PSEUDO_TABLE_BITS;
Table_map_iterator tm_it(map);
int tableno;
@@ -2525,15 +2521,14 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
Length of the temptable record, in bytes
*/
-static uint get_tmp_table_rec_length(Item **p_items, uint elements)
+static uint get_tmp_table_rec_length(Ref_ptr_array p_items, uint elements)
{
uint len= 0;
Item *item;
//List_iterator<Item> it(items);
- Item **p_item;
- for (p_item= p_items; p_item < p_items + elements ; p_item++)
+ for (uint i= 0; i < elements ; i++)
{
- item = *p_item;
+ item = p_items[i];
switch (item->result_type()) {
case REAL_RESULT:
len += sizeof(double);
@@ -2776,8 +2771,8 @@ void advance_sj_state(JOIN *join, table_map remaining_tables, uint idx,
LooseScan detector in best_access_path)
*/
remaining_tables &= ~new_join_tab->table->map;
- table_map dups_producing_tables, prev_dups_producing_tables= 0,
- prev_sjm_lookup_tables= 0;
+ table_map dups_producing_tables, UNINIT_VAR(prev_dups_producing_tables),
+ UNINIT_VAR(prev_sjm_lookup_tables);
if (idx == join->const_tables)
dups_producing_tables= 0;
@@ -2788,7 +2783,7 @@ void advance_sj_state(JOIN *join, table_map remaining_tables, uint idx,
if ((emb_sj_nest= new_join_tab->emb_sj_nest))
dups_producing_tables |= emb_sj_nest->sj_inner_tables;
- Semi_join_strategy_picker **strategy, **prev_strategy= NULL;
+ Semi_join_strategy_picker **strategy, **prev_strategy= 0;
if (idx == join->const_tables)
{
/* First table, initialize pickers */
@@ -3989,13 +3984,13 @@ bool setup_sj_materialization_part2(JOIN_TAB *sjm_tab)
*/
sjm->copy_field= new Copy_field[sjm->sjm_table_cols.elements];
//it.rewind();
- Item **p_item= emb_sj_nest->sj_subq_pred->unit->first_select()->ref_pointer_array;
+ Ref_ptr_array p_items= emb_sj_nest->sj_subq_pred->unit->first_select()->ref_pointer_array;
for (uint i=0; i < sjm->sjm_table_cols.elements; i++)
{
bool dummy;
Item_equal *item_eq;
//Item *item= (it++)->real_item();
- Item *item= (*(p_item++))->real_item();
+ Item *item= p_items[i]->real_item();
DBUG_ASSERT(item->type() == Item::FIELD_ITEM);
Field *copy_to= ((Item_field*)item)->field;
/*
@@ -4220,7 +4215,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
{
/* if we run out of slots or we are not using tempool */
sprintf(path,"%s%lx_%lx_%x", tmp_file_prefix,current_pid,
- thd->thread_id, thd->tmp_table++);
+ (ulong) thd->thread_id, thd->tmp_table++);
}
fn_format(path, path, mysql_tmpdir, "", MY_REPLACE_EXT|MY_UNPACK_FILENAME);
@@ -4243,7 +4238,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
&tmpname, (uint) strlen(path)+1,
&group_buff, (!using_unique_constraint ?
uniq_tuple_length_arg : 0),
- &bitmaps, bitmap_buffer_size(1)*5,
+ &bitmaps, bitmap_buffer_size(1)*6,
NullS))
{
if (temp_pool_slot != MY_BIT_NONE)
@@ -4265,7 +4260,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
table->alias.set("weedout-tmp", sizeof("weedout-tmp")-1,
table_alias_charset);
table->reginfo.lock_type=TL_WRITE; /* Will be updated */
- table->db_stat=HA_OPEN_KEYFILE+HA_OPEN_RNDFILE;
+ table->db_stat=HA_OPEN_KEYFILE;
table->map=1;
table->temp_pool_slot = temp_pool_slot;
table->copy_blobs= 1;
@@ -4400,13 +4395,13 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
field->set_table_name(&table->alias);
}
- if (thd->variables.tmp_table_size == ~ (ulonglong) 0) // No limit
+ if (thd->variables.tmp_memory_table_size == ~ (ulonglong) 0) // No limit
share->max_rows= ~(ha_rows) 0;
else
share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
- MY_MIN(thd->variables.tmp_table_size,
- thd->variables.max_heap_table_size) :
- thd->variables.tmp_table_size) /
+ MY_MIN(thd->variables.tmp_memory_table_size,
+ thd->variables.max_heap_table_size) :
+ thd->variables.tmp_memory_table_size) /
share->reclength);
set_if_bigger(share->max_rows,1); // For dummy start options
@@ -4440,7 +4435,6 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
field->null_ptr,
field->null_bit)))
goto err;
- key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL; //todo need this?
}
keyinfo->key_length+= key_part_info->length;
}
@@ -5004,8 +4998,6 @@ int clear_sj_tmp_tables(JOIN *join)
{
if ((res= table->file->ha_delete_all_rows()))
return res; /* purecov: inspected */
- free_io_cache(table);
- filesort_free_buffers(table,0);
}
SJ_MATERIALIZATION_INFO *sjm;
@@ -5870,7 +5862,7 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
*/
/* C.1 Compute the cost of the materialization strategy. */
//uint rowlen= get_tmp_table_rec_length(unit->first_select()->item_list);
- uint rowlen= get_tmp_table_rec_length(ref_pointer_array,
+ uint rowlen= get_tmp_table_rec_length(ref_ptrs,
select_lex->item_list.elements);
/* The cost of writing one row into the temporary table. */
double write_cost= get_tmp_table_write_cost(thd, inner_record_count_1,
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 5d5132e7fee..8a75aaed8d6 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -408,7 +408,7 @@ int opt_sum_query(THD *thd,
if (!error && reckey_in_range(is_max, &ref, item_field->field,
conds, range_fl, prefix_len))
error= HA_ERR_KEY_NOT_FOUND;
- table->disable_keyread();
+ table->file->ha_end_keyread();
table->file->ha_index_end();
if (error)
{
@@ -470,7 +470,7 @@ int opt_sum_query(THD *thd,
{
if (recalc_const_item)
item->update_used_tables();
- if (!item->const_item())
+ if (!item->const_item() && item->type() != Item::WINDOW_FUNC_ITEM)
const_result= 0;
}
}
@@ -768,12 +768,12 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
key_part_map org_key_part_used= *key_part_used;
if (eq_type || between || max_fl == less_fl)
{
- uint length= (key_ptr-ref->key_buff)+part->store_length;
+ uint length= (uint)(key_ptr-ref->key_buff)+part->store_length;
if (ref->key_length < length)
{
/* Ultimately ref->key_length will contain the length of the search key */
ref->key_length= length;
- ref->key_parts= (part - keyinfo->key_part) + 1;
+ ref->key_parts= (uint)(part - keyinfo->key_part) + 1;
}
if (!*prefix_len && part+1 == field_part)
*prefix_len= length;
@@ -977,7 +977,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref,
converted (for example to upper case)
*/
if (field->part_of_key.is_set(idx))
- table->enable_keyread();
+ table->file->ha_start_keyread(idx);
DBUG_RETURN(TRUE);
}
}
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc
index da1706e630f..a480a1659e9 100644
--- a/sql/opt_table_elimination.cc
+++ b/sql/opt_table_elimination.cc
@@ -848,7 +848,7 @@ bool check_func_dependency(JOIN *join,
*/
uint and_level=0;
build_eq_mods_for_cond(join->thd, &dac, &last_eq_mod, &and_level, cond);
- if (!(dac.n_equality_mods= last_eq_mod - dac.equality_mods))
+ if (!(dac.n_equality_mods= (uint)(last_eq_mod - dac.equality_mods)))
return FALSE; /* No useful conditions */
List<Dep_module> bound_modules;
@@ -1061,7 +1061,7 @@ bool Dep_analysis_context::setup_equality_modules_deps(List<Dep_module>
eq_mod < equality_mods + n_equality_mods;
eq_mod++)
{
- deps_recorder.expr_offset= eq_mod - equality_mods;
+ deps_recorder.expr_offset= (uint)(eq_mod - equality_mods);
deps_recorder.visited_other_tables= FALSE;
eq_mod->unbound_args= 0;
@@ -1069,7 +1069,7 @@ bool Dep_analysis_context::setup_equality_modules_deps(List<Dep_module>
{
/* Regular tbl.col=expr(tblX1.col1, tblY1.col2, ...) */
eq_mod->expr->walk(&Item::enumerate_field_refs_processor, FALSE,
- (uchar*)&deps_recorder);
+ &deps_recorder);
}
else
{
@@ -1079,7 +1079,7 @@ bool Dep_analysis_context::setup_equality_modules_deps(List<Dep_module>
Dep_value_field* field_val;
while ((field_val= it++))
{
- uint offs= field_val->bitmap_offset + eq_mod - equality_mods;
+ uint offs= (uint)(field_val->bitmap_offset + eq_mod - equality_mods);
bitmap_set_bit(&expr_deps, offs);
}
}
@@ -1105,7 +1105,7 @@ int compare_field_values(Dep_value_field *a, Dep_value_field *b, void *unused)
uint b_ratio= b->field->table->tablenr*MAX_FIELDS +
b->field->field_index;
- return (a_ratio < b_ratio)? -1 : ((a_ratio == b_ratio)? 0 : 1);
+ return (a_ratio < b_ratio)? 1 : ((a_ratio == b_ratio)? 0 : -1);
}
@@ -1158,7 +1158,7 @@ void build_eq_mods_for_cond(THD *thd, Dep_analysis_context *ctx,
if (cond->type() == Item_func::COND_ITEM)
{
List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list());
- uint orig_offset= *eq_mod - ctx->equality_mods;
+ size_t orig_offset= *eq_mod - ctx->equality_mods;
/* AND/OR */
if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
diff --git a/sql/parse_file.cc b/sql/parse_file.cc
index f3dab4f7b2f..2aad3f38761 100644
--- a/sql/parse_file.cc
+++ b/sql/parse_file.cc
@@ -255,12 +255,12 @@ sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name,
File handler;
IO_CACHE file;
char path[FN_REFLEN+1]; // +1 to put temporary file name for sure
- int path_end;
+ size_t path_end;
File_option *param;
DBUG_ENTER("sql_create_definition_file");
- DBUG_PRINT("enter", ("Dir: %s, file: %s, base 0x%lx",
+ DBUG_PRINT("enter", ("Dir: %s, file: %s, base %p",
dir ? dir->str : "",
- file_name->str, (ulong) base));
+ file_name->str, base));
if (dir)
{
@@ -437,7 +437,7 @@ sql_parse_prepare(const LEX_STRING *file_name, MEM_ROOT *mem_root,
DBUG_RETURN(0);
}
- if ((len= mysql_file_read(file, (uchar *)buff, stat_info.st_size,
+ if ((len= mysql_file_read(file, (uchar *)buff, (size_t)stat_info.st_size,
MYF(MY_WME))) == MY_FILE_ERROR)
{
mysql_file_close(file, MYF(MY_WME));
@@ -660,7 +660,7 @@ parse_quoted_escaped_string(const char *ptr, const char *end,
@param[in,out] ptr pointer to parameter
@param[in] end end of the configuration
- @param[in] line pointer to the line begining
+ @param[in] line pointer to the line beginning
@param[in] base base address for parameter writing (structure
like TABLE)
@param[in] parameter description
diff --git a/sql/partition_element.h b/sql/partition_element.h
index 308a4d6ddd2..b979b7a58e6 100644
--- a/sql/partition_element.h
+++ b/sql/partition_element.h
@@ -65,7 +65,7 @@ typedef struct p_column_list_val
Item* item_expression;
partition_info *part_info;
uint partition_id;
- bool max_value;
+ bool max_value; // MAXVALUE for RANGE type or DEFAULT value for LIST type
bool null_value;
char fixed;
} part_column_list_val;
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 892b7e8bd05..3d10166b3d8 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -39,15 +39,15 @@ partition_info *partition_info::get_clone(THD *thd)
{
MEM_ROOT *mem_root= thd->mem_root;
DBUG_ENTER("partition_info::get_clone");
+
List_iterator<partition_element> part_it(partitions);
partition_element *part;
- partition_info *clone= new (mem_root) partition_info();
+ partition_info *clone= new (mem_root) partition_info(*this);
if (!clone)
{
mem_alloc_error(sizeof(partition_info));
DBUG_RETURN(NULL);
}
- *clone= *this;
memset(&(clone->read_partitions), 0, sizeof(clone->read_partitions));
memset(&(clone->lock_partitions), 0, sizeof(clone->lock_partitions));
clone->bitmaps_are_initialized= FALSE;
@@ -252,7 +252,6 @@ bool partition_info::set_partition_bitmaps(TABLE_LIST *table_list)
DBUG_ASSERT(bitmaps_are_initialized);
DBUG_ASSERT(table);
- is_pruning_completed= false;
if (!bitmaps_are_initialized)
DBUG_RETURN(TRUE);
@@ -279,249 +278,6 @@ bool partition_info::set_partition_bitmaps(TABLE_LIST *table_list)
}
-/**
- Checks if possible to do prune partitions on insert.
-
- @param thd Thread context
- @param duplic How to handle duplicates
- @param update In case of ON DUPLICATE UPDATE, default function fields
- @param update_fields In case of ON DUPLICATE UPDATE, which fields to update
- @param fields Listed fields
- @param empty_values True if values is empty (only defaults)
- @param[out] prune_needs_default_values Set on return if copying of default
- values is needed
- @param[out] can_prune_partitions Enum showing if possible to prune
- @param[inout] used_partitions If possible to prune the bitmap
- is initialized and cleared
-
- @return Operation status
- @retval false Success
- @retval true Failure
-*/
-
-bool partition_info::can_prune_insert(THD* thd,
- enum_duplicates duplic,
- COPY_INFO &update,
- List<Item> &update_fields,
- List<Item> &fields,
- bool empty_values,
- enum_can_prune *can_prune_partitions,
- bool *prune_needs_default_values,
- MY_BITMAP *used_partitions)
-{
- uint32 *bitmap_buf;
- uint bitmap_bytes;
- uint num_partitions= 0;
- *can_prune_partitions= PRUNE_NO;
- DBUG_ASSERT(bitmaps_are_initialized);
- DBUG_ENTER("partition_info::can_prune_insert");
-
- if (table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)
- DBUG_RETURN(false);
-
- /*
- If under LOCK TABLES pruning will skip start_stmt instead of external_lock
- for unused partitions.
-
- Cannot prune if there are BEFORE INSERT triggers that changes any
- partitioning column, since they may change the row to be in another
- partition.
- */
- if (table->triggers &&
- table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE) &&
- table->triggers->is_fields_updated_in_trigger(&full_part_field_set,
- TRG_EVENT_INSERT,
- TRG_ACTION_BEFORE))
- DBUG_RETURN(false);
-
- if (table->found_next_number_field)
- {
- /*
- If the field is used in the partitioning expression, we cannot prune.
- TODO: If all rows have not null values and
- is not 0 (with NO_AUTO_VALUE_ON_ZERO sql_mode), then pruning is possible!
- */
- if (bitmap_is_set(&full_part_field_set,
- table->found_next_number_field->field_index))
- DBUG_RETURN(false);
- }
-
- /*
- If updating a field in the partitioning expression, we cannot prune.
-
- Note: TIMESTAMP_AUTO_SET_ON_INSERT is handled by converting Item_null
- to the start time of the statement. Which will be the same as in
- write_row(). So pruning of TIMESTAMP DEFAULT CURRENT_TIME will work.
- But TIMESTAMP_AUTO_SET_ON_UPDATE cannot be pruned if the timestamp
- column is a part of any part/subpart expression.
- */
- if (duplic == DUP_UPDATE)
- {
- /*
- TODO: add check for static update values, which can be pruned.
- */
- if (is_field_in_part_expr(update_fields))
- DBUG_RETURN(false);
-
- /*
- Cannot prune if there are BEFORE UPDATE triggers that changes any
- partitioning column, since they may change the row to be in another
- partition.
- */
- if (table->triggers &&
- table->triggers->has_triggers(TRG_EVENT_UPDATE,
- TRG_ACTION_BEFORE) &&
- table->triggers->is_fields_updated_in_trigger(&full_part_field_set,
- TRG_EVENT_UPDATE,
- TRG_ACTION_BEFORE))
- {
- DBUG_RETURN(false);
- }
- }
-
- /*
- If not all partitioning fields are given,
- we also must set all non given partitioning fields
- to get correct defaults.
- TODO: If any gain, we could enhance this by only copy the needed default
- fields by
- 1) check which fields needs to be set.
- 2) only copy those fields from the default record.
- */
- *prune_needs_default_values= false;
- if (fields.elements)
- {
- if (!is_full_part_expr_in_fields(fields))
- *prune_needs_default_values= true;
- }
- else if (empty_values)
- {
- *prune_needs_default_values= true; // like 'INSERT INTO t () VALUES ()'
- }
- else
- {
- /*
- In case of INSERT INTO t VALUES (...) we must get values for
- all fields in table from VALUES (...) part, so no defaults
- are needed.
- */
- }
-
- /* Pruning possible, have to initialize the used_partitions bitmap. */
- num_partitions= lock_partitions.n_bits;
- bitmap_bytes= bitmap_buffer_size(num_partitions);
- if (!(bitmap_buf= (uint32*) thd->alloc(bitmap_bytes)))
- {
- mem_alloc_error(bitmap_bytes);
- DBUG_RETURN(true);
- }
- /* Also clears all bits. */
- if (my_bitmap_init(used_partitions, bitmap_buf, num_partitions, false))
- {
- /* purecov: begin deadcode */
- /* Cannot happen, due to pre-alloc. */
- mem_alloc_error(bitmap_bytes);
- DBUG_RETURN(true);
- /* purecov: end */
- }
- /*
- If no partitioning field in set (e.g. defaults) check pruning only once.
- */
- if (fields.elements &&
- !is_field_in_part_expr(fields))
- *can_prune_partitions= PRUNE_DEFAULTS;
- else
- *can_prune_partitions= PRUNE_YES;
-
- DBUG_RETURN(false);
-}
-
-
-/**
- Mark the partition, the record belongs to, as used.
-
- @param fields Fields to set
- @param values Values to use
- @param info COPY_INFO used for default values handling
- @param copy_default_values True if we should copy default values
- @param used_partitions Bitmap to set
-
- @returns Operational status
- @retval false Success
- @retval true Failure
-*/
-
-bool partition_info::set_used_partition(List<Item> &fields,
- List<Item> &values,
- COPY_INFO &info,
- bool copy_default_values,
- MY_BITMAP *used_partitions)
-{
- THD *thd= table->in_use;
- uint32 part_id;
- longlong func_value;
- Dummy_error_handler error_handler;
- bool ret= true;
- DBUG_ENTER("set_partition");
- DBUG_ASSERT(thd);
-
- /* Only allow checking of constant values */
- List_iterator_fast<Item> v(values);
- Item *item;
- thd->push_internal_handler(&error_handler);
- while ((item= v++))
- {
- if (!item->const_item())
- goto err;
- }
-
- if (copy_default_values)
- restore_record(table,s->default_values);
-
- if (fields.elements || !values.elements)
- {
- if (fill_record(thd, table, fields, values, false))
- goto err;
- }
- else
- {
- if (fill_record(thd, table, table->field, values, false, false))
- goto err;
- }
- DBUG_ASSERT(!table->auto_increment_field_not_null);
-
- /*
- Evaluate DEFAULT functions like CURRENT_TIMESTAMP.
- TODO: avoid setting non partitioning fields default value, to avoid
- overhead. Not yet done, since mostly only one DEFAULT function per
- table, or at least very few such columns.
- */
-// if (info.function_defaults_apply_on_columns(&full_part_field_set))
-// info.set_function_defaults(table);
-
- {
- /*
- This function is used in INSERT; 'values' are supplied by user,
- or are default values, not values read from a table, so read_set is
- irrelevant.
- */
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
- const int rc= get_partition_id(this, &part_id, &func_value);
- dbug_tmp_restore_column_map(table->read_set, old_map);
- if (rc)
- goto err;
- }
-
- DBUG_PRINT("info", ("Insert into partition %u", part_id));
- bitmap_set_bit(used_partitions, part_id);
- ret= false;
-
-err:
- thd->pop_internal_handler();
- DBUG_RETURN(ret);
-}
-
-
/*
Create a memory area where default partition names are stored and fill it
up with the names.
@@ -544,11 +300,11 @@ err:
#define MAX_PART_NAME_SIZE 8
-char *partition_info::create_default_partition_names(uint part_no,
+char *partition_info::create_default_partition_names(THD *thd, uint part_no,
uint num_parts_arg,
uint start_no)
{
- char *ptr= (char*) sql_calloc(num_parts_arg*MAX_PART_NAME_SIZE);
+ char *ptr= (char*) thd->calloc(num_parts_arg * MAX_PART_NAME_SIZE);
char *move_ptr= ptr;
uint i= 0;
DBUG_ENTER("create_default_partition_names");
@@ -570,42 +326,6 @@ char *partition_info::create_default_partition_names(uint part_no,
/*
- Generate a version string for partition expression
- This function must be updated every time there is a possibility for
- a new function of a higher version number than 5.5.0.
-
- SYNOPSIS
- set_show_version_string()
- RETURN VALUES
- None
-*/
-void partition_info::set_show_version_string(String *packet)
-{
- int version= 0;
- if (column_list)
- packet->append(STRING_WITH_LEN("\n/*!50500"));
- else
- {
- if (part_expr)
- part_expr->walk(&Item::intro_version, 0, (uchar*)&version);
- if (subpart_expr)
- subpart_expr->walk(&Item::intro_version, 0, (uchar*)&version);
- if (version == 0)
- {
- /* No new functions in partition function */
- packet->append(STRING_WITH_LEN("\n/*!50100"));
- }
- else
- {
- char buf[65];
- char *buf_ptr= longlong10_to_str((longlong)version, buf, 10);
- packet->append(STRING_WITH_LEN("\n/*!"));
- packet->append(buf, (size_t)(buf_ptr - buf));
- }
- }
-}
-
-/*
Create a unique name for the subpartition as part_name'sp''subpart_no'
SYNOPSIS
@@ -617,11 +337,11 @@ void partition_info::set_show_version_string(String *packet)
0 Memory allocation error
*/
-char *partition_info::create_default_subpartition_name(uint subpart_no,
+char *partition_info::create_default_subpartition_name(THD *thd, uint subpart_no,
const char *part_name)
{
uint size_alloc= strlen(part_name) + MAX_PART_NAME_SIZE;
- char *ptr= (char*) sql_calloc(size_alloc);
+ char *ptr= (char*) thd->calloc(size_alloc);
DBUG_ENTER("create_default_subpartition_name");
if (likely(ptr != NULL))
@@ -660,7 +380,7 @@ char *partition_info::create_default_subpartition_name(uint subpart_no,
The external routine needing this code is check_partition_info
*/
-bool partition_info::set_up_default_partitions(handler *file,
+bool partition_info::set_up_default_partitions(THD *thd, handler *file,
HA_CREATE_INFO *info,
uint start_no)
{
@@ -673,9 +393,9 @@ bool partition_info::set_up_default_partitions(handler *file,
{
const char *error_string;
if (part_type == RANGE_PARTITION)
- error_string= partition_keywords[PKW_RANGE].str;
+ error_string= "RANGE";
else
- error_string= partition_keywords[PKW_LIST].str;
+ error_string= "LIST";
my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_string);
goto end;
}
@@ -692,7 +412,8 @@ bool partition_info::set_up_default_partitions(handler *file,
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
goto end;
}
- if (unlikely((!(default_name= create_default_partition_names(0, num_parts,
+ if (unlikely((!(default_name= create_default_partition_names(thd, 0,
+ num_parts,
start_no)))))
goto end;
i= 0;
@@ -741,7 +462,7 @@ end:
The external routine needing this code is check_partition_info
*/
-bool partition_info::set_up_default_subpartitions(handler *file,
+bool partition_info::set_up_default_subpartitions(THD *thd, handler *file,
HA_CREATE_INFO *info)
{
uint i, j;
@@ -768,7 +489,7 @@ bool partition_info::set_up_default_subpartitions(handler *file,
if (likely(subpart_elem != 0 &&
(!part_elem->subpartitions.push_back(subpart_elem))))
{
- char *ptr= create_default_subpartition_name(j,
+ char *ptr= create_default_subpartition_name(thd, j,
part_elem->partition_name);
if (!ptr)
goto end;
@@ -806,7 +527,7 @@ end:
this will return an error.
*/
-bool partition_info::set_up_defaults_for_partitioning(handler *file,
+bool partition_info::set_up_defaults_for_partitioning(THD *thd, handler *file,
HA_CREATE_INFO *info,
uint start_no)
{
@@ -816,10 +537,10 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file,
{
default_partitions_setup= TRUE;
if (use_default_partitions)
- DBUG_RETURN(set_up_default_partitions(file, info, start_no));
+ DBUG_RETURN(set_up_default_partitions(thd, file, info, start_no));
if (is_sub_partitioned() &&
use_default_subpartitions)
- DBUG_RETURN(set_up_default_subpartitions(file, info));
+ DBUG_RETURN(set_up_default_subpartitions(thd, file, info));
}
DBUG_RETURN(FALSE);
}
@@ -1469,6 +1190,8 @@ bool partition_info::check_list_constants(THD *thd)
List_iterator<partition_element> list_func_it(partitions);
DBUG_ENTER("partition_info::check_list_constants");
+ DBUG_ASSERT(part_type == LIST_PARTITION);
+
num_list_values= 0;
/*
We begin by calculating the number of list values that have been
@@ -1500,21 +1223,15 @@ bool partition_info::check_list_constants(THD *thd)
has_null_part_id= i;
found_null= TRUE;
}
- List_iterator<part_elem_value> list_val_it1(part_def->list_val_list);
- while (list_val_it1++)
- num_list_values++;
+ num_list_values+= part_def->list_val_list.elements;
} while (++i < num_parts);
list_func_it.rewind();
num_column_values= part_field_list.elements;
size_entries= column_list ?
(num_column_values * sizeof(part_column_list_val)) :
sizeof(LIST_PART_ENTRY);
- ptr= thd->calloc((num_list_values+1) * size_entries);
- if (unlikely(ptr == NULL))
- {
- mem_alloc_error(num_list_values * size_entries);
+ if (unlikely(!(ptr= thd->calloc((num_list_values+1) * size_entries))))
goto end;
- }
if (column_list)
{
part_column_list_val *loc_list_col_array;
@@ -1525,6 +1242,13 @@ bool partition_info::check_list_constants(THD *thd)
do
{
part_def= list_func_it++;
+ if (part_def->max_value)
+ {
+ // DEFAULT is not a real value so let's exclude it from sorting.
+ DBUG_ASSERT(num_list_values);
+ num_list_values--;
+ continue;
+ }
List_iterator<part_elem_value> list_val_it2(part_def->list_val_list);
while ((list_value= list_val_it2++))
{
@@ -1554,6 +1278,13 @@ bool partition_info::check_list_constants(THD *thd)
do
{
part_def= list_func_it++;
+ if (part_def->max_value && part_type == LIST_PARTITION)
+ {
+ // DEFAULT is not a real value so let's exclude it from sorting.
+ DBUG_ASSERT(num_list_values);
+ num_list_values--;
+ continue;
+ }
List_iterator<part_elem_value> list_val_it2(part_def->list_val_list);
while ((list_value= list_val_it2++))
{
@@ -1666,8 +1397,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
if (!list_of_part_fields)
{
DBUG_ASSERT(part_expr);
- err= part_expr->walk(&Item::check_partition_func_processor, 0,
- NULL);
+ err= part_expr->walk(&Item::check_partition_func_processor, 0, NULL);
}
/* Check for sub partition expression. */
@@ -1701,7 +1431,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
my_error(ER_SUBPARTITION_ERROR, MYF(0));
goto end;
}
- if (unlikely(set_up_defaults_for_partitioning(file, info, (uint)0)))
+ if (unlikely(set_up_defaults_for_partitioning(thd, file, info, (uint)0)))
goto end;
if (!(tot_partitions= get_tot_partitions()))
{
@@ -1938,11 +1668,11 @@ void partition_info::print_no_partition_found(TABLE *table_arg, myf errflag)
FALSE Success
*/
-bool partition_info::set_part_expr(char *start_token, Item *item_ptr,
+bool partition_info::set_part_expr(THD *thd, char *start_token, Item *item_ptr,
char *end_token, bool is_subpart)
{
- uint expr_len= end_token - start_token;
- char *func_string= (char*) sql_memdup(start_token, expr_len);
+ size_t expr_len= end_token - start_token;
+ char *func_string= (char*) thd->memdup(start_token, expr_len);
if (!func_string)
{
@@ -1953,15 +1683,11 @@ bool partition_info::set_part_expr(char *start_token, Item *item_ptr,
{
list_of_subpart_fields= FALSE;
subpart_expr= item_ptr;
- subpart_func_string= func_string;
- subpart_func_len= expr_len;
}
else
{
list_of_part_fields= FALSE;
part_expr= item_ptr;
- part_func_string= func_string;
- part_func_len= expr_len;
}
return FALSE;
}
@@ -2203,71 +1929,6 @@ void partition_info::report_part_expr_error(bool use_subpart_expr)
}
-/**
- Check if fields are in the partitioning expression.
-
- @param fields List of Items (fields)
-
- @return True if any field in the fields list is used by a partitioning expr.
- @retval true At least one field in the field list is found.
- @retval false No field is within any partitioning expression.
-*/
-
-bool partition_info::is_field_in_part_expr(List<Item> &fields)
-{
- List_iterator<Item> it(fields);
- Item *item;
- Item_field *field;
- DBUG_ENTER("is_fields_in_part_expr");
- while ((item= it++))
- {
- field= item->field_for_view_update();
- DBUG_ASSERT(field->field->table == table);
- if (bitmap_is_set(&full_part_field_set, field->field->field_index))
- DBUG_RETURN(true);
- }
- DBUG_RETURN(false);
-}
-
-
-/**
- Check if all partitioning fields are included.
-*/
-
-bool partition_info::is_full_part_expr_in_fields(List<Item> &fields)
-{
- Field **part_field= full_part_field_array;
- DBUG_ASSERT(*part_field);
- DBUG_ENTER("is_full_part_expr_in_fields");
- /*
- It is very seldom many fields in full_part_field_array, so it is OK
- to loop over all of them instead of creating a bitmap fields argument
- to compare with.
- */
- do
- {
- List_iterator<Item> it(fields);
- Item *item;
- Item_field *field;
- bool found= false;
-
- while ((item= it++))
- {
- field= item->field_for_view_update();
- DBUG_ASSERT(field->field->table == table);
- if (*part_field == field->field)
- {
- found= true;
- break;
- }
- }
- if (!found)
- DBUG_RETURN(false);
- } while (*(++part_field));
- DBUG_RETURN(true);
-}
-
-
/*
Create a new column value in current list with maxvalue
Called from parser
@@ -2284,11 +1945,19 @@ int partition_info::add_max_value(THD *thd)
DBUG_ENTER("partition_info::add_max_value");
part_column_list_val *col_val;
- if (!(col_val= add_column_value(thd)))
+ /*
+ Makes for LIST COLUMNS 'num_columns' DEFAULT tuples, 1 tuple for RANGEs
+ */
+ uint max_val= (num_columns && part_type == LIST_PARTITION) ?
+ num_columns : 1;
+ for (uint i= 0; i < max_val; i++)
{
- DBUG_RETURN(TRUE);
+ if (!(col_val= add_column_value(thd)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ col_val->max_value= TRUE;
}
- col_val->max_value= TRUE;
DBUG_RETURN(FALSE);
}
@@ -2326,9 +1995,11 @@ part_column_list_val *partition_info::add_column_value(THD *thd)
into the structure used for 1 column. After this we call
ourselves recursively which should always succeed.
*/
+ num_columns= curr_list_object;
if (!reorganize_into_single_field_col_val(thd))
{
- DBUG_RETURN(add_column_value(thd));
+ if (!init_column_part(thd))
+ DBUG_RETURN(add_column_value(thd));
}
DBUG_RETURN(NULL);
}
@@ -2421,8 +2092,7 @@ bool partition_info::add_column_list_value(THD *thd, Item *item)
else
thd->where= "partition function";
- if (item->walk(&Item::check_partition_func_processor, 0,
- NULL))
+ if (item->walk(&Item::check_partition_func_processor, 0, NULL))
{
my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
DBUG_RETURN(TRUE);
@@ -2564,8 +2234,7 @@ int partition_info::reorganize_into_single_field_col_val(THD *thd)
*/
int partition_info::fix_partition_values(THD *thd,
part_elem_value *val,
- partition_element *part_elem,
- uint part_id)
+ partition_element *part_elem)
{
part_column_list_val *col_val= val->col_val_array;
DBUG_ENTER("partition_info::fix_partition_values");
@@ -2574,59 +2243,31 @@ int partition_info::fix_partition_values(THD *thd,
{
DBUG_RETURN(FALSE);
}
- if (val->added_items != 1)
- {
- my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (col_val->max_value)
+
+ Item *item_expr= col_val->item_expression;
+ if ((val->null_value= item_expr->null_value))
{
- /* The parser ensures we're not LIST partitioned here */
- DBUG_ASSERT(part_type == RANGE_PARTITION);
- if (defined_max_value)
- {
- my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (part_id == (num_parts - 1))
- {
- defined_max_value= TRUE;
- part_elem->max_value= TRUE;
- part_elem->range_value= LONGLONG_MAX;
- }
- else
+ if (part_elem->has_null_value)
{
- my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
+ my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
+ part_elem->has_null_value= TRUE;
}
- else
+ else if (item_expr->result_type() != INT_RESULT)
{
- Item *item_expr= col_val->item_expression;
- if ((val->null_value= item_expr->null_value))
- {
- if (part_elem->has_null_value)
- {
- my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
- }
- part_elem->has_null_value= TRUE;
- }
- else if (item_expr->result_type() != INT_RESULT)
+ my_error(ER_VALUES_IS_NOT_INT_TYPE_ERROR, MYF(0),
+ part_elem->partition_name);
+ DBUG_RETURN(TRUE);
+ }
+ if (part_type == RANGE_PARTITION)
+ {
+ if (part_elem->has_null_value)
{
- my_error(ER_VALUES_IS_NOT_INT_TYPE_ERROR, MYF(0),
- part_elem->partition_name);
+ my_error(ER_NULL_IN_VALUES_LESS_THAN, MYF(0));
DBUG_RETURN(TRUE);
}
- if (part_type == RANGE_PARTITION)
- {
- if (part_elem->has_null_value)
- {
- my_error(ER_NULL_IN_VALUES_LESS_THAN, MYF(0));
- DBUG_RETURN(TRUE);
- }
- part_elem->range_value= val->value;
- }
+ part_elem->range_value= val->value;
}
col_val->fixed= 2;
DBUG_RETURN(FALSE);
@@ -2827,6 +2468,7 @@ bool partition_info::fix_parser_data(THD *thd)
key_algorithm == KEY_ALGORITHM_NONE)
key_algorithm= KEY_ALGORITHM_55;
}
+ defined_max_value= FALSE; // in case it already set (CREATE TABLE LIKE)
do
{
part_elem= it++;
@@ -2836,16 +2478,60 @@ bool partition_info::fix_parser_data(THD *thd)
DBUG_RETURN(true);
DBUG_ASSERT(part_type == RANGE_PARTITION ?
num_elements == 1U : TRUE);
+
for (j= 0; j < num_elements; j++)
{
part_elem_value *val= list_val_it++;
- if (column_list)
+
+ if (val->added_items != (column_list ? num_columns : 1))
+ {
+ my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ Check the last MAX_VALUE for range partitions and DEFAULT value
+ for LIST partitions.
+ Both values are marked with defined_max_value and
+ default_partition_id.
+
+ This is a max_value/default is max_value is set and this is
+ a normal RANGE (no column list) or if it's a LIST partition:
+
+ PARTITION p3 VALUES LESS THAN MAXVALUE
+ or
+ PARTITION p3 VALUES DEFAULT
+ */
+ if (val->added_items && val->col_val_array[0].max_value &&
+ (!column_list || part_type == LIST_PARTITION))
{
- if (val->added_items != num_columns)
+ DBUG_ASSERT(part_type == RANGE_PARTITION ||
+ part_type == LIST_PARTITION);
+ if (defined_max_value)
{
- my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
+ my_error((part_type == RANGE_PARTITION) ?
+ ER_PARTITION_MAXVALUE_ERROR :
+ ER_PARTITION_DEFAULT_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
+
+ /* For RANGE PARTITION MAX_VALUE must be last */
+ if (i != (num_parts - 1) &&
+ part_type != LIST_PARTITION)
+ {
+ my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ defined_max_value= TRUE;
+ default_partition_id= i;
+ part_elem->max_value= TRUE;
+ part_elem->range_value= LONGLONG_MAX;
+ continue;
+ }
+
+ if (column_list)
+ {
for (k= 0; k < num_columns; k++)
{
part_column_list_val *col_val= &val->col_val_array[k];
@@ -2858,10 +2544,8 @@ bool partition_info::fix_parser_data(THD *thd)
}
else
{
- if (fix_partition_values(thd, val, part_elem, i))
- {
+ if (fix_partition_values(thd, val, part_elem))
DBUG_RETURN(TRUE);
- }
if (val->null_value)
{
/*
@@ -3171,7 +2855,7 @@ part_column_list_val *partition_info::add_column_value(THD *thd)
return NULL;
}
-bool partition_info::set_part_expr(char *start_token, Item *item_ptr,
+bool partition_info::set_part_expr(THD *thd, char *start_token, Item *item_ptr,
char *end_token, bool is_subpart)
{
(void)start_token;
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 0f38c87dd4e..ab880f24a74 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -165,9 +165,6 @@ public:
longlong err_value;
char* part_info_string;
- char *part_func_string;
- char *subpart_func_string;
-
partition_element *curr_part_elem; // part or sub part
partition_element *current_partition; // partition
part_elem_value *curr_list_val;
@@ -188,8 +185,6 @@ public:
partition_type subpart_type;
uint part_info_len;
- uint part_func_len;
- uint subpart_func_len;
uint num_parts;
uint num_subparts;
@@ -202,6 +197,7 @@ public:
uint num_full_part_fields;
uint has_null_part_id;
+ uint32 default_partition_id;
/*
This variable is used to calculate the partition id when using
LINEAR KEY/HASH. This functionality is kept in the MySQL Server
@@ -230,6 +226,10 @@ public:
bool use_default_num_subpartitions;
bool default_partitions_setup;
bool defined_max_value;
+ inline bool has_default_partititon()
+ {
+ return (part_type == LIST_PARTITION && defined_max_value);
+ }
bool list_of_part_fields; // KEY or COLUMNS PARTITIONING
bool list_of_subpart_fields; // KEY SUBPARTITIONING
bool linear_hash_ind; // LINEAR HASH/KEY
@@ -237,15 +237,6 @@ public:
bool is_auto_partitioned;
bool has_null_value;
bool column_list; // COLUMNS PARTITIONING, 5.5+
- /**
- True if pruning has been completed and can not be pruned any further,
- even if there are subqueries or stored programs in the condition.
-
- Some times it is needed to run prune_partitions() a second time to prune
- read partitions after tables are locked, when subquery and
- stored functions might have been evaluated.
- */
- bool is_pruning_completed;
partition_info()
: get_partition_id(NULL), get_part_partition_id(NULL),
@@ -261,13 +252,11 @@ public:
bitmaps_are_initialized(FALSE),
list_array(NULL), err_value(0),
part_info_string(NULL),
- part_func_string(NULL), subpart_func_string(NULL),
curr_part_elem(NULL), current_partition(NULL),
curr_list_object(0), num_columns(0), table(NULL),
default_engine_type(NULL),
part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
part_info_len(0),
- part_func_len(0), subpart_func_len(0),
num_parts(0), num_subparts(0),
count_curr_subparts(0),
num_list_values(0), num_part_fields(0), num_subpart_fields(0),
@@ -279,7 +268,7 @@ public:
list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
linear_hash_ind(FALSE), fixed(FALSE),
is_auto_partitioned(FALSE),
- has_null_value(FALSE), column_list(FALSE), is_pruning_completed(false)
+ has_null_value(FALSE), column_list(FALSE)
{
all_fields_in_PF.clear_all();
all_fields_in_PPF.clear_all();
@@ -307,7 +296,8 @@ public:
return num_parts * (is_sub_partitioned() ? num_subparts : 1);
}
- bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info,
+ bool set_up_defaults_for_partitioning(THD *thd, handler *file,
+ HA_CREATE_INFO *info,
uint start_no);
char *find_duplicate_field();
char *find_duplicate_name();
@@ -322,8 +312,7 @@ public:
Item* get_column_item(Item *item, Field *field);
int fix_partition_values(THD *thd,
part_elem_value *val,
- partition_element *part_elem,
- uint part_id);
+ partition_element *part_elem);
bool fix_column_value_functions(THD *thd,
part_elem_value *val,
uint part_id);
@@ -332,56 +321,30 @@ public:
void init_col_val(part_column_list_val *col_val, Item *item);
int reorganize_into_single_field_col_val(THD *thd);
part_column_list_val *add_column_value(THD *thd);
- bool set_part_expr(char *start_token, Item *item_ptr,
+ bool set_part_expr(THD *thd, char *start_token, Item *item_ptr,
char *end_token, bool is_subpart);
static int compare_column_values(const void *a, const void *b);
bool set_up_charset_field_preps(THD *thd);
bool check_partition_field_length();
bool init_column_part(THD *thd);
bool add_column_list_value(THD *thd, Item *item);
- void set_show_version_string(String *packet);
partition_element *get_part_elem(const char *partition_name, char *file_name,
size_t file_name_size, uint32 *part_id);
void report_part_expr_error(bool use_subpart_expr);
- bool set_used_partition(List<Item> &fields,
- List<Item> &values,
- COPY_INFO &info,
- bool copy_default_values,
- MY_BITMAP *used_partitions);
- /**
- PRUNE_NO - Unable to prune.
- PRUNE_DEFAULTS - Partitioning field is only set to
- DEFAULT values, only need to check
- pruning for one row where the DEFAULTS
- values are set.
- PRUNE_YES - Pruning is possible, calculate the used partition set
- by evaluate the partition_id on row by row basis.
- */
- enum enum_can_prune {PRUNE_NO=0, PRUNE_DEFAULTS, PRUNE_YES};
- bool can_prune_insert(THD *thd,
- enum_duplicates duplic,
- COPY_INFO &update,
- List<Item> &update_fields,
- List<Item> &fields,
- bool empty_values,
- enum_can_prune *can_prune_partitions,
- bool *prune_needs_default_values,
- MY_BITMAP *used_partitions);
bool has_same_partitioning(partition_info *new_part_info);
bool error_if_requires_values() const;
private:
static int list_part_cmp(const void* a, const void* b);
- bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info,
+ bool set_up_default_partitions(THD *thd, handler *file, HA_CREATE_INFO *info,
uint start_no);
- bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info);
- char *create_default_partition_names(uint part_no, uint num_parts,
+ bool set_up_default_subpartitions(THD *thd, handler *file,
+ HA_CREATE_INFO *info);
+ char *create_default_partition_names(THD *thd, uint part_no, uint num_parts,
uint start_no);
- char *create_default_subpartition_name(uint subpart_no,
+ char *create_default_subpartition_name(THD *thd, uint subpart_no,
const char *part_name);
bool prune_partition_bitmaps(TABLE_LIST *table_list);
bool add_named_partition(const char *part_name, uint length);
- bool is_field_in_part_expr(List<Item> &fields);
- bool is_full_part_expr_in_fields(List<Item> &fields);
public:
bool has_unique_name(partition_element *element);
bool field_in_partition_expr(Field *field) const;
@@ -398,6 +361,7 @@ static inline void init_single_partition_iterator(uint32 part_id,
part_iter->part_nums.start= part_iter->part_nums.cur= part_id;
part_iter->part_nums.end= part_id+1;
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= FALSE;
part_iter->get_next= get_next_partition_id_range;
}
@@ -409,6 +373,7 @@ void init_all_partitions_iterator(partition_info *part_info,
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
part_iter->part_nums.end= part_info->num_parts;
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= FALSE;
part_iter->get_next= get_next_partition_id_range;
}
diff --git a/sql/password.c b/sql/password.c
index 02e4a0c37c7..debdf598189 100644
--- a/sql/password.c
+++ b/sql/password.c
@@ -90,7 +90,7 @@
void hash_password(ulong *result, const char *password, uint password_len)
{
- register ulong nr=1345345333L, add=7, nr2=0x12345671L;
+ ulong nr=1345345333L, add=7, nr2=0x12345671L;
ulong tmp;
const char *password_end= password + password_len;
for (; password < password_end; password++)
@@ -278,29 +278,6 @@ void make_password_from_salt_323(char *to, const ulong *salt)
**************** MySQL 4.1.1 authentication routines *************
*/
-#if MYSQL_VERSION_ID < 0x100200
-/**
- Generate string of printable random characters of requested length.
-
- @param to[out] Buffer for generation; must be at least length+1 bytes
- long; result string is always null-terminated
- length[in] How many random characters to put in buffer
- rand_st Structure used for number generation
-*/
-
-void create_random_string(char *to, uint length,
- struct my_rnd_struct *rand_st)
-{
- char *end= to + length;
- /* Use pointer arithmetics as it is faster way to do so. */
- for (; to < end; to++)
- *to= (char) (my_rnd(rand_st)*94+33);
- *to= '\0';
-}
-#else
-#error
-#endif
-
/* Character to use as version identifier for version 4.1 */
@@ -348,7 +325,7 @@ hex2octet(uint8 *to, const char *str, uint len)
const char *str_end= str + len;
while (str < str_end)
{
- register char tmp= char_val(*str++);
+ char tmp= char_val(*str++);
*to++= (tmp << 4) | char_val(*str++);
}
}
diff --git a/sql/plistsort.c b/sql/plistsort.c
index 99657410fe0..e66bd7c7276 100644
--- a/sql/plistsort.c
+++ b/sql/plistsort.c
@@ -91,7 +91,7 @@ recursion_point:
}
{
- register struct LS_STRUCT_NAME *sp0= sp++;
+ struct LS_STRUCT_NAME *sp0= sp++;
sp->list_len= sp0->list_len >> 1;
sp0->list_len-= sp->list_len;
sp->return_point= 0;
@@ -100,7 +100,7 @@ recursion_point:
return_point0:
sp->list1= sorted_list;
{
- register struct LS_STRUCT_NAME *sp0= sp++;
+ struct LS_STRUCT_NAME *sp0= sp++;
list= list_end;
sp->list_len= sp0->list_len;
sp->return_point= 1;
@@ -108,9 +108,9 @@ return_point0:
goto recursion_point;
return_point1:
{
- register LS_LIST_ITEM **hook= &sorted_list;
- register LS_LIST_ITEM *list1= sp->list1;
- register LS_LIST_ITEM *list2= sorted_list;
+ LS_LIST_ITEM **hook= &sorted_list;
+ LS_LIST_ITEM *list1= sp->list1;
+ LS_LIST_ITEM *list2= sorted_list;
if (LS_COMPARE_FUNC_CALL(list1, list2))
{
diff --git a/sql/procedure.h b/sql/procedure.h
index a46e8cfc137..b9d5066bb3d 100644
--- a/sql/procedure.h
+++ b/sql/procedure.h
@@ -48,15 +48,17 @@ public:
virtual void set(longlong nr)=0;
virtual enum_field_types field_type() const=0;
void set(const char *str) { set(str,(uint) strlen(str), default_charset()); }
- void make_field(Send_field *tmp_field)
+ void make_field(THD *thd, Send_field *tmp_field)
{
init_make_field(tmp_field,field_type());
}
unsigned int size_of() { return sizeof(*this);}
- bool check_vcol_func_processor(uchar *int_arg)
+ bool check_vcol_func_processor(void *arg)
{
- return trace_unsupported_by_check_vcol_func_processor("proc");
+ DBUG_ASSERT(0); // impossible
+ return mark_unsupported_function("proc", arg, VCOL_IMPOSSIBLE);
}
+ Item* get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; }
};
class Item_proc_real :public Item_proc
@@ -69,6 +71,7 @@ public:
decimals=dec; max_length=float_length(dec);
}
enum Item_result result_type () const { return REAL_RESULT; }
+ enum Item_result cmp_type () const { return REAL_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
void set(double nr) { value=nr; }
void set(longlong nr) { value=(double) nr; }
@@ -96,6 +99,7 @@ public:
Item_proc_int(THD *thd, const char *name_par): Item_proc(thd, name_par)
{ max_length=11; }
enum Item_result result_type () const { return INT_RESULT; }
+ enum Item_result cmp_type () const { return INT_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
void set(double nr) { value=(longlong) nr; }
void set(longlong nr) { value=nr; }
@@ -115,6 +119,7 @@ public:
Item_proc_string(THD *thd, const char *name_par, uint length):
Item_proc(thd, name_par) { this->max_length=length; }
enum Item_result result_type () const { return STRING_RESULT; }
+ enum Item_result cmp_type () const { return STRING_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
void set(double nr) { str_value.set_real(nr, 2, default_charset()); }
void set(longlong nr) { str_value.set(nr, default_charset()); }
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 9c5df04e404..256cecac6bb 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -35,7 +35,8 @@ static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024;
/* Declared non-static only because of the embedded library. */
bool net_send_error_packet(THD *, uint, const char *, const char *);
/* Declared non-static only because of the embedded library. */
-bool net_send_ok(THD *, uint, uint, ulonglong, ulonglong, const char *);
+bool net_send_ok(THD *, uint, uint, ulonglong, ulonglong, const char *,
+ bool, bool);
/* Declared non-static only because of the embedded library. */
bool net_send_eof(THD *thd, uint server_status, uint statement_warn_count);
#ifndef EMBEDDED_LIBRARY
@@ -197,7 +198,8 @@ bool net_send_error(THD *thd, uint sql_errno, const char *err,
@param affected_rows Number of rows changed by statement
@param id Auto_increment id for first row (if used)
@param message Message to send to the client (Used by mysql_status)
-
+  @param is_eof             this is sent instead of the old EOF packet
+
@return
@retval FALSE The message was successfully sent
@retval TRUE An error occurred and the messages wasn't sent properly
@@ -208,10 +210,15 @@ bool net_send_error(THD *thd, uint sql_errno, const char *err,
bool
net_send_ok(THD *thd,
uint server_status, uint statement_warn_count,
- ulonglong affected_rows, ulonglong id, const char *message)
+ ulonglong affected_rows, ulonglong id, const char *message,
+ bool is_eof,
+ bool skip_flush)
{
NET *net= &thd->net;
- uchar buff[MYSQL_ERRMSG_SIZE+10],*pos;
+ StringBuffer<MYSQL_ERRMSG_SIZE + 10> store;
+
+ bool state_changed= false;
+
bool error= FALSE;
DBUG_ENTER("net_send_ok");
@@ -221,38 +228,67 @@ net_send_ok(THD *thd,
DBUG_RETURN(FALSE);
}
- buff[0]=0; // No fields
- pos=net_store_length(buff+1,affected_rows);
- pos=net_store_length(pos, id);
+ /*
+    An OK packet sent instead of EOF still requires the 0xFE header, but with OK packet content.
+ */
+ if (is_eof)
+ {
+ DBUG_ASSERT(thd->client_capabilities & CLIENT_DEPRECATE_EOF);
+ store.q_append((char)254);
+ }
+ else
+ store.q_append('\0');
+
+ /* affected rows */
+ store.q_net_store_length(affected_rows);
+
+ /* last insert id */
+ store.q_net_store_length(id);
+
if (thd->client_capabilities & CLIENT_PROTOCOL_41)
{
DBUG_PRINT("info",
("affected_rows: %lu id: %lu status: %u warning_count: %u",
- (ulong) affected_rows,
+ (ulong) affected_rows,
(ulong) id,
(uint) (server_status & 0xffff),
(uint) statement_warn_count));
- int2store(pos, server_status);
- pos+=2;
+ store.q_append2b(server_status);
/* We can only return up to 65535 warnings in two bytes */
uint tmp= MY_MIN(statement_warn_count, 65535);
- int2store(pos, tmp);
- pos+= 2;
+ store.q_append2b(tmp);
}
else if (net->return_status) // For 4.0 protocol
{
- int2store(pos, server_status);
- pos+=2;
+ store.q_append2b(server_status);
}
thd->get_stmt_da()->set_overwrite_status(true);
- if (message && message[0])
- pos= net_store_data(pos, (uchar*) message, strlen(message));
- error= my_net_write(net, buff, (size_t) (pos-buff));
- if (!error)
+ state_changed=
+ (thd->client_capabilities & CLIENT_SESSION_TRACK) &&
+ (server_status & SERVER_SESSION_STATE_CHANGED);
+
+ if (state_changed || (message && message[0]))
+ {
+ DBUG_ASSERT(safe_strlen(message) <= MYSQL_ERRMSG_SIZE);
+ store.q_net_store_data((uchar*) safe_str(message), safe_strlen(message));
+ }
+
+ if (unlikely(state_changed))
+ {
+ store.set_charset(thd->variables.collation_database);
+
+ thd->session_tracker.store(thd, &store);
+ }
+
+ DBUG_ASSERT(store.length() <= MAX_PACKET_LENGTH);
+
+ error= my_net_write(net, (const unsigned char*)store.ptr(), store.length());
+ if (!error && (!skip_flush || is_eof))
error= net_flush(net);
+ thd->server_status&= ~SERVER_SESSION_STATE_CHANGED;
thd->get_stmt_da()->set_overwrite_status(false);
DBUG_PRINT("info", ("OK sent, so no more error sending allowed"));
@@ -260,6 +296,7 @@ net_send_ok(THD *thd,
DBUG_RETURN(error);
}
+
static uchar eof_buff[1]= { (uchar) 254 }; /* Marker for end of fields */
/**
@@ -291,6 +328,22 @@ net_send_eof(THD *thd, uint server_status, uint statement_warn_count)
NET *net= &thd->net;
bool error= FALSE;
DBUG_ENTER("net_send_eof");
+
+ /*
+    Check if the client understands the new format packets (OK instead of EOF)
+
+ Normally end of statement reply is signaled by OK packet, but in case
+ of binlog dump request an EOF packet is sent instead. Also, old clients
+ expect EOF packet instead of OK
+ */
+ if ((thd->client_capabilities & CLIENT_DEPRECATE_EOF) &&
+ (thd->get_command() != COM_BINLOG_DUMP ))
+ {
+ error= net_send_ok(thd, server_status, statement_warn_count, 0, 0, NULL,
+ true, false);
+ DBUG_RETURN(error);
+ }
+
/* Set to TRUE if no active vio, to work well in case of --init-file */
if (net->vio != 0)
{
@@ -519,18 +572,21 @@ void Protocol::end_statement()
thd->get_stmt_da()->statement_warn_count());
break;
case Diagnostics_area::DA_OK:
+ case Diagnostics_area::DA_OK_BULK:
error= send_ok(thd->server_status,
thd->get_stmt_da()->statement_warn_count(),
thd->get_stmt_da()->affected_rows(),
thd->get_stmt_da()->last_insert_id(),
- thd->get_stmt_da()->message());
+ thd->get_stmt_da()->message(),
+ thd->get_stmt_da()->skip_flush());
break;
case Diagnostics_area::DA_DISABLED:
break;
case Diagnostics_area::DA_EMPTY:
default:
DBUG_ASSERT(0);
- error= send_ok(thd->server_status, 0, 0, 0, NULL);
+ error= send_ok(thd->server_status, 0, 0, 0, NULL,
+ thd->get_stmt_da()->skip_flush());
break;
}
if (!error)
@@ -549,12 +605,12 @@ void Protocol::end_statement()
bool Protocol::send_ok(uint server_status, uint statement_warn_count,
ulonglong affected_rows, ulonglong last_insert_id,
- const char *message)
+ const char *message, bool skip_flush)
{
DBUG_ENTER("Protocol::send_ok");
- const bool retval=
+ const bool retval=
net_send_ok(thd, server_status, statement_warn_count,
- affected_rows, last_insert_id, message);
+ affected_rows, last_insert_id, message, false, skip_flush);
DBUG_RETURN(retval);
}
@@ -568,7 +624,7 @@ bool Protocol::send_ok(uint server_status, uint statement_warn_count,
bool Protocol::send_eof(uint server_status, uint statement_warn_count)
{
DBUG_ENTER("Protocol::send_eof");
- const bool retval= net_send_eof(thd, server_status, statement_warn_count);
+ bool retval= net_send_eof(thd, server_status, statement_warn_count);
DBUG_RETURN(retval);
}
@@ -763,7 +819,11 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
char *pos;
CHARSET_INFO *cs= system_charset_info;
Send_field field;
- item->make_field(&field);
+ item->make_field(thd, &field);
+
+ /* limit number of decimals for float and double */
+ if (field.type == MYSQL_TYPE_FLOAT || field.type == MYSQL_TYPE_DOUBLE)
+ set_if_smaller(field.decimals, FLOATING_POINT_DECIMALS);
/* Keep things compatible for old clients */
if (field.type == MYSQL_TYPE_VARCHAR)
@@ -864,14 +924,19 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
if (flags & SEND_EOF)
{
- /*
- Mark the end of meta-data result set, and store thd->server_status,
- to show that there is no cursor.
- Send no warning information, as it will be sent at statement end.
- */
- if (write_eof_packet(thd, &thd->net, thd->server_status,
- thd->get_stmt_da()->current_statement_warn_count()))
- DBUG_RETURN(1);
+
+    /* if it is a new client, do not send the EOF packet */
+ if (!(thd->client_capabilities & CLIENT_DEPRECATE_EOF))
+ {
+ /*
+ Mark the end of meta-data result set, and store thd->server_status,
+ to show that there is no cursor.
+ Send no warning information, as it will be sent at statement end.
+ */
+ if (write_eof_packet(thd, &thd->net, thd->server_status,
+ thd->get_stmt_da()->current_statement_warn_count()))
+ DBUG_RETURN(1);
+ }
}
DBUG_RETURN(prepare_for_send(list->elements));
@@ -1242,33 +1307,27 @@ bool Protocol_text::send_out_parameters(List<Item_param> *sp_params)
thd->lex->prepared_stmt_params.elements);
List_iterator_fast<Item_param> item_param_it(*sp_params);
- List_iterator_fast<LEX_STRING> user_var_name_it(thd->lex->prepared_stmt_params);
+ List_iterator_fast<Item> param_it(thd->lex->prepared_stmt_params);
while (true)
{
Item_param *item_param= item_param_it++;
- LEX_STRING *user_var_name= user_var_name_it++;
+ Item *param= param_it++;
+ Settable_routine_parameter *sparam;
- if (!item_param || !user_var_name)
+ if (!item_param || !param)
break;
if (!item_param->get_out_param_info())
continue; // It's an IN-parameter.
- Item_func_set_user_var *suv=
- new (thd->mem_root) Item_func_set_user_var(thd, *user_var_name, item_param);
- /*
- Item_func_set_user_var is not fixed after construction, call
- fix_fields().
- */
- if (suv->fix_fields(thd, NULL))
- return TRUE;
-
- if (suv->check(FALSE))
- return TRUE;
+ if (!(sparam= param->get_settable_routine_parameter()))
+ {
+ DBUG_ASSERT(0);
+ continue;
+ }
- if (suv->update())
- return TRUE;
+ sparam->set_value(thd, thd->spcont, reinterpret_cast<Item **>(&item_param));
}
return FALSE;
@@ -1507,6 +1566,7 @@ bool Protocol_binary::store_time(MYSQL_TIME *tm, int decimals)
bool Protocol_binary::send_out_parameters(List<Item_param> *sp_params)
{
+ bool ret;
if (!(thd->client_capabilities & CLIENT_PS_MULTI_RESULTS))
{
/* The client does not support OUT-parameters. */
@@ -1557,17 +1617,14 @@ bool Protocol_binary::send_out_parameters(List<Item_param> *sp_params)
if (write())
return TRUE;
- /* Restore THD::server_status. */
- thd->server_status&= ~SERVER_PS_OUT_PARAMS;
-
- /* Send EOF-packet. */
- net_send_eof(thd, thd->server_status, 0);
+ ret= net_send_eof(thd, thd->server_status, 0);
/*
- Reset SERVER_MORE_RESULTS_EXISTS bit, because this is the last packet
- for sure.
+ Reset server_status:
+ - SERVER_MORE_RESULTS_EXISTS bit, because this is the last packet for sure.
+ - Restore SERVER_PS_OUT_PARAMS status.
*/
- thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
+ thd->server_status&= ~(SERVER_PS_OUT_PARAMS | SERVER_MORE_RESULTS_EXISTS);
- return FALSE;
+ return ret ? FALSE : TRUE;
}
diff --git a/sql/protocol.h b/sql/protocol.h
index 2edad5663e8..1a6cb3bdc3c 100644
--- a/sql/protocol.h
+++ b/sql/protocol.h
@@ -62,7 +62,7 @@ protected:
virtual bool send_ok(uint server_status, uint statement_warn_count,
ulonglong affected_rows, ulonglong last_insert_id,
- const char *message);
+ const char *message, bool skip_flush);
virtual bool send_eof(uint server_status, uint statement_warn_count);
diff --git a/sql/records.cc b/sql/records.cc
index d98c6939e04..7d36d52228b 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -30,17 +30,17 @@
#include "records.h"
#include "sql_priv.h"
#include "records.h"
-#include "filesort.h" // filesort_free_buffers
#include "opt_range.h" // SQL_SELECT
#include "sql_class.h" // THD
#include "sql_base.h"
+#include "sql_sort.h" // SORT_ADDON_FIELD
static int rr_quick(READ_RECORD *info);
int rr_sequential(READ_RECORD *info);
static int rr_from_tempfile(READ_RECORD *info);
static int rr_unpack_from_tempfile(READ_RECORD *info);
static int rr_unpack_from_buffer(READ_RECORD *info);
-static int rr_from_pointers(READ_RECORD *info);
+int rr_from_pointers(READ_RECORD *info);
static int rr_from_cache(READ_RECORD *info);
static int init_rr_cache(THD *thd, READ_RECORD *info);
static int rr_cmp(uchar *a,uchar *b);
@@ -183,26 +183,30 @@ bool init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
SQL_SELECT *select,
+ SORT_INFO *filesort,
int use_record_cache, bool print_error,
bool disable_rr_cache)
{
IO_CACHE *tempfile;
+ SORT_ADDON_FIELD *addon_field= filesort ? filesort->addon_field : 0;
DBUG_ENTER("init_read_record");
bzero((char*) info,sizeof(*info));
info->thd=thd;
info->table=table;
info->forms= &info->table; /* Only one table */
+ info->addon_field= addon_field;
if ((table->s->tmp_table == INTERNAL_TMP_TABLE ||
table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) &&
- !table->sort.addon_field)
+ !addon_field)
(void) table->file->extra(HA_EXTRA_MMAP);
- if (table->sort.addon_field)
+ if (addon_field)
{
- info->rec_buf= table->sort.addon_buf;
- info->ref_length= table->sort.addon_length;
+ info->rec_buf= (uchar*) filesort->addon_buf.str;
+ info->ref_length= filesort->addon_buf.length;
+ info->unpack= filesort->unpack;
}
else
{
@@ -214,19 +218,20 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
info->print_error=print_error;
info->unlock_row= rr_unlock_row;
info->ignore_not_found_rows= 0;
- table->status=0; /* And it's always found */
+ table->status= 0; /* Rows are always found */
+ tempfile= 0;
if (select && my_b_inited(&select->file))
tempfile= &select->file;
- else
- tempfile= table->sort.io_cache;
- if (tempfile && my_b_inited(tempfile) &&
- !(select && select->quick))
+ else if (filesort && my_b_inited(&filesort->io_cache))
+ tempfile= &filesort->io_cache;
+
+ if (tempfile && !(select && select->quick))
{
DBUG_PRINT("info",("using rr_from_tempfile"));
- info->read_record= (table->sort.addon_field ?
+ info->read_record= (addon_field ?
rr_unpack_from_tempfile : rr_from_tempfile);
- info->io_cache=tempfile;
+ info->io_cache= tempfile;
reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0);
info->ref_pos=table->file->ref;
if (!table->file->inited)
@@ -234,12 +239,12 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
DBUG_RETURN(1);
/*
- table->sort.addon_field is checked because if we use addon fields,
+ addon_field is checked because if we use addon fields,
it doesn't make sense to use cache - we don't read from the table
- and table->sort.io_cache is read sequentially
+ and filesort->io_cache is read sequentially
*/
if (!disable_rr_cache &&
- !table->sort.addon_field &&
+ !addon_field &&
thd->variables.read_rnd_buff_size &&
!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
(table->db_stat & HA_READ_ONLY ||
@@ -264,17 +269,29 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
DBUG_PRINT("info",("using rr_quick"));
info->read_record=rr_quick;
}
- else if (table->sort.record_pointers)
+ else if (filesort && filesort->record_pointers)
{
DBUG_PRINT("info",("using record_pointers"));
if (table->file->ha_rnd_init_with_error(0))
DBUG_RETURN(1);
- info->cache_pos=table->sort.record_pointers;
- info->cache_end=info->cache_pos+
- table->sort.found_records*info->ref_length;
- info->read_record= (table->sort.addon_field ?
+ info->cache_pos= filesort->record_pointers;
+ info->cache_end= (info->cache_pos+
+ filesort->return_rows * info->ref_length);
+ info->read_record= (addon_field ?
rr_unpack_from_buffer : rr_from_pointers);
}
+ else if (table->file->keyread_enabled())
+ {
+ int error;
+ info->read_record= rr_index_first;
+ if (!table->file->inited &&
+ (error= table->file->ha_index_init(table->file->keyread, 1)))
+ {
+ if (print_error)
+ table->file->print_error(error, MYF(0));
+ DBUG_RETURN(1);
+ }
+ }
else
{
DBUG_PRINT("info",("using rr_sequential"));
@@ -289,7 +306,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
(use_record_cache < 0 &&
!(table->file->ha_table_flags() & HA_NOT_DELETE_WITH_CACHE))))
(void) table->file->extra_opt(HA_EXTRA_CACHE,
- thd->variables.read_buff_size);
+ thd->variables.read_buff_size);
}
/* Condition pushdown to storage engine */
if ((table->file->ha_table_flags() & HA_CAN_TABLE_CONDITION_PUSHDOWN) &&
@@ -312,8 +329,7 @@ void end_read_record(READ_RECORD *info)
}
if (info->table)
{
- filesort_free_buffers(info->table,0);
- if (info->table->created)
+ if (info->table->is_created())
(void) info->table->file->extra(HA_EXTRA_NO_CACHE);
if (info->read_record != rr_quick) // otherwise quick_range does it
(void) info->table->file->ha_index_or_rnd_end();
@@ -355,8 +371,6 @@ static int rr_quick(READ_RECORD *info)
break;
}
}
- if (info->table->vfield)
- update_virtual_fields(info->thd, info->table);
return tmp;
}
@@ -480,8 +494,6 @@ int rr_sequential(READ_RECORD *info)
break;
}
}
- if (!tmp && info->table->vfield)
- update_virtual_fields(info->thd, info->table);
return tmp;
}
@@ -526,14 +538,13 @@ static int rr_unpack_from_tempfile(READ_RECORD *info)
{
if (my_b_read(info->io_cache, info->rec_buf, info->ref_length))
return -1;
- TABLE *table= info->table;
- (*table->sort.unpack)(table->sort.addon_field, info->rec_buf,
- info->rec_buf + info->ref_length);
+ (*info->unpack)(info->addon_field, info->rec_buf,
+ info->rec_buf + info->ref_length);
return 0;
}
-static int rr_from_pointers(READ_RECORD *info)
+int rr_from_pointers(READ_RECORD *info)
{
int tmp;
uchar *cache_pos;
@@ -578,11 +589,9 @@ static int rr_unpack_from_buffer(READ_RECORD *info)
{
if (info->cache_pos == info->cache_end)
return -1; /* End of buffer */
- TABLE *table= info->table;
- (*table->sort.unpack)(table->sort.addon_field, info->cache_pos,
- info->cache_end);
+ (*info->unpack)(info->addon_field, info->cache_pos,
+ info->cache_end);
info->cache_pos+= info->ref_length;
-
return 0;
}
/* cacheing of records from a database */
@@ -622,7 +631,7 @@ static int init_rr_cache(THD *thd, READ_RECORD *info)
static int rr_from_cache(READ_RECORD *info)
{
- reg1 uint i;
+ uint i;
ulong length;
my_off_t rest_of_file;
int16 error;
diff --git a/sql/records.h b/sql/records.h
index b55fbc22f55..473cb610be5 100644
--- a/sql/records.h
+++ b/sql/records.h
@@ -25,6 +25,11 @@ struct TABLE;
class THD;
class SQL_SELECT;
class Copy_field;
+class SORT_INFO;
+
+struct READ_RECORD;
+
+void end_read_record(READ_RECORD *info);
/**
A context for reading through a single table using a chosen access method:
@@ -60,8 +65,10 @@ struct READ_RECORD
uchar *record;
uchar *rec_buf; /* to read field values after filesort */
uchar *cache,*cache_pos,*cache_end,*read_positions;
+ struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
struct st_io_cache *io_cache;
bool print_error, ignore_not_found_rows;
+ void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *);
/*
SJ-Materialization runtime may need to read fields from the materialized
@@ -69,14 +76,17 @@ struct READ_RECORD
*/
Copy_field *copy_field;
Copy_field *copy_field_end;
+public:
+ READ_RECORD() : table(NULL), cache(NULL) {}
+ ~READ_RECORD() { end_read_record(this); }
};
bool init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
- SQL_SELECT *select, int use_record_cache,
+ SQL_SELECT *select, SORT_INFO *sort,
+ int use_record_cache,
bool print_errors, bool disable_rr_cache);
bool init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
bool print_error, uint idx, bool reverse);
-void end_read_record(READ_RECORD *info);
void rr_unlock_row(st_join_table *tab);
diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc
index b82e7bada45..0ab4c62235f 100644
--- a/sql/rpl_filter.cc
+++ b/sql/rpl_filter.cc
@@ -239,7 +239,7 @@ Rpl_filter::db_ok_with_wild_table(const char *db)
int len;
end= strmov(hash_key, db);
*end++= '.';
- len= end - hash_key ;
+ len= (int)(end - hash_key);
if (wild_do_table_inited && find_wild(&wild_do_table, hash_key, len))
{
DBUG_PRINT("return",("1"));
diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc
index 2d4acde03fc..f700f7411ca 100644
--- a/sql/rpl_gtid.cc
+++ b/sql/rpl_gtid.cc
@@ -415,6 +415,8 @@ rpl_slave_state::truncate_state_table(THD *thd)
NULL, TL_WRITE);
if (!(err= open_and_lock_tables(thd, &tlist, FALSE, 0)))
{
+ tdc_remove_table(thd, TDC_RT_REMOVE_UNUSED, "mysql",
+ rpl_gtid_slave_state_table_name.str, false);
err= tlist.table->file->ha_truncate();
if (err)
@@ -1033,8 +1035,8 @@ gtid_parser_helper(char **ptr, char *end, rpl_gtid *out_gtid)
if (err != 0)
return 1;
- out_gtid->domain_id= v1;
- out_gtid->server_id= v2;
+ out_gtid->domain_id= (uint32) v1;
+ out_gtid->server_id= (uint32) v2;
out_gtid->seq_no= v3;
*ptr= q;
return 0;
@@ -1145,7 +1147,7 @@ rpl_slave_state::is_empty()
}
-rpl_binlog_state::rpl_binlog_state()
+void rpl_binlog_state::init()
{
my_hash_init(&hash, &my_charset_bin, 32, offsetof(element, domain_id),
sizeof(uint32), NULL, my_free, HASH_UNIQUE);
@@ -1155,7 +1157,6 @@ rpl_binlog_state::rpl_binlog_state()
initialized= 1;
}
-
void
rpl_binlog_state::reset_nolock()
{
@@ -1958,10 +1959,7 @@ slave_connection_state::load(char *slave_request, size_t len)
for (;;)
{
if (!(rec= (uchar *)my_malloc(sizeof(entry), MYF(MY_WME))))
- {
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(*gtid));
return 1;
- }
gtid= &((entry *)rec)->gtid;
if (gtid_parser_helper(&p, end, gtid))
{
@@ -2578,10 +2576,7 @@ gtid_waiting::get_entry(uint32 domain_id)
return e;
if (!(e= (hash_element *)my_malloc(sizeof(*e), MYF(MY_WME))))
- {
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(*e));
return NULL;
- }
if (init_queue(&e->queue, 8, offsetof(queue_element, wait_seq_no), 0,
cmp_queue_elem, NULL, 1+offsetof(queue_element, queue_idx), 1))
diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h
index 7bd639b768f..35744bc35b1 100644
--- a/sql/rpl_gtid.h
+++ b/sql/rpl_gtid.h
@@ -239,9 +239,10 @@ struct rpl_binlog_state
/* Auxiliary buffer to sort gtid list. */
DYNAMIC_ARRAY gtid_sort_array;
- rpl_binlog_state();
+ rpl_binlog_state() :initialized(0) {}
~rpl_binlog_state();
+ void init();
void reset_nolock();
void reset();
void free();
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index ab54f0bfbb7..3b0de3deb1c 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -18,7 +18,7 @@
#include "sql_priv.h"
#include <my_dir.h>
#include "rpl_mi.h"
-#include "slave.h" // SLAVE_MAX_HEARTBEAT_PERIOD
+#include "slave.h"
#include "strfunc.h"
#include "sql_repl.h"
@@ -671,7 +671,7 @@ file '%s')", fname);
(ulong) mi->master_log_pos));
mi->rli.mi= mi;
- if (init_relay_log_info(&mi->rli, slave_info_fname))
+ if (mi->rli.init(slave_info_fname))
goto err;
mi->inited = 1;
@@ -1116,7 +1116,7 @@ bool Master_info_index::init_all_master_info()
DBUG_RETURN(1);
}
- thd= new THD; /* Needed by start_slave_threads */
+ thd= new THD(next_thread_id()); /* Needed by start_slave_threads */
thd->thread_stack= (char*) &thd;
thd->store_globals();
@@ -1239,7 +1239,7 @@ bool Master_info_index::init_all_master_info()
if (succ_num) // Have some Error and some Success
{
sql_print_warning("Reading of some Master_info entries failed");
- DBUG_RETURN(2);
+ DBUG_RETURN(1);
}
sql_print_error("Reading of all Master_info entries failed!");
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 8fef2d66635..6340e4d7cc6 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -229,6 +229,14 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
entry->stop_on_error_sub_id= sub_id;
mysql_mutex_unlock(&entry->LOCK_parallel_entry);
+ DBUG_EXECUTE_IF("rpl_parallel_simulate_wait_at_retry", {
+ if (rgi->current_gtid.seq_no == 1000) {
+ DBUG_ASSERT(entry->stop_on_error_sub_id == sub_id);
+ debug_sync_set_action(thd,
+ STRING_WITH_LEN("now WAIT_FOR proceed_by_1000"));
+ }
+ });
+
if (rgi->killed_for_retry == rpl_group_info::RETRY_KILL_PENDING)
wait_for_pending_deadlock_kill(thd, rgi);
thd->clear_error();
@@ -326,6 +334,7 @@ do_gco_wait(rpl_group_info *rgi, group_commit_orderer *gco,
&stage_waiting_for_prior_transaction_to_start_commit,
old_stage);
*did_enter_cond= true;
+ thd->set_time_for_next_stage();
do
{
if (thd->check_killed() && !rgi->worker_error)
@@ -388,6 +397,7 @@ do_ftwrl_wait(rpl_group_info *rgi,
thd->ENTER_COND(&entry->COND_parallel_entry, &entry->LOCK_parallel_entry,
&stage_waiting_for_ftwrl, old_stage);
*did_enter_cond= true;
+ thd->set_time_for_next_stage();
do
{
if (entry->force_abort || rgi->worker_error)
@@ -436,8 +446,11 @@ pool_mark_busy(rpl_parallel_thread_pool *pool, THD *thd)
*/
mysql_mutex_lock(&pool->LOCK_rpl_thread_pool);
if (thd)
+ {
thd->ENTER_COND(&pool->COND_rpl_thread_pool, &pool->LOCK_rpl_thread_pool,
&stage_waiting_for_rpl_thread_pool, &old_stage);
+ thd->set_time_for_next_stage();
+ }
while (pool->busy)
{
if (thd && thd->check_killed())
@@ -553,6 +566,7 @@ rpl_pause_for_ftwrl(THD *thd)
e->pause_sub_id= e->largest_started_sub_id;
thd->ENTER_COND(&e->COND_parallel_entry, &e->LOCK_parallel_entry,
&stage_waiting_for_ftwrl_threads_to_pause, &old_stage);
+ thd->set_time_for_next_stage();
while (e->pause_sub_id < (uint64)ULONGLONG_MAX &&
e->last_committed_sub_id < e->pause_sub_id &&
!err)
@@ -639,7 +653,7 @@ is_group_ending(Log_event *ev, Log_event_type event_type)
{
if (event_type == XID_EVENT)
return 1;
- if (event_type == QUERY_EVENT)
+ if (event_type == QUERY_EVENT) // COMMIT/ROLLBACK are never compressed
{
Query_log_event *qev = (Query_log_event *)ev;
if (qev->is_commit())
@@ -710,12 +724,20 @@ do_retry:
unregistering (and later re-registering) the wait.
*/
if(thd->wait_for_commit_ptr)
- thd->wait_for_commit_ptr->unregister_wait_for_prior_commit();
+ thd->wait_for_commit_ptr->unregister_wait_for_prior_commit();
DBUG_EXECUTE_IF("inject_mdev8031", {
/* Simulate that we get deadlock killed at this exact point. */
rgi->killed_for_retry= rpl_group_info::RETRY_KILL_KILLED;
thd->set_killed(KILL_CONNECTION);
});
+ DBUG_EXECUTE_IF("rpl_parallel_simulate_wait_at_retry", {
+ if (rgi->current_gtid.seq_no == 1001) {
+ debug_sync_set_action(thd,
+ STRING_WITH_LEN("rpl_parallel_simulate_wait_at_retry WAIT_FOR proceed_by_1001"));
+ }
+ DEBUG_SYNC(thd, "rpl_parallel_simulate_wait_at_retry");
+ });
+
rgi->cleanup_context(thd, 1);
wait_for_pending_deadlock_kill(thd, rgi);
thd->reset_killed();
@@ -739,7 +761,26 @@ do_retry:
for (;;)
{
mysql_mutex_lock(&entry->LOCK_parallel_entry);
- register_wait_for_prior_event_group_commit(rgi, entry);
+ if (entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX ||
+#ifndef DBUG_OFF
+ (DBUG_EVALUATE_IF("simulate_mdev_12746", 1, 0)) ||
+#endif
+ rgi->gtid_sub_id < entry->stop_on_error_sub_id)
+ {
+ register_wait_for_prior_event_group_commit(rgi, entry);
+ }
+ else
+ {
+ /*
+ A failure of a preceeding "parent" transaction may not be
+ seen by the current one through its own worker_error.
+ Such induced error gets set by ourselves now.
+ */
+ err= rgi->worker_error= 1;
+ my_error(ER_PRIOR_COMMIT_FAILED, MYF(0));
+ mysql_mutex_unlock(&entry->LOCK_parallel_entry);
+ goto err;
+ }
mysql_mutex_unlock(&entry->LOCK_parallel_entry);
/*
@@ -983,12 +1024,9 @@ handle_rpl_parallel_thread(void *arg)
struct rpl_parallel_thread *rpt= (struct rpl_parallel_thread *)arg;
my_thread_init();
- thd = new THD;
+ thd = new THD(next_thread_id());
thd->thread_stack = (char*)&thd;
- mysql_mutex_lock(&LOCK_thread_count);
- thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
+ add_to_active_threads(thd);
set_current_thd(thd);
pthread_detach_this_thread();
thd->init_for_queries();
@@ -1005,7 +1043,6 @@ handle_rpl_parallel_thread(void *arg)
thd->client_capabilities = CLIENT_LOCAL_FILES;
thd->net.reading_or_writing= 0;
thd_proc_info(thd, "Waiting for work from main SQL threads");
- thd->set_time();
thd->variables.lock_wait_timeout= LONG_TIMEOUT;
thd->system_thread_info.rpl_sql_info= &sql_info;
/*
@@ -1015,7 +1052,6 @@ handle_rpl_parallel_thread(void *arg)
*/
thd->variables.tx_isolation= ISO_REPEATABLE_READ;
-
mysql_mutex_lock(&rpt->LOCK_rpl_thread);
rpt->thd= thd;
@@ -1025,8 +1061,10 @@ handle_rpl_parallel_thread(void *arg)
rpt->running= true;
mysql_cond_signal(&rpt->COND_rpl_thread);
+ thd->set_command(COM_SLAVE_WORKER);
while (!rpt->stop)
{
+ uint wait_count= 0;
rpl_parallel_thread::queued_event *qev, *next_qev;
thd->ENTER_COND(&rpt->COND_rpl_thread, &rpt->LOCK_rpl_thread,
@@ -1045,7 +1083,11 @@ handle_rpl_parallel_thread(void *arg)
(rpt->current_owner && !in_event_group) ||
(rpt->current_owner && group_rgi->parallel_entry->force_abort) ||
rpt->stop))
+ {
+ if (!wait_count++)
+ thd->set_time_for_next_stage();
mysql_cond_wait(&rpt->COND_rpl_thread, &rpt->LOCK_rpl_thread);
+ }
rpt->dequeue1(events);
thd->EXIT_COND(&old_stage);
@@ -1391,9 +1433,8 @@ handle_rpl_parallel_thread(void *arg)
thd_proc_info(thd, "Slave worker thread exiting");
thd->temporary_tables= 0;
- mysql_mutex_lock(&LOCK_thread_count);
- thd->unlink();
- mysql_mutex_unlock(&LOCK_thread_count);
+ THD_CHECK_SENTRY(thd);
+ unlink_not_visible_thd(thd);
delete thd;
mysql_mutex_lock(&rpt->LOCK_rpl_thread);
@@ -1710,7 +1751,7 @@ rpl_parallel_thread::get_qev_common(Log_event *ev, ulonglong event_size)
}
qev->typ= rpl_parallel_thread::queued_event::QUEUED_EVENT;
qev->ev= ev;
- qev->event_size= event_size;
+ qev->event_size= (size_t)event_size;
qev->next= NULL;
return qev;
}
@@ -2491,8 +2532,17 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
!(unlikely(rli->gtid_skip_flag != GTID_SKIP_NOT) && is_group_event))
return -1;
- /* ToDo: what to do with this lock?!? */
- mysql_mutex_unlock(&rli->data_lock);
+ /* Note: rli->data_lock is released by sql_delay_event(). */
+ if (sql_delay_event(ev, rli->sql_driver_thd, serial_rgi))
+ {
+ /*
+ If sql_delay_event() returns non-zero, it means that the wait timed out
+ due to slave stop. We should not queue the event in this case, it must
+ not be applied yet.
+ */
+ delete ev;
+ return 1;
+ }
if (unlikely(typ == FORMAT_DESCRIPTION_EVENT))
{
@@ -2569,7 +2619,7 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
{
DBUG_ASSERT(rli->gtid_skip_flag == GTID_SKIP_TRANSACTION);
if (typ == XID_EVENT ||
- (typ == QUERY_EVENT &&
+ (typ == QUERY_EVENT && // COMMIT/ROLLBACK are never compressed
(((Query_log_event *)ev)->is_commit() ||
((Query_log_event *)ev)->is_rollback())))
rli->gtid_skip_flag= GTID_SKIP_NOT;
diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc
index 4f12169fbf3..dd4c952fa30 100644
--- a/sql/rpl_record.cc
+++ b/sql/rpl_record.cc
@@ -80,7 +80,7 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
unsigned int null_mask= 1U;
for ( ; (field= *p_field) ; p_field++)
{
- if (bitmap_is_set(cols, p_field - table->field))
+ if (bitmap_is_set(cols, (uint)(p_field - table->field)))
{
my_ptrdiff_t offset;
if (field->is_null(rec_offset))
@@ -105,10 +105,10 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
#endif
pack_ptr= field->pack(pack_ptr, field->ptr + offset,
field->max_data_length());
- DBUG_PRINT("debug", ("field: %s; real_type: %d, pack_ptr: 0x%lx;"
- " pack_ptr':0x%lx; bytes: %d",
+ DBUG_PRINT("debug", ("field: %s; real_type: %d, pack_ptr: %p;"
+ " pack_ptr':%p; bytes: %d",
field->field_name, field->real_type(),
- (ulong) old_pack_ptr, (ulong) pack_ptr,
+ old_pack_ptr,pack_ptr,
(int) (pack_ptr - old_pack_ptr)));
DBUG_DUMP("packed_data", old_pack_ptr, pack_ptr - old_pack_ptr);
}
@@ -262,7 +262,7 @@ unpack_row(rpl_group_info *rgi,
No need to bother about columns that does not exist: they have
gotten default values when being emptied above.
*/
- if (bitmap_is_set(cols, field_ptr - begin_ptr))
+ if (bitmap_is_set(cols, (uint)(field_ptr - begin_ptr)))
{
if ((null_mask & 0xFF) == 0)
{
@@ -322,9 +322,9 @@ unpack_row(rpl_group_info *rgi,
pack_ptr= f->unpack(f->ptr, pack_ptr, row_end, metadata);
DBUG_PRINT("debug", ("field: %s; metadata: 0x%x;"
- " pack_ptr: 0x%lx; pack_ptr': 0x%lx; bytes: %d",
+ " pack_ptr: %p; pack_ptr': %p; bytes: %d",
f->field_name, metadata,
- (ulong) old_pack_ptr, (ulong) pack_ptr,
+ old_pack_ptr, pack_ptr,
(int) (pack_ptr - old_pack_ptr)));
if (!pack_ptr)
{
@@ -336,11 +336,11 @@ unpack_row(rpl_group_info *rgi,
Galera Node throws "Could not read field" error and drops out of cluster
*/
WSREP_WARN("ROW event unpack field: %s metadata: 0x%x;"
- " pack_ptr: 0x%lx; conv_table %p conv_field %p table %s"
- " row_end: 0x%lx",
+ " pack_ptr: %p; conv_table %p conv_field %p table %s"
+ " row_end: %p",
f->field_name, metadata,
- (ulong) old_pack_ptr, conv_table, conv_field,
- (table_found) ? "found" : "not found", (ulong)row_end
+ old_pack_ptr, conv_table, conv_field,
+ (table_found) ? "found" : "not found", row_end
);
}
@@ -434,7 +434,7 @@ unpack_row(rpl_group_info *rgi,
if (master_reclength)
{
if (*field_ptr)
- *master_reclength = (*field_ptr)->ptr - table->record[0];
+ *master_reclength = (ulong)((*field_ptr)->ptr - table->record[0]);
else
*master_reclength = table->s->reclength;
}
@@ -511,11 +511,11 @@ int fill_extra_persistent_columns(TABLE *table, int master_cols)
for (vfield_ptr= table->vfield; *vfield_ptr; ++vfield_ptr)
{
vfield= *vfield_ptr;
- if (vfield->field_index >= master_cols && vfield->stored_in_db)
+ if (vfield->field_index >= master_cols && vfield->stored_in_db())
{
/*Set bitmap for writing*/
bitmap_set_bit(table->vcol_set, vfield->field_index);
- error= vfield->vcol_info->expr_item->save_in_field(vfield,0);
+ error= vfield->vcol_info->expr->save_in_field(vfield,0);
bitmap_clear_bit(table->vcol_set, vfield->field_index);
}
}
diff --git a/sql/rpl_record_old.cc b/sql/rpl_record_old.cc
index 5b876373b9c..8e21c4a94a5 100644
--- a/sql/rpl_record_old.cc
+++ b/sql/rpl_record_old.cc
@@ -134,7 +134,7 @@ unpack_row_old(rpl_group_info *rgi,
{
Field *const f= *field_ptr;
- if (bitmap_is_set(cols, field_ptr - begin_ptr))
+ if (bitmap_is_set(cols, (uint)(field_ptr - begin_ptr)))
{
f->move_field_offset(offset);
ptr= f->unpack(f->ptr, ptr, row_buffer_end, 0);
@@ -149,14 +149,14 @@ unpack_row_old(rpl_group_info *rgi,
}
}
else
- bitmap_clear_bit(rw_set, field_ptr - begin_ptr);
+ bitmap_clear_bit(rw_set, (uint)(field_ptr - begin_ptr));
}
*row_end = ptr;
if (master_reclength)
{
if (*field_ptr)
- *master_reclength = (*field_ptr)->ptr - table->record[0];
+ *master_reclength = (ulong)((*field_ptr)->ptr - table->record[0]);
else
*master_reclength = table->s->reclength;
}
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index b35130c1505..26b93da3917 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2006, 2017, Oracle and/or its affiliates.
- Copyright (c) 2011, 2017, MariaDB Corporation
+ Copyright (c) 2010, 2017, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -28,6 +28,7 @@
#include "rpl_utility.h"
#include "transaction.h"
#include "sql_parse.h" // end_trans, ROLLBACK
+#include "slave.h"
#include <mysql/plugin.h>
#include <mysql/service_thd_wait.h>
@@ -41,35 +42,30 @@ rpl_slave_state *rpl_global_gtid_slave_state;
/* Object used for MASTER_GTID_WAIT(). */
gtid_waiting rpl_global_gtid_waiting;
-
-// Defined in slave.cc
-int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
-int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
- const char *default_val);
+const char *const Relay_log_info::state_delaying_string = "Waiting until MASTER_DELAY seconds after master executed event";
Relay_log_info::Relay_log_info(bool is_slave_recovery)
:Slave_reporting_capability("SQL"),
- no_storage(FALSE), replicate_same_server_id(::replicate_same_server_id),
+ replicate_same_server_id(::replicate_same_server_id),
info_fd(-1), cur_log_fd(-1), relay_log(&sync_relaylog_period),
sync_counter(0), is_relay_log_recovery(is_slave_recovery),
- save_temporary_tables(0), mi(0),
- inuse_relaylog_list(0), last_inuse_relaylog(0),
+ save_temporary_tables(0),
+ mi(0), inuse_relaylog_list(0), last_inuse_relaylog(0),
cur_log_old_open_count(0), error_on_rli_init_info(false),
group_relay_log_pos(0), event_relay_log_pos(0),
-#if HAVE_valgrind
- is_fake(FALSE),
-#endif
group_master_log_pos(0), log_space_total(0), ignore_log_space_limit(0),
last_master_timestamp(0), sql_thread_caught_up(true), slave_skip_counter(0),
abort_pos_wait(0), slave_run_id(0), sql_driver_thd(),
gtid_skip_flag(GTID_SKIP_NOT), inited(0), abort_slave(0), stop_for_until(0),
slave_running(MYSQL_SLAVE_NOT_RUN), until_condition(UNTIL_NONE),
until_log_pos(0), retried_trans(0), executed_entries(0),
+ sql_delay(0), sql_delay_end(0),
m_flags(0)
{
DBUG_ENTER("Relay_log_info::Relay_log_info");
relay_log.is_relay_log= TRUE;
+ relay_log_state.init();
#ifdef HAVE_PSI_INTERFACE
relay_log.set_psi_keys(key_RELAYLOG_LOCK_index,
key_RELAYLOG_update_cond,
@@ -115,44 +111,48 @@ Relay_log_info::~Relay_log_info()
}
-int init_relay_log_info(Relay_log_info* rli,
- const char* info_fname)
+/**
+ Read the relay_log.info file.
+
+ @param info_fname The name of the file to read from.
+ @retval 0 success
+ @retval 1 failure
+*/
+int Relay_log_info::init(const char* info_fname)
{
char fname[FN_REFLEN+128];
- int info_fd= -1;
const char* msg = 0;
int error = 0;
mysql_mutex_t *log_lock;
- DBUG_ENTER("init_relay_log_info");
- DBUG_ASSERT(!rli->no_storage); // Don't init if there is no storage
+ DBUG_ENTER("Relay_log_info::init");
- if (rli->inited) // Set if this function called
+ if (inited) // Set if this function called
DBUG_RETURN(0);
- log_lock= rli->relay_log.get_log_lock();
+ log_lock= relay_log.get_log_lock();
fn_format(fname, info_fname, mysql_data_home, "", 4+32);
- mysql_mutex_lock(&rli->data_lock);
- if (rli->error_on_rli_init_info)
+ mysql_mutex_lock(&data_lock);
+ cur_log_fd = -1;
+ slave_skip_counter=0;
+ abort_pos_wait=0;
+ log_space_limit= relay_log_space_limit;
+ log_space_total= 0;
+
+ if (error_on_rli_init_info)
goto err;
- info_fd = rli->info_fd;
- rli->cur_log_fd = -1;
- rli->slave_skip_counter=0;
- rli->abort_pos_wait=0;
- rli->log_space_limit= relay_log_space_limit;
- rli->log_space_total= 0;
char pattern[FN_REFLEN];
(void) my_realpath(pattern, slave_load_tmpdir, 0);
if (fn_format(pattern, PREFIX_SQL_LOAD, pattern, "",
MY_SAFE_PATH | MY_RETURN_REAL_PATH) == NullS)
{
- mysql_mutex_unlock(&rli->data_lock);
+ mysql_mutex_unlock(&data_lock);
sql_print_error("Unable to use slave's temporary directory %s",
slave_load_tmpdir);
DBUG_RETURN(1);
}
- unpack_filename(rli->slave_patternload_file, pattern);
- rli->slave_patternload_file_size= strlen(rli->slave_patternload_file);
+ unpack_filename(slave_patternload_file, pattern);
+ slave_patternload_file_size= strlen(slave_patternload_file);
/*
The relay log will now be opened, as a SEQ_READ_APPEND IO_CACHE.
@@ -166,7 +166,7 @@ int init_relay_log_info(Relay_log_info* rli,
if (opt_relay_logname &&
opt_relay_logname[strlen(opt_relay_logname) - 1] == FN_LIBCHAR)
{
- mysql_mutex_unlock(&rli->data_lock);
+ mysql_mutex_unlock(&data_lock);
sql_print_error("Path '%s' is a directory name, please specify \
a file name for --relay-log option", opt_relay_logname);
DBUG_RETURN(1);
@@ -178,7 +178,7 @@ a file name for --relay-log option", opt_relay_logname);
opt_relaylog_index_name[strlen(opt_relaylog_index_name) - 1]
== FN_LIBCHAR)
{
- mysql_mutex_unlock(&rli->data_lock);
+ mysql_mutex_unlock(&data_lock);
sql_print_error("Path '%s' is a directory name, please specify \
a file name for --relay-log-index option", opt_relaylog_index_name);
DBUG_RETURN(1);
@@ -187,7 +187,7 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
char buf[FN_REFLEN];
const char *ln;
static bool name_warning_sent= 0;
- ln= rli->relay_log.generate_name(opt_relay_logname, "-relay-bin",
+ ln= relay_log.generate_name(opt_relay_logname, "-relay-bin",
1, buf);
/* We send the warning only at startup, not after every RESET SLAVE */
if (!opt_relay_logname && !opt_relaylog_index_name && !name_warning_sent &&
@@ -210,7 +210,6 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
}
/* For multimaster, add connection name to relay log filenames */
- Master_info* mi= rli->mi;
char buf_relay_logname[FN_REFLEN], buf_relaylog_index_name_buff[FN_REFLEN];
char *buf_relaylog_index_name= opt_relaylog_index_name;
@@ -233,13 +232,13 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
but a destructor will take care of that
*/
mysql_mutex_lock(log_lock);
- if (rli->relay_log.open_index_file(buf_relaylog_index_name, ln, TRUE) ||
- rli->relay_log.open(ln, LOG_BIN, 0, 0, SEQ_READ_APPEND,
- mi->rli.max_relay_log_size, 1, TRUE))
+ if (relay_log.open_index_file(buf_relaylog_index_name, ln, TRUE) ||
+ relay_log.open(ln, LOG_BIN, 0, 0, SEQ_READ_APPEND,
+ (ulong)max_relay_log_size, 1, TRUE))
{
mysql_mutex_unlock(log_lock);
- mysql_mutex_unlock(&rli->data_lock);
- sql_print_error("Failed when trying to open logs for '%s' in init_relay_log_info(). Error: %M", ln, my_errno);
+ mysql_mutex_unlock(&data_lock);
+ sql_print_error("Failed when trying to open logs for '%s' in Relay_log_info::init(). Error: %M", ln, my_errno);
DBUG_RETURN(1);
}
mysql_mutex_unlock(log_lock);
@@ -262,7 +261,7 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
msg= current_thd->get_stmt_da()->message();
goto err;
}
- if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0,
+ if (init_io_cache(&info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0,
MYF(MY_WME)))
{
sql_print_error("Failed to create a cache on relay log info file '%s'",
@@ -272,20 +271,19 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
}
/* Init relay log with first entry in the relay index file */
- if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */,
+ if (init_relay_log_pos(this,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */,
&msg, 0))
{
sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)");
goto err;
}
- rli->group_master_log_name[0]= 0;
- rli->group_master_log_pos= 0;
- rli->info_fd= info_fd;
+ group_master_log_name[0]= 0;
+ group_master_log_pos= 0;
}
else // file exists
{
if (info_fd >= 0)
- reinit_io_cache(&rli->info_file, READ_CACHE, 0L,0,0);
+ reinit_io_cache(&info_file, READ_CACHE, 0L,0,0);
else
{
int error=0;
@@ -297,7 +295,7 @@ Failed to open the existing relay log info file '%s' (errno %d)",
fname, my_errno);
error= 1;
}
- else if (init_io_cache(&rli->info_file, info_fd,
+ else if (init_io_cache(&info_file, info_fd,
IO_SIZE*2, READ_CACHE, 0L, 0, MYF(MY_WME)))
{
sql_print_error("Failed to create a cache on relay log info file '%s'",
@@ -308,26 +306,17 @@ Failed to open the existing relay log info file '%s' (errno %d)",
{
if (info_fd >= 0)
mysql_file_close(info_fd, MYF(0));
- rli->info_fd= -1;
+ info_fd= -1;
mysql_mutex_lock(log_lock);
- rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT);
+ relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT);
mysql_mutex_unlock(log_lock);
- mysql_mutex_unlock(&rli->data_lock);
+ mysql_mutex_unlock(&data_lock);
DBUG_RETURN(1);
}
}
- rli->info_fd = info_fd;
int relay_log_pos, master_log_pos, lines;
char *first_non_digit;
- /*
- In MySQL 5.6, there is a MASTER_DELAY option to CHANGE MASTER. This is
- not yet merged into MariaDB (as of 10.0.13). However, we detect the
- presense of the new option in relay-log.info, as a placeholder for
- possible later merge of the feature, and to maintain file format
- compatibility with MySQL 5.6+.
- */
- int dummy_sql_delay;
/*
Starting from MySQL 5.6.x, relay-log.info has a new format.
@@ -352,25 +341,25 @@ Failed to open the existing relay log info file '%s' (errno %d)",
it is line count and not binlog name (new format) it will be
overwritten by the second row later.
*/
- if (init_strvar_from_file(rli->group_relay_log_name,
- sizeof(rli->group_relay_log_name),
- &rli->info_file, ""))
+ if (init_strvar_from_file(group_relay_log_name,
+ sizeof(group_relay_log_name),
+ &info_file, ""))
{
msg="Error reading slave log configuration";
goto err;
}
- lines= strtoul(rli->group_relay_log_name, &first_non_digit, 10);
+ lines= strtoul(group_relay_log_name, &first_non_digit, 10);
- if (rli->group_relay_log_name[0] != '\0' &&
+ if (group_relay_log_name[0] != '\0' &&
*first_non_digit == '\0' &&
lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY)
{
DBUG_PRINT("info", ("relay_log_info file is in new format."));
/* Seems to be new format => read relay log name from next line */
- if (init_strvar_from_file(rli->group_relay_log_name,
- sizeof(rli->group_relay_log_name),
- &rli->info_file, ""))
+ if (init_strvar_from_file(group_relay_log_name,
+ sizeof(group_relay_log_name),
+ &info_file, ""))
{
msg="Error reading slave log configuration";
goto err;
@@ -380,77 +369,75 @@ Failed to open the existing relay log info file '%s' (errno %d)",
DBUG_PRINT("info", ("relay_log_info file is in old format."));
if (init_intvar_from_file(&relay_log_pos,
- &rli->info_file, BIN_LOG_HEADER_SIZE) ||
- init_strvar_from_file(rli->group_master_log_name,
- sizeof(rli->group_master_log_name),
- &rli->info_file, "") ||
- init_intvar_from_file(&master_log_pos, &rli->info_file, 0) ||
+ &info_file, BIN_LOG_HEADER_SIZE) ||
+ init_strvar_from_file(group_master_log_name,
+ sizeof(group_master_log_name),
+ &info_file, "") ||
+ init_intvar_from_file(&master_log_pos, &info_file, 0) ||
(lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY &&
- init_intvar_from_file(&dummy_sql_delay, &rli->info_file, 0)))
+ init_intvar_from_file(&sql_delay, &info_file, 0)))
{
msg="Error reading slave log configuration";
goto err;
}
- strmake_buf(rli->event_relay_log_name,rli->group_relay_log_name);
- rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos;
- rli->group_master_log_pos= master_log_pos;
+ strmake_buf(event_relay_log_name,group_relay_log_name);
+ group_relay_log_pos= event_relay_log_pos= relay_log_pos;
+ group_master_log_pos= master_log_pos;
- if (rli->is_relay_log_recovery && init_recovery(rli->mi, &msg))
+ if (is_relay_log_recovery && init_recovery(mi, &msg))
goto err;
- rli->relay_log_state.load(rpl_global_gtid_slave_state);
- if (init_relay_log_pos(rli,
- rli->group_relay_log_name,
- rli->group_relay_log_pos,
+ relay_log_state.load(rpl_global_gtid_slave_state);
+ if (init_relay_log_pos(this,
+ group_relay_log_name,
+ group_relay_log_pos,
0 /* no data lock*/,
&msg, 0))
{
sql_print_error("Failed to open the relay log '%s' (relay_log_pos %llu)",
- rli->group_relay_log_name, rli->group_relay_log_pos);
+ group_relay_log_name, group_relay_log_pos);
goto err;
}
}
- DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%llu rli->event_relay_log_pos=%llu",
- my_b_tell(rli->cur_log), rli->event_relay_log_pos));
- DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE);
- DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos);
+ DBUG_PRINT("info", ("my_b_tell(cur_log)=%llu event_relay_log_pos=%llu",
+ my_b_tell(cur_log), event_relay_log_pos));
+ DBUG_ASSERT(event_relay_log_pos >= BIN_LOG_HEADER_SIZE);
+ DBUG_ASSERT(my_b_tell(cur_log) == event_relay_log_pos);
/*
Now change the cache from READ to WRITE - must do this
- before flush_relay_log_info
+ before Relay_log_info::flush()
*/
- reinit_io_cache(&rli->info_file, WRITE_CACHE,0L,0,1);
- if ((error= flush_relay_log_info(rli)))
+ reinit_io_cache(&info_file, WRITE_CACHE,0L,0,1);
+ if ((error= flush()))
{
msg= "Failed to flush relay log info file";
goto err;
}
- if (count_relay_log_space(rli))
+ if (count_relay_log_space(this))
{
msg="Error counting relay log space";
goto err;
}
- rli->inited= 1;
- rli->error_on_rli_init_info= false;
- mysql_mutex_unlock(&rli->data_lock);
+ inited= 1;
+ error_on_rli_init_info= false;
+ mysql_mutex_unlock(&data_lock);
DBUG_RETURN(0);
err:
- rli->error_on_rli_init_info= true;
+ error_on_rli_init_info= true;
if (msg)
sql_print_error("%s", msg);
- end_io_cache(&rli->info_file);
+ end_io_cache(&info_file);
if (info_fd >= 0)
mysql_file_close(info_fd, MYF(0));
- rli->info_fd= -1;
-
+ info_fd= -1;
mysql_mutex_lock(log_lock);
- rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT);
+ relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT);
mysql_mutex_unlock(log_lock);
- mysql_mutex_unlock(&rli->data_lock);
-
+ mysql_mutex_unlock(&data_lock);
DBUG_RETURN(1);
}
@@ -767,6 +754,8 @@ err:
if (!rli->relay_log.description_event_for_exec->is_valid() && !*errmsg)
*errmsg= "Invalid Format_description log event; could be out of memory";
+ DBUG_PRINT("info", ("Returning %d from init_relay_log_pos", (*errmsg)?1:0));
+
DBUG_RETURN ((*errmsg) ? 1 : 0);
}
@@ -984,8 +973,11 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
{
DBUG_ENTER("Relay_log_info::inc_group_relay_log_pos");
- if (!skip_lock)
+ if (skip_lock)
+ mysql_mutex_assert_owner(&data_lock);
+ else
mysql_mutex_lock(&data_lock);
+
rgi->inc_event_relay_log_pos();
DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu",
(long) log_pos, (long) group_master_log_pos));
@@ -1078,24 +1070,53 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
void Relay_log_info::close_temporary_tables()
{
- TABLE *table,*next;
DBUG_ENTER("Relay_log_info::close_temporary_tables");
- for (table=save_temporary_tables ; table ; table=next)
+ TMP_TABLE_SHARE *share;
+ TABLE *table;
+
+ if (!save_temporary_tables)
+ {
+ /* There are no temporary tables. */
+ DBUG_VOID_RETURN;
+ }
+
+ while ((share= save_temporary_tables->pop_front()))
{
- next=table->next;
+ /*
+ Iterate over the list of tables for this TABLE_SHARE and close them.
+ */
+ while ((table= share->all_tmp_tables.pop_front()))
+ {
+ DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'",
+ table->s->db.str, table->s->table_name.str));
+
+ /* Reset in_use as the table may have been created by another thd */
+ table->in_use= 0;
+ /*
+ Lets not free TABLE_SHARE here as there could be multiple TABLEs opened
+ for the same table (TABLE_SHARE).
+ */
+ closefrm(table);
+ my_free(table);
+ }
- /* Reset in_use as the table may have been created by another thd */
- table->in_use=0;
/*
Don't ask for disk deletion. For now, anyway they will be deleted when
slave restarts, but it is a better intention to not delete them.
*/
- DBUG_PRINT("info", ("table: 0x%lx", (long) table));
- close_temporary(table, 1, 0);
+
+ free_table_share(share);
+ my_free(share);
}
- save_temporary_tables= 0;
+
+ /* By now, there mustn't be any elements left in the list. */
+ DBUG_ASSERT(save_temporary_tables->is_empty());
+
+ my_free(save_temporary_tables);
+ save_temporary_tables= NULL;
slave_open_temp_tables= 0;
+
DBUG_VOID_RETURN;
}
@@ -1122,10 +1143,10 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset,
Indeed, rli->inited==0 does not imply that they already are empty.
It could be that slave's info initialization partly succeeded :
for example if relay-log.info existed but *relay-bin*.*
- have been manually removed, init_relay_log_info reads the old
- relay-log.info and fills rli->master_log_*, then init_relay_log_info
+ have been manually removed, Relay_log_info::init() reads the old
+ relay-log.info and fills rli->master_log_*, then Relay_log_info::init()
checks for the existence of the relay log, this fails and
- init_relay_log_info leaves rli->inited to 0.
+ Relay_log_info::init() leaves rli->inited to 0.
In that pathological case, rli->master_log_pos* will be properly reinited
at the next START SLAVE (as RESET SLAVE or CHANGE
MASTER, the callers of purge_relay_logs, will delete bogus *.info files
@@ -1154,7 +1175,7 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset,
}
mysql_mutex_lock(rli->relay_log.get_log_lock());
if (rli->relay_log.open(ln, LOG_BIN, 0, 0, SEQ_READ_APPEND,
- (rli->max_relay_log_size ? rli->max_relay_log_size :
+ (ulong)(rli->max_relay_log_size ? rli->max_relay_log_size :
max_binlog_size), 1, TRUE))
{
sql_print_error("Unable to purge relay log files. Failed to open relay "
@@ -1345,6 +1366,7 @@ bool Relay_log_info::stmt_done(my_off_t event_master_log_pos, THD *thd,
int error= 0;
DBUG_ENTER("Relay_log_info::stmt_done");
+ DBUG_ASSERT(!belongs_to_client());
DBUG_ASSERT(rgi->rli == this);
/*
If in a transaction, and if the slave supports transactions, just
@@ -1394,7 +1416,7 @@ bool Relay_log_info::stmt_done(my_off_t event_master_log_pos, THD *thd,
}
DBUG_EXECUTE_IF("inject_crash_before_flush_rli", DBUG_SUICIDE(););
if (mi->using_gtid == Master_info::USE_GTID_NO)
- if (flush_relay_log_info(this))
+ if (flush())
error= 1;
DBUG_EXECUTE_IF("inject_crash_after_flush_rli", DBUG_SUICIDE(););
}
@@ -1555,9 +1577,9 @@ rpl_load_gtid_slave_state(THD *thd)
goto end;
}
}
- domain_id= (ulonglong)table->field[0]->val_int();
+ domain_id= (uint32)table->field[0]->val_int();
sub_id= (ulonglong)table->field[1]->val_int();
- server_id= (ulonglong)table->field[2]->val_int();
+ server_id= (uint32)table->field[2]->val_int();
seq_no= (ulonglong)table->field[3]->val_int();
DBUG_PRINT("info", ("Read slave state row: %u-%u-%lu sub_id=%lu\n",
(unsigned)domain_id, (unsigned)server_id,
@@ -1761,6 +1783,12 @@ delete_or_keep_event_post_apply(rpl_group_info *rgi,
case DELETE_ROWS_EVENT:
case UPDATE_ROWS_EVENT:
case WRITE_ROWS_EVENT:
+ case WRITE_ROWS_COMPRESSED_EVENT:
+ case DELETE_ROWS_COMPRESSED_EVENT:
+ case UPDATE_ROWS_COMPRESSED_EVENT:
+ case WRITE_ROWS_COMPRESSED_EVENT_V1:
+ case UPDATE_ROWS_COMPRESSED_EVENT_V1:
+ case DELETE_ROWS_COMPRESSED_EVENT_V1:
/*
After the last Rows event has been applied, the saved Annotate_rows
event (if any) is not needed anymore and can be deleted.
@@ -1814,6 +1842,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
}
m_table_map.clear_tables();
slave_close_thread_tables(thd);
+
if (error)
{
thd->mdl_context.release_transactional_locks();
@@ -1839,8 +1868,9 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
/*
Cleanup for the flags that have been set at do_apply_event.
*/
- thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
- thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
+ thd->variables.option_bits&= ~(OPTION_NO_FOREIGN_KEY_CHECKS |
+ OPTION_RELAXED_UNIQUE_CHECKS |
+ OPTION_NO_CHECK_CONSTRAINT_CHECKS);
/*
Reset state related to long_find_row notes in the error log:
@@ -2135,4 +2165,79 @@ bool rpl_sql_thread_info::cached_charset_compare(char *charset) const
DBUG_RETURN(0);
}
+
+/**
+  Store the file and position where the slave's SQL thread is in the
+ relay log.
+
+ Notes:
+
+ - This function should be called either from the slave SQL thread,
+ or when the slave thread is not running. (It reads the
+ group_{relay|master}_log_{pos|name} and delay fields in the rli
+ object. These may only be modified by the slave SQL thread or by
+ a client thread when the slave SQL thread is not running.)
+
+ - If there is an active transaction, then we do not update the
+ position in the relay log. This is to ensure that we re-execute
+ statements if we die in the middle of an transaction that was
+ rolled back.
+
+ - As a transaction never spans binary logs, we don't have to handle
+ the case where we do a relay-log-rotation in the middle of the
+ transaction. If transactions could span several binlogs, we would
+ have to ensure that we do not delete the relay log file where the
+ transaction started before switching to a new relay log file.
+
+ - Error can happen if writing to file fails or if flushing the file
+ fails.
+
+ @param rli The object representing the Relay_log_info.
+
+ @todo Change the log file information to a binary format to avoid
+ calling longlong2str.
+
+ @return 0 on success, 1 on error.
+*/
+bool Relay_log_info::flush()
+{
+ bool error=0;
+
+ DBUG_ENTER("Relay_log_info::flush()");
+
+ IO_CACHE *file = &info_file;
+ // 2*file name, 2*long long, 2*unsigned long, 6*'\n'
+ char buff[FN_REFLEN * 2 + 22 * 2 + 10 * 2 + 6], *pos;
+ my_b_seek(file, 0L);
+ pos= longlong10_to_str(LINES_IN_RELAY_LOG_INFO_WITH_DELAY, buff, 10);
+ *pos++='\n';
+ pos=strmov(pos, group_relay_log_name);
+ *pos++='\n';
+ pos=longlong10_to_str(group_relay_log_pos, pos, 10);
+ *pos++='\n';
+ pos=strmov(pos, group_master_log_name);
+ *pos++='\n';
+ pos=longlong10_to_str(group_master_log_pos, pos, 10);
+ *pos++='\n';
+ pos= longlong10_to_str(sql_delay, pos, 10);
+ *pos++= '\n';
+ if (my_b_write(file, (uchar*) buff, (size_t) (pos-buff)))
+ error=1;
+ if (flush_io_cache(file))
+ error=1;
+ if (sync_relayloginfo_period &&
+ !error &&
+ ++sync_counter >= sync_relayloginfo_period)
+ {
+ if (my_sync(info_fd, MYF(MY_WME)))
+ error=1;
+ sync_counter= 0;
+ }
+ /*
+ Flushing the relay log is done by the slave I/O thread
+ or by the user on STOP SLAVE.
+ */
+ DBUG_RETURN(error);
+}
+
#endif
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index b40a34a54e6..564582a47be 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -30,11 +30,6 @@ class Master_info;
class Rpl_filter;
-enum {
- LINES_IN_RELAY_LOG_INFO_WITH_DELAY= 5
-};
-
-
/****************************************************************************
Replication SQL Thread
@@ -48,7 +43,7 @@ enum {
Relay_log_info is initialized from the slave.info file if such
exists. Otherwise, data members are intialized with defaults. The
- initialization is done with init_relay_log_info() call.
+ initialization is done with Relay_log_info::init() call.
The format of slave.info file:
@@ -79,11 +74,17 @@ public:
};
/*
- If flag set, then rli does not store its state in any info file.
- This is the case only when we execute BINLOG SQL commands inside
- a client, non-replication thread.
+ The SQL thread owns one Relay_log_info, and each client that has
+ executed a BINLOG statement owns one Relay_log_info. This function
+ returns zero for the Relay_log_info object that belongs to the SQL
+ thread and nonzero for Relay_log_info objects that belong to
+ clients.
*/
- bool no_storage;
+ inline bool belongs_to_client()
+ {
+ DBUG_ASSERT(sql_driver_thd);
+ return !sql_driver_thd->slave_thread;
+ }
/*
If true, events with the same server id should be replicated. This
@@ -149,7 +150,7 @@ public:
Protected by data_lock.
*/
- TABLE *save_temporary_tables;
+ All_tmp_tables_list *save_temporary_tables;
/*
standard lock acquisition order to avoid deadlocks:
@@ -202,6 +203,11 @@ public:
relay log and finishing (commiting) on another relay log. Case which can
happen when, for example, the relay log gets rotated because of
max_binlog_size.
+
+ Note: group_relay_log_name, group_relay_log_pos must only be
+ written from the thread owning the Relay_log_info (SQL thread if
+ !belongs_to_client(); client thread executing BINLOG statement if
+ belongs_to_client()).
*/
char group_relay_log_name[FN_REFLEN];
ulonglong group_relay_log_pos;
@@ -213,16 +219,17 @@ public:
*/
char future_event_master_log_name[FN_REFLEN];
-#ifdef HAVE_valgrind
- bool is_fake; /* Mark that this is a fake relay log info structure */
-#endif
-
/*
Original log name and position of the group we're currently executing
(whose coordinates are group_relay_log_name/pos in the relay log)
in the master's binlog. These concern the *group*, because in the master's
binlog the log_pos that comes with each event is the position of the
beginning of the group.
+
+ Note: group_master_log_name, group_master_log_pos must only be
+ written from the thread owning the Relay_log_info (SQL thread if
+ !belongs_to_client(); client thread executing BINLOG statement if
+ belongs_to_client()).
*/
char group_master_log_name[FN_REFLEN];
volatile my_off_t group_master_log_pos;
@@ -252,6 +259,15 @@ public:
bool sql_thread_caught_up;
void clear_until_condition();
+ /**
+ Reset the delay.
+ This is used by RESET SLAVE to clear the delay.
+ */
+ void clear_sql_delay()
+ {
+ sql_delay= 0;
+ }
+
/*
Needed for problems when slave stops and we want to restart it
@@ -355,10 +371,11 @@ public:
rpl_parallel parallel;
/*
- The relay_log_state keeps track of the current binlog state of the execution
- of the relay log. This is used to know where to resume current GTID position
- if the slave thread is stopped and restarted.
- It is only accessed from the SQL thread, so it does not need any locking.
+ The relay_log_state keeps track of the current binlog state of the
+ execution of the relay log. This is used to know where to resume
+ current GTID position if the slave thread is stopped and
+ restarted. It is only accessed from the SQL thread, so it does
+ not need any locking.
*/
rpl_binlog_state relay_log_state;
/*
@@ -482,8 +499,72 @@ public:
m_flags&= ~flag;
}
+ /**
+ Text used in THD::proc_info when the slave SQL thread is delaying.
+ */
+ static const char *const state_delaying_string;
+
+ bool flush();
+
+ /**
+ Reads the relay_log.info file.
+ */
+ int init(const char* info_filename);
+
+ /**
+ Indicate that a delay starts.
+
+ This does not actually sleep; it only sets the state of this
+ Relay_log_info object to delaying so that the correct state can be
+ reported by SHOW SLAVE STATUS and SHOW PROCESSLIST.
+
+ Requires rli->data_lock.
+
+ @param delay_end The time when the delay shall end.
+ */
+ void start_sql_delay(time_t delay_end)
+ {
+ mysql_mutex_assert_owner(&data_lock);
+ sql_delay_end= delay_end;
+ thd_proc_info(sql_driver_thd, state_delaying_string);
+ }
+
+ int32 get_sql_delay() { return sql_delay; }
+ void set_sql_delay(int32 _sql_delay) { sql_delay= _sql_delay; }
+ time_t get_sql_delay_end() { return sql_delay_end; }
+
private:
+
+ /**
+ Delay slave SQL thread by this amount, compared to master (in
+ seconds). This is set with CHANGE MASTER TO MASTER_DELAY=X.
+
+ Guarded by data_lock. Initialized by the client thread executing
+ START SLAVE. Written by client threads executing CHANGE MASTER TO
+ MASTER_DELAY=X. Read by SQL thread and by client threads
+ executing SHOW SLAVE STATUS. Note: must not be written while the
+ slave SQL thread is running, since the SQL thread reads it without
+ a lock when executing Relay_log_info::flush().
+ */
+ int sql_delay;
+
+ /**
+ During a delay, specifies the point in time when the delay ends.
+
+ This is used for the SQL_Remaining_Delay column in SHOW SLAVE STATUS.
+
+ Guarded by data_lock. Written by the sql thread. Read by client
+ threads executing SHOW SLAVE STATUS.
+ */
+ time_t sql_delay_end;
+
+ /*
+ Before the MASTER_DELAY parameter was added (WL#344),
+ relay_log.info had 4 lines. Now it has 5 lines.
+ */
+ static const int LINES_IN_RELAY_LOG_INFO_WITH_DELAY= 5;
+
/*
Holds the state of the data in the relay log.
We need this to ensure that we are not in the middle of a
@@ -767,7 +848,7 @@ struct rpl_group_info
*/
inline void set_annotate_event(Annotate_rows_log_event *event)
{
- free_annotate_event();
+ DBUG_ASSERT(m_annotate_event == NULL);
m_annotate_event= event;
this->thd->variables.binlog_annotate_row_events= 1;
}
@@ -893,10 +974,6 @@ public:
};
-// Defined in rpl_rli.cc
-int init_relay_log_info(Relay_log_info* rli, const char* info_fname);
-
-
extern struct rpl_slave_state *rpl_global_gtid_slave_state;
extern gtid_waiting rpl_global_gtid_waiting;
diff --git a/sql/rpl_tblmap.cc b/sql/rpl_tblmap.cc
index 80114f50d62..15bb8a054eb 100644
--- a/sql/rpl_tblmap.cc
+++ b/sql/rpl_tblmap.cc
@@ -66,8 +66,8 @@ TABLE* table_mapping::get_table(ulonglong table_id)
entry *e= find_entry(table_id);
if (e)
{
- DBUG_PRINT("info", ("tid %llu -> table 0x%lx (%s)",
- table_id, (long) e->table,
+ DBUG_PRINT("info", ("tid %llu -> table %p (%s)",
+ table_id, e->table,
MAYBE_TABLE_NAME(e->table)));
DBUG_RETURN(e->table);
}
@@ -105,9 +105,9 @@ int table_mapping::expand()
int table_mapping::set_table(ulonglong table_id, TABLE* table)
{
DBUG_ENTER("table_mapping::set_table(ulong,TABLE*)");
- DBUG_PRINT("enter", ("table_id: %llu table: 0x%lx (%s)",
- table_id,
- (long) table, MAYBE_TABLE_NAME(table)));
+ DBUG_PRINT("enter", ("table_id: %llu table: %p (%s)",
+ table_id,
+ table, MAYBE_TABLE_NAME(table)));
entry *e= find_entry(table_id);
if (e == 0)
{
@@ -133,8 +133,8 @@ int table_mapping::set_table(ulonglong table_id, TABLE* table)
DBUG_RETURN(ERR_MEMORY_ALLOCATION);
}
- DBUG_PRINT("info", ("tid %llu -> table 0x%lx (%s)",
- table_id, (long) e->table,
+ DBUG_PRINT("info", ("tid %llu -> table %p (%s)",
+ table_id, e->table,
MAYBE_TABLE_NAME(e->table)));
DBUG_RETURN(0); // All OK
}
diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc
index ff2cd74c3a7..9554daeedbd 100644
--- a/sql/rpl_utility.cc
+++ b/sql/rpl_utility.cc
@@ -765,14 +765,44 @@ can_convert_field_to(Field *field,
case MYSQL_TYPE_TIME:
case MYSQL_TYPE_DATETIME:
case MYSQL_TYPE_YEAR:
- case MYSQL_TYPE_NEWDATE:
case MYSQL_TYPE_NULL:
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_SET:
case MYSQL_TYPE_TIMESTAMP2:
- case MYSQL_TYPE_DATETIME2:
case MYSQL_TYPE_TIME2:
DBUG_RETURN(false);
+ case MYSQL_TYPE_NEWDATE:
+ {
+ if (field->real_type() == MYSQL_TYPE_DATETIME2 ||
+ field->real_type() == MYSQL_TYPE_DATETIME)
+ {
+ *order_var= -1;
+ DBUG_RETURN(is_conversion_ok(*order_var, rli));
+ }
+ else
+ {
+ DBUG_RETURN(false);
+ }
+ }
+ break;
+
+ //case MYSQL_TYPE_DATETIME: TODO: fix MDEV-17394 and uncomment.
+ //
+ //The "old" type does not specify the fraction part size which is required
+ //for correct conversion.
+ case MYSQL_TYPE_DATETIME2:
+ {
+ if (field->real_type() == MYSQL_TYPE_NEWDATE)
+ {
+ *order_var= 1;
+ DBUG_RETURN(is_conversion_ok(*order_var, rli));
+ }
+ else
+ {
+ DBUG_RETURN(false);
+ }
+ }
+ break;
}
DBUG_RETURN(false); // To keep GCC happy
}
@@ -901,6 +931,51 @@ table_def::compatible_with(THD *thd, rpl_group_info *rgi,
return true;
}
+
+/**
+ A wrapper to Virtual_tmp_table, to get access to its constructor,
+ which is protected for safety purposes (against illegal use on stack).
+*/
+class Virtual_conversion_table: public Virtual_tmp_table
+{
+public:
+ Virtual_conversion_table(THD *thd) :Virtual_tmp_table(thd) { }
+ /**
+ Add a new field into the virtual table.
+ @param sql_type - The real_type of the field.
+ @param metadata - The RBR binary log metadata for this field.
+ @param target_field - The field from the target table, to get extra
+ attributes from (e.g. typelib in case of ENUM).
+ */
+ bool add(enum_field_types sql_type,
+ uint16 metadata, const Field *target_field)
+ {
+ const Type_handler *handler= Type_handler::get_handler_by_real_type(sql_type);
+ if (!handler)
+ {
+ sql_print_error("In RBR mode, Slave received unknown field type field %d "
+ " for column Name: %s.%s.%s.",
+ (int) sql_type,
+ target_field->table->s->db.str,
+ target_field->table->s->table_name.str,
+ target_field->field_name);
+ return true;
+ }
+ Field *tmp= handler->make_conversion_table_field(this, metadata,
+ target_field);
+ if (!tmp)
+ return true;
+ Virtual_tmp_table::add(tmp);
+ DBUG_PRINT("debug", ("sql_type: %d, target_field: '%s', max_length: %d, decimals: %d,"
+ " maybe_null: %d, unsigned_flag: %d, pack_length: %u",
+ sql_type, target_field->field_name,
+ tmp->field_length, tmp->decimals(), TRUE,
+ tmp->flags, tmp->pack_length()));
+ return false;
+ }
+};
+
+
/**
Create a conversion table.
@@ -916,8 +991,7 @@ TABLE *table_def::create_conversion_table(THD *thd, rpl_group_info *rgi,
{
DBUG_ENTER("table_def::create_conversion_table");
- List<Create_field> field_list;
- TABLE *conv_table= NULL;
+ Virtual_conversion_table *conv_table;
Relay_log_info *rli= rgi->rli;
/*
At slave, columns may differ. So we should create
@@ -925,113 +999,35 @@ TABLE *table_def::create_conversion_table(THD *thd, rpl_group_info *rgi,
conversion table.
*/
uint const cols_to_create= MY_MIN(target_table->s->fields, size());
+ if (!(conv_table= new(thd) Virtual_conversion_table(thd)) ||
+ conv_table->init(cols_to_create))
+ goto err;
for (uint col= 0 ; col < cols_to_create; ++col)
{
- Create_field *field_def=
- (Create_field*) alloc_root(thd->mem_root, sizeof(Create_field));
- Field *target_field= target_table->field[col];
- bool unsigned_flag= 0;
- if (field_list.push_back(field_def, thd->mem_root))
- DBUG_RETURN(NULL);
-
- uint decimals= 0;
- TYPELIB* interval= NULL;
- uint pack_length= 0;
- uint32 max_length=
- max_display_length_for_field(type(col), field_metadata(col));
-
- switch(type(col)) {
- int precision;
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- interval= static_cast<Field_enum*>(target_field)->typelib;
- pack_length= field_metadata(col) & 0x00ff;
- break;
-
- case MYSQL_TYPE_NEWDECIMAL:
- /*
- The display length of a DECIMAL type is not the same as the
- length that should be supplied to make_field, so we correct
- the length here.
- */
- precision= field_metadata(col) >> 8;
- decimals= field_metadata(col) & 0x00ff;
- max_length=
- my_decimal_precision_to_length(precision, decimals, FALSE);
- break;
-
- case MYSQL_TYPE_DECIMAL:
- sql_print_error("In RBR mode, Slave received incompatible DECIMAL field "
- "(old-style decimal field) from Master while creating "
- "conversion table. Please consider changing datatype on "
- "Master to new style decimal by executing ALTER command for"
- " column Name: %s.%s.%s.",
- target_table->s->db.str,
- target_table->s->table_name.str,
- target_field->field_name);
+ if (conv_table->add(type(col), field_metadata(col),
+ target_table->field[col]))
+ {
+ DBUG_PRINT("debug", ("binlog_type: %d, metadata: %04X, target_field: '%s'"
+ " make_conversion_table_field() failed",
+ binlog_type(col), field_metadata(col),
+ target_table->field[col]->field_name));
goto err;
-
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_GEOMETRY:
- pack_length= field_metadata(col) & 0x00ff;
- break;
-
- case MYSQL_TYPE_TINY:
- case MYSQL_TYPE_SHORT:
- case MYSQL_TYPE_INT24:
- case MYSQL_TYPE_LONG:
- case MYSQL_TYPE_LONGLONG:
- /*
- As we don't know if the integer was signed or not on the master,
- assume we have same sign on master and slave. This is true when not
- using conversions so it should be true also when using conversions.
- */
- unsigned_flag= static_cast<Field_num*>(target_field)->unsigned_flag;
- break;
- case MYSQL_TYPE_TIMESTAMP:
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_DATETIME:
- /*
- As we don't know the precision of the temporal field on the master,
- assume it's the same on master and slave. This is true when not
- using conversions so it should be true also when using conversions.
- */
- if (target_field->decimals())
- max_length+= target_field->decimals() + 1;
- break;
- default:
- break;
}
-
- DBUG_PRINT("debug", ("sql_type: %d, target_field: '%s', max_length: %d, decimals: %d,"
- " maybe_null: %d, unsigned_flag: %d, pack_length: %u",
- binlog_type(col), target_field->field_name,
- max_length, decimals, TRUE, unsigned_flag,
- pack_length));
- field_def->init_for_tmp_table(type(col),
- max_length,
- decimals,
- TRUE, // maybe_null
- unsigned_flag,
- pack_length);
- field_def->charset= target_field->charset();
- field_def->interval= interval;
}
- conv_table= create_virtual_tmp_table(thd, field_list);
+ if (conv_table->open())
+ goto err; // Could not allocate record buffer?
-err:
- if (conv_table == NULL)
- {
- rli->report(ERROR_LEVEL, ER_SLAVE_CANT_CREATE_CONVERSION, rgi->gtid_info(),
- ER_THD(thd, ER_SLAVE_CANT_CREATE_CONVERSION),
- target_table->s->db.str,
- target_table->s->table_name.str);
- }
DBUG_RETURN(conv_table);
+
+err:
+ if (conv_table)
+ delete conv_table;
+ rli->report(ERROR_LEVEL, ER_SLAVE_CANT_CREATE_CONVERSION, rgi->gtid_info(),
+ ER_THD(thd, ER_SLAVE_CANT_CREATE_CONVERSION),
+ target_table->s->db.str,
+ target_table->s->table_name.str);
+ DBUG_RETURN(NULL);
}
#endif /* MYSQL_CLIENT */
diff --git a/sql/scheduler.cc b/sql/scheduler.cc
index bc3166210b5..de472ae2504 100644
--- a/sql/scheduler.cc
+++ b/sql/scheduler.cc
@@ -22,9 +22,9 @@
#pragma implementation
#endif
+#include "mysqld.h"
#include "sql_connect.h" // init_new_connection_handler_thread
#include "scheduler.h"
-#include "mysqld.h"
#include "sql_class.h"
#include "sql_callback.h"
#include <violite.h>
@@ -35,7 +35,11 @@
static bool no_threads_end(THD *thd, bool put_in_cache)
{
- unlink_thd(thd);
+ if (thd)
+ {
+ unlink_thd(thd);
+ delete thd;
+ }
return 1; // Abort handle_one_connection
}
@@ -81,7 +85,9 @@ static void scheduler_wait_net_end(void) {
one_thread_scheduler() or one_thread_per_connection_scheduler() in
mysqld.cc, so this init function will always be called.
*/
-void scheduler_init() {
+
+void scheduler_init()
+{
thr_set_lock_wait_callback(scheduler_wait_lock_begin,
scheduler_wait_lock_end);
thr_set_sync_wait_callback(scheduler_wait_sync_begin,
@@ -118,7 +124,6 @@ void post_kill_notification(THD *thd)
#ifndef EMBEDDED_LIBRARY
-
void one_thread_per_connection_scheduler(scheduler_functions *func,
ulong *arg_max_connections,
uint *arg_connection_count)
@@ -132,6 +137,14 @@ void one_thread_per_connection_scheduler(scheduler_functions *func,
func->end_thread= one_thread_per_connection_end;
func->post_kill_notification= post_kill_notification;
}
+#else
+bool init_new_connection_handler_thread()
+{
+ return 0;
+}
+void handle_connection_in_main_thread(CONNECT *connect)
+{
+}
#endif
/*
@@ -144,10 +157,7 @@ void one_thread_scheduler(scheduler_functions *func)
func->max_threads= 1;
func->max_connections= &max_connections;
func->connection_count= &connection_count;
-#ifndef EMBEDDED_LIBRARY
func->init_new_connection_thread= init_new_connection_handler_thread;
func->add_connection= handle_connection_in_main_thread;
-#endif
func->end_thread= no_threads_end;
}
-
diff --git a/sql/scheduler.h b/sql/scheduler.h
index f7aff377eac..71553372999 100644
--- a/sql/scheduler.h
+++ b/sql/scheduler.h
@@ -37,7 +37,7 @@ struct scheduler_functions
ulong *max_connections;
bool (*init)(void);
bool (*init_new_connection_thread)(void);
- void (*add_connection)(THD *thd);
+ void (*add_connection)(CONNECT *connect);
void (*thd_wait_begin)(THD *thd, int wait_type);
void (*thd_wait_end)(THD *thd);
void (*post_kill_notification)(THD *thd);
diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc
new file mode 100644
index 00000000000..4ca94b6cd60
--- /dev/null
+++ b/sql/session_tracker.cc
@@ -0,0 +1,1712 @@
+/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2016, MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+
+#ifndef EMBEDDED_LIBRARY
+#include "sql_plugin.h"
+#include "session_tracker.h"
+
+#include "hash.h"
+#include "table.h"
+#include "rpl_gtid.h"
+#include "sql_class.h"
+#include "sql_show.h"
+#include "sql_plugin.h"
+#include "set_var.h"
+
+void State_tracker::mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name)
+{
+ m_changed= true;
+ thd->lex->safe_to_cache_query= 0;
+ thd->server_status|= SERVER_SESSION_STATE_CHANGED;
+}
+
+
+class Not_implemented_tracker : public State_tracker
+{
+public:
+ bool enable(THD *thd)
+ { return false; }
+ bool update(THD *, set_var *)
+ { return false; }
+ bool store(THD *, String *)
+ { return false; }
+ void mark_as_changed(THD *, LEX_CSTRING *tracked_item_name)
+ {}
+
+};
+
+/**
+ Session_sysvars_tracker
+
+ This is a tracker class that enables & manages the tracking of session
+ system variables. It internally maintains a hash of user supplied variable
+ references and a boolean field to store if the variable was changed by the
+ last statement.
+*/
+
+class Session_sysvars_tracker : public State_tracker
+{
+private:
+
+ struct sysvar_node_st {
+ sys_var *m_svar;
+ bool *test_load;
+ bool m_changed;
+ };
+
+ class vars_list
+ {
+ private:
+ /**
+ Registered system variables. (@@session_track_system_variables)
+ A hash to store the name of all the system variables specified by the
+ user.
+ */
+ HASH m_registered_sysvars;
+ /** Size of buffer for string representation */
+ size_t buffer_length;
+ myf m_mem_flag;
+ /**
+ If TRUE then we want to check all session variable.
+ */
+ bool track_all;
+ void init()
+ {
+ my_hash_init(&m_registered_sysvars,
+ &my_charset_bin,
+ 4, 0, 0, (my_hash_get_key) sysvars_get_key,
+ my_free, MYF(HASH_UNIQUE |
+ ((m_mem_flag & MY_THREAD_SPECIFIC) ?
+ HASH_THREAD_SPECIFIC : 0)));
+ }
+ void free_hash()
+ {
+ if (my_hash_inited(&m_registered_sysvars))
+ {
+ my_hash_free(&m_registered_sysvars);
+ }
+ }
+
+ uchar* search(const sys_var *svar)
+ {
+ return (my_hash_search(&m_registered_sysvars, (const uchar *)&svar,
+ sizeof(sys_var *)));
+ }
+
+ public:
+ vars_list() :
+ buffer_length(0)
+ {
+ m_mem_flag= current_thd ? MY_THREAD_SPECIFIC : 0;
+ init();
+ }
+
+ size_t get_buffer_length()
+ {
+      DBUG_ASSERT(buffer_length != 0); // should have been computed before being queried
+ return buffer_length;
+ }
+ ~vars_list()
+ {
+ /* free the allocated hash. */
+ if (my_hash_inited(&m_registered_sysvars))
+ {
+ my_hash_free(&m_registered_sysvars);
+ }
+ }
+
+ uchar* insert_or_search(sysvar_node_st *node, const sys_var *svar)
+ {
+ uchar *res;
+ res= search(svar);
+ if (!res)
+ {
+ if (track_all)
+ {
+ insert(node, svar, m_mem_flag);
+ return search(svar);
+ }
+ }
+ return res;
+ }
+
+ bool insert(sysvar_node_st *node, const sys_var *svar, myf mem_flag);
+ void reinit();
+ void reset();
+ inline bool is_enabled()
+ {
+ return track_all || m_registered_sysvars.records;
+ }
+ void copy(vars_list* from, THD *thd);
+ bool parse_var_list(THD *thd, LEX_STRING var_list, bool throw_error,
+ CHARSET_INFO *char_set, bool take_mutex);
+ bool construct_var_list(char *buf, size_t buf_len);
+ bool store(THD *thd, String *buf);
+ };
+ /**
+ Two objects of vars_list type are maintained to manage
+ various operations.
+ */
+ vars_list *orig_list, *tool_list;
+
+public:
+ Session_sysvars_tracker()
+ {
+ orig_list= new (std::nothrow) vars_list();
+ tool_list= new (std::nothrow) vars_list();
+ }
+
+ ~Session_sysvars_tracker()
+ {
+ if (orig_list)
+ delete orig_list;
+ if (tool_list)
+ delete tool_list;
+ }
+
+ size_t get_buffer_length()
+ {
+ return orig_list->get_buffer_length();
+ }
+ bool construct_var_list(char *buf, size_t buf_len)
+ {
+ return orig_list->construct_var_list(buf, buf_len);
+ }
+
+ /**
+ Method used to check the validity of string provided
+ for session_track_system_variables during the server
+ startup.
+ */
+ static bool server_init_check(THD *thd, CHARSET_INFO *char_set,
+ LEX_STRING var_list)
+ {
+ return check_var_list(thd, var_list, false, char_set, false);
+ }
+
+ static bool server_init_process(THD *thd, CHARSET_INFO *char_set,
+ LEX_STRING var_list)
+ {
+ vars_list dummy;
+ bool result;
+ result= dummy.parse_var_list(thd, var_list, false, char_set, false);
+ if (!result)
+ dummy.construct_var_list(var_list.str, var_list.length + 1);
+ return result;
+ }
+
+ void reset();
+ bool enable(THD *thd);
+ bool check_str(THD *thd, LEX_STRING *val);
+ bool update(THD *thd, set_var *var);
+ bool store(THD *thd, String *buf);
+ void mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name);
+ /* callback */
+ static uchar *sysvars_get_key(const char *entry, size_t *length,
+ my_bool not_used __attribute__((unused)));
+
+ // hash iterators
+ static my_bool name_array_filler(void *ptr, void *data_ptr);
+ static my_bool store_variable(void *ptr, void *data_ptr);
+ static my_bool reset_variable(void *ptr, void *data_ptr);
+
+ static bool check_var_list(THD *thd, LEX_STRING var_list, bool throw_error,
+ CHARSET_INFO *char_set, bool take_mutex);
+};
+
+
+
+/**
+ Current_schema_tracker,
+
+ This is a tracker class that enables & manages the tracking of current
+ schema for a particular connection.
+*/
+
+class Current_schema_tracker : public State_tracker
+{
+private:
+ bool schema_track_inited;
+ void reset();
+
+public:
+
+ Current_schema_tracker()
+ {
+ schema_track_inited= false;
+ }
+
+ bool enable(THD *thd)
+ { return update(thd, NULL); }
+ bool update(THD *thd, set_var *var);
+ bool store(THD *thd, String *buf);
+};
+
+/*
+ Session_state_change_tracker
+
+ This is a boolean tracker class that will monitor any change that contributes
+ to a session state change.
+ Attributes that contribute to session state change include:
+ - Successful change to System variables
+ - User defined variables assignments
+ - temporary tables created, altered or deleted
+ - prepared statements added or removed
+ - change in current database
+ - change of current role
+*/
+
+class Session_state_change_tracker : public State_tracker
+{
+private:
+
+ void reset();
+
+public:
+ Session_state_change_tracker();
+ bool enable(THD *thd)
+ { return update(thd, NULL); };
+ bool update(THD *thd, set_var *var);
+ bool store(THD *thd, String *buf);
+ bool is_state_changed(THD*);
+};
+
+
+/* To be used in expanding the buffer. */
+static const unsigned int EXTRA_ALLOC= 1024;
+
+
+void Session_sysvars_tracker::vars_list::reinit()
+{
+ buffer_length= 0;
+ track_all= 0;
+ if (m_registered_sysvars.records)
+ my_hash_reset(&m_registered_sysvars);
+}
+
+/**
+ Copy the given list.
+
+ @param from Source vars_list object.
+  @param thd      THD handle to retrieve the charset in use.
+
+ @retval true there is something to track
+ @retval false nothing to track
+*/
+
+void Session_sysvars_tracker::vars_list::copy(vars_list* from, THD *thd)
+{
+ reinit();
+ track_all= from->track_all;
+ free_hash();
+ buffer_length= from->buffer_length;
+ m_registered_sysvars= from->m_registered_sysvars;
+ from->init();
+}
+
+/**
+ Inserts the variable to be tracked into m_registered_sysvars hash.
+
+ @param node Node to be inserted.
+ @param svar address of the system variable
+
+ @retval false success
+ @retval true error
+*/
+
+bool Session_sysvars_tracker::vars_list::insert(sysvar_node_st *node,
+ const sys_var *svar,
+ myf mem_flag)
+{
+ if (!node)
+ {
+ if (!(node= (sysvar_node_st *) my_malloc(sizeof(sysvar_node_st),
+ MYF(MY_WME | mem_flag))))
+ {
+ reinit();
+ return true;
+ }
+ }
+
+ node->m_svar= (sys_var *)svar;
+ node->test_load= node->m_svar->test_load;
+ node->m_changed= false;
+ if (my_hash_insert(&m_registered_sysvars, (uchar *) node))
+ {
+ my_free(node);
+ if (!search((sys_var *)svar))
+ {
+ //EOF (error is already reported)
+ reinit();
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ Parse the specified system variables list.
+
+ @Note In case of invalid entry a warning is raised per invalid entry.
+ This is done in order to handle 'potentially' valid system
+ variables from uninstalled plugins which might get installed in
+ future.
+
+
+ @param thd [IN] The thd handle.
+ @param var_list [IN] System variable list.
+ @param throw_error [IN] bool when set to true, returns an error
+ in case of invalid/duplicate values.
+  @param char_set       [IN]    character set information used for string
+ manipulations.
+ @param take_mutex [IN] take LOCK_plugin
+
+ @return
+ true Error
+ false Success
+*/
+bool Session_sysvars_tracker::vars_list::parse_var_list(THD *thd,
+ LEX_STRING var_list,
+ bool throw_error,
+ CHARSET_INFO *char_set,
+ bool take_mutex)
+{
+ const char separator= ',';
+ char *token, *lasts= NULL;
+ size_t rest= var_list.length;
+ reinit();
+
+ if (!var_list.str || var_list.length == 0)
+ {
+ buffer_length= 1;
+ return false;
+ }
+
+ if(!strcmp(var_list.str,(const char *)"*"))
+ {
+ track_all= true;
+ buffer_length= 2;
+ return false;
+ }
+
+ buffer_length= var_list.length + 1;
+ token= var_list.str;
+
+ track_all= false;
+ /*
+ If Lock to the plugin mutex is not acquired here itself, it results
+ in having to acquire it multiple times in find_sys_var_ex for each
+ token value. Hence the mutex is handled here to avoid a performance
+ overhead.
+ */
+ if (!thd || take_mutex)
+ mysql_mutex_lock(&LOCK_plugin);
+ for (;;)
+ {
+ sys_var *svar;
+ LEX_STRING var;
+ uint not_used;
+
+ lasts= (char *) memchr(token, separator, rest);
+
+ var.str= token;
+ if (lasts)
+ {
+ var.length= (lasts - token);
+ rest-= var.length + 1;
+ }
+ else
+ var.length= rest;
+
+ /* Remove leading/trailing whitespace. */
+ trim_whitespace(char_set, &var, &not_used);
+
+ if(!strcmp(var.str,(const char *)"*"))
+ {
+ track_all= true;
+ }
+ else if ((svar=
+ find_sys_var_ex(thd, var.str, var.length, throw_error, true)))
+ {
+ if (insert(NULL, svar, m_mem_flag) == TRUE)
+ goto error;
+ }
+ else if (throw_error && thd)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+                          "%.*s is not a valid system variable and will "
+ "be ignored.", (int)var.length, token);
+ }
+ else
+ goto error;
+
+ if (lasts)
+ token= lasts + 1;
+ else
+ break;
+ }
+ if (!thd || take_mutex)
+ mysql_mutex_unlock(&LOCK_plugin);
+
+ return false;
+
+error:
+ if (!thd || take_mutex)
+ mysql_mutex_unlock(&LOCK_plugin);
+ return true;
+}
+
+
+/**
+  Validate a comma-separated list of system variable names.
+
+  Unlike parse_var_list(), nothing is inserted into the tracker hash;
+  the list is only checked.  The wildcard "*" (alone or as one element
+  of the list) is always accepted; any other token must name an
+  existing system variable.
+
+  @param thd         The thd handle (may be NULL during server start).
+  @param var_list    The list of names to validate.
+  @param throw_error True if an invalid name should raise a warning and
+                     be skipped instead of failing the whole list.
+  @param char_set    Character set of var_list (used when trimming).
+  @param take_mutex  True if LOCK_plugin must be acquired here.
+
+  @retval true  Error
+  @retval false Success
+*/
+bool Session_sysvars_tracker::check_var_list(THD *thd,
+                                             LEX_STRING var_list,
+                                             bool throw_error,
+                                             CHARSET_INFO *char_set,
+                                             bool take_mutex)
+{
+  const char separator= ',';
+  char *token, *lasts= NULL;
+  size_t rest= var_list.length;
+
+  /* An empty list or the sole wildcard "*" is trivially valid. */
+  if (!var_list.str || var_list.length == 0 ||
+      !strcmp(var_list.str, "*"))
+  {
+    return false;
+  }
+
+  token= var_list.str;
+
+  /*
+    If Lock to the plugin mutex is not acquired here itself, it results
+    in having to acquire it multiple times in find_sys_var_ex for each
+    token value. Hence the mutex is handled here to avoid a performance
+    overhead.
+  */
+  if (!thd || take_mutex)
+    mysql_mutex_lock(&LOCK_plugin);
+  for (;;)
+  {
+    LEX_STRING var;
+    uint not_used;
+
+    lasts= (char *) memchr(token, separator, rest);
+
+    var.str= token;
+    if (lasts)
+    {
+      var.length= (lasts - token);
+      rest-= var.length + 1;
+    }
+    else
+      var.length= rest;
+
+    /* Remove leading/trailing whitespace. */
+    trim_whitespace(char_set, &var, &not_used);
+
+    /*
+      Bug fix: the original test was inverted (!strcmp), which made the
+      wildcard "*" fail validation while unknown variable names passed
+      through unchecked.  A token is invalid only if it is not "*" AND
+      not a known system variable.
+    */
+    if (strcmp(var.str, "*") &&
+        !find_sys_var_ex(thd, var.str, var.length, throw_error, true))
+    {
+      if (throw_error && take_mutex && thd)
+      {
+        /* Bug fix: adjacent literals used to concatenate to "willbe". */
+        push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+                            ER_WRONG_VALUE_FOR_VAR,
+                            "%.*s is not a valid system variable and will "
+                            "be ignored.", (int)var.length, token);
+      }
+      else
+      {
+        if (!thd || take_mutex)
+          mysql_mutex_unlock(&LOCK_plugin);
+        return true;
+      }
+    }
+
+    if (lasts)
+      token= lasts + 1;
+    else
+      break;
+  }
+  if (!thd || take_mutex)
+    mysql_mutex_unlock(&LOCK_plugin);
+
+  return false;
+}
+
+/* Cursor state for collecting tracked-variable name pointers from the hash. */
+struct name_array_filler_data
+{
+ LEX_CSTRING **names;
+ uint idx;
+
+};
+
+/** Collects variable references into array (my_hash_iterate callback). */
+my_bool Session_sysvars_tracker::name_array_filler(void *ptr,
+ void *data_ptr)
+{
+ Session_sysvars_tracker::sysvar_node_st *node=
+ (Session_sysvars_tracker::sysvar_node_st *)ptr;
+ name_array_filler_data *data= (struct name_array_filler_data *)data_ptr;
+ /* Skip variables whose owning plugin is no longer loaded. */
+ if (*node->test_load)
+ data->names[data->idx++]= &node->m_svar->name;
+ return FALSE; /* FALSE: keep iterating the hash */
+}
+
+/* qsort comparator for the variable-name references array. */
+static int name_array_sorter(const void *a, const void *b)
+{
+ LEX_CSTRING **an= (LEX_CSTRING **)a, **bn=(LEX_CSTRING **)b;
+ size_t min= MY_MIN((*an)->length, (*bn)->length);
+ int res= strncmp((*an)->str, (*bn)->str, min);
+ /*
+   Tie-break on length when one name is a prefix of the other.
+   NOTE(review): as written (bn - an) the LONGER name sorts first;
+   confirm this ordering is intentional.
+ */
+ if (res == 0)
+ res= ((int)(*bn)->length)- ((int)(*an)->length);
+ return res;
+}
+
+/**
+  Construct a comma-separated list of the tracked variables' names
+  (from the internal hash) into the caller-supplied buffer.
+
+  @param buf     [OUT] Destination buffer; '\0'-terminated on success.
+  @param buf_len [IN]  Size of buf in bytes.
+
+  @retval true  Error (buffer too small or out of memory)
+  @retval false Success
+*/
+bool Session_sysvars_tracker::vars_list::construct_var_list(char *buf,
+                                                            size_t buf_len)
+{
+  struct name_array_filler_data data;
+  size_t left= buf_len;
+  size_t names_size= m_registered_sysvars.records * sizeof(LEX_CSTRING *);
+  const char separator= ',';
+
+  if (unlikely(buf_len < 1))
+    return true;
+
+  /* Wildcard tracking is rendered as the literal string "*". */
+  if (unlikely(track_all))
+  {
+    if (buf_len < 2)
+      return true;
+    buf[0]= '*';
+    buf[1]= '\0';
+    return false;
+  }
+
+  if (m_registered_sysvars.records == 0)
+  {
+    buf[0]= '\0';
+    return false;
+  }
+
+  data.names= (LEX_CSTRING**)my_safe_alloca(names_size);
+
+  if (unlikely(!data.names))
+    return true;
+
+  data.idx= 0;
+
+  /* LOCK_plugin protects the sys_var objects the hash nodes refer to. */
+  mysql_mutex_lock(&LOCK_plugin);
+  my_hash_iterate(&m_registered_sysvars, &name_array_filler, &data);
+  DBUG_ASSERT(data.idx <= m_registered_sysvars.records);
+
+  /*
+    We check number of records again here because number of variables
+    could be reduced in case of plugin unload.
+  */
+  if (m_registered_sysvars.records == 0)
+  {
+    mysql_mutex_unlock(&LOCK_plugin);
+    buf[0]= '\0';
+    /* Bug fix: this early return used to leak the names array. */
+    my_safe_afree(data.names, names_size);
+    return false;
+  }
+
+  my_qsort(data.names, data.idx, sizeof(LEX_CSTRING *),
+           &name_array_sorter);
+
+  for(uint i= 0; i < data.idx; i++)
+  {
+    LEX_CSTRING *nm= data.names[i];
+    size_t ln= nm->length + 1;
+    if (ln > left)
+    {
+      mysql_mutex_unlock(&LOCK_plugin);
+      my_safe_afree(data.names, names_size);
+      return true;
+    }
+    memcpy(buf, nm->str, nm->length);
+    buf[nm->length]= separator;
+    buf+= ln;
+    left-= ln;
+  }
+  mysql_mutex_unlock(&LOCK_plugin);
+
+  /* Replace the trailing separator with the terminating '\0'. */
+  buf--; buf[0]= '\0';
+  my_safe_afree(data.names, names_size);
+
+  return false;
+}
+
+/**
+ Enable session tracker by parsing global value of tracked variables.
+
+ @param thd [IN] The thd handle.
+
+ @retval true Error
+ @retval false Success
+*/
+
+bool Session_sysvars_tracker::enable(THD *thd)
+{
+ mysql_mutex_lock(&LOCK_plugin);
+ LEX_STRING tmp;
+ tmp.str= global_system_variables.session_track_system_variables;
+ tmp.length= safe_strlen(tmp.str);
+ /* take_mutex=false: LOCK_plugin is already held by this function. */
+ if (tool_list->parse_var_list(thd, tmp,
+ true, thd->charset(), false) == true)
+ {
+ mysql_mutex_unlock(&LOCK_plugin);
+ return true;
+ }
+ mysql_mutex_unlock(&LOCK_plugin);
+ /* Parse succeeded: publish the parsed set as the active list. */
+ orig_list->copy(tool_list, thd);
+ m_enabled= true;
+
+ return false;
+}
+
+
+/**
+ Check system variable name(s).
+
+ @note This function is called from the ON_CHECK() function of the
+ session_track_system_variables' sys_var class.
+
+ @param thd [IN] The thd handle.
+ @param var [IN] A pointer to set_var holding the specified list of
+ system variable names.
+
+ @retval true Error
+ @retval false Success
+*/
+
+/* Thin wrapper: validate *val, raising warnings, taking LOCK_plugin. */
+inline bool Session_sysvars_tracker::check_str(THD *thd, LEX_STRING *val)
+{
+ return Session_sysvars_tracker::check_var_list(thd, *val, true,
+ thd->charset(), true);
+}
+
+
+/**
+ Once the value of the @@session_track_system_variables has been
+ successfully updated, this function calls
+ Session_sysvars_tracker::vars_list::copy updating the hash in orig_list
+ which represents the system variables to be tracked.
+
+ @note This function is called from the ON_UPDATE() function of the
+ session_track_system_variables' sys_var class.
+
+ @param thd [IN] The thd handle.
+
+ @retval true Error
+ @retval false Success
+*/
+
+bool Session_sysvars_tracker::update(THD *thd, set_var *var)
+{
+ /*
+   Parse into tool_list first: if parsing fails (e.g. out of memory),
+   the currently-tracked list (orig_list) remains unchanged.
+ */
+ tool_list->reinit();
+ if (tool_list->parse_var_list(thd, var->save_result.string_value, true,
+ thd->charset(), true))
+ return true;
+ orig_list->copy(tool_list, thd);
+ return false;
+}
+
+
+/*
+ Function and structure to support storing variables from hash to the buffer.
+*/
+
+/* Parameter bundle passed to store_variable() through my_hash_iterate. */
+struct st_store_variable_param
+{
+ THD *thd;
+ String *buf;
+};
+
+/*
+  Hash-iterate callback: if this variable changed, append its name and
+  current session value (both length-encoded) to the tracker buffer.
+  Returns TRUE (stops iteration) only on buffer-space failure.
+*/
+my_bool Session_sysvars_tracker::store_variable(void *ptr, void *data_ptr)
+{
+ Session_sysvars_tracker::sysvar_node_st *node=
+ (Session_sysvars_tracker::sysvar_node_st *)ptr;
+ if (node->m_changed)
+ {
+ THD *thd= ((st_store_variable_param *)data_ptr)->thd;
+ String *buf= ((st_store_variable_param *)data_ptr)->buf;
+ char val_buf[SHOW_VAR_FUNC_BUFF_SIZE];
+ SHOW_VAR show;
+ CHARSET_INFO *charset;
+ size_t val_length, length;
+ /* LOCK_plugin guards test_load and plugin-provided sys_vars. */
+ mysql_mutex_lock(&LOCK_plugin);
+ if (!*node->test_load)
+ {
+ /* Owning plugin was unloaded: silently skip this variable. */
+ mysql_mutex_unlock(&LOCK_plugin);
+ return false;
+ }
+ sys_var *svar= node->m_svar;
+ bool is_plugin= svar->cast_pluginvar();
+ /* Keep the mutex across get_one_variable() only for plugin vars. */
+ if (!is_plugin)
+ mysql_mutex_unlock(&LOCK_plugin);
+
+ /* As its always system variable. */
+ show.type= SHOW_SYS;
+ show.name= svar->name.str;
+ show.value= (char *) svar;
+
+ const char *value= get_one_variable(thd, &show, OPT_SESSION, SHOW_SYS, NULL,
+ &charset, val_buf, &val_length);
+ if (is_plugin)
+ mysql_mutex_unlock(&LOCK_plugin);
+
+ /* Entity size: length-encoded name plus length-encoded value. */
+ length= net_length_size(svar->name.length) +
+ svar->name.length +
+ net_length_size(val_length) +
+ val_length;
+
+ compile_time_assert(SESSION_TRACK_SYSTEM_VARIABLES < 251);
+ if (unlikely((1 + net_length_size(length) + length + buf->length() >=
+ MAX_PACKET_LENGTH) ||
+ buf->reserve(1 + net_length_size(length) + length,
+ EXTRA_ALLOC)))
+ return true;
+
+
+ /* Session state type (SESSION_TRACK_SYSTEM_VARIABLES) */
+ buf->q_append((char)SESSION_TRACK_SYSTEM_VARIABLES);
+
+ /* Length of the overall entity. */
+ buf->q_net_store_length((ulonglong)length);
+
+ /* System variable's name (length-encoded string). */
+ buf->q_net_store_data((const uchar*)svar->name.str, svar->name.length);
+
+ /* System variable's value (length-encoded string). */
+ buf->q_net_store_data((const uchar*)value, val_length);
+ }
+ return false;
+}
+
+/* Append every changed tracked variable to buf; true on failure. */
+bool Session_sysvars_tracker::vars_list::store(THD *thd, String *buf)
+{
+ st_store_variable_param data= {thd, buf};
+ return my_hash_iterate(&m_registered_sysvars, &store_variable, &data);
+}
+
+/**
+ Store the data for changed system variables in the specified buffer.
+ Once the data is stored, we reset the flags related to state-change
+ (see reset()).
+
+ @param thd [IN] The thd handle.
+ @param buf [INOUT] Buffer to store the information to.
+
+ @retval true Error
+ @retval false Success
+*/
+
+bool Session_sysvars_tracker::store(THD *thd, String *buf)
+{
+ if (!orig_list->is_enabled())
+ return false;
+
+ if (orig_list->store(thd, buf))
+ return true;
+
+ /* Clear per-variable and tracker-level changed flags for next stmt. */
+ reset();
+
+ return false;
+}
+
+
+/**
+ Mark the system variable as changed.
+
+ @param [IN] pointer on a variable
+*/
+
+void Session_sysvars_tracker::mark_as_changed(THD *thd,
+ LEX_CSTRING *var)
+{
+ sysvar_node_st *node= NULL;
+ /*
+   NOTE(review): callers pass a sys_var* disguised as LEX_CSTRING* to
+   satisfy the State_tracker interface — confirm against all call sites.
+ */
+ sys_var *svar= (sys_var *)var;
+ /*
+   Check if the specified system variable is being tracked, if so
+   mark it as changed and also set the class's m_changed flag.
+ */
+ if (orig_list->is_enabled() &&
+ (node= (sysvar_node_st *) (orig_list->insert_or_search(node, svar))))
+ {
+ node->m_changed= true;
+ State_tracker::mark_as_changed(thd, var);
+ }
+}
+
+
+/**
+ Supply key to a hash.
+
+ @param entry [IN] A single entry.
+ @param length [OUT] Length of the key.
+ @param not_used Unused.
+
+ @return Pointer to the key buffer.
+*/
+
+uchar *Session_sysvars_tracker::sysvars_get_key(const char *entry,
+ size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ /* The hash key is the sys_var pointer itself, not the variable name. */
+ *length= sizeof(sys_var *);
+ return (uchar *) &(((sysvar_node_st *) entry)->m_svar);
+}
+
+
+/* Function to support resetting hash nodes for the variables */
+
+/* Hash-iterate callback: clear one node's changed flag. */
+my_bool Session_sysvars_tracker::reset_variable(void *ptr,
+ void *data_ptr)
+{
+ ((Session_sysvars_tracker::sysvar_node_st *)ptr)->m_changed= false;
+ return false; /* continue iteration */
+}
+
+/* Clear the changed flag on every node in the hash. */
+void Session_sysvars_tracker::vars_list::reset()
+{
+ my_hash_iterate(&m_registered_sysvars, &reset_variable, NULL);
+}
+
+/**
+ Prepare/reset the m_registered_sysvars hash for next statement.
+*/
+
+void Session_sysvars_tracker::reset()
+{
+
+ orig_list->reset();
+ m_changed= false;
+}
+
+/* Fetch this session's sysvars tracker from the THD's tracker registry. */
+static Session_sysvars_tracker* sysvar_tracker(THD *thd)
+{
+ return (Session_sysvars_tracker*)
+ thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER);
+}
+
+/* C-style entry point: validate a tracked-variables list string. */
+bool sysvartrack_validate_value(THD *thd, const char *str, size_t len)
+{
+ LEX_STRING tmp= {(char *)str, len};
+ return Session_sysvars_tracker::server_init_check(thd, system_charset_info,
+ tmp);
+}
+/* C-style entry point: normalize a tracked-variables list in place. */
+bool sysvartrack_reprint_value(THD *thd, char *str, size_t len)
+{
+ LEX_STRING tmp= {str, len};
+ return Session_sysvars_tracker::server_init_process(thd,
+ system_charset_info,
+ tmp);
+}
+/* C-style entry point: re-parse the tracked-variables list after SET. */
+bool sysvartrack_update(THD *thd, set_var *var)
+{
+ return sysvar_tracker(thd)->update(thd, var);
+}
+/* C-style entry point: buffer size needed by sysvartrack_value_construct. */
+size_t sysvartrack_value_len(THD *thd)
+{
+ return sysvar_tracker(thd)->get_buffer_length();
+}
+/* C-style entry point: render the tracked-variables list into val. */
+bool sysvartrack_value_construct(THD *thd, char *val, size_t len)
+{
+ return sysvar_tracker(thd)->construct_var_list(val, len);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ Enable/disable the tracker based on @@session_track_schema's value.
+
+ @param thd [IN] The thd handle.
+
+ @return
+ false (always)
+*/
+
+bool Current_schema_tracker::update(THD *thd, set_var *)
+{
+ m_enabled= thd->variables.session_track_schema;
+ return false;
+}
+
+
+/**
+ Store the schema name as length-encoded string in the specified buffer.
+
+ @param thd [IN] The thd handle.
+ @param buf [INOUT] Buffer to store the information to.
+
+ @retval false Success
+ @retval true Error
+*/
+
+bool Current_schema_tracker::store(THD *thd, String *buf)
+{
+ size_t db_length, length;
+
+ /*
+   The wire format is (redundantly) double length-encoded: the payload
+   is the length-encoded schema name, and 'length' below covers that
+   name plus the size of its own length prefix.
+ */
+ length= db_length= thd->db_length;
+ length += net_length_size(length);
+
+ compile_time_assert(SESSION_TRACK_SCHEMA < 251);
+ compile_time_assert(NAME_LEN < 251);
+ DBUG_ASSERT(length < 251);
+ if (unlikely((1 + 1 + length + buf->length() >= MAX_PACKET_LENGTH) ||
+ buf->reserve(1 + 1 + length, EXTRA_ALLOC)))
+ return true;
+
+ /* Session state type (SESSION_TRACK_SCHEMA) */
+ buf->q_append((char)SESSION_TRACK_SCHEMA);
+
+ /* Length of the overall entity. */
+ buf->q_net_store_length(length);
+
+ /* Length and current schema name */
+ buf->q_net_store_data((const uchar *)thd->db, thd->db_length);
+
+ reset();
+
+ return false;
+}
+
+
+/**
+ Reset the m_changed flag for next statement.
+
+ @return void
+*/
+
+void Current_schema_tracker::reset()
+{
+ m_changed= false;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+/* Start disabled, with no pending changes and inherited characteristics. */
+Transaction_state_tracker::Transaction_state_tracker()
+{
+ m_enabled = false;
+ tx_changed = TX_CHG_NONE;
+ tx_curr_state =
+ tx_reported_state= TX_EMPTY;
+ tx_read_flags = TX_READ_INHERIT;
+ tx_isol_level = TX_ISOL_INHERIT;
+}
+
+/**
+ Enable/disable the tracker based on @@session_track_transaction_info.
+
+ @param thd [IN] The thd handle.
+
+ @retval true if updating the tracking level failed
+ @retval false otherwise
+*/
+
+bool Transaction_state_tracker::update(THD *thd, set_var *)
+{
+ if (thd->variables.session_track_transaction_info != TX_TRACK_NONE)
+ {
+ /*
+   If we only just turned reporting on (rather than changing between
+   state and characteristics reporting), start from a defined state.
+ */
+ if (!m_enabled)
+ {
+ tx_curr_state =
+ tx_reported_state = TX_EMPTY;
+ tx_changed |= TX_CHG_STATE;
+ m_enabled= true;
+ }
+ if (thd->variables.session_track_transaction_info == TX_TRACK_CHISTICS)
+ tx_changed |= TX_CHG_CHISTICS;
+ mark_as_changed(thd, NULL);
+ }
+ else
+ m_enabled= false;
+
+ return false;
+}
+
+
+/**
+ Store the transaction state (and, optionally, characteristics)
+ as length-encoded string in the specified buffer. Once the data
+ is stored, we reset the flags related to state-change (see reset()).
+
+
+ @param thd [IN] The thd handle.
+ @param buf [INOUT] Buffer to store the information to.
+
+ @retval false Success
+ @retval true Error
+*/
+
+/* SQL spellings of isolation levels, indexed by enum_tx_isol_level - 1. */
+static LEX_CSTRING isol[]= {
+ { STRING_WITH_LEN("READ UNCOMMITTED") },
+ { STRING_WITH_LEN("READ COMMITTED") },
+ { STRING_WITH_LEN("REPEATABLE READ") },
+ { STRING_WITH_LEN("SERIALIZABLE") }
+};
+
+bool Transaction_state_tracker::store(THD *thd, String *buf)
+{
+ /* STATE -- fixed 8-character state string, one flag per position. */
+ if (tx_changed & TX_CHG_STATE)
+ {
+ if (unlikely((11 + buf->length() >= MAX_PACKET_LENGTH) ||
+ buf->reserve(11, EXTRA_ALLOC)))
+ return true;
+
+ buf->q_append((char)SESSION_TRACK_TRANSACTION_STATE);
+
+ buf->q_append((char)9); // whole packet length
+ buf->q_append((char)8); // results length
+
+ buf->q_append((char)((tx_curr_state & TX_EXPLICIT) ? 'T' :
+ ((tx_curr_state & TX_IMPLICIT) ? 'I' : '_')));
+ buf->q_append((char)((tx_curr_state & TX_READ_UNSAFE) ? 'r' : '_'));
+ buf->q_append((char)(((tx_curr_state & TX_READ_TRX) ||
+ (tx_curr_state & TX_WITH_SNAPSHOT)) ? 'R' : '_'));
+ buf->q_append((char)((tx_curr_state & TX_WRITE_UNSAFE) ? 'w' : '_'));
+ buf->q_append((char)((tx_curr_state & TX_WRITE_TRX) ? 'W' : '_'));
+ buf->q_append((char)((tx_curr_state & TX_STMT_UNSAFE) ? 's' : '_'));
+ buf->q_append((char)((tx_curr_state & TX_RESULT_SET) ? 'S' : '_'));
+ buf->q_append((char)((tx_curr_state & TX_LOCKED_TABLES) ? 'L' : '_'));
+ }
+
+ /* CHARACTERISTICS -- How to restart the transaction */
+
+ if ((thd->variables.session_track_transaction_info == TX_TRACK_CHISTICS) &&
+ (tx_changed & TX_CHG_CHISTICS))
+ {
+ bool is_xa= (thd->transaction.xid_state.xa_state != XA_NOTR);
+ size_t start;
+
+ /* 2 length by 1 byte and code */
+ if (unlikely((1 + 1 + 1 + 110 + buf->length() >= MAX_PACKET_LENGTH) ||
+ buf->reserve(1 + 1 + 1, EXTRA_ALLOC)))
+ return true;
+
+ compile_time_assert(SESSION_TRACK_TRANSACTION_CHARACTERISTICS < 251);
+ /* Session state type (SESSION_TRACK_TRANSACTION_CHARACTERISTICS) */
+ buf->q_append((char)SESSION_TRACK_TRANSACTION_CHARACTERISTICS);
+
+ /* placeholders for lengths. will be filled in at the end */
+ buf->q_append('\0');
+ buf->q_append('\0');
+
+ start= buf->length();
+
+ {
+ /*
+   We have four basic replay scenarios:
+
+   a) SET TRANSACTION was used, but before an actual transaction
+      was started, the load balancer moves the connection elsewhere.
+      In that case, the same one-shots should be set up in the
+      target session. (read-only/read-write; isolation-level)
+
+   b) The initial transaction has begun; the relevant characteristics
+      are the session defaults, possibly overridden by previous
+      SET TRANSACTION statements, possibly overridden or extended
+      by options passed to the START TRANSACTION statement.
+      If the load balancer wishes to move this transaction,
+      it needs to be replayed with the correct characteristics.
+      (read-only/read-write from SET or START;
+      isolation-level from SET only, snapshot from START only)
+
+   c) A subsequent transaction started with START TRANSACTION
+      (which is legal syntax in lieu of COMMIT AND CHAIN in MySQL)
+      may add/modify the current one-shots:
+
+      - It may set up a read-only/read-write one-shot.
+        This one-shot will override the value used in the previous
+        transaction (whether that came from the default or a one-shot),
+        and, like all one-shots currently do, it will carry over into
+        any subsequent transactions that don't explicitly override them
+        in turn. This behavior is not guaranteed in the docs and may
+        change in the future, but the tracker item should correctly
+        reflect whatever behavior a given version of mysqld implements.
+
+      - It may also set up a WITH CONSISTENT SNAPSHOT one-shot.
+        This one-shot does not currently carry over into subsequent
+        transactions (meaning that with "traditional syntax", WITH
+        CONSISTENT SNAPSHOT can only be requested for the first part
+        of a transaction chain). Again, the tracker item should reflect
+        mysqld behavior.
+
+   d) A subsequent transaction started using COMMIT AND CHAIN
+      (or, for that matter, BEGIN WORK, which is currently
+      legal and equivalent syntax in MySQL, or START TRANSACTION
+      sans options) will re-use any one-shots set up so far
+      (with SET before the first transaction started, and with
+      all subsequent STARTs), except for WITH CONSISTENT SNAPSHOT,
+      which will never be chained and only applies when explicitly
+      given.
+
+   It bears noting that if we switch sessions in a follow-up
+   transaction, SET TRANSACTION would be illegal in the old
+   session (as a transaction is active), whereas in the target
+   session which is being prepared, it should be legal, as no
+   transaction (chain) should have started yet.
+
+   Therefore, we are free to generate SET TRANSACTION as a replay
+   statement even for a transaction that isn't the first in an
+   ongoing chain. Consider
+
+     SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+     START TRANSACTION READ ONLY, WITH CONSISTENT SNAPSHOT;
+     # work
+     COMMIT AND CHAIN;
+
+   If we switch away at this point, the replay in the new session
+   needs to be
+
+     SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+     START TRANSACTION READ ONLY;
+
+   When a transaction ends (COMMIT/ROLLBACK sans CHAIN), all
+   per-transaction characteristics are reset to the session's
+   defaults.
+
+   This also holds for a transaction ended implicitly! (transaction.cc)
+   Once again, the aim is to have the tracker item reflect on a
+   given mysqld's actual behavior.
+ */
+
+ /*
+   "ISOLATION LEVEL"
+   Only legal in SET TRANSACTION, so will always be replayed as such.
+ */
+ if (tx_isol_level != TX_ISOL_INHERIT)
+ {
+ /*
+   Unfortunately, we can't re-use tx_isolation_names /
+   tx_isolation_typelib as it hyphenates its items.
+ */
+ buf->append(STRING_WITH_LEN("SET TRANSACTION ISOLATION LEVEL "));
+ buf->append(isol[tx_isol_level - 1].str, isol[tx_isol_level - 1].length);
+ buf->append(STRING_WITH_LEN("; "));
+ }
+
+ /*
+   Start transaction will usually result in TX_EXPLICIT (transaction
+   started, but no data attached yet), except when WITH CONSISTENT
+   SNAPSHOT, in which case we may have data pending.
+   If it's an XA transaction, we don't go through here so we can
+   first print the trx access mode ("SET TRANSACTION READ ...")
+   separately before adding XA START (whereas with START TRANSACTION,
+   we can merge the access mode into the same statement).
+ */
+ if ((tx_curr_state & TX_EXPLICIT) && !is_xa)
+ {
+ buf->append(STRING_WITH_LEN("START TRANSACTION"));
+
+ /*
+   "WITH CONSISTENT SNAPSHOT"
+   Defaults to no, can only be enabled.
+   Only appears in START TRANSACTION.
+ */
+ if (tx_curr_state & TX_WITH_SNAPSHOT)
+ {
+ buf->append(STRING_WITH_LEN(" WITH CONSISTENT SNAPSHOT"));
+ if (tx_read_flags != TX_READ_INHERIT)
+ buf->append(STRING_WITH_LEN(","));
+ }
+
+ /*
+   "READ WRITE / READ ONLY" can be set globally, per-session,
+   or just for one transaction.
+
+   The latter case can take the form of
+   START TRANSACTION READ (WRITE|ONLY), or of
+   SET TRANSACTION READ (ONLY|WRITE).
+   (Both set thd->read_only for the upcoming transaction;
+   it will ultimately be re-set to the session default.)
+
+   As the regular session-variable tracker does not monitor the one-shot,
+   we'll have to do it here.
+
+   If READ is flagged as set explicitly (rather than just inherited
+   from the session's default), we'll get the actual bool from the THD.
+ */
+ if (tx_read_flags != TX_READ_INHERIT)
+ {
+ if (tx_read_flags == TX_READ_ONLY)
+ buf->append(STRING_WITH_LEN(" READ ONLY"));
+ else
+ buf->append(STRING_WITH_LEN(" READ WRITE"));
+ }
+ buf->append(STRING_WITH_LEN("; "));
+ }
+ else if (tx_read_flags != TX_READ_INHERIT)
+ {
+ /*
+   "READ ONLY" / "READ WRITE"
+   We could transform this to SET TRANSACTION even when it occurs
+   in START TRANSACTION, but for now, we'll resynthesize the original
+   command as closely as possible.
+ */
+ buf->append(STRING_WITH_LEN("SET TRANSACTION "));
+ if (tx_read_flags == TX_READ_ONLY)
+ buf->append(STRING_WITH_LEN("READ ONLY; "));
+ else
+ buf->append(STRING_WITH_LEN("READ WRITE; "));
+ }
+
+ /* XA transactions replay as "XA START 'gtrid'[,'bqual'][,formatID]". */
+ if ((tx_curr_state & TX_EXPLICIT) && is_xa)
+ {
+ XID *xid= &thd->transaction.xid_state.xid;
+ long glen, blen;
+
+ buf->append(STRING_WITH_LEN("XA START"));
+
+ if ((glen= xid->gtrid_length) > 0)
+ {
+ buf->append(STRING_WITH_LEN(" '"));
+ buf->append(xid->data, glen);
+
+ if ((blen= xid->bqual_length) > 0)
+ {
+ buf->append(STRING_WITH_LEN("','"));
+ buf->append(xid->data + glen, blen);
+ }
+ buf->append(STRING_WITH_LEN("'"));
+
+ if (xid->formatID != 1)
+ {
+ buf->append(STRING_WITH_LEN(","));
+ buf->append_ulonglong(xid->formatID);
+ }
+ }
+
+ buf->append(STRING_WITH_LEN("; "));
+ }
+
+ // discard trailing space
+ if (buf->length() > start)
+ buf->length(buf->length() - 1);
+ }
+
+ /* Patch the two length placeholders now that the payload size is known. */
+ {
+ size_t length= buf->length() - start;
+ uchar *place= (uchar *)(buf->ptr() + (start - 2));
+ DBUG_ASSERT(length < 249); // in fact < 110
+ DBUG_ASSERT(start >= 3);
+
+ DBUG_ASSERT((place - 1)[0] == SESSION_TRACK_TRANSACTION_CHARACTERISTICS);
+ /* Length of the overall entity. */
+ place[0]= (uchar)length + 1;
+ /* Transaction characteristics (length-encoded string). */
+ place[1]= (uchar)length;
+ }
+ }
+
+ reset();
+
+ return false;
+}
+
+
+/**
+ Reset the m_changed flag for next statement.
+*/
+
+void Transaction_state_tracker::reset()
+{
+ m_changed= false;
+ /* What we just reported becomes the new baseline for change detection. */
+ tx_reported_state= tx_curr_state;
+ tx_changed= TX_CHG_NONE;
+}
+
+
+/**
+ Helper function: turn table info into table access flag.
+ Accepts table lock type and engine type flag (transactional/
+ non-transactional), and returns the corresponding access flag
+ out of TX_READ_TRX, TX_READ_UNSAFE, TX_WRITE_TRX, TX_WRITE_UNSAFE.
+
+ @param thd [IN] The thd handle
+ @param l [IN] The table's access/lock type
+ @param has_trx [IN] Whether the table's engine is transactional
+
+ @return The table access flag
+*/
+
+enum_tx_state Transaction_state_tracker::calc_trx_state(THD *thd,
+ thr_lock_type l,
+ bool has_trx)
+{
+ enum_tx_state s;
+ /* Lock types up to TL_READ_NO_INSERT are read accesses. */
+ bool read= (l <= TL_READ_NO_INSERT);
+
+ if (read)
+ s= has_trx ? TX_READ_TRX : TX_READ_UNSAFE;
+ else
+ s= has_trx ? TX_WRITE_TRX : TX_WRITE_UNSAFE;
+
+ return s;
+}
+
+
+/**
+ Register the end of an (implicit or explicit) transaction.
+
+ @param thd [IN] The thd handle
+*/
+void Transaction_state_tracker::end_trx(THD *thd)
+{
+ DBUG_ASSERT(thd->variables.session_track_transaction_info > TX_TRACK_NONE);
+
+ if ((!m_enabled) || (thd->state_flags & Open_tables_state::BACKUPS_AVAIL))
+ return;
+
+ if (tx_curr_state != TX_EMPTY)
+ {
+ if (tx_curr_state & TX_EXPLICIT)
+ tx_changed |= TX_CHG_CHISTICS;
+ /* Everything except LOCK TABLES state is cleared by transaction end. */
+ tx_curr_state &= TX_LOCKED_TABLES;
+ }
+ update_change_flags(thd);
+}
+
+
+/**
+ Clear flags pertaining to the current statement or transaction.
+ May be called repeatedly within the same execution cycle.
+
+ @param thd [IN] The thd handle.
+ @param clear [IN] The flags to clear
+*/
+
+void Transaction_state_tracker::clear_trx_state(THD *thd, uint clear)
+{
+ if ((!m_enabled) || (thd->state_flags & Open_tables_state::BACKUPS_AVAIL))
+ return;
+
+ tx_curr_state &= ~clear;
+ update_change_flags(thd);
+}
+
+
+/**
+ Add flags pertaining to the current statement or transaction.
+ May be called repeatedly within the same execution cycle,
+ e.g. to add access info for more tables.
+
+ @param thd [IN] The thd handle.
+ @param add [IN] The flags to add
+*/
+
+void Transaction_state_tracker::add_trx_state(THD *thd, uint add)
+{
+ if ((!m_enabled) || (thd->state_flags & Open_tables_state::BACKUPS_AVAIL))
+ return;
+
+ if (add == TX_EXPLICIT)
+ {
+ /* Always send characteristic item (if tracked), always replace state. */
+ tx_changed |= TX_CHG_CHISTICS;
+ tx_curr_state = TX_EXPLICIT;
+ }
+
+ /*
+   If we're not in an implicit or explicit transaction, but
+   autocommit==0 and tables are accessed, we flag "implicit transaction."
+ */
+ else if (!(tx_curr_state & (TX_EXPLICIT|TX_IMPLICIT)) &&
+ (thd->variables.option_bits & OPTION_NOT_AUTOCOMMIT) &&
+ (add &
+ (TX_READ_TRX | TX_READ_UNSAFE | TX_WRITE_TRX | TX_WRITE_UNSAFE)))
+ tx_curr_state |= TX_IMPLICIT;
+
+ /*
+   Only flag state when in transaction or LOCK TABLES is added.
+ */
+ if ((tx_curr_state & (TX_EXPLICIT | TX_IMPLICIT)) ||
+ (add & TX_LOCKED_TABLES))
+ tx_curr_state |= add;
+
+ update_change_flags(thd);
+}
+
+
+/**
+ Add "unsafe statement" flag if applicable.
+
+ @param thd [IN] The thd handle (the unsafe-statement flag is read
+ from thd->lex).
+*/
+
+void Transaction_state_tracker::add_trx_state_from_thd(THD *thd)
+{
+ if (m_enabled)
+ {
+ /* Flag the current statement unsafe if the parser marked it so. */
+ if (thd->lex->is_stmt_unsafe())
+ add_trx_state(thd, TX_STMT_UNSAFE);
+ }
+}
+
+
+/**
+ Set read flags (read only/read write) pertaining to the next
+ transaction.
+
+ @param thd [IN] The thd handle.
+ @param flags [IN] The flags to set
+*/
+
+void Transaction_state_tracker::set_read_flags(THD *thd,
+ enum enum_tx_read_flags flags)
+{
+ if (m_enabled && (tx_read_flags != flags))
+ {
+ tx_read_flags = flags;
+ tx_changed |= TX_CHG_CHISTICS;
+ mark_as_changed(thd, NULL);
+ }
+}
+
+
+/**
+ Set isolation level pertaining to the next transaction.
+
+ @param thd [IN] The thd handle.
+ @param level [IN] The isolation level to set
+*/
+
+void Transaction_state_tracker::set_isol_level(THD *thd,
+ enum enum_tx_isol_level level)
+{
+ if (m_enabled && (tx_isol_level != level))
+ {
+ tx_isol_level = level;
+ tx_changed |= TX_CHG_CHISTICS;
+ mark_as_changed(thd, NULL);
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Start with no pending state change. */
+Session_state_change_tracker::Session_state_change_tracker()
+{
+ m_changed= false;
+}
+
+/**
+ Enable/disable the tracker based on @@session_track_state_change value.
+
+ @param thd [IN] The thd handle.
+ @return false (always)
+
+**/
+
+bool Session_state_change_tracker::update(THD *thd, set_var *)
+{
+ m_enabled= thd->variables.session_track_state_change;
+ return false;
+}
+
+/**
+ Store the '1' in the specified buffer when state is changed.
+
+ @param thd [IN] The thd handle.
+ @param buf [INOUT] Buffer to store the information to.
+
+ @retval false Success
+ @retval true Error
+**/
+
+bool Session_state_change_tracker::store(THD *thd, String *buf)
+{
+ /* Need 3 bytes: type, length, and the single '1' payload byte. */
+ if (unlikely((1 + 1 + 1 + buf->length() >= MAX_PACKET_LENGTH) ||
+ buf->reserve(1 + 1 + 1, EXTRA_ALLOC)))
+ return true;
+
+ compile_time_assert(SESSION_TRACK_STATE_CHANGE < 251);
+ /* Session state type (SESSION_TRACK_STATE_CHANGE) */
+ buf->q_append((char)SESSION_TRACK_STATE_CHANGE);
+
+ /* Length of the overall entity (1 byte) */
+ buf->q_append('\1');
+
+ DBUG_ASSERT(is_state_changed(thd));
+ buf->q_append('1');
+
+ reset();
+
+ return false;
+}
+
+
+/**
+ Reset the m_changed flag for next statement.
+*/
+
+void Session_state_change_tracker::reset()
+{
+ m_changed= false;
+}
+
+
+/**
+ Find if there is a session state change.
+*/
+
+bool Session_state_change_tracker::is_state_changed(THD *)
+{
+ return m_changed;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ @brief Initialize session tracker objects.
+*/
+
+Session_tracker::Session_tracker()
+{
+ /* track data ID fit into one byte in net coding */
+ compile_time_assert(SESSION_TRACK_always_at_the_end < 251);
+ /* one tracker could serve several tracking data */
+ compile_time_assert((uint)SESSION_TRACK_always_at_the_end >=
+ (uint)SESSION_TRACKER_END);
+
+ /* Trackers are allocated lazily in enable(), not here. */
+ for (int i= 0; i < SESSION_TRACKER_END; i++)
+ m_trackers[i]= NULL;
+}
+
+
+/**
+ @brief Enables the tracker objects.
+
+ @param thd [IN] The thread handle.
+
+ @return void
+*/
+
+void Session_tracker::enable(THD *thd)
+{
+ /*
+   Originally and correctly this allocation was in the constructor and
+   deallocation in the destructor, but in this case memory counting
+   system works incorrectly (for example in INSERT DELAYED thread)
+ */
+ deinit();
+ /*
+   NOTE(review): the std::nothrow results are not checked; on OOM the
+   enable() loop below would dereference NULL — confirm whether OOM is
+   considered fatal before this point.
+ */
+ m_trackers[SESSION_SYSVARS_TRACKER]=
+ new (std::nothrow) Session_sysvars_tracker();
+ m_trackers[CURRENT_SCHEMA_TRACKER]=
+ new (std::nothrow) Current_schema_tracker;
+ m_trackers[SESSION_STATE_CHANGE_TRACKER]=
+ new (std::nothrow) Session_state_change_tracker;
+ m_trackers[SESSION_GTIDS_TRACKER]=
+ new (std::nothrow) Not_implemented_tracker;
+ m_trackers[TRANSACTION_INFO_TRACKER]=
+ new (std::nothrow) Transaction_state_tracker;
+
+ for (int i= 0; i < SESSION_TRACKER_END; i++)
+ m_trackers[i]->enable(thd);
+}
+
+
+/**
+ Method called during the server startup to verify the contents
+ of @@session_track_system_variables.
+
+ @retval false Success
+ @retval true Failure
+*/
+
+bool Session_tracker::server_boot_verify(CHARSET_INFO *char_set)
+{
+ bool result;
+ LEX_STRING tmp;
+ tmp.str= global_system_variables.session_track_system_variables;
+ tmp.length= safe_strlen(tmp.str);
+ /* thd is NULL here: we are validating before any connection exists. */
+ result=
+ Session_sysvars_tracker::server_init_check(NULL, char_set, tmp);
+ return result;
+}
+
+
+/**
+ @brief Store all change information in the specified buffer.
+
+ @param thd [IN] The thd handle.
+ @param buf [OUT] Reference to the string buffer to which the state
+ change data needs to be written.
+*/
+
+void Session_tracker::store(THD *thd, String *buf)
+{
+ size_t start;
+
+ /*
+   Most tracker payloads fit in fewer than 251 bytes, so optimize for
+   that case: reserve one byte for the length prefix up front and move
+   the data only if a longer length encoding turns out to be needed.
+ */
+ buf->append('\0');
+ start= buf->length();
+
+ /* Get total length. */
+ for (int i= 0; i < SESSION_TRACKER_END; i++)
+ {
+ if (m_trackers[i]->is_changed() &&
+ m_trackers[i]->store(thd, buf))
+ {
+ buf->length(start); // it is safer to have 0-length block in case of error
+ return;
+ }
+ }
+
+ size_t length= buf->length() - start;
+ uchar *data= (uchar *)(buf->ptr() + start);
+ uint size;
+
+ /* Length needs more than one byte: shift the payload to make room. */
+ if ((size= net_length_size(length)) != 1)
+ {
+ if (buf->reserve(size - 1, EXTRA_ALLOC))
+ {
+ buf->length(start); // it is safer to have 0-length block in case of error
+ return;
+ }
+ memmove(data + (size - 1), data, length);
+ }
+
+ net_store_length(data - 1, length);
+}
+
+#endif //EMBEDDED_LIBRARY
diff --git a/sql/session_tracker.h b/sql/session_tracker.h
new file mode 100644
index 00000000000..684692aae0c
--- /dev/null
+++ b/sql/session_tracker.h
@@ -0,0 +1,304 @@
+#ifndef SESSION_TRACKER_INCLUDED
+#define SESSION_TRACKER_INCLUDED
+
+/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2016, 2017, MariaDB Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "m_string.h"
+#include "thr_lock.h"
+
+#ifndef EMBEDDED_LIBRARY
+/* forward declarations */
+class THD;
+class set_var;
+class String;
+
+
+enum enum_session_tracker
+{
+ SESSION_SYSVARS_TRACKER, /* Session system variables */
+ CURRENT_SCHEMA_TRACKER, /* Current schema */
+ SESSION_STATE_CHANGE_TRACKER,
+ SESSION_GTIDS_TRACKER, /* Tracks GTIDs */
+ TRANSACTION_INFO_TRACKER, /* Transaction state */
+ SESSION_TRACKER_END /* must be the last */
+};
+
+/**
+ State_tracker
+
+ An abstract class that defines the interface for any of the server's
+ 'session state change tracker'. A tracker, however, is a subclass of
+ this class which takes care of tracking the change in value of a
+ particular session state type and thus defines various methods listed in this
+ interface. The change information is later serialized and transmitted to
+ the client through protocol's OK packet.
+
+ Tracker system variables :-
+ A tracker is normally mapped to a system variable. So in order to enable,
+ disable or modify the sub-entities of a tracker, the user needs to modify
+ the respective system variable either through SET command or via command
+ line option. As required in system variable handling, this interface also
+ includes a function to help verify the supplied value (ON_UPDATE) of
+ the tracker system variable, namely update().
+*/
+
+class State_tracker
+{
+protected:
+ /**
+ Is tracking enabled for a particular session state type ?
+
+ @note: it is a cache of the corresponding thd->variables.session_track_xxx
+ variable
+ */
+ bool m_enabled;
+
+ /** Has the session state type changed ? */
+ bool m_changed;
+
+public:
+ /** Constructor */
+ State_tracker() : m_enabled(false), m_changed(false)
+ {}
+
+ /** Destructor */
+ virtual ~State_tracker()
+ {}
+
+ /** Getters */
+ bool is_enabled() const
+ { return m_enabled; }
+
+ bool is_changed() const
+ { return m_changed; }
+
+ /** Called in the constructor of THD*/
+ virtual bool enable(THD *thd)= 0;
+
+ /** To be invoked when the tracker's system variable is updated (ON_UPDATE).*/
+ virtual bool update(THD *thd, set_var *var)= 0;
+
+ /** Store changed data into the given buffer. */
+ virtual bool store(THD *thd, String *buf)= 0;
+
+ /** Mark the entity as changed. */
+ virtual void mark_as_changed(THD *thd, LEX_CSTRING *name);
+};
+
+bool sysvartrack_validate_value(THD *thd, const char *str, size_t len);
+bool sysvartrack_reprint_value(THD *thd, char *str, size_t len);
+bool sysvartrack_update(THD *thd, set_var *var);
+size_t sysvartrack_value_len(THD *thd);
+bool sysvartrack_value_construct(THD *thd, char *val, size_t len);
+
+
+/**
+ Session_tracker
+
+ This class holds an object each for all tracker classes and provides
+ methods necessary for systematic detection and generation of session
+ state change information.
+*/
+
+class Session_tracker
+{
+private:
+ State_tracker *m_trackers[SESSION_TRACKER_END];
+
+ /* The following two functions are private to disable copying. */
+ Session_tracker(Session_tracker const &other)
+ {
+ DBUG_ASSERT(FALSE);
+ }
+ Session_tracker& operator= (Session_tracker const &rhs)
+ {
+ DBUG_ASSERT(FALSE);
+ return *this;
+ }
+
+public:
+
+ Session_tracker();
+ ~Session_tracker()
+ {
+ deinit();
+ }
+
+ /* Trick to keep the memory accounting system happy. */
+ void deinit()
+ {
+ for (int i= 0; i < SESSION_TRACKER_END; i++)
+ {
+ if (m_trackers[i])
+ delete m_trackers[i];
+ m_trackers[i]= NULL;
+ }
+ }
+
+ void enable(THD *thd);
+ static bool server_boot_verify(CHARSET_INFO *char_set);
+
+ /** Returns the pointer to the tracker object for the specified tracker. */
+ inline State_tracker *get_tracker(enum_session_tracker tracker) const
+ {
+ return m_trackers[tracker];
+ }
+
+ inline void mark_as_changed(THD *thd, enum enum_session_tracker tracker,
+ LEX_CSTRING *data)
+ {
+ if (m_trackers[tracker]->is_enabled())
+ m_trackers[tracker]->mark_as_changed(thd, data);
+ }
+
+
+ void store(THD *thd, String *main_buf);
+};
+
+
+/*
+ Transaction_state_tracker
+*/
+
+/**
+ Transaction state (no transaction, transaction active, work attached, etc.)
+*/
+enum enum_tx_state {
+ TX_EMPTY = 0, ///< "none of the below"
+ TX_EXPLICIT = 1, ///< an explicit transaction is active
+ TX_IMPLICIT = 2, ///< an implicit transaction is active
+ TX_READ_TRX = 4, ///< transactional reads were done
+ TX_READ_UNSAFE = 8, ///< non-transaction reads were done
+ TX_WRITE_TRX = 16, ///< transactional writes were done
+ TX_WRITE_UNSAFE = 32, ///< non-transactional writes were done
+ TX_STMT_UNSAFE = 64, ///< "unsafe" (non-deterministic like UUID()) stmts
+ TX_RESULT_SET = 128, ///< result set was sent
+ TX_WITH_SNAPSHOT= 256, ///< WITH CONSISTENT SNAPSHOT was used
+ TX_LOCKED_TABLES= 512 ///< LOCK TABLES is active
+};
+
+
+/**
+ Transaction access mode
+*/
+enum enum_tx_read_flags {
+ TX_READ_INHERIT = 0, ///< not explicitly set, inherit session.tx_read_only
+ TX_READ_ONLY = 1, ///< START TRANSACTION READ ONLY, or tx_read_only=1
+ TX_READ_WRITE = 2, ///< START TRANSACTION READ WRITE, or tx_read_only=0
+};
+
+
+/**
+ Transaction isolation level
+*/
+enum enum_tx_isol_level {
+ TX_ISOL_INHERIT = 0, ///< not explicitly set, inherit session.tx_isolation
+ TX_ISOL_UNCOMMITTED = 1,
+ TX_ISOL_COMMITTED = 2,
+ TX_ISOL_REPEATABLE = 3,
+ TX_ISOL_SERIALIZABLE= 4
+};
+
+
+/**
+ Transaction tracking level
+*/
+enum enum_session_track_transaction_info {
+ TX_TRACK_NONE = 0, ///< do not send tracker items on transaction info
+ TX_TRACK_STATE = 1, ///< track transaction status
+ TX_TRACK_CHISTICS = 2 ///< track status and characteristics
+};
+
+
+/**
+ This is a tracker class that enables & manages the tracking of
+ current transaction info for a particular connection.
+*/
+
+class Transaction_state_tracker : public State_tracker
+{
+private:
+ /** Helper function: turn table info into table access flag */
+ enum_tx_state calc_trx_state(THD *thd, thr_lock_type l, bool has_trx);
+public:
+ /** Constructor */
+ Transaction_state_tracker();
+ bool enable(THD *thd)
+ { return update(thd, NULL); }
+ bool update(THD *thd, set_var *var);
+ bool store(THD *thd, String *buf);
+
+ /** Change transaction characteristics */
+ void set_read_flags(THD *thd, enum enum_tx_read_flags flags);
+ void set_isol_level(THD *thd, enum enum_tx_isol_level level);
+
+ /** Change transaction state */
+ void clear_trx_state(THD *thd, uint clear);
+ void add_trx_state(THD *thd, uint add);
+ void inline add_trx_state(THD *thd, thr_lock_type l, bool has_trx)
+ {
+ add_trx_state(thd, calc_trx_state(thd, l, has_trx));
+ }
+ void add_trx_state_from_thd(THD *thd);
+ void end_trx(THD *thd);
+
+
+private:
+ enum enum_tx_changed {
+ TX_CHG_NONE = 0, ///< no changes from previous stmt
+ TX_CHG_STATE = 1, ///< state has changed from previous stmt
+ TX_CHG_CHISTICS = 2 ///< characteristics have changed from previous stmt
+ };
+
+ /** any trackable changes caused by this statement? */
+ uint tx_changed;
+
+ /** transaction state */
+ uint tx_curr_state, tx_reported_state;
+
+ /** r/w or r/o set? session default? */
+ enum enum_tx_read_flags tx_read_flags;
+
+ /** isolation level */
+ enum enum_tx_isol_level tx_isol_level;
+
+ void reset();
+
+ inline void update_change_flags(THD *thd)
+ {
+ tx_changed &= uint(~TX_CHG_STATE);
+ tx_changed |= (tx_curr_state != tx_reported_state) ? TX_CHG_STATE : 0;
+ if (tx_changed != TX_CHG_NONE)
+ mark_as_changed(thd, NULL);
+ }
+};
+
+#define TRANSACT_TRACKER(X) \
+ do { if (thd->variables.session_track_transaction_info > TX_TRACK_NONE) \
+ {((Transaction_state_tracker *) \
+ thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER)) \
+ ->X; } } while(0)
+#define SESSION_TRACKER_CHANGED(A,B,C) \
+ thd->session_tracker.mark_as_changed(A,B,C)
+#else
+
+#define TRANSACT_TRACKER(X) do{}while(0)
+#define SESSION_TRACKER_CHANGED(A,B,C) do{}while(0)
+
+#endif //EMBEDDED_LIBRARY
+
+#endif /* SESSION_TRACKER_INCLUDED */
diff --git a/sql/set_var.cc b/sql/set_var.cc
index b5430c56865..a17e7b0aa58 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -64,7 +64,7 @@ int sys_var_init()
/* Must be already initialized. */
DBUG_ASSERT(system_charset_info != NULL);
- if (my_hash_init(&system_variable_hash, system_charset_info, 100, 0,
+ if (my_hash_init(&system_variable_hash, system_charset_info, 700, 0,
0, (my_hash_get_key) get_sys_var_length, 0, HASH_UNIQUE))
goto error;
@@ -78,6 +78,11 @@ error:
DBUG_RETURN(1);
}
+uint sys_var_elements()
+{
+ return system_variable_hash.records;
+}
+
int sys_var_add_options(DYNAMIC_ARRAY *long_options, int parse_flags)
{
uint saved_elements= long_options->elements;
@@ -110,6 +115,9 @@ void sys_var_end()
DBUG_VOID_RETURN;
}
+
+static bool static_test_load= TRUE;
+
/**
sys_var constructor
@@ -179,6 +187,8 @@ sys_var::sys_var(sys_var_chain *chain, const char *name_arg,
else
chain->first= this;
chain->last= this;
+
+ test_load= &static_test_load;
}
bool sys_var::update(THD *thd, set_var *var)
@@ -199,8 +209,28 @@ bool sys_var::update(THD *thd, set_var *var)
(on_update && on_update(this, thd, OPT_GLOBAL));
}
else
- return session_update(thd, var) ||
+ {
+ bool ret= session_update(thd, var) ||
(on_update && on_update(this, thd, OPT_SESSION));
+
+ /*
+ Make sure we don't session-track variables that are not actually
+ part of the session. tx_isolation and and tx_read_only for example
+ exist as GLOBAL, SESSION, and one-shot ("for next transaction only").
+ */
+ if ((var->type == OPT_SESSION) && (!ret))
+ {
+ SESSION_TRACKER_CHANGED(thd, SESSION_SYSVARS_TRACKER,
+ (LEX_CSTRING*)var->var);
+ /*
+ Here MySQL sends variable name to avoid reporting change of
+ the tracker itself, but we decided that it is not needed
+ */
+ SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
+ }
+
+ return ret;
+ }
}
uchar *sys_var::session_value_ptr(THD *thd, const LEX_STRING *base)
@@ -371,7 +401,7 @@ double sys_var::val_real(bool *is_null,
switch (show_type())
{
case_get_string_as_lex_string;
- case_for_integers(return val);
+ case_for_integers(return (double)val);
case_for_double(return val);
case SHOW_MY_BOOL: return *(my_bool*)value;
default:
@@ -422,6 +452,22 @@ void sys_var::do_deprecated_warning(THD *thd)
@retval true on error, false otherwise (warning or ok)
*/
+
+
+bool throw_bounds_warning(THD *thd, const char *name,const char *v)
+{
+ if (thd->variables.sql_mode & MODE_STRICT_ALL_TABLES)
+ {
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, v);
+ return true;
+ }
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_TRUNCATED_WRONG_VALUE,
+ ER_THD(thd, ER_TRUNCATED_WRONG_VALUE), name, v);
+ return false;
+}
+
+
bool throw_bounds_warning(THD *thd, const char *name,
bool fixed, bool is_unsigned, longlong v)
{
@@ -434,14 +480,12 @@ bool throw_bounds_warning(THD *thd, const char *name,
else
llstr(v, buf);
- if (thd->is_strict_mode())
+ if (thd->variables.sql_mode & MODE_STRICT_ALL_TABLES)
{
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buf);
return true;
}
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_TRUNCATED_WRONG_VALUE,
- ER_THD(thd, ER_TRUNCATED_WRONG_VALUE), name, buf);
+ return throw_bounds_warning(thd, name, buf);
}
return false;
}
@@ -454,14 +498,12 @@ bool throw_bounds_warning(THD *thd, const char *name, bool fixed, double v)
my_gcvt(v, MY_GCVT_ARG_DOUBLE, sizeof(buf) - 1, buf, NULL);
- if (thd->is_strict_mode())
+ if (thd->variables.sql_mode & MODE_STRICT_ALL_TABLES)
{
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buf);
return true;
}
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_TRUNCATED_WRONG_VALUE,
- ER_THD(thd, ER_TRUNCATED_WRONG_VALUE), name, buf);
+ return throw_bounds_warning(thd, name, buf);
}
return false;
}
@@ -565,10 +607,10 @@ int mysql_del_sys_var_chain(sys_var *first)
{
int result= 0;
- mysql_rwlock_wrlock(&LOCK_system_variables_hash);
+ mysql_prlock_wrlock(&LOCK_system_variables_hash);
for (sys_var *var= first; var; var= var->next)
result|= my_hash_delete(&system_variable_hash, (uchar*) var);
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
return result;
}
@@ -862,6 +904,8 @@ int set_var_user::update(THD *thd)
MYF(0));
return -1;
}
+
+ SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
return 0;
}
@@ -909,7 +953,11 @@ int set_var_role::check(THD *thd)
int set_var_role::update(THD *thd)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
- return acl_setrole(thd, role.str, access);
+ int res= acl_setrole(thd, role.str, access);
+ if (!res)
+ thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER,
+ NULL);
+ return res;
#else
return 0;
#endif
@@ -961,10 +1009,23 @@ int set_var_collation_client::check(THD *thd)
int set_var_collation_client::update(THD *thd)
{
- thd->variables.character_set_client= character_set_client;
- thd->variables.character_set_results= character_set_results;
- thd->variables.collation_connection= collation_connection;
- thd->update_charset();
+ thd->update_charset(character_set_client, collation_connection,
+ character_set_results);
+
+ /* Mark client collation variables as changed */
+#ifndef EMBEDDED_LIBRARY
+ if (thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)->is_enabled())
+ {
+ thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)->
+ mark_as_changed(thd, (LEX_CSTRING*)Sys_character_set_client_ptr);
+ thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)->
+ mark_as_changed(thd, (LEX_CSTRING*)Sys_character_set_results_ptr);
+ thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)->
+ mark_as_changed(thd, (LEX_CSTRING*)Sys_character_set_connection_ptr);
+ }
+ thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
+#endif //EMBEDDED_LIBRARY
+
thd->protocol_text.init(thd);
thd->protocol_binary.init(thd);
return 0;
@@ -1006,7 +1067,7 @@ int fill_sysvars(THD *thd, TABLE_LIST *tables, COND *cond)
cond= make_cond_for_info_schema(thd, cond, tables);
thd->count_cuted_fields= CHECK_FIELD_WARN;
- mysql_rwlock_rdlock(&LOCK_system_variables_hash);
+ mysql_prlock_rdlock(&LOCK_system_variables_hash);
for (uint i= 0; i < system_variable_hash.records; i++)
{
@@ -1168,7 +1229,7 @@ int fill_sysvars(THD *thd, TABLE_LIST *tables, COND *cond)
}
res= 0;
end:
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
thd->count_cuted_fields= save_count_cuted_fields;
return res;
}
diff --git a/sql/set_var.h b/sql/set_var.h
index 203969d6169..d92b244cd1a 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -48,6 +48,7 @@ struct sys_var_chain
int mysql_add_sys_var_chain(sys_var *chain);
int mysql_del_sys_var_chain(sys_var *chain);
+
/**
A class representing one system variable - that is something
that can be accessed as @@global.variable_name or @@session.variable_name,
@@ -60,6 +61,7 @@ class sys_var: protected Value_source // for double_from_string_with_check
public:
sys_var *next;
LEX_CSTRING name;
+ bool *test_load;
enum flag_enum { GLOBAL, SESSION, ONLY_SESSION, SCOPE_MASK=1023,
READONLY=1024, ALLOCATED=2048, PARSE_EARLY=4096,
NO_SET_STATEMENT=8192, AUTO_SET=16384};
@@ -240,6 +242,15 @@ protected:
uchar *global_var_ptr()
{ return ((uchar*)&global_system_variables) + offset; }
+
+ void *max_var_ptr()
+ {
+ return scope() == SESSION ? (((uchar*)&max_system_variables) + offset) :
+ 0;
+ }
+
+ friend class Session_sysvars_tracker;
+ friend class Session_tracker;
};
#include "sql_plugin.h" /* SHOW_HA_ROWS, SHOW_MY_BOOL */
@@ -385,7 +396,7 @@ extern SHOW_COMP_OPTION have_openssl;
SHOW_VAR* enumerate_sys_vars(THD *thd, bool sorted, enum enum_var_type type);
int fill_sysvars(THD *thd, TABLE_LIST *tables, COND *cond);
-sys_var *find_sys_var(THD *thd, const char *str, uint length=0);
+sys_var *find_sys_var(THD *thd, const char *str, size_t length=0);
int sql_set_variables(THD *thd, List<set_var_base> *var_list, bool free);
#define SYSVAR_AUTOSIZE(VAR,VAL) \
@@ -415,15 +426,18 @@ inline bool IS_SYSVAR_AUTOSIZE(void *ptr)
bool fix_delay_key_write(sys_var *self, THD *thd, enum_var_type type);
-ulonglong expand_sql_mode(ulonglong sql_mode);
-bool sql_mode_string_representation(THD *thd, ulonglong sql_mode, LEX_STRING *ls);
+sql_mode_t expand_sql_mode(sql_mode_t sql_mode);
+bool sql_mode_string_representation(THD *thd, sql_mode_t sql_mode, LEX_STRING *ls);
int default_regex_flags_pcre(const THD *thd);
-extern sys_var *Sys_autocommit_ptr;
+extern sys_var *Sys_autocommit_ptr, *Sys_last_gtid_ptr,
+ *Sys_character_set_client_ptr, *Sys_character_set_connection_ptr,
+ *Sys_character_set_results_ptr;
CHARSET_INFO *get_old_charset_by_name(const char *old_name);
int sys_var_init();
+uint sys_var_elements();
int sys_var_add_options(DYNAMIC_ARRAY *long_options, int parse_flags);
void sys_var_end(void);
bool check_has_super(sys_var *self, THD *thd, set_var *var);
diff --git a/sql/share/CMakeLists.txt b/sql/share/CMakeLists.txt
index 4293c0b528c..2980e6153f5 100644
--- a/sql/share/CMakeLists.txt
+++ b/sql/share/CMakeLists.txt
@@ -38,6 +38,7 @@ russian
czech
french
serbian
+hindi
)
SET(files
diff --git a/sql/share/charsets/Index.xml b/sql/share/charsets/Index.xml
index 9764d629625..912d196cc3c 100644
--- a/sql/share/charsets/Index.xml
+++ b/sql/share/charsets/Index.xml
@@ -67,6 +67,12 @@ To make maintaining easier please:
<collation name="latin2_hungarian_ci" id="21" order="Hungarian"/>
<collation name="latin2_croatian_ci" id="27" order="Croatian"/>
<collation name="latin2_bin" id="77" order="Binary" flag="binary"/>
+ <collation name="latin2_general_nopad_ci" id="1033" flag="nopad">
+ <rules>
+ <import source="latin2_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="latin2_nopad_bin" id="1101" flag="binary" flag="nopad"/>
</charset>
<charset name="dec8">
@@ -83,6 +89,12 @@ To make maintaining easier please:
<order>Portuguese</order>
<order>Spanish</order>
</collation>
+ <collation name="dec8_swedish_nopad_ci" id="1027" flag="nopad">
+ <rules>
+ <import source="dec8_swedish_ci"/>
+ </rules>
+ </collation>
+ <collation name="dec8_nopad_bin" id="1093" flag="binary" flag="nopad"/>
</charset>
<charset name="cp850">
@@ -102,6 +114,12 @@ To make maintaining easier please:
<order>Spanish</order>
</collation>
<collation name="cp850_bin" id="80" order="Binary" flag="binary"/>
+ <collation name="cp850_general_nopad_ci" id="1028" flag="nopad">
+ <rules>
+ <import source="cp850_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="cp850_nopad_bin" id="1104" flag="binary" flag="nopad"/>
</charset>
<charset name="latin1">
@@ -163,6 +181,12 @@ To make maintaining easier please:
<order>Portuguese</order>
<order>Spanish</order>
</collation>
+ <collation name="hp8_english_nopad_ci" id="1030" flag="nopad">
+ <rules>
+ <import source="hp8_english_ci"/>
+ </rules>
+ </collation>
+ <collation name="hp8_nopad_bin" id="1096" flag="binary" flag="nopad"/>
</charset>
<charset name="koi8r">
@@ -172,6 +196,12 @@ To make maintaining easier please:
<alias>cskoi8r</alias>
<collation name="koi8r_general_ci" id="7" order="Russian" flag="primary"/>
<collation name="koi8r_bin" id="74" order="Binary" flag="binary"/>
+ <collation name="koi8r_general_nopad_ci" id="1031" flag="nopad">
+ <rules>
+ <import source="koi8r_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="koi8r_nopad_bin" id="1098" flag="binary" flag="nopad"/>
</charset>
<charset name="swe7">
@@ -180,6 +210,12 @@ To make maintaining easier please:
<alias>iso-646-se</alias>
<collation name="swe7_swedish_ci" id="10" order="Swedish" flag="primary"/>
<collation name="swe7_bin" id="82" order="Binary" flag="binary"/>
+ <collation name="swe7_swedish_nopad_ci" id="1034" flag="nopad">
+ <rules>
+ <import source="swe7_swedish_ci"/>
+ </rules>
+ </collation>
+ <collation name="swe7_nopad_bin" id="1106" flag="binary" flag="nopad"/>
</charset>
<charset name="ascii">
@@ -192,6 +228,12 @@ To make maintaining easier please:
<alias>iso646-us</alias>
<collation name="ascii_general_ci" id="11" order="English" flag="primary"/>
<collation name="ascii_bin" id="65" order="Binary" flag="binary"/>
+ <collation name="ascii_general_nopad_ci" id="1035" flag="nopad">
+ <rules>
+ <import source="ascii_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="ascii_nopad_bin" id="1089" flag="binary" flag="nopad"/>
</charset>
<charset name="ujis">
@@ -259,6 +301,12 @@ To make maintaining easier please:
<order>Mongolian</order>
<order>Ukrainian</order>
</collation>
+ <collation name="cp1251_general_nopad_ci" id="1075" flag="nopad">
+ <rules>
+ <import source="cp1251_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="cp1251_nopad_bin" id="1074" flag="binary" flag="nopad"/>
</charset>
<charset name="hebrew">
@@ -269,6 +317,12 @@ To make maintaining easier please:
<alias>iso-ir-138</alias>
<collation name="hebrew_general_ci" id="16" order="Hebrew" flag="primary"/>
<collation name="hebrew_bin" id="71" order="Binary" flag="binary"/>
+ <collation name="hebrew_general_nopad_ci" id="1040" flag="nopad">
+ <rules>
+ <import source="hebrew_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="hebrew_nopad_bin" id="1095" flag="binary" flag="nopad"/>
</charset>
<charset name="tis620">
@@ -319,6 +373,12 @@ To make maintaining easier please:
<order>Lithuanian</order>
</collation>
<collation name="latin7_bin" id="79" order="Binary" flag="binary"/>
+ <collation name="latin7_general_nopad_ci" id="1065" flag="nopad">
+ <rules>
+ <import source="latin7_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="latin7_nopad_bin" id="1103" flag="binary" flag="nopad"/>
</charset>
<charset name="koi8u">
@@ -327,6 +387,12 @@ To make maintaining easier please:
<alias>koi8-u</alias>
<collation name="koi8u_general_ci" id="22" order="Ukranian" flag="primary"/>
<collation name="koi8u_bin" id="75" order="Binary" flag="binary"/>
+ <collation name="koi8u_general_nopad_ci" id="1046" flag="nopad">
+ <rules>
+ <import source="koi8u_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="koi8u_nopad_bin" id="1099" flag="binary" flag="nopad"/>
</charset>
<charset name="gb2312">
@@ -354,6 +420,12 @@ To make maintaining easier please:
<alias>iso-ir-126</alias>
<collation name="greek_general_ci" id="25" order="Greek" flag="primary"/>
<collation name="greek_bin" id="70" order="Binary" flag="binary"/>
+ <collation name="greek_general_nopad_ci" id="1049" flag="nopad">
+ <rules>
+ <import source="greek_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="greek_nopad_bin" id="1094" flag="binary" flag="nopad"/>
</charset>
<charset name="cp1250">
@@ -380,6 +452,12 @@ To make maintaining easier please:
<flag>compiled</flag>
</collation>
<collation name="cp1250_bin" id="66" order="Binary" flag="binary"/>
+ <collation name="cp1250_general_nopad_ci" id="1050" flag="nopad">
+ <rules>
+ <import source="cp1250_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="cp1250_nopad_bin" id="1090" flag="binary" flag="nopad"/>
</charset>
<charset name="gbk">
@@ -407,6 +485,12 @@ To make maintaining easier please:
<order>Latvian</order>
<order>Lithuanian</order>
</collation>
+ <collation name="cp1257_general_nopad_ci" id="1083" flag="nopad">
+ <rules>
+ <import source="cp1257_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="cp1257_nopad_bin" id="1082" flag="binary" flag="nopad"/>
<!--collation name="cp1257_ci" id="60"/-->
<!--collation name="cp1257_cs" id="61"/-->
</charset>
@@ -422,6 +506,12 @@ To make maintaining easier please:
<alias>turkish</alias>
<collation name="latin5_turkish_ci" id="30" order="Turkish" flag="primary"/>
<collation name="latin5_bin" id="78" order="Binary" flag="binary"/>
+ <collation name="latin5_turkish_nopad_ci" id="1054" flag="nopad">
+ <rules>
+ <import source="latin5_turkish_ci"/>
+ </rules>
+ </collation>
+ <collation name="latin5_nopad_bin" id="1102" flag="binary" flag="nopad"/>
</charset>
<charset name="armscii8">
@@ -430,6 +520,12 @@ To make maintaining easier please:
<alias>armscii-8</alias>
<collation name="armscii8_general_ci" id="32" order="Armenian" flag="primary"/>
<collation name="armscii8_bin" id="64" order="Binary" flag="binary"/>
+ <collation name="armscii8_general_nopad_ci" id="1056" flag="nopad">
+ <rules>
+ <import source="armscii8_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="armscii8_nopad_bin" id="1088" flag="binary" flag="nopad"/>
</charset>
<charset name="utf8">
@@ -468,6 +564,12 @@ To make maintaining easier please:
<alias>DOSCyrillicRussian</alias>
<collation name="cp866_general_ci" id="36" order="Russian" flag="primary"/>
<collation name="cp866_bin" id="68" order="Binary" flag="binary"/>
+ <collation name="cp866_general_nopad_ci" id="1060" flag="nopad">
+ <rules>
+ <import source="cp866_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="cp866_nopad_bin" id="1092" flag="binary" flag="nopad"/>
</charset>
<charset name="keybcs2">
@@ -475,6 +577,12 @@ To make maintaining easier please:
<description>DOS Kamenicky Czech-Slovak</description>
<collation name="keybcs2_general_ci" id="37" order="Czech" flag="primary"/>
<collation name="keybcs2_bin" id="73" order="Binary" flag="binary"/>
+ <collation name="keybcs2_general_nopad_ci" id="1061" flag="nopad">
+ <rules>
+ <import source="keybcs2_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="keybcs2_nopad_bin" id="1097" flag="binary" flag="nopad"/>
</charset>
<charset name="macce">
@@ -491,6 +599,12 @@ To make maintaining easier please:
<order>Sorbian</order>
</collation>
<collation name="macce_bin" id="43" order="Binary" flag="binary"/>
+ <collation name="macce_general_nopad_ci" id="1062" flag="nopad">
+ <rules>
+ <import source="macce_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="macce_nopad_bin" id="1067" flag="binary" flag="nopad"/>
</charset>
<charset name="macroman">
@@ -513,6 +627,12 @@ To make maintaining easier please:
<!--collation name="macroman_ci" id="54"/-->
<!--collation name="macroman_ci_ai" id="55"/-->
<!--collation name="macroman_cs" id="56"/-->
+ <collation name="macroman_general_nopad_ci" id="1063" flag="nopad">
+ <rules>
+ <import source="macroman_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="macroman_nopad_bin" id="1077" flag="binary" flag="nopad"/>
</charset>
<charset name="cp852">
@@ -531,6 +651,12 @@ To make maintaining easier please:
<order>Sorbian</order>
</collation>
<collation name="cp852_bin" id="81" order="Binary" flag="binary"/>
+ <collation name="cp852_general_nopad_ci" id="1064" flag="nopad">
+ <rules>
+ <import source="cp852_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="cp852_nopad_bin" id="1105" flag="binary" flag="nopad"/>
</charset>
<charset name="cp1256">
@@ -545,6 +671,12 @@ To make maintaining easier please:
<order>Pakistani</order>
<order>Urdu</order>
</collation>
+ <collation name="cp1256_general_nopad_ci" id="1081" flag="nopad">
+ <rules>
+ <import source="cp1256_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="cp1256_nopad_bin" id="1091" flag="binary" flag="nopad"/>
</charset>
<charset name="geostd8">
@@ -552,6 +684,12 @@ To make maintaining easier please:
<description>GEOSTD8 Georgian</description>
<collation name="geostd8_general_ci" id="92" order="Georgian" flag="primary"/>
<collation name="geostd8_bin" id="93" order="Binary" flag="binary"/>
+ <collation name="geostd8_general_nopad_ci" id="1116" flag="nopad">
+ <rules>
+ <import source="geostd8_general_ci"/>
+ </rules>
+ </collation>
+ <collation name="geostd8_nopad_bin" id="1117" flag="binary" flag="nopad"/>
</charset>
<charset name="binary">
@@ -596,4 +734,3 @@ To make maintaining easier please:
</charset>
</charsets>
-
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 14662c0eb38..60f1253dd63 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -1,4 +1,4 @@
-languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u, bulgarian=bgn cp1251;
+languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u, bulgarian=bgn cp1251, hindi=hindi utf8;
default-language eng
@@ -17,6 +17,7 @@ ER_NO
fre "NON"
ger "Nein"
greek "ΟΧΙ"
+ hindi "नहीं"
hun "NEM"
kor "아니오"
nor "NEI"
@@ -37,6 +38,7 @@ ER_YES
fre "OUI"
ger "Ja"
greek "ΝΑΙ"
+ hindi "हाँ"
hun "IGEN"
ita "SI"
kor "예"
@@ -59,6 +61,7 @@ ER_CANT_CREATE_FILE
fre "Ne peut créer le fichier '%-.200s' (Errcode: %M)"
ger "Kann Datei '%-.200s' nicht erzeugen (Fehler: %M)"
greek "Αδύνατη η δημιουργία του αρχείου '%-.200s' (κωδικός λάθους: %M)"
+ hindi "फ़ाइल '%-.200s' नहीं बन सका (errno: %M)"
hun "A '%-.200s' file nem hozhato letre (hibakod: %M)"
ita "Impossibile creare il file '%-.200s' (errno: %M)"
jpn "ファイル '%-.200s' を作成できません。(エラー番号: %M)"
@@ -84,6 +87,7 @@ ER_CANT_CREATE_TABLE
fre "Ne peut créer la table %`s.%`s (Errcode: %M)"
ger "Kann Tabelle %`s.%`s nicht erzeugen (Fehler: %M)"
greek "Αδύνατη η δημιουργία του πίνακα %`s.%`s (κωδικός λάθους: %M)"
+ hindi "टेबल '%`s.%`s' नहीं बन सका (errno: %M)"
hun "A %`s.%`s tabla nem hozhato letre (hibakod: %M)"
ita "Impossibile creare la tabella %`s.%`s (errno: %M)"
jpn "%`s.%`s テーブルが作れません.(errno: %M)"
@@ -108,6 +112,7 @@ ER_CANT_CREATE_DB
fre "Ne peut créer la base '%-.192s' (Erreur %M)"
ger "Kann Datenbank '%-.192s' nicht erzeugen (Fehler: %M)"
greek "Αδύνατη η δημιουργία της βάσης δεδομένων '%-.192s' (κωδικός λάθους: %M)"
+ hindi "डेटाबेस '%-.192s' नहीं बन सका (errno: %M)"
hun "Az '%-.192s' adatbazis nem hozhato letre (hibakod: %M)"
ita "Impossibile creare il database '%-.192s' (errno: %M)"
jpn "データベース '%-.192s' を作成できません。(エラー番号: %M)"
@@ -132,6 +137,7 @@ ER_DB_CREATE_EXISTS
fre "Ne peut créer la base '%-.192s'; elle existe déjà"
ger "Kann Datenbank '%-.192s' nicht erzeugen. Datenbank existiert bereits"
greek "Αδύνατη η δημιουργία της βάσης δεδομένων '%-.192s'; Η βάση δεδομένων υπάρχει ήδη"
+ hindi "डेटाबेस '%-.192s' नहीं बन सकता है; यह डेटाबेस पहले से ही मौजूद है"
hun "Az '%-.192s' adatbazis nem hozhato letre Az adatbazis mar letezik"
ita "Impossibile creare il database '%-.192s'; il database esiste"
jpn "データベース '%-.192s' を作成できません。データベースはすでに存在します。"
@@ -142,7 +148,7 @@ ER_DB_CREATE_EXISTS
por "Não pode criar o banco de dados '%-.192s'; este banco de dados já existe"
rum "Nu pot sa creez baza de date '%-.192s'; baza de date exista deja"
rus "Невозможно создать базу данных '%-.192s'. База данных уже существует"
- serbian "Ne mogu da kreiram bazu '%-.192s'; baza već postoji."
+ serbian "Ne mogu da kreiram bazu '%-.192s'; baza već postoji"
slo "Nemôžem vytvoriť databázu '%-.192s'; databáza existuje"
spa "No puedo crear base de datos '%-.192s'; la base de datos ya existe"
swe "Databasen '%-.192s' existerar redan"
@@ -156,6 +162,7 @@ ER_DB_DROP_EXISTS
fre "Ne peut effacer la base '%-.192s'; elle n'existe pas"
ger "Kann Datenbank '%-.192s' nicht löschen; Datenbank nicht vorhanden"
greek "Αδύνατη η διαγραφή της βάσης δεδομένων '%-.192s'. Η βάση δεδομένων δεν υπάρχει"
+ hindi "डेटाबेस '%-.192s' ड्रॉप नहीं कर सकते हैं; यह डेटाबेस मौजूद नहीं है"
hun "A(z) '%-.192s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik"
ita "Impossibile cancellare '%-.192s'; il database non esiste"
jpn "データベース '%-.192s' を削除できません。データベースは存在しません。"
@@ -166,7 +173,7 @@ ER_DB_DROP_EXISTS
por "Não pode eliminar o banco de dados '%-.192s'; este banco de dados não existe"
rum "Nu pot sa drop baza de date '%-.192s'; baza da date este inexistenta"
rus "Невозможно удалить базу данных '%-.192s'. Такой базы данных нет"
- serbian "Ne mogu da izbrišem bazu '%-.192s'; baza ne postoji."
+ serbian "Ne mogu da izbrišem bazu '%-.192s'; baza ne postoji"
slo "Nemôžem zmazať databázu '%-.192s'; databáza neexistuje"
spa "No puedo eliminar base de datos '%-.192s'; la base de datos no existe"
swe "Kan inte radera databasen '%-.192s'; databasen finns inte"
@@ -180,6 +187,7 @@ ER_DB_DROP_DELETE
fre "Ne peut effacer la base '%-.192s' (erreur %M)"
ger "Fehler beim Löschen der Datenbank ('%-.192s' kann nicht gelöscht werden, Fehler: %M)"
greek "Παρουσιάστηκε πρόβλημα κατά τη διαγραφή της βάσης δεδομένων (αδύνατη η διαγραφή '%-.192s', κωδικός λάθους: %M)"
+ hindi "डेटाबेस ड्रॉप में त्रुटि हुई ('%-.192s' हटा नहीं सकते, errno: %M)"
hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %M)"
ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %M)"
jpn "データベース削除エラー ('%-.192s' を削除できません。エラー番号: %M)"
@@ -204,6 +212,7 @@ ER_DB_DROP_RMDIR
fre "Erreur en effaçant la base (rmdir '%-.192s', erreur %M)"
ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.192s' kann nicht gelöscht werden, Fehler: %M)"
greek "Παρουσιάστηκε πρόβλημα κατά τη διαγραφή της βάσης δεδομένων (αδύνατη η διαγραφή του φακέλλου '%-.192s', κωδικός λάθους: %M)"
+ hindi "डेटाबेस ड्रॉप में त्रुटि हुई ('%-.192s' rmdir नहीं कर सकते, errno: %M)"
hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %M)"
ita "Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %M)"
jpn "データベース削除エラー (ディレクトリ '%-.192s' を削除できません。エラー番号: %M)"
@@ -228,6 +237,7 @@ ER_CANT_DELETE_FILE
fre "Erreur en effaçant '%-.192s' (Errcode: %M)"
ger "Fehler beim Löschen von '%-.192s' (Fehler: %M)"
greek "Παρουσιάστηκε πρόβλημα κατά τη διαγραφή '%-.192s' (κωδικός λάθους: %M)"
+ hindi "'%-.192s' के हटाने पर त्रुटि हुई (errno: %M)"
hun "Torlesi hiba: '%-.192s' (hibakod: %M)"
ita "Errore durante la cancellazione di '%-.192s' (errno: %M)"
jpn "ファイル '%-.192s' の削除エラー (エラー番号: %M)"
@@ -252,6 +262,7 @@ ER_CANT_FIND_SYSTEM_REC
fre "Ne peut lire un enregistrement de la table 'system'"
ger "Datensatz in der Systemtabelle nicht lesbar"
greek "Αδύνατη η ανάγνωση εγγραφής από πίνακα του συστήματος"
+ hindi "सिस्टम टेबल से रिकॉर्ड नहीं पढ़ सके"
hun "Nem olvashato rekord a rendszertablaban"
ita "Impossibile leggere il record dalla tabella di sistema"
jpn "システム表のレコードを読み込めません。"
@@ -276,6 +287,7 @@ ER_CANT_GET_STAT
fre "Ne peut obtenir le status de '%-.200s' (Errcode: %M)"
ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %M)"
greek "Αδύνατη η λήψη πληροφοριών για την κατάσταση του '%-.200s' (κωδικός λάθους: %M)"
+ hindi "'%-.200s' की अवस्था प्राप्त नहीं कर सके (errno: %M)"
hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %M)"
ita "Impossibile leggere lo stato di '%-.200s' (errno: %M)"
jpn "'%-.200s' の状態を取得できません。(エラー番号: %M)"
@@ -300,6 +312,7 @@ ER_CANT_GET_WD
fre "Ne peut obtenir le répertoire de travail (Errcode: %M)"
ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %M)"
greek "Ο φάκελλος εργασίας δεν βρέθηκε (κωδικός λάθους: %M)"
+ hindi "Working डाइरेक्टरी प्राप्त नहीं कर सके (errno: %M)"
hun "A munkakonyvtar nem allapithato meg (hibakod: %M)"
ita "Impossibile leggere la directory di lavoro (errno: %M)"
jpn "作業ディレクトリを取得できません。(エラー番号: %M)"
@@ -324,6 +337,7 @@ ER_CANT_LOCK
fre "Ne peut verrouiller le fichier (Errcode: %M)"
ger "Datei kann nicht gesperrt werden (Fehler: %M)"
greek "Το αρχείο δεν μπορεί να κλειδωθεί (κωδικός λάθους: %M)"
+ hindi "फ़ाइल लॉक नहीं कर सके (errno: %M)"
hun "A file nem zarolhato. (hibakod: %M)"
ita "Impossibile il locking il file (errno: %M)"
jpn "ファイルをロックできません。(エラー番号: %M)"
@@ -348,6 +362,7 @@ ER_CANT_OPEN_FILE
fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %M)"
ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %M)"
greek "Δεν είναι δυνατό να ανοιχτεί το αρχείο: '%-.200s' (κωδικός λάθους: %M)"
+ hindi "फ़ाइल '%-.200s' नहीं खोल सकते (errno: %M)"
hun "A '%-.200s' file nem nyithato meg (hibakod: %M)"
ita "Impossibile aprire il file: '%-.200s' (errno: %M)"
jpn "ファイル '%-.200s' をオープンできません。(エラー番号: %M)"
@@ -372,6 +387,7 @@ ER_FILE_NOT_FOUND
fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %M)"
ger "Kann Datei '%-.200s' nicht finden (Fehler: %M)"
greek "Δεν βρέθηκε το αρχείο: '%-.200s' (κωδικός λάθους: %M)"
+ hindi "फ़ाइल '%-.200s' नहीं मिला (errno: %M)"
hun "A(z) '%-.200s' file nem talalhato (hibakod: %M)"
ita "Impossibile trovare il file: '%-.200s' (errno: %M)"
jpn "ファイル '%-.200s' が見つかりません。(エラー番号: %M)"
@@ -396,6 +412,7 @@ ER_CANT_READ_DIR
fre "Ne peut lire le répertoire de '%-.192s' (Errcode: %M)"
ger "Verzeichnis von '%-.192s' nicht lesbar (Fehler: %M)"
greek "Δεν είναι δυνατό να διαβαστεί ο φάκελλος του '%-.192s' (κωδικός λάθους: %M)"
+ hindi "'%-.192s' की डायरेक्टरी नहीं पढ़ सके (errno: %M)"
hun "A(z) '%-.192s' konyvtar nem olvashato. (hibakod: %M)"
ita "Impossibile leggere la directory di '%-.192s' (errno: %M)"
jpn "ディレクトリ '%-.192s' を読み込めません。(エラー番号: %M)"
@@ -420,6 +437,7 @@ ER_CANT_SET_WD
fre "Ne peut changer le répertoire pour '%-.192s' (Errcode: %M)"
ger "Kann nicht in das Verzeichnis '%-.192s' wechseln (Fehler: %M)"
greek "Αδύνατη η αλλαγή του τρέχοντος καταλόγου σε '%-.192s' (κωδικός λάθους: %M)"
+ hindi "'%-.192s' डायरेक्टरी में नहीं बदल सके (errno: %M)"
hun "Konyvtarvaltas nem lehetseges a(z) '%-.192s'-ba. (hibakod: %M)"
ita "Impossibile cambiare la directory in '%-.192s' (errno: %M)"
jpn "ディレクトリ '%-.192s' に移動できません。(エラー番号: %M)"
@@ -444,6 +462,7 @@ ER_CHECKREAD
fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.192s'"
ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.192s' geändert"
greek "Η εγγραφή έχει αλλάξει από την τελευταία φορά που ανασύρθηκε από τον πίνακα '%-.192s'"
+ hindi "रिकॉर्ड टेबल '%-.192s' पिछली बार पढ़े जाने के बाद से बदल गया है"
hun "A(z) '%-.192s' tablaban talalhato rekord megvaltozott az utolso olvasas ota"
ita "Il record e` cambiato dall'ultima lettura della tabella '%-.192s'"
jpn "表 '%-.192s' の最後の読み込み時点から、レコードが変化しました。"
@@ -468,6 +487,7 @@ ER_DISK_FULL
fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace... (Errcode: %M)"
ger "Festplatte voll (%s). Warte, bis jemand Platz schafft ... (Fehler: %M)"
greek "Δεν υπάρχει χώρος στο δίσκο (%s). Παρακαλώ, περιμένετε να ελευθερωθεί χώρος... (κωδικός λάθους: %M)"
+ hindi "डिस्क पूरी तरह से भरा हुआ है (%s); कुछ स्थान खाली करें (errno: %M)"
hun "A lemez megtelt (%s). (hibakod: %M)"
ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio... (errno: %M)"
jpn "ディスク領域不足です(%s)。(エラー番号: %M)"
@@ -492,7 +512,8 @@ ER_DUP_KEY 23000
fre "Ecriture impossible, doublon dans une clé de la table '%-.192s'"
ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.192s'"
greek "Δεν είναι δυνατή η καταχώρηση, η τιμή υπάρχει ήδη στον πίνακα '%-.192s'"
- hun "Irasi hiba, duplikalt kulcs a '%-.192s' tablaban."
+ hindi "टेबल '%-.192s' में DUPLICATE KEY मौजूद होने के कारण नहीं लिख सके"
+ hun "Irasi hiba, duplikalt kulcs a '%-.192s' tablaban"
ita "Scrittura impossibile: chiave duplicata nella tabella '%-.192s'"
jpn "書き込めません。表 '%-.192s' に重複するキーがあります。"
kor "기록할 수 없읍니다., 테이블 '%-.192s'에서 중복 키"
@@ -516,6 +537,7 @@ ER_ERROR_ON_CLOSE
fre "Erreur a la fermeture de '%-.192s' (Errcode: %M)"
ger "Fehler beim Schließen von '%-.192s' (Fehler: %M)"
greek "Παρουσιάστηκε πρόβλημα κλείνοντας το '%-.192s' (κωδικός λάθους: %M)"
+ hindi "'%-.192s' के बंद पर त्रुटि हुई (errno: %M)"
hun "Hiba a(z) '%-.192s' zarasakor. (hibakod: %M)"
ita "Errore durante la chiusura di '%-.192s' (errno: %M)"
jpn "'%-.192s' のクローズ時エラー (エラー番号: %M)"
@@ -540,6 +562,7 @@ ER_ERROR_ON_READ
fre "Erreur en lecture du fichier '%-.200s' (Errcode: %M)"
ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %M)"
greek "Πρόβλημα κατά την ανάγνωση του αρχείου '%-.200s' (κωδικός λάθους: %M)"
+ hindi "फ़ाइल '%-.200s' पढ़ने में त्रुटि हुई (errno: %M)"
hun "Hiba a '%-.200s'file olvasasakor. (hibakod: %M)"
ita "Errore durante la lettura del file '%-.200s' (errno: %M)"
jpn "ファイル '%-.200s' の読み込みエラー (エラー番号: %M)"
@@ -564,6 +587,7 @@ ER_ERROR_ON_RENAME
fre "Erreur en renommant '%-.210s' en '%-.210s' (Errcode: %M)"
ger "Fehler beim Umbenennen von '%-.210s' in '%-.210s' (Fehler: %M)"
greek "Πρόβλημα κατά την μετονομασία του αρχείου '%-.210s' to '%-.210s' (κωδικός λάθους: %M)"
+ hindi "'%-.210s' का नाम '%-.210s' बदलने पर त्रुटि हुई (errno: %M)"
hun "Hiba a '%-.210s' file atnevezesekor '%-.210s'. (hibakod: %M)"
ita "Errore durante la rinominazione da '%-.210s' a '%-.210s' (errno: %M)"
jpn "'%-.210s' の名前を '%-.210s' に変更できません (エラー番号: %M)"
@@ -588,6 +612,7 @@ ER_ERROR_ON_WRITE
fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %M)"
ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %M)"
greek "Πρόβλημα κατά την αποθήκευση του αρχείου '%-.200s' (κωδικός λάθους: %M)"
+ hindi "फ़ाइल '%-.200s' लिखने में त्रुटि हुई (errno: %M)"
hun "Hiba a '%-.200s' file irasakor. (hibakod: %M)"
ita "Errore durante la scrittura del file '%-.200s' (errno: %M)"
jpn "ファイル '%-.200s' の書き込みエラー (エラー番号: %M)"
@@ -612,6 +637,7 @@ ER_FILE_USED
fre "'%-.192s' est verrouillé contre les modifications"
ger "'%-.192s' ist für Änderungen gesperrt"
greek "'%-.192s' δεν επιτρέπονται αλλαγές"
+ hindi "फ़ाइल '%-.192s' में कोई बदलाव नहीं कर सकते"
hun "'%-.192s' a valtoztatas ellen zarolva"
ita "'%-.192s' e` soggetto a lock contro i cambiamenti"
jpn "'%-.192s' はロックされています。"
@@ -636,6 +662,7 @@ ER_FILSORT_ABORT
fre "Tri alphabétique abandonné"
ger "Sortiervorgang abgebrochen"
greek "Η διαδικασία ταξινόμισης ακυρώθηκε"
+ hindi "SORT निरस्त"
hun "Sikertelen rendezes"
ita "Operazione di ordinamento abbandonata"
jpn "ソート処理を中断しました。"
@@ -660,6 +687,7 @@ ER_FORM_NOT_FOUND
fre "La vue (View) '%-.192s' n'existe pas pour '%-.192s'"
ger "View '%-.192s' existiert für '%-.192s' nicht"
greek "Το View '%-.192s' δεν υπάρχει για '%-.192s'"
+ hindi "VIEW '%-.192s', '%-.192s' के लिए मौजूद नहीं है"
hun "A(z) '%-.192s' nezet nem letezik a(z) '%-.192s'-hoz"
ita "La view '%-.192s' non esiste per '%-.192s'"
jpn "ビュー '%-.192s' は '%-.192s' に存在しません。"
@@ -681,6 +709,7 @@ ER_GET_ERRNO
fre "Reçu l'erreur %M du handler de la table %s"
ger "Fehler %M von Speicher-Engine %s"
greek "Ελήφθη μήνυμα λάθους %M από τον χειριστή πίνακα (table handler) %s"
+ hindi "%M त्रुटि %s स्टोरेज इंजन से"
ita "Rilevato l'errore %M dal gestore delle tabelle %s"
nor "Mottok feil %M fra tabell håndterer %s"
norwegian-ny "Mottok feil %M fra tabell handterar %s"
@@ -694,6 +723,7 @@ ER_GET_ERRNO
ER_ILLEGAL_HA
eng "Storage engine %s of the table %`s.%`s doesn't have this option"
ger "Diese Option gibt es nicht in Speicher-Engine %s für %`s.%`s"
+ hindi "स्टोरेज इंजन %s में यह विकल्प उपलब्ध नहीं है (टेबल: %`s.%`s)"
rus "Обработчик %s таблицы %`s.%`s не поддерживает эту возможность"
ukr "Дескриптор %s таблиці %`s.%`s не має цієї властивості"
ER_KEY_NOT_FOUND
@@ -705,6 +735,7 @@ ER_KEY_NOT_FOUND
fre "Ne peut trouver l'enregistrement dans '%-.192s'"
ger "Kann Datensatz in '%-.192s' nicht finden"
greek "Αδύνατη η ανεύρεση εγγραφής στο '%-.192s'"
+ hindi "'%-.192s' में रिकॉर्ड नहीं मिला"
hun "Nem talalhato a rekord '%-.192s'-ben"
ita "Impossibile trovare il record in '%-.192s'"
jpn "'%-.192s' にレコードが見つかりません。"
@@ -729,6 +760,7 @@ ER_NOT_FORM_FILE
fre "Information erronnée dans le fichier: '%-.200s'"
ger "Falsche Information in Datei '%-.200s'"
greek "Λάθος πληροφορίες στο αρχείο: '%-.200s'"
+ hindi "फ़ाइल '%-.200s' में गलत जानकारी है"
hun "Ervenytelen info a file-ban: '%-.200s'"
ita "Informazione errata nel file: '%-.200s'"
jpn "ファイル '%-.200s' 内の情報が不正です。"
@@ -748,11 +780,12 @@ ER_NOT_KEYFILE
cze "Nesprávný klíč pro tabulku '%-.200s'; pokuste se ho opravit"
dan "Fejl i indeksfilen til tabellen '%-.200s'; prøv at reparere den"
nla "Verkeerde zoeksleutel file voor tabel: '%-.200s'; probeer het te repareren"
- eng "Incorrect key file for table '%-.200s'; try to repair it"
+ eng "Index for table '%-.200s' is corrupt; try to repair it"
est "Tabeli '%-.200s' võtmefail on vigane; proovi seda parandada"
fre "Index corrompu dans la table: '%-.200s'; essayez de le réparer"
ger "Fehlerhafte Index-Datei für Tabelle '%-.200s'; versuche zu reparieren"
greek "Λάθος αρχείο ταξινόμισης (key file) για τον πίνακα: '%-.200s'; Παρακαλώ, διορθώστε το!"
+ hindi "टेबल '%-.200s' का इंडेक्स CORRUPT हो गया है; इसे REPAIR करने की कोशिश करें"
hun "Ervenytelen kulcsfile a tablahoz: '%-.200s'; probalja kijavitani!"
ita "File chiave errato per la tabella : '%-.200s'; prova a riparalo"
jpn "表 '%-.200s' の索引ファイル(key file)の内容が不正です。修復を試行してください。"
@@ -769,7 +802,7 @@ ER_NOT_KEYFILE
swe "Fatalt fel vid hantering av register '%-.200s'; kör en reparation"
ukr "Хибний файл ключей для таблиці: '%-.200s'; Спробуйте його відновити"
ER_OLD_KEYFILE
- cze "Starý klíčový soubor pro '%-.192s'; opravte ho."
+ cze "Starý klíčový soubor pro '%-.192s'; opravte ho"
dan "Gammel indeksfil for tabellen '%-.192s'; reparer den"
nla "Oude zoeksleutel file voor tabel '%-.192s'; repareer het!"
eng "Old key file for table '%-.192s'; repair it!"
@@ -777,6 +810,7 @@ ER_OLD_KEYFILE
fre "Vieux fichier d'index pour la table '%-.192s'; réparez le!"
ger "Alte Index-Datei für Tabelle '%-.192s'. Bitte reparieren"
greek "Παλαιό αρχείο ταξινόμισης (key file) για τον πίνακα '%-.192s'; Παρακαλώ, διορθώστε το!"
+ hindi "टेबल '%-.192s' के लिए पुरानी KEY फ़ाइल; इसे REPAIR करने की कोशिश करें"
hun "Regi kulcsfile a '%-.192s'tablahoz; probalja kijavitani!"
ita "File chiave vecchio per la tabella '%-.192s'; riparalo!"
jpn "表 '%-.192s' の索引ファイル(key file)は古い形式です。修復してください。"
@@ -801,6 +835,7 @@ ER_OPEN_AS_READONLY
fre "'%-.192s' est en lecture seulement"
ger "Tabelle '%-.192s' ist nur lesbar"
greek "'%-.192s' επιτρέπεται μόνο η ανάγνωση"
+ hindi "टेबल '%-.192s' READ-ONLY है"
hun "'%-.192s' irasvedett"
ita "'%-.192s' e` di sola lettura"
jpn "表 '%-.192s' は読み込み専用です。"
@@ -846,7 +881,7 @@ ER_OUT_OF_SORTMEMORY HY001 S1001
nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size"
eng "Out of sort memory, consider increasing server sort buffer size"
est "Mälu sai sorteerimisel otsa. Suurenda MariaDB-i sorteerimispuhvrit"
- fre "Manque de mémoire pour le tri. Augmentez-la."
+ fre "Manque de mémoire pour le tri. Augmentez-la"
ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhöht werden"
greek "Δεν υπάρχει διαθέσιμη μνήμη για ταξινόμιση. Αυξήστε το sort buffer size για τη διαδικασία (demon)"
hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet"
@@ -856,7 +891,7 @@ ER_OUT_OF_SORTMEMORY HY001 S1001
nor "Ikke mer sorteringsminne. Vurder å øke sorteringsminnet (sort buffer size) for tjenesten"
norwegian-ny "Ikkje meir sorteringsminne. Vurder å auke sorteringsminnet (sorteringsbuffer storleik) for tenesten"
pol "Zbyt mało pamięci dla sortowania. Zwiększ wielko?ć bufora demona dla sortowania"
- por "Não há memória suficiente para ordenação. Considere aumentar o tamanho do retentor (buffer) de ordenação."
+ por "Não há memória suficiente para ordenação. Considere aumentar o tamanho do retentor (buffer) de ordenação"
rum "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)"
rus "Недостаточно памяти для сортировки. Увеличьте размер буфера сортировки на сервере"
serbian "Nema memorije za sortiranje. Povećajte veličinu sort buffer-a MariaDB server-u"
@@ -897,6 +932,7 @@ ER_CON_COUNT_ERROR 08004
fre "Trop de connexions"
ger "Zu viele Verbindungen"
greek "Υπάρχουν πολλές συνδέσεις..."
+ hindi "अत्यधिक कनेक्शन"
hun "Tul sok kapcsolat"
ita "Troppe connessioni"
jpn "接続が多すぎます。"
@@ -945,6 +981,7 @@ ER_BAD_HOST_ERROR 08S01
fre "Ne peut obtenir de hostname pour votre adresse"
ger "Kann Hostnamen für diese Adresse nicht erhalten"
greek "Δεν έγινε γνωστό το hostname για την address σας"
+ hindi "आपके I.P. ऐड्रेस के लिए होस्टनेम प्राप्त करने में विफल रहे"
hun "A gepnev nem allapithato meg a cimbol"
ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)"
jpn "IPアドレスからホスト名を解決できません。"
@@ -969,6 +1006,7 @@ ER_HANDSHAKE_ERROR 08S01
fre "Mauvais 'handshake'"
ger "Ungültiger Handshake"
greek "Η αναγνώριση (handshake) δεν έγινε σωστά"
+ hindi "संपर्क स्थापित करते समय त्रुटि हुई (BAD HANDSHAKE)"
hun "A kapcsolatfelvetel nem sikerult (Bad handshake)"
ita "Negoziazione impossibile"
jpn "ハンドシェイクエラー"
@@ -993,7 +1031,8 @@ ER_DBACCESS_DENIED_ERROR 42000
fre "Accès refusé pour l'utilisateur: '%s'@'%s'. Base '%-.192s'"
ger "Benutzer '%s'@'%s' hat keine Zugriffsberechtigung für Datenbank '%-.192s'"
greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s' στη βάση δεδομένων '%-.192s'"
- hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres az '%-.192s' adabazishoz."
+ hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres az '%-.192s' adabazishoz"
+ hindi "यूज़र '%s'@'%s' को डेटाबेस '%-.192s' की अनुमति नहीं है"
ita "Accesso non consentito per l'utente: '%s'@'%s' al database '%-.192s'"
jpn "ユーザー '%s'@'%s' の '%-.192s' データベースへのアクセスを拒否します"
kor "'%s'@'%s' 사용자는 '%-.192s' 데이타베이스에 접근이 거부 되었습니다."
@@ -1017,6 +1056,7 @@ ER_ACCESS_DENIED_ERROR 28000
fre "Accès refusé pour l'utilisateur: '%s'@'%s' (mot de passe: %s)"
ger "Benutzer '%s'@'%s' hat keine Zugriffsberechtigung (verwendetes Passwort: %s)"
greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s' (χρήση password: %s)"
+ hindi "यूज़र '%s'@'%s' को अनुमति नहीं है (पासवर्ड का उपयोग: %s)"
hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)"
ita "Accesso non consentito per l'utente: '%s'@'%s' (Password: %s)"
jpn "ユーザー '%s'@'%s' を拒否します.uUsing password: %s)"
@@ -1040,6 +1080,7 @@ ER_NO_DB_ERROR 3D000
fre "Aucune base n'a été sélectionnée"
ger "Keine Datenbank ausgewählt"
greek "Δεν επιλέχθηκε βάση δεδομένων"
+ hindi "किसी भी डेटाबेस का चयन नहीं किया गया है"
hun "Nincs kivalasztott adatbazis"
ita "Nessun database selezionato"
jpn "データベースが選択されていません。"
@@ -1064,6 +1105,7 @@ ER_UNKNOWN_COM_ERROR 08S01
fre "Commande inconnue"
ger "Unbekannter Befehl"
greek "Αγνωστη εντολή"
+ hindi "अज्ञात आदेश"
hun "Ervenytelen parancs"
ita "Comando sconosciuto"
jpn "不明なコマンドです。"
@@ -1088,6 +1130,7 @@ ER_BAD_NULL_ERROR 23000
fre "Le champ '%-.192s' ne peut être vide (null)"
ger "Feld '%-.192s' darf nicht NULL sein"
greek "Το πεδίο '%-.192s' δεν μπορεί να είναι κενό (null)"
+ hindi "काँलम '%-.192s' NULL नहीं हो सकता"
hun "A(z) '%-.192s' oszlop erteke nem lehet nulla"
ita "La colonna '%-.192s' non puo` essere nulla"
jpn "列 '%-.192s' は null にできません。"
@@ -1112,6 +1155,7 @@ ER_BAD_DB_ERROR 42000
fre "Base '%-.192s' inconnue"
ger "Unbekannte Datenbank '%-.192s'"
greek "Αγνωστη βάση δεδομένων '%-.192s'"
+ hindi "अज्ञात डाटाबेस '%-.192s'"
hun "Ervenytelen adatbazis: '%-.192s'"
ita "Database '%-.192s' sconosciuto"
jpn "'%-.192s' は不明なデータベースです。"
@@ -1136,6 +1180,7 @@ ER_TABLE_EXISTS_ERROR 42S01
fre "La table '%-.192s' existe déjà"
ger "Tabelle '%-.192s' bereits vorhanden"
greek "Ο πίνακας '%-.192s' υπάρχει ήδη"
+ hindi "टेबल '%-.192s' पहले से ही मौजूद है"
hun "A(z) '%-.192s' tabla mar letezik"
ita "La tabella '%-.192s' esiste gia`"
jpn "表 '%-.192s' はすでに存在します。"
@@ -1160,6 +1205,7 @@ ER_BAD_TABLE_ERROR 42S02
fre "Table '%-.100s' inconnue"
ger "Unbekannte Tabelle '%-.100s'"
greek "Αγνωστος πίνακας '%-.100s'"
+ hindi "अज्ञात टेबल '%-.100s'"
hun "Ervenytelen tabla: '%-.100s'"
ita "Tabella '%-.100s' sconosciuta"
jpn "'%-.100s' は不明な表です。"
@@ -1184,6 +1230,7 @@ ER_NON_UNIQ_ERROR 23000
fre "Champ: '%-.192s' dans %-.192s est ambigu"
ger "Feld '%-.192s' in %-.192s ist nicht eindeutig"
greek "Το πεδίο: '%-.192s' σε %-.192s δεν έχει καθοριστεί"
+ hindi "काँलम '%-.192s' अस्पष्ट है (टेबल: %-.192s)"
hun "A(z) '%-.192s' oszlop %-.192s-ben ketertelmu"
ita "Colonna: '%-.192s' di %-.192s e` ambigua"
jpn "列 '%-.192s' は %-.192s 内で曖昧です。"
@@ -1208,6 +1255,7 @@ ER_SERVER_SHUTDOWN 08S01
fre "Arrêt du serveur en cours"
ger "Der Server wird heruntergefahren"
greek "Εναρξη διαδικασίας αποσύνδεσης του εξυπηρετητή (server shutdown)"
+ hindi "सर्वर बंद हो रहा है"
hun "A szerver leallitasa folyamatban"
ita "Shutdown del server in corso"
jpn "サーバーをシャットダウン中です。"
@@ -1232,6 +1280,7 @@ ER_BAD_FIELD_ERROR 42S22 S0022
fre "Champ '%-.192s' inconnu dans %-.192s"
ger "Unbekanntes Tabellenfeld '%-.192s' in %-.192s"
greek "Αγνωστο πεδίο '%-.192s' σε '%-.192s'"
+ hindi "अज्ञात काँलम '%-.192s'(टेबल: '%-.192s')"
hun "A(z) '%-.192s' oszlop ervenytelen '%-.192s'-ben"
ita "Colonna sconosciuta '%-.192s' in '%-.192s'"
jpn "列 '%-.192s' は '%-.192s' にはありません。"
@@ -1256,6 +1305,7 @@ ER_WRONG_FIELD_WITH_GROUP 42000 S1009
fre "'%-.192s' n'est pas dans 'group by'"
ger "'%-.192s' ist nicht in GROUP BY vorhanden"
greek "Χρησιμοποιήθηκε '%-.192s' που δεν υπήρχε στο group by"
+ hindi "'%-.192s' GROUP BY में नहीं है"
hun "Used '%-.192s' with wasn't in group by"
ita "Usato '%-.192s' che non e` nel GROUP BY"
jpn "'%-.192s' はGROUP BY句で指定されていません。"
@@ -1280,6 +1330,7 @@ ER_WRONG_GROUP_FIELD 42000 S1009
fre "Ne peut regrouper '%-.192s'"
ger "Gruppierung über '%-.192s' nicht möglich"
greek "Αδύνατη η ομαδοποίηση (group on) '%-.192s'"
+ hindi "'%-.192s' पर GROUP नहीं कर सकते"
hun "A group nem hasznalhato: '%-.192s'"
ita "Impossibile raggruppare per '%-.192s'"
jpn "'%-.192s' でのグループ化はできません。"
@@ -1326,6 +1377,7 @@ ER_WRONG_VALUE_COUNT 21S01
est "Tulpade arv erineb väärtuste arvust"
ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte"
greek "Το Column count δεν ταιριάζει με το value count"
+ hindi "कॉलम की गिनती मूल्य की गिनती के समान नही है"
hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel"
ita "Il numero delle colonne non e` uguale al numero dei valori"
jpn "列数が値の個数と一致しません。"
@@ -1350,7 +1402,8 @@ ER_TOO_LONG_IDENT 42000 S1009
fre "Le nom de l'identificateur '%-.100s' est trop long"
ger "Name des Bezeichners '%-.100s' ist zu lang"
greek "Το identifier name '%-.100s' είναι πολύ μεγάλο"
- hun "A(z) '%-.100s' azonositonev tul hosszu."
+ hindi "पहचानकर्ता का नाम '%-.100s' बहुत लंबा है"
+ hun "A(z) '%-.100s' azonositonev tul hosszu"
ita "Il nome dell'identificatore '%-.100s' e` troppo lungo"
jpn "識別子名 '%-.100s' は長すぎます。"
kor "Identifier '%-.100s'는 너무 길군요."
@@ -1374,6 +1427,7 @@ ER_DUP_FIELDNAME 42S21 S1009
fre "Nom du champ '%-.192s' déjà utilisé"
ger "Doppelter Spaltenname: '%-.192s'"
greek "Επανάληψη column name '%-.192s'"
+ hindi "समान कॉलम '%-.192s' मौजूद है"
hun "Duplikalt oszlopazonosito: '%-.192s'"
ita "Nome colonna duplicato '%-.192s'"
jpn "列名 '%-.192s' は重複してます。"
@@ -1398,6 +1452,7 @@ ER_DUP_KEYNAME 42000 S1009
fre "Nom de clef '%-.192s' déjà utilisé"
ger "Doppelter Name für Schlüssel vorhanden: '%-.192s'"
greek "Επανάληψη key name '%-.192s'"
+ hindi "समान KEY '%-.192s' मौजूद है"
hun "Duplikalt kulcsazonosito: '%-.192s'"
ita "Nome chiave duplicato '%-.192s'"
jpn "索引名 '%-.192s' は重複しています。"
@@ -1424,7 +1479,8 @@ ER_DUP_ENTRY 23000 S1009
fre "Duplicata du champ '%-.192s' pour la clef %d"
ger "Doppelter Eintrag '%-.192s' für Schlüssel %d"
greek "Διπλή εγγραφή '%-.192s' για το κλειδί %d"
- hun "Duplikalt bejegyzes '%-.192s' a %d kulcs szerint."
+ hindi "सामान प्रवेश '%-.192s' KEY %d के लिए"
+ hun "Duplikalt bejegyzes '%-.192s' a %d kulcs szerint"
ita "Valore duplicato '%-.192s' per la chiave %d"
jpn "'%-.192s' は索引 %d で重複しています。"
kor "중복된 입력 값 '%-.192s': key %d"
@@ -1448,6 +1504,7 @@ ER_WRONG_FIELD_SPEC 42000 S1009
fre "Mauvais paramètre de champ pour le champ '%-.192s'"
ger "Falsche Spezifikation für Feld '%-.192s'"
greek "Εσφαλμένο column specifier για το πεδίο '%-.192s'"
+ hindi "कॉलम '%-.192s' के लिए गलत कॉलम विनिर्देशक"
hun "Rossz oszlopazonosito: '%-.192s'"
ita "Specifica errata per la colonna '%-.192s'"
jpn "列 '%-.192s' の定義が不正です。"
@@ -1472,6 +1529,7 @@ ER_PARSE_ERROR 42000 s1009
fre "%s près de '%-.80s' à la ligne %d"
ger "%s bei '%-.80s' in Zeile %d"
greek "%s πλησίον '%-.80s' στη γραμμή %d"
+ hindi "%s के पास '%-.80s' लाइन %d में"
hun "A %s a '%-.80s'-hez kozeli a %d sorban"
ita "%s vicino a '%-.80s' linea %d"
jpn "%s : '%-.80s' 付近 %d 行目"
@@ -1496,7 +1554,8 @@ ER_EMPTY_QUERY 42000
fre "Query est vide"
ger "Leere Abfrage"
greek "Το ερώτημα (query) που θέσατε ήταν κενό"
- hun "Ures lekerdezes."
+ hindi "क्वेरी खली थी"
+ hun "Ures lekerdezes"
ita "La query e` vuota"
jpn "クエリが空です。"
kor "쿼리결과가 없습니다."
@@ -1520,6 +1579,7 @@ ER_NONUNIQ_TABLE 42000 S1009
fre "Table/alias: '%-.192s' non unique"
ger "Tabellenname/Alias '%-.192s' nicht eindeutig"
greek "Αδύνατη η ανεύρεση unique table/alias: '%-.192s'"
+ hindi "टेबल या उसका उपनाम '%-.192s' अद्वितीय नहीं है"
hun "Nem egyedi tabla/alias: '%-.192s'"
ita "Tabella/alias non unico: '%-.192s'"
jpn "表名/別名 '%-.192s' は一意ではありません。"
@@ -1544,6 +1604,7 @@ ER_INVALID_DEFAULT 42000 S1009
fre "Valeur par défaut invalide pour '%-.192s'"
ger "Fehlerhafter Vorgabewert (DEFAULT) für '%-.192s'"
greek "Εσφαλμένη προκαθορισμένη τιμή (default value) για '%-.192s'"
+ hindi "'%-.192s' के लिए अवैध डिफ़ॉल्ट मान"
hun "Ervenytelen ertek: '%-.192s'"
ita "Valore di default non valido per '%-.192s'"
jpn "'%-.192s' へのデフォルト値が無効です。"
@@ -1568,7 +1629,8 @@ ER_MULTIPLE_PRI_KEY 42000 S1009
fre "Plusieurs clefs primaires définies"
ger "Mehrere Primärschlüssel (PRIMARY KEY) definiert"
greek "Περισσότερα από ένα primary key ορίστηκαν"
- hun "Tobbszoros elsodleges kulcs definialas."
+ hindi "कई PRIMARY KEY परिभाषित"
+ hun "Tobbszoros elsodleges kulcs definialas"
ita "Definite piu` chiave primarie"
jpn "PRIMARY KEY が複数定義されています。"
kor "Multiple primary key가 정의되어 있슴"
@@ -1592,7 +1654,8 @@ ER_TOO_MANY_KEYS 42000 S1009
fre "Trop de clefs sont définies. Maximum de %d clefs alloué"
ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt"
greek "Πάρα πολλά key ορίσθηκαν. Το πολύ %d επιτρέπονται"
- hun "Tul sok kulcs. Maximum %d kulcs engedelyezett."
+ hun "Tul sok kulcs. Maximum %d kulcs engedelyezett"
+ hindi "बहुत सारी KEYS निर्दिष्ट हैं; अधिकतम %d KEYS की अनुमति है"
ita "Troppe chiavi. Sono ammesse max %d chiavi"
jpn "索引の数が多すぎます。最大 %d 個までです。"
kor "너무 많은 키가 정의되어 있읍니다.. 최대 %d의 키가 가능함"
@@ -1616,6 +1679,7 @@ ER_TOO_MANY_KEY_PARTS 42000 S1009
fre "Trop de parties specifiées dans la clef. Maximum de %d parties"
ger "Zu viele Teilschlüssel definiert. Maximal %d Teilschlüssel erlaubt"
greek "Πάρα πολλά key parts ορίσθηκαν. Το πολύ %d επιτρέπονται"
+ hindi "बहुत सारे KEY के भाग निर्दिष्ट हैं; अधिकतम %d भागों की अनुमति है"
hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett"
ita "Troppe parti di chiave specificate. Sono ammesse max %d parti"
jpn "索引のキー列指定が多すぎます。最大 %d 個までです。"
@@ -1640,6 +1704,7 @@ ER_TOO_LONG_KEY 42000 S1009
fre "La clé est trop longue. Longueur maximale: %d"
ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d"
greek "Το κλειδί που ορίσθηκε είναι πολύ μεγάλο. Το μέγιστο μήκος είναι %d"
+ hindi "निर्दिष्ट KEY बहुत लंबी थी; KEY की अधिकतम लंबाई %d बाइट है"
hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d"
ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d"
jpn "索引のキーが長すぎます。最大 %d バイトまでです。"
@@ -1664,6 +1729,7 @@ ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009
fre "La clé '%-.192s' n'existe pas dans la table"
ger "In der Tabelle gibt es kein Schlüsselfeld '%-.192s'"
greek "Το πεδίο κλειδί '%-.192s' δεν υπάρχει στον πίνακα"
+ hindi "KEY कॉलम '%-.192s' टेबल में मौजूद नहीं है"
hun "A(z) '%-.192s'kulcsoszlop nem letezik a tablaban"
ita "La colonna chiave '%-.192s' non esiste nella tabella"
jpn "キー列 '%-.192s' は表にありません。"
@@ -1682,6 +1748,7 @@ ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009
ER_BLOB_USED_AS_KEY 42000 S1009
eng "BLOB column %`s can't be used in key specification in the %s table"
ger "BLOB-Feld %`s kann beim %s Tabellen nicht als Schlüssel verwendet werden"
+ hindi "BLOB कॉलम %`s टेबल %s में KEY विनिर्देश में इस्तेमाल नहीं किया जा सकता"
rus "Столбец типа BLOB %`s не может быть использован как значение ключа в %s таблице"
ukr "BLOB стовбець %`s не може бути використаний у визначенні ключа в %s таблиці"
ER_TOO_BIG_FIELDLENGTH 42000 S1009
@@ -1693,8 +1760,9 @@ ER_TOO_BIG_FIELDLENGTH 42000 S1009
fre "Champ '%-.192s' trop long (max = %lu). Utilisez un BLOB"
ger "Feldlänge für Feld '%-.192s' zu groß (maximal %lu). BLOB- oder TEXT-Spaltentyp verwenden!"
greek "Πολύ μεγάλο μήκος για το πεδίο '%-.192s' (max = %lu). Παρακαλώ χρησιμοποιείστε τον τύπο BLOB"
- hun "A(z) '%-.192s' oszlop tul hosszu. (maximum = %lu). Hasznaljon BLOB tipust inkabb."
- ita "La colonna '%-.192s' e` troppo grande (max=%lu). Utilizza un BLOB."
+ hindi "कॉलम की लंबाई कॉलम '%-.192s' के लिए बड़ी है (अधिकतम = %lu); BLOB या TEXT का उपयोग करें"
+ hun "A(z) '%-.192s' oszlop tul hosszu. (maximum = %lu). Hasznaljon BLOB tipust inkabb"
+ ita "La colonna '%-.192s' e` troppo grande (max=%lu). Utilizza un BLOB"
jpn "列 '%-.192s' のサイズ定義が大きすぎます (最大 %lu まで)。代わりに BLOB または TEXT を使用してください。"
kor "칼럼 '%-.192s'의 칼럼 길이가 너무 깁니다 (최대 = %lu). 대신에 BLOB를 사용하세요."
nor "For stor nøkkellengde for kolonne '%-.192s' (maks = %lu). Bruk BLOB istedenfor"
@@ -1711,18 +1779,19 @@ ER_TOO_BIG_FIELDLENGTH 42000 S1009
ER_WRONG_AUTO_KEY 42000 S1009
cze "Můžete mít pouze jedno AUTO pole a to musí být definováno jako klíč"
dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret"
- nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd."
+ nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd"
eng "Incorrect table definition; there can be only one auto column and it must be defined as a key"
est "Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena"
fre "Un seul champ automatique est permis et il doit être indexé"
ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlüssel definiert werden"
greek "Μπορεί να υπάρχει μόνο ένα auto field και πρέπει να έχει ορισθεί σαν key"
- hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni."
+ hindi "गलत टेबल परिभाषा; टेबल में केवल एक AUTO_INCREMENT कॉलम हो सकता है और इसे एक KEY के रूप में परिभाषित किया जाना चाहिए"
+ hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni"
ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave"
jpn "不正な表定義です。AUTO_INCREMENT列は1個までで、索引を定義する必要があります。"
kor "부정확한 테이블 정의; 테이블은 하나의 auto 칼럼이 존재하고 키로 정의되어져야 합니다."
- nor "Bare ett auto felt kan være definert som nøkkel."
- norwegian-ny "Bare eitt auto felt kan være definert som nøkkel."
+ nor "Bare ett auto felt kan være definert som nøkkel"
+ norwegian-ny "Bare eitt auto felt kan være definert som nøkkel"
pol "W tabeli może być tylko jedno pole auto i musi ono być zdefiniowane jako klucz"
por "Definição incorreta de tabela. Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave"
rum "Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie"
@@ -1735,29 +1804,30 @@ ER_WRONG_AUTO_KEY 42000 S1009
ER_BINLOG_CANT_DELETE_GTID_DOMAIN
eng "Could not delete gtid domain. Reason: %s."
ER_NORMAL_SHUTDOWN
- cze "%s: norm-Bální ukončení"
- dan "%s: Normal nedlukning"
- nla "%s: Normaal afgesloten"
- eng "%s: Normal shutdown"
- est "%s: MariaDB lõpetas"
- fre "%s: Arrêt normal du serveur"
- ger "%s: Normal heruntergefahren"
- greek "%s: Φυσιολογική διαδικασία shutdown"
- hun "%s: Normal leallitas"
- ita "%s: Shutdown normale"
- jpn "%s: 通常シャットダウン"
- kor "%s: 정상적인 shutdown"
- nor "%s: Normal avslutning"
- norwegian-ny "%s: Normal nedkopling"
- pol "%s: Standardowe zakończenie działania"
- por "%s: 'Shutdown' normal"
- rum "%s: Terminare normala"
- rus "%s: Корректная остановка"
- serbian "%s: Normalno gašenje"
- slo "%s: normálne ukončenie"
- spa "%s: Apagado normal"
- swe "%s: Normal avslutning"
- ukr "%s: Нормальне завершення"
+ cze "%s (%s): normální ukončení"
+ dan "%s (%s): Normal nedlukning"
+ nla "%s (%s): Normaal afgesloten"
+ eng "%s (initiated by: %s): Normal shutdown"
+ est "%s (%s): MariaDB lõpetas"
+ fre "%s (%s): Arrêt normal du serveur"
+ ger "%s (%s): Normal heruntergefahren"
+ greek "%s (%s): Φυσιολογική διαδικασία shutdown"
+ hindi "%s (%s): सामान्य शटडाउन"
+ hun "%s (%s): Normal leallitas"
+ ita "%s (%s): Shutdown normale"
+ jpn "%s (%s): 通常シャットダウン"
+ kor "%s (%s): 정상적인 shutdown"
+ nor "%s (%s): Normal avslutning"
+ norwegian-ny "%s (%s): Normal nedkopling"
+ pol "%s (%s): Standardowe zakończenie działania"
+ por "%s (%s): 'Shutdown' normal"
+ rum "%s (%s): Terminare normala"
+ rus "%s (инициирована пользователем: %s): Корректная остановка"
+ serbian "%s (%s): Normalno gašenje"
+ slo "%s (%s): normálne ukončenie"
+ spa "%s (%s): Apagado normal"
+ swe "%s (%s): Normal avslutning"
+ ukr "%s (%s): Нормальне завершення"
ER_GOT_SIGNAL
cze "%s: přijat signal %d, končím\n"
dan "%s: Fangede signal %d. Afslutter!!\n"
@@ -1767,6 +1837,7 @@ ER_GOT_SIGNAL
fre "%s: Reçu le signal %d. Abandonne!\n"
ger "%s: Signal %d erhalten. Abbruch!\n"
greek "%s: Ελήφθη το μήνυμα %d. Η διαδικασία εγκαταλείπεται!\n"
+ hindi "%s: सिग्नल %d मिलने के कारण सिस्टम बंद किया जा रहा है!\n"
hun "%s: %d jelzes. Megszakitva!\n"
ita "%s: Ricevuto segnale %d. Interruzione!\n"
jpn "%s: シグナル %d を受信しました。強制終了します!\n"
@@ -1791,6 +1862,7 @@ ER_SHUTDOWN_COMPLETE
fre "%s: Arrêt du serveur terminé\n"
ger "%s: Herunterfahren beendet\n"
greek "%s: Η διαδικασία Shutdown ολοκληρώθηκε\n"
+ hindi "%s: शटडाउन पूर्ण\n"
hun "%s: A leallitas kesz\n"
ita "%s: Shutdown completato\n"
jpn "%s: シャットダウン完了\n"
@@ -1815,6 +1887,7 @@ ER_FORCING_CLOSE 08S01
fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.48s'\n"
ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.48s'\n"
greek "%s: Το thread θα κλείσει %ld user: '%-.48s'\n"
+ hindi "%s: %ld थ्रेड बंद किया जा रहा है (यूज़र: '%-.48s')\n"
hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.48s'\n"
ita "%s: Forzata la chiusura del thread %ld utente: '%-.48s'\n"
jpn "%s: スレッド %ld を強制終了します (ユーザー: '%-.48s')\n"
@@ -1839,6 +1912,7 @@ ER_IPSOCK_ERROR 08S01
fre "Ne peut créer la connexion IP (socket)"
ger "Kann IP-Socket nicht erzeugen"
greek "Δεν είναι δυνατή η δημιουργία IP socket"
+ hindi "IP SOCKET नहीं बना सकते"
hun "Az IP socket nem hozhato letre"
ita "Impossibile creare il socket IP"
jpn "IPソケットを作成できません。"
@@ -1863,6 +1937,7 @@ ER_NO_SUCH_INDEX 42S12 S1009
fre "La table '%-.192s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table"
ger "Tabelle '%-.192s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen"
greek "Ο πίνακας '%-.192s' δεν έχει ευρετήριο (index) σαν αυτό που χρησιμοποιείτε στην CREATE INDEX. Παρακαλώ, ξαναδημιουργήστε τον πίνακα"
+ hindi "CREATE INDEX में इस्तेमाल की गयी सूचि टेबल '%-.192s' में उपलब्ध नहीं है; टेबल को पुनः बनायें"
hun "A(z) '%-.192s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat"
ita "La tabella '%-.192s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella"
jpn "表 '%-.192s' に以前CREATE INDEXで作成された索引がありません。表を作り直してください。"
@@ -1887,6 +1962,7 @@ ER_WRONG_FIELD_TERMINATORS 42000 S1009
fre "Séparateur de champs inconnu. Vérifiez dans le manuel"
ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen"
greek "Ο διαχωριστής πεδίων δεν είναι αυτός που αναμενόταν. Παρακαλώ ανατρέξτε στο manual"
+ hindi "फील्ड विभाजक आर्गुमेंट गलत है; मैनुअल की जाँच करें"
hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!"
ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale"
jpn "フィールド区切り文字が予期せぬ使われ方をしています。マニュアルを確認して下さい。"
@@ -1903,38 +1979,40 @@ ER_WRONG_FIELD_TERMINATORS 42000 S1009
swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen"
ukr "Хибний розділювач полів. Почитайте документацію"
ER_BLOBS_AND_NO_TERMINATED 42000 S1009
- cze "Není možné použít pevný rowlength s BLOBem. Použijte 'fields terminated by'."
- dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'."
- nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'."
+ cze "Není možné použít pevný rowlength s BLOBem. Použijte 'fields terminated by'"
+ dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'"
+ nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'"
eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'"
- est "BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang."
- fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'."
+ est "BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang"
+ fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'"
ger "Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden"
- greek "Δεν μπορείτε να χρησιμοποιήσετε fixed rowlength σε BLOBs. Παρακαλώ χρησιμοποιείστε 'fields terminated by'."
- hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ."
- ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'."
+ greek "Δεν μπορείτε να χρησιμοποιήσετε fixed rowlength σε BLOBs. Παρακαλώ χρησιμοποιείστε 'fields terminated by'"
+ hindi "BLOBs को निश्चित लंबाई की पंक्ति के साथ प्रयोग नहीं किया जा सकता है; 'FIELDS TERMINATED BY' का इस्तेमाल करें"
+ hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet'"
+ ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'"
jpn "BLOBには固定長レコードが使用できません。'FIELDS TERMINATED BY'句を使用して下さい。"
kor "BLOB로는 고정길이의 lowlength를 사용할 수 없습니다. 'fields terminated by'를 사용하세요."
- nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
- norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
- pol "Nie można użyć stałej długo?ci wiersza z polami typu BLOB. Użyj 'fields terminated by'."
- por "Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado."
- rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'."
+ nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'"
+ norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'"
+ pol "Nie można użyć stałej długości wiersza z polami typu BLOB. Użyj 'fields terminated by'"
+ por "Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado"
+ rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'"
rus "Фиксированный размер записи с полями типа BLOB использовать нельзя, применяйте 'fields terminated by'"
- serbian "Ne možete koristiti fiksnu veličinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju."
- slo "Nie je možné použiť fixnú dĺžku s BLOBom. Použite 'fields terminated by'."
- spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '."
+ serbian "Ne možete koristiti fiksnu veličinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju"
+ slo "Nie je možné použiť fixnú dĺžku s BLOBom. Použite 'fields terminated by'"
+ spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '"
swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'"
ukr "Не можна використовувати сталу довжину строки з BLOB. Зкористайтеся 'fields terminated by'"
ER_TEXTFILE_NOT_READABLE
cze "Soubor '%-.128s' musí být v adresáři databáze nebo čitelný pro všechny"
dan "Filen '%-.128s' skal være i database-folderen, eller kunne læses af alle"
- nla "Het bestand '%-.128s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn."
+ nla "Het bestand '%-.128s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn"
eng "The file '%-.128s' must be in the database directory or be readable by all"
est "Fail '%-.128s' peab asuma andmebaasi kataloogis või olema kõigile loetav"
fre "Le fichier '%-.128s' doit être dans le répertoire de la base et lisible par tous"
ger "Datei '%-.128s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein"
greek "Το αρχείο '%-.128s' πρέπει να υπάρχει στο database directory ή να μπορεί να διαβαστεί από όλους"
+ hindi "फ़ाइल '%-.128s' डेटाबेस डायरेक्टरी में या सभी के द्वारा पठनीय होना चाहिए"
hun "A(z) '%-.128s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak"
ita "Il file '%-.128s' deve essere nella directory del database e deve essere leggibile da tutti"
jpn "ファイル '%-.128s' はデータベースディレクトリにあるか、全てのユーザーから読める必要があります。"
@@ -1959,7 +2037,8 @@ ER_FILE_EXISTS_ERROR
fre "Le fichier '%-.200s' existe déjà"
ger "Datei '%-.200s' bereits vorhanden"
greek "Το αρχείο '%-.200s' υπάρχει ήδη"
- hun "A '%-.200s' file mar letezik."
+ hindi "फ़ाइल '%-.200s' पहले से मौजूद है"
+ hun "A '%-.200s' file mar letezik"
ita "Il file '%-.200s' esiste gia`"
jpn "ファイル '%-.200s' はすでに存在します。"
kor "'%-.200s' 화일은 이미 존재합니다."
@@ -1983,6 +2062,7 @@ ER_LOAD_INFO
fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld"
ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld"
greek "Εγγραφές: %ld Διαγραφές: %ld Παρεκάμφθησαν: %ld Προειδοποιήσεις: %ld"
+ hindi "रिकॉर्ड: %ld हटाए गए: %ld छोड़ दिए गए: %ld चेतावनी: %ld"
hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld"
ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld"
jpn "レコード数: %ld 削除: %ld スキップ: %ld 警告: %ld"
@@ -2007,6 +2087,7 @@ ER_ALTER_INFO
fre "Enregistrements: %ld Doublons: %ld"
ger "Datensätze: %ld Duplikate: %ld"
greek "Εγγραφές: %ld Επαναλήψεις: %ld"
+ hindi "रिकॉर्ड: %ld डुप्लिकेट: %ld"
hun "Rekordok: %ld Duplikalva: %ld"
ita "Records: %ld Duplicati: %ld"
jpn "レコード数: %ld 重複: %ld"
@@ -2032,7 +2113,7 @@ ER_WRONG_SUB_KEY
ger "Falscher Unterteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder die Speicher-Engine unterstützt keine Unterteilschlüssel"
greek "Εσφαλμένο sub part key. Το χρησιμοποιούμενο key part δεν είναι string ή το μήκος του είναι μεγαλύτερο"
hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz"
- ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave."
+ ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave"
jpn "キーのプレフィックスが不正です。キーが文字列ではないか、プレフィックス長がキーよりも長いか、ストレージエンジンが一意索引のプレフィックス指定をサポートしていません。"
kor "부정확한 서버 파트 키. 사용된 키 파트가 스트링이 아니거나 키 파트의 길이가 너무 깁니다."
nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden"
@@ -2048,19 +2129,20 @@ ER_WRONG_SUB_KEY
ukr "Невірна частина ключа. Використана частина ключа не є строкою, задовга або вказівник таблиці не підтримує унікальних частин ключей"
ER_CANT_REMOVE_ALL_FIELDS 42000
cze "Není možné vymazat všechny položky s ALTER TABLE. Použijte DROP TABLE"
- dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet."
+ dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet"
nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!"
eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead"
est "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil"
fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE"
ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden"
greek "Δεν είναι δυνατή η διαγραφή όλων των πεδίων με ALTER TABLE. Παρακαλώ χρησιμοποιείστε DROP TABLE"
+ hindi "ALTER TABLE का इस्तेमाल कर सभी कॉलम्स को हटाया नहीं जा सकता; DROP TABLE का इस्तेमाल करें"
hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette"
ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE"
jpn "ALTER TABLE では全ての列の削除はできません。DROP TABLE を使用してください。"
kor "ALTER TABLE 명령으로는 모든 칼럼을 지울 수 없습니다. DROP TABLE 명령을 이용하세요."
- nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden."
- norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor."
+ nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden"
+ norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor"
pol "Nie można usun?ć wszystkich pól wykorzystuj?c ALTER TABLE. W zamian użyj DROP TABLE"
por "Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar"
rum "Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb"
@@ -2071,29 +2153,28 @@ ER_CANT_REMOVE_ALL_FIELDS 42000
swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället"
ukr "Не можливо видалити всі стовбці за допомогою ALTER TABLE. Для цього скористайтеся DROP TABLE"
ER_CANT_DROP_FIELD_OR_KEY 42000
- cze "Nemohu zrušit '%-.192s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíče"
- dan "Kan ikke udføre DROP '%-.192s'. Undersøg om feltet/nøglen eksisterer."
- nla "Kan '%-.192s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat."
- eng "Can't DROP '%-.192s'; check that column/key exists"
- est "Ei suuda kustutada '%-.192s'. Kontrolli kas tulp/võti eksisteerib"
- fre "Ne peut effacer (DROP) '%-.192s'. Vérifiez s'il existe"
- ger "Kann '%-.192s' nicht löschen. Existiert die Spalte oder der Schlüssel?"
- greek "Αδύνατη η διαγραφή (DROP) '%-.192s'. Παρακαλώ ελέγξτε αν το πεδίο/κλειδί υπάρχει"
- hun "A DROP '%-.192s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e"
- ita "Impossibile cancellare '%-.192s'. Controllare che il campo chiave esista"
- jpn "'%-.192s' を削除できません。列/索引の存在を確認して下さい。"
- kor "'%-.192s'를 DROP할 수 없습니다. 칼럼이나 키가 존재하는지 채크하세요."
- nor "Kan ikke DROP '%-.192s'. Undersøk om felt/nøkkel eksisterer."
- norwegian-ny "Kan ikkje DROP '%-.192s'. Undersøk om felt/nøkkel eksisterar."
- pol "Nie można wykonać operacji DROP '%-.192s'. SprawdĽ, czy to pole/klucz istnieje"
- por "Não se pode fazer DROP '%-.192s'. Confira se esta coluna/chave existe"
- rum "Nu pot sa DROP '%-.192s'. Verifica daca coloana/cheia exista"
- rus "Невозможно удалить (DROP) '%-.192s'. Убедитесь что столбец/ключ действительно существует"
- serbian "Ne mogu da izvršim komandu drop 'DROP' na '%-.192s'. Proverite da li ta kolona (odnosno ključ) postoji"
- slo "Nemôžem zrušiť (DROP) '%-.192s'. Skontrolujte, či neexistujú záznamy/kľúče"
- spa "No puedo ELIMINAR '%-.192s'. compuebe que el campo/clave existe"
- swe "Kan inte ta bort '%-.192s'. Kontrollera att fältet/nyckel finns"
- ukr "Не можу DROP '%-.192s'. Перевірте, чи цей стовбець/ключ існує"
+ cze "Nemohu zrušit (DROP %s) %`-.192s. Zkontrolujte, zda neexistují záznamy/klíče"
+ dan "Kan ikke udføre DROP %s %`-.192s. Undersøg om feltet/nøglen eksisterer"
+ nla "DROP %s: Kan %`-.192s niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat"
+ eng "Can't DROP %s %`-.192s; check that it exists"
+ est "Ei suuda kustutada (DROP %s) %`-.192s. Kontrolli kas tulp/võti eksisteerib"
+ fre "Ne peut effacer (DROP %s) %`-.192s. Vérifiez s'il existe"
+ ger "DROP %s: Kann %`-.192s nicht löschen. Existiert es?"
+ greek "Αδύνατη η διαγραφή (DROP %s) %`-.192s. Παρακαλώ ελέγξτε αν το πεδίο/κλειδί υπάρχει"
+ hindi "%s %`-.192s को ड्रॉप नहीं कर सकते हैं; कृपया जाँच करें कि यह मौजूद है"
+ hun "A DROP %s %`-.192s nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e"
+ ita "Impossibile cancellare (DROP %s) %`-.192s. Controllare che il campo chiave esista"
+ nor "Kan ikke DROP %s %`-.192s. Undersøk om felt/nøkkel eksisterer"
+ norwegian-ny "Kan ikkje DROP %s %`-.192s. Undersøk om felt/nøkkel eksisterar"
+ pol "Nie można wykonać operacji DROP %s %`-.192s. Sprawdź, czy to pole/klucz istnieje"
+ por "Não se pode fazer DROP %s %`-.192s. Confira se esta coluna/chave existe"
+ rum "Nu pot sa DROP %s %`-.192s. Verifica daca coloana/cheia exista"
+ rus "Невозможно удалить (DROP %s) %`-.192s. Убедитесь что он действительно существует"
+ serbian "Ne mogu da izvršim komandu drop 'DROP %s' na %`-.192s. Proverite da li ta kolona (odnosno ključ) postoji"
+ slo "Nemôžem zrušiť (DROP %s) %`-.192s. Skontrolujte, či neexistujú záznamy/kľúče"
+ spa "No puedo eliminar (DROP %s) %`-.192s. compruebe que el campo/clave existe"
+ swe "Kan inte ta bort (DROP %s) %`-.192s. Kontrollera att begränsningen/fältet/nyckel finns"
+ ukr "Не можу DROP %s %`-.192s. Перевірте, чи він існує"
ER_INSERT_INFO
cze "Záznamů: %ld Zdvojených: %ld Varování: %ld"
dan "Poster: %ld Ens: %ld Advarsler: %ld"
@@ -2103,6 +2184,7 @@ ER_INSERT_INFO
fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld"
ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld"
greek "Εγγραφές: %ld Επαναλήψεις: %ld Προειδοποιήσεις: %ld"
+ hindi "रिकॉर्ड: %ld डुप्लिकेट: %ld चेतावनी: %ld"
hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld"
ita "Records: %ld Duplicati: %ld Avvertimenti: %ld"
jpn "レコード数: %ld 重複数: %ld 警告: %ld"
@@ -2130,6 +2212,7 @@ ER_NO_SUCH_THREAD
fre "Numéro de tâche inconnu: %lu"
ger "Unbekannte Thread-ID: %lu"
greek "Αγνωστο thread id: %lu"
+ hindi "अज्ञात थ्रेड ID: %lu"
hun "Ervenytelen szal (thread) id: %lu"
ita "Thread id: %lu sconosciuto"
jpn "不明なスレッドIDです: %lu"
@@ -2154,6 +2237,7 @@ ER_KILL_DENIED_ERROR
fre "Vous n'êtes pas propriétaire de la tâche no: %lu"
ger "Sie sind nicht Eigentümer von Thread %lu"
greek "Δεν είσθε owner του thread %lu"
+ hindi "आप थ्रेड %lu के OWNER नहीं हैं"
hun "A %lu thread-nek mas a tulajdonosa"
ita "Utente non proprietario del thread %lu"
jpn "スレッド %lu のオーナーではありません。"
@@ -2172,12 +2256,13 @@ ER_KILL_DENIED_ERROR
ER_NO_TABLES_USED
cze "Nejsou použity žádné tabulky"
dan "Ingen tabeller i brug"
- nla "Geen tabellen gebruikt."
+ nla "Geen tabellen gebruikt"
eng "No tables used"
est "Ühtegi tabelit pole kasutusel"
fre "Aucune table utilisée"
ger "Keine Tabellen verwendet"
greek "Δεν χρησιμοποιήθηκαν πίνακες"
+ hindi "कोई टेबल का इस्तेमाल नहीं हुआ"
hun "Nincs hasznalt tabla"
ita "Nessuna tabella usata"
jpn "表が指定されていません。"
@@ -2226,6 +2311,7 @@ ER_NO_UNIQUE_LOGFILE
fre "Ne peut générer un unique nom de journal %-.200s.(1-999)\n"
ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.200s(1-999) erzeugen\n"
greek "Αδύνατη η δημιουργία unique log-filename %-.200s.(1-999)\n"
+ hindi "एक अनूठा लॉग-फ़ाइल नाम %-.200s.(1-999) उत्पन्न नहीं कर सके\n"
hun "Egyedi log-filenev nem generalhato: %-.200s.(1-999)\n"
ita "Impossibile generare un nome del file log unico %-.200s.(1-999)\n"
jpn "一意なログファイル名 %-.200s.(1-999) を生成できません。\n"
@@ -2244,12 +2330,13 @@ ER_NO_UNIQUE_LOGFILE
ER_TABLE_NOT_LOCKED_FOR_WRITE
cze "Tabulka '%-.192s' byla zamčena s READ a nemůže být změněna"
dan "Tabellen '%-.192s' var låst med READ lås og kan ikke opdateres"
- nla "Tabel '%-.192s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen."
+ nla "Tabel '%-.192s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen"
eng "Table '%-.192s' was locked with a READ lock and can't be updated"
est "Tabel '%-.192s' on lukustatud READ lukuga ning ei ole muudetav"
fre "Table '%-.192s' verrouillée lecture (READ): modification impossible"
ger "Tabelle '%-.192s' ist mit Lesesperre versehen und kann nicht aktualisiert werden"
greek "Ο πίνακας '%-.192s' έχει κλειδωθεί με READ lock και δεν επιτρέπονται αλλαγές"
+ hindi "टेबल '%-.192s' READ लॉक से बंद है और उसे बदल नहीं सकते"
hun "A(z) '%-.192s' tabla zarolva lett (READ lock) es nem lehet frissiteni"
ita "La tabella '%-.192s' e` soggetta a lock in lettura e non puo` essere aggiornata"
jpn "表 '%-.192s' はREADロックされていて、更新できません。"
@@ -2274,6 +2361,7 @@ ER_TABLE_NOT_LOCKED
fre "Table '%-.192s' non verrouillée: utilisez LOCK TABLES"
ger "Tabelle '%-.192s' wurde nicht mit LOCK TABLES gesperrt"
greek "Ο πίνακας '%-.192s' δεν έχει κλειδωθεί με LOCK TABLES"
+ hindi "टेबल '%-.192s' LOCK TABLES से बंद नहीं है"
hun "A(z) '%-.192s' tabla nincs zarolva a LOCK TABLES-szel"
ita "Non e` stato impostato il lock per la tabella '%-.192s' con LOCK TABLES"
jpn "表 '%-.192s' は LOCK TABLES でロックされていません。"
@@ -2289,30 +2377,8 @@ ER_TABLE_NOT_LOCKED
spa "Tabla '%-.192s' no fue trabada con LOCK TABLES"
swe "Tabell '%-.192s' är inte låst med LOCK TABLES"
ukr "Таблицю '%-.192s' не було блоковано з LOCK TABLES"
-ER_BLOB_CANT_HAVE_DEFAULT 42000
- cze "Blob položka '%-.192s' nemůže mít defaultní hodnotu"
- dan "BLOB feltet '%-.192s' kan ikke have en standard værdi"
- nla "Blob veld '%-.192s' can geen standaardwaarde bevatten"
- eng "BLOB/TEXT column '%-.192s' can't have a default value"
- est "BLOB-tüüpi tulp '%-.192s' ei saa omada vaikeväärtust"
- fre "BLOB '%-.192s' ne peut avoir de valeur par défaut"
- ger "BLOB/TEXT-Feld '%-.192s' darf keinen Vorgabewert (DEFAULT) haben"
- greek "Τα Blob πεδία '%-.192s' δεν μπορούν να έχουν προκαθορισμένες τιμές (default value)"
- hun "A(z) '%-.192s' blob objektumnak nem lehet alapertelmezett erteke"
- ita "Il campo BLOB '%-.192s' non puo` avere un valore di default"
- jpn "BLOB/TEXT 列 '%-.192s' にはデフォルト値を指定できません。"
- kor "BLOB 칼럼 '%-.192s' 는 디폴트 값을 가질 수 없습니다."
- nor "Blob feltet '%-.192s' kan ikke ha en standard verdi"
- norwegian-ny "Blob feltet '%-.192s' kan ikkje ha ein standard verdi"
- pol "Pole typu blob '%-.192s' nie może mieć domy?lnej warto?ci"
- por "Coluna BLOB '%-.192s' não pode ter um valor padrão (default)"
- rum "Coloana BLOB '%-.192s' nu poate avea o valoare default"
- rus "Невозможно указывать значение по умолчанию для столбца BLOB '%-.192s'"
- serbian "BLOB kolona '%-.192s' ne može imati default vrednost"
- slo "Pole BLOB '%-.192s' nemôže mať implicitnú hodnotu"
- spa "Campo Blob '%-.192s' no puede tener valores patron"
- swe "BLOB fält '%-.192s' kan inte ha ett DEFAULT-värde"
- ukr "Стовбець BLOB '%-.192s' не може мати значення по замовчуванню"
+ER_UNUSED_17
+ eng "You should never see it"
ER_WRONG_DB_NAME 42000
cze "Nepřípustné jméno databáze '%-.100s'"
dan "Ugyldigt database navn '%-.100s'"
@@ -2322,6 +2388,7 @@ ER_WRONG_DB_NAME 42000
fre "Nom de base de donnée illégal: '%-.100s'"
ger "Unerlaubter Datenbankname '%-.100s'"
greek "Λάθος όνομα βάσης δεδομένων '%-.100s'"
+ hindi "डेटाबेस नाम '%-.100s' गलत है"
hun "Hibas adatbazisnev: '%-.100s'"
ita "Nome database errato '%-.100s'"
jpn "データベース名 '%-.100s' は不正です。"
@@ -2346,6 +2413,7 @@ ER_WRONG_TABLE_NAME 42000
fre "Nom de table illégal: '%-.100s'"
ger "Unerlaubter Tabellenname '%-.100s'"
greek "Λάθος όνομα πίνακα '%-.100s'"
+ hindi "टेबल नाम '%-.100s' गलत है"
hun "Hibas tablanev: '%-.100s'"
ita "Nome tabella errato '%-.100s'"
jpn "表名 '%-.100s' は不正です。"
@@ -2364,14 +2432,15 @@ ER_WRONG_TABLE_NAME 42000
ER_TOO_BIG_SELECT 42000
cze "Zadaný SELECT by procházel příliš mnoho záznamů a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v pořádku, použijte SET SQL_BIG_SELECTS=1"
dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt"
- nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is."
+ nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het statement in orde is"
eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay"
est "SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1"
fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien"
ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET MAX_JOIN_SIZE=# verwenden"
greek "Το SELECT θα εξετάσει μεγάλο αριθμό εγγραφών και πιθανώς θα καθυστερήσει. Παρακαλώ εξετάστε τις παραμέτρους του WHERE και χρησιμοποιείστε SET SQL_BIG_SELECTS=1 αν το SELECT είναι σωστό"
hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay"
- ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto."
+ hindi "SELECT कमांड MAX_JOIN_SIZE पंक्तियों से भी ज्यादा की जांच करेगा; कृपया WHERE क्लॉज़ को जाचें अथवा SET SQL_BIG_SELECTS=1 या SET MAX_JOIN_SIZE=# का इस्तेमाल करें"
+ ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto"
jpn "SELECTがMAX_JOIN_SIZEを超える行数を処理しました。WHERE句を確認し、SELECT文に問題がなければ、 SET SQL_BIG_SELECTS=1 または SET MAX_JOIN_SIZE=# を使用して下さい。"
kor "SELECT 명령에서 너무 많은 레코드를 찾기 때문에 많은 시간이 소요됩니다. 따라서 WHERE 문을 점검하거나, 만약 SELECT가 ok되면 SET SQL_BIG_SELECTS=1 옵션을 사용하세요."
nor "SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
@@ -2394,6 +2463,7 @@ ER_UNKNOWN_ERROR
fre "Erreur inconnue"
ger "Unbekannter Fehler"
greek "Προέκυψε άγνωστο λάθος"
+ hindi "अज्ञात त्रुटि हुई"
hun "Ismeretlen hiba"
ita "Errore sconosciuto"
jpn "不明なエラー"
@@ -2417,6 +2487,7 @@ ER_UNKNOWN_PROCEDURE 42000
fre "Procédure %-.192s inconnue"
ger "Unbekannte Prozedur '%-.192s'"
greek "Αγνωστη διαδικασία '%-.192s'"
+ hindi "अज्ञात प्रोसीजर '%-.192s'"
hun "Ismeretlen eljaras: '%-.192s'"
ita "Procedura '%-.192s' sconosciuta"
jpn "'%-.192s' は不明なプロシージャです。"
@@ -2441,6 +2512,7 @@ ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000
fre "Mauvais nombre de paramètres pour la procedure %-.192s"
ger "Falsche Parameterzahl für Prozedur '%-.192s'"
greek "Λάθος αριθμός παραμέτρων στη διαδικασία '%-.192s'"
+ hindi "प्रोसीजर '%-.192s' के लिए पैरामीटर की संख्या गलत है"
hun "Rossz parameter a(z) '%-.192s'eljaras szamitasanal"
ita "Numero di parametri errato per la procedura '%-.192s'"
jpn "プロシージャ '%-.192s' へのパラメータ数が不正です。"
@@ -2465,6 +2537,7 @@ ER_WRONG_PARAMETERS_TO_PROCEDURE
fre "Paramètre erroné pour la procedure %-.192s"
ger "Falsche Parameter für Prozedur '%-.192s'"
greek "Λάθος παράμετροι στην διαδικασία '%-.192s'"
+ hindi "प्रोसीजर '%-.192s' के लिए पैरामीटर्स गलत हैं"
hun "Rossz parameter a(z) '%-.192s' eljarasban"
ita "Parametri errati per la procedura '%-.192s'"
jpn "プロシージャ '%-.192s' へのパラメータが不正です。"
@@ -2489,6 +2562,7 @@ ER_UNKNOWN_TABLE 42S02
fre "Table inconnue '%-.192s' dans %-.32s"
ger "Unbekannte Tabelle '%-.192s' in '%-.32s'"
greek "Αγνωστος πίνακας '%-.192s' σε %-.32s"
+ hindi "टेबल '%-.192s', %-.32s में नहीं मिला"
hun "Ismeretlen tabla: '%-.192s' %-.32s-ban"
ita "Tabella '%-.192s' sconosciuta in %-.32s"
jpn "'%-.192s' は %-.32s では不明な表です。"
@@ -2513,6 +2587,7 @@ ER_FIELD_SPECIFIED_TWICE 42000
fre "Champ '%-.192s' spécifié deux fois"
ger "Feld '%-.192s' wurde zweimal angegeben"
greek "Το πεδίο '%-.192s' έχει ορισθεί δύο φορές"
+ hindi "कॉलम '%-.192s' दो बार निर्दिष्ट किया गया है"
hun "A(z) '%-.192s' mezot ketszer definialta"
ita "Campo '%-.192s' specificato 2 volte"
jpn "列 '%-.192s' は2回指定されています。"
@@ -2537,6 +2612,7 @@ ER_INVALID_GROUP_FUNC_USE
fre "Utilisation invalide de la clause GROUP"
ger "Falsche Verwendung einer Gruppierungsfunktion"
greek "Εσφαλμένη χρήση της group function"
+ hindi "ग्रुप फंक्शन का अवैध उपयोग"
hun "A group funkcio ervenytelen hasznalata"
ita "Uso non valido di una funzione di raggruppamento"
jpn "集計関数の使用方法が不正です。"
@@ -2552,13 +2628,14 @@ ER_INVALID_GROUP_FUNC_USE
ER_UNSUPPORTED_EXTENSION 42000
cze "Tabulka '%-.192s' používá rozšíření, které v této verzi MySQL není"
dan "Tabellen '%-.192s' bruger et filtypenavn som ikke findes i denne MariaDB version"
- nla "Tabel '%-.192s' gebruikt een extensie, die niet in deze MariaDB-versie voorkomt."
+ nla "Tabel '%-.192s' gebruikt een extensie, die niet in deze MariaDB-versie voorkomt"
eng "Table '%-.192s' uses an extension that doesn't exist in this MariaDB version"
est "Tabel '%-.192s' kasutab laiendust, mis ei eksisteeri antud MariaDB versioonis"
fre "Table '%-.192s' : utilise une extension invalide pour cette version de MariaDB"
ger "Tabelle '%-.192s' verwendet eine Erweiterung, die in dieser MariaDB-Version nicht verfügbar ist"
greek "Ο πίνακς '%-.192s' χρησιμοποιεί κάποιο extension που δεν υπάρχει στην έκδοση αυτή της MariaDB"
- hun "A(z) '%-.192s' tabla olyan bovitest hasznal, amely nem letezik ebben a MariaDB versioban."
+ hindi "टेबल '%-.192s' जिस इक्स्टेन्शन का उपयोग कर रहा है, वह इस MariaDB संस्करण में उपलब्ध नहीं है"
+ hun "A(z) '%-.192s' tabla olyan bovitest hasznal, amely nem letezik ebben a MariaDB versioban"
ita "La tabella '%-.192s' usa un'estensione che non esiste in questa versione di MariaDB"
jpn "表 '%-.192s' は、このMySQLバージョンには無い機能を使用しています。"
kor "테이블 '%-.192s'는 확장명령을 이용하지만 현재의 MariaDB 버젼에서는 존재하지 않습니다."
@@ -2582,6 +2659,7 @@ ER_TABLE_MUST_HAVE_COLUMNS 42000
fre "Une table doit comporter au moins une colonne"
ger "Eine Tabelle muss mindestens eine Spalte besitzen"
greek "Ενας πίνακας πρέπει να έχει τουλάχιστον ένα πεδίο"
+ hindi "एक टेबल में कम से कम एक कॉलम होना चाहिए"
hun "A tablanak legalabb egy oszlopot tartalmazni kell"
ita "Una tabella deve avere almeno 1 colonna"
jpn "表には最低でも1個の列が必要です。"
@@ -2603,6 +2681,7 @@ ER_RECORD_FILE_FULL
fre "La table '%-.192s' est pleine"
ger "Tabelle '%-.192s' ist voll"
greek "Ο πίνακας '%-.192s' είναι γεμάτος"
+ hindi "टेबल '%-.192s' पूरा भरा है"
hun "A '%-.192s' tabla megtelt"
ita "La tabella '%-.192s' e` piena"
jpn "表 '%-.192s' は満杯です。"
@@ -2624,6 +2703,7 @@ ER_UNKNOWN_CHARACTER_SET 42000
fre "Jeu de caractères inconnu: '%-.64s'"
ger "Unbekannter Zeichensatz: '%-.64s'"
greek "Αγνωστο character set: '%-.64s'"
+ hindi "अज्ञात CHARACTER SET: '%-.64s'"
hun "Ervenytelen karakterkeszlet: '%-.64s'"
ita "Set di caratteri '%-.64s' sconosciuto"
jpn "不明な文字コードセット: '%-.64s'"
@@ -2645,6 +2725,7 @@ ER_TOO_MANY_TABLES
fre "Trop de tables. MariaDB ne peut utiliser que %d tables dans un JOIN"
ger "Zu viele Tabellen. MariaDB kann in einem Join maximal %d Tabellen verwenden"
greek "Πολύ μεγάλος αριθμός πινάκων. Η MariaDB μπορεί να χρησιμοποιήσει %d πίνακες σε διαδικασία join"
+ hindi "बहुत अधिक टेबल्स, MariaDB एक JOIN में केवल %d टेबल्स का उपयोग कर सकता है"
hun "Tul sok tabla. A MariaDB csak %d tablat tud kezelni osszefuzeskor"
ita "Troppe tabelle. MariaDB puo` usare solo %d tabelle in una join"
jpn "表が多すぎます。MySQLがJOINできる表は %d 個までです。"
@@ -2666,6 +2747,7 @@ ER_TOO_MANY_FIELDS
fre "Trop de champs"
ger "Zu viele Felder"
greek "Πολύ μεγάλος αριθμός πεδίων"
+ hindi "बहुत अधिक कॉलम्स"
hun "Tul sok mezo"
ita "Troppi campi"
jpn "列が多すぎます。"
@@ -2681,7 +2763,7 @@ ER_TOO_MANY_FIELDS
ER_TOO_BIG_ROWSIZE 42000
cze "Řádek je příliš velký. Maximální velikost řádku, nepočítaje položky blob, je %ld. Musíte změnit některé položky na blob"
dan "For store poster. Max post størrelse, uden BLOB's, er %ld. Du må lave nogle felter til BLOB's"
- nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %ld. U dient sommige velden in blobs te veranderen."
+ nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %ld. U dient sommige velden in blobs te veranderen"
eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs"
est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %ld. Muuda mõned väljad BLOB-tüüpi väljadeks"
 fre "Ligne trop grande. La taille maximale d'une ligne, sauf les BLOBs, est %ld. Changez le type de quelques colonnes en BLOB"
@@ -2702,13 +2784,13 @@ ER_TOO_BIG_ROWSIZE 42000
ER_STACK_OVERRUN
cze "Přetečení zásobníku threadu: použito %ld z %ld. Použijte 'mysqld --thread_stack=#' k zadání většího zásobníku"
dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld --thread_stack=#' for at allokere en større stak om nødvendigt"
- nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld --thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)."
+ nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld --thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)"
eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld --thread_stack=#' to specify a bigger stack if needed"
fre "Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld --thread_stack=#' pour indiquer une plus grande valeur"
ger "Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld --thread_stack=#' verwenden, um bei Bedarf einen größeren Stack anzulegen"
greek "Stack overrun στο thread: Used: %ld of a %ld stack. Παρακαλώ χρησιμοποιείστε 'mysqld --thread_stack=#' για να ορίσετε ένα μεγαλύτερο stack αν χρειάζεται"
hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld --thread_stack=#' nagyobb verem definialasahoz"
- ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld --thread_stack=#' per specificare uno stack piu` grande."
+ ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld --thread_stack=#' per specificare uno stack piu` grande"
jpn "スレッドスタック不足です(使用: %ld ; サイズ: %ld)。必要に応じて、より大きい値で 'mysqld --thread_stack=#' の指定をしてください。"
kor "쓰레드 스택이 넘쳤습니다. 사용: %ld개 스택: %ld개. 만약 필요시 더큰 스택을 원할때에는 'mysqld --thread_stack=#' 를 정의하세요"
por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld --thread_stack=#' para especificar uma pilha maior, se necessário"
@@ -2751,6 +2833,7 @@ ER_CANT_FIND_UDF
 fre "Impossible de charger la fonction '%-.192s'"
ger "Kann Funktion '%-.192s' nicht laden"
greek "Δεν είναι δυνατή η διαδικασία load για τη συνάρτηση '%-.192s'"
+ hindi "फंक्शन '%-.192s' लोड नहीं किया जा सका"
hun "A(z) '%-.192s' fuggveny nem toltheto be"
ita "Impossibile caricare la funzione '%-.192s'"
jpn "関数 '%-.192s' をロードできません。"
@@ -2772,6 +2855,7 @@ ER_CANT_INITIALIZE_UDF
fre "Impossible d'initialiser la fonction '%-.192s'; %-.80s"
ger "Kann Funktion '%-.192s' nicht initialisieren: %-.80s"
greek "Δεν είναι δυνατή η έναρξη της συνάρτησης '%-.192s'; %-.80s"
+ hindi "फंक्शन '%-.192s' को प्रारंभ नहीं किया जा सका; %-.80s"
hun "A(z) '%-.192s' fuggveny nem inicializalhato; %-.80s"
ita "Impossibile inizializzare la funzione '%-.192s'; %-.80s"
jpn "関数 '%-.192s' を初期化できません。; %-.80s"
@@ -2814,6 +2898,7 @@ ER_UDF_EXISTS
fre "La fonction '%-.192s' existe déjà"
ger "Funktion '%-.192s' existiert schon"
greek "Η συνάρτηση '%-.192s' υπάρχει ήδη"
+ hindi "फंक्शन '%-.192s' पहले से मौजूद है"
hun "A '%-.192s' fuggveny mar letezik"
ita "La funzione '%-.192s' esiste gia`"
jpn "関数 '%-.192s' はすでに定義されています。"
@@ -2880,6 +2965,7 @@ ER_FUNCTION_NOT_DEFINED
fre "La fonction '%-.192s' n'est pas définie"
ger "Funktion '%-.192s' ist nicht definiert"
greek "Η συνάρτηση '%-.192s' δεν έχει ορισθεί"
+ hindi "फंक्शन '%-.192s' की परिभाषा नहीं मिली"
hun "A '%-.192s' fuggveny nem definialt"
ita "La funzione '%-.192s' non e` definita"
jpn "関数 '%-.192s' は定義されていません。"
@@ -2901,6 +2987,7 @@ ER_HOST_IS_BLOCKED
fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connexion. Débloquer le par 'mysqladmin flush-hosts'"
ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'"
 greek "Ο υπολογιστής '%-.64s' έχει αποκλεισθεί λόγω πολλαπλών λαθών σύνδεσης. Προσπαθήστε να διορθώσετε με 'mysqladmin flush-hosts'"
+ hindi "होस्ट '%-.64s' को कई कनेक्शन में त्रुटियों के कारण ब्लॉक कर दिया गया है; 'mysqladmin flush-hosts' का इस्तेमाल कर अनब्लॉक करें"
hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot"
ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'"
jpn "接続エラーが多いため、ホスト '%-.64s' は拒否されました。'mysqladmin flush-hosts' で解除できます。"
@@ -2921,6 +3008,7 @@ ER_HOST_NOT_PRIVILEGED
fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MariaDB"
ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MariaDB-Server zu verbinden"
greek "Ο υπολογιστής '%-.64s' δεν έχει δικαίωμα σύνδεσης με τον MariaDB server"
+ hindi "होस्ट '%-.64s' को इस MariaDB सर्वर से कनेक्ट करने के लिए अनुमति नहीं है"
hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MariaDB szerverhez"
ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MariaDB"
jpn "ホスト '%-.64s' からのこの MySQL server への接続は許可されていません。"
@@ -2941,12 +3029,13 @@ ER_PASSWORD_ANONYMOUS_USER 42000
fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe"
ger "Sie benutzen MariaDB als anonymer Benutzer und dürfen daher keine Passwörter ändern"
greek "Χρησιμοποιείτε την MariaDB σαν anonymous user και έτσι δεν μπορείτε να αλλάξετε τα passwords άλλων χρηστών"
+ hindi "आप MariaDB का उपयोग एक बेनाम यूज़र की तरह कर रहे हैं; बेनाम यूज़र्स को 'यूज़र सेटिंग्स' बदलने की अनुमति नहीं है"
hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas"
ita "Impossibile cambiare la password usando MariaDB come utente anonimo"
jpn "MySQL を匿名ユーザーで使用しているので、パスワードの変更はできません。"
kor "당신은 MariaDB서버에 익명의 사용자로 접속을 하셨습니다.익명의 사용자는 암호를 변경할 수 없습니다."
por "Você está usando o MariaDB como usuário anônimo e usuários anônimos não têm permissão para mudar senhas"
- rum "Dumneavoastra folositi MariaDB ca un utilizator anonim si utilizatorii anonimi nu au voie sa schimbe setarile utilizatorilor."
+ rum "Dumneavoastra folositi MariaDB ca un utilizator anonim si utilizatorii anonimi nu au voie sa schimbe setarile utilizatorilor"
rus "Вы используете MariaDB от имени анонимного пользователя, а анонимным пользователям не разрешается менять пароли"
serbian "Vi koristite MariaDB kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke"
spa "Tu estás usando MariaDB como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves"
@@ -2981,6 +3070,7 @@ ER_PASSWORD_NO_MATCH 28000
fre "Impossible de trouver un enregistrement correspondant dans la table user"
ger "Kann keinen passenden Datensatz in Tabelle 'user' finden"
greek "Δεν είναι δυνατή η ανεύρεση της αντίστοιχης εγγραφής στον πίνακα των χρηστών"
+ hindi "यूज़र टेबल में रिकॉर्ड नहीं मिला"
hun "Nincs megegyezo sor a user tablaban"
ita "Impossibile trovare la riga corrispondente nella tabella user"
jpn "ユーザーテーブルに該当するレコードが見つかりません。"
@@ -3059,6 +3149,7 @@ ER_CANT_REOPEN_TABLE
est "Ei suuda taasavada tabelit '%-.192s'"
 fre "Impossible de réouvrir la table: '%-.192s'"
ger "Kann Tabelle'%-.192s' nicht erneut öffnen"
+ hindi "टेबल '%-.192s' फिर से खोल नहीं सकते"
 hun "Nem lehet ujra-megnyitni a tablat: '%-.192s'"
ita "Impossibile riaprire la tabella: '%-.192s'"
jpn "表を再オープンできません。: '%-.192s'"
@@ -3082,6 +3173,7 @@ ER_INVALID_USE_OF_NULL 22004
est "NULL väärtuse väärkasutus"
fre "Utilisation incorrecte de la valeur NULL"
ger "Unerlaubte Verwendung eines NULL-Werts"
+ hindi "NULL मान का अवैध उपयोग"
hun "A NULL ervenytelen hasznalata"
ita "Uso scorretto del valore NULL"
jpn "NULL 値の使用方法が不適切です。"
@@ -3101,6 +3193,7 @@ ER_REGEXP_ERROR 42000
est "regexp tagastas vea '%-.64s'"
fre "Erreur '%-.64s' provenant de regexp"
ger "regexp lieferte Fehler '%-.64s'"
+ hindi "regexp में '%-.64s' त्रुटि हुई"
hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)"
ita "Errore '%-.64s' da regexp"
jpn "regexp がエラー '%-.64s' を返しました。"
@@ -3191,27 +3284,27 @@ ER_COLUMNACCESS_DENIED_ERROR 42000
swe "%-.32s ej tillåtet för '%s'@'%s' för kolumn '%-.192s' i tabell '%-.192s'"
ukr "%-.32s команда заборонена користувачу: '%s'@'%s' для стовбця '%-.192s' у таблиці '%-.192s'"
ER_ILLEGAL_GRANT_FOR_TABLE 42000
- cze "Neplatný příkaz GRANT/REVOKE. Prosím, přečtěte si v manuálu, jaká privilegia je možné použít."
- dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres."
- nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden."
+ cze "Neplatný příkaz GRANT/REVOKE. Prosím, přečtěte si v manuálu, jaká privilegia je možné použít"
+ dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres"
+ nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden"
eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
est "Vigane GRANT/REVOKE käsk. Tutvu kasutajajuhendiga"
- fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel."
+ fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel"
ger "Unzulässiger GRANT- oder REVOKE-Befehl. Verfügbare Berechtigungen sind im Handbuch aufgeführt"
- greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
+ greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek"
- ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati."
+ ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati"
jpn "不正な GRANT/REVOKE コマンドです。どの権限で利用可能かはマニュアルを参照して下さい。"
kor "잘못된 GRANT/REVOKE 명령. 어떤 권리와 승인이 사용되어 질 수 있는지 메뉴얼을 보시오."
- nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- pol "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados."
- rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite."
+ nor "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
+ norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
+ pol "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
+ por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados"
+ rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite"
rus "Неверная команда GRANT или REVOKE. Обратитесь к документации, чтобы выяснить, какие привилегии можно использовать"
- serbian "Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priručniku koje vrednosti mogu biti upotrebljene."
- slo "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
- spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados."
+ serbian "Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priručniku koje vrednosti mogu biti upotrebljene"
+ slo "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
+ spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados"
swe "Felaktigt GRANT-privilegium använt"
ukr "Хибна GRANT/REVOKE команда; прочитайте документацію стосовно того, які права можна використовувати"
ER_GRANT_WRONG_HOST_OR_USER 42000
@@ -3222,6 +3315,7 @@ ER_GRANT_WRONG_HOST_OR_USER 42000
est "Masina või kasutaja nimi GRANT lauses on liiga pikk"
fre "L'hôte ou l'utilisateur donné en argument à GRANT est trop long"
ger "Das Host- oder User-Argument für GRANT ist zu lang"
+ hindi "GRANT के लिए होस्ट या यूज़र आर्गुमेंट बहुत लंबा है"
hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban"
ita "L'argomento host o utente per la GRANT e` troppo lungo"
jpn "GRANTコマンドへの、ホスト名やユーザー名が長すぎます。"
@@ -3241,6 +3335,7 @@ ER_NO_SUCH_TABLE 42S02
est "Tabelit '%-.192s.%-.192s' ei eksisteeri"
fre "La table '%-.192s.%-.192s' n'existe pas"
ger "Tabelle '%-.192s.%-.192s' existiert nicht"
+ hindi "टेबल '%-.192s.%-.192s' मौजूद नहीं है"
hun "A '%-.192s.%-.192s' tabla nem letezik"
ita "La tabella '%-.192s.%-.192s' non esiste"
jpn "表 '%-.192s.%-.192s' は存在しません。"
@@ -3283,6 +3378,7 @@ ER_NOT_ALLOWED_COMMAND 42000
est "Antud käsk ei ole lubatud käesolevas MariaDB versioonis"
fre "Cette commande n'existe pas dans cette version de MariaDB"
ger "Der verwendete Befehl ist in dieser MariaDB-Version nicht zulässig"
+ hindi "यह कमांड इस MariaDB संस्करण के साथ इस्तेमाल नहीं किया जा सकता है"
hun "A hasznalt parancs nem engedelyezett ebben a MariaDB verzioban"
ita "Il comando utilizzato non e` supportato in questa versione di MariaDB"
jpn "このMySQLバージョンでは利用できないコマンドです。"
@@ -3303,6 +3399,7 @@ ER_SYNTAX_ERROR 42000
fre "Erreur de syntaxe"
ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen"
greek "You have an error in your SQL syntax"
+ hindi "आपके SQL सिंटेक्स में गलती है; सही सिंटेक्स के लिए अपने MariaDB सर्वर संस्करण के मैन्युअल की सहायता लें"
hun "Szintaktikai hiba"
ita "Errore di sintassi nella query SQL"
jpn "SQL構文エラーです。バージョンに対応するマニュアルを参照して正しい構文を確認してください。"
@@ -3345,6 +3442,7 @@ ER_TOO_MANY_DELAYED_THREADS
est "Liiga palju DELAYED lõimesid kasutusel"
fre "Trop de tâche 'delayed' en cours"
ger "Zu viele verzögerte (DELAYED) Threads in Verwendung"
+ hindi "बहुत से DELAYED थ्रेड्स उपयोग में हैं"
hun "Tul sok kesletetett thread (delayed)"
ita "Troppi threads ritardati in uso"
jpn "'Delayed insert'スレッドが多すぎます。"
@@ -3387,6 +3485,7 @@ ER_NET_PACKET_TOO_LARGE 08S01
est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga"
fre "Paquet plus grand que 'max_allowed_packet' reçu"
ger "Empfangenes Paket ist größer als 'max_allowed_packet' Bytes"
+ hindi "'max_allowed_packet' से भी बड़ा एक पैकेट मिला"
hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'"
ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'"
jpn "'max_allowed_packet'よりも大きなパケットを受信しました。"
@@ -3406,6 +3505,7 @@ ER_NET_READ_ERROR_FROM_PIPE 08S01
est "Viga ühendustoru lugemisel"
fre "Erreur de lecture reçue du pipe de connexion"
ger "Lese-Fehler bei einer Verbindungs-Pipe"
+ hindi "कनेक्शन पाइप से एक READ त्रुटि हुई"
hun "Olvasasi hiba a kapcsolat soran"
ita "Rilevato un errore di lettura dalla pipe di connessione"
jpn "接続パイプの読み込みエラーです。"
@@ -3425,6 +3525,7 @@ ER_NET_FCNTL_ERROR 08S01
est "fcntl() tagastas vea"
fre "Erreur reçue de fcntl() "
ger "fcntl() lieferte einen Fehler"
+ hindi "fcntl() से एक त्रुटि हुई"
hun "Hiba a fcntl() fuggvenyben"
ita "Rilevato un errore da fcntl()"
jpn "fcntl()がエラーを返しました。"
@@ -3444,6 +3545,7 @@ ER_NET_PACKETS_OUT_OF_ORDER 08S01
est "Paketid saabusid vales järjekorras"
fre "Paquets reçus dans le désordre"
ger "Pakete nicht in der richtigen Reihenfolge empfangen"
+ hindi "पैकेट्स क्रम में नहीं प्राप्त हुए"
hun "Helytelen sorrendben erkezett adatcsomagok"
ita "Ricevuti pacchetti non in ordine"
jpn "不正な順序のパケットを受信しました。"
@@ -3463,6 +3565,7 @@ ER_NET_UNCOMPRESS_ERROR 08S01
est "Viga andmepaketi lahtipakkimisel"
fre "Impossible de décompresser le paquet reçu"
ger "Kommunikationspaket lässt sich nicht entpacken"
+ hindi "संचार पैकेट UNCOMPRESS नहीं कर सके"
hun "A kommunikacios adatcsomagok nem tomorithetok ki"
ita "Impossibile scompattare i pacchetti di comunicazione"
jpn "圧縮パケットの展開ができませんでした。"
@@ -3482,6 +3585,7 @@ ER_NET_READ_ERROR 08S01
est "Viga andmepaketi lugemisel"
fre "Erreur de lecture des paquets reçus"
ger "Fehler beim Lesen eines Kommunikationspakets"
+ hindi "संचार पैकेट्स पढ़ते समय एक त्रुटि हुई"
 hun "Hiba a kommunikacios adatcsomagok olvasasa soran"
ita "Rilevato un errore ricevendo i pacchetti di comunicazione"
jpn "パケットの受信でエラーが発生しました。"
@@ -3501,6 +3605,7 @@ ER_NET_READ_INTERRUPTED 08S01
est "Kontrollaja ületamine andmepakettide lugemisel"
fre "Timeout en lecture des paquets reçus"
ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets"
+ hindi "संचार पैकेट्स पढ़ने के दौरान टाइमआउट"
hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran"
ita "Rilevato un timeout ricevendo i pacchetti di comunicazione"
jpn "パケットの受信でタイムアウトが発生しました。"
@@ -3520,6 +3625,7 @@ ER_NET_ERROR_ON_WRITE 08S01
est "Viga andmepaketi kirjutamisel"
fre "Erreur d'écriture des paquets envoyés"
ger "Fehler beim Schreiben eines Kommunikationspakets"
+ hindi "संचार पैकेट्स लिखते समय एक त्रुटि हुई"
hun "Hiba a kommunikacios csomagok irasa soran"
ita "Rilevato un errore inviando i pacchetti di comunicazione"
jpn "パケットの送信でエラーが発生しました。"
@@ -3539,6 +3645,7 @@ ER_NET_WRITE_INTERRUPTED 08S01
est "Kontrollaja ületamine andmepakettide kirjutamisel"
fre "Timeout d'écriture des paquets envoyés"
ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets"
+ hindi "संचार पैकेट्स लिखने के दौरान टाइमआउट"
hun "Idotullepes a kommunikacios csomagok irasa soran"
ita "Rilevato un timeout inviando i pacchetti di comunicazione"
jpn "パケットの送信でタイムアウトが発生しました。"
@@ -3558,6 +3665,7 @@ ER_TOO_LONG_STRING 42000
est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga"
fre "La chaîne résultat est plus grande que 'max_allowed_packet'"
ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes"
+ hindi "रिजल्ट स्ट्रिंग 'max_allowed_packet' से लंबा है"
hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'"
ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'"
jpn "結果の文字列が 'max_allowed_packet' よりも大きいです。"
@@ -3576,6 +3684,7 @@ ER_TABLE_CANT_HANDLE_BLOB 42000
est "Valitud tabelitüüp (%s) ei toeta BLOB/TEXT tüüpi välju"
fre "Ce type de table (%s) ne supporte pas les colonnes BLOB/TEXT"
ger "Der verwendete Tabellentyp (%s) unterstützt keine BLOB- und TEXT-Felder"
+ hindi "स्टोरेज इंजन %s BLOB/TEXT कॉलम्स को सपोर्ट नहीं करता"
hun "A hasznalt tabla tipus (%s) nem tamogatja a BLOB/TEXT mezoket"
ita "Il tipo di tabella usata (%s) non supporta colonne di tipo BLOB/TEXT"
por "Tipo de tabela usado (%s) não permite colunas BLOB/TEXT"
@@ -3593,6 +3702,7 @@ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000
est "Valitud tabelitüüp (%s) ei toeta AUTO_INCREMENT tüüpi välju"
fre "Ce type de table (%s) ne supporte pas les colonnes AUTO_INCREMENT"
ger "Der verwendete Tabellentyp (%s) unterstützt keine AUTO_INCREMENT-Felder"
+ hindi "स्टोरेज इंजन %s AUTO_INCREMENT कॉलम्स को सपोर्ट नहीं करता"
hun "A hasznalt tabla tipus (%s) nem tamogatja az AUTO_INCREMENT tipusu mezoket"
ita "Il tipo di tabella usata (%s) non supporta colonne di tipo AUTO_INCREMENT"
por "Tipo de tabela usado (%s) não permite colunas AUTO_INCREMENT"
@@ -3634,6 +3744,7 @@ ER_WRONG_COLUMN_NAME 42000
est "Vigane tulba nimi '%-.100s'"
fre "Nom de colonne '%-.100s' incorrect"
ger "Falscher Spaltenname '%-.100s'"
+ hindi "कॉलम नाम '%-.100s' गलत है"
hun "Ervenytelen mezonev: '%-.100s'"
ita "Nome colonna '%-.100s' non corretto"
jpn "列名 '%-.100s' は不正です。"
@@ -3647,6 +3758,7 @@ ER_WRONG_COLUMN_NAME 42000
ER_WRONG_KEY_COLUMN 42000
eng "The storage engine %s can't index column %`s"
ger "Die Speicher-Engine %s kann die Spalte %`s nicht indizieren"
+ hindi "स्टोरेज इंजन %s, कॉलम %`s को इंडेक्स नहीं कर सकता"
rus "Обработчик таблиц %s не может проиндексировать столбец %`s"
ukr "Вказівник таблиц %s не може індексувати стовбець %`s"
ER_WRONG_MRG_TABLE
@@ -3722,6 +3834,7 @@ ER_PRIMARY_CANT_HAVE_NULL 42000
est "Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi võtit"
fre "Toutes les parties d'un index PRIMARY KEY doivent être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE"
ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. Wenn NULL in einem Schlüssel benötigt wird, muss ein UNIQUE-Schlüssel verwendet werden"
+ hindi "PRIMARY KEY के सभी भागों को NOT NULL होना चाहिए; यदि आपको एक KEY में NULL की जरूरत है, तो UNIQUE का उपयोग करें"
hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot"
ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE"
jpn "PRIMARY KEYの列は全てNOT NULLでなければいけません。UNIQUE索引であればNULLを含むことが可能です。"
@@ -3740,6 +3853,7 @@ ER_TOO_MANY_ROWS 42000
est "Tulemis oli rohkem kui üks kirje"
fre "Le résultat contient plus d'un enregistrement"
ger "Ergebnis besteht aus mehr als einer Zeile"
+ hindi "परिणाम एक से अधिक पंक्ति का है"
hun "Az eredmeny tobb, mint egy sort tartalmaz"
ita "Il risultato consiste di piu` di una riga"
jpn "結果が2行以上です。"
@@ -3758,6 +3872,7 @@ ER_REQUIRES_PRIMARY_KEY 42000
est "Antud tabelitüüp nõuab primaarset võtit"
fre "Ce type de table nécessite une clé primaire (PRIMARY KEY)"
ger "Dieser Tabellentyp benötigt einen Primärschlüssel (PRIMARY KEY)"
+ hindi "इस प्रकार के टेबल को एक PRIMARY KEY की आवश्यकता है"
hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo"
ita "Questo tipo di tabella richiede una chiave primaria"
jpn "使用のストレージエンジンでは、PRIMARY KEYが必要です。"
@@ -3776,6 +3891,7 @@ ER_NO_RAID_COMPILED
est "Antud MariaDB versioon on kompileeritud ilma RAID toeta"
fre "Cette version de MariaDB n'est pas compilée avec le support RAID"
ger "Diese MariaDB-Version ist nicht mit RAID-Unterstützung kompiliert"
+ hindi "MariaDB का यह संस्करण RAID सपोर्ट के साथ कॉम्पाईल्ड नहीं है"
hun "Ezen leforditott MariaDB verzio nem tartalmaz RAID support-ot"
ita "Questa versione di MYSQL non e` compilata con il supporto RAID"
jpn "このバージョンのMySQLはRAIDサポートを含めてコンパイルされていません。"
@@ -3811,6 +3927,7 @@ ER_KEY_DOES_NOT_EXITS 42000 S1009
est "Võti '%-.192s' ei eksisteeri tabelis '%-.192s'"
fre "L'index '%-.192s' n'existe pas sur la table '%-.192s'"
ger "Schlüssel '%-.192s' existiert in der Tabelle '%-.192s' nicht"
+ hindi "KEY '%-.192s', टेबल '%-.192s' में मौजूद नहीं है"
hun "A '%-.192s' kulcs nem letezik a '%-.192s' tablaban"
ita "La chiave '%-.192s' non esiste nella tabella '%-.192s'"
jpn "索引 '%-.192s' は表 '%-.192s' には存在しません。"
@@ -3828,6 +3945,7 @@ ER_CHECK_NO_SUCH_TABLE 42000
est "Ei suuda avada tabelit"
fre "Impossible d'ouvrir la table"
ger "Kann Tabelle nicht öffnen"
+ hindi "टेबल नहीं खुल सकता है"
hun "Nem tudom megnyitni a tablat"
ita "Impossibile aprire la tabella"
jpn "表をオープンできません。"
@@ -3846,6 +3964,7 @@ ER_CHECK_NOT_IMPLEMENTED 42000
fre "Ce type de table ne supporte pas les %s"
ger "Die Speicher-Engine für diese Tabelle unterstützt kein %s"
greek "The handler for the table doesn't support %s"
+ hindi "इस टेबल का स्टोरेज इंजन '%s' को सपोर्ट नहीं करता"
hun "A tabla kezeloje (handler) nem tamogatja az %s"
ita "Il gestore per la tabella non supporta il %s"
jpn "この表のストレージエンジンは '%s' を利用できません。"
@@ -3886,6 +4005,7 @@ ER_ERROR_DURING_COMMIT
est "Viga %M käsu COMMIT täitmisel"
fre "Erreur %M lors du COMMIT"
ger "Fehler %M beim COMMIT"
+ hindi "COMMIT के दौरान %M त्रुटि हुई"
hun "%M hiba a COMMIT vegrehajtasa soran"
ita "Rilevato l'errore %M durante il COMMIT"
jpn "COMMIT中にエラー %M が発生しました。"
@@ -3903,6 +4023,7 @@ ER_ERROR_DURING_ROLLBACK
est "Viga %M käsu ROLLBACK täitmisel"
fre "Erreur %M lors du ROLLBACK"
ger "Fehler %M beim ROLLBACK"
+ hindi "ROLLBACK के दौरान %M त्रुटि हुई"
hun "%M hiba a ROLLBACK vegrehajtasa soran"
ita "Rilevato l'errore %M durante il ROLLBACK"
jpn "ROLLBACK中にエラー %M が発生しました。"
@@ -3920,6 +4041,7 @@ ER_ERROR_DURING_FLUSH_LOGS
est "Viga %M käsu FLUSH_LOGS täitmisel"
fre "Erreur %M lors du FLUSH_LOGS"
ger "Fehler %M bei FLUSH_LOGS"
+ hindi "FLUSH_LOGS के दौरान %M त्रुटि हुई"
hun "%M hiba a FLUSH_LOGS vegrehajtasa soran"
ita "Rilevato l'errore %M durante il FLUSH_LOGS"
jpn "FLUSH_LOGS中にエラー %M が発生しました。"
@@ -3937,6 +4059,7 @@ ER_ERROR_DURING_CHECKPOINT
est "Viga %M käsu CHECKPOINT täitmisel"
fre "Erreur %M lors du CHECKPOINT"
ger "Fehler %M bei CHECKPOINT"
+ hindi "CHECKPOINT के दौरान %M त्रुटि हुई"
hun "%M hiba a CHECKPOINT vegrehajtasa soran"
ita "Rilevato l'errore %M durante il CHECKPOINT"
jpn "CHECKPOINT中にエラー %M が発生しました。"
@@ -3947,21 +4070,21 @@ ER_ERROR_DURING_CHECKPOINT
swe "Fick fel %M vid CHECKPOINT"
ukr "Отримано помилку %M під час CHECKPOINT"
ER_NEW_ABORTING_CONNECTION 08S01
- cze "Spojení %ld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo přerušeno"
- dan "Afbrød forbindelsen %ld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)"
- nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s' (%-.64s)"
- eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)"
- est "Ühendus katkestatud %ld andmebaas: '%-.192s' kasutaja: '%-.48s' masin: '%-.64s' (%-.64s)"
- fre "Connection %ld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s' (%-.64s)"
- ger "Abbruch der Verbindung %ld zur Datenbank '%-.192s'. Benutzer: '%-.48s', Host: '%-.64s' (%-.64s)"
- ita "Interrotta la connessione %ld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s' (%-.64s)"
- jpn "接続 %ld が中断されました。データベース: '%-.192s' ユーザー: '%-.48s' ホスト: '%-.64s' (%-.64s)"
- por "Conexão %ld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')"
- rus "Прервано соединение %ld к базе данных '%-.192s' пользователя '%-.48s' с хоста '%-.64s' (%-.64s)"
- serbian "Prekinuta konekcija broj %ld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)"
- spa "Abortada conexión %ld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)"
- swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)"
- ukr "Перервано з'єднання %ld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)"
+ cze "Spojení %lld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo přerušeno"
+ dan "Afbrød forbindelsen %lld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)"
+ nla "Afgebroken verbinding %lld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s' (%-.64s)"
+ eng "Aborted connection %lld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)"
+ est "Ühendus katkestatud %lld andmebaas: '%-.192s' kasutaja: '%-.48s' masin: '%-.64s' (%-.64s)"
+ fre "Connexion %lld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s' (%-.64s)"
+ ger "Abbruch der Verbindung %lld zur Datenbank '%-.192s'. Benutzer: '%-.48s', Host: '%-.64s' (%-.64s)"
+ ita "Interrotta la connessione %lld al db: '%-.192s' utente: '%-.48s' host: '%-.64s' (%-.64s)"
+ jpn "接続 %lld が中断されました。データベース: '%-.192s' ユーザー: '%-.48s' ホスト: '%-.64s' (%-.64s)"
+ por "Conexão %lld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')"
+ rus "Прервано соединение %lld к базе данных '%-.192s' пользователя '%-.48s' с хоста '%-.64s' (%-.64s)"
+ serbian "Prekinuta konekcija broj %lld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)"
+ spa "Abortada conexión %lld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)"
+ swe "Avbröt länken för tråd %lld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)"
+ ukr "Перервано з'єднання %lld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)"
ER_UNUSED_10
eng "You should never see it"
ER_FLUSH_MASTER_BINLOG_CLOSED
@@ -4066,21 +4189,22 @@ ER_LOCK_OR_ACTIVE_TRANSACTION
swe "Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion"
ukr "Не можу виконати подану команду тому, що таблиця заблокована або виконується транзакція"
ER_UNKNOWN_SYSTEM_VARIABLE
- cze "Neznámá systémová proměnná '%-.64s'"
- dan "Ukendt systemvariabel '%-.64s'"
- nla "Onbekende systeem variabele '%-.64s'"
- eng "Unknown system variable '%-.64s'"
- est "Tundmatu süsteemne muutuja '%-.64s'"
- fre "Variable système '%-.64s' inconnue"
- ger "Unbekannte Systemvariable '%-.64s'"
- ita "Variabile di sistema '%-.64s' sconosciuta"
- jpn "'%-.64s' は不明なシステム変数です。"
- por "Variável de sistema '%-.64s' desconhecida"
- rus "Неизвестная системная переменная '%-.64s'"
- serbian "Nepoznata sistemska promenljiva '%-.64s'"
- spa "Desconocida variable de sistema '%-.64s'"
- swe "Okänd systemvariabel: '%-.64s'"
- ukr "Невідома системна змінна '%-.64s'"
+ cze "Neznámá systémová proměnná '%-.*s'"
+ dan "Ukendt systemvariabel '%-.*s'"
+ nla "Onbekende systeem variabele '%-.*s'"
+ eng "Unknown system variable '%-.*s'"
+ est "Tundmatu süsteemne muutuja '%-.*s'"
+ fre "Variable système '%-.*s' inconnue"
+ ger "Unbekannte Systemvariable '%-.*s'"
+ hindi "अज्ञात सिस्टम वैरिएबल '%-.*s'"
+ ita "Variabile di sistema '%-.*s' sconosciuta"
+ jpn "'%-.*s' は不明なシステム変数です。"
+ por "Variável de sistema '%-.*s' desconhecida"
+ rus "Неизвестная системная переменная '%-.*s'"
+ serbian "Nepoznata sistemska promenljiva '%-.*s'"
+ spa "Desconocida variable de sistema '%-.*s'"
+ swe "Okänd systemvariabel: '%-.*s'"
+ ukr "Невідома системна змінна '%-.*s'"
ER_CRASHED_ON_USAGE
cze "Tabulka '%-.192s' je označena jako porušená a měla by být opravena"
dan "Tabellen '%-.192s' er markeret med fejl og bør repareres"
@@ -4151,7 +4275,7 @@ ER_SLAVE_MUST_STOP
ita "Questa operazione non puo' essere eseguita con un database 'slave' '%2$*1$s' che gira, lanciare prima STOP SLAVE '%2$*1$s'"
por "Esta operação não pode ser realizada com um 'slave' '%2$*1$s' em execução. Execute STOP SLAVE '%2$*1$s' primeiro"
rus "Эту операцию невозможно выполнить при работающем потоке подчиненного сервера %2$*1$s. Сначала выполните STOP SLAVE '%2$*1$s'"
- serbian "Ova operacija ne može biti izvršena dok je aktivan podređeni '%2$*1$s' server. Zadajte prvo komandu 'STOP SLAVE '%2$*1$s'' da zaustavite podređeni server."
+ serbian "Ova operacija ne može biti izvršena dok je aktivan podređeni '%2$*1$s' server. Zadajte prvo komandu 'STOP SLAVE '%2$*1$s'' da zaustavite podređeni server"
spa "Esta operación no puede ser hecha con el esclavo '%2$*1$s' funcionando, primero use STOP SLAVE '%2$*1$s'"
swe "Denna operation kan inte göras under replikering; Du har en aktiv förbindelse till '%2$*1$s'. Gör STOP SLAVE '%2$*1$s' först"
ukr "Операція не може бути виконана з запущеним підлеглим '%2$*1$s', спочатку виконайте STOP SLAVE '%2$*1$s'"
@@ -4211,6 +4335,7 @@ ER_TOO_MANY_USER_CONNECTIONS 42000
est "Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga"
fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connexions actives"
ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen"
+ hindi "यूज़र %-.64s के पहले से ही 'max_user_connections' से अधिक सक्रिय कनेक्शन्स हैं"
ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive"
jpn "ユーザー '%-.64s' はすでに 'max_user_connections' 以上のアクティブな接続を行っています。"
por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas"
@@ -4226,6 +4351,7 @@ ER_SET_CONSTANTS_ONLY
est "Ainult konstantsed suurused on lubatud SET klauslis"
fre "Seules les expressions constantes sont autorisées avec SET"
ger "Bei diesem Befehl dürfen nur konstante Ausdrücke verwendet werden"
+ hindi "इस स्टेटमेंट में आप केवल CONSTANT EXPRESSIONS का उपयोग कर सकते हैं"
ita "Si possono usare solo espressioni costanti con SET"
jpn "SET処理が失敗しました。"
por "Você pode usar apenas expressões constantes com SET"
@@ -4243,7 +4369,7 @@ ER_LOCK_WAIT_TIMEOUT
ger "Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten"
ita "E' scaduto il timeout per l'attesa del lock"
jpn "ロック待ちがタイムアウトしました。トランザクションを再試行してください。"
- por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação."
+ por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação"
rus "Таймаут ожидания блокировки истек; попробуйте перезапустить транзакцию"
serbian "Vremenski limit za zaključavanje tabele je istekao; Probajte da ponovo startujete transakciju"
spa "Tiempo de bloqueo de espera excedido"
@@ -4256,6 +4382,7 @@ ER_LOCK_TABLE_FULL
est "Lukkude koguarv ületab lukutabeli suuruse"
fre "Le nombre total de verrou dépasse la taille de la table des verrous"
ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle"
+ hindi "लॉक्स की कुल संख्या लॉक टेबल के साइज से अधिक है"
ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock"
jpn "ロックの数が多すぎます。"
por "O número total de travamentos excede o tamanho da tabela de travamentos"
@@ -4315,6 +4442,7 @@ ER_WRONG_ARGUMENTS
est "Vigased parameetrid %s-le"
fre "Mauvais arguments à %s"
ger "Falsche Argumente für %s"
+ hindi "%s को गलत आर्ग्यूमेंट्स"
ita "Argomenti errati a %s"
jpn "%s の引数が不正です"
por "Argumentos errados para %s"
@@ -4329,6 +4457,7 @@ ER_NO_PERMISSION_TO_CREATE_USER 42000
est "Kasutajal '%s'@'%s' ei ole lubatud luua uusi kasutajaid"
fre "'%s'@'%s' n'est pas autorisé à créer de nouveaux utilisateurs"
ger "'%s'@'%s' ist nicht berechtigt, neue Benutzer hinzuzufügen"
+ hindi "'%s'@'%s' को नए यूज़र्स बनाने की अनुमति नहीं है"
ita "A '%s'@'%s' non e' permesso creare nuovi utenti"
por "Não é permitido a '%s'@'%s' criar novos usuários"
rus "'%s'@'%s' не разрешается создавать новых пользователей"
@@ -4344,7 +4473,7 @@ ER_UNION_TABLES_IN_DIFFERENT_DIR
ger "Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden"
ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database"
jpn "不正な表定義です。MERGE表の構成表はすべて同じデータベース内になければなりません。"
- por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados."
+ por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados"
rus "Неверное определение таблицы; Все таблицы в MERGE должны принадлежать одной и той же базе данных"
serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka"
spa "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos"
@@ -4357,7 +4486,7 @@ ER_LOCK_DEADLOCK 40001
ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten"
ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione"
jpn "ロック取得中にデッドロックが検出されました。トランザクションを再試行してください。"
- por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação."
+ por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação"
rus "Возникла тупиковая ситуация в процессе получения блокировки; Попробуйте перезапустить транзакцию"
serbian "Unakrsno zaključavanje pronađeno kada sam pokušao da dobijem pravo na zaključavanje; Probajte da restartujete transakciju"
spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición"
@@ -4368,6 +4497,7 @@ ER_TABLE_CANT_HANDLE_FT
est "Antud tabelitüüp (%s) ei toeta FULLTEXT indekseid"
fre "Le type de table utilisé (%s) ne supporte pas les index FULLTEXT"
ger "Der verwendete Tabellentyp (%s) unterstützt keine FULLTEXT-Indizes"
+ hindi "स्टोरेज इंजन '%s' FULLTEXT इन्डेक्सेस को सपोर्ट नहीं करता"
ita "La tabella usata (%s) non supporta gli indici FULLTEXT"
por "O tipo de tabela utilizado (%s) não suporta índices de texto completo (fulltext indexes)"
rus "Используемый тип таблиц (%s) не поддерживает полнотекстовых индексов"
@@ -4484,7 +4614,7 @@ ER_CANT_UPDATE_WITH_READLOCK
spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura"
swe "Kan inte utföra kommandot emedan du har ett READ-lås"
ER_MIXING_NOT_ALLOWED
- nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld."
+ nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld"
eng "Mixing of transactional and non-transactional tables is disabled"
est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud"
ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert"
@@ -4748,7 +4878,7 @@ ER_NOT_SUPPORTED_AUTH_MODE 08004
nla "Client ondersteunt het door de server verwachtte authenticatieprotocol niet. Overweeg een nieuwere MariaDB client te gebruiken"
por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MariaDB"
spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MariaDB"
- swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet."
+ swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet"
ER_SPATIAL_CANT_HAVE_NULL 42000
eng "All parts of a SPATIAL index must be NOT NULL"
ger "Alle Teile eines SPATIAL-Index müssen als NOT NULL deklariert sein"
@@ -4842,11 +4972,12 @@ WARN_DATA_TRUNCATED 01000
ER_WARN_USING_OTHER_HANDLER
eng "Using storage engine %s for table '%s'"
ger "Speicher-Engine %s wird für Tabelle '%s' benutzt"
+ hindi "स्टोरेज इंजन %s का इस्तेमाल टेबल '%s' के लिए किया जा रहा है"
jpn "ストレージエンジン %s が表 '%s' に利用されています。"
por "Usando engine de armazenamento %s para tabela '%s'"
spa "Usando motor de almacenamiento %s para tabla '%s'"
swe "Använder handler %s för tabell '%s'"
-ER_CANT_AGGREGATE_2COLLATIONS
+ER_CANT_AGGREGATE_2COLLATIONS
eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'"
ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) für Operation '%s'"
jpn "照合順序 (%s,%s) と (%s,%s) の混在は操作 '%s' では不正です。"
@@ -4969,6 +5100,7 @@ ER_WARN_HOSTNAME_WONT_WORK
ER_UNKNOWN_STORAGE_ENGINE 42000
eng "Unknown storage engine '%s'"
ger "Unbekannte Speicher-Engine '%s'"
+ hindi "अज्ञात स्टोरेज इंजन '%s'"
jpn "'%s' は不明なストレージエンジンです。"
por "Motor de tabela desconhecido '%s'"
spa "Desconocido motor de tabla '%s'"
@@ -5063,15 +5195,19 @@ ER_SP_NO_RECURSIVE_CREATE 2F003
ER_SP_ALREADY_EXISTS 42000
eng "%s %s already exists"
ger "%s %s existiert bereits"
+ hindi "%s %s पहले से ही मौजूद है"
ER_SP_DOES_NOT_EXIST 42000
eng "%s %s does not exist"
ger "%s %s existiert nicht"
+ hindi "%s %s मौजूद नहीं है"
ER_SP_DROP_FAILED
eng "Failed to DROP %s %s"
ger "DROP %s %s ist fehlgeschlagen"
+ hindi "%s %s को ड्रॉप करने में असफल रहे"
ER_SP_STORE_FAILED
eng "Failed to CREATE %s %s"
ger "CREATE %s %s ist fehlgeschlagen"
+ hindi "%s %s को बनाने में असफल रहे"
ER_SP_LILABEL_MISMATCH 42000
eng "%s with no matching label: %s"
ger "%s ohne passende Marke: %s"
@@ -5090,15 +5226,17 @@ ER_SP_BADSELECT 0A000
ER_SP_BADRETURN 42000
eng "RETURN is only allowed in a FUNCTION"
ger "RETURN ist nur innerhalb einer FUNCTION erlaubt"
+ hindi "RETURN को केवल FUNCTION में इस्तेमाल किया जा सकता है"
ER_SP_BADSTATEMENT 0A000
eng "%s is not allowed in stored procedures"
ger "%s ist in gespeicherten Prozeduren nicht erlaubt"
+ hindi "%s को STORED PROCEDURE में इस्तेमाल नहीं किया जा सकता है"
ER_UPDATE_LOG_DEPRECATED_IGNORED 42000
- eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored. This option will be removed in MariaDB 5.6."
- ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert. Diese Option wird in MariaDB 5.6 entfernt."
+ eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored. This option will be removed in MariaDB 5.6"
+ ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert. Diese Option wird in MariaDB 5.6 entfernt"
ER_UPDATE_LOG_DEPRECATED_TRANSLATED 42000
- eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN. This option will be removed in MariaDB 5.6."
- ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN übersetzt. Diese Option wird in MariaDB 5.6 entfernt."
+ eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN. This option will be removed in MariaDB 5.6"
+ ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN übersetzt. Diese Option wird in MariaDB 5.6 entfernt"
ER_QUERY_INTERRUPTED 70100
eng "Query execution was interrupted"
ger "Ausführung der Abfrage wurde unterbrochen"
@@ -5111,9 +5249,11 @@ ER_SP_COND_MISMATCH 42000
ER_SP_NORETURN 42000
eng "No RETURN found in FUNCTION %s"
ger "Kein RETURN in FUNCTION %s gefunden"
+ hindi "FUNCTION %s में कोई RETURN नहीं है"
ER_SP_NORETURNEND 2F005
eng "FUNCTION %s ended without RETURN"
ger "FUNCTION %s endete ohne RETURN"
+ hindi "FUNCTION %s RETURN के बिना समाप्त हो गया"
ER_SP_BAD_CURSOR_QUERY 42000
eng "Cursor statement must be a SELECT"
ger "Cursor-Anweisung muss ein SELECT sein"
@@ -5123,9 +5263,11 @@ ER_SP_BAD_CURSOR_SELECT 42000
ER_SP_CURSOR_MISMATCH 42000
eng "Undefined CURSOR: %s"
ger "Undefinierter CURSOR: %s"
+ hindi "CURSOR %s अपरिभाषित है"
ER_SP_CURSOR_ALREADY_OPEN 24000
eng "Cursor is already open"
ger "Cursor ist schon geöffnet"
+ hindi "CURSOR पहले से ही खुला है"
ER_SP_CURSOR_NOT_OPEN 24000
eng "Cursor is not open"
ger "Cursor ist nicht geöffnet"
@@ -5153,6 +5295,7 @@ ER_SP_DUP_CURS 42000
ER_SP_CANT_ALTER
eng "Failed to ALTER %s %s"
ger "ALTER %s %s fehlgeschlagen"
+ hindi "%s %s को ALTER करने में असफल रहे"
ER_SP_SUBSELECT_NYI 0A000
eng "Subquery value not supported"
ger "Subquery-Wert wird nicht unterstützt"
@@ -5218,6 +5361,8 @@ ER_VIEW_SELECT_DERIVED
ger "SELECT der View enthält eine Subquery in der FROM-Klausel"
rus "View SELECT содержит подзапрос в конструкции FROM"
ukr "View SELECT має підзапит у конструкції FROM"
+
+# Not used any more, syntax error is returned instead
ER_VIEW_SELECT_CLAUSE
eng "View's SELECT contains a '%s' clause"
ger "SELECT der View enthält eine '%s'-Klausel"
@@ -5259,9 +5404,11 @@ ER_SP_GOTO_IN_HNDLR
ER_TRG_ALREADY_EXISTS
eng "Trigger already exists"
ger "Trigger existiert bereits"
+ hindi "TRIGGER पहले से मौजूद है"
ER_TRG_DOES_NOT_EXIST
eng "Trigger does not exist"
ger "Trigger existiert nicht"
+ hindi "TRIGGER मौजूद नहीं है"
ER_TRG_ON_VIEW_OR_TEMP_TABLE
eng "Trigger's '%-.192s' is view or temporary table"
ger "'%-.192s' des Triggers ist View oder temporäre Tabelle"
@@ -5277,22 +5424,23 @@ ER_NO_DEFAULT_FOR_FIELD
ER_DIVISION_BY_ZERO 22012
eng "Division by 0"
ger "Division durch 0"
+ hindi "0 से विभाजन"
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD 22007
- eng "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %lu"
- ger "Falscher %-.32s-Wert: '%-.128s' für Feld '%.192s' in Zeile %lu"
+ eng "Incorrect %-.32s value: '%-.128s' for column `%.192s`.`%.192s`.`%.192s` at row %lu"
+ ger "Falscher %-.32s-Wert: '%-.128s' für Feld `%.192s`.`%.192s`.`%.192s` in Zeile %lu"
ER_ILLEGAL_VALUE_FOR_TYPE 22007
eng "Illegal %s '%-.192s' value found during parsing"
ger "Nicht zulässiger %s-Wert '%-.192s' beim Parsen gefunden"
ER_VIEW_NONUPD_CHECK
- eng "CHECK OPTION on non-updatable view '%-.192s.%-.192s'"
- ger "CHECK OPTION auf nicht-aktualisierbarem View '%-.192s.%-.192s'"
- rus "CHECK OPTION для необновляемого VIEW '%-.192s.%-.192s'"
- ukr "CHECK OPTION для VIEW '%-.192s.%-.192s' що не може бути оновленним"
-ER_VIEW_CHECK_FAILED
- eng "CHECK OPTION failed '%-.192s.%-.192s'"
- ger "CHECK OPTION fehlgeschlagen: '%-.192s.%-.192s'"
- rus "проверка CHECK OPTION для VIEW '%-.192s.%-.192s' провалилась"
- ukr "Перевірка CHECK OPTION для VIEW '%-.192s.%-.192s' не пройшла"
+ eng "CHECK OPTION on non-updatable view %`-.192s.%`-.192s"
+ ger "CHECK OPTION auf nicht-aktualisierbarem View %`-.192s.%`-.192s"
+ rus "CHECK OPTION для необновляемого VIEW %`-.192s.%`-.192s"
+ ukr "CHECK OPTION для VIEW %`-.192s.%`-.192s що не може бути оновленним"
+ER_VIEW_CHECK_FAILED 44000
+ eng "CHECK OPTION failed %`-.192s.%`-.192s"
+ ger "CHECK OPTION fehlgeschlagen: %`-.192s.%`-.192s"
+ rus "Проверка CHECK OPTION для VIEW %`-.192s.%`-.192s провалилась"
+ ukr "Перевірка CHECK OPTION для VIEW %`-.192s.%`-.192s не пройшла"
ER_PROCACCESS_DENIED_ERROR 42000
eng "%-.32s command denied to user '%s'@'%s' for routine '%-.192s'"
ger "Befehl %-.32s nicht zulässig für Benutzer '%s'@'%s' in Routine '%-.192s'"
@@ -5314,6 +5462,7 @@ ER_BINLOG_PURGE_PROHIBITED
ER_FSEEK_FAIL
eng "Failed on fseek()"
ger "fseek() fehlgeschlagen"
+ hindi "fseek() विफल रहा"
ER_BINLOG_PURGE_FATAL_ERR
eng "Fatal error during log purge"
ger "Schwerwiegender Fehler bei der Log-Bereinigung"
@@ -5454,28 +5603,28 @@ ER_BINLOG_CREATE_ROUTINE_NEED_SUPER
eng "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)"
ger "Sie haben keine SUPER-Berechtigung und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_function_creators verwenden)"
ER_EXEC_STMT_WITH_OPEN_CURSOR
- eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it."
+ eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it"
ger "Sie können keine vorbereitete Anweisung ausführen, die mit einem geöffneten Cursor verknüpft ist. Setzen Sie die Anweisung zurück, um sie neu auszuführen"
ER_STMT_HAS_NO_OPEN_CURSOR
- eng "The statement (%lu) has no open cursor."
+ eng "The statement (%lu) has no open cursor"
ger "Die Anweisung (%lu) hat keinen geöffneten Cursor"
ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG
- eng "Explicit or implicit commit is not allowed in stored function or trigger."
+ eng "Explicit or implicit commit is not allowed in stored function or trigger"
ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt"
ER_NO_DEFAULT_FOR_VIEW_FIELD
eng "Field of view '%-.192s.%-.192s' underlying table doesn't have a default value"
ger "Ein Feld der dem View '%-.192s.%-.192s' zugrundeliegenden Tabelle hat keinen Vorgabewert"
ER_SP_NO_RECURSION
- eng "Recursive stored functions and triggers are not allowed."
+ eng "Recursive stored functions and triggers are not allowed"
ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt"
ER_TOO_BIG_SCALE 42000 S1009
- eng "Too big scale %llu specified for '%-.192s'. Maximum is %u."
+ eng "Too big scale %llu specified for '%-.192s'. Maximum is %u"
ger "Zu großer Skalierungsfaktor %llu für '%-.192s' angegeben. Maximum ist %u"
ER_TOO_BIG_PRECISION 42000 S1009
- eng "Too big precision %llu specified for '%-.192s'. Maximum is %u."
+ eng "Too big precision %llu specified for '%-.192s'. Maximum is %u"
ger "Zu große Genauigkeit %llu für '%-.192s' angegeben. Maximum ist %u"
ER_M_BIGGER_THAN_D 42000 S1009
- eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s')."
+ eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s')"
ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.192s')"
ER_WRONG_LOCK_OF_SYSTEM_TABLE
eng "You can't combine write-locking of system tables with other tables or lock types"
@@ -5502,7 +5651,7 @@ ER_TRG_IN_WRONG_SCHEMA
eng "Trigger in wrong schema"
ger "Trigger im falschen Schema"
ER_STACK_OVERRUN_NEED_MORE
- eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack."
+ eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack"
ger "Thread-Stack-Überlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes benötigt. Verwenden Sie 'mysqld --thread_stack=#', um einen größeren Stack anzugeben"
jpn "スレッドスタック不足です(使用: %ld ; サイズ: %ld ; 要求: %ld)。より大きい値で 'mysqld --thread_stack=#' の指定をしてください。"
ER_TOO_LONG_BODY 42000 S1009
@@ -5521,10 +5670,10 @@ ER_DATETIME_FUNCTION_OVERFLOW 22008
eng "Datetime function: %-.32s field overflow"
ger "Datetime-Funktion: %-.32s Feldüberlauf"
ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG
- eng "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger."
+ eng "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger"
ger "Kann Tabelle '%-.192s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief"
ER_VIEW_PREVENT_UPDATE
- eng "The definition of table '%-.192s' prevents operation %.192s on table '%-.192s'."
+ eng "The definition of table '%-.192s' prevents operation %.192s on table '%-.192s'"
ger "Die Definition der Tabelle '%-.192s' verhindert die Operation %.192s auf Tabelle '%-.192s'"
ER_PS_NO_RECURSION
eng "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner"
@@ -5544,7 +5693,7 @@ ER_NO_SUCH_USER
eng "The user specified as a definer ('%-.64s'@'%-.64s') does not exist"
ger "Der als Definierer angegebene Benutzer ('%-.64s'@'%-.64s') existiert nicht"
ER_FORBID_SCHEMA_CHANGE
- eng "Changing schema from '%-.192s' to '%-.192s' is not allowed."
+ eng "Changing schema from '%-.192s' to '%-.192s' is not allowed"
ger "Wechsel des Schemas von '%-.192s' auf '%-.192s' ist nicht erlaubt"
ER_ROW_IS_REFERENCED_2 23000
eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)"
@@ -5556,8 +5705,8 @@ ER_SP_BAD_VAR_SHADOW 42000
eng "Variable '%-.64s' must be quoted with `...`, or renamed"
ger "Variable '%-.64s' muss mit `...` geschützt oder aber umbenannt werden"
ER_TRG_NO_DEFINER
- eng "No definer attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger."
- ger "Kein Definierer-Attribut für Trigger '%-.192s'.'%-.192s'. Der Trigger wird mit der Autorisierung des Aufrufers aktiviert, der möglicherweise keine zureichenden Berechtigungen hat. Bitte legen Sie den Trigger neu an."
+ eng "No definer attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger"
+ ger "Kein Definierer-Attribut für Trigger '%-.192s'.'%-.192s'. Der Trigger wird mit der Autorisierung des Aufrufers aktiviert, der möglicherweise keine zureichenden Berechtigungen hat. Bitte legen Sie den Trigger neu an"
ER_OLD_FILE_FORMAT
eng "'%-.192s' has an old format, you should re-create the '%s' object(s)"
ger "'%-.192s' hat altes Format, Sie sollten die '%s'-Objekt(e) neu erzeugen"
@@ -5597,12 +5746,15 @@ ER_REMOVED_SPACES
ER_AUTOINC_READ_FAILED
eng "Failed to read auto-increment value from storage engine"
ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen"
+ hindi "स्टोरेज इंजन से auto-increment का मान पढ़ने में असफल रहे"
ER_USERNAME
eng "user name"
ger "Benutzername"
+ hindi "यूज़र का नाम"
ER_HOSTNAME
eng "host name"
ger "Hostname"
+ hindi "होस्ट का नाम"
ER_WRONG_STRING_LENGTH
eng "String '%-.70s' is too long for %s (should be no longer than %d)"
ger "String '%-.70s' ist zu lang für %s (sollte nicht länger sein als %d)"
@@ -5623,8 +5775,8 @@ ER_AMBIGUOUS_FIELD_TERM
eng "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY"
ger "Das erste Zeichen der Zeichenkette FIELDS TERMINATED ist mehrdeutig; bitte benutzen Sie nicht optionale und nicht leere FIELDS ENCLOSED BY"
ER_FOREIGN_SERVER_EXISTS
- eng "The foreign server, %s, you are trying to create already exists."
- ger "Der entfernte Server %s, den Sie versuchen zu erzeugen, existiert schon."
+ eng "The foreign server, %s, you are trying to create already exists"
+ ger "Der entfernte Server %s, den Sie versuchen zu erzeugen, existiert schon"
ER_FOREIGN_SERVER_DOESNT_EXIST
eng "The foreign server name you are trying to reference does not exist. Data source error: %-.64s"
ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
@@ -5823,9 +5975,11 @@ ER_FILEGROUP_OPTION_ONLY_ONCE
ER_CREATE_FILEGROUP_FAILED
eng "Failed to create %s"
ger "Anlegen von %s fehlgeschlagen"
+ hindi "%s को बनाने में असफल रहे"
ER_DROP_FILEGROUP_FAILED
eng "Failed to drop %s"
ger "Löschen von %s fehlgeschlagen"
+ hindi "%s को हटाने में असफल रहे"
ER_TABLESPACE_AUTO_EXTEND_ERROR
eng "The handler doesn't support autoextend of tablespaces"
ger "Der Handler unterstützt keine automatische Erweiterung (Autoextend) von Tablespaces"
@@ -5838,6 +5992,7 @@ ER_SIZE_OVERFLOW_ERROR
ER_ALTER_FILEGROUP_FAILED
eng "Failed to alter: %s"
ger "Änderung von %s fehlgeschlagen"
+ hindi "%s को ALTER करने में असफल रहे"
ER_BINLOG_ROW_LOGGING_FAILED
eng "Writing one row to the row-based binary log failed"
ger "Schreiben einer Zeilen ins zeilenbasierte Binärlog fehlgeschlagen"
@@ -5851,7 +6006,7 @@ ER_EVENT_ALREADY_EXISTS
eng "Event '%-.192s' already exists"
ger "Event '%-.192s' existiert bereits"
ER_EVENT_STORE_FAILED
- eng "Failed to store event %s. Error code %M from storage engine."
+ eng "Failed to store event %s. Error code %M from storage engine"
ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %M"
ER_EVENT_DOES_NOT_EXIST
eng "Unknown event '%-.192s'"
@@ -5859,9 +6014,11 @@ ER_EVENT_DOES_NOT_EXIST
ER_EVENT_CANT_ALTER
eng "Failed to alter event '%-.192s'"
ger "Ändern des Events '%-.192s' fehlgeschlagen"
+ hindi "'%-.192s' EVENT को ALTER करने में असफल रहे"
ER_EVENT_DROP_FAILED
eng "Failed to drop %s"
ger "Löschen von %s fehlgeschlagen"
+ hindi "%s को हटाने में असफल रहे"
ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG
eng "INTERVAL is either not positive or too big"
ger "INTERVAL ist entweder nicht positiv oder zu groß"
@@ -5874,6 +6031,7 @@ ER_EVENT_EXEC_TIME_IN_THE_PAST
ER_EVENT_OPEN_TABLE_FAILED
eng "Failed to open mysql.event"
ger "Öffnen von mysql.event fehlgeschlagen"
+ hindi "mysql.event को खोलने में असफल रहे"
ER_EVENT_NEITHER_M_EXPR_NOR_M_AT
eng "No datetime expression provided"
ger "Kein DATETIME-Ausdruck angegeben"
@@ -5885,6 +6043,7 @@ ER_UNUSED_3
ER_EVENT_CANNOT_DELETE
eng "Failed to delete the event from mysql.event"
ger "Löschen des Events aus mysql.event fehlgeschlagen"
+ hindi "EVENT को mysql.event से हटाने में असफल रहे"
ER_EVENT_COMPILE_ERROR
eng "Error during compilation of event's body"
ger "Fehler beim Kompilieren des Event-Bodys"
@@ -5906,12 +6065,12 @@ ER_CANT_WRITE_LOCK_LOG_TABLE
eng "You can't write-lock a log table. Only read access is possible"
ger "Eine Log-Tabelle kann nicht schreibgesperrt werden. Es ist ohnehin nur Lesezugriff möglich"
ER_CANT_LOCK_LOG_TABLE
- eng "You can't use locks with log tables."
- ger "Log-Tabellen können nicht gesperrt werden."
+ eng "You can't use locks with log tables"
+ ger "Log-Tabellen können nicht gesperrt werden"
ER_UNUSED_4
eng "You should never see it"
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
- eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix this error."
+ eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix this error"
ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MariaDB %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
eng "Cannot switch out of the row-based binary log format when the session has open temporary tables"
@@ -5924,6 +6083,7 @@ ER_UNUSED_13
ER_PARTITION_NO_TEMPORARY
eng "Cannot create temporary table with partitions"
ger "Anlegen temporärer Tabellen mit Partitionen nicht möglich"
+ hindi "अस्थाई टेबल को पार्टिशन्स के साथ नहीं बनाया जा सकता"
ER_PARTITION_CONST_DOMAIN_ERROR
eng "Partition constant is out of partition function domain"
ger "Partitionskonstante liegt außerhalb der Partitionsfunktionsdomäne"
@@ -5935,6 +6095,7 @@ ER_PARTITION_FUNCTION_IS_NOT_ALLOWED
ER_DDL_LOG_ERROR
eng "Error in DDL log"
ger "Fehler im DDL-Log"
+ hindi "DDL लॉग में त्रुटि हुई"
ER_NULL_IN_VALUES_LESS_THAN
eng "Not allowed to use NULL value in VALUES LESS THAN"
ger "In VALUES LESS THAN dürfen keine NULL-Werte verwendet werden"
@@ -5942,6 +6103,7 @@ ER_NULL_IN_VALUES_LESS_THAN
ER_WRONG_PARTITION_NAME
eng "Incorrect partition name"
ger "Falscher Partitionsname"
+ hindi "पार्टीशन का नाम गलत है"
swe "Felaktigt partitionsnamn"
ER_CANT_CHANGE_TX_CHARACTERISTICS 25001
eng "Transaction characteristics can't be changed while a transaction is in progress"
@@ -5980,6 +6142,7 @@ ER_ONLY_INTEGERS_ALLOWED
ER_UNSUPORTED_LOG_ENGINE
eng "Storage engine %s cannot be used for log tables"
ger "Speicher-Engine %s kann für Logtabellen nicht verwendet werden"
+ hindi "स्टोरेज इंजन %s को लॉग टेबल्स के लिए इस्तेमाल नहीं किया जा सकता है"
ER_BAD_LOG_STATEMENT
eng "You cannot '%s' a log table if logging is enabled"
ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist"
@@ -6009,7 +6172,7 @@ ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009
fre "Duplicata du champ '%-.64s' pour la clef '%-.192s'"
ger "Doppelter Eintrag '%-.64s' für Schlüssel '%-.192s'"
greek "Διπλή εγγραφή '%-.64s' για το κλειδί '%-.192s'"
- hun "Duplikalt bejegyzes '%-.64s' a '%-.192s' kulcs szerint."
+ hun "Duplikalt bejegyzes '%-.64s' a '%-.192s' kulcs szerint"
ita "Valore duplicato '%-.64s' per la chiave '%-.192s'"
jpn "'%-.64s' は索引 '%-.192s' で重複しています。"
kor "중복된 입력 값 '%-.64s': key '%-.192s'"
@@ -6028,11 +6191,11 @@ ER_BINLOG_PURGE_EMFILE
eng "Too many files opened, please execute the command again"
ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus"
ER_EVENT_CANNOT_CREATE_IN_THE_PAST
- eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
- ger "Ausführungszeit des Events liegt in der Vergangenheit, und es wurde ON COMPLETION NOT PRESERVE gesetzt. Das Event wurde unmittelbar nach Erzeugung gelöscht."
+ eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation"
+ ger "Ausführungszeit des Events liegt in der Vergangenheit, und es wurde ON COMPLETION NOT PRESERVE gesetzt. Das Event wurde unmittelbar nach Erzeugung gelöscht"
ER_EVENT_CANNOT_ALTER_IN_THE_PAST
- eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future."
- ger "Execution Zeitpunkt des Ereignisses in der Vergangenheit liegt, und es war NACH ABSCHLUSS Set nicht erhalten. Die Veranstaltung wurde nicht verändert. Geben Sie einen Zeitpunkt in der Zukunft."
+ eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future"
+ ger "Execution Zeitpunkt des Ereignisses in der Vergangenheit liegt, und es war NACH ABSCHLUSS Set nicht erhalten. Die Veranstaltung wurde nicht verändert. Geben Sie einen Zeitpunkt in der Zukunft"
ER_SLAVE_INCIDENT
eng "The incident %s occurred on the master. Message: %-.64s"
ger "Der Vorfall %s passierte auf dem Master. Meldung: %-.64s"
@@ -6055,6 +6218,7 @@ ER_SLAVE_RELAY_LOG_WRITE_FAILURE
ER_SLAVE_CREATE_EVENT_FAILURE
eng "Failed to create %s"
ger "Erzeugen von %s fehlgeschlagen"
+ hindi "%s को बनाने में असफल रहे"
ER_SLAVE_MASTER_COM_FAILURE
eng "Master command %s failed: %s"
ger "Master-Befehl %s fehlgeschlagen: %s"
@@ -6091,8 +6255,8 @@ ER_CANT_CREATE_SROUTINE
ER_UNUSED_11
eng "You should never see it"
ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT
- eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement."
- ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran."
+ eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement"
+ ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran"
ER_SLAVE_CORRUPT_EVENT
eng "Corrupted replication event was detected"
ger "Beschädigtes Replikationsereignis entdeckt"
@@ -6136,7 +6300,7 @@ ER_SLAVE_HEARTBEAT_FAILURE
eng "Unexpected master's heartbeat data: %s"
ger "Unerwartete Daten vom Heartbeat des Masters: %s"
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE
- eng "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%u seconds)."
+ eng "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%u seconds)"
ER_UNUSED_14
eng "You should never see it"
ER_CONFLICT_FN_PARSE_ERROR
@@ -6162,22 +6326,27 @@ ER_DATABASE_NAME
eng "Database"
swe "Databas"
ger "Datenbank"
+ hindi "डेटाबेस"
ER_TABLE_NAME
eng "Table"
swe "Tabell"
ger "Tabelle"
+ hindi "टेबल"
ER_PARTITION_NAME
eng "Partition"
swe "Partition"
ger "Partition"
+ hindi "पार्टीशन"
ER_SUBPARTITION_NAME
eng "Subpartition"
swe "Subpartition"
ger "Unterpartition"
+ hindi "सब-पार्टीशन"
ER_TEMPORARY_NAME
eng "Temporary"
swe "Temporär"
ger "Temporär"
+ hindi "अस्थायी"
ER_RENAMED_NAME
eng "Renamed"
swe "Namnändrad"
@@ -6257,36 +6426,36 @@ ER_PARTITION_FIELDS_TOO_LONG
eng "The total length of the partitioning fields is too large"
ger "Die Gesamtlänge der Partitionsfelder ist zu groß"
ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE
- eng "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved."
+ eng "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved"
ER_BINLOG_ROW_MODE_AND_STMT_ENGINE
- eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine limited to statement-based logging."
+ eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine limited to statement-based logging"
ER_BINLOG_UNSAFE_AND_STMT_ENGINE
eng "Cannot execute statement: impossible to write to binary log since statement is unsafe, storage engine is limited to statement-based logging, and BINLOG_FORMAT = MIXED. %s"
ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE
- eng "Cannot execute statement: impossible to write to binary log since statement is in row format and at least one table uses a storage engine limited to statement-based logging."
+ eng "Cannot execute statement: impossible to write to binary log since statement is in row format and at least one table uses a storage engine limited to statement-based logging"
ER_BINLOG_STMT_MODE_AND_ROW_ENGINE
eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging.%s"
ER_BINLOG_ROW_INJECTION_AND_STMT_MODE
- eng "Cannot execute statement: impossible to write to binary log since statement is in row format and BINLOG_FORMAT = STATEMENT."
+ eng "Cannot execute statement: impossible to write to binary log since statement is in row format and BINLOG_FORMAT = STATEMENT"
ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE
- eng "Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging."
+ eng "Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging"
ER_BINLOG_UNSAFE_LIMIT
- eng "The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted."
+ eng "The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted"
ER_BINLOG_UNSAFE_INSERT_DELAYED
- eng "The statement is unsafe because it uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted."
+ eng "The statement is unsafe because it uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted"
ER_BINLOG_UNSAFE_SYSTEM_TABLE
- eng "The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves."
+ eng "The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves"
ER_BINLOG_UNSAFE_AUTOINC_COLUMNS
- eng "Statement is unsafe because it invokes a trigger or a stored function that inserts into an AUTO_INCREMENT column. Inserted values cannot be logged correctly."
+ eng "Statement is unsafe because it invokes a trigger or a stored function that inserts into an AUTO_INCREMENT column. Inserted values cannot be logged correctly"
ER_BINLOG_UNSAFE_UDF
- eng "Statement is unsafe because it uses a UDF which may not return the same value on the slave."
+ eng "Statement is unsafe because it uses a UDF which may not return the same value on the slave"
ER_BINLOG_UNSAFE_SYSTEM_VARIABLE
- eng "Statement is unsafe because it uses a system variable that may have a different value on the slave."
+ eng "Statement is unsafe because it uses a system variable that may have a different value on the slave"
ER_BINLOG_UNSAFE_SYSTEM_FUNCTION
- eng "Statement is unsafe because it uses a system function that may return a different value on the slave."
+ eng "Statement is unsafe because it uses a system function that may return a different value on the slave"
ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS
- eng "Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction."
+ eng "Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction"
ER_MESSAGE_AND_STATEMENT
eng "%s Statement: %s"
@@ -6298,16 +6467,18 @@ ER_SLAVE_CANT_CREATE_CONVERSION
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT
eng "Cannot modify @@session.binlog_format inside a transaction"
ER_PATH_LENGTH
- eng "The path specified for %.64s is too long."
+ eng "The path specified for %.64s is too long"
+ hindi "%.64s के लिए निर्दिष्ट पथ बहुत लंबा है"
ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT
- eng "'%s' is deprecated and will be removed in a future release."
- ger "'%s' ist veraltet und wird in einer zukünftigen Version entfernt werden."
+ eng "'%s' is deprecated and will be removed in a future release"
+ ger "'%s' ist veraltet und wird in einer zukünftigen Version entfernt werden"
ER_WRONG_NATIVE_TABLE_STRUCTURE
eng "Native table '%-.64s'.'%-.64s' has the wrong structure"
ER_WRONG_PERFSCHEMA_USAGE
- eng "Invalid performance_schema usage."
+ eng "Invalid performance_schema usage"
+ hindi "performance_schema का अवैध उपयोग"
ER_WARN_I_S_SKIPPED_TABLE
eng "Table '%s'.'%s' was skipped since its definition is being modified by concurrent DDL statement"
@@ -6331,10 +6502,10 @@ ER_WRONG_SPVAR_TYPE_IN_LIMIT
eng "A variable of a non-integer based type in LIMIT clause"
ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE
- eng "Mixing self-logging and non-self-logging engines in a statement is unsafe."
+ eng "Mixing self-logging and non-self-logging engines in a statement is unsafe"
ER_BINLOG_UNSAFE_MIXED_STATEMENT
- eng "Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them."
+ eng "Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them"
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN
eng "Cannot modify @@session.sql_log_bin inside a transaction"
@@ -6344,6 +6515,7 @@ ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN
ER_FAILED_READ_FROM_PAR_FILE
eng "Failed to read from the .par file"
+ hindi ".par फ़ाइल से पढ़ने में असफल रहे"
swe "Misslyckades läsa från .par filen"
ER_VALUES_IS_NOT_INT_TYPE_ERROR
@@ -6359,7 +6531,8 @@ ER_ACCESS_DENIED_NO_PASSWORD_ERROR 28000
fre "Accès refusé pour l'utilisateur: '%s'@'%s'"
ger "Benutzer '%s'@'%s' hat keine Zugriffsberechtigung"
greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s'"
- hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres."
+ hindi "यूज़र '%s'@'%s' को अनुमति नहीं है"
+ hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres"
ita "Accesso non consentito per l'utente: '%s'@'%s'"
kor "'%s'@'%s' 사용자는 접근이 거부 되었습니다."
nor "Tilgang nektet for bruker: '%s'@'%s'"
@@ -6386,16 +6559,16 @@ ER_PLUGIN_IS_PERMANENT
eng "Plugin '%s' is force_plus_permanent and can not be unloaded"
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN
- eng "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled."
+ eng "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled"
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX
- eng "The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout."
+ eng "The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout"
ER_STMT_CACHE_FULL
eng "Multi-row statements required more than 'max_binlog_stmt_cache_size' bytes of storage; increase this mysqld variable and try again"
ER_MULTI_UPDATE_KEY_CONFLICT
- eng "Primary key/partition key update is not allowed since the table is updated both as '%-.192s' and '%-.192s'."
+ eng "Primary key/partition key update is not allowed since the table is updated both as '%-.192s' and '%-.192s'"
# When translating this error message make sure to include "ALTER TABLE" in the
# message as mysqlcheck parses the error message looking for ALTER TABLE.
@@ -6406,7 +6579,7 @@ WARN_OPTION_BELOW_LIMIT
eng "The value of '%s' should be no less than the value of '%s'"
ER_INDEX_COLUMN_TOO_LONG
- eng "Index column size too large. The maximum column size is %lu bytes."
+ eng "Index column size too large. The maximum column size is %lu bytes"
ER_ERROR_IN_TRIGGER_BODY
eng "Trigger '%-.64s' has an error in its body: '%-.256s'"
@@ -6418,25 +6591,25 @@ ER_INDEX_CORRUPT
eng "Index %s is corrupted"
ER_UNDO_RECORD_TOO_BIG
- eng "Undo log record is too big."
+ eng "Undo log record is too big"
ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT
- eng "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
+ eng "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave"
ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE
- eng "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave."
+ eng "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave"
ER_BINLOG_UNSAFE_REPLACE_SELECT
- eng "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave."
+ eng "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave"
ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT
- eng "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
+ eng "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave"
ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT
- eng "CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave."
+ eng "CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave"
ER_BINLOG_UNSAFE_UPDATE_IGNORE
- eng "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
+ eng "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave"
ER_UNUSED_15
eng "You should never see it"
@@ -6445,22 +6618,22 @@ ER_UNUSED_16
eng "You should never see it"
ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT
- eng "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave."
+ eng "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave"
ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC
- eng "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave."
+ eng "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave"
ER_BINLOG_UNSAFE_INSERT_TWO_KEYS
eng "INSERT... ON DUPLICATE KEY UPDATE on a table with more than one UNIQUE KEY is unsafe"
ER_TABLE_IN_FK_CHECK
- eng "Table is being used in foreign key check."
+ eng "Table is being used in foreign key check"
ER_UNUSED_1
eng "You should never see it"
ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST
- eng "INSERT into autoincrement field which is not the first part in the composed primary key is unsafe."
+ eng "INSERT into autoincrement field which is not the first part in the composed primary key is unsafe"
#
# End of 5.5 error messages.
@@ -6497,7 +6670,7 @@ ER_ROW_DOES_NOT_MATCH_PARTITION
eng "Found a row that does not match the partition"
swe "Hittade en rad som inte passar i partitionen"
ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX
- eng "Option binlog_cache_size (%lu) is greater than max_binlog_cache_size (%lu); setting binlog_cache_size equal to max_binlog_cache_size."
+ eng "Option binlog_cache_size (%lu) is greater than max_binlog_cache_size (%lu); setting binlog_cache_size equal to max_binlog_cache_size"
ER_WARN_INDEX_NOT_APPLICABLE
eng "Cannot use %-.64s access on index '%-.64s' due to type or collation conversion on field '%-.64s'"
@@ -6509,14 +6682,14 @@ ER_NO_SUCH_KEY_VALUE
ER_VALUE_TOO_LONG
eng "Too long value for '%s'"
ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE
- eng "Replication event checksum verification failed while reading from network."
+ eng "Replication event checksum verification failed while reading from network"
ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE
- eng "Replication event checksum verification failed while reading from a log file."
+ eng "Replication event checksum verification failed while reading from a log file"
ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX
- eng "Option binlog_stmt_cache_size (%lu) is greater than max_binlog_stmt_cache_size (%lu); setting binlog_stmt_cache_size equal to max_binlog_stmt_cache_size."
+ eng "Option binlog_stmt_cache_size (%lu) is greater than max_binlog_stmt_cache_size (%lu); setting binlog_stmt_cache_size equal to max_binlog_stmt_cache_size"
ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT
- eng "Can't update table '%-.192s' while '%-.192s' is being created."
+ eng "Can't update table '%-.192s' while '%-.192s' is being created"
ER_PARTITION_CLAUSE_ON_NONPARTITIONED
eng "PARTITION () clause on non partitioned table"
@@ -6529,35 +6702,35 @@ ER_UNUSED_5
eng "You should never see it"
ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE
- eng "Failure while changing the type of replication repository: %s."
+ eng "Failure while changing the type of replication repository: %s"
ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE
- eng "The creation of some temporary tables could not be rolled back."
+ eng "The creation of some temporary tables could not be rolled back"
ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE
- eng "Some temporary tables were dropped, but these operations could not be rolled back."
+ eng "Some temporary tables were dropped, but these operations could not be rolled back"
ER_MTS_FEATURE_IS_NOT_SUPPORTED
eng "%s is not supported in multi-threaded slave mode. %s"
ER_MTS_UPDATED_DBS_GREATER_MAX
- eng "The number of modified databases exceeds the maximum %d; the database names will not be included in the replication event metadata."
+ eng "The number of modified databases exceeds the maximum %d; the database names will not be included in the replication event metadata"
ER_MTS_CANT_PARALLEL
- eng "Cannot execute the current event group in the parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this event group in parallel mode. Reason: %s."
+ eng "Cannot execute the current event group in the parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this event group in parallel mode. Reason: %s"
ER_MTS_INCONSISTENT_DATA
eng "%s"
ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING
- eng "FULLTEXT index is not supported for partitioned tables."
- swe "FULLTEXT index stöds ej för partitionerade tabeller."
+ eng "FULLTEXT index is not supported for partitioned tables"
+ swe "FULLTEXT index stöds ej för partitionerade tabeller"
ER_DA_INVALID_CONDITION_NUMBER 35000
eng "Invalid condition number"
por "Número de condição inválido"
ER_INSECURE_PLAIN_TEXT
- eng "Sending passwords in plain text without SSL/TLS is extremely insecure."
+ eng "Sending passwords in plain text without SSL/TLS is extremely insecure"
ER_INSECURE_CHANGE_MASTER
- eng "Storing MySQL user name or password information in the master.info repository is not secure and is therefore not recommended. Please see the MySQL Manual for more about this issue and possible alternatives."
+ eng "Storing MySQL user name or password information in the master.info repository is not secure and is therefore not recommended. Please see the MySQL Manual for more about this issue and possible alternatives"
ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO 23000 S1009
eng "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in table '%.192s', key '%.192s'"
@@ -6570,101 +6743,101 @@ ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO 23000 S1009
swe "FOREIGN KEY constraint för tabell '%.192s', posten '%-.192s' kan inte uppdatera en barntabell på grund av UNIQUE-test"
ER_SQLTHREAD_WITH_SECURE_SLAVE
- eng "Setting authentication options is not possible when only the Slave SQL Thread is being started."
+ eng "Setting authentication options is not possible when only the Slave SQL Thread is being started"
ER_TABLE_HAS_NO_FT
eng "The table does not have FULLTEXT index to support this query"
ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER
- eng "The system variable %.200s cannot be set in stored functions or triggers."
+ eng "The system variable %.200s cannot be set in stored functions or triggers"
ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION
- eng "The system variable %.200s cannot be set when there is an ongoing transaction."
+ eng "The system variable %.200s cannot be set when there is an ongoing transaction"
ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST
- eng "The system variable @@SESSION.GTID_NEXT has the value %.200s, which is not listed in @@SESSION.GTID_NEXT_LIST."
+ eng "The system variable @@SESSION.GTID_NEXT has the value %.200s, which is not listed in @@SESSION.GTID_NEXT_LIST"
ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL
- eng "When @@SESSION.GTID_NEXT_LIST == NULL, the system variable @@SESSION.GTID_NEXT cannot change inside a transaction."
+ eng "When @@SESSION.GTID_NEXT_LIST == NULL, the system variable @@SESSION.GTID_NEXT cannot change inside a transaction"
ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION
- eng "The statement 'SET %.200s' cannot invoke a stored function."
+ eng "The statement 'SET %.200s' cannot invoke a stored function"
ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL
- eng "The system variable @@SESSION.GTID_NEXT cannot be 'AUTOMATIC' when @@SESSION.GTID_NEXT_LIST is non-NULL."
+ eng "The system variable @@SESSION.GTID_NEXT cannot be 'AUTOMATIC' when @@SESSION.GTID_NEXT_LIST is non-NULL"
ER_SKIPPING_LOGGED_TRANSACTION
- eng "Skipping transaction %.200s because it has already been executed and logged."
+ eng "Skipping transaction %.200s because it has already been executed and logged"
ER_MALFORMED_GTID_SET_SPECIFICATION
- eng "Malformed GTID set specification '%.200s'."
+ eng "Malformed GTID set specification '%.200s'"
ER_MALFORMED_GTID_SET_ENCODING
- eng "Malformed GTID set encoding."
+ eng "Malformed GTID set encoding"
ER_MALFORMED_GTID_SPECIFICATION
- eng "Malformed GTID specification '%.200s'."
+ eng "Malformed GTID specification '%.200s'"
ER_GNO_EXHAUSTED
- eng "Impossible to generate Global Transaction Identifier: the integer component reached the maximal value. Restart the server with a new server_uuid."
+ eng "Impossible to generate Global Transaction Identifier: the integer component reached the maximal value. Restart the server with a new server_uuid"
ER_BAD_SLAVE_AUTO_POSITION
- eng "Parameters MASTER_LOG_FILE, MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active."
+ eng "Parameters MASTER_LOG_FILE, MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active"
ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON
- eng "CHANGE MASTER TO MASTER_AUTO_POSITION = 1 can only be executed when GTID_MODE = ON."
+ eng "CHANGE MASTER TO MASTER_AUTO_POSITION = 1 can only be executed when GTID_MODE = ON"
ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET
- eng "Cannot execute statements with implicit commit inside a transaction when GTID_NEXT != AUTOMATIC or GTID_NEXT_LIST != NULL."
+ eng "Cannot execute statements with implicit commit inside a transaction when GTID_NEXT != AUTOMATIC or GTID_NEXT_LIST != NULL"
ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON
- eng "GTID_MODE = ON or GTID_MODE = UPGRADE_STEP_2 requires ENFORCE_GTID_CONSISTENCY = 1."
+ eng "GTID_MODE = ON or GTID_MODE = UPGRADE_STEP_2 requires ENFORCE_GTID_CONSISTENCY = 1"
ER_GTID_MODE_REQUIRES_BINLOG
- eng "GTID_MODE = ON or UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates."
+ eng "GTID_MODE = ON or UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates"
ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF
- eng "GTID_NEXT cannot be set to UUID:NUMBER when GTID_MODE = OFF."
+ eng "GTID_NEXT cannot be set to UUID:NUMBER when GTID_MODE = OFF"
ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON
- eng "GTID_NEXT cannot be set to ANONYMOUS when GTID_MODE = ON."
+ eng "GTID_NEXT cannot be set to ANONYMOUS when GTID_MODE = ON"
ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF
- eng "GTID_NEXT_LIST cannot be set to a non-NULL value when GTID_MODE = OFF."
+ eng "GTID_NEXT_LIST cannot be set to a non-NULL value when GTID_MODE = OFF"
ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF
- eng "Found a Gtid_log_event or Previous_gtids_log_event when GTID_MODE = OFF."
+ eng "Found a Gtid_log_event or Previous_gtids_log_event when GTID_MODE = OFF"
ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE
- eng "When ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional tables."
+ eng "When ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional tables"
ER_GTID_UNSAFE_CREATE_SELECT
- eng "CREATE TABLE ... SELECT is forbidden when ENFORCE_GTID_CONSISTENCY = 1."
+ eng "CREATE TABLE ... SELECT is forbidden when ENFORCE_GTID_CONSISTENCY = 1"
ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION
- eng "When ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1."
+ eng "When ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1"
ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME
eng "The value of GTID_MODE can only change one step at a time: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON. Also note that this value must be stepped up or down simultaneously on all servers; see the Manual for instructions."
ER_MASTER_HAS_PURGED_REQUIRED_GTIDS
- eng "The slave is connecting using CHANGE MASTER TO MASTER_AUTO_POSITION = 1, but the master has purged binary logs containing GTIDs that the slave requires."
+ eng "The slave is connecting using CHANGE MASTER TO MASTER_AUTO_POSITION = 1, but the master has purged binary logs containing GTIDs that the slave requires"
ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID
- eng "GTID_NEXT cannot be changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK."
+ eng "GTID_NEXT cannot be changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK"
ER_UNKNOWN_EXPLAIN_FORMAT
eng "Unknown EXPLAIN format name: '%s'"
rus "Неизвестное имя формата команды EXPLAIN: '%s'"
ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION 25006
- eng "Cannot execute statement in a READ ONLY transaction."
+ eng "Cannot execute statement in a READ ONLY transaction"
ER_TOO_LONG_TABLE_PARTITION_COMMENT
eng "Comment for table partition '%-.64s' is too long (max = %lu)"
ER_SLAVE_CONFIGURATION
- eng "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MySQL error log."
+ eng "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MySQL error log"
ER_INNODB_FT_LIMIT
eng "InnoDB presently supports one FULLTEXT index creation at a time"
@@ -6679,7 +6852,7 @@ ER_INNODB_FT_WRONG_DOCID_INDEX
eng "Index '%-.192s' is of wrong type for an InnoDB FULLTEXT index"
ER_INNODB_ONLINE_LOG_TOO_BIG
- eng "Creating index '%-.192s' required more than 'innodb_online_alter_log_max_size' bytes of modification log. Please try again."
+ eng "Creating index '%-.192s' required more than 'innodb_online_alter_log_max_size' bytes of modification log. Please try again"
ER_UNKNOWN_ALTER_ALGORITHM
eng "Unknown ALGORITHM '%s'"
@@ -6688,13 +6861,13 @@ ER_UNKNOWN_ALTER_LOCK
eng "Unknown LOCK type '%s'"
ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS
- eng "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL."
+ eng "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL"
ER_MTS_RECOVERY_FAILURE
- eng "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MySQL error log."
+ eng "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MySQL error log"
ER_MTS_RESET_WORKERS
- eng "Cannot clean up worker info tables. Additional error messages can be found in the MySQL error log."
+ eng "Cannot clean up worker info tables. Additional error messages can be found in the MySQL error log"
ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2
eng "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted"
@@ -6704,13 +6877,13 @@ ER_SLAVE_SILENT_RETRY_TRANSACTION
eng "Slave must silently retry current transaction"
ER_DISCARD_FK_CHECKS_RUNNING
- eng "There is a foreign key check running on table '%-.192s'. Cannot discard the table."
+ eng "There is a foreign key check running on table '%-.192s'. Cannot discard the table"
ER_TABLE_SCHEMA_MISMATCH
eng "Schema mismatch (%s)"
ER_TABLE_IN_SYSTEM_TABLESPACE
- eng "Table '%-.192s' in system tablespace"
+ eng "Table %-.192s in system tablespace"
ER_IO_READ_ERROR
eng "IO Read error: (%lu, %s) %s"
@@ -6722,10 +6895,10 @@ ER_TABLESPACE_MISSING
eng "Tablespace is missing for table '%-.192s'"
ER_TABLESPACE_EXISTS
- eng "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT."
+ eng "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT"
ER_TABLESPACE_DISCARDED
- eng "Tablespace has been discarded for table '%-.192s'"
+ eng "Tablespace has been discarded for table %`s"
ER_INTERNAL_ERROR
eng "Internal error: %-.192s"
@@ -6737,8 +6910,8 @@ ER_INNODB_INDEX_CORRUPT
eng "Index corrupt: %s"
ER_INVALID_YEAR_COLUMN_LENGTH
- eng "YEAR(%lu) column type is deprecated. Creating YEAR(4) column instead."
- rus "Тип YEAR(%lu) более не поддерживается, вместо него будет создана колонка с типом YEAR(4)."
+ eng "YEAR(%lu) column type is deprecated. Creating YEAR(4) column instead"
+ rus "Тип YEAR(%lu) более не поддерживается, вместо него будет создана колонка с типом YEAR(4)"
ER_NOT_VALID_PASSWORD
eng "Your password does not satisfy the current policy requirements"
@@ -6762,26 +6935,26 @@ ER_FK_CANNOT_OPEN_PARENT
ER_FK_INCORRECT_OPTION
eng "Failed to add the foreign key constraint on table '%s'. Incorrect options in FOREIGN KEY constraint '%s'"
-ER_FK_DUP_NAME
- eng "Duplicate foreign key constraint name '%s'"
+ER_DUP_CONSTRAINT_NAME
+ eng "Duplicate %s constraint name '%s'"
ER_PASSWORD_FORMAT
- eng "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function."
+ eng "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function"
ER_FK_COLUMN_CANNOT_DROP
eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s'"
ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' benötigt"
ER_FK_COLUMN_CANNOT_DROP_CHILD
- eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table '%-.192s'"
- ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' der Tabelle '%-.192s' benötigt"
+ eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table %-.192s"
+ ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' der Tabelle %-.192s benötigt"
ER_FK_COLUMN_NOT_NULL
eng "Column '%-.192s' cannot be NOT NULL: needed in a foreign key constraint '%-.192s' SET NULL"
ger "Spalte '%-.192s' kann nicht NOT NULL sein: wird für eine Fremdschlüsselbeschränkung '%-.192s' SET NULL benötigt"
ER_DUP_INDEX
- eng "Duplicate index %`s. This is deprecated and will be disallowed in a future release."
+ eng "Duplicate index %`s. This is deprecated and will be disallowed in a future release"
ER_FK_COLUMN_CANNOT_CHANGE
eng "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s'"
@@ -6793,40 +6966,40 @@ ER_FK_CANNOT_DELETE_PARENT
eng "Cannot delete rows from table which is parent in a foreign key constraint '%-.192s' of table '%-.192s'"
ER_MALFORMED_PACKET
- eng "Malformed communication packet."
+ eng "Malformed communication packet"
ER_READ_ONLY_MODE
eng "Running in read-only mode"
ER_GTID_NEXT_TYPE_UNDEFINED_GROUP
- eng "When GTID_NEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET GTID_NEXT before a transaction and forgot to set GTID_NEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current GTID_NEXT is '%s'."
+ eng "When GTID_NEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET GTID_NEXT before a transaction and forgot to set GTID_NEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current GTID_NEXT is '%s'"
ER_VARIABLE_NOT_SETTABLE_IN_SP
- eng "The system variable %.200s cannot be set in stored procedures."
+ eng "The system variable %.200s cannot be set in stored procedures"
ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF
- eng "GTID_PURGED can only be set when GTID_MODE = ON."
+ eng "GTID_PURGED can only be set when GTID_MODE = ON"
ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY
- eng "GTID_PURGED can only be set when GTID_EXECUTED is empty."
+ eng "GTID_PURGED can only be set when GTID_EXECUTED is empty"
ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY
- eng "GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients)."
+ eng "GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients)"
ER_GTID_PURGED_WAS_CHANGED
- eng "GTID_PURGED was changed from '%s' to '%s'."
+ eng "GTID_PURGED was changed from '%s' to '%s'"
ER_GTID_EXECUTED_WAS_CHANGED
- eng "GTID_EXECUTED was changed from '%s' to '%s'."
+ eng "GTID_EXECUTED was changed from '%s' to '%s'"
ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES
- eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non replicated tables are written to."
+ eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non replicated tables are written to"
ER_ALTER_OPERATION_NOT_SUPPORTED 0A000
- eng "%s is not supported for this operation. Try %s."
+ eng "%s is not supported for this operation. Try %s"
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON 0A000
- eng "%s is not supported. Reason: %s. Try %s."
+ eng "%s is not supported. Reason: %s. Try %s"
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY
eng "COPY algorithm requires a lock"
@@ -6873,7 +7046,7 @@ ER_DUP_UNKNOWN_IN_INDEX 23000
fre "Duplicata du champ pour la clef '%-.192s'"
ger "Doppelter Eintrag für Schlüssel '%-.192s'"
greek "Διπλή εγγραφή για το κλειδί '%-.192s'"
- hun "Duplikalt bejegyzes a '%-.192s' kulcs szerint."
+ hun "Duplikalt bejegyzes a '%-.192s' kulcs szerint"
ita "Valore duplicato per la chiave '%-.192s'"
jpn "は索引 '%-.192s' で重複しています。"
kor "중복된 입력 값: key '%-.192s'"
@@ -6890,21 +7063,21 @@ ER_DUP_UNKNOWN_IN_INDEX 23000
ukr "Дублюючий запис для ключа '%-.192s'"
ER_IDENT_CAUSES_TOO_LONG_PATH
- eng "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'."
+ eng "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'"
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL
eng "cannot silently convert NULL values, as required in this SQL_MODE"
ER_MUST_CHANGE_PASSWORD_LOGIN
- eng "Your password has expired. To log in you must change it using a client that supports expired passwords."
- bgn "Паролата ви е изтекла. За да влезете трябва да я смените използвайки клиент който поддрържа такива пароли."
+ eng "Your password has expired. To log in you must change it using a client that supports expired passwords"
+ bgn "Паролата ви е изтекла. За да влезете трябва да я смените използвайки клиент който поддрържа такива пароли"
ER_ROW_IN_WRONG_PARTITION
eng "Found a row in wrong partition %s"
swe "Hittade en rad i fel partition %s"
ER_MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX
- eng "Cannot schedule event %s, relay-log name %s, position %s to Worker thread because its size %lu exceeds %lu of slave_pending_jobs_size_max."
+ eng "Cannot schedule event %s, relay-log name %s, position %s to Worker thread because its size %lu exceeds %lu of slave_pending_jobs_size_max"
ER_INNODB_NO_FT_USES_PARSER
eng "Cannot CREATE FULLTEXT INDEX WITH PARSER on InnoDB table"
@@ -6912,10 +7085,10 @@ ER_BINLOG_LOGICAL_CORRUPTION
eng "The binary log file '%s' is logically corrupted: %s"
ER_WARN_PURGE_LOG_IN_USE
- eng "file %s was not purged because it was being read by %d thread(s), purged only %d out of %d files."
+ eng "file %s was not purged because it was being read by %d thread(s), purged only %d out of %d files"
ER_WARN_PURGE_LOG_IS_ACTIVE
- eng "file %s was not purged because it is the active log file."
+ eng "file %s was not purged because it is the active log file"
ER_AUTO_INCREMENT_CONFLICT
eng "Auto-increment value in UPDATE conflicts with internally generated values"
@@ -6930,26 +7103,27 @@ ER_SLAVE_RLI_INIT_REPOSITORY
eng "Slave failed to initialize relay log info structure from the repository"
ER_ACCESS_DENIED_CHANGE_USER_ERROR 28000
- eng "Access denied trying to change to user '%-.48s'@'%-.64s' (using password: %s). Disconnecting."
- bgn "Отказан достъп при опит за смяна към потребител %-.48s'@'%-.64s' (използвана парола: %s). Затваряне на връзката."
+ eng "Access denied trying to change to user '%-.48s'@'%-.64s' (using password: %s). Disconnecting"
+ bgn "Отказан достъп при опит за смяна към потребител %-.48s'@'%-.64s' (използвана парола: %s). Затваряне на връзката"
ER_INNODB_READ_ONLY
- eng "InnoDB is in read only mode."
+ eng "InnoDB is in read only mode"
+ hindi "InnoDB केवल READ-ONLY मोड में है"
ER_STOP_SLAVE_SQL_THREAD_TIMEOUT
- eng "STOP SLAVE command execution is incomplete: Slave SQL thread got the stop signal, thread is busy, SQL thread will stop once the current task is complete."
+ eng "STOP SLAVE command execution is incomplete: Slave SQL thread got the stop signal, thread is busy, SQL thread will stop once the current task is complete"
ER_STOP_SLAVE_IO_THREAD_TIMEOUT
- eng "STOP SLAVE command execution is incomplete: Slave IO thread got the stop signal, thread is busy, IO thread will stop once the current task is complete."
+ eng "STOP SLAVE command execution is incomplete: Slave IO thread got the stop signal, thread is busy, IO thread will stop once the current task is complete"
ER_TABLE_CORRUPT
- eng "Operation cannot be performed. The table '%-.64s.%-.64s' is missing, corrupt or contains bad data."
+ eng "Operation cannot be performed. The table '%-.64s.%-.64s' is missing, corrupt or contains bad data"
ER_TEMP_FILE_WRITE_FAILURE
- eng "Temporary file write failure."
+ eng "Temporary file write failure"
ER_INNODB_FT_AUX_NOT_HEX_ID
- eng "Upgrade index name failed, please use create index(alter table) algorithm copy to rebuild index."
+ eng "Upgrade index name failed, please use create index(alter table) algorithm copy to rebuild index"
#
@@ -6965,32 +7139,35 @@ ER_LAST_MYSQL_ERROR_MESSAGE
# MariaDB error numbers starts from 1900
start-error-number 1900
-ER_VCOL_BASED_ON_VCOL
- eng "A computed column cannot be based on a computed column"
-ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED
- eng "Function or expression is not allowed for column '%s'"
-ER_DATA_CONVERSION_ERROR_FOR_VIRTUAL_COLUMN
- eng "Generated value for computed column '%s' cannot be converted to type '%s'"
-ER_PRIMARY_KEY_BASED_ON_VIRTUAL_COLUMN
- eng "Primary key cannot be defined upon a computed column"
+ER_UNUSED_18
+ eng ""
+ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED
+ eng "Function or expression '%s' cannot be used in the %s clause of %`s"
+ER_UNUSED_19
+ eng ""
+ER_PRIMARY_KEY_BASED_ON_GENERATED_COLUMN
+ eng "Primary key cannot be defined upon a generated column"
ER_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN
- eng "Key/Index cannot be defined on a non-stored computed column"
-ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN
- eng "Cannot define foreign key with %s clause on a computed column"
-ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN
- eng "The value specified for computed column '%s' in table '%s' has been ignored"
-ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN
- eng "This is not yet supported for computed columns"
-ER_CONST_EXPR_IN_VCOL
- eng "Constant expression in computed column function is not allowed"
-ER_ROW_EXPR_FOR_VCOL
- eng "Expression for computed column cannot return a row"
-ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS
- eng "%s storage engine does not support computed columns"
+ eng "Key/Index cannot be defined on a virtual generated column"
+ER_WRONG_FK_OPTION_FOR_GENERATED_COLUMN
+ eng "Cannot define foreign key with %s clause on a generated column"
+ER_WARNING_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN
+ eng "The value specified for generated column '%s' in table '%s' has been ignored"
+ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN
+ eng "This is not yet supported for generated columns"
+ER_UNUSED_20
+ eng ""
+ER_UNUSED_21
+ eng ""
+ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS
+ eng "%s storage engine does not support generated columns"
+ hindi "स्टोरेज इंजन %s COMPUTED कॉलम्स को सपोर्ट नहीं करता"
ER_UNKNOWN_OPTION
eng "Unknown option '%-.64s'"
+ hindi "अज्ञात विकल्प '%-.64s'"
ER_BAD_OPTION_VALUE
eng "Incorrect value '%-.64s' for option '%-.64s'"
+ hindi "गलत मान '%-.64s' विकल्प '%-.64s' के लिए"
ER_UNUSED_6
eng "You should never see it"
ER_UNUSED_7
@@ -6998,7 +7175,7 @@ ER_UNUSED_7
ER_UNUSED_8
eng "You should never see it"
ER_DATA_OVERFLOW 22003
- eng "Got overflow when converting '%-.128s' to %-.32s. Value truncated."
+ eng "Got overflow when converting '%-.128s' to %-.32s. Value truncated"
ER_DATA_TRUNCATED 22003
eng "Truncated value '%-.128s' when converting to %-.32s"
ER_BAD_DATA 22007
@@ -7012,15 +7189,18 @@ ER_DYN_COL_DATA 22007
ER_DYN_COL_WRONG_CHARSET
eng "Dynamic column contains unknown character set"
ER_ILLEGAL_SUBQUERY_OPTIMIZER_SWITCHES
- eng "At least one of the 'in_to_exists' or 'materialization' optimizer_switch flags must be 'on'."
+ eng "At least one of the 'in_to_exists' or 'materialization' optimizer_switch flags must be 'on'"
+ hindi "कम से कम 'in_to_exists' या 'materialization' optimizer_switch फ्लैग 'ON' होना चाहिए"
ER_QUERY_CACHE_IS_DISABLED
eng "Query cache is disabled (resize or similar command in progress); repeat this command later"
ER_QUERY_CACHE_IS_GLOBALY_DISABLED
eng "Query cache is globally disabled and you can't enable it only for this session"
+ hindi "क्वेरी कैश ग्लोबल स्तर पर DISABLED है और आप इसे केवल सत्र के लिए ENABLE नहीं कर सकते"
ER_VIEW_ORDERBY_IGNORED
- eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already."
+ eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already"
ER_CONNECTION_KILLED 70100
eng "Connection was killed"
+ hindi "कनेक्शन को समाप्त कर दिया गया है"
ER_UNUSED_12
eng "You should never see it"
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
@@ -7028,9 +7208,10 @@ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
eng "Cannot modify @@session.skip_replication inside a stored function or trigger"
ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT
- eng "Query execution was interrupted. The query examined at least %llu rows, which exceeds LIMIT ROWS EXAMINED (%llu). The query result may be incomplete."
+ eng "Query execution was interrupted. The query examined at least %llu rows, which exceeds LIMIT ROWS EXAMINED (%llu). The query result may be incomplete"
ER_NO_SUCH_TABLE_IN_ENGINE 42S02
eng "Table '%-.192s.%-.192s' doesn't exist in engine"
+ hindi "टेबल '%-.192s.%-.192s' इंजन में मौजूद नहीं है"
swe "Det finns ingen tabell som heter '%-.192s.%-.192s' i handlern"
ER_TARGET_NOT_EXPLAINABLE
eng "Target is not running an EXPLAINable command"
@@ -7062,13 +7243,13 @@ ER_GTID_POSITION_NOT_FOUND_IN_BINLOG
ER_CANNOT_LOAD_SLAVE_GTID_STATE
eng "Failed to load replication slave GTID position from table %s.%s"
ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG
- eng "Specified GTID %u-%u-%llu conflicts with the binary log which contains a more recent GTID %u-%u-%llu. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos."
+ eng "Specified GTID %u-%u-%llu conflicts with the binary log which contains a more recent GTID %u-%u-%llu. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos"
ER_MASTER_GTID_POS_MISSING_DOMAIN
- eng "Specified value for @@gtid_slave_pos contains no value for replication domain %u. This conflicts with the binary log which contains GTID %u-%u-%llu. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos."
+ eng "Specified value for @@gtid_slave_pos contains no value for replication domain %u. This conflicts with the binary log which contains GTID %u-%u-%llu. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos"
ER_UNTIL_REQUIRES_USING_GTID
eng "START SLAVE UNTIL master_gtid_pos requires that slave is using GTID"
ER_GTID_STRICT_OUT_OF_ORDER
- eng "An attempt was made to binlog GTID %u-%u-%llu which would create an out-of-order sequence number with existing GTID %u-%u-%llu, and gtid strict mode is enabled."
+ eng "An attempt was made to binlog GTID %u-%u-%llu which would create an out-of-order sequence number with existing GTID %u-%u-%llu, and gtid strict mode is enabled"
ER_GTID_START_FROM_BINLOG_HOLE
eng "The binlog on the master is missing the GTID %u-%u-%llu requested by the slave (even though a subsequent sequence number does exist), and GTID strict mode is enabled"
ER_SLAVE_UNEXPECTED_MASTER_SWITCH
@@ -7084,61 +7265,467 @@ ER_BINLOG_MUST_BE_EMPTY
ER_NO_SUCH_QUERY
eng "Unknown query id: %lld"
ger "Unbekannte Abfrage-ID: %lld"
+ hindi "अज्ञात क्वेरी ID: %lld"
rus "Неизвестный номер запроса: %lld"
ER_BAD_BASE64_DATA
eng "Bad base64 data as position %u"
ER_INVALID_ROLE OP000
- eng "Invalid role specification %`s."
- rum "Rolul %`s este invalid."
+ eng "Invalid role specification %`s"
+ hindi "अमान्य रोल विनिर्देश %`s"
+ rum "Rolul %`s este invalid"
ER_INVALID_CURRENT_USER 0L000
- eng "The current user is invalid."
- rum "Utilizatorul curent este invalid."
+ eng "The current user is invalid"
+ hindi "वर्तमान यूज़र अमान्य है"
+ rum "Utilizatorul curent este invalid"
ER_CANNOT_GRANT_ROLE
- eng "Cannot grant role '%s' to: %s."
- rum "Rolul '%s' nu poate fi acordat catre: %s."
+ eng "Cannot grant role '%s' to: %s"
+ hindi "रोल '%s', %s को प्रदान नहीं कर सकते"
+ rum "Rolul '%s' nu poate fi acordat catre: %s"
ER_CANNOT_REVOKE_ROLE
- eng "Cannot revoke role '%s' from: %s."
- rum "Rolul '%s' nu poate fi revocat de la: %s."
+ eng "Cannot revoke role '%s' from: %s"
+ hindi "रोल '%s', %s से हटाया नहीं जा सका"
+ rum "Rolul '%s' nu poate fi revocat de la: %s"
ER_CHANGE_SLAVE_PARALLEL_THREADS_ACTIVE
eng "Cannot change @@slave_parallel_threads while another change is in progress"
ER_PRIOR_COMMIT_FAILED
eng "Commit failed due to failure of an earlier commit on which this one depends"
ER_IT_IS_A_VIEW 42S02
eng "'%-.192s' is a view"
+ hindi "'%-.192s' एक VIEW है"
ER_SLAVE_SKIP_NOT_IN_GTID
- eng "When using parallel replication and GTID with multiple replication domains, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position."
+ eng "When using parallel replication and GTID with multiple replication domains, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position"
ER_TABLE_DEFINITION_TOO_BIG
eng "The definition for table %`s is too big"
+ hindi "टेबल %`s की परिभाषा बहुत बड़ी है"
ER_PLUGIN_INSTALLED
eng "Plugin '%-.192s' already installed"
+ hindi "प्लग-इन '%-.192s' पहले से ही इन्स्टॉल्ड है"
rus "Плагин '%-.192s' уже установлен"
ER_STATEMENT_TIMEOUT 70100
eng "Query execution was interrupted (max_statement_time exceeded)"
ER_SUBQUERIES_NOT_SUPPORTED 42000
- eng "%s does not support subqueries or stored functions."
+ eng "%s does not support subqueries or stored functions"
ER_SET_STATEMENT_NOT_SUPPORTED 42000
eng "The system variable %.200s cannot be set in SET STATEMENT."
ER_UNUSED_9
eng "You should never see it"
ER_USER_CREATE_EXISTS
eng "Can't create user '%-.64s'@'%-.64s'; it already exists"
+ hindi "यूज़र '%-.64s'@'%-.64s' को नहीं बना सकते; यह पहले से ही मौजूद है"
ER_USER_DROP_EXISTS
eng "Can't drop user '%-.64s'@'%-.64s'; it doesn't exist"
+ hindi "यूज़र '%-.64s'@'%-.64s' को ड्रॉप नहीं कर सकते; यह मौजूद नहीं है"
ER_ROLE_CREATE_EXISTS
eng "Can't create role '%-.64s'; it already exists"
+ hindi "रोल '%-.64s' को नहीं बना सकते; यह पहले से ही मौजूद है"
ER_ROLE_DROP_EXISTS
eng "Can't drop role '%-.64s'; it doesn't exist"
+ hindi "रोल '%-.64s' को ड्रॉप नहीं कर सकते; यह मौजूद नहीं है"
ER_CANNOT_CONVERT_CHARACTER
eng "Cannot convert '%s' character 0x%-.64s to '%s'"
ER_INVALID_DEFAULT_VALUE_FOR_FIELD 22007
eng "Incorrect default value '%-.128s' for column '%.192s'"
+ hindi "गलत डिफ़ॉल्ट मान '%-.128s' कॉलम '%.192s' के लिए"
ER_KILL_QUERY_DENIED_ERROR
eng "You are not owner of query %lu"
ger "Sie sind nicht Eigentümer von Abfrage %lu"
+ hindi "आप क्वेरी %lu के OWNER नहीं हैं"
rus "Вы не являетесь владельцем запроса %lu"
ER_NO_EIS_FOR_FIELD
eng "Engine-independent statistics are not collected for column '%s'"
+ hindi "Engine-independent सांख्यिकी कॉलम '%s' के लिए एकत्रित नहीं किया जा रहा है"
ukr "Незалежна від типу таблиці статистика не збирається для стовбця '%s'"
ER_WARN_AGGFUNC_DEPENDENCE
eng "Aggregate function '%-.192s)' of SELECT #%d belongs to SELECT #%d"
ukr "Агрегатна функція '%-.192s)' з SELECTу #%d належить до SELECTу #%d"
+
+#
+# Internal errors, not used
+#
+skip-to-error-number 2000
+
+# MySQL 5.7 error numbers starts here
+skip-to-error-number 3000
+
+ER_FILE_CORRUPT
+ eng "File %s is corrupted"
+
+ER_ERROR_ON_MASTER
+ eng "Query partially completed on the master (error on master: %d) and was aborted. There is a chance that your master is inconsistent at this point. If you are sure that your master is ok, run this query manually on the slave and then restart the slave with SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; START SLAVE;. Query:'%s'"
+
+ER_INCONSISTENT_ERROR
+ eng "Query caused different errors on master and slave. Error on master: message (format)='%s' error code=%d; Error on slave:actual message='%s', error code=%d. Default database:'%s'. Query:'%s'"
+
+ER_STORAGE_ENGINE_NOT_LOADED
+ eng "Storage engine for table '%s'.'%s' is not loaded."
+
+ER_GET_STACKED_DA_WITHOUT_ACTIVE_HANDLER 0Z002
+ eng "GET STACKED DIAGNOSTICS when handler not active"
+
+ER_WARN_LEGACY_SYNTAX_CONVERTED
+ eng "%s is no longer supported. The statement was converted to %s."
+
+ER_BINLOG_UNSAFE_FULLTEXT_PLUGIN
+ eng "Statement is unsafe because it uses a fulltext parser plugin which may not return the same value on the slave."
+
+ER_CANNOT_DISCARD_TEMPORARY_TABLE
+ eng "Cannot DISCARD/IMPORT tablespace associated with temporary table"
+
+ER_FK_DEPTH_EXCEEDED
+ eng "Foreign key cascade delete/update exceeds max depth of %d."
+
+ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE_V2
+ eng "Column count of %s.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysql_upgrade to fix this error."
+ ger "Spaltenanzahl von %s.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
+
+ER_WARN_TRIGGER_DOESNT_HAVE_CREATED
+ eng "Trigger %s.%s.%s does not have CREATED attribute."
+
+ER_REFERENCED_TRG_DOES_NOT_EXIST_MYSQL
+ eng "Referenced trigger '%s' for the given action time and event type does not exist."
+
+ER_EXPLAIN_NOT_SUPPORTED
+ eng "EXPLAIN FOR CONNECTION command is supported only for SELECT/UPDATE/INSERT/DELETE/REPLACE"
+ER_INVALID_FIELD_SIZE
+ eng "Invalid size for column '%-.192s'."
+
+ER_MISSING_HA_CREATE_OPTION
+ eng "Table storage engine '%-.64s' found required create option missing"
+
+ER_ENGINE_OUT_OF_MEMORY
+ eng "Out of memory in storage engine '%-.64s'."
+
+ER_PASSWORD_EXPIRE_ANONYMOUS_USER
+ eng "The password for anonymous user cannot be expired."
+
+ER_SLAVE_SQL_THREAD_MUST_STOP
+ eng "This operation cannot be performed with a running slave sql thread; run STOP SLAVE SQL_THREAD first"
+
+ER_NO_FT_MATERIALIZED_SUBQUERY
+ eng "Cannot create FULLTEXT index on materialized subquery"
+
+ER_INNODB_UNDO_LOG_FULL
+ eng "Undo Log error: %s"
+
+ER_INVALID_ARGUMENT_FOR_LOGARITHM 2201E
+ eng "Invalid argument for logarithm"
+
+ER_SLAVE_CHANNEL_IO_THREAD_MUST_STOP
+ eng "This operation cannot be performed with a running slave io thread; run STOP SLAVE IO_THREAD FOR CHANNEL '%s' first."
+
+ER_WARN_OPEN_TEMP_TABLES_MUST_BE_ZERO
+ eng "This operation may not be safe when the slave has temporary tables. The tables will be kept open until the server restarts or until the tables are deleted by any replicated DROP statement. Suggest to wait until slave_open_temp_tables = 0."
+
+ER_WARN_ONLY_MASTER_LOG_FILE_NO_POS
+ eng "CHANGE MASTER TO with a MASTER_LOG_FILE clause but no MASTER_LOG_POS clause may not be safe. The old position value may not be valid for the new binary log file."
+
+ER_QUERY_TIMEOUT
+ eng "Query execution was interrupted, maximum statement execution time exceeded"
+
+ER_NON_RO_SELECT_DISABLE_TIMER
+ eng "Select is not a read only statement, disabling timer"
+
+ER_DUP_LIST_ENTRY
+ eng "Duplicate entry '%-.192s'."
+
+ER_SQL_MODE_NO_EFFECT
+ eng "'%s' mode no longer has any effect. Use STRICT_ALL_TABLES or STRICT_TRANS_TABLES instead."
+
+ER_AGGREGATE_ORDER_FOR_UNION
+ eng "Expression #%u of ORDER BY contains aggregate function and applies to a UNION"
+
+ER_AGGREGATE_ORDER_NON_AGG_QUERY
+ eng "Expression #%u of ORDER BY contains aggregate function and applies to the result of a non-aggregated query"
+
+ER_SLAVE_WORKER_STOPPED_PREVIOUS_THD_ERROR
+ eng "Slave worker has stopped after at least one previous worker encountered an error when slave-preserve-commit-order was enabled. To preserve commit order, the last transaction executed by this thread has not been committed. When restarting the slave after fixing any failed threads, you should fix this worker as well."
+
+ER_DONT_SUPPORT_SLAVE_PRESERVE_COMMIT_ORDER
+ eng "slave_preserve_commit_order is not supported %s."
+
+ER_SERVER_OFFLINE_MODE
+ eng "The server is currently in offline mode"
+
+ER_GIS_DIFFERENT_SRIDS
+ eng "Binary geometry function %s given two geometries of different srids: %u and %u, which should have been identical."
+
+ER_GIS_UNSUPPORTED_ARGUMENT
+ eng "Calling geometry function %s with unsupported types of arguments."
+
+ER_GIS_UNKNOWN_ERROR
+ eng "Unknown GIS error occurred in function %s."
+
+ER_GIS_UNKNOWN_EXCEPTION
+ eng "Unknown exception caught in GIS function %s."
+
+ER_GIS_INVALID_DATA 22023
+ eng "Invalid GIS data provided to function %s."
+
+ER_BOOST_GEOMETRY_EMPTY_INPUT_EXCEPTION
+ eng "The geometry has no data in function %s."
+
+ER_BOOST_GEOMETRY_CENTROID_EXCEPTION
+ eng "Unable to calculate centroid because geometry is empty in function %s."
+
+ER_BOOST_GEOMETRY_OVERLAY_INVALID_INPUT_EXCEPTION
+ eng "Geometry overlay calculation error: geometry data is invalid in function %s."
+
+ER_BOOST_GEOMETRY_TURN_INFO_EXCEPTION
+ eng "Geometry turn info calculation error: geometry data is invalid in function %s."
+
+ER_BOOST_GEOMETRY_SELF_INTERSECTION_POINT_EXCEPTION
+ eng "Analysis procedures of intersection points interrupted unexpectedly in function %s."
+
+ER_BOOST_GEOMETRY_UNKNOWN_EXCEPTION
+ eng "Unknown exception thrown in function %s."
+
+ER_STD_BAD_ALLOC_ERROR
+ eng "Memory allocation error: %-.256s in function %s."
+
+ER_STD_DOMAIN_ERROR
+ eng "Domain error: %-.256s in function %s."
+
+ER_STD_LENGTH_ERROR
+ eng "Length error: %-.256s in function %s."
+
+ER_STD_INVALID_ARGUMENT
+ eng "Invalid argument error: %-.256s in function %s."
+
+ER_STD_OUT_OF_RANGE_ERROR
+ eng "Out of range error: %-.256s in function %s."
+
+ER_STD_OVERFLOW_ERROR
+ eng "Overflow error: %-.256s in function %s."
+
+ER_STD_RANGE_ERROR
+ eng "Range error: %-.256s in function %s."
+
+ER_STD_UNDERFLOW_ERROR
+ eng "Underflow error: %-.256s in function %s."
+
+ER_STD_LOGIC_ERROR
+ eng "Logic error: %-.256s in function %s."
+
+ER_STD_RUNTIME_ERROR
+ eng "Runtime error: %-.256s in function %s."
+
+ER_STD_UNKNOWN_EXCEPTION
+ eng "Unknown exception: %-.384s in function %s."
+
+ER_GIS_DATA_WRONG_ENDIANESS
+ eng "Geometry byte string must be little endian."
+
+ER_CHANGE_MASTER_PASSWORD_LENGTH
+ eng "The password provided for the replication user exceeds the maximum length of 32 characters"
+
+ER_USER_LOCK_WRONG_NAME 42000
+ eng "Incorrect user-level lock name '%-.192s'."
+
+# Should be different from ER_LOCK_DEADLOCK since it doesn't cause implicit
+# rollback. Should not be mapped to SQLSTATE 40001 for the same reason.
+ER_USER_LOCK_DEADLOCK
+ eng "Deadlock found when trying to get user-level lock; try rolling back transaction/releasing locks and restarting lock acquisition."
+
+ER_REPLACE_INACCESSIBLE_ROWS
+ eng "REPLACE cannot be executed as it requires deleting rows that are not in the view"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS
+ eng "Do not support online operation on table with GIS index"
+
+# MariaDB extra error numbers starts from 4000
+skip-to-error-number 4000
+
+ER_COMMULTI_BADCONTEXT 0A000
+ eng "COM_MULTI can't return a result set in the given context"
+ ger "COM_MULTI kann im gegebenen Kontext keine Ergebnismenge zurückgeben"
+ ukr "COM_MULTI не може повернути результати у цьому контексті"
+ER_BAD_COMMAND_IN_MULTI
+ eng "Command '%s' is not allowed for COM_MULTI"
+ ukr "Команда '%s' не дозволена для COM_MULTI"
+ER_WITH_COL_WRONG_LIST
+ eng "WITH column list and SELECT field list have different column counts"
+ER_TOO_MANY_DEFINITIONS_IN_WITH_CLAUSE
+ eng "Too many WITH elements in WITH clause"
+ER_DUP_QUERY_NAME
+ eng "Duplicate query name in WITH clause '%s'"
+ER_RECURSIVE_WITHOUT_ANCHORS
+ eng "No anchors for recursive WITH element '%s'"
+ER_UNACCEPTABLE_MUTUAL_RECURSION
+ eng "Unacceptable mutual recursion with anchored table '%s'"
+ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED
+ eng "Reference to recursive WITH table '%s' in materialized derived"
+ER_NOT_STANDARD_COMPLIANT_RECURSIVE
+ eng "Restrictions imposed on recursive definitions are violated for table '%s'"
+ER_WRONG_WINDOW_SPEC_NAME
+ eng "Window specification with name '%s' is not defined"
+ER_DUP_WINDOW_NAME
+ eng "Multiple window specifications with the same name '%s'"
+ER_PARTITION_LIST_IN_REFERENCING_WINDOW_SPEC
+ eng "Window specification referencing another one '%s' cannot contain partition list"
+ER_ORDER_LIST_IN_REFERENCING_WINDOW_SPEC
+ eng "Referenced window specification '%s' already contains order list"
+ER_WINDOW_FRAME_IN_REFERENCED_WINDOW_SPEC
+ eng "Referenced window specification '%s' cannot contain window frame"
+ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS
+ eng "Unacceptable combination of window frame bound specifications"
+ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION
+ eng "Window function is allowed only in SELECT list and ORDER BY clause"
+ER_WINDOW_FUNCTION_IN_WINDOW_SPEC
+ eng "Window function is not allowed in window specification"
+ER_NOT_ALLOWED_WINDOW_FRAME
+ eng "Window frame is not allowed with '%s'"
+ER_NO_ORDER_LIST_IN_WINDOW_SPEC
+ eng "No order list in window specification for '%s'"
+ER_RANGE_FRAME_NEEDS_SIMPLE_ORDERBY
+ eng "RANGE-type frame requires ORDER BY clause with single sort key"
+ER_WRONG_TYPE_FOR_ROWS_FRAME
+ eng "Integer is required for ROWS-type frame"
+ER_WRONG_TYPE_FOR_RANGE_FRAME
+ eng "Numeric datatype is required for RANGE-type frame"
+ER_FRAME_EXCLUSION_NOT_SUPPORTED
+ eng "Frame exclusion is not supported yet"
+ER_WINDOW_FUNCTION_DONT_HAVE_FRAME
+ eng "This window function may not have a window frame"
+ER_INVALID_NTILE_ARGUMENT
+ eng "Argument of NTILE must be greater than 0"
+ER_CONSTRAINT_FAILED 23000
+ eng "CONSTRAINT %`s failed for %`-.192s.%`-.192s"
+ ger "CONSTRAINT %`s fehlgeschlagen: %`-.192s.%`-.192s"
+ rus "проверка CONSTRAINT %`s для %`-.192s.%`-.192s провалилась"
+ ukr "Перевірка CONSTRAINT %`s для %`-.192s.%`-.192s не пройшла"
+ER_EXPRESSION_IS_TOO_BIG
+ eng "Expression in the %s clause is too big"
+ER_ERROR_EVALUATING_EXPRESSION
+ eng "Got an error evaluating stored expression %s"
+ER_CALCULATING_DEFAULT_VALUE
+ eng "Got an error when calculating default value for %`s"
+ER_EXPRESSION_REFERS_TO_UNINIT_FIELD 01000
+ eng "Expression for field %`-.64s is referring to uninitialized field %`s"
+ER_PARTITION_DEFAULT_ERROR
+ eng "Only one DEFAULT partition allowed"
+ ukr "Припустимо мати тільки один DEFAULT розділ"
+ER_REFERENCED_TRG_DOES_NOT_EXIST
+ eng "Referenced trigger '%s' for the given action time and event type does not exist"
+ER_INVALID_DEFAULT_PARAM
+ eng "Default/ignore value is not supported for such parameter usage"
+ ukr "Значення за замовчуванням або ігнороване значення не підтримано для цього випадку використання параьетра"
+ER_BINLOG_NON_SUPPORTED_BULK
+ eng "Only row based replication supported for bulk operations"
+ER_BINLOG_UNCOMPRESS_ERROR
+ eng "Uncompress the compressed binlog failed"
+ER_JSON_BAD_CHR
+ eng "Broken JSON string in argument %d to function '%s' at position %d"
+ER_JSON_NOT_JSON_CHR
+ eng "Character disallowed in JSON in argument %d to function '%s' at position %d"
+ER_JSON_EOS
+ eng "Unexpected end of JSON text in argument %d to function '%s'"
+ER_JSON_SYNTAX
+ eng "Syntax error in JSON text in argument %d to function '%s' at position %d"
+ER_JSON_ESCAPING
+ eng "Incorrect escaping in JSON text in argument %d to function '%s' at position %d"
+ER_JSON_DEPTH
+ eng "Limit of %d on JSON nested structures depth is reached in argument %d to function '%s' at position %d"
+ER_JSON_PATH_EOS
+ eng "Unexpected end of JSON path in argument %d to function '%s'"
+ER_JSON_PATH_SYNTAX
+ eng "Syntax error in JSON path in argument %d to function '%s' at position %d"
+ER_JSON_PATH_DEPTH
+ eng "Limit of %d on JSON path depth is reached in argument %d to function '%s' at position %d"
+ER_JSON_PATH_NO_WILDCARD
+ eng "Wildcards in JSON path not allowed in argument %d to function '%s'"
+ER_JSON_PATH_ARRAY
+ eng "JSON path should end with an array identifier in argument %d to function '%s'"
+ER_JSON_ONE_OR_ALL
+ eng "Argument 2 to function '%s' must be \"one\" or \"all\"."
+ER_UNSUPPORT_COMPRESSED_TEMPORARY_TABLE
+ eng "CREATE TEMPORARY TABLE is not allowed with ROW_FORMAT=COMPRESSED or KEY_BLOCK_SIZE."
+ER_GEOJSON_INCORRECT
+ eng "Incorrect GeoJSON format specified for st_geomfromgeojson function."
+ER_GEOJSON_TOO_FEW_POINTS
+ eng "Incorrect GeoJSON format - too few points for linestring specified."
+ER_GEOJSON_NOT_CLOSED
+ eng "Incorrect GeoJSON format - polygon not closed."
+ER_JSON_PATH_EMPTY
+ eng "Path expression '$' is not allowed in argument %d to function '%s'."
+ER_SLAVE_SAME_ID
+ eng "A slave with the same server_uuid/server_id as this slave has connected to the master"
+ER_FLASHBACK_NOT_SUPPORTED
+ eng "Flashback does not support %s %s"
+
+
+
+#
+# MyRocks error messages
+#
+ER_KEYS_OUT_OF_ORDER
+ eng "Keys are out of order during bulk load"
+
+ER_OVERLAPPING_KEYS
+ eng "Bulk load rows overlap existing rows"
+
+ER_REQUIRE_ROW_BINLOG_FORMAT
+ eng "Can't execute updates on master with binlog_format != ROW."
+
+ER_ISOLATION_MODE_NOT_SUPPORTED
+ eng "MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level %s"
+
+ER_ON_DUPLICATE_DISABLED
+ eng "When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. Query: %s"
+
+ER_UPDATES_WITH_CONSISTENT_SNAPSHOT
+ eng "Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT."
+
+ER_ROLLBACK_ONLY
+ eng "This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction."
+
+ER_ROLLBACK_TO_SAVEPOINT
+ eng "MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows."
+
+ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT
+ eng "Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine."
+
+ER_UNSUPPORTED_COLLATION
+ eng "Unsupported collation on string indexed column %s.%s Use binary collation (%s)."
+
+ER_METADATA_INCONSISTENCY
+ eng "Table '%s' does not exist, but metadata information exists inside MyRocks. This is a sign of data inconsistency. Please check if '%s.frm' exists, and try to restore it if it does not exist."
+
+ER_CF_DIFFERENT
+ eng "Column family ('%s') flag (%d) is different from an existing flag (%d). Assign a new CF flag, or do not change existing CF flag."
+
+ER_RDB_TTL_DURATION_FORMAT
+ eng "TTL duration (%s) in MyRocks must be an unsigned non-null 64-bit integer."
+
+ER_RDB_STATUS_GENERAL
+ eng "Status error %d received from RocksDB: %s"
+
+ER_RDB_STATUS_MSG
+ eng "%s, Status error %d received from RocksDB: %s"
+
+ER_RDB_TTL_UNSUPPORTED
+ eng "TTL support is currently disabled when table has a hidden PK."
+
+ER_RDB_TTL_COL_FORMAT
+ eng "TTL column (%s) in MyRocks must be an unsigned non-null 64-bit integer, exist inside the table, and have an accompanying ttl duration."
+
+ER_PER_INDEX_CF_DEPRECATED
+ eng "The per-index column family option has been deprecated"
+
+ER_KEY_CREATE_DURING_ALTER
+ eng "MyRocks failed creating new key definitions during alter."
+
+ER_SK_POPULATE_DURING_ALTER
+ eng "MyRocks failed populating secondary key during alter."
+
+# MyRocks messages end
+ER_SUM_FUNC_WITH_WINDOW_FUNC_AS_ARG
+ eng "Window functions can not be used as arguments to group functions."
+
+ER_NET_OK_PACKET_TOO_LARGE
+ eng "OK packet too large"
+
+ER_GEOJSON_EMPTY_COORDINATES
+ eng "Incorrect GeoJSON format - empty 'coordinates' array."
+
+ER_MYROCKS_CANT_NOPAD_COLLATION
+ eng "MyRocks doesn't currently support collations with \"No pad\" attribute."
diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc
index 76af7733fb9..68e801d5885 100644
--- a/sql/signal_handler.cc
+++ b/sql/signal_handler.cc
@@ -113,7 +113,7 @@ extern "C" sig_handler handle_fatal_signal(int sig)
"diagnose the problem, but since we have already crashed, \n"
"something is definitely wrong and this may fail.\n\n");
- set_server_version();
+ set_server_version(server_version, sizeof(server_version));
my_safe_printf_stderr("Server version: %s\n", server_version);
if (dflt_key_cache)
@@ -201,6 +201,13 @@ extern "C" sig_handler handle_fatal_signal(int sig)
case ABORT_QUERY_HARD:
kreason= "ABORT_QUERY";
break;
+ case KILL_SLAVE_SAME_ID:
+ kreason= "KILL_SLAVE_SAME_ID";
+ break;
+ case KILL_WAIT_TIMEOUT:
+ case KILL_WAIT_TIMEOUT_HARD:
+ kreason= "KILL_WAIT_TIMEOUT";
+ break;
}
my_safe_printf_stderr("%s", "\n"
"Trying to get some variables.\n"
@@ -291,7 +298,9 @@ extern "C" sig_handler handle_fatal_signal(int sig)
#ifdef HAVE_WRITE_CORE
if (test_flags & TEST_CORE_ON_SIGNAL)
{
- my_safe_printf_stderr("%s", "Writing a core file\n");
+ char buff[80];
+ my_getwd(buff, sizeof(buff), 0);
+ my_safe_printf_stderr("Writing a core file at %s\n", buff);
fflush(stderr);
my_write_core(sig);
}
diff --git a/sql/slave.cc b/sql/slave.cc
index 6fe6de6872a..6b234697f09 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -40,7 +40,8 @@
#include <my_dir.h>
#include <sql_common.h>
#include <errmsg.h>
-#include <mysqld_error.h>
+#include <ssl_compat.h>
+#include "unireg.h"
#include <mysys_err.h>
#include "rpl_handler.h"
#include <signal.h>
@@ -60,7 +61,6 @@
#include "debug_sync.h"
#include "rpl_parallel.h"
-
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
#define MAX_SLAVE_RETRY_PAUSE 5
@@ -77,6 +77,7 @@ Master_info *active_mi= 0;
Master_info_index *master_info_index;
my_bool replicate_same_server_id;
ulonglong relay_log_space_limit = 0;
+ulonglong opt_read_binlog_speed_limit = 0;
const char *relay_log_index= 0;
const char *relay_log_basename= 0;
@@ -217,7 +218,7 @@ static void set_slave_max_allowed_packet(THD *thd, MYSQL *mysql)
void init_thread_mask(int* mask,Master_info* mi,bool inverse)
{
bool set_io = mi->slave_running, set_sql = mi->rli.slave_running;
- register int tmp_mask=0;
+ int tmp_mask=0;
DBUG_ENTER("init_thread_mask");
if (set_io)
@@ -296,11 +297,8 @@ handle_slave_background(void *arg __attribute__((unused)))
bool stop;
my_thread_init();
- thd= new THD;
+ thd= new THD(next_thread_id());
thd->thread_stack= (char*) &thd; /* Set approximate stack start */
- mysql_mutex_lock(&LOCK_thread_count);
- thd->thread_id= thread_id++;
- mysql_mutex_unlock(&LOCK_thread_count);
thd->system_thread = SYSTEM_THREAD_SLAVE_BACKGROUND;
thread_safe_increment32(&service_thread_count);
thd->store_globals();
@@ -365,8 +363,8 @@ handle_slave_background(void *arg __attribute__((unused)))
delete thd;
thread_safe_decrement32(&service_thread_count);
signal_thd_deleted();
- my_thread_end();
+ my_thread_end();
return 0;
}
@@ -521,7 +519,7 @@ int init_slave()
if (active_mi->host[0] && !opt_skip_slave_start)
{
int error;
- THD *thd= new THD;
+ THD *thd= new THD(next_thread_id());
thd->thread_stack= (char*) &thd;
thd->store_globals();
@@ -741,8 +739,7 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock)
DBUG_PRINT("info",("Flushing relay-log info file."));
if (current_thd)
THD_STAGE_INFO(current_thd, stage_flushing_relay_log_info_file);
- if (flush_relay_log_info(&mi->rli) ||
- my_sync(mi->rli.info_fd, MYF(MY_WME)))
+ if (mi->rli.flush() || my_sync(mi->rli.info_fd, MYF(MY_WME)))
retval= ER_ERROR_DURING_FLUSH_LOGS;
mysql_mutex_unlock(log_lock);
@@ -1080,6 +1077,7 @@ void slave_prepare_for_shutdown()
mysql_mutex_lock(&LOCK_active_mi);
master_info_index->free_connections();
mysql_mutex_unlock(&LOCK_active_mi);
+ stop_slave_background_thread();
}
/*
@@ -1642,8 +1640,10 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
(master_res= mysql_store_result(mysql)) &&
(master_row= mysql_fetch_row(master_res)))
{
+ mysql_mutex_lock(&mi->data_lock);
mi->clock_diff_with_master=
(long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10));
+ mysql_mutex_unlock(&mi->data_lock);
}
else if (check_io_slave_killed(mi, NULL))
goto slave_killed_err;
@@ -1655,7 +1655,9 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
}
else
{
+ mysql_mutex_lock(&mi->data_lock);
mi->clock_diff_with_master= 0; /* The "most sensible" value */
+ mysql_mutex_unlock(&mi->data_lock);
sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, "
"do not trust column Seconds_Behind_Master of SHOW "
"SLAVE STATUS. Error: %s (%d)",
@@ -2806,6 +2808,15 @@ void show_master_info_get_fields(THD *thd, List<Item> *field_list,
Item_empty_string(thd, "Parallel_Mode",
sizeof("conservative")-1),
mem_root);
+ field_list->push_back(new (mem_root)
+ Item_return_int(thd, "SQL_Delay", 10,
+ MYSQL_TYPE_LONG));
+ field_list->push_back(new (mem_root)
+ Item_return_int(thd, "SQL_Remaining_Delay", 8,
+ MYSQL_TYPE_LONG));
+ field_list->push_back(new (mem_root)
+ Item_empty_string(thd, "Slave_SQL_Running_State",
+ 20));
if (full)
{
field_list->push_back(new (mem_root)
@@ -2997,6 +3008,7 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full,
prot_store_ids(thd, &mi->ignore_server_ids);
// Master_Server_id
protocol->store((uint32) mi->master_id);
+ // SQL_Delay
// Master_Ssl_Crl
protocol->store(mi->ssl_ca, &my_charset_bin);
// Master_Ssl_Crlpath
@@ -3019,6 +3031,22 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full,
protocol->store(mode_name, strlen(mode_name), &my_charset_bin);
}
+ protocol->store((uint32) mi->rli.get_sql_delay());
+ // SQL_Remaining_Delay
+ // THD::proc_info is not protected by any lock, so we read it once
+ // to ensure that we use the same value throughout this function.
+ const char *slave_sql_running_state=
+ mi->rli.sql_driver_thd ? mi->rli.sql_driver_thd->proc_info : "";
+ if (slave_sql_running_state == Relay_log_info::state_delaying_string)
+ {
+ time_t t= my_time(0), sql_delay_end= mi->rli.get_sql_delay_end();
+ protocol->store((uint32)(t < sql_delay_end ? sql_delay_end - t : 0));
+ }
+ else
+ protocol->store_null();
+ // Slave_SQL_Running_State
+ protocol->store(slave_sql_running_state, &my_charset_bin);
+
if (full)
{
protocol->store((uint32) mi->rli.retried_trans);
@@ -3143,13 +3171,10 @@ void set_slave_thread_default_charset(THD* thd, rpl_group_info *rgi)
{
DBUG_ENTER("set_slave_thread_default_charset");
- thd->variables.character_set_client=
- global_system_variables.character_set_client;
- thd->variables.collation_connection=
- global_system_variables.collation_connection;
thd->variables.collation_server=
global_system_variables.collation_server;
- thd->update_charset();
+ thd->update_charset(global_system_variables.character_set_client,
+ global_system_variables.collation_connection);
thd->system_thread_info.rpl_sql_info->cached_charset_invalidate();
DBUG_VOID_RETURN;
@@ -3188,9 +3213,6 @@ static int init_slave_thread(THD* thd, Master_info *mi,
thd->variables.sql_log_slow= opt_log_slow_slave_statements;
thd->variables.log_slow_filter= global_system_variables.log_slow_filter;
set_slave_thread_options(thd);
- mysql_mutex_lock(&LOCK_thread_count);
- thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
- mysql_mutex_unlock(&LOCK_thread_count);
if (thd_type == SLAVE_THD_SQL)
THD_STAGE_INFO(thd, stage_waiting_for_the_next_event_in_relay_log);
@@ -3296,13 +3318,15 @@ static int request_dump(THD *thd, MYSQL* mysql, Master_info* mi,
try a reconnect. We do not want to print anything to
the error log in this case because this a anormal
event in an idle server.
+ network_read_len gets the real network read length in VIO, especially when using the compressed protocol
RETURN VALUES
'packet_error' Error
number Length of packet
*/
-static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings)
+static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings,
+ ulong* network_read_len)
{
ulong len;
DBUG_ENTER("read_event");
@@ -3317,7 +3341,7 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings)
DBUG_RETURN(packet_error);
#endif
- len = cli_safe_read(mysql);
+ len = cli_safe_read_reallen(mysql, network_read_len);
if (len == packet_error || (long) len < 1)
{
if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED)
@@ -3392,6 +3416,73 @@ has_temporary_error(THD *thd)
}
+/**
+ If this is a lagging slave (specified with CHANGE MASTER TO MASTER_DELAY = X), delays accordingly. Also unlocks rli->data_lock.
+
+ Design note: this is the place to unlock rli->data_lock. The lock
+ must be held when reading delay info from rli, but it should not be
+ held while sleeping.
+
+ @param ev Event that is about to be executed.
+
+ @param thd The sql thread's THD object.
+
+ @param rli The sql thread's Relay_log_info structure.
+
+ @retval 0 If the delay timed out and the event shall be executed.
+
+ @retval nonzero If the delay was interrupted and the event shall be skipped.
+*/
+int
+sql_delay_event(Log_event *ev, THD *thd, rpl_group_info *rgi)
+{
+ Relay_log_info* rli= rgi->rli;
+ long sql_delay= rli->get_sql_delay();
+
+ DBUG_ENTER("sql_delay_event");
+ mysql_mutex_assert_owner(&rli->data_lock);
+ DBUG_ASSERT(!rli->belongs_to_client());
+
+ int type= ev->get_type_code();
+ if (sql_delay && type != ROTATE_EVENT &&
+ type != FORMAT_DESCRIPTION_EVENT && type != START_EVENT_V3)
+ {
+ // The time when we should execute the event.
+ time_t sql_delay_end=
+ ev->when + rli->mi->clock_diff_with_master + sql_delay;
+ // The current time.
+ time_t now= my_time(0);
+ // The time we will have to sleep before executing the event.
+ unsigned long nap_time= 0;
+ if (sql_delay_end > now)
+ nap_time= (ulong)(sql_delay_end - now);
+
+ DBUG_PRINT("info", ("sql_delay= %lu "
+ "ev->when= %lu "
+ "rli->mi->clock_diff_with_master= %lu "
+ "now= %ld "
+ "sql_delay_end= %llu "
+ "nap_time= %ld",
+ sql_delay, (long)ev->when,
+ rli->mi->clock_diff_with_master,
+ (long)now, (ulonglong)sql_delay_end, (long)nap_time));
+
+ if (sql_delay_end > now)
+ {
+ DBUG_PRINT("info", ("delaying replication event %lu secs",
+ nap_time));
+ rli->start_sql_delay(sql_delay_end);
+ mysql_mutex_unlock(&rli->data_lock);
+ DBUG_RETURN(slave_sleep(thd, nap_time, sql_slave_killed, rgi));
+ }
+ }
+
+ mysql_mutex_unlock(&rli->data_lock);
+
+ DBUG_RETURN(0);
+}
+
+
/*
First half of apply_event_and_update_pos(), see below.
Setup some THD variables for applying the event.
@@ -3441,12 +3532,6 @@ apply_event_and_update_pos_setup(Log_event* ev, THD* thd, rpl_group_info *rgi)
thd->variables.server_id = ev->server_id;
thd->set_time(); // time the query
thd->lex->current_select= 0;
- if (!ev->when)
- {
- my_hrtime_t hrtime= my_hrtime();
- ev->when= hrtime_to_my_time(hrtime);
- ev->when_sec_part= hrtime_sec_part(hrtime);
- }
thd->variables.option_bits=
(thd->variables.option_bits & ~OPTION_SKIP_REPLICATION) |
(ev->flags & LOG_EVENT_SKIP_REPLICATION_F ? OPTION_SKIP_REPLICATION : 0);
@@ -3515,16 +3600,16 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi,
if (exec_res == 0)
{
int error= ev->update_pos(rgi);
-#ifdef HAVE_valgrind
- if (!rli->is_fake)
-#endif
+ #ifndef DBUG_OFF
+ DBUG_PRINT("info", ("update_pos error = %d", error));
+ if (!rli->belongs_to_client())
{
- DBUG_PRINT("info", ("update_pos error = %d", error));
DBUG_PRINT("info", ("group %llu %s", rli->group_relay_log_pos,
rli->group_relay_log_name));
DBUG_PRINT("info", ("event %llu %s", rli->event_relay_log_pos,
rli->event_relay_log_name));
}
+#endif
/*
The update should not fail, so print an error message and
return an error code.
@@ -3559,21 +3644,39 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi,
/**
Applies the given event and advances the relay log position.
- In essence, this function does:
+ This is needed by the sql thread to execute events from the binlog,
+ and by clients executing BINLOG statements. Conceptually, this
+ function does:
@code
ev->apply_event(rli);
ev->update_pos(rli);
@endcode
- But it also does some maintainance, such as skipping events if
- needed and reporting errors.
+ It also does the following maintenance:
- If the @c skip flag is set, then it is tested whether the event
- should be skipped, by looking at the slave_skip_counter and the
- server id. The skip flag should be set when calling this from a
- replication thread but not set when executing an explicit BINLOG
- statement.
+ - Initializes the thread's server_id and time; and the event's
+ thread.
+
+ - If !rli->belongs_to_client() (i.e., if it belongs to the slave
+ sql thread instead of being used for executing BINLOG
+ statements), it does the following things: (1) skips events if it
+ is needed according to the server id or slave_skip_counter; (2)
+ unlocks rli->data_lock; (3) sleeps if required by 'CHANGE MASTER
+ TO MASTER_DELAY=X'; (4) maintains the running state of the sql
+ thread (rli->thread_state).
+
+ - Reports errors as needed.
+
+ @param ev The event to apply.
+
+ @param thd The client thread that executes the event (i.e., the
+ slave sql thread if called from a replication slave, or the client
+ thread if called to execute a BINLOG statement).
+
+ @param rli The relay log info (i.e., the slave's rli if called from
+ a replication slave, or the client's thd->rli_fake if called to
+ execute a BINLOG statement).
@retval 0 OK.
@@ -3596,7 +3699,16 @@ apply_event_and_update_pos(Log_event* ev, THD* thd, rpl_group_info *rgi)
DBUG_ASSERT(rli->slave_skip_counter > 0);
rli->slave_skip_counter--;
}
- mysql_mutex_unlock(&rli->data_lock);
+
+ if (reason == Log_event::EVENT_SKIP_NOT)
+ {
+ // Sleeps if needed, and unlocks rli->data_lock.
+ if (sql_delay_event(ev, thd, rgi))
+ return 0;
+ }
+ else
+ mysql_mutex_unlock(&rli->data_lock);
+
return apply_event_and_update_pos_apply(ev, thd, rgi, reason);
}
@@ -3620,6 +3732,10 @@ apply_event_and_update_pos_for_parallel(Log_event* ev, THD* thd,
driver thread, so 23 should never see EVENT_SKIP_COUNT here.
*/
DBUG_ASSERT(reason != Log_event::EVENT_SKIP_COUNT);
+ /*
+ Calling sql_delay_event() was handled in the SQL driver thread when
+ doing parallel replication.
+ */
return apply_event_and_update_pos_apply(ev, thd, rgi, reason);
}
@@ -3667,7 +3783,7 @@ inline void update_state_of_relay_log(Relay_log_info *rli, Log_event *ev)
}
/* Check for an event that starts or stops a transaction */
- if (typ == QUERY_EVENT)
+ if (LOG_EVENT_IS_QUERY(typ))
{
Query_log_event *qev= (Query_log_event*) ev;
/*
@@ -3699,7 +3815,8 @@ inline void update_state_of_relay_log(Relay_log_info *rli, Log_event *ev)
/**
- Top-level function for executing the next event from the relay log.
+ Top-level function for executing the next event in the relay log.
+ This is called from the SQL thread.
This function reads the event from the relay log, executes it, and
advances the relay log position. It also handles errors, etc.
@@ -3806,7 +3923,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
*/
DBUG_EXECUTE_IF("incomplete_group_in_relay_log",
if ((typ == XID_EVENT) ||
- ((typ == QUERY_EVENT) &&
+ (LOG_EVENT_IS_QUERY(typ) &&
strcmp("COMMIT", ((Query_log_event *) ev)->query) == 0))
{
DBUG_ASSERT(thd->transaction.all.modified_non_trans_table);
@@ -3837,6 +3954,19 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
This is the case for pre-10.0 events without GTID, and for handling
slave_skip_counter.
*/
+ if (!(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0)))
+ {
+ /*
+ Ignore FD's timestamp as it does not reflect the slave execution
+ state but likely to reflect a deep past. Consequently when the first
+ data modification event execution last long all this time
+ Seconds_Behind_Master is zero.
+ */
+ if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT)
+ rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;
+
+ DBUG_ASSERT(rli->last_master_timestamp >= 0);
+ }
}
if (typ == GTID_EVENT)
@@ -4128,7 +4258,7 @@ pthread_handler_t handle_slave_io(void *arg)
mysql= NULL ;
retry_count= 0;
- thd= new THD; // note that contructor of THD uses DBUG_ !
+ thd= new THD(next_thread_id()); // note that constructor of THD uses DBUG_ !
mysql_mutex_lock(&mi->run_lock);
/* Inform waiting threads that slave has started */
@@ -4151,9 +4281,7 @@ pthread_handler_t handle_slave_io(void *arg)
goto err_during_init;
}
thd->system_thread_info.rpl_io_info= &io_info;
- mysql_mutex_lock(&LOCK_thread_count);
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
+ add_to_active_threads(thd);
mi->slave_running = MYSQL_SLAVE_RUN_NOT_CONNECT;
mi->abort_slave = 0;
mysql_mutex_unlock(&mi->run_lock);
@@ -4254,8 +4382,10 @@ connected:
};);
#endif
- // TODO: the assignment below should be under mutex (5.0)
+ mysql_mutex_lock(&mi->run_lock);
mi->slave_running= MYSQL_SLAVE_RUN_CONNECT;
+ mysql_mutex_unlock(&mi->run_lock);
+
thd->slave_net = &mysql->net;
THD_STAGE_INFO(thd, stage_checking_master_version);
ret= get_master_version_and_clock(mysql, mi);
@@ -4302,6 +4432,7 @@ connected:
}
DBUG_PRINT("info",("Starting reading binary log from master"));
+ thd->set_command(COM_SLAVE_IO);
while (!io_slave_killed(mi))
{
THD_STAGE_INFO(thd, stage_requesting_binlog_dump);
@@ -4319,9 +4450,11 @@ connected:
mi->slave_running= MYSQL_SLAVE_RUN_READING;
DBUG_ASSERT(mi->last_error().number == 0);
+ ulonglong lastchecktime = my_hrtime().val;
+ ulonglong tokenamount = opt_read_binlog_speed_limit*1024;
while (!io_slave_killed(mi))
{
- ulong event_len;
+ ulong event_len, network_read_len = 0;
/*
We say "waiting" because read_event() will wait if there's nothing to
read. But if there's something to read, it will not wait. The
@@ -4329,7 +4462,7 @@ connected:
we're in fact receiving nothing.
*/
THD_STAGE_INFO(thd, stage_waiting_for_master_to_send_event);
- event_len= read_event(mysql, mi, &suppress_warnings);
+ event_len= read_event(mysql, mi, &suppress_warnings, &network_read_len);
if (check_io_slave_killed(mi, NullS))
goto err;
@@ -4377,6 +4510,47 @@ Stopping slave I/O thread due to out-of-memory error from master");
goto err;
}
+ /* Control the binlog read speed of master
+ when read_binlog_speed_limit is non-zero
+ */
+ ulonglong speed_limit_in_bytes = opt_read_binlog_speed_limit * 1024;
+ if (speed_limit_in_bytes)
+ {
+ /* Prevent tokenamount from becoming a large value,
+ for example, when the IO thread doesn't work for a long time
+ */
+ if (tokenamount > speed_limit_in_bytes * 2)
+ {
+ lastchecktime = my_hrtime().val;
+ tokenamount = speed_limit_in_bytes * 2;
+ }
+
+ do
+ {
+ ulonglong currenttime = my_hrtime().val;
+ tokenamount += (currenttime - lastchecktime) * speed_limit_in_bytes / (1000*1000);
+ lastchecktime = currenttime;
+ if(tokenamount < network_read_len)
+ {
+ ulonglong duration =1000ULL*1000 * (network_read_len - tokenamount) / speed_limit_in_bytes;
+ time_t second_time = (time_t)(duration / (1000 * 1000));
+ uint micro_time = duration % (1000 * 1000);
+
+ // at least sleep 1000 micro second
+ my_sleep(MY_MAX(micro_time,1000));
+
+ /*
+ If it sleeps more than one second,
+ it should use slave_sleep() to avoid STOP SLAVE hanging.
+ */
+ if (second_time)
+ slave_sleep(thd, second_time, io_slave_killed, mi);
+
+ }
+ }while(tokenamount < network_read_len);
+ tokenamount -= network_read_len;
+ }
+
/* XXX: 'synced' should be updated by queue_event to indicate
whether event has been synced to disk */
bool synced= 0;
@@ -4476,6 +4650,7 @@ err:
flush_master_info(mi, TRUE, TRUE);
THD_STAGE_INFO(thd, stage_waiting_for_slave_mutex_on_exit);
thd->add_status_to_global();
+ unlink_not_visible_thd(thd);
mysql_mutex_lock(&mi->run_lock);
err_during_init:
@@ -4506,9 +4681,7 @@ err_during_init:
DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
-#ifdef HAVE_OPENSSL
ERR_remove_state(0);
-#endif
pthread_exit(0);
return 0; // Avoid compiler warnings
}
@@ -4535,7 +4708,8 @@ int check_temp_dir(char* tmp_file)
size_t tmp_dir_size;
DBUG_ENTER("check_temp_dir");
- mysql_mutex_lock(&LOCK_thread_count);
+ /* This lock is safe to use as this function is only called once */
+ mysql_mutex_lock(&LOCK_start_thread);
if (check_temp_dir_run)
{
if ((result= check_temp_dir_result))
@@ -4574,7 +4748,7 @@ int check_temp_dir(char* tmp_file)
mysql_file_delete(key_file_misc, tmp_file, MYF(0));
end:
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_mutex_unlock(&LOCK_start_thread);
DBUG_RETURN(result);
}
@@ -4700,7 +4874,7 @@ pthread_handler_t handle_slave_sql(void *arg)
#endif
serial_rgi= new rpl_group_info(rli);
- thd = new THD; // note that contructor of THD uses DBUG_ !
+ thd = new THD(next_thread_id()); // note that constructor of THD uses DBUG_ !
thd->thread_stack = (char*)&thd; // remember where our stack is
thd->system_thread_info.rpl_sql_info= &sql_info;
@@ -4762,9 +4936,7 @@ pthread_handler_t handle_slave_sql(void *arg)
applied. In all other cases it must be FALSE.
*/
thd->variables.binlog_annotate_row_events= 0;
- mysql_mutex_lock(&LOCK_thread_count);
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
+ add_to_active_threads(thd);
/*
We are going to set slave_running to 1. Assuming slave I/O thread is
alive and connected, this is going to make Seconds_Behind_Master be 0
@@ -4948,6 +5120,7 @@ pthread_handler_t handle_slave_sql(void *arg)
/* Read queries from the IO/THREAD until this thread is killed */
+ thd->set_command(COM_SLAVE_SQL);
while (!sql_slave_killed(serial_rgi))
{
THD_STAGE_INFO(thd, stage_reading_event_from_the_relay_log);
@@ -5042,11 +5215,11 @@ pthread_handler_t handle_slave_sql(void *arg)
my_bool save_log_all_errors= thd->log_all_errors;
/*
- We don't need to check return value for flush_relay_log_info()
+ We don't need to check return value for rli->flush()
as any errors should be logged to stderr
*/
thd->log_all_errors= 1;
- flush_relay_log_info(rli);
+ rli->flush();
thd->log_all_errors= save_log_all_errors;
if (mi->using_parallel())
{
@@ -5090,7 +5263,9 @@ pthread_handler_t handle_slave_sql(void *arg)
}
THD_STAGE_INFO(thd, stage_waiting_for_slave_mutex_on_exit);
thd->add_status_to_global();
+ unlink_not_visible_thd(thd);
mysql_mutex_lock(&rli->run_lock);
+
err_during_init:
/* We need data_lock, at least to wake up any waiting master_pos_wait() */
mysql_mutex_lock(&rli->data_lock);
@@ -5112,14 +5287,13 @@ err_during_init:
/*
TODO: see if we can do this conditionally in next_event() instead
to avoid unneeded position re-init
+
+ We only reset THD::temporary_tables to 0 here and do not free it, as this
+ could still be used by the slave through Relay_log_info::save_temporary_tables.
*/
- thd->temporary_tables = 0; // remove tempation from destructor to close them
- THD_CHECK_SENTRY(thd);
+ thd->temporary_tables= 0;
rli->sql_driver_thd= 0;
- mysql_mutex_lock(&LOCK_thread_count);
thd->rgi_fake= thd->rgi_slave= NULL;
- delete serial_rgi;
- mysql_mutex_unlock(&LOCK_thread_count);
#ifdef WITH_WSREP
/*
@@ -5148,28 +5322,29 @@ err_during_init:
#endif /* WITH_WSREP */
/*
- Note: the order of the broadcast and unlock calls below (first broadcast, then unlock)
- is important. Otherwise a killer_thread can execute between the calls and
- delete the mi structure leading to a crash! (see BUG#25306 for details)
- */
+ Note: the order of the broadcast and unlock calls below (first
+ broadcast, then unlock) is important. Otherwise a killer_thread can
+ execute between the calls and delete the mi structure leading to a
+ crash! (see BUG#25306 for details)
+ */
mysql_cond_broadcast(&rli->stop_cond);
DBUG_EXECUTE_IF("simulate_slave_delay_at_terminate_bug38694", sleep(5););
mysql_mutex_unlock(&rli->run_lock); // tell the world we are done
rpl_parallel_resize_pool_if_no_slaves();
+ /* TODO: Check if this lock is needed */
mysql_mutex_lock(&LOCK_thread_count);
- thd->unlink();
+ delete serial_rgi;
mysql_mutex_unlock(&LOCK_thread_count);
+
delete thd;
thread_safe_decrement32(&service_thread_count);
signal_thd_deleted();
DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
-#ifdef HAVE_OPENSSL
ERR_remove_state(0);
-#endif
pthread_exit(0);
return 0; // Avoid compiler warnings
}
@@ -5584,9 +5759,12 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
bool gtid_skip_enqueue= false;
bool got_gtid_event= false;
rpl_gtid event_gtid;
-#ifndef DBUG_OFF
- static uint dbug_rows_event_count= 0;
-#endif
+ static uint dbug_rows_event_count __attribute__((unused))= 0;
+ bool is_compress_event = false;
+ char* new_buf = NULL;
+ char new_buf_arr[4096];
+ bool is_malloc = false;
+
/*
FD_q must have been prepared for the first R_a event
inside get_master_version_and_clock()
@@ -5632,7 +5810,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
// Emulate the network corruption
DBUG_EXECUTE_IF("corrupt_queue_event",
- if (buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT)
+ if ((uchar)buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT)
{
char *debug_event_buf_c = (char*) buf;
int debug_cor_pos = rand() % (event_len - BINLOG_CHECKSUM_LEN);
@@ -5935,9 +6113,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
TODO: handling `when' for SHOW SLAVE STATUS' snds behind
*/
- if (memcmp(mi->master_log_name, hb.get_log_ident(), hb.get_ident_len())
- || mi->master_log_pos > hb.log_pos)
- {
+ if (memcmp(mi->master_log_name, hb.get_log_ident(), hb.get_ident_len()) ||
+ mi->master_log_pos > hb.log_pos) {
/* missed events of heartbeat from the past */
error= ER_SLAVE_HEARTBEAT_FAILURE;
error_msg.append(STRING_WITH_LEN("heartbeat is not compatible with local info;"));
@@ -6085,6 +6262,51 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
inc_pos= event_len;
}
break;
+ /*
+ Binlog compressed event should uncompress in IO thread
+ */
+ case QUERY_COMPRESSED_EVENT:
+ inc_pos= event_len;
+ if (query_event_uncompress(rli->relay_log.description_event_for_queue,
+ checksum_alg == BINLOG_CHECKSUM_ALG_CRC32,
+ buf, event_len, new_buf_arr, sizeof(new_buf_arr),
+ &is_malloc, (char **)&new_buf, &event_len))
+ {
+ char llbuf[22];
+ error = ER_BINLOG_UNCOMPRESS_ERROR;
+ error_msg.append(STRING_WITH_LEN("binlog uncompress error, master log_pos: "));
+ llstr(mi->master_log_pos, llbuf);
+ error_msg.append(llbuf, strlen(llbuf));
+ goto err;
+ }
+ buf = new_buf;
+ is_compress_event = true;
+ goto default_action;
+
+ case WRITE_ROWS_COMPRESSED_EVENT:
+ case UPDATE_ROWS_COMPRESSED_EVENT:
+ case DELETE_ROWS_COMPRESSED_EVENT:
+ case WRITE_ROWS_COMPRESSED_EVENT_V1:
+ case UPDATE_ROWS_COMPRESSED_EVENT_V1:
+ case DELETE_ROWS_COMPRESSED_EVENT_V1:
+ inc_pos = event_len;
+ {
+ if (row_log_event_uncompress(rli->relay_log.description_event_for_queue,
+ checksum_alg == BINLOG_CHECKSUM_ALG_CRC32,
+ buf, event_len, new_buf_arr, sizeof(new_buf_arr),
+ &is_malloc, (char **)&new_buf, &event_len))
+ {
+ char llbuf[22];
+ error = ER_BINLOG_UNCOMPRESS_ERROR;
+ error_msg.append(STRING_WITH_LEN("binlog uncompress error, master log_pos: "));
+ llstr(mi->master_log_pos, llbuf);
+ error_msg.append(llbuf, strlen(llbuf));
+ goto err;
+ }
+ }
+ buf = new_buf;
+ is_compress_event = true;
+ goto default_action;
#ifndef DBUG_OFF
case XID_EVENT:
@@ -6102,7 +6324,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
DBUG_EXECUTE_IF("kill_slave_io_after_2_events",
{
if (mi->dbug_do_disconnect &&
- (((uchar)buf[EVENT_TYPE_OFFSET] == QUERY_EVENT) ||
+ (LOG_EVENT_IS_QUERY((Log_event_type)(uchar)buf[EVENT_TYPE_OFFSET]) ||
((uchar)buf[EVENT_TYPE_OFFSET] == TABLE_MAP_EVENT))
&& (--mi->dbug_event_counter == 0))
{
@@ -6115,7 +6337,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
DBUG_EXECUTE_IF("kill_slave_io_before_commit",
{
if ((uchar)buf[EVENT_TYPE_OFFSET] == XID_EVENT ||
- ((uchar)buf[EVENT_TYPE_OFFSET] == QUERY_EVENT &&
+ ((uchar)buf[EVENT_TYPE_OFFSET] == QUERY_EVENT && /* QUERY_COMPRESSED_EVENT would never be a commit or rollback */
Query_log_event::peek_is_commit_rollback(buf, event_len,
checksum_alg)))
{
@@ -6135,7 +6357,9 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
++mi->events_queued_since_last_gtid;
}
- inc_pos= event_len;
+ if (!is_compress_event)
+ inc_pos= event_len;
+
break;
}
@@ -6226,8 +6450,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
/* everything is filtered out from non-master */
(s_id != mi->master_id ||
/* for the master meta information is necessary */
- (buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT &&
- buf[EVENT_TYPE_OFFSET] != ROTATE_EVENT))) ||
+ ((uchar)buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT &&
+ (uchar)buf[EVENT_TYPE_OFFSET] != ROTATE_EVENT))) ||
/*
Check whether it needs to be filtered based on domain_id
@@ -6256,9 +6480,9 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
*/
if (!(s_id == global_system_variables.server_id &&
!mi->rli.replicate_same_server_id) ||
- (buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT &&
- buf[EVENT_TYPE_OFFSET] != ROTATE_EVENT &&
- buf[EVENT_TYPE_OFFSET] != STOP_EVENT))
+ ((uchar)buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT &&
+ (uchar)buf[EVENT_TYPE_OFFSET] != ROTATE_EVENT &&
+ (uchar)buf[EVENT_TYPE_OFFSET] != STOP_EVENT))
{
mi->master_log_pos+= inc_pos;
memcpy(rli->ign_master_log_name_end, mi->master_log_name, FN_REFLEN);
@@ -6299,7 +6523,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
buf[EVENT_TYPE_OFFSET])) ||
(!mi->last_queued_gtid_standalone &&
((uchar)buf[EVENT_TYPE_OFFSET] == XID_EVENT ||
- ((uchar)buf[EVENT_TYPE_OFFSET] == QUERY_EVENT &&
+ ((uchar)buf[EVENT_TYPE_OFFSET] == QUERY_EVENT && /* QUERY_COMPRESSED_EVENT would never be a commit or rollback */
Query_log_event::peek_is_commit_rollback(buf, event_len,
checksum_alg))))))
{
@@ -6329,6 +6553,9 @@ err:
mi->report(ERROR_LEVEL, error, NULL, ER_DEFAULT(error),
error_msg.ptr());
+ if(is_malloc)
+ my_free((void *)new_buf);
+
DBUG_RETURN(error);
}
@@ -6646,75 +6873,6 @@ MYSQL *rpl_connect_master(MYSQL *mysql)
}
#endif
-/*
- Store the file and position where the execute-slave thread are in the
- relay log.
-
- SYNOPSIS
- flush_relay_log_info()
- rli Relay log information
-
- NOTES
- - As this is only called by the slave thread or on STOP SLAVE, with the
- log_lock grabbed and the slave thread stopped, we don't need to have
- a lock here.
- - If there is an active transaction, then we don't update the position
- in the relay log. This is to ensure that we re-execute statements
- if we die in the middle of an transaction that was rolled back.
- - As a transaction never spans binary logs, we don't have to handle the
- case where we do a relay-log-rotation in the middle of the transaction.
- If this would not be the case, we would have to ensure that we
- don't delete the relay log file where the transaction started when
- we switch to a new relay log file.
-
- TODO
- - Change the log file information to a binary format to avoid calling
- longlong2str.
-
- RETURN VALUES
- 0 ok
- 1 write error
-*/
-
-bool flush_relay_log_info(Relay_log_info* rli)
-{
- bool error=0;
- DBUG_ENTER("flush_relay_log_info");
-
- if (unlikely(rli->no_storage))
- DBUG_RETURN(0);
-
- IO_CACHE *file = &rli->info_file;
- char buff[FN_REFLEN*2+22*2+4], *pos;
-
- my_b_seek(file, 0L);
- pos=strmov(buff, rli->group_relay_log_name);
- *pos++='\n';
- pos= longlong10_to_str(rli->group_relay_log_pos, pos, 10);
- *pos++='\n';
- pos=strmov(pos, rli->group_master_log_name);
- *pos++='\n';
- pos=longlong10_to_str(rli->group_master_log_pos, pos, 10);
- *pos='\n';
- if (my_b_write(file, (uchar*) buff, (size_t) (pos-buff)+1))
- error=1;
- if (flush_io_cache(file))
- error=1;
- if (sync_relayloginfo_period &&
- !error &&
- ++(rli->sync_counter) >= sync_relayloginfo_period)
- {
- if (my_sync(rli->info_fd, MYF(MY_WME)))
- error=1;
- rli->sync_counter= 0;
- }
- /*
- Flushing the relay log is done by the slave I/O thread
- or by the user on STOP SLAVE.
- */
- DBUG_RETURN(error);
-}
-
/*
Called when we notice that the current "hot" log got rotated under our feet.
@@ -7074,7 +7232,7 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
}
rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE;
strmake_buf(rli->event_relay_log_name,rli->linfo.log_file_name);
- if (flush_relay_log_info(rli))
+ if (rli->flush())
{
errmsg= "error flushing relay log";
goto err;
diff --git a/sql/slave.h b/sql/slave.h
index 58c8106614d..d3cbaae03df 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -18,6 +18,14 @@
#define SLAVE_H
/**
+ MASTER_DELAY can be at most (1 << 31) - 1.
+*/
+#define MASTER_DELAY_MAX (0x7FFFFFFF)
+#if INT_MAX < 0x7FFFFFFF
+#error "don't support platforms where INT_MAX < 0x7FFFFFFF"
+#endif
+
+/**
@defgroup Replication Replication
@{
@@ -41,9 +49,9 @@
#include "rpl_filter.h"
#include "rpl_tblmap.h"
-#define SLAVE_NET_TIMEOUT 3600
+#define SLAVE_NET_TIMEOUT 60
-#define MAX_SLAVE_ERROR 2000
+#define MAX_SLAVE_ERROR ER_ERROR_LAST+1
#define MAX_REPLICATION_THREAD 64
@@ -102,12 +110,14 @@ int init_dynarray_intvar_from_file(DYNAMIC_ARRAY* arr, IO_CACHE* f);
In Master_info: run_lock, data_lock
run_lock protects all information about the run state: slave_running, thd
- and the existence of the I/O thread to stop/start it, you need this mutex).
+ and the existence of the I/O thread (to stop/start it, you need this mutex).
data_lock protects some moving members of the struct: counters (log name,
position) and relay log (MYSQL_BIN_LOG object).
In Relay_log_info: run_lock, data_lock
see Master_info
+ However, note that run_lock does not protect
+ Relay_log_info.run_state; that is protected by data_lock.
Order of acquisition: if you want to have LOCK_active_mi and a run_lock, you
must acquire LOCK_active_mi first.
@@ -130,6 +140,7 @@ extern my_bool opt_log_slave_updates;
extern char *opt_slave_skip_errors;
extern my_bool opt_replicate_annotate_row_events;
extern ulonglong relay_log_space_limit;
+extern ulonglong opt_read_binlog_speed_limit;
extern ulonglong slave_skipped_errors;
extern const char *relay_log_index;
extern const char *relay_log_basename;
@@ -173,7 +184,6 @@ extern const char *relay_log_basename;
int init_slave();
int init_recovery(Master_info* mi, const char** errmsg);
void init_slave_skip_errors(const char* arg);
-bool flush_relay_log_info(Relay_log_info* rli);
int register_slave_on_master(MYSQL* mysql);
int terminate_slave_threads(Master_info* mi, int thread_mask,
bool skip_lock = 0);
@@ -241,11 +251,18 @@ void set_slave_thread_options(THD* thd);
void set_slave_thread_default_charset(THD *thd, rpl_group_info *rgi);
int rotate_relay_log(Master_info* mi);
int has_temporary_error(THD *thd);
+int sql_delay_event(Log_event *ev, THD *thd, rpl_group_info *rgi);
int apply_event_and_update_pos(Log_event* ev, THD* thd,
struct rpl_group_info *rgi);
int apply_event_and_update_pos_for_parallel(Log_event* ev, THD* thd,
struct rpl_group_info *rgi);
+int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
+int init_floatvar_from_file(float* var, IO_CACHE* f, float default_val);
+int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
+ const char *default_val);
+int init_dynarray_intvar_from_file(DYNAMIC_ARRAY* arr, IO_CACHE* f);
+
pthread_handler_t handle_slave_io(void *arg);
void slave_output_error_info(rpl_group_info *rgi, THD *thd);
pthread_handler_t handle_slave_sql(void *arg);
diff --git a/sql/sp.cc b/sql/sp.cc
index a5a14ec8d85..207ece47356 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -41,7 +41,7 @@
static int
db_load_routine(THD *thd, stored_procedure_type type, sp_name *name,
sp_head **sphp,
- ulonglong sql_mode, const char *params, const char *returns,
+ sql_mode_t sql_mode, const char *params, const char *returns,
const char *body, st_sp_chistics &chistics,
LEX_STRING *definer_user_name, LEX_STRING *definer_host_name,
longlong created, longlong modified,
@@ -539,7 +539,7 @@ db_find_routine(THD *thd, stored_procedure_type type, sp_name *name,
char buff[65];
String str(buff, sizeof(buff), &my_charset_bin);
bool saved_time_zone_used= thd->time_zone_used;
- ulonglong sql_mode, saved_mode= thd->variables.sql_mode;
+ sql_mode_t sql_mode, saved_mode= thd->variables.sql_mode;
Open_tables_backup open_tables_state_backup;
Stored_program_creation_ctx *creation_ctx;
char definer_user_name_holder[USERNAME_LENGTH + 1];
@@ -691,7 +691,7 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl);
};
@@ -701,13 +701,13 @@ Silence_deprecated_warning::handle_condition(
THD *,
uint sql_errno,
const char*,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char*,
Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
if (sql_errno == ER_WARN_DEPRECATED_SYNTAX &&
- level == Sql_condition::WARN_LEVEL_WARN)
+ *level == Sql_condition::WARN_LEVEL_WARN)
return TRUE;
return FALSE;
@@ -727,11 +727,11 @@ Silence_deprecated_warning::handle_condition(
@retval 0 error
*/
-static sp_head *sp_compile(THD *thd, String *defstr, ulonglong sql_mode,
+static sp_head *sp_compile(THD *thd, String *defstr, sql_mode_t sql_mode,
Stored_program_creation_ctx *creation_ctx)
{
sp_head *sp;
- ulonglong old_sql_mode= thd->variables.sql_mode;
+ sql_mode_t old_sql_mode= thd->variables.sql_mode;
ha_rows old_select_limit= thd->variables.select_limit;
sp_rcontext *old_spcont= thd->spcont;
Silence_deprecated_warning warning_handler;
@@ -780,7 +780,7 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* message,
Sql_condition ** cond_hdl);
@@ -794,7 +794,8 @@ bool
Bad_db_error_handler::handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level
+ *level,
const char* message,
Sql_condition ** cond_hdl)
{
@@ -810,7 +811,7 @@ Bad_db_error_handler::handle_condition(THD *thd,
static int
db_load_routine(THD *thd, stored_procedure_type type,
sp_name *name, sp_head **sphp,
- ulonglong sql_mode, const char *params, const char *returns,
+ sql_mode_t sql_mode, const char *params, const char *returns,
const char *body, st_sp_chistics &chistics,
LEX_STRING *definer_user_name, LEX_STRING *definer_host_name,
longlong created, longlong modified,
@@ -1024,7 +1025,7 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
TABLE *table;
char definer_buf[USER_HOST_BUFF_SIZE];
LEX_STRING definer;
- ulonglong saved_mode= thd->variables.sql_mode;
+ sql_mode_t saved_mode= thd->variables.sql_mode;
MDL_key::enum_mdl_namespace mdl_type= type == TYPE_ENUM_FUNCTION ?
MDL_key::FUNCTION : MDL_key::PROCEDURE;
@@ -1275,8 +1276,8 @@ log:
{
thd->clear_error();
- String log_query;
- log_query.set_charset(system_charset_info);
+ StringBuffer<128> log_query(thd->variables.character_set_client);
+ DBUG_ASSERT(log_query.charset()->mbminlen == 1);
if (!show_create_sp(thd, &log_query,
sp->m_type,
@@ -1477,7 +1478,7 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
@@ -1752,11 +1753,11 @@ sp_find_routine(THD *thd, stored_procedure_type type, sp_name *name,
String retstr(64);
retstr.set_charset(sp->get_creation_ctx()->get_client_cs());
- DBUG_PRINT("info", ("found: 0x%lx", (ulong)sp));
+ DBUG_PRINT("info", ("found:%p", sp));
if (sp->m_first_free_instance)
{
- DBUG_PRINT("info", ("first free: 0x%lx level: %lu flags %x",
- (ulong)sp->m_first_free_instance,
+ DBUG_PRINT("info", ("first free:%p level: %lu flags %x",
+ sp->m_first_free_instance,
sp->m_first_free_instance->m_recursion_level,
sp->m_first_free_instance->m_flags));
DBUG_ASSERT(!(sp->m_first_free_instance->m_flags & sp_head::IS_INVOKED));
@@ -1796,8 +1797,8 @@ sp_find_routine(THD *thd, stored_procedure_type type, sp_name *name,
new_sp->m_recursion_level= level;
new_sp->m_first_instance= sp;
sp->m_last_cached_sp= sp->m_first_free_instance= new_sp;
- DBUG_PRINT("info", ("added level: 0x%lx, level: %lu, flags %x",
- (ulong)new_sp, new_sp->m_recursion_level,
+ DBUG_PRINT("info", ("added level:%p, level: %lu, flags %x",
+ new_sp, new_sp->m_recursion_level,
new_sp->m_flags));
DBUG_RETURN(new_sp);
}
@@ -1808,8 +1809,8 @@ sp_find_routine(THD *thd, stored_procedure_type type, sp_name *name,
if (db_find_routine(thd, type, name, &sp) == SP_OK)
{
sp_cache_insert(cp, sp);
- DBUG_PRINT("info", ("added new: 0x%lx, level: %lu, flags %x",
- (ulong)sp, sp->m_recursion_level,
+ DBUG_PRINT("info", ("added new:%p, level: %lu, flags %x",
+ sp, sp->m_recursion_level,
sp->m_flags));
}
}
@@ -2212,9 +2213,9 @@ show_create_sp(THD *thd, String *buf,
st_sp_chistics *chistics,
const LEX_STRING *definer_user,
const LEX_STRING *definer_host,
- ulonglong sql_mode)
+ sql_mode_t sql_mode)
{
- ulonglong old_sql_mode= thd->variables.sql_mode;
+ sql_mode_t old_sql_mode= thd->variables.sql_mode;
/* Make some room to begin with */
if (buf->alloc(100 + dblen + 1 + namelen + paramslen + returnslen + bodylen +
chistics->comment.length + 10 /* length of " DEFINER= "*/ +
@@ -2301,7 +2302,8 @@ show_create_sp(THD *thd, String *buf,
sp_head *
sp_load_for_information_schema(THD *thd, TABLE *proc_table, String *db,
- String *name, ulong sql_mode, stored_procedure_type type,
+ String *name, sql_mode_t sql_mode,
+ stored_procedure_type type,
const char *returns, const char *params,
bool *free_sp_head)
{
diff --git a/sql/sp.h b/sql/sp.h
index df60482f8fd..96d49cfe676 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -200,7 +200,8 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_backup *backup);
sp_head *
sp_load_for_information_schema(THD *thd, TABLE *proc_table, String *db,
- String *name, ulong sql_mode, stored_procedure_type type,
+ String *name, sql_mode_t sql_mode,
+ stored_procedure_type type,
const char *returns, const char *params,
bool *free_sp_head);
@@ -228,5 +229,5 @@ bool show_create_sp(THD *thd, String *buf,
st_sp_chistics *chistics,
const LEX_STRING *definer_user,
const LEX_STRING *definer_host,
- ulonglong sql_mode);
+ sql_mode_t sql_mode);
#endif /* _SP_H_ */
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 852ba453090..c473aec51a1 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -29,6 +29,7 @@
#include "sql_array.h" // Dynamic_array
#include "log_event.h" // Query_log_event
#include "sql_derived.h" // mysql_handle_derived
+#include "sql_cte.h"
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation
@@ -70,28 +71,6 @@ static void reset_start_time_for_sp(THD *thd)
thd->set_start_time();
}
-Item_result
-sp_map_result_type(enum enum_field_types type)
-{
- switch (type) {
- case MYSQL_TYPE_BIT:
- case MYSQL_TYPE_TINY:
- case MYSQL_TYPE_SHORT:
- case MYSQL_TYPE_LONG:
- case MYSQL_TYPE_LONGLONG:
- case MYSQL_TYPE_INT24:
- return INT_RESULT;
- case MYSQL_TYPE_DECIMAL:
- case MYSQL_TYPE_NEWDECIMAL:
- return DECIMAL_RESULT;
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- return REAL_RESULT;
- default:
- return STRING_RESULT;
- }
-}
-
Item::Type
sp_map_item_type(enum enum_field_types type)
@@ -238,6 +217,7 @@ sp_get_flags_for_command(LEX *lex)
case SQLCOM_SHOW_CREATE_PROC:
case SQLCOM_SHOW_CREATE_EVENT:
case SQLCOM_SHOW_CREATE_TRIGGER:
+ case SQLCOM_SHOW_CREATE_USER:
case SQLCOM_SHOW_DATABASES:
case SQLCOM_SHOW_ERRORS:
case SQLCOM_SHOW_EXPLAIN:
@@ -275,6 +255,7 @@ sp_get_flags_for_command(LEX *lex)
statement within an IF condition.
*/
case SQLCOM_EXECUTE:
+ case SQLCOM_EXECUTE_IMMEDIATE:
flags= sp_head::MULTI_RESULTS | sp_head::CONTAINS_DYNAMIC_SQL;
break;
case SQLCOM_PREPARE:
@@ -306,6 +287,7 @@ sp_get_flags_for_command(LEX *lex)
case SQLCOM_CREATE_USER:
case SQLCOM_CREATE_ROLE:
case SQLCOM_ALTER_TABLE:
+ case SQLCOM_ALTER_USER:
case SQLCOM_GRANT:
case SQLCOM_GRANT_ROLE:
case SQLCOM_REVOKE:
@@ -568,7 +550,7 @@ sp_head::operator new(size_t size) throw()
if (sp == NULL)
DBUG_RETURN(NULL);
sp->main_mem_root= own_root;
- DBUG_PRINT("info", ("mem_root 0x%lx", (ulong) &sp->mem_root));
+ DBUG_PRINT("info", ("mem_root %p", &sp->mem_root));
DBUG_RETURN(sp);
}
@@ -585,8 +567,8 @@ sp_head::operator delete(void *ptr, size_t size) throw()
/* Make a copy of main_mem_root as free_root will free the sp */
own_root= sp->main_mem_root;
- DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx",
- (ulong) &sp->mem_root, (ulong) &own_root));
+ DBUG_PRINT("info", ("mem_root %p moved to %p",
+ &sp->mem_root, &own_root));
free_root(&own_root, MYF(0));
DBUG_VOID_RETURN;
@@ -725,6 +707,7 @@ sp_head::set_stmt_end(THD *thd)
{
Lex_input_stream *lip= & thd->m_parser_state->m_lip; /* shortcut */
const char *end_ptr= lip->get_cpp_ptr(); /* shortcut */
+ uint not_used;
/* Make the string of parameters. */
@@ -742,7 +725,7 @@ sp_head::set_stmt_end(THD *thd)
m_body.length= end_ptr - m_body_begin;
m_body.str= thd->strmake(m_body_begin, m_body.length);
- trim_whitespace(thd->charset(), & m_body);
+ trim_whitespace(thd->charset(), &m_body, &not_used);
/* Make the string of UTF-body. */
@@ -750,7 +733,7 @@ sp_head::set_stmt_end(THD *thd)
m_body_utf8.length= lip->get_body_utf8_length();
m_body_utf8.str= thd->strmake(lip->get_body_utf8_str(), m_body_utf8.length);
- trim_whitespace(thd->charset(), & m_body_utf8);
+ trim_whitespace(thd->charset(), &m_body_utf8, &not_used);
/*
Make the string of whole stored-program-definition query (in the
@@ -759,12 +742,12 @@ sp_head::set_stmt_end(THD *thd)
m_defstr.length= end_ptr - lip->get_cpp_buf();
m_defstr.str= thd->strmake(lip->get_cpp_buf(), m_defstr.length);
- trim_whitespace(thd->charset(), & m_defstr);
+ trim_whitespace(thd->charset(), &m_defstr, &not_used);
}
static TYPELIB *
-create_typelib(MEM_ROOT *mem_root, Create_field *field_def, List<String> *src)
+create_typelib(MEM_ROOT *mem_root, Column_definition *field_def, List<String> *src)
{
TYPELIB *result= NULL;
CHARSET_INFO *cs= field_def->charset;
@@ -863,30 +846,56 @@ Field *
sp_head::create_result_field(uint field_max_length, const char *field_name,
TABLE *table)
{
- uint field_length;
Field *field;
DBUG_ENTER("sp_head::create_result_field");
- field_length= !m_return_field_def.length ?
- field_max_length : m_return_field_def.length;
-
- field= ::make_field(table->s, /* TABLE_SHARE ptr */
- table->in_use->mem_root,
- (uchar*) 0, /* field ptr */
- field_length, /* field [max] length */
- (uchar*) "", /* null ptr */
- 0, /* null bit */
- m_return_field_def.pack_flag,
- m_return_field_def.sql_type,
- m_return_field_def.charset,
- m_return_field_def.geom_type, m_return_field_def.srid,
- Field::NONE, /* unreg check */
- m_return_field_def.interval,
- field_name ? field_name : (const char *) m_name.str);
+ /*
+ m_return_field_def.length is always set to the field length calculated
+ by the parser, according to the RETURNS clause. See prepare_create_field()
+ in sql_table.cc. Value examples, depending on data type:
+ - 11 for INT (character representation length)
+ - 20 for BIGINT (character representation length)
+ - 22 for DOUBLE (character representation length)
+ - N for CHAR(N) CHARACTER SET latin1 (octet length)
+ - 3*N for CHAR(N) CHARACTER SET utf8 (octet length)
+ - 8 for blob-alike data types (packed length !!!)
+
+ field_max_length is also set according to the data type in the RETURNS
+ clause but can have different values depending on the execution stage:
+
+ 1. During direct execution:
+ field_max_length is 0, because Item_func_sp::fix_length_and_dec() has
+ not been called yet, so Item_func_sp::max_length is 0 by default.
+
+ 2a. During PREPARE:
+ field_max_length is 0, because Item_func_sp::fix_length_and_dec()
+ has not been called yet. It's called after create_result_field().
+
+ 2b. During EXEC:
+ field_max_length is set to the maximum possible octet length of the
+ RETURNS data type.
+ - N for CHAR(N) CHARACTER SET latin1 (octet length)
+ - 3*N for CHAR(N) CHARACTER SET utf8 (octet length)
+ - 255 for TINYBLOB (octet length, not packed length !!!)
+
+ Perhaps we should refactor prepare_create_field() to set
+ Create_field::length to maximum octet length for BLOBs,
+ instead of packed length).
+ */
+ DBUG_ASSERT(field_max_length <= m_return_field_def.length ||
+ (current_thd->stmt_arena->is_stmt_execute() &&
+ m_return_field_def.length == 8 &&
+ (m_return_field_def.pack_flag &
+ (FIELDFLAG_BLOB|FIELDFLAG_GEOM))));
+
+ field= m_return_field_def.make_field(table->s, /* TABLE_SHARE ptr */
+ table->in_use->mem_root,
+ field_name ?
+ field_name :
+ (const char *) m_name.str);
field->vcol_info= m_return_field_def.vcol_info;
- field->stored_in_db= m_return_field_def.stored_in_db;
if (field)
field->init(table);
@@ -1129,6 +1138,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
uint old_server_status;
const uint status_backup_mask= SERVER_STATUS_CURSOR_EXISTS |
SERVER_STATUS_LAST_ROW_SENT;
+ MEM_ROOT *user_var_events_alloc_saved= 0;
Reprepare_observer *save_reprepare_observer= thd->m_reprepare_observer;
Object_creation_ctx *UNINIT_VAR(saved_creation_ctx);
Diagnostics_area *da= thd->get_stmt_da();
@@ -1147,9 +1157,9 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
if (m_next_cached_sp)
{
DBUG_PRINT("info",
- ("first free for 0x%lx ++: 0x%lx->0x%lx level: %lu flags %x",
- (ulong)m_first_instance, (ulong) this,
- (ulong) m_next_cached_sp,
+ ("first free for %p ++: %p->%p level: %lu flags %x",
+ m_first_instance, this,
+ m_next_cached_sp,
m_next_cached_sp->m_recursion_level,
m_next_cached_sp->m_flags));
}
@@ -1185,7 +1195,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
/*
Switch query context. This has to be done early as this is sometimes
- allocated trough sql_alloc
+ allocated on THD::mem_root
*/
if (m_creation_ctx)
saved_creation_ctx= m_creation_ctx->set_n_backup(thd);
@@ -1230,7 +1240,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
We should also save Item tree change list to avoid rollback something
too early in the calling query.
*/
- thd->change_list.move_elements_to(&old_change_list);
+ thd->Item_change_list::move_elements_to(&old_change_list);
/*
Cursors will use thd->packet, so they may corrupt data which was prepared
for sending by upper level. OTOH cursors in the same routine can share this
@@ -1307,9 +1317,11 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
Will write this SP statement into binlog separately.
TODO: consider changing the condition to "not inside event union".
*/
- MEM_ROOT *user_var_events_alloc_saved= thd->user_var_events_alloc;
if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
+ {
+ user_var_events_alloc_saved= thd->user_var_events_alloc;
thd->user_var_events_alloc= thd->mem_root;
+ }
sql_digest_state *parent_digest= thd->m_digest;
thd->m_digest= NULL;
@@ -1370,8 +1382,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
/* Restore all saved */
thd->server_status= (thd->server_status & ~status_backup_mask) | old_server_status;
old_packet.swap(thd->packet);
- DBUG_ASSERT(thd->change_list.is_empty());
- old_change_list.move_elements_to(&thd->change_list);
+ DBUG_ASSERT(thd->Item_change_list::is_empty());
+ old_change_list.move_elements_to(thd);
thd->lex= old_lex;
thd->set_query_id(old_query_id);
DBUG_ASSERT(!thd->derived_tables);
@@ -1447,10 +1459,10 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
}
m_flags&= ~IS_INVOKED;
DBUG_PRINT("info",
- ("first free for 0x%lx --: 0x%lx->0x%lx, level: %lu, flags %x",
- (ulong) m_first_instance,
- (ulong) m_first_instance->m_first_free_instance,
- (ulong) this, m_recursion_level, m_flags));
+ ("first free for %p --: %p->%p, level: %lu, flags %x",
+ m_first_instance,
+ m_first_instance->m_first_free_instance,
+ this, m_recursion_level, m_flags));
/*
Check that we have one of following:
@@ -2040,6 +2052,8 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
break;
}
}
+
+ TRANSACT_TRACKER(add_trx_state_from_thd(thd));
}
/*
@@ -2148,7 +2162,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
}
Send_field *out_param_info= new (thd->mem_root) Send_field();
- nctx->get_item(i)->make_field(out_param_info);
+ nctx->get_item(i)->make_field(thd, out_param_info);
out_param_info->db_name= m_db.str;
out_param_info->table_name= m_name.str;
out_param_info->org_table_name= m_name.str;
@@ -2291,9 +2305,9 @@ sp_head::restore_lex(THD *thd)
Put the instruction on the backpatch list, associated with the label.
*/
int
-sp_head::push_backpatch(sp_instr *i, sp_label *lab)
+sp_head::push_backpatch(THD *thd, sp_instr *i, sp_label *lab)
{
- bp_t *bp= (bp_t *)sql_alloc(sizeof(bp_t));
+ bp_t *bp= (bp_t *) thd->alloc(sizeof(bp_t));
if (!bp)
return 1;
@@ -2318,8 +2332,8 @@ sp_head::backpatch(sp_label *lab)
{
if (bp->lab == lab)
{
- DBUG_PRINT("info", ("backpatch: (m_ip %d, label 0x%lx <%s>) to dest %d",
- bp->instr->m_ip, (ulong) lab, lab->name.str, dest));
+ DBUG_PRINT("info", ("backpatch: (m_ip %d, label %p <%s>) to dest %d",
+ bp->instr->m_ip, lab, lab->name.str, dest));
bp->instr->backpatch(dest, lab->ctx);
}
}
@@ -2327,12 +2341,11 @@ sp_head::backpatch(sp_label *lab)
}
/**
- Prepare an instance of Create_field for field creation (fill all necessary
- attributes).
+ Prepare an instance of Column_definition for field creation
+ (fill all necessary attributes).
@param[in] thd Thread handle
@param[in] lex Yacc parsing context
- @param[in] field_type Field type
@param[out] field_def An instance of create_field to be filled
@retval
@@ -2343,8 +2356,7 @@ sp_head::backpatch(sp_label *lab)
bool
sp_head::fill_field_definition(THD *thd, LEX *lex,
- enum enum_field_types field_type,
- Create_field *field_def)
+ Column_definition *field_def)
{
uint unused1= 0;
@@ -2403,7 +2415,7 @@ sp_head::do_cont_backpatch()
void
sp_head::set_info(longlong created, longlong modified,
- st_sp_chistics *chistics, ulonglong sql_mode)
+ st_sp_chistics *chistics, sql_mode_t sql_mode)
{
m_created= created;
m_modified= modified;
@@ -2457,8 +2469,8 @@ sp_head::reset_thd_mem_root(THD *thd)
DBUG_ENTER("sp_head::reset_thd_mem_root");
m_thd_root= thd->mem_root;
thd->mem_root= &main_mem_root;
- DBUG_PRINT("info", ("mem_root 0x%lx moved to thd mem root 0x%lx",
- (ulong) &mem_root, (ulong) &thd->mem_root));
+ DBUG_PRINT("info", ("mem_root %p moved to thd mem root %p",
+ &mem_root, &thd->mem_root));
free_list= thd->free_list; // Keep the old list
thd->free_list= NULL; // Start a new one
m_thd= thd;
@@ -2488,8 +2500,8 @@ sp_head::restore_thd_mem_root(THD *thd)
set_query_arena(thd); // Get new free_list and mem_root
state= STMT_INITIALIZED_FOR_SP;
- DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx",
- (ulong) &mem_root, (ulong) &thd->mem_root));
+ DBUG_PRINT("info", ("mem_root %p returned from thd mem root %p",
+ &mem_root, &thd->mem_root));
thd->free_list= flist; // Restore the old one
thd->mem_root= m_thd_root;
m_thd= NULL;
@@ -2947,7 +2959,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
bool parent_modified_non_trans_table= thd->transaction.stmt.modified_non_trans_table;
thd->transaction.stmt.modified_non_trans_table= FALSE;
DBUG_ASSERT(!thd->derived_tables);
- DBUG_ASSERT(thd->change_list.is_empty());
+ DBUG_ASSERT(thd->Item_change_list::is_empty());
/*
Use our own lex.
We should not save old value since it is saved/restored in
@@ -2978,8 +2990,21 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
reinit_stmt_before_use(thd, m_lex);
+#ifndef EMBEDDED_LIBRARY
+ /*
+ If there was instruction which changed tracking state,
+ the result of changed tracking state send to client in OK packed.
+ So it changes result sent to client and probably can be different
+ independent on query text. So we can't cache such results.
+ */
+ if ((thd->client_capabilities & CLIENT_SESSION_TRACK) &&
+ (thd->server_status & SERVER_SESSION_STATE_CHANGED))
+ thd->lex->safe_to_cache_query= 0;
+#endif
+
if (open_tables)
- res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables);
+ res= check_dependencies_in_with_clauses(m_lex->with_clauses_list) ||
+ instr->exec_open_and_lock_tables(thd, m_lex->query_tables);
if (!res)
{
@@ -3054,6 +3079,9 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
what is needed from the substatement gained
*/
thd->transaction.stmt.modified_non_trans_table |= parent_modified_non_trans_table;
+
+ TRANSACT_TRACKER(add_trx_state_from_thd(thd));
+
/*
Unlike for PS we should not call Item's destructors for newly created
items after execution of each instruction in stored routine. This is
@@ -3080,7 +3108,7 @@ int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables)
Check whenever we have access to tables for this statement
and open and lock them before executing instructions core function.
*/
- if (open_temporary_tables(thd, tables) ||
+ if (thd->open_temporary_tables(tables) ||
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)
|| open_and_lock_tables(thd, tables, TRUE, 0))
result= -1;
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 604190079cb..9f1745f2aab 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -45,9 +45,6 @@
//#define TYPE_ENUM_FUNCTION 1 #define TYPE_ENUM_PROCEDURE 2 #define
//TYPE_ENUM_TRIGGER 3 #define TYPE_ENUM_PROXY 4
-Item_result
-sp_map_result_type(enum enum_field_types type);
-
Item::Type
sp_map_item_type(enum enum_field_types type);
@@ -181,11 +178,11 @@ public:
stored_procedure_type m_type;
uint m_flags; // Boolean attributes of a stored routine
- Create_field m_return_field_def; /**< This is used for FUNCTIONs only. */
+ Column_definition m_return_field_def; /**< This is used for FUNCTIONs only. */
const char *m_tmp_query; ///< Temporary pointer to sub query string
st_sp_chistics *m_chistics;
- ulonglong m_sql_mode; ///< For SHOW CREATE and execution
+ sql_mode_t m_sql_mode; ///< For SHOW CREATE and execution
LEX_STRING m_qname; ///< db.name
bool m_explicit_name; ///< Prepend the db name? */
LEX_STRING m_db;
@@ -389,7 +386,7 @@ public:
/// Put the instruction on the backpatch list, associated with the label.
int
- push_backpatch(sp_instr *, sp_label *);
+ push_backpatch(THD *thd, sp_instr *, sp_label *);
/// Update all instruction with this label in the backpatch list to
/// the current position.
@@ -421,11 +418,10 @@ public:
TABLE *table);
bool fill_field_definition(THD *thd, LEX *lex,
- enum enum_field_types field_type,
- Create_field *field_def);
+ Column_definition *field_def);
void set_info(longlong created, longlong modified,
- st_sp_chistics *chistics, ulonglong sql_mode);
+ st_sp_chistics *chistics, sql_mode_t sql_mode);
void set_definer(const char *definer, uint definerlen);
void set_definer(const LEX_STRING *user_name, const LEX_STRING *host_name);
diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc
index faf5a2de891..9a6353c9337 100644
--- a/sql/sp_pcontext.cc
+++ b/sql/sp_pcontext.cc
@@ -450,7 +450,7 @@ bool sp_pcontext::find_cursor(LEX_STRING name,
void sp_pcontext::retrieve_field_definitions(
- List<Create_field> *field_def_lst) const
+ List<Column_definition> *field_def_lst) const
{
/* Put local/context fields in the result list. */
diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h
index efe9531c3a0..d4d532340fb 100644
--- a/sql/sp_pcontext.h
+++ b/sql/sp_pcontext.h
@@ -43,9 +43,6 @@ public:
/// Name of the SP-variable.
LEX_STRING name;
- /// Field-type of the SP-variable.
- enum enum_field_types type;
-
/// Mode of the SP-variable.
enum_mode mode;
@@ -60,13 +57,14 @@ public:
Item *default_value;
/// Full type information (field meta-data) of the SP-variable.
- Create_field field_def;
+ Column_definition field_def;
+ /// Field-type of the SP-variable.
+ enum_field_types sql_type() const { return field_def.sql_type; }
public:
sp_variable(LEX_STRING _name, uint _offset)
:Sql_alloc(),
name(_name),
- type(MYSQL_TYPE_NULL),
mode(MODE_IN),
offset(_offset),
default_value(NULL)
@@ -325,11 +323,11 @@ public:
/// @return the current number of variables used in the parent contexts
/// (from the root), including this context.
uint current_var_count() const
- { return m_var_offset + m_vars.elements(); }
+ { return m_var_offset + (uint)m_vars.elements(); }
/// @return the number of variables in this context alone.
uint context_var_count() const
- { return m_vars.elements(); }
+ { return (uint)m_vars.elements(); }
/// @return map index in this parsing context to runtime offset.
uint var_context2runtime(uint i) const
@@ -347,7 +345,7 @@ public:
/// context and its children.
///
/// @param field_def_lst[out] Container to store type information.
- void retrieve_field_definitions(List<Create_field> *field_def_lst) const;
+ void retrieve_field_definitions(List<Column_definition> *field_def_lst) const;
/// Find SP-variable by name.
///
@@ -478,10 +476,10 @@ public:
const LEX_STRING *find_cursor(uint offset) const;
uint max_cursor_index() const
- { return m_max_cursor_index + m_cursors.elements(); }
+ { return m_max_cursor_index + (uint)m_cursors.elements(); }
uint current_cursor_count() const
- { return m_cursor_offset + m_cursors.elements(); }
+ { return m_cursor_offset + (uint)m_cursors.elements(); }
private:
/// Constructor for a tree node.
diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc
index 4d74d2721f1..d612e15c000 100644
--- a/sql/sp_rcontext.cc
+++ b/sql/sp_rcontext.cc
@@ -61,6 +61,7 @@ sp_rcontext *sp_rcontext::create(THD *thd,
const sp_pcontext *root_parsing_ctx,
Field *return_value_fld)
{
+ SELECT_LEX *save_current_select;
sp_rcontext *ctx= new (thd->mem_root) sp_rcontext(root_parsing_ctx,
return_value_fld,
thd->in_sub_stmt);
@@ -68,14 +69,19 @@ sp_rcontext *sp_rcontext::create(THD *thd,
if (!ctx)
return NULL;
+ /* Reset current_select as it's checked in Item_ident::Item_ident */
+ save_current_select= thd->lex->current_select;
+ thd->lex->current_select= 0;
+
if (ctx->alloc_arrays(thd) ||
ctx->init_var_table(thd) ||
ctx->init_var_items(thd))
{
delete ctx;
- return NULL;
+ ctx= 0;
}
+ thd->lex->current_select= save_current_select;
return ctx;
}
@@ -104,7 +110,7 @@ bool sp_rcontext::alloc_arrays(THD *thd)
bool sp_rcontext::init_var_table(THD *thd)
{
- List<Create_field> field_def_lst;
+ List<Column_definition> field_def_lst;
if (!m_root_parsing_ctx->max_var_index())
return false;
@@ -503,9 +509,15 @@ int sp_cursor::fetch(THD *thd, List<sp_variable> *vars)
result.set_spvar_list(vars);
+ DBUG_ASSERT(!thd->is_error());
+
/* Attempt to fetch one row */
if (server_side_cursor->is_open())
+ {
server_side_cursor->fetch(1);
+ if (thd->is_error())
+ return -1; // e.g. data type conversion failed
+ }
/*
If the cursor was pointing after the last row, the fetch will
diff --git a/sql/spatial.cc b/sql/spatial.cc
index e8d2fb42383..5f045f8cbfa 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -21,6 +21,10 @@
#include "gstream.h" // Gis_read_stream
#include "sql_string.h" // String
+/* This is from item_func.h. Didn't want to #include the whole file. */
+double my_double_round(double value, longlong dec, bool dec_unsigned,
+ bool truncate);
+
#ifdef HAVE_SPATIAL
/*
@@ -58,12 +62,14 @@ Geometry::Class_info *Geometry::ci_collection[Geometry::wkb_last+1]=
static Geometry::Class_info **ci_collection_end=
Geometry::ci_collection+Geometry::wkb_last + 1;
-Geometry::Class_info::Class_info(const char *name, int type_id,
- create_geom_t create_func):
+Geometry::Class_info::Class_info(const char *name, const char *geojson_name,
+ int type_id, create_geom_t create_func):
m_type_id(type_id), m_create_func(create_func)
{
m_name.str= (char *) name;
m_name.length= strlen(name);
+ m_geojson_name.str= (char *) geojson_name;
+ m_geojson_name.length= strlen(geojson_name);
ci_collection[type_id]= this;
}
@@ -105,26 +111,27 @@ static Geometry *create_geometrycollection(char *buffer)
-static Geometry::Class_info point_class("POINT",
+static Geometry::Class_info point_class("POINT", "Point",
Geometry::wkb_point, create_point);
-static Geometry::Class_info linestring_class("LINESTRING",
+static Geometry::Class_info linestring_class("LINESTRING", "LineString",
Geometry::wkb_linestring,
create_linestring);
-static Geometry::Class_info polygon_class("POLYGON",
+static Geometry::Class_info polygon_class("POLYGON", "Polygon",
Geometry::wkb_polygon,
create_polygon);
-static Geometry::Class_info multipoint_class("MULTIPOINT",
+static Geometry::Class_info multipoint_class("MULTIPOINT", "MultiPoint",
Geometry::wkb_multipoint,
create_multipoint);
static Geometry::Class_info
-multilinestring_class("MULTILINESTRING",
+multilinestring_class("MULTILINESTRING", "MultiLineString",
Geometry::wkb_multilinestring, create_multilinestring);
-static Geometry::Class_info multipolygon_class("MULTIPOLYGON",
+static Geometry::Class_info multipolygon_class("MULTIPOLYGON", "MultiPolygon",
Geometry::wkb_multipolygon,
create_multipolygon);
static Geometry::Class_info
-geometrycollection_class("GEOMETRYCOLLECTION",Geometry::wkb_geometrycollection,
+geometrycollection_class("GEOMETRYCOLLECTION", "GeometryCollection",
+ Geometry::wkb_geometrycollection,
create_geometrycollection);
static void get_point(double *x, double *y, const char *data)
@@ -230,6 +237,77 @@ int Geometry::as_wkt(String *wkt, const char **end)
}
+static const uchar type_keyname[]= "type";
+static const uint type_keyname_len= 4;
+static const uchar coord_keyname[]= "coordinates";
+static const uint coord_keyname_len= 11;
+static const uchar geometries_keyname[]= "geometries";
+static const uint geometries_keyname_len= 10;
+static const uchar features_keyname[]= "features";
+static const uint features_keyname_len= 8;
+static const uchar geometry_keyname[]= "geometry";
+static const uint geometry_keyname_len= 8;
+
+static const uint max_keyname_len= 11; /*'coordinates' keyname is the longest.*/
+
+static const uchar feature_type[]= "feature";
+static const int feature_type_len= 7;
+static const uchar feature_coll_type[]= "featurecollection";
+static const int feature_coll_type_len= 17;
+static const uchar bbox_keyname[]= "bbox";
+static const int bbox_keyname_len= 4;
+
+
+int Geometry::as_json(String *wkt, uint max_dec_digits, const char **end)
+{
+ uint32 len= (uint) get_class_info()->m_geojson_name.length;
+ if (wkt->reserve(4 + type_keyname_len + 2 + len + 2 + 2 +
+ coord_keyname_len + 4, 512))
+ return 1;
+ wkt->qs_append("\"", 1);
+ wkt->qs_append((const char *) type_keyname, type_keyname_len);
+ wkt->qs_append("\": \"", 4);
+ wkt->qs_append(get_class_info()->m_geojson_name.str, len);
+ wkt->qs_append("\", \"", 4);
+ if (get_class_info() == &geometrycollection_class)
+ wkt->qs_append((const char *) geometries_keyname, geometries_keyname_len);
+ else
+ wkt->qs_append((const char *) coord_keyname, coord_keyname_len);
+
+ wkt->qs_append("\": ", 3);
+ if (get_data_as_json(wkt, max_dec_digits, end))
+ return 1;
+
+ return 0;
+}
+
+
+int Geometry::bbox_as_json(String *wkt)
+{
+ MBR mbr;
+ const char *end;
+ if (wkt->reserve(5 + bbox_keyname_len + (FLOATING_POINT_DECIMALS+2)*4, 512))
+ return 1;
+ wkt->qs_append("\"", 1);
+ wkt->qs_append((const char *) bbox_keyname, bbox_keyname_len);
+ wkt->qs_append("\": [", 4);
+
+ if (get_mbr(&mbr, &end))
+ return 1;
+
+ wkt->qs_append(mbr.xmin);
+ wkt->qs_append(", ", 2);
+ wkt->qs_append(mbr.ymin);
+ wkt->qs_append(", ", 2);
+ wkt->qs_append(mbr.xmax);
+ wkt->qs_append(", ", 2);
+ wkt->qs_append(mbr.ymax);
+ wkt->qs_append("]", 1);
+
+ return 0;
+}
+
+
static double wkb_get_double(const char *ptr, Geometry::wkbByteOrder bo)
{
double res;
@@ -291,6 +369,196 @@ Geometry *Geometry::create_from_wkb(Geometry_buffer *buffer,
}
+Geometry *Geometry::create_from_json(Geometry_buffer *buffer,
+ json_engine_t *je, bool er_on_3D, String *res)
+{
+ Class_info *ci= NULL;
+ const uchar *coord_start= NULL, *geom_start= NULL,
+ *features_start= NULL, *geometry_start= NULL;
+ Geometry *result;
+ uchar key_buf[max_keyname_len];
+ uint key_len;
+ int fcoll_type_found= 0, feature_type_found= 0;
+
+
+ if (json_read_value(je))
+ goto err_return;
+
+ if (je->value_type != JSON_VALUE_OBJECT)
+ {
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ goto err_return;
+ }
+
+ while (json_scan_next(je) == 0 && je->state != JST_OBJ_END)
+ {
+ DBUG_ASSERT(je->state == JST_KEY);
+
+ key_len=0;
+ while (json_read_keyname_chr(je) == 0)
+ {
+ if (je->s.c_next > 127 || key_len >= max_keyname_len)
+ {
+ /* Symbol out of range, or keyname too long. No need to compare. */
+ key_len=0;
+ break;
+ }
+ key_buf[key_len++]= (uchar)je->s.c_next | 0x20; /* make it lowercase. */
+ }
+
+ if (je->s.error)
+ goto err_return;
+
+ if (key_len == type_keyname_len &&
+ memcmp(key_buf, type_keyname, type_keyname_len) == 0)
+ {
+ /*
+ Found the "type" key. Let's check it's a string and remember
+ the feature's type.
+ */
+ if (json_read_value(je))
+ goto err_return;
+
+ if (je->value_type == JSON_VALUE_STRING)
+ {
+ if ((ci= find_class((const char *) je->value, je->value_len)))
+ {
+ if ((coord_start=
+ (ci == &geometrycollection_class) ? geom_start : coord_start))
+ goto create_geom;
+ }
+ else if (je->value_len == feature_coll_type_len &&
+ my_strnncoll(&my_charset_latin1, je->value, je->value_len,
+ feature_coll_type, feature_coll_type_len) == 0)
+ {
+ /*
+ 'FeatureCollection' type found. Handle the 'FeatureCollection'/'features'
+ GeoJSON construction.
+ */
+ if (features_start)
+ goto handle_feature_collection;
+ fcoll_type_found= 1;
+ }
+ else if (je->value_len == feature_type_len &&
+ my_strnncoll(&my_charset_latin1, je->value, je->value_len,
+ feature_type, feature_type_len) == 0)
+ {
+ if (geometry_start)
+ goto handle_geometry_key;
+ feature_type_found= 1;
+ }
+ }
+ }
+ else if (key_len == coord_keyname_len &&
+ memcmp(key_buf, coord_keyname, coord_keyname_len) == 0)
+ {
+ /*
+ Found the "coordinates" key. Let's check it's an array
+ and remember where it starts.
+ */
+ if (json_read_value(je))
+ goto err_return;
+
+ if (je->value_type == JSON_VALUE_ARRAY)
+ {
+ coord_start= je->value_begin;
+ if (ci && ci != &geometrycollection_class)
+ goto create_geom;
+ }
+ }
+ else if (key_len == geometries_keyname_len &&
+ memcmp(key_buf, geometries_keyname, geometries_keyname_len) == 0)
+ {
+ /*
+ Found the "geometries" key. Let's check it's an array
+ and remember where it starts.
+ */
+ if (json_read_value(je))
+ goto err_return;
+
+ if (je->value_type == JSON_VALUE_ARRAY)
+ {
+ geom_start= je->value_begin;
+ if (ci == &geometrycollection_class)
+ {
+ coord_start= geom_start;
+ goto create_geom;
+ }
+ }
+ }
+ else if (key_len == features_keyname_len &&
+ memcmp(key_buf, features_keyname, features_keyname_len) == 0)
+ {
+ /*
+ 'features' key found. Handle the 'FeatureCollection'/'features'
+ GeoJSON construction.
+ */
+ if (json_read_value(je))
+ goto err_return;
+ if (je->value_type == JSON_VALUE_ARRAY)
+ {
+ features_start= je->value_begin;
+ if (fcoll_type_found)
+ goto handle_feature_collection;
+ }
+ }
+ else if (key_len == geometry_keyname_len &&
+ memcmp(key_buf, geometry_keyname, geometry_keyname_len) == 0)
+ {
+ if (json_read_value(je))
+ goto err_return;
+ if (je->value_type == JSON_VALUE_OBJECT)
+ {
+ geometry_start= je->value_begin;
+ if (feature_type_found)
+ goto handle_geometry_key;
+ }
+ }
+ else
+ {
+ if (json_skip_key(je))
+ goto err_return;
+ }
+ }
+
+ if (je->s.error == 0)
+ {
+ /*
+ We didn't find all the required keys. Those are "type" and "coordinates"
+ or "geometries" for GeometryCollection.
+ */
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ }
+ goto err_return;
+
+handle_feature_collection:
+ ci= &geometrycollection_class;
+ coord_start= features_start;
+
+create_geom:
+
+ json_scan_start(je, je->s.cs, coord_start, je->s.str_end);
+
+ if (res->reserve(1 + 4, 512))
+ goto err_return;
+
+ result= (*ci->m_create_func)(buffer->data);
+ res->q_append((char) wkb_ndr);
+ res->q_append((uint32) result->get_class_info()->m_type_id);
+ if (result->init_from_json(je, er_on_3D, res))
+ goto err_return;
+
+ return result;
+
+handle_geometry_key:
+ json_scan_start(je, je->s.cs, geometry_start, je->s.str_end);
+ return create_from_json(buffer, je, er_on_3D, res);
+
+err_return:
+ return NULL;
+}
+
+
Geometry *Geometry::create_from_opresult(Geometry_buffer *g_buf,
String *res, Gcalc_result_receiver &rr)
{
@@ -429,6 +697,52 @@ const char *Geometry::append_points(String *txt, uint32 n_points,
}
+static void append_json_point(String *txt, uint max_dec, const char *data)
+{
+ double x,y;
+ get_point(&x, &y, data);
+ if (max_dec < FLOATING_POINT_DECIMALS)
+ {
+ x= my_double_round(x, max_dec, FALSE, FALSE);
+ y= my_double_round(y, max_dec, FALSE, FALSE);
+ }
+ txt->qs_append('[');
+ txt->qs_append(x);
+ txt->qs_append(", ", 2);
+ txt->qs_append(y);
+ txt->qs_append(']');
+}
+
+
+/*
+ Append N points from packed format to json
+
+ SYNOPSIS
+ append_json_points()
+ txt Append points here
+ n_points Number of points
+ data Packed data
+ offset Offset between points
+
+ RETURN
+ # end of data
+*/
+
+static const char *append_json_points(String *txt, uint max_dec,
+ uint32 n_points, const char *data, uint32 offset)
+{
+ txt->qs_append('[');
+ while (n_points--)
+ {
+ data+= offset;
+ append_json_point(txt, max_dec, data);
+ data+= POINT_DATA_SIZE;
+ txt->qs_append(", ", 2);
+ }
+ txt->length(txt->length() - 2);// Remove ending ', '
+ txt->qs_append(']');
+ return data;
+}
/*
Get most bounding rectangle (mbr) for X points
@@ -502,6 +816,62 @@ uint Gis_point::init_from_wkb(const char *wkb, uint len,
}
+static int read_point_from_json(json_engine_t *je, bool er_on_3D,
+ double *x, double *y)
+{
+ int n_coord= 0, err;
+ double tmp, *d;
+ char *endptr;
+
+ while (json_scan_next(je) == 0 && je->state != JST_ARRAY_END)
+ {
+ DBUG_ASSERT(je->state == JST_VALUE);
+ if (json_read_value(je))
+ return 1;
+
+ if (je->value_type != JSON_VALUE_NUMBER)
+ goto bad_coordinates;
+
+ d= (n_coord == 0) ? x : ((n_coord == 1) ? y : &tmp);
+ *d= my_strntod(je->s.cs, (char *) je->value,
+ je->value_len, &endptr, &err);
+ if (err)
+ goto bad_coordinates;
+ n_coord++;
+ }
+
+ if (n_coord <= 2 || !er_on_3D)
+ return 0;
+ je->s.error= Geometry::GEOJ_DIMENSION_NOT_SUPPORTED;
+ return 1;
+bad_coordinates:
+ je->s.error= Geometry::GEOJ_INCORRECT_GEOJSON;
+ return 1;
+}
+
+
+bool Gis_point::init_from_json(json_engine_t *je, bool er_on_3D, String *wkb)
+{
+ double x, y;
+ if (json_read_value(je))
+ return TRUE;
+
+ if (je->value_type != JSON_VALUE_ARRAY)
+ {
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ return TRUE;
+ }
+
+ if (read_point_from_json(je, er_on_3D, &x, &y) ||
+ wkb->reserve(POINT_DATA_SIZE))
+ return TRUE;
+
+ wkb->q_append(x);
+ wkb->q_append(y);
+ return FALSE;
+}
+
+
bool Gis_point::get_data_as_wkt(String *txt, const char **end) const
{
double x, y;
@@ -517,6 +887,17 @@ bool Gis_point::get_data_as_wkt(String *txt, const char **end) const
}
+bool Gis_point::get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const
+{
+ if (txt->reserve(MAX_DIGITS_IN_DOUBLE * 2 + 4))
+ return 1;
+ append_json_point(txt, max_dec_digits, m_data);
+ *end= m_data+ POINT_DATA_SIZE;
+ return 0;
+}
+
+
bool Gis_point::get_mbr(MBR *mbr, const char **end) const
{
double x, y;
@@ -630,6 +1011,44 @@ uint Gis_line_string::init_from_wkb(const char *wkb, uint len,
}
+bool Gis_line_string::init_from_json(json_engine_t *je, bool er_on_3D,
+ String *wkb)
+{
+ uint32 n_points= 0;
+ uint32 np_pos= wkb->length();
+ Gis_point p;
+
+ if (json_read_value(je))
+ return TRUE;
+
+ if (je->value_type != JSON_VALUE_ARRAY)
+ {
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ return TRUE;
+ }
+
+ if (wkb->reserve(4, 512))
+ return TRUE;
+ wkb->length(wkb->length()+4); // Reserve space for n_points
+
+ while (json_scan_next(je) == 0 && je->state != JST_ARRAY_END)
+ {
+ DBUG_ASSERT(je->state == JST_VALUE);
+
+ if (p.init_from_json(je, er_on_3D, wkb))
+ return TRUE;
+ n_points++;
+ }
+ if (n_points < 1)
+ {
+ je->s.error= Geometry::GEOJ_TOO_FEW_POINTS;
+ return TRUE;
+ }
+ wkb->write_at_position(np_pos, n_points);
+ return FALSE;
+}
+
+
bool Gis_line_string::get_data_as_wkt(String *txt, const char **end) const
{
uint32 n_points;
@@ -661,6 +1080,28 @@ bool Gis_line_string::get_data_as_wkt(String *txt, const char **end) const
}
+bool Gis_line_string::get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const
+{
+ uint32 n_points;
+ const char *data= m_data;
+
+ if (no_data(data, 4))
+ return 1;
+ n_points= uint4korr(data);
+ data += 4;
+
+ if (n_points < 1 ||
+ not_enough_points(data, n_points) ||
+ txt->reserve((MAX_DIGITS_IN_DOUBLE*2 + 6) * n_points + 2))
+ return 1;
+
+ *end= append_json_points(txt, max_dec_digits, n_points, data, 0);
+
+ return 0;
+}
+
+
bool Gis_line_string::get_mbr(MBR *mbr, const char **end) const
{
return (*end=get_mbr_for_points(mbr, m_data, 0)) == 0;
@@ -854,7 +1295,7 @@ bool Gis_polygon::init_from_wkt(Gis_read_stream *trs, String *wkb)
if (wkb->reserve(4, 512))
return 1;
- wkb->length(wkb->length()+4); // Reserve space for points
+ wkb->length(wkb->length()+4); // Reserve space for n_rings
for (;;)
{
Gis_line_string ls;
@@ -964,6 +1405,55 @@ uint Gis_polygon::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo,
}
+bool Gis_polygon::init_from_json(json_engine_t *je, bool er_on_3D, String *wkb)
+{
+ uint32 n_linear_rings= 0;
+ uint32 lr_pos= wkb->length();
+ int closed;
+
+ if (json_read_value(je))
+ return TRUE;
+
+ if (je->value_type != JSON_VALUE_ARRAY)
+ {
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ return TRUE;
+ }
+
+ if (wkb->reserve(4, 512))
+ return TRUE;
+ wkb->length(wkb->length()+4); // Reserve space for n_rings
+
+ while (json_scan_next(je) == 0 && je->state != JST_ARRAY_END)
+ {
+ Gis_line_string ls;
+ DBUG_ASSERT(je->state == JST_VALUE);
+
+ uint32 ls_pos=wkb->length();
+ if (ls.init_from_json(je, er_on_3D, wkb))
+ return TRUE;
+ ls.set_data_ptr(wkb->ptr() + ls_pos, wkb->length() - ls_pos);
+ if (ls.is_closed(&closed) || !closed)
+ {
+ je->s.error= GEOJ_POLYGON_NOT_CLOSED;
+ return TRUE;
+ }
+ n_linear_rings++;
+ }
+
+ if (je->s.error)
+ return TRUE;
+
+ if (n_linear_rings == 0)
+ {
+ je->s.error= Geometry::GEOJ_EMPTY_COORDINATES;
+ return TRUE;
+ }
+ wkb->write_at_position(lr_pos, n_linear_rings);
+ return FALSE;
+}
+
+
bool Gis_polygon::get_data_as_wkt(String *txt, const char **end) const
{
uint32 n_linear_rings;
@@ -996,6 +1486,39 @@ bool Gis_polygon::get_data_as_wkt(String *txt, const char **end) const
}
+bool Gis_polygon::get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const
+{
+ uint32 n_linear_rings;
+ const char *data= m_data;
+
+ if (no_data(data, 4) || txt->reserve(1, 512))
+ return 1;
+
+ n_linear_rings= uint4korr(data);
+ data+= 4;
+
+ txt->qs_append('[');
+ while (n_linear_rings--)
+ {
+ uint32 n_points;
+ if (no_data(data, 4))
+ return 1;
+ n_points= uint4korr(data);
+ data+= 4;
+ if (not_enough_points(data, n_points) ||
+ txt->reserve(4 + (MAX_DIGITS_IN_DOUBLE * 2 + 6) * n_points))
+ return 1;
+ data= append_json_points(txt, max_dec_digits, n_points, data, 0);
+ txt->qs_append(", ", 2);
+ }
+ txt->length(txt->length() - 2);// Remove ending ', '
+ txt->qs_append(']');
+ *end= data;
+ return 0;
+}
+
+
bool Gis_polygon::get_mbr(MBR *mbr, const char **end) const
{
uint32 n_linear_rings;
@@ -1397,6 +1920,53 @@ uint Gis_multi_point::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo,
}
+bool Gis_multi_point::init_from_json(json_engine_t *je, bool er_on_3D,
+ String *wkb)
+{
+ uint32 n_points= 0;
+ uint32 np_pos= wkb->length();
+ Gis_point p;
+
+ if (json_read_value(je))
+ return TRUE;
+
+ if (je->value_type != JSON_VALUE_ARRAY)
+ {
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ return TRUE;
+ }
+
+ if (wkb->reserve(4, 512))
+ return TRUE;
+ wkb->length(wkb->length()+4); // Reserve space for n_points
+
+ while (json_scan_next(je) == 0 && je->state != JST_ARRAY_END)
+ {
+ DBUG_ASSERT(je->state == JST_VALUE);
+
+ if (wkb->reserve(1 + 4, 512))
+ return TRUE;
+ wkb->q_append((char) wkb_ndr);
+ wkb->q_append((uint32) wkb_point);
+
+ if (p.init_from_json(je, er_on_3D, wkb))
+ return TRUE;
+ n_points++;
+ }
+
+ if (je->s.error)
+ return TRUE;
+ if (n_points == 0)
+ {
+ je->s.error= Geometry::GEOJ_EMPTY_COORDINATES;
+ return TRUE;
+ }
+
+ wkb->write_at_position(np_pos, n_points);
+ return FALSE;
+}
+
+
bool Gis_multi_point::get_data_as_wkt(String *txt, const char **end) const
{
uint32 n_points;
@@ -1414,6 +1984,24 @@ bool Gis_multi_point::get_data_as_wkt(String *txt, const char **end) const
}
+bool Gis_multi_point::get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const
+{
+ uint32 n_points;
+ if (no_data(m_data, 4))
+ return 1;
+
+ n_points= uint4korr(m_data);
+ if (n_points > max_n_points ||
+ not_enough_points(m_data+4, n_points, WKB_HEADER_SIZE) ||
+ txt->reserve((MAX_DIGITS_IN_DOUBLE * 2 + 6) * n_points + 2))
+ return 1;
+ *end= append_json_points(txt, max_dec_digits, n_points, m_data+4,
+ WKB_HEADER_SIZE);
+ return 0;
+}
+
+
bool Gis_multi_point::get_mbr(MBR *mbr, const char **end) const
{
return (*end= get_mbr_for_points(mbr, m_data, WKB_HEADER_SIZE)) == 0;
@@ -1609,6 +2197,54 @@ uint Gis_multi_line_string::init_from_wkb(const char *wkb, uint len,
}
+bool Gis_multi_line_string::init_from_json(json_engine_t *je, bool er_on_3D,
+ String *wkb)
+{
+ uint32 n_line_strings= 0;
+ uint32 ls_pos= wkb->length();
+
+ if (json_read_value(je))
+ return TRUE;
+
+ if (je->value_type != JSON_VALUE_ARRAY)
+ {
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ return TRUE;
+ }
+
+ if (wkb->reserve(4, 512))
+ return TRUE;
+ wkb->length(wkb->length()+4); // Reserve space for n_line_strings
+
+ while (json_scan_next(je) == 0 && je->state != JST_ARRAY_END)
+ {
+ Gis_line_string ls;
+ DBUG_ASSERT(je->state == JST_VALUE);
+
+ if (wkb->reserve(1 + 4, 512))
+ return TRUE;
+ wkb->q_append((char) wkb_ndr);
+ wkb->q_append((uint32) wkb_linestring);
+
+ if (ls.init_from_json(je, er_on_3D, wkb))
+ return TRUE;
+
+ n_line_strings++;
+ }
+ if (je->s.error)
+ return TRUE;
+
+ if (n_line_strings == 0)
+ {
+ je->s.error= Geometry::GEOJ_EMPTY_COORDINATES;
+ return TRUE;
+ }
+
+ wkb->write_at_position(ls_pos, n_line_strings);
+ return FALSE;
+}
+
+
bool Gis_multi_line_string::get_data_as_wkt(String *txt,
const char **end) const
{
@@ -1641,6 +2277,38 @@ bool Gis_multi_line_string::get_data_as_wkt(String *txt,
}
+bool Gis_multi_line_string::get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const
+{
+ uint32 n_line_strings;
+ const char *data= m_data;
+
+ if (no_data(data, 4) || txt->reserve(1, 512))
+ return 1;
+ n_line_strings= uint4korr(data);
+ data+= 4;
+
+ txt->qs_append('[');
+ while (n_line_strings--)
+ {
+ uint32 n_points;
+ if (no_data(data, (WKB_HEADER_SIZE + 4)))
+ return 1;
+ n_points= uint4korr(data + WKB_HEADER_SIZE);
+ data+= WKB_HEADER_SIZE + 4;
+ if (not_enough_points(data, n_points) ||
+ txt->reserve(2 + (MAX_DIGITS_IN_DOUBLE * 2 + 6) * n_points))
+ return 1;
+ data= append_json_points(txt, max_dec_digits, n_points, data, 0);
+ txt->qs_append(", ", 2);
+ }
+ txt->length(txt->length() - 2);
+ txt->qs_append(']');
+ *end= data;
+ return 0;
+}
+
+
bool Gis_multi_line_string::get_mbr(MBR *mbr, const char **end) const
{
uint32 n_line_strings;
@@ -1923,7 +2591,53 @@ uint Gis_multi_polygon::init_from_opresult(String *bin,
n_poly++;
}
bin->write_at_position(np_pos, n_poly);
- return opres - opres_orig;
+ return (uint)(opres - opres_orig);
+}
+
+
+bool Gis_multi_polygon::init_from_json(json_engine_t *je, bool er_on_3D,
+ String *wkb)
+{
+ uint32 n_polygons= 0;
+ int np_pos= wkb->length();
+ Gis_polygon p;
+
+ if (json_read_value(je))
+ return TRUE;
+
+ if (je->value_type != JSON_VALUE_ARRAY)
+ {
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ return TRUE;
+ }
+
+ if (wkb->reserve(4, 512))
+ return TRUE;
+ wkb->length(wkb->length()+4); // Reserve space for n_polygons
+
+ while (json_scan_next(je) == 0 && je->state != JST_ARRAY_END)
+ {
+ DBUG_ASSERT(je->state == JST_VALUE);
+
+ if (wkb->reserve(1 + 4, 512))
+ return TRUE;
+ wkb->q_append((char) wkb_ndr);
+ wkb->q_append((uint32) wkb_polygon);
+
+ if (p.init_from_json(je, er_on_3D, wkb))
+ return TRUE;
+
+ n_polygons++;
+ }
+ if (je->s.error)
+ return TRUE;
+ if (n_polygons == 0)
+ {
+ je->s.error= Geometry::GEOJ_EMPTY_COORDINATES;
+ return TRUE;
+ }
+ wkb->write_at_position(np_pos, n_polygons);
+ return FALSE;
}
@@ -1971,6 +2685,51 @@ bool Gis_multi_polygon::get_data_as_wkt(String *txt, const char **end) const
}
+bool Gis_multi_polygon::get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const
+{
+ uint32 n_polygons;
+ const char *data= m_data;
+
+ if (no_data(data, 4) || txt->reserve(1, 512))
+ return 1;
+ n_polygons= uint4korr(data);
+ data+= 4;
+
+ txt->q_append('[');
+ while (n_polygons--)
+ {
+ uint32 n_linear_rings;
+ if (no_data(data, 4 + WKB_HEADER_SIZE) ||
+ txt->reserve(1, 512))
+ return 1;
+ n_linear_rings= uint4korr(data+WKB_HEADER_SIZE);
+ data+= 4 + WKB_HEADER_SIZE;
+ txt->q_append('[');
+
+ while (n_linear_rings--)
+ {
+ if (no_data(data, 4))
+ return 1;
+ uint32 n_points= uint4korr(data);
+ data+= 4;
+ if (not_enough_points(data, n_points) ||
+ txt->reserve(2 + (MAX_DIGITS_IN_DOUBLE * 2 + 6) * n_points,
+ 512))
+ return 1;
+ data= append_json_points(txt, max_dec_digits, n_points, data, 0);
+ txt->qs_append(", ", 2);
+ }
+ txt->length(txt->length() - 2);
+ txt->qs_append("], ", 3);
+ }
+ txt->length(txt->length() - 2);
+ txt->q_append(']');
+ *end= data;
+ return 0;
+}
+
+
bool Gis_multi_polygon::get_mbr(MBR *mbr, const char **end) const
{
uint32 n_polygons;
@@ -2319,6 +3078,48 @@ uint Gis_geometry_collection::init_from_wkb(const char *wkb, uint len,
}
+bool Gis_geometry_collection::init_from_json(json_engine_t *je, bool er_on_3D,
+ String *wkb)
+{
+ uint32 n_objects= 0;
+ uint32 no_pos= wkb->length();
+ Geometry_buffer buffer;
+ Geometry *g;
+
+ if (json_read_value(je))
+ return TRUE;
+
+ if (je->value_type != JSON_VALUE_ARRAY)
+ {
+ je->s.error= GEOJ_INCORRECT_GEOJSON;
+ return TRUE;
+ }
+
+ if (wkb->reserve(4, 512))
+ return TRUE;
+ wkb->length(wkb->length()+4); // Reserve space for n_objects
+
+ while (json_scan_next(je) == 0 && je->state != JST_ARRAY_END)
+ {
+ json_engine_t sav_je= *je;
+
+ DBUG_ASSERT(je->state == JST_VALUE);
+
+ if (!(g= create_from_json(&buffer, je, er_on_3D, wkb)))
+ return TRUE;
+
+ *je= sav_je;
+ if (json_skip_array_item(je))
+ return TRUE;
+
+ n_objects++;
+ }
+
+ wkb->write_at_position(no_pos, n_objects);
+ return FALSE;
+}
+
+
bool Gis_geometry_collection::get_data_as_wkt(String *txt,
const char **end) const
{
@@ -2363,6 +3164,46 @@ exit:
}
+bool Gis_geometry_collection::get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const
+{
+ uint32 n_objects;
+ Geometry_buffer buffer;
+ Geometry *geom;
+ const char *data= m_data;
+
+ if (no_data(data, 4) || txt->reserve(1, 512))
+ return 1;
+ n_objects= uint4korr(data);
+ data+= 4;
+
+ txt->qs_append('[');
+ while (n_objects--)
+ {
+ uint32 wkb_type;
+
+ if (no_data(data, WKB_HEADER_SIZE))
+ return 1;
+ wkb_type= uint4korr(data + 1);
+ data+= WKB_HEADER_SIZE;
+
+ if (!(geom= create_by_typeid(&buffer, wkb_type)))
+ return 1;
+ geom->set_data_ptr(data, (uint) (m_data_end - data));
+ if (txt->append("{", 1) ||
+ geom->as_json(txt, max_dec_digits, &data) ||
+ txt->append(STRING_WITH_LEN("}, "), 512))
+ return 1;
+ }
+ txt->length(txt->length() - 2);
+ if (txt->append("]", 1))
+ return 1;
+
+ *end= data;
+ return 0;
+}
+
+
bool Gis_geometry_collection::get_mbr(MBR *mbr, const char **end) const
{
uint32 n_objects;
diff --git a/sql/spatial.h b/sql/spatial.h
index 6f50acac984..901544b6916 100644
--- a/sql/spatial.h
+++ b/sql/spatial.h
@@ -20,6 +20,7 @@
#include "sql_string.h" /* String, LEX_STRING */
#include <my_compiler.h>
+#include <json_lib.h>
#ifdef HAVE_SPATIAL
@@ -249,6 +250,15 @@ public:
wkb_xdr= 0, /* Big Endian */
wkb_ndr= 1 /* Little Endian */
};
+ enum geojson_errors
+ {
+ GEOJ_INCORRECT_GEOJSON= 1,
+ GEOJ_TOO_FEW_POINTS= 2,
+ GEOJ_POLYGON_NOT_CLOSED= 3,
+ GEOJ_DIMENSION_NOT_SUPPORTED= 4,
+ GEOJ_EMPTY_COORDINATES= 5,
+ };
+
/** Callback which creates Geometry objects on top of a given placement. */
typedef Geometry *(*create_geom_t)(char *);
@@ -257,9 +267,11 @@ public:
{
public:
LEX_STRING m_name;
+ LEX_STRING m_geojson_name;
int m_type_id;
create_geom_t m_create_func;
- Class_info(const char *name, int type_id, create_geom_t create_func);
+ Class_info(const char *name, const char *geojson_name,
+ int type_id, create_geom_t create_func);
};
virtual const Class_info *get_class_info() const=0;
@@ -271,8 +283,12 @@ public:
virtual uint init_from_opresult(String *bin,
const char *opres, uint res_len)
{ return init_from_wkb(opres + 4, UINT_MAX32, wkb_ndr, bin) + 4; }
+ virtual bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb)
+ { return true; }
virtual bool get_data_as_wkt(String *txt, const char **end) const=0;
+ virtual bool get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const=0;
virtual bool get_mbr(MBR *mbr, const char **end) const=0;
virtual bool dimension(uint32 *dim, const char **end) const=0;
virtual int get_x(double *x) const { return -1; }
@@ -302,9 +318,13 @@ public:
bool init_stream=1);
static Geometry *create_from_wkb(Geometry_buffer *buffer,
const char *wkb, uint32 len, String *res);
+ static Geometry *create_from_json(Geometry_buffer *buffer, json_engine_t *je,
+ bool er_on_3D, String *res);
static Geometry *create_from_opresult(Geometry_buffer *g_buf,
String *res, Gcalc_result_receiver &rr);
int as_wkt(String *wkt, const char **end);
+ int as_json(String *wkt, uint max_dec_digits, const char **end);
+ int bbox_as_json(String *wkt);
inline void set_data_ptr(const char *data, uint32 data_len)
{
@@ -352,7 +372,7 @@ protected:
Need to perform the calculation in logical units, since multiplication
can overflow the size data type.
- @arg data pointer to the begining of the points array
+ @arg data pointer to the beginning of the points array
@arg expected_points number of points expected
@arg extra_point_space extra space for each point element in the array
@return true if there are not enough points
@@ -379,7 +399,10 @@ public:
uint32 get_data_size() const;
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
+ bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
+ bool get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const;
bool get_mbr(MBR *mbr, const char **end) const;
int get_xy(double *x, double *y) const
@@ -431,7 +454,10 @@ public:
uint32 get_data_size() const;
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
+ bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
+ bool get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const;
bool get_mbr(MBR *mbr, const char **end) const;
int geom_length(double *len, const char **end) const;
int area(double *ar, const char **end) const;
@@ -462,7 +488,10 @@ public:
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
uint init_from_opresult(String *bin, const char *opres, uint res_len);
+ bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
+ bool get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const;
bool get_mbr(MBR *mbr, const char **end) const;
int area(double *ar, const char **end) const;
int exterior_ring(String *result) const;
@@ -496,7 +525,10 @@ public:
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
uint init_from_opresult(String *bin, const char *opres, uint res_len);
+ bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
+ bool get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const;
bool get_mbr(MBR *mbr, const char **end) const;
int num_geometries(uint32 *num) const;
int geometry_n(uint32 num, String *result) const;
@@ -522,7 +554,10 @@ public:
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
uint init_from_opresult(String *bin, const char *opres, uint res_len);
+ bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
+ bool get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const;
bool get_mbr(MBR *mbr, const char **end) const;
int num_geometries(uint32 *num) const;
int geometry_n(uint32 num, String *result) const;
@@ -549,7 +584,10 @@ public:
uint32 get_data_size() const;
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
+ bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
+ bool get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const;
bool get_mbr(MBR *mbr, const char **end) const;
int num_geometries(uint32 *num) const;
int geometry_n(uint32 num, String *result) const;
@@ -578,7 +616,10 @@ public:
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
uint init_from_opresult(String *bin, const char *opres, uint res_len);
+ bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
+ bool get_data_as_json(String *txt, uint max_dec_digits,
+ const char **end) const;
bool get_mbr(MBR *mbr, const char **end) const;
int area(double *ar, const char **end) const;
int geom_length(double *len, const char **end) const;
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index c2e5bfd8c11..871744c6b36 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -365,6 +365,10 @@ static bool show_table_and_column_privileges(THD *, const char *, const char *,
static int show_routine_grants(THD *, const char *, const char *, HASH *,
const char *, int, char *, int);
+class Grant_tables;
+class User_table;
+class Proxies_priv_table;
+
class ACL_PROXY_USER :public ACL_ACCESS
{
acl_host_and_ip host;
@@ -412,14 +416,7 @@ public:
with_grant_arg);
}
- void init(TABLE *table, MEM_ROOT *mem)
- {
- init (get_field(mem, table->field[MYSQL_PROXIES_PRIV_HOST]),
- get_field(mem, table->field[MYSQL_PROXIES_PRIV_USER]),
- get_field(mem, table->field[MYSQL_PROXIES_PRIV_PROXIED_HOST]),
- get_field(mem, table->field[MYSQL_PROXIES_PRIV_PROXIED_USER]),
- table->field[MYSQL_PROXIES_PRIV_WITH_GRANT]->val_int() != 0);
- }
+ void init(const Proxies_priv_table& proxies_priv_table, MEM_ROOT *mem);
bool get_with_grant() { return with_grant; }
const char *get_user() { return user; }
@@ -733,7 +730,6 @@ static DYNAMIC_ARRAY acl_wild_hosts;
static Hash_filo<acl_entry> *acl_cache;
static uint grant_version=0; /* Version of priv tables. incremented by acl_load */
static ulong get_access(TABLE *form,uint fieldnr, uint *next_field=0);
-static bool check_is_role(TABLE *form);
static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b);
static ulong get_sort(uint count,...);
static void init_check_host(void);
@@ -744,12 +740,12 @@ static ACL_USER *find_user_wild(const char *host, const char *user, const char *
static ACL_ROLE *find_acl_role(const char *user);
static ROLE_GRANT_PAIR *find_role_grant_pair(const LEX_STRING *u, const LEX_STRING *h, const LEX_STRING *r);
static ACL_USER_BASE *find_acl_user_base(const char *user, const char *host);
-static bool update_user_table(THD *, TABLE *, const char *, const char *, const
+static bool update_user_table(THD *, const User_table &, const char *, const char *, const
char *, uint);
-static bool acl_load(THD *thd, TABLE_LIST *tables);
-static bool grant_load(THD *thd, TABLE_LIST *tables);
+static bool acl_load(THD *thd, const Grant_tables& grant_tables);
static inline void get_grantor(THD *thd, char* grantor);
static bool add_role_user_mapping(const char *uname, const char *hname, const char *rname);
+static bool get_YN_as_bool(Field *field);
#define ROLE_CYCLE_FOUND 2
static int traverse_role_graph_up(ACL_ROLE *, void *,
@@ -776,7 +772,7 @@ enum enum_acl_tables
ROLES_MAPPING_TABLE,
TABLES_MAX // <== always the last
};
-// bits for open_grant_tables
+
static const int Table_user= 1 << USER_TABLE;
static const int Table_db= 1 << DB_TABLE;
static const int Table_tables_priv= 1 << TABLES_PRIV_TABLE;
@@ -786,30 +782,641 @@ static const int Table_procs_priv= 1 << PROCS_PRIV_TABLE;
static const int Table_proxies_priv= 1 << PROXIES_PRIV_TABLE;
static const int Table_roles_mapping= 1 << ROLES_MAPPING_TABLE;
-static int open_grant_tables(THD *, TABLE_LIST *, enum thr_lock_type, int);
+/**
+ Base class representing a generic grant table from the mysql database.
+
+ The potential tables that this class can represent are:
+ user, db, columns_priv, tables_priv, host, procs_priv, proxies_priv,
+ roles_mapping
+
+  Objects belonging to this parent class can only be constructed by the
+  Grant_tables class. This ensures the correct initialization of the objects.
+*/
+class Grant_table_base
+{
+ public:
+ /* Number of fields for this Grant Table. */
+ uint num_fields() const { return tl.table->s->fields; }
+  /* Check if the table exists after an attempt to open it was made.
+     Some tables, such as the host table in MySQL 5.6.7+, may be missing. */
+ bool table_exists() const { return tl.table; };
+ /* Initializes the READ_RECORD structure provided as a parameter
+ to read through the whole table, with all columns available. Cleaning up
+ is the caller's job. */
+ bool init_read_record(READ_RECORD* info, THD* thd) const
+ {
+ DBUG_ASSERT(tl.table);
+ bool result= ::init_read_record(info, thd, tl.table, NULL, NULL, 1,
+ true, false);
+ if (!result)
+ tl.table->use_all_columns();
+ return result;
+ }
+
+ /* Return the number of privilege columns for this table. */
+ uint num_privileges() const { return num_privilege_cols; }
+ /* Return a privilege column by index. */
+ Field* priv_field(uint privilege_idx) const
+ {
+ DBUG_ASSERT(privilege_idx < num_privileges());
+ return tl.table->field[start_privilege_column + privilege_idx];
+ }
+
+ /* Fetch the privileges from the table as a set of bits. The first column
+ is represented by the first bit in the result, the second column by the
+ second bit, etc. */
+ ulong get_access() const
+ {
+ return get_access(start_privilege_column,
+ start_privilege_column + num_privileges() - 1);
+ }
+
+ /* Return the underlying TABLE handle. */
+ TABLE* table() const
+ {
+ return tl.table;
+ }
+
+ /** Check if the table was opened, issue an error otherwise. */
+ int no_such_table() const
+ {
+ if (table_exists())
+ return 0;
+
+ my_error(ER_NO_SUCH_TABLE, MYF(0), tl.db, tl.alias);
+ return 1;
+ }
+
+
+ protected:
+ friend class Grant_tables;
+
+ Grant_table_base() : start_privilege_column(0), num_privilege_cols(0)
+ {
+ tl.reset();
+ };
+
+ /* Initialization sequence common for all grant tables. This should be called
+ after all table-specific initialization is performed. */
+ void init(enum thr_lock_type lock_type, bool is_optional)
+ {
+ tl.open_type= OT_BASE_ONLY;
+ if (lock_type >= TL_WRITE_ALLOW_WRITE)
+ tl.updating= 1;
+ if (is_optional)
+ tl.open_strategy= TABLE_LIST::OPEN_IF_EXISTS;
+ }
+
+ /*
+ Get all access bits from table between start_field and end_field indices.
-const LEX_STRING acl_table_names[]= // matches enum_acl_tables
+ IMPLEMENTATION
+ The record should be already read in table->record[0]. All privileges
+ are specified as an ENUM(Y,N).
+
+ SYNOPSIS
+ get_access()
+ start_field_idx The field index at which the first privilege
+ specification begins.
+ end_field_idx The field index at which the last privilege
+ specification is located.
+
+ RETURN VALUE
+ privilege mask
+ */
+ ulong get_access(uint start_field_idx, uint end_field_idx) const
+ {
+ ulong access_bits= 0, bit= 1;
+ for (uint i = start_field_idx; i <= end_field_idx; i++, bit<<=1)
+ {
+ if (get_YN_as_bool(tl.table->field[i]))
+ access_bits|= bit;
+ }
+ return access_bits;
+ }
+
+ /* Compute how many privilege columns this table has. This method
+ can only be called after the table has been opened.
+
+ IMPLEMENTATION
+ A privilege column is of type enum('Y', 'N'). Privilege columns are
+ expected to be one after another.
+ */
+ void compute_num_privilege_cols()
+ {
+ if (!table_exists()) // Table does not exist or not opened.
+ return;
+
+ num_privilege_cols= 0;
+ for (uint i= 0; i < num_fields(); i++)
+ {
+ Field *field= tl.table->field[i];
+ if (num_privilege_cols > 0 && field->real_type() != MYSQL_TYPE_ENUM)
+ return;
+ if (field->real_type() == MYSQL_TYPE_ENUM &&
+ static_cast<Field_enum*>(field)->typelib->count == 2)
+ {
+ num_privilege_cols++;
+ if (num_privilege_cols == 1)
+ start_privilege_column= i;
+ }
+ }
+ }
+
+ /* The index at which privilege columns start. */
+ uint start_privilege_column;
+ /* The number of privilege columns in the table. */
+ uint num_privilege_cols;
+
+ TABLE_LIST tl;
+};
+
+class User_table: public Grant_table_base
+{
+ public:
+  /* Field getters return NULL if the column is not present in the table.
+     This is consistent only if the table is in a supported version. We do
+     not guard against corrupt tables. (yet) */
+  Field* host() const
+  { return get_field(0); }
+  Field* user() const
+  { return get_field(1); }
+  /* NOTE: have_password() (see below) is true when the Password column is
+     ABSENT, hence NULL is returned in that case. */
+  Field* password() const
+  { return have_password() ? NULL : tl.table->field[2]; }
+  /* Columns after privilege columns. */
+  Field* ssl_type() const
+  { return get_field(start_privilege_column + num_privileges()); }
+  Field* ssl_cipher() const
+  { return get_field(start_privilege_column + num_privileges() + 1); }
+  Field* x509_issuer() const
+  { return get_field(start_privilege_column + num_privileges() + 2); }
+  Field* x509_subject() const
+  { return get_field(start_privilege_column + num_privileges() + 3); }
+  Field* max_questions() const
+  { return get_field(start_privilege_column + num_privileges() + 4); }
+  Field* max_updates() const
+  { return get_field(start_privilege_column + num_privileges() + 5); }
+  Field* max_connections() const
+  { return get_field(start_privilege_column + num_privileges() + 6); }
+  Field* max_user_connections() const
+  { return get_field(start_privilege_column + num_privileges() + 7); }
+  Field* plugin() const
+  { return get_field(start_privilege_column + num_privileges() + 8); }
+  Field* authentication_string() const
+  { return get_field(start_privilege_column + num_privileges() + 9); }
+  Field* password_expired() const
+  { return get_field(start_privilege_column + num_privileges() + 10); }
+  Field* is_role() const
+  { return get_field(start_privilege_column + num_privileges() + 11); }
+  Field* default_role() const
+  { return get_field(start_privilege_column + num_privileges() + 12); }
+  Field* max_statement_time() const
+  { return get_field(start_privilege_column + num_privileges() + 13); }
+
+  /*
+    Check if a user entry in the user table is marked as being a role entry
+
+    IMPLEMENTATION
+    Access the corresponding column and check the corresponding ENUM of the
+    form ENUM('N', 'Y')
+
+    SYNOPSIS
+      check_is_role()
+      The record should already have been read into table->record[0].
+
+    RETURN VALUE
+      TRUE if the user is marked as a role
+      FALSE otherwise
+  */
+  bool check_is_role() const
+  {
+    /* Table version does not support roles */
+    if (!is_role())
+      return false;
+
+    return get_YN_as_bool(is_role());
+  }
+
+
+ private:
+  friend class Grant_tables;
+
+  /* Only Grant_tables can instantiate this class. */
+  User_table() {};
+
+  /* Set up the TABLE_LIST for mysql.user with the given lock type. */
+  void init(enum thr_lock_type lock_type)
+  {
+    /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
+    tl.init_one_table(C_STRING_WITH_LEN("mysql"),
+                      C_STRING_WITH_LEN("user"),
+                      NULL, lock_type);
+    Grant_table_base::init(lock_type, false);
+  }
+
+  /* The user table is a bit different compared to the other Grant tables.
+     Usually, we only add columns to the grant tables when adding functionality.
+     This makes it easy to test which version of the table we are using, by
+     just looking at the number of fields present in the table.
+
+     In MySQL 5.7.6 the Password column was removed. We need to guard for that.
+     The field-fetching methods for the User table return NULL if the field
+     doesn't exist. This simplifies checking of table "version", as we don't
+     have to make use of num_fields() any more.
+  */
+  inline Field* get_field(uint field_num) const
+  {
+    if (field_num >= num_fields())
+      return NULL;
+
+    return tl.table->field[field_num];
+  }
+
+  /* Normally the Password column is the third column (index 2) in the table.
+     If the privilege columns start at index 2 instead, the Password column
+     is missing, which means we are using a MySQL 5.7.6+ data directory.
+     NOTE: despite its name, this returns true when the Password column is
+     ABSENT; password() above relies on this. */
+  bool have_password() const { return start_privilege_column == 2; }
+
+};
+
+class Db_table: public Grant_table_base
{
- { C_STRING_WITH_LEN("user") },
- { C_STRING_WITH_LEN("db") },
- { C_STRING_WITH_LEN("tables_priv") },
- { C_STRING_WITH_LEN("columns_priv") },
- { C_STRING_WITH_LEN("host") },
- { C_STRING_WITH_LEN("procs_priv") },
- { C_STRING_WITH_LEN("proxies_priv") },
- { C_STRING_WITH_LEN("roles_mapping") }
+ public:
+ Field* host() const { return tl.table->field[0]; }
+ Field* db() const { return tl.table->field[1]; }
+ Field* user() const { return tl.table->field[2]; }
+
+ private:
+ friend class Grant_tables;
+
+ Db_table() {};
+
+ void init(enum thr_lock_type lock_type)
+ {
+ /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
+ tl.init_one_table(C_STRING_WITH_LEN("mysql"),
+ C_STRING_WITH_LEN("db"),
+ NULL, lock_type);
+ Grant_table_base::init(lock_type, false);
+ }
};
-/** check if the table was opened, issue an error otherwise */
-static int no_such_table(TABLE_LIST *tl)
+class Tables_priv_table: public Grant_table_base
{
- if (tl->table)
- return 0;
+ public:
+ Field* host() const { return tl.table->field[0]; }
+ Field* db() const { return tl.table->field[1]; }
+ Field* user() const { return tl.table->field[2]; }
+ Field* table_name() const { return tl.table->field[3]; }
+ Field* grantor() const { return tl.table->field[4]; }
+ Field* timestamp() const { return tl.table->field[5]; }
+ Field* table_priv() const { return tl.table->field[6]; }
+ Field* column_priv() const { return tl.table->field[7]; }
- my_error(ER_NO_SUCH_TABLE, MYF(0), tl->db, tl->alias);
- return 1;
+ private:
+ friend class Grant_tables;
+
+ Tables_priv_table() {};
+
+ void init(enum thr_lock_type lock_type, Grant_table_base *next_table= NULL)
+ {
+ /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
+ tl.init_one_table(C_STRING_WITH_LEN("mysql"),
+ C_STRING_WITH_LEN("tables_priv"),
+ NULL, lock_type);
+ Grant_table_base::init(lock_type, false);
+ }
+};
+
+class Columns_priv_table: public Grant_table_base
+{
+ public:
+ Field* host() const { return tl.table->field[0]; }
+ Field* db() const { return tl.table->field[1]; }
+ Field* user() const { return tl.table->field[2]; }
+ Field* table_name() const { return tl.table->field[3]; }
+ Field* column_name() const { return tl.table->field[4]; }
+ Field* timestamp() const { return tl.table->field[5]; }
+ Field* column_priv() const { return tl.table->field[6]; }
+
+ private:
+ friend class Grant_tables;
+
+ Columns_priv_table() {};
+
+ void init(enum thr_lock_type lock_type)
+ {
+ /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
+ tl.init_one_table(C_STRING_WITH_LEN("mysql"),
+ C_STRING_WITH_LEN("columns_priv"),
+ NULL, lock_type);
+ Grant_table_base::init(lock_type, false);
+ }
+};
+
+/* Wrapper for the mysql.host table. This table is optional: it was removed
+   in MySQL 5.6.7+, so init() passes is_optional=true and the open uses
+   OPEN_IF_EXISTS. Callers must check table_exists() before the getters. */
+class Host_table: public Grant_table_base
+{
+ public:
+  /* Column getters; indices match the mysql.host table definition. */
+  Field* host() const { return tl.table->field[0]; }
+  Field* db() const { return tl.table->field[1]; }
+
+ private:
+  friend class Grant_tables;
+
+  /* Only Grant_tables may construct this class. */
+  Host_table() {}
+
+  /* Set up the TABLE_LIST for mysql.host with the given lock type. */
+  void init(enum thr_lock_type lock_type)
+  {
+    /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
+    tl.init_one_table(C_STRING_WITH_LEN("mysql"),
+                      C_STRING_WITH_LEN("host"),
+                      NULL, lock_type);
+    Grant_table_base::init(lock_type, true);
+  }
+};
+
+class Procs_priv_table: public Grant_table_base
+{
+ public:
+ Field* host() const { return tl.table->field[0]; }
+ Field* db() const { return tl.table->field[1]; }
+ Field* user() const { return tl.table->field[2]; }
+ Field* routine_name() const { return tl.table->field[3]; }
+ Field* routine_type() const { return tl.table->field[4]; }
+ Field* grantor() const { return tl.table->field[5]; }
+ Field* proc_priv() const { return tl.table->field[6]; }
+ Field* timestamp() const { return tl.table->field[7]; }
+
+ private:
+ friend class Grant_tables;
+
+ Procs_priv_table() {}
+
+ void init(enum thr_lock_type lock_type)
+ {
+ /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
+ tl.init_one_table(C_STRING_WITH_LEN("mysql"),
+ C_STRING_WITH_LEN("procs_priv"),
+ NULL, lock_type);
+ Grant_table_base::init(lock_type, true);
+ }
+};
+
+/* Wrapper for the mysql.proxies_priv table (optional: opened with
+   OPEN_IF_EXISTS via is_optional=true). */
+class Proxies_priv_table: public Grant_table_base
+{
+ public:
+  /* Column getters; indices match the mysql.proxies_priv table definition. */
+  Field* host() const { return tl.table->field[0]; }
+  Field* user() const { return tl.table->field[1]; }
+  Field* proxied_host() const { return tl.table->field[2]; }
+  Field* proxied_user() const { return tl.table->field[3]; }
+  Field* with_grant() const { return tl.table->field[4]; }
+  Field* grantor() const { return tl.table->field[5]; }
+  Field* timestamp() const { return tl.table->field[6]; }
+
+ private:
+  friend class Grant_tables;
+
+  /* Only Grant_tables may construct this class. */
+  Proxies_priv_table() {}
+
+  /* Set up the TABLE_LIST for mysql.proxies_priv with the given lock type. */
+  void init(enum thr_lock_type lock_type)
+  {
+    /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
+    tl.init_one_table(C_STRING_WITH_LEN("mysql"),
+                      C_STRING_WITH_LEN("proxies_priv"),
+                      NULL, lock_type);
+    Grant_table_base::init(lock_type, true);
+  }
+};
+
+/* Wrapper for the mysql.roles_mapping table (optional: opened with
+   OPEN_IF_EXISTS via is_optional=true). */
+class Roles_mapping_table: public Grant_table_base
+{
+ public:
+  /* Column getters; indices match the mysql.roles_mapping table definition. */
+  Field* host() const { return tl.table->field[0]; }
+  Field* user() const { return tl.table->field[1]; }
+  Field* role() const { return tl.table->field[2]; }
+  Field* admin_option() const { return tl.table->field[3]; }
+
+ private:
+  friend class Grant_tables;
+
+  /* Only Grant_tables may construct this class. */
+  Roles_mapping_table() {}
+
+  /* Set up the TABLE_LIST for mysql.roles_mapping with the given lock type. */
+  void init(enum thr_lock_type lock_type)
+  {
+    /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
+    tl.init_one_table(C_STRING_WITH_LEN("mysql"),
+                      C_STRING_WITH_LEN("roles_mapping"),
+                      NULL, lock_type);
+    Grant_table_base::init(lock_type, true);
+  }
+};
+
+/**
+ Class that represents a collection of grant tables.
+*/
+class Grant_tables
+{
+ public:
+ /* When constructing the Grant_tables object, we initialize only
+ the tables which are going to be opened.
+ @param which_tables Bitmap of which tables to open.
+ @param lock_type Lock type to use when opening tables.
+ */
+ Grant_tables(int which_tables, enum thr_lock_type lock_type)
+ {
+ DBUG_ENTER("Grant_tables::Grant_tables");
+ DBUG_PRINT("info", ("which_tables: %x, lock_type: %u",
+ which_tables, lock_type));
+ DBUG_ASSERT(which_tables); /* At least one table must be opened. */
+ Grant_table_base* prev= NULL;
+ /* We start from the last table, Table_roles_mapping, such that
+ the first one in the linked list is Table_user. */
+ if (which_tables & Table_roles_mapping)
+ {
+ m_roles_mapping_table.init(lock_type);
+ prev= &m_roles_mapping_table;
+ }
+ if (which_tables & Table_proxies_priv)
+ {
+ m_proxies_priv_table.init(lock_type);
+ link_tables(&m_proxies_priv_table, prev);
+ prev= &m_proxies_priv_table;
+ }
+ if (which_tables & Table_procs_priv)
+ {
+ m_procs_priv_table.init(lock_type);
+ link_tables(&m_procs_priv_table, prev);
+ prev= &m_procs_priv_table;
+ }
+ if (which_tables & Table_host)
+ {
+ m_host_table.init(lock_type);
+ link_tables(&m_host_table, prev);
+ prev= &m_host_table;
+ }
+ if (which_tables & Table_columns_priv)
+ {
+ m_columns_priv_table.init(lock_type);
+ link_tables(&m_columns_priv_table, prev);
+ prev= &m_columns_priv_table;
+ }
+ if (which_tables & Table_tables_priv)
+ {
+ m_tables_priv_table.init(lock_type);
+ link_tables(&m_tables_priv_table, prev);
+ prev= &m_tables_priv_table;
+ }
+ if (which_tables & Table_db)
+ {
+ m_db_table.init(lock_type);
+ link_tables(&m_db_table, prev);
+ prev= &m_db_table;
+ }
+ if (which_tables & Table_user)
+ {
+ m_user_table.init(lock_type);
+ link_tables(&m_user_table, prev);
+ prev= &m_user_table;
+ }
+
+ first_table_in_list= prev;
+ DBUG_VOID_RETURN;
+ }
+
+ /* Before any operation is possible on grant tables, they must be opened.
+ This opens the tables according to the lock type specified during
+ construction.
+
+ @retval 1 replication filters matched. Abort the operation,
+ but return OK (!)
+ @retval 0 tables were opened successfully
+ @retval -1 error, tables could not be opened
+ */
+ int open_and_lock(THD *thd)
+ {
+ DBUG_ENTER("Grant_tables::open_and_lock");
+ DBUG_ASSERT(first_table_in_list);
+#ifdef HAVE_REPLICATION
+ if (first_table_in_list->tl.lock_type >= TL_WRITE_ALLOW_WRITE &&
+ thd->slave_thread && !thd->spcont)
+ {
+      /*
+        GRANT and REVOKE statements are subject to the slave's replication
+        inclusion/exclusion rules, as they are effectively updates to the
+        mysql.% tables.
+      */
+ Rpl_filter *rpl_filter= thd->system_thread_info.rpl_sql_info->rpl_filter;
+ if (rpl_filter->is_on() &&
+ !rpl_filter->tables_ok(0, &first_table_in_list->tl))
+ DBUG_RETURN(1);
+ }
+#endif
+ if (open_and_lock_tables(thd, &first_table_in_list->tl, FALSE,
+ MYSQL_LOCK_IGNORE_TIMEOUT))
+ DBUG_RETURN(-1);
+
+ /*
+ We can read privilege tables even when !initialized.
+ This can be acl_load() - server startup or FLUSH PRIVILEGES
+ */
+ if (first_table_in_list->tl.lock_type >= TL_WRITE_ALLOW_WRITE &&
+ !initialized)
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables");
+ DBUG_RETURN(-1);
+ }
+
+    /* The privilege columns vary based on the MariaDB version. Figure out
+       how many we have after we've opened the table. */
+ m_user_table.compute_num_privilege_cols();
+ m_db_table.compute_num_privilege_cols();
+ m_tables_priv_table.compute_num_privilege_cols();
+ m_columns_priv_table.compute_num_privilege_cols();
+ m_host_table.compute_num_privilege_cols();
+ m_procs_priv_table.compute_num_privilege_cols();
+ m_proxies_priv_table.compute_num_privilege_cols();
+ m_roles_mapping_table.compute_num_privilege_cols();
+ DBUG_RETURN(0);
+ }
+
+ inline const User_table& user_table() const
+ {
+ return m_user_table;
+ }
+
+ inline const Db_table& db_table() const
+ {
+ return m_db_table;
+ }
+
+
+ inline const Tables_priv_table& tables_priv_table() const
+ {
+ return m_tables_priv_table;
+ }
+
+ inline const Columns_priv_table& columns_priv_table() const
+ {
+ return m_columns_priv_table;
+ }
+
+ inline const Host_table& host_table() const
+ {
+ return m_host_table;
+ }
+
+ inline const Procs_priv_table& procs_priv_table() const
+ {
+ return m_procs_priv_table;
+ }
+
+ inline const Proxies_priv_table& proxies_priv_table() const
+ {
+ return m_proxies_priv_table;
+ }
+
+ inline const Roles_mapping_table& roles_mapping_table() const
+ {
+ return m_roles_mapping_table;
+ }
+
+ private:
+ User_table m_user_table;
+ Db_table m_db_table;
+ Tables_priv_table m_tables_priv_table;
+ Columns_priv_table m_columns_priv_table;
+ Host_table m_host_table;
+ Procs_priv_table m_procs_priv_table;
+ Proxies_priv_table m_proxies_priv_table;
+ Roles_mapping_table m_roles_mapping_table;
+
+ /* The grant tables are set-up in a linked list. We keep the head of it. */
+ Grant_table_base *first_table_in_list;
+ /**
+ Chain two grant tables' TABLE_LIST members.
+ */
+ static void link_tables(Grant_table_base *from, Grant_table_base *to)
+ {
+ DBUG_ASSERT(from);
+ if (to)
+ from->tl.next_local= from->tl.next_global= &to->tl;
+ else
+ from->tl.next_local= from->tl.next_global= NULL;
+ }
+};
+
+
+void ACL_PROXY_USER::init(const Proxies_priv_table& proxies_priv_table,
+ MEM_ROOT *mem)
+{
+ init(get_field(mem, proxies_priv_table.host()),
+ get_field(mem, proxies_priv_table.user()),
+ get_field(mem, proxies_priv_table.proxied_host()),
+ get_field(mem, proxies_priv_table.proxied_user()),
+ proxies_priv_table.with_grant()->val_int() != 0);
}
+
+
/*
Enumeration of various ACL's and Hashes used in handle_grant_struct()
*/
@@ -1045,7 +1652,7 @@ static bool fix_lex_user(THD *thd, LEX_USER *user)
if (user->pwhash.length && user->pwhash.length != check_length)
{
- my_error(ER_PASSWD_LENGTH, MYF(0), check_length);
+ my_error(ER_PASSWD_LENGTH, MYF(0), (int) check_length);
return true;
}
@@ -1140,7 +1747,7 @@ bool acl_init(bool dont_read_acl_tables)
/*
To be able to run this from boot, we allocate a temporary THD
*/
- if (!(thd=new THD))
+ if (!(thd=new THD(0)))
DBUG_RETURN(1); /* purecov: inspected */
thd->thread_stack= (char*) &thd;
thd->store_globals();
@@ -1194,32 +1801,30 @@ static bool set_user_plugin (ACL_USER *user, int password_len)
TRUE Error
*/
-static bool acl_load(THD *thd, TABLE_LIST *tables)
+static bool acl_load(THD *thd, const Grant_tables& tables)
{
- TABLE *table;
READ_RECORD read_record_info;
- bool return_val= TRUE;
bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE;
char tmp_name[SAFE_NAME_LEN+1];
int password_length;
- ulonglong old_sql_mode= thd->variables.sql_mode;
+ Sql_mode_save old_mode_save(thd);
DBUG_ENTER("acl_load");
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
grant_version++; /* Privileges updated */
+ const Host_table& host_table= tables.host_table();
init_sql_alloc(&acl_memroot, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
- if ((table= tables[HOST_TABLE].table)) // "host" table may not exist (e.g. in MySQL 5.6.7+)
+ if (host_table.table_exists()) // "host" table may not exist (e.g. in MySQL 5.6.7+)
{
- if (init_read_record(&read_record_info, thd, table, NULL, 1, 1, FALSE))
- goto end;
- table->use_all_columns();
+ if (host_table.init_read_record(&read_record_info, thd))
+ DBUG_RETURN(true);
while (!(read_record_info.read_record(&read_record_info)))
{
ACL_HOST host;
- update_hostname(&host.host,get_field(&acl_memroot, table->field[0]));
- host.db= get_field(&acl_memroot, table->field[1]);
+ update_hostname(&host.host, get_field(&acl_memroot, host_table.host()));
+ host.db= get_field(&acl_memroot, host_table.db());
if (lower_case_table_names && host.db)
{
/*
@@ -1240,9 +1845,9 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
"possible to remove this privilege using REVOKE.",
host.host.hostname, host.db);
}
- host.access= get_access(table,2);
+ host.access= host_table.get_access();
host.access= fix_rights_for_db(host.access);
- host.sort= get_sort(2,host.host.hostname,host.db);
+ host.sort= get_sort(2, host.host.hostname, host.db);
if (check_no_resolve && hostname_requires_resolving(host.host.hostname))
{
sql_print_warning("'host' entry '%s|%s' "
@@ -1252,7 +1857,7 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
continue;
}
#ifndef TO_BE_REMOVED
- if (table->s->fields == 8)
+ if (host_table.num_fields() == 8)
{ // Without grant
if (host.access & CREATE_ACL)
host.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL;
@@ -1260,60 +1865,62 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
#endif
(void) push_dynamic(&acl_hosts,(uchar*) &host);
}
- my_qsort((uchar*) dynamic_element(&acl_hosts,0,ACL_HOST*),acl_hosts.elements,
- sizeof(ACL_HOST),(qsort_cmp) acl_compare);
+ my_qsort((uchar*) dynamic_element(&acl_hosts, 0, ACL_HOST*),
+ acl_hosts.elements, sizeof(ACL_HOST),(qsort_cmp) acl_compare);
end_read_record(&read_record_info);
}
freeze_size(&acl_hosts);
- if (init_read_record(&read_record_info, thd, table=tables[USER_TABLE].table,
- NULL, 1, 1, FALSE))
- goto end;
- table->use_all_columns();
+ const User_table& user_table= tables.user_table();
+ if (user_table.init_read_record(&read_record_info, thd))
+ DBUG_RETURN(true);
- username_char_length= MY_MIN(table->field[1]->char_length(),
+ username_char_length= MY_MIN(user_table.user()->char_length(),
USERNAME_CHAR_LENGTH);
- password_length= table->field[2]->field_length /
- table->field[2]->charset()->mbmaxlen;
- if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
+ if (user_table.password()) // Password column might be missing. (MySQL 5.7.6+)
{
- sql_print_error("Fatal error: mysql.user table is damaged or in "
- "unsupported 3.20 format.");
- goto end;
- }
+ password_length= user_table.password()->field_length /
+ user_table.password()->charset()->mbmaxlen;
+ if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
+ {
+ sql_print_error("Fatal error: mysql.user table is damaged or in "
+ "unsupported 3.20 format.");
+ DBUG_RETURN(TRUE);
+ }
- DBUG_PRINT("info",("user table fields: %d, password length: %d",
- table->s->fields, password_length));
+ DBUG_PRINT("info",("user table fields: %d, password length: %d",
+ user_table.num_fields(), password_length));
- mysql_mutex_lock(&LOCK_global_system_variables);
- if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH)
- {
- if (opt_secure_auth)
+ mysql_mutex_lock(&LOCK_global_system_variables);
+ if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH)
{
- mysql_mutex_unlock(&LOCK_global_system_variables);
- sql_print_error("Fatal error: mysql.user table is in old format, "
- "but server started with --secure-auth option.");
- goto end;
+ if (opt_secure_auth)
+ {
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ sql_print_error("Fatal error: mysql.user table is in old format, "
+ "but server started with --secure-auth option.");
+ DBUG_RETURN(TRUE);
+ }
+ mysql_user_table_is_in_short_password_format= true;
+ if (global_system_variables.old_passwords)
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ else
+ {
+ extern sys_var *Sys_old_passwords_ptr;
+ Sys_old_passwords_ptr->value_origin= sys_var::AUTO;
+ global_system_variables.old_passwords= 1;
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ sql_print_warning("mysql.user table is not updated to new password format; "
+ "Disabling new password usage until "
+ "mysql_fix_privilege_tables is run");
+ }
+ thd->variables.old_passwords= 1;
}
- mysql_user_table_is_in_short_password_format= true;
- if (global_system_variables.old_passwords)
- mysql_mutex_unlock(&LOCK_global_system_variables);
else
{
- extern sys_var *Sys_old_passwords_ptr;
- Sys_old_passwords_ptr->value_origin= sys_var::AUTO;
- global_system_variables.old_passwords= 1;
+ mysql_user_table_is_in_short_password_format= false;
mysql_mutex_unlock(&LOCK_global_system_variables);
- sql_print_warning("mysql.user table is not updated to new password format; "
- "Disabling new password usage until "
- "mysql_fix_privilege_tables is run");
}
- thd->variables.old_passwords= 1;
- }
- else
- {
- mysql_user_table_is_in_short_password_format= false;
- mysql_mutex_unlock(&LOCK_global_system_variables);
}
allow_all_hosts=0;
@@ -1322,8 +1929,8 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
ACL_USER user;
bool is_role= FALSE;
bzero(&user, sizeof(user));
- update_hostname(&user.host, get_field(&acl_memroot, table->field[0]));
- char *username= get_field(&acl_memroot, table->field[1]);
+ update_hostname(&user.host, get_field(&acl_memroot, user_table.host()));
+ char *username= get_field(&acl_memroot, user_table.user());
user.user.str= username;
user.user.length= safe_strlen(username);
@@ -1331,7 +1938,7 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
If the user entry is a role, skip password and hostname checks
A user can not log in with a role so some checks are not necessary
*/
- is_role= check_is_role(table);
+ is_role= user_table.check_is_role();
if (is_role && is_invalid_role_name(username))
{
@@ -1349,7 +1956,9 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
continue;
}
- char *password= get_field(&acl_memroot, table->field[2]);
+ char *password= const_cast<char*>("");
+ if (user_table.password())
+ password= get_field(&acl_memroot, user_table.password());
uint password_len= safe_strlen(password);
user.auth_string.str= safe_str(password);
user.auth_string.length= password_len;
@@ -1357,30 +1966,29 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
if (!is_role && set_user_plugin(&user, password_len))
continue;
-
+
{
- uint next_field;
- user.access= get_access(table,3,&next_field) & GLOBAL_ACLS;
+ user.access= user_table.get_access() & GLOBAL_ACLS;
/*
if it is pre 5.0.1 privilege table then map CREATE privilege on
CREATE VIEW & SHOW VIEW privileges
*/
- if (table->s->fields <= 31 && (user.access & CREATE_ACL))
+ if (user_table.num_fields() <= 31 && (user.access & CREATE_ACL))
user.access|= (CREATE_VIEW_ACL | SHOW_VIEW_ACL);
/*
if it is pre 5.0.2 privilege table then map CREATE/ALTER privilege on
CREATE PROCEDURE & ALTER PROCEDURE privileges
*/
- if (table->s->fields <= 33 && (user.access & CREATE_ACL))
+ if (user_table.num_fields() <= 33 && (user.access & CREATE_ACL))
user.access|= CREATE_PROC_ACL;
- if (table->s->fields <= 33 && (user.access & ALTER_ACL))
+ if (user_table.num_fields() <= 33 && (user.access & ALTER_ACL))
user.access|= ALTER_PROC_ACL;
/*
pre 5.0.3 did not have CREATE_USER_ACL
*/
- if (table->s->fields <= 36 && (user.access & GRANT_ACL))
+ if (user_table.num_fields() <= 36 && (user.access & GRANT_ACL))
user.access|= CREATE_USER_ACL;
@@ -1388,13 +1996,13 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
if it is pre 5.1.6 privilege table then map CREATE privilege on
CREATE|ALTER|DROP|EXECUTE EVENT
*/
- if (table->s->fields <= 37 && (user.access & SUPER_ACL))
+ if (user_table.num_fields() <= 37 && (user.access & SUPER_ACL))
user.access|= EVENT_ACL;
/*
if it is pre 5.1.6 privilege then map TRIGGER privilege on CREATE.
*/
- if (table->s->fields <= 38 && (user.access & SUPER_ACL))
+ if (user_table.num_fields() <= 38 && (user.access & SUPER_ACL))
user.access|= TRIGGER_ACL;
user.sort= get_sort(2, user.host.hostname, user.user.str);
@@ -1403,9 +2011,9 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
user.user_resource.max_statement_time= 0.0;
/* Starting from 4.0.2 we have more fields */
- if (table->s->fields >= 31)
+ if (user_table.ssl_type())
{
- char *ssl_type=get_field(thd->mem_root, table->field[next_field++]);
+ char *ssl_type=get_field(thd->mem_root, user_table.ssl_type());
if (!ssl_type)
user.ssl_type=SSL_TYPE_NONE;
else if (!strcmp(ssl_type, "ANY"))
@@ -1415,40 +2023,43 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
else /* !strcmp(ssl_type, "SPECIFIED") */
user.ssl_type=SSL_TYPE_SPECIFIED;
- user.ssl_cipher= get_field(&acl_memroot, table->field[next_field++]);
- user.x509_issuer= get_field(&acl_memroot, table->field[next_field++]);
- user.x509_subject= get_field(&acl_memroot, table->field[next_field++]);
+ user.ssl_cipher= get_field(&acl_memroot, user_table.ssl_cipher());
+ user.x509_issuer= get_field(&acl_memroot, user_table.x509_issuer());
+ user.x509_subject= get_field(&acl_memroot, user_table.x509_subject());
- char *ptr = get_field(thd->mem_root, table->field[next_field++]);
+ char *ptr = get_field(thd->mem_root, user_table.max_questions());
user.user_resource.questions=ptr ? atoi(ptr) : 0;
- ptr = get_field(thd->mem_root, table->field[next_field++]);
+ ptr = get_field(thd->mem_root, user_table.max_updates());
user.user_resource.updates=ptr ? atoi(ptr) : 0;
- ptr = get_field(thd->mem_root, table->field[next_field++]);
+ ptr = get_field(thd->mem_root, user_table.max_connections());
user.user_resource.conn_per_hour= ptr ? atoi(ptr) : 0;
if (user.user_resource.questions || user.user_resource.updates ||
user.user_resource.conn_per_hour)
mqh_used=1;
- if (table->s->fields >= 36)
+ if (user_table.max_user_connections())
{
/* Starting from 5.0.3 we have max_user_connections field */
- ptr= get_field(thd->mem_root, table->field[next_field++]);
+ ptr= get_field(thd->mem_root, user_table.max_user_connections());
user.user_resource.user_conn= ptr ? atoi(ptr) : 0;
}
- if (!is_role && table->s->fields >= 41)
+ if (!is_role && user_table.plugin())
{
/* We may have plugin & auth_String fields */
- char *tmpstr= get_field(&acl_memroot, table->field[next_field++]);
+ char *tmpstr= get_field(&acl_memroot, user_table.plugin());
if (tmpstr)
{
user.plugin.str= tmpstr;
user.plugin.length= strlen(user.plugin.str);
user.auth_string.str=
- safe_str(get_field(&acl_memroot, table->field[next_field++]));
+ safe_str(get_field(&acl_memroot,
+ user_table.authentication_string()));
user.auth_string.length= strlen(user.auth_string.str);
- if (user.auth_string.length && password_len)
+ if (user.auth_string.length && password_len &&
+ (user.auth_string.length != password_len ||
+ memcmp(user.auth_string.str, password, password_len)))
{
sql_print_warning("'user' entry '%s@%s' has both a password "
"and an authentication plugin specified. The "
@@ -1466,11 +2077,11 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
}
}
- if (table->s->fields > MAX_STATEMENT_TIME_COLUMN_IDX)
+ if (user_table.max_statement_time())
{
/* Starting from 10.1.1 we can have max_statement_time */
ptr= get_field(thd->mem_root,
- table->field[MAX_STATEMENT_TIME_COLUMN_IDX]);
+ user_table.max_statement_time());
user.user_resource.max_statement_time= ptr ? atof(ptr) : 0.0;
}
}
@@ -1478,7 +2089,7 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
{
user.ssl_type=SSL_TYPE_NONE;
#ifndef TO_BE_REMOVED
- if (table->s->fields <= 13)
+ if (user_table.num_fields() <= 13)
{ // Without grant
if (user.access & CREATE_ACL)
user.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL;
@@ -1496,10 +2107,10 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
8, 8, MYF(0));
/* check default role, if any */
- if (!is_role && table->s->fields > DEFAULT_ROLE_COLUMN_IDX)
+ if (!is_role && user_table.default_role())
{
user.default_rolename.str=
- get_field(&acl_memroot, table->field[DEFAULT_ROLE_COLUMN_IDX]);
+ get_field(&acl_memroot, user_table.default_role());
user.default_rolename.length= safe_strlen(user.default_rolename.str);
}
@@ -1529,19 +2140,18 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
end_read_record(&read_record_info);
freeze_size(&acl_users);
- if (init_read_record(&read_record_info, thd, table=tables[DB_TABLE].table,
- NULL, 1, 1, FALSE))
- goto end;
- table->use_all_columns();
+ const Db_table& db_table= tables.db_table();
+ if (db_table.init_read_record(&read_record_info, thd))
+ DBUG_RETURN(TRUE);
while (!(read_record_info.read_record(&read_record_info)))
{
ACL_DB db;
- db.user=get_field(&acl_memroot, table->field[MYSQL_DB_FIELD_USER]);
- const char *hostname= get_field(&acl_memroot, table->field[MYSQL_DB_FIELD_HOST]);
+ db.user=get_field(&acl_memroot, db_table.user());
+ const char *hostname= get_field(&acl_memroot, db_table.host());
if (!hostname && find_acl_role(db.user))
hostname= "";
update_hostname(&db.host, hostname);
- db.db=get_field(&acl_memroot, table->field[MYSQL_DB_FIELD_DB]);
+ db.db=get_field(&acl_memroot, db_table.db());
if (!db.db)
{
sql_print_warning("Found an entry in the 'db' table with empty database name; Skipped");
@@ -1550,11 +2160,11 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
if (check_no_resolve && hostname_requires_resolving(db.host.hostname))
{
sql_print_warning("'db' entry '%s %s@%s' "
- "ignored in --skip-name-resolve mode.",
+ "ignored in --skip-name-resolve mode.",
db.db, safe_str(db.user), safe_str(db.host.hostname));
continue;
}
- db.access=get_access(table,3);
+ db.access= db_table.get_access();
db.access=fix_rights_for_db(db.access);
db.initial_access= db.access;
if (lower_case_table_names)
@@ -1581,7 +2191,7 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
}
db.sort=get_sort(3,db.host.hostname,db.db,db.user);
#ifndef TO_BE_REMOVED
- if (table->s->fields <= 9)
+ if (db_table.num_fields() <= 9)
{ // Without grant
if (db.access & CREATE_ACL)
db.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL;
@@ -1593,23 +2203,19 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
acl_dbs.sort((acl_dbs_cmp)acl_compare);
acl_dbs.freeze();
- if ((table= tables[PROXIES_PRIV_TABLE].table))
+ const Proxies_priv_table& proxies_priv_table= tables.proxies_priv_table();
+ if (proxies_priv_table.table_exists())
{
- if (init_read_record(&read_record_info, thd, table,
- NULL, 1, 1, FALSE))
- goto end;
- table->use_all_columns();
+ if (proxies_priv_table.init_read_record(&read_record_info, thd))
+ DBUG_RETURN(TRUE);
while (!(read_record_info.read_record(&read_record_info)))
{
ACL_PROXY_USER proxy;
- proxy.init(table, &acl_memroot);
+ proxy.init(proxies_priv_table, &acl_memroot);
if (proxy.check_validity(check_no_resolve))
continue;
if (push_dynamic(&acl_proxy_users, (uchar*) &proxy))
- {
- end_read_record(&read_record_info);
- goto end;
- }
+ DBUG_RETURN(TRUE);
}
my_qsort((uchar*) dynamic_element(&acl_proxy_users, 0, ACL_PROXY_USER*),
acl_proxy_users.elements,
@@ -1623,20 +2229,20 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
}
freeze_size(&acl_proxy_users);
- if ((table= tables[ROLES_MAPPING_TABLE].table))
+ const Roles_mapping_table& roles_mapping_table= tables.roles_mapping_table();
+ if (roles_mapping_table.table_exists())
{
- if (init_read_record(&read_record_info, thd, table, NULL, 1, 1, FALSE))
- goto end;
- table->use_all_columns();
+ if (roles_mapping_table.init_read_record(&read_record_info, thd))
+ DBUG_RETURN(TRUE);
MEM_ROOT temp_root;
init_alloc_root(&temp_root, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
while (!(read_record_info.read_record(&read_record_info)))
{
- char *hostname= safe_str(get_field(&temp_root, table->field[0]));
- char *username= safe_str(get_field(&temp_root, table->field[1]));
- char *rolename= safe_str(get_field(&temp_root, table->field[2]));
- bool with_grant_option= get_YN_as_bool(table->field[3]);
+ char *hostname= safe_str(get_field(&temp_root, roles_mapping_table.host()));
+ char *username= safe_str(get_field(&temp_root, roles_mapping_table.user()));
+ char *rolename= safe_str(get_field(&temp_root, roles_mapping_table.role()));
+ bool with_grant_option= get_YN_as_bool(roles_mapping_table.admin_option());
if (add_role_user_mapping(username, hostname, rolename)) {
sql_print_error("Invalid roles_mapping table entry user:'%s@%s', rolename:'%s'",
@@ -1664,12 +2270,7 @@ static bool acl_load(THD *thd, TABLE_LIST *tables)
init_check_host();
initialized=1;
- return_val= FALSE;
-
-end:
- end_read_record(&read_record_info);
- thd->variables.sql_mode= old_sql_mode;
- DBUG_RETURN(return_val);
+ DBUG_RETURN(FALSE);
}
@@ -1717,7 +2318,6 @@ void acl_free(bool end)
bool acl_reload(THD *thd)
{
- TABLE_LIST tables[TABLES_MAX];
DYNAMIC_ARRAY old_acl_hosts, old_acl_users, old_acl_proxy_users;
Dynamic_array<ACL_DB> old_acl_dbs(0U,0U);
HASH old_acl_roles, old_acl_roles_mappings;
@@ -1725,12 +2325,13 @@ bool acl_reload(THD *thd)
int result;
DBUG_ENTER("acl_reload");
+ Grant_tables tables(Table_host | Table_user | Table_db | Table_proxies_priv |
+ Table_roles_mapping, TL_READ);
/*
To avoid deadlocks we should obtain table locks before
obtaining acl_cache->lock mutex.
*/
- if ((result= open_grant_tables(thd, tables, TL_READ, Table_host |
- Table_user | Table_db | Table_proxies_priv | Table_roles_mapping)))
+ if ((result= tables.open_and_lock(thd)))
{
DBUG_ASSERT(result <= 0);
/*
@@ -1833,34 +2434,6 @@ static ulong get_access(TABLE *form, uint fieldnr, uint *next_field)
return access_bits;
}
-/*
- Check if a user entry in the user table is marked as being a role entry
-
- IMPLEMENTATION
- Access the coresponding column and check the coresponding ENUM of the form
- ENUM('N', 'Y')
-
- SYNOPSIS
- check_is_role()
- form an open table to read the entry from.
- The record should be already read in table->record[0]
-
- RETURN VALUE
- TRUE if the user is marked as a role
- FALSE otherwise
-*/
-
-static bool check_is_role(TABLE *form)
-{
- char buff[2];
- String res(buff, sizeof(buff), &my_charset_latin1);
- /* Table version does not support roles */
- if (form->s->fields <= ROLE_ASSIGN_COLUMN_IDX)
- return FALSE;
-
- return get_YN_as_bool(form->field[ROLE_ASSIGN_COLUMN_IDX]);
-}
-
/*
Return a number which, if sorted 'desc', puts strings in this order:
@@ -2018,8 +2591,7 @@ bool acl_getroot(Security_context *sctx, char *user, char *host,
sctx->master_access= acl_role->access;
if (acl_role->user.str)
- strmake_buf(sctx->priv_user, user);
- sctx->priv_host[0]= 0;
+ strmake_buf(sctx->priv_role, user);
}
}
@@ -2046,7 +2618,7 @@ static int check_user_can_set_role(const char *user, const char *host,
acl_user= find_user_wild(host, user, ip);
if (acl_user == NULL)
{
- my_error(ER_INVALID_CURRENT_USER, MYF(0), rolename);
+ my_error(ER_INVALID_CURRENT_USER, MYF(0));
result= -1;
}
else if (access)
@@ -2442,7 +3014,8 @@ exit:
(entry= (acl_entry*) malloc(sizeof(acl_entry)+key_length)))
{
entry->access=(db_access & host_access);
- entry->length=key_length;
+ DBUG_ASSERT(key_length < 0xffff);
+ entry->length=(uint16)key_length;
memcpy((uchar*) entry->key,key,key_length);
acl_cache->add(entry);
}
@@ -2778,7 +3351,7 @@ bool check_change_password(THD *thd, LEX_USER *user)
*/
bool change_password(THD *thd, LEX_USER *user)
{
- TABLE_LIST tables[TABLES_MAX];
+ Grant_tables tables(Table_user, TL_WRITE);
/* Buffer should be extended when password length is extended. */
char buff[512];
ulong query_length= 0;
@@ -2813,7 +3386,7 @@ bool change_password(THD *thd, LEX_USER *user)
WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, (char*)"user", NULL);
}
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user)))
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
result= 1;
@@ -2842,7 +3415,7 @@ bool change_password(THD *thd, LEX_USER *user)
ER_SET_PASSWORD_AUTH_PLUGIN,
ER_THD(thd, ER_SET_PASSWORD_AUTH_PLUGIN));
- if (update_user_table(thd, tables[USER_TABLE].table,
+ if (update_user_table(thd, tables.user_table(),
safe_str(acl_user->host.hostname),
safe_str(acl_user->user.str),
user->pwhash.str, user->pwhash.length))
@@ -2887,8 +3460,7 @@ int acl_check_set_default_role(THD *thd, const char *host, const char *user)
int acl_set_default_role(THD *thd, const char *host, const char *user,
const char *rolename)
{
- TABLE_LIST tables[TABLES_MAX];
- TABLE *table;
+ Grant_tables tables(Table_user, TL_WRITE);
char user_key[MAX_KEY_LENGTH];
int result= 1;
int error;
@@ -2924,19 +3496,6 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
safe_str(rolename), safe_str(user), safe_str(host));
}
- if (WSREP(thd) && !IF_WSREP(thd->wsrep_applier, 0))
- {
- thd->set_query(buff, query_length, system_charset_info);
- // Attention!!! here is implicit goto error;
- WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, (char*)"user", NULL);
- }
-
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user)))
- DBUG_RETURN(result != 1);
-
- table= tables[USER_TABLE].table;
- result= 1;
-
/*
This statement will be replicated as a statement, even when using
row-based replication. The flag will be reset at the end of the
@@ -2946,76 +3505,99 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
*/
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
- mysql_mutex_lock(&acl_cache->lock);
- ACL_USER *acl_user;
- if (!(acl_user= find_user_exact(host, user)))
+ if (WSREP(thd) && !IF_WSREP(thd->wsrep_applier, 0))
{
- mysql_mutex_unlock(&acl_cache->lock);
- my_message(ER_PASSWORD_NO_MATCH, ER_THD(thd, ER_PASSWORD_NO_MATCH),
- MYF(0));
- goto end;
+ thd->set_query(buff, query_length, system_charset_info);
+ // Attention!!! here is implicit goto error;
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, (char*)"user", NULL);
}
- if (!clear_role) {
- /* set new default_rolename */
- acl_user->default_rolename.str= safe_strdup_root(&acl_memroot, rolename);
- acl_user->default_rolename.length= strlen(rolename);
- }
- else
+ /*
+ Extra block due to WSREP_TO_ISOLATION_BEGIN using goto.
+ TODO(cvicentiu) Should move this block out into a new function.
+ */
{
- /* clear the default_rolename */
- acl_user->default_rolename.str = NULL;
- acl_user->default_rolename.length = 0;
- }
+ if ((result= tables.open_and_lock(thd)))
+ DBUG_RETURN(result != 1);
- /* update the mysql.user table with the new default role */
- table->use_all_columns();
- if (table->s->fields <= DEFAULT_ROLE_COLUMN_IDX)
- {
- my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0),
- table->alias.c_ptr(), DEFAULT_ROLE_COLUMN_IDX + 1, table->s->fields,
- static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID);
- mysql_mutex_unlock(&acl_cache->lock);
- goto end;
- }
- table->field[0]->store(host,(uint) strlen(host), system_charset_info);
- table->field[1]->store(user,(uint) strlen(user), system_charset_info);
- key_copy((uchar *) user_key, table->record[0], table->key_info,
- table->key_info->key_length);
+ const User_table& user_table= tables.user_table();
+ TABLE *table= user_table.table();
- if (table->file->ha_index_read_idx_map(table->record[0], 0,
- (uchar *) user_key, HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
- {
- mysql_mutex_unlock(&acl_cache->lock);
- my_message(ER_PASSWORD_NO_MATCH, ER_THD(thd, ER_PASSWORD_NO_MATCH),
- MYF(0));
- goto end;
- }
- store_record(table, record[1]);
- table->field[DEFAULT_ROLE_COLUMN_IDX]->store(acl_user->default_rolename.str,
- acl_user->default_rolename.length,
- system_charset_info);
- if ((error=table->file->ha_update_row(table->record[1],table->record[0])) &&
- error != HA_ERR_RECORD_IS_THE_SAME)
- {
- mysql_mutex_unlock(&acl_cache->lock);
- table->file->print_error(error,MYF(0)); /* purecov: deadcode */
- goto end;
- }
+ result= 1;
- acl_cache->clear(1);
- mysql_mutex_unlock(&acl_cache->lock);
- result= 0;
- if (mysql_bin_log.is_open())
- {
- DBUG_ASSERT(query_length);
- thd->clear_error();
- result= thd->binlog_query(THD::STMT_QUERY_TYPE, buff, query_length,
- FALSE, FALSE, FALSE, 0);
+ mysql_mutex_lock(&acl_cache->lock);
+ ACL_USER *acl_user;
+ if (!(acl_user= find_user_exact(host, user)))
+ {
+ mysql_mutex_unlock(&acl_cache->lock);
+ my_message(ER_PASSWORD_NO_MATCH, ER_THD(thd, ER_PASSWORD_NO_MATCH),
+ MYF(0));
+ goto end;
+ }
+
+ if (!clear_role)
+ {
+ /* set new default_rolename */
+ acl_user->default_rolename.str= safe_strdup_root(&acl_memroot, rolename);
+ acl_user->default_rolename.length= strlen(rolename);
+ }
+ else
+ {
+ /* clear the default_rolename */
+ acl_user->default_rolename.str = NULL;
+ acl_user->default_rolename.length = 0;
+ }
+
+ /* update the mysql.user table with the new default role */
+ tables.user_table().table()->use_all_columns();
+ if (!tables.user_table().default_role())
+ {
+ my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0),
+ table->alias.c_ptr(), DEFAULT_ROLE_COLUMN_IDX + 1,
+ tables.user_table().num_fields(),
+ static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID);
+ mysql_mutex_unlock(&acl_cache->lock);
+ goto end;
+ }
+ user_table.host()->store(host,(uint) strlen(host), system_charset_info);
+ user_table.user()->store(user,(uint) strlen(user), system_charset_info);
+ key_copy((uchar *) user_key, table->record[0], table->key_info,
+ table->key_info->key_length);
+
+ if (table->file->ha_index_read_idx_map(table->record[0], 0,
+ (uchar *) user_key, HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
+ {
+ mysql_mutex_unlock(&acl_cache->lock);
+ my_message(ER_PASSWORD_NO_MATCH, ER_THD(thd, ER_PASSWORD_NO_MATCH),
+ MYF(0));
+ goto end;
+ }
+ store_record(table, record[1]);
+ user_table.default_role()->store(acl_user->default_rolename.str,
+ acl_user->default_rolename.length,
+ system_charset_info);
+ if ((error=table->file->ha_update_row(table->record[1],table->record[0])) &&
+ error != HA_ERR_RECORD_IS_THE_SAME)
+ {
+ mysql_mutex_unlock(&acl_cache->lock);
+ table->file->print_error(error,MYF(0)); /* purecov: deadcode */
+ goto end;
+ }
+
+ acl_cache->clear(1);
+ mysql_mutex_unlock(&acl_cache->lock);
+ result= 0;
+ if (mysql_bin_log.is_open())
+ {
+ DBUG_ASSERT(query_length);
+ thd->clear_error();
+ result= thd->binlog_query(THD::STMT_QUERY_TYPE, buff, query_length,
+ FALSE, FALSE, FALSE, 0);
+ }
+ end:
+ close_mysql_tables(thd);
}
-end:
- close_mysql_tables(thd);
#ifdef WITH_WSREP
WSREP_ERROR_LABEL:
@@ -3287,6 +3869,28 @@ bool hostname_requires_resolving(const char *hostname)
}
+void set_authentication_plugin_from_password(const User_table& user_table,
+ const char* password,
+ uint password_length)
+{
+ if (password_length == SCRAMBLED_PASSWORD_CHAR_LENGTH ||
+ password_length == 0)
+ {
+ user_table.plugin()->store(native_password_plugin_name.str,
+ native_password_plugin_name.length,
+ system_charset_info);
+ }
+ else
+ {
+ DBUG_ASSERT(password_length == SCRAMBLED_PASSWORD_CHAR_LENGTH_323);
+ user_table.plugin()->store(old_password_plugin_name.str,
+ old_password_plugin_name.length,
+ system_charset_info);
+ }
+ user_table.authentication_string()->store(password,
+ password_length,
+ system_charset_info);
+}
/**
Update record for user in mysql.user privilege table with new password.
@@ -3300,18 +3904,19 @@ bool hostname_requires_resolving(const char *hostname)
@see change_password
*/
-static bool update_user_table(THD *thd, TABLE *table,
+static bool update_user_table(THD *thd, const User_table& user_table,
const char *host, const char *user,
- const char *new_password, uint new_password_len)
+ const char *new_password, uint new_password_len)
{
char user_key[MAX_KEY_LENGTH];
int error;
DBUG_ENTER("update_user_table");
DBUG_PRINT("enter",("user: %s host: %s",user,host));
+ TABLE *table= user_table.table();
table->use_all_columns();
- table->field[0]->store(host,(uint) strlen(host), system_charset_info);
- table->field[1]->store(user,(uint) strlen(user), system_charset_info);
+ user_table.host()->store(host,(uint) strlen(host), system_charset_info);
+ user_table.user()->store(user,(uint) strlen(user), system_charset_info);
key_copy((uchar *) user_key, table->record[0], table->key_info,
table->key_info->key_length);
@@ -3320,15 +3925,25 @@ static bool update_user_table(THD *thd, TABLE *table,
HA_READ_KEY_EXACT))
{
my_message(ER_PASSWORD_NO_MATCH, ER_THD(thd, ER_PASSWORD_NO_MATCH),
- MYF(0)); /* purecov: deadcode */
- DBUG_RETURN(1); /* purecov: deadcode */
+ MYF(0)); /* purecov: deadcode */
+ DBUG_RETURN(1); /* purecov: deadcode */
}
store_record(table,record[1]);
- table->field[2]->store(new_password, new_password_len, system_charset_info);
+
+ if (user_table.plugin())
+ {
+ set_authentication_plugin_from_password(user_table, new_password,
+ new_password_len);
+ }
+
+ if (user_table.password())
+ user_table.password()->store(new_password, new_password_len, system_charset_info);
+
+
if ((error=table->file->ha_update_row(table->record[1],table->record[0])) &&
error != HA_ERR_RECORD_IS_THE_SAME)
{
- table->file->print_error(error,MYF(0)); /* purecov: deadcode */
+ table->file->print_error(error,MYF(0)); /* purecov: deadcode */
DBUG_RETURN(1);
}
DBUG_RETURN(0);
@@ -3374,8 +3989,9 @@ static bool test_if_create_new_users(THD *thd)
Handle GRANT commands
****************************************************************************/
-static int replace_user_table(THD *thd, TABLE *table, LEX_USER &combo,
- ulong rights, bool revoke_grant,
+static int replace_user_table(THD *thd, const User_table &user_table,
+ LEX_USER &combo,
+ ulong rights, bool revoke_grant,
bool can_create_user, bool no_auto_create)
{
int error = -1;
@@ -3384,6 +4000,7 @@ static int replace_user_table(THD *thd, TABLE *table, LEX_USER &combo,
uchar user_key[MAX_KEY_LENGTH];
bool handle_as_role= combo.is_role();
LEX *lex= thd->lex;
+ TABLE *table= user_table.table();
DBUG_ENTER("replace_user_table");
mysql_mutex_assert_owner(&acl_cache->lock);
@@ -3402,18 +4019,18 @@ static int replace_user_table(THD *thd, TABLE *table, LEX_USER &combo,
combo.pwhash= empty_lex_str;
/* if the user table is not up to date, we can't handle role updates */
- if (table->s->fields <= ROLE_ASSIGN_COLUMN_IDX && handle_as_role)
+ if (!user_table.is_role() && handle_as_role)
{
my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0),
- table->alias.c_ptr(), ROLE_ASSIGN_COLUMN_IDX + 1, table->s->fields,
+ "user", ROLE_ASSIGN_COLUMN_IDX + 1, user_table.num_fields(),
static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID);
DBUG_RETURN(-1);
}
table->use_all_columns();
- table->field[0]->store(combo.host.str,combo.host.length,
+ user_table.host()->store(combo.host.str,combo.host.length,
system_charset_info);
- table->field[1]->store(combo.user.str,combo.user.length,
+ user_table.user()->store(combo.user.str,combo.user.length,
system_charset_info);
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
@@ -3462,9 +4079,9 @@ static int replace_user_table(THD *thd, TABLE *table, LEX_USER &combo,
old_row_exists = 0;
restore_record(table,s->default_values);
- table->field[0]->store(combo.host.str,combo.host.length,
+ user_table.host()->store(combo.host.str,combo.host.length,
system_charset_info);
- table->field[1]->store(combo.user.str,combo.user.length,
+ user_table.user()->store(combo.user.str,combo.user.length,
system_charset_info);
}
else
@@ -3479,102 +4096,119 @@ static int replace_user_table(THD *thd, TABLE *table, LEX_USER &combo,
/* Update table columns with new privileges */
- Field **tmp_field;
ulong priv;
- uint next_field;
- for (tmp_field= table->field+3, priv = SELECT_ACL;
- *tmp_field && (*tmp_field)->real_type() == MYSQL_TYPE_ENUM &&
- ((Field_enum*) (*tmp_field))->typelib->count == 2 ;
- tmp_field++, priv <<= 1)
+ priv = SELECT_ACL;
+ for (uint i= 0; i < user_table.num_privileges(); i++, priv <<= 1)
{
- if (priv & rights) // set requested privileges
- (*tmp_field)->store(&what, 1, &my_charset_latin1);
+ if (priv & rights)
+ user_table.priv_field(i)->store(&what, 1, &my_charset_latin1);
}
- rights= get_access(table, 3, &next_field);
- DBUG_PRINT("info",("table fields: %d",table->s->fields));
- if (combo.pwhash.str[0])
- table->field[2]->store(combo.pwhash.str, combo.pwhash.length, system_charset_info);
- if (table->s->fields >= 31) /* From 4.0.0 we have more fields */
+
+ rights= user_table.get_access();
+
+ DBUG_PRINT("info",("table fields: %d", user_table.num_fields()));
+ /* If we don't have a password column, we'll use the authentication_string
+ column later. */
+ if (combo.pwhash.str[0] && user_table.password())
+ user_table.password()->store(combo.pwhash.str, combo.pwhash.length,
+ system_charset_info);
+ /* We either have the password column, the plugin column, or both. Otherwise
+ we have a corrupt user table. */
+ DBUG_ASSERT(user_table.password() || user_table.plugin());
+ if (user_table.ssl_type()) /* From 4.0.0 we have more fields */
{
/* We write down SSL related ACL stuff */
switch (lex->ssl_type) {
case SSL_TYPE_ANY:
- table->field[next_field]->store(STRING_WITH_LEN("ANY"),
- &my_charset_latin1);
- table->field[next_field+1]->store("", 0, &my_charset_latin1);
- table->field[next_field+2]->store("", 0, &my_charset_latin1);
- table->field[next_field+3]->store("", 0, &my_charset_latin1);
+ user_table.ssl_type()->store(STRING_WITH_LEN("ANY"),
+ &my_charset_latin1);
+ user_table.ssl_cipher()->store("", 0, &my_charset_latin1);
+ user_table.x509_issuer()->store("", 0, &my_charset_latin1);
+ user_table.x509_subject()->store("", 0, &my_charset_latin1);
break;
case SSL_TYPE_X509:
- table->field[next_field]->store(STRING_WITH_LEN("X509"),
- &my_charset_latin1);
- table->field[next_field+1]->store("", 0, &my_charset_latin1);
- table->field[next_field+2]->store("", 0, &my_charset_latin1);
- table->field[next_field+3]->store("", 0, &my_charset_latin1);
+ user_table.ssl_type()->store(STRING_WITH_LEN("X509"),
+ &my_charset_latin1);
+ user_table.ssl_cipher()->store("", 0, &my_charset_latin1);
+ user_table.x509_issuer()->store("", 0, &my_charset_latin1);
+ user_table.x509_subject()->store("", 0, &my_charset_latin1);
break;
case SSL_TYPE_SPECIFIED:
- table->field[next_field]->store(STRING_WITH_LEN("SPECIFIED"),
- &my_charset_latin1);
- table->field[next_field+1]->store("", 0, &my_charset_latin1);
- table->field[next_field+2]->store("", 0, &my_charset_latin1);
- table->field[next_field+3]->store("", 0, &my_charset_latin1);
+ user_table.ssl_type()->store(STRING_WITH_LEN("SPECIFIED"),
+ &my_charset_latin1);
+ user_table.ssl_cipher()->store("", 0, &my_charset_latin1);
+ user_table.x509_issuer()->store("", 0, &my_charset_latin1);
+ user_table.x509_subject()->store("", 0, &my_charset_latin1);
if (lex->ssl_cipher)
- table->field[next_field+1]->store(lex->ssl_cipher,
- strlen(lex->ssl_cipher), system_charset_info);
+ user_table.ssl_cipher()->store(lex->ssl_cipher,
+ strlen(lex->ssl_cipher),
+ system_charset_info);
if (lex->x509_issuer)
- table->field[next_field+2]->store(lex->x509_issuer,
- strlen(lex->x509_issuer), system_charset_info);
+ user_table.x509_issuer()->store(lex->x509_issuer,
+ strlen(lex->x509_issuer),
+ system_charset_info);
if (lex->x509_subject)
- table->field[next_field+3]->store(lex->x509_subject,
- strlen(lex->x509_subject), system_charset_info);
+ user_table.x509_subject()->store(lex->x509_subject,
+ strlen(lex->x509_subject),
+ system_charset_info);
break;
case SSL_TYPE_NOT_SPECIFIED:
break;
case SSL_TYPE_NONE:
- table->field[next_field]->store("", 0, &my_charset_latin1);
- table->field[next_field+1]->store("", 0, &my_charset_latin1);
- table->field[next_field+2]->store("", 0, &my_charset_latin1);
- table->field[next_field+3]->store("", 0, &my_charset_latin1);
+ user_table.ssl_type()->store("", 0, &my_charset_latin1);
+ user_table.ssl_cipher()->store("", 0, &my_charset_latin1);
+ user_table.x509_issuer()->store("", 0, &my_charset_latin1);
+ user_table.x509_subject()->store("", 0, &my_charset_latin1);
break;
}
- next_field+=4;
USER_RESOURCES mqh= lex->mqh;
if (mqh.specified_limits & USER_RESOURCES::QUERIES_PER_HOUR)
- table->field[next_field]->store((longlong) mqh.questions, TRUE);
+ user_table.max_questions()->store((longlong) mqh.questions, TRUE);
if (mqh.specified_limits & USER_RESOURCES::UPDATES_PER_HOUR)
- table->field[next_field+1]->store((longlong) mqh.updates, TRUE);
+ user_table.max_updates()->store((longlong) mqh.updates, TRUE);
if (mqh.specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR)
- table->field[next_field+2]->store((longlong) mqh.conn_per_hour, TRUE);
- if (table->s->fields >= 36 &&
+ user_table.max_connections()->store((longlong) mqh.conn_per_hour, TRUE);
+ if (user_table.max_user_connections() &&
(mqh.specified_limits & USER_RESOURCES::USER_CONNECTIONS))
- table->field[next_field+3]->store((longlong) mqh.user_conn, FALSE);
- next_field+= 4;
- if (table->s->fields >= 41)
+ user_table.max_user_connections()->store((longlong) mqh.user_conn, FALSE);
+ if (user_table.plugin())
{
- table->field[next_field]->set_notnull();
- table->field[next_field + 1]->set_notnull();
+ user_table.plugin()->set_notnull();
+ user_table.authentication_string()->set_notnull();
if (combo.plugin.str[0])
{
DBUG_ASSERT(combo.pwhash.str[0] == 0);
- table->field[2]->reset();
- table->field[next_field]->store(combo.plugin.str, combo.plugin.length,
- system_charset_info);
- table->field[next_field + 1]->store(combo.auth.str, combo.auth.length,
- system_charset_info);
+ if (user_table.password())
+ user_table.password()->reset();
+ user_table.plugin()->store(combo.plugin.str, combo.plugin.length,
+ system_charset_info);
+ user_table.authentication_string()->store(combo.auth.str, combo.auth.length,
+ system_charset_info);
}
if (combo.pwhash.str[0])
{
DBUG_ASSERT(combo.plugin.str[0] == 0);
- table->field[next_field]->reset();
- table->field[next_field + 1]->reset();
+ /* We have Password column. */
+ if (user_table.password())
+ {
+ user_table.plugin()->reset();
+ user_table.authentication_string()->reset();
+ }
+ else
+ {
+ /* We do not have a Password column. Use the Plugin and
+ Authentication_string columns instead. */
+ set_authentication_plugin_from_password(user_table,
+ combo.pwhash.str,
+ combo.pwhash.length);
+ }
}
- if (table->s->fields > MAX_STATEMENT_TIME_COLUMN_IDX)
+ if (user_table.max_statement_time())
{
if (mqh.specified_limits & USER_RESOURCES::MAX_STATEMENT_TIME)
- table->field[MAX_STATEMENT_TIME_COLUMN_IDX]->
- store(mqh.max_statement_time);
+ user_table.max_statement_time()->store(mqh.max_statement_time);
}
}
mqh_used= (mqh_used || mqh.questions || mqh.updates || mqh.conn_per_hour ||
@@ -3583,11 +4217,11 @@ static int replace_user_table(THD *thd, TABLE *table, LEX_USER &combo,
/* table format checked earlier */
if (handle_as_role)
{
- if (old_row_exists && !check_is_role(table))
+ if (old_row_exists && !user_table.check_is_role())
{
goto end;
}
- table->field[ROLE_ASSIGN_COLUMN_IDX]->store("Y", 1, system_charset_info);
+ user_table.is_role()->store("Y", 1, system_charset_info);
}
}
@@ -3597,7 +4231,7 @@ static int replace_user_table(THD *thd, TABLE *table, LEX_USER &combo,
We should NEVER delete from the user table, as a uses can still
use mysqld even if he doesn't have any privileges in the user table!
*/
- if (cmp_record(table,record[1]))
+ if (cmp_record(table, record[1]))
{
if ((error=
table->file->ha_update_row(table->record[1],table->record[0])) &&
@@ -4388,6 +5022,8 @@ table_hash_search(const char *host, const char *ip, const char *db,
static GRANT_COLUMN *
column_hash_search(GRANT_TABLE *t, const char *cname, uint length)
{
+ if (!my_hash_inited(&t->hash_columns))
+ return (GRANT_COLUMN*) 0;
return (GRANT_COLUMN*) my_hash_search(&t->hash_columns,
(uchar*) cname, length);
}
@@ -5308,7 +5944,7 @@ static bool merge_role_db_privileges(ACL_ROLE *grantee, const char *dbname,
is not necessarily the first and may be not present at all.
*/
int first= -1, merged= -1;
- ulong UNINIT_VAR(access), update_flags= 0;
+ ulong access= 0, update_flags= 0;
for (int *p= dbs.front(); p <= dbs.back(); p++)
{
if (first<0 || (!dbname && strcmp(acl_dbs.at(p[0]).db, acl_dbs.at(p[-1]).db)))
@@ -5511,8 +6147,8 @@ static bool merge_role_table_and_column_privileges(ACL_ROLE *grantee,
}
grants.sort(table_name_sort);
- GRANT_TABLE **first= NULL, *UNINIT_VAR(merged), **cur;
- ulong UNINIT_VAR(privs), UNINIT_VAR(cols), update_flags= 0;
+ GRANT_TABLE **first= NULL, *merged= NULL, **cur;
+ ulong privs= 0, cols= 0, update_flags= 0;
for (cur= grants.front(); cur <= grants.back(); cur++)
{
if (!first ||
@@ -5635,8 +6271,8 @@ static bool merge_role_routine_grant_privileges(ACL_ROLE *grantee,
}
grants.sort(routine_name_sort);
- GRANT_NAME **first= NULL, *UNINIT_VAR(merged);
- ulong UNINIT_VAR(privs);
+ GRANT_NAME **first= NULL, *merged= NULL;
+ ulong privs= 0 ;
for (GRANT_NAME **cur= grants.front(); cur <= grants.back(); cur++)
{
if (!first ||
@@ -5786,7 +6422,6 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
int result;
List_iterator <LEX_USER> str_list (user_list);
LEX_USER *Str, *tmp_Str;
- TABLE_LIST tables[TABLES_MAX];
bool create_new_users=0;
char *db_name, *table_name;
DBUG_ENTER("mysql_table_grant");
@@ -5875,8 +6510,9 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
*/
thd->lex->sql_command= backup.sql_command;
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user |
- Table_tables_priv | maybe_columns_priv)))
+ Grant_tables tables(Table_user | Table_tables_priv | maybe_columns_priv,
+ TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
{
thd->lex->restore_backup_query_tables_list(&backup);
DBUG_RETURN(result != 1);
@@ -5901,7 +6537,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
}
/* Create user if needed */
error= copy_and_check_auth(Str, tmp_Str, thd) ||
- replace_user_table(thd, tables[USER_TABLE].table, *Str,
+ replace_user_table(thd, tables.user_table(), *Str,
0, revoke_grant, create_new_users,
MY_TEST(thd->variables.sql_mode &
MODE_NO_AUTO_CREATE_USER));
@@ -5972,16 +6608,20 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
/* update table and columns */
- if (replace_table_table(thd, grant_table, tables[TABLES_PRIV_TABLE].table,
+ /* TODO(cvicentiu) refactor replace_table_table to use Tables_priv_table
+ instead of TABLE directly. */
+ if (replace_table_table(thd, grant_table, tables.tables_priv_table().table(),
*Str, db_name, table_name,
rights, column_priv, revoke_grant))
{
/* Should only happen if table is crashed */
result= TRUE; /* purecov: deadcode */
}
- else if (tables[COLUMNS_PRIV_TABLE].table)
+ else if (tables.columns_priv_table().table_exists())
{
- if (replace_column_table(grant_table, tables[COLUMNS_PRIV_TABLE].table,
+ /* TODO(cvicentiu) refactor replace_column_table to use Columns_priv_table
+ instead of TABLE directly. */
+ if (replace_column_table(grant_table, tables.columns_priv_table().table(),
*Str, columns, db_name, table_name, rights,
revoke_grant))
{
@@ -6032,7 +6672,6 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
{
List_iterator <LEX_USER> str_list (user_list);
LEX_USER *Str, *tmp_Str;
- TABLE_LIST tables[TABLES_MAX];
bool create_new_users= 0;
int result;
char *db_name, *table_name;
@@ -6052,8 +6691,8 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
DBUG_RETURN(TRUE);
}
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user |
- Table_procs_priv)))
+ Grant_tables tables(Table_user | Table_procs_priv, TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -6077,7 +6716,7 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
}
/* Create user if needed */
if (copy_and_check_auth(Str, tmp_Str, thd) ||
- replace_user_table(thd, tables[USER_TABLE].table, *Str,
+ replace_user_table(thd, tables.user_table(), *Str,
0, revoke_grant, create_new_users,
MY_TEST(thd->variables.sql_mode &
MODE_NO_AUTO_CREATE_USER)))
@@ -6111,8 +6750,10 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
}
}
- if (no_such_table(tables + PROCS_PRIV_TABLE) ||
- replace_routine_table(thd, grant_name, tables[PROCS_PRIV_TABLE].table,
+  /* TODO(cvicentiu) refactor replace_routine_table to use Procs_priv_table
+     instead of TABLE directly. */
+ if (tables.procs_priv_table().no_such_table() ||
+ replace_routine_table(thd, grant_name, tables.procs_priv_table().table(),
*Str, db_name, table_name, is_proc, rights,
revoke_grant) != 0)
{
@@ -6252,9 +6893,8 @@ bool mysql_grant_role(THD *thd, List <LEX_USER> &list, bool revoke)
no_auto_create_user= MY_TEST(thd->variables.sql_mode &
MODE_NO_AUTO_CREATE_USER);
- TABLE_LIST tables[TABLES_MAX];
- if ((result= open_grant_tables(thd, tables, TL_WRITE,
- Table_user | Table_roles_mapping)))
+ Grant_tables tables(Table_user | Table_roles_mapping, TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
mysql_rwlock_wrlock(&LOCK_grant);
@@ -6353,7 +6993,7 @@ bool mysql_grant_role(THD *thd, List <LEX_USER> &list, bool revoke)
user_combo.user = username;
if (copy_and_check_auth(&user_combo, &user_combo, thd) ||
- replace_user_table(thd, tables[USER_TABLE].table, user_combo, 0,
+ replace_user_table(thd, tables.user_table(), user_combo, 0,
false, create_new_user,
no_auto_create_user))
{
@@ -6419,7 +7059,9 @@ bool mysql_grant_role(THD *thd, List <LEX_USER> &list, bool revoke)
}
/* write into the roles_mapping table */
- if (replace_roles_mapping_table(tables[ROLES_MAPPING_TABLE].table,
+ /* TODO(cvicentiu) refactor replace_roles_mapping_table to use
+ Roles_mapping_table instead of TABLE directly. */
+ if (replace_roles_mapping_table(tables.roles_mapping_table().table(),
&username, &hostname, &rolename,
thd->lex->with_admin_option,
hash_entry, revoke))
@@ -6471,7 +7113,6 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
char tmp_db[SAFE_NAME_LEN+1];
bool create_new_users=0;
int result;
- TABLE_LIST tables[TABLES_MAX];
DBUG_ENTER("mysql_grant");
if (lower_case_table_names && db)
@@ -6492,8 +7133,9 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
proxied_user= str_list++;
}
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user |
- (is_proxy ? Table_proxies_priv : Table_db))))
+ Grant_tables tables(Table_user | (is_proxy ? Table_proxies_priv : Table_db),
+ TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -6522,7 +7164,7 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
}
if (copy_and_check_auth(Str, tmp_Str, thd) ||
- replace_user_table(thd, tables[USER_TABLE].table, *Str,
+ replace_user_table(thd, tables.user_table(), *Str,
(!db ? rights : 0), revoke_grant, create_new_users,
MY_TEST(thd->variables.sql_mode &
MODE_NO_AUTO_CREATE_USER)))
@@ -6532,7 +7174,7 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
ulong db_rights= rights & DB_ACLS;
if (db_rights == rights)
{
- if (replace_db_table(tables[DB_TABLE].table, db, *Str, db_rights,
+ if (replace_db_table(tables.db_table().table(), db, *Str, db_rights,
revoke_grant))
result= true;
}
@@ -6544,8 +7186,10 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
}
else if (is_proxy)
{
- if (no_such_table(tables + PROXIES_PRIV_TABLE) ||
- replace_proxies_priv_table (thd, tables[PROXIES_PRIV_TABLE].table,
+ /* TODO(cvicentiu) refactor replace_proxies_priv_table to use
+ Proxies_priv_table instead of TABLE directly. */
+ if (tables.proxies_priv_table().no_such_table() ||
+ replace_proxies_priv_table (thd, tables.proxies_priv_table().table(),
Str, proxied_user,
rights & GRANT_ACL ? TRUE : FALSE,
revoke_grant))
@@ -6600,7 +7244,7 @@ bool grant_init()
bool return_val;
DBUG_ENTER("grant_init");
- if (!(thd= new THD))
+ if (!(thd= new THD(0)))
DBUG_RETURN(1); /* purecov: deadcode */
thd->thread_stack= (char*) &thd;
thd->store_globals();
@@ -6625,15 +7269,16 @@ bool grant_init()
@retval TRUE Error
*/
-static bool grant_load(THD *thd, TABLE_LIST *tables)
+static bool grant_load(THD *thd,
+ const Tables_priv_table& tables_priv,
+ const Columns_priv_table& columns_priv,
+ const Procs_priv_table& procs_priv)
{
- MEM_ROOT *memex_ptr;
bool return_val= 1;
TABLE *t_table, *c_table, *p_table;
bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE;
- MEM_ROOT **save_mem_root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**,
- THR_MALLOC);
- ulonglong old_sql_mode= thd->variables.sql_mode;
+ MEM_ROOT *save_mem_root= thd->mem_root;
+ sql_mode_t old_sql_mode= thd->variables.sql_mode;
DBUG_ENTER("grant_load");
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
@@ -6647,9 +7292,9 @@ static bool grant_load(THD *thd, TABLE_LIST *tables)
0,0,0, (my_hash_get_key) get_grant_table, 0,0);
init_sql_alloc(&grant_memroot, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
- t_table= tables[TABLES_PRIV_TABLE].table;
- c_table= tables[COLUMNS_PRIV_TABLE].table;
- p_table= tables[PROCS_PRIV_TABLE].table; // this can be NULL
+ t_table= tables_priv.table();
+ c_table= columns_priv.table();
+ p_table= procs_priv.table(); // this can be NULL
if (t_table->file->ha_index_init(0, 1))
goto end_index_init;
@@ -6657,15 +7302,15 @@ static bool grant_load(THD *thd, TABLE_LIST *tables)
t_table->use_all_columns();
c_table->use_all_columns();
- memex_ptr= &grant_memroot;
- my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr);
+ thd->mem_root= &grant_memroot;
if (!t_table->file->ha_index_first(t_table->record[0]))
{
do
{
GRANT_TABLE *mem_check;
- if (!(mem_check=new (memex_ptr) GRANT_TABLE(t_table,c_table)))
+ /* TODO(cvicentiu) convert this to use tables_priv and columns_priv. */
+ if (!(mem_check= new (&grant_memroot) GRANT_TABLE(t_table, c_table)))
{
/* This could only happen if we are out memory */
goto end_unlock;
@@ -6710,7 +7355,7 @@ static bool grant_load(THD *thd, TABLE_LIST *tables)
{
GRANT_NAME *mem_check;
HASH *hash;
- if (!(mem_check=new (memex_ptr) GRANT_NAME(p_table, TRUE)))
+ if (!(mem_check= new (&grant_memroot) GRANT_NAME(p_table, TRUE)))
{
/* This could only happen if we are out memory */
goto end_unlock_p;
@@ -6727,12 +7372,12 @@ static bool grant_load(THD *thd, TABLE_LIST *tables)
continue;
}
}
- if (p_table->field[4]->val_int() == TYPE_ENUM_PROCEDURE)
+ if (procs_priv.routine_type()->val_int() == TYPE_ENUM_PROCEDURE)
{
hash= &proc_priv_hash;
}
else
- if (p_table->field[4]->val_int() == TYPE_ENUM_FUNCTION)
+ if (procs_priv.routine_type()->val_int() == TYPE_ENUM_FUNCTION)
{
hash= &func_priv_hash;
}
@@ -6763,7 +7408,7 @@ end_unlock_p:
p_table->file->ha_index_end();
end_unlock:
t_table->file->ha_index_end();
- my_pthread_setspecific_ptr(THR_MALLOC, save_mem_root_ptr);
+ thd->mem_root= save_mem_root;
end_index_init:
thd->variables.sql_mode= old_sql_mode;
DBUG_RETURN(return_val);
@@ -6800,7 +7445,6 @@ static my_bool propagate_role_grants_action(void *role_ptr,
bool grant_reload(THD *thd)
{
- TABLE_LIST tables[TABLES_MAX];
HASH old_column_priv_hash, old_proc_priv_hash, old_func_priv_hash;
MEM_ROOT old_mem;
int result;
@@ -6811,8 +7455,9 @@ bool grant_reload(THD *thd)
obtaining LOCK_grant rwlock.
*/
- if ((result= open_grant_tables(thd, tables, TL_READ, Table_tables_priv |
- Table_columns_priv | Table_procs_priv)))
+ Grant_tables tables(Table_tables_priv | Table_columns_priv| Table_procs_priv,
+ TL_READ);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
mysql_rwlock_wrlock(&LOCK_grant);
@@ -6827,7 +7472,10 @@ bool grant_reload(THD *thd)
*/
old_mem= grant_memroot;
- if ((result= grant_load(thd, tables)))
+ if ((result= grant_load(thd,
+ tables.tables_priv_table(),
+ tables.columns_priv_table(),
+ tables.procs_priv_table())))
{ // Error. Revert to old hash
DBUG_PRINT("error",("Reverting to old privileges"));
grant_free(); /* purecov: deadcode */
@@ -6941,6 +7589,11 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
tl->correspondent_table ? tl->correspondent_table : tl;
sctx= t_ref->security_ctx ? t_ref->security_ctx : thd->security_ctx;
+ if (tl->with || !tl->db ||
+ (tl->select_lex &&
+ (tl->with= tl->select_lex->find_table_def_in_with_clauses(tl))))
+ continue;
+
const ACL_internal_table_access *access=
get_cached_table_access(&t_ref->grant.m_internal,
t_ref->get_db_name(),
@@ -6978,8 +7631,11 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
/*
It is subquery in the FROM clause. VIEW set t_ref->derived after
table opening, but this function always called before table opening.
+
+
+      NOTE: is_derived() can't be used here because subqueries in
+      the FROM clause (derived tables) may not be marked yet.
*/
- if (!t_ref->referencing_view)
+ if (t_ref->is_anonymous_derived_table() || t_ref->schema_table)
{
/*
If it's a temporary table created for a subquery in the FROM
@@ -7194,7 +7850,7 @@ bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref,
GRANT_INFO *grant;
const char *db_name;
const char *table_name;
- Security_context *sctx= MY_TEST(table_ref->security_ctx) ?
+ Security_context *sctx= table_ref->security_ctx ?
table_ref->security_ctx : thd->security_ctx;
if (table_ref->view || table_ref->field_translation)
@@ -7711,6 +8367,94 @@ static void add_user_option(String *grant, double value, const char *name)
}
}
+static void add_user_parameters(String *result, ACL_USER* acl_user,
+ bool with_grant)
+{
+ result->append(STRING_WITH_LEN("@'"));
+ result->append(acl_user->host.hostname, acl_user->hostname_length,
+ system_charset_info);
+ result->append('\'');
+
+ if (acl_user->plugin.str == native_password_plugin_name.str ||
+ acl_user->plugin.str == old_password_plugin_name.str)
+ {
+ if (acl_user->auth_string.length)
+ {
+ DBUG_ASSERT(acl_user->salt_len);
+ result->append(STRING_WITH_LEN(" IDENTIFIED BY PASSWORD '"));
+ result->append(acl_user->auth_string.str, acl_user->auth_string.length);
+ result->append('\'');
+ }
+ }
+ else
+ {
+ result->append(STRING_WITH_LEN(" IDENTIFIED VIA "));
+ result->append(acl_user->plugin.str, acl_user->plugin.length);
+ if (acl_user->auth_string.length)
+ {
+ result->append(STRING_WITH_LEN(" USING '"));
+ result->append(acl_user->auth_string.str, acl_user->auth_string.length);
+ result->append('\'');
+ }
+ }
+ /* "show grants" SSL related stuff */
+ if (acl_user->ssl_type == SSL_TYPE_ANY)
+ result->append(STRING_WITH_LEN(" REQUIRE SSL"));
+ else if (acl_user->ssl_type == SSL_TYPE_X509)
+ result->append(STRING_WITH_LEN(" REQUIRE X509"));
+ else if (acl_user->ssl_type == SSL_TYPE_SPECIFIED)
+ {
+ int ssl_options = 0;
+ result->append(STRING_WITH_LEN(" REQUIRE "));
+ if (acl_user->x509_issuer)
+ {
+ ssl_options++;
+ result->append(STRING_WITH_LEN("ISSUER \'"));
+ result->append(acl_user->x509_issuer,strlen(acl_user->x509_issuer));
+ result->append('\'');
+ }
+ if (acl_user->x509_subject)
+ {
+ if (ssl_options++)
+ result->append(' ');
+ result->append(STRING_WITH_LEN("SUBJECT \'"));
+ result->append(acl_user->x509_subject,strlen(acl_user->x509_subject),
+ system_charset_info);
+ result->append('\'');
+ }
+ if (acl_user->ssl_cipher)
+ {
+ if (ssl_options++)
+ result->append(' ');
+ result->append(STRING_WITH_LEN("CIPHER '"));
+ result->append(acl_user->ssl_cipher,strlen(acl_user->ssl_cipher),
+ system_charset_info);
+ result->append('\'');
+ }
+ }
+ if (with_grant ||
+ (acl_user->user_resource.questions ||
+ acl_user->user_resource.updates ||
+ acl_user->user_resource.conn_per_hour ||
+ acl_user->user_resource.user_conn ||
+ acl_user->user_resource.max_statement_time != 0.0))
+ {
+ result->append(STRING_WITH_LEN(" WITH"));
+ if (with_grant)
+ result->append(STRING_WITH_LEN(" GRANT OPTION"));
+ add_user_option(result, acl_user->user_resource.questions,
+ "MAX_QUERIES_PER_HOUR", false);
+ add_user_option(result, acl_user->user_resource.updates,
+ "MAX_UPDATES_PER_HOUR", false);
+ add_user_option(result, acl_user->user_resource.conn_per_hour,
+ "MAX_CONNECTIONS_PER_HOUR", false);
+ add_user_option(result, acl_user->user_resource.user_conn,
+ "MAX_USER_CONNECTIONS", true);
+ add_user_option(result, acl_user->user_resource.max_statement_time,
+ "MAX_STATEMENT_TIME");
+ }
+}
+
static const char *command_array[]=
{
"SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "RELOAD",
@@ -7757,6 +8501,69 @@ static bool print_grants_for_role(THD *thd, ACL_ROLE * role)
}
+bool mysql_show_create_user(THD *thd, LEX_USER *lex_user)
+{
+ const char *username= NULL, *hostname= NULL;
+ char buff[1024]; //Show create user should not take more than 1024 bytes.
+ Protocol *protocol= thd->protocol;
+ bool error= false;
+ ACL_USER *acl_user;
+ DBUG_ENTER("mysql_show_create_user");
+
+ if (get_show_user(thd, lex_user, &username, &hostname, NULL))
+ DBUG_RETURN(TRUE);
+
+ List<Item> field_list;
+ strxmov(buff, "CREATE USER for ", username, "@", hostname, NullS);
+ Item_string *field = new (thd->mem_root) Item_string_ascii(thd, "", 0);
+ if (!field)
+    DBUG_RETURN(true); // Error given by my_alloc()
+
+ field->name= buff;
+ field->max_length= sizeof(buff);
+ field_list.push_back(field, thd->mem_root);
+ if (protocol->send_result_set_metadata(&field_list,
+ Protocol::SEND_NUM_ROWS |
+ Protocol::SEND_EOF))
+ DBUG_RETURN(true);
+
+ String result(buff, sizeof(buff), system_charset_info);
+ result.length(0);
+ mysql_rwlock_rdlock(&LOCK_grant);
+ mysql_mutex_lock(&acl_cache->lock);
+
+ acl_user= find_user_exact(hostname, username);
+
+ // User not found in the internal data structures.
+ if (!acl_user)
+ {
+ my_error(ER_PASSWORD_NO_MATCH, MYF(0));
+ error= true;
+ goto end;
+ }
+
+ result.append("CREATE USER '");
+ result.append(username);
+ result.append('\'');
+
+ add_user_parameters(&result, acl_user, false);
+
+ protocol->prepare_for_resend();
+ protocol->store(result.ptr(), result.length(), result.charset());
+ if (protocol->write())
+ {
+ error= true;
+ }
+ my_eof(thd);
+
+end:
+ mysql_rwlock_unlock(&LOCK_grant);
+ mysql_mutex_unlock(&acl_cache->lock);
+
+ DBUG_RETURN(error);
+}
+
+
static int show_grants_callback(ACL_USER_BASE *role, void *data)
{
THD *thd= (THD *)data;
@@ -7766,7 +8573,6 @@ static int show_grants_callback(ACL_USER_BASE *role, void *data)
return 0;
}
-
void mysql_show_grants_get_fields(THD *thd, List<Item> *fields,
const char *name)
{
@@ -7776,6 +8582,12 @@ void mysql_show_grants_get_fields(THD *thd, List<Item> *fields,
fields->push_back(field, thd->mem_root);
}
+/** checks privileges for SHOW GRANTS and SHOW CREATE USER
+
+ @note that in case of SHOW CREATE USER the parser guarantees
+ that a role can never happen here, so *rolename will never
+ be assigned to
+*/
bool get_show_user(THD *thd, LEX_USER *lex_user, const char **username,
const char **hostname, const char **rolename)
{
@@ -7836,9 +8648,7 @@ bool mysql_show_grants(THD *thd, LEX_USER *lex_user)
ACL_ROLE *acl_role= NULL;
char buff[1024];
Protocol *protocol= thd->protocol;
- const char *username= NULL;
- const char *hostname= NULL;
- const char *rolename= NULL;
+ const char *username= NULL, *hostname= NULL, *rolename= NULL;
DBUG_ENTER("mysql_show_grants");
if (!initialized)
@@ -8041,93 +8851,7 @@ static bool show_global_privileges(THD *thd, ACL_USER_BASE *acl_entry,
global.append('\'');
if (!handle_as_role)
- {
- ACL_USER *acl_user= (ACL_USER *)acl_entry;
-
- global.append (STRING_WITH_LEN("@'"));
- global.append(acl_user->host.hostname, acl_user->hostname_length,
- system_charset_info);
- global.append ('\'');
-
- if (acl_user->plugin.str == native_password_plugin_name.str ||
- acl_user->plugin.str == old_password_plugin_name.str)
- {
- if (acl_user->auth_string.length)
- {
- DBUG_ASSERT(acl_user->salt_len);
- global.append(STRING_WITH_LEN(" IDENTIFIED BY PASSWORD '"));
- global.append(acl_user->auth_string.str, acl_user->auth_string.length);
- global.append('\'');
- }
- }
- else
- {
- global.append(STRING_WITH_LEN(" IDENTIFIED VIA "));
- global.append(acl_user->plugin.str, acl_user->plugin.length);
- if (acl_user->auth_string.length)
- {
- global.append(STRING_WITH_LEN(" USING '"));
- global.append(acl_user->auth_string.str, acl_user->auth_string.length);
- global.append('\'');
- }
- }
- /* "show grants" SSL related stuff */
- if (acl_user->ssl_type == SSL_TYPE_ANY)
- global.append(STRING_WITH_LEN(" REQUIRE SSL"));
- else if (acl_user->ssl_type == SSL_TYPE_X509)
- global.append(STRING_WITH_LEN(" REQUIRE X509"));
- else if (acl_user->ssl_type == SSL_TYPE_SPECIFIED)
- {
- int ssl_options = 0;
- global.append(STRING_WITH_LEN(" REQUIRE "));
- if (acl_user->x509_issuer)
- {
- ssl_options++;
- global.append(STRING_WITH_LEN("ISSUER \'"));
- global.append(acl_user->x509_issuer,strlen(acl_user->x509_issuer));
- global.append('\'');
- }
- if (acl_user->x509_subject)
- {
- if (ssl_options++)
- global.append(' ');
- global.append(STRING_WITH_LEN("SUBJECT \'"));
- global.append(acl_user->x509_subject,strlen(acl_user->x509_subject),
- system_charset_info);
- global.append('\'');
- }
- if (acl_user->ssl_cipher)
- {
- if (ssl_options++)
- global.append(' ');
- global.append(STRING_WITH_LEN("CIPHER '"));
- global.append(acl_user->ssl_cipher,strlen(acl_user->ssl_cipher),
- system_charset_info);
- global.append('\'');
- }
- }
- if ((want_access & GRANT_ACL) ||
- (acl_user->user_resource.questions ||
- acl_user->user_resource.updates ||
- acl_user->user_resource.conn_per_hour ||
- acl_user->user_resource.user_conn ||
- acl_user->user_resource.max_statement_time != 0.0))
- {
- global.append(STRING_WITH_LEN(" WITH"));
- if (want_access & GRANT_ACL)
- global.append(STRING_WITH_LEN(" GRANT OPTION"));
- add_user_option(&global, acl_user->user_resource.questions,
- "MAX_QUERIES_PER_HOUR", false);
- add_user_option(&global, acl_user->user_resource.updates,
- "MAX_UPDATES_PER_HOUR", false);
- add_user_option(&global, acl_user->user_resource.conn_per_hour,
- "MAX_CONNECTIONS_PER_HOUR", false);
- add_user_option(&global, acl_user->user_resource.user_conn,
- "MAX_USER_CONNECTIONS", true);
- add_user_option(&global, acl_user->user_resource.max_statement_time,
- "MAX_STATEMENT_TIME");
- }
- }
+ add_user_parameters(&global, (ACL_USER *)acl_entry, (want_access & GRANT_ACL));
protocol->prepare_for_resend();
protocol->store(global.ptr(),global.length(),global.charset());
@@ -8513,73 +9237,6 @@ static int check_role_is_granted_callback(ACL_USER_BASE *grantee, void *data)
}
/*
- Initialize a TABLE_LIST array and open grant tables
-
- All tables will be opened with the same lock type, either read or write.
-
- @retval 1 replication filters matched. Abort the operation, but return OK (!)
- @retval 0 tables were opened successfully
- @retval -1 error, tables could not be opened
-*/
-
-static int open_grant_tables(THD *thd, TABLE_LIST *tables,
- enum thr_lock_type lock_type, int tables_to_open)
-{
- DBUG_ENTER("open_grant_tables");
-
- /*
- We can read privilege tables even when !initialized.
- This can be acl_load() - server startup or FLUSH PRIVILEGES
- */
- if (lock_type >= TL_WRITE_ALLOW_WRITE && !initialized)
- {
- my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables");
- DBUG_RETURN(-1);
- }
-
- int prev= -1;
- for (int cur=TABLES_MAX-1, mask= 1 << cur; mask; cur--, mask >>= 1)
- {
- if ((tables_to_open & mask) == 0)
- {
- tables[cur].table= NULL;
- continue;
- }
- tables[cur].init_one_table(C_STRING_WITH_LEN("mysql"),
- acl_table_names[cur].str,
- acl_table_names[cur].length,
- acl_table_names[cur].str, lock_type);
- tables[cur].open_type= OT_BASE_ONLY;
- if (lock_type >= TL_WRITE_ALLOW_WRITE)
- tables[cur].updating= 1;
- if (cur >= FIRST_OPTIONAL_TABLE)
- tables[cur].open_strategy= TABLE_LIST::OPEN_IF_EXISTS;
- if (prev != -1)
- tables[cur].next_local= tables[cur].next_global= & tables[prev];
- prev= cur;
- }
-
-#ifdef HAVE_REPLICATION
- if (lock_type >= TL_WRITE_ALLOW_WRITE && thd->slave_thread && !thd->spcont)
- {
- /*
- GRANT and REVOKE are applied the slave in/exclusion rules as they are
- some kind of updates to the mysql.% tables.
- */
- Rpl_filter *rpl_filter= thd->system_thread_info.rpl_sql_info->rpl_filter;
- if (rpl_filter->is_on() && !rpl_filter->tables_ok(0, tables))
- DBUG_RETURN(1);
- }
-#endif
-
- if (open_and_lock_tables(thd, tables + prev, FALSE,
- MYSQL_LOCK_IGNORE_TIMEOUT))
- DBUG_RETURN(-1);
-
- DBUG_RETURN(0);
-}
-
-/*
Modify a privilege table.
SYNOPSIS
@@ -8718,8 +9375,8 @@ static int handle_roles_mappings_table(TABLE *table, bool drop,
SYNOPSIS
handle_grant_table()
- tables The array with the four open tables.
- table_no The number of the table to handle (0..4).
+ grant_table An open grant table handle.
+ which_table Which grant table to handle.
drop If user_from is to be dropped.
    user_from             The user to be searched/dropped/renamed.
user_to The new name for the user if to be renamed,
@@ -8737,18 +9394,21 @@ static int handle_roles_mappings_table(TABLE *table, bool drop,
> 0 At least one record matched.
0 OK, but no record matched.
< 0 Error.
+
+ TODO(cvicentiu) refactor handle_grant_table to use
+ Grant_table_base instead of TABLE directly.
*/
-static int handle_grant_table(THD *thd, TABLE_LIST *tables,
- enum enum_acl_tables table_no, bool drop,
+static int handle_grant_table(THD *thd, const Grant_table_base& grant_table,
+ enum enum_acl_tables which_table, bool drop,
LEX_USER *user_from, LEX_USER *user_to)
{
int result= 0;
int error;
- TABLE *table= tables[table_no].table;
+ TABLE *table= grant_table.table();
Field *host_field= table->field[0];
- Field *user_field= table->field[table_no == USER_TABLE ||
- table_no == PROXIES_PRIV_TABLE ? 1 : 2];
+ Field *user_field= table->field[which_table == USER_TABLE ||
+ which_table == PROXIES_PRIV_TABLE ? 1 : 2];
const char *host_str= user_from->host.str;
const char *user_str= user_from->user.str;
const char *host;
@@ -8757,14 +9417,14 @@ static int handle_grant_table(THD *thd, TABLE_LIST *tables,
uint key_prefix_length;
DBUG_ENTER("handle_grant_table");
- if (table_no == ROLES_MAPPING_TABLE)
+ if (which_table == ROLES_MAPPING_TABLE)
{
result= handle_roles_mappings_table(table, drop, user_from, user_to);
DBUG_RETURN(result);
}
table->use_all_columns();
- if (table_no == USER_TABLE) // mysql.user table
+ if (which_table == USER_TABLE) // mysql.user table
{
/*
The 'user' table has an unique index on (host, user).
@@ -8789,7 +9449,8 @@ static int handle_grant_table(THD *thd, TABLE_LIST *tables,
HA_READ_KEY_EXACT);
if (!error && !*host_str)
{ // verify that we got a role or a user, as needed
- if (check_is_role(table) != user_from->is_role())
+ if (static_cast<const User_table&>(grant_table).check_is_role() !=
+ user_from->is_role())
error= HA_ERR_KEY_NOT_FOUND;
}
if (error)
@@ -8840,7 +9501,7 @@ static int handle_grant_table(THD *thd, TABLE_LIST *tables,
user= safe_str(get_field(thd->mem_root, user_field));
#ifdef EXTRA_DEBUG
- if (table_no != PROXIES_PRIV_TABLE)
+ if (which_table != PROXIES_PRIV_TABLE)
{
DBUG_PRINT("loop",("scan fields: '%s'@'%s' '%s' '%s' '%s'",
user, host,
@@ -9259,7 +9920,7 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop,
< 0 Error.
*/
-static int handle_grant_data(THD *thd, TABLE_LIST *tables, bool drop,
+static int handle_grant_data(THD *thd, Grant_tables& tables, bool drop,
LEX_USER *user_from, LEX_USER *user_to)
{
int result= 0;
@@ -9282,7 +9943,8 @@ static int handle_grant_data(THD *thd, TABLE_LIST *tables, bool drop,
}
/* Handle db table. */
- if ((found= handle_grant_table(thd, tables, DB_TABLE, drop, user_from,
+ if ((found= handle_grant_table(thd, tables.db_table(),
+ DB_TABLE, drop, user_from,
user_to)) < 0)
{
/* Handle of table failed, don't touch the in-memory array. */
@@ -9303,7 +9965,8 @@ static int handle_grant_data(THD *thd, TABLE_LIST *tables, bool drop,
}
/* Handle stored routines table. */
- if ((found= handle_grant_table(thd, tables, PROCS_PRIV_TABLE, drop,
+ if ((found= handle_grant_table(thd, tables.procs_priv_table(),
+ PROCS_PRIV_TABLE, drop,
user_from, user_to)) < 0)
{
/* Handle of table failed, don't touch in-memory array. */
@@ -9332,7 +9995,8 @@ static int handle_grant_data(THD *thd, TABLE_LIST *tables, bool drop,
}
/* Handle tables table. */
- if ((found= handle_grant_table(thd, tables, TABLES_PRIV_TABLE, drop,
+ if ((found= handle_grant_table(thd, tables.tables_priv_table(),
+ TABLES_PRIV_TABLE, drop,
user_from, user_to)) < 0)
{
/* Handle of table failed, don't touch columns and in-memory array. */
@@ -9349,7 +10013,8 @@ static int handle_grant_data(THD *thd, TABLE_LIST *tables, bool drop,
}
/* Handle columns table. */
- if ((found= handle_grant_table(thd, tables, COLUMNS_PRIV_TABLE, drop,
+ if ((found= handle_grant_table(thd, tables.columns_priv_table(),
+ COLUMNS_PRIV_TABLE, drop,
user_from, user_to)) < 0)
{
/* Handle of table failed, don't touch the in-memory array. */
@@ -9367,9 +10032,10 @@ static int handle_grant_data(THD *thd, TABLE_LIST *tables, bool drop,
}
/* Handle proxies_priv table. */
- if (tables[PROXIES_PRIV_TABLE].table)
+ if (tables.proxies_priv_table().table_exists())
{
- if ((found= handle_grant_table(thd, tables, PROXIES_PRIV_TABLE, drop,
+ if ((found= handle_grant_table(thd, tables.proxies_priv_table(),
+ PROXIES_PRIV_TABLE, drop,
user_from, user_to)) < 0)
{
/* Handle of table failed, don't touch the in-memory array. */
@@ -9387,9 +10053,10 @@ static int handle_grant_data(THD *thd, TABLE_LIST *tables, bool drop,
}
/* Handle roles_mapping table. */
- if (tables[ROLES_MAPPING_TABLE].table)
+ if (tables.roles_mapping_table().table_exists())
{
- if ((found= handle_grant_table(thd, tables, ROLES_MAPPING_TABLE, drop,
+ if ((found= handle_grant_table(thd, tables.roles_mapping_table(),
+ ROLES_MAPPING_TABLE, drop,
user_from, user_to)) < 0)
{
/* Handle of table failed, don't touch the in-memory array. */
@@ -9407,8 +10074,8 @@ static int handle_grant_data(THD *thd, TABLE_LIST *tables, bool drop,
}
/* Handle user table. */
- if ((found= handle_grant_table(thd, tables, USER_TABLE, drop, user_from,
- user_to)) < 0)
+ if ((found= handle_grant_table(thd, tables.user_table(), USER_TABLE,
+ drop, user_from, user_to)) < 0)
{
/* Handle of table failed, don't touch the in-memory array. */
result= -1;
@@ -9447,7 +10114,6 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
String wrong_users;
LEX_USER *user_name;
List_iterator <LEX_USER> user_list(list);
- TABLE_LIST tables[TABLES_MAX];
bool binlog= false;
bool some_users_dropped= false;
DBUG_ENTER("mysql_create_user");
@@ -9457,10 +10123,11 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
DBUG_RETURN(TRUE);
/* CREATE USER may be skipped on replication client. */
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping)))
+ Grant_tables tables(Table_user | Table_db |
+ Table_tables_priv | Table_columns_priv |
+ Table_procs_priv | Table_proxies_priv |
+ Table_roles_mapping, TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
mysql_rwlock_wrlock(&LOCK_grant);
@@ -9543,7 +10210,7 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
}
}
- if (replace_user_table(thd, tables[USER_TABLE].table, *user_name, 0, 0, 1, 0))
+ if (replace_user_table(thd, tables.user_table(), *user_name, 0, 0, 1, 0))
{
append_user(thd, &wrong_users, user_name);
result= TRUE;
@@ -9565,7 +10232,9 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
if (grantee)
add_role_user_mapping(grantee, role);
- if (replace_roles_mapping_table(tables[ROLES_MAPPING_TABLE].table,
+ /* TODO(cvicentiu) refactor replace_roles_mapping_table to use
+ Roles_mapping_table instead of TABLE directly. */
+ if (replace_roles_mapping_table(tables.roles_mapping_table().table(),
&thd->lex->definer->user,
&thd->lex->definer->host,
&user_name->user, true,
@@ -9625,17 +10294,17 @@ bool mysql_drop_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
String wrong_users;
LEX_USER *user_name, *tmp_user_name;
List_iterator <LEX_USER> user_list(list);
- TABLE_LIST tables[TABLES_MAX];
bool binlog= false;
- ulonglong old_sql_mode= thd->variables.sql_mode;
+ sql_mode_t old_sql_mode= thd->variables.sql_mode;
DBUG_ENTER("mysql_drop_user");
DBUG_PRINT("entry", ("Handle as %s", handle_as_role ? "role" : "user"));
/* DROP USER may be skipped on replication client. */
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping)))
+ Grant_tables tables(Table_user | Table_db |
+ Table_tables_priv | Table_columns_priv |
+ Table_procs_priv | Table_proxies_priv |
+ Table_roles_mapping, TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
@@ -9737,15 +10406,15 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
LEX_USER *user_from, *tmp_user_from;
LEX_USER *user_to, *tmp_user_to;
List_iterator <LEX_USER> user_list(list);
- TABLE_LIST tables[TABLES_MAX];
bool some_users_renamed= FALSE;
DBUG_ENTER("mysql_rename_user");
/* RENAME USER may be skipped on replication client. */
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping)))
+ Grant_tables tables(Table_user | Table_db |
+ Table_tables_priv | Table_columns_priv |
+ Table_procs_priv | Table_proxies_priv |
+ Table_roles_mapping, TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -9807,6 +10476,80 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
DBUG_RETURN(result);
}
+/*
+ Alter a user's connection and resource settings.
+
+ SYNOPSIS
+ mysql_alter_user()
+ thd The current thread.
+ list The users to alter.
+
+ RETURN
+ > 0 Error. Error message already sent.
+ 0 OK.
+*/
+int mysql_alter_user(THD* thd, List<LEX_USER> &users_list)
+{
+ DBUG_ENTER("mysql_alter_user");
+ int result= 0;
+ String wrong_users;
+ bool some_users_altered= false;
+
+ /* The only table we're altering is the user table. */
+ Grant_tables tables(Table_user, TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
+ DBUG_RETURN(result != 1);
+
+ /* Lock ACL data structures until we finish altering all users. */
+ mysql_rwlock_wrlock(&LOCK_grant);
+ mysql_mutex_lock(&acl_cache->lock);
+
+ LEX_USER *tmp_lex_user;
+ List_iterator<LEX_USER> users_list_iterator(users_list);
+ while ((tmp_lex_user= users_list_iterator++))
+ {
+ LEX_USER* lex_user= get_current_user(thd, tmp_lex_user, false);
+ if (!lex_user ||
+ fix_lex_user(thd, lex_user) ||
+ replace_user_table(thd, tables.user_table(), *lex_user, 0,
+ false, false, true))
+ {
+ thd->clear_error();
+ append_user(thd, &wrong_users, tmp_lex_user);
+ result= TRUE;
+ continue;
+ }
+ some_users_altered= true;
+ }
+
+ /* Unlock ACL data structures. */
+ mysql_mutex_unlock(&acl_cache->lock);
+ mysql_rwlock_unlock(&LOCK_grant);
+
+ if (result)
+ {
+ /* 'if exists' flag leads to warnings instead of errors. */
+ if (thd->lex->create_info.if_exists())
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_CANNOT_USER,
+ ER_THD(thd, ER_CANNOT_USER),
+ "ALTER USER", wrong_users.c_ptr_safe());
+ result= FALSE;
+ }
+ else
+ {
+ my_error(ER_CANNOT_USER, MYF(0),
+ "ALTER USER",
+ wrong_users.c_ptr_safe());
+ }
+ }
+
+ if (some_users_altered)
+ result|= write_bin_log(thd, FALSE, thd->query(),
+ thd->query_length());
+ DBUG_RETURN(result);
+}
/*
Revoke all privileges from a list of users.
@@ -9827,13 +10570,13 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
uint counter, revoked, is_proc;
int result;
ACL_DB *acl_db;
- TABLE_LIST tables[TABLES_MAX];
DBUG_ENTER("mysql_revoke_all");
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping)))
+ Grant_tables tables(Table_user | Table_db |
+ Table_tables_priv | Table_columns_priv |
+ Table_procs_priv | Table_proxies_priv |
+ Table_roles_mapping, TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -9859,7 +10602,7 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
continue;
}
- if (replace_user_table(thd, tables[USER_TABLE].table, *lex_user,
+ if (replace_user_table(thd, tables.user_table(), *lex_user,
~(ulong)0, 1, 0, 0))
{
result= -1;
@@ -9886,8 +10629,10 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
if (!strcmp(lex_user->user.str, user) &&
!strcmp(lex_user->host.str, host))
{
- if (!replace_db_table(tables[DB_TABLE].table, acl_db->db, *lex_user,
- ~(ulong)0, 1))
+ /* TODO(cvicentiu) refactor replace_db_table to use
+ Db_table instead of TABLE directly. */
+ if (!replace_db_table(tables.db_table().table(), acl_db->db, *lex_user,
+ ~(ulong)0, 1))
{
/*
Don't increment counter as replace_db_table deleted the
@@ -9916,9 +10661,11 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
if (!strcmp(lex_user->user.str,user) &&
!strcmp(lex_user->host.str, host))
{
+      /* TODO(cvicentiu) refactor replace_table_table to use
+         Tables_priv_table instead of TABLE directly. */
if (replace_table_table(thd, grant_table,
- tables[TABLES_PRIV_TABLE].table,
- *lex_user, grant_table->db,
+ tables.tables_priv_table().table(),
+ *lex_user, grant_table->db,
grant_table->tname, ~(ulong)0, 0, 1))
{
result= -1;
@@ -9931,8 +10678,10 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
continue;
}
List<LEX_COLUMN> columns;
+      /* TODO(cvicentiu) refactor replace_column_table to use
+         Columns_priv_table instead of TABLE directly. */
if (!replace_column_table(grant_table,
- tables[COLUMNS_PRIV_TABLE].table,
+ tables.columns_priv_table().table(),
*lex_user, columns, grant_table->db,
grant_table->tname, ~(ulong)0, 1))
{
@@ -9956,20 +10705,21 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
user= safe_str(grant_proc->user);
host= safe_str(grant_proc->host.hostname);
- if (!strcmp(lex_user->user.str,user) &&
+ if (!strcmp(lex_user->user.str,user) &&
!strcmp(lex_user->host.str, host))
- {
- if (replace_routine_table(thd, grant_proc,
- tables[PROCS_PRIV_TABLE].table, *lex_user,
+ {
+ if (replace_routine_table(thd, grant_proc,
+ tables.procs_priv_table().table(),
+ *lex_user,
grant_proc->db, grant_proc->tname,
is_proc, ~(ulong)0, 1) == 0)
- {
- revoked= 1;
- continue;
- }
- result= -1; // Something went wrong
- }
- counter++;
+ {
+ revoked= 1;
+ continue;
+ }
+ result= -1; // Something went wrong
+ }
+ counter++;
}
} while (revoked);
@@ -9995,7 +10745,9 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
ROLE_GRANT_PAIR *pair = find_role_grant_pair(&lex_user->user,
&lex_user->host,
&role_grant->user);
- if (replace_roles_mapping_table(tables[ROLES_MAPPING_TABLE].table,
+ /* TODO(cvicentiu) refactor replace_roles_mapping_table to use
+ Roles_mapping_table instead of TABLE directly. */
+ if (replace_roles_mapping_table(tables.roles_mapping_table().table(),
&lex_user->user, &lex_user->host,
&role_grant->user, false, pair, true))
{
@@ -10065,7 +10817,7 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl);
@@ -10080,12 +10832,12 @@ Silence_routine_definer_errors::handle_condition(
THD *thd,
uint sql_errno,
const char*,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
- if (level == Sql_condition::WARN_LEVEL_ERROR)
+ if (*level == Sql_condition::WARN_LEVEL_ERROR)
{
switch (sql_errno)
{
@@ -10125,15 +10877,15 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name,
{
uint counter, revoked;
int result;
- TABLE_LIST tables[TABLES_MAX];
HASH *hash= is_proc ? &proc_priv_hash : &func_priv_hash;
Silence_routine_definer_errors error_handler;
DBUG_ENTER("sp_revoke_privileges");
- if ((result= open_grant_tables(thd, tables, TL_WRITE, Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping)))
+ Grant_tables tables(Table_user | Table_db |
+ Table_tables_priv | Table_columns_priv |
+ Table_procs_priv | Table_proxies_priv |
+ Table_roles_mapping, TL_WRITE);
+ if ((result= tables.open_and_lock(thd)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -10158,9 +10910,9 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name,
lex_user.user.length= strlen(grant_proc->user);
lex_user.host.str= safe_str(grant_proc->host.hostname);
lex_user.host.length= strlen(lex_user.host.str);
- if (replace_routine_table(thd, grant_proc,
- tables[PROCS_PRIV_TABLE].table, lex_user,
- grant_proc->db, grant_proc->tname,
+ if (replace_routine_table(thd, grant_proc,
+ tables.procs_priv_table().table(), lex_user,
+ grant_proc->db, grant_proc->tname,
is_proc, ~(ulong)0, 1) == 0)
{
revoked= 1;
@@ -10623,7 +11375,7 @@ int fill_schema_applicable_roles(THD *thd, TABLE_LIST *tables, COND *cond)
int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr)
{
- reg3 int flag;
+ int flag;
DBUG_ENTER("wild_case_compare");
DBUG_PRINT("enter",("str: '%s' wildstr: '%s'",str,wildstr));
while (*wildstr)
@@ -11404,7 +12156,13 @@ static bool send_server_handshake_packet(MPVIO_EXT *mpvio,
data_len= SCRAMBLE_LENGTH;
}
- end= strxnmov(end, SERVER_VERSION_LENGTH, RPL_VERSION_HACK, server_version, NullS) + 1;
+ /* When server version is specified in config file, don't include
+ the replication hack prefix. */
+ if (using_custom_server_version)
+ end= strnmov(end, server_version, SERVER_VERSION_LENGTH) + 1;
+ else
+ end= strxnmov(end, SERVER_VERSION_LENGTH, RPL_VERSION_HACK, server_version, NullS) + 1;
+
int4store((uchar*) end, mpvio->auth_info.thd->thread_id);
end+= 4;
@@ -11424,7 +12182,8 @@ static bool send_server_handshake_packet(MPVIO_EXT *mpvio,
int2store(end+5, thd->client_capabilities >> 16);
end[7]= data_len;
DBUG_EXECUTE_IF("poison_srv_handshake_scramble_len", end[7]= -100;);
- bzero(end + 8, 10);
+ bzero(end + 8, 6);
+ int4store(end + 14, thd->client_capabilities >> 32);
end+= 18;
/* write scramble tail */
end= (char*) memcpy(end, data + SCRAMBLE_LENGTH_323,
@@ -11503,7 +12262,7 @@ static bool send_plugin_request_packet(MPVIO_EXT *mpvio,
const char *client_auth_plugin=
((st_mysql_auth *) (plugin_decl(mpvio->plugin)->info))->client_auth_plugin;
- DBUG_EXECUTE_IF("auth_disconnect", { vio_close(net->vio); DBUG_RETURN(1); });
+ DBUG_EXECUTE_IF("auth_disconnect", { DBUG_RETURN(1); });
DBUG_ASSERT(client_auth_plugin);
/*
@@ -11637,7 +12396,7 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio)
static bool
read_client_connect_attrs(char **ptr, char *end, CHARSET_INFO *from_cs)
{
- size_t length;
+ ulonglong length;
char *ptr_save= *ptr;
/* not enough bytes to hold the length */
@@ -11659,10 +12418,10 @@ read_client_connect_attrs(char **ptr, char *end, CHARSET_INFO *from_cs)
return true;
#ifdef HAVE_PSI_THREAD_INTERFACE
- if (PSI_THREAD_CALL(set_thread_connect_attrs)(*ptr, length, from_cs) &&
+ if (PSI_THREAD_CALL(set_thread_connect_attrs)(*ptr, (size_t)length, from_cs) &&
current_thd->variables.log_warnings)
- sql_print_warning("Connection attributes of length %lu were truncated",
- (unsigned long) length);
+ sql_print_warning("Connection attributes of length %llu were truncated",
+ length);
#endif
return false;
}
@@ -11680,7 +12439,7 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length)
char *end= user + packet_length;
/* Safe because there is always a trailing \0 at the end of the packet */
char *passwd= strend(user) + 1;
- uint user_len= passwd - user - 1;
+ uint user_len= (uint)(passwd - user - 1);
char *db= passwd;
char db_buff[SAFE_NAME_LEN + 1]; // buffer to store db in utf8
char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8
@@ -11727,7 +12486,6 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length)
{
if (thd_init_client_charset(thd, uint2korr(next_field)))
DBUG_RETURN(1);
- thd->update_charset();
next_field+= 2;
}
@@ -11804,7 +12562,7 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length)
{
my_message(ER_UNKNOWN_COM_ERROR, ER_THD(thd, ER_UNKNOWN_COM_ERROR),
MYF(0));
- DBUG_RETURN(packet_error);
+ DBUG_RETURN(1);
}
DBUG_PRINT("info", ("client_plugin=%s, restart", client_plugin));
@@ -11841,18 +12599,27 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
*/
DBUG_ASSERT(net->read_pos[pkt_len] == 0);
- ulong client_capabilities= uint2korr(net->read_pos);
+ ulonglong client_capabilities= uint2korr(net->read_pos);
+ compile_time_assert(sizeof(client_capabilities) >= 8);
if (client_capabilities & CLIENT_PROTOCOL_41)
{
- if (pkt_len < 4)
+ if (pkt_len < 32)
return packet_error;
client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16;
+ if (!(client_capabilities & CLIENT_MYSQL))
+ {
+ // it is client with mariadb extensions
+ ulonglong ext_client_capabilities=
+ (((ulonglong)uint4korr(net->read_pos + 28)) << 32);
+ client_capabilities|= ext_client_capabilities;
+ }
}
/* Disable those bits which are not supported by the client. */
+ compile_time_assert(sizeof(thd->client_capabilities) >= 8);
thd->client_capabilities&= client_capabilities;
- DBUG_PRINT("info", ("client capabilities: %lu", thd->client_capabilities));
+ DBUG_PRINT("info", ("client capabilities: %llu", thd->client_capabilities));
if (thd->client_capabilities & CLIENT_SSL)
{
unsigned long errptr __attribute__((unused));
@@ -11880,13 +12647,10 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
if (client_capabilities & CLIENT_PROTOCOL_41)
{
- if (pkt_len < 32)
- return packet_error;
thd->max_client_packet_length= uint4korr(net->read_pos+4);
DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8]));
if (thd_init_client_charset(thd, (uint) net->read_pos[8]))
return packet_error;
- thd->update_charset();
end= (char*) net->read_pos+32;
}
else
@@ -11914,7 +12678,7 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
char *user= end;
char *passwd= strend(user)+1;
- uint user_len= passwd - user - 1, db_len;
+ uint user_len= (uint)(passwd - user - 1), db_len;
char *db= passwd;
char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8
uint dummy_errors;
@@ -11929,15 +12693,22 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
Cast *passwd to an unsigned char, so that it doesn't extend the sign for
*passwd > 127 and become 2**32-127+ after casting to uint.
*/
- uint passwd_len;
+ ulonglong len;
+ size_t passwd_len;
+
if (!(thd->client_capabilities & CLIENT_SECURE_CONNECTION))
- passwd_len= strlen(passwd);
+ len= strlen(passwd);
else if (!(thd->client_capabilities & CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA))
- passwd_len= (uchar)(*passwd++);
+ len= (uchar)(*passwd++);
else
- passwd_len= safe_net_field_length_ll((uchar**)&passwd,
+ {
+ len= safe_net_field_length_ll((uchar**)&passwd,
net->read_pos + pkt_len - (uchar*)passwd);
-
+ if (len > pkt_len)
+ return packet_error;
+ }
+
+ passwd_len= (size_t)len;
db= thd->client_capabilities & CLIENT_CONNECT_WITH_DB ?
db + passwd_len + 1 : 0;
@@ -11973,14 +12744,9 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
mostly for backward compatibility (to truncate long usernames, as
old 5.1 did)
*/
- {
- CHARSET_INFO *cs= system_charset_info;
- int err;
-
- user_len= (uint) cs->cset->well_formed_len(cs, user, user + user_len,
- username_char_length, &err);
- user[user_len]= '\0';
- }
+ user_len= Well_formed_prefix(system_charset_info, user, user_len,
+ username_char_length).length();
+ user[user_len]= '\0';
Security_context *sctx= thd->security_ctx;
@@ -12661,7 +13427,7 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
}
DBUG_PRINT("info",
- ("Capabilities: %lu packet_length: %ld Host: '%s' "
+ ("Capabilities: %llu packet_length: %ld Host: '%s' "
"Login user: '%s' Priv_user: '%s' Using password: %s "
"Access: %lu db: '%s'",
thd->client_capabilities, thd->max_client_packet_length,
diff --git a/sql/sql_acl.h b/sql/sql_acl.h
index fc3fcdc534a..a4182a4d300 100644
--- a/sql/sql_acl.h
+++ b/sql/sql_acl.h
@@ -2,6 +2,7 @@
#define SQL_ACL_INCLUDED
/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -20,35 +21,35 @@
#include "violite.h" /* SSL_type */
#include "sql_class.h" /* LEX_COLUMN */
-#define SELECT_ACL (1L << 0)
-#define INSERT_ACL (1L << 1)
-#define UPDATE_ACL (1L << 2)
-#define DELETE_ACL (1L << 3)
-#define CREATE_ACL (1L << 4)
-#define DROP_ACL (1L << 5)
-#define RELOAD_ACL (1L << 6)
-#define SHUTDOWN_ACL (1L << 7)
-#define PROCESS_ACL (1L << 8)
-#define FILE_ACL (1L << 9)
-#define GRANT_ACL (1L << 10)
-#define REFERENCES_ACL (1L << 11)
-#define INDEX_ACL (1L << 12)
-#define ALTER_ACL (1L << 13)
-#define SHOW_DB_ACL (1L << 14)
-#define SUPER_ACL (1L << 15)
-#define CREATE_TMP_ACL (1L << 16)
-#define LOCK_TABLES_ACL (1L << 17)
-#define EXECUTE_ACL (1L << 18)
-#define REPL_SLAVE_ACL (1L << 19)
-#define REPL_CLIENT_ACL (1L << 20)
-#define CREATE_VIEW_ACL (1L << 21)
-#define SHOW_VIEW_ACL (1L << 22)
-#define CREATE_PROC_ACL (1L << 23)
-#define ALTER_PROC_ACL (1L << 24)
-#define CREATE_USER_ACL (1L << 25)
-#define EVENT_ACL (1L << 26)
-#define TRIGGER_ACL (1L << 27)
-#define CREATE_TABLESPACE_ACL (1L << 28)
+#define SELECT_ACL (1UL << 0)
+#define INSERT_ACL (1UL << 1)
+#define UPDATE_ACL (1UL << 2)
+#define DELETE_ACL (1UL << 3)
+#define CREATE_ACL (1UL << 4)
+#define DROP_ACL (1UL << 5)
+#define RELOAD_ACL (1UL << 6)
+#define SHUTDOWN_ACL (1UL << 7)
+#define PROCESS_ACL (1UL << 8)
+#define FILE_ACL (1UL << 9)
+#define GRANT_ACL (1UL << 10)
+#define REFERENCES_ACL (1UL << 11)
+#define INDEX_ACL (1UL << 12)
+#define ALTER_ACL (1UL << 13)
+#define SHOW_DB_ACL (1UL << 14)
+#define SUPER_ACL (1UL << 15)
+#define CREATE_TMP_ACL (1UL << 16)
+#define LOCK_TABLES_ACL (1UL << 17)
+#define EXECUTE_ACL (1UL << 18)
+#define REPL_SLAVE_ACL (1UL << 19)
+#define REPL_CLIENT_ACL (1UL << 20)
+#define CREATE_VIEW_ACL (1UL << 21)
+#define SHOW_VIEW_ACL (1UL << 22)
+#define CREATE_PROC_ACL (1UL << 23)
+#define ALTER_PROC_ACL (1UL << 24)
+#define CREATE_USER_ACL (1UL << 25)
+#define EVENT_ACL (1UL << 26)
+#define TRIGGER_ACL (1UL << 27)
+#define CREATE_TABLESPACE_ACL (1UL << 28)
/*
don't forget to update
1. static struct show_privileges_st sys_privileges[]
@@ -57,7 +58,7 @@
4. acl_init() or whatever - to define behaviour for old privilege tables
5. sql_yacc.yy - for GRANT/REVOKE to work
*/
-#define NO_ACCESS (1L << 30)
+#define NO_ACCESS (1UL << 30)
#define DB_ACLS \
(UPDATE_ACL | SELECT_ACL | INSERT_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \
GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | \
@@ -189,8 +190,12 @@ extern LEX_STRING current_user_and_current_role;
static inline int access_denied_error_code(int passwd_used)
{
+#ifdef mysqld_error_find_printf_error_used
+ return 0;
+#else
return passwd_used == 2 ? ER_ACCESS_DENIED_NO_PASSWORD_ERROR
: ER_ACCESS_DENIED_ERROR;
+#endif
}
@@ -246,6 +251,7 @@ bool get_show_user(THD *thd, LEX_USER *lex_user, const char **username,
void mysql_show_grants_get_fields(THD *thd, List<Item> *fields,
const char *name);
bool mysql_show_grants(THD *thd, LEX_USER *user);
+bool mysql_show_create_user(THD *thd, LEX_USER *user);
int fill_schema_enabled_roles(THD *thd, TABLE_LIST *tables, COND *cond);
int fill_schema_applicable_roles(THD *thd, TABLE_LIST *tables, COND *cond);
void get_privilege_desc(char *to, uint max_length, ulong access);
@@ -253,6 +259,7 @@ void get_mqh(const char *user, const char *host, USER_CONN *uc);
bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role);
bool mysql_drop_user(THD *thd, List <LEX_USER> &list, bool handle_as_role);
bool mysql_rename_user(THD *thd, List <LEX_USER> &list);
+int mysql_alter_user(THD *thd, List <LEX_USER> &list);
bool mysql_revoke_all(THD *thd, List <LEX_USER> &list);
void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant,
const char *db, const char *table);
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 0ec6719037c..76bb65fba30 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -54,7 +54,7 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list)
DEBUG_SYNC(thd, "ha_admin_try_alter");
tmp_disable_binlog(thd); // binlogging is done by caller if wanted
- result_code= (open_temporary_tables(thd, table_list) ||
+ result_code= (thd->open_temporary_tables(table_list) ||
mysql_recreate_table(thd, table_list, false));
reenable_binlog(thd);
/*
@@ -130,7 +130,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(0);
has_mdl_lock= TRUE;
- share= tdc_acquire_share_shortlived(thd, table_list, GTS_TABLE);
+ share= tdc_acquire_share(thd, table_list, GTS_TABLE);
if (share == NULL)
DBUG_RETURN(0); // Can't open frm file
@@ -162,7 +162,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
- Run a normal repair using the new index file and the old data file
*/
- if (table->s->frm_version != FRM_VER_TRUE_VARCHAR &&
+ if (table->s->frm_version < FRM_VER_TRUE_VARCHAR &&
table->s->varchar_fields)
{
error= send_check_errmsg(thd, table_list, "repair",
@@ -260,7 +260,10 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
end:
thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0);
if (table == &tmp_table)
- closefrm(table, 1); // Free allocated memory
+ {
+ closefrm(table);
+ tdc_release_share(table->s);
+ }
/* In case of a temporary table there will be no metadata lock. */
if (error && has_mdl_lock)
thd->mdl_context.release_transactional_locks();
@@ -291,6 +294,127 @@ static inline bool table_not_corrupt_error(uint sql_errno)
sql_errno == ER_WRONG_OBJECT);
}
+#ifndef DBUG_OFF
+// Debug-only counter used to force a failure on the second call of open_only_one_table
+static int debug_fail_counter= 0;
+#endif
+
+static bool open_only_one_table(THD* thd, TABLE_LIST* table,
+ bool repair_table_use_frm,
+ bool is_view_operator_func)
+{
+ LEX *lex= thd->lex;
+ SELECT_LEX *select= &lex->select_lex;
+ TABLE_LIST *save_next_global, *save_next_local;
+ bool open_error;
+ save_next_global= table->next_global;
+ table->next_global= 0;
+ save_next_local= table->next_local;
+ table->next_local= 0;
+ select->table_list.first= table;
+ /*
+    Time zone tables and SP tables can be added to the lex->query_tables list,
+    so it has to be prepared.
+ TODO: Investigate if we can put extra tables into argument instead of
+ using lex->query_tables
+ */
+ lex->query_tables= table;
+ lex->query_tables_last= &table->next_global;
+ lex->query_tables_own_last= 0;
+
+ DBUG_EXECUTE_IF("fail_2call_open_only_one_table", {
+ if (debug_fail_counter)
+ {
+ open_error= TRUE;
+ goto dbug_err;
+ }
+ else
+ debug_fail_counter++;
+ });
+
+ /*
+ CHECK TABLE command is allowed for views as well. Check on alter flags
+ to differentiate from ALTER TABLE...CHECK PARTITION on which view is not
+ allowed.
+ */
+ if (lex->alter_info.flags & Alter_info::ALTER_ADMIN_PARTITION ||
+ !is_view_operator_func)
+ {
+ table->required_type=FRMTYPE_TABLE;
+ DBUG_ASSERT(!lex->only_view);
+ }
+ else if (lex->only_view)
+ {
+ table->required_type= FRMTYPE_VIEW;
+ }
+ else if (!lex->only_view && lex->sql_command == SQLCOM_REPAIR)
+ {
+ table->required_type= FRMTYPE_TABLE;
+ }
+
+ if (lex->sql_command == SQLCOM_CHECK ||
+ lex->sql_command == SQLCOM_REPAIR ||
+ lex->sql_command == SQLCOM_ANALYZE ||
+ lex->sql_command == SQLCOM_OPTIMIZE)
+ thd->prepare_derived_at_open= TRUE;
+ if (!thd->locked_tables_mode && repair_table_use_frm)
+ {
+ /*
+ If we're not under LOCK TABLES and we're executing REPAIR TABLE
+ USE_FRM, we need to ignore errors from open_and_lock_tables().
+ REPAIR TABLE USE_FRM is a heavy weapon used when a table is
+ critically damaged, so open_and_lock_tables() will most likely
+ report errors. Those errors are not interesting for the user
+ because it's already known that the table is badly damaged.
+ */
+
+ Diagnostics_area *da= thd->get_stmt_da();
+ Warning_info tmp_wi(thd->query_id, false, true);
+
+ da->push_warning_info(&tmp_wi);
+
+ open_error= (thd->open_temporary_tables(table) ||
+ open_and_lock_tables(thd, table, TRUE, 0));
+
+ da->pop_warning_info();
+ }
+ else
+ {
+ /*
+ It's assumed that even if it is REPAIR TABLE USE_FRM, the table
+ can be opened if we're under LOCK TABLES (otherwise LOCK TABLES
+ would fail). Thus, the only errors we could have from
+ open_and_lock_tables() are logical ones, like incorrect locking
+ mode. It does make sense for the user to see such errors.
+ */
+
+ open_error= (thd->open_temporary_tables(table) ||
+ open_and_lock_tables(thd, table, TRUE, 0));
+ }
+#ifndef DBUG_OFF
+dbug_err:
+#endif
+
+ thd->prepare_derived_at_open= FALSE;
+
+ /*
+ MERGE engine may adjust table->next_global chain, thus we have to
+ append save_next_global after merge children.
+ */
+ if (save_next_global)
+ {
+ TABLE_LIST *table_list_iterator= table;
+ while (table_list_iterator->next_global)
+ table_list_iterator= table_list_iterator->next_global;
+ table_list_iterator->next_global= save_next_global;
+ save_next_global->prev_global= &table_list_iterator->next_global;
+ }
+
+ table->next_local= save_next_local;
+
+ return open_error;
+}
+
/*
RETURN VALUES
@@ -313,7 +437,6 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
HA_CHECK_OPT *))
{
TABLE_LIST *table;
- SELECT_LEX *select= &thd->lex->select_lex;
List<Item> field_list;
Item *item;
Protocol *protocol= thd->protocol;
@@ -389,97 +512,9 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
/* open only one table from local list of command */
while (1)
{
- TABLE_LIST *save_next_global, *save_next_local;
- save_next_global= table->next_global;
- table->next_global= 0;
- save_next_local= table->next_local;
- table->next_local= 0;
- select->table_list.first= table;
- /*
- Time zone tables and SP tables can be add to lex->query_tables list,
- so it have to be prepared.
- TODO: Investigate if we can put extra tables into argument instead of
- using lex->query_tables
- */
- lex->query_tables= table;
- lex->query_tables_last= &table->next_global;
- lex->query_tables_own_last= 0;
-
- /*
- CHECK TABLE command is allowed for views as well. Check on alter flags
- to differentiate from ALTER TABLE...CHECK PARTITION on which view is
- not allowed.
- */
- if (lex->alter_info.flags & Alter_info::ALTER_ADMIN_PARTITION ||
- view_operator_func == NULL)
- {
- table->required_type=FRMTYPE_TABLE;
- DBUG_ASSERT(!lex->only_view);
- }
- else if (lex->only_view)
- {
- table->required_type= FRMTYPE_VIEW;
- }
- else if (!lex->only_view && lex->sql_command == SQLCOM_REPAIR)
- {
- table->required_type= FRMTYPE_TABLE;
- }
-
- if (lex->sql_command == SQLCOM_CHECK ||
- lex->sql_command == SQLCOM_REPAIR ||
- lex->sql_command == SQLCOM_ANALYZE ||
- lex->sql_command == SQLCOM_OPTIMIZE)
- thd->prepare_derived_at_open= TRUE;
- if (!thd->locked_tables_mode && repair_table_use_frm)
- {
- /*
- If we're not under LOCK TABLES and we're executing REPAIR TABLE
- USE_FRM, we need to ignore errors from open_and_lock_tables().
- REPAIR TABLE USE_FRM is a heavy weapon used when a table is
- critically damaged, so open_and_lock_tables() will most likely
- report errors. Those errors are not interesting for the user
- because it's already known that the table is badly damaged.
- */
-
- Diagnostics_area *da= thd->get_stmt_da();
- Warning_info tmp_wi(thd->query_id, false, true);
-
- da->push_warning_info(&tmp_wi);
-
- open_error= (open_temporary_tables(thd, table) ||
- open_and_lock_tables(thd, table, TRUE, 0));
-
- da->pop_warning_info();
- }
- else
- {
- /*
- It's assumed that even if it is REPAIR TABLE USE_FRM, the table
- can be opened if we're under LOCK TABLES (otherwise LOCK TABLES
- would fail). Thus, the only errors we could have from
- open_and_lock_tables() are logical ones, like incorrect locking
- mode. It does make sense for the user to see such errors.
- */
-
- open_error= (open_temporary_tables(thd, table) ||
- open_and_lock_tables(thd, table, TRUE, 0));
- }
- thd->prepare_derived_at_open= FALSE;
-
- /*
- MERGE engine may adjust table->next_global chain, thus we have to
- append save_next_global after merge children.
- */
- if (save_next_global)
- {
- TABLE_LIST *table_list_iterator= table;
- while (table_list_iterator->next_global)
- table_list_iterator= table_list_iterator->next_global;
- table_list_iterator->next_global= save_next_global;
- save_next_global->prev_global= &table_list_iterator->next_global;
- }
-
- table->next_local= save_next_local;
+ open_error= open_only_one_table(thd, table,
+ repair_table_use_frm,
+ (view_operator_func != NULL));
thd->open_options&= ~extra_open_options;
/*
@@ -552,7 +587,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
}
}
#endif
- DBUG_PRINT("admin", ("table: 0x%lx", (long) table->table));
+ DBUG_PRINT("admin", ("table: %p", table->table));
if (prepare_func)
{
@@ -720,7 +755,6 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
if (operator_func == &handler::ha_analyze)
{
TABLE *tab= table->table;
- Field **field_ptr= tab->field;
if (lex->with_persistent_for_clause &&
tab->s->table_category != TABLE_CATEGORY_USER)
@@ -732,8 +766,69 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
(get_use_stat_tables_mode(thd) > NEVER ||
lex->with_persistent_for_clause));
- if (collect_eis)
+
+ if (!lex->index_list)
+ {
+ tab->keys_in_use_for_query.init(tab->s->keys);
+ }
+ else
{
+ int pos;
+ LEX_STRING *index_name;
+ List_iterator_fast<LEX_STRING> it(*lex->index_list);
+
+ tab->keys_in_use_for_query.clear_all();
+ while ((index_name= it++))
+ {
+ if (tab->s->keynames.type_names == 0 ||
+ (pos= find_type(&tab->s->keynames, index_name->str,
+ index_name->length, 1)) <= 0)
+ {
+ compl_result_code= result_code= HA_ADMIN_INVALID;
+ break;
+ }
+ tab->keys_in_use_for_query.set_bit(--pos);
+ }
+ }
+ }
+
+ if (result_code == HA_ADMIN_OK)
+ {
+ DBUG_PRINT("admin", ("calling operator_func '%s'", operator_name));
+ THD_STAGE_INFO(thd, stage_executing);
+ result_code = (table->table->file->*operator_func)(thd, check_opt);
+ THD_STAGE_INFO(thd, stage_sending_data);
+ DBUG_PRINT("admin", ("operator_func returned: %d", result_code));
+ }
+
+ if (compl_result_code == HA_ADMIN_OK && collect_eis)
+ {
+ /*
+ Here we close and reopen table in read mode because operation of
+ collecting statistics is long and it will be better do not block
+ the table completely.
+ InnoDB/XtraDB will allow read/write and MyISAM read/insert.
+ */
+ trans_commit_stmt(thd);
+ trans_commit(thd);
+ thd->open_options|= extra_open_options;
+ close_thread_tables(thd);
+ table->table= NULL;
+ thd->mdl_context.release_transactional_locks();
+ table->mdl_request.init(MDL_key::TABLE, table->db, table->table_name,
+ MDL_SHARED_NO_READ_WRITE, MDL_TRANSACTION);
+ table->mdl_request.set_type(MDL_SHARED_READ);
+
+ table->lock_type= TL_READ;
+ DBUG_ASSERT(view_operator_func == NULL);
+ open_error= open_only_one_table(thd, table,
+ repair_table_use_frm, FALSE);
+ thd->open_options&= ~extra_open_options;
+
+ if (!open_error)
+ {
+ TABLE *tab= table->table;
+ Field **field_ptr= tab->field;
if (!lex->column_list)
{
bitmap_clear_all(tab->read_set);
@@ -743,7 +838,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
if (type < MYSQL_TYPE_MEDIUM_BLOB ||
type > MYSQL_TYPE_BLOB)
bitmap_set_bit(tab->read_set, fields);
- else if (collect_eis)
+ else
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_NO_EIS_FOR_FIELD,
ER_THD(thd, ER_NO_EIS_FOR_FIELD),
@@ -771,61 +866,23 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
if (type < MYSQL_TYPE_MEDIUM_BLOB ||
type > MYSQL_TYPE_BLOB)
bitmap_set_bit(tab->read_set, pos);
- else if (collect_eis)
+ else
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_NO_EIS_FOR_FIELD,
ER_THD(thd, ER_NO_EIS_FOR_FIELD),
column_name->str);
}
- tab->file->column_bitmaps_signal();
+ tab->file->column_bitmaps_signal();
}
+ if (!(compl_result_code=
+ alloc_statistics_for_table(thd, table->table)) &&
+ !(compl_result_code=
+ collect_statistics_for_table(thd, table->table)))
+ compl_result_code= update_statistics_for_table(thd, table->table);
}
else
- {
- DBUG_ASSERT(!lex->column_list);
- }
-
- if (!lex->index_list)
- {
- tab->keys_in_use_for_query.init(tab->s->keys);
- }
- else
- {
- int pos;
- LEX_STRING *index_name;
- List_iterator_fast<LEX_STRING> it(*lex->index_list);
-
- tab->keys_in_use_for_query.clear_all();
- while ((index_name= it++))
- {
- if (tab->s->keynames.type_names == 0 ||
- (pos= find_type(&tab->s->keynames, index_name->str,
- index_name->length, 1)) <= 0)
- {
- compl_result_code= result_code= HA_ADMIN_INVALID;
- break;
- }
- tab->keys_in_use_for_query.set_bit(--pos);
- }
- }
- }
-
- if (result_code == HA_ADMIN_OK)
- {
- DBUG_PRINT("admin", ("calling operator_func '%s'", operator_name));
- THD_STAGE_INFO(thd, stage_executing);
- result_code = (table->table->file->*operator_func)(thd, check_opt);
- THD_STAGE_INFO(thd, stage_sending_data);
- DBUG_PRINT("admin", ("operator_func returned: %d", result_code));
- }
+ compl_result_code= HA_ADMIN_FAILED;
- if (compl_result_code == HA_ADMIN_OK && collect_eis)
- {
- if (!(compl_result_code=
- alloc_statistics_for_table(thd, table->table)) &&
- !(compl_result_code=
- collect_statistics_for_table(thd, table->table)))
- compl_result_code= update_statistics_for_table(thd, table->table);
if (compl_result_code)
result_code= HA_ADMIN_FAILED;
else
@@ -978,7 +1035,7 @@ send_result_message:
table->mdl_request.ticket= NULL;
DEBUG_SYNC(thd, "ha_admin_open_ltable");
table->mdl_request.set_type(MDL_SHARED_WRITE);
- if (!open_temporary_tables(thd, table) &&
+ if (!thd->open_temporary_tables(table) &&
(table->table= open_ltable(thd, table, lock_type, 0)))
{
uint save_flags;
diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc
index ddac271146e..f6db1847063 100644
--- a/sql/sql_alter.cc
+++ b/sql/sql_alter.cc
@@ -1,4 +1,5 @@
/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,7 +17,6 @@
#include "sql_parse.h" // check_access
#include "sql_table.h" // mysql_alter_table,
// mysql_exchange_partition
-#include "sql_base.h" // open_temporary_tables
#include "sql_alter.h"
#include "wsrep_mysqld.h"
@@ -25,6 +25,7 @@ Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root)
alter_list(rhs.alter_list, mem_root),
key_list(rhs.key_list, mem_root),
create_list(rhs.create_list, mem_root),
+ check_constraint_list(rhs.check_constraint_list, mem_root),
flags(rhs.flags),
keys_onoff(rhs.keys_onoff),
partition_names(rhs.partition_names, mem_root),
@@ -303,11 +304,9 @@ bool Sql_cmd_alter_table::execute(THD *thd)
create_info.data_file_name= create_info.index_file_name= NULL;
#ifdef WITH_WSREP
- TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl);
-
if (WSREP(thd) &&
(!thd->is_current_stmt_binlog_format_row() ||
- !find_temporary_table(thd, first_table)))
+ !thd->find_temporary_table(first_table)))
{
WSREP_TO_ISOLATION_BEGIN_ALTER(((lex->name.str) ? select_lex->db : NULL),
((lex->name.str) ? lex->name.str : NULL),
diff --git a/sql/sql_alter.h b/sql/sql_alter.h
index a4505f1d6c1..e33efc9476f 100644
--- a/sql/sql_alter.h
+++ b/sql/sql_alter.h
@@ -121,6 +121,9 @@ public:
// Set for ADD [COLUMN] FIRST | AFTER
static const uint ALTER_COLUMN_ORDER = 1L << 25;
+ static const uint ALTER_ADD_CHECK_CONSTRAINT = 1L << 27;
+ static const uint ALTER_DROP_CHECK_CONSTRAINT = 1L << 28;
+ static const uint ALTER_RENAME_COLUMN = 1L << 29;
enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
@@ -169,6 +172,9 @@ public:
List<Key> key_list;
// List of columns, used by both CREATE and ALTER TABLE.
List<Create_field> create_list;
+
+ static const uint CHECK_CONSTRAINT_IF_NOT_EXISTS= 1;
+ List<Virtual_column_info> check_constraint_list;
// Type of ALTER TABLE operation.
uint flags;
// Enable or disable keys.
@@ -197,6 +203,7 @@ public:
alter_list.empty();
key_list.empty();
create_list.empty();
+ check_constraint_list.empty();
flags= 0;
keys_onoff= LEAVE_AS_IS;
num_parts= 0;
diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc
index 45f4f87f172..8b3e945e891 100644
--- a/sql/sql_analyse.cc
+++ b/sql/sql_analyse.cc
@@ -134,7 +134,7 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result,
}
if (!(pc->f_info=
- (field_info**)sql_alloc(sizeof(field_info*)*field_list.elements)))
+ (field_info**) thd->alloc(sizeof(field_info*) * field_list.elements)))
goto err;
pc->f_end = pc->f_info + field_list.elements;
pc->fields = field_list;
@@ -409,7 +409,7 @@ void field_real::add()
if (num == 0.0)
empty++;
- if ((decs = decimals()) == NOT_FIXED_DEC)
+ if ((decs = decimals()) >= FLOATING_POINT_DECIMALS)
{
length= sprintf(buff, "%g", num);
if (rint(num) != num)
@@ -892,7 +892,7 @@ void field_real::get_opt_type(String *answer,
if (!max_notzero_dec_len)
{
- int len= (int) max_length - ((item->decimals == NOT_FIXED_DEC) ?
+ int len= (int) max_length - ((item->decimals >= FLOATING_POINT_DECIMALS) ?
0 : (item->decimals + 1));
if (min_arg >= -128 && max_arg <= (min_arg >= 0 ? 255 : 127))
@@ -912,7 +912,7 @@ void field_real::get_opt_type(String *answer,
if (min_arg >= 0)
answer->append(STRING_WITH_LEN(" UNSIGNED"));
}
- else if (item->decimals == NOT_FIXED_DEC)
+ else if (item->decimals >= FLOATING_POINT_DECIMALS)
{
if (min_arg >= -FLT_MAX && max_arg <= FLT_MAX)
answer->append(STRING_WITH_LEN("FLOAT"));
diff --git a/sql/sql_analyze_stmt.cc b/sql/sql_analyze_stmt.cc
index 68299d024fd..8e67267f6a0 100644
--- a/sql/sql_analyze_stmt.cc
+++ b/sql/sql_analyze_stmt.cc
@@ -26,7 +26,11 @@
void Filesort_tracker::print_json_members(Json_writer *writer)
{
const char *varied_str= "(varied across executions)";
- writer->add_member("r_loops").add_ll(get_r_loops());
+
+ if (!get_r_loops())
+ writer->add_member("r_loops").add_null();
+ else
+ writer->add_member("r_loops").add_ll(get_r_loops());
if (get_r_loops() && time_tracker.timed)
{
@@ -36,22 +40,29 @@ void Filesort_tracker::print_json_members(Json_writer *writer)
if (r_limit != HA_POS_ERROR)
{
writer->add_member("r_limit");
- if (r_limit == 0)
+ if (!get_r_loops())
+ writer->add_null();
+ else if (r_limit == 0)
writer->add_str(varied_str);
else
writer->add_ll((longlong) rint(r_limit));
}
writer->add_member("r_used_priority_queue");
- if (r_used_pq == get_r_loops())
+ if (!get_r_loops())
+ writer->add_null();
+ else if (r_used_pq == get_r_loops())
writer->add_bool(true);
else if (r_used_pq == 0)
writer->add_bool(false);
else
writer->add_str(varied_str);
- writer->add_member("r_output_rows").add_ll((longlong) rint(r_output_rows /
- get_r_loops()));
+ if (!get_r_loops())
+ writer->add_member("r_output_rows").add_null();
+ else
+ writer->add_member("r_output_rows").add_ll((longlong) rint(r_output_rows /
+ get_r_loops()));
if (sort_passes)
{
@@ -69,75 +80,3 @@ void Filesort_tracker::print_json_members(Json_writer *writer)
}
}
-
-/*
- Report that we are doing a filesort.
- @return
- Tracker object to be used with filesort
-*/
-
-Filesort_tracker *Sort_and_group_tracker::report_sorting(THD *thd)
-{
- DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS);
-
- if (total_actions)
- {
- /* This is not the first execution. Check */
- if (qep_actions[cur_action] != EXPL_ACTION_FILESORT)
- {
- varied_executions= true;
- cur_action++;
- if (!dummy_fsort_tracker)
- dummy_fsort_tracker= new (thd->mem_root) Filesort_tracker(is_analyze);
- return dummy_fsort_tracker;
- }
- return qep_actions_data[cur_action++].filesort_tracker;
- }
-
- Filesort_tracker *fs_tracker= new(thd->mem_root)Filesort_tracker(is_analyze);
- qep_actions_data[cur_action].filesort_tracker= fs_tracker;
- qep_actions[cur_action++]= EXPL_ACTION_FILESORT;
-
- return fs_tracker;
-}
-
-
-void Sort_and_group_tracker::report_tmp_table(TABLE *tbl)
-{
- DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS);
- if (total_actions)
- {
- /* This is not the first execution. Check if the steps match. */
- // todo: should also check that tmp.table kinds are the same.
- if (qep_actions[cur_action] != EXPL_ACTION_TEMPTABLE)
- varied_executions= true;
- }
-
- if (!varied_executions)
- {
- qep_actions[cur_action]= EXPL_ACTION_TEMPTABLE;
- // qep_actions_data[cur_action]= ....
- }
-
- cur_action++;
-}
-
-
-void Sort_and_group_tracker::report_duplicate_removal()
-{
- DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS);
- if (total_actions)
- {
- /* This is not the first execution. Check if the steps match. */
- if (qep_actions[cur_action] != EXPL_ACTION_REMOVE_DUPS)
- varied_executions= true;
- }
-
- if (!varied_executions)
- {
- qep_actions[cur_action]= EXPL_ACTION_REMOVE_DUPS;
- }
-
- cur_action++;
-}
-
diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h
index 7d3d0853417..27fd7fb6d6a 100644
--- a/sql/sql_analyze_stmt.h
+++ b/sql/sql_analyze_stmt.h
@@ -284,174 +284,3 @@ private:
ulonglong sort_buffer_size;
};
-
-typedef enum
-{
- EXPL_NO_TMP_TABLE=0,
- EXPL_TMP_TABLE_BUFFER,
- EXPL_TMP_TABLE_GROUP,
- EXPL_TMP_TABLE_DISTINCT
-} enum_tmp_table_use;
-
-
-typedef enum
-{
- EXPL_ACTION_EOF, /* not-an-action */
- EXPL_ACTION_FILESORT,
- EXPL_ACTION_TEMPTABLE,
- EXPL_ACTION_REMOVE_DUPS,
-} enum_qep_action;
-
-
-/*
- This is to track how a JOIN object has resolved ORDER/GROUP BY/DISTINCT
-
- We are not tied to the query plan at all, because query plan does not have
- sufficient information. *A lot* of decisions about ordering/grouping are
- made at very late stages (in JOIN::exec, JOIN::init_execution, in
- create_sort_index and even in create_tmp_table).
-
- The idea is that operations that happen during select execution will report
- themselves. We have these operations:
- - Sorting with filesort()
- - Duplicate row removal (the one done by remove_duplicates()).
- - Use of temporary table to buffer the result.
-
- There is also "Selection" operation, done by do_select(). It reads rows,
- there are several distinct cases:
- 1. doing the join operation on the base tables
- 2. reading the temporary table
- 3. reading the filesort output
- it would be nice to build execution graph, e.g.
-
- Select(JOIN op) -> temp.table -> filesort -> Select(filesort result)
-
- the problem is that there is no way to tell what a do_select() call will do.
-
- Our solution is not to have explicit selection operations. We make these
- assumptions about the query plan:
- - Select(JOIN op) is the first operation in the query plan
- - Unless the first recorded operation is filesort(). filesort() is unable
- read result of a select, so when we find it first, the query plan is:
-
- filesort(first join table) -> Select(JOIN op) -> ...
-
- the other popular query plan is:
-
- Select (JOIN op) -> temp.table -> filesort() -> ...
-
-///TODO: handle repeated execution with subselects!
-*/
-
-class Sort_and_group_tracker : public Sql_alloc
-{
- enum { MAX_QEP_ACTIONS = 5 };
-
- /* Query actions in the order they were made. */
- enum_qep_action qep_actions[MAX_QEP_ACTIONS];
-
- /* Number for the next action */
- int cur_action;
-
- /*
- Non-zero means there was already an execution which had
- #total_actions actions
- */
- int total_actions;
-
- int get_n_actions()
- {
- return total_actions? total_actions: cur_action;
- }
-
- /*
- TRUE<=>there were executions which took different sort/buffer/de-duplicate
- routes. The counter values are not meaningful.
- */
- bool varied_executions;
-
- /* Details about query actions */
- union
- {
- Filesort_tracker *filesort_tracker;
- enum_tmp_table_use tmp_table;
- }
- qep_actions_data[MAX_QEP_ACTIONS];
-
- Filesort_tracker *dummy_fsort_tracker;
- bool is_analyze;
-public:
- Sort_and_group_tracker(bool is_analyze_arg) :
- cur_action(0), total_actions(0), varied_executions(false),
- dummy_fsort_tracker(NULL),
- is_analyze(is_analyze_arg)
- {}
-
- /*************** Reporting interface ***************/
- /* Report that join execution is started */
- void report_join_start()
- {
- if (!total_actions && cur_action != 0)
- {
- /* This is a second execution */
- total_actions= cur_action;
- }
- cur_action= 0;
- }
-
- /*
- Report that a temporary table is created. The next step is to write to the
- this tmp. table
- */
- void report_tmp_table(TABLE *tbl);
-
- /*
- Report that we are doing a filesort.
- @return
- Tracker object to be used with filesort
- */
- Filesort_tracker *report_sorting(THD *thd);
-
- /*
- Report that remove_duplicates() is invoked [on a temp. table].
- We don't collect any statistics on this operation, yet.
- */
- void report_duplicate_removal();
-
- friend class Iterator;
- /*************** Statistics retrieval interface ***************/
- bool had_varied_executions() { return varied_executions; }
-
- class Iterator
- {
- Sort_and_group_tracker *owner;
- int idx;
- public:
- Iterator(Sort_and_group_tracker *owner_arg) :
- owner(owner_arg), idx(owner_arg->get_n_actions() - 1)
- {}
-
- enum_qep_action get_next(Filesort_tracker **tracker/*,
- enum_tmp_table_use *tmp_table_use*/)
- {
- /* Walk back through the array... */
- if (idx < 0)
- return EXPL_ACTION_EOF;
- switch (owner->qep_actions[idx])
- {
- case EXPL_ACTION_FILESORT:
- *tracker= owner->qep_actions_data[idx].filesort_tracker;
- break;
- case EXPL_ACTION_TEMPTABLE:
- //*tmp_table_use= tmp_table_kind[tmp_table_idx++];
- break;
- default:
- break;
- }
- return owner->qep_actions[idx--];
- }
-
- bool is_last_element() { return idx == -1; }
- };
-};
-
diff --git a/sql/sql_array.h b/sql/sql_array.h
index 3e75a9ea546..e1adc9b9785 100644
--- a/sql/sql_array.h
+++ b/sql/sql_array.h
@@ -85,6 +85,15 @@ public:
Element_type *array() const { return m_array; }
+ bool operator==(const Bounds_checked_array<Element_type>&rhs) const
+ {
+ return m_array == rhs.m_array && m_size == rhs.m_size;
+ }
+ bool operator!=(const Bounds_checked_array<Element_type>&rhs) const
+ {
+ return m_array != rhs.m_array || m_size != rhs.m_size;
+ }
+
private:
Element_type *m_array;
size_t m_size;
diff --git a/sql/sql_audit.cc b/sql/sql_audit.cc
index 60a75cb06e7..8134adca13f 100644
--- a/sql/sql_audit.cc
+++ b/sql/sql_audit.cc
@@ -24,6 +24,7 @@ extern int finalize_audit_plugin(st_plugin_int *plugin);
struct st_mysql_event_generic
{
+ unsigned long event_class_mask[MYSQL_AUDIT_CLASS_MASK_SIZE];
unsigned int event_class;
const void *event;
};
@@ -32,8 +33,6 @@ unsigned long mysql_global_audit_mask[MYSQL_AUDIT_CLASS_MASK_SIZE];
static mysql_mutex_t LOCK_audit_mask;
-static void event_class_dispatch(THD *, unsigned int, const void *);
-
static inline
void set_audit_mask(unsigned long *mask, uint event_class)
@@ -56,101 +55,6 @@ bool check_audit_mask(const unsigned long *lhs,
}
-typedef void (*audit_handler_t)(THD *thd, uint event_subtype, va_list ap);
-
-/**
- MYSQL_AUDIT_GENERAL_CLASS handler
-
- @param[in] thd
- @param[in] event_subtype
- @param[in] error_code
- @param[in] ap
-
-*/
-
-static void general_class_handler(THD *thd, uint event_subtype, va_list ap)
-{
- mysql_event_general event;
- event.event_subclass= event_subtype;
- event.general_error_code= va_arg(ap, int);
- event.general_thread_id= thd ? thd->thread_id : 0;
- event.general_time= va_arg(ap, time_t);
- event.general_user= va_arg(ap, const char *);
- event.general_user_length= va_arg(ap, unsigned int);
- event.general_command= va_arg(ap, const char *);
- event.general_command_length= va_arg(ap, unsigned int);
- event.general_query= va_arg(ap, const char *);
- event.general_query_length= va_arg(ap, unsigned int);
- event.general_charset= va_arg(ap, struct charset_info_st *);
- event.general_rows= (unsigned long long) va_arg(ap, ha_rows);
- event.database= va_arg(ap, const char *);
- event.database_length= va_arg(ap, unsigned int);
- event.query_id= (unsigned long long) (thd ? thd->query_id : 0);
- event_class_dispatch(thd, MYSQL_AUDIT_GENERAL_CLASS, &event);
-}
-
-
-static void connection_class_handler(THD *thd, uint event_subclass, va_list ap)
-{
- mysql_event_connection event;
- event.event_subclass= event_subclass;
- event.status= va_arg(ap, int);
- event.thread_id= va_arg(ap, unsigned long);
- event.user= va_arg(ap, const char *);
- event.user_length= va_arg(ap, unsigned int);
- event.priv_user= va_arg(ap, const char *);
- event.priv_user_length= va_arg(ap, unsigned int);
- event.external_user= va_arg(ap, const char *);
- event.external_user_length= va_arg(ap, unsigned int);
- event.proxy_user= va_arg(ap, const char *);
- event.proxy_user_length= va_arg(ap, unsigned int);
- event.host= va_arg(ap, const char *);
- event.host_length= va_arg(ap, unsigned int);
- event.ip= va_arg(ap, const char *);
- event.ip_length= va_arg(ap, unsigned int);
- event.database= va_arg(ap, const char *);
- event.database_length= va_arg(ap, unsigned int);
- event_class_dispatch(thd, MYSQL_AUDIT_CONNECTION_CLASS, &event);
-}
-
-
-static void table_class_handler(THD *thd, uint event_subclass, va_list ap)
-{
- mysql_event_table event;
- event.event_subclass= event_subclass;
- event.read_only= va_arg(ap, int);
- event.thread_id= va_arg(ap, unsigned long);
- event.user= va_arg(ap, const char *);
- event.priv_user= va_arg(ap, const char *);
- event.priv_host= va_arg(ap, const char *);
- event.external_user= va_arg(ap, const char *);
- event.proxy_user= va_arg(ap, const char *);
- event.host= va_arg(ap, const char *);
- event.ip= va_arg(ap, const char *);
- event.database= va_arg(ap, const char *);
- event.database_length= va_arg(ap, unsigned int);
- event.table= va_arg(ap, const char *);
- event.table_length= va_arg(ap, unsigned int);
- event.new_database= va_arg(ap, const char *);
- event.new_database_length= va_arg(ap, unsigned int);
- event.new_table= va_arg(ap, const char *);
- event.new_table_length= va_arg(ap, unsigned int);
- event.query_id= (unsigned long long) (thd ? thd->query_id : 0);
- event_class_dispatch(thd, MYSQL_AUDIT_TABLE_CLASS, &event);
-}
-
-
-static audit_handler_t audit_handlers[] =
-{
- general_class_handler, connection_class_handler,
- 0,0,0,0,0,0,0,0,0,0,0,0,0, /* placeholders */
- table_class_handler
-};
-
-static const uint audit_handlers_count=
- (sizeof(audit_handlers) / sizeof(audit_handler_t));
-
-
/**
Acquire and lock any additional audit plugins as required
@@ -207,38 +111,16 @@ static my_bool acquire_plugins(THD *thd, plugin_ref plugin, void *arg)
void mysql_audit_acquire_plugins(THD *thd, ulong *event_class_mask)
{
DBUG_ENTER("mysql_audit_acquire_plugins");
- if (thd && !check_audit_mask(mysql_global_audit_mask, event_class_mask) &&
- check_audit_mask(thd->audit_class_mask, event_class_mask))
+ DBUG_ASSERT(thd);
+ DBUG_ASSERT(!check_audit_mask(mysql_global_audit_mask, event_class_mask));
+
+ if (check_audit_mask(thd->audit_class_mask, event_class_mask))
{
plugin_foreach(thd, acquire_plugins, MYSQL_AUDIT_PLUGIN, event_class_mask);
add_audit_mask(thd->audit_class_mask, event_class_mask);
}
DBUG_VOID_RETURN;
}
-
-
-/**
- Notify the audit system of an event
-
- @param[in] thd
- @param[in] event_class
- @param[in] event_subtype
- @param[in] error_code
-
-*/
-
-void mysql_audit_notify(THD *thd, uint event_class, uint event_subtype, ...)
-{
- va_list ap;
- audit_handler_t *handlers= audit_handlers + event_class;
- DBUG_ASSERT(event_class < audit_handlers_count);
- unsigned long event_class_mask[MYSQL_AUDIT_CLASS_MASK_SIZE];
- set_audit_mask(event_class_mask, event_class);
- mysql_audit_acquire_plugins(thd, event_class_mask);
- va_start(ap, event_subtype);
- (*handlers)(thd, event_subtype, ap);
- va_end(ap);
-}
/**
@@ -358,7 +240,7 @@ void mysql_audit_finalize()
/**
Initialize an Audit plug-in
-
+
@param[in] plugin
@retval FALSE OK
@@ -368,14 +250,14 @@ void mysql_audit_finalize()
int initialize_audit_plugin(st_plugin_int *plugin)
{
st_mysql_audit *data= (st_mysql_audit*) plugin->plugin->info;
-
+
if (!data->event_notify || !data->class_mask[0])
{
sql_print_error("Plugin '%s' has invalid data.",
plugin->name.str);
return 1;
}
-
+
if (plugin->plugin->init && plugin->plugin->init(NULL))
{
sql_print_error("Plugin '%s' init function returned error.",
@@ -385,7 +267,7 @@ int initialize_audit_plugin(st_plugin_int *plugin)
/* Make the interface info more easily accessible */
plugin->data= plugin->plugin->info;
-
+
/* Add the bits the plugin is interested in to the global mask */
mysql_mutex_lock(&LOCK_audit_mask);
add_audit_mask(mysql_global_audit_mask, data->class_mask);
@@ -495,17 +377,11 @@ static my_bool plugins_dispatch(THD *thd, plugin_ref plugin, void *arg)
{
const struct st_mysql_event_generic *event_generic=
(const struct st_mysql_event_generic *) arg;
- unsigned long event_class_mask[MYSQL_AUDIT_CLASS_MASK_SIZE];
st_mysql_audit *data= plugin_data(plugin, struct st_mysql_audit *);
- set_audit_mask(event_class_mask, event_generic->event_class);
-
/* Check to see if the plugin is interested in this event */
- if (check_audit_mask(data->class_mask, event_class_mask))
- return 0;
-
- /* Actually notify the plugin */
- data->event_notify(thd, event_generic->event_class, event_generic->event);
+ if (!check_audit_mask(data->class_mask, event_generic->event_class_mask))
+ data->event_notify(thd, event_generic->event_class, event_generic->event);
return 0;
}
@@ -513,17 +389,18 @@ static my_bool plugins_dispatch(THD *thd, plugin_ref plugin, void *arg)
/**
Distributes an audit event to plug-ins
-
+
@param[in] thd
+ @param[in] event_class
@param[in] event
*/
-static void event_class_dispatch(THD *thd, unsigned int event_class,
- const void *event)
+void mysql_audit_notify(THD *thd, uint event_class, const void *event)
{
struct st_mysql_event_generic event_generic;
event_generic.event_class= event_class;
event_generic.event= event;
+ set_audit_mask(event_generic.event_class_mask, event_class);
/*
Check if we are doing a slow global dispatch. This event occurs when
thd == NULL as it is not associated with any particular thread.
@@ -536,6 +413,8 @@ static void event_class_dispatch(THD *thd, unsigned int event_class,
{
plugin_ref *plugins, *plugins_last;
+ mysql_audit_acquire_plugins(thd, event_generic.event_class_mask);
+
/* Use the cached set of audit plugins */
plugins= (plugin_ref*) thd->audit_class_plugins.buffer;
plugins_last= plugins + thd->audit_class_plugins.elements;
@@ -580,5 +459,12 @@ void mysql_audit_release(THD *thd)
{
}
+void mysql_audit_init_thd(THD *thd)
+{
+}
+
+void mysql_audit_free_thd(THD *thd)
+{
+}
#endif /* EMBEDDED_LIBRARY */
diff --git a/sql/sql_audit.h b/sql/sql_audit.h
index 68106f099cc..96f9d0caece 100644
--- a/sql/sql_audit.h
+++ b/sql/sql_audit.h
@@ -2,6 +2,7 @@
#define SQL_AUDIT_INCLUDED
/* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -35,8 +36,7 @@ extern void mysql_audit_acquire_plugins(THD *thd, ulong *event_class_mask);
#ifndef EMBEDDED_LIBRARY
-extern void mysql_audit_notify(THD *thd, uint event_class,
- uint event_subtype, ...);
+extern void mysql_audit_notify(THD *thd, uint event_class, const void *event);
static inline bool mysql_audit_general_enabled()
{
@@ -55,22 +55,33 @@ static inline bool mysql_audit_table_enabled()
#else
static inline void mysql_audit_notify(THD *thd, uint event_class,
- uint event_subtype, ...) { }
+ const void *event) {}
#define mysql_audit_general_enabled() 0
#define mysql_audit_connection_enabled() 0
#define mysql_audit_table_enabled() 0
#endif
extern void mysql_audit_release(THD *thd);
+static inline unsigned int strlen_uint(const char *s)
+{
+ return (uint)strlen(s);
+}
+
+static inline unsigned int safe_strlen_uint(const char *s)
+{
+ return (uint)safe_strlen(s);
+}
+
#define MAX_USER_HOST_SIZE 512
static inline uint make_user_name(THD *thd, char *buf)
{
const Security_context *sctx= thd->security_ctx;
- return strxnmov(buf, MAX_USER_HOST_SIZE,
+ char *end= strxnmov(buf, MAX_USER_HOST_SIZE,
sctx->priv_user[0] ? sctx->priv_user : "", "[",
sctx->user ? sctx->user : "", "] @ ",
sctx->host ? sctx->host : "", " [",
- sctx->ip ? sctx->ip : "", "]", NullS) - buf;
+ sctx->ip ? sctx->ip : "", "]", NullS);
+ return (uint)(end-buf);
}
/**
@@ -94,15 +105,37 @@ void mysql_audit_general_log(THD *thd, time_t time,
{
if (mysql_audit_general_enabled())
{
- CHARSET_INFO *clientcs= thd ? thd->variables.character_set_client
- : global_system_variables.character_set_client;
- const char *db= thd ? thd->db : "";
- size_t db_length= thd ? thd->db_length : 0;
-
- mysql_audit_notify(thd, MYSQL_AUDIT_GENERAL_CLASS, MYSQL_AUDIT_GENERAL_LOG,
- 0, time, user, userlen, cmd, cmdlen,
- query, querylen, clientcs, (ha_rows) 0,
- db, db_length);
+ mysql_event_general event;
+
+ event.event_subclass= MYSQL_AUDIT_GENERAL_LOG;
+ event.general_error_code= 0;
+ event.general_time= time;
+ event.general_user= user;
+ event.general_user_length= userlen;
+ event.general_command= cmd;
+ event.general_command_length= cmdlen;
+ event.general_query= query;
+ event.general_query_length= querylen;
+ event.general_rows= 0;
+
+ if (thd)
+ {
+ event.general_thread_id= (unsigned long)thd->thread_id;
+ event.general_charset= thd->variables.character_set_client;
+ event.database= thd->db;
+ event.database_length= (unsigned int)thd->db_length;
+ event.query_id= thd->query_id;
+ }
+ else
+ {
+ event.general_thread_id= 0;
+ event.general_charset= global_system_variables.character_set_client;
+ event.database= "";
+ event.database_length= 0;
+ event.query_id= 0;
+ }
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_GENERAL_CLASS, &event);
}
}
@@ -124,38 +157,43 @@ void mysql_audit_general(THD *thd, uint event_subtype,
{
if (mysql_audit_general_enabled())
{
- time_t time= my_time(0);
- uint msglen= msg ? strlen(msg) : 0;
- const char *user;
- uint userlen;
char user_buff[MAX_USER_HOST_SIZE];
- CSET_STRING query;
- ha_rows rows;
- const char *db;
- size_t db_length;
+ mysql_event_general event;
+
+ event.event_subclass= event_subtype;
+ event.general_error_code= error_code;
+ event.general_time= my_time(0);
+ event.general_command= msg;
+ event.general_command_length= safe_strlen_uint(msg);
if (thd)
{
- query= thd->query_string;
- user= user_buff;
- userlen= make_user_name(thd, user_buff);
- rows= thd->get_stmt_da()->current_row_for_warning();
- db= thd->db;
- db_length= thd->db_length;
+ event.general_user= user_buff;
+ event.general_user_length= make_user_name(thd, user_buff);
+ event.general_thread_id= (unsigned long)thd->thread_id;
+ event.general_query= thd->query_string.str();
+ event.general_query_length= (unsigned) thd->query_string.length();
+ event.general_charset= thd->query_string.charset();
+ event.general_rows= thd->get_stmt_da()->current_row_for_warning();
+ event.database= thd->db;
+ event.database_length= (uint)thd->db_length;
+ event.query_id= thd->query_id;
}
else
{
- user= 0;
- userlen= 0;
- rows= 0;
- db= "";
- db_length= 0;
+ event.general_user= NULL;
+ event.general_user_length= 0;
+ event.general_thread_id= 0;
+ event.general_query= NULL;
+ event.general_query_length= 0;
+ event.general_charset= &my_charset_bin;
+ event.general_rows= 0;
+ event.database= "";
+ event.database_length= 0;
+ event.query_id= 0;
}
- mysql_audit_notify(thd, MYSQL_AUDIT_GENERAL_CLASS, event_subtype,
- error_code, time, user, userlen, msg, msglen,
- query.str(), query.length(), query.charset(), rows,
- db, db_length);
+ mysql_audit_notify(thd, MYSQL_AUDIT_GENERAL_CLASS, &event);
}
}
@@ -165,19 +203,28 @@ void mysql_audit_notify_connection_connect(THD *thd)
if (mysql_audit_connection_enabled())
{
const Security_context *sctx= thd->security_ctx;
- Diagnostics_area *da= thd->get_stmt_da();
- mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS,
- MYSQL_AUDIT_CONNECTION_CONNECT,
- da->is_error() ? da->sql_errno() : 0,
- thd->thread_id,
- sctx->user, sctx->user ? strlen(sctx->user) : 0,
- sctx->priv_user, strlen(sctx->priv_user),
- sctx->external_user,
- sctx->external_user ? strlen(sctx->external_user) : 0,
- sctx->proxy_user, strlen(sctx->proxy_user),
- sctx->host, sctx->host ? strlen(sctx->host) : 0,
- sctx->ip, sctx->ip ? strlen(sctx->ip) : 0,
- thd->db, thd->db ? strlen(thd->db) : 0);
+ mysql_event_connection event;
+
+ event.event_subclass= MYSQL_AUDIT_CONNECTION_CONNECT;
+ event.status= thd->get_stmt_da()->is_error() ?
+ thd->get_stmt_da()->sql_errno() : 0;
+ event.thread_id= (unsigned long)thd->thread_id;
+ event.user= sctx->user;
+ event.user_length= safe_strlen_uint(sctx->user);
+ event.priv_user= sctx->priv_user;
+ event.priv_user_length= strlen_uint(sctx->priv_user);
+ event.external_user= sctx->external_user;
+ event.external_user_length= safe_strlen_uint(sctx->external_user);
+ event.proxy_user= sctx->proxy_user;
+ event.proxy_user_length= strlen_uint(sctx->proxy_user);
+ event.host= sctx->host;
+ event.host_length= safe_strlen_uint(sctx->host);
+ event.ip= sctx->ip;
+ event.ip_length= safe_strlen_uint(sctx->ip);
+ event.database= thd->db;
+ event.database_length= safe_strlen_uint(thd->db);
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS, &event);
}
}
@@ -187,17 +234,27 @@ void mysql_audit_notify_connection_disconnect(THD *thd, int errcode)
if (mysql_audit_connection_enabled())
{
const Security_context *sctx= thd->security_ctx;
- mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS,
- MYSQL_AUDIT_CONNECTION_DISCONNECT,
- errcode, thd->thread_id,
- sctx->user, sctx->user ? strlen(sctx->user) : 0,
- sctx->priv_user, strlen(sctx->priv_user),
- sctx->external_user,
- sctx->external_user ? strlen(sctx->external_user) : 0,
- sctx->proxy_user, strlen(sctx->proxy_user),
- sctx->host, sctx->host ? strlen(sctx->host) : 0,
- sctx->ip, sctx->ip ? strlen(sctx->ip) : 0,
- thd->db, thd->db ? strlen(thd->db) : 0);
+ mysql_event_connection event;
+
+ event.event_subclass= MYSQL_AUDIT_CONNECTION_DISCONNECT;
+ event.status= errcode;
+ event.thread_id= (unsigned long)thd->thread_id;
+ event.user= sctx->user;
+ event.user_length= safe_strlen_uint(sctx->user);
+ event.priv_user= sctx->priv_user;
+ event.priv_user_length= strlen_uint(sctx->priv_user);
+ event.external_user= sctx->external_user;
+ event.external_user_length= safe_strlen_uint(sctx->external_user);
+ event.proxy_user= sctx->proxy_user;
+ event.proxy_user_length= strlen_uint(sctx->proxy_user);
+ event.host= sctx->host;
+ event.host_length= safe_strlen_uint(sctx->host);
+ event.ip= sctx->ip;
+ event.ip_length= safe_strlen_uint(sctx->ip) ;
+ event.database= thd->db;
+ event.database_length= safe_strlen_uint(thd->db);
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS, &event);
}
}
@@ -207,19 +264,28 @@ void mysql_audit_notify_connection_change_user(THD *thd)
if (mysql_audit_connection_enabled())
{
const Security_context *sctx= thd->security_ctx;
- Diagnostics_area *da= thd->get_stmt_da();
- mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS,
- MYSQL_AUDIT_CONNECTION_CHANGE_USER,
- da->is_error() ? da->sql_errno() : 0,
- thd->thread_id,
- sctx->user, sctx->user ? strlen(sctx->user) : 0,
- sctx->priv_user, strlen(sctx->priv_user),
- sctx->external_user,
- sctx->external_user ? strlen(sctx->external_user) : 0,
- sctx->proxy_user, strlen(sctx->proxy_user),
- sctx->host, sctx->host ? strlen(sctx->host) : 0,
- sctx->ip, sctx->ip ? strlen(sctx->ip) : 0,
- thd->db, thd->db ? strlen(thd->db) : 0);
+ mysql_event_connection event;
+
+ event.event_subclass= MYSQL_AUDIT_CONNECTION_CHANGE_USER;
+ event.status= thd->get_stmt_da()->is_error() ?
+ thd->get_stmt_da()->sql_errno() : 0;
+ event.thread_id= (unsigned long)thd->thread_id;
+ event.user= sctx->user;
+ event.user_length= safe_strlen_uint(sctx->user);
+ event.priv_user= sctx->priv_user;
+ event.priv_user_length= strlen_uint(sctx->priv_user);
+ event.external_user= sctx->external_user;
+ event.external_user_length= safe_strlen_uint(sctx->external_user);
+ event.proxy_user= sctx->proxy_user;
+ event.proxy_user_length= strlen_uint(sctx->proxy_user);
+ event.host= sctx->host;
+ event.host_length= safe_strlen_uint(sctx->host);
+ event.ip= sctx->ip;
+ event.ip_length= safe_strlen_uint(sctx->ip);
+ event.database= thd->db;
+ event.database_length= safe_strlen_uint(thd->db);
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS, &event);
}
}
@@ -229,13 +295,29 @@ void mysql_audit_external_lock(THD *thd, TABLE_SHARE *share, int lock)
if (lock != F_UNLCK && mysql_audit_table_enabled())
{
const Security_context *sctx= thd->security_ctx;
- mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_LOCK,
- (int)(lock == F_RDLCK), (ulong)thd->thread_id,
- sctx->user, sctx->priv_user, sctx->priv_host,
- sctx->external_user, sctx->proxy_user, sctx->host,
- sctx->ip, share->db.str, (uint)share->db.length,
- share->table_name.str, (uint)share->table_name.length,
- 0,0,0,0);
+ mysql_event_table event;
+
+ event.event_subclass= MYSQL_AUDIT_TABLE_LOCK;
+ event.read_only= lock == F_RDLCK;
+ event.thread_id= (unsigned long)thd->thread_id;
+ event.user= sctx->user;
+ event.priv_user= sctx->priv_user;
+ event.priv_host= sctx->priv_host;
+ event.external_user= sctx->external_user;
+ event.proxy_user= sctx->proxy_user;
+ event.host= sctx->host;
+ event.ip= sctx->ip;
+ event.database= share->db.str;
+ event.database_length= (unsigned int)share->db.length;
+ event.table= share->table_name.str;
+ event.table_length= (unsigned int)share->table_name.length;
+ event.new_database= 0;
+ event.new_database_length= 0;
+ event.new_table= 0;
+ event.new_table_length= 0;
+ event.query_id= thd->query_id;
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, &event);
}
}
@@ -247,13 +329,29 @@ void mysql_audit_create_table(TABLE *table)
THD *thd= table->in_use;
const TABLE_SHARE *share= table->s;
const Security_context *sctx= thd->security_ctx;
- mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_CREATE,
- 0, (ulong)thd->thread_id,
- sctx->user, sctx->priv_user, sctx->priv_host,
- sctx->external_user, sctx->proxy_user, sctx->host,
- sctx->ip, share->db.str, (uint)share->db.length,
- share->table_name.str, (uint)share->table_name.length,
- 0,0,0,0);
+ mysql_event_table event;
+
+ event.event_subclass= MYSQL_AUDIT_TABLE_CREATE;
+ event.read_only= 0;
+ event.thread_id= (unsigned long)thd->thread_id;
+ event.user= sctx->user;
+ event.priv_user= sctx->priv_user;
+ event.priv_host= sctx->priv_host;
+ event.external_user= sctx->external_user;
+ event.proxy_user= sctx->proxy_user;
+ event.host= sctx->host;
+ event.ip= sctx->ip;
+ event.database= share->db.str;
+ event.database_length= (unsigned int)share->db.length;
+ event.table= share->table_name.str;
+ event.table_length= (unsigned int)share->table_name.length;
+ event.new_database= 0;
+ event.new_database_length= 0;
+ event.new_table= 0;
+ event.new_table_length= 0;
+ event.query_id= thd->query_id;
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, &event);
}
}
@@ -263,13 +361,29 @@ void mysql_audit_drop_table(THD *thd, TABLE_LIST *table)
if (mysql_audit_table_enabled())
{
const Security_context *sctx= thd->security_ctx;
- mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_DROP,
- 0, (ulong)thd->thread_id,
- sctx->user, sctx->priv_user, sctx->priv_host,
- sctx->external_user, sctx->proxy_user, sctx->host,
- sctx->ip, table->db, (uint)table->db_length,
- table->table_name, (uint)table->table_name_length,
- 0,0,0,0);
+ mysql_event_table event;
+
+ event.event_subclass= MYSQL_AUDIT_TABLE_DROP;
+ event.read_only= 0;
+ event.thread_id= (unsigned long)thd->thread_id;
+ event.user= sctx->user;
+ event.priv_user= sctx->priv_user;
+ event.priv_host= sctx->priv_host;
+ event.external_user= sctx->external_user;
+ event.proxy_user= sctx->proxy_user;
+ event.host= sctx->host;
+ event.ip= sctx->ip;
+ event.database= table->db;
+ event.database_length= (unsigned int)table->db_length;
+ event.table= table->table_name;
+ event.table_length= (unsigned int)table->table_name_length;
+ event.new_database= 0;
+ event.new_database_length= 0;
+ event.new_table= 0;
+ event.new_table_length= 0;
+ event.query_id= thd->query_id;
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, &event);
}
}
@@ -280,13 +394,29 @@ void mysql_audit_rename_table(THD *thd, const char *old_db, const char *old_tb,
if (mysql_audit_table_enabled())
{
const Security_context *sctx= thd->security_ctx;
- mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_RENAME,
- 0, (ulong)thd->thread_id,
- sctx->user, sctx->priv_user, sctx->priv_host,
- sctx->external_user, sctx->proxy_user, sctx->host,
- sctx->ip,
- old_db, (uint)strlen(old_db), old_tb, (uint)strlen(old_tb),
- new_db, (uint)strlen(new_db), new_tb, (uint)strlen(new_tb));
+ mysql_event_table event;
+
+ event.event_subclass= MYSQL_AUDIT_TABLE_RENAME;
+ event.read_only= 0;
+ event.thread_id= (unsigned long)thd->thread_id;
+ event.user= sctx->user;
+ event.priv_user= sctx->priv_user;
+ event.priv_host= sctx->priv_host;
+ event.external_user= sctx->external_user;
+ event.proxy_user= sctx->proxy_user;
+ event.host= sctx->host;
+ event.ip= sctx->ip;
+ event.database= old_db;
+ event.database_length= strlen_uint(old_db);
+ event.table= old_tb;
+ event.table_length= strlen_uint(old_tb);
+ event.new_database= new_db;
+ event.new_database_length= strlen_uint(new_db);
+ event.new_table= new_tb;
+ event.new_table_length= strlen_uint(new_tb);
+ event.query_id= thd->query_id;
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, &event);
}
}
@@ -296,13 +426,29 @@ void mysql_audit_alter_table(THD *thd, TABLE_LIST *table)
if (mysql_audit_table_enabled())
{
const Security_context *sctx= thd->security_ctx;
- mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_ALTER,
- 0, (ulong)thd->thread_id,
- sctx->user, sctx->priv_user, sctx->priv_host,
- sctx->external_user, sctx->proxy_user, sctx->host,
- sctx->ip, table->db, (uint)table->db_length,
- table->table_name, (uint)table->table_name_length,
- 0,0,0,0);
+ mysql_event_table event;
+
+ event.event_subclass= MYSQL_AUDIT_TABLE_ALTER;
+ event.read_only= 0;
+ event.thread_id= (unsigned long)thd->thread_id;
+ event.user= sctx->user;
+ event.priv_user= sctx->priv_user;
+ event.priv_host= sctx->priv_host;
+ event.external_user= sctx->external_user;
+ event.proxy_user= sctx->proxy_user;
+ event.host= sctx->host;
+ event.ip= sctx->ip;
+ event.database= table->db;
+ event.database_length= (unsigned int)table->db_length;
+ event.table= table->table_name;
+ event.table_length= (unsigned int)table->table_name_length;
+ event.new_database= 0;
+ event.new_database_length= 0;
+ event.new_table= 0;
+ event.new_table_length= 0;
+ event.query_id= thd->query_id;
+
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, &event);
}
}
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 733a3a1f3ed..e0a907abfb3 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -49,6 +49,7 @@
#include "transaction.h"
#include "sql_prepare.h"
#include "sql_statistics.h"
+#include "sql_cte.h"
#include <m_ctype.h>
#include <my_dir.h>
#include <hash.h>
@@ -67,7 +68,7 @@ bool
No_such_table_error_handler::handle_condition(THD *,
uint sql_errno,
const char*,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char*,
Sql_condition ** cond_hdl)
{
@@ -78,7 +79,7 @@ No_such_table_error_handler::handle_condition(THD *,
return TRUE;
}
- if (level == Sql_condition::WARN_LEVEL_ERROR)
+ if (*level == Sql_condition::WARN_LEVEL_ERROR)
m_unhandled_errors++;
return FALSE;
}
@@ -111,7 +112,7 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl);
@@ -141,7 +142,7 @@ bool
Repair_mrg_table_error_handler::handle_condition(THD *,
uint sql_errno,
const char*,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char*,
Sql_condition ** cond_hdl)
{
@@ -171,47 +172,13 @@ static bool auto_repair_table(THD *thd, TABLE_LIST *table_list);
/**
- Create a table cache/table definition cache key
-
- @param thd Thread context
- @param key Buffer for the key to be created (must be of
- size MAX_DBKEY_LENGTH).
- @param db_name Database name.
- @param table_name Table name.
-
- @note
- The table cache_key is created from:
- db_name + \0
- table_name + \0
-
- additionally we add the following to make each tmp table
- unique on the slave:
-
- 4 bytes for master thread id
- 4 bytes pseudo thread id
-
- @return Length of key.
-*/
-
-uint create_tmp_table_def_key(THD *thd, char *key,
- const char *db, const char *table_name)
-{
- uint key_length= tdc_create_key(key, db, table_name);
- int4store(key + key_length, thd->variables.server_id);
- int4store(key + key_length + 4, thd->variables.pseudo_thread_id);
- key_length+= TMP_TABLE_KEY_EXTRA;
- return key_length;
-}
-
-
-/**
Get table cache key for a table list element.
@param table_list[in] Table list element.
@param key[out] On return points to table cache key for the table.
@note Unlike create_table_def_key() call this function doesn't construct
- key in a buffer provider by caller. Instead it relies on the fact
+ key in a buffer provided by caller. Instead it relies on the fact
that table list element for which key is requested has properly
initialized MDL_request object and the fact that table definition
cache key is suffix of key used in MDL subsystem. So to get table
@@ -303,7 +270,7 @@ static my_bool list_open_tables_callback(TDC_element *element,
(*arg->start_list)->in_use= 0;
mysql_mutex_lock(&element->LOCK_table_share);
- TDC_element::All_share_tables_list::Iterator it(element->all_tables);
+ All_share_tables_list::Iterator it(element->all_tables);
TABLE *table;
while ((table= it++))
if (table->in_use)
@@ -335,84 +302,6 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
DBUG_RETURN(argument.open_list);
}
-/*****************************************************************************
- * Functions to free open table cache
- ****************************************************************************/
-
-
-void intern_close_table(TABLE *table)
-{ // Free all structures
- DBUG_ENTER("intern_close_table");
- DBUG_PRINT("tcache", ("table: '%s'.'%s' 0x%lx",
- table->s ? table->s->db.str : "?",
- table->s ? table->s->table_name.str : "?",
- (long) table));
-
- free_io_cache(table);
- delete table->triggers;
- if (table->file) // Not true if placeholder
- (void) closefrm(table, 1); // close file
- table->alias.free();
- my_free(table);
- DBUG_VOID_RETURN;
-}
-
-
-/* Free resources allocated by filesort() and read_record() */
-
-void free_io_cache(TABLE *table)
-{
- DBUG_ENTER("free_io_cache");
- if (table->sort.io_cache)
- {
- close_cached_file(table->sort.io_cache);
- my_free(table->sort.io_cache);
- table->sort.io_cache=0;
- }
- DBUG_VOID_RETURN;
-}
-
-
-/**
- Auxiliary function which allows to kill delayed threads for
- particular table identified by its share.
-
- @param share Table share.
-
- @pre Caller should have TABLE_SHARE::tdc.LOCK_table_share mutex.
-*/
-
-void kill_delayed_threads_for_table(TDC_element *element)
-{
- TDC_element::All_share_tables_list::Iterator it(element->all_tables);
- TABLE *tab;
-
- mysql_mutex_assert_owner(&element->LOCK_table_share);
-
- if (!delayed_insert_threads)
- return;
-
- while ((tab= it++))
- {
- THD *in_use= tab->in_use;
-
- DBUG_ASSERT(in_use && tab->s->tdc->flushed);
- if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
- ! in_use->killed)
- {
- in_use->set_killed(KILL_SYSTEM_THREAD);
- mysql_mutex_lock(&in_use->mysys_var->mutex);
- if (in_use->mysys_var->current_cond)
- {
- mysql_mutex_lock(in_use->mysys_var->current_mutex);
- mysql_cond_broadcast(in_use->mysys_var->current_cond);
- mysql_mutex_unlock(in_use->mysys_var->current_mutex);
- }
- mysql_mutex_unlock(&in_use->mysys_var->mutex);
- }
- }
-}
-
/*
Close all tables which aren't in use by any thread
@@ -436,7 +325,7 @@ void kill_delayed_threads_for_table(TDC_element *element)
struct close_cached_tables_arg
{
- ulong refresh_version;
+ tdc_version_t refresh_version;
TDC_element *element;
};
@@ -462,7 +351,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables,
{
bool result= FALSE;
struct timespec abstime;
- ulong refresh_version;
+ tdc_version_t refresh_version;
DBUG_ENTER("close_cached_tables");
DBUG_ASSERT(thd || (!wait_for_refresh && !tables));
@@ -593,7 +482,8 @@ err_with_reopen:
old locks. This should always succeed (unless some external process
has removed the tables)
*/
- thd->locked_tables_list.reopen_tables(thd, false);
+ if (thd->locked_tables_list.reopen_tables(thd, false))
+ result= true;
/*
Since downgrade_lock() won't do anything with shared
metadata lock it is much simpler to go through all open tables rather
@@ -680,85 +570,6 @@ bool close_cached_connection_tables(THD *thd, LEX_STRING *connection)
}
-/**
- Mark all temporary tables which were used by the current statement or
- substatement as free for reuse, but only if the query_id can be cleared.
-
- @param thd thread context
-
- @remark For temp tables associated with a open SQL HANDLER the query_id
- is not reset until the HANDLER is closed.
-*/
-
-static void mark_temp_tables_as_free_for_reuse(THD *thd)
-{
- DBUG_ENTER("mark_temp_tables_as_free_for_reuse");
-
- if (thd->query_id == 0)
- {
- /* Thread has not executed any statement and has not used any tmp tables */
- DBUG_VOID_RETURN;
- }
-
- if (thd->have_temporary_tables())
- {
- thd->lock_temporary_tables();
- for (TABLE *table= thd->temporary_tables ; table ; table= table->next)
- {
- if ((table->query_id == thd->query_id) && ! table->open_by_handler)
- mark_tmp_table_for_reuse(table);
- }
- thd->unlock_temporary_tables(1);
- }
- DBUG_VOID_RETURN;
-}
-
-
-/**
- Reset a single temporary table.
- Effectively this "closes" one temporary table,
- in a session.
-
- @param table Temporary table.
-*/
-
-void mark_tmp_table_for_reuse(TABLE *table)
-{
- DBUG_ENTER("mark_tmp_table_for_reuse");
- DBUG_ASSERT(table->s->tmp_table);
-
- table->query_id= 0;
- table->file->ha_reset();
-
- /* Detach temporary MERGE children from temporary parent. */
- DBUG_ASSERT(table->file);
- table->file->extra(HA_EXTRA_DETACH_CHILDREN);
-
- /*
- Reset temporary table lock type to it's default value (TL_WRITE).
-
- Statements such as INSERT INTO .. SELECT FROM tmp, CREATE TABLE
- .. SELECT FROM tmp and UPDATE may under some circumstances modify
- the lock type of the tables participating in the statement. This
- isn't a problem for non-temporary tables since their lock type is
- reset at every open, but the same does not occur for temporary
- tables for historical reasons.
-
- Furthermore, the lock type of temporary tables is not really that
- important because they can only be used by one query at a time and
- not even twice in a query -- a temporary table is represented by
- only one TABLE object. Nonetheless, it's safer from a maintenance
- point of view to reset the lock type of this singleton TABLE object
- as to not cause problems when the table is reused.
-
- Even under LOCK TABLES mode its okay to reset the lock type as
- LOCK TABLES is allowed (but ignored) for a temporary table.
- */
- table->reginfo.lock_type= TL_WRITE;
- DBUG_VOID_RETURN;
-}
-
-
/*
Mark all tables in the list which were used by current substatement
as free for reuse.
@@ -798,23 +609,6 @@ static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table)
/**
- Auxiliary function to close all tables in the open_tables list.
-
- @param thd Thread context.
-
- @remark It should not ordinarily be called directly.
-*/
-
-static void close_open_tables(THD *thd)
-{
- DBUG_PRINT("info", ("thd->open_tables: 0x%lx", (long) thd->open_tables));
-
- while (thd->open_tables)
- (void) close_thread_table(thd, &thd->open_tables);
-}
-
-
-/**
Close all open instances of the table but keep the MDL lock.
Works both under LOCK TABLES and in the normal mode.
@@ -846,6 +640,8 @@ close_all_tables_for_name(THD *thd, TABLE_SHARE *share,
ha_extra_function extra,
TABLE *skip_table)
{
+ DBUG_ASSERT(!share->tmp_table);
+
char key[MAX_DBKEY_LENGTH];
uint key_length= share->table_cache_key.length;
const char *db= key;
@@ -923,8 +719,8 @@ void close_thread_tables(THD *thd)
#ifdef EXTRA_DEBUG
DBUG_PRINT("tcache", ("open tables:"));
for (table= thd->open_tables; table; table= table->next)
- DBUG_PRINT("tcache", ("table: '%s'.'%s' 0x%lx", table->s->db.str,
- table->s->table_name.str, (long) table));
+ DBUG_PRINT("tcache", ("table: '%s'.'%s' %p", table->s->db.str,
+ table->s->table_name.str, table));
#endif
#if defined(ENABLED_DEBUG_SYNC)
@@ -976,10 +772,27 @@ void close_thread_tables(THD *thd)
thd->derived_tables= 0;
}
+ if (thd->rec_tables)
+ {
+ TABLE *next;
+ /*
+ Close all temporary tables created for recursive table references.
+ This action was postponed because the table could be used in the
+ statements like ANALYZE WITH r AS (...) SELECT * from r
+ where r is defined through recursion.
+ */
+ for (table= thd->rec_tables ; table ; table= next)
+ {
+ next= table->next;
+ free_tmp_table(thd, table);
+ }
+ thd->rec_tables= 0;
+ }
+
/*
Mark all temporary tables used by this statement as free for reuse.
*/
- mark_temp_tables_as_free_for_reuse(thd);
+ thd->mark_tmp_tables_as_free_for_reuse();
if (thd->locked_tables_mode)
{
@@ -1034,8 +847,8 @@ void close_thread_tables(THD *thd)
Closing a MERGE child before the parent would be fatal if the
other thread tries to abort the MERGE lock in between.
*/
- if (thd->open_tables)
- close_open_tables(thd);
+ while (thd->open_tables)
+ (void) close_thread_table(thd, &thd->open_tables);
DBUG_VOID_RETURN;
}
@@ -1047,9 +860,9 @@ void close_thread_table(THD *thd, TABLE **table_ptr)
{
TABLE *table= *table_ptr;
DBUG_ENTER("close_thread_table");
- DBUG_PRINT("tcache", ("table: '%s'.'%s' 0x%lx", table->s->db.str,
- table->s->table_name.str, (long) table));
- DBUG_ASSERT(table->key_read == 0);
+ DBUG_PRINT("tcache", ("table: '%s'.'%s' %p", table->s->db.str,
+ table->s->table_name.str, table));
+ DBUG_ASSERT(!table->file->keyread_enabled());
DBUG_ASSERT(!table->file || table->file->inited == handler::NONE);
/*
@@ -1085,204 +898,13 @@ void close_thread_table(THD *thd, TABLE **table_ptr)
Do this *before* entering the TABLE_SHARE::tdc.LOCK_table_share
critical section.
*/
- if (table->file != NULL)
- MYSQL_UNBIND_TABLE(table->file);
+ MYSQL_UNBIND_TABLE(table->file);
tc_release_table(table);
DBUG_VOID_RETURN;
}
-/* close_temporary_tables' internal, 4 is due to uint4korr definition */
-static inline uint tmpkeyval(THD *thd, TABLE *table)
-{
- return uint4korr(table->s->table_cache_key.str + table->s->table_cache_key.length - 4);
-}
-
-
-/*
- Close all temporary tables created by 'CREATE TEMPORARY TABLE' for thread
- creates one DROP TEMPORARY TABLE binlog event for each pseudo-thread
-
- Temporary tables created in a sql slave is closed by
- Relay_log_info::close_temporary_tables()
-
-*/
-
-bool close_temporary_tables(THD *thd)
-{
- DBUG_ENTER("close_temporary_tables");
- TABLE *table;
- TABLE *next= NULL;
- TABLE *prev_table;
- /* Assume thd->variables.option_bits has OPTION_QUOTE_SHOW_CREATE */
- bool was_quote_show= TRUE;
- bool error= 0;
-
- if (!thd->temporary_tables)
- DBUG_RETURN(FALSE);
- DBUG_ASSERT(!thd->rgi_slave);
-
- /*
- Ensure we don't have open HANDLERs for tables we are about to close.
- This is necessary when close_temporary_tables() is called as part
- of execution of BINLOG statement (e.g. for format description event).
- */
- mysql_ha_rm_temporary_tables(thd);
- if (!mysql_bin_log.is_open())
- {
- TABLE *tmp_next;
- for (TABLE *t= thd->temporary_tables; t; t= tmp_next)
- {
- tmp_next= t->next;
- mysql_lock_remove(thd, thd->lock, t);
- close_temporary(t, 1, 1);
- }
- thd->temporary_tables= 0;
- DBUG_RETURN(FALSE);
- }
-
- /* Better add "if exists", in case a RESET MASTER has been done */
- const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
- char buf[FN_REFLEN];
- String s_query(buf, sizeof(buf), system_charset_info);
- bool found_user_tables= FALSE;
-
- s_query.copy(stub, sizeof(stub)-1, system_charset_info);
-
- /*
- Insertion sort of temp tables by pseudo_thread_id to build ordered list
- of sublists of equal pseudo_thread_id
- */
-
- for (prev_table= thd->temporary_tables, table= prev_table->next;
- table;
- prev_table= table, table= table->next)
- {
- TABLE *prev_sorted /* same as for prev_table */, *sorted;
- if (is_user_table(table))
- {
- if (!found_user_tables)
- found_user_tables= true;
- for (prev_sorted= NULL, sorted= thd->temporary_tables; sorted != table;
- prev_sorted= sorted, sorted= sorted->next)
- {
- if (!is_user_table(sorted) ||
- tmpkeyval(thd, sorted) > tmpkeyval(thd, table))
- {
- /* move into the sorted part of the list from the unsorted */
- prev_table->next= table->next;
- table->next= sorted;
- if (prev_sorted)
- {
- prev_sorted->next= table;
- }
- else
- {
- thd->temporary_tables= table;
- }
- table= prev_table;
- break;
- }
- }
- }
- }
-
- /* We always quote db,table names though it is slight overkill */
- if (found_user_tables &&
- !(was_quote_show= MY_TEST(thd->variables.option_bits &
- OPTION_QUOTE_SHOW_CREATE)))
- {
- thd->variables.option_bits |= OPTION_QUOTE_SHOW_CREATE;
- }
-
- /* scan sorted tmps to generate sequence of DROP */
- for (table= thd->temporary_tables; table; table= next)
- {
- if (is_user_table(table))
- {
- bool save_thread_specific_used= thd->thread_specific_used;
- my_thread_id save_pseudo_thread_id= thd->variables.pseudo_thread_id;
- char db_buf[FN_REFLEN];
- String db(db_buf, sizeof(db_buf), system_charset_info);
-
- /* Set pseudo_thread_id to be that of the processed table */
- thd->variables.pseudo_thread_id= tmpkeyval(thd, table);
-
- db.copy(table->s->db.str, table->s->db.length, system_charset_info);
- /* Reset s_query() if changed by previous loop */
- s_query.length(sizeof(stub)-1);
-
- /* Loop forward through all tables that belong to a common database
- within the sublist of common pseudo_thread_id to create single
- DROP query
- */
- for (;
- table && is_user_table(table) &&
- tmpkeyval(thd, table) == thd->variables.pseudo_thread_id &&
- table->s->db.length == db.length() &&
- memcmp(table->s->db.str, db.ptr(), db.length()) == 0;
- table= next)
- {
- /*
- We are going to add ` around the table names and possible more
- due to special characters
- */
- append_identifier(thd, &s_query, table->s->table_name.str,
- strlen(table->s->table_name.str));
- s_query.append(',');
- next= table->next;
- mysql_lock_remove(thd, thd->lock, table);
- close_temporary(table, 1, 1);
- }
- thd->clear_error();
- CHARSET_INFO *cs_save= thd->variables.character_set_client;
- thd->variables.character_set_client= system_charset_info;
- thd->thread_specific_used= TRUE;
- Query_log_event qinfo(thd, s_query.ptr(),
- s_query.length() - 1 /* to remove trailing ',' */,
- FALSE, TRUE, FALSE, 0);
- qinfo.db= db.ptr();
- qinfo.db_len= db.length();
- thd->variables.character_set_client= cs_save;
-
- thd->get_stmt_da()->set_overwrite_status(true);
- thd->transaction.stmt.mark_dropped_temp_table();
- if ((error= (mysql_bin_log.write(&qinfo) || error)))
- {
- /*
- If we're here following THD::cleanup, thence the connection
- has been closed already. So lets print a message to the
- error log instead of pushing yet another error into the
- stmt_da.
-
- Also, we keep the error flag so that we propagate the error
- up in the stack. This way, if we're the SQL thread we notice
- that close_temporary_tables failed. (Actually, the SQL
- thread only calls close_temporary_tables while applying old
- Start_log_event_v3 events.)
- */
- sql_print_error("Failed to write the DROP statement for "
- "temporary tables to binary log");
- }
- thd->get_stmt_da()->set_overwrite_status(false);
-
- thd->variables.pseudo_thread_id= save_pseudo_thread_id;
- thd->thread_specific_used= save_thread_specific_used;
- }
- else
- {
- next= table->next;
- close_temporary(table, 1, 1);
- }
- }
- if (!was_quote_show)
- thd->variables.option_bits&= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
- thd->temporary_tables=0;
-
- DBUG_RETURN(error);
-}
-
/*
Find table in list.
@@ -1309,8 +931,7 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table,
{
for (; table; table= table->*link )
{
- if ((table->table == 0 || table->table->s->tmp_table == NO_TMP_TABLE) &&
- strcmp(table->db, db_name) == 0 &&
+ if (strcmp(table->db, db_name) == 0 &&
strcmp(table->table_name, table_name) == 0)
break;
}
@@ -1376,9 +997,6 @@ TABLE_LIST* find_dup_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
/* All MyISAMMRG children are plain MyISAM tables. */
DBUG_ASSERT(table->table->file->ht->db_type != DB_TYPE_MRG_MYISAM);
- /* temporary table is always unique */
- if (table->table && table->table->s->tmp_table != NO_TMP_TABLE)
- DBUG_RETURN(0);
table= table->find_underlying_table(table->table);
/*
as far as we have table->table we have to find real TABLE_LIST of
@@ -1415,6 +1033,12 @@ retry:
if (res->table && (res->table == table->table))
continue;
+ /* Skip if table is tmp table */
+ if (check_flag & CHECK_DUP_SKIP_TEMP_TABLE &&
+ res->table && res->table->s->tmp_table != NO_TMP_TABLE)
+ {
+ continue;
+ }
if (check_flag & CHECK_DUP_FOR_CREATE)
DBUG_RETURN(res);
@@ -1455,7 +1079,7 @@ retry:
Try to fix by materializing the derived table
*/
TABLE_LIST *derived= res->belong_to_derived;
- if (derived->is_merged_derived())
+ if (derived->is_merged_derived() && !derived->derived->is_excluded())
{
DBUG_PRINT("info",
("convert merged to materialization to resolve the conflict"));
@@ -1565,292 +1189,6 @@ void update_non_unique_table_error(TABLE_LIST *update,
/**
- Find temporary table specified by database and table names in the
- THD::temporary_tables list.
-
- @return TABLE instance if a temporary table has been found; NULL otherwise.
-*/
-
-TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name)
-{
- char key[MAX_DBKEY_LENGTH];
- uint key_length= create_tmp_table_def_key(thd, key, db, table_name);
- return find_temporary_table(thd, key, key_length);
-}
-
-
-/**
- Find a temporary table specified by TABLE_LIST instance in the
- THD::temporary_tables list.
-
- @return TABLE instance if a temporary table has been found; NULL otherwise.
-*/
-
-TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl)
-{
- const char *tmp_key;
- char key[MAX_DBKEY_LENGTH];
- uint key_length;
-
- key_length= get_table_def_key(tl, &tmp_key);
- memcpy(key, tmp_key, key_length);
- int4store(key + key_length, thd->variables.server_id);
- int4store(key + key_length + 4, thd->variables.pseudo_thread_id);
-
- return find_temporary_table(thd, key, key_length + TMP_TABLE_KEY_EXTRA);
-}
-
-
-static bool
-use_temporary_table(THD *thd, TABLE *table, TABLE **out_table)
-{
- *out_table= table;
- if (!table)
- return false;
- /*
- Temporary tables are not safe for parallel replication. They were
- designed to be visible to one thread only, so have no table locking.
- Thus there is no protection against two conflicting transactions
- committing in parallel and things like that.
-
- So for now, anything that uses temporary tables will be serialised
- with anything before it, when using parallel replication.
-
- ToDo: We might be able to introduce a reference count or something
- on temp tables, and have slave worker threads wait for it to reach
- zero before being allowed to use the temp table. Might not be worth
- it though, as statement-based replication using temporary tables is
- in any case rather fragile.
- */
- if (thd->rgi_slave && thd->rgi_slave->is_parallel_exec &&
- thd->wait_for_prior_commit())
- return true;
- /*
- We need to set the THD as it may be different in case of
- parallel replication
- */
- if (table->in_use != thd)
- {
- table->in_use= thd;
-#ifdef REMOVE_AFTER_MERGE_WITH_10
- if (thd->rgi_slave)
- {
- /*
- We may be stealing an opened temporary tables from one slave
- thread to another, we need to let the performance schema know that,
- for aggregates per thread to work properly.
- */
- MYSQL_UNBIND_TABLE(table->file);
- MYSQL_REBIND_TABLE(table->file);
- }
-#endif
- }
- return false;
-}
-
-bool
-find_and_use_temporary_table(THD *thd, const char *db, const char *table_name,
- TABLE **out_table)
-{
- return use_temporary_table(thd, find_temporary_table(thd, db, table_name),
- out_table);
-}
-
-
-bool
-find_and_use_temporary_table(THD *thd, const TABLE_LIST *tl, TABLE **out_table)
-{
- return use_temporary_table(thd, find_temporary_table(thd, tl), out_table);
-}
-
-
-/**
- Find a temporary table specified by a key in the THD::temporary_tables list.
-
- @return TABLE instance if a temporary table has been found; NULL otherwise.
-*/
-
-TABLE *find_temporary_table(THD *thd,
- const char *table_key,
- uint table_key_length)
-{
- TABLE *result= 0;
- if (!thd->have_temporary_tables())
- return NULL;
-
- thd->lock_temporary_tables();
- for (TABLE *table= thd->temporary_tables; table; table= table->next)
- {
- if (table->s->table_cache_key.length == table_key_length &&
- !memcmp(table->s->table_cache_key.str, table_key, table_key_length))
- {
- result= table;
- break;
- }
- }
- thd->unlock_temporary_tables(0);
- return result;
-}
-
-
-/**
- Drop a temporary table.
-
- Try to locate the table in the list of thd->temporary_tables.
- If the table is found:
- - if the table is being used by some outer statement, fail.
- - if the table is locked with LOCK TABLES or by prelocking,
- unlock it and remove it from the list of locked tables
- (THD::lock). Currently only transactional temporary tables
- are locked.
- - Close the temporary table, remove its .FRM
- - remove the table from the list of temporary tables
-
- This function is used to drop user temporary tables, as well as
- internal tables created in CREATE TEMPORARY TABLE ... SELECT
- or ALTER TABLE. Even though part of the work done by this function
- is redundant when the table is internal, as long as we
- link both internal and user temporary tables into the same
- thd->temporary_tables list, it's impossible to tell here whether
- we're dealing with an internal or a user temporary table.
-
- @param thd Thread handler
- @param table Temporary table to be deleted
- @param is_trans Is set to the type of the table:
- transactional (e.g. innodb) as TRUE or non-transactional
- (e.g. myisam) as FALSE.
-
- @retval 0 the table was found and dropped successfully.
- @retval -1 the table is in use by a outer query
-*/
-
-int drop_temporary_table(THD *thd, TABLE *table, bool *is_trans)
-{
- DBUG_ENTER("drop_temporary_table");
- DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'",
- table->s->db.str, table->s->table_name.str));
-
- /* Table might be in use by some outer statement. */
- if (table->query_id && table->query_id != thd->query_id)
- {
- DBUG_PRINT("info", ("table->query_id: %lu thd->query_id: %lu",
- (ulong) table->query_id, (ulong) thd->query_id));
-
- my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr());
- DBUG_RETURN(-1);
- }
-
- *is_trans= table->file->has_transactions();
-
- /*
- If LOCK TABLES list is not empty and contains this table,
- unlock the table and remove the table from this list.
- */
- mysql_lock_remove(thd, thd->lock, table);
- close_temporary_table(thd, table, 1, 1);
- DBUG_RETURN(0);
-}
-
-
-/*
- unlink from thd->temporary tables and close temporary table
-*/
-
-void close_temporary_table(THD *thd, TABLE *table,
- bool free_share, bool delete_table)
-{
- DBUG_ENTER("close_temporary_table");
- DBUG_PRINT("tmptable", ("closing table: '%s'.'%s' 0x%lx alias: '%s'",
- table->s->db.str, table->s->table_name.str,
- (long) table, table->alias.c_ptr()));
-
- thd->lock_temporary_tables();
- if (table->prev)
- {
- table->prev->next= table->next;
- if (table->prev->next)
- table->next->prev= table->prev;
- }
- else
- {
- /* removing the item from the list */
- DBUG_ASSERT(table == thd->temporary_tables);
- /*
- slave must reset its temporary list pointer to zero to exclude
- passing non-zero value to end_slave via rli->save_temporary_tables
- when no temp tables opened, see an invariant below.
- */
- thd->temporary_tables= table->next;
- if (thd->temporary_tables)
- table->next->prev= 0;
- }
- if (thd->rgi_slave)
- {
- /* natural invariant of temporary_tables */
- DBUG_ASSERT(slave_open_temp_tables || !thd->temporary_tables);
- thread_safe_decrement32(&slave_open_temp_tables);
- table->in_use= 0; // No statistics
- }
- thd->unlock_temporary_tables(0);
- close_temporary(table, free_share, delete_table);
- DBUG_VOID_RETURN;
-}
-
-
-/*
- Close and delete a temporary table
-
- NOTE
- This dosn't unlink table from thd->temporary
- If this is needed, use close_temporary_table()
-*/
-
-void close_temporary(TABLE *table, bool free_share, bool delete_table)
-{
- handlerton *table_type= table->s->db_type();
- DBUG_ENTER("close_temporary");
- DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'",
- table->s->db.str, table->s->table_name.str));
-
- free_io_cache(table);
- closefrm(table, 0);
- if (delete_table)
- rm_temporary_table(table_type, table->s->path.str);
- if (free_share)
- {
- free_table_share(table->s);
- my_free(table);
- }
- DBUG_VOID_RETURN;
-}
-
-
-/*
- Used by ALTER TABLE when the table is a temporary one. It changes something
- only if the ALTER contained a RENAME clause (otherwise, table_name is the old
- name).
- Prepares a table cache key, which is the concatenation of db, table_name and
- thd->slave_proxy_id, separated by '\0'.
-*/
-
-bool rename_temporary_table(THD* thd, TABLE *table, const char *db,
- const char *table_name)
-{
- char *key;
- uint key_length;
- TABLE_SHARE *share= table->s;
- DBUG_ENTER("rename_temporary_table");
-
- if (!(key=(char*) alloc_root(&share->mem_root, MAX_DBKEY_LENGTH)))
- DBUG_RETURN(1); /* purecov: inspected */
-
- key_length= create_tmp_table_def_key(thd, key, db, table_name);
- share->set_table_cache_key(key, key_length);
- DBUG_RETURN(0);
-}
-
-
-/**
Force all other threads to stop using the table by upgrading
metadata lock on it and remove unused TABLE instances from cache.
@@ -1872,8 +1210,9 @@ bool wait_while_table_is_used(THD *thd, TABLE *table,
enum ha_extra_function function)
{
DBUG_ENTER("wait_while_table_is_used");
- DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu",
- table->s->table_name.str, (ulong) table->s,
+ DBUG_ASSERT(!table->s->tmp_table);
+ DBUG_PRINT("enter", ("table: '%s' share: %p db_stat: %u version: %lld",
+ table->s->table_name.str, table->s,
table->db_stat, table->s->tdc->version));
if (thd->mdl_context.upgrade_shared_lock(
@@ -1915,7 +1254,7 @@ void drop_open_table(THD *thd, TABLE *table, const char *db_name,
{
DBUG_ENTER("drop_open_table");
if (table->s->tmp_table)
- close_temporary_table(thd, table, 1, 1);
+ thd->drop_temporary_table(table, NULL, true);
else
{
DBUG_ASSERT(table == thd->open_tables);
@@ -1951,7 +1290,7 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl);
@@ -1970,7 +1309,7 @@ private:
bool MDL_deadlock_handler::handle_condition(THD *,
uint sql_errno,
const char*,
- Sql_condition::enum_warning_level,
+ Sql_condition::enum_warning_level*,
const char*,
Sql_condition ** cond_hdl)
{
@@ -2171,7 +1510,7 @@ bool is_locked_view(THD *thd, TABLE_LIST *t)
DBUG_RETURN(FALSE);
}
- if (!tdc_open_view(thd, t, t->alias, CHECK_METADATA_VERSION))
+ if (!tdc_open_view(thd, t, CHECK_METADATA_VERSION))
{
DBUG_ASSERT(t->view != 0);
DBUG_RETURN(TRUE); // VIEW
@@ -2252,7 +1591,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx)
Note that we allow write locks on log tables as otherwise logging
to general/slow log would be disabled in read only transactions.
*/
- if (table_list->mdl_request.type >= MDL_SHARED_WRITE &&
+ if (table_list->mdl_request.is_write_lock_request() &&
thd->tx_read_only &&
!(flags & (MYSQL_LOCK_LOG_TABLE | MYSQL_OPEN_HAS_MDL_LOCK)))
{
@@ -2260,6 +1599,12 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx)
DBUG_RETURN(true);
}
+ if (!table_list->db)
+ {
+ my_error(ER_NO_DB_ERROR, MYF(0));
+ DBUG_RETURN(true);
+ }
+
key_length= get_table_def_key(table_list, &key);
/*
@@ -2372,7 +1717,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx)
pre-acquiring metadata locks at the beggining of
open_tables() call.
*/
- if (table_list->mdl_request.type >= MDL_SHARED_WRITE &&
+ if (table_list->mdl_request.is_write_lock_request() &&
! (flags & (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
MYSQL_OPEN_FORCE_SHARED_MDL |
MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL |
@@ -2440,10 +1785,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx)
retry_share:
- share= tdc_acquire_share(thd, table_list->db, table_list->table_name,
- key, key_length,
- table_list->mdl_request.key.tc_hash_value(),
- gts_flags, &table);
+ share= tdc_acquire_share(thd, table_list, gts_flags, &table);
if (!share)
{
@@ -2512,7 +1854,7 @@ retry_share:
{
if (share->tdc->flushed)
{
- DBUG_PRINT("info", ("Found old share version: %lu current: %lu",
+ DBUG_PRINT("info", ("Found old share version: %lld current: %lld",
share->tdc->version, tdc_refresh_version()));
/*
We already have an MDL lock. But we have encountered an old
@@ -2576,12 +1918,8 @@ retry_share:
goto err_lock;
error= open_table_from_share(thd, share, alias,
- (uint) (HA_OPEN_KEYFILE |
- HA_OPEN_RNDFILE |
- HA_GET_INDEX |
- HA_TRY_READ_ONLY),
- (READ_KEYINFO | COMPUTE_TYPES |
- EXTRA_RECORD),
+ HA_OPEN_KEYFILE | HA_TRY_READ_ONLY,
+ EXTRA_RECORD,
thd->open_options, table, FALSE);
if (error)
@@ -2603,7 +1941,7 @@ retry_share:
}
if (open_table_entry_fini(thd, share, table))
{
- closefrm(table, 0);
+ closefrm(table);
my_free(table);
goto err_lock;
}
@@ -2824,6 +2162,9 @@ Locked_tables_list::init_locked_tables(THD *thd)
return TRUE;
}
}
+
+ TRANSACT_TRACKER(add_trx_state(thd, TX_LOCKED_TABLES));
+
thd->enter_locked_tables_mode(LTM_LOCK_TABLES);
return FALSE;
@@ -2864,6 +2205,8 @@ Locked_tables_list::unlock_locked_tables(THD *thd)
}
thd->leave_locked_tables_mode();
+ TRANSACT_TRACKER(clear_trx_state(thd, TX_LOCKED_TABLES));
+
DBUG_ASSERT(thd->transaction.stmt.is_empty());
close_thread_tables(thd);
@@ -3322,9 +2665,6 @@ check_and_update_routine_version(THD *thd, Sroutine_hash_entry *rt,
@param thd Thread handle
@param table_list TABLE_LIST with db, table_name & belong_to_view
- @param alias Alias name
- @param cache_key Key for table definition cache
- @param cache_key_length Length of cache_key
@param flags Flags which modify how we open the view
@todo This function is needed for special handling of views under
@@ -3333,16 +2673,13 @@ check_and_update_routine_version(THD *thd, Sroutine_hash_entry *rt,
@return FALSE if success, TRUE - otherwise.
*/
-bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias,
- const char *cache_key, uint cache_key_length,
- uint flags)
+bool tdc_open_view(THD *thd, TABLE_LIST *table_list, uint flags)
{
TABLE not_used;
TABLE_SHARE *share;
bool err= TRUE;
- if (!(share= tdc_acquire_share(thd, table_list->db, table_list->table_name,
- cache_key, cache_key_length, GTS_VIEW)))
+ if (!(share= tdc_acquire_share(thd, table_list, GTS_VIEW)))
return TRUE;
DBUG_ASSERT(share->is_view);
@@ -3429,16 +2766,14 @@ static bool auto_repair_table(THD *thd, TABLE_LIST *table_list)
if (!(entry= (TABLE*)my_malloc(sizeof(TABLE), MYF(MY_WME))))
return result;
- if (!(share= tdc_acquire_share_shortlived(thd, table_list, GTS_TABLE)))
+ if (!(share= tdc_acquire_share(thd, table_list, GTS_TABLE)))
goto end_free;
DBUG_ASSERT(! share->is_view);
if (open_table_from_share(thd, share, table_list->alias,
- (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
- HA_GET_INDEX |
- HA_TRY_READ_ONLY),
- READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
+ HA_OPEN_KEYFILE | HA_TRY_READ_ONLY,
+ EXTRA_RECORD,
ha_open_options | HA_OPEN_FOR_REPAIR,
entry, FALSE) || ! entry->file ||
(entry->file->is_crashed() && entry->file->ha_check_and_repair(thd)))
@@ -3449,12 +2784,12 @@ static bool auto_repair_table(THD *thd, TABLE_LIST *table_list)
sql_print_error("Couldn't repair table: %s.%s", share->db.str,
share->table_name.str);
if (entry->file)
- closefrm(entry, 0);
+ closefrm(entry);
}
else
{
thd->clear_error(); // Clear error message
- closefrm(entry, 0);
+ closefrm(entry);
result= FALSE;
}
@@ -3586,7 +2921,7 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
@@ -3648,8 +2983,7 @@ Open_table_context::recover_from_failed_open()
if (open_if_exists)
m_thd->push_internal_handler(&no_such_table_handler);
- result= !tdc_acquire_share(m_thd, m_failed_table->db,
- m_failed_table->table_name,
+ result= !tdc_acquire_share(m_thd, m_failed_table,
GTS_TABLE | GTS_FORCE_DISCOVERY | GTS_NOLOCK);
if (open_if_exists)
{
@@ -3981,6 +3315,59 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
tables->table_name= tables->view_name.str;
tables->table_name_length= tables->view_name.length;
}
+ else if (tables->select_lex)
+ {
+ /*
+ Check whether 'tables' refers to a table defined in a with clause.
+ If so set the reference to the definition in tables->with.
+ */
+ if (!tables->with)
+ tables->with= tables->select_lex->find_table_def_in_with_clauses(tables);
+ /*
+ If 'tables' is defined in a with clause set the pointer to the
+ specification from its definition in tables->derived.
+ */
+ if (tables->with)
+ {
+ if (tables->is_recursive_with_table() &&
+ !tables->is_with_table_recursive_reference())
+ {
+ tables->with->rec_outer_references++;
+ With_element *with_elem= tables->with;
+ while ((with_elem= with_elem->get_next_mutually_recursive()) !=
+ tables->with)
+ with_elem->rec_outer_references++;
+ }
+ if (tables->set_as_with_table(thd, tables->with))
+ DBUG_RETURN(1);
+ else
+ goto end;
+ }
+ }
+
+ if (!tables->derived &&
+ is_infoschema_db(tables->db, tables->db_length))
+ {
+ /*
+ Check whether the information schema contains a table
+ whose name is tables->schema_table_name
+ */
+ ST_SCHEMA_TABLE *schema_table;
+ schema_table= find_schema_table(thd, tables->schema_table_name);
+ if (!schema_table ||
+ (schema_table->hidden &&
+ ((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
+ /*
+ this check is used for show columns|keys from I_S hidden table
+ */
+ lex->sql_command == SQLCOM_SHOW_FIELDS ||
+ lex->sql_command == SQLCOM_SHOW_KEYS)))
+ {
+ my_error(ER_UNKNOWN_TABLE, MYF(0),
+ tables->schema_table_name, INFORMATION_SCHEMA_NAME.str);
+ DBUG_RETURN(1);
+ }
+ }
/*
If this TABLE_LIST object is a placeholder for an information_schema
table, create a temporary table to represent the information_schema
@@ -4066,7 +3453,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
of temporary tables we have to try to open temporary table for it.
We can't simply skip this table list element and postpone opening of
- temporary tabletill the execution of substatement for several reasons:
+ temporary table till the execution of substatement for several reasons:
- Temporary table can be a MERGE table with base underlying tables,
so its underlying tables has to be properly open and locked at
prelocking stage.
@@ -4082,7 +3469,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
The problem is that since those attributes are not set in merge
children, another round of PREPARE will not help.
*/
- error= open_temporary_table(thd, tables);
+ error= thd->open_temporary_table(tables);
if (!error && !tables->table)
error= open_table(thd, tables, ot_ctx);
@@ -4101,7 +3488,8 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
Repair_mrg_table_error_handler repair_mrg_table_handler;
thd->push_internal_handler(&repair_mrg_table_handler);
- error= open_temporary_table(thd, tables);
+ error= thd->open_temporary_table(tables);
+
if (!error && !tables->table)
error= open_table(thd, tables, ot_ctx);
@@ -4117,7 +3505,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
still might need to look for a temporary table if this table
list element corresponds to underlying table of a merge table.
*/
- error= open_temporary_table(thd, tables);
+ error= thd->open_temporary_table(tables);
}
if (!error && !tables->table)
@@ -4289,6 +3677,7 @@ end:
new locks, so use open_tables_check_upgradable_mdl() instead.
@param thd Thread context.
+ @param options DDL options.
@param tables_start Start of list of tables on which upgradable locks
should be acquired.
@param tables_end End of list of tables.
@@ -4331,6 +3720,7 @@ lock_table_names(THD *thd, const DDL_options_st &options,
table= table->next_global)
{
if (table->mdl_request.type < MDL_SHARED_UPGRADABLE ||
+ table->mdl_request.type == MDL_SHARED_READ_ONLY ||
table->open_type == OT_TEMPORARY_ONLY ||
(table->open_type == OT_TEMPORARY_OR_BASE && is_temporary_table(table)))
{
@@ -4458,6 +3848,11 @@ open_tables_check_upgradable_mdl(THD *thd, TABLE_LIST *tables_start,
for (table= tables_start; table && table != tables_end;
table= table->next_global)
{
+ /*
+ Check below needs to be updated if this function starts
+ called for SRO locks.
+ */
+ DBUG_ASSERT(table->mdl_request.type != MDL_SHARED_READ_ONLY);
if (table->mdl_request.type < MDL_SHARED_UPGRADABLE ||
table->open_type == OT_TEMPORARY_ONLY ||
(table->open_type == OT_TEMPORARY_OR_BASE && is_temporary_table(table)))
@@ -4495,6 +3890,7 @@ open_tables_check_upgradable_mdl(THD *thd, TABLE_LIST *tables_start,
Open all tables in list
@param[in] thd Thread context.
+ @param[in] options DDL options.
@param[in,out] start List of tables to be open (it can be adjusted for
statement that uses tables only implicitly, e.g.
for "SELECT f1()").
@@ -4668,7 +4064,7 @@ restart:
goto error;
/* Re-open temporary tables after close_tables_for_reopen(). */
- if (open_temporary_tables(thd, *start))
+ if (thd->open_temporary_tables(*start))
goto error;
error= FALSE;
@@ -4730,7 +4126,7 @@ restart:
goto error;
/* Re-open temporary tables after close_tables_for_reopen(). */
- if (open_temporary_tables(thd, *start))
+ if (thd->open_temporary_tables(*start))
goto error;
error= FALSE;
@@ -4880,6 +4276,7 @@ handle_routine(THD *thd, Query_tables_list *prelocking_ctx,
@note this can be changed to use a hash, instead of scanning the linked
list, if the performance of this function will ever become an issue
*/
+
bool table_already_fk_prelocked(TABLE_LIST *tl, LEX_STRING *db,
LEX_STRING *table, thr_lock_type lock_type)
{
@@ -4957,11 +4354,12 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx,
while ((fk= fk_list_it++))
{
// FK_OPTION_RESTRICT and FK_OPTION_NO_ACTION only need read access
+ static bool can_write[]= { true, false, true, true, false, true };
uint8 op= table_list->trg_event_map;
thr_lock_type lock_type;
- if ((op & (1 << TRG_EVENT_DELETE) && fk_modifies_child(fk->delete_method))
- || (op & (1 << TRG_EVENT_UPDATE) && fk_modifies_child(fk->update_method)))
+ if ((op & (1 << TRG_EVENT_DELETE) && can_write[fk->delete_method])
+ || (op & (1 << TRG_EVENT_UPDATE) && can_write[fk->update_method]))
lock_type= TL_WRITE_ALLOW_WRITE;
else
lock_type= TL_READ;
@@ -5170,6 +4568,13 @@ static bool check_lock_and_start_stmt(THD *thd,
table_list->table->file->print_error(error, MYF(0));
DBUG_RETURN(1);
}
+
+ /*
+ Record in transaction state tracking
+ */
+ TRANSACT_TRACKER(add_trx_state(thd, lock_type,
+ table_list->table->file->has_transactions()));
+
DBUG_RETURN(0);
}
@@ -5269,7 +4674,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type,
bool error;
DBUG_ENTER("open_ltable");
- /* Ignore temporary tables as they have already ben opened*/
+ /* Ignore temporary tables as they have already been opened. */
if (table_list->table)
DBUG_RETURN(table_list->table);
@@ -5356,8 +4761,9 @@ end:
Open all tables in list, locks them and optionally process derived tables.
@param thd Thread context.
+ @param options DDL options.
@param tables List of tables for open and locking.
- @param derived If to handle derived tables.
+ @param derived Whether to handle derived tables.
@param flags Bitmap of options to be used to open and lock
tables (see open_tables() and mysql_lock_tables()
for details).
@@ -5502,6 +4908,45 @@ static void mark_real_tables_as_free_for_reuse(TABLE_LIST *table_list)
}
+static bool fix_all_session_vcol_exprs(THD *thd, TABLE_LIST *tables)
+{
+ Security_context *save_security_ctx= thd->security_ctx;
+ TABLE_LIST *first_not_own= thd->lex->first_not_own_table();
+ DBUG_ENTER("fix_session_vcol_expr");
+
+ for (TABLE_LIST *table= tables; table && table != first_not_own;
+ table= table->next_global)
+ {
+ TABLE *t= table->table;
+ if (!table->placeholder() && t->s->vcols_need_refixing &&
+ table->lock_type >= TL_WRITE_ALLOW_WRITE)
+ {
+ if (table->security_ctx)
+ thd->security_ctx= table->security_ctx;
+
+ for (Field **vf= t->vfield; vf && *vf; vf++)
+ if (fix_session_vcol_expr(thd, (*vf)->vcol_info))
+ goto err;
+
+ for (Field **df= t->default_field; df && *df; df++)
+ if ((*df)->default_value &&
+ fix_session_vcol_expr(thd, (*df)->default_value))
+ goto err;
+
+ for (Virtual_column_info **cc= t->check_constraints; cc && *cc; cc++)
+ if (fix_session_vcol_expr(thd, (*cc)))
+ goto err;
+
+ thd->security_ctx= save_security_ctx;
+ }
+ }
+ DBUG_RETURN(0);
+err:
+ thd->security_ctx= save_security_ctx;
+ DBUG_RETURN(1);
+}
+
+
/**
Lock all tables in a list.
@@ -5663,7 +5108,11 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count,
}
}
- DBUG_RETURN(thd->decide_logging_format(tables));
+ bool res= fix_all_session_vcol_exprs(thd, tables);
+ if (!res)
+ res= thd->decide_logging_format(tables);
+
+ DBUG_RETURN(res);
}
@@ -5752,176 +5201,6 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables,
}
-/**
- Open a single table without table caching and don't add it to
- THD::open_tables. Depending on the 'add_to_temporary_tables_list' value,
- the opened TABLE instance will be addded to THD::temporary_tables list.
-
- @param thd Thread context.
- @param hton Storage engine of the table, if known,
- or NULL otherwise.
- @param frm frm image
- @param path Path (without .frm)
- @param db Database name.
- @param table_name Table name.
- @param add_to_temporary_tables_list Specifies if the opened TABLE
- instance should be linked into
- THD::temporary_tables list.
- @param open_in_engine Indicates that we need to open table
- in storage engine in addition to
- constructing TABLE object for it.
-
- @note This function is used:
- - by alter_table() to open a temporary table;
- - when creating a temporary table with CREATE TEMPORARY TABLE.
-
- @return TABLE instance for opened table.
- @retval NULL on error.
-*/
-
-TABLE *open_table_uncached(THD *thd, handlerton *hton,
- LEX_CUSTRING *frm,
- const char *path, const char *db,
- const char *table_name,
- bool add_to_temporary_tables_list,
- bool open_in_engine)
-{
- TABLE *tmp_table;
- TABLE_SHARE *share;
- char cache_key[MAX_DBKEY_LENGTH], *saved_cache_key, *tmp_path;
- uint key_length;
- DBUG_ENTER("open_table_uncached");
- DBUG_PRINT("enter",
- ("table: '%s'.'%s' path: '%s' server_id: %u "
- "pseudo_thread_id: %lu",
- db, table_name, path,
- (uint) thd->variables.server_id,
- (ulong) thd->variables.pseudo_thread_id));
-
- if (add_to_temporary_tables_list)
- {
- /* Temporary tables are not safe for parallel replication. */
- if (thd->rgi_slave && thd->rgi_slave->is_parallel_exec &&
- thd->wait_for_prior_commit())
- DBUG_RETURN(NULL);
- }
-
- /* Create the cache_key for temporary tables */
- key_length= create_tmp_table_def_key(thd, cache_key, db, table_name);
-
- if (!(tmp_table= (TABLE*) my_malloc(sizeof(*tmp_table) + sizeof(*share) +
- strlen(path)+1 + key_length,
- MYF(MY_WME))))
- DBUG_RETURN(0); /* purecov: inspected */
-
- share= (TABLE_SHARE*) (tmp_table+1);
- tmp_path= (char*) (share+1);
- saved_cache_key= strmov(tmp_path, path)+1;
- memcpy(saved_cache_key, cache_key, key_length);
-
- init_tmp_table_share(thd, share, saved_cache_key, key_length,
- strend(saved_cache_key)+1, tmp_path);
- share->db_plugin= ha_lock_engine(thd, hton);
-
- /*
- Use the frm image, if possible, open the file otherwise.
-
- The image might be unavailable in ALTER TABLE, when the discovering
- engine took over the ownership (see TABLE::read_frm_image).
- */
- int res= frm->str
- ? share->init_from_binary_frm_image(thd, false, frm->str, frm->length)
- : open_table_def(thd, share, GTS_TABLE | GTS_USE_DISCOVERY);
-
- if (res)
- {
- /* No need to lock share->mutex as this is not needed for tmp tables */
- free_table_share(share);
- my_free(tmp_table);
- DBUG_RETURN(0);
- }
-
- share->m_psi= PSI_CALL_get_table_share(true, share);
-
- if (open_table_from_share(thd, share, table_name,
- open_in_engine ?
- (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
- HA_GET_INDEX) : 0,
- READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
- ha_open_options,
- tmp_table,
- /*
- Set "is_create_table" if the table does not
- exist in SE
- */
- open_in_engine ? false : true))
- {
- /* No need to lock share->mutex as this is not needed for tmp tables */
- free_table_share(share);
- my_free(tmp_table);
- DBUG_RETURN(0);
- }
-
- tmp_table->reginfo.lock_type= TL_WRITE; // Simulate locked
- tmp_table->grant.privilege= TMP_TABLE_ACLS;
- share->tmp_table= (tmp_table->file->has_transactions() ?
- TRANSACTIONAL_TMP_TABLE : NON_TRANSACTIONAL_TMP_TABLE);
-
- if (add_to_temporary_tables_list)
- {
- thd->lock_temporary_tables();
- /* growing temp list at the head */
- tmp_table->next= thd->temporary_tables;
- if (tmp_table->next)
- tmp_table->next->prev= tmp_table;
- thd->temporary_tables= tmp_table;
- thd->temporary_tables->prev= 0;
- if (thd->rgi_slave)
- {
- thread_safe_increment32(&slave_open_temp_tables);
- }
- thd->unlock_temporary_tables(0);
- }
- tmp_table->pos_in_table_list= 0;
- DBUG_PRINT("tmptable", ("opened table: '%s'.'%s' 0x%lx", tmp_table->s->db.str,
- tmp_table->s->table_name.str, (long) tmp_table));
- DBUG_RETURN(tmp_table);
-}
-
-
-/**
- Delete a temporary table.
-
- @param base Handlerton for table to be deleted.
- @param path Path to the table to be deleted (i.e. path
- to its .frm without an extension).
-
- @retval false - success.
- @retval true - failure.
-*/
-
-bool rm_temporary_table(handlerton *base, const char *path)
-{
- bool error=0;
- handler *file;
- char frm_path[FN_REFLEN + 1];
- DBUG_ENTER("rm_temporary_table");
-
- strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS);
- if (mysql_file_delete(key_file_frm, frm_path, MYF(0)))
- error=1; /* purecov: inspected */
- file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base);
- if (file && file->ha_delete_table(path))
- {
- error=1;
- sql_print_warning("Could not remove temporary table: '%s', error: %d",
- path, my_errno);
- }
- delete file;
- DBUG_RETURN(error);
-}
-
-
/*****************************************************************************
* The following find_field_in_XXX procedures implement the core of the
* name resolution functionality. The entry point to resolve a column name in a
@@ -5950,7 +5229,6 @@ static void update_field_dependencies(THD *thd, Field *field, TABLE *table)
*/
table->covering_keys.intersect(field->part_of_key);
- table->merge_keys.merge(field->part_of_key);
if (field->vcol_info)
table->mark_virtual_col(field);
@@ -5990,164 +5268,6 @@ static void update_field_dependencies(THD *thd, Field *field, TABLE *table)
}
-/**
- Find a temporary table specified by TABLE_LIST instance in the cache and
- prepare its TABLE instance for use.
-
- This function tries to resolve this table in the list of temporary tables
- of this thread. Temporary tables are thread-local and "shadow" base
- tables with the same name.
-
- @note In most cases one should use open_temporary_tables() instead
- of this call.
-
- @note One should finalize process of opening temporary table for table
- list element by calling open_and_process_table(). This function
- is responsible for table version checking and handling of merge
- tables.
-
- @note We used to check global_read_lock before opening temporary tables.
- However, that limitation was artificial and is removed now.
-
- @return Error status.
- @retval FALSE On success. If a temporary table exists for the given
- key, tl->table is set.
- @retval TRUE On error. my_error() has been called.
-*/
-
-bool open_temporary_table(THD *thd, TABLE_LIST *tl)
-{
- TABLE *table;
- DBUG_ENTER("open_temporary_table");
- DBUG_PRINT("enter", ("table: '%s'.'%s'", tl->db, tl->table_name));
-
- /*
- Code in open_table() assumes that TABLE_LIST::table can
- be non-zero only for pre-opened temporary tables.
- */
- DBUG_ASSERT(tl->table == NULL);
-
- /*
- This function should not be called for cases when derived or I_S
- tables can be met since table list elements for such tables can
- have invalid db or table name.
- Instead open_temporary_tables() should be used.
- */
- DBUG_ASSERT(!tl->derived && !tl->schema_table);
-
- if (tl->open_type == OT_BASE_ONLY || !thd->have_temporary_tables())
- {
- DBUG_PRINT("info", ("skip_temporary is set or no temporary tables"));
- DBUG_RETURN(FALSE);
- }
-
- if (find_and_use_temporary_table(thd, tl, &table))
- DBUG_RETURN(TRUE);
- if (!table)
- {
- if (tl->open_type == OT_TEMPORARY_ONLY &&
- tl->open_strategy == TABLE_LIST::OPEN_NORMAL)
- {
- my_error(ER_NO_SUCH_TABLE, MYF(0), tl->db, tl->table_name);
- DBUG_RETURN(TRUE);
- }
- DBUG_RETURN(FALSE);
- }
-
- /*
- Temporary tables are not safe for parallel replication. They were
- designed to be visible to one thread only, so have no table locking.
- Thus there is no protection against two conflicting transactions
- committing in parallel and things like that.
-
- So for now, anything that uses temporary tables will be serialised
- with anything before it, when using parallel replication.
-
- ToDo: We might be able to introduce a reference count or something
- on temp tables, and have slave worker threads wait for it to reach
- zero before being allowed to use the temp table. Might not be worth
- it though, as statement-based replication using temporary tables is
- in any case rather fragile.
- */
- if (thd->rgi_slave && thd->rgi_slave->is_parallel_exec &&
- thd->wait_for_prior_commit())
- DBUG_RETURN(true);
-
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (tl->partition_names)
- {
- /* Partitioned temporary tables is not supported. */
- DBUG_ASSERT(!table->part_info);
- my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0));
- DBUG_RETURN(true);
- }
-#endif
-
- if (table->query_id)
- {
- /*
- We're trying to use the same temporary table twice in a query.
- Right now we don't support this because a temporary table is always
- represented by only one TABLE object in THD, and it can not be
- cloned. Emit an error for an unsupported behaviour.
- */
-
- DBUG_PRINT("error",
- ("query_id: %lu server_id: %u pseudo_thread_id: %lu",
- (ulong) table->query_id, (uint) thd->variables.server_id,
- (ulong) thd->variables.pseudo_thread_id));
- my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr());
- DBUG_RETURN(TRUE);
- }
-
- table->query_id= thd->query_id;
- thd->thread_specific_used= TRUE;
-
- tl->updatable= 1; // It is not derived table nor non-updatable VIEW.
- tl->table= table;
-
- table->init(thd, tl);
-
- DBUG_PRINT("info", ("Using temporary table"));
- DBUG_RETURN(FALSE);
-}
-
-
-/**
- Pre-open temporary tables corresponding to table list elements.
-
- @note One should finalize process of opening temporary tables
- by calling open_tables(). This function is responsible
- for table version checking and handling of merge tables.
-
- @return Error status.
- @retval FALSE On success. If a temporary tables exists for the
- given element, tl->table is set.
- @retval TRUE On error. my_error() has been called.
-*/
-
-bool open_temporary_tables(THD *thd, TABLE_LIST *tl_list)
-{
- TABLE_LIST *first_not_own= thd->lex->first_not_own_table();
- DBUG_ENTER("open_temporary_tables");
-
- for (TABLE_LIST *tl= tl_list; tl && tl != first_not_own; tl= tl->next_global)
- {
- if (tl->derived || tl->schema_table)
- {
- /*
- Derived and I_S tables will be handled by a later call to open_tables().
- */
- continue;
- }
-
- if (open_temporary_table(thd, tl))
- DBUG_RETURN(TRUE);
- }
-
- DBUG_RETURN(FALSE);
-}
-
/*
Find a field by name in a view that uses merge algorithm.
@@ -6177,8 +5297,8 @@ find_field_in_view(THD *thd, TABLE_LIST *table_list,
{
DBUG_ENTER("find_field_in_view");
DBUG_PRINT("enter",
- ("view: '%s', field name: '%s', item name: '%s', ref 0x%lx",
- table_list->alias, name, item_name, (ulong) ref));
+ ("view: '%s', field name: '%s', item name: '%s', ref %p",
+ table_list->alias, name, item_name, ref));
Field_iterator_view field_it;
field_it.set(table_list);
Query_arena *arena= 0, backup;
@@ -6222,9 +5342,9 @@ find_field_in_view(THD *thd, TABLE_LIST *table_list,
}
else
{
- item->set_name((*ref)->name, (*ref)->name_length,
+ item->set_name(thd, (*ref)->name, (*ref)->name_length,
system_charset_info);
- item->real_item()->set_name((*ref)->name, (*ref)->name_length,
+ item->real_item()->set_name(thd, (*ref)->name, (*ref)->name_length,
system_charset_info);
}
}
@@ -6279,8 +5399,8 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name,
Field *UNINIT_VAR(found_field);
Query_arena *UNINIT_VAR(arena), backup;
DBUG_ENTER("find_field_in_natural_join");
- DBUG_PRINT("enter", ("field name: '%s', ref 0x%lx",
- name, (ulong) ref));
+ DBUG_PRINT("enter", ("field name: '%s', ref %p",
+ name, ref));
DBUG_ASSERT(table_ref->is_natural_join && table_ref->join_columns);
DBUG_ASSERT(*actual_table == NULL);
@@ -6321,9 +5441,9 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name,
*/
if (*ref && !(*ref)->is_autogenerated_name)
{
- item->set_name((*ref)->name, (*ref)->name_length,
+ item->set_name(thd, (*ref)->name, (*ref)->name_length,
system_charset_info);
- item->real_item()->set_name((*ref)->name, (*ref)->name_length,
+ item->real_item()->set_name(thd, (*ref)->name, (*ref)->name_length,
system_charset_info);
}
if (register_tree_change && arena)
@@ -6436,7 +5556,7 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length,
if (field_ptr && *field_ptr)
{
- *cached_field_index_ptr= field_ptr - table->field;
+ *cached_field_index_ptr= (uint)(field_ptr - table->field);
field= *field_ptr;
}
else
@@ -6513,8 +5633,8 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
DBUG_ASSERT(name);
DBUG_ASSERT(item_name);
DBUG_PRINT("enter",
- ("table: '%s' field name: '%s' item name: '%s' ref 0x%lx",
- table_list->alias, name, item_name, (ulong) ref));
+ ("table: '%s' field name: '%s' item name: '%s' ref %p",
+ table_list->alias, name, item_name, ref));
/*
Check that the table and database that qualify the current field name
@@ -6629,9 +5749,9 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
else
{
if (thd->mark_used_columns == MARK_COLUMNS_READ)
- it->walk(&Item::register_field_in_read_map, 0, (uchar *) 0);
+ it->walk(&Item::register_field_in_read_map, 0, 0);
else
- it->walk(&Item::register_field_in_write_map, 0, (uchar *) 0);
+ it->walk(&Item::register_field_in_write_map, 0, 0);
}
}
else
@@ -6972,6 +6092,8 @@ find_field_in_tables(THD *thd, Item_ident *item,
or as a field name without alias,
or as a field hidden by alias,
or ignoring alias)
+ limit How many items in the list to check
+ (if limit==0 then all items are to be checked)
RETURN VALUES
0 Item is not found or item is not unique,
@@ -6989,9 +6111,10 @@ Item **not_found_item= (Item**) 0x1;
Item **
find_item_in_list(Item *find, List<Item> &items, uint *counter,
find_item_error_report_type report_error,
- enum_resolution_type *resolution)
+ enum_resolution_type *resolution, uint limit)
{
List_iterator<Item> li(items);
+ uint n_items= limit == 0 ? items.elements : limit;
Item **found=0, **found_unaliased= 0, *item;
const char *db_name=0;
const char *field_name=0;
@@ -7015,8 +6138,9 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
db_name= ((Item_ident*) find)->db_name;
}
- for (uint i= 0; (item=li++); i++)
+ for (uint i= 0; i < n_items; i++)
{
+ item= li++;
if (field_name &&
(item->real_item()->type() == Item::FIELD_ITEM ||
((item->type() == Item::REF_ITEM) &&
@@ -7456,7 +6580,6 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
/* Mark field_1 used for table cache. */
bitmap_set_bit(table_1->read_set, field_1->field_index);
table_1->covering_keys.intersect(field_1->part_of_key);
- table_1->merge_keys.merge(field_1->part_of_key);
}
if (field_2)
{
@@ -7464,7 +6587,6 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
/* Mark field_2 used for table cache. */
bitmap_set_bit(table_2->read_set, field_2->field_index);
table_2->covering_keys.intersect(field_2->part_of_key);
- table_2->merge_keys.merge(field_2->part_of_key);
}
if (using_fields != NULL)
@@ -7909,13 +7031,15 @@ static bool setup_natural_join_row_types(THD *thd,
int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
List<Item> *sum_func_list,
- uint wild_num)
+ uint wild_num, uint *hidden_bit_fields)
{
+ if (!wild_num)
+ return(0);
+
Item *item;
List_iterator<Item> it(fields);
Query_arena *arena, backup;
DBUG_ENTER("setup_wild");
- DBUG_ASSERT(wild_num != 0);
/*
Don't use arena if we are not in prepared statements or stored procedures
@@ -7948,7 +7072,7 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
else if (insert_fields(thd, ((Item_field*) item)->context,
((Item_field*) item)->db_name,
((Item_field*) item)->table_name, &it,
- any_privileges))
+ any_privileges, hidden_bit_fields))
{
if (arena)
thd->restore_active_arena(arena, &backup);
@@ -7994,19 +7118,19 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
** Check that all given fields exists and fill struct with current data
****************************************************************************/
-bool setup_fields(THD *thd, Item **ref_pointer_array,
+bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, List<Item> *pre_fix,
bool allow_sum_func)
{
- reg2 Item *item;
+ Item *item;
enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
List_iterator<Item> it(fields);
bool save_is_item_list_lookup;
bool make_pre_fix= (pre_fix && (pre_fix->elements == 0));
DBUG_ENTER("setup_fields");
- DBUG_PRINT("enter", ("ref_pointer_array: %p", ref_pointer_array));
+ DBUG_PRINT("enter", ("ref_pointer_array: %p", ref_pointer_array.array()));
thd->mark_used_columns= mark_used_columns;
DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
@@ -8028,8 +7152,11 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
TODO: remove it when (if) we made one list for allfields and
ref_pointer_array
*/
- if (ref_pointer_array)
- bzero(ref_pointer_array, sizeof(Item *) * fields.elements);
+ if (!ref_pointer_array.is_null())
+ {
+ DBUG_ASSERT(ref_pointer_array.size() >= fields.elements);
+ memset(ref_pointer_array.array(), 0, sizeof(Item *) * fields.elements);
+ }
/*
We call set_entry() there (before fix_fields() of the whole list of field
@@ -8047,7 +7174,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
while ((var= li++))
var->set_entry(thd, FALSE);
- Item **ref= ref_pointer_array;
+ Ref_ptr_array ref= ref_pointer_array;
thd->lex->current_select->cur_pos_in_select_list= 0;
while ((item= it++))
{
@@ -8063,12 +7190,23 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
DBUG_RETURN(TRUE); /* purecov: inspected */
}
- if (ref)
- *(ref++)= item;
- if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM &&
- sum_func_list)
+ if (!ref.is_null())
+ {
+ ref[0]= item;
+ ref.pop_front();
+ }
+ /*
+ split_sum_func() must be called for Window Function items, see
+ Item_window_func::split_sum_func.
+ */
+ if (sum_func_list &&
+ ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) ||
+ item->with_window_func))
+ {
item->split_sum_func(thd, ref_pointer_array, *sum_func_list,
SPLIT_SUM_SELECT);
+ }
+ thd->lex->current_select->select_list_tables|= item->used_tables();
thd->lex->used_tables|= item->used_tables();
thd->lex->current_select->cur_pos_in_select_list++;
}
@@ -8399,13 +7537,13 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table,
bool
insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
const char *table_name, List_iterator<Item> *it,
- bool any_privileges)
+ bool any_privileges, uint *hidden_bit_fields)
{
Field_iterator_table_ref field_iterator;
bool found;
char name_buff[SAFE_NAME_LEN+1];
DBUG_ENTER("insert_fields");
- DBUG_PRINT("arena", ("stmt arena: 0x%lx", (ulong)thd->stmt_arena));
+ DBUG_PRINT("arena", ("stmt arena: %p",thd->stmt_arena));
if (db_name && lower_case_table_names)
{
@@ -8487,7 +7625,10 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
views and natural joins this update is performed inside the loop below.
*/
if (table)
+ {
thd->lex->used_tables|= table->map;
+ thd->lex->current_select->select_list_tables|= table->map;
+ }
/*
Initialize a generic field iterator for the current table reference.
@@ -8516,6 +7657,9 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
else
it->after(item); /* Add 'item' to the SELECT list. */
+ if (item->type() == Item::FIELD_ITEM && item->field_type() == MYSQL_TYPE_BIT)
+ (*hidden_bit_fields)++;
+
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/*
Set privilege information for the fields of newly created views.
@@ -8525,7 +7669,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
temporary table. Thus in this case we can be sure that 'item' is an
Item_field.
*/
- if (any_privileges)
+ if (any_privileges && !tables->is_with_table() && !tables->is_derived())
{
DBUG_ASSERT((tables->field_translation == NULL && table) ||
tables->is_natural_join);
@@ -8562,7 +7706,6 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
if (table)
{
table->covering_keys.intersect(field->part_of_key);
- table->merge_keys.merge(field->part_of_key);
}
if (tables->is_natural_join)
{
@@ -8579,8 +7722,9 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
if (field_table)
{
thd->lex->used_tables|= field_table->map;
+ thd->lex->current_select->select_list_tables|=
+ field_table->map;
field_table->covering_keys.intersect(field->part_of_key);
- field_table->merge_keys.merge(field->part_of_key);
field_table->used_fields++;
}
}
@@ -8827,11 +7971,14 @@ err_no_arena:
@param fields Item_fields list to be filled
@param values values to fill with
@param ignore_errors TRUE if we should ignore errors
+ @param update TRUE if update query
@details
fill_record() may set table->auto_increment_field_not_null and a
caller should make sure that it is reset after their last call to this
function.
+ default functions are executed for inserts.
+ virtual fields are always updated
@return Status
@retval true An error occurred.
@@ -8840,12 +7987,11 @@ err_no_arena:
bool
fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
- bool ignore_errors)
+ bool ignore_errors, bool update)
{
List_iterator_fast<Item> f(fields),v(values);
Item *value, *fld;
Item_field *field;
- TABLE *vcol_table= 0;
bool save_abort_on_warning= thd->abort_on_warning;
bool save_no_errors= thd->no_errors;
DBUG_ENTER("fill_record");
@@ -8871,8 +8017,6 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
table_arg->auto_increment_field_not_null= FALSE;
f.rewind();
}
- else if (thd->lex->unit.insert_table_with_stored_vcol)
- vcol_table= thd->lex->unit.insert_table_with_stored_vcol;
while ((fld= f++))
{
@@ -8887,9 +8031,10 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
if (table->next_number_field &&
rfield->field_index == table->next_number_field->field_index)
table->auto_increment_field_not_null= TRUE;
- if (rfield->vcol_info &&
- value->type() != Item::DEFAULT_VALUE_ITEM &&
- value->type() != Item::NULL_ITEM &&
+ Item::Type type= value->type();
+ if (rfield->vcol_info &&
+ type != Item::DEFAULT_VALUE_ITEM &&
+ type != Item::NULL_ITEM &&
table->s->table_category != TABLE_CATEGORY_TEMPORARY)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -8897,25 +8042,27 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
ER_THD(thd, ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN),
rfield->field_name, table->s->table_name.str);
}
- if ((!rfield->vcol_info || rfield->stored_in_db) &&
+ if (rfield->stored_in_db() &&
(value->save_in_field(rfield, 0)) < 0 && !ignore_errors)
{
my_message(ER_UNKNOWN_ERROR, ER_THD(thd, ER_UNKNOWN_ERROR), MYF(0));
goto err;
}
rfield->set_explicit_default(value);
- DBUG_ASSERT(vcol_table == 0 || vcol_table == table);
- vcol_table= table;
}
- /* Update virtual fields*/
- thd->abort_on_warning= FALSE;
- if (vcol_table && vcol_table->vfield &&
- update_virtual_fields(thd, vcol_table, VCOL_UPDATE_FOR_WRITE))
+
+ if (!update && table_arg->default_field &&
+ table_arg->update_default_fields(0, ignore_errors))
+ goto err;
+ /* Update virtual fields */
+ if (table_arg->vfield &&
+ table_arg->update_virtual_fields(table_arg->file, VCOL_UPDATE_FOR_WRITE))
goto err;
thd->abort_on_warning= save_abort_on_warning;
thd->no_errors= save_no_errors;
DBUG_RETURN(thd->is_error());
err:
+ DBUG_PRINT("error",("got error"));
thd->abort_on_warning= save_abort_on_warning;
thd->no_errors= save_no_errors;
if (fields.elements)
@@ -8934,19 +8081,48 @@ void switch_to_nullable_trigger_fields(List<Item> &items, TABLE *table)
{
Field** field= table->field_to_fill();
+ /* True if we have NOT NULL fields and BEFORE triggers */
if (field != table->field)
{
List_iterator_fast<Item> it(items);
Item *item;
while ((item= it++))
- item->walk(&Item::switch_to_nullable_fields_processor, 1, (uchar*)field);
+ item->walk(&Item::switch_to_nullable_fields_processor, 1, field);
table->triggers->reset_extra_null_bitmap();
}
}
/**
+ Prepare Virtual fields and field with default expressions to use
+ trigger fields
+
+ This means redirecting from table->field to
+ table->field_to_fill(), if needed.
+*/
+
+void switch_defaults_to_nullable_trigger_fields(TABLE *table)
+{
+ if (!table->default_field)
+ return; // no defaults
+
+ Field **trigger_field= table->field_to_fill();
+
+ /* True if we have NOT NULL fields and BEFORE triggers */
+ if (trigger_field != table->field)
+ {
+ for (Field **field_ptr= table->default_field; *field_ptr ; field_ptr++)
+ {
+ Field *field= (*field_ptr);
+ field->default_value->expr->walk(&Item::switch_to_nullable_fields_processor, 1, trigger_field);
+ *field_ptr= (trigger_field[field->field_index]);
+ }
+ }
+}
+
+
+/**
Test NOT NULL constraint after BEFORE triggers
*/
static bool not_null_fields_have_null_values(TABLE *table)
@@ -9000,36 +8176,37 @@ static bool not_null_fields_have_null_values(TABLE *table)
*/
bool
-fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, List<Item> &fields,
+fill_record_n_invoke_before_triggers(THD *thd, TABLE *table,
+ List<Item> &fields,
List<Item> &values, bool ignore_errors,
enum trg_event_type event)
{
bool result;
Table_triggers_list *triggers= table->triggers;
- result= fill_record(thd, table, fields, values, ignore_errors);
+ result= fill_record(thd, table, fields, values, ignore_errors,
+ event == TRG_EVENT_UPDATE);
if (!result && triggers)
- result= triggers->process_triggers(thd, event, TRG_ACTION_BEFORE, TRUE) ||
- not_null_fields_have_null_values(table);
-
- /*
- Re-calculate virtual fields to cater for cases when base columns are
- updated by the triggers.
- */
- if (!result && triggers)
{
- List_iterator_fast<Item> f(fields);
- Item *fld;
- Item_field *item_field;
- if (fields.elements)
+ if (triggers->process_triggers(thd, event, TRG_ACTION_BEFORE,
+ TRUE) ||
+ not_null_fields_have_null_values(table))
+ return TRUE;
+
+ /*
+ Re-calculate virtual fields to cater for cases when base columns are
+ updated by the triggers.
+ */
+ if (table->vfield && fields.elements)
{
- fld= (Item_field*)f++;
- item_field= fld->field_for_view_update();
- if (item_field && item_field->field && table && table->vfield)
+ Item *fld= (Item_field*) fields.head();
+ Item_field *item_field= fld->field_for_view_update();
+ if (item_field)
{
DBUG_ASSERT(table == item_field->field->table);
- result= update_virtual_fields(thd, table, VCOL_UPDATE_FOR_WRITE);
+ result|= table->update_virtual_fields(table->file,
+ VCOL_UPDATE_FOR_WRITE);
}
}
}
@@ -9039,6 +8216,7 @@ fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, List<Item> &fields,
/**
Fill the field buffer of a table with the values of an Item list
+ All fields are given a value
@param thd thread handler
@param table_arg the table that is being modified
@@ -9063,6 +8241,7 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
{
List_iterator_fast<Item> v(values);
List<TABLE> tbl_list;
+ bool all_fields_have_values= true;
Item *value;
Field *field;
bool abort_on_warning_saved= thd->abort_on_warning;
@@ -9096,15 +8275,18 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
value=v++;
if (field->field_index == autoinc_index)
table->auto_increment_field_not_null= TRUE;
- if (field->vcol_info &&
- value->type() != Item::DEFAULT_VALUE_ITEM &&
- value->type() != Item::NULL_ITEM &&
- table->s->table_category != TABLE_CATEGORY_TEMPORARY)
+ if (field->vcol_info)
{
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN,
- ER_THD(thd, ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN),
- field->field_name, table->s->table_name.str);
+ Item::Type type= value->type();
+ if (type != Item::DEFAULT_VALUE_ITEM &&
+ type != Item::NULL_ITEM &&
+ table->s->table_category != TABLE_CATEGORY_TEMPORARY)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN,
+ ER_THD(thd, ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN),
+ field->field_name, table->s->table_name.str);
+ }
}
if (use_value)
@@ -9112,12 +8294,15 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
else
if (value->save_in_field(field, 0) < 0)
goto err;
- field->set_explicit_default(value);
+ all_fields_have_values &= field->set_explicit_default(value);
}
- /* Update virtual fields*/
+ if (!all_fields_have_values && table->default_field &&
+ table->update_default_fields(0, ignore_errors))
+ goto err;
+ /* Update virtual fields */
thd->abort_on_warning= FALSE;
if (table->vfield &&
- update_virtual_fields(thd, table, VCOL_UPDATE_FOR_WRITE))
+ table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE))
goto err;
thd->abort_on_warning= abort_on_warning_saved;
DBUG_RETURN(thd->is_error());
@@ -9171,7 +8356,7 @@ fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, Field **ptr,
{
DBUG_ASSERT(table == (*ptr)->table);
if (table->vfield)
- result= update_virtual_fields(thd, table, VCOL_UPDATE_FOR_WRITE);
+ result= table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE);
}
return result;
@@ -9188,7 +8373,7 @@ my_bool mysql_rm_tmp_tables(void)
THD *thd;
DBUG_ENTER("mysql_rm_tmp_tables");
- if (!(thd= new THD))
+ if (!(thd= new THD(0)))
DBUG_RETURN(1);
thd->thread_stack= (char*) &thd;
thd->store_globals();
@@ -9280,7 +8465,6 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order)
DBUG_PRINT("info",("Performing FULLTEXT search"));
while ((ifm=li++))
-#if MYSQL_VERSION_ID < 100213
if (unlikely(!ifm->fixed))
/*
it mean that clause where was FT function was removed, so we have
@@ -9288,7 +8472,6 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order)
*/
li.remove();
else
-#endif
ifm->init_search(thd, no_order);
}
return 0;
diff --git a/sql/sql_base.h b/sql/sql_base.h
index 94294e3aa43..37cc9e8e8f1 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -18,8 +18,8 @@
#ifndef SQL_BASE_INCLUDED
#define SQL_BASE_INCLUDED
-#include "sql_trigger.h" /* trg_event_type */
#include "sql_class.h" /* enum_mark_columns */
+#include "sql_trigger.h" /* trg_event_type */
#include "mysqld.h" /* key_map */
#include "table_cache.h"
@@ -64,9 +64,8 @@ enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND,
/* Flag bits for unique_table() */
#define CHECK_DUP_ALLOW_DIFFERENT_ALIAS 1
#define CHECK_DUP_FOR_CREATE 2
+#define CHECK_DUP_SKIP_TEMP_TABLE 4
-uint create_tmp_table_def_key(THD *thd, char *key, const char *db,
- const char *table_name);
uint get_table_def_key(const TABLE_LIST *table_list, const char **key);
TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update,
uint lock_flags);
@@ -131,11 +130,6 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx);
bool get_key_map_from_key_list(key_map *map, TABLE *table,
List<String> *index_list);
-TABLE *open_table_uncached(THD *thd, handlerton *hton,
- LEX_CUSTRING *frm, const char *path,
- const char *db, const char *table_name,
- bool add_to_temporary_tables_list,
- bool open_in_engine);
TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name);
TABLE *find_write_locked_table(TABLE *list, const char *db,
const char *table_name);
@@ -145,7 +139,6 @@ thr_lock_type read_lock_type_for_table(THD *thd,
bool routine_modifies_data);
my_bool mysql_rm_tmp_tables(void);
-bool rm_temporary_table(handlerton *base, const char *path);
void close_tables_for_reopen(THD *thd, TABLE_LIST **tables,
const MDL_savepoint &start_of_statement_svp);
bool table_already_fk_prelocked(TABLE_LIST *tl, LEX_STRING *db,
@@ -154,16 +147,9 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table,
TABLE_LIST *TABLE_LIST::*link,
const char *db_name,
const char *table_name);
-TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name);
-bool find_and_use_temporary_table(THD *thd, const char *db,
- const char *table_name, TABLE **out_table);
-TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl);
-bool find_and_use_temporary_table(THD *thd, const TABLE_LIST *tl,
- TABLE **out_table);
-TABLE *find_temporary_table(THD *thd, const char *table_key,
- uint table_key_length);
void close_thread_tables(THD *thd);
void switch_to_nullable_trigger_fields(List<Item> &items, TABLE *);
+void switch_defaults_to_nullable_trigger_fields(TABLE *table);
bool fill_record_n_invoke_before_triggers(THD *thd, TABLE *table,
List<Item> &fields,
List<Item> &values,
@@ -176,18 +162,19 @@ bool fill_record_n_invoke_before_triggers(THD *thd, TABLE *table,
enum trg_event_type event);
bool insert_fields(THD *thd, Name_resolution_context *context,
const char *db_name, const char *table_name,
- List_iterator<Item> *it, bool any_privileges);
+ List_iterator<Item> *it, bool any_privileges,
+ uint *hidden_bit_fields);
void make_leaves_list(THD *thd, List<TABLE_LIST> &list, TABLE_LIST *tables,
bool full_table_list, TABLE_LIST *boundary);
int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
- List<Item> *sum_func_list, uint wild_num);
-bool setup_fields(THD *thd, Item** ref_pointer_array,
+ List<Item> *sum_func_list, uint wild_num, uint * hidden_bit_fields);
+bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &item, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, List<Item> *pre_fix,
bool allow_sum_func);
void unfix_fields(List<Item> &items);
bool fill_record(THD * thd, TABLE *table_arg, List<Item> &fields,
- List<Item> &values, bool ignore_errors);
+ List<Item> &values, bool ignore_errors, bool update);
bool fill_record(THD *thd, TABLE *table, Field **field, List<Item> &values,
bool ignore_errors, bool use_value);
@@ -211,7 +198,7 @@ Field *
find_field_in_table_sef(TABLE *table, const char *name);
Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter,
find_item_error_report_type report_error,
- enum_resolution_type *resolution);
+ enum_resolution_type *resolution, uint limit= 0);
bool setup_tables(THD *thd, Name_resolution_context *context,
List<TABLE_LIST> *from_clause, TABLE_LIST *tables,
List<TABLE_LIST> &leaves, bool select_insert,
@@ -281,21 +268,9 @@ bool open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables, uint flags,
uint dt_phases);
bool lock_tables(THD *thd, TABLE_LIST *tables, uint counter, uint flags);
int decide_logging_format(THD *thd, TABLE_LIST *tables);
-void free_io_cache(TABLE *entry);
-void intern_close_table(TABLE *entry);
-void kill_delayed_threads_for_table(TDC_element *element);
void close_thread_table(THD *thd, TABLE **table_ptr);
-bool close_temporary_tables(THD *thd);
TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
uint check_flag);
-int drop_temporary_table(THD *thd, TABLE *table, bool *is_trans);
-void close_temporary_table(THD *thd, TABLE *table, bool free_share,
- bool delete_table);
-void close_temporary(TABLE *table, bool free_share, bool delete_table);
-bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db,
- const char *table_name);
-bool open_temporary_tables(THD *thd, TABLE_LIST *tl_list);
-bool open_temporary_table(THD *thd, TABLE_LIST *tl);
bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
class Open_tables_backup;
@@ -319,24 +294,13 @@ void close_all_tables_for_name(THD *thd, TABLE_SHARE *share,
ha_extra_function extra,
TABLE *skip_table);
OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild);
-bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias,
- const char *cache_key, uint cache_key_length, uint flags);
-
-static inline bool tdc_open_view(THD *thd, TABLE_LIST *table_list,
- const char *alias, uint flags)
-{
- const char *key;
- uint key_length= get_table_def_key(table_list, &key);
- return tdc_open_view(thd, table_list, alias, key, key_length, flags);
-}
+bool tdc_open_view(THD *thd, TABLE_LIST *table_list, uint flags);
TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db,
const char *table_name,
int *p_error);
void mark_tmp_table_for_reuse(TABLE *table);
-int update_virtual_fields(THD *thd, TABLE *table,
- enum enum_vcol_update_mode vcol_update_mode= VCOL_UPDATE_FOR_READ);
int dynamic_column_error_message(enum_dyncol_func_result rc);
/* open_and_lock_tables with optional derived handling */
@@ -378,14 +342,12 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr)
table->force_index= table_list->force_index;
table->force_index_order= table->force_index_group= 0;
table->covering_keys= table->s->keys_for_keyread;
- table->merge_keys.clear_all();
TABLE_LIST *orig= table_list->select_lex ?
table_list->select_lex->master_unit()->derived : 0;
if (!orig || !orig->is_merged_derived())
{
/* Tables merged from derived were set up already.*/
table->covering_keys= table->s->keys_for_keyread;
- table->merge_keys.clear_all();
}
}
@@ -406,7 +368,7 @@ inline TABLE_LIST *find_table_in_local_list(TABLE_LIST *table,
}
-inline bool setup_fields_with_no_wrap(THD *thd, Item **ref_pointer_array,
+inline bool setup_fields_with_no_wrap(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &item,
enum_mark_columns mark_used_columns,
List<Item> *sum_func_list,
@@ -675,7 +637,7 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl);
diff --git a/sql/sql_basic_types.h b/sql/sql_basic_types.h
new file mode 100644
index 00000000000..1e97262cdf0
--- /dev/null
+++ b/sql/sql_basic_types.h
@@ -0,0 +1,25 @@
+/*
+ Copyright (c) 2000, 2016, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2016, MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
+
+/* File that includes common types used globally in MariaDB */
+
+#ifndef SQL_TYPES_INCLUDED
+#define SQL_TYPES_INCLUDED
+
+typedef ulonglong sql_mode_t;
+typedef int64 query_id_t;
+#endif
diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc
index 2e861d00f10..5cd2a341353 100644
--- a/sql/sql_binlog.cc
+++ b/sql/sql_binlog.cc
@@ -17,17 +17,97 @@
#include <my_global.h>
#include "sql_priv.h"
#include "sql_binlog.h"
-#include "sql_parse.h" // check_global_access
-#include "sql_acl.h" // *_ACL
+#include "sql_parse.h"
+#include "sql_acl.h"
#include "rpl_rli.h"
-#include "slave.h" // apply_event_and_update_pos
-#include "log_event.h" // Format_description_log_event,
- // EVENT_LEN_OFFSET,
- // EVENT_TYPE_OFFSET,
- // FORMAT_DESCRIPTION_LOG_EVENT,
- // START_EVENT_V3,
- // Log_event_type,
- // Log_event
+#include "slave.h"
+#include "log_event.h"
+
+
+/**
+ Check if the event type is allowed in a BINLOG statement.
+
+ @retval 0 if the event type is ok.
+ @retval 1 if the event type is not ok.
+*/
+static int check_event_type(int type, Relay_log_info *rli)
+{
+ Format_description_log_event *fd_event=
+ rli->relay_log.description_event_for_exec;
+
+ /*
+ Convert event type id of certain old versions (see comment in
+ Format_description_log_event::Format_description_log_event(char*,...)).
+ */
+ if (fd_event && fd_event->event_type_permutation)
+ {
+ IF_DBUG({
+ int new_type= fd_event->event_type_permutation[type];
+ DBUG_PRINT("info",
+ ("converting event type %d to %d (%s)",
+ type, new_type,
+ Log_event::get_type_str((Log_event_type)new_type)));
+ },
+ (void)0);
+ type= fd_event->event_type_permutation[type];
+ }
+
+ switch (type)
+ {
+ case START_EVENT_V3:
+ case FORMAT_DESCRIPTION_EVENT:
+ /*
+ We need a preliminary FD event in order to parse the FD event,
+ if we don't already have one.
+ */
+ if (!fd_event)
+ if (!(rli->relay_log.description_event_for_exec=
+ new Format_description_log_event(4)))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), 1);
+ return 1;
+ }
+
+ /* It is always allowed to execute FD events. */
+ return 0;
+
+ case TABLE_MAP_EVENT:
+ case WRITE_ROWS_EVENT_V1:
+ case UPDATE_ROWS_EVENT_V1:
+ case DELETE_ROWS_EVENT_V1:
+ case WRITE_ROWS_EVENT:
+ case UPDATE_ROWS_EVENT:
+ case DELETE_ROWS_EVENT:
+ case PRE_GA_WRITE_ROWS_EVENT:
+ case PRE_GA_UPDATE_ROWS_EVENT:
+ case PRE_GA_DELETE_ROWS_EVENT:
+ /*
+ Row events are only allowed if a Format_description_event has
+ already been seen.
+ */
+ if (fd_event)
+ return 0;
+ else
+ {
+ my_error(ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT,
+ MYF(0), Log_event::get_type_str((Log_event_type)type));
+ return 1;
+ }
+ break;
+
+ default:
+ /*
+ It is not meaningful to execute other events than row-events and
+ FD events. It would even be dangerous to execute Stop_log_event
+ and Rotate_log_event since they call Relay_log_info::flush(), which
+ is not allowed to call by other threads than the slave SQL
+ thread when the slave SQL thread is running.
+ */
+ my_error(ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT,
+ MYF(0), Log_event::get_type_str((Log_event_type)type));
+ return 1;
+ }
+}
/**
Copy fragments into the standard placeholder thd->lex->comment.str.
@@ -123,12 +203,6 @@ void mysql_client_binlog_statement(THD* thd)
Allocation
*/
- /*
- If we do not have a Format_description_event, we create a dummy
- one here. In this case, the first event we read must be a
- Format_description_event.
- */
- my_bool have_fd_event= TRUE;
int err;
Relay_log_info *rli;
rpl_group_info *rgi;
@@ -136,20 +210,8 @@ void mysql_client_binlog_statement(THD* thd)
size_t coded_len= 0, decoded_len= 0;
rli= thd->rli_fake;
- if (!rli)
- {
- rli= thd->rli_fake= new Relay_log_info(FALSE);
-#ifdef HAVE_valgrind
- rli->is_fake= TRUE;
-#endif
- have_fd_event= FALSE;
- }
- if (rli && !rli->relay_log.description_event_for_exec)
- {
- rli->relay_log.description_event_for_exec=
- new Format_description_log_event(4);
- have_fd_event= FALSE;
- }
+ if (!rli && (rli= thd->rli_fake= new Relay_log_info(FALSE)))
+ rli->sql_driver_thd= thd;
if (!(rgi= thd->rgi_fake))
rgi= thd->rgi_fake= new rpl_group_info(rli);
rgi->thd= thd;
@@ -161,14 +223,13 @@ void mysql_client_binlog_statement(THD* thd)
/*
Out of memory check
*/
- if (!(rli && rli->relay_log.description_event_for_exec))
+ if (!(rli))
{
my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); /* needed 1 bytes */
goto end;
}
- rli->sql_driver_thd= thd;
- rli->no_storage= TRUE;
+ DBUG_ASSERT(rli->belongs_to_client());
if (unlikely(is_fragmented= thd->lex->comment.str && thd->lex->ident.str))
if (binlog_defragment(thd))
@@ -180,7 +241,7 @@ void mysql_client_binlog_statement(THD* thd)
goto end;
}
- decoded_len= base64_needed_decoded_length(coded_len);
+ decoded_len= my_base64_needed_decoded_length(coded_len);
if (!(buf= (char *) my_malloc(decoded_len, MYF(MY_WME))))
{
my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
@@ -191,7 +252,7 @@ void mysql_client_binlog_statement(THD* thd)
strptr < thd->lex->comment.str + thd->lex->comment.length ; )
{
char const *endptr= 0;
- int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr,
+ int bytes_decoded= my_base64_decode(strptr, coded_len, buf, &endptr,
MY_BASE64_DECODE_ALLOW_MULTIPLE_CHUNKS);
#ifndef HAVE_valgrind
@@ -200,8 +261,8 @@ void mysql_client_binlog_statement(THD* thd)
since it will read from unassigned memory.
*/
DBUG_PRINT("info",
- ("bytes_decoded: %d strptr: 0x%lx endptr: 0x%lx ('%c':%d)",
- bytes_decoded, (long) strptr, (long) endptr, *endptr,
+ ("bytes_decoded: %d strptr: %p endptr: %p ('%c':%d)",
+ bytes_decoded, strptr, endptr, *endptr,
*endptr));
#endif
@@ -252,23 +313,8 @@ void mysql_client_binlog_statement(THD* thd)
DBUG_PRINT("info", ("event_len=%lu, bytes_decoded=%d",
event_len, bytes_decoded));
- /*
- If we have not seen any Format_description_event, then we must
- see one; it is the only statement that can be read in base64
- without a prior Format_description_event.
- */
- if (!have_fd_event)
- {
- int type = (uchar)bufptr[EVENT_TYPE_OFFSET];
- if (type == FORMAT_DESCRIPTION_EVENT || type == START_EVENT_V3)
- have_fd_event= TRUE;
- else
- {
- my_error(ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT,
- MYF(0), Log_event::get_type_str((Log_event_type)type));
- goto end;
- }
- }
+ if (check_event_type(bufptr[EVENT_TYPE_OFFSET], rli))
+ goto end;
ev= Log_event::read_log_event(bufptr, event_len, &error,
rli->relay_log.description_event_for_exec,
@@ -279,7 +325,7 @@ void mysql_client_binlog_statement(THD* thd)
{
/*
This could actually be an out-of-memory, but it is more likely
- causes by a bad statement
+ caused by a bad statement
*/
my_error(ER_SYNTAX_ERROR, MYF(0));
goto end;
diff --git a/sql/sql_bootstrap.cc b/sql/sql_bootstrap.cc
index 30d03029ce6..9733831b41f 100644
--- a/sql/sql_bootstrap.cc
+++ b/sql/sql_bootstrap.cc
@@ -24,8 +24,8 @@ int read_bootstrap_query(char *query, int *query_length,
{
char line_buffer[MAX_BOOTSTRAP_LINE_SIZE];
const char *line;
- int len;
- int query_len= 0;
+ size_t len;
+ size_t query_len= 0;
int fgets_error= 0;
*error= 0;
@@ -82,14 +82,14 @@ int read_bootstrap_query(char *query, int *query_length,
*/
if (query_len + len + 1 >= MAX_BOOTSTRAP_QUERY_SIZE)
{
- int new_len= MAX_BOOTSTRAP_QUERY_SIZE - query_len - 1;
+ size_t new_len= MAX_BOOTSTRAP_QUERY_SIZE - query_len - 1;
if ((new_len > 0) && (query_len < MAX_BOOTSTRAP_QUERY_SIZE))
{
memcpy(query + query_len, line, new_len);
query_len+= new_len;
}
query[query_len]= '\0';
- *query_length= query_len;
+ *query_length= (int)query_len;
return READ_BOOTSTRAP_QUERY_SIZE;
}
@@ -111,7 +111,7 @@ int read_bootstrap_query(char *query, int *query_length,
Return the query found.
*/
query[query_len]= '\0';
- *query_length= query_len;
+ *query_length= (int)query_len;
return READ_BOOTSTRAP_SUCCESS;
}
}
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index df6c7c35e5a..17f896374a4 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2010, 2013, Monty Program Ab
+ Copyright (c) 2010, 2017, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -330,6 +330,7 @@ TODO list:
#include <my_global.h> /* NO_EMBEDDED_ACCESS_CHECKS */
#include "sql_priv.h"
+#include "sql_basic_types.h"
#include "sql_cache.h"
#include "sql_parse.h" // check_table_access
#include "tztime.h" // struct Time_zone
@@ -354,28 +355,28 @@ const uchar *query_state_map;
#include "emb_qcache.h"
#endif
-#if !defined(EXTRA_DBUG) && !defined(DBUG_OFF)
-#define RW_WLOCK(M) {DBUG_PRINT("lock", ("rwlock wlock 0x%lx",(ulong)(M))); \
+#if defined(EXTRA_DEBUG) && !defined(DBUG_OFF)
+#define RW_WLOCK(M) {DBUG_PRINT("lock", ("rwlock wlock %p",(M))); \
if (!mysql_rwlock_wrlock(M)) DBUG_PRINT("lock", ("rwlock wlock ok")); \
else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); }
-#define RW_RLOCK(M) {DBUG_PRINT("lock", ("rwlock rlock 0x%lx", (ulong)(M))); \
+#define RW_RLOCK(M) {DBUG_PRINT("lock", ("rwlock rlock %p",(M))); \
if (!mysql_rwlock_rdlock(M)) DBUG_PRINT("lock", ("rwlock rlock ok")); \
else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); }
-#define RW_UNLOCK(M) {DBUG_PRINT("lock", ("rwlock unlock 0x%lx",(ulong)(M))); \
+#define RW_UNLOCK(M) {DBUG_PRINT("lock", ("rwlock unlock %p",(M))); \
if (!mysql_rwlock_unlock(M)) DBUG_PRINT("lock", ("rwlock unlock ok")); \
else DBUG_PRINT("lock", ("rwlock unlock FAILED %d", errno)); }
-#define BLOCK_LOCK_WR(B) {DBUG_PRINT("lock", ("%d LOCK_WR 0x%lx",\
- __LINE__,(ulong)(B))); \
+#define BLOCK_LOCK_WR(B) {DBUG_PRINT("lock", ("%d LOCK_WR %p",\
+ __LINE__,(B))); \
B->query()->lock_writing();}
-#define BLOCK_LOCK_RD(B) {DBUG_PRINT("lock", ("%d LOCK_RD 0x%lx",\
- __LINE__,(ulong)(B))); \
+#define BLOCK_LOCK_RD(B) {DBUG_PRINT("lock", ("%d LOCK_RD %p",\
+ __LINE__,(B))); \
B->query()->lock_reading();}
#define BLOCK_UNLOCK_WR(B) { \
- DBUG_PRINT("lock", ("%d UNLOCK_WR 0x%lx",\
- __LINE__,(ulong)(B)));B->query()->unlock_writing();}
+ DBUG_PRINT("lock", ("%d UNLOCK_WR %p",\
+ __LINE__,(B)));B->query()->unlock_writing();}
#define BLOCK_UNLOCK_RD(B) { \
- DBUG_PRINT("lock", ("%d UNLOCK_RD 0x%lx",\
- __LINE__,(ulong)(B)));B->query()->unlock_reading();}
+ DBUG_PRINT("lock", ("%d UNLOCK_RD %p",\
+ __LINE__,(B)));B->query()->unlock_reading();}
#define DUMP(C) DBUG_EXECUTE("qcache", {\
(C)->cache_dump(); (C)->queries_dump();(C)->tables_dump();})
#else
@@ -646,7 +647,7 @@ bool Query_cache::try_lock(THD *thd, Cache_try_lock_mode mode)
else if (mode == TIMEOUT)
{
struct timespec waittime;
- set_timespec_nsec(waittime,(ulong)(50000000L)); /* Wait for 50 msec */
+ set_timespec_nsec(waittime,50000000UL); /* Wait for 50 msec */
int res= mysql_cond_timedwait(&COND_cache_status_changed,
&structure_guard_mutex, &waittime);
if (res == ETIMEDOUT)
@@ -820,7 +821,7 @@ inline Query_cache_block * Query_cache_block_table::block()
void Query_cache_block::init(ulong block_length)
{
DBUG_ENTER("Query_cache_block::init");
- DBUG_PRINT("qcache", ("init block: 0x%lx length: %lu", (ulong) this,
+ DBUG_PRINT("qcache", ("init block: %p length: %lu", this,
block_length));
length = block_length;
used = 0;
@@ -832,8 +833,8 @@ void Query_cache_block::init(ulong block_length)
void Query_cache_block::destroy()
{
DBUG_ENTER("Query_cache_block::destroy");
- DBUG_PRINT("qcache", ("destroy block 0x%lx, type %d",
- (ulong) this, type));
+ DBUG_PRINT("qcache", ("destroy block %p, type %d",
+ this, type));
type = INCOMPLETE;
DBUG_VOID_RETURN;
}
@@ -935,7 +936,7 @@ bool Query_cache_query::try_lock_writing()
DBUG_PRINT("info", ("can't lock rwlock"));
DBUG_RETURN(0);
}
- DBUG_PRINT("info", ("rwlock 0x%lx locked", (ulong) &lock));
+ DBUG_PRINT("info", ("rwlock %p locked", &lock));
DBUG_RETURN(1);
}
@@ -964,9 +965,9 @@ void Query_cache_query::init_n_lock()
res=0; wri = 0; len = 0; ready= 0;
mysql_rwlock_init(key_rwlock_query_cache_query_lock, &lock);
lock_writing();
- DBUG_PRINT("qcache", ("inited & locked query for block 0x%lx",
- (long) (((uchar*) this) -
- ALIGN_SIZE(sizeof(Query_cache_block)))));
+ DBUG_PRINT("qcache", ("inited & locked query for block %p",
+ (uchar*) this -
+ ALIGN_SIZE(sizeof(Query_cache_block))));
DBUG_VOID_RETURN;
}
@@ -974,9 +975,9 @@ void Query_cache_query::init_n_lock()
void Query_cache_query::unlock_n_destroy()
{
DBUG_ENTER("Query_cache_query::unlock_n_destroy");
- DBUG_PRINT("qcache", ("destroyed & unlocked query for block 0x%lx",
- (long) (((uchar*) this) -
- ALIGN_SIZE(sizeof(Query_cache_block)))));
+ DBUG_PRINT("qcache", ("destroyed & unlocked query for block %p",
+ (uchar*) this -
+ ALIGN_SIZE(sizeof(Query_cache_block))));
/*
The following call is not needed on system where one can destroy an
active semaphore
@@ -1109,7 +1110,7 @@ Query_cache::insert(THD *thd, Query_cache_tls *query_cache_tls,
{
DBUG_PRINT("warning", ("Can't append data"));
header->result(result);
- DBUG_PRINT("qcache", ("free query 0x%lx", (ulong) query_block));
+ DBUG_PRINT("qcache", ("free query %p", query_block));
// The following call will remove the lock on query_block
query_cache.free_query(query_block);
query_cache.refused++;
@@ -1386,6 +1387,21 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
DBUG_VOID_RETURN;
}
+ /*
+ Do not store queries while tracking transaction state.
+ The tracker already flags queries that actually have
+ transaction tracker items, but this will make behavior
+    more straightforward.
+ */
+#ifndef EMBEDDED_LIBRARY
+ if (thd->variables.session_track_transaction_info != TX_TRACK_NONE)
+ {
+ DBUG_PRINT("qcache", ("Do not work with transaction tracking"));
+ DBUG_VOID_RETURN;
+ }
+#endif //EMBEDDED_LIBRARY
+
+
/* The following assert fails if we haven't called send_result_to_client */
DBUG_ASSERT(thd->base_query.is_alloced() ||
thd->base_query.ptr() == thd->query());
@@ -1401,6 +1417,8 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
flags.client_long_flag= MY_TEST(thd->client_capabilities & CLIENT_LONG_FLAG);
flags.client_protocol_41= MY_TEST(thd->client_capabilities &
CLIENT_PROTOCOL_41);
+ flags.client_depr_eof= MY_TEST(thd->client_capabilities &
+ CLIENT_DEPRECATE_EOF);
/*
Protocol influences result format, so statement results in the binary
protocol (COM_EXECUTE) cannot be served to statements asking for results
@@ -1431,20 +1449,21 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
flags.div_precision_increment= thd->variables.div_precincrement;
flags.default_week_format= thd->variables.default_week_format;
DBUG_PRINT("qcache", ("\
-long %d, 4.1: %d, bin_proto: %d, more results %d, pkt_nr: %d, \
-CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
-sql mode: 0x%llx, sort len: %lu, conncat len: %lu, div_precision: %lu, \
+long %d, 4.1: %d, eof: %d, bin_proto: %d, more results %d, pkt_nr: %d, \
+CS client: %u, CS result: %u, CS conn: %u, limit: %llu, TZ: %p, \
+sql mode: 0x%llx, sort len: %llu, conncat len: %llu, div_precision: %lu, \
def_week_frmt: %lu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
+ (int)flags.client_depr_eof,
(int)flags.protocol_type,
(int)flags.more_results_exists,
flags.pkt_nr,
flags.character_set_client_num,
flags.character_set_results_num,
flags.collation_connection_num,
- (ulong) flags.limit,
- (ulong) flags.time_zone,
+ (ulonglong)flags.limit,
+ flags.time_zone,
flags.sql_mode,
flags.max_sort_length,
flags.group_concat_max_len,
@@ -1454,12 +1473,6 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
(int)flags.autocommit));
/*
- Make InnoDB to release the adaptive hash index latch before
- acquiring the query cache mutex.
- */
- ha_release_temporary_latches(thd);
-
- /*
A table- or a full flush operation can potentially take a long time to
finish. We choose not to wait for them and skip caching statements
instead.
@@ -1513,7 +1526,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
/* Check if another thread is processing the same query? */
Query_cache_block *competitor = (Query_cache_block *)
my_hash_search(&queries, (uchar*) query, tot_length);
- DBUG_PRINT("qcache", ("competitor 0x%lx", (ulong) competitor));
+ DBUG_PRINT("qcache", ("competitor %p", competitor));
if (competitor == 0)
{
/* Query is not in cache and no one is working with it; Store it */
@@ -1523,8 +1536,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
Query_cache_block::QUERY, local_tables);
if (query_block != 0)
{
- DBUG_PRINT("qcache", ("query block 0x%lx allocated, %lu",
- (ulong) query_block, query_block->used));
+ DBUG_PRINT("qcache", ("query block %p allocated, %lu",
+ query_block, query_block->used));
Query_cache_query *header = query_block->query();
header->init_n_lock();
@@ -1726,6 +1739,20 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
goto err;
}
+ /*
+ Don't allow serving from Query_cache while tracking transaction
+ state. This is a safeguard in case an otherwise matching query
+ was added to the cache before tracking was turned on.
+ */
+#ifndef EMBEDDED_LIBRARY
+ if (thd->variables.session_track_transaction_info != TX_TRACK_NONE)
+ {
+ DBUG_PRINT("qcache", ("Do not work with transaction tracking"));
+ goto err;
+ }
+#endif //EMBEDDED_LIBRARY
+
+
thd->query_cache_is_applicable= 1;
sql= org_sql; sql_end= sql + query_length;
@@ -1804,7 +1831,10 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
}
if ((my_toupper(system_charset_info, sql[0]) != 'S' ||
my_toupper(system_charset_info, sql[1]) != 'E' ||
- my_toupper(system_charset_info, sql[2]) != 'L'))
+ my_toupper(system_charset_info, sql[2]) != 'L') &&
+ (my_toupper(system_charset_info, sql[0]) != 'W' ||
+ my_toupper(system_charset_info, sql[1]) != 'I' ||
+ my_toupper(system_charset_info, sql[2]) != 'T'))
{
DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached"));
goto err;
@@ -1896,6 +1926,8 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
flags.client_long_flag= MY_TEST(thd->client_capabilities & CLIENT_LONG_FLAG);
flags.client_protocol_41= MY_TEST(thd->client_capabilities &
CLIENT_PROTOCOL_41);
+ flags.client_depr_eof= MY_TEST(thd->client_capabilities &
+ CLIENT_DEPRECATE_EOF);
flags.protocol_type= (unsigned int) thd->protocol->type();
flags.more_results_exists= MY_TEST(thd->server_status &
SERVER_MORE_RESULTS_EXISTS);
@@ -1917,20 +1949,21 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
flags.default_week_format= thd->variables.default_week_format;
flags.lc_time_names= thd->variables.lc_time_names;
DBUG_PRINT("qcache", ("\
-long %d, 4.1: %d, bin_proto: %d, more results %d, pkt_nr: %d, \
-CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
-sql mode: 0x%llx, sort len: %lu, conncat len: %lu, div_precision: %lu, \
+long %d, 4.1: %d, eof: %d, bin_proto: %d, more results %d, pkt_nr: %d, \
+CS client: %u, CS result: %u, CS conn: %u, limit: %llu, TZ: %p, \
+sql mode: 0x%llx, sort len: %llu, conncat len: %llu, div_precision: %lu, \
def_week_frmt: %lu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
+ (int)flags.client_depr_eof,
(int)flags.protocol_type,
(int)flags.more_results_exists,
flags.pkt_nr,
flags.character_set_client_num,
flags.character_set_results_num,
flags.collation_connection_num,
- (ulong) flags.limit,
- (ulong) flags.time_zone,
+ (ulonglong) flags.limit,
+ flags.time_zone,
flags.sql_mode,
flags.max_sort_length,
flags.group_concat_max_len,
@@ -1957,7 +1990,7 @@ lookup:
DBUG_PRINT("qcache", ("No query in query hash or no results"));
goto err_unlock;
}
- DBUG_PRINT("qcache", ("Query in query hash 0x%lx", (ulong)query_block));
+ DBUG_PRINT("qcache", ("Query in query hash %p",query_block));
#ifdef WITH_WSREP
if (once_more && WSREP_CLIENT(thd) && wsrep_must_sync_wait(thd))
@@ -1988,7 +2021,7 @@ lookup:
BLOCK_UNLOCK_RD(query_block);
goto err_unlock;
}
- DBUG_PRINT("qcache", ("Query have result 0x%lx", (ulong) query));
+ DBUG_PRINT("qcache", ("Query have result %p", query));
if (thd->in_multi_stmt_transaction_mode() &&
(query->tables_type() & HA_CACHE_TBL_TRANSACT))
@@ -2006,36 +2039,32 @@ lookup:
for (; block_table != block_table_end; block_table++)
{
TABLE_LIST table_list;
- TABLE *tmptable;
+ TMP_TABLE_SHARE *tmptable;
Query_cache_table *table = block_table->parent;
/*
- Check that we have not temporary tables with same names of tables
- of this query. If we have such tables, we will not send data from
- query cache, because temporary tables hide real tables by which
+ Check that we do not have temporary tables with same names as that of
+ base tables from this query. If we have such tables, we will not send
+ data from query cache, because temporary tables hide real tables by which
query in query cache was made.
*/
- for (tmptable= thd->temporary_tables; tmptable ; tmptable= tmptable->next)
+ if ((tmptable=
+ thd->find_tmp_table_share_w_base_key((char *) table->data(),
+ table->key_length())))
{
- if (tmptable->s->table_cache_key.length - TMP_TABLE_KEY_EXTRA ==
- table->key_length() &&
- !memcmp(tmptable->s->table_cache_key.str, table->data(),
- table->key_length()))
- {
- DBUG_PRINT("qcache",
- ("Temporary table detected: '%s.%s'",
- tmptable->s->db.str, tmptable->alias.c_ptr()));
- unlock();
- /*
- We should not store result of this query because it contain
- temporary tables => assign following variable to make check
- faster.
- */
- thd->query_cache_is_applicable= 0; // Query can't be cached
- thd->lex->safe_to_cache_query= 0; // For prepared statements
- BLOCK_UNLOCK_RD(query_block);
- DBUG_RETURN(-1);
- }
+ DBUG_PRINT("qcache",
+ ("Temporary table detected: '%s.%s'",
+ tmptable->db.str, tmptable->table_name.str));
+ unlock();
+ /*
+ We should not store result of this query because it contain
+ temporary tables => assign following variable to make check
+ faster.
+ */
+ thd->query_cache_is_applicable= 0; // Query can't be cached
+ thd->lex->safe_to_cache_query= 0; // For prepared statements
+ BLOCK_UNLOCK_RD(query_block);
+ DBUG_RETURN(-1);
}
bzero((char*) &table_list,sizeof(table_list));
@@ -2089,9 +2118,9 @@ lookup:
if (engine_data != table->engine_data())
{
DBUG_PRINT("qcache",
- ("Handler require invalidation queries of %.*s %lu-%lu",
+ ("Handler require invalidation queries of %.*s %llu-%llu",
qcache_se_key_len, qcache_se_key_name,
- (ulong) engine_data, (ulong) table->engine_data()));
+ engine_data, table->engine_data()));
invalidate_table_internal(thd,
(uchar *) table->db(),
table->key_length());
@@ -2129,9 +2158,9 @@ lookup:
THD_STAGE_INFO(thd, stage_sending_cached_result_to_client);
do
{
- DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)",
+ DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)",
result_block->length, result_block->used,
- (ulong) (result_block->headers_len()+
+ (uint) (result_block->headers_len()+
ALIGN_SIZE(sizeof(Query_cache_result)))));
Query_cache_result *result = result_block->result();
@@ -2169,7 +2198,7 @@ lookup:
thd->get_stmt_da()->disable_status();
BLOCK_UNLOCK_RD(query_block);
- MYSQL_QUERY_CACHE_HIT(thd->query(), (ulong) thd->limit_found_rows);
+ MYSQL_QUERY_CACHE_HIT(thd->query(), thd->limit_found_rows);
DBUG_RETURN(1); // Result sent to client
err_unlock:
@@ -2366,7 +2395,7 @@ void Query_cache::invalidate(THD *thd, char *db)
}
/*
The used tables are linked in a circular list;
- loop until we return to the begining.
+ loop until we return to the beginning.
*/
} while (table_block != tables_blocks);
/*
@@ -2888,8 +2917,8 @@ my_bool Query_cache::free_old_query()
void Query_cache::free_query_internal(Query_cache_block *query_block)
{
DBUG_ENTER("Query_cache::free_query_internal");
- DBUG_PRINT("qcache", ("free query 0x%lx %lu bytes result",
- (ulong) query_block,
+ DBUG_PRINT("qcache", ("free query %p %lu bytes result",
+ query_block,
query_block->query()->length() ));
queries_in_cache--;
@@ -2958,8 +2987,8 @@ void Query_cache::free_query_internal(Query_cache_block *query_block)
void Query_cache::free_query(Query_cache_block *query_block)
{
DBUG_ENTER("Query_cache::free_query");
- DBUG_PRINT("qcache", ("free query 0x%lx %lu bytes result",
- (ulong) query_block,
+ DBUG_PRINT("qcache", ("free query %p %lu bytes result",
+ query_block,
query_block->query()->length() ));
my_hash_delete(&queries,(uchar *) query_block);
@@ -3006,8 +3035,8 @@ Query_cache::append_result_data(Query_cache_block **current_block,
Query_cache_block *query_block)
{
DBUG_ENTER("Query_cache::append_result_data");
- DBUG_PRINT("qcache", ("append %lu bytes to 0x%lx query",
- data_len, (long) query_block));
+ DBUG_PRINT("qcache", ("append %lu bytes to %p query",
+ data_len, query_block));
if (query_block->query()->add(data_len) > query_cache_limit)
{
@@ -3024,8 +3053,8 @@ Query_cache::append_result_data(Query_cache_block **current_block,
}
Query_cache_block *last_block = (*current_block)->prev;
- DBUG_PRINT("qcache", ("lastblock 0x%lx len %lu used %lu",
- (ulong) last_block, last_block->length,
+ DBUG_PRINT("qcache", ("lastblock %p len %lu used %lu",
+ last_block, last_block->length,
last_block->used));
my_bool success = 1;
ulong last_block_free_space= last_block->length - last_block->used;
@@ -3070,8 +3099,8 @@ Query_cache::append_result_data(Query_cache_block **current_block,
if (success && last_block_free_space > 0)
{
ulong to_copy = MY_MIN(data_len,last_block_free_space);
- DBUG_PRINT("qcache", ("use free space %lub at block 0x%lx to copy %lub",
- last_block_free_space, (ulong)last_block, to_copy));
+ DBUG_PRINT("qcache", ("use free space %lub at block %p to copy %lub",
+ last_block_free_space,last_block, to_copy));
memcpy((uchar*) last_block + last_block->used, data, to_copy);
last_block->used+=to_copy;
}
@@ -3112,8 +3141,8 @@ my_bool Query_cache::write_result_data(Query_cache_block **result_block,
{
block->type = type;
ulong length = block->used - headers_len;
- DBUG_PRINT("qcache", ("write %lu byte in block 0x%lx",length,
- (ulong)block));
+ DBUG_PRINT("qcache", ("write %lu byte in block %p",length,
+ block));
memcpy((uchar*) block+headers_len, rest, length);
rest += length;
block = block->next;
@@ -3319,7 +3348,7 @@ Query_cache::invalidate_query_block_list(THD *thd,
}
/*
- Register given table list begining with given position in tables table of
+ Register given table list beginning with given position in tables table of
block
SYNOPSIS
@@ -3376,12 +3405,12 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used,
else
{
DBUG_PRINT("qcache",
- ("table: %s db: %s openinfo: 0x%lx keylen: %lu key: 0x%lx",
+ ("table: %s db: %s openinfo: %p keylen: %zu key: %p",
tables_used->table->s->table_name.str,
tables_used->table->s->table_cache_key.str,
- (ulong) tables_used->table,
- (ulong) tables_used->table->s->table_cache_key.length,
- (ulong) tables_used->table->s->table_cache_key.str));
+ tables_used->table,
+ tables_used->table->s->table_cache_key.length,
+ tables_used->table->s->table_cache_key.str));
if (!insert_table(thd, tables_used->table->s->table_cache_key.length,
tables_used->table->s->table_cache_key.str,
@@ -3418,8 +3447,8 @@ my_bool Query_cache::register_all_tables(THD *thd,
TABLE_COUNTER_TYPE tables_arg)
{
TABLE_COUNTER_TYPE n;
- DBUG_PRINT("qcache", ("register tables block 0x%lx, n %d, header %x",
- (ulong) block, (int) tables_arg,
+ DBUG_PRINT("qcache", ("register tables block %p, n %d, header %x",
+ block, (int) tables_arg,
(int) ALIGN_SIZE(sizeof(Query_cache_block))));
Query_cache_block_table *block_table = block->table(0);
@@ -3458,8 +3487,8 @@ Query_cache::insert_table(THD *thd, uint key_len, const char *key,
my_bool hash)
{
DBUG_ENTER("Query_cache::insert_table");
- DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d",
- (ulong)node, key_len));
+ DBUG_PRINT("qcache", ("insert table node %p, len %d",
+ node, key_len));
Query_cache_block *table_block=
(hash ?
@@ -3470,11 +3499,11 @@ Query_cache::insert_table(THD *thd, uint key_len, const char *key,
table_block->table()->engine_data() != engine_data)
{
DBUG_PRINT("qcache",
- ("Handler require invalidation queries of %s.%s %lu-%lu",
+ ("Handler require invalidation queries of %s.%s %llu-%llu",
table_block->table()->db(),
table_block->table()->table(),
- (ulong) engine_data,
- (ulong) table_block->table()->engine_data()));
+ engine_data,
+ table_block->table()->engine_data()));
/*
as far as we delete all queries with this table, table block will be
deleted, too
@@ -3489,8 +3518,8 @@ Query_cache::insert_table(THD *thd, uint key_len, const char *key,
if (table_block == 0)
{
- DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)",
- (ulong) key, (int) key_len));
+ DBUG_PRINT("qcache", ("new table block from %p (%u)",
+ key, (int) key_len));
table_block= write_block_data(key_len, (uchar*) key,
ALIGN_SIZE(sizeof(Query_cache_table)),
Query_cache_block::TABLE, 1);
@@ -3699,7 +3728,7 @@ Query_cache::get_free_block(ulong len, my_bool not_less, ulong min)
if (block != 0)
exclude_from_free_memory_list(block);
- DBUG_PRINT("qcache",("getting block 0x%lx", (ulong) block));
+ DBUG_PRINT("qcache",("getting block %p", block));
DBUG_RETURN(block);
}
@@ -3710,9 +3739,9 @@ void Query_cache::free_memory_block(Query_cache_block *block)
block->used=0;
block->type= Query_cache_block::FREE; // mark block as free in any case
DBUG_PRINT("qcache",
- ("first_block 0x%lx, block 0x%lx, pnext 0x%lx pprev 0x%lx",
- (ulong) first_block, (ulong) block, (ulong) block->pnext,
- (ulong) block->pprev));
+ ("first_block %p, block %p, pnext %p pprev %p",
+ first_block, block, block->pnext,
+ block->pprev));
if (block->pnext != first_block && block->pnext->is_free())
block = join_free_blocks(block, block->pnext);
@@ -3744,8 +3773,8 @@ void Query_cache::split_block(Query_cache_block *block, ulong len)
else
free_memory_block(new_block);
- DBUG_PRINT("qcache", ("split 0x%lx (%lu) new 0x%lx",
- (ulong) block, len, (ulong) new_block));
+ DBUG_PRINT("qcache", ("split %p (%lu) new %p",
+ block, len, new_block));
DBUG_VOID_RETURN;
}
@@ -3757,9 +3786,9 @@ Query_cache::join_free_blocks(Query_cache_block *first_block_arg,
Query_cache_block *second_block;
DBUG_ENTER("Query_cache::join_free_blocks");
DBUG_PRINT("qcache",
- ("join first 0x%lx, pnext 0x%lx, in list 0x%lx",
- (ulong) first_block_arg, (ulong) first_block_arg->pnext,
- (ulong) block_in_list));
+ ("join first %p, pnext %p, in list %p",
+ first_block_arg, first_block_arg->pnext,
+ block_in_list));
exclude_from_free_memory_list(block_in_list);
second_block = first_block_arg->pnext;
@@ -3781,7 +3810,7 @@ my_bool Query_cache::append_next_free_block(Query_cache_block *block,
{
Query_cache_block *next_block = block->pnext;
DBUG_ENTER("Query_cache::append_next_free_block");
- DBUG_PRINT("enter", ("block 0x%lx, add_size %lu", (ulong) block,
+ DBUG_PRINT("enter", ("block %p, add_size %lu", block,
add_size));
if (next_block != first_block && next_block->is_free())
@@ -3813,8 +3842,8 @@ void Query_cache::exclude_from_free_memory_list(Query_cache_block *free_block)
bin->number--;
free_memory-=free_block->length;
free_memory_blocks--;
- DBUG_PRINT("qcache",("exclude block 0x%lx, bin 0x%lx", (ulong) free_block,
- (ulong) bin));
+ DBUG_PRINT("qcache",("exclude block %p, bin %p", free_block,
+ bin));
DBUG_VOID_RETURN;
}
@@ -3831,8 +3860,8 @@ void Query_cache::insert_into_free_memory_list(Query_cache_block *free_block)
free_block->data());
*bin_ptr = bins+idx;
(*bin_ptr)->number++;
- DBUG_PRINT("qcache",("insert block 0x%lx, bin[%d] 0x%lx",
- (ulong) free_block, idx, (ulong) *bin_ptr));
+ DBUG_PRINT("qcache",("insert block %p, bin[%d] %p",
+ free_block, idx, *bin_ptr));
DBUG_VOID_RETURN;
}
@@ -3929,7 +3958,7 @@ Query_cache::double_linked_list_simple_include(Query_cache_block *point,
list_pointer)
{
DBUG_ENTER("Query_cache::double_linked_list_simple_include");
- DBUG_PRINT("qcache", ("including block 0x%lx", (ulong) point));
+ DBUG_PRINT("qcache", ("including block %p", point));
if (*list_pointer == 0)
*list_pointer=point->next=point->prev=point;
else
@@ -3948,8 +3977,8 @@ Query_cache::double_linked_list_exclude(Query_cache_block *point,
Query_cache_block **list_pointer)
{
DBUG_ENTER("Query_cache::double_linked_list_exclude");
- DBUG_PRINT("qcache", ("excluding block 0x%lx, list 0x%lx",
- (ulong) point, (ulong) list_pointer));
+ DBUG_PRINT("qcache", ("excluding block %p, list %p",
+ point, list_pointer));
if (point->next == point)
*list_pointer = 0; // empty list
else
@@ -4244,7 +4273,7 @@ my_bool Query_cache::move_by_type(uchar **border,
switch (block->type) {
case Query_cache_block::FREE:
{
- DBUG_PRINT("qcache", ("block 0x%lx FREE", (ulong) block));
+ DBUG_PRINT("qcache", ("block %p FREE", block));
if (*border == 0)
{
*border = (uchar *) block;
@@ -4263,7 +4292,7 @@ my_bool Query_cache::move_by_type(uchar **border,
case Query_cache_block::TABLE:
{
HASH_SEARCH_STATE record_idx;
- DBUG_PRINT("qcache", ("block 0x%lx TABLE", (ulong) block));
+ DBUG_PRINT("qcache", ("block %p TABLE", block));
if (*border == 0)
break;
ulong len = block->length, used = block->used;
@@ -4275,7 +4304,7 @@ my_bool Query_cache::move_by_type(uchar **border,
*pprev = block->pprev,
*pnext = block->pnext,
*new_block =(Query_cache_block *) *border;
- uint tablename_offset = block->table()->table() - block->table()->db();
+ size_t tablename_offset = block->table()->table() - block->table()->db();
char *data = (char*) block->data();
uchar *key;
size_t key_length;
@@ -4299,9 +4328,9 @@ my_bool Query_cache::move_by_type(uchar **border,
nlist_root->prev = tprev;
tprev->next = nlist_root;
DBUG_PRINT("qcache",
- ("list_root: 0x%lx tnext 0x%lx tprev 0x%lx tprev->next 0x%lx tnext->prev 0x%lx",
- (ulong) list_root, (ulong) tnext, (ulong) tprev,
- (ulong)tprev->next, (ulong)tnext->prev));
+ ("list_root: %p tnext %p tprev %p tprev->next %p tnext->prev %p",
+ list_root, tnext, tprev,
+ tprev->next,tnext->prev));
/*
Go through all queries that uses this table and change them to
point to the new table object
@@ -4316,14 +4345,14 @@ my_bool Query_cache::move_by_type(uchar **border,
/* Fix hash to point at moved block */
my_hash_replace(&tables, &record_idx, (uchar*) new_block);
- DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx",
- len, (ulong) new_block, (ulong) *border));
+ DBUG_PRINT("qcache", ("moved %lu bytes to %p, new gap at %p",
+ len, new_block, *border));
break;
}
case Query_cache_block::QUERY:
{
HASH_SEARCH_STATE record_idx;
- DBUG_PRINT("qcache", ("block 0x%lx QUERY", (ulong) block));
+ DBUG_PRINT("qcache", ("block %p QUERY", block));
if (*border == 0)
break;
BLOCK_LOCK_WR(block);
@@ -4362,7 +4391,7 @@ my_bool Query_cache::move_by_type(uchar **border,
{
Query_cache_block_table *block_table = new_block->table(j);
- // use aligment from begining of table if 'next' is in same block
+        // use alignment from beginning of table if 'next' is in same block
if ((beg_of_table_table <= block_table->next) &&
(block_table->next < end_of_table_table))
((Query_cache_block_table *)(beg_of_new_table_table +
@@ -4372,7 +4401,7 @@ my_bool Query_cache::move_by_type(uchar **border,
else
block_table->next->prev= block_table;
- // use aligment from begining of table if 'prev' is in same block
+        // use alignment from beginning of table if 'prev' is in same block
if ((beg_of_table_table <= block_table->prev) &&
(block_table->prev < end_of_table_table))
((Query_cache_block_table *)(beg_of_new_table_table +
@@ -4409,8 +4438,8 @@ my_bool Query_cache::move_by_type(uchar **border,
}
/* Fix hash to point at moved block */
my_hash_replace(&queries, &record_idx, (uchar*) new_block);
- DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx",
- len, (ulong) new_block, (ulong) *border));
+ DBUG_PRINT("qcache", ("moved %lu bytes to %p, new gap at %p",
+ len, new_block, *border));
break;
}
case Query_cache_block::RES_INCOMPLETE:
@@ -4418,7 +4447,7 @@ my_bool Query_cache::move_by_type(uchar **border,
case Query_cache_block::RES_CONT:
case Query_cache_block::RESULT:
{
- DBUG_PRINT("qcache", ("block 0x%lx RES* (%d)", (ulong) block,
+ DBUG_PRINT("qcache", ("block %p RES* (%d)", block,
(int) block->type));
if (*border == 0)
break;
@@ -4458,13 +4487,13 @@ my_bool Query_cache::move_by_type(uchar **border,
new_block->length -= free_space;
}
BLOCK_UNLOCK_WR(query_block);
- DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx",
- len, (ulong) new_block, (ulong) *border));
+ DBUG_PRINT("qcache", ("moved %lu bytes to %p, new gap at %p",
+ len, new_block, *border));
break;
}
default:
- DBUG_PRINT("error", ("unexpected block type %d, block 0x%lx",
- (int)block->type, (ulong) block));
+ DBUG_PRINT("error", ("unexpected block type %d, block %p",
+ (int)block->type, block));
ok = 0;
}
DBUG_RETURN(ok);
@@ -4584,7 +4613,7 @@ uint Query_cache::filename_2_table_key (char *key, const char *path,
filename= tablename + dirname_length(tablename + 2) + 2;
/* Find start of databasename */
for (dbname= filename - 2 ; dbname[-1] != FN_LIBCHAR ; dbname--) ;
- *db_length= (filename - dbname) - 1;
+ *db_length= (uint32)(filename - dbname) - 1;
DBUG_PRINT("qcache", ("table '%-.*s.%s'", *db_length, dbname, filename));
DBUG_RETURN((uint) (strmake(strmake(key, dbname,
@@ -4665,16 +4694,16 @@ void Query_cache::bins_dump()
DBUG_PRINT("qcache", ("-------------------------"));
for (i=0; i < mem_bin_num; i++)
{
- DBUG_PRINT("qcache", ("%10lu %3d 0x%lx", bins[i].size, bins[i].number,
- (ulong)&(bins[i])));
+ DBUG_PRINT("qcache", ("%10lu %3d %p", bins[i].size, bins[i].number,
+ &(bins[i])));
if (bins[i].free_blocks)
{
Query_cache_block *block = bins[i].free_blocks;
do{
- DBUG_PRINT("qcache", ("\\-- %lu 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx",
- block->length, (ulong)block,
- (ulong)block->next, (ulong)block->prev,
- (ulong)block->pnext, (ulong)block->pprev));
+ DBUG_PRINT("qcache", ("\\-- %lu %p %p %p %p %p",
+ block->length,block,
+ block->next,block->prev,
+ block->pnext,block->pprev));
block = block->next;
} while ( block != bins[i].free_blocks );
}
@@ -4698,11 +4727,11 @@ void Query_cache::cache_dump()
do
{
DBUG_PRINT("qcache",
- ("%10lu %10lu %1d %2d 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx",
+ ("%10lu %10lu %1d %2d %p %p %p %p %p",
i->length, i->used, (int)i->type,
- i->n_tables, (ulong)i,
- (ulong)i->next, (ulong)i->prev, (ulong)i->pnext,
- (ulong)i->pprev));
+ i->n_tables,i,
+ i->next,i->prev,i->pnext,
+ i->pprev));
i = i->pnext;
} while ( i != first_block );
DBUG_PRINT("qcache", ("-------------------------------------"));
@@ -4732,15 +4761,15 @@ void Query_cache::queries_dump()
Query_cache_query_flags flags;
memcpy(&flags, str+len, QUERY_CACHE_FLAGS_SIZE);
str[len]= 0; // make zero ending DB name
- DBUG_PRINT("qcache", ("F: %u C: %u L: %lu T: '%s' (%lu) '%s' '%s'",
+ DBUG_PRINT("qcache", ("F: %u C: %u L: %llu T: '%s' (%zu) '%s' '%s'",
flags.client_long_flag,
flags.character_set_client_num,
- (ulong)flags.limit,
+ flags.limit,
flags.time_zone->get_name()->ptr(),
- (ulong) len, str, strend(str)+1));
- DBUG_PRINT("qcache", ("-b- 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", (ulong) block,
- (ulong) block->next, (ulong) block->prev,
- (ulong)block->pnext, (ulong)block->pprev));
+ len, str, strend(str)+1));
+ DBUG_PRINT("qcache", ("-b- %p %p %p %p %p", block,
+ block->next, block->prev,
+ block->pnext,block->pprev));
memcpy(str + len, &flags, QUERY_CACHE_FLAGS_SIZE); // restore flags
for (TABLE_COUNTER_TYPE t= 0; t < block->n_tables; t++)
{
@@ -4754,14 +4783,14 @@ void Query_cache::queries_dump()
Query_cache_block *result_beg = result_block;
do
{
- DBUG_PRINT("qcache", ("-r- %u %lu/%lu 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx",
+ DBUG_PRINT("qcache", ("-r- %u %lu/%lu %p %p %p %p %p",
(uint) result_block->type,
result_block->length, result_block->used,
- (ulong) result_block,
- (ulong) result_block->next,
- (ulong) result_block->prev,
- (ulong) result_block->pnext,
- (ulong) result_block->pprev));
+ result_block,
+ result_block->next,
+ result_block->prev,
+ result_block->pnext,
+ result_block->pprev));
result_block = result_block->next;
} while ( result_block != result_beg );
}
@@ -4840,14 +4869,14 @@ my_bool Query_cache::check_integrity(bool locked)
if (!block)
break;
- DBUG_PRINT("qcache", ("block 0x%lx, type %u...",
- (ulong) block, (uint) block->type));
+ DBUG_PRINT("qcache", ("block %p, type %u...",
+ block, (uint) block->type));
// Check allignment
- if ((((long)block) % (long) ALIGN_SIZE(1)) !=
- (((long)first_block) % (long)ALIGN_SIZE(1)))
+ if ((((size_t)block) % ALIGN_SIZE(1)) !=
+ (((size_t)first_block) % ALIGN_SIZE(1)))
{
DBUG_PRINT("error",
- ("block 0x%lx do not aligned by %d", (ulong) block,
+ ("block %p do not aligned by %d", block,
(int) ALIGN_SIZE(1)));
result = 1;
}
@@ -4858,10 +4887,10 @@ my_bool Query_cache::check_integrity(bool locked)
((uchar*)first_block) + query_cache_size)
{
DBUG_PRINT("error",
- ("block 0x%lx, type %u, ended at 0x%lx, but cache ended at 0x%lx",
- (ulong) block, (uint) block->type,
- (ulong) (((uchar*)block) + block->length),
- (ulong) (((uchar*)first_block) + query_cache_size)));
+ ("block %p, type %u, ended at %p, but cache ended at %p",
+ block, (uint) block->type,
+ (((uchar*)block) + block->length),
+ (((uchar*)first_block) + query_cache_size)));
result = 1;
}
}
@@ -4869,10 +4898,10 @@ my_bool Query_cache::check_integrity(bool locked)
if (((uchar*)block) + block->length != ((uchar*)block->pnext))
{
DBUG_PRINT("error",
- ("block 0x%lx, type %u, ended at 0x%lx, but next block begining at 0x%lx",
- (ulong) block, (uint) block->type,
- (ulong) (((uchar*)block) + block->length),
- (ulong) ((uchar*)block->pnext)));
+ ("block %p, type %u, ended at %p, but next block beginning at %p",
+ block, (uint) block->type,
+ (((uchar*)block) + block->length),
+ ((uchar*)block->pnext)));
}
if (block->type == Query_cache_block::FREE)
free+= block->length;
@@ -4888,11 +4917,11 @@ my_bool Query_cache::check_integrity(bool locked)
((uchar*)bin) >= ((uchar*)first_block))
{
DBUG_PRINT("error",
- ("free block 0x%lx have bin pointer 0x%lx beyaond of bins array bounds [0x%lx,0x%lx]",
- (ulong) block,
- (ulong) bin,
- (ulong) bins,
- (ulong) first_block));
+ ("free block %p have bin pointer %p beyaond of bins array bounds [%p,%p]",
+ block,
+ bin,
+ bins,
+ first_block));
result = 1;
}
else
@@ -4939,11 +4968,11 @@ my_bool Query_cache::check_integrity(bool locked)
((uchar*)query_block) >= (((uchar*)first_block) + query_cache_size))
{
DBUG_PRINT("error",
- ("result block 0x%lx have query block pointer 0x%lx beyaond of block pool bounds [0x%lx,0x%lx]",
- (ulong) block,
- (ulong) query_block,
- (ulong) first_block,
- (ulong) (((uchar*)first_block) + query_cache_size)));
+ ("result block %p have query block pointer %p beyaond of block pool bounds [%p,%p]",
+ block,
+ query_block,
+ first_block,
+ (((uchar*)first_block) + query_cache_size)));
result = 1;
}
else
@@ -4959,8 +4988,8 @@ my_bool Query_cache::check_integrity(bool locked)
break;
}
default:
- DBUG_PRINT("error", ("block 0x%lx have incorrect type %u",
- (long) block, block->type));
+ DBUG_PRINT("error", ("block %p have incorrect type %u",
+ block, block->type));
result = 1;
}
@@ -4988,15 +5017,15 @@ my_bool Query_cache::check_integrity(bool locked)
{
do
{
- DBUG_PRINT("qcache", ("block 0x%lx, type %u...",
- (ulong) block, (uint) block->type));
+ DBUG_PRINT("qcache", ("block %p, type %u...",
+ block, (uint) block->type));
size_t length;
uchar *key = query_cache_query_get_key((uchar*) block, &length, 0);
uchar* val = my_hash_search(&queries, key, length);
if (((uchar*)block) != val)
{
- DBUG_PRINT("error", ("block 0x%lx found in queries hash like 0x%lx",
- (ulong) block, (ulong) val));
+ DBUG_PRINT("error", ("block %p found in queries hash like %p",
+ block, val));
}
if (in_blocks(block))
result = 1;
@@ -5006,8 +5035,8 @@ my_bool Query_cache::check_integrity(bool locked)
Query_cache_block * result_block = results;
do
{
- DBUG_PRINT("qcache", ("block 0x%lx, type %u...",
- (ulong) block, (uint) block->type));
+ DBUG_PRINT("qcache", ("block %p, type %u...",
+ block, (uint) block->type));
if (in_blocks(result_block))
result = 1;
@@ -5023,15 +5052,15 @@ my_bool Query_cache::check_integrity(bool locked)
{
do
{
- DBUG_PRINT("qcache", ("block 0x%lx, type %u...",
- (ulong) block, (uint) block->type));
+ DBUG_PRINT("qcache", ("block %p, type %u...",
+ block, (uint) block->type));
size_t length;
uchar *key = query_cache_table_get_key((uchar*) block, &length, 0);
uchar* val = my_hash_search(&tables, key, length);
if (((uchar*)block) != val)
{
- DBUG_PRINT("error", ("block 0x%lx found in tables hash like 0x%lx",
- (ulong) block, (ulong) val));
+ DBUG_PRINT("error", ("block %p found in tables hash like %p",
+ block, val));
}
if (in_blocks(block))
@@ -5048,8 +5077,8 @@ my_bool Query_cache::check_integrity(bool locked)
uint count = 0;
do
{
- DBUG_PRINT("qcache", ("block 0x%lx, type %u...",
- (ulong) block, (uint) block->type));
+ DBUG_PRINT("qcache", ("block %p, type %u...",
+ block, (uint) block->type));
if (in_blocks(block))
result = 1;
@@ -5081,13 +5110,13 @@ my_bool Query_cache::in_blocks(Query_cache_block * point)
if (block->pprev->pnext != block)
{
DBUG_PRINT("error",
- ("block 0x%lx in physical list is incorrect linked, prev block 0x%lx refered as next to 0x%lx (check from 0x%lx)",
- (ulong) block, (ulong) block->pprev,
- (ulong) block->pprev->pnext,
- (ulong) point));
+ ("block %p in physical list is incorrect linked, prev block %p refered as next to %p (check from %p)",
+ block, block->pprev,
+ block->pprev->pnext,
+ point));
//back trace
for (; block != point; block = block->pnext)
- DBUG_PRINT("error", ("back trace 0x%lx", (ulong) block));
+ DBUG_PRINT("error", ("back trace %p", block));
result = 1;
goto err1;
}
@@ -5096,8 +5125,8 @@ my_bool Query_cache::in_blocks(Query_cache_block * point)
if (block != first_block)
{
DBUG_PRINT("error",
- ("block 0x%lx (0x%lx<-->0x%lx) not owned by pysical list",
- (ulong) block, (ulong) block->pprev, (ulong )block->pnext));
+ ("block %p (%p<-->%p) not owned by pysical list",
+ block, block->pprev, block->pnext));
return 1;
}
@@ -5109,13 +5138,13 @@ err1:
if (block->pnext->pprev != block)
{
DBUG_PRINT("error",
- ("block 0x%lx in physicel list is incorrect linked, next block 0x%lx refered as prev to 0x%lx (check from 0x%lx)",
- (ulong) block, (ulong) block->pnext,
- (ulong) block->pnext->pprev,
- (ulong) point));
+ ("block %p in physicel list is incorrect linked, next block %p refered as prev to %p (check from %p)",
+ block, block->pnext,
+ block->pnext->pprev,
+ point));
//back trace
for (; block != point; block = block->pprev)
- DBUG_PRINT("error", ("back trace 0x%lx", (ulong) block));
+ DBUG_PRINT("error", ("back trace %p", block));
result = 1;
goto err2;
}
@@ -5138,13 +5167,13 @@ my_bool Query_cache::in_list(Query_cache_block * root,
if (block->prev->next != block)
{
DBUG_PRINT("error",
- ("block 0x%lx in list '%s' 0x%lx is incorrect linked, prev block 0x%lx refered as next to 0x%lx (check from 0x%lx)",
- (ulong) block, name, (ulong) root, (ulong) block->prev,
- (ulong) block->prev->next,
- (ulong) point));
+ ("block %p in list '%s' %p is incorrect linked, prev block %p refered as next to %p (check from %p)",
+ block, name, root, block->prev,
+ block->prev->next,
+ point));
//back trace
for (; block != point; block = block->next)
- DBUG_PRINT("error", ("back trace 0x%lx", (ulong) block));
+ DBUG_PRINT("error", ("back trace %p", block));
result = 1;
goto err1;
}
@@ -5153,10 +5182,10 @@ my_bool Query_cache::in_list(Query_cache_block * root,
if (block != root)
{
DBUG_PRINT("error",
- ("block 0x%lx (0x%lx<-->0x%lx) not owned by list '%s' 0x%lx",
- (ulong) block,
- (ulong) block->prev, (ulong) block->next,
- name, (ulong) root));
+ ("block %p (%p<-->%p) not owned by list '%s' %p",
+ block,
+ block->prev, block->next,
+ name, root));
return 1;
}
err1:
@@ -5167,13 +5196,13 @@ err1:
if (block->next->prev != block)
{
DBUG_PRINT("error",
- ("block 0x%lx in list '%s' 0x%lx is incorrect linked, next block 0x%lx refered as prev to 0x%lx (check from 0x%lx)",
- (ulong) block, name, (ulong) root, (ulong) block->next,
- (ulong) block->next->prev,
- (ulong) point));
+ ("block %p in list '%s' %p is incorrect linked, next block %p refered as prev to %p (check from %p)",
+ block, name, root, block->next,
+ block->next->prev,
+ point));
//back trace
for (; block != point; block = block->prev)
- DBUG_PRINT("error", ("back trace 0x%lx", (ulong) block));
+ DBUG_PRINT("error", ("back trace %p", block));
result = 1;
goto err2;
}
@@ -5186,13 +5215,13 @@ err2:
void dump_node(Query_cache_block_table * node,
const char * call, const char * descr)
{
- DBUG_PRINT("qcache", ("%s: %s: node: 0x%lx", call, descr, (ulong) node));
- DBUG_PRINT("qcache", ("%s: %s: node block: 0x%lx",
- call, descr, (ulong) node->block()));
- DBUG_PRINT("qcache", ("%s: %s: next: 0x%lx", call, descr,
- (ulong) node->next));
- DBUG_PRINT("qcache", ("%s: %s: prev: 0x%lx", call, descr,
- (ulong) node->prev));
+ DBUG_PRINT("qcache", ("%s: %s: node: %p", call, descr, node));
+ DBUG_PRINT("qcache", ("%s: %s: node block: %p",
+ call, descr, node->block()));
+ DBUG_PRINT("qcache", ("%s: %s: next: %p", call, descr,
+ node->next));
+ DBUG_PRINT("qcache", ("%s: %s: prev: %p", call, descr,
+ node->prev));
}
my_bool Query_cache::in_table_list(Query_cache_block_table * root,
@@ -5209,17 +5238,17 @@ my_bool Query_cache::in_table_list(Query_cache_block_table * root,
if (table->prev->next != table)
{
DBUG_PRINT("error",
- ("table 0x%lx(0x%lx) in list '%s' 0x%lx(0x%lx) is incorrect linked, prev table 0x%lx(0x%lx) refered as next to 0x%lx(0x%lx) (check from 0x%lx(0x%lx))",
- (ulong) table, (ulong) table->block(), name,
- (ulong) root, (ulong) root->block(),
- (ulong) table->prev, (ulong) table->prev->block(),
- (ulong) table->prev->next,
- (ulong) table->prev->next->block(),
- (ulong) point, (ulong) point->block()));
+ ("table %p(%p) in list '%s' %p(%p) is incorrect linked, prev table %p(%p) refered as next to %p(%p) (check from %p(%p))",
+ table, table->block(), name,
+ root, root->block(),
+ table->prev, table->prev->block(),
+ table->prev->next,
+ table->prev->next->block(),
+ point, point->block()));
//back trace
for (; table != point; table = table->next)
- DBUG_PRINT("error", ("back trace 0x%lx(0x%lx)",
- (ulong) table, (ulong) table->block()));
+ DBUG_PRINT("error", ("back trace %p(%p)",
+ table, table->block()));
result = 1;
goto err1;
}
@@ -5228,11 +5257,11 @@ my_bool Query_cache::in_table_list(Query_cache_block_table * root,
if (table != root)
{
DBUG_PRINT("error",
- ("table 0x%lx(0x%lx) (0x%lx(0x%lx)<-->0x%lx(0x%lx)) not owned by list '%s' 0x%lx(0x%lx)",
- (ulong) table, (ulong) table->block(),
- (ulong) table->prev, (ulong) table->prev->block(),
- (ulong) table->next, (ulong) table->next->block(),
- name, (ulong) root, (ulong) root->block()));
+ ("table %p(%p) (%p(%p)<-->%p(%p)) not owned by list '%s' %p(%p)",
+ table, table->block(),
+ table->prev, table->prev->block(),
+ table->next, table->next->block(),
+ name, root, root->block()));
return 1;
}
err1:
@@ -5244,17 +5273,17 @@ err1:
if (table->next->prev != table)
{
DBUG_PRINT("error",
- ("table 0x%lx(0x%lx) in list '%s' 0x%lx(0x%lx) is incorrect linked, next table 0x%lx(0x%lx) refered as prev to 0x%lx(0x%lx) (check from 0x%lx(0x%lx))",
- (ulong) table, (ulong) table->block(),
- name, (ulong) root, (ulong) root->block(),
- (ulong) table->next, (ulong) table->next->block(),
- (ulong) table->next->prev,
- (ulong) table->next->prev->block(),
- (ulong) point, (ulong) point->block()));
+ ("table %p(%p) in list '%s' %p(%p) is incorrect linked, next table %p(%p) refered as prev to %p(%p) (check from %p(%p))",
+ table, table->block(),
+ name, root, root->block(),
+ table->next, table->next->block(),
+ table->next->prev,
+ table->next->prev->block(),
+ point, point->block()));
//back trace
for (; table != point; table = table->prev)
- DBUG_PRINT("error", ("back trace 0x%lx(0x%lx)",
- (ulong) table, (ulong) table->block()));
+ DBUG_PRINT("error", ("back trace %p(%p)",
+ table, table->block()));
result = 1;
goto err2;
}
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index 657caf4a5bc..ad9cac76b0a 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -545,6 +545,7 @@ struct Query_cache_query_flags
{
unsigned int client_long_flag:1;
unsigned int client_protocol_41:1;
+ unsigned int client_depr_eof:1;
unsigned int protocol_type:2;
unsigned int more_results_exists:1;
unsigned int in_trans:1;
@@ -555,9 +556,9 @@ struct Query_cache_query_flags
uint collation_connection_num;
ha_rows limit;
Time_zone *time_zone;
- ulonglong sql_mode;
- ulong max_sort_length;
- ulong group_concat_max_len;
+ sql_mode_t sql_mode;
+ ulonglong max_sort_length;
+ ulonglong group_concat_max_len;
ulong default_week_format;
ulong div_precision_increment;
MY_LOCALE *lc_time_names;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 639c7c1784a..1c29e3d18a7 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2017, MariaDB
+ Copyright (c) 2008, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -37,7 +37,7 @@
#include "tztime.h" // MYSQL_TIME <-> my_time_t
#include "sql_acl.h" // NO_ACCESS,
// acl_getroot_no_password
-#include "sql_base.h" // close_temporary_tables
+#include "sql_base.h"
#include "sql_handler.h" // mysql_ha_cleanup
#include "rpl_rli.h"
#include "rpl_filter.h"
@@ -267,73 +267,15 @@ bool Foreign_key::validate(List<Create_field> &table_fields)
/****************************************************************************
** Thread specific functions
****************************************************************************/
-#ifdef ONLY_FOR_MYSQL_CLOSED_SOURCE_SCHEDULED
-/**
- Get reference to scheduler data object
-
- @param thd THD object
-
- @retval Scheduler data object on THD
-*/
-void *thd_get_scheduler_data(THD *thd)
-{
- return thd->scheduler.data;
-}
-
-/**
- Set reference to Scheduler data object for THD object
-
- @param thd THD object
- @param psi Scheduler data object to set on THD
-*/
-void thd_set_scheduler_data(THD *thd, void *data)
-{
- thd->scheduler.data= data;
-}
-
-/**
- Get reference to Performance Schema object for THD object
-
- @param thd THD object
-
- @retval Performance schema object for thread on THD
-*/
-PSI_thread *thd_get_psi(THD *thd)
-{
- return thd->scheduler.m_psi;
-}
-
-/**
- Get net_wait_timeout for THD object
-
- @param thd THD object
-
- @retval net_wait_timeout value for thread on THD
-*/
-ulong thd_get_net_wait_timeout(THD* thd)
-{
- return thd->variables.net_wait_timeout;
-}
/**
- Set reference to Performance Schema object for THD object
+ Get current THD object from thread local data
- @param thd THD object
- @param psi Performance schema object for thread
+ @retval The THD object for the thread, NULL if not connection thread
*/
-void thd_set_psi(THD *thd, PSI_thread *psi)
+THD *thd_get_current_thd()
{
- thd->scheduler.m_psi= psi;
-}
-
-/**
- Set the state on connection to killed
-
- @param thd THD object
-*/
-void thd_set_killed(THD *thd)
-{
- thd->set_killed(KILL_CONNECTION);
+ return current_thd;
}
/**
@@ -347,116 +289,6 @@ void thd_clear_errors(THD *thd)
thd->mysys_var->abort= 0;
}
-/**
- Set thread stack in THD object
-
- @param thd Thread object
- @param stack_start Start of stack to set in THD object
-*/
-void thd_set_thread_stack(THD *thd, char *stack_start)
-{
- thd->thread_stack= stack_start;
-}
-
-/**
- Close the socket used by this connection
-
- @param thd THD object
-*/
-void thd_close_connection(THD *thd)
-{
- if (thd->net.vio)
- vio_close(thd->net.vio);
-}
-
-/**
- Lock data that needs protection in THD object
-
- @param thd THD object
-*/
-void thd_lock_data(THD *thd)
-{
- mysql_mutex_lock(&thd->LOCK_thd_data);
-}
-
-/**
- Unlock data that needs protection in THD object
-
- @param thd THD object
-*/
-void thd_unlock_data(THD *thd)
-{
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-}
-
-/**
- Support method to check if connection has already started transcaction
-
- @param client_cntx Low level client context
-
- @retval TRUE if connection already started transaction
-*/
-bool thd_is_transaction_active(THD *thd)
-{
- return thd->transaction.is_active();
-}
-
-/**
- Check if there is buffered data on the socket representing the connection
-
- @param thd THD object
-*/
-int thd_connection_has_data(THD *thd)
-{
- Vio *vio= thd->net.vio;
- return vio->has_data(vio);
-}
-
-/**
- Set reading/writing on socket, used by SHOW PROCESSLIST
-
- @param thd THD object
- @param val Value to set it to (0 or 1)
-*/
-void thd_set_net_read_write(THD *thd, uint val)
-{
- thd->net.reading_or_writing= val;
-}
-
-/**
- Get reading/writing on socket from THD object
- @param thd THD object
-
- @retval net.reading_or_writing value for thread on THD.
-*/
-uint thd_get_net_read_write(THD *thd)
-{
- return thd->net.reading_or_writing;
-}
-
-/**
- Set reference to mysys variable in THD object
-
- @param thd THD object
- @param mysys_var Reference to set
-*/
-void thd_set_mysys_var(THD *thd, st_my_thread_var *mysys_var)
-{
- thd->set_mysys_var(mysys_var);
-}
-
-/**
- Get socket file descriptor for this connection
-
- @param thd THD object
-
- @retval Socket of the connection
-*/
-my_socket thd_get_fd(THD *thd)
-{
- return mysql_socket_getfd(thd->net.vio->mysql_socket);
-}
-#endif
/**
Get thread attributes for connection threads
@@ -529,13 +361,13 @@ const char *set_thd_proc_info(THD *thd_arg, const char *info,
PSI_stage_info old_stage;
PSI_stage_info new_stage;
- old_stage.m_key= 0;
- old_stage.m_name= info;
+ new_stage.m_key= 0;
+ new_stage.m_name= info;
- set_thd_stage_info(thd_arg, & old_stage, & new_stage,
+ set_thd_stage_info(thd_arg, & new_stage, & old_stage,
calling_function, calling_file, calling_line);
- return new_stage.m_name;
+ return old_stage.m_name;
}
extern "C"
@@ -715,7 +547,7 @@ char *thd_security_context(THD *thd,
bool Drop_table_error_handler::handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
@@ -726,6 +558,28 @@ bool Drop_table_error_handler::handle_condition(THD *thd,
/**
+ Handle an error from MDL_context::upgrade_lock() and mysql_lock_tables().
+ Ignore ER_LOCK_ABORTED and ER_LOCK_DEADLOCK errors.
+*/
+
+bool
+MDL_deadlock_and_lock_abort_error_handler::
+handle_condition(THD *thd,
+ uint sql_errno,
+ const char *sqlstate,
+ Sql_condition::enum_warning_level *level,
+ const char* msg,
+ Sql_condition **cond_hdl)
+{
+ *cond_hdl= NULL;
+ if (sql_errno == ER_LOCK_ABORTED || sql_errno == ER_LOCK_DEADLOCK)
+ m_need_reopen= true;
+
+ return m_need_reopen;
+}
+
+
+/**
Send timeout to thread.
Note that this is always safe as the thread will always remove it's
@@ -742,26 +596,24 @@ extern "C" void thd_kill_timeout(THD* thd)
}
-THD::THD(bool is_wsrep_applier)
+THD::THD(my_thread_id id, bool is_wsrep_applier)
:Statement(&main_lex, &main_mem_root, STMT_CONVENTIONAL_EXECUTION,
/* statement id */ 0),
rli_fake(0), rgi_fake(0), rgi_slave(NULL),
protocol_text(this), protocol_binary(this),
+ m_current_stage_key(0),
in_sub_stmt(0), log_all_errors(0),
binlog_unsafe_warning_flags(0),
binlog_table_maps(0),
+ bulk_param(0),
table_map_for_update(0),
- arg_of_last_insert_id_function(FALSE),
- first_successful_insert_id_in_prev_stmt(0),
- first_successful_insert_id_in_prev_stmt_for_binlog(0),
- first_successful_insert_id_in_cur_stmt(0),
- stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE),
m_examined_row_count(0),
accessed_rows_and_keys(0),
m_digest(NULL),
m_statement_psi(NULL),
m_idle_psi(NULL),
- thread_id(0),
+ thread_id(id),
+ thread_dbug_id(id),
os_thread_id(0),
global_disable_checkpoint(0),
failed_com_change_user(0),
@@ -784,7 +636,8 @@ THD::THD(bool is_wsrep_applier)
main_da(0, false, false),
m_stmt_da(&main_da),
tdc_hash_pins(0),
- xid_hash_pins(0)
+ xid_hash_pins(0),
+ m_tmp_tables_locked(false)
#ifdef WITH_WSREP
,
wsrep_applier(is_wsrep_applier),
@@ -808,6 +661,7 @@ THD::THD(bool is_wsrep_applier)
set_current_thd(this);
status_var.local_memory_used= sizeof(THD);
status_var.global_memory_used= 0;
+ variables.pseudo_thread_id= thread_id;
variables.max_mem_used= global_system_variables.max_mem_used;
main_da.init();
@@ -821,6 +675,12 @@ THD::THD(bool is_wsrep_applier)
init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0,
MYF(MY_THREAD_SPECIFIC));
+ /*
+ Allocation of user variables for binary logging is always done with main
+ mem root
+ */
+ user_var_events_alloc= mem_root;
+
stmt_arena= this;
thread_stack= 0;
scheduler= thread_scheduler; // Will be fixed later
@@ -849,8 +709,7 @@ THD::THD(bool is_wsrep_applier)
statement_id_counter= 0UL;
// Must be reset to handle error with THD's created for init of mysqld
lex->current_select= 0;
- user_time.val= start_time= start_time_sec_part= 0;
- start_utime= utime_after_query= prior_thr_create_utime= 0L;
+ start_utime= utime_after_query= 0;
utime_after_lock= 0L;
progress.arena= 0;
progress.report_to_client= 0;
@@ -874,14 +733,13 @@ THD::THD(bool is_wsrep_applier)
#ifndef DBUG_OFF
dbug_sentry=THD_SENTRY_MAGIC;
#endif
-#ifndef EMBEDDED_LIBRARY
mysql_audit_init_thd(this);
-#endif
net.vio=0;
net.buff= 0;
+ net.reading_or_writing= 0;
client_capabilities= 0; // minimalistic client
system_thread= NON_SYSTEM_THREAD;
- cleanup_done= abort_on_warning= 0;
+ cleanup_done= free_connection_done= abort_on_warning= 0;
peer_port= 0; // For SHOW PROCESSLIST
transaction.m_pending_rows_event= 0;
transaction.on= 1;
@@ -905,7 +763,6 @@ THD::THD(bool is_wsrep_applier)
/* Variables with default values */
proc_info="login";
where= THD::DEFAULT_WHERE;
- variables.server_id = global_system_variables.server_id;
slave_net = 0;
m_command=COM_CONNECT;
*scramble= '\0';
@@ -927,6 +784,7 @@ THD::THD(bool is_wsrep_applier)
wsrep_affected_rows = 0;
wsrep_replicate_GTID = false;
wsrep_skip_wsrep_GTID = false;
+ wsrep_split_flag = false;
#endif
/* Call to init() below requires fully initialized Open_tables_state. */
reset_open_tables_state(this);
@@ -966,9 +824,8 @@ THD::THD(bool is_wsrep_applier)
by adding the address of the stack.
*/
tmp= (ulong) (my_rnd(&sql_rand) * 0xffffffff);
- my_rnd_init(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id);
+ my_rnd_init(&rand, tmp + (ulong)((size_t) &rand), tmp + (ulong) ::global_query_id);
substitute_null_with_insert_id = FALSE;
- thr_lock_info_init(&lock_info); /* safety: will be reset after start */
lock_info.mysql_thd= (void *)this;
m_token_array= NULL;
@@ -984,8 +841,10 @@ THD::THD(bool is_wsrep_applier)
prepare_derived_at_open= FALSE;
create_tmp_table_for_derived= FALSE;
save_prep_leaf_list= FALSE;
+ org_charset= 0;
/* Restore THR_THD */
set_current_thd(old_THR_THD);
+ inc_thread_count();
}
@@ -1006,7 +865,7 @@ void THD::push_internal_handler(Internal_error_handler *handler)
bool THD::handle_condition(uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
@@ -1133,6 +992,7 @@ Sql_condition* THD::raise_condition(uint sql_errno,
Diagnostics_area *da= get_stmt_da();
Sql_condition *cond= NULL;
DBUG_ENTER("THD::raise_condition");
+ DBUG_ASSERT(level < Sql_condition::WARN_LEVEL_END);
if (!(variables.option_bits & OPTION_SQL_NOTES) &&
(level == Sql_condition::WARN_LEVEL_NOTE))
@@ -1160,24 +1020,23 @@ Sql_condition* THD::raise_condition(uint sql_errno,
push_warning and strict SQL_MODE case.
*/
level= Sql_condition::WARN_LEVEL_ERROR;
- set_killed(KILL_BAD_DATA);
}
- switch (level)
- {
+ if (handle_condition(sql_errno, sqlstate, &level, msg, &cond))
+ DBUG_RETURN(cond);
+
+ switch (level) {
case Sql_condition::WARN_LEVEL_NOTE:
case Sql_condition::WARN_LEVEL_WARN:
got_warning= 1;
break;
case Sql_condition::WARN_LEVEL_ERROR:
break;
- default:
- DBUG_ASSERT(FALSE);
+ case Sql_condition::WARN_LEVEL_END:
+ /* Impossible */
+ break;
}
- if (handle_condition(sql_errno, sqlstate, level, msg, &cond))
- DBUG_RETURN(cond);
-
if (level == Sql_condition::WARN_LEVEL_ERROR)
{
mysql_audit_general(this, MYSQL_AUDIT_GENERAL_ERROR, sql_errno, msg);
@@ -1207,13 +1066,13 @@ Sql_condition* THD::raise_condition(uint sql_errno,
}
extern "C"
-void *thd_alloc(MYSQL_THD thd, unsigned int size)
+void *thd_alloc(MYSQL_THD thd, size_t size)
{
return thd->alloc(size);
}
extern "C"
-void *thd_calloc(MYSQL_THD thd, unsigned int size)
+void *thd_calloc(MYSQL_THD thd, size_t size)
{
return thd->calloc(size);
}
@@ -1225,14 +1084,14 @@ char *thd_strdup(MYSQL_THD thd, const char *str)
}
extern "C"
-char *thd_strmake(MYSQL_THD thd, const char *str, unsigned int size)
+char *thd_strmake(MYSQL_THD thd, const char *str, size_t size)
{
return thd->strmake(str, size);
}
extern "C"
LEX_STRING *thd_make_lex_string(THD *thd, LEX_STRING *lex_str,
- const char *str, unsigned int size,
+ const char *str, size_t size,
int allocate_lex_string)
{
return allocate_lex_string ? thd->make_lex_string(str, size)
@@ -1240,7 +1099,7 @@ LEX_STRING *thd_make_lex_string(THD *thd, LEX_STRING *lex_str,
}
extern "C"
-void *thd_memdup(MYSQL_THD thd, const void* str, unsigned int size)
+void *thd_memdup(MYSQL_THD thd, const void* str, size_t size)
{
return thd->memdup(str, size);
}
@@ -1276,6 +1135,12 @@ extern "C" THD *_current_thd_noinline(void)
{
return my_pthread_getspecific_ptr(THD*,THR_THD);
}
+
+extern "C" my_thread_id next_thread_id_noinline()
+{
+#undef next_thread_id
+ return next_thread_id();
+}
#endif
/*
@@ -1301,6 +1166,8 @@ void THD::init(void)
mysql_mutex_unlock(&LOCK_global_system_variables);
+ user_time.val= start_time= start_time_sec_part= 0;
+
server_status= SERVER_STATUS_AUTOCOMMIT;
if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES;
@@ -1318,14 +1185,21 @@ void THD::init(void)
TL_WRITE);
tx_isolation= (enum_tx_isolation) variables.tx_isolation;
tx_read_only= variables.tx_read_only;
- update_charset();
+ update_charset(); // plugin_thd_var() changed character sets
reset_current_stmt_binlog_format_row();
reset_binlog_local_stmt_filter();
set_status_var_init();
bzero((char *) &org_status_var, sizeof(org_status_var));
- start_bytes_received= 0;
- last_commit_gtid.seq_no= 0;
status_in_global= 0;
+ start_bytes_received= 0;
+ m_last_commit_gtid.seq_no= 0;
+ last_stmt= NULL;
+ /* Reset status of last insert id */
+ arg_of_last_insert_id_function= FALSE;
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt= FALSE;
+ first_successful_insert_id_in_prev_stmt= 0;
+ first_successful_insert_id_in_prev_stmt_for_binlog= 0;
+ first_successful_insert_id_in_cur_stmt= 0;
#ifdef WITH_WSREP
wsrep_exec_mode= wsrep_applier ? REPL_RECV : LOCAL_STATE;
wsrep_conflict_state= NO_CONFLICT;
@@ -1345,6 +1219,7 @@ void THD::init(void)
wsrep_affected_rows = 0;
wsrep_replicate_GTID = false;
wsrep_skip_wsrep_GTID = false;
+ wsrep_split_flag = false;
#endif /* WITH_WSREP */
if (variables.sql_log_bin)
@@ -1362,6 +1237,11 @@ void THD::init(void)
/* Initialize the Debug Sync Facility. See debug_sync.cc. */
debug_sync_init_thread(this);
#endif /* defined(ENABLED_DEBUG_SYNC) */
+
+#ifndef EMBEDDED_LIBRARY
+ session_tracker.enable(this);
+#endif //EMBEDDED_LIBRARY
+
apc_target.init(&LOCK_thd_data);
DBUG_VOID_RETURN;
}
@@ -1447,12 +1327,19 @@ void THD::init_for_queries()
void THD::change_user(void)
{
- add_status_to_global();
+ if (!status_in_global) // Reset in init()
+ add_status_to_global();
- cleanup();
- reset_killed();
+ if (!cleanup_done)
+ cleanup();
cleanup_done= 0;
- status_in_global= 0;
+ reset_killed();
+ thd_clear_errors(this);
+
+ /* Clear warnings. */
+ if (!get_stmt_da()->is_warning_info_empty())
+ get_stmt_da()->clear_warning_info(0);
+
init();
stmt_map.reset();
my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
@@ -1482,9 +1369,10 @@ void THD::cleanup(void)
locked_tables_list.unlock_locked_tables(this);
delete_dynamic(&user_var_events);
- close_temporary_tables(this);
+ close_temporary_tables();
transaction.xid_state.xa_state= XA_NOTR;
+ transaction.xid_state.rm_error= 0;
trans_rollback(this);
xid_cache_delete(this, &transaction.xid_state);
@@ -1516,6 +1404,8 @@ void THD::cleanup(void)
my_hash_free(&user_vars);
sp_cache_clear(&sp_proc_cache);
sp_cache_clear(&sp_func_cache);
+ auto_inc_intervals_forced.empty();
+ auto_inc_intervals_in_cur_stmt_for_binlog.empty();
mysql_ull_cleanup(this);
stmt_map.reset();
@@ -1528,11 +1418,78 @@ void THD::cleanup(void)
}
+/*
+ Free all connection related resources associated with a THD.
+ This is used when we put a thread into the thread cache.
+ After this call should either call ~THD or reset_for_reuse() depending on
+ circumstances.
+*/
+
+void THD::free_connection()
+{
+ DBUG_ASSERT(free_connection_done == 0);
+ my_free(db);
+ db= NULL;
+#ifndef EMBEDDED_LIBRARY
+ if (net.vio)
+ vio_delete(net.vio);
+ net.vio= 0;
+ net_end(&net);
+#endif
+ if (!cleanup_done)
+ cleanup();
+ ha_close_connection(this);
+ plugin_thdvar_cleanup(this);
+ mysql_audit_free_thd(this);
+ main_security_ctx.destroy();
+ /* close all prepared statements, to save memory */
+ stmt_map.reset();
+ free_connection_done= 1;
+#if defined(ENABLED_PROFILING)
+ profiling.restart(); // Reset profiling
+#endif
+}
+
+/*
+ Reset thd for reuse by another connection
+ This is only used for user connections, so the following variables doesn't
+ have to be reset:
+ - Replication (slave) variables.
+ - Variables not reset between each statements. See reset_for_next_command.
+*/
+
+void THD::reset_for_reuse()
+{
+ mysql_audit_init_thd(this);
+ change_user(); // Calls cleanup() & init()
+ get_stmt_da()->reset_diagnostics_area();
+ main_security_ctx.init();
+ failed_com_change_user= 0;
+ is_fatal_error= 0;
+ client_capabilities= 0;
+ peer_port= 0;
+ query_name_consts= 0; // Safety
+ abort_on_warning= 0;
+ free_connection_done= 0;
+ m_command= COM_CONNECT;
+#if defined(ENABLED_PROFILING)
+ profiling.reset();
+#endif
+#ifdef SIGNAL_WITH_VIO_CLOSE
+ active_vio = 0;
+#endif
+}
+
+
THD::~THD()
{
THD *orig_thd= current_thd;
THD_CHECK_SENTRY(this);
DBUG_ENTER("~THD()");
+ /* Check that we have already called thd->unlink() */
+ DBUG_ASSERT(prev == 0 && next == 0);
+ /* This takes a long time so we should not do this under LOCK_thread_count */
+ mysql_mutex_assert_not_owner(&LOCK_thread_count);
/*
In error cases, thd may not be current thd. We have to fix this so
@@ -1547,26 +1504,13 @@ THD::~THD()
mysql_mutex_unlock(&LOCK_thd_data);
#ifdef WITH_WSREP
- if (wsrep_rgi) delete wsrep_rgi;
+ delete wsrep_rgi;
#endif
- /* Close connection */
-#ifndef EMBEDDED_LIBRARY
- if (net.vio)
- vio_delete(net.vio);
- net_end(&net);
-#endif
- stmt_map.reset(); /* close all prepared statements */
- if (!cleanup_done)
- cleanup();
+ if (!free_connection_done)
+ free_connection();
mdl_context.destroy();
- ha_close_connection(this);
- mysql_audit_release(this);
- plugin_thdvar_cleanup(this);
- main_security_ctx.destroy();
- my_free(db);
- db= NULL;
free_root(&transaction.mem_root,MYF(0));
mysql_cond_destroy(&COND_wakeup_ready);
mysql_mutex_destroy(&LOCK_wakeup_ready);
@@ -1587,7 +1531,6 @@ THD::~THD()
rli_fake= NULL;
}
- mysql_audit_free_thd(this);
if (rgi_slave)
rgi_slave->cleanup_after_session();
my_free(semisync_info);
@@ -1602,14 +1545,22 @@ THD::~THD()
lf_hash_put_pins(xid_hash_pins);
/* Ensure everything is freed */
status_var.local_memory_used-= sizeof(THD);
+
+ /* trick to make happy memory accounting system */
+#ifndef EMBEDDED_LIBRARY
+ session_tracker.deinit();
+#endif //EMBEDDED_LIBRARY
+
if (status_var.local_memory_used != 0)
{
DBUG_PRINT("error", ("memory_used: %lld", status_var.local_memory_used));
- SAFEMALLOC_REPORT_MEMORY(my_thread_dbug_id());
- DBUG_ASSERT(status_var.local_memory_used == 0);
+ SAFEMALLOC_REPORT_MEMORY(thread_id);
+ DBUG_ASSERT(status_var.local_memory_used == 0 ||
+ !debug_assert_on_not_freed_memory);
}
update_global_memory_status(status_var.global_memory_used);
set_current_thd(orig_thd == this ? 0 : orig_thd);
+ dec_thread_count();
DBUG_VOID_RETURN;
}
@@ -1624,10 +1575,9 @@ THD::~THD()
NOTES
This function assumes that all variables at start are long/ulong and
- other types are handled explicitely
+ other types are handled explicitly
*/
-
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
{
ulong *end= (ulong*) ((uchar*) to_var +
@@ -1657,11 +1607,10 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
DBUG_PRINT("info", ("global memory_used: %lld size: %lld",
(longlong) global_status_var.global_memory_used,
(longlong) from_var->global_memory_used));
+ update_global_memory_status(from_var->global_memory_used);
}
- // workaround for gcc 4.2.4-1ubuntu4 -fPIE (from DEB_BUILD_HARDENING=1)
- int64 volatile * volatile ptr= &to_var->global_memory_used;
- my_atomic_add64_explicit(ptr, from_var->global_memory_used,
- MY_MEMORY_ORDER_RELAXED);
+ else
+ to_var->global_memory_used+= from_var->global_memory_used;
}
/*
@@ -1675,7 +1624,7 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
NOTE
This function assumes that all variables at start are long/ulong and
- other types are handled explicitely
+ other types are handled explicitly
*/
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
@@ -1977,6 +1926,11 @@ int THD::killed_errno()
case KILL_SERVER:
case KILL_SERVER_HARD:
DBUG_RETURN(ER_SERVER_SHUTDOWN);
+ case KILL_SLAVE_SAME_ID:
+ DBUG_RETURN(ER_SLAVE_SAME_ID);
+ case KILL_WAIT_TIMEOUT:
+ case KILL_WAIT_TIMEOUT_HARD:
+ DBUG_RETURN(ER_NET_READ_INTERRUPTED);
}
DBUG_RETURN(0); // Keep compiler happy
}
@@ -1984,7 +1938,7 @@ int THD::killed_errno()
/*
Remember the location of thread info, the structure needed for
- sql_alloc() and the structure for the net buffer
+ the structure for the net buffer
*/
bool THD::store_globals()
@@ -1995,8 +1949,7 @@ bool THD::store_globals()
*/
DBUG_ASSERT(thread_stack);
- if (set_current_thd(this) ||
- my_pthread_setspecific_ptr(THR_MALLOC, &mem_root))
+ if (set_current_thd(this))
return 1;
/*
mysys_var is concurrently readable by a killer thread.
@@ -2012,7 +1965,16 @@ bool THD::store_globals()
Let mysqld define the thread id (not mysys)
This allows us to move THD to different threads if needed.
*/
- mysys_var->id= thread_id;
+ mysys_var->id= thread_id;
+
+ /* thread_dbug_id should not change for a THD */
+ if (!thread_dbug_id)
+ thread_dbug_id= mysys_var->dbug_id;
+ else
+ {
+ /* This only changes if we are using pool-of-threads */
+ mysys_var->dbug_id= thread_dbug_id;
+ }
#ifdef __NR_gettid
os_thread_id= (uint32)syscall(__NR_gettid);
#else
@@ -2023,14 +1985,13 @@ bool THD::store_globals()
STACK_DIRECTION * (long)my_thread_stack_size;
if (net.vio)
{
- vio_set_thread_id(net.vio, real_id);
net.thd= this;
}
/*
We have to call thr_lock_info_init() again here as THD may have been
created in another thread
*/
- thr_lock_info_init(&lock_info);
+ thr_lock_info_init(&lock_info, mysys_var);
return 0;
}
@@ -2049,7 +2010,6 @@ void THD::reset_globals()
/* Undocking the thread specific data. */
set_current_thd(0);
- my_pthread_setspecific_ptr(THR_MALLOC, NULL);
net.thd= 0;
}
@@ -2177,12 +2137,19 @@ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
{
DBUG_ENTER("THD::convert_string");
size_t new_length= to_cs->mbmaxlen * from_length;
- uint dummy_errors;
+ uint errors;
if (alloc_lex_string(to, new_length + 1))
DBUG_RETURN(true); // EOM
to->length= copy_and_convert((char*) to->str, new_length, to_cs,
- from, from_length, from_cs, &dummy_errors);
+ from, from_length, from_cs, &errors);
to->str[to->length]= 0; // Safety
+ if (errors && lex->parse_vcol_expr)
+ {
+ my_error(ER_BAD_DATA, MYF(0),
+ ErrConvString(from, from_length, from_cs).ptr(),
+ to_cs->csname);
+ DBUG_RETURN(true);
+ }
DBUG_RETURN(false);
}
@@ -2563,8 +2530,10 @@ struct Item_change_record: public ilink
thd->mem_root (due to possible set_n_backup_active_arena called for thd).
*/
-void THD::nocheck_register_item_tree_change(Item **place, Item *old_value,
- MEM_ROOT *runtime_memroot)
+void
+Item_change_list::nocheck_register_item_tree_change(Item **place,
+ Item *old_value,
+ MEM_ROOT *runtime_memroot)
{
Item_change_record *change;
DBUG_ENTER("THD::nocheck_register_item_tree_change");
@@ -2605,8 +2574,10 @@ void THD::nocheck_register_item_tree_change(Item **place, Item *old_value,
changes to substitute the same reference at both locations L1 and L2.
*/
-void THD::check_and_register_item_tree_change(Item **place, Item **new_value,
- MEM_ROOT *runtime_memroot)
+void
+Item_change_list::check_and_register_item_tree_change(Item **place,
+ Item **new_value,
+ MEM_ROOT *runtime_memroot)
{
Item_change_record *change;
DBUG_ENTER("THD::check_and_register_item_tree_change");
@@ -2625,7 +2596,7 @@ void THD::check_and_register_item_tree_change(Item **place, Item **new_value,
}
-void THD::rollback_item_tree_changes()
+void Item_change_list::rollback_item_tree_changes()
{
DBUG_ENTER("THD::rollback_item_tree_changes");
I_List_iterator<Item_change_record> it(change_list);
@@ -2747,13 +2718,6 @@ int select_send::send_data(List<Item> &items)
if (thd->killed == ABORT_QUERY)
DBUG_RETURN(FALSE);
- /*
- We may be passing the control from mysqld to the client: release the
- InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
- by thd
- */
- ha_release_temporary_latches(thd);
-
protocol->prepare_for_resend();
if (protocol->send_result_set_row(&items))
{
@@ -2773,13 +2737,6 @@ int select_send::send_data(List<Item> &items)
bool select_send::send_eof()
{
/*
- We may be passing the control from mysqld to the client: release the
- InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
- by thd
- */
- ha_release_temporary_latches(thd);
-
- /*
Don't send EOF if we're in error condition (which implies we've already
sent or are sending an error)
*/
@@ -3066,6 +3023,10 @@ int select_export::send_data(List<Item> &items)
error_pos= copier.most_important_error_pos();
if (error_pos)
{
+ /*
+ TODO:
+        add a new error message that will show the user this printable_buff
+
char printable_buff[32];
convert_to_printable(printable_buff, sizeof(printable_buff),
error_pos, res->ptr() + res->length() - error_pos,
@@ -3075,6 +3036,11 @@ int select_export::send_data(List<Item> &items)
ER_THD(thd, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"string", printable_buff,
item->name, static_cast<long>(row_count));
+ */
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
+ ER_THD(thd, WARN_DATA_TRUNCATED),
+ item->name, static_cast<long>(row_count));
}
else if (copier.source_end_pos() < res->ptr() + res->length())
{
@@ -3182,7 +3148,7 @@ int select_export::send_data(List<Item> &items)
if ((NEED_ESCAPING(*pos) ||
(check_second_byte &&
- my_mbcharlen(character_set_client, (uchar) *pos) == 2 &&
+ ((uchar) *pos) > 0x7F /* a potential MB2HEAD */ &&
pos + 1 < end &&
NEED_ESCAPING(pos[1]))) &&
/*
@@ -3515,12 +3481,12 @@ void Query_arena::free_items()
{
Item *next;
DBUG_ENTER("Query_arena::free_items");
- /* This works because items are allocated with sql_alloc() */
+ /* This works because items are allocated on THD::mem_root */
for (; free_list; free_list= next)
{
next= free_list->next;
DBUG_ASSERT(free_list != next);
- DBUG_PRINT("info", ("free item: 0x%lx", (ulong) free_list));
+ DBUG_PRINT("info", ("free item: %p", free_list));
free_list->delete_self();
}
/* Postcondition: free_list is 0 */
@@ -3968,7 +3934,7 @@ int select_materialize_with_stats::send_data(List<Item> &items)
void TMP_TABLE_PARAM::init()
{
DBUG_ENTER("TMP_TABLE_PARAM::init");
- DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this));
+ DBUG_PRINT("enter", ("this: %p", this));
field_count= sum_func_count= func_count= hidden_field_count= 0;
group_parts= group_length= group_null_parts= 0;
quick_group= 1;
@@ -4035,7 +4001,7 @@ void Security_context::destroy()
// If not pointer to constant
if (host != my_localhost)
{
- my_free(host);
+ my_free((char*) host);
host= NULL;
}
if (user != delayed_user)
@@ -4200,7 +4166,8 @@ void THD::restore_backup_open_tables_state(Open_tables_backup *backup)
Before we will throw away current open tables state we want
to be sure that it was properly cleaned up.
*/
- DBUG_ASSERT(open_tables == 0 && temporary_tables == 0 &&
+ DBUG_ASSERT(open_tables == 0 &&
+ temporary_tables == 0 &&
derived_tables == 0 &&
lock == 0 &&
locked_tables_mode == LTM_NONE &&
@@ -4306,7 +4273,7 @@ extern "C" void thd_progress_init(MYSQL_THD thd, uint max_stage)
is a high level command (like ALTER TABLE) and we are not in a
stored procedure
*/
- thd->progress.report= ((thd->client_capabilities & CLIENT_PROGRESS) &&
+ thd->progress.report= ((thd->client_capabilities & MARIADB_CLIENT_PROGRESS) &&
thd->progress.report_to_client &&
!thd->in_sub_stmt);
thd->progress.next_report_time= 0;
@@ -4428,31 +4395,144 @@ extern "C" void thd_create_random_password(MYSQL_THD thd,
#ifdef INNODB_COMPATIBILITY_HOOKS
-extern "C" const struct charset_info_st *thd_charset(MYSQL_THD thd)
+
+/** open a table and add it to thd->open_tables
+
+ @note At the moment this is used in innodb background purge threads
+  *only*. There should be no table locks, because the background purge does not
+ change the table as far as LOCK TABLES is concerned. MDL locks are
+ still needed, though.
+
+ To make sure no table stays open for long, this helper allows the thread to
+ have only one table open at any given time.
+*/
+TABLE *open_purge_table(THD *thd, const char *db, size_t dblen,
+ const char *tb, size_t tblen)
{
- return(thd->charset());
+ DBUG_ENTER("open_purge_table");
+ DBUG_ASSERT(thd->open_tables == NULL);
+ DBUG_ASSERT(thd->locked_tables_mode < LTM_PRELOCKED);
+
+ Open_table_context ot_ctx(thd, 0);
+ TABLE_LIST *tl= (TABLE_LIST*)thd->alloc(sizeof(TABLE_LIST));
+
+ tl->init_one_table(db, dblen, tb, tblen, tb, TL_READ);
+ tl->i_s_requested_object= OPEN_TABLE_ONLY;
+
+ bool error= open_table(thd, tl, &ot_ctx);
+
+ /* we don't recover here */
+ DBUG_ASSERT(!error || !ot_ctx.can_recover_from_failed_open());
+
+ if (error)
+ close_thread_tables(thd);
+
+ DBUG_RETURN(error ? NULL : tl->table);
}
-/**
- OBSOLETE : there's no way to ensure the string is null terminated.
- Use thd_query_string instead()
+
+/** Find an open table in the list of prelocked tables
+
+ Used for foreign key actions, for example, in UPDATE t1 SET a=1;
+  where a child table t2 has an FK on t1.a.
+
+ But only when virtual columns are involved, otherwise InnoDB
+ does not need an open TABLE.
*/
-extern "C" char **thd_query(MYSQL_THD thd)
+TABLE *find_fk_open_table(THD *thd, const char *db, size_t db_len,
+ const char *table, size_t table_len)
+{
+ for (TABLE *t= thd->open_tables; t; t= t->next)
+ {
+ if (t->s->db.length == db_len && t->s->table_name.length == table_len &&
+ !strcmp(t->s->db.str, db) && !strcmp(t->s->table_name.str, table) &&
+ t->pos_in_table_list->prelocking_placeholder == TABLE_LIST::FK)
+ return t;
+ }
+ return NULL;
+}
+
+/* the following three functions are used in background purge threads */
+
+MYSQL_THD create_thd()
{
- return (&thd->query_string.string.str);
+ THD *thd= new THD(next_thread_id());
+ thd->thread_stack= (char*) &thd;
+ thd->store_globals();
+ thd->set_command(COM_DAEMON);
+ thd->system_thread= SYSTEM_THREAD_GENERIC;
+ thd->security_ctx->host_or_ip="";
+ add_to_active_threads(thd);
+ return thd;
+}
+
+void destroy_thd(MYSQL_THD thd)
+{
+ thd->add_status_to_global();
+ unlink_not_visible_thd(thd);
+ delete thd;
+ dec_thread_running();
+}
+
+void reset_thd(MYSQL_THD thd)
+{
+ close_thread_tables(thd);
+ thd->mdl_context.release_transactional_locks();
+ thd->free_items();
+ free_root(thd->mem_root, MYF(MY_KEEP_PREALLOC));
+}
+
+unsigned long long thd_get_query_id(const MYSQL_THD thd)
+{
+ return((unsigned long long)thd->query_id);
+}
+
+extern "C" const struct charset_info_st *thd_charset(MYSQL_THD thd)
+{
+ return(thd->charset());
}
+
/**
Get the current query string for the thread.
+ This function is not thread safe and can be used only by thd owner thread.
+
@param The MySQL internal thread pointer
@return query string and length. May be non-null-terminated.
*/
extern "C" LEX_STRING * thd_query_string (MYSQL_THD thd)
{
+ DBUG_ASSERT(thd == current_thd);
return(&thd->query_string.string);
}
+
+/**
+ Get the current query string for the thread.
+
+ @param thd The MySQL internal thread pointer
+ @param buf Buffer where the query string will be copied
+ @param buflen Length of the buffer
+
+ @return Length of the query
+
+ @note This function is thread safe as the query string is
+ accessed under mutex protection and the string is copied
+ into the provided buffer. @see thd_query_string().
+*/
+
+extern "C" size_t thd_query_safe(MYSQL_THD thd, char *buf, size_t buflen)
+{
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ size_t len= MY_MIN(buflen - 1, thd->query_length());
+ memcpy(buf, thd->query(), len);
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ buf[len]= '\0';
+ return len;
+}
+
+
extern "C" int thd_slave_thread(const MYSQL_THD thd)
{
return(thd->slave_thread);
@@ -4464,6 +4544,12 @@ extern "C" int thd_rpl_is_parallel(const MYSQL_THD thd)
return thd->rgi_slave && thd->rgi_slave->is_parallel_exec;
}
+extern "C" int thd_rpl_stmt_based(const MYSQL_THD thd)
+{
+ return thd &&
+ !thd->is_current_stmt_binlog_format_row() &&
+ !thd->is_current_stmt_binlog_disabled();
+}
/* Returns high resolution timestamp for the start
of the current query. */
@@ -4474,19 +4560,20 @@ extern "C" unsigned long long thd_start_utime(const MYSQL_THD thd)
/*
- This function can optionally be called to check if thd_report_wait_for()
+ This function can optionally be called to check if thd_rpl_deadlock_check()
needs to be called for waits done by a given transaction.
- If this function returns false for a given thd, there is no need to do any
- calls to thd_report_wait_for() on that thd.
+ If this function returns false for a given thd, there is no need to do
+ any calls to thd_rpl_deadlock_check() on that thd.
- This call is optional; it is safe to call thd_report_wait_for() in any case.
- This call can be used to save some redundant calls to thd_report_wait_for()
- if desired. (This is unlikely to matter much unless there are _lots_ of
- waits to report, as the overhead of thd_report_wait_for() is small).
+ This call is optional; it is safe to call thd_rpl_deadlock_check() in
+ any case. This call can be used to save some redundant calls to
+ thd_rpl_deadlock_check() if desired. (This is unlikely to matter much
+ unless there are _lots_ of waits to report, as the overhead of
+ thd_rpl_deadlock_check() is small).
*/
extern "C" int
-thd_need_wait_for(const MYSQL_THD thd)
+thd_need_wait_reports(const MYSQL_THD thd)
{
rpl_group_info *rgi;
@@ -4501,75 +4588,9 @@ thd_need_wait_for(const MYSQL_THD thd)
}
/*
- Used by InnoDB/XtraDB to report that one transaction THD is about to go to
- wait for a transactional lock held by another transactions OTHER_THD.
-
- This is used for parallel replication, where transactions are required to
- commit in the same order on the slave as they did on the master. If the
- transactions on the slave encounters lock conflicts on the slave that did
- not exist on the master, this can cause deadlocks.
-
- Normally, such conflicts will not occur, because the same conflict would
- have prevented the two transactions from committing in parallel on the
- master, thus preventing them from running in parallel on the slave in the
- first place. However, it is possible in case when the optimizer chooses a
- different plan on the slave than on the master (eg. table scan instead of
- index scan).
-
- InnoDB/XtraDB reports lock waits using this call. If a lock wait causes a
- deadlock with the pre-determined commit order, we kill the later transaction,
- and later re-try it, to resolve the deadlock.
-
- This call need only receive reports about waits for locks that will remain
- until the holding transaction commits. InnoDB/XtraDB auto-increment locks
- are released earlier, and so need not be reported. (Such false positives are
- not harmful, but could lead to unnecessary kill and retry, so best avoided).
-*/
-extern "C" void
-thd_report_wait_for(MYSQL_THD thd, MYSQL_THD other_thd)
-{
- rpl_group_info *rgi;
- rpl_group_info *other_rgi;
-
- if (!thd)
- return;
- DEBUG_SYNC(thd, "thd_report_wait_for");
- thd->transaction.stmt.mark_trans_did_wait();
- if (!other_thd)
- return;
- binlog_report_wait_for(thd, other_thd);
- rgi= thd->rgi_slave;
- other_rgi= other_thd->rgi_slave;
- if (!rgi || !other_rgi)
- return;
- if (!rgi->is_parallel_exec)
- return;
- if (rgi->rli != other_rgi->rli)
- return;
- if (!rgi->gtid_sub_id || !other_rgi->gtid_sub_id)
- return;
- if (rgi->current_gtid.domain_id != other_rgi->current_gtid.domain_id)
- return;
- if (rgi->gtid_sub_id > other_rgi->gtid_sub_id)
- return;
- /*
- This transaction is about to wait for another transaction that is required
- by replication binlog order to commit after. This would cause a deadlock.
-
- So send a kill to the other transaction, with a temporary error; this will
- cause replication to rollback (and later re-try) the other transaction,
- releasing the lock for this transaction so replication can proceed.
- */
- other_rgi->killed_for_retry= rpl_group_info::RETRY_KILL_KILLED;
- mysql_mutex_lock(&other_thd->LOCK_thd_data);
- other_thd->awake(KILL_CONNECTION);
- mysql_mutex_unlock(&other_thd->LOCK_thd_data);
-}
-
-/*
- Used by storage engines (currently TokuDB) to report that one transaction
- THD is about to go to wait for a transactional lock held by another
- transactions OTHER_THD.
+ Used by storage engines (currently TokuDB and InnoDB/XtraDB) to report that
+ one transaction THD is about to go to wait for a transactional lock held by
+ another transactions OTHER_THD.
This is used for parallel replication, where transactions are required to
commit in the same order on the slave as they did on the master. If the
@@ -4584,9 +4605,9 @@ thd_report_wait_for(MYSQL_THD thd, MYSQL_THD other_thd)
chooses a different plan on the slave than on the master (eg. table scan
instead of index scan).
- InnoDB/XtraDB reports lock waits using this call. If a lock wait causes a
- deadlock with the pre-determined commit order, we kill the later transaction,
- and later re-try it, to resolve the deadlock.
+ Storage engines report lock waits using this call. If a lock wait causes a
+ deadlock with the pre-determined commit order, we kill the later
+ transaction, and later re-try it, to resolve the deadlock.
This call need only receive reports about waits for locks that will remain
until the holding transaction commits. InnoDB/XtraDB auto-increment locks,
@@ -4677,8 +4698,8 @@ thd_rpl_deadlock_check(MYSQL_THD thd, MYSQL_THD other_thd)
Calling this function is just an optimisation to avoid unnecessary
deadlocks. If it was not used, a gap lock would be set that could eventually
- cause a deadlock; the deadlock would be caught by thd_report_wait_for() and
- the transaction T2 killed and rolled back (and later re-tried).
+ cause a deadlock; the deadlock would be caught by thd_rpl_deadlock_check()
+ and the transaction T2 killed and rolled back (and later re-tried).
*/
extern "C" int
thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd)
@@ -5157,9 +5178,9 @@ void THD::inc_status_sort_range()
void THD::inc_status_sort_rows(ha_rows count)
{
- statistic_add(status_var.filesort_rows_, count, &LOCK_status);
+ statistic_add(status_var.filesort_rows_, (ulong)count, &LOCK_status);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_STATEMENT_CALL(inc_statement_sort_rows)(m_statement_psi, count);
+ PSI_STATEMENT_CALL(inc_statement_sort_rows)(m_statement_psi, (ulong)count);
#endif
}
@@ -5530,99 +5551,6 @@ int xid_cache_iterate(THD *thd, my_hash_walk_action action, void *arg)
&argument);
}
-/*
- Tells if two (or more) tables have auto_increment columns and we want to
- lock those tables with a write lock.
-
- SYNOPSIS
- has_two_write_locked_tables_with_auto_increment
- tables Table list
-
- NOTES:
- Call this function only when you have established the list of all tables
- which you'll want to update (including stored functions, triggers, views
- inside your statement).
-
- Ignore tables prelocked for foreign key cascading actions, as these
- actions cannot generate new auto_increment values.
-*/
-
-static bool
-has_write_table_with_auto_increment(TABLE_LIST *tables)
-{
- for (TABLE_LIST *table= tables; table; table= table->next_global)
- {
- /* we must do preliminary checks as table->table may be NULL */
- if (!table->placeholder() &&
- table->prelocking_placeholder != TABLE_LIST::FK &&
- table->table->found_next_number_field &&
- (table->lock_type >= TL_WRITE_ALLOW_WRITE))
- return 1;
- }
-
- return 0;
-}
-
-/*
- checks if we have select tables in the table list and write tables
- with auto-increment column.
-
- SYNOPSIS
- has_two_write_locked_tables_with_auto_increment_and_select
- tables Table list
-
- RETURN VALUES
-
- -true if the table list has atleast one table with auto-increment column
-
-
- and atleast one table to select from.
- -false otherwise
-*/
-
-static bool
-has_write_table_with_auto_increment_and_select(TABLE_LIST *tables)
-{
- bool has_select= false;
- bool has_auto_increment_tables = has_write_table_with_auto_increment(tables);
- for(TABLE_LIST *table= tables; table; table= table->next_global)
- {
- if (!table->placeholder() &&
- table->lock_type <= TL_READ_NO_INSERT &&
- table->prelocking_placeholder != TABLE_LIST::FK)
- {
- has_select= true;
- break;
- }
- }
- return(has_select && has_auto_increment_tables);
-}
-
-/*
- Tells if there is a table whose auto_increment column is a part
- of a compound primary key while is not the first column in
- the table definition.
-
- @param tables Table list
-
- @return true if the table exists, fais if does not.
-*/
-
-static bool
-has_write_table_auto_increment_not_first_in_pk(TABLE_LIST *tables)
-{
- for (TABLE_LIST *table= tables; table; table= table->next_global)
- {
- /* we must do preliminary checks as table->table may be NULL */
- if (!table->placeholder() &&
- table->table->found_next_number_field &&
- (table->lock_type >= TL_WRITE_ALLOW_WRITE)
- && table->table->s->next_number_keypart != 0)
- return 1;
- }
-
- return 0;
-}
/**
Decide on logging format to use for the statement and issue errors
@@ -5739,6 +5667,17 @@ int THD::decide_logging_format(TABLE_LIST *tables)
!(wsrep_binlog_format() == BINLOG_FORMAT_STMT &&
!binlog_filter->db_ok(db)))
{
+
+ if (is_bulk_op())
+ {
+ if (wsrep_binlog_format() == BINLOG_FORMAT_STMT)
+ {
+ my_error(ER_BINLOG_NON_SUPPORTED_BULK, MYF(0));
+ DBUG_PRINT("info",
+ ("decision: no logging since an error was generated"));
+ DBUG_RETURN(-1);
+ }
+ }
/*
Compute one bit field with the union of all the engine
capabilities, and one with the intersection of all the engine
@@ -5753,17 +5692,25 @@ int THD::decide_logging_format(TABLE_LIST *tables)
If different types of engines are about to be updated.
For example: Innodb and Falcon; Innodb and MyIsam.
*/
- my_bool multi_write_engine= FALSE;
+ bool multi_write_engine= FALSE;
/*
If different types of engines are about to be accessed
and any of them is about to be updated. For example:
Innodb and Falcon; Innodb and MyIsam.
*/
- my_bool multi_access_engine= FALSE;
+ bool multi_access_engine= FALSE;
/*
Identifies if a table is changed.
*/
- my_bool is_write= FALSE;
+ bool is_write= FALSE; // If any write tables
+ bool has_read_tables= FALSE; // If any read only tables
+ bool has_auto_increment_write_tables= FALSE; // Write with auto-increment
+ /* If a write table that doesn't have auto increment part first */
+ bool has_write_table_auto_increment_not_first_in_pk= FALSE;
+ bool has_auto_increment_write_tables_not_first= FALSE;
+ bool found_first_not_own_table= FALSE;
+ bool has_write_tables_with_unsafe_statements= FALSE;
+
/*
A pointer to a previous table that was changed.
*/
@@ -5809,31 +5756,6 @@ int THD::decide_logging_format(TABLE_LIST *tables)
}
#endif
- if (wsrep_binlog_format() != BINLOG_FORMAT_ROW && tables)
- {
- /*
- DML statements that modify a table with an auto_increment column based on
- rows selected from a table are unsafe as the order in which the rows are
- fetched fron the select tables cannot be determined and may differ on
- master and slave.
- */
- if (has_write_table_with_auto_increment_and_select(tables))
- lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_WRITE_AUTOINC_SELECT);
-
- if (has_write_table_auto_increment_not_first_in_pk(tables))
- lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_AUTOINC_NOT_FIRST);
-
- /*
- A query that modifies autoinc column in sub-statement can make the
- master and slave inconsistent.
- We can solve these problems in mixed mode by switching to binlogging
- if at least one updated table is used by sub-statement
- */
- if (lex->requires_prelocking() &&
- has_write_table_with_auto_increment(lex->first_not_own_table()))
- lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_AUTOINC_COLUMNS);
- }
-
/*
Get the capabilities vector for all involved storage engines and
mask out the flags for the binary log.
@@ -5872,16 +5794,35 @@ int THD::decide_logging_format(TABLE_LIST *tables)
continue;
}
}
+ if (table == lex->first_not_own_table())
+ found_first_not_own_table= true;
replicated_tables_count++;
+ if (table->prelocking_placeholder != TABLE_LIST::FK)
+ {
+ if (table->lock_type <= TL_READ_NO_INSERT)
+ has_read_tables= true;
+ else if (table->table->found_next_number_field &&
+ (table->lock_type >= TL_WRITE_ALLOW_WRITE))
+ {
+ has_auto_increment_write_tables= true;
+ has_auto_increment_write_tables_not_first= found_first_not_own_table;
+ if (table->table->s->next_number_keypart != 0)
+ has_write_table_auto_increment_not_first_in_pk= true;
+ }
+ }
+
if (table->lock_type >= TL_WRITE_ALLOW_WRITE)
{
+ bool trans;
if (prev_write_table && prev_write_table->file->ht !=
table->table->file->ht)
multi_write_engine= TRUE;
+ if (table->table->s->non_determinstic_insert)
+ has_write_tables_with_unsafe_statements= true;
- my_bool trans= table->table->file->has_transactions();
+ trans= table->table->file->has_transactions();
if (table->table->s->tmp_table)
lex->set_stmt_accessed_table(trans ? LEX::STMT_WRITES_TEMP_TRANS_TABLE :
@@ -5919,6 +5860,34 @@ int THD::decide_logging_format(TABLE_LIST *tables)
prev_access_table= table->table;
}
+ if (wsrep_binlog_format() != BINLOG_FORMAT_ROW)
+ {
+ /*
+ DML statements that modify a table with an auto_increment
+ column based on rows selected from a table are unsafe as the
+      order in which the rows are fetched from the select tables
+ cannot be determined and may differ on master and slave.
+ */
+ if (has_auto_increment_write_tables && has_read_tables)
+ lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_WRITE_AUTOINC_SELECT);
+
+ if (has_write_table_auto_increment_not_first_in_pk)
+ lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_AUTOINC_NOT_FIRST);
+
+ if (has_write_tables_with_unsafe_statements)
+ lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION);
+
+ /*
+ A query that modifies autoinc column in sub-statement can make the
+ master and slave inconsistent.
+ We can solve these problems in mixed mode by switching to binlogging
+ if at least one updated table is used by sub-statement
+ */
+ if (lex->requires_prelocking() &&
+ has_auto_increment_write_tables_not_first)
+ lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_AUTOINC_COLUMNS);
+ }
+
DBUG_PRINT("info", ("flags_write_all_set: 0x%llx", flags_write_all_set));
DBUG_PRINT("info", ("flags_write_some_set: 0x%llx", flags_write_some_set));
DBUG_PRINT("info", ("flags_access_some_set: 0x%llx", flags_access_some_set));
@@ -5972,7 +5941,7 @@ int THD::decide_logging_format(TABLE_LIST *tables)
*/
my_error((error= ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE), MYF(0));
}
- else if (wsrep_binlog_format() == BINLOG_FORMAT_ROW &&
+ else if ((wsrep_binlog_format() == BINLOG_FORMAT_ROW || is_bulk_op()) &&
sqlcom_can_generate_row_events(this))
{
/*
@@ -6045,7 +6014,8 @@ int THD::decide_logging_format(TABLE_LIST *tables)
else
{
if (lex->is_stmt_unsafe() || lex->is_stmt_row_injection()
- || (flags_write_all_set & HA_BINLOG_STMT_CAPABLE) == 0)
+ || (flags_write_all_set & HA_BINLOG_STMT_CAPABLE) == 0 ||
+ is_bulk_op())
{
/* log in row format! */
set_current_stmt_binlog_format_row_if_mixed();
@@ -6374,7 +6344,8 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
Pack records into format for transfer. We are allocating more
memory than needed, but that doesn't matter.
*/
- Row_data_memory memory(table, max_row_length(table, record));
+ Row_data_memory memory(table, max_row_length(table, table->rpl_write_set,
+ record));
if (!memory.has_memory())
return HA_ERR_OUT_OF_MEM;
@@ -6386,7 +6357,14 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
if (variables.option_bits & OPTION_GTID_BEGIN)
is_trans= 1;
- Rows_log_event* const ev=
+ Rows_log_event* ev;
+ if (binlog_should_compress(len))
+ ev =
+ binlog_prepare_pending_rows_event(table, variables.server_id,
+ len, is_trans,
+ static_cast<Write_rows_compressed_log_event*>(0));
+ else
+ ev =
binlog_prepare_pending_rows_event(table, variables.server_id,
len, is_trans,
static_cast<Write_rows_log_event*>(0));
@@ -6420,8 +6398,10 @@ int THD::binlog_update_row(TABLE* table, bool is_trans,
*/
binlog_prepare_row_images(table);
- size_t const before_maxlen = max_row_length(table, before_record);
- size_t const after_maxlen = max_row_length(table, after_record);
+ size_t const before_maxlen= max_row_length(table, table->read_set,
+ before_record);
+ size_t const after_maxlen= max_row_length(table, table->rpl_write_set,
+ after_record);
Row_data_memory row_data(table, before_maxlen, after_maxlen);
if (!row_data.has_memory())
@@ -6450,8 +6430,15 @@ int THD::binlog_update_row(TABLE* table, bool is_trans,
DBUG_DUMP("after_row", after_row, after_size);
#endif
- Rows_log_event* const ev=
- binlog_prepare_pending_rows_event(table, variables.server_id,
+ Rows_log_event* ev;
+ if(binlog_should_compress(before_size + after_size))
+ ev =
+ binlog_prepare_pending_rows_event(table, variables.server_id,
+ before_size + after_size, is_trans,
+ static_cast<Update_rows_compressed_log_event*>(0));
+ else
+ ev =
+ binlog_prepare_pending_rows_event(table, variables.server_id,
before_size + after_size, is_trans,
static_cast<Update_rows_log_event*>(0));
@@ -6493,7 +6480,8 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
Pack records into format for transfer. We are allocating more
memory than needed, but that doesn't matter.
*/
- Row_data_memory memory(table, max_row_length(table, record));
+ Row_data_memory memory(table, max_row_length(table, table->read_set,
+ record));
if (unlikely(!memory.has_memory()))
return HA_ERR_OUT_OF_MEM;
@@ -6506,8 +6494,15 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
if (variables.option_bits & OPTION_GTID_BEGIN)
is_trans= 1;
- Rows_log_event* const ev=
- binlog_prepare_pending_rows_event(table, variables.server_id,
+ Rows_log_event* ev;
+ if(binlog_should_compress(len))
+ ev =
+ binlog_prepare_pending_rows_event(table, variables.server_id,
+ len, is_trans,
+ static_cast<Delete_rows_compressed_log_event*>(0));
+ else
+ ev =
+ binlog_prepare_pending_rows_event(table, variables.server_id,
len, is_trans,
static_cast<Delete_rows_log_event*>(0));
@@ -6525,15 +6520,17 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
}
+/**
+ Remove from read_set spurious columns. The write_set has been
+ handled before in table->mark_columns_needed_for_update.
+*/
+
void THD::binlog_prepare_row_images(TABLE *table)
{
DBUG_ENTER("THD::binlog_prepare_row_images");
- /**
- Remove from read_set spurious columns. The write_set has been
- handled before in table->mark_columns_needed_for_update.
- */
- DBUG_PRINT_BITSET("debug", "table->read_set (before preparing): %s", table->read_set);
+ DBUG_PRINT_BITSET("debug", "table->read_set (before preparing): %s",
+ table->read_set);
THD *thd= table->in_use;
/**
@@ -6551,21 +6548,19 @@ void THD::binlog_prepare_row_images(TABLE *table)
*/
DBUG_ASSERT(table->read_set != &table->tmp_set);
- bitmap_clear_all(&table->tmp_set);
-
- switch(thd->variables.binlog_row_image)
+ switch (thd->variables.binlog_row_image)
{
case BINLOG_ROW_IMAGE_MINIMAL:
/* MINIMAL: Mark only PK */
- table->mark_columns_used_by_index_no_reset(table->s->primary_key,
- &table->tmp_set);
+ table->mark_columns_used_by_index(table->s->primary_key,
+ &table->tmp_set);
break;
case BINLOG_ROW_IMAGE_NOBLOB:
/**
NOBLOB: Remove unnecessary BLOB fields from read_set
(the ones that are not part of PK).
*/
- bitmap_union(&table->tmp_set, table->read_set);
+ bitmap_copy(&table->tmp_set, table->read_set);
for (Field **ptr=table->field ; *ptr ; ptr++)
{
Field *field= (*ptr);
@@ -6583,7 +6578,8 @@ void THD::binlog_prepare_row_images(TABLE *table)
table->write_set);
}
- DBUG_PRINT_BITSET("debug", "table->read_set (after preparing): %s", table->read_set);
+ DBUG_PRINT_BITSET("debug", "table->read_set (after preparing): %s",
+ table->read_set);
DBUG_VOID_RETURN;
}
@@ -6972,15 +6968,27 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
flush the pending rows event if necessary.
*/
{
- Query_log_event qinfo(this, query_arg, query_len, is_trans, direct,
- suppress_use, errcode);
+ int error = 0;
+
/*
Binlog table maps will be irrelevant after a Query_log_event
(they are just removed on the slave side) so after the query
log event is written to the binary log, we pretend that no
table maps were written.
- */
- int error= mysql_bin_log.write(&qinfo);
+ */
+ if(binlog_should_compress(query_len))
+ {
+ Query_compressed_log_event qinfo(this, query_arg, query_len, is_trans, direct,
+ suppress_use, errcode);
+ error= mysql_bin_log.write(&qinfo);
+ }
+ else
+ {
+ Query_log_event qinfo(this, query_arg, query_len, is_trans, direct,
+ suppress_use, errcode);
+ error= mysql_bin_log.write(&qinfo);
+ }
+
binlog_table_maps= 0;
DBUG_RETURN(error);
}
@@ -7010,33 +7018,22 @@ THD::signal_wakeup_ready()
mysql_cond_signal(&COND_wakeup_ready);
}
-
-void THD::rgi_lock_temporary_tables()
+void THD::set_last_commit_gtid(rpl_gtid &gtid)
{
- mysql_mutex_lock(&rgi_slave->rli->data_lock);
- temporary_tables= rgi_slave->rli->save_temporary_tables;
-}
-
-void THD::rgi_unlock_temporary_tables(bool clear)
-{
- rgi_slave->rli->save_temporary_tables= temporary_tables;
- mysql_mutex_unlock(&rgi_slave->rli->data_lock);
- if (clear)
+#ifndef EMBEDDED_LIBRARY
+ bool changed_gtid= (m_last_commit_gtid.seq_no != gtid.seq_no);
+#endif
+ m_last_commit_gtid= gtid;
+#ifndef EMBEDDED_LIBRARY
+ if (changed_gtid &&
+ session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)->is_enabled())
{
- /*
- Temporary tables are shared with other by sql execution threads.
- As a safety messure, clear the pointer to the common area.
- */
- temporary_tables= 0;
- }
-}
-
-bool THD::rgi_have_temporary_tables()
-{
- return rgi_slave->rli->save_temporary_tables != 0;
+ session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)->
+ mark_as_changed(this, (LEX_CSTRING*)Sys_last_gtid_ptr);
+ }
+#endif
}
-
void
wait_for_commit::reinit()
{
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 2517f5cc06f..38e55f9c4a9 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2009, 2016, MariaDB
+ Copyright (c) 2009, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
/* Classes in mysql */
#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
+#include "dur_prop.h"
#include <waiting_threads.h>
#include "sql_const.h"
#include <mysql/plugin_audit.h>
@@ -36,6 +37,7 @@
#include "violite.h" /* vio_is_connected */
#include "thr_lock.h" /* thr_lock_type, THR_LOCK_DATA, THR_LOCK_INFO */
#include "thr_timer.h"
+#include "thr_malloc.h"
#include "sql_digest_stream.h" // sql_digest_state
@@ -44,6 +46,7 @@
#include <mysql/psi/mysql_idle.h>
#include <mysql/psi/mysql_table.h>
#include <mysql_com_server.h>
+#include "session_tracker.h"
extern "C"
void set_thd_stage_info(void *thd,
@@ -150,7 +153,7 @@ extern MYSQL_PLUGIN_IMPORT const char **errmesg;
extern bool volatile shutdown_in_progress;
extern "C" LEX_STRING * thd_query_string (MYSQL_THD thd);
-extern "C" char **thd_query(MYSQL_THD thd);
+extern "C" size_t thd_query_safe(MYSQL_THD thd, char *buf, size_t buflen);
/**
@class CSET_STRING
@@ -176,11 +179,10 @@ public:
}
inline char *str() const { return string.str; }
- inline uint32 length() const { return string.length; }
+ inline size_t length() const { return string.length; }
CHARSET_INFO *charset() const { return cs; }
friend LEX_STRING * thd_query_string (MYSQL_THD thd);
- friend char **thd_query(MYSQL_THD thd);
};
@@ -225,6 +227,7 @@ typedef struct st_copy_info {
List<Item> *update_values;
/* for VIEW ... WITH CHECK OPTION */
TABLE_LIST *view;
+ TABLE_LIST *table_list; /* Normal table */
} COPY_INFO;
@@ -255,7 +258,7 @@ public:
class Alter_drop :public Sql_alloc {
public:
- enum drop_type {KEY, COLUMN, FOREIGN_KEY };
+ enum drop_type {KEY, COLUMN, FOREIGN_KEY, CHECK_CONSTRAINT };
const char *name;
enum drop_type type;
bool drop_if_exists;
@@ -270,15 +273,21 @@ public:
*/
Alter_drop *clone(MEM_ROOT *mem_root) const
{ return new (mem_root) Alter_drop(*this); }
+ const char *type_name()
+ {
+ return type == COLUMN ? "COLUMN" :
+ type == CHECK_CONSTRAINT ? "CONSTRAINT" :
+ type == KEY ? "INDEX" : "FOREIGN KEY";
+ }
};
class Alter_column :public Sql_alloc {
public:
const char *name;
- Item *def;
- Alter_column(const char *par_name,Item *literal)
- :name(par_name), def(literal) {}
+ Virtual_column_info *default_value;
+ Alter_column(const char *par_name, Virtual_column_info *expr)
+ :name(par_name), default_value(expr) {}
/**
Used to make a clone of this object for ALTER/CREATE TABLE
@sa comment for Key_part_spec::clone
@@ -468,16 +477,27 @@ enum killed_state
KILL_TIMEOUT= 8,
KILL_TIMEOUT_HARD= 9,
/*
+ When a binlog reading thread connects to the server, it kills
+ all the binlog threads with the same ID.
+ */
+ KILL_SLAVE_SAME_ID= 10,
+ /*
All of the following killed states will kill the connection
KILL_CONNECTION must be the first of these and it must start with
an even number (becasue of HARD bit)!
*/
- KILL_CONNECTION= 10,
- KILL_CONNECTION_HARD= 11,
- KILL_SYSTEM_THREAD= 12,
- KILL_SYSTEM_THREAD_HARD= 13,
- KILL_SERVER= 14,
- KILL_SERVER_HARD= 15
+ KILL_CONNECTION= 12,
+ KILL_CONNECTION_HARD= 13,
+ KILL_SYSTEM_THREAD= 14,
+ KILL_SYSTEM_THREAD_HARD= 15,
+ KILL_SERVER= 16,
+ KILL_SERVER_HARD= 17,
+ /*
+ Used in threadpool to signal wait timeout.
+ */
+ KILL_WAIT_TIMEOUT= 18,
+ KILL_WAIT_TIMEOUT_HARD= 19
+
};
#define killed_mask_hard(killed) ((killed_state) ((killed) & ~KILL_HARD_BIT))
@@ -500,8 +520,6 @@ class Time_zone;
#define THD_CHECK_SENTRY(thd) DBUG_ASSERT(thd->dbug_sentry == THD_SENTRY_MAGIC)
-typedef ulonglong sql_mode_t;
-
typedef struct system_variables
{
/*
@@ -522,7 +540,8 @@ typedef struct system_variables
uint dynamic_variables_size; /* how many bytes are in use */
ulonglong max_heap_table_size;
- ulonglong tmp_table_size;
+ ulonglong tmp_memory_table_size;
+ ulonglong tmp_disk_table_size;
ulonglong long_query_time;
ulonglong max_statement_time;
ulonglong optimizer_switch;
@@ -560,11 +579,13 @@ typedef struct system_variables
*/
ulong saved_auto_increment_increment, saved_auto_increment_offset;
#endif /* WITH_WSREP */
+ uint eq_range_index_dive_limit;
ulong lock_wait_timeout;
ulong join_cache_level;
ulong max_allowed_packet;
ulong max_error_count;
ulong max_length_for_sort_data;
+ ulong max_recursive_iterations;
ulong max_sort_length;
ulong max_tmp_tables;
ulong max_insert_delayed_threads;
@@ -635,6 +656,7 @@ typedef struct system_variables
my_bool old_alter_table;
my_bool old_passwords;
my_bool big_tables;
+ my_bool only_standard_compliant_cte;
my_bool query_cache_strip_comments;
my_bool sql_log_slow;
my_bool sql_log_bin;
@@ -665,7 +687,7 @@ typedef struct system_variables
/* Error messages */
MY_LOCALE *lc_messages;
- const char **errmsgs; /* lc_messages->errmsg->errmsgs */
+ const char ***errmsgs; /* lc_messages->errmsg->errmsgs */
/* Locale Support */
MY_LOCALE *lc_time_names;
@@ -688,6 +710,12 @@ typedef struct system_variables
my_bool pseudo_slave_mode;
+ char *session_track_system_variables;
+ ulong session_track_transaction_info;
+ my_bool session_track_schema;
+ my_bool session_track_state_change;
+
+ ulong threadpool_priority;
} SV;
/**
@@ -702,6 +730,7 @@ typedef struct system_status_var
ulong com_create_tmp_table;
ulong com_drop_tmp_table;
ulong com_other;
+ ulong com_multi;
ulong com_stmt_prepare;
ulong com_stmt_reprepare;
@@ -778,6 +807,7 @@ typedef struct system_status_var
ulong feature_timezone; /* +1 when XPATH is used */
ulong feature_trigger; /* +1 opening a table with triggers */
ulong feature_xml; /* +1 when XPATH is used */
+ ulong feature_window_functions; /* +1 when window functions are used */
/* From MASTER_GTID_WAIT usage */
ulonglong master_gtid_wait_timeouts; /* Number of timeouts */
@@ -826,8 +856,7 @@ typedef struct system_status_var
Global status variables
*/
-extern ulong feature_files_opened_with_delayed_keys;
-
+extern ulong feature_files_opened_with_delayed_keys, feature_check_constraint;
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);
@@ -1060,7 +1089,10 @@ public:
inline char *query() const { return query_string.str(); }
- inline uint32 query_length() const { return query_string.length(); }
+ inline uint32 query_length() const
+ {
+ return static_cast<uint32>(query_string.length());
+ }
CHARSET_INFO *query_charset() const { return query_string.charset(); }
void set_query_inner(const CSET_STRING &string_arg)
{
@@ -1233,7 +1265,8 @@ public:
priv_user - The user privilege we are using. May be "" for anonymous user.
ip - client IP
*/
- char *host, *user, *ip;
+ const char *host;
+ char *user, *ip;
char priv_user[USERNAME_LENGTH];
char proxy_user[USERNAME_LENGTH + MAX_HOSTNAME + 5];
/* The host privilege we are using */
@@ -1280,7 +1313,40 @@ public:
*/
struct Item_change_record;
-typedef I_List<Item_change_record> Item_change_list;
+class Item_change_list
+{
+ I_List<Item_change_record> change_list;
+public:
+ void nocheck_register_item_tree_change(Item **place, Item *old_value,
+ MEM_ROOT *runtime_memroot);
+ void check_and_register_item_tree_change(Item **place, Item **new_value,
+ MEM_ROOT *runtime_memroot);
+ void rollback_item_tree_changes();
+ void move_elements_to(Item_change_list *to)
+ {
+ change_list.move_elements_to(&to->change_list);
+ }
+ bool is_empty() { return change_list.is_empty(); }
+};
+
+
+class Item_change_list_savepoint: public Item_change_list
+{
+public:
+ Item_change_list_savepoint(Item_change_list *list)
+ {
+ list->move_elements_to(this);
+ }
+ void rollback(Item_change_list *list)
+ {
+ list->rollback_item_tree_changes();
+ move_elements_to(list);
+ }
+ ~Item_change_list_savepoint()
+ {
+ DBUG_ASSERT(is_empty());
+ }
+};
/**
@@ -1297,6 +1363,61 @@ enum enum_locked_tables_mode
LTM_always_last
};
+/**
+ The following structure is an extension to TABLE_SHARE and is
+ exclusively for temporary tables.
+
+ @note:
+ Although, TDC_element has data members (like next, prev &
+ all_tables) to store the list of TABLE_SHARE & TABLE objects
+ related to a particular TABLE_SHARE, they cannot be moved to
+ TABLE_SHARE in order to be reused for temporary tables. This
+ is because concurrent threads iterating through the hash of
+ TDC_element's may need access to all_tables; but if all_tables
+ is made part of TABLE_SHARE, then TDC_element->share->all_tables
+ is not always guaranteed to be valid, as TDC_element can live
+ longer than TABLE_SHARE.
+*/
+struct TMP_TABLE_SHARE : public TABLE_SHARE
+{
+private:
+ /*
+ Link to all temporary table shares. Declared as private to
+ avoid direct manipulation with those objects. One should
+ use methods of I_P_List template instead.
+ */
+ TMP_TABLE_SHARE *tmp_next;
+ TMP_TABLE_SHARE **tmp_prev;
+
+ friend struct All_tmp_table_shares;
+
+public:
+ /*
+ Doubly-linked (back-linked) lists of used and unused TABLE objects
+ for this share.
+ */
+ All_share_tables_list all_tmp_tables;
+};
+
+/**
+ Helper class which specifies which members of TMP_TABLE_SHARE are
+ used for participation in the list of temporary tables.
+*/
+
+struct All_tmp_table_shares
+{
+ static inline TMP_TABLE_SHARE **next_ptr(TMP_TABLE_SHARE *l)
+ {
+ return &l->tmp_next;
+ }
+ static inline TMP_TABLE_SHARE ***prev_ptr(TMP_TABLE_SHARE *l)
+ {
+ return &l->tmp_prev;
+ }
+};
+
+/* Also used in rpl_rli.h. */
+typedef I_P_List <TMP_TABLE_SHARE, All_tmp_table_shares> All_tmp_tables_list;
/**
Class that holds information about tables which were opened and locked
@@ -1326,15 +1447,25 @@ public:
base tables that were opened with @see open_tables().
*/
TABLE *open_tables;
+
/**
- List of temporary tables used by this thread. Contains user-level
- temporary tables, created with CREATE TEMPORARY TABLE, and
- internal temporary tables, created, e.g., to resolve a SELECT,
+ A list of temporary tables used by this thread. This includes
+ user-level temporary tables, created with CREATE TEMPORARY TABLE,
+ and internal temporary tables, created, e.g., to resolve a SELECT,
or for an intermediate table used in ALTER.
- XXX Why are internal temporary tables added to this list?
*/
- TABLE *temporary_tables;
+ All_tmp_tables_list *temporary_tables;
+
+ /*
+ Derived tables.
+ */
TABLE *derived_tables;
+
+ /*
+ Temporary tables created for recursive table references.
+ */
+ TABLE *rec_tables;
+
/*
During a MySQL session, one can lock tables in two modes: automatic
or manual. In automatic mode all necessary tables are locked just before
@@ -1412,8 +1543,12 @@ public:
void reset_open_tables_state(THD *thd)
{
- open_tables= temporary_tables= derived_tables= 0;
- extra_lock= lock= 0;
+ open_tables= 0;
+ temporary_tables= 0;
+ derived_tables= 0;
+ rec_tables= 0;
+ extra_lock= 0;
+ lock= 0;
locked_tables_mode= LTM_NONE;
state_flags= 0U;
m_reprepare_observer= NULL;
@@ -1461,9 +1596,9 @@ public:
Discrete_intervals_list auto_inc_intervals_forced;
ulonglong limit_found_rows;
ha_rows cuted_fields, sent_row_count, examined_row_count;
- ulong client_capabilities;
+ ulonglong client_capabilities;
ulong query_plan_flags;
- uint in_sub_stmt;
+ uint in_sub_stmt; /* 0, SUB_STMT_TRIGGER or SUB_STMT_FUNCTION */
bool enable_slow_log;
bool last_insert_id_used;
SAVEPOINT *savepoints;
@@ -1481,8 +1616,8 @@ enum enum_thread_type
SYSTEM_THREAD_EVENT_SCHEDULER= 8,
SYSTEM_THREAD_EVENT_WORKER= 16,
SYSTEM_THREAD_BINLOG_BACKGROUND= 32,
- SYSTEM_THREAD_SLAVE_INIT= 64,
- SYSTEM_THREAD_SLAVE_BACKGROUND= 128
+ SYSTEM_THREAD_SLAVE_BACKGROUND= 64,
+ SYSTEM_THREAD_GENERIC= 128
};
inline char const *
@@ -1497,7 +1632,7 @@ show_system_thread(enum_thread_type thread)
RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_SQL);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_SCHEDULER);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_WORKER);
- RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_INIT);
+ RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_BACKGROUND);
default:
sprintf(buf, "<UNKNOWN SYSTEM THREAD: %d>", thread);
return buf;
@@ -1510,6 +1645,7 @@ show_system_thread(enum_thread_type thread)
Internal error handlers are exception handlers used by the server
implementation.
*/
+
class Internal_error_handler
{
protected:
@@ -1547,7 +1683,7 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl) = 0;
@@ -1568,7 +1704,7 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
@@ -1590,11 +1726,11 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
- if (level == Sql_condition::WARN_LEVEL_ERROR)
+ if (*level == Sql_condition::WARN_LEVEL_ERROR)
errors++;
return false;
}
@@ -1618,7 +1754,7 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl);
@@ -1627,6 +1763,30 @@ private:
/**
+ Internal error handler to process an error from MDL_context::upgrade_lock()
+ and mysql_lock_tables(). Used by implementations of HANDLER READ and
+ LOCK TABLES LOCAL.
+*/
+
+class MDL_deadlock_and_lock_abort_error_handler: public Internal_error_handler
+{
+public:
+ virtual
+ bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char *sqlstate,
+ Sql_condition::enum_warning_level *level,
+ const char* msg,
+ Sql_condition **cond_hdl);
+
+ bool need_reopen() const { return m_need_reopen; };
+ void init() { m_need_reopen= FALSE; };
+private:
+ bool m_need_reopen;
+};
+
+
+/**
Tables that were locked with LOCK TABLES statement.
Encapsulates a list of TABLE_LIST instances for tables
@@ -1947,6 +2107,14 @@ void dbug_serve_apcs(THD *thd, int n_calls);
*/
class THD :public Statement,
+ /*
+ This is to track items changed during execution of a prepared
+ statement/stored procedure. It's created by
+ nocheck_register_item_tree_change() in memory root of THD,
+ and freed in rollback_item_tree_changes().
+ For conventional execution it's always empty.
+ */
+ public Item_change_list,
public MDL_context_owner,
public Open_tables_state
{
@@ -1963,6 +2131,19 @@ private:
inline bool is_conventional() const
{ DBUG_ASSERT(0); return Statement::is_conventional(); }
+ void dec_thread_count(void)
+ {
+ DBUG_ASSERT(thread_count > 0);
+ thread_safe_decrement32(&thread_count);
+ signal_thd_deleted();
+ }
+
+
+ void inc_thread_count(void)
+ {
+ thread_safe_increment32(&thread_count);
+ }
+
public:
MDL_context mdl_context;
@@ -2038,6 +2219,13 @@ public:
/* all prepared statements and cursors of this connection */
Statement_map stmt_map;
+
+ /* Last created prepared statement */
+ Statement *last_stmt;
+ inline void set_last_stmt(Statement *stmt)
+ { last_stmt= (is_error() ? NULL : stmt); }
+ inline void clear_last_stmt() { last_stmt= NULL; }
+
/*
A pointer to the stack frame of handle_one_connection(),
which is called first in the thread for handling a client
@@ -2120,7 +2308,7 @@ public:
/* Needed by MariaDB semi sync replication */
Trans_binlog_info *semisync_info;
- ulong client_capabilities; /* What the client supports */
+ ulonglong client_capabilities; /* What the client supports */
ulong max_client_packet_length;
HASH handler_tables_hash;
@@ -2134,6 +2322,9 @@ public:
uint dbug_sentry; // watch out for memory corruption
#endif
struct st_my_thread_var *mysys_var;
+
+ /* Original charset number from the first client packet, or COM_CHANGE_USER*/
+ CHARSET_INFO *org_charset;
private:
/*
Type of current query: COM_STMT_PREPARE, COM_QUERY, etc. Set from
@@ -2162,7 +2353,7 @@ public:
bool report_to_client;
/*
true, if we will send progress report packets to a client
- (client has requested them, see CLIENT_PROGRESS; report_to_client
+ (client has requested them, see MARIADB_CLIENT_PROGRESS; report_to_client
is true; not in sub-statement)
*/
bool report;
@@ -2400,14 +2591,6 @@ public:
#ifdef SIGNAL_WITH_VIO_CLOSE
Vio* active_vio;
#endif
- /*
- This is to track items changed during execution of a prepared
- statement/stored procedure. It's created by
- nocheck_register_item_tree_change() in memory root of THD, and freed in
- rollback_item_tree_changes(). For conventional execution it's always
- empty.
- */
- Item_change_list change_list;
/*
A permanent memory area of the statement. For conventional
@@ -2423,6 +2606,8 @@ public:
*/
Query_arena *stmt_arena;
+ void *bulk_param;
+
/*
map for tables that will be updated for a multi-table update query
statement, for other query statements, this will be zero.
@@ -2719,7 +2904,7 @@ public:
ulong query_plan_flags;
ulong query_plan_fsort_passes;
pthread_t real_id; /* For debugging */
- my_thread_id thread_id;
+ my_thread_id thread_id, thread_dbug_id;
uint32 os_thread_id;
uint tmp_table, global_disable_checkpoint;
uint server_status,open_options;
@@ -2840,7 +3025,7 @@ public:
/* for IS NULL => = last_insert_id() fix in remove_eq_conds() */
bool substitute_null_with_insert_id;
bool in_lock_tables;
- bool bootstrap, cleanup_done;
+ bool bootstrap, cleanup_done, free_connection_done;
/** is set if some thread specific value(s) used in a statement. */
bool thread_specific_used;
@@ -2983,7 +3168,7 @@ public:
/* Debug Sync facility. See debug_sync.cc. */
struct st_debug_sync_control *debug_sync_control;
#endif /* defined(ENABLED_DEBUG_SYNC) */
- THD(bool is_wsrep_applier= false);
+ THD(my_thread_id id, bool is_wsrep_applier= false);
~THD();
@@ -3003,6 +3188,8 @@ public:
void change_user(void);
void cleanup(void);
void cleanup_after_query();
+ void free_connection();
+ void reset_for_reuse();
bool store_globals();
void reset_globals();
#ifdef SIGNAL_WITH_VIO_CLOSE
@@ -3010,7 +3197,6 @@ public:
{
mysql_mutex_lock(&LOCK_thd_data);
active_vio = vio;
- vio_set_thread_id(vio, pthread_self());
mysql_mutex_unlock(&LOCK_thd_data);
}
inline void clear_active_vio()
@@ -3175,6 +3361,12 @@ public:
}
ulonglong current_utime() { return microsecond_interval_timer(); }
+ /* Tell SHOW PROCESSLIST to show time from this point */
+ inline void set_time_for_next_stage()
+ {
+ utime_after_query= current_utime();
+ }
+
/**
Update server status after execution of a top level statement.
Currently only checks if a query was slow, and assigns
@@ -3184,7 +3376,7 @@ public:
*/
void update_server_status()
{
- utime_after_query= current_utime();
+ set_time_for_next_stage();
if (utime_after_query > utime_after_lock + variables.long_query_time)
server_status|= SERVER_QUERY_WAS_SLOW;
}
@@ -3403,6 +3595,12 @@ public:
To raise this flag, use my_error().
*/
inline bool is_error() const { return m_stmt_da->is_error(); }
+ void set_bulk_execution(void *bulk)
+ {
+ bulk_param= bulk;
+ m_stmt_da->set_bulk_execution(MY_TEST(bulk));
+ }
+ bool is_bulk_op() const { return MY_TEST(bulk_param); }
/// Returns Diagnostics-area for the current statement.
Diagnostics_area *get_stmt_da()
@@ -3418,6 +3616,22 @@ public:
inline CHARSET_INFO *charset() { return variables.character_set_client; }
void update_charset();
+ void update_charset(CHARSET_INFO *character_set_client,
+ CHARSET_INFO *collation_connection)
+ {
+ variables.character_set_client= character_set_client;
+ variables.collation_connection= collation_connection;
+ update_charset();
+ }
+ void update_charset(CHARSET_INFO *character_set_client,
+ CHARSET_INFO *collation_connection,
+ CHARSET_INFO *character_set_results)
+ {
+ variables.character_set_client= character_set_client;
+ variables.collation_connection= collation_connection;
+ variables.character_set_results= character_set_results;
+ update_charset();
+ }
inline Query_arena *activate_stmt_arena_if_needed(Query_arena *backup)
{
@@ -3464,11 +3678,6 @@ public:
*/
memcpy((char*) place, new_value, sizeof(*new_value));
}
- void nocheck_register_item_tree_change(Item **place, Item *old_value,
- MEM_ROOT *runtime_memroot);
- void check_and_register_item_tree_change(Item **place, Item **new_value,
- MEM_ROOT *runtime_memroot);
- void rollback_item_tree_changes();
/*
Cleanup statement parse state (parse tree, lex) and execution
@@ -3665,13 +3874,13 @@ public:
*/
DBUG_PRINT("debug",
("temporary_tables: %s, in_sub_stmt: %s, system_thread: %s",
- YESNO(temporary_tables), YESNO(in_sub_stmt),
+ YESNO(has_thd_temporary_tables()), YESNO(in_sub_stmt),
show_system_thread(system_thread)));
if (in_sub_stmt == 0)
{
if (wsrep_binlog_format() == BINLOG_FORMAT_ROW)
set_current_stmt_binlog_format_row();
- else if (temporary_tables == NULL)
+ else if (!has_thd_temporary_tables())
set_current_stmt_binlog_format_stmt();
}
DBUG_VOID_RETURN;
@@ -3722,7 +3931,7 @@ public:
mysql_mutex_unlock(&LOCK_thd_data);
#ifdef HAVE_PSI_THREAD_INTERFACE
if (result)
- PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len);
+ PSI_THREAD_CALL(set_thread_db)(new_db, (int) new_db_len);
#endif
return result;
}
@@ -3747,7 +3956,7 @@ public:
db_length= new_db_len;
mysql_mutex_unlock(&LOCK_thd_data);
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len);
+ PSI_THREAD_CALL(set_thread_db)(new_db, (int) new_db_len);
#endif
}
}
@@ -3760,11 +3969,28 @@ public:
{
if (db == NULL)
{
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- return TRUE;
+ /*
+ No default database is set. In this case if it's guaranteed that
+ no CTE can be used in the statement then we can throw an error right
+ now at the parser stage. Otherwise the decision about throwing such
+ a message must be postponed until a post-parser stage when we are able
+ to resolve all CTE names as we don't need this message to be thrown
+ for any CTE references.
+ */
+ if (!lex->with_clauses_list)
+ {
+ my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ return TRUE;
+ }
+ /* This will allow to throw an error later for non-CTE references */
+ *p_db= NULL;
+ *p_db_length= 0;
+ }
+ else
+ {
+ *p_db= strmake(db, db_length);
+ *p_db_length= db_length;
}
- *p_db= strmake(db, db_length);
- *p_db_length= db_length;
return FALSE;
}
thd_scheduler event_scheduler;
@@ -3791,7 +4017,7 @@ private:
*/
bool handle_condition(uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl);
@@ -4051,11 +4277,10 @@ private:
LEX_STRING invoker_user;
LEX_STRING invoker_host;
- /* Protect against add/delete of temporary tables in parallel replication */
- void rgi_lock_temporary_tables();
- void rgi_unlock_temporary_tables(bool clear);
- bool rgi_have_temporary_tables();
public:
+#ifndef EMBEDDED_LIBRARY
+ Session_tracker session_tracker;
+#endif //EMBEDDED_LIBRARY
/*
Flag, mutex and condition for a thread to wait for a signal from another
thread.
@@ -4070,28 +4295,97 @@ public:
The GTID assigned to the last commit. If no GTID was assigned to any commit
so far, this is indicated by last_commit_gtid.seq_no == 0.
*/
- rpl_gtid last_commit_gtid;
+private:
+ rpl_gtid m_last_commit_gtid;
+
+public:
+ rpl_gtid get_last_commit_gtid() { return m_last_commit_gtid; }
+ void set_last_commit_gtid(rpl_gtid &gtid);
- inline void lock_temporary_tables()
- {
- if (rgi_slave)
- rgi_lock_temporary_tables();
- }
- inline void unlock_temporary_tables(bool clear)
- {
- if (rgi_slave)
- rgi_unlock_temporary_tables(clear);
- }
- inline bool have_temporary_tables()
- {
- return (temporary_tables ||
- (rgi_slave && unlikely(rgi_have_temporary_tables())));
- }
LF_PINS *tdc_hash_pins;
LF_PINS *xid_hash_pins;
bool fix_xid_hash_pins();
+/* Members related to temporary tables. */
+public:
+ /* Opened table states. */
+ enum Temporary_table_state {
+ TMP_TABLE_IN_USE,
+ TMP_TABLE_NOT_IN_USE,
+ TMP_TABLE_ANY
+ };
+ bool has_thd_temporary_tables();
+
+ TABLE *create_and_open_tmp_table(handlerton *hton,
+ LEX_CUSTRING *frm,
+ const char *path,
+ const char *db,
+ const char *table_name,
+ bool open_in_engine);
+
+ TABLE *find_temporary_table(const char *db, const char *table_name,
+ Temporary_table_state state= TMP_TABLE_IN_USE);
+ TABLE *find_temporary_table(const TABLE_LIST *tl,
+ Temporary_table_state state= TMP_TABLE_IN_USE);
+
+ TMP_TABLE_SHARE *find_tmp_table_share_w_base_key(const char *key,
+ uint key_length);
+ TMP_TABLE_SHARE *find_tmp_table_share(const char *db,
+ const char *table_name);
+ TMP_TABLE_SHARE *find_tmp_table_share(const TABLE_LIST *tl);
+ TMP_TABLE_SHARE *find_tmp_table_share(const char *key, uint key_length);
+
+ bool open_temporary_table(TABLE_LIST *tl);
+ bool open_temporary_tables(TABLE_LIST *tl);
+
+ bool close_temporary_tables();
+ bool rename_temporary_table(TABLE *table, const char *db,
+ const char *table_name);
+ bool drop_temporary_table(TABLE *table, bool *is_trans, bool delete_table);
+ bool rm_temporary_table(handlerton *hton, const char *path);
+ void mark_tmp_tables_as_free_for_reuse();
+ void mark_tmp_table_as_free_for_reuse(TABLE *table);
+
+ TMP_TABLE_SHARE* save_tmp_table_share(TABLE *table);
+ void restore_tmp_table_share(TMP_TABLE_SHARE *share);
+
+private:
+ /* Whether a lock has been acquired? */
+ bool m_tmp_tables_locked;
+
+ bool has_temporary_tables();
+ uint create_tmp_table_def_key(char *key, const char *db,
+ const char *table_name);
+ TMP_TABLE_SHARE *create_temporary_table(handlerton *hton, LEX_CUSTRING *frm,
+ const char *path, const char *db,
+ const char *table_name);
+ TABLE *find_temporary_table(const char *key, uint key_length,
+ Temporary_table_state state);
+ TABLE *open_temporary_table(TMP_TABLE_SHARE *share, const char *alias,
+ bool open_in_engine);
+ bool find_and_use_tmp_table(const TABLE_LIST *tl, TABLE **out_table);
+ bool use_temporary_table(TABLE *table, TABLE **out_table);
+ void close_temporary_table(TABLE *table);
+ bool log_events_and_free_tmp_shares();
+ void free_tmp_table_share(TMP_TABLE_SHARE *share, bool delete_table);
+ void free_temporary_table(TABLE *table);
+ bool lock_temporary_tables();
+ void unlock_temporary_tables();
+
+ inline uint tmpkeyval(TMP_TABLE_SHARE *share)
+ {
+ return uint4korr(share->table_cache_key.str +
+ share->table_cache_key.length - 4);
+ }
+
+ inline TMP_TABLE_SHARE *tmp_table_share(TABLE *table)
+ {
+ DBUG_ASSERT(table->s->tmp_table);
+ return static_cast<TMP_TABLE_SHARE *>(table->s);
+ }
+
+public:
inline ulong wsrep_binlog_format() const
{
return WSREP_FORMAT(variables.binlog_format);
@@ -4140,6 +4434,14 @@ public:
ulong wsrep_affected_rows;
bool wsrep_replicate_GTID;
bool wsrep_skip_wsrep_GTID;
+ /* This flag is set when InnoDB does an intermediate commit while
+ processing the LOAD DATA INFILE statement by splitting it into 10K-row
+ chunks. If the flag is set, then binlog rotation is not performed
+ while an intermediate transaction tries to commit, because in this case
+ rotation causes unregistration of the InnoDB handler. The InnoDB handler
+ is registered again later, but replication of the last chunk of rows is
+ skipped by the InnoDB engine: */
+ bool wsrep_split_flag;
#endif /* WITH_WSREP */
/* Handling of timeouts for commands */
@@ -4185,8 +4487,40 @@ public:
(THD_TRANS::DID_WAIT | THD_TRANS::CREATED_TEMP_TABLE |
THD_TRANS::DROPPED_TEMP_TABLE | THD_TRANS::DID_DDL));
}
+ /*
+ Reset current_linfo
+ Setting current_linfo to 0 needs to be done with LOCK_thread_count to
+ ensure that adjust_linfo_offsets doesn't use a structure that may
+ be deleted.
+ */
+ inline void reset_current_linfo()
+ {
+ mysql_mutex_lock(&LOCK_thread_count);
+ current_linfo= 0;
+ mysql_mutex_unlock(&LOCK_thread_count);
+ }
};
+inline void add_to_active_threads(THD *thd)
+{
+ mysql_mutex_lock(&LOCK_thread_count);
+ threads.append(thd);
+ mysql_mutex_unlock(&LOCK_thread_count);
+}
+
+/*
+ This should be called when you want to delete a thd that was not
+ running any queries.
+ This function will assert that the THD is linked.
+*/
+
+inline void unlink_not_visible_thd(THD *thd)
+{
+ thd->assert_linked();
+ mysql_mutex_lock(&LOCK_thread_count);
+ thd->unlink();
+ mysql_mutex_unlock(&LOCK_thread_count);
+}
/** A short cut for thd->get_stmt_da()->set_ok_status(). */
@@ -4206,6 +4540,8 @@ my_eof(THD *thd)
{
thd->set_row_count_func(-1);
thd->get_stmt_da()->set_eof_status(thd);
+
+ TRANSACT_TRACKER(add_trx_state(thd, TX_RESULT_SET));
}
#define tmp_disable_binlog(A) \
@@ -4285,12 +4621,13 @@ protected:
/*
All descendant classes have their send_data() skip the first
unit->offset_limit_cnt rows sent. Select_materialize
- also uses unit->get_unit_column_types().
+ also uses unit->get_column_types().
*/
SELECT_LEX_UNIT *unit;
/* Something used only by the parser: */
public:
select_result(THD *thd_arg): select_result_sink(thd_arg) {}
+ void set_unit(SELECT_LEX_UNIT *unit_arg) { unit= unit_arg; }
virtual ~select_result() {};
/**
Change wrapped select_result.
@@ -4347,6 +4684,9 @@ public:
#endif
virtual void update_used_tables() {}
+ /* this method is called just before the first row of the table can be read */
+ virtual void prepare_to_read_rows() {}
+
void reset_offset_limit()
{
unit->offset_limit_cnt= 0;
@@ -4578,6 +4918,7 @@ class select_create: public select_insert {
/* m_lock or thd->extra_lock */
MYSQL_LOCK **m_plock;
bool exit_done;
+ TMP_TABLE_SHARE *saved_tmp_table_share;
public:
select_create(THD *thd_arg, TABLE_LIST *table_arg,
@@ -4585,12 +4926,14 @@ public:
Alter_info *alter_info_arg,
List<Item> &select_fields,enum_duplicates duplic, bool ignore,
TABLE_LIST *select_tables_arg):
- select_insert(thd_arg, NULL, NULL, &select_fields, 0, 0, duplic, ignore),
+ select_insert(thd_arg, table_arg, NULL, &select_fields, 0, 0, duplic,
+ ignore),
create_table(table_arg),
create_info(create_info_par),
select_tables(select_tables_arg),
alter_info(alter_info_arg),
- m_plock(NULL), exit_done(0)
+ m_plock(NULL), exit_done(0),
+ saved_tmp_table_share(0)
{}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
@@ -4636,16 +4979,9 @@ inline uint tmp_table_max_key_parts() { return MI_MAX_KEY_SEG; }
class TMP_TABLE_PARAM :public Sql_alloc
{
-private:
- /* Prevent use of these (not safe because of lists and copy_field) */
- TMP_TABLE_PARAM(const TMP_TABLE_PARAM &);
- void operator=(TMP_TABLE_PARAM &);
-
public:
List<Item> copy_funcs;
- List<Item> save_copy_funcs;
Copy_field *copy_field, *copy_field_end;
- Copy_field *save_copy_field, *save_copy_field_end;
uchar *group_buff;
Item **items_to_copy; /* Fields in tmp table */
TMP_ENGINE_COLUMNDEF *recinfo, *start_recinfo;
@@ -4680,7 +5016,13 @@ public:
uint hidden_field_count;
uint group_parts,group_length,group_null_parts;
uint quick_group;
- bool using_indirect_summary_function;
+ /**
+ Enabled when we have at least one outer_sum_func. Needed when used
+ along with distinct.
+
+ @see create_tmp_table
+ */
+ bool using_outer_summary_function;
CHARSET_INFO *table_charset;
bool schema_table;
/* TRUE if the temp table is created for subquery materialization. */
@@ -4710,9 +5052,10 @@ public:
TMP_TABLE_PARAM()
:copy_field(0), group_parts(0),
group_length(0), group_null_parts(0),
- schema_table(0), materialized_subquery(0), force_not_null_cols(0),
- precomputed_group_by(0),
- force_copy_fields(0), bit_fields_as_long(0), skip_create_table(0)
+ using_outer_summary_function(0),
+ schema_table(0), materialized_subquery(0), force_not_null_cols(0),
+ precomputed_group_by(0),
+ force_copy_fields(0), bit_fields_as_long(0), skip_create_table(0)
{}
~TMP_TABLE_PARAM()
{
@@ -4724,17 +5067,13 @@ public:
if (copy_field) /* Fix for Intel compiler */
{
delete [] copy_field;
- save_copy_field= copy_field= NULL;
- save_copy_field_end= copy_field_end= NULL;
+ copy_field= NULL;
+ copy_field_end= NULL;
}
}
- void free_copy_field_data()
- {
- for (Copy_field *ptr= copy_field ; ptr != copy_field_end ; ptr++)
- ptr->tmp.free();
- }
};
+
class select_union :public select_result_interceptor
{
public:
@@ -4772,6 +5111,36 @@ public:
};
+class select_union_recursive :public select_union
+{
+ public:
+ /* The temporary table with the new records generated by one iterative step */
+ TABLE *incr_table;
+ /* One of tables from the list rec_tables (determined dynamically) */
+ TABLE *first_rec_table_to_update;
+ /* The temporary tables used for recursive table references */
+ List<TABLE> rec_tables;
+ /*
+ The count of how many times cleanup() was called with cleaned==false
+ for the unit specifying the recursive CTE for which this object was created
+ or for the unit specifying a CTE that mutually recursive with this CTE.
+ */
+ uint cleanup_count;
+
+ select_union_recursive(THD *thd_arg):
+ select_union(thd_arg),
+ incr_table(0), first_rec_table_to_update(0), cleanup_count(0) {};
+
+ int send_data(List<Item> &items);
+ bool create_result_table(THD *thd, List<Item> *column_types,
+ bool is_distinct, ulonglong options,
+ const char *alias,
+ bool bit_fields_as_long,
+ bool create_table,
+ bool keep_row_order= FALSE);
+ void cleanup();
+};
+
/**
UNION result that is passed directly to the receiving select_result
without filling a temporary table.
@@ -5048,16 +5417,19 @@ public:
/* Structs used when sorting */
+struct SORT_FIELD_ATTR
+{
+ uint length; /* Length of sort field */
+ uint suffix_length; /* Length suffix (0-4) */
+};
+
-typedef struct st_sort_field {
+struct SORT_FIELD: public SORT_FIELD_ATTR
+{
Field *field; /* Field to sort */
Item *item; /* Item if not sorting fields */
- uint length; /* Length of sort field */
- uint suffix_length; /* Length suffix (0-4) */
- Item_result result_type; /* Type of item */
bool reverse; /* if descending sort */
- bool need_strxnfrm; /* If we have to use strxnfrm() */
-} SORT_FIELD;
+};
typedef struct st_sort_buffer {
@@ -5135,86 +5507,7 @@ class user_var_entry
user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
bool create_if_not_exists);
-/*
- Unique -- class for unique (removing of duplicates).
- Puts all values to the TREE. If the tree becomes too big,
- it's dumped to the file. User can request sorted values, or
- just iterate through them. In the last case tree merging is performed in
- memory simultaneously with iteration, so it should be ~2-3x faster.
- */
-
-class Unique :public Sql_alloc
-{
- DYNAMIC_ARRAY file_ptrs;
- ulong max_elements;
- ulonglong max_in_memory_size;
- IO_CACHE file;
- TREE tree;
- uchar *record_pointers;
- ulong filtered_out_elems;
- bool flush();
- uint size;
- uint full_size;
- uint min_dupl_count; /* always 0 for unions, > 0 for intersections */
- bool with_counters;
-
- bool merge(TABLE *table, uchar *buff, bool without_last_merge);
-
-public:
- ulong elements;
- Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
- uint size_arg, ulonglong max_in_memory_size_arg,
- uint min_dupl_count_arg= 0);
- ~Unique();
- ulong elements_in_tree() { return tree.elements_in_tree; }
- inline bool unique_add(void *ptr)
- {
- DBUG_ENTER("unique_add");
- DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements));
- if (!(tree.flag & TREE_ONLY_DUPS) &&
- tree.elements_in_tree >= max_elements && flush())
- DBUG_RETURN(1);
- DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg));
- }
-
- bool is_in_memory() { return (my_b_tell(&file) == 0); }
- void close_for_expansion() { tree.flag= TREE_ONLY_DUPS; }
-
- bool get(TABLE *table);
-
- /* Cost of searching for an element in the tree */
- inline static double get_search_cost(ulonglong tree_elems, uint compare_factor)
- {
- return log((double) tree_elems) / (compare_factor * M_LN2);
- }
-
- static double get_use_cost(uint *buffer, size_t nkeys, uint key_size,
- ulonglong max_in_memory_size, uint compare_factor,
- bool intersect_fl, bool *in_memory);
- inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size,
- ulonglong max_in_memory_size)
- {
- register ulonglong max_elems_in_tree=
- max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size);
- return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
- }
-
- void reset();
- bool walk(TABLE *table, tree_walk_action action, void *walk_action_arg);
-
- uint get_size() const { return size; }
- ulonglong get_max_in_memory_size() const { return max_in_memory_size; }
-
- friend int unique_write_to_file(uchar* key, element_count count, Unique *unique);
- friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique);
-
- friend int unique_write_to_file_with_count(uchar* key, element_count count,
- Unique *unique);
- friend int unique_intersect_write_to_ptrs(uchar* key, element_count count,
- Unique *unique);
-};
-
-
+class SORT_INFO;
class multi_delete :public select_result_interceptor
{
TABLE_LIST *delete_tables, *table_being_deleted;
@@ -5241,13 +5534,11 @@ public:
int send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
int do_deletes();
- int do_table_deletes(TABLE *table, bool ignore);
+ int do_table_deletes(TABLE *table, SORT_INFO *sort_info, bool ignore);
bool send_eof();
- inline ha_rows num_deleted()
- {
- return deleted;
- }
+ inline ha_rows num_deleted() const { return deleted; }
virtual void abort_result_set();
+ void prepare_to_read_rows();
};
@@ -5291,16 +5582,11 @@ public:
bool initialize_tables (JOIN *join);
int do_updates();
bool send_eof();
- inline ha_rows num_found()
- {
- return found;
- }
- inline ha_rows num_updated()
- {
- return updated;
- }
+ inline ha_rows num_found() const { return found; }
+ inline ha_rows num_updated() const { return updated; }
virtual void abort_result_set();
void update_used_tables();
+ void prepare_to_read_rows();
};
class my_var : public Sql_alloc {
@@ -5465,6 +5751,15 @@ public:
*/
#define CF_ADMIN_COMMAND (1U << 19)
+/**
+ SP Bulk execution safe
+*/
+#define CF_SP_BULK_SAFE (1U << 20)
+/**
+ SP Bulk execution optimized
+*/
+#define CF_SP_BULK_OPTIMIZED (1U << 21)
+
/* Bits in server_command_flags */
/**
@@ -5481,11 +5776,19 @@ public:
sent by the user (ie: stored procedure).
*/
#define CF_SKIP_QUESTIONS (1U << 1)
-
+#ifdef WITH_WSREP
/**
Do not check that wsrep snapshot is ready before allowing this command
*/
#define CF_SKIP_WSREP_CHECK (1U << 2)
+#else
+#define CF_SKIP_WSREP_CHECK 0
+#endif /* WITH_WSREP */
+
+/**
+ Do not allow it for COM_MULTI batch
+*/
+#define CF_NO_COM_MULTI (1U << 3)
/* Inline functions */
@@ -5630,6 +5933,28 @@ void thd_exit_cond(MYSQL_THD thd, const PSI_stage_info *stage,
#define THD_EXIT_COND(P1, P2) \
thd_exit_cond(P1, P2, __func__, __FILE__, __LINE__)
+inline bool binlog_should_compress(ulong len)
+{
+ return opt_bin_log_compress &&
+ len >= opt_bin_log_compress_min_len;
+}
+
+
+/**
+ Save thd sql_mode on instantiation.
+ On destruction it resets the mode to the previously stored value.
+*/
+class Sql_mode_save
+{
+ public:
+ Sql_mode_save(THD *thd) : thd(thd), old_mode(thd->variables.sql_mode) {}
+ ~Sql_mode_save() { thd->variables.sql_mode = old_mode; }
+
+ private:
+ THD *thd;
+ sql_mode_t old_mode; // SQL mode saved at construction time.
+};
+
#endif /* MYSQL_SERVER */
#endif /* SQL_CLASS_INCLUDED */
diff --git a/sql/sql_cmd.h b/sql/sql_cmd.h
index 904578134b4..e33f8e443dc 100644
--- a/sql/sql_cmd.h
+++ b/sql/sql_cmd.h
@@ -93,6 +93,9 @@ enum enum_sql_command {
SQLCOM_CREATE_ROLE, SQLCOM_DROP_ROLE, SQLCOM_GRANT_ROLE, SQLCOM_REVOKE_ROLE,
SQLCOM_COMPOUND,
SQLCOM_SHOW_GENERIC,
+ SQLCOM_ALTER_USER,
+ SQLCOM_SHOW_CREATE_USER,
+ SQLCOM_EXECUTE_IMMEDIATE,
/*
When a command is added here, be sure it's also added in mysqld.cc
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index abb234ab4b1..d7b388e310f 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -770,7 +770,6 @@ void update_global_user_stats(THD *thd, bool create_user, time_t now)
bool thd_init_client_charset(THD *thd, uint cs_number)
{
- SV *gv=&global_system_variables;
CHARSET_INFO *cs;
/*
Use server character set and collation if
@@ -781,10 +780,9 @@ bool thd_init_client_charset(THD *thd, uint cs_number)
if (!opt_character_set_client_handshake ||
!(cs= get_charset(cs_number, MYF(0))))
{
- DBUG_ASSERT(is_supported_parser_charset(gv->character_set_client));
- thd->variables.character_set_client= gv->character_set_client;
- thd->variables.collation_connection= gv->collation_connection;
- thd->variables.character_set_results= gv->character_set_results;
+ thd->update_charset(global_system_variables.character_set_client,
+ global_system_variables.collation_connection,
+ global_system_variables.character_set_results);
}
else
{
@@ -794,10 +792,9 @@ bool thd_init_client_charset(THD *thd, uint cs_number)
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client",
cs->csname);
return true;
- }
- thd->variables.character_set_results=
- thd->variables.collation_connection=
- thd->variables.character_set_client= cs;
+ }
+ thd->org_charset= cs;
+ thd->update_charset(cs,cs,cs);
}
return false;
}
@@ -807,14 +804,17 @@ bool thd_init_client_charset(THD *thd, uint cs_number)
Initialize connection threads
*/
+#ifndef EMBEDDED_LIBRARY
bool init_new_connection_handler_thread()
{
pthread_detach_this_thread();
if (my_thread_init())
{
+ statistic_increment(aborted_connects,&LOCK_status);
statistic_increment(connection_errors_internal, &LOCK_status);
return 1;
}
+ DBUG_EXECUTE_IF("simulate_failed_connection_1", return(1); );
return 0;
}
@@ -830,7 +830,6 @@ bool init_new_connection_handler_thread()
1 error
*/
-#ifndef EMBEDDED_LIBRARY
static int check_connection(THD *thd)
{
uint connect_errors= 0;
@@ -931,6 +930,7 @@ static int check_connection(THD *thd)
this is treated as a global server OOM error.
TODO: remove the need for my_strdup.
*/
+ statistic_increment(aborted_connects,&LOCK_status);
statistic_increment(connection_errors_internal, &LOCK_status);
return 1; /* The error is set by my_strdup(). */
}
@@ -948,7 +948,7 @@ static int check_connection(THD *thd)
if (thd->main_security_ctx.host)
{
if (thd->main_security_ctx.host != my_localhost)
- thd->main_security_ctx.host[MY_MIN(strlen(thd->main_security_ctx.host),
+ ((char*) thd->main_security_ctx.host)[MY_MIN(strlen(thd->main_security_ctx.host),
HOSTNAME_LENGTH)]= 0;
thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host;
}
@@ -996,6 +996,7 @@ static int check_connection(THD *thd)
Hence, there is no reason to account on OOM conditions per client IP,
we count failures in the global server status instead.
*/
+ statistic_increment(aborted_connects,&LOCK_status);
statistic_increment(connection_errors_internal, &LOCK_status);
return 1; /* The error is set by alloc(). */
}
@@ -1034,7 +1035,8 @@ bool setup_connection_thread_globals(THD *thd)
{
close_connection(thd, ER_OUT_OF_RESOURCES);
statistic_increment(aborted_connects,&LOCK_status);
- MYSQL_CALLBACK(thd->scheduler, end_thread, (thd, 0));
+ statistic_increment(connection_errors_internal, &LOCK_status);
+ thd->scheduler->end_thread(thd, 0);
return 1; // Error
}
return 0;
@@ -1062,7 +1064,7 @@ bool login_connection(THD *thd)
int error= 0;
DBUG_ENTER("login_connection");
DBUG_PRINT("info", ("login_connection called by thread %lu",
- thd->thread_id));
+ (ulong) thd->thread_id));
/* Use "connect_timeout" value during connection phase */
my_net_set_read_timeout(net, connect_timeout);
@@ -1088,7 +1090,7 @@ bool login_connection(THD *thd)
/* Updates global user connection stats. */
if (increment_connection_count(thd, TRUE))
{
- my_error(ER_OUTOFMEMORY, MYF(0), 2*sizeof(USER_STATS));
+ my_error(ER_OUTOFMEMORY, MYF(0), (int) (2*sizeof(USER_STATS)));
error= 1;
goto exit;
}
@@ -1114,13 +1116,12 @@ void end_connection(THD *thd)
{
wsrep_status_t rcode= wsrep->free_connection(wsrep, thd->thread_id);
if (rcode) {
- WSREP_WARN("wsrep failed to free connection context: %lu, code: %d",
- thd->thread_id, rcode);
+ WSREP_WARN("wsrep failed to free connection context: %lld code: %d",
+ (longlong) thd->thread_id, rcode);
}
}
thd->wsrep_client_thread= 0;
#endif
- plugin_thdvar_cleanup(thd);
if (thd->user_connect)
{
@@ -1168,7 +1169,6 @@ void prepare_new_connection_state(THD* thd)
*/
thd->proc_info= 0;
thd->set_command(COM_SLEEP);
- thd->set_time();
thd->init_for_queries();
if (opt_init_connect.length && !(sctx->master_access & SUPER_ACL))
@@ -1210,7 +1210,6 @@ void prepare_new_connection_state(THD* thd)
}
thd->proc_info=0;
- thd->set_time();
thd->init_for_queries();
}
}
@@ -1235,11 +1234,11 @@ void prepare_new_connection_state(THD* thd)
pthread_handler_t handle_one_connection(void *arg)
{
- THD *thd= (THD*) arg;
+ CONNECT *connect= (CONNECT*) arg;
- mysql_thread_set_psi_id(thd->thread_id);
+ mysql_thread_set_psi_id(connect->thread_id);
- do_handle_one_connection(thd);
+ do_handle_one_connection(connect);
return 0;
}
@@ -1271,19 +1270,17 @@ bool thd_is_connection_alive(THD *thd)
return FALSE;
}
-void do_handle_one_connection(THD *thd_arg)
-{
- THD *thd= thd_arg;
-
- thd->thr_create_utime= microsecond_interval_timer();
- /* We need to set this because of time_out_user_resource_limits */
- thd->start_utime= thd->thr_create_utime;
- if (MYSQL_CALLBACK_ELSE(thd->scheduler, init_new_connection_thread, (), 0))
+void do_handle_one_connection(CONNECT *connect)
+{
+ ulonglong thr_create_utime= microsecond_interval_timer();
+ THD *thd;
+ if (connect->scheduler->init_new_connection_thread() ||
+ !(thd= connect->create_thd(NULL)))
{
- close_connection(thd, ER_OUT_OF_RESOURCES);
- statistic_increment(aborted_connects,&LOCK_status);
- MYSQL_CALLBACK(thd->scheduler, end_thread, (thd, 0));
+ scheduler_functions *scheduler= connect->scheduler;
+ connect->close_with_error(0, 0, ER_OUT_OF_RESOURCES);
+ scheduler->end_thread(0, 0);
return;
}
@@ -1292,14 +1289,22 @@ void do_handle_one_connection(THD *thd_arg)
increment slow_launch_threads counter if it took more than
slow_launch_time seconds to create the thread.
*/
- if (thd->prior_thr_create_utime)
+
+ if (connect->prior_thr_create_utime)
{
- ulong launch_time= (ulong) (thd->thr_create_utime -
- thd->prior_thr_create_utime);
+ ulong launch_time= (ulong) (thr_create_utime -
+ connect->prior_thr_create_utime);
if (launch_time >= slow_launch_time*1000000L)
statistic_increment(slow_launch_threads, &LOCK_status);
- thd->prior_thr_create_utime= 0;
}
+ delete connect;
+
+ /* Make THD visible in show processlist */
+ add_to_active_threads(thd);
+
+ thd->thr_create_utime= thr_create_utime;
+ /* We need to set this because of time_out_user_resource_limits */
+ thd->start_utime= thr_create_utime;
/*
handle_one_connection() is normally the only way a thread would
@@ -1346,7 +1351,7 @@ end_thread:
if (thd->userstat_running)
update_global_user_stats(thd, create_user, time(NULL));
- if (MYSQL_CALLBACK_ELSE(thd->scheduler, end_thread, (thd, 1), 0))
+ if (thd->scheduler->end_thread(thd, 1))
return; // Probably no-threads
/*
@@ -1358,3 +1363,99 @@ end_thread:
}
}
#endif /* EMBEDDED_LIBRARY */
+
+
+/* Handling of CONNECT objects */
+
+/*
+ Close connection without error and delete the connect object
+ This and close_with_error are only called if we didn't manage to
+ create a new thd object.
+*/
+
+void CONNECT::close_and_delete()
+{
+ DBUG_ENTER("close_and_delete");
+
+ if (vio)
+ vio_close(vio);
+ if (thread_count_incremented)
+ dec_connection_count(scheduler);
+ statistic_increment(connection_errors_internal, &LOCK_status);
+ statistic_increment(aborted_connects,&LOCK_status);
+
+ delete this;
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Close a connection with a possible error to the end user
+ Also deletes the connection object, like close_and_delete()
+*/
+
+void CONNECT::close_with_error(uint sql_errno,
+ const char *message, uint close_error)
+{
+ THD *thd= create_thd(NULL);
+ if (thd)
+ {
+ if (sql_errno)
+ net_send_error(thd, sql_errno, message, NULL);
+ close_connection(thd, close_error);
+ delete thd;
+ set_current_thd(0);
+ }
+ close_and_delete();
+}
+
+
+CONNECT::~CONNECT()
+{
+ if (vio)
+ vio_delete(vio);
+}
+
+
+/* Reuse or create a THD based on a CONNECT object */
+
+THD *CONNECT::create_thd(THD *thd)
+{
+ bool res, thd_reused= thd != 0;
+ DBUG_ENTER("create_thd");
+
+ DBUG_EXECUTE_IF("simulate_failed_connection_2", DBUG_RETURN(0); );
+
+ if (thd)
+ {
+ /* reuse old thd */
+ thd->reset_for_reuse();
+ /*
+ reset thread_id's, but not thread_dbug_id's as the latter isn't allowed
+ to change as there is already structures in thd marked with the old
+ value.
+ */
+ thd->thread_id= thd->variables.pseudo_thread_id= thread_id;
+ }
+ else if (!(thd= new THD(thread_id)))
+ DBUG_RETURN(0);
+
+ set_current_thd(thd);
+ res= my_net_init(&thd->net, vio, thd, MYF(MY_THREAD_SPECIFIC));
+ vio= 0; // Vio now handled by thd
+
+ if (res || thd->is_error())
+ {
+ if (!thd_reused)
+ delete thd;
+ set_current_thd(0);
+ DBUG_RETURN(0);
+ }
+
+ init_net_server_extension(thd);
+
+ thd->security_ctx->host= host;
+ thd->extra_port= extra_port;
+ thd->scheduler= scheduler;
+ thd->real_id= real_id;
+ DBUG_RETURN(thd);
+}
diff --git a/sql/sql_connect.h b/sql/sql_connect.h
index bab171606ba..9cd31814ad7 100644
--- a/sql/sql_connect.h
+++ b/sql/sql_connect.h
@@ -19,8 +19,43 @@
#include "my_sys.h" /* pthread_handler_t */
#include "mysql_com.h" /* enum_server_command */
#include "structs.h"
+#include <mysql/psi/mysql_socket.h>
#include <hash.h>
+/*
+ Object to hold connect information to be given to the newly created thread
+*/
+
+struct scheduler_functions;
+
+class CONNECT : public ilink {
+public:
+ /* To be copied to THD */
+ Vio *vio; /* Copied to THD with my_net_init() */
+ const char *host;
+ scheduler_functions *scheduler;
+ my_thread_id thread_id;
+ pthread_t real_id;
+ bool extra_port;
+
+ /* Own variables */
+ bool thread_count_incremented;
+ ulonglong prior_thr_create_utime;
+
+ CONNECT()
+ :vio(0), host(0), scheduler(thread_scheduler), thread_id(0), real_id(0),
+ extra_port(0),
+ thread_count_incremented(0), prior_thr_create_utime(0)
+ {
+ };
+ ~CONNECT();
+ void close_and_delete();
+ void close_with_error(uint sql_errno,
+ const char *message, uint close_error);
+ THD *create_thd(THD *thd);
+};
+
+
class THD;
typedef struct st_lex_user LEX_USER;
typedef struct user_conn USER_CONN;
@@ -37,7 +72,7 @@ void free_global_index_stats(void);
void free_global_client_stats(void);
pthread_handler_t handle_one_connection(void *arg);
-void do_handle_one_connection(THD *thd_arg);
+void do_handle_one_connection(CONNECT *connect);
bool init_new_connection_handler_thread();
void reset_mqh(LEX_USER *lu, bool get_them);
bool check_mqh(THD *thd, uint check_command);
diff --git a/sql/sql_const.h b/sql/sql_const.h
index 1d6549f777f..abadfbe48ff 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -123,6 +123,8 @@
#define DISK_BUFFER_SIZE (uint) (IO_SIZE*16) /* Size of diskbuffer */
#define FRM_VER_TRUE_VARCHAR (FRM_VER+4) /* 10 */
+#define FRM_VER_EXPRESSSIONS (FRM_VER+5) /* 11 */
+#define FRM_VER_CURRENT FRM_VER_EXPRESSSIONS
/***************************************************************************
Configuration parameters
@@ -257,6 +259,8 @@
that does not respond to "initial server greeting" timely
*/
#define CONNECT_TIMEOUT 10
+ /* Wait 5 minutes before removing thread from thread cache */
+#define THREAD_CACHE_TIMEOUT 5*60
/* The following can also be changed from the command line */
#define DEFAULT_CONCURRENCY 10
diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc
new file mode 100644
index 00000000000..d922a7a7551
--- /dev/null
+++ b/sql/sql_cte.cc
@@ -0,0 +1,1447 @@
+#include "sql_class.h"
+#include "sql_lex.h"
+#include "sql_cte.h"
+#include "sql_view.h" // for make_valid_column_names
+#include "sql_parse.h"
+#include "sql_select.h"
+
+
+/**
+ @brief
+ Add a new element to this with clause
+
+ @param elem The with element to add to this with clause
+
+ @details
+ The method adds the with element 'elem' to the elements
+ in this with clause. The method reports an error if
+ the number of the added element exceeds the value
+ of the constant max_number_of_elements_in_with_clause.
+
+ @retval
+ true if an error is reported
+ false otherwise
+*/
+
+bool With_clause::add_with_element(With_element *elem)
+{
+ if (with_list.elements == max_number_of_elements_in_with_clause)
+ {
+ my_error(ER_TOO_MANY_DEFINITIONS_IN_WITH_CLAUSE, MYF(0));
+ return true;
+ }
+ elem->owner= this;
+ elem->number= with_list.elements;
+ elem->spec->with_element= elem;
+ with_list.link_in_list(elem, &elem->next);
+ return false;
+}
+
+
+/**
+ @brief
+ Check dependencies between tables defined in a list of with clauses
+
+ @param
+ with_clauses_list Pointer to the first clause in the list
+
+ @details
+ For each with clause from the given list the procedure finds all
+ dependencies between tables defined in the clause by calling the
+ method With_clause::checked_dependencies.
+ Additionally, based on the info collected by this method the procedure
+ finds anchors for each recursive definition and moves them at the head
+ of the definition.
+
+ @retval
+ false on success
+ true on failure
+*/
+
+bool check_dependencies_in_with_clauses(With_clause *with_clauses_list)
+{
+ for (With_clause *with_clause= with_clauses_list;
+ with_clause;
+ with_clause= with_clause->next_with_clause)
+ {
+ if (with_clause->check_dependencies())
+ return true;
+ if (with_clause->check_anchors())
+ return true;
+ with_clause->move_anchors_ahead();
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Check dependencies between tables defined in this with clause
+
+ @details
+ The method performs the following for this with clause:
+ - checks that there are no definitions of the tables with the same name
+ - for each table T defined in this with clause looks for the tables
+ from the same with clause that are used in the query that specifies T
+ and set the dependencies of T on these tables in a bitmap.
+ - builds the transitive closure of the above direct dependencies
+ to find out all recursive definitions.
+
+ @retval
+ true if an error is reported
+ false otherwise
+*/
+
+bool With_clause::check_dependencies()
+{
+ if (dependencies_are_checked)
+ return false;
+ /*
+ Look for definitions with the same query name.
+ When found report an error and return true immediately.
+ For each table T defined in this with clause look for all other tables
+ from the same with clause that are used in the specification of T.
+ For each such table set the dependency bit in the dependency map of
+ the with element for T.
+ */
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ for (With_element *elem= with_list.first;
+ elem != with_elem;
+ elem= elem->next)
+ {
+ if (my_strcasecmp(system_charset_info, with_elem->query_name->str,
+ elem->query_name->str) == 0)
+ {
+ my_error(ER_DUP_QUERY_NAME, MYF(0), with_elem->query_name->str);
+ return true;
+ }
+ }
+ if (with_elem->check_dependencies_in_spec())
+ return true;
+ }
+ /* Build the transitive closure of the direct dependencies found above */
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ with_elem->derived_dep_map= with_elem->base_dep_map;
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ table_map with_elem_map= with_elem->get_elem_map();
+ for (With_element *elem= with_list.first; elem; elem= elem->next)
+ {
+ if (elem->derived_dep_map & with_elem_map)
+ elem->derived_dep_map |= with_elem->derived_dep_map;
+ }
+ }
+
+ /*
+ Mark those elements where tables are defined with direct or indirect
+ recursion.
+ */
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ if (with_elem->derived_dep_map & with_elem->get_elem_map())
+ with_elem->is_recursive= true;
+ }
+
+ dependencies_are_checked= true;
+ return false;
+}
+
+
+/*
+ This structure describes an element of the stack of embedded units.
+ The stack is used when looking for a definition of a table in
+ with clauses. The definition can be found only in the scopes
+ of the with clauses attached to the units from the stack.
+ The with clauses are looked through from starting from the top
+ element of the stack.
+*/
+
+struct st_unit_ctxt_elem
+{
+ st_unit_ctxt_elem *prev; // the previous element of the stack
+ st_select_lex_unit *unit;
+};
+
+
+/**
+ @brief
+ Find the dependencies of this element on its siblings in its specification
+
+ @details
+ For each table reference ref(T) from the FROM list of every select sl
+ immediately contained in the specification query of this element this
+ method searches for the definition of T in the with clause which
+ this element belongs to. If such definition is found then the dependency
+ on it is set in sl->with_dep and in this->base_dep_map.
+*/
+
+bool With_element::check_dependencies_in_spec()
+{
+ for (st_select_lex *sl= spec->first_select(); sl; sl= sl->next_select())
+ {
+ st_unit_ctxt_elem ctxt0= {NULL, owner->owner};
+ st_unit_ctxt_elem ctxt1= {&ctxt0, spec};
+ check_dependencies_in_select(sl, &ctxt1, false, &sl->with_dep);
+ base_dep_map|= sl->with_dep;
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Search for the definition of a table among the elements of this with clause
+
+ @param table The reference to the table that is looked for
+ @param barrier The barrier with element for the search
+
+ @details
+ The function looks through the elements of this with clause trying to find
+ the definition of the given table. When it encounters the element with
+ the same query name as the table's name it returns this element. If no
+ such definitions are found the function returns NULL.
+
+ @retval
+ found with element if the search succeeded
+ NULL - otherwise
+*/
+
+With_element *With_clause::find_table_def(TABLE_LIST *table,
+ With_element *barrier)
+{
+ for (With_element *with_elem= with_list.first;
+ with_elem != barrier;
+ with_elem= with_elem->next)
+ {
+ if (my_strcasecmp(system_charset_info, with_elem->query_name->str,
+ table->table_name) == 0 &&
+ !table->is_fqtn)
+ {
+ table->set_derived();
+ return with_elem;
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ @brief
+ Search for the definition of a table in with clauses
+
+ @param tbl The reference to the table that is looked for
+ @param ctxt The context describing in what with clauses of the upper
+ levels the table has to be searched for.
+
+ @details
+ The function looks for the definition of the table tbl in the definitions
+ of the with clauses from the upper levels specified by the parameter ctxt.
+ When it encounters the element with the same query name as the table's name
+ it returns this element. If no such definitions are found the function
+ returns NULL.
+
+ @retval
+ found with element if the search succeeded
+ NULL - otherwise
+*/
+
+With_element *find_table_def_in_with_clauses(TABLE_LIST *tbl,
+ st_unit_ctxt_elem *ctxt)
+{
+ With_element *barrier= NULL;
+ for (st_unit_ctxt_elem *unit_ctxt_elem= ctxt;
+ unit_ctxt_elem;
+ unit_ctxt_elem= unit_ctxt_elem->prev)
+ {
+ st_select_lex_unit *unit= unit_ctxt_elem->unit;
+ With_clause *with_clause= unit->with_clause;
+ if (with_clause &&
+ (tbl->with= with_clause->find_table_def(tbl, barrier)))
+ return tbl->with;
+ barrier= NULL;
+ if (unit->with_element && !unit->with_element->get_owner()->with_recursive)
+ {
+ /*
+ This unit is the specification of the with element unit->with_element.
+ The with element belongs to a with clause without the specifier RECURSIVE.
+ So when searching for the matching definition of tbl this with clause must
+ be looked through only up to this with element
+ */
+ barrier= unit->with_element;
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ @brief
+ Find the dependencies of this element on its siblings in a select
+
+ @param sl The select where to look for the dependencies
+ @param ctxt The structure specifying the scope of the definitions
+ of the with elements of the upper levels
+ @param in_subq if true mark dependencies found in subqueries in
+ this->sq_dep_map
+ @param dep_map IN/OUT The bit where to mark the found dependencies
+
+ @details
+ For each table reference ref(T) from the FROM list of the select sl
+ the method searches in with clauses for the definition of the table T.
+ If the found definition belongs to the same with clause as this with
+ element then the method set dependency on T in the in/out parameter
+ dep_map, add if required - in this->sq_dep_map.
+ The parameter ctxt describes the proper context for the search
+ of the definition of T.
+*/
+
+void With_element::check_dependencies_in_select(st_select_lex *sl,
+ st_unit_ctxt_elem *ctxt,
+ bool in_subq,
+ table_map *dep_map)
+{
+ With_clause *with_clause= sl->get_with_clause();
+ for (TABLE_LIST *tbl= sl->table_list.first; tbl; tbl= tbl->next_local)
+ {
+ if (tbl->derived || tbl->nested_join)
+ continue;
+ tbl->with_internal_reference_map= 0;
+ /*
+ If there is a with clause attached to the unit containing sl
+ look first for the definition of tbl in this with clause.
+ If such definition is not found there look in the with
+ clauses of the upper levels.
+ If the definition of tbl is found somewhere in with clauses
+ then tbl->with is set to point to this definition
+ */
+ if (with_clause && !tbl->with)
+ tbl->with= with_clause->find_table_def(tbl, NULL);
+ if (!tbl->with)
+ tbl->with= find_table_def_in_with_clauses(tbl, ctxt);
+
+ if (tbl->with && tbl->with->owner== this->owner)
+ {
+ /*
+ The found definition T of tbl belongs to the same
+ with clause as this with element. In this case:
+ - set the dependence on T in the bitmap dep_map
+ - set tbl->with_internal_reference_map with
+ the bitmap for this definition
+ - set the dependence on T in the bitmap this->sq_dep_map
+ if needed
+ */
+ *dep_map|= tbl->with->get_elem_map();
+ tbl->with_internal_reference_map= get_elem_map();
+ if (in_subq)
+ sq_dep_map|= tbl->with->get_elem_map();
+ else
+ top_level_dep_map|= tbl->with->get_elem_map();
+ }
+ }
+ /* Now look for the dependencies in the subqueries of sl */
+ st_select_lex_unit *inner_unit= sl->first_inner_unit();
+ for (; inner_unit; inner_unit= inner_unit->next_unit())
+ {
+ if (!inner_unit->with_element)
+ check_dependencies_in_unit(inner_unit, ctxt, in_subq, dep_map);
+ }
+}
+
+
+/**
+ @brief
+ Find a recursive reference to this with element in subqueries of a select
+
+ @param sel The select in whose subqueries the reference
+ to be looked for
+
+ @details
+ The function looks for a recursive reference to this with element in
+ subqueries of select sl. When the first such reference is found
+ it is returned as the result.
+ The function assumes that the identification of all CTE references
+ has been performed earlier.
+
+ @retval
+ Pointer to the found recursive reference if the search succeeded
+ NULL - otherwise
+*/
+
+TABLE_LIST *With_element::find_first_sq_rec_ref_in_select(st_select_lex *sel)
+{
+ TABLE_LIST *rec_ref= NULL;
+ st_select_lex_unit *inner_unit= sel->first_inner_unit();
+ for (; inner_unit; inner_unit= inner_unit->next_unit())
+ {
+ st_select_lex *sl= inner_unit->first_select();
+ for (; sl; sl= sl->next_select())
+ {
+ for (TABLE_LIST *tbl= sl->table_list.first; tbl; tbl= tbl->next_local)
+ {
+ if (tbl->derived || tbl->nested_join)
+ continue;
+ if (tbl->with && tbl->with->owner== this->owner &&
+ (tbl->with_internal_reference_map & mutually_recursive))
+ {
+ rec_ref= tbl;
+ return rec_ref;
+ }
+ }
+ if ((rec_ref= find_first_sq_rec_ref_in_select(sl)))
+ return rec_ref;
+ }
+ }
+ return 0;
+}
+
+
+/**
+ @brief
+ Find the dependencies of this element on its siblings in a unit
+
+ @param unit The unit where to look for the dependencies
+ @param ctxt The structure specifying the scope of the definitions
+ of the with elements of the upper levels
+ @param in_subq if true mark dependencies found in subqueries in
+ this->sq_dep_map
+ @param dep_map IN/OUT The bit where to mark the found dependencies
+
+ @details
+ This method searches in the unit 'unit' for the references in FROM
+ lists of all selects contained in this unit and in the with clause
+ attached to this unit that refer to definitions of tables from the
+ same with clause as this element.
+ If such definitions are found then the dependencies on them are
+ set in the in/out parameter dep_map and optionally in this->sq_dep_map.
+ The parameter ctxt describes the proper context for the search.
+*/
+
+void With_element::check_dependencies_in_unit(st_select_lex_unit *unit,
+ st_unit_ctxt_elem *ctxt,
+ bool in_subq,
+ table_map *dep_map)
+{
+ /* First scan the with clause attached to this unit, if there is one */
+ if (unit->with_clause)
+ check_dependencies_in_with_clause(unit->with_clause, ctxt, in_subq, dep_map);
+ /* unit->item is set when the unit is a subquery: from here on we are in one */
+ in_subq |= unit->item != NULL;
+ /* Push this unit onto the context chain used for name resolution scope */
+ st_unit_ctxt_elem unit_ctxt_elem= {ctxt, unit};
+ st_select_lex *sl= unit->first_select();
+ for (; sl; sl= sl->next_select())
+ {
+ check_dependencies_in_select(sl, &unit_ctxt_elem, in_subq, dep_map);
+ }
+}
+
+
+/**
+ @brief
+ Find the dependencies of this element on its siblings in a with clause
+
+ @param with_clause The with clause where to look for the dependencies
+ @param ctxt The structure specifying the scope of the definitions
+ of the with elements of the upper levels
+ @param in_sbq if true mark dependencies found in subqueries in
+ this->sq_dep_map
+ @param dep_map IN/OUT The bit where to mark the found dependencies
+
+ @details
+ This method searches in the with_clause for the references in FROM
+ lists of all selects contained in the specifications of the with elements
+ from this with_clause that refer to definitions of tables from the
+ same with clause as this element.
+ If such definitions are found then the dependencies on them are
+ set in the in/out parameter dep_map and optionally in this->sq_dep_map.
+ The parameter ctxt describes the proper context for the search.
+*/
+
+void
+With_element::check_dependencies_in_with_clause(With_clause *with_clause,
+ st_unit_ctxt_elem *ctxt,
+ bool in_subq,
+ table_map *dep_map)
+{
+ /* Scan the specification unit of every element of the given with clause */
+ for (With_element *with_elem= with_clause->with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ check_dependencies_in_unit(with_elem->spec, ctxt, in_subq, dep_map);
+ }
+}
+
+
+/**
+ @brief
+ Find mutually recursive with elements and check that they have anchors
+
+ @details
+ This method performs the following:
+ - for each recursive with element finds all mutually recursive with it
+ - links each group of mutually recursive with elements into a ring chain
+ - checks that every group of mutually recursive with elements contains
+ at least one anchor
+ - checks that after removing any with element with anchor the remaining
+ with elements mutually recursive with the removed one are not recursive
+ anymore
+
+ @retval
+ true if an error is reported
+ false otherwise
+*/
+
+bool With_clause::check_anchors()
+{
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ if (!with_elem->is_recursive)
+ continue;
+
+ /*
+ If with_elem is a recursive with element, find all elements mutually
+ recursive with it (any recursive element is mutually recursive with
+ itself). Mark all these elements in the bitmap->mutually_recursive.
+ Also link all these elements into a ring chain.
+ */
+ if (!with_elem->next_mutually_recursive)
+ {
+ With_element *last_mutually_recursive= with_elem;
+ table_map with_elem_dep= with_elem->derived_dep_map;
+ table_map with_elem_map= with_elem->get_elem_map();
+ for (With_element *elem= with_elem; elem; elem= elem->next)
+ {
+ if (!elem->is_recursive)
+ continue;
+
+ /*
+ 'elem' is mutually recursive with 'with_elem' when each depends
+ (possibly indirectly) on the other
+ */
+ if (elem == with_elem ||
+ ((elem->derived_dep_map & with_elem_map) &&
+ (with_elem_dep & elem->get_elem_map())))
+ {
+ elem->next_mutually_recursive= with_elem;
+ last_mutually_recursive->next_mutually_recursive= elem;
+ last_mutually_recursive= elem;
+ with_elem->mutually_recursive|= elem->get_elem_map();
+ }
+ }
+ /* Propagate the group's bitmap to every member of the ring chain */
+ for (With_element *elem= with_elem->next_mutually_recursive;
+ elem != with_elem;
+ elem= elem->next_mutually_recursive)
+ elem->mutually_recursive= with_elem->mutually_recursive;
+ }
+
+ /*
+ For each select from the specification of 'with_elem' check whether
+ it is an anchor i.e. does not depend on any with elements mutually
+ recursive with 'with_elem'.
+ */
+ for (st_select_lex *sl= with_elem->spec->first_select();
+ sl;
+ sl= sl->next_select())
+ {
+ if (with_elem->is_anchor(sl))
+ {
+ with_elem->with_anchor= true;
+ break;
+ }
+ }
+ }
+
+ /*
+ Check that for any group of mutually recursive with elements
+ - there is at least one anchor
+ - after removing any with element with anchor the remaining with elements
+ mutually recursive with the removed one are not recursive anymore
+ */
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ if (!with_elem->is_recursive)
+ continue;
+
+ if (!with_elem->with_anchor)
+ {
+ /*
+ Check that the other with elements mutually recursive with 'with_elem'
+ contain at least one anchor.
+ */
+ With_element *elem= with_elem;
+ while ((elem= elem->get_next_mutually_recursive()) != with_elem)
+ {
+ if (elem->with_anchor)
+ break;
+ }
+ /* Full circle traversed without finding an anchor: report an error */
+ if (elem == with_elem)
+ {
+ my_error(ER_RECURSIVE_WITHOUT_ANCHORS, MYF(0),
+ with_elem->query_name->str);
+ return true;
+ }
+ }
+ else
+ {
+ /* 'with_elem' is a with element with an anchor */
+ With_element *elem= with_elem;
+ /*
+ For the other with elements mutually recursive with 'with_elem'
+ set dependency bits between those elements in the field work_dep_map
+ and build transitive closure of these dependencies
+ */
+ while ((elem= elem->get_next_mutually_recursive()) != with_elem)
+ elem->work_dep_map= elem->base_dep_map & elem->mutually_recursive;
+ elem= with_elem;
+ while ((elem= elem->get_next_mutually_recursive()) != with_elem)
+ {
+ table_map elem_map= elem->get_elem_map();
+ With_element *el= with_elem;
+ while ((el= el->get_next_mutually_recursive()) != with_elem)
+ {
+ if (el->work_dep_map & elem_map)
+ el->work_dep_map|= elem->work_dep_map;
+ }
+ }
+ /* If the transitive closure displays any cycle report an error */
+ elem= with_elem;
+ while ((elem= elem->get_next_mutually_recursive()) != with_elem)
+ {
+ /* A self-dependency bit in the closure means a remaining cycle */
+ if (elem->work_dep_map & elem->get_elem_map())
+ {
+ my_error(ER_UNACCEPTABLE_MUTUAL_RECURSION, MYF(0),
+ with_elem->query_name->str);
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+
+/**
+ @brief
+ Move anchors at the beginning of the specifications for with elements
+
+ @details
+ This method moves anchors at the beginning of the specifications for
+ all recursive with elements.
+*/
+
+void With_clause::move_anchors_ahead()
+{
+ /* Delegate to each recursive element; non-recursive ones need no anchors */
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ if (with_elem->is_recursive)
+ with_elem->move_anchors_ahead();
+ }
+}
+
+
+/**
+ @brief
+ Move anchors at the beginning of the specification of this with element
+
+ @details
+ If the specification of this with element contains anchors the method
+ moves them at the very beginning of the specification.
+ Additionally for the other selects of the specification if none of them
+ contains a recursive reference to this with element or a mutually recursive
+ one the method looks for the first such reference in the first recursive
+ select and set a pointer to it in this->sq_rec_ref.
+*/
+
+void With_element::move_anchors_ahead()
+{
+ st_select_lex *next_sl;
+ /* 'new_pos' is the position where the next found anchor will be placed */
+ st_select_lex *new_pos= spec->first_select();
+ new_pos->linkage= UNION_TYPE;
+ for (st_select_lex *sl= new_pos; sl; sl= next_sl)
+ {
+ /* Save the successor now: sl may be relinked below */
+ next_sl= sl->next_select();
+ if (is_anchor(sl))
+ {
+ sl->move_node(new_pos);
+ /*
+ If the anchor became the first select of the spec, swap linkage and
+ the ALL modifier with the select previously at the head
+ */
+ if (new_pos == spec->first_select())
+ {
+ enum sub_select_type type= new_pos->linkage;
+ new_pos->linkage= sl->linkage;
+ sl->linkage= type;
+ new_pos->with_all_modifier= sl->with_all_modifier;
+ sl->with_all_modifier= false;
+ }
+ new_pos= sl->next_select();
+ }
+ else if (!sq_rec_ref && no_rec_ref_on_top_level())
+ {
+ /*
+ No top-level recursive reference: remember the first recursive
+ reference found in the subqueries of this (recursive) select
+ */
+ sq_rec_ref= find_first_sq_rec_ref_in_select(sl);
+ DBUG_ASSERT(sq_rec_ref != NULL);
+ }
+ }
+ /* Everything from 'new_pos' onward is the recursive part of the spec */
+ first_recursive= new_pos;
+ spec->first_select()->linkage= DERIVED_TABLE_TYPE;
+}
+
+
+/**
+ @brief
+ Perform context analysis for all unreferenced tables defined in with clause
+
+ @param thd The context of the statement containing this with clause
+
+ @details
+ For each unreferenced table T defined in this with clause the method
+ calls the method With_element::prepare_unreferenced that performs
+ context analysis of the element with the definition of T.
+
+ @retval
+ false If context analysis does not report any error
+ true Otherwise
+*/
+
+bool With_clause::prepare_unreferenced_elements(THD *thd)
+{
+ /* Run context analysis only for elements never referenced in the query */
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ if (!with_elem->is_referenced() && with_elem->prepare_unreferenced(thd))
+ return true;
+ }
+
+ return false;
+}
+
+
+/**
+ @brief
+ Save the specification of the given with table as a string
+
+ @param thd The context of the statement containing this with element
+ @param spec_start The beginning of the specification in the input string
+ @param spec_end The end of the specification in the input string
+ @param spec_offset The offset of the specification in the input string
+
+ @details
+ The method creates a string copy of the specification used in this
+ element. The method is called when the element is parsed. The copy may be
+ used to create clones of the specification whenever they are needed.
+
+ @retval
+ false on success
+ true on failure
+*/
+
+bool With_element::set_unparsed_spec(THD *thd, char *spec_start, char *spec_end,
+ uint spec_offset)
+{
+ stmt_prepare_mode= thd->m_parser_state->m_lip.stmt_prepare_mode;
+ unparsed_spec.length= spec_end - spec_start;
+ /*
+ In statement prepare mode, or outside a stored routine, the input string
+ outlives this element, so the specification can be referenced in place.
+ Otherwise a private, null-terminated copy is taken from the THD mem root.
+ */
+ if (stmt_prepare_mode || !thd->lex->sphead)
+ unparsed_spec.str= spec_start;
+ else
+ {
+ unparsed_spec.str= (char*) thd->memdup(spec_start, unparsed_spec.length+1);
+ /*
+ Guard the write: memdup() returns NULL on OOM and the original code
+ dereferenced the result before the NULL check below, crashing instead
+ of reporting ER_OUTOFMEMORY
+ */
+ if (unparsed_spec.str)
+ unparsed_spec.str[unparsed_spec.length]= '\0';
+ }
+ unparsed_spec_offset= spec_offset;
+
+ /* Report the allocation failure through the regular error path */
+ if (!unparsed_spec.str)
+ {
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
+ static_cast<int>(unparsed_spec.length));
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Create a clone of the specification for the given with table
+
+ @param thd The context of the statement containing this with element
+ @param with_table The reference to the table defined in this element for which
+ the clone is created.
+
+ @details
+ The method creates a clone of the specification used in this element.
+ The clone is created for the given reference to the table defined by
+ this element.
+ The clone is created when the string with the specification saved in
+ unparsed_spec is fed into the parser as an input string. The parsing
+ this string a unit object representing the specification is build.
+ A chain of all table references occurred in the specification is also
+ formed.
+ The method includes the new unit and its sub-unit into hierarchy of
+ the units of the main query. I also insert the constructed chain of the
+ table references into the chain of all table references of the main query.
+
+ @note
+ Clones is created only for not first references to tables defined in
+ the with clause. They are necessary for merged specifications because
+ the optimizer handles any such specification as independent on the others.
+ When a table defined in the with clause is materialized in a temporary table
+ one could do without specification clones. However in this case they
+ are created as well, because currently different table references to a
+ the same temporary table cannot share the same definition structure.
+
+ @retval
+ pointer to the built clone if succeeds
+ NULL - otherwise
+*/
+
+st_select_lex_unit *With_element::clone_parsed_spec(THD *thd,
+ TABLE_LIST *with_table)
+{
+ LEX *lex;
+ st_select_lex_unit *res= NULL;
+ Query_arena backup;
+ /* Allocate the clone on the statement arena so it survives re-execution */
+ Query_arena *arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ if (!(lex= (LEX*) new(thd->mem_root) st_lex_local))
+ {
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ return res;
+ }
+ LEX *old_lex= thd->lex;
+ thd->lex= lex;
+
+ bool parse_status= false;
+ Parser_state parser_state;
+ TABLE_LIST *spec_tables;
+ TABLE_LIST *spec_tables_tail;
+ st_select_lex *with_select;
+
+ /* Temporarily null-terminate the saved spec text for the parser */
+ char save_end= unparsed_spec.str[unparsed_spec.length];
+ unparsed_spec.str[unparsed_spec.length]= '\0';
+ if (parser_state.init(thd, unparsed_spec.str, unparsed_spec.length))
+ goto err;
+ parser_state.m_lip.stmt_prepare_mode= stmt_prepare_mode;
+ parser_state.m_lip.multi_statements= false;
+ parser_state.m_lip.m_digest= NULL;
+
+ lex_start(thd);
+ lex->clone_spec_offset= unparsed_spec_offset;
+ /* Inherit the stored-routine context of the statement being cloned for */
+ lex->param_list= old_lex->param_list;
+ lex->sphead= old_lex->sphead;
+ lex->spname= old_lex->spname;
+ lex->spcont= old_lex->spcont;
+ lex->sp_chistics= old_lex->sp_chistics;
+
+ lex->stmt_lex= old_lex;
+ with_select= &lex->select_lex;
+ with_select->select_number= ++thd->lex->stmt_lex->current_select_number;
+ parse_status= parse_sql(thd, &parser_state, 0);
+ /* Restore the byte overwritten with the temporary terminator */
+ unparsed_spec.str[unparsed_spec.length]= save_end;
+
+ if (parse_status)
+ goto err;
+
+ if (check_dependencies_in_with_clauses(lex->with_clauses_list))
+ goto err;
+
+ spec_tables= lex->query_tables;
+ spec_tables_tail= 0;
+ /* Open temporary tables referenced by the clone and find the chain tail */
+ for (TABLE_LIST *tbl= spec_tables;
+ tbl;
+ tbl= tbl->next_global)
+ {
+ if (!tbl->derived && !tbl->schema_table &&
+ thd->open_temporary_table(tbl))
+ goto err;
+ spec_tables_tail= tbl;
+ }
+ if (check_table_access(thd, SELECT_ACL, spec_tables, FALSE, UINT_MAX, FALSE))
+ goto err;
+ /* Splice the clone's table chain into the main query's global table list */
+ if (spec_tables)
+ {
+ if (with_table->next_global)
+ {
+ spec_tables_tail->next_global= with_table->next_global;
+ with_table->next_global->prev_global= &spec_tables_tail->next_global;
+ }
+ else
+ {
+ old_lex->query_tables_last= &spec_tables_tail->next_global;
+ }
+ spec_tables->prev_global= &with_table->next_global;
+ with_table->next_global= spec_tables;
+ }
+ res= &lex->unit;
+
+ /* Include the cloned unit and its selects into the main query hierarchy */
+ lex->unit.include_down(with_table->select_lex);
+ lex->unit.set_slave(with_select);
+ old_lex->all_selects_list=
+ (st_select_lex*) (lex->all_selects_list->
+ insert_chain_before(
+ (st_select_lex_node **) &(old_lex->all_selects_list),
+ with_select));
+ /*
+ NOTE(review): check_dependencies_in_with_clauses was already called right
+ after parsing above; presumably this second call re-checks after the clone
+ was linked into the main query -- verify whether both calls are needed
+ */
+ if (check_dependencies_in_with_clauses(lex->with_clauses_list))
+ res= NULL;
+ lex->sphead= NULL; // in order not to delete lex->sphead
+ lex_end(lex);
+err:
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ thd->lex= old_lex;
+ return res;
+}
+
+
+/**
+ @brief
+ Rename columns of the unit derived from the spec of this with element
+ @param thd The context of the statement containing the with element
+ @param unit The specification of the with element or its clone
+
+ @details
+ The method assumes that the parameter unit is either the specification
+ itself of this with element or a clone of this specification. It looks
+ through the column list in this with element. It reports an error if the
+ cardinality of this list differs from the cardinality of select lists
+ in 'unit'.
+ Otherwise it renames the columns of the first select list and sets the
+ flag unit->columns_are_renamed to true, preventing renaming columns for the
+
+ @retval
+ true if an error was reported
+ false otherwise
+*/
+
+bool
+With_element::rename_columns_of_derived_unit(THD *thd,
+ st_select_lex_unit *unit)
+{
+ /* Rename only once per unit: clones may pass through here repeatedly */
+ if (unit->columns_are_renamed)
+ return false;
+
+ st_select_lex *select= unit->first_select();
+
+ if (column_list.elements) // The column list is optional
+ {
+ List_iterator_fast<Item> it(select->item_list);
+ List_iterator_fast<LEX_STRING> nm(column_list);
+ Item *item;
+ LEX_STRING *name;
+
+ /* The explicit column list must match the select list in cardinality */
+ if (column_list.elements != select->item_list.elements)
+ {
+ my_error(ER_WITH_COL_WRONG_LIST, MYF(0));
+ return true;
+ }
+
+ Query_arena *arena, backup;
+ arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ /* Rename the columns of the first select in the unit */
+ while ((item= it++, name= nm++))
+ {
+ item->set_name(thd, name->str, (uint) name->length, system_charset_info);
+ item->is_autogenerated_name= false;
+ }
+
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ }
+ else
+ make_valid_column_names(thd, select->item_list);
+
+ unit->columns_are_renamed= true;
+
+ return false;
+}
+
+
+/**
+ @brief
+ Perform context analysis the definition of an unreferenced table
+
+ @param thd The context of the statement containing this with element
+
+ @details
+ The method assumes that this with element contains the definition
+ of a table that is not used anywhere. In this case one has to check
+ that context conditions are met.
+
+ @retval
+ true if an error was reported
+ false otherwise
+*/
+
+bool With_element::prepare_unreferenced(THD *thd)
+{
+ bool rc= false;
+ st_select_lex *first_sl= spec->first_select();
+
+ /* Prevent name resolution for field references out of with elements */
+ for (st_select_lex *sl= first_sl;
+ sl;
+ sl= sl->next_select())
+ sl->context.outer_context= 0;
+
+ /* Restrict preparation to context analysis of the derived table spec */
+ thd->lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED;
+ if (!spec->prepared &&
+ (spec->prepare(thd, 0, 0) ||
+ rename_columns_of_derived_unit(thd, spec) ||
+ check_duplicate_names(thd, first_sl->item_list, 1)))
+ rc= true;
+
+ thd->lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
+ return rc;
+}
+
+
+/*
+ A select is an anchor of this element's specification if it does not
+ depend on any with element mutually recursive with this one
+*/
+bool With_element::is_anchor(st_select_lex *sel)
+{
+ return !(mutually_recursive & sel->with_dep);
+}
+
+
+/**
+ @brief
+ Search for the definition of the given table referred in this select node
+
+ @param table reference to the table whose definition is searched for
+
+ @details
+ The method looks for the definition of the table whose reference is occurred
+ in the FROM list of this select node. First it searches for it in the
+ with clause attached to the unit this select node belongs to. If such a
+ definition is not found then the embedding units are looked through.
+
+ @retval
+ pointer to the found definition if the search has been successful
+ NULL - otherwise
+*/
+
+With_element *st_select_lex::find_table_def_in_with_clauses(TABLE_LIST *table)
+{
+ With_element *found= NULL;
+ st_select_lex_unit *master_unit;
+ st_select_lex *outer_sl;
+ /* Climb from this select through its enclosing selects */
+ for (st_select_lex *sl= this; sl; sl= outer_sl)
+ {
+ /*
+ If sl->master_unit() is the spec of a with element then the search for
+ a definition was already done by With_element::check_dependencies_in_spec
+ and it was unsuccessful. Yet for units cloned from the spec it has not
+ been done yet.
+ */
+ With_clause *attached_with_clause= sl->get_with_clause();
+ if (attached_with_clause &&
+ (found= attached_with_clause->find_table_def(table, NULL)))
+ break;
+ master_unit= sl->master_unit();
+ outer_sl= master_unit->outer_select();
+ With_element *with_elem= sl->get_with_element();
+ if (with_elem)
+ {
+ With_clause *containing_with_clause= with_elem->get_owner();
+ /*
+ Without WITH RECURSIVE only elements defined before this one are
+ visible, hence the barrier; with it the whole clause is searched
+ */
+ With_element *barrier= containing_with_clause->with_recursive ?
+ NULL : with_elem;
+ if ((found= containing_with_clause->find_table_def(table, barrier)))
+ break;
+ if (outer_sl && !outer_sl->get_with_element())
+ break;
+ }
+ /* Do not look for the table's definition beyond the scope of the view */
+ if (master_unit->is_view)
+ break;
+ }
+ return found;
+}
+
+
+/**
+ @brief
+ Set the specifying unit in this reference to a with table
+
+ @details
+ The method assumes that the given element with_elem defines the table T
+ this table reference refers to.
+ If this is the first reference to T the method just sets its specification
+ in the field 'derived' as the unit that yields T. Otherwise the method
+ first creates a clone specification and sets rather this clone in this field.
+
+ @retval
+ false on success
+ true on failure
+*/
+
+bool TABLE_LIST::set_as_with_table(THD *thd, With_element *with_elem)
+{
+ if (table)
+ {
+ /*
+ This table was prematurely identified as a temporary table.
+ We correct it here, but it's not a nice solution in the case
+ when the temporary table with this name is not used anywhere
+ else in the query.
+ */
+ thd->mark_tmp_table_as_free_for_reuse(table);
+ table= 0;
+ }
+ with= with_elem;
+ schema_table= NULL;
+ /* First reference (or any recursive one) uses the spec itself */
+ if (!with_elem->is_referenced() || with_elem->is_recursive)
+ {
+ derived= with_elem->spec;
+ if (derived != select_lex->master_unit() &&
+ !is_with_table_recursive_reference())
+ {
+ derived->move_as_slave(select_lex);
+ }
+ }
+ else
+ {
+ /* Subsequent non-recursive references get a clone of the spec */
+ if(!(derived= with_elem->clone_parsed_spec(thd, this)))
+ return true;
+ }
+ derived->first_select()->linkage= DERIVED_TABLE_TYPE;
+ with_elem->inc_references();
+ return false;
+}
+
+
+/* True if this table reference points to a recursive CTE definition */
+bool TABLE_LIST::is_recursive_with_table()
+{
+ return with && with->is_recursive;
+}
+
+
+/*
+ A reference to a with table T is recursive if it occurs somewhere
+ in the query specifying T or in the query specifying one of the tables
+ mutually recursive with T.
+*/
+
+bool TABLE_LIST::is_with_table_recursive_reference()
+{
+ /* Recursive iff this internal reference lies inside a spec of its own
+ mutually recursive group */
+ return (with_internal_reference_map &&
+ (with->get_mutually_recursive() & with_internal_reference_map));
+}
+
+
+/*
+ Specifications of with tables with recursive table references
+ in non-mergeable derived tables are not allowed in this
+ implementation.
+*/
+
+
+/*
+ We say that the specification of a with table T is restricted
+ if all below is true.
+ 1. Any immediate select of the specification contains at most one
+ recursive table reference taking into account table references
+ from mergeable derived tables.
+ 2. Any recursive table reference is not an inner operand of an
+ outer join operation used in an immediate select of the
+ specification.
+ 3. Any immediate select from the specification of T does not
+ contain aggregate functions.
+ 4. The specification of T does not contain recursive table references.
+
+ If the specification of T is not restricted we call the corresponding
+ with element unrestricted.
+
+ The SQL standards allows only with elements with restricted specification.
+ By default we comply with the standards here.
+
+ Yet we allow unrestricted specification if the status variable
+ 'standards_compliant_cte' set to 'off'(0).
+*/
+
+
+/**
+ @brief
+ Check if this select makes the including specification unrestricted
+
+ @param
+ only_standards_compliant true if the system variable
+ 'standards_compliant_cte' is set to 'on'
+ @details
+ This method checks whether the conditions 1-4 (see the comment above)
+ are satisfied for this select. If not then mark this element as
+ unrestricted and report an error if 'only_standards_compliant' is true.
+
+ @retval
+ true if an error is reported
+ false otherwise
+*/
+
+bool st_select_lex::check_unrestricted_recursive(bool only_standard_compliant)
+{
+ With_element *with_elem= get_with_element();
+ if (!with_elem ||!with_elem->is_recursive)
+ {
+ /*
+ If this select is not from the specification of a with element or
+ if this is not a recursive with element then there is nothing to check.
+ */
+ return false;
+ }
+
+ /* Check conditions 1-2 for restricted specification*/
+ table_map unrestricted= 0;
+ table_map encountered= 0;
+ if (with_elem->check_unrestricted_recursive(this,
+ unrestricted,
+ encountered))
+ return true;
+ with_elem->get_owner()->add_unrestricted(unrestricted);
+
+
+ /* Check conditions 3-4 for restricted specification*/
+ if ((with_sum_func && !with_elem->is_anchor(this)) ||
+ (with_elem->contains_sq_with_recursive_reference()))
+ with_elem->get_owner()->add_unrestricted(
+ with_elem->get_mutually_recursive());
+
+ /* Report an error on unrestricted specification if this is required */
+ if (only_standard_compliant && with_elem->is_unrestricted())
+ {
+ my_error(ER_NOT_STANDARD_COMPLIANT_RECURSIVE,
+ MYF(0), with_elem->query_name->str);
+ return true;
+ }
+
+ return false;
+}
+
+
+/**
+ @brief
+ Check if a select from the spec of this with element is partially restricted
+
+ @param
+ sel select from the specification of this element where to check
+ whether conditions 1-2 are satisfied
+ unrestricted IN/OUT bitmap where to mark unrestricted specs
+ encountered IN/OUT bitmap where to mark encountered recursive references
+ @details
+ This method checks whether the conditions 1-2 (see the comment above)
+ are satisfied for the select sel.
+ This method is called recursively for derived tables.
+
+ @retval
+ true if an error is reported
+ false otherwise
+*/
+
+bool With_element::check_unrestricted_recursive(st_select_lex *sel,
+ table_map &unrestricted,
+ table_map &encountered)
+{
+ /* Check conditions 1 for restricted specification*/
+ List_iterator<TABLE_LIST> ti(sel->leaf_tables);
+ TABLE_LIST *tbl;
+ while ((tbl= ti++))
+ {
+ st_select_lex_unit *unit= tbl->get_unit();
+ if (unit)
+ {
+ /* Recurse into derived tables merged into this select */
+ if(!tbl->is_with_table())
+ {
+ if (check_unrestricted_recursive(unit->first_select(),
+ unrestricted,
+ encountered))
+ return true;
+ }
+ /* Only recursive references from the same with clause matter here */
+ if (!(tbl->is_recursive_with_table() && unit->with_element->owner == owner))
+ continue;
+ With_element *with_elem= unit->with_element;
+ /* A repeated reference to the same group makes the group unrestricted */
+ if (encountered & with_elem->get_elem_map())
+ unrestricted|= with_elem->mutually_recursive;
+ else if (with_elem ==this)
+ encountered|= with_elem->get_elem_map();
+ }
+ }
+ for (With_element *with_elem= owner->with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ if (!with_elem->is_recursive && (unrestricted & with_elem->get_elem_map()))
+ continue;
+ if (encountered & with_elem->get_elem_map())
+ {
+ uint cnt= 0;
+ table_map encountered_mr= encountered & with_elem->mutually_recursive;
+ /*
+ Count the encountered members of the group by scanning the bitmap;
+ two or more recursive references violate condition 1
+ */
+ for (table_map map= encountered_mr >> with_elem->number;
+ map != 0;
+ map>>= 1)
+ {
+ if (map & 1)
+ {
+ if (cnt)
+ {
+ unrestricted|= with_elem->mutually_recursive;
+ break;
+ }
+ else
+ cnt++;
+ }
+ }
+ }
+ }
+
+
+ /* Check conditions 2 for restricted specification*/
+ ti.rewind();
+ while ((tbl= ti++))
+ {
+ if (!tbl->is_with_table_recursive_reference())
+ continue;
+ /* A recursive reference must not be an inner operand of an outer join */
+ for (TABLE_LIST *tab= tbl; tab; tab= tab->embedding)
+ {
+ if (tab->outer_join & (JOIN_TYPE_LEFT | JOIN_TYPE_RIGHT))
+ {
+ unrestricted|= mutually_recursive;
+ break;
+ }
+ }
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Check subqueries with recursive table references from FROM list of this select
+
+ @details
+ For each recursive table reference from the FROM list of this select
+ this method checks:
+ - whether this reference is within a materialized derived table and
+ if so it report an error
+ - whether this reference is within a subquery and if so it set a flag
+ in this subquery that disallows some optimization strategies for
+ this subquery.
+
+ @retval
+ true if an error is reported
+ false otherwise
+*/
+
+bool st_select_lex::check_subqueries_with_recursive_references()
+{
+ st_select_lex_unit *sl_master= master_unit();
+ List_iterator<TABLE_LIST> ti(leaf_tables);
+ TABLE_LIST *tbl;
+ while ((tbl= ti++))
+ {
+ /* Only recursive references that sit inside a subquery are of interest */
+ if (!(tbl->is_with_table_recursive_reference() && sl_master->item))
+ continue;
+ With_element *with_elem= tbl->with;
+ bool check_embedding_materialized_derived= true;
+ /* Climb the chain of enclosing selects up to the defining with clause */
+ for (st_select_lex *sl= this; sl; sl= sl_master->outer_select())
+ {
+ sl_master= sl->master_unit();
+ /* Stop checking for materialized derived once inside the owner clause */
+ if (with_elem->get_owner() == sl_master->with_clause)
+ check_embedding_materialized_derived= false;
+ if (check_embedding_materialized_derived && !sl_master->with_element &&
+ sl_master->derived && sl_master->derived->is_materialized_derived())
+ {
+ my_error(ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED,
+ MYF(0), with_elem->query_name->str);
+ return true;
+ }
+ if (!sl_master->item)
+ continue;
+ /* Disable subquery optimizations incompatible with recursive refs */
+ Item_subselect *subq= (Item_subselect *) sl_master->item;
+ subq->with_recursive_reference= true;
+ subq->register_as_with_rec_ref(tbl->with);
+ }
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Print this with clause
+
+ @param str Where to print to
+ @param query_type The mode of printing
+
+ @details
+ The method prints a string representation of this clause in the
+ string str. The parameter query_type specifies the mode of printing.
+*/
+
+void With_clause::print(String *str, enum_query_type query_type)
+{
+ /*
+ Any with clause contains just definitions of CTE tables.
+ No data expansion is applied to these definitions.
+ */
+ query_type= (enum_query_type) (query_type | QT_NO_DATA_EXPANSION);
+
+ str->append(STRING_WITH_LEN("with "));
+ if (with_recursive)
+ str->append(STRING_WITH_LEN("recursive "));
+ /* Print the elements in definition order, comma-separated */
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ if (with_elem != with_list.first)
+ str->append(", ");
+ with_elem->print(str, query_type);
+ }
+}
+
+
+/**
+ @brief
+ Print this with element
+
+ @param str Where to print to
+ @param query_type The mode of printing
+
+ @details
+ The method prints a string representation of this with element in the
+ string str. The parameter query_type specifies the mode of printing.
+*/
+
+void With_element::print(String *str, enum_query_type query_type)
+{
+ /* Emits: <name> as ( <specification> ) */
+ str->append(query_name);
+ str->append(STRING_WITH_LEN(" as "));
+ str->append('(');
+ spec->print(str, query_type);
+ str->append(')');
+}
+
+
+/*
+ Create, if not yet created, every temporary table used to materialize
+ the recursive result of this with element.
+ Returns true on instantiation failure, false on success.
+*/
+bool With_element::instantiate_tmp_tables()
+{
+ List_iterator_fast<TABLE> li(rec_result->rec_tables);
+ TABLE *rec_table;
+ while ((rec_table= li++))
+ {
+ if (!rec_table->is_created() &&
+ instantiate_tmp_table(rec_table,
+ rec_table->s->key_info,
+ rec_result->tmp_table_param.start_recinfo,
+ &rec_result->tmp_table_param.recinfo,
+ 0))
+ return true;
+
+ /* Buffer writes and ignore duplicate-key errors while filling the table */
+ rec_table->file->extra(HA_EXTRA_WRITE_CACHE);
+ rec_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ }
+ return false;
+}
+
diff --git a/sql/sql_cte.h b/sql/sql_cte.h
new file mode 100644
index 00000000000..58f371d936b
--- /dev/null
+++ b/sql/sql_cte.h
@@ -0,0 +1,450 @@
+#ifndef SQL_CTE_INCLUDED
+#define SQL_CTE_INCLUDED
+#include "sql_list.h"
+#include "sql_lex.h"
+#include "sql_select.h"
+
+class select_union;
+struct st_unit_ctxt_elem;
+
+
+/**
+ @class With_element
+ @brief Definition of a CTE table
+
+ It contains a reference to the name of the table introduced by this with element,
+ and a reference to the unit that specifies this table. Also it contains
+ a reference to the with clause to which this element belongs.
+*/
+
+class With_element : public Sql_alloc
+{
+private:
+ With_clause *owner; // with clause this object belongs to
+ With_element *next; // next element in the with clause
+ uint number; // number of the element in the with clause (starting from 0)
+ table_map elem_map; // The map where with only one 1 set in this->number
+ /*
+ The map base_dep_map has 1 in the i-th position if the query that
+ specifies this with element contains a reference to the with element number i
+ in the query FROM list.
+ (In this case this with element depends directly on the i-th with element.)
+ */
+ table_map base_dep_map;
+ /*
+ The map derived_dep_map has 1 in i-th position if this with element depends
+ directly or indirectly from the i-th with element.
+ */
+ table_map derived_dep_map;
+ /*
+ The map sq_dep_map has 1 in i-th position if there is a reference to this
+ with element somewhere in subqueries of the specifications of the tables
+ defined in the with clause containing this element;
+ */
+ table_map sq_dep_map;
+ table_map work_dep_map; // dependency map used for work
+ /* Dependency map of with elements mutually recursive with this with element */
+ table_map mutually_recursive;
+ /*
+ Dependency map built only for the top level references i.e. for those that
+ are encountered in from lists of the selects of the specification unit
+ */
+ table_map top_level_dep_map;
+ /*
+ Points to a recursive reference in subqueries.
+ Used only for specifications without recursive references on the top level.
+ */
+ TABLE_LIST *sq_rec_ref;
+ /*
+ The next with element from the circular chain of the with elements
+ mutually recursive with this with element.
+ (If this element is simply recursive then next_mutually_recursive contains
+ the pointer to itself. If it's not recursive then next_mutually_recursive
+ is set to NULL.)
+ */
+ With_element *next_mutually_recursive;
+ /*
+ Total number of references to this element in the FROM lists of
+ the queries that are in the scope of the element (including
+ subqueries and specifications of other with elements).
+ */
+ uint references;
+ /*
+ Unparsed specification of the query that specifies this element.
+ It is used to build clones of the specification if they are needed.
+ */
+ LEX_STRING unparsed_spec;
+ /* Offset of the specification in the input string */
+ uint unparsed_spec_offset;
+
+ /* True if the with element is used in a prepared statement */
+ bool stmt_prepare_mode;
+
+ /* Return the map where 1 is set only in the position for this element */
+ table_map get_elem_map() { return (table_map) 1 << number; }
+
+public:
+ /*
+ The name of the table introduced by this with element. The name
+ can be used in FROM lists of the queries in the scope of the element.
+ */
+ LEX_STRING *query_name;
+ /*
+ Optional list of column names to name the columns of the table introduced
+ by this with element. It is used in the case when the names are not
+ inherited from the query that specified the table. Otherwise the list is
+ always empty.
+ */
+ List <LEX_STRING> column_list;
+ /* The query that specifies the table introduced by this with element */
+ st_select_lex_unit *spec;
+ /*
+ Set to true if recursion is used (directly or indirectly)
+ for the definition of this element
+ */
+ bool is_recursive;
+ /*
+ For a simple recursive CTE: the number of references to the CTE from
+ outside of the CTE specification.
+ For a CTE mutually recursive with other CTEs : the total number of
+ references to all these CTEs outside of their specification.
+ Each of these mutually recursive CTEs has the same value in this field.
+ */
+ uint rec_outer_references;
+ /*
+ Any non-recursive select in the specification of a recursive
+ with element is called an anchor. In the case of mutually recursive
+ elements the specification of some of them may be without any anchor.
+ Yet at least one of them must contain an anchor.
+ All anchors of any recursive specification are moved ahead before
+ the prepare stage.
+ */
+ /* Set to true if this is a recursive element with an anchor */
+ bool with_anchor;
+ /*
+ Set to the first recursive select of the unit specifying the element
+ after all anchors have been moved to the head of the unit.
+ */
+ st_select_lex *first_recursive;
+
+ /*
+ The number of the last performed iteration for recursive table
+ (the number of the initial non-recursive step is 0, the number
+ of the first iteration is 1).
+ */
+ uint level;
+
+ /*
+ The pointer to the object used to materialize this with element
+ if it's recursive. This object is built at the end of prepare
+ stage and is used at the execution stage.
+ */
+ select_union_recursive *rec_result;
+
+ /* List of Item_subselects containing recursive references to this CTE */
+ SQL_I_List<Item_subselect> sq_with_rec_ref;
+
+ With_element(LEX_STRING *name,
+ List <LEX_STRING> list,
+ st_select_lex_unit *unit)
+ : next(NULL), base_dep_map(0), derived_dep_map(0),
+ sq_dep_map(0), work_dep_map(0), mutually_recursive(0),
+ top_level_dep_map(0), sq_rec_ref(NULL),
+ next_mutually_recursive(NULL), references(0),
+ query_name(name), column_list(list), spec(unit),
+ is_recursive(false), rec_outer_references(0), with_anchor(false),
+ level(0), rec_result(NULL)
+ { unit->with_element= this; }
+
+ bool check_dependencies_in_spec();
+
+ void check_dependencies_in_select(st_select_lex *sl, st_unit_ctxt_elem *ctxt,
+ bool in_subq, table_map *dep_map);
+
+ void check_dependencies_in_unit(st_select_lex_unit *unit,
+ st_unit_ctxt_elem *ctxt,
+ bool in_subq,
+ table_map *dep_map);
+
+ void check_dependencies_in_with_clause(With_clause *with_clause,
+ st_unit_ctxt_elem *ctxt,
+ bool in_subq,
+ table_map *dep_map);
+
+ void set_dependency_on(With_element *with_elem)
+ { base_dep_map|= with_elem->get_elem_map(); }
+
+ bool check_dependency_on(With_element *with_elem)
+ { return base_dep_map & with_elem->get_elem_map(); }
+
+ TABLE_LIST *find_first_sq_rec_ref_in_select(st_select_lex *sel);
+
+ bool set_unparsed_spec(THD *thd, char *spec_start, char *spec_end,
+ uint spec_offset);
+
+ st_select_lex_unit *clone_parsed_spec(THD *thd, TABLE_LIST *with_table);
+
+ bool is_referenced() { return references != 0; }
+
+ void inc_references() { references++; }
+
+ bool rename_columns_of_derived_unit(THD *thd, st_select_lex_unit *unit);
+
+ bool prepare_unreferenced(THD *thd);
+
+ bool check_unrestricted_recursive(st_select_lex *sel,
+ table_map &unrestricted,
+ table_map &encountered);
+
+ void print(String *str, enum_query_type query_type);
+
+ With_clause *get_owner() { return owner; }
+
+ bool contains_sq_with_recursive_reference()
+ { return sq_dep_map & mutually_recursive; }
+
+ bool no_rec_ref_on_top_level()
+ { return !(top_level_dep_map & mutually_recursive); }
+
+ table_map get_mutually_recursive() { return mutually_recursive; }
+
+ With_element *get_next_mutually_recursive()
+ { return next_mutually_recursive; }
+
+ TABLE_LIST *get_sq_rec_ref() { return sq_rec_ref; }
+
+ bool is_anchor(st_select_lex *sel);
+
+ void move_anchors_ahead();
+
+ bool is_unrestricted();
+
+ bool is_with_prepared_anchor();
+
+ void mark_as_with_prepared_anchor();
+
+ bool is_cleaned();
+
+ void mark_as_cleaned();
+
+ void reset_recursive_for_exec();
+
+ void cleanup_stabilized();
+
+ void set_as_stabilized();
+
+ bool is_stabilized();
+
+ bool all_are_stabilized();
+
+ bool instantiate_tmp_tables();
+
+ void prepare_for_next_iteration();
+
+ friend class With_clause;
+};
+
+const uint max_number_of_elements_in_with_clause= sizeof(table_map)*8;
+
+/**
+ @class With_clause
+ @brief Set of with_elements
+
+ It has a reference to the first with element from this with clause.
+ This reference makes it possible to navigate through all the elements of the with clause.
+ It contains a reference to the unit to which this with clause is attached.
+ It also contains a flag saying whether this with clause was specified as recursive.
+*/
+
+class With_clause : public Sql_alloc
+{
+private:
+ st_select_lex_unit *owner; // the unit this with clause attached to
+
+ /* The list of all with elements from this with clause */
+ SQL_I_List<With_element> with_list;
+ /*
+ The with clause immediately containing this with clause if there is any,
+ otherwise NULL. Now used only at parsing.
+ */
+ With_clause *embedding_with_clause;
+ /*
+ The next with clause in the chain of with clauses encountered
+ in the current statement
+ */
+ With_clause *next_with_clause;
+ /* Set to true if dependencies between with elements have been checked */
+ bool dependencies_are_checked;
+
+ /*
+ The bitmap of all recursive with elements whose specifications
+ are not complied with restrictions imposed by the SQL standards
+ on recursive specifications.
+ */
+ table_map unrestricted;
+ /*
+ The bitmap of all recursive with elements whose anchors
+ have already been prepared.
+ */
+ table_map with_prepared_anchor;
+ table_map cleaned;
+ /*
+ The bitmap of all recursive with elements that
+ have already been materialized
+ */
+ table_map stabilized;
+
+public:
+ /* If true the specifier RECURSIVE is present in the with clause */
+ bool with_recursive;
+
+ With_clause(bool recursive_fl, With_clause *emb_with_clause)
+ : owner(NULL),
+ embedding_with_clause(emb_with_clause), next_with_clause(NULL),
+ dependencies_are_checked(false), unrestricted(0),
+ with_prepared_anchor(0), cleaned(0), stabilized(0),
+ with_recursive(recursive_fl)
+ { }
+
+ bool add_with_element(With_element *elem);
+
+ /* Add this with clause to the list of with clauses used in the statement */
+ void add_to_list(With_clause ** &last_next)
+ {
+ *last_next= this;
+ last_next= &this->next_with_clause;
+ }
+
+ void set_owner(st_select_lex_unit *unit) { owner= unit; }
+
+ With_clause *pop() { return embedding_with_clause; }
+
+ bool check_dependencies();
+
+ bool check_anchors();
+
+ void move_anchors_ahead();
+
+ With_element *find_table_def(TABLE_LIST *table, With_element *barrier);
+
+ With_element *find_table_def_in_with_clauses(TABLE_LIST *table);
+
+ bool prepare_unreferenced_elements(THD *thd);
+
+ void add_unrestricted(table_map map) { unrestricted|= map; }
+
+ void print(String *str, enum_query_type query_type);
+
+ friend class With_element;
+
+ friend
+ bool
+ check_dependencies_in_with_clauses(With_clause *with_clauses_list);
+};
+
+inline
+bool With_element::is_unrestricted()
+{
+ return owner->unrestricted & get_elem_map();
+}
+
+inline
+
+bool With_element::is_with_prepared_anchor()
+{
+ return owner->with_prepared_anchor & get_elem_map();
+}
+
+inline
+void With_element::mark_as_with_prepared_anchor()
+{
+ owner->with_prepared_anchor|= mutually_recursive;
+}
+
+
+inline
+bool With_element::is_cleaned()
+{
+ return owner->cleaned & get_elem_map();
+}
+
+
+inline
+void With_element::mark_as_cleaned()
+{
+ owner->cleaned|= get_elem_map();
+}
+
+
+inline
+void With_element::reset_recursive_for_exec()
+{
+ DBUG_ASSERT(is_recursive);
+ level= 0;
+ owner->with_prepared_anchor&= ~mutually_recursive;
+ owner->cleaned&= ~get_elem_map();
+ cleanup_stabilized();
+ spec->columns_are_renamed= false;
+}
+
+
+
+inline
+void With_element::cleanup_stabilized()
+{
+ owner->stabilized&= ~mutually_recursive;
+}
+
+
+inline
+void With_element::set_as_stabilized()
+{
+ owner->stabilized|= get_elem_map();
+}
+
+
+inline
+bool With_element::is_stabilized()
+{
+ return owner->stabilized & get_elem_map();
+}
+
+
+inline
+bool With_element::all_are_stabilized()
+{
+ return (owner->stabilized & mutually_recursive) == mutually_recursive;
+}
+
+
+inline
+void With_element::prepare_for_next_iteration()
+{
+ With_element *with_elem= this;
+ while ((with_elem= with_elem->get_next_mutually_recursive()) != this)
+ {
+ TABLE *rec_table= with_elem->rec_result->first_rec_table_to_update;
+ if (rec_table)
+ rec_table->reginfo.join_tab->preread_init_done= false;
+ }
+}
+
+
+inline
+void st_select_lex_unit::set_with_clause(With_clause *with_cl)
+{
+ with_clause= with_cl;
+ if (with_clause)
+ with_clause->set_owner(this);
+}
+
+
+inline
+void st_select_lex::set_with_clause(With_clause *with_clause)
+{
+ master_unit()->with_clause= with_clause;
+ if (with_clause)
+ with_clause->set_owner(master_unit());
+}
+
+#endif /* SQL_CTE_INCLUDED */
diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc
index 69781b5def3..dbe5dd0dce9 100644
--- a/sql/sql_cursor.cc
+++ b/sql/sql_cursor.cc
@@ -277,7 +277,7 @@ int Materialized_cursor::send_result_set_metadata(
{
Send_field send_field;
Item_ident *ident= static_cast<Item_ident *>(item_dst);
- item_org->make_field(&send_field);
+ item_org->make_field(thd, &send_field);
ident->db_name= thd->strdup(send_field.db_name);
ident->table_name= thd->strdup(send_field.table_name);
@@ -433,7 +433,7 @@ void Materialized_cursor::on_table_fill_finished()
bool Select_materialize::send_result_set_metadata(List<Item> &list, uint flags)
{
DBUG_ASSERT(table == 0);
- if (create_result_table(unit->thd, unit->get_unit_column_types(),
+ if (create_result_table(unit->thd, unit->get_column_types(true),
FALSE,
thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS,
"", FALSE, TRUE, TRUE))
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index d7ed82a2ef3..0e554e29380 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2014, Oracle and/or its affiliates.
- Copyright (c) 2009, 2015, MariaDB
+ Copyright (c) 2009, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -338,7 +338,7 @@ static void del_dbopt(const char *path)
static bool write_db_opt(THD *thd, const char *path,
Schema_specification_st *create)
{
- register File file;
+ File file;
char buf[256]; // Should be enough for one option
bool error=1;
@@ -902,7 +902,8 @@ mysql_rm_db_internal(THD *thd,char *db, bool if_exists, bool silent)
{
LEX_STRING db_name= { table->db, table->db_length };
LEX_STRING table_name= { table->table_name, table->table_name_length };
- if (table->open_type == OT_BASE_ONLY || !find_temporary_table(thd, table))
+ if (table->open_type == OT_BASE_ONLY ||
+ !thd->find_temporary_table(table))
(void) delete_statistics_for_table(thd, &db_name, &table_name);
}
}
@@ -1019,7 +1020,7 @@ update_binlog:
These DDL methods and logging are protected with the exclusive
metadata lock on the schema.
*/
- if (write_to_binlog(thd, query, query_pos -1 - query, db, db_len))
+ if (write_to_binlog(thd, query, (uint)(query_pos -1 - query), db, db_len))
{
error= true;
goto exit;
@@ -1037,7 +1038,7 @@ update_binlog:
These DDL methods and logging are protected with the exclusive
metadata lock on the schema.
*/
- if (write_to_binlog(thd, query, query_pos -1 - query, db, db_len))
+ if (write_to_binlog(thd, query, (uint)(query_pos -1 - query), db, db_len))
{
error= true;
goto exit;
@@ -1053,7 +1054,10 @@ exit:
it to 0.
*/
if (thd->db && cmp_db_names(thd->db, db) && !error)
+ {
mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server);
+ SESSION_TRACKER_CHANGED(thd, CURRENT_SCHEMA_TRACKER, NULL);
+ }
my_dirend(dirp);
DBUG_RETURN(error);
}
@@ -1103,8 +1107,12 @@ static bool find_db_tables_and_rm_known_files(THD *thd, MY_DIR *dirp,
table_list->table_name_length= table->length;
table_list->open_type= OT_BASE_ONLY;
- /* To be able to correctly look up the table in the table cache. */
- if (lower_case_table_names)
+ /*
+ On the case-insensitive file systems table is opened
+ with the lowercased file name. So we should lowercase
+ as well to look up the cache properly.
+ */
+ if (lower_case_file_system)
table_list->table_name_length= my_casedn_str(files_charset_info,
table_list->table_name);
@@ -1480,7 +1488,7 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch)
mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server);
- DBUG_RETURN(FALSE);
+ goto done;
}
else
{
@@ -1497,8 +1505,7 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch)
mysql_change_db_impl(thd, &INFORMATION_SCHEMA_NAME, SELECT_ACL,
system_charset_info);
-
- DBUG_RETURN(FALSE);
+ goto done;
}
/*
@@ -1585,8 +1592,7 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch)
mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server);
/* The operation succeed. */
-
- DBUG_RETURN(FALSE);
+ goto done;
}
else
{
@@ -1610,6 +1616,9 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch)
mysql_change_db_impl(thd, &new_db_file_name, db_access, db_default_cl);
+done:
+ SESSION_TRACKER_CHANGED(thd, CURRENT_SCHEMA_TRACKER, NULL);
+ SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
DBUG_RETURN(FALSE);
}
@@ -1799,7 +1808,7 @@ bool mysql_upgrade_db(THD *thd, LEX_STRING *old_db)
create trigger trg1 before insert on t2 for each row set @a:=1
rename database d1 to d2;
- TODO: Triggers, having the renamed database explicitely written
+ TODO: Triggers, having the renamed database explicitly written
in the table qualifiers.
1. when the same database is renamed:
create trigger d1.trg1 before insert on d1.t1 for each row set @a:=1;
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 9f3caf9df4f..503e9b9dcbe 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2010, Oracle and/or its affiliates.
- Copyright (c) 2010, 2015, MariaDB
+ Copyright (c) 2010, 2019, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -39,6 +39,8 @@
#include "sql_statistics.h"
#include "transaction.h"
#include "records.h" // init_read_record,
+#include "filesort.h"
+#include "uniques.h"
#include "sql_derived.h" // mysql_handle_derived
// end_read_record
#include "sql_partition.h" // make_used_partitions_str
@@ -226,10 +228,12 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
int error, loc_error;
TABLE *table;
SQL_SELECT *select=0;
+ SORT_INFO *file_sort= 0;
READ_RECORD info;
bool using_limit=limit != HA_POS_ERROR;
bool transactional_table, safe_update, const_cond;
bool const_cond_result;
+ bool return_error= 0;
ha_rows deleted= 0;
bool reverse= FALSE;
ORDER *order= (ORDER *) ((order_list && order_list->elements) ?
@@ -258,7 +262,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE");
DBUG_RETURN(TRUE);
}
- if (!(table= table_list->table) || !table->created)
+ if (!(table= table_list->table) || !table->is_created())
{
my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0),
table_list->view_db.str, table_list->view_name.str);
@@ -404,7 +408,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
table->covering_keys.clear_all();
table->quick_keys.clear_all(); // Can't use 'only index'
- select=make_select(table, 0, 0, conds, 0, &error);
+ select=make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
if (error)
DBUG_RETURN(TRUE);
if ((select && select->check_quick(thd, safe_update, limit)) || !limit)
@@ -488,62 +492,47 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (query_plan.using_filesort)
{
- ha_rows examined_rows;
- ha_rows found_rows;
- uint length= 0;
- SORT_FIELD *sortorder;
{
+ Filesort fsort(order, HA_POS_ERROR, true, select);
DBUG_ASSERT(query_plan.index == MAX_KEY);
- table->sort.io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL |
- MY_THREAD_SPECIFIC));
+
Filesort_tracker *fs_tracker=
thd->lex->explain->get_upd_del_plan()->filesort_tracker;
- if (!(sortorder= make_unireg_sortorder(thd, NULL, 0, order, &length, NULL)) ||
- (table->sort.found_records= filesort(thd, table, sortorder, length,
- select, HA_POS_ERROR,
- true,
- &examined_rows, &found_rows,
- fs_tracker))
- == HA_POS_ERROR)
- {
- delete select;
- free_underlaid_joins(thd, &thd->lex->select_lex);
- DBUG_RETURN(TRUE);
- }
- thd->inc_examined_row_count(examined_rows);
+ if (!(file_sort= filesort(thd, table, &fsort, fs_tracker)))
+ goto got_error;
+
+ thd->inc_examined_row_count(file_sort->examined_rows);
/*
Filesort has already found and selected the rows we want to delete,
so we don't need the where clause
*/
delete select;
- free_underlaid_joins(thd, select_lex);
+
+ /*
+ If we are not in DELETE ... RETURNING, we can free subqueries. (in
+ DELETE ... RETURNING we can't, because the RETURNING part may have
+ a subquery in it)
+ */
+ if (!with_select)
+ free_underlaid_joins(thd, select_lex);
select= 0;
}
}
/* If quick select is used, initialize it before retrieving rows. */
if (select && select->quick && select->quick->reset())
- {
- delete select;
- free_underlaid_joins(thd, select_lex);
- DBUG_RETURN(TRUE);
- }
+ goto got_error;
if (query_plan.index == MAX_KEY || (select && select->quick))
- error= init_read_record(&info, thd, table, select, 1, 1, FALSE);
+ error= init_read_record(&info, thd, table, select, file_sort, 1, 1, FALSE);
else
error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
reverse);
if (error)
- {
- delete select;
- free_underlaid_joins(thd, select_lex);
- DBUG_RETURN(TRUE);
- }
-
+ goto got_error;
+
init_ftfuncs(thd, select_lex, 1);
THD_STAGE_INFO(thd, stage_updating);
@@ -571,10 +560,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
! thd->is_error())
{
explain->tracker.on_record_read();
- if (table->vfield)
- update_virtual_fields(thd, table, VCOL_UPDATE_FOR_READ);
thd->inc_examined_row_count(1);
- // thd->is_error() is tested to disallow delete row on error
+ if (table->vfield)
+ (void) table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_DELETE);
if (!select || select->skip_record(thd) > 0)
{
explain->tracker.on_record_after_where();
@@ -697,8 +685,6 @@ cleanup:
}
DBUG_ASSERT(transactional_table || !deleted || thd->transaction.stmt.modified_non_trans_table);
-
- free_underlaid_joins(thd, select_lex);
if (error < 0 ||
(thd->lex->ignore && !thd->is_error() && !thd->is_fatal_error))
{
@@ -711,6 +697,8 @@ cleanup:
my_ok(thd, deleted);
DBUG_PRINT("info",("%ld records deleted",(long) deleted));
}
+ delete file_sort;
+ free_underlaid_joins(thd, select_lex);
DBUG_RETURN(error >= 0 || thd->is_error());
/* Special exits */
@@ -729,9 +717,16 @@ send_nothing_and_leave:
*/
delete select;
+ delete file_sort;
free_underlaid_joins(thd, select_lex);
//table->set_keyread(false);
- DBUG_RETURN((thd->is_error() || thd->killed) ? 1 : 0);
+
+ DBUG_ASSERT(!return_error || thd->is_error() || thd->killed);
+ DBUG_RETURN((return_error || thd->is_error() || thd->killed) ? 1 : 0);
+
+got_error:
+ return_error= 1;
+ goto send_nothing_and_leave;
}
@@ -745,7 +740,7 @@ send_nothing_and_leave:
wild_num - number of wildcards used in optional SELECT clause
field_list - list of items in optional SELECT clause
conds - conditions
-
+
RETURN VALUE
FALSE OK
TRUE error
@@ -765,8 +760,10 @@ send_nothing_and_leave:
select_lex->leaf_tables, FALSE,
DELETE_ACL, SELECT_ACL, TRUE))
DBUG_RETURN(TRUE);
- if ((wild_num && setup_wild(thd, table_list, field_list, NULL, wild_num)) ||
- setup_fields(thd, NULL, field_list, MARK_COLUMNS_READ, NULL, NULL, 0) ||
+ if ((wild_num && setup_wild(thd, table_list, field_list, NULL, wild_num,
+ &select_lex->hidden_bit_fields)) ||
+ setup_fields(thd, Ref_ptr_array(),
+ field_list, MARK_COLUMNS_READ, NULL, NULL, 0) ||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
setup_ftfuncs(select_lex))
DBUG_RETURN(TRUE);
@@ -798,7 +795,7 @@ send_nothing_and_leave:
Delete multiple tables from join
***************************************************************************/
-#define MEM_STRIP_BUF_SIZE current_thd->variables.sortbuff_size
+#define MEM_STRIP_BUF_SIZE (size_t)(current_thd->variables.sortbuff_size)
extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b)
{
@@ -927,6 +924,15 @@ multi_delete::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
DBUG_RETURN(0);
}
+void multi_delete::prepare_to_read_rows()
+{
+ /* see multi_update::prepare_to_read_rows() */
+ for (TABLE_LIST *walk= delete_tables; walk; walk= walk->next_local)
+ {
+ TABLE_LIST *tbl= walk->correspondent_table->find_table_for_update();
+ tbl->table->mark_columns_needed_for_delete();
+ }
+}
bool
multi_delete::initialize_tables(JOIN *join)
@@ -956,7 +962,6 @@ multi_delete::initialize_tables(JOIN *join)
}
}
-
walk= delete_tables;
for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS,
@@ -980,7 +985,6 @@ multi_delete::initialize_tables(JOIN *join)
normal_tables= 1;
tbl->prepare_triggers_for_delete_stmt_or_event();
tbl->prepare_for_position();
- tbl->mark_columns_needed_for_delete();
}
else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
walk == delete_tables)
@@ -1184,7 +1188,8 @@ int multi_delete::do_deletes()
if (tempfiles[counter]->get(table))
DBUG_RETURN(1);
- local_error= do_table_deletes(table, thd->lex->ignore);
+ local_error= do_table_deletes(table, &tempfiles[counter]->sort,
+ thd->lex->ignore);
if (thd->killed && !local_error)
DBUG_RETURN(1);
@@ -1214,14 +1219,15 @@ int multi_delete::do_deletes()
@retval 1 Triggers or handler reported error.
@retval -1 End of file from handler.
*/
-int multi_delete::do_table_deletes(TABLE *table, bool ignore)
+int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info,
+ bool ignore)
{
int local_error= 0;
READ_RECORD info;
ha_rows last_deleted= deleted;
DBUG_ENTER("do_deletes_for_table");
- if (init_read_record(&info, thd, table, NULL, 0, 1, FALSE))
+ if (init_read_record(&info, thd, table, NULL, sort_info, 0, 1, FALSE))
DBUG_RETURN(1);
/*
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index c93aa132e37..500b0431bf9 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -30,6 +30,8 @@
#include "sql_base.h"
#include "sql_view.h" // check_duplicate_names
#include "sql_acl.h" // SELECT_ACL
+#include "sql_class.h"
+#include "sql_cte.h"
typedef bool (*dt_processor)(THD *thd, LEX *lex, TABLE_LIST *derived);
@@ -357,7 +359,6 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
thd->save_prep_leaf_list= TRUE;
arena= thd->activate_stmt_arena_if_needed(&backup); // For easier test
- derived->merged= TRUE;
if (!derived->merged_for_insert ||
(derived->is_multitable() &&
@@ -421,6 +422,7 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
if (parent_lex->join)
parent_lex->join->table_count+= dt_select->join->table_count - 1;
}
+ derived->merged= TRUE;
if (derived->get_unit()->prepared)
{
Item *expr= derived->on_expr;
@@ -458,10 +460,8 @@ exit_merge:
unconditional_materialization:
derived->change_refs_to_fields();
derived->set_materialized_derived();
- if (!derived->table || !derived->table->created)
+ if (!derived->table || !derived->table->is_created())
res= mysql_derived_create(thd, lex, derived);
- if (!res)
- res= mysql_derived_fill(thd, lex, derived);
goto exit_merge;
}
@@ -554,7 +554,11 @@ bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived)
if (!unit || unit->prepared)
DBUG_RETURN(FALSE);
- DBUG_RETURN(derived->init_derived(thd, TRUE));
+ bool res= derived->init_derived(thd, TRUE);
+
+ derived->updatable= derived->updatable && derived->is_view();
+
+ DBUG_RETURN(res);
}
@@ -614,6 +618,7 @@ bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived)
true Error
*/
+
bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
{
SELECT_LEX_UNIT *unit= derived->get_unit();
@@ -623,24 +628,87 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
(derived->alias ? derived->alias : "<NULL>"),
unit));
+ if (!unit)
+ DBUG_RETURN(FALSE);
+
+ SELECT_LEX *first_select= unit->first_select();
+
+ if (derived->is_recursive_with_table() &&
+ !derived->is_with_table_recursive_reference() &&
+ !derived->with->rec_result && derived->with->get_sq_rec_ref())
+ {
+ /*
+ This is a non-recursive reference to a recursive CTE whose
+ specification unit has not been prepared at the regular processing of
+ derived table references. This can happen only in the case when
+ the specification unit has no recursive references at the top level.
+ Force the preparation of the specification unit. Use a recursive
+ table reference from a subquery for this.
+ */
+ DBUG_ASSERT(derived->with->get_sq_rec_ref());
+ if (mysql_derived_prepare(lex->thd, lex, derived->with->get_sq_rec_ref()))
+ DBUG_RETURN(TRUE);
+ }
+
+ if (unit->prepared && derived->is_recursive_with_table() &&
+ !derived->table)
+ {
+ /*
+ Here 'derived' is either a non-recursive table reference to a recursive
+ with table or a recursive table reference to a recursive table whose
+ specification has been already prepared (a secondary recursive table
+ reference).
+ */
+ if (!(derived->derived_result= new (thd->mem_root) select_union(thd)))
+ DBUG_RETURN(TRUE); // out of memory
+ thd->create_tmp_table_for_derived= TRUE;
+ res= derived->derived_result->create_result_table(
+ thd, &unit->types, FALSE,
+ (first_select->options |
+ thd->variables.option_bits |
+ TMP_TABLE_ALL_COLUMNS),
+ derived->alias, FALSE, FALSE);
+ thd->create_tmp_table_for_derived= FALSE;
+
+ if (!res && !derived->table)
+ {
+ derived->derived_result->set_unit(unit);
+ derived->table= derived->derived_result->table;
+ if (derived->is_with_table_recursive_reference())
+ {
+ /* Here 'derived' is a secondary recursive table reference */
+ unit->with_element->rec_result->rec_tables.push_back(derived->table);
+ }
+ }
+ DBUG_ASSERT(derived->table || res);
+ goto exit;
+ }
+
// Skip already prepared views/DT
- if (!unit || unit->prepared ||
+ if (unit->prepared ||
(derived->merged_for_insert &&
!(derived->is_multitable() &&
(thd->lex->sql_command == SQLCOM_UPDATE_MULTI ||
thd->lex->sql_command == SQLCOM_DELETE_MULTI))))
DBUG_RETURN(FALSE);
- SELECT_LEX *first_select= unit->first_select();
-
/* prevent name resolving out of derived table */
for (SELECT_LEX *sl= first_select; sl; sl= sl->next_select())
{
sl->context.outer_context= 0;
- // Prepare underlying views/DT first.
- if ((res= sl->handle_derived(lex, DT_PREPARE)))
- goto exit;
-
+ if (!derived->is_with_table_recursive_reference() ||
+ (!derived->with->with_anchor &&
+ !derived->with->is_with_prepared_anchor()))
+ {
+ /*
+ Prepare underlying views/DT first unless 'derived' is a recursive
+ table reference and either the anchors from the specification of
+ 'derived' have already been prepared or there is no anchor in this
+ specification
+ */
+ if ((res= sl->handle_derived(lex, DT_PREPARE)))
+ goto exit;
+ }
if (derived->outer_join && sl->first_cond_optimization)
{
/* Mark that table is part of OUTER JOIN and fields may be NULL */
@@ -672,8 +740,11 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
// st_select_lex_unit::prepare correctly work for single select
if ((res= unit->prepare(thd, derived->derived_result, 0)))
goto exit;
+ if (derived->with &&
+ (res= derived->with->rename_columns_of_derived_unit(thd, unit)))
+ goto exit;
lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
- if ((res= check_duplicate_names(unit->types, 0)))
+ if ((res= check_duplicate_names(thd, unit->types, 0)))
goto exit;
/*
@@ -695,19 +766,21 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
SELECT is last SELECT of UNION).
*/
thd->create_tmp_table_for_derived= TRUE;
- if (derived->derived_result->create_result_table(thd, &unit->types, FALSE,
- (first_select->options |
- thd->variables.option_bits |
- TMP_TABLE_ALL_COLUMNS),
- derived->alias,
- FALSE, FALSE))
+ if (!(derived->table) &&
+ derived->derived_result->create_result_table(thd, &unit->types, FALSE,
+ (first_select->options |
+ thd->variables.option_bits |
+ TMP_TABLE_ALL_COLUMNS),
+ derived->alias,
+ FALSE, FALSE, FALSE))
{
thd->create_tmp_table_for_derived= FALSE;
goto exit;
}
thd->create_tmp_table_for_derived= FALSE;
- derived->table= derived->derived_result->table;
+ if (!derived->table)
+ derived->table= derived->derived_result->table;
DBUG_ASSERT(derived->table);
if (derived->is_derived() && derived->is_merged_derived())
first_select->mark_as_belong_to_derived(derived);
@@ -734,9 +807,12 @@ exit:
*/
if (res)
{
- if (derived->table)
- free_tmp_table(thd, derived->table);
- delete derived->derived_result;
+ if (!derived->is_with_table_recursive_reference())
+ {
+ if (derived->table)
+ free_tmp_table(thd, derived->table);
+ delete derived->derived_result;
+ }
}
else
{
@@ -744,18 +820,22 @@ exit:
table->derived_select_number= first_select->select_number;
table->s->tmp_table= INTERNAL_TMP_TABLE;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
- if (derived->referencing_view)
+ if (derived->is_view())
table->grant= derived->grant;
else
{
+ DBUG_ASSERT(derived->is_derived());
+ DBUG_ASSERT(derived->is_anonymous_derived_table());
table->grant.privilege= SELECT_ACL;
- if (derived->is_derived())
- derived->grant.privilege= SELECT_ACL;
+ derived->grant.privilege= SELECT_ACL;
}
#endif
/* Add new temporary table to list of open derived tables */
- table->next= thd->derived_tables;
- thd->derived_tables= table;
+ if (!derived->is_with_table_recursive_reference())
+ {
+ table->next= thd->derived_tables;
+ thd->derived_tables= table;
+ }
/* If table is used by a left join, mark that any column may be null */
if (derived->outer_join)
@@ -868,9 +948,9 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived)
TABLE *table= derived->table;
SELECT_LEX_UNIT *unit= derived->get_unit();
- if (table->created)
+ if (table->is_created())
DBUG_RETURN(FALSE);
- select_union *result= (select_union*)unit->result;
+ select_union *result= derived->derived_result;
if (table->s->db_type() == TMP_ENGINE_HTON)
{
result->tmp_table_param.keyinfo= table->s->key_info;
@@ -889,6 +969,48 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived)
}
+/**
+ @brief
+ Fill the recursive with table
+
+ @param thd The thread handle
+
+ @details
+ The method is called only for recursive with tables.
+ The method executes the recursive part of the specification
+ of this with table until no more rows are added to the table
+ or the number of the performed iteration reaches the allowed
+ maximum.
+
+ @retval
+ false on success
+ true on failure
+*/
+
+bool TABLE_LIST::fill_recursive(THD *thd)
+{
+ bool rc= false;
+ st_select_lex_unit *unit= get_unit();
+ rc= with->instantiate_tmp_tables();
+ while (!rc && !with->all_are_stabilized())
+ {
+ if (with->level > thd->variables.max_recursive_iterations)
+ break;
+ with->prepare_for_next_iteration();
+ rc= unit->exec_recursive();
+ }
+ if (!rc)
+ {
+ TABLE *src= with->rec_result->table;
+ rc =src->insert_all_rows_into_tmp_table(thd,
+ table,
+ &with->rec_result->tmp_table_param,
+ true);
+ }
+ return rc;
+}
+
+
/*
Execute subquery of a materialized derived table/view and fill the result
table.
@@ -899,9 +1021,10 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived)
@details
Execute subquery of given 'derived' table/view and fill the result
- table. After result table is filled, if this is not the EXPLAIN statement,
- the entire unit / node is deleted. unit is deleted if UNION is used
- for derived table and node is deleted is it is a simple SELECT.
+ table. After result table is filled, if this is not the EXPLAIN statement
+ and the table is not specified with recursion, the entire unit / node
+ is deleted. The unit is deleted if UNION is used for the derived table and
+ the node is deleted if it is a simple SELECT.
'lex' is unused and 'thd' is passed as an argument to an underlying function.
@note
@@ -912,36 +1035,55 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived)
@return TRUE Error
*/
+
bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
{
Field_iterator_table field_iterator;
SELECT_LEX_UNIT *unit= derived->get_unit();
+ bool derived_is_recursive= derived->is_recursive_with_table();
bool res= FALSE;
DBUG_ENTER("mysql_derived_fill");
DBUG_PRINT("enter", ("Alias: '%s' Unit: %p",
(derived->alias ? derived->alias : "<NULL>"),
derived->get_unit()));
- if (unit->executed && !unit->uncacheable && !unit->describe)
+ if (unit->executed && !unit->uncacheable && !unit->describe &&
+ !derived_is_recursive)
DBUG_RETURN(FALSE);
/*check that table creation passed without problems. */
- DBUG_ASSERT(derived->table && derived->table->created);
- SELECT_LEX *first_select= unit->first_select();
+ DBUG_ASSERT(derived->table && derived->table->is_created());
select_union *derived_result= derived->derived_result;
SELECT_LEX *save_current_select= lex->current_select;
- if (unit->is_union())
+ bool derived_recursive_is_filled= false;
+
+ if (derived_is_recursive)
+ {
+ if (derived->is_with_table_recursive_reference())
+ {
+ /* Here only one iteration step is performed */
+ res= unit->exec_recursive();
+ }
+ else
+ {
+ /* In this case all iterations are performed */
+ res= derived->fill_recursive(thd);
+ derived_recursive_is_filled= true;
+ }
+ }
+ else if (unit->is_union())
{
// execute union without clean up
res= unit->exec();
}
else
{
+ SELECT_LEX *first_select= unit->first_select();
unit->set_limit(unit->global_parameters());
if (unit->select_limit_cnt == HA_POS_ERROR)
first_select->options&= ~OPTION_FOUND_ROWS;
lex->current_select= first_select;
- res= mysql_select(thd, &first_select->ref_pointer_array,
+ res= mysql_select(thd,
first_select->table_list.first,
first_select->with_wild,
first_select->item_list, first_select->where,
@@ -955,7 +1097,7 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
derived_result, unit, first_select);
}
- if (!res)
+ if (!res && !derived_is_recursive)
{
if (derived_result->flush())
res= TRUE;
@@ -981,7 +1123,9 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
}
}
- if (res || !lex->describe)
+ if (res || (!lex->describe &&
+ (!derived_is_recursive ||
+ derived_recursive_is_filled)))
unit->cleanup();
lex->current_select= save_current_select;
@@ -1019,6 +1163,174 @@ bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived)
unit->types.empty();
/* for derived tables & PS (which can't be reset by Item_subselect) */
unit->reinit_exec_mechanism();
+ for (st_select_lex *sl= unit->first_select(); sl; sl= sl->next_select())
+ {
+ sl->cond_pushed_into_where= NULL;
+ sl->cond_pushed_into_having= NULL;
+ }
unit->set_thd(thd);
DBUG_RETURN(FALSE);
}
+
+
+/**
+ @brief
+ Extract the condition depended on derived table/view and pushed it there
+
+ @param thd The thread handle
+ @param cond The condition from which to extract the pushed condition
+ @param derived The reference to the derived table/view
+
+ @details
+ This function builds the most restrictive condition depending only on
+ the derived table/view that can be extracted from the condition cond.
+ The built condition is pushed into the having clauses of the
+ selects contained in the query specifying the derived table/view.
+ The function also checks for each select whether any condition depending
+ only on grouping fields can be extracted from the pushed condition.
+ If so, it pushes the condition over grouping fields into the where
+ clause of the select.
+
+ @retval
+ true if an error is reported
+ false otherwise
+*/
+
+bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived)
+{
+ DBUG_ENTER("pushdown_cond_for_derived");
+ if (!cond)
+ DBUG_RETURN(false);
+
+ st_select_lex_unit *unit= derived->get_unit();
+ st_select_lex *sl= unit->first_select();
+
+ if (derived->prohibit_cond_pushdown)
+ DBUG_RETURN(false);
+
+ /* Do not push conditions into constant derived */
+ if (unit->executed)
+ DBUG_RETURN(false);
+
+ /* Do not push conditions into recursive with tables */
+ if (derived->is_recursive_with_table())
+ DBUG_RETURN(false);
+
+ /* Do not push conditions into unit with global ORDER BY ... LIMIT */
+ if (unit->fake_select_lex && unit->fake_select_lex->explicit_limit)
+ DBUG_RETURN(false);
+
+ /* Check whether any select of 'unit' allows condition pushdown */
+ bool some_select_allows_cond_pushdown= false;
+ for (; sl; sl= sl->next_select())
+ {
+ if (sl->cond_pushdown_is_allowed())
+ {
+ some_select_allows_cond_pushdown= true;
+ break;
+ }
+ }
+ if (!some_select_allows_cond_pushdown)
+ DBUG_RETURN(false);
+
+ /*
+ Build the most restrictive condition extractable from 'cond'
+ that can be pushed into the derived table 'derived'.
+ All subexpressions of this condition are cloned from the
+ subexpressions of 'cond'.
+ This condition has to be fixed yet.
+ */
+ Item *extracted_cond;
+ derived->check_pushable_cond_for_table(cond);
+ extracted_cond= derived->build_pushable_cond_for_table(thd, cond);
+ if (!extracted_cond)
+ {
+ /* Nothing can be pushed into the derived table */
+ DBUG_RETURN(false);
+ }
+ /* Push extracted_cond into every select of the unit specifying 'derived' */
+ st_select_lex *save_curr_select= thd->lex->current_select;
+ for (; sl; sl= sl->next_select())
+ {
+ if (!sl->cond_pushdown_is_allowed())
+ continue;
+ thd->lex->current_select= sl;
+ /*
+ For each select of the unit except the last one
+ create a clone of extracted_cond
+ */
+ Item *extracted_cond_copy= !sl->next_select() ? extracted_cond :
+ extracted_cond->build_clone(thd, thd->mem_root);
+ if (!extracted_cond_copy)
+ continue;
+
+ if (!sl->join->group_list && !sl->with_sum_func)
+ {
+ /* extracted_cond_copy is pushed into where of sl */
+ extracted_cond_copy= extracted_cond_copy->transform(thd,
+ &Item::derived_field_transformer_for_where,
+ (uchar*) sl);
+ if (extracted_cond_copy)
+ {
+ extracted_cond_copy->walk(
+ &Item::cleanup_excluding_const_fields_processor, 0, 0);
+ sl->cond_pushed_into_where= extracted_cond_copy;
+ }
+
+ continue;
+ }
+
+ /*
+ Figure out what can be extracted from the pushed condition
+ that could be pushed into the where clause of sl
+ */
+ Item *cond_over_grouping_fields;
+ sl->collect_grouping_fields(thd);
+ sl->check_cond_extraction_for_grouping_fields(extracted_cond_copy,
+ derived);
+ cond_over_grouping_fields=
+ sl->build_cond_for_grouping_fields(thd, extracted_cond_copy, true);
+
+ /*
+ Transform the references to the 'derived' columns from the condition
+ pushed into the where clause of sl to make them usable in the new context
+ */
+ if (cond_over_grouping_fields)
+ cond_over_grouping_fields= cond_over_grouping_fields->transform(thd,
+ &Item::derived_grouping_field_transformer_for_where,
+ (uchar*) sl);
+
+ if (cond_over_grouping_fields)
+ {
+ /*
+ In extracted_cond_copy remove top conjuncts that
+ has been pushed into the where clause of sl
+ */
+ extracted_cond_copy= remove_pushed_top_conjuncts(thd, extracted_cond_copy);
+
+ cond_over_grouping_fields->walk(
+ &Item::cleanup_excluding_const_fields_processor, 0, 0);
+ sl->cond_pushed_into_where= cond_over_grouping_fields;
+
+ if (!extracted_cond_copy)
+ continue;
+ }
+
+ /*
+ Transform the references to the 'derived' columns from the condition
+ pushed into the having clause of sl to make them usable in the new context
+ */
+ extracted_cond_copy= extracted_cond_copy->transform(thd,
+ &Item::derived_field_transformer_for_having,
+ (uchar*) sl);
+ if (!extracted_cond_copy)
+ continue;
+
+ extracted_cond_copy->walk(&Item::cleanup_excluding_const_fields_processor,
+ 0, 0);
+ sl->cond_pushed_into_having= extracted_cond_copy;
+ }
+ thd->lex->current_select= save_curr_select;
+ DBUG_RETURN(false);
+}
+
diff --git a/sql/sql_derived.h b/sql/sql_derived.h
index 301ae31b016..621a6e9ec24 100644
--- a/sql/sql_derived.h
+++ b/sql/sql_derived.h
@@ -36,4 +36,8 @@ bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived);
*/
bool mysql_derived_cleanup(THD *thd, LEX *lex, TABLE_LIST *derived);
+Item *delete_not_needed_parts(THD *thd, Item *cond);
+
+bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived);
+
#endif /* SQL_DERIVED_INCLUDED */
diff --git a/sql/sql_digest.cc b/sql/sql_digest.cc
index 6605d0af0a9..b7ee3c4ffea 100644
--- a/sql/sql_digest.cc
+++ b/sql/sql_digest.cc
@@ -20,7 +20,7 @@
#include "my_global.h"
#include "my_md5.h"
-#include "mysqld_error.h"
+#include "unireg.h"
#include "sql_string.h"
#include "sql_class.h"
diff --git a/sql/sql_do.cc b/sql/sql_do.cc
index 54850494ad0..da7dfe0c137 100644
--- a/sql/sql_do.cc
+++ b/sql/sql_do.cc
@@ -29,7 +29,7 @@ bool mysql_do(THD *thd, List<Item> &values)
List_iterator<Item> li(values);
Item *value;
DBUG_ENTER("mysql_do");
- if (setup_fields(thd, 0, values, MARK_COLUMNS_NONE, 0, NULL, 0))
+ if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_NONE, 0, NULL, 0))
DBUG_RETURN(TRUE);
while ((value = li++))
(void) value->is_null();
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index b72d642efbc..d14c7b83b77 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -320,7 +320,7 @@ Sql_condition::set_sqlstate(const char* sqlstate)
}
Diagnostics_area::Diagnostics_area(bool initialize)
- : m_main_wi(0, false, initialize)
+ : is_bulk_execution(0), m_main_wi(0, false, initialize)
{
push_warning_info(&m_main_wi);
@@ -330,7 +330,8 @@ Diagnostics_area::Diagnostics_area(bool initialize)
Diagnostics_area::Diagnostics_area(ulonglong warning_info_id,
bool allow_unlimited_warnings,
bool initialize)
- : m_main_wi(warning_info_id, allow_unlimited_warnings, initialize)
+ : is_bulk_execution(0),
+ m_main_wi(warning_info_id, allow_unlimited_warnings, initialize)
{
push_warning_info(&m_main_wi);
@@ -347,6 +348,7 @@ void
Diagnostics_area::reset_diagnostics_area()
{
DBUG_ENTER("reset_diagnostics_area");
+ m_skip_flush= FALSE;
#ifdef DBUG_OFF
m_can_overwrite_status= FALSE;
/** Don't take chances in production */
@@ -375,22 +377,33 @@ Diagnostics_area::set_ok_status(ulonglong affected_rows,
const char *message)
{
DBUG_ENTER("set_ok_status");
- DBUG_ASSERT(! is_set());
+ DBUG_ASSERT(!is_set() || (m_status == DA_OK_BULK && is_bulk_op()));
/*
In production, refuse to overwrite an error or a custom response
with an OK packet.
*/
if (is_error() || is_disabled())
return;
-
- m_statement_warn_count= current_statement_warn_count();
- m_affected_rows= affected_rows;
+ /*
+ When running a bulk operation, m_status will be DA_OK for the first
+ operation and set to DA_OK_BULK for all following operations.
+ */
+ if (m_status == DA_OK_BULK)
+ {
+ m_statement_warn_count+= current_statement_warn_count();
+ m_affected_rows+= affected_rows;
+ }
+ else
+ {
+ m_statement_warn_count= current_statement_warn_count();
+ m_affected_rows= affected_rows;
+ m_status= (is_bulk_op() ? DA_OK_BULK : DA_OK);
+ }
m_last_insert_id= last_insert_id;
if (message)
strmake_buf(m_message, message);
else
m_message[0]= '\0';
- m_status= DA_OK;
DBUG_VOID_RETURN;
}
diff --git a/sql/sql_error.h b/sql/sql_error.h
index 0134f938c75..3ac06657323 100644
--- a/sql/sql_error.h
+++ b/sql/sql_error.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -51,7 +52,7 @@ public:
Convert a bitmask consisting of MYSQL_TIME_{NOTE|WARN}_XXX bits
to WARN_LEVEL_XXX
*/
- static enum_warning_level time_warn_level(int warnings)
+ static enum_warning_level time_warn_level(uint warnings)
{
return MYSQL_TIME_WARN_HAVE_WARNINGS(warnings) ?
WARN_LEVEL_WARN : WARN_LEVEL_NOTE;
@@ -571,7 +572,10 @@ public:
ErrConvString(const String *s)
: ErrConv(), str(s->ptr()), len(s->length()), cs(s->charset()) {}
const char *ptr() const
- { return err_conv(err_buffer, sizeof(err_buffer), str, len, cs); }
+ {
+ DBUG_ASSERT(len < UINT_MAX32);
+ return err_conv(err_buffer, (uint) sizeof(err_buffer), str, (uint) len, cs);
+ }
};
class ErrConvInteger : public ErrConv, public Longlong_hybrid
@@ -657,6 +661,8 @@ public:
DA_OK,
/** Set whenever one calls my_eof(). */
DA_EOF,
+ /** Set whenever one calls my_ok() in PS bulk mode. */
+ DA_OK_BULK,
/** Set whenever one calls my_error() or my_message(). */
DA_ERROR,
/** Set in case of a custom response, such as one from COM_STMT_PREPARE. */
@@ -698,10 +704,24 @@ public:
bool is_disabled() const { return m_status == DA_DISABLED; }
+ void set_bulk_execution(bool bulk) { is_bulk_execution= bulk; }
+
+ bool is_bulk_op() const { return is_bulk_execution; }
+
enum_diagnostics_status status() const { return m_status; }
const char *message() const
- { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; }
+ { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK ||
+ m_status == DA_OK_BULK); return m_message; }
+
+ bool skip_flush() const
+ {
+ DBUG_ASSERT(m_status == DA_OK || m_status == DA_OK_BULK);
+ return m_skip_flush;
+ }
+
+ void set_skip_flush()
+ { m_skip_flush= TRUE; }
uint sql_errno() const
{ DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; }
@@ -710,14 +730,21 @@ public:
{ DBUG_ASSERT(m_status == DA_ERROR); return m_sqlstate; }
ulonglong affected_rows() const
- { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; }
+ {
+ DBUG_ASSERT(m_status == DA_OK || m_status == DA_OK_BULK);
+ return m_affected_rows;
+ }
ulonglong last_insert_id() const
- { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; }
+ {
+ DBUG_ASSERT(m_status == DA_OK || m_status == DA_OK_BULK);
+ return m_last_insert_id;
+ }
uint statement_warn_count() const
{
- DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF);
+ DBUG_ASSERT(m_status == DA_OK || m_status == DA_OK_BULK ||
+ m_status == DA_EOF);
return m_statement_warn_count;
}
@@ -856,6 +883,9 @@ private:
/** Set to make set_error_status after set_{ok,eof}_status possible. */
bool m_can_overwrite_status;
+ /** Skip flushing network buffer after writing OK (for COM_MULTI) */
+ bool m_skip_flush;
+
/** Message buffer. Can be used by OK or ERROR status. */
char m_message[MYSQL_ERRMSG_SIZE];
@@ -897,6 +927,8 @@ private:
enum_diagnostics_status m_status;
+ my_bool is_bulk_execution;
+
Warning_info m_main_wi;
Warning_info_list m_wi_stack;
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index a1d6764d8e4..fe51f8e1d64 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -30,6 +30,7 @@ const char * STR_IMPOSSIBLE_WHERE= "Impossible WHERE";
const char * STR_NO_ROWS_AFTER_PRUNING= "No matching rows after partition pruning";
static void write_item(Json_writer *writer, Item *item);
+static void append_item_to_str(String *out, Item *item);
Explain_query::Explain_query(THD *thd_arg, MEM_ROOT *root) :
mem_root(root), upd_del_plan(NULL), insert_plan(NULL),
@@ -549,7 +550,12 @@ void Explain_union::print_explain_json(Explain_query *query,
bool started_object= print_explain_json_cache(writer, is_analyze);
writer->add_member("query_block").start_object();
- writer->add_member("union_result").start_object();
+
+ if (is_recursive_cte)
+ writer->add_member("recursive_union").start_object();
+ else
+ writer->add_member("union_result").start_object();
+
// using_temporary_table
make_union_table_name(table_name_buffer);
writer->add_member("table_name").add_str(table_name_buffer);
@@ -700,13 +706,6 @@ bool Explain_node::print_explain_json_cache(Json_writer *writer,
}
-void Explain_select::replace_table(uint idx, Explain_table_access *new_tab)
-{
- delete join_tabs[idx];
- join_tabs[idx]= new_tab;
-}
-
-
Explain_basic_join::~Explain_basic_join()
{
if (join_tabs)
@@ -757,35 +756,23 @@ int Explain_select::print_explain(Explain_query *query,
}
else
{
- bool using_tmp;
- bool using_fs;
+ bool using_tmp= false;
+ bool using_fs= false;
- if (is_analyze)
+ for (Explain_aggr_node *node= aggr_tree; node; node= node->child)
{
- /*
- Get the data about "Using temporary; Using filesort" from execution
- tracking system.
- */
- using_tmp= false;
- using_fs= false;
- Sort_and_group_tracker::Iterator iter(&ops_tracker);
- enum_qep_action action;
- Filesort_tracker *dummy;
-
- while ((action= iter.get_next(&dummy)) != EXPL_ACTION_EOF)
+ switch (node->get_type())
{
- if (action == EXPL_ACTION_FILESORT)
- using_fs= true;
- else if (action == EXPL_ACTION_TEMPTABLE)
+ case AGGR_OP_TEMP_TABLE:
using_tmp= true;
+ break;
+ case AGGR_OP_FILESORT:
+ using_fs= true;
+ break;
+ default:
+ break;
}
}
- else
- {
- /* Use imprecise "estimates" we got with the query plan */
- using_tmp= using_temporary;
- using_fs= using_filesort;
- }
for (uint i=0; i< n_join_tabs; i++)
{
@@ -865,6 +852,12 @@ void Explain_select::print_explain_json(Explain_query *query,
writer->add_member("const_condition");
write_item(writer, exec_const_cond);
}
+ if (outer_ref_cond)
+ {
+ writer->add_member("outer_ref_condition");
+ write_item(writer, outer_ref_cond);
+ }
+
/* we do not print HAVING which always evaluates to TRUE */
if (having || (having_value == Item::COND_FALSE))
{
@@ -879,88 +872,40 @@ void Explain_select::print_explain_json(Explain_query *query,
}
}
- Filesort_tracker *first_table_sort= NULL;
- bool first_table_sort_used= false;
int started_objects= 0;
+
+ Explain_aggr_node *node= aggr_tree;
- if (is_analyze)
+ for (; node; node= node->child)
{
- /* ANALYZE has collected this part of query plan independently */
- if (ops_tracker.had_varied_executions())
+ switch (node->get_type())
{
- writer->add_member("varied-sort-and-tmp").start_object();
- started_objects++;
- }
- else
- {
- Sort_and_group_tracker::Iterator iter(&ops_tracker);
- enum_qep_action action;
- Filesort_tracker *fs_tracker= NULL;
-
- while ((action= iter.get_next(&fs_tracker)) != EXPL_ACTION_EOF)
+ case AGGR_OP_TEMP_TABLE:
+ writer->add_member("temporary_table").start_object();
+ break;
+ case AGGR_OP_FILESORT:
{
- if (action == EXPL_ACTION_FILESORT)
- {
- if (iter.is_last_element())
- {
- first_table_sort= fs_tracker;
- break;
- }
- writer->add_member("filesort").start_object();
- started_objects++;
- fs_tracker->print_json_members(writer);
- }
- else if (action == EXPL_ACTION_TEMPTABLE)
- {
- writer->add_member("temporary_table").start_object();
- started_objects++;
- /*
- if (tmp == EXPL_TMP_TABLE_BUFFER)
- func= "buffer";
- else if (tmp == EXPL_TMP_TABLE_GROUP)
- func= "group-by";
- else
- func= "distinct";
- writer->add_member("function").add_str(func);
- */
- }
- else if (action == EXPL_ACTION_REMOVE_DUPS)
- {
- writer->add_member("duplicate_removal").start_object();
- started_objects++;
- }
- else
- DBUG_ASSERT(0);
+ writer->add_member("filesort").start_object();
+ ((Explain_aggr_filesort*)node)->print_json_members(writer, is_analyze);
+ break;
}
- }
-
- if (first_table_sort)
- first_table_sort_used= true;
- }
- else
- {
- /* This is just EXPLAIN. Try to produce something meaningful */
- if (using_temporary)
- {
- started_objects= 1;
- if (using_filesort)
+ case AGGR_OP_REMOVE_DUPLICATES:
+ writer->add_member("duplicate_removal").start_object();
+ break;
+ case AGGR_OP_WINDOW_FUNCS:
{
- started_objects++;
- writer->add_member("filesort").start_object();
+ //TODO: make print_json_members virtual?
+ writer->add_member("window_functions_computation").start_object();
+ ((Explain_aggr_window_funcs*)node)->print_json_members(writer, is_analyze);
+ break;
}
- writer->add_member("temporary_table").start_object();
- writer->add_member("function").add_str("buffer");
- }
- else
- {
- if (using_filesort)
- first_table_sort_used= true;
+ default:
+ DBUG_ASSERT(0);
}
+ started_objects++;
}
- Explain_basic_join::print_explain_json_interns(query, writer, is_analyze,
- first_table_sort,
- first_table_sort_used);
+ Explain_basic_join::print_explain_json_interns(query, writer, is_analyze);
for (;started_objects; started_objects--)
writer->end_object();
@@ -973,6 +918,70 @@ void Explain_select::print_explain_json(Explain_query *query,
}
+Explain_aggr_filesort::Explain_aggr_filesort(MEM_ROOT *mem_root,
+ bool is_analyze,
+ Filesort *filesort)
+ : tracker(is_analyze)
+{
+ child= NULL;
+ for (ORDER *ord= filesort->order; ord; ord= ord->next)
+ {
+ sort_items.push_back(ord->item[0], mem_root);
+ sort_directions.push_back(&ord->direction, mem_root);
+ }
+ filesort->tracker= &tracker;
+}
+
+
+void Explain_aggr_filesort::print_json_members(Json_writer *writer,
+ bool is_analyze)
+{
+ char item_buf[256];
+ String str(item_buf, sizeof(item_buf), &my_charset_bin);
+ str.length(0);
+
+ List_iterator_fast<Item> it(sort_items);
+ List_iterator_fast<ORDER::enum_order> it_dir(sort_directions);
+ Item* item;
+ ORDER::enum_order *direction;
+ bool first= true;
+ while ((item= it++))
+ {
+ direction= it_dir++;
+ if (first)
+ first= false;
+ else
+ {
+ str.append(", ");
+ }
+ append_item_to_str(&str, item);
+ if (*direction == ORDER::ORDER_DESC)
+ str.append(" desc");
+ }
+
+ writer->add_member("sort_key").add_str(str.c_ptr_safe());
+
+ if (is_analyze)
+ tracker.print_json_members(writer);
+}
+
+
+void Explain_aggr_window_funcs::print_json_members(Json_writer *writer,
+ bool is_analyze)
+{
+ Explain_aggr_filesort *srt;
+ List_iterator<Explain_aggr_filesort> it(sorts);
+ writer->add_member("sorts").start_object();
+ while ((srt= it++))
+ {
+ writer->add_member("filesort").start_object();
+ srt->print_json_members(writer, is_analyze);
+ writer->end_object(); // filesort
+ }
+ writer->end_object(); // sorts
+}
+
+
void Explain_basic_join::print_explain_json(Explain_query *query,
Json_writer *writer,
bool is_analyze)
@@ -980,7 +989,7 @@ void Explain_basic_join::print_explain_json(Explain_query *query,
writer->add_member("query_block").start_object();
writer->add_member("select_id").add_ll(select_id);
- print_explain_json_interns(query, writer, is_analyze, NULL, false);
+ print_explain_json_interns(query, writer, is_analyze);
writer->end_object();
}
@@ -989,9 +998,7 @@ void Explain_basic_join::print_explain_json(Explain_query *query,
void Explain_basic_join::
print_explain_json_interns(Explain_query *query,
Json_writer *writer,
- bool is_analyze,
- Filesort_tracker *first_table_sort,
- bool first_table_sort_used)
+ bool is_analyze)
{
Json_writer_nesting_guard guard(writer);
for (uint i=0; i< n_join_tabs; i++)
@@ -999,12 +1006,7 @@ print_explain_json_interns(Explain_query *query,
if (join_tabs[i]->start_dups_weedout)
writer->add_member("duplicates_removal").start_object();
- join_tabs[i]->print_explain_json(query, writer, is_analyze,
- first_table_sort,
- first_table_sort_used);
-
- first_table_sort= NULL;
- first_table_sort_used= false;
+ join_tabs[i]->print_explain_json(query, writer, is_analyze);
if (join_tabs[i]->end_dups_weedout)
writer->end_object();
@@ -1296,7 +1298,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
extra_buf.append(STRING_WITH_LEN("Using temporary"));
}
- if (using_filesort)
+ if (using_filesort || this->pre_join_sort)
{
if (first)
first= false;
@@ -1356,6 +1358,15 @@ static void write_item(Json_writer *writer, Item *item)
writer->add_str(str.c_ptr_safe());
}
+static void append_item_to_str(String *out, Item *item)
+{
+ THD *thd= current_thd;
+ ulonglong save_option_bits= thd->variables.option_bits;
+ thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
+
+ item->print(out, QT_EXPLAIN);
+ thd->variables.option_bits= save_option_bits;
+}
void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_tag tag)
{
@@ -1483,25 +1494,14 @@ void add_json_keyset(Json_writer *writer, const char *elem_name,
print_json_array(writer, elem_name, *keyset);
}
-/*
- @param fs_tracker Normally NULL. When not NULL, it means that the join tab
- used filesort to pre-sort the data. Then, sorted data
- was read and the rest of the join was executed.
-
- @note
- EXPLAIN command will check whether fs_tracker is present, but it can't use
- any value from fs_tracker (these are only valid for ANALYZE).
-*/
void Explain_table_access::print_explain_json(Explain_query *query,
Json_writer *writer,
- bool is_analyze,
- Filesort_tracker *fs_tracker,
- bool first_table_sort_used)
+ bool is_analyze)
{
Json_writer_nesting_guard guard(writer);
- if (first_table_sort_used)
+ if (pre_join_sort)
{
/* filesort was invoked on this join tab before doing the join with the rest */
writer->add_member("read_sorted_file").start_object();
@@ -1528,8 +1528,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
}
}
writer->add_member("filesort").start_object();
- if (is_analyze)
- fs_tracker->print_json_members(writer);
+ pre_join_sort->print_json_members(writer, is_analyze);
}
if (bka_type.is_using_jbuf())
@@ -1607,11 +1606,11 @@ void Explain_table_access::print_explain_json(Explain_query *query,
if (is_analyze)
{
writer->add_member("r_rows");
- if (fs_tracker)
+ if (pre_join_sort)
{
/* Get r_rows value from filesort */
- if (fs_tracker->get_r_loops())
- writer->add_double(fs_tracker->get_avg_examined_rows());
+ if (pre_join_sort->tracker.get_r_loops())
+ writer->add_double(pre_join_sort->tracker.get_avg_examined_rows());
else
writer->add_null();
}
@@ -1638,11 +1637,11 @@ void Explain_table_access::print_explain_json(Explain_query *query,
if (is_analyze)
{
writer->add_member("r_filtered");
- if (fs_tracker)
+ if (pre_join_sort)
{
/* Get r_filtered value from filesort */
- if (fs_tracker->get_r_loops())
- writer->add_double(fs_tracker->get_r_filtered()*100);
+ if (pre_join_sort->tracker.get_r_loops())
+ writer->add_double(pre_join_sort->tracker.get_r_filtered()*100);
else
writer->add_null();
}
@@ -1720,7 +1719,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
writer->end_object();
}
- if (first_table_sort_used)
+ if (pre_join_sort)
{
writer->end_object(); // filesort
writer->end_object(); // read_sorted_file
@@ -2461,4 +2460,3 @@ void Explain_range_checked_fer::print_json(Json_writer *writer,
writer->end_object();
}
}
-
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index caacf6b3a2f..489a23f7cda 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -176,9 +176,7 @@ public:
bool is_analyze);
void print_explain_json_interns(Explain_query *query, Json_writer *writer,
- bool is_analyze,
- Filesort_tracker *first_table_sort,
- bool first_table_sort_used);
+ bool is_analyze);
/* A flat array of Explain structs for tables. */
Explain_table_access** join_tabs;
@@ -186,6 +184,7 @@ public:
};
+class Explain_aggr_node;
/*
EXPLAIN structure for a SELECT.
@@ -215,15 +214,9 @@ public:
having(NULL), having_value(Item::COND_UNDEF),
using_temporary(false), using_filesort(false),
time_tracker(is_analyze),
- ops_tracker(is_analyze)
+ aggr_tree(NULL)
{}
- /*
- This is used to save the results of "late" test_if_skip_sort_order() calls
- that are made from JOIN::exec
- */
- void replace_table(uint idx, Explain_table_access *new_tab);
-
public:
#ifndef DBUG_OFF
SELECT_LEX *select_lex;
@@ -238,9 +231,10 @@ public:
/* Expensive constant condition */
Item *exec_const_cond;
+ Item *outer_ref_cond;
/* HAVING condition */
- COND *having;
+ Item *having;
Item::cond_result having_value;
/* Global join attributes. In tabular form, they are printed on the first row */
@@ -249,9 +243,13 @@ public:
/* ANALYZE members */
Time_and_counter_tracker time_tracker;
-
- Sort_and_group_tracker ops_tracker;
+ /*
+ Part of query plan describing sorting, temp.table usage, and duplicate
+ removal
+ */
+ Explain_aggr_node* aggr_tree;
+
int print_explain(Explain_query *query, select_result_sink *output,
uint8 explain_flags, bool is_analyze);
void print_explain_json(Explain_query *query, Json_writer *writer,
@@ -265,6 +263,65 @@ private:
Table_access_tracker using_temporary_read_tracker;
};
+/////////////////////////////////////////////////////////////////////////////
+// EXPLAIN structures for ORDER/GROUP operations.
+/////////////////////////////////////////////////////////////////////////////
+typedef enum
+{
+ AGGR_OP_TEMP_TABLE,
+ AGGR_OP_FILESORT,
+ //AGGR_OP_READ_SORTED_FILE, // need this?
+ AGGR_OP_REMOVE_DUPLICATES,
+ AGGR_OP_WINDOW_FUNCS
+ //AGGR_OP_JOIN // Need this?
+} enum_explain_aggr_node_type;
+
+
+class Explain_aggr_node : public Sql_alloc
+{
+public:
+ virtual enum_explain_aggr_node_type get_type()= 0;
+ virtual ~Explain_aggr_node() {}
+ Explain_aggr_node *child;
+};
+
+class Explain_aggr_filesort : public Explain_aggr_node
+{
+ List<Item> sort_items;
+ List<ORDER::enum_order> sort_directions;
+public:
+ enum_explain_aggr_node_type get_type() { return AGGR_OP_FILESORT; }
+ Filesort_tracker tracker;
+
+ Explain_aggr_filesort(MEM_ROOT *mem_root, bool is_analyze,
+ Filesort *filesort);
+
+ void print_json_members(Json_writer *writer, bool is_analyze);
+};
+
+class Explain_aggr_tmp_table : public Explain_aggr_node
+{
+public:
+ enum_explain_aggr_node_type get_type() { return AGGR_OP_TEMP_TABLE; }
+};
+
+class Explain_aggr_remove_dups : public Explain_aggr_node
+{
+public:
+ enum_explain_aggr_node_type get_type() { return AGGR_OP_REMOVE_DUPLICATES; }
+};
+
+class Explain_aggr_window_funcs : public Explain_aggr_node
+{
+ List<Explain_aggr_filesort> sorts;
+public:
+ enum_explain_aggr_node_type get_type() { return AGGR_OP_WINDOW_FUNCS; }
+
+ void print_json_members(Json_writer *writer, bool is_analyze);
+ friend class Window_funcs_computation;
+};
+
+/////////////////////////////////////////////////////////////////////////////
/*
Explain structure for a UNION.
@@ -277,6 +334,7 @@ class Explain_union : public Explain_node
public:
Explain_union(MEM_ROOT *root, bool is_analyze) :
Explain_node(root),
+ is_recursive_cte(false),
fake_select_lex_explain(root, is_analyze)
{}
@@ -312,6 +370,7 @@ public:
const char *fake_select_type;
bool using_filesort;
bool using_tmp;
+ bool is_recursive_cte;
/*
Explain data structure for "fake_select_lex" (i.e. for the degenerate
@@ -622,7 +681,8 @@ public:
where_cond(NULL),
cache_cond(NULL),
pushed_index_cond(NULL),
- sjm_nest(NULL)
+ sjm_nest(NULL),
+ pre_join_sort(NULL)
{}
~Explain_table_access() { delete sjm_nest; }
@@ -715,6 +775,12 @@ public:
Item *pushed_index_cond;
Explain_basic_join *sjm_nest;
+
+ /*
+ This describes a possible filesort() call that is done before doing the
+ join operation.
+ */
+ Explain_aggr_filesort *pre_join_sort;
/* ANALYZE members */
@@ -728,9 +794,7 @@ public:
uint select_id, const char *select_type,
bool using_temporary, bool using_filesort);
void print_explain_json(Explain_query *query, Json_writer *writer,
- bool is_analyze,
- Filesort_tracker *fs_tracker,
- bool first_table_sort_used);
+ bool is_analyze);
private:
void append_tag_name(String *str, enum explain_extra_tag tag);
diff --git a/sql/sql_get_diagnostics.cc b/sql/sql_get_diagnostics.cc
index 1713cb04ebc..fcda2463e13 100644
--- a/sql/sql_get_diagnostics.cc
+++ b/sql/sql_get_diagnostics.cc
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */
#include "sql_list.h" // Sql_alloc, List, List_iterator
#include "sql_cmd.h" // Sql_cmd
diff --git a/sql/sql_get_diagnostics.h b/sql/sql_get_diagnostics.h
index f34820757f5..6f1652bb146 100644
--- a/sql/sql_get_diagnostics.h
+++ b/sql/sql_get_diagnostics.h
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */
#ifndef SQL_GET_DIAGNOSTICS_H
#define SQL_GET_DIAGNOSTICS_H
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index bc6119b9a9c..e908e703bdd 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2001, 2015, Oracle and/or its affiliates.
- Copyright (c) 2011, 2015, MariaDB
+ Copyright (c) 2011, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -31,7 +31,7 @@
then do { handler_items=concat(handler_items, free_list); free_list=0; }
But !!! do_command calls free_root at the end of every query and frees up
- all the sql_alloc'ed memory. It's harder to work around...
+ all the memory allocated on THD::mem_root. It's harder to work around...
*/
/*
@@ -179,9 +179,8 @@ static void mysql_ha_close_table(SQL_HANDLER *handler)
{
/* Must be a temporary table */
table->file->ha_index_or_rnd_end();
- table->query_id= thd->query_id;
table->open_by_handler= 0;
- mark_tmp_table_for_reuse(table);
+ thd->mark_tmp_table_as_free_for_reuse(table);
}
my_free(handler->lock);
handler->init();
@@ -285,7 +284,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
back-off for such locks.
*/
tables->mdl_request.init(MDL_key::TABLE, tables->db, tables->table_name,
- MDL_SHARED, MDL_TRANSACTION);
+ MDL_SHARED_READ, MDL_TRANSACTION);
mdl_savepoint= thd->mdl_context.mdl_savepoint();
/* for now HANDLER can be used only for real TABLES */
@@ -296,7 +295,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
open_ltable() or open_table() because we would like to be able
to open a temporary table.
*/
- error= (open_temporary_tables(thd, tables) ||
+ error= (thd->open_temporary_tables(tables) ||
open_tables(thd, &tables, &counter, 0));
if (error)
@@ -391,8 +390,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
/*
Assert that the above check prevents opening of views and merge tables.
For temporary tables, TABLE::next can be set even if only one table
- was opened for HANDLER as it is used to link them together
- (see thd->temporary_tables).
+ was opened for HANDLER as it is used to link them together.
*/
DBUG_ASSERT(sql_handler->table->next == NULL ||
sql_handler->table->s->tmp_table);
@@ -459,9 +457,10 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables)
my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
DBUG_RETURN(TRUE);
}
- if ((handler= (SQL_HANDLER*) my_hash_search(&thd->handler_tables_hash,
- (uchar*) tables->alias,
- strlen(tables->alias) + 1)))
+ if ((my_hash_inited(&thd->handler_tables_hash)) &&
+ (handler= (SQL_HANDLER*) my_hash_search(&thd->handler_tables_hash,
+ (uchar*) tables->alias,
+ strlen(tables->alias) + 1)))
{
mysql_ha_close_table(handler);
my_hash_delete(&thd->handler_tables_hash, (uchar*) handler);
@@ -487,56 +486,6 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables)
/**
- A helper class to process an error from mysql_lock_tables().
- HANDLER READ statement's attempt to lock the subject table
- may get aborted if there is a pending DDL. In that case
- we close the table, reopen it, and try to read again.
- This is implicit and obscure, since HANDLER position
- is lost in the process, but it's the legacy server
- behaviour we should preserve.
-*/
-
-class Sql_handler_lock_error_handler: public Internal_error_handler
-{
-public:
- virtual
- bool handle_condition(THD *thd,
- uint sql_errno,
- const char *sqlstate,
- Sql_condition::enum_warning_level level,
- const char* msg,
- Sql_condition **cond_hdl);
-
- bool need_reopen() const { return m_need_reopen; };
- void init() { m_need_reopen= FALSE; };
-private:
- bool m_need_reopen;
-};
-
-
-/**
- Handle an error from mysql_lock_tables().
- Ignore ER_LOCK_ABORTED errors.
-*/
-
-bool
-Sql_handler_lock_error_handler::
-handle_condition(THD *thd,
- uint sql_errno,
- const char *sqlstate,
- Sql_condition::enum_warning_level level,
- const char* msg,
- Sql_condition **cond_hdl)
-{
- *cond_hdl= NULL;
- if (sql_errno == ER_LOCK_ABORTED)
- m_need_reopen= TRUE;
-
- return m_need_reopen;
-}
-
-
-/**
Finds an open HANDLER table.
@params name Name of handler to open
@@ -548,8 +497,10 @@ handle_condition(THD *thd,
SQL_HANDLER *mysql_ha_find_handler(THD *thd, const char *name)
{
SQL_HANDLER *handler;
- if ((handler= (SQL_HANDLER*) my_hash_search(&thd->handler_tables_hash,
- (uchar*) name, strlen(name) + 1)))
+ if ((my_hash_inited(&thd->handler_tables_hash)) &&
+ (handler= (SQL_HANDLER*) my_hash_search(&thd->handler_tables_hash,
+ (uchar*) name,
+ strlen(name) + 1)))
{
DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: %p",
handler->db.str,
@@ -732,7 +683,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
int error, keyno;
uint num_rows;
uchar *UNINIT_VAR(key);
- Sql_handler_lock_error_handler sql_handler_lock_error;
+ MDL_deadlock_and_lock_abort_error_handler sql_handler_lock_error;
DBUG_ENTER("mysql_ha_read");
DBUG_PRINT("enter",("'%s'.'%s' as '%s'",
tables->db, tables->table_name, tables->alias));
@@ -751,11 +702,12 @@ retry:
tables->table= table; // This is used by fix_fields
table->pos_in_table_list= tables;
- if (handler->lock->lock_count > 0)
+ if (handler->lock->table_count > 0)
{
int lock_error;
- handler->lock->locks[0]->type= handler->lock->locks[0]->org_type;
+ if (handler->lock->lock_count > 0)
+ handler->lock->locks[0]->type= handler->lock->locks[0]->org_type;
/* save open_tables state */
TABLE* backup_open_tables= thd->open_tables;
@@ -929,9 +881,6 @@ retry:
}
goto ok;
}
- /* Generate values for virtual fields */
- if (table->vfield)
- update_virtual_fields(thd, table);
if (cond && !cond->val_int())
{
if (thd->is_error())
@@ -1195,10 +1144,10 @@ void mysql_ha_set_explicit_lock_duration(THD *thd)
Remove temporary tables from the HANDLER's hash table. The reason
for having a separate function, rather than calling
mysql_ha_rm_tables() is that it is not always feasible (e.g. in
- close_temporary_tables) to obtain a TABLE_LIST containing the
+ THD::close_temporary_tables) to obtain a TABLE_LIST containing the
temporary tables.
- @See close_temporary_tables
+ @See THD::close_temporary_tables()
@param thd Thread identifier.
*/
void mysql_ha_rm_temporary_tables(THD *thd)
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index d71a6caf813..fc218cb18f2 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -194,7 +194,8 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
DBUG_ENTER("search_topics");
/* Should never happen. As this is part of help, we can ignore this */
- if (init_read_record(&read_record_info, thd, topics, select, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, thd, topics, select, NULL, 1, 0,
+ FALSE))
DBUG_RETURN(0);
while (!read_record_info.read_record(&read_record_info))
@@ -229,14 +230,16 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
2 found more then one topic matching the mask
*/
-int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
+int search_keyword(THD *thd, TABLE *keywords,
+ struct st_find_field *find_fields,
SQL_SELECT *select, int *key_id)
{
int count= 0;
READ_RECORD read_record_info;
DBUG_ENTER("search_keyword");
/* Should never happen. As this is part of help, we can ignore this */
- if (init_read_record(&read_record_info, thd, keywords, select, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, thd, keywords, select, NULL, 1, 0,
+ FALSE))
DBUG_RETURN(0);
while (!read_record_info.read_record(&read_record_info) && count<2)
@@ -370,7 +373,8 @@ int search_categories(THD *thd, TABLE *categories,
DBUG_ENTER("search_categories");
/* Should never happen. As this is part of help, we can ignore this */
- if (init_read_record(&read_record_info, thd, categories, select,1,0,FALSE))
+ if (init_read_record(&read_record_info, thd, categories, select, NULL,
+ 1, 0, FALSE))
DBUG_RETURN(0);
while (!read_record_info.read_record(&read_record_info))
{
@@ -406,7 +410,8 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
DBUG_ENTER("get_all_items_for_category");
/* Should never happen. As this is part of help, we can ignore this */
- if (init_read_record(&read_record_info, thd, items, select,1,0,FALSE))
+ if (init_read_record(&read_record_info, thd, items, select, NULL, 1, 0,
+ FALSE))
DBUG_VOID_RETURN;
while (!read_record_info.read_record(&read_record_info))
@@ -608,7 +613,7 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond,
/* Assume that no indexes cover all required fields */
table->covering_keys.clear_all();
- SQL_SELECT *res= make_select(table, 0, 0, cond, 0, error);
+ SQL_SELECT *res= make_select(table, 0, 0, cond, 0, 0, error);
if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) ||
(res && res->quick && res->quick->reset()))
{
@@ -852,7 +857,7 @@ error2:
bool mysqld_help(THD *thd, const char *mask)
{
- ulonglong sql_mode_backup= thd->variables.sql_mode;
+ sql_mode_t sql_mode_backup= thd->variables.sql_mode;
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
bool rc= mysqld_help_internal(thd, mask);
thd->variables.sql_mode= sql_mode_backup;
diff --git a/sql/sql_hset.h b/sql/sql_hset.h
index dc3bd487ce5..4dfddf898f0 100644
--- a/sql/sql_hset.h
+++ b/sql/sql_hset.h
@@ -32,10 +32,12 @@ public:
Constructs an empty hash. Does not allocate memory, it is done upon
the first insert. Thus does not cause or return errors.
*/
- Hash_set(uchar *(*K)(const T *, size_t *, my_bool))
+ Hash_set(uchar *(*K)(const T *, size_t *, my_bool),
+ CHARSET_INFO *cs= &my_charset_bin)
{
my_hash_clear(&m_hash);
m_hash.get_key= (my_hash_get_key)K;
+ m_hash.charset= cs;
}
/**
Destroy the hash by freeing the buckets table. Does
@@ -56,7 +58,7 @@ public:
*/
bool insert(T *value)
{
- my_hash_init_opt(&m_hash, &my_charset_bin, START_SIZE, 0, 0,
+ my_hash_init_opt(&m_hash, m_hash.charset, START_SIZE, 0, 0,
m_hash.get_key, 0, MYF(0));
size_t key_len;
uchar *v= reinterpret_cast<uchar *>(value);
@@ -65,6 +67,10 @@ public:
return my_hash_insert(&m_hash, v);
return FALSE;
}
+ bool remove(T *value)
+ {
+ return my_hash_delete(&m_hash, reinterpret_cast<uchar*>(value));
+ }
T *find(const void *key, size_t klen) const
{
return (T*)my_hash_search(&m_hash, reinterpret_cast<const uchar *>(key), klen);
@@ -73,6 +79,10 @@ public:
bool is_empty() const { return m_hash.records == 0; }
/** Returns the number of unique elements. */
size_t size() const { return static_cast<size_t>(m_hash.records); }
+ const T* at(size_t i) const
+ {
+ return reinterpret_cast<T*>(my_hash_element(const_cast<HASH*>(&m_hash), i));
+ }
/** An iterator over hash elements. Is not insert-stable. */
class Iterator
{
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 1745c6b4aaa..c623336fdba 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -77,6 +77,8 @@
#include "transaction.h"
#include "sql_audit.h"
#include "sql_derived.h" // mysql_handle_derived
+#include "sql_prepare.h"
+#include <my_bit.h>
#include "debug_sync.h"
@@ -87,7 +89,7 @@ static int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
LEX_STRING query, bool ignore, bool log_on);
static void end_delayed_insert(THD *thd);
pthread_handler_t handle_delayed_insert(void *arg);
-static void unlink_blobs(register TABLE *table);
+static void unlink_blobs(TABLE *table);
#endif
static bool check_view_insertability(THD *thd, TABLE_LIST *view);
@@ -126,6 +128,14 @@ static bool check_view_single_update(List<Item> &fields, List<Item> *values,
while ((item= it++))
tables|= item->used_tables();
+ /*
+ Check that table is only one
+ (we can not rely on check_single_table because it skips some
+ types of tables)
+ */
+ if (my_count_bits(tables) > 1)
+ goto error;
+
if (values)
{
it.init(*values);
@@ -258,7 +268,8 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
if (table_list->is_view())
unfix_fields(fields);
- res= setup_fields(thd, 0, fields, MARK_COLUMNS_WRITE, 0, NULL, 0);
+ res= setup_fields(thd, Ref_ptr_array(),
+ fields, MARK_COLUMNS_WRITE, 0, NULL, 0);
/* Restore the current context. */
ctx_state.restore_state(context, table_list);
@@ -282,12 +293,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dup_field->field_name);
DBUG_RETURN(-1);
}
- if (table->default_field)
- table->mark_default_fields_for_write();
}
- /* Mark virtual columns used in the insert statement */
- if (table->vfield)
- table->mark_virtual_columns_for_write(TRUE);
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
@@ -372,7 +378,8 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
}
/* Check the fields we are going to modify */
- if (setup_fields(thd, 0, update_fields, MARK_COLUMNS_WRITE, 0, NULL, 0))
+ if (setup_fields(thd, Ref_ptr_array(),
+ update_fields, MARK_COLUMNS_WRITE, 0, NULL, 0))
return -1;
if (insert_table_list->is_view() &&
@@ -384,7 +391,7 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
return -1;
if (table->default_field)
- table->mark_default_fields_for_write();
+ table->mark_default_fields_for_write(FALSE);
if (table->found_next_number_field)
{
@@ -691,6 +698,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
bool using_bulk_insert= 0;
uint value_count;
ulong counter = 1;
+ /* counter of iteration in bulk PS operation*/
+ ulonglong iteration= 0;
ulonglong id;
COPY_INFO info;
TABLE *table= 0;
@@ -755,6 +764,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
THD_STAGE_INFO(thd, stage_init);
thd->lex->used_tables=0;
values= its++;
+ if (bulk_parameters_set(thd))
+ DBUG_RETURN(TRUE);
value_count= values->elements;
if (mysql_prepare_insert(thd, table_list, table, fields, values,
@@ -794,7 +805,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter);
goto abort;
}
- if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, NULL, 0))
+ if (setup_fields(thd, Ref_ptr_array(),
+ *values, MARK_COLUMNS_READ, 0, NULL, 0))
goto abort;
switch_to_nullable_trigger_fields(*values, table);
}
@@ -823,6 +835,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
info.update_fields= &update_fields;
info.update_values= &update_values;
info.view= (table_list->view ? table_list : 0);
+ info.table_list= table_list;
/*
Count warnings for all inserts.
@@ -886,6 +899,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->abort_on_warning= !ignore && thd->is_strict_mode();
+ table->reset_default_fields();
table->prepare_triggers_for_insert_stmt_or_event();
table->mark_columns_needed_for_insert();
@@ -907,7 +921,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
table_list->prepare_check_option(thd))
error= 1;
- table->reset_default_fields();
switch_to_nullable_trigger_fields(fields, table);
switch_to_nullable_trigger_fields(update_fields, table);
switch_to_nullable_trigger_fields(update_values, table);
@@ -925,122 +938,130 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
goto values_loop_end;
}
}
-
- while ((values= its++))
+ do
{
- if (fields.elements || !value_count)
+ DBUG_PRINT("info", ("iteration %llu", iteration));
+ if (iteration && bulk_parameters_set(thd))
+ goto abort;
+
+ while ((values= its++))
{
- /*
- There are possibly some default values:
- INSERT INTO t1 (fields) VALUES ...
- INSERT INTO t1 VALUES ()
- */
- restore_record(table,s->default_values); // Get empty record
- if (fill_record_n_invoke_before_triggers(thd, table, fields, *values, 0,
- TRG_EVENT_INSERT))
+ if (fields.elements || !value_count)
{
- if (values_list.elements != 1 && ! thd->is_error())
- {
- info.records++;
- continue;
- }
- /*
- TODO: set thd->abort_on_warning if values_list.elements == 1
- and check that all items return warning in case of problem with
- storing field.
+ /*
+ There are possibly some default values:
+ INSERT INTO t1 (fields) VALUES ...
+ INSERT INTO t1 VALUES ()
*/
- error=1;
- break;
+ restore_record(table,s->default_values); // Get empty record
+ table->reset_default_fields();
+ if (fill_record_n_invoke_before_triggers(thd, table, fields, *values, 0,
+ TRG_EVENT_INSERT))
+ {
+ if (values_list.elements != 1 && ! thd->is_error())
+ {
+ info.records++;
+ continue;
+ }
+ /*
+ TODO: set thd->abort_on_warning if values_list.elements == 1
+ and check that all items return warning in case of problem with
+ storing field.
+ */
+ error=1;
+ break;
+ }
}
- }
- else
- {
- /*
- No field list, all fields are set explicitly:
- INSERT INTO t1 VALUES (values)
- */
- if (thd->lex->used_tables) // Column used in values()
- restore_record(table,s->default_values); // Get empty record
else
{
- TABLE_SHARE *share= table->s;
-
/*
- Fix delete marker. No need to restore rest of record since it will
- be overwritten by fill_record() anyway (and fill_record() does not
- use default values in this case).
+ No field list, all fields are set explicitly:
+ INSERT INTO t1 VALUES (values)
*/
- table->record[0][0]= share->default_values[0];
+ if (thd->lex->used_tables) // Column used in values()
+ restore_record(table,s->default_values); // Get empty record
+ else
+ {
+ TABLE_SHARE *share= table->s;
- /* Fix undefined null_bits. */
- if (share->null_bytes > 1 && share->last_null_bit_pos)
+ /*
+ Fix delete marker. No need to restore rest of record since it will
+ be overwritten by fill_record() anyway (and fill_record() does not
+ use default values in this case).
+ */
+ table->record[0][0]= share->default_values[0];
+
+ /* Fix undefined null_bits. */
+ if (share->null_bytes > 1 && share->last_null_bit_pos)
+ {
+ table->record[0][share->null_bytes - 1]=
+ share->default_values[share->null_bytes - 1];
+ }
+ }
+ table->reset_default_fields();
+ if (fill_record_n_invoke_before_triggers(thd, table,
+ table->field_to_fill(),
+ *values, 0, TRG_EVENT_INSERT))
{
- table->record[0][share->null_bytes - 1]=
- share->default_values[share->null_bytes - 1];
+ if (values_list.elements != 1 && ! thd->is_error())
+ {
+ info.records++;
+ continue;
+ }
+ error=1;
+ break;
}
}
- if (fill_record_n_invoke_before_triggers(thd, table, table->field_to_fill(),
- *values, 0, TRG_EVENT_INSERT))
- {
- if (values_list.elements != 1 && ! thd->is_error())
- {
- info.records++;
- continue;
- }
- error=1;
- break;
- }
- }
- if (table->default_field && table->update_default_fields())
- {
- error= 1;
- break;
- }
- /*
- with triggers a field can get a value *conditionally*, so we have to repeat
- has_no_default_value() check for every row
- */
- if (table->triggers &&
- table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE))
- {
- for (Field **f=table->field ; *f ; f++)
+
+ /*
+ with triggers a field can get a value *conditionally*, so we have to repeat
+ has_no_default_value() check for every row
+ */
+ if (table->triggers &&
+ table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE))
{
- if (!((*f)->flags & HAS_EXPLICIT_VALUE) && has_no_default_value(thd, *f, table_list))
+ for (Field **f=table->field ; *f ; f++)
{
- error= 1;
- goto values_loop_end;
+ if (!(*f)->has_explicit_value() &&
+ has_no_default_value(thd, *f, table_list))
+ {
+ error= 1;
+ goto values_loop_end;
+ }
}
- (*f)->flags &= ~HAS_EXPLICIT_VALUE;
}
- }
- if ((res= table_list->view_check_option(thd,
- (values_list.elements == 1 ?
- 0 :
- ignore))) ==
- VIEW_CHECK_SKIP)
- continue;
- else if (res == VIEW_CHECK_ERROR)
- {
- error= 1;
- break;
- }
+ if ((res= table_list->view_check_option(thd,
+ (values_list.elements == 1 ?
+ 0 :
+ ignore))) ==
+ VIEW_CHECK_SKIP)
+ continue;
+ else if (res == VIEW_CHECK_ERROR)
+ {
+ error= 1;
+ break;
+ }
+
#ifndef EMBEDDED_LIBRARY
- if (lock_type == TL_WRITE_DELAYED)
- {
- LEX_STRING const st_query = { query, thd->query_length() };
- DEBUG_SYNC(thd, "before_write_delayed");
- error=write_delayed(thd, table, duplic, st_query, ignore, log_on);
- DEBUG_SYNC(thd, "after_write_delayed");
- query=0;
- }
- else
+ if (lock_type == TL_WRITE_DELAYED)
+ {
+ LEX_STRING const st_query = { query, thd->query_length() };
+ DEBUG_SYNC(thd, "before_write_delayed");
+ error=write_delayed(thd, table, duplic, st_query, ignore, log_on);
+ DEBUG_SYNC(thd, "after_write_delayed");
+ query=0;
+ }
+ else
#endif
- error=write_record(thd, table ,&info);
- if (error)
- break;
- thd->get_stmt_da()->inc_current_row_for_warning();
- }
+ error=write_record(thd, table ,&info);
+ if (error)
+ break;
+ thd->get_stmt_da()->inc_current_row_for_warning();
+ }
+ its.rewind();
+ iteration++;
+ } while (bulk_parameters_iterations(thd));
values_loop_end:
free_underlaid_joins(thd, &thd->lex->select_lex);
@@ -1187,7 +1208,7 @@ values_loop_end:
retval= thd->lex->explain->send_explain(thd);
goto abort;
}
- if (values_list.elements == 1 && (!(thd->variables.option_bits & OPTION_WARNINGS) ||
+ if ((iteration * values_list.elements) == 1 && (!(thd->variables.option_bits & OPTION_WARNINGS) ||
!thd->cuted_fields))
{
my_ok(thd, info.copied + info.deleted +
@@ -1451,8 +1472,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
bool res= 0;
table_map map= 0;
DBUG_ENTER("mysql_prepare_insert");
- DBUG_PRINT("enter", ("table_list: 0x%lx table: 0x%lx view: %d",
- (ulong)table_list, (ulong)table,
+ DBUG_PRINT("enter", ("table_list: %p table: %p view: %d",
+ table_list, table,
(int)insert_into_view));
/* INSERT should have a SELECT or VALUES clause */
DBUG_ASSERT (!select_insert || !values);
@@ -1509,12 +1530,14 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
table_list->next_local= 0;
context->resolve_in_table_list_only(table_list);
- res= (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, NULL, 0) ||
+ res= (setup_fields(thd, Ref_ptr_array(),
+ *values, MARK_COLUMNS_READ, 0, NULL, 0) ||
check_insert_fields(thd, context->table_list, fields, *values,
!insert_into_view, 0, &map));
if (!res)
- res= setup_fields(thd, 0, update_values, MARK_COLUMNS_READ, 0, NULL, 0);
+ res= setup_fields(thd, Ref_ptr_array(),
+ update_values, MARK_COLUMNS_READ, 0, NULL, 0);
if (!res && duplic == DUP_UPDATE)
{
@@ -1534,18 +1557,6 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
if (!table)
table= table_list->table;
- if (!fields.elements && table->vfield)
- {
- for (Field **vfield_ptr= table->vfield; *vfield_ptr; vfield_ptr++)
- {
- if ((*vfield_ptr)->stored_in_db)
- {
- thd->lex->unit.insert_table_with_stored_vcol= table;
- break;
- }
- }
- }
-
if (!select_insert)
{
Item *fake_conds= 0;
@@ -1721,6 +1732,16 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
HA_READ_KEY_EXACT))))
goto err;
}
+ if (table->vfield)
+ {
+ /*
+ We have not yet called update_virtual_fields(VOL_UPDATE_FOR_READ)
+ in handler methods for the just read row in record[1].
+ */
+ table->move_fields(table->field, table->record[1], table->record[0]);
+ table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_REPLACE);
+ table->move_fields(table->field, table->record[0], table->record[1]);
+ }
if (info->handle_duplicates == DUP_UPDATE)
{
int res= 0;
@@ -1732,16 +1753,17 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
DBUG_ASSERT(table->insert_values != NULL);
store_record(table,insert_values);
restore_record(table,record[1]);
+ table->reset_default_fields();
/*
in INSERT ... ON DUPLICATE KEY UPDATE the set of modified fields can
change per row. Thus, we have to do reset_default_fields() per row.
Twice (before insert and before update).
*/
- table->reset_default_fields();
DBUG_ASSERT(info->update_fields->elements ==
info->update_values->elements);
- if (fill_record_n_invoke_before_triggers(thd, table, *info->update_fields,
+ if (fill_record_n_invoke_before_triggers(thd, table,
+ *info->update_fields,
*info->update_values,
info->ignore,
TRG_EVENT_UPDATE))
@@ -1758,20 +1780,13 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
*/
if (different_records && table->default_field)
{
- bool res;
- enum_sql_command cmd= thd->lex->sql_command;
- thd->lex->sql_command= SQLCOM_UPDATE;
- res= table->update_default_fields();
- thd->lex->sql_command= cmd;
- if (res)
+ if (table->update_default_fields(1, info->ignore))
goto err;
}
- table->reset_default_fields();
/* CHECK OPTION for VIEW ... ON DUPLICATE KEY UPDATE ... */
- if (info->view &&
- (res= info->view->view_check_option(current_thd, info->ignore)) ==
- VIEW_CHECK_SKIP)
+ res= info->table_list->view_check_option(table->in_use, info->ignore);
+ if (res == VIEW_CHECK_SKIP)
goto ok_or_after_trg_err;
if (res == VIEW_CHECK_ERROR)
goto before_trg_err;
@@ -1981,7 +1996,7 @@ public:
enum_duplicates dup;
my_time_t start_time;
ulong start_time_sec_part;
- ulonglong sql_mode;
+ sql_mode_t sql_mode;
bool auto_increment_field_not_null;
bool query_start_used, ignore, log_query, query_start_sec_part_used;
bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
@@ -2045,7 +2060,8 @@ public:
MDL_request grl_protection;
Delayed_insert(SELECT_LEX *current_select)
- :locks_in_memory(0), table(0),tables_in_use(0),stacked_inserts(0),
+ :locks_in_memory(0), thd(next_thread_id()),
+ table(0),tables_in_use(0), stacked_inserts(0),
status(0), retry(0), handler_thread_initialized(FALSE), group_count(0)
{
DBUG_ENTER("Delayed_insert constructor");
@@ -2089,17 +2105,23 @@ public:
close_thread_tables(&thd);
thd.mdl_context.release_transactional_locks();
}
- mysql_mutex_lock(&LOCK_thread_count);
mysql_mutex_destroy(&mutex);
mysql_cond_destroy(&cond);
mysql_cond_destroy(&cond_client);
+
+ /*
+ We could use unlink_not_visible_threads() here, but as
+ delayed_insert_threads also needs to be protected by
+ the LOCK_thread_count mutex, we open code this.
+ */
+ mysql_mutex_lock(&LOCK_thread_count);
thd.unlink(); // Must be unlinked under lock
- my_free(thd.query());
- thd.security_ctx->user= thd.security_ctx->host=0;
delayed_insert_threads--;
mysql_mutex_unlock(&LOCK_thread_count);
- thread_safe_decrement32(&thread_count);
- mysql_cond_broadcast(&COND_thread_count); /* Tell main we are ready */
+
+ my_free(thd.query());
+ thd.security_ctx->user= 0;
+ thd.security_ctx->host= 0;
}
/* The following is for checking when we can delete ourselves */
@@ -2234,8 +2256,6 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
if (!(di= new Delayed_insert(thd->lex->current_select)))
goto end_create;
- thread_safe_increment32(&thread_count);
-
/*
Annotating delayed inserts is not supported.
*/
@@ -2339,6 +2359,12 @@ end_create:
DBUG_RETURN(thd->is_error());
}
+#define memdup_vcol(thd, vcol) \
+ if (vcol) \
+ { \
+ (vcol)= (Virtual_column_info*)(thd)->memdup((vcol), sizeof(*(vcol))); \
+ (vcol)->expr= NULL; \
+ }
/**
As we can't let many client threads modify the same TABLE
@@ -2360,11 +2386,11 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
{
my_ptrdiff_t adjust_ptrs;
Field **field,**org_field, *found_next_number_field;
- Field **UNINIT_VAR(vfield), **UNINIT_VAR(dfield_ptr);
TABLE *copy;
TABLE_SHARE *share;
uchar *bitmap;
char *copy_tmp;
+ uint bitmaps_used;
DBUG_ENTER("Delayed_insert::get_local_table");
/* First request insert thread to get a lock */
@@ -2418,35 +2444,39 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
copy_tmp= (char*) client_thd->alloc(sizeof(*copy)+
(share->fields+1)*sizeof(Field**)+
share->reclength +
- share->column_bitmap_size*3);
+ share->column_bitmap_size*4);
if (!copy_tmp)
goto error;
- if (share->vfields)
- {
- vfield= (Field **) client_thd->alloc((share->vfields+1)*sizeof(Field*));
- if (!vfield)
- goto error;
- }
-
/* Copy the TABLE object. */
copy= new (copy_tmp) TABLE;
*copy= *table;
+
/* We don't need to change the file handler here */
/* Assign the pointers for the field pointers array and the record. */
field= copy->field= (Field**) (copy + 1);
bitmap= (uchar*) (field + share->fields + 1);
- copy->record[0]= (bitmap + share->column_bitmap_size*3);
+ copy->record[0]= (bitmap + share->column_bitmap_size*4);
memcpy((char*) copy->record[0], (char*) table->record[0], share->reclength);
- if (share->default_fields)
+ if (share->default_fields || share->default_expressions)
{
- copy->default_field= (Field**) client_thd->alloc((share->default_fields+1)*
- sizeof(Field**));
+ copy->default_field= (Field**)
+ client_thd->alloc((share->default_fields +
+ share->default_expressions + 1)*
+ sizeof(Field*));
if (!copy->default_field)
goto error;
- dfield_ptr= copy->default_field;
}
+ if (share->virtual_fields)
+ {
+ copy->vfield= (Field **) client_thd->alloc((share->virtual_fields+1)*
+ sizeof(Field*));
+ if (!copy->vfield)
+ goto error;
+ }
+ copy->expr_arena= NULL;
+
/* Ensure we don't use the table list of the original table */
copy->pos_in_table_list= 0;
@@ -2461,51 +2491,32 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
found_next_number_field= table->found_next_number_field;
for (org_field= table->field; *org_field; org_field++, field++)
{
- if (!(*field= (*org_field)->make_new_field(client_thd->mem_root, copy,
- 1)))
+ if (!(*field= (*org_field)->make_new_field(client_thd->mem_root, copy, 1)))
goto error;
+ (*field)->unireg_check= (*org_field)->unireg_check;
(*field)->orig_table= copy; // Remove connection
(*field)->move_field_offset(adjust_ptrs); // Point at copy->record[0]
+ memdup_vcol(client_thd, (*field)->vcol_info);
+ memdup_vcol(client_thd, (*field)->default_value);
+ memdup_vcol(client_thd, (*field)->check_constraint);
if (*org_field == found_next_number_field)
(*field)->table->found_next_number_field= *field;
- if (share->default_fields &&
- ((*org_field)->has_insert_default_function() ||
- (*org_field)->has_update_default_function()))
- {
- /* Put the newly copied field into the set of default fields. */
- *dfield_ptr= *field;
- (*dfield_ptr)->unireg_check= (*org_field)->unireg_check;
- dfield_ptr++;
- }
}
*field=0;
- if (share->vfields)
+ if (share->virtual_fields || share->default_expressions ||
+ share->default_fields)
{
+ bool error_reported= FALSE;
if (!(copy->def_vcol_set= (MY_BITMAP*) alloc_root(client_thd->mem_root,
sizeof(MY_BITMAP))))
goto error;
- copy->vfield= vfield;
- for (field= copy->field; *field; field++)
- {
- if ((*field)->vcol_info)
- {
- bool error_reported= FALSE;
- if (unpack_vcol_info_from_frm(client_thd,
- client_thd->mem_root,
- copy,
- *field,
- &(*field)->vcol_info->expr_str,
- &error_reported))
- goto error;
- *vfield++= *field;
- }
- }
- *vfield= 0;
+
+ if (parse_vcol_defs(client_thd, client_thd->mem_root, copy, &error_reported))
+ goto error;
}
- if (share->default_fields)
- *dfield_ptr= NULL;
+ switch_defaults_to_nullable_trigger_fields(copy);
/* Adjust in_use for pointing to client thread */
copy->in_use= client_thd;
@@ -2517,15 +2528,25 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
copy->def_read_set.bitmap= (my_bitmap_map*) bitmap;
copy->def_write_set.bitmap= ((my_bitmap_map*)
(bitmap + share->column_bitmap_size));
- if (share->vfields)
+ bitmaps_used= 2;
+ if (share->virtual_fields)
{
my_bitmap_init(copy->def_vcol_set,
- (my_bitmap_map*) (bitmap + 2*share->column_bitmap_size),
+ (my_bitmap_map*) (bitmap +
+ bitmaps_used*share->column_bitmap_size),
share->fields, FALSE);
+ bitmaps_used++;
copy->vcol_set= copy->def_vcol_set;
}
+ if (share->default_fields || share->default_expressions)
+ {
+ my_bitmap_init(&copy->has_value_set,
+ (my_bitmap_map*) (bitmap +
+ bitmaps_used*share->column_bitmap_size),
+ share->fields, FALSE);
+ }
copy->tmp_set.bitmap= 0; // To catch errors
- bzero((char*) bitmap, share->column_bitmap_size * (share->vfields ? 3 : 2));
+ bzero((char*) bitmap, share->column_bitmap_size * bitmaps_used);
copy->read_set= &copy->def_read_set;
copy->write_set= &copy->def_write_set;
@@ -2795,11 +2816,11 @@ bool Delayed_insert::open_and_lock_table()
return TRUE;
}
- if (table->triggers)
+ if (table->triggers || table->check_constraints)
{
/*
- Table has triggers. This is not an error, but we do
- not support triggers with delayed insert. Terminate the delayed
+ Table has triggers or check constraints. This is not an error, but we do
+ not support these with delayed insert. Terminate the delayed
thread without an error and thus request lock upgrade.
*/
return TRUE;
@@ -2820,15 +2841,12 @@ pthread_handler_t handle_delayed_insert(void *arg)
pthread_detach_this_thread();
/* Add thread to THD list so that's it's visible in 'show processlist' */
- mysql_mutex_lock(&LOCK_thread_count);
- thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
thd->set_current_time();
- threads.append(thd);
+ add_to_active_threads(thd);
if (abort_loop)
thd->set_killed(KILL_CONNECTION);
else
thd->reset_killed();
- mysql_mutex_unlock(&LOCK_thread_count);
mysql_thread_set_psi_id(thd->thread_id);
@@ -2912,6 +2930,8 @@ pthread_handler_t handle_delayed_insert(void *arg)
thd->mdl_context.set_needs_thr_lock_abort(TRUE);
di->table->mark_columns_needed_for_insert();
+ /* Mark all columns for write as we don't know which columns we get from user */
+ bitmap_set_all(di->table->write_set);
/* Now wait until we get an insert or lock to handle */
/* We will not abort as long as a client thread uses this thread */
@@ -3079,9 +3099,9 @@ pthread_handler_t handle_delayed_insert(void *arg)
}
-/* Remove pointers from temporary fields to allocated values */
+/* Remove all pointers to data for blob fields so that original table doesn't try to free them */
-static void unlink_blobs(register TABLE *table)
+static void unlink_blobs(TABLE *table)
{
for (Field **ptr=table->field ; *ptr ; ptr++)
{
@@ -3092,16 +3112,28 @@ static void unlink_blobs(register TABLE *table)
/* Free blobs stored in current row */
-static void free_delayed_insert_blobs(register TABLE *table)
+static void free_delayed_insert_blobs(TABLE *table)
+{
+ for (Field **ptr=table->field ; *ptr ; ptr++)
+ {
+ if ((*ptr)->flags & BLOB_FLAG)
+ ((Field_blob *) *ptr)->free();
+ }
+}
+
+
+/* set value field for blobs to point to data in record */
+
+static void set_delayed_insert_blobs(TABLE *table)
{
for (Field **ptr=table->field ; *ptr ; ptr++)
{
if ((*ptr)->flags & BLOB_FLAG)
{
- uchar *str;
- ((Field_blob *) (*ptr))->get_ptr(&str);
- my_free(str);
- ((Field_blob *) (*ptr))->reset();
+ Field_blob *blob= ((Field_blob *) *ptr);
+ uchar *data= blob->get_ptr();
+ if (data)
+ blob->set_value(data); // Set value.ptr() to point to data
}
}
}
@@ -3156,9 +3188,12 @@ bool Delayed_insert::handle_inserts(void)
while ((row=rows.get()))
{
+ int tmp_error;
stacked_inserts--;
mysql_mutex_unlock(&mutex);
memcpy(table->record[0],row->record,table->s->reclength);
+ if (table->s->blob_fields)
+ set_delayed_insert_blobs(table);
thd.start_time=row->start_time;
thd.query_start_used=row->query_start_used;
@@ -3229,7 +3264,20 @@ bool Delayed_insert::handle_inserts(void)
if (info.handle_duplicates == DUP_UPDATE)
table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
thd.clear_error(); // reset error for binlog
- if (write_record(&thd, table, &info))
+
+ tmp_error= 0;
+ if (table->vfield)
+ {
+ /*
+ Virtual fields where not calculated by caller as the temporary
+ TABLE object used had vcol_set empty. Better to calculate them
+ here to make the caller faster.
+ */
+ tmp_error= table->update_virtual_fields(table->file,
+ VCOL_UPDATE_FOR_WRITE);
+ }
+
+ if (tmp_error || write_record(&thd, table, &info))
{
info.error_count++; // Ignore errors
thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status);
@@ -3350,6 +3398,7 @@ bool Delayed_insert::handle_inserts(void)
if (table->s->blob_fields)
{
memcpy(table->record[0],row->record,table->s->reclength);
+ set_delayed_insert_blobs(table);
free_delayed_insert_blobs(table);
}
delete row;
@@ -3453,8 +3502,8 @@ select_insert::select_insert(THD *thd_arg, TABLE_LIST *table_list_par,
info.ignore= ignore_check_option_errors;
info.update_fields= update_fields;
info.update_values= update_values;
- if (table_list_par)
- info.view= (table_list_par->view ? table_list_par : 0);
+ info.view= (table_list_par->view ? table_list_par : 0);
+ info.table_list= table_list_par;
}
@@ -3476,7 +3525,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
*/
lex->current_select= &lex->select_lex;
- res= (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, NULL, 0) ||
+ res= (setup_fields(thd, Ref_ptr_array(),
+ values, MARK_COLUMNS_READ, 0, NULL, 0) ||
check_insert_fields(thd, table_list, *fields, values,
!insert_into_view, 1, &map));
@@ -3528,7 +3578,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table_list->next_name_resolution_table=
ctx_state.get_first_name_resolution_table();
- res= res || setup_fields(thd, 0, *info.update_values,
+ res= res || setup_fields(thd, Ref_ptr_array(), *info.update_values,
MARK_COLUMNS_READ, 0, NULL, 0);
if (!res)
{
@@ -3657,7 +3707,7 @@ void select_insert::cleanup()
select_insert::~select_insert()
{
DBUG_ENTER("~select_insert");
- if (table && table->created)
+ if (table && table->is_created())
{
table->next_number_field=0;
table->auto_increment_field_not_null= FALSE;
@@ -3684,7 +3734,7 @@ int select_insert::send_data(List<Item> &values)
thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields
store_values(values);
- if (table->default_field && table->update_default_fields())
+ if (table->default_field && table->update_default_fields(0, info.ignore))
DBUG_RETURN(1);
thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;
if (thd->is_error())
@@ -3702,9 +3752,6 @@ int select_insert::send_data(List<Item> &values)
}
}
- // Release latches in case bulk insert takes a long time
- ha_release_temporary_latches(thd);
-
error= write_record(thd, table, &info);
table->auto_increment_field_not_null= FALSE;
@@ -3992,7 +4039,6 @@ static TABLE *create_table_from_items(THD *thd,
Item *item;
DBUG_ENTER("create_table_from_items");
- tmp_table.alias= 0;
tmp_table.s= &share;
init_tmp_table_share(thd, &share, "", 0, "", "");
@@ -4105,7 +4151,12 @@ static TABLE *create_table_from_items(THD *thd,
}
else
{
- if (open_temporary_table(thd, create_table))
+ /*
+ The pointer to the newly created temporary table has been stored in
+ table->create_info.
+ */
+ create_table->table= create_info->table;
+ if (!create_table->table)
{
/*
This shouldn't happen as creation of temporary table should make
@@ -4114,7 +4165,6 @@ static TABLE *create_table_from_items(THD *thd,
*/
DBUG_ASSERT(0);
}
- DBUG_ASSERT(create_table->table == create_info->table);
}
}
else
@@ -4249,6 +4299,18 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
/* abort() deletes table */
DBUG_RETURN(-1);
+ if (create_info->tmp_table())
+ {
+ /*
+ When the temporary table was created & opened in create_table_impl(),
+ the table's TABLE_SHARE (and thus TABLE) object was also linked to THD
+ temporary tables lists. So, we must temporarily remove it from the
+ list to keep them inaccessible from inner statements.
+ e.g. CREATE TEMPORARY TABLE `t1` AS SELECT * FROM `t1`;
+ */
+ saved_tmp_table_share= thd->save_tmp_table_share(create_table->table);
+ }
+
if (extra_lock)
{
DBUG_ASSERT(m_plock == NULL);
@@ -4371,6 +4433,27 @@ bool select_create::send_eof()
DBUG_RETURN(true);
}
+ if (table->s->tmp_table)
+ {
+ /*
+ Now is good time to add the new table to THD temporary tables list.
+ But, before that we need to check if same table got created by the sub-
+ statement.
+ */
+ if (thd->find_tmp_table_share(table->s->table_cache_key.str,
+ table->s->table_cache_key.length))
+ {
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table->alias.c_ptr());
+ abort_result_set();
+ DBUG_RETURN(true);
+ }
+ else
+ {
+ DBUG_ASSERT(saved_tmp_table_share);
+ thd->restore_tmp_table_share(saved_tmp_table_share);
+ }
+ }
+
/*
Do an implicit commit at end of statement for non-temporary
tables. This can fail, but we should unlock the table
@@ -4419,8 +4502,9 @@ bool select_create::send_eof()
mysql_mutex_lock(&thd->LOCK_thd_data);
if (thd->wsrep_conflict_state != NO_CONFLICT)
{
- WSREP_DEBUG("select_create commit failed, thd: %lu err: %d %s",
- thd->thread_id, thd->wsrep_conflict_state, thd->query());
+ WSREP_DEBUG("select_create commit failed, thd: %lld err: %d %s",
+ (longlong) thd->thread_id, thd->wsrep_conflict_state,
+ thd->query());
mysql_mutex_unlock(&thd->LOCK_thd_data);
abort_result_set();
DBUG_RETURN(true);
@@ -4526,6 +4610,13 @@ void select_create::abort_result_set()
if (table)
{
bool tmp_table= table->s->tmp_table;
+
+ if (tmp_table)
+ {
+ DBUG_ASSERT(saved_tmp_table_share);
+ thd->restore_tmp_table_share(saved_tmp_table_share);
+ }
+
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
table->auto_increment_field_not_null= FALSE;
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index 4b7667f1319..3612cb6cc32 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -281,8 +281,7 @@ void JOIN_CACHE::collect_info_on_key_args()
Item *ref_item= ref->items[i];
if (!(tab->table->map & ref_item->used_tables()))
continue;
- ref_item->walk(&Item::add_field_to_set_processor, 1,
- (uchar *) tab->table);
+ ref_item->walk(&Item::add_field_to_set_processor, 1, tab->table);
}
if ((key_args= bitmap_bits_set(&tab->table->tmp_set)))
{
@@ -407,7 +406,7 @@ void JOIN_CACHE::create_flag_fields()
}
/* Theoretically the new value of flag_fields can be less than the old one */
- flag_fields= copy-field_descr;
+ flag_fields= (uint)(copy-field_descr);
}
@@ -589,11 +588,6 @@ void JOIN_CACHE::create_remaining_fields()
{
MY_BITMAP *rem_field_set;
TABLE *table= tab->table;
-#if MYSQL_VERSION_ID < 100204
- empty_record(table);
-#else
-#error remove
-#endif
if (all_read_fields)
rem_field_set= table->read_set;
@@ -701,7 +695,7 @@ void JOIN_CACHE::set_constants()
pack_length_with_blob_ptrs= pack_length + blobs*sizeof(uchar *);
min_buff_size= 0;
min_records= 1;
- buff_size= MY_MAX(join->thd->variables.join_buff_size,
+ buff_size= (size_t)MY_MAX(join->thd->variables.join_buff_size,
get_min_join_buffer_size());
size_of_rec_ofs= offset_size(buff_size);
size_of_rec_len= blobs ? size_of_rec_ofs : offset_size(len);
@@ -846,7 +840,7 @@ ulong JOIN_CACHE::get_max_join_buffer_size(bool optimize_buff_size)
len+= get_max_key_addon_space_per_record() + avg_aux_buffer_incr;
space_per_record= len;
- size_t limit_sz= join->thd->variables.join_buff_size;
+ size_t limit_sz= (size_t)join->thd->variables.join_buff_size;
if (join_tab->join_buffer_size_limit)
set_if_smaller(limit_sz, join_tab->join_buffer_size_limit);
if (!optimize_buff_size)
@@ -1310,7 +1304,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
uint blob_len= blob_field->get_length();
(*copy_ptr)->blob_length= blob_len;
len+= blob_len;
- blob_field->get_ptr(&(*copy_ptr)->str);
+ (*copy_ptr)->str= blob_field->get_ptr();
}
}
}
@@ -1380,9 +1374,10 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
}
/* Save the offset of the field to put it later at the end of the record */
if (copy->referenced_field_no)
- copy->offset= cp-curr_rec_pos;
+ copy->offset= (uint)(cp-curr_rec_pos);
- if (copy->type == CACHE_BLOB)
+ switch (copy->type) {
+ case CACHE_BLOB:
{
Field_blob *blob_field= (Field_blob *) copy->field;
if (last_record)
@@ -1403,69 +1398,66 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
memcpy(cp+copy->length, copy->str, copy->blob_length);
cp+= copy->length+copy->blob_length;
}
+ break;
}
- else
+ case CACHE_VARSTR1:
+ /* Copy the significant part of the short varstring field */
+ len= (uint) copy->str[0] + 1;
+ DBUG_ASSERT(cp + len <= buff + buff_size);
+ memcpy(cp, copy->str, len);
+ cp+= len;
+ break;
+ case CACHE_VARSTR2:
+ /* Copy the significant part of the long varstring field */
+ len= uint2korr(copy->str) + 2;
+ DBUG_ASSERT(cp + len <= buff + buff_size);
+ memcpy(cp, copy->str, len);
+ cp+= len;
+ break;
+ case CACHE_STRIPPED:
{
- switch (copy->type) {
- case CACHE_VARSTR1:
- /* Copy the significant part of the short varstring field */
- len= (uint) copy->str[0] + 1;
- DBUG_ASSERT(cp + len <= buff + buff_size);
- memcpy(cp, copy->str, len);
- cp+= len;
- break;
- case CACHE_VARSTR2:
- /* Copy the significant part of the long varstring field */
- len= uint2korr(copy->str) + 2;
- DBUG_ASSERT(cp + len <= buff + buff_size);
- memcpy(cp, copy->str, len);
- cp+= len;
- break;
- case CACHE_STRIPPED:
+ /*
+ Put down the field value stripping all trailing spaces off.
+ After this insert the length of the written sequence of bytes.
+ */
+ uchar *str, *end;
+ for (str= copy->str, end= str+copy->length;
+ end > str && end[-1] == ' ';
+ end--) ;
+ len=(uint) (end-str);
+ DBUG_ASSERT(cp + len + 2 <= buff + buff_size);
+ int2store(cp, len);
+ memcpy(cp+2, str, len);
+ cp+= len+2;
+ break;
+ }
+ case CACHE_ROWID:
+ if (!copy->length)
{
- /*
- Put down the field value stripping all trailing spaces off.
- After this insert the length of the written sequence of bytes.
- */
- uchar *str, *end;
- for (str= copy->str, end= str+copy->length;
- end > str && end[-1] == ' ';
- end--) ;
- len=(uint) (end-str);
- DBUG_ASSERT(cp + len + 2 <= buff + buff_size);
- int2store(cp, len);
- memcpy(cp+2, str, len);
- cp+= len+2;
- break;
- }
- case CACHE_ROWID:
- if (!copy->length)
- {
+ /*
+ This may happen only for ROWID fields of materialized
+ derived tables and views.
+ */
+ TABLE *table= (TABLE *) copy->str;
+ copy->str= table->file->ref;
+ copy->length= table->file->ref_length;
+ if (!copy->str)
+ {
/*
- This may happen only for ROWID fields of materialized
- derived tables and views.
- */
- TABLE *table= (TABLE *) copy->str;
- copy->str= table->file->ref;
- copy->length= table->file->ref_length;
- if (!copy->str)
- {
- /*
- If table is an empty inner table of an outer join and it is
- a materialized derived table then table->file->ref == NULL.
- */
- cp+= copy->length;
- break;
- }
+ If table is an empty inner table of an outer join and it is
+ a materialized derived table then table->file->ref == NULL.
+ */
+ cp+= copy->length;
+ break;
}
- /* fall through */
- default:
- /* Copy the entire image of the field from the record buffer */
- DBUG_ASSERT(cp + copy->length <= buff + buff_size);
- if (copy->str)
- memcpy(cp, copy->str, copy->length);
- cp+= copy->length;
}
+ /* fall through */
+ default:
+ /* Copy the entire image of the field from the record buffer */
+ DBUG_ASSERT(cp + copy->length <= buff + buff_size);
+ if (copy->str)
+ memcpy(cp, copy->str, copy->length);
+ cp+= copy->length;
}
}
@@ -1723,7 +1715,7 @@ uint JOIN_CACHE::aux_buffer_incr(ulong recno)
The function reads all flag and data fields of a record from the join
buffer into the corresponding record buffers.
The fields are read starting from the position 'pos' which is
- supposed to point to the beginning og the first record field.
+ supposed to point to the beginning of the first record field.
The function increments the value of 'pos' by the length of the
read data.
@@ -1786,7 +1778,7 @@ uint JOIN_CACHE::read_flag_fields()
memcpy(copy->str, pos, copy->length);
pos+= copy->length;
}
- return (pos-init_pos);
+ return (uint)(pos-init_pos);
}
@@ -1814,60 +1806,58 @@ uint JOIN_CACHE::read_flag_fields()
uint JOIN_CACHE::read_record_field(CACHE_FIELD *copy, bool blob_in_rec_buff)
{
uint len;
- /* Do not copy the field if its value is null */
+ /* Do not copy the field if its value is null */
if (copy->field && copy->field->maybe_null() && copy->field->is_null())
- return 0;
- if (copy->type == CACHE_BLOB)
- {
- Field_blob *blob_field= (Field_blob *) copy->field;
- /*
- Copy the length and the pointer to data but not the blob data
- itself to the record buffer
- */
- if (blob_in_rec_buff)
- {
- blob_field->set_image(pos, copy->length+sizeof(char*),
- blob_field->charset());
- len= copy->length+sizeof(char*);
- }
- else
+ return 0;
+ switch (copy->type) {
+ case CACHE_BLOB:
{
- blob_field->set_ptr(pos, pos+copy->length);
- len= copy->length+blob_field->get_length();
- }
- }
- else
- {
- switch (copy->type) {
- case CACHE_VARSTR1:
- /* Copy the significant part of the short varstring field */
- len= (uint) pos[0] + 1;
- memcpy(copy->str, pos, len);
- break;
- case CACHE_VARSTR2:
- /* Copy the significant part of the long varstring field */
- len= uint2korr(pos) + 2;
- memcpy(copy->str, pos, len);
- break;
- case CACHE_STRIPPED:
- /* Pad the value by spaces that has been stripped off */
- len= uint2korr(pos);
- memcpy(copy->str, pos+2, len);
- memset(copy->str+len, ' ', copy->length-len);
- len+= 2;
- break;
- case CACHE_ROWID:
- if (!copy->str)
+ Field_blob *blob_field= (Field_blob *) copy->field;
+ /*
+ Copy the length and the pointer to data but not the blob data
+ itself to the record buffer
+ */
+ if (blob_in_rec_buff)
{
- len= copy->length;
- break;
+ blob_field->set_image(pos, copy->length + sizeof(char*),
+ blob_field->charset());
+ len= copy->length + sizeof(char*);
}
- /* fall through */
- default:
- /* Copy the entire image of the field from the record buffer */
+ else
+ {
+ blob_field->set_ptr(pos, pos+copy->length);
+ len= copy->length + blob_field->get_length();
+ }
+ }
+ break;
+ case CACHE_VARSTR1:
+ /* Copy the significant part of the short varstring field */
+ len= (uint) pos[0] + 1;
+ memcpy(copy->str, pos, len);
+ break;
+ case CACHE_VARSTR2:
+ /* Copy the significant part of the long varstring field */
+ len= uint2korr(pos) + 2;
+ memcpy(copy->str, pos, len);
+ break;
+ case CACHE_STRIPPED:
+ /* Pad the value by spaces that has been stripped off */
+ len= uint2korr(pos);
+ memcpy(copy->str, pos+2, len);
+ memset(copy->str+len, ' ', copy->length-len);
+ len+= 2;
+ break;
+ case CACHE_ROWID:
+ if (!copy->str)
+ {
len= copy->length;
- memcpy(copy->str, pos, len);
+ break;
}
+ /* fall through */
+ default:
+ /* Copy the entire image of the field from the record buffer */
+ len= copy->length;
+ memcpy(copy->str, pos, len);
}
pos+= len;
return len;
@@ -2724,7 +2714,7 @@ int JOIN_CACHE_HASHED::init(bool for_explain)
data_fields_offset+= copy->length;
}
- DBUG_RETURN(rc);
+ DBUG_RETURN(0);
}
@@ -3377,7 +3367,6 @@ int JOIN_TAB_SCAN::next()
int skip_rc;
READ_RECORD *info= &join_tab->read_record;
SQL_SELECT *select= join_tab->cache_select;
- TABLE *table= join_tab->table;
THD *thd= join->thd;
if (is_first_record)
@@ -3388,8 +3377,6 @@ int JOIN_TAB_SCAN::next()
if (!err)
{
join_tab->tracker->r_rows++;
- if (table->vfield)
- update_virtual_fields(thd, table);
}
while (!err && select && (skip_rc= select->skip_record(thd)) <= 0)
@@ -3404,8 +3391,6 @@ int JOIN_TAB_SCAN::next()
if (!err)
{
join_tab->tracker->r_rows++;
- if (table->vfield)
- update_virtual_fields(thd, table);
}
}
@@ -3843,7 +3828,7 @@ uint JOIN_TAB_SCAN_MRR::aux_buffer_incr(ulong recno)
set_if_bigger(rec_per_key, 1);
if (recno == 1)
incr= ref->key_length + tab->file->ref_length;
- incr+= tab->file->stats.mrr_length_per_rec * rec_per_key;
+ incr+= (uint)(tab->file->stats.mrr_length_per_rec * rec_per_key);
return incr;
}
@@ -3932,8 +3917,6 @@ int JOIN_TAB_SCAN_MRR::next()
DBUG_ASSERT(cache->buff <= (uchar *) (*ptr) &&
(uchar *) (*ptr) <= cache->end_pos);
*/
- if (join_tab->table->vfield)
- update_virtual_fields(join->thd, join_tab->table);
}
return rc;
}
@@ -3943,9 +3926,9 @@ static
void bka_range_seq_key_info(void *init_params, uint *length,
key_part_map *map)
{
-TABLE_REF *ref= &(((JOIN_CACHE*)init_params)->join_tab->ref);
-*length= ref->key_length;
-*map= (key_part_map(1) << ref->key_parts) - 1;
+ TABLE_REF *ref= &(((JOIN_CACHE*)init_params)->join_tab->ref);
+ *length= ref->key_length;
+ *map= (key_part_map(1) << ref->key_parts) - 1;
}
@@ -3973,10 +3956,10 @@ RETURN VALUE
static
range_seq_t bka_range_seq_init(void *init_param, uint n_ranges, uint flags)
{
-DBUG_ENTER("bka_range_seq_init");
-JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) init_param;
-cache->reset(0);
-DBUG_RETURN((range_seq_t) init_param);
+ DBUG_ENTER("bka_range_seq_init");
+ JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) init_param;
+ cache->reset(0);
+ DBUG_RETURN((range_seq_t) init_param);
}
@@ -4004,21 +3987,21 @@ RETURN VALUE
static
bool bka_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
{
-DBUG_ENTER("bka_range_seq_next");
-JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
-TABLE_REF *ref= &cache->join_tab->ref;
-key_range *start_key= &range->start_key;
-if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
-{
- start_key->keypart_map= (1 << ref->key_parts) - 1;
- start_key->flag= HA_READ_KEY_EXACT;
- range->end_key= *start_key;
- range->end_key.flag= HA_READ_AFTER_KEY;
- range->ptr= (char *) cache->get_curr_rec();
- range->range_flag= EQ_RANGE;
- DBUG_RETURN(0);
-}
-DBUG_RETURN(1);
+ DBUG_ENTER("bka_range_seq_next");
+ JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
+ TABLE_REF *ref= &cache->join_tab->ref;
+ key_range *start_key= &range->start_key;
+ if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
+ {
+ start_key->keypart_map= (1 << ref->key_parts) - 1;
+ start_key->flag= HA_READ_KEY_EXACT;
+ range->end_key= *start_key;
+ range->end_key.flag= HA_READ_AFTER_KEY;
+ range->ptr= (char *) cache->get_curr_rec();
+ range->range_flag= EQ_RANGE;
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(1);
}
@@ -4050,11 +4033,11 @@ RETURN VALUE
static
bool bka_range_seq_skip_record(range_seq_t rseq, range_id_t range_info, uchar *rowid)
{
-DBUG_ENTER("bka_range_seq_skip_record");
-JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
-bool res= cache->get_match_flag_by_pos((uchar *) range_info) ==
- JOIN_CACHE::MATCH_FOUND;
-DBUG_RETURN(res);
+ DBUG_ENTER("bka_range_seq_skip_record");
+ JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
+ bool res= cache->get_match_flag_by_pos((uchar *) range_info) ==
+ JOIN_CACHE::MATCH_FOUND;
+ DBUG_RETURN(res);
}
@@ -4081,14 +4064,14 @@ RETURN VALUE
static
bool bka_skip_index_tuple(range_seq_t rseq, range_id_t range_info)
{
-DBUG_ENTER("bka_skip_index_tuple");
-JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
-THD *thd= cache->thd();
-bool res;
-status_var_increment(thd->status_var.ha_icp_attempts);
-if (!(res= cache->skip_index_tuple(range_info)))
- status_var_increment(thd->status_var.ha_icp_match);
-DBUG_RETURN(res);
+ DBUG_ENTER("bka_skip_index_tuple");
+ JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
+ THD *thd= cache->thd();
+ bool res;
+ status_var_increment(thd->status_var.ha_icp_attempts);
+ if (!(res= cache->skip_index_tuple(range_info)))
+ status_var_increment(thd->status_var.ha_icp_match);
+ DBUG_RETURN(res);
}
@@ -4117,10 +4100,10 @@ RETURN VALUE
bool JOIN_CACHE_BKA::prepare_look_for_matches(bool skip_last)
{
-if (!records)
- return TRUE;
-rem_records= 1;
-return FALSE;
+ if (!records)
+ return TRUE;
+ rem_records= 1;
+ return FALSE;
}
@@ -4149,11 +4132,11 @@ RETURN VALUE
uchar *JOIN_CACHE_BKA::get_next_candidate_for_match()
{
-if (!rem_records)
- return 0;
-rem_records--;
-return curr_association;
-}
+ if (!rem_records)
+ return 0;
+ rem_records--;
+ return curr_association;
+}
/*
@@ -4177,8 +4160,8 @@ RETURN VALUE
bool JOIN_CACHE_BKA::skip_next_candidate_for_match(uchar *rec_ptr)
{
-return join_tab->check_only_first_match() &&
- (get_match_flag_by_pos(rec_ptr) == MATCH_FOUND);
+ return join_tab->check_only_first_match() &&
+ (get_match_flag_by_pos(rec_ptr) == MATCH_FOUND);
}
@@ -4205,8 +4188,8 @@ RETURN VALUE
void JOIN_CACHE_BKA::read_next_candidate_for_match(uchar *rec_ptr)
{
-get_record_by_pos(rec_ptr);
-}
+ get_record_by_pos(rec_ptr);
+}
/*
@@ -4232,30 +4215,29 @@ RETURN VALUE
int JOIN_CACHE_BKA::init(bool for_explain)
{
-int res;
-bool check_only_first_match= join_tab->check_only_first_match();
+ int res;
+ bool check_only_first_match= join_tab->check_only_first_match();
-RANGE_SEQ_IF rs_funcs= { bka_range_seq_key_info,
- bka_range_seq_init,
- bka_range_seq_next,
- check_only_first_match ?
- bka_range_seq_skip_record : 0,
- bka_skip_index_tuple };
+ RANGE_SEQ_IF rs_funcs= { bka_range_seq_key_info,
+ bka_range_seq_init,
+ bka_range_seq_next,
+ check_only_first_match ? bka_range_seq_skip_record : 0,
+ bka_skip_index_tuple };
-DBUG_ENTER("JOIN_CACHE_BKA::init");
+ DBUG_ENTER("JOIN_CACHE_BKA::init");
-JOIN_TAB_SCAN_MRR *jsm;
-if (!(join_tab_scan= jsm= new JOIN_TAB_SCAN_MRR(join, join_tab,
- mrr_mode, rs_funcs)))
- DBUG_RETURN(1);
+ JOIN_TAB_SCAN_MRR *jsm;
+ if (!(join_tab_scan= jsm= new JOIN_TAB_SCAN_MRR(join, join_tab,
+ mrr_mode, rs_funcs)))
+ DBUG_RETURN(1);
-if ((res= JOIN_CACHE::init(for_explain)))
- DBUG_RETURN(res);
+ if ((res= JOIN_CACHE::init(for_explain)))
+ DBUG_RETURN(res);
-if (use_emb_key)
- jsm->mrr_mode |= HA_MRR_MATERIALIZED_KEYS;
+ if (use_emb_key)
+ jsm->mrr_mode |= HA_MRR_MATERIALIZED_KEYS;
-DBUG_RETURN(0);
+ DBUG_RETURN(0);
}
@@ -4294,95 +4276,95 @@ RETURN VALUE
uint JOIN_CACHE_BKA::get_next_key(uchar ** key)
{
-uint len;
-uint32 rec_len;
-uchar *init_pos;
-JOIN_CACHE *cache;
+ uint len;
+ uint32 rec_len;
+ uchar *init_pos;
+ JOIN_CACHE *cache;
start:
-/* Any record in a BKA cache is prepended with its length */
-DBUG_ASSERT(with_length);
-
-if ((pos+size_of_rec_len) > last_rec_pos || !records)
- return 0;
+ /* Any record in a BKA cache is prepended with its length */
+ DBUG_ASSERT(with_length);
-/* Read the length of the record */
-rec_len= get_rec_length(pos);
-pos+= size_of_rec_len;
-init_pos= pos;
+ if ((pos+size_of_rec_len) > last_rec_pos || !records)
+ return 0;
-/* Read a reference to the previous cache if any */
-if (prev_cache)
- pos+= prev_cache->get_size_of_rec_offset();
+ /* Read the length of the record */
+ rec_len= get_rec_length(pos);
+ pos+= size_of_rec_len;
+ init_pos= pos;
-curr_rec_pos= pos;
+ /* Read a reference to the previous cache if any */
+ if (prev_cache)
+ pos+= prev_cache->get_size_of_rec_offset();
-/* Read all flag fields of the record */
-read_flag_fields();
+ curr_rec_pos= pos;
-if (with_match_flag &&
- (Match_flag) curr_rec_pos[0] == MATCH_IMPOSSIBLE )
-{
- pos= init_pos+rec_len;
- goto start;
-}
+ /* Read all flag fields of the record */
+ read_flag_fields();
-if (use_emb_key)
-{
- /* An embedded key is taken directly from the join buffer */
- *key= pos;
- len= emb_key_length;
-}
-else
-{
- /* Read key arguments from previous caches if there are any such fields */
- if (external_key_arg_fields)
+ if (with_match_flag &&
+ (Match_flag) curr_rec_pos[0] == MATCH_IMPOSSIBLE )
{
- uchar *rec_ptr= curr_rec_pos;
- uint key_arg_count= external_key_arg_fields;
- CACHE_FIELD **copy_ptr= blob_ptr-key_arg_count;
- for (cache= prev_cache; key_arg_count; cache= cache->prev_cache)
- {
- uint len= 0;
- DBUG_ASSERT(cache);
- rec_ptr= cache->get_rec_ref(rec_ptr);
- while (!cache->referenced_fields)
+ pos= init_pos+rec_len;
+ goto start;
+ }
+
+ if (use_emb_key)
+ {
+ /* An embedded key is taken directly from the join buffer */
+ *key= pos;
+ len= emb_key_length;
+ }
+ else
+ {
+ /* Read key arguments from previous caches if there are any such fields */
+ if (external_key_arg_fields)
+ {
+ uchar *rec_ptr= curr_rec_pos;
+ uint key_arg_count= external_key_arg_fields;
+ CACHE_FIELD **copy_ptr= blob_ptr-key_arg_count;
+ for (cache= prev_cache; key_arg_count; cache= cache->prev_cache)
{
- cache= cache->prev_cache;
+ uint len= 0;
DBUG_ASSERT(cache);
rec_ptr= cache->get_rec_ref(rec_ptr);
- }
- while (key_arg_count &&
- cache->read_referenced_field(*copy_ptr, rec_ptr, &len))
- {
- copy_ptr++;
- --key_arg_count;
+ while (!cache->referenced_fields)
+ {
+ cache= cache->prev_cache;
+ DBUG_ASSERT(cache);
+ rec_ptr= cache->get_rec_ref(rec_ptr);
+ }
+ while (key_arg_count &&
+ cache->read_referenced_field(*copy_ptr, rec_ptr, &len))
+ {
+ copy_ptr++;
+ --key_arg_count;
+ }
}
}
+
+ /*
+ Read the other key arguments from the current record. The fields for
+ these arguments are always first in the sequence of the record's fields.
+ */
+ CACHE_FIELD *copy= field_descr+flag_fields;
+ CACHE_FIELD *copy_end= copy+local_key_arg_fields;
+ bool blob_in_rec_buff= blob_data_is_in_rec_buff(curr_rec_pos);
+ for ( ; copy < copy_end; copy++)
+ read_record_field(copy, blob_in_rec_buff);
+
+ /* Build the key over the fields read into the record buffers */
+ TABLE_REF *ref= &join_tab->ref;
+ cp_buffer_from_ref(join->thd, join_tab->table, ref);
+ *key= ref->key_buff;
+ len= ref->key_length;
}
-
- /*
- Read the other key arguments from the current record. The fields for
- these arguments are always first in the sequence of the record's fields.
- */
- CACHE_FIELD *copy= field_descr+flag_fields;
- CACHE_FIELD *copy_end= copy+local_key_arg_fields;
- bool blob_in_rec_buff= blob_data_is_in_rec_buff(curr_rec_pos);
- for ( ; copy < copy_end; copy++)
- read_record_field(copy, blob_in_rec_buff);
-
- /* Build the key over the fields read into the record buffers */
- TABLE_REF *ref= &join_tab->ref;
- cp_buffer_from_ref(join->thd, join_tab->table, ref);
- *key= ref->key_buff;
- len= ref->key_length;
-}
-pos= init_pos+rec_len;
+ pos= init_pos+rec_len;
-return len;
-}
+ return len;
+}
/*
@@ -4423,9 +4405,9 @@ RETURN VALUE
bool JOIN_CACHE_BKA::skip_index_tuple(range_id_t range_info)
{
-DBUG_ENTER("JOIN_CACHE_BKA::skip_index_tuple");
-get_record_by_pos((uchar*)range_info);
-DBUG_RETURN(!join_tab->cache_idx_cond->val_int());
+ DBUG_ENTER("JOIN_CACHE_BKA::skip_index_tuple");
+ get_record_by_pos((uchar*)range_info);
+ DBUG_RETURN(!join_tab->cache_idx_cond->val_int());
}
@@ -4455,10 +4437,10 @@ RETURN VALUE
static
range_seq_t bkah_range_seq_init(void *init_param, uint n_ranges, uint flags)
{
-DBUG_ENTER("bkah_range_seq_init");
-JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) init_param;
-cache->reset(0);
-DBUG_RETURN((range_seq_t) init_param);
+ DBUG_ENTER("bkah_range_seq_init");
+ JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) init_param;
+ cache->reset(0);
+ DBUG_RETURN((range_seq_t) init_param);
}
@@ -4483,24 +4465,24 @@ RETURN VALUE
TRUE no more ranges
*/
-static
+static
bool bkah_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
{
-DBUG_ENTER("bkah_range_seq_next");
-JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
-TABLE_REF *ref= &cache->join_tab->ref;
-key_range *start_key= &range->start_key;
-if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
-{
- start_key->keypart_map= (1 << ref->key_parts) - 1;
- start_key->flag= HA_READ_KEY_EXACT;
- range->end_key= *start_key;
- range->end_key.flag= HA_READ_AFTER_KEY;
- range->ptr= (char *) cache->get_curr_key_chain();
- range->range_flag= EQ_RANGE;
- DBUG_RETURN(0);
-}
-DBUG_RETURN(1);
+ DBUG_ENTER("bkah_range_seq_next");
+ JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
+ TABLE_REF *ref= &cache->join_tab->ref;
+ key_range *start_key= &range->start_key;
+ if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
+ {
+ start_key->keypart_map= (1 << ref->key_parts) - 1;
+ start_key->flag= HA_READ_KEY_EXACT;
+ range->end_key= *start_key;
+ range->end_key.flag= HA_READ_AFTER_KEY;
+ range->ptr= (char *) cache->get_curr_key_chain();
+ range->range_flag= EQ_RANGE;
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(1);
}
@@ -4528,14 +4510,13 @@ RETURN VALUE
0 the record is to be left in the stream
*/
-static
-bool bkah_range_seq_skip_record(range_seq_t rseq, range_id_t range_info,
- uchar *rowid)
+static
+bool bkah_range_seq_skip_record(range_seq_t rseq, range_id_t range_info, uchar *rowid)
{
-DBUG_ENTER("bkah_range_seq_skip_record");
-JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
-bool res= cache->check_all_match_flags_for_key((uchar *) range_info);
-DBUG_RETURN(res);
+ DBUG_ENTER("bkah_range_seq_skip_record");
+ JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
+ bool res= cache->check_all_match_flags_for_key((uchar *) range_info);
+ DBUG_RETURN(res);
}
@@ -4562,14 +4543,14 @@ RETURN VALUE
static
bool bkah_skip_index_tuple(range_seq_t rseq, range_id_t range_info)
{
-DBUG_ENTER("bka_unique_skip_index_tuple");
-JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
-THD *thd= cache->thd();
-bool res;
-status_var_increment(thd->status_var.ha_icp_attempts);
-if (!(res= cache->skip_index_tuple(range_info)))
- status_var_increment(thd->status_var.ha_icp_match);
-DBUG_RETURN(res);
+ DBUG_ENTER("bka_unique_skip_index_tuple");
+ JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
+ THD *thd= cache->thd();
+ bool res;
+ status_var_increment(thd->status_var.ha_icp_attempts);
+ if (!(res= cache->skip_index_tuple(range_info)))
+ status_var_increment(thd->status_var.ha_icp_match);
+ DBUG_RETURN(res);
}
@@ -4597,8 +4578,8 @@ RETURN VALUE
bool JOIN_CACHE_BKAH::prepare_look_for_matches(bool skip_last)
{
-last_matching_rec_ref_ptr= next_matching_rec_ref_ptr= 0;
-if (no_association &&
+ last_matching_rec_ref_ptr= next_matching_rec_ref_ptr= 0;
+ if (no_association &&
!(curr_matching_chain= get_matching_chain_by_join_key())) //psergey: added '!'
return 1;
last_matching_rec_ref_ptr= get_next_rec_ref(curr_matching_chain);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index df868d0321f..53056d220b8 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -29,6 +29,7 @@
#include "sp_head.h"
#include "sp.h"
#include "sql_select.h"
+#include "sql_cte.h"
static int lex_one_token(YYSTYPE *yylval, THD *thd);
@@ -195,7 +196,6 @@ init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex)
lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VCOL_EXPR;
select_lex->cur_pos_in_select_list= UNDEF_POS;
table->map= 1; //To ensure correct calculation of const item
- table->get_fields_in_item_tree= TRUE;
table_list->table= table;
table_list->cacheable_table= false;
return FALSE;
@@ -231,9 +231,6 @@ void
st_parsing_options::reset()
{
allows_variable= TRUE;
- allows_select_into= TRUE;
- allows_select_procedure= TRUE;
- allows_derived= TRUE;
}
@@ -382,7 +379,7 @@ void Lex_input_stream::body_utf8_append(const char *ptr,
if (m_cpp_utf8_processed_ptr >= ptr)
return;
- int bytes_to_copy= ptr - m_cpp_utf8_processed_ptr;
+ size_t bytes_to_copy= ptr - m_cpp_utf8_processed_ptr;
memcpy(m_body_utf8_ptr, m_cpp_utf8_processed_ptr, bytes_to_copy);
m_body_utf8_ptr += bytes_to_copy;
@@ -671,11 +668,16 @@ void lex_start(THD *thd)
lex->select_lex.parent_lex= lex;
lex->select_lex.init_query();
lex->current_select_number= 1;
+ lex->curr_with_clause= 0;
+ lex->with_clauses_list= 0;
+ lex->with_clauses_list_last_next= &lex->with_clauses_list;
+ lex->clone_spec_offset= 0;
lex->value_list.empty();
lex->update_list.empty();
lex->set_var_list.empty();
lex->param_list.empty();
lex->view_list.empty();
+ lex->with_column_list.empty();
lex->with_persistent_for_clause= FALSE;
lex->column_list= NULL;
lex->index_list= NULL;
@@ -706,7 +708,6 @@ void lex_start(THD *thd)
lex->parsing_options.reset();
lex->empty_field_list_on_rset= 0;
lex->select_lex.select_number= 1;
- lex->length=0;
lex->part_info= 0;
lex->select_lex.in_sum_expr=0;
lex->select_lex.ftfunc_list_alloc.empty();
@@ -747,6 +748,14 @@ void lex_start(THD *thd)
lex->stmt_var_list.empty();
lex->proc_list.elements=0;
+ lex->save_group_list.empty();
+ lex->save_order_list.empty();
+ lex->win_ref= NULL;
+ lex->win_frame= NULL;
+ lex->frame_top_bound= NULL;
+ lex->frame_bottom_bound= NULL;
+ lex->win_spec= NULL;
+
lex->is_lex_started= TRUE;
DBUG_VOID_RETURN;
}
@@ -754,7 +763,7 @@ void lex_start(THD *thd)
void lex_end(LEX *lex)
{
DBUG_ENTER("lex_end");
- DBUG_PRINT("enter", ("lex: 0x%lx", (long) lex));
+ DBUG_PRINT("enter", ("lex: %p", lex));
lex_end_stage1(lex);
lex_end_stage2(lex);
@@ -1001,7 +1010,7 @@ Lex_input_stream::unescape(CHARSET_INFO *cs, char *to,
bool Lex_input_stream::get_text(LEX_STRING *dst, uint sep,
int pre_skip, int post_skip)
{
- reg1 uchar c;
+ uchar c;
uint found_escape=0;
CHARSET_INFO *cs= m_thd->charset();
@@ -1181,7 +1190,7 @@ static inline uint int_token(const char *str,uint length)
*/
bool consume_comment(Lex_input_stream *lip, int remaining_recursions_permitted)
{
- reg1 uchar c;
+ uchar c;
while (! lip->eof())
{
c= lip->yyGet();
@@ -1239,11 +1248,11 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd)
lip->lookahead_token= -1;
*yylval= *(lip->lookahead_yylval);
lip->lookahead_yylval= NULL;
- lip->add_digest_token(token, yylval);
return token;
}
token= lex_one_token(yylval, thd);
+ lip->add_digest_token(token, yylval);
switch(token) {
case WITH:
@@ -1255,12 +1264,11 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd)
which sql_yacc.yy can process.
*/
token= lex_one_token(yylval, thd);
+ lip->add_digest_token(token, yylval);
switch(token) {
case CUBE_SYM:
- lip->add_digest_token(WITH_CUBE_SYM, yylval);
return WITH_CUBE_SYM;
case ROLLUP_SYM:
- lip->add_digest_token(WITH_ROLLUP_SYM, yylval);
return WITH_ROLLUP_SYM;
default:
/*
@@ -1269,21 +1277,18 @@ int MYSQLlex(YYSTYPE *yylval, THD *thd)
lip->lookahead_yylval= lip->yylval;
lip->yylval= NULL;
lip->lookahead_token= token;
- lip->add_digest_token(WITH, yylval);
return WITH;
}
break;
default:
break;
}
-
- lip->add_digest_token(token, yylval);
return token;
}
static int lex_one_token(YYSTYPE *yylval, THD *thd)
{
- reg1 uchar UNINIT_VAR(c);
+ uchar UNINIT_VAR(c);
bool comment_closed;
int tokval, result_state;
uint length;
@@ -1413,28 +1418,22 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
if (use_mb(cs))
{
result_state= IDENT_QUOTED;
- if (my_mbcharlen(cs, lip->yyGetLast()) > 1)
+ int char_length= my_charlen(cs, lip->get_ptr() - 1,
+ lip->get_end_of_query());
+ if (char_length <= 0)
{
- int l = my_ismbchar(cs,
- lip->get_ptr() -1,
- lip->get_end_of_query());
- if (l == 0) {
- state = MY_LEX_CHAR;
- continue;
- }
- lip->skip_binary(l - 1);
+ state= MY_LEX_CHAR;
+ continue;
}
+ lip->skip_binary(char_length - 1);
+
while (ident_map[c=lip->yyGet()])
{
- if (my_mbcharlen(cs, c) > 1)
- {
- int l;
- if ((l = my_ismbchar(cs,
- lip->get_ptr() -1,
- lip->get_end_of_query())) == 0)
- break;
- lip->skip_binary(l-1);
- }
+ char_length= my_charlen(cs, lip->get_ptr() - 1,
+ lip->get_end_of_query());
+ if (char_length <= 0)
+ break;
+ lip->skip_binary(char_length - 1);
}
}
else
@@ -1456,7 +1455,10 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
below by checking start != lex->ptr.
*/
for (; state_map[(uchar) c] == MY_LEX_SKIP ; c= lip->yyGet())
- ;
+ {
+ if (c == '\n')
+ lip->yylineno++;
+ }
}
if (start == lip->get_ptr() && c == '.' &&
ident_map[(uchar) lip->yyPeek()])
@@ -1575,15 +1577,11 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
result_state= IDENT_QUOTED;
while (ident_map[c=lip->yyGet()])
{
- if (my_mbcharlen(cs, c) > 1)
- {
- int l;
- if ((l = my_ismbchar(cs,
- lip->get_ptr() -1,
- lip->get_end_of_query())) == 0)
- break;
- lip->skip_binary(l-1);
- }
+ int char_length= my_charlen(cs, lip->get_ptr() - 1,
+ lip->get_end_of_query());
+ if (char_length <= 0)
+ break;
+ lip->skip_binary(char_length - 1);
}
}
else
@@ -1611,8 +1609,9 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
char quote_char= c; // Used char
while ((c=lip->yyGet()))
{
- int var_length;
- if ((var_length= my_mbcharlen(cs, c)) == 1)
+ int var_length= my_charlen(cs, lip->get_ptr() - 1,
+ lip->get_end_of_query());
+ if (var_length == 1)
{
if (c == quote_char)
{
@@ -1624,11 +1623,9 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
}
}
#ifdef USE_MB
- else if (use_mb(cs))
+ else if (var_length > 1)
{
- if ((var_length= my_ismbchar(cs, lip->get_ptr() - 1,
- lip->get_end_of_query())))
- lip->skip_binary(var_length-1);
+ lip->skip_binary(var_length - 1);
}
#endif
}
@@ -2026,7 +2023,7 @@ static int lex_one_token(YYSTYPE *yylval, THD *thd)
}
-void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str)
+void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str, uint *prefix_length)
{
/*
TODO:
@@ -2034,8 +2031,10 @@ void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str)
that can be considered white-space.
*/
+ *prefix_length= 0;
while ((str->length > 0) && (my_isspace(cs, str->str[0])))
{
+ (*prefix_length)++;
str->length --;
str->str ++;
}
@@ -2076,6 +2075,7 @@ void st_select_lex_unit::init_query()
offset_limit_cnt= 0;
union_distinct= 0;
prepared= optimized= executed= 0;
+ optimize_started= 0;
item= 0;
union_result= 0;
table= 0;
@@ -2085,8 +2085,11 @@ void st_select_lex_unit::init_query()
item_list.empty();
describe= 0;
found_rows_for_union= 0;
- insert_table_with_stored_vcol= 0;
derived= 0;
+ is_view= false;
+ with_clause= 0;
+ with_element= 0;
+ columns_are_renamed= false;
}
void st_select_lex::init_query()
@@ -2102,8 +2105,10 @@ void st_select_lex::init_query()
min_max_opt_list.empty();
join= 0;
having= prep_having= where= prep_where= 0;
+ cond_pushed_into_where= cond_pushed_into_having= 0;
olap= UNSPECIFIED_OLAP_TYPE;
having_fix_field= 0;
+ having_fix_field_for_pushed_cond= 0;
context.select_lex= this;
context.init();
/*
@@ -2118,13 +2123,13 @@ void st_select_lex::init_query()
parent_lex->push_context(&context, parent_lex->thd->mem_root);
cond_count= between_count= with_wild= 0;
max_equal_elems= 0;
- ref_pointer_array= 0;
- ref_pointer_array_size= 0;
+ ref_pointer_array.reset();
select_n_where_fields= 0;
select_n_reserved= 0;
select_n_having_items= 0;
n_sum_items= 0;
n_child_sum_items= 0;
+ hidden_bit_fields= 0;
subquery_in_having= explicit_limit= 0;
is_item_list_lookup= 0;
changed_elements= 0;
@@ -2137,8 +2142,11 @@ void st_select_lex::init_query()
prep_leaf_list_state= UNINIT;
have_merged_subqueries= FALSE;
bzero((char*) expr_cache_may_be_used, sizeof(expr_cache_may_be_used));
+ select_list_tables= 0;
m_non_agg_field_used= false;
m_agg_func_used= false;
+ window_specs.empty();
+ window_funcs.empty();
}
void st_select_lex::init_select()
@@ -2155,7 +2163,6 @@ void st_select_lex::init_select()
in_sum_expr= with_wild= 0;
options= 0;
sql_cache= SQL_CACHE_UNSPECIFIED;
- interval_list.empty();
ftfunc_list_alloc.empty();
inner_sum_func_list= 0;
ftfunc_list= &ftfunc_list_alloc;
@@ -2167,6 +2174,7 @@ void st_select_lex::init_select()
select_limit= 0; /* denotes the default limit = HA_POS_ERROR */
offset_limit= 0; /* denotes the default offset = 0 */
with_sum_func= 0;
+ with_all_modifier= 0;
is_correlated= 0;
cur_pos_in_select_list= UNDEF_POS;
cond_value= having_value= Item::COND_UNDEF;
@@ -2176,6 +2184,7 @@ void st_select_lex::init_select()
m_non_agg_field_used= false;
m_agg_func_used= false;
name_visibility_map= 0;
+ with_dep= 0;
join= 0;
lock_type= TL_READ_DEFAULT;
}
@@ -2272,6 +2281,59 @@ void st_select_lex_node::fast_exclude()
}
+/**
+ @brief
+ Insert a new chain of nodes into another chain before a particular link
+
+ @param in/out
+ ptr_pos_to_insert the address of the chain pointer pointing to the link
+ before which the subchain has to be inserted
+ @param
+ end_chain_node the last link of the subchain to be inserted
+
+ @details
+ The method inserts the chain of nodes starting from this node and ending
+ with the node end_chain_node into another chain of nodes before the node
+ pointed to by *ptr_pos_to_insert.
+ It is assumed that ptr_pos_to_insert belongs to the chain where we insert.
+ So it must be updated.
+
+ @retval
+ The method returns the pointer to the first link of the inserted chain
+*/
+
+st_select_lex_node *st_select_lex_node:: insert_chain_before(
+ st_select_lex_node **ptr_pos_to_insert,
+ st_select_lex_node *end_chain_node)
+{
+ end_chain_node->link_next= *ptr_pos_to_insert;
+ (*ptr_pos_to_insert)->link_prev= &end_chain_node->link_next;
+ this->link_prev= ptr_pos_to_insert;
+ return this;
+}
+
+
+/*
+ Detach the node from its master and attach it to a new master
+*/
+
+void st_select_lex_node::move_as_slave(st_select_lex_node *new_master)
+{
+ exclude_from_tree();
+ if (new_master->slave)
+ {
+ st_select_lex_node *curr= new_master->slave;
+ for ( ; curr->next ; curr= curr->next) ;
+ prev= &curr->next;
+ }
+ else
+ prev= &new_master->slave;
+ *prev= this;
+ next= 0;
+ master= new_master;
+}
+
+
/*
Exclude a node from the tree lex structure, but leave it in the global
list of nodes.
@@ -2350,9 +2412,12 @@ void st_select_lex_unit::exclude_level()
if (next)
next->prev= prev;
}
+ // Mark it excluded
+ prev= NULL;
}
+#if 0
/*
Exclude subtree of current unit from tree of SELECTs
@@ -2378,6 +2443,7 @@ void st_select_lex_unit::exclude_tree()
if (next)
next->prev= prev;
}
+#endif
/*
@@ -2432,7 +2498,6 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
return FALSE;
}
-bool st_select_lex_node::set_braces(bool value) { return 1; }
bool st_select_lex_node::inc_in_sum_expr() { return 1; }
uint st_select_lex_node::get_in_sum_expr() { return 0; }
TABLE_LIST* st_select_lex_node::get_table_list() { return 0; }
@@ -2559,7 +2624,7 @@ bool st_select_lex::add_gorder_to_list(THD *thd, Item *item, bool asc)
bool st_select_lex::add_item_to_list(THD *thd, Item *item)
{
DBUG_ENTER("st_select_lex::add_item_to_list");
- DBUG_PRINT("info", ("Item: 0x%lx", (long) item));
+ DBUG_PRINT("info", ("Item: %p", item));
DBUG_RETURN(item_list.push_back(item, thd->mem_root));
}
@@ -2582,13 +2647,6 @@ st_select_lex* st_select_lex::outer_select()
}
-bool st_select_lex::set_braces(bool value)
-{
- braces= value;
- return 0;
-}
-
-
bool st_select_lex::inc_in_sum_expr()
{
in_sum_expr++;
@@ -2620,6 +2678,10 @@ ulong st_select_lex::get_table_join_options()
bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
{
+
+ if (!((options & SELECT_DISTINCT) && !group_list.elements))
+ hidden_bit_fields= 0;
+
// find_order_in_list() may need some extra space, so multiply by two.
order_group_num*= 2;
@@ -2634,8 +2696,9 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
select_n_reserved +
select_n_having_items +
select_n_where_fields +
- order_group_num) * 5;
- if (ref_pointer_array != NULL)
+ order_group_num +
+ hidden_bit_fields) * 5;
+ if (!ref_pointer_array.is_null())
{
/*
We need to take 'n_sum_items' into account when allocating the array,
@@ -2643,24 +2706,23 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
MIN/MAX rewrite in Item_in_subselect::single_value_transformer.
In the usual case we can reuse the array from the prepare phase.
If we need a bigger array, we must allocate a new one.
- */
- if (ref_pointer_array_size >= n_elems)
- {
- DBUG_PRINT("info", ("reusing old ref_array"));
+ */
+ if (ref_pointer_array.size() >= n_elems)
return false;
- }
- }
- ref_pointer_array= static_cast<Item**>(arena->alloc(sizeof(Item*) * n_elems));
- if (ref_pointer_array != NULL)
- ref_pointer_array_size= n_elems;
+ }
+ Item **array= static_cast<Item**>(arena->alloc(sizeof(Item*) * n_elems));
+ if (array != NULL)
+ ref_pointer_array= Ref_ptr_array(array, n_elems);
- return ref_pointer_array == NULL;
+ return array == NULL;
}
void st_select_lex_unit::print(String *str, enum_query_type query_type)
{
bool union_all= !union_distinct;
+ if (with_clause)
+ with_clause->print(str, query_type);
for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select())
{
if (sl != first_select())
@@ -2717,8 +2779,8 @@ void st_select_lex::print_order(String *str,
else
(*order->item)->print(str, query_type);
}
- if (!order->asc)
- str->append(STRING_WITH_LEN(" desc"));
+ if (order->direction == ORDER::ORDER_DESC)
+ str->append(STRING_WITH_LEN(" desc"));
if (order->next)
str->append(',');
}
@@ -2922,7 +2984,7 @@ bool LEX::can_be_merged()
tmp_unit= tmp_unit->next_unit())
{
if (tmp_unit->first_select()->parent_lex == this &&
- (tmp_unit->item == 0 ||
+ (tmp_unit->item != 0 &&
(tmp_unit->item->place() != IN_WHERE &&
tmp_unit->item->place() != IN_ON &&
tmp_unit->item->place() != SELECT_LIST)))
@@ -3157,6 +3219,8 @@ void st_select_lex_unit::set_limit(st_select_lex *sl)
bool st_select_lex_unit::union_needs_tmp_table()
{
+ if (with_element && with_element->is_recursive)
+ return true;
return union_distinct != NULL ||
global_parameters()->order_list.elements != 0 ||
thd->lex->sql_command == SQLCOM_INSERT_SELECT ||
@@ -4111,7 +4175,15 @@ void SELECT_LEX::update_used_tables()
TABLE *tab= tl->table;
tab->covering_keys= tab->s->keys_for_keyread;
tab->covering_keys.intersect(tab->keys_in_use_for_query);
- tab->merge_keys.clear_all();
+ /*
+ View/derived was merged. Need to recalculate read_set/vcol_set
+ bitmaps here. For example:
+ CREATE VIEW v1 AS SELECT f1,f2,f3 FROM t1;
+ SELECT f1 FROM v1;
+ Initially, the view definition will put all f1,f2,f3 in the
+ read_set for t1. But after the view is merged, only f1 should
+ be in the read_set.
+ */
bitmap_clear_all(tab->read_set);
if (tab->vcol_set)
bitmap_clear_all(tab->vcol_set);
@@ -4179,9 +4251,11 @@ void SELECT_LEX::update_used_tables()
Item *item;
List_iterator_fast<Item> it(join->fields_list);
+ select_list_tables= 0;
while ((item= it++))
{
item->update_used_tables();
+ select_list_tables|= item->used_tables();
}
Item_outer_ref *ref;
List_iterator_fast<Item_outer_ref> ref_it(inner_refs_list);
@@ -4217,6 +4291,7 @@ void st_select_lex::update_correlated_cache()
while ((tl= ti++))
{
+ // is_correlated|= tl->is_with_table_recursive_reference();
if (tl->on_expr)
is_correlated|= MY_TEST(tl->on_expr->used_tables() & OUTER_REF_TABLE_BIT);
for (TABLE_LIST *embedding= tl->embedding ; embedding ;
@@ -4231,6 +4306,8 @@ void st_select_lex::update_correlated_cache()
if (join->conds)
is_correlated|= MY_TEST(join->conds->used_tables() & OUTER_REF_TABLE_BIT);
+ is_correlated|= join->having_is_correlated;
+
if (join->having)
is_correlated|= MY_TEST(join->having->used_tables() & OUTER_REF_TABLE_BIT);
@@ -4345,7 +4422,33 @@ void st_select_lex::set_explain_type(bool on_the_fly)
type= is_uncacheable ? "UNCACHEABLE UNION": "UNION";
if (this == master_unit()->fake_select_lex)
type= "UNION RESULT";
-
+ /*
+ join below may be =NULL when this functions is called at an early
+ stage. It will be later called again and we will set the correct
+ value.
+ */
+ if (join)
+ {
+ bool uses_cte= false;
+ for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS,
+ WITH_CONST_TABLES);
+ tab;
+ tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS))
+ {
+ /*
+ pos_in_table_list=NULL for e.g. post-join aggregation JOIN_TABs.
+ */
+ if (tab->table && tab->table->pos_in_table_list &&
+ tab->table->pos_in_table_list->with &&
+ tab->table->pos_in_table_list->with->is_recursive)
+ {
+ uses_cte= true;
+ break;
+ }
+ }
+ if (uses_cte)
+ type= "RECURSIVE UNION";
+ }
}
}
}
@@ -4371,6 +4474,19 @@ void SELECT_LEX::increase_derived_records(ha_rows records)
SELECT_LEX_UNIT *unit= master_unit();
DBUG_ASSERT(unit->derived);
+ if (unit->with_element && unit->with_element->is_recursive)
+ {
+ st_select_lex *first_recursive= unit->with_element->first_recursive;
+ st_select_lex *sl= unit->first_select();
+ for ( ; sl != first_recursive; sl= sl->next_select())
+ {
+ if (sl == this)
+ break;
+ }
+ if (sl == first_recursive)
+ return;
+ }
+
select_union *result= (select_union*)unit->result;
result->records+= records;
}
@@ -4577,9 +4693,9 @@ bool LEX::set_arena_for_set_stmt(Query_arena *backup)
Query_arena_memroot(mem_root_for_set_stmt,
Query_arena::STMT_INITIALIZED)))
DBUG_RETURN(1);
- DBUG_PRINT("info", ("mem_root: 0x%lx arena: 0x%lx",
- (ulong) mem_root_for_set_stmt,
- (ulong) arena_for_set_stmt));
+ DBUG_PRINT("info", ("mem_root: %p arena: %p",
+ mem_root_for_set_stmt,
+ arena_for_set_stmt));
thd->set_n_backup_active_arena(arena_for_set_stmt, backup);
DBUG_RETURN(0);
}
@@ -4590,9 +4706,9 @@ void LEX::reset_arena_for_set_stmt(Query_arena *backup)
DBUG_ENTER("LEX::reset_arena_for_set_stmt");
DBUG_ASSERT(arena_for_set_stmt);
thd->restore_active_arena(arena_for_set_stmt, backup);
- DBUG_PRINT("info", ("mem_root: 0x%lx arena: 0x%lx",
- (ulong) arena_for_set_stmt->mem_root,
- (ulong) arena_for_set_stmt));
+ DBUG_PRINT("info", ("mem_root: %p arena: %p",
+ arena_for_set_stmt->mem_root,
+ arena_for_set_stmt));
DBUG_VOID_RETURN;
}
@@ -4602,9 +4718,9 @@ void LEX::free_arena_for_set_stmt()
DBUG_ENTER("LEX::free_arena_for_set_stmt");
if (!arena_for_set_stmt)
return;
- DBUG_PRINT("info", ("mem_root: 0x%lx arena: 0x%lx",
- (ulong) arena_for_set_stmt->mem_root,
- (ulong) arena_for_set_stmt));
+ DBUG_PRINT("info", ("mem_root: %p arena: %p",
+ arena_for_set_stmt->mem_root,
+ arena_for_set_stmt));
arena_for_set_stmt->free_items();
delete(arena_for_set_stmt);
free_root(mem_root_for_set_stmt, MYF(MY_KEEP_PREALLOC));
@@ -4653,7 +4769,9 @@ int st_select_lex_unit::save_union_explain(Explain_query *output)
new (output->mem_root) Explain_union(output->mem_root,
thd->lex->analyze_stmt);
-
+ if (with_element && with_element->is_recursive)
+ eu->is_recursive_cte= true;
+
if (derived)
eu->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
/*
@@ -4866,3 +4984,196 @@ void binlog_unsafe_map_init()
BINLOG_DIRECT_OFF & TRX_CACHE_NOT_EMPTY);
}
#endif
+
+
+/**
+ @brief
+ Finding fields that are used in the GROUP BY of this st_select_lex
+
+ @param thd The thread handle
+
+ @details
+ This method looks through the fields which are used in the GROUP BY of this
+ st_select_lex and saves these fields.
+*/
+
+void st_select_lex::collect_grouping_fields(THD *thd)
+{
+ grouping_tmp_fields.empty();
+ List_iterator<Item> li(join->fields_list);
+ Item *item= li++;
+ for (uint i= 0; i < master_unit()->derived->table->s->fields; i++, (item=li++))
+ {
+ for (ORDER *ord= join->group_list; ord; ord= ord->next)
+ {
+ if ((*ord->item)->eq((Item*)item, 0))
+ {
+ Grouping_tmp_field *grouping_tmp_field=
+ new Grouping_tmp_field(master_unit()->derived->table->field[i], item);
+ grouping_tmp_fields.push_back(grouping_tmp_field);
+ }
+ }
+ }
+}
+
+/**
+ @brief
+ For a condition check possibility of extracting a formula over grouping fields
+
+ @param cond The condition whose subformulas are to be analyzed
+
+ @details
+ This method traverses the AND-OR condition cond and for each subformula of
+ the condition it checks whether it can be usable for the extraction of a
+ condition over the grouping fields of this select. The method uses
+ the call-back parameter check_processor to check whether a primary formula
+ depends only on grouping fields.
+ The subformulas that are not usable are marked with the flag NO_EXTRACTION_FL.
+ The subformulas that can be entirely extracted are marked with the flag
+ FULL_EXTRACTION_FL.
+ @note
+ This method is called before any call of extract_cond_for_grouping_fields.
+ The flag NO_EXTRACTION_FL set in a subformula allows to avoid building clone
+ for the subformula when extracting the pushable condition.
+ The flag FULL_EXTRACTION_FL allows to delete later all top level conjuncts
+ from cond.
+*/
+
+void
+st_select_lex::check_cond_extraction_for_grouping_fields(Item *cond,
+ TABLE_LIST *derived)
+{
+ cond->clear_extraction_flag();
+ if (cond->type() == Item::COND_ITEM)
+ {
+ bool and_cond= ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC;
+ List<Item> *arg_list= ((Item_cond*) cond)->argument_list();
+ List_iterator<Item> li(*arg_list);
+ uint count= 0; // to count items not containing NO_EXTRACTION_FL
+ uint count_full= 0; // to count items with FULL_EXTRACTION_FL
+ Item *item;
+ while ((item=li++))
+ {
+ check_cond_extraction_for_grouping_fields(item, derived);
+ if (item->get_extraction_flag() != NO_EXTRACTION_FL)
+ {
+ count++;
+ if (item->get_extraction_flag() == FULL_EXTRACTION_FL)
+ count_full++;
+ }
+ else if (!and_cond)
+ break;
+ }
+ if ((and_cond && count == 0) || item)
+ cond->set_extraction_flag(NO_EXTRACTION_FL);
+ if (count_full == arg_list->elements)
+ cond->set_extraction_flag(FULL_EXTRACTION_FL);
+ if (cond->get_extraction_flag() != 0)
+ {
+ li.rewind();
+ while ((item=li++))
+ item->clear_extraction_flag();
+ }
+ }
+ else
+ {
+ int fl= cond->excl_dep_on_grouping_fields(this) ?
+ FULL_EXTRACTION_FL : NO_EXTRACTION_FL;
+ cond->set_extraction_flag(fl);
+ }
+}
+
+
+/**
+ @brief
+ Build condition extractable from the given one depended on grouping fields
+
+ @param thd The thread handle
+ @param cond The condition from which the condition depended
+ on grouping fields is to be extracted
+ @param no_top_clones If it's true then no clones for the top fully
+ extractable conjuncts are built
+
+ @details
+ For the given condition cond this method finds out what condition depended
+ only on the grouping fields can be extracted from cond. If such condition C
+ exists the method builds the item for it.
+ This method uses the flags NO_EXTRACTION_FL and FULL_EXTRACTION_FL set by the
+ preliminary call of st_select_lex::check_cond_extraction_for_grouping_fields
+ to figure out whether a subformula depends only on these fields or not.
+ @note
+ The built condition C is always implied by the condition cond
+ (cond => C). The method tries to build the most restrictive such
+ condition (i.e. for any other condition C' such that cond => C'
+ we have C => C').
+ @note
+ The built item is not ready for usage: substitution for the field items
+ has to be done and it has to be re-fixed.
+
+ @retval
+ the built condition depended only on grouping fields if such a condition exists
+ NULL if there is no such a condition
+*/
+
+Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond,
+ bool no_top_clones)
+{
+ if (cond->get_extraction_flag() == FULL_EXTRACTION_FL)
+ {
+ if (no_top_clones)
+ return cond;
+ cond->clear_extraction_flag();
+ return cond->build_clone(thd, thd->mem_root);
+ }
+ if (cond->type() == Item::COND_ITEM)
+ {
+ bool cond_and= false;
+ Item_cond *new_cond;
+ if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
+ {
+ cond_and= true;
+ new_cond= new (thd->mem_root) Item_cond_and(thd);
+ }
+ else
+ new_cond= new (thd->mem_root) Item_cond_or(thd);
+ if (!new_cond)
+ return 0;
+ List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
+ Item *item;
+ while ((item=li++))
+ {
+ if (item->get_extraction_flag() == NO_EXTRACTION_FL)
+ {
+ DBUG_ASSERT(cond_and);
+ item->clear_extraction_flag();
+ continue;
+ }
+ Item *fix= build_cond_for_grouping_fields(thd, item,
+ no_top_clones & cond_and);
+ if (!fix)
+ {
+ if (cond_and)
+ continue;
+ break;
+ }
+ new_cond->argument_list()->push_back(fix, thd->mem_root);
+ }
+
+ if (!cond_and && item)
+ {
+ while((item= li++))
+ item->clear_extraction_flag();
+ return 0;
+ }
+ switch (new_cond->argument_list()->elements)
+ {
+ case 0:
+ return 0;
+ case 1:
+ return new_cond->argument_list()->head();
+ default:
+ return new_cond;
+ }
+ }
+ return 0;
+}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 0142f812632..2af1d527cd3 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -28,6 +28,8 @@
#include "mem_root_array.h"
#include "sql_cmd.h"
#include "sql_alter.h" // Alter_info
+#include "sql_window.h"
+#include "sql_trigger.h"
/* YACC and LEX Definitions */
@@ -47,7 +49,10 @@ class sys_var;
class Item_func_match;
class File_parser;
class Key_part_spec;
+class Item_window_func;
struct sql_digest_state;
+class With_clause;
+
#define ALLOC_ROOT_SET 1024
@@ -178,6 +183,7 @@ const LEX_STRING sp_data_access_name[]=
#define DERIVED_SUBQUERY 1
#define DERIVED_VIEW 2
+#define DERIVED_WITH 4
enum enum_view_create_mode
{
@@ -240,11 +246,12 @@ struct LEX_MASTER_INFO
ulong server_id;
uint port, connect_retry;
float heartbeat_period;
+ int sql_delay;
/*
Enum is used for making it possible to detect if the user
changed variable or if it should be left at old value
*/
- enum {LEX_MI_UNCHANGED, LEX_MI_DISABLE, LEX_MI_ENABLE}
+ enum {LEX_MI_UNCHANGED= 0, LEX_MI_DISABLE, LEX_MI_ENABLE}
ssl, ssl_verify_server_cert, heartbeat_opt, repl_ignore_server_ids_opt,
repl_do_domain_ids_opt, repl_ignore_domain_ids_opt;
enum {
@@ -260,6 +267,7 @@ struct LEX_MASTER_INFO
sizeof(ulong), 0, 16, MYF(0));
my_init_dynamic_array(&repl_ignore_domain_ids,
sizeof(ulong), 0, 16, MYF(0));
+ sql_delay= -1;
}
void reset(bool is_change_master)
{
@@ -280,6 +288,7 @@ struct LEX_MASTER_INFO
repl_ignore_domain_ids_opt= LEX_MI_UNCHANGED;
gtid_pos_str= null_lex_str;
use_gtid_opt= LEX_GTID_UNCHANGED;
+ sql_delay= -1;
}
};
@@ -496,10 +505,6 @@ public:
enum sub_select_type linkage;
bool no_table_names_allowed; /* used for global order by */
- static void *operator new(size_t size) throw ()
- {
- return sql_alloc(size);
- }
static void *operator new(size_t size, MEM_ROOT *mem_root) throw ()
{ return (void*) alloc_root(mem_root, (uint) size); }
static void operator delete(void *ptr,size_t size) { TRASH_FREE(ptr, size); }
@@ -529,7 +534,6 @@ public:
virtual st_select_lex* outer_select()= 0;
virtual st_select_lex* return_after_parsing()= 0;
- virtual bool set_braces(bool value);
virtual bool inc_in_sum_expr();
virtual uint get_in_sum_expr();
virtual TABLE_LIST* get_table_list();
@@ -544,7 +548,20 @@ public:
List<String> *partition_names= 0,
LEX_STRING *option= 0);
virtual void set_lock_for_tables(thr_lock_type lock_type) {}
-
+ void set_slave(st_select_lex_node *slave_arg) { slave= slave_arg; }
+ void move_node(st_select_lex_node *where_to_move)
+ {
+ if (where_to_move == this)
+ return;
+ if (next)
+ next->prev= prev;
+ *prev= next;
+ *where_to_move->prev= this;
+ next= where_to_move;
+ }
+ st_select_lex_node *insert_chain_before(st_select_lex_node **ptr_pos_to_insert,
+ st_select_lex_node *end_chain_node);
+ void move_as_slave(st_select_lex_node *new_master);
friend class st_select_lex_unit;
friend bool mysql_new_select(LEX *lex, bool move_down);
friend bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
@@ -599,6 +616,8 @@ public:
executed, // already executed
cleaned;
+ bool optimize_started;
+
// list of fields which points to temporary table for union
List<Item> item_list;
/*
@@ -642,6 +661,11 @@ public:
derived tables/views handling.
*/
TABLE_LIST *derived;
+ bool is_view;
+ /* With clause attached to this unit (if any) */
+ With_clause *with_clause;
+ /* With element where this unit is used as the specification (if any) */
+ With_element *with_element;
/* thread handler */
THD *thd;
/*
@@ -650,7 +674,7 @@ public:
*/
st_select_lex *fake_select_lex;
/**
- SELECT_LEX that stores LIMIT and OFFSET for UNION ALL when no
+ SELECT_LEX that stores LIMIT and OFFSET for UNION ALL when no
fake_select_lex is used.
*/
st_select_lex *saved_fake_select_lex;
@@ -659,12 +683,7 @@ public:
bool describe; /* union exec() called for EXPLAIN */
Procedure *last_procedure; /* Pointer to procedure, if such exists */
- /*
- Insert table with stored virtual columns.
- This is used only in those rare cases
- when the list of inserted values is empty.
- */
- TABLE *insert_table_with_stored_vcol;
+ bool columns_are_renamed;
void init_query();
st_select_lex* outer_select();
@@ -672,18 +691,21 @@ public:
{
return reinterpret_cast<st_select_lex*>(slave);
}
+ void set_with_clause(With_clause *with_cl);
st_select_lex_unit* next_unit()
{
return reinterpret_cast<st_select_lex_unit*>(next);
}
st_select_lex* return_after_parsing() { return return_to; }
void exclude_level();
- void exclude_tree();
+ // void exclude_tree(); // it is not used for long time
+ bool is_excluded() { return prev == NULL; }
/* UNION methods */
bool prepare(THD *thd, select_result *result, ulong additional_options);
bool optimize();
bool exec();
+ bool exec_recursive();
bool cleanup();
inline void unclean() { cleaned= 0; }
void reinit_exec_mechanism();
@@ -705,7 +727,7 @@ public:
friend void lex_start(THD *thd);
friend int subselect_union_engine::exec();
- List<Item> *get_unit_column_types();
+ List<Item> *get_column_types(bool for_cursor);
select_union *get_union_result() { return union_result; }
int save_union_explain(Explain_query *output);
@@ -713,6 +735,22 @@ public:
};
typedef class st_select_lex_unit SELECT_LEX_UNIT;
+typedef Bounds_checked_array<Item*> Ref_ptr_array;
+
+
+/*
+ Structure which consists of the field and the item which
+ produces this field.
+*/
+
+class Grouping_tmp_field :public Sql_alloc
+{
+public:
+ Field *tmp_field;
+ Item *producing_item;
+ Grouping_tmp_field(Field *fld, Item *item)
+ :tmp_field(fld), producing_item(item) {}
+};
#define TOUCHED_SEL_COND 1/* WHERE/HAVING/ON should be reinited before use */
@@ -729,6 +767,8 @@ public:
Item *where, *having; /* WHERE & HAVING clauses */
Item *prep_where; /* saved WHERE clause for prepared statement processing */
Item *prep_having;/* saved HAVING clause for prepared statement processing */
+ Item *cond_pushed_into_where; /* condition pushed into the select's WHERE */
+ Item *cond_pushed_into_having; /* condition pushed into the select's HAVING */
/* Saved values of the WHERE and HAVING clauses*/
Item::cond_result cond_value, having_value;
/*
@@ -754,7 +794,6 @@ public:
List<Item> item_list; /* list of fields & expressions */
List<Item> pre_fix; /* above list before fix_fields */
- List<String> interval_list;
bool is_item_list_lookup;
/*
Usualy it is pointer to ftfunc_list_alloc, but in union used to create fake
@@ -807,9 +846,9 @@ public:
SQL_I_List<ORDER> order_list; /* ORDER clause */
SQL_I_List<ORDER> gorder_list;
Item *select_limit, *offset_limit; /* LIMIT clause parameters */
- // Arrays of pointers to top elements of all_fields list
- Item **ref_pointer_array;
- size_t ref_pointer_array_size; // Number of elements in array.
+
+ /// Array of pointers to top elements of all_fields list
+ Ref_ptr_array ref_pointer_array;
/*
number of items in select_list and HAVING clause used to get number
@@ -827,7 +866,13 @@ public:
uint select_n_where_fields;
/* reserved for exists 2 in */
uint select_n_reserved;
+ /*
+ it counts the number of bit fields in the SELECT list. These are used when DISTINCT is
+ converted to a GROUP BY involving BIT fields.
+ */
+ uint hidden_bit_fields;
enum_parsing_place parsing_place; /* where we are parsing expression */
+ enum_parsing_place context_analysis_place; /* where we are in prepare */
bool with_sum_func; /* sum function indicator */
ulong table_join_options;
@@ -846,6 +891,11 @@ public:
bool braces; /* SELECT ... UNION (SELECT ... ) <- this braces */
/* TRUE when having fix field called in processing of this SELECT */
bool having_fix_field;
+ /*
+ TRUE when fix field is called for a new condition pushed into the
+ HAVING clause of this SELECT
+ */
+ bool having_fix_field_for_pushed_cond;
/* List of references to fields referenced from inner selects */
List<Item_outer_ref> inner_refs_list;
/* Number of Item_sum-derived objects in this SELECT */
@@ -866,6 +916,7 @@ public:
*/
bool subquery_in_having;
/* TRUE <=> this SELECT is correlated w.r.t. some ancestor select */
+ bool with_all_modifier; /* used for selects in union */
bool is_correlated;
/*
This variable is required to ensure proper work of subqueries and
@@ -907,8 +958,17 @@ public:
*/
List<String> *prev_join_using;
+ /**
+ The set of those tables whose fields are referenced in the select list of
+ this select level.
+ */
+ table_map select_list_tables;
+
/* name of nesting SELECT visibility (for aggregate functions check) */
nesting_map name_visibility_map;
+
+ table_map with_dep;
+ List<Grouping_tmp_field> grouping_tmp_fields;
/* it is for correct printing SELECT options */
thr_lock_type lock_type;
@@ -938,7 +998,10 @@ public:
bool mark_as_dependent(THD *thd, st_select_lex *last, Item *dependency);
- bool set_braces(bool value);
+ void set_braces(bool value)
+ {
+ braces= value;
+ }
bool inc_in_sum_expr();
uint get_in_sum_expr();
@@ -1086,7 +1149,45 @@ public:
void set_non_agg_field_used(bool val) { m_non_agg_field_used= val; }
void set_agg_func_used(bool val) { m_agg_func_used= val; }
-
+ void set_with_clause(With_clause *with_clause);
+ With_clause *get_with_clause()
+ {
+ return master_unit()->with_clause;
+ }
+ With_element *get_with_element()
+ {
+ return master_unit()->with_element;
+ }
+ With_element *find_table_def_in_with_clauses(TABLE_LIST *table);
+ bool check_unrestricted_recursive(bool only_standard_compliant);
+ bool check_subqueries_with_recursive_references();
+ void collect_grouping_fields(THD *thd);
+ void check_cond_extraction_for_grouping_fields(Item *cond,
+ TABLE_LIST *derived);
+ Item *build_cond_for_grouping_fields(THD *thd, Item *cond,
+ bool no_to_clones);
+
+ List<Window_spec> window_specs;
+ void prepare_add_window_spec(THD *thd);
+ bool add_window_def(THD *thd, LEX_STRING *win_name, LEX_STRING *win_ref,
+ SQL_I_List<ORDER> win_partition_list,
+ SQL_I_List<ORDER> win_order_list,
+ Window_frame *win_frame);
+ bool add_window_spec(THD *thd, LEX_STRING *win_ref,
+ SQL_I_List<ORDER> win_partition_list,
+ SQL_I_List<ORDER> win_order_list,
+ Window_frame *win_frame);
+ List<Item_window_func> window_funcs;
+ bool add_window_func(Item_window_func *win_func)
+ {
+ return window_funcs.push_back(win_func);
+ }
+
+ bool have_window_funcs() const { return (window_funcs.elements !=0); }
+
+ bool cond_pushdown_is_allowed() const
+ { return !have_window_funcs() && !olap && !explicit_limit; }
+
private:
bool m_non_agg_field_used;
bool m_agg_func_used;
@@ -1121,10 +1222,16 @@ struct st_sp_chistics
enum enum_sp_data_access daccess;
};
-struct st_trg_chistics
+
+
+struct st_trg_chistics: public st_trg_execution_order
{
enum trg_action_time_type action_time;
enum trg_event_type event;
+
+ const char *ordering_clause_begin;
+ const char *ordering_clause_end;
+
};
extern sys_var *trg_new_row_fake_var;
@@ -1420,8 +1527,8 @@ public:
This has all flags from 0 (inclusive) to BINLOG_STMT_FLAG_COUNT
(exclusive) set.
*/
- static const int BINLOG_STMT_UNSAFE_ALL_FLAGS=
- ((1 << BINLOG_STMT_UNSAFE_COUNT) - 1);
+ static const uint32 BINLOG_STMT_UNSAFE_ALL_FLAGS=
+ ((1U << BINLOG_STMT_UNSAFE_COUNT) - 1);
/**
Maps elements of enum_binlog_stmt_unsafe to error codes.
@@ -1788,9 +1895,6 @@ private:
struct st_parsing_options
{
bool allows_variable;
- bool allows_select_into;
- bool allows_select_procedure;
- bool allows_derived;
st_parsing_options() { reset(); }
void reset();
@@ -2051,10 +2155,9 @@ public:
return m_tok_start;
}
- /** Get the token start position, in the pre-processed buffer. */
- const char *get_cpp_tok_start()
+ void set_cpp_tok_start(const char *pos)
{
- return m_cpp_tok_start;
+ m_cpp_tok_start= pos;
}
/** Get the token end position, in the raw buffer. */
@@ -2063,12 +2166,6 @@ public:
return m_tok_end;
}
- /** Get the token end position, in the pre-processed buffer. */
- const char *get_cpp_tok_end()
- {
- return m_cpp_tok_end;
- }
-
/** Get the previous token start position, in the raw buffer. */
const char *get_tok_start_prev()
{
@@ -2081,12 +2178,6 @@ public:
return m_ptr;
}
- /** Get the current stream pointer, in the pre-processed buffer. */
- const char *get_cpp_ptr()
- {
- return m_cpp_ptr;
- }
-
/** Get the length of the current token, in the raw buffer. */
uint yyLength()
{
@@ -2098,6 +2189,30 @@ public:
return (uint) ((m_ptr - m_tok_start) - 1);
}
+ /** Get the previous token start position, in the pre-processed buffer. */
+ const char *get_cpp_start_prev()
+ {
+ return m_cpp_tok_start_prev;
+ }
+
+ /** Get the token start position, in the pre-processed buffer. */
+ const char *get_cpp_tok_start()
+ {
+ return m_cpp_tok_start;
+ }
+
+ /** Get the token end position, in the pre-processed buffer. */
+ const char *get_cpp_tok_end()
+ {
+ return m_cpp_tok_end;
+ }
+
+ /** Get the current stream pointer, in the pre-processed buffer. */
+ const char *get_cpp_ptr()
+ {
+ return m_cpp_ptr;
+ }
+
/** Get the utf8-body string. */
const char *get_body_utf8_str()
{
@@ -2433,12 +2548,25 @@ struct LEX: public Query_tables_list
SELECT_LEX *current_select;
/* list of all SELECT_LEX */
SELECT_LEX *all_selects_list;
-
+ /* current with clause in parsing if any, otherwise 0*/
+ With_clause *curr_with_clause;
+ /* pointer to the first with clause in the current statement */
+ With_clause *with_clauses_list;
+ /*
+ (*with_clauses_list_last_next) contains a pointer to the last
+ with clause in the current statement
+ */
+ With_clause **with_clauses_list_last_next;
+ /*
+ When a copy of a with element is parsed this is set to the offset of
+ the with element in the input string, otherwise it's set to 0
+ */
+ uint clone_spec_offset;
+
/* Query Plan Footprint of a currently running select */
Explain_query *explain;
// type information
- char *length,*dec;
CHARSET_INFO *charset;
/*
LEX which represents current statement (conventional, SP or PS)
@@ -2519,6 +2647,7 @@ public:
List<Item_func_set_user_var> set_var_list; // in-query assignment list
List<Item_param> param_list;
List<LEX_STRING> view_list; // view list (list of field names in view)
+ List<LEX_STRING> with_column_list; // list of column names in with_list_element
List<LEX_STRING> *column_list; // list of column names (in ANALYZE)
List<LEX_STRING> *index_list; // list of index names (in ANALYZE)
/*
@@ -2538,7 +2667,7 @@ public:
SQL_I_List<ORDER> proc_list;
SQL_I_List<TABLE_LIST> auxiliary_table_list, save_list;
- Create_field *last_field;
+ Column_definition *last_field;
Item_sum *in_sum_func;
udf_func udf;
HA_CHECK_OPT check_opt; // check/repair options
@@ -2641,15 +2770,10 @@ public:
TABLE_LIST *create_last_non_select_table;
/* Prepared statements SQL syntax:*/
LEX_STRING prepared_stmt_name; /* Statement name (in all queries) */
- /*
- Prepared statement query text or name of variable that holds the
- prepared statement (in PREPARE ... queries)
- */
- LEX_STRING prepared_stmt_code;
- /* If true, prepared_stmt_code is a name of variable that holds the query */
- bool prepared_stmt_code_is_varref;
+ /* PREPARE or EXECUTE IMMEDIATE source expression */
+ Item *prepared_stmt_code;
/* Names of user variables holding parameters (in EXECUTE) */
- List<LEX_STRING> prepared_stmt_params;
+ List<Item> prepared_stmt_params;
sp_head *sphead;
sp_name *spname;
bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */
@@ -2772,6 +2896,14 @@ public:
}
+ SQL_I_List<ORDER> save_group_list;
+ SQL_I_List<ORDER> save_order_list;
+ LEX_STRING *win_ref;
+ Window_frame *win_frame;
+ Window_frame_bound *frame_top_bound;
+ Window_frame_bound *frame_bottom_bound;
+ Window_spec *win_spec;
+
inline void free_set_stmt_mem_root()
{
DBUG_ASSERT(!is_arena_for_set_stmt());
@@ -2810,21 +2942,24 @@ public:
{
safe_to_cache_query= 0;
- /*
- There are no sense to mark select_lex and union fields of LEX,
- but we should merk all subselects as uncacheable from current till
- most upper
- */
- SELECT_LEX *sl;
- SELECT_LEX_UNIT *un;
- for (sl= current_select, un= sl->master_unit();
- un != &unit;
- sl= sl->outer_select(), un= sl->master_unit())
+ if (current_select) // initialisation of SP variables has no SELECT
{
- sl->uncacheable|= cause;
- un->uncacheable|= cause;
+ /*
+ There is no sense in marking select_lex and union fields of LEX,
+ but we should mark all subselects as uncacheable from current till
+ most upper
+ */
+ SELECT_LEX *sl;
+ SELECT_LEX_UNIT *un;
+ for (sl= current_select, un= sl->master_unit();
+ un != &unit;
+ sl= sl->outer_select(), un= sl->master_unit())
+ {
+ sl->uncacheable|= cause;
+ un->uncacheable|= cause;
+ }
+ select_lex.uncacheable|= cause;
}
- select_lex.uncacheable|= cause;
}
void set_trg_event_type_for_tables();
@@ -2923,9 +3058,23 @@ public:
bool is_analyze, bool *printed_anything);
void restore_set_statement_var();
- void init_last_field(Create_field *field, const char *name, CHARSET_INFO *cs);
- void set_last_field_type(enum enum_field_types type);
+ void init_last_field(Column_definition *field, const char *name, CHARSET_INFO *cs);
+ void set_last_field_type(const Lex_field_type_st &type);
bool set_bincmp(CHARSET_INFO *cs, bool bin);
+
+ bool get_dynamic_sql_string(LEX_CSTRING *dst, String *buffer);
+ bool prepared_stmt_params_fix_fields(THD *thd)
+ {
+ // Fix Items in the EXECUTE..USING list
+ List_iterator_fast<Item> param_it(prepared_stmt_params);
+ while (Item *param= param_it++)
+ {
+ if (param->fix_fields(thd, 0) || param->check_cols(1))
+ return true;
+ }
+ return false;
+ }
+
// Check if "KEY IF NOT EXISTS name" used outside of ALTER context
bool check_add_key(DDL_options_st ddl)
{
@@ -2956,6 +3105,16 @@ public:
alter_info.key_list.push_back(last_key);
return false;
}
+ // Add a constraint as a part of CREATE TABLE or ALTER TABLE
+ bool add_constraint(LEX_STRING *name, Virtual_column_info *constr,
+ bool if_not_exists)
+ {
+ constr->name= *name;
+ constr->flags= if_not_exists ?
+ Alter_info::CHECK_CONSTRAINT_IF_NOT_EXISTS : 0;
+ alter_info.check_constraint_list.push_back(constr);
+ return false;
+ }
void set_command(enum_sql_command command,
DDL_options_st options)
{
@@ -3198,6 +3357,7 @@ public:
}
};
+
extern sql_digest_state *
digest_add_token(sql_digest_state *state, uint token, LEX_YYSTYPE yylval);
@@ -3218,7 +3378,8 @@ void end_lex_with_single_table(THD *thd, TABLE *table, LEX *old_lex);
int init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex);
extern int MYSQLlex(union YYSTYPE *yylval, THD *thd);
-extern void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str);
+extern void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str,
+ uint *prefix_removed);
extern bool is_lex_native_function(const LEX_STRING *name);
diff --git a/sql/sql_list.cc b/sql/sql_list.cc
index 2c1b3c47d55..c63c83f0645 100644
--- a/sql/sql_list.cc
+++ b/sql/sql_list.cc
@@ -38,21 +38,21 @@ void free_list(I_List <i_string> *list)
}
-base_list::base_list(const base_list &rhs, MEM_ROOT *mem_root)
+bool base_list::copy(const base_list *rhs, MEM_ROOT *mem_root)
{
- if (rhs.elements)
+ bool error= 0;
+ if (rhs->elements)
{
/*
It's okay to allocate an array of nodes at once: we never
call a destructor for list_node objects anyway.
*/
- first= (list_node*) alloc_root(mem_root,
- sizeof(list_node) * rhs.elements);
- if (first)
+ if ((first= (list_node*) alloc_root(mem_root,
+ sizeof(list_node) * rhs->elements)))
{
- elements= rhs.elements;
+ elements= rhs->elements;
list_node *dst= first;
- list_node *src= rhs.first;
+ list_node *src= rhs->first;
for (; dst < first + elements - 1; dst++, src= src->next)
{
dst->info= src->info;
@@ -63,10 +63,12 @@ base_list::base_list(const base_list &rhs, MEM_ROOT *mem_root)
dst->next= &end_of_list;
/* Setup 'last' member */
last= &dst->next;
- return;
+ return 0;
}
+ error= 1;
}
elements= 0;
first= &end_of_list;
last= &first;
+ return error;
}
diff --git a/sql/sql_list.h b/sql/sql_list.h
index f1c4d1fe914..47590920510 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -22,7 +22,8 @@
#include "my_sys.h" /* alloc_root, TRASH, MY_WME,
MY_FAE, MY_ALLOW_ZERO_PTR */
#include "m_string.h" /* bfill */
-#include "thr_malloc.h" /* sql_alloc */
+
+THD *thd_get_current_thd();
/* mysql standard class memory allocator */
@@ -31,11 +32,11 @@ class Sql_alloc
public:
static void *operator new(size_t size) throw ()
{
- return sql_alloc(size);
+ return thd_alloc(thd_get_current_thd(), size);
}
static void *operator new[](size_t size) throw ()
{
- return sql_alloc(size);
+ return thd_alloc(thd_get_current_thd(), size);
}
static void *operator new[](size_t size, MEM_ROOT *mem_root) throw ()
{ return alloc_root(mem_root, size); }
@@ -199,7 +200,8 @@ public:
need to copy elements by value, you should employ
list_copy_and_replace_each_value after creating a copy.
*/
- base_list(const base_list &rhs, MEM_ROOT *mem_root);
+ bool copy(const base_list *rhs, MEM_ROOT *mem_root);
+ base_list(const base_list &rhs, MEM_ROOT *mem_root) { copy(&rhs, mem_root); }
inline base_list(bool error) { }
inline bool push_back(void *info)
{
@@ -453,6 +455,11 @@ public:
el= &current->next;
return current->info;
}
+ /* Get what calling next() would return, without moving the iterator */
+ inline void *peek()
+ {
+ return (*el)->info;
+ }
inline void *next_fast(void)
{
list_node *tmp;
@@ -505,6 +512,10 @@ public:
{
return el == &list->last_ref()->next;
}
+ inline bool at_end()
+ {
+ return current == &end_of_list;
+ }
friend class error_list_iterator;
};
@@ -529,6 +540,8 @@ public:
inline void disjoin(List<T> *list) { base_list::disjoin(list); }
inline bool add_unique(T *a, bool (*eq)(T *a, T *b))
{ return base_list::add_unique(a, (List_eq *)eq); }
+ inline bool copy(const List<T> *list, MEM_ROOT *root)
+ { return base_list::copy(list, root); }
void delete_elements(void)
{
list_node *element,*next;
@@ -552,6 +565,7 @@ public:
List_iterator() : base_list_iterator() {}
inline void init(List<T> &a) { base_list_iterator::init(a); }
inline T* operator++(int) { return (T*) base_list_iterator::next(); }
+ inline T* peek() { return (T*) base_list_iterator::peek(); }
inline T *replace(T *a) { return (T*) base_list_iterator::replace(a); }
inline T *replace(List<T> &a) { return (T*) base_list_iterator::replace(a); }
inline void rewind(void) { base_list_iterator::rewind(); }
@@ -609,7 +623,7 @@ inline void bubble_sort(List<T> *list_to_sort,
swap= FALSE;
while ((item2= it++) && (ref2= it.ref()) != last_ref)
{
- if (sort_func(item1, item2, arg) < 0)
+ if (sort_func(item1, item2, arg) > 0)
{
*ref1= item2;
*ref2= item1;
@@ -652,6 +666,10 @@ struct ilink
if (next) next->prev=prev;
prev=0 ; next=0;
}
+ inline void assert_linked()
+ {
+ DBUG_ASSERT(prev != 0 && next != 0);
+ }
virtual ~ilink() { unlink(); } /*lint -e1740 */
};
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 85ab43df0ec..49558f8b694 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2010, 2017, MariaDB Corporation
+ Copyright (c) 2010, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -41,6 +41,7 @@
#include "sql_trigger.h"
#include "sql_derived.h"
#include "sql_show.h"
+#include "debug_sync.h"
extern "C" int _my_b_net_read(IO_CACHE *info, uchar *Buffer, size_t Count);
@@ -61,31 +62,120 @@ XML_TAG::XML_TAG(int l, String f, String v)
}
+/*
+ Field and line terminators must be interpreted as sequence of unsigned char.
+ Otherwise, non-ascii terminators will be negative on some platforms,
+ and positive on others (depending on the implementation of char).
+*/
+class Term_string
+{
+ const uchar *m_ptr;
+ uint m_length;
+ int m_initial_byte;
+public:
+ Term_string(const String &str) :
+ m_ptr(static_cast<const uchar*>(static_cast<const void*>(str.ptr()))),
+ m_length(str.length()),
+ m_initial_byte((uchar) (str.length() ? str.ptr()[0] : INT_MAX))
+ { }
+ void set(const uchar *str, uint length, int initial_byte)
+ {
+ m_ptr= str;
+ m_length= length;
+ m_initial_byte= initial_byte;
+ }
+ void reset() { set(NULL, 0, INT_MAX); }
+ const uchar *ptr() const { return m_ptr; }
+ uint length() const { return m_length; }
+ int initial_byte() const { return m_initial_byte; }
+ bool eq(const Term_string &other) const
+ {
+ return length() == other.length() && !memcmp(ptr(), other.ptr(), length());
+ }
+};
+
+
#define GET (stack_pos != stack ? *--stack_pos : my_b_get(&cache))
#define PUSH(A) *(stack_pos++)=(A)
-class READ_INFO {
+#ifdef WITH_WSREP
+/** If requested by wsrep_load_data_splitting, commit and restart
+the transaction after every 10,000 inserted rows. */
+
+static bool wsrep_load_data_split(THD *thd, const TABLE *table,
+ const COPY_INFO &info)
+{
+ DBUG_ENTER("wsrep_load_data_split");
+
+ if (!wsrep_load_data_splitting || !wsrep_on(thd)
+ || !info.records || (info.records % 10000)
+ || !thd->transaction.stmt.ha_list
+ || thd->transaction.stmt.ha_list->ht() != binlog_hton
+ || !thd->transaction.stmt.ha_list->next()
+ || thd->transaction.stmt.ha_list->next()->next())
+ DBUG_RETURN(false);
+
+ if (handlerton* hton= thd->transaction.stmt.ha_list->next()->ht())
+ {
+ if (hton->db_type != DB_TYPE_INNODB)
+ DBUG_RETURN(false);
+ WSREP_DEBUG("intermediate transaction commit in LOAD DATA");
+ wsrep_set_load_multi_commit(thd, true);
+ if (wsrep_run_wsrep_commit(thd, true) != WSREP_TRX_OK) DBUG_RETURN(true);
+ if (binlog_hton->commit(binlog_hton, thd, true)) DBUG_RETURN(true);
+ wsrep_post_commit(thd, true);
+ hton->commit(hton, thd, true);
+ wsrep_set_load_multi_commit(thd, false);
+ DEBUG_SYNC(thd, "intermediate_transaction_commit");
+ table->file->extra(HA_EXTRA_FAKE_START_STMT);
+ }
+
+ DBUG_RETURN(false);
+}
+/*
+ If the commit fails, then an early return from
+ the function occurs there and therefore we need
+ to reset the table->auto_increment_field_not_null
+ flag, which is usually reset after calling
+ the write_record():
+*/
+#define WSREP_LOAD_DATA_SPLIT(thd,table,info) \
+ if (wsrep_load_data_split(thd,table,info)) \
+ { \
+ table->auto_increment_field_not_null= FALSE; \
+ DBUG_RETURN(1); \
+ }
+#else /* WITH_WSREP */
+#define WSREP_LOAD_DATA_SPLIT(thd,table,info) /* empty */
+#endif /* WITH_WSREP */
+
+#define WRITE_RECORD(thd,table,info) \
+ do { \
+ int err_= write_record(thd, table, &info); \
+ table->auto_increment_field_not_null= FALSE; \
+ if (err_) \
+ DBUG_RETURN(1); \
+ } while (0)
+
+class READ_INFO: public Load_data_param
+{
File file;
- uchar *buffer, /* Buffer for read text */
- *end_of_buff; /* Data in bufferts ends here */
- uint buff_length; /* Length of buffert */
- const uchar *field_term_ptr,*line_term_ptr;
- const char *line_start_ptr,*line_start_end;
- uint field_term_length,line_term_length,enclosed_length;
- int field_term_char,line_term_char,enclosed_char,escape_char;
+ String data; /* Read buffer */
+ Term_string m_field_term; /* FIELDS TERMINATED BY 'string' */
+ Term_string m_line_term; /* LINES TERMINATED BY 'string' */
+ Term_string m_line_start; /* LINES STARTING BY 'string' */
+ int enclosed_char,escape_char;
int *stack,*stack_pos;
bool found_end_of_line,start_of_line,eof;
int level; /* for load xml */
-
-#if MYSQL_VERSION_ID >= 100200
-#error This 10.0 and 10.1 specific fix should be removed in 10.2.
-#error Fix read_mbtail() to use my_charlen() instead of my_charlen_tmp()
-#else
- int my_charlen_tmp(CHARSET_INFO *cs, const char *str, const char *end)
+ bool getbyte(char *to)
{
- my_wc_t wc;
- return cs->cset->mb_wc(cs, &wc, (const uchar *) str, (const uchar *) end);
+ int chr= GET;
+ if (chr == my_b_EOF)
+ return (eof= true);
+ *to= chr;
+ return false;
}
/**
@@ -122,7 +212,7 @@ class READ_INFO {
bool read_mbtail(String *str)
{
int chlen;
- if ((chlen= my_charlen_tmp(read_charset, str->end() - 1, str->end())) == 1)
+ if ((chlen= my_charlen(charset(), str->end() - 1, str->end())) == 1)
return false; // Single byte character found
for (uint32 length0= str->length() - 1 ; MY_CS_IS_TOOSMALL(chlen); )
{
@@ -133,7 +223,7 @@ class READ_INFO {
return true; // EOF
}
str->append(chr);
- chlen= my_charlen_tmp(read_charset, str->ptr() + length0, str->end());
+ chlen= my_charlen(charset(), str->ptr() + length0, str->end());
if (chlen == MY_CS_ILSEQ)
{
/**
@@ -150,16 +240,14 @@ class READ_INFO {
DBUG_PRINT("info", ("read_mbtail: chlen=%d", chlen));
return false; // Good multi-byte character
}
-#endif
public:
bool error,line_cuted,found_null,enclosed;
uchar *row_start, /* Found row starts here */
*row_end; /* Found row ends here */
- CHARSET_INFO *read_charset;
LOAD_FILE_IO_CACHE cache;
- READ_INFO(File file,uint tot_length,CHARSET_INFO *cs,
+ READ_INFO(THD *thd, File file, const Load_data_param &param,
String &field_term,String &line_start,String &line_term,
String &enclosed,int escape,bool get_it_from_net, bool is_fifo);
~READ_INFO();
@@ -167,7 +255,11 @@ public:
int read_fixed_length(void);
int next_line(void);
char unescape(char chr);
- int terminator(const uchar *ptr, uint length);
+ bool terminator(const uchar *ptr, uint length);
+ bool terminator(const Term_string &str)
+ { return terminator(str.ptr(), str.length()); }
+ bool terminator(int chr, const Term_string &str)
+ { return str.initial_byte() == chr && terminator(str); }
bool find_start_of_fields();
/* load xml */
List<XML_TAG> taglist;
@@ -210,6 +302,31 @@ static bool write_execute_load_query_log_event(THD *, sql_exchange*, const
char*, const char*, bool, enum enum_duplicates, bool, bool, int);
#endif /* EMBEDDED_LIBRARY */
+
+bool Load_data_param::add_outvar_field(THD *thd, const Field *field)
+{
+ if (field->flags & BLOB_FLAG)
+ {
+ m_use_blobs= true;
+ m_fixed_length+= 256; // Will be extended if needed
+ }
+ else
+ m_fixed_length+= field->field_length;
+ return false;
+}
+
+
+bool Load_data_param::add_outvar_user_var(THD *thd)
+{
+ if (m_is_fixed_length)
+ {
+ my_error(ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR, MYF(0));
+ return true;
+ }
+ return false;
+}
+
+
/*
Execute LOAD DATA query
@@ -241,8 +358,6 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
File file;
TABLE *table= NULL;
int error= 0;
- String *field_term=ex->field_term,*escaped=ex->escaped;
- String *enclosed=ex->enclosed;
bool is_fifo=0;
#ifndef EMBEDDED_LIBRARY
killed_state killed_status;
@@ -271,7 +386,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
read_file_from_client = 0; //server is always in the same process
#endif
- if (escaped->length() > 1 || enclosed->length() > 1)
+ if (ex->escaped->length() > 1 || ex->enclosed->length() > 1)
{
my_message(ER_WRONG_FIELD_TERMINATORS,
ER_THD(thd, ER_WRONG_FIELD_TERMINATORS),
@@ -280,8 +395,8 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
}
/* Report problems with non-ascii separators */
- if (!escaped->is_ascii() || !enclosed->is_ascii() ||
- !field_term->is_ascii() ||
+ if (!ex->escaped->is_ascii() || !ex->enclosed->is_ascii() ||
+ !ex->field_term->is_ascii() ||
!ex->line_term->is_ascii() || !ex->line_start->is_ascii())
{
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -357,22 +472,24 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
Let us also prepare SET clause, although it is probably empty
in this case.
*/
- if (setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, NULL, 0) ||
- setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, NULL, 0))
+ if (setup_fields(thd, Ref_ptr_array(),
+ set_fields, MARK_COLUMNS_WRITE, 0, NULL, 0) ||
+ setup_fields(thd, Ref_ptr_array(),
+ set_values, MARK_COLUMNS_READ, 0, NULL, 0))
DBUG_RETURN(TRUE);
}
else
{ // Part field list
/* TODO: use this conds for 'WITH CHECK OPTIONS' */
- if (setup_fields(thd, 0, fields_vars, MARK_COLUMNS_WRITE, 0, NULL, 0) ||
- setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, NULL, 0) ||
+ if (setup_fields(thd, Ref_ptr_array(),
+ fields_vars, MARK_COLUMNS_WRITE, 0, NULL, 0) ||
+ setup_fields(thd, Ref_ptr_array(),
+ set_fields, MARK_COLUMNS_WRITE, 0, NULL, 0) ||
check_that_all_fields_are_given_values(thd, table, table_list))
DBUG_RETURN(TRUE);
- /* Add all fields with default functions to table->write_set. */
- if (table->default_field)
- table->mark_default_fields_for_write();
/* Fix the expressions in SET clause */
- if (setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, NULL, 0))
+ if (setup_fields(thd, Ref_ptr_array(),
+ set_values, MARK_COLUMNS_READ, 0, NULL, 0))
DBUG_RETURN(TRUE);
}
switch_to_nullable_trigger_fields(fields_vars, table);
@@ -382,51 +499,21 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
table->prepare_triggers_for_insert_stmt_or_event();
table->mark_columns_needed_for_insert();
- if (table->vfield)
- {
- for (Field **vfield_ptr= table->vfield; *vfield_ptr; vfield_ptr++)
- {
- if ((*vfield_ptr)->stored_in_db)
- {
- thd->lex->unit.insert_table_with_stored_vcol= table;
- break;
- }
- }
- }
-
- uint tot_length=0;
- bool use_blobs= 0, use_vars= 0;
+ Load_data_param param(ex->cs ? ex->cs : thd->variables.collation_database,
+ !ex->field_term->length() && !ex->enclosed->length());
List_iterator_fast<Item> it(fields_vars);
Item *item;
while ((item= it++))
{
- Item *real_item= item->real_item();
-
- if (real_item->type() == Item::FIELD_ITEM)
- {
- Field *field= ((Item_field*)real_item)->field;
- if (field->flags & BLOB_FLAG)
- {
- use_blobs= 1;
- tot_length+= 256; // Will be extended if needed
- }
- else
- tot_length+= field->field_length;
- }
- else if (item->type() == Item::STRING_ITEM)
- use_vars= 1;
+ const Load_data_outvar *var= item->get_load_data_outvar_or_error();
+ if (!var || var->load_data_add_outvar(thd, &param))
+ DBUG_RETURN(true);
}
- if (use_blobs && !ex->line_term->length() && !field_term->length())
+ if (param.use_blobs() && !ex->line_term->length() && !ex->field_term->length())
{
my_message(ER_BLOBS_AND_NO_TERMINATED,
- ER_THD(thd, ER_BLOBS_AND_NO_TERMINATED),
- MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (use_vars && !field_term->length() && !enclosed->length())
- {
- my_error(ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR, MYF(0));
+ ER_THD(thd, ER_BLOBS_AND_NO_TERMINATED), MYF(0));
DBUG_RETURN(TRUE);
}
@@ -516,13 +603,13 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
bzero((char*) &info,sizeof(info));
info.ignore= ignore;
info.handle_duplicates=handle_duplicates;
- info.escape_char= (escaped->length() && (ex->escaped_given() ||
+ info.escape_char= (ex->escaped->length() && (ex->escaped_given() ||
!(thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)))
- ? (*escaped)[0] : INT_MAX;
+ ? (*ex->escaped)[0] : INT_MAX;
- READ_INFO read_info(file,tot_length,
- ex->cs ? ex->cs : thd->variables.collation_database,
- *field_term,*ex->line_start, *ex->line_term, *enclosed,
+ READ_INFO read_info(thd, file, param,
+ *ex->field_term, *ex->line_start,
+ *ex->line_term, *ex->enclosed,
info.escape_char, read_file_from_client, is_fifo);
if (read_info.error)
{
@@ -583,14 +670,14 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error= read_xml_field(thd, info, table_list, fields_vars,
set_fields, set_values, read_info,
*(ex->line_term), skip_lines, ignore);
- else if (!field_term->length() && !enclosed->length())
+ else if (read_info.is_fixed_length())
error= read_fixed_length(thd, info, table_list, fields_vars,
set_fields, set_values, read_info,
skip_lines, ignore);
else
error= read_sep_field(thd, info, table_list, fields_vars,
set_fields, set_values, read_info,
- *enclosed, skip_lines, ignore);
+ *ex->enclosed, skip_lines, ignore);
thd_proc_info(thd, "End bulk insert");
if (!error)
@@ -794,14 +881,9 @@ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex,
{
if (n++)
query_str.append(", ");
- if (item->real_type() == Item::FIELD_ITEM)
- append_identifier(thd, &query_str, item->name, strlen(item->name));
- else
- {
- /* Actually Item_user_var_as_out_param despite claiming STRING_ITEM. */
- DBUG_ASSERT(item->type() == Item::STRING_ITEM);
- ((Item_user_var_as_out_param *)item)->print_for_load(thd, &query_str);
- }
+ const Load_data_outvar *var= item->get_load_data_outvar();
+ DBUG_ASSERT(var);
+ var->load_data_print_for_log_event(thd, &query_str);
}
query_str.append(")");
}
@@ -849,9 +931,9 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
ulong skip_lines, bool ignore_check_option_errors)
{
List_iterator_fast<Item> it(fields_vars);
- Item_field *sql_field;
+ Item *item;
TABLE *table= table_list->table;
- bool err, progress_reports, auto_increment_field_not_null=false;
+ bool progress_reports;
ulonglong counter, time_to_report_progress;
DBUG_ENTER("read_fixed_length");
@@ -861,12 +943,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if ((thd->progress.max_counter= read_info.file_length()) == ~(my_off_t) 0)
progress_reports= 0;
- while ((sql_field= (Item_field*) it++))
- {
- if (table->field[sql_field->field->field_index] == table->next_number_field)
- auto_increment_field_not_null= true;
- }
-
while (!read_info.read_fixed_length())
{
if (thd->killed)
@@ -902,50 +978,28 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
#endif
restore_record(table, s->default_values);
- /*
- There is no variables in fields_vars list in this format so
- this conversion is safe.
- */
- while ((sql_field= (Item_field*) it++))
- {
- Field *field= sql_field->field;
- table->auto_increment_field_not_null= auto_increment_field_not_null;
- /*
- No fields specified in fields_vars list can be null in this format.
- Mark field as not null, we should do this for each row because of
- restore_record...
- */
- field->set_notnull();
+ while ((item= it++))
+ {
+ Load_data_outvar *dst= item->get_load_data_outvar();
+ DBUG_ASSERT(dst);
if (pos == read_info.row_end)
{
- thd->cuted_fields++; /* Not enough fields */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_WARN_TOO_FEW_RECORDS,
- ER_THD(thd, ER_WARN_TOO_FEW_RECORDS),
- thd->get_stmt_da()->current_row_for_warning());
- /*
- Timestamp fields that are NOT NULL are autoupdated if there is no
- corresponding value in the data file.
- */
- if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP)
- field->set_time();
+ if (dst->load_data_set_no_data(thd, &read_info))
+ DBUG_RETURN(1);
}
else
{
- uint length;
- uchar save_chr;
- if ((length=(uint) (read_info.row_end-pos)) >
- field->field_length)
- length=field->field_length;
- save_chr=pos[length]; pos[length]='\0'; // Safeguard aganst malloc
- field->store((char*) pos,length,read_info.read_charset);
- pos[length]=save_chr;
- if ((pos+=length) > read_info.row_end)
- pos= read_info.row_end; /* Fills rest with space */
+ uint length, fixed_length= dst->load_data_fixed_length();
+ uchar save_chr;
+ if ((length=(uint) (read_info.row_end - pos)) > fixed_length)
+ length= fixed_length;
+ save_chr= pos[length]; pos[length]= '\0'; // Safeguard against malloc
+ dst->load_data_set_value(thd, (const char *) pos, length, &read_info);
+ pos[length]= save_chr;
+ if ((pos+= length) > read_info.row_end)
+ pos= read_info.row_end; // Fills rest with space
}
- /* Do not auto-update this field. */
- field->set_has_explicit_value();
}
if (pos != read_info.row_end)
{
@@ -959,8 +1013,7 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (thd->killed ||
fill_record_n_invoke_before_triggers(thd, table, set_fields, set_values,
ignore_check_option_errors,
- TRG_EVENT_INSERT) ||
- (table->default_field && table->update_default_fields()))
+ TRG_EVENT_INSERT))
DBUG_RETURN(1);
switch (table_list->view_check_option(thd, ignore_check_option_errors)) {
@@ -971,11 +1024,9 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
DBUG_RETURN(-1);
}
- err= write_record(thd, table, &info);
- table->auto_increment_field_not_null= FALSE;
- if (err)
- DBUG_RETURN(1);
-
+ WSREP_LOAD_DATA_SPLIT(thd, table, info);
+ WRITE_RECORD(thd, table, info);
+
/*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
@@ -997,7 +1048,6 @@ continue_loop:;
}
-
static int
read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
List<Item> &fields_vars, List<Item> &set_fields,
@@ -1009,7 +1059,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
Item *item;
TABLE *table= table_list->table;
uint enclosed_length;
- bool err, progress_reports;
+ bool progress_reports;
ulonglong counter, time_to_report_progress;
DBUG_ENTER("read_sep_field");
@@ -1045,8 +1095,6 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
{
uint length;
uchar *pos;
- Item_field *real_item;
-
if (read_info.read_field())
break;
@@ -1057,71 +1105,22 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
pos=read_info.row_start;
length=(uint) (read_info.row_end-pos);
- real_item= item->field_for_view_update();
+ Load_data_outvar *dst= item->get_load_data_outvar_or_error();
+ DBUG_ASSERT(dst);
if ((!read_info.enclosed &&
(enclosed_length && length == 4 &&
!memcmp(pos, STRING_WITH_LEN("NULL")))) ||
(length == 1 && read_info.found_null))
{
- if (item->type() == Item::STRING_ITEM)
- {
- ((Item_user_var_as_out_param *)item)->set_null_value(
- read_info.read_charset);
- }
- else if (!real_item)
- {
- my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
+ if (dst->load_data_set_null(thd, &read_info))
DBUG_RETURN(1);
- }
- else
- {
- Field *field= real_item->field;
- if (field->reset())
- {
- my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0), field->field_name,
- thd->get_stmt_da()->current_row_for_warning());
- DBUG_RETURN(1);
- }
- field->set_null();
- if (!field->maybe_null())
- {
- /*
- Timestamp fields that are NOT NULL are autoupdated if there is no
- corresponding value in the data file.
- */
- if (field->type() == MYSQL_TYPE_TIMESTAMP)
- field->set_time();
- else if (field != table->next_number_field)
- field->set_warning(Sql_condition::WARN_LEVEL_WARN,
- ER_WARN_NULL_TO_NOTNULL, 1);
- }
- /* Do not auto-update this field. */
- field->set_has_explicit_value();
- }
-
- continue;
- }
-
- if (item->type() == Item::STRING_ITEM)
- {
- ((Item_user_var_as_out_param *)item)->set_value((char*) pos, length,
- read_info.read_charset);
- }
- else if (!real_item)
- {
- my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
- DBUG_RETURN(1);
}
else
{
- Field *field= real_item->field;
- field->set_notnull();
- read_info.row_end[0]=0; // Safe to change end marker
- if (field == table->next_number_field)
- table->auto_increment_field_not_null= TRUE;
- field->store((char*) pos, length, read_info.read_charset);
- field->set_has_explicit_value();
+ read_info.row_end[0]= 0; // Safe to change end marker
+ if (dst->load_data_set_value(thd, (const char *) pos, length, &read_info))
+ DBUG_RETURN(1);
}
}
@@ -1142,49 +1141,18 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
break;
for (; item ; item= it++)
{
- Item_field *real_item= item->field_for_view_update();
- if (item->type() == Item::STRING_ITEM)
- {
- ((Item_user_var_as_out_param *)item)->set_null_value(
- read_info.read_charset);
- }
- else if (!real_item)
- {
- my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
+ Load_data_outvar *dst= item->get_load_data_outvar_or_error();
+ DBUG_ASSERT(dst);
+ if (unlikely(dst->load_data_set_no_data(thd, &read_info)))
DBUG_RETURN(1);
- }
- else
- {
- Field *field= real_item->field;
- if (field->reset())
- {
- my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0),field->field_name,
- thd->get_stmt_da()->current_row_for_warning());
- DBUG_RETURN(1);
- }
- if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP)
- field->set_time();
- field->set_has_explicit_value();
- /*
- TODO: We probably should not throw warning for each field.
- But how about intention to always have the same number
- of warnings in THD::cuted_fields (and get rid of cuted_fields
- in the end ?)
- */
- thd->cuted_fields++;
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_WARN_TOO_FEW_RECORDS,
- ER_THD(thd, ER_WARN_TOO_FEW_RECORDS),
- thd->get_stmt_da()->current_row_for_warning());
- }
}
}
if (thd->killed ||
- fill_record_n_invoke_before_triggers(thd, table, set_fields, set_values,
+ fill_record_n_invoke_before_triggers(thd, table, set_fields,
+ set_values,
ignore_check_option_errors,
- TRG_EVENT_INSERT) ||
- (table->default_field && table->update_default_fields()))
+ TRG_EVENT_INSERT))
DBUG_RETURN(1);
switch (table_list->view_check_option(thd,
@@ -1196,10 +1164,9 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
DBUG_RETURN(-1);
}
- err= write_record(thd, table, &info);
- table->auto_increment_field_not_null= FALSE;
- if (err)
- DBUG_RETURN(1);
+ WSREP_LOAD_DATA_SPLIT(thd, table, info);
+ WRITE_RECORD(thd, table, info);
+
/*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
@@ -1237,7 +1204,6 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
Item *item;
TABLE *table= table_list->table;
bool no_trans_update_stmt;
- CHARSET_INFO *cs= read_info.read_charset;
DBUG_ENTER("read_xml_field");
no_trans_update_stmt= !table->file->has_transactions();
@@ -1282,57 +1248,14 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
while(tag && strcmp(tag->field.c_ptr(), item->name) != 0)
tag= xmlit++;
-
- Item_field *real_item= item->field_for_view_update();
- if (!tag) // found null
- {
- if (item->type() == Item::STRING_ITEM)
- ((Item_user_var_as_out_param *) item)->set_null_value(cs);
- else if (!real_item)
- {
- my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
- DBUG_RETURN(1);
- }
- else
- {
- Field *field= real_item->field;
- field->reset();
- field->set_null();
- if (field == table->next_number_field)
- table->auto_increment_field_not_null= TRUE;
- if (!field->maybe_null())
- {
- if (field->type() == FIELD_TYPE_TIMESTAMP)
- field->set_time();
- else if (field != table->next_number_field)
- field->set_warning(Sql_condition::WARN_LEVEL_WARN,
- ER_WARN_NULL_TO_NOTNULL, 1);
- }
- /* Do not auto-update this field. */
- field->set_has_explicit_value();
- }
- continue;
- }
- if (item->type() == Item::STRING_ITEM)
- ((Item_user_var_as_out_param *) item)->set_value(
- (char *) tag->value.ptr(),
- tag->value.length(), cs);
- else if (!real_item)
- {
- my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
+ Load_data_outvar *dst= item->get_load_data_outvar_or_error();
+ DBUG_ASSERT(dst);
+ if (!tag ? dst->load_data_set_null(thd, &read_info) :
+ dst->load_data_set_value(thd, tag->value.ptr(),
+ tag->value.length(),
+ &read_info))
DBUG_RETURN(1);
- }
- else
- {
-
- Field *field= ((Item_field *)item)->field;
- field->set_notnull();
- if (field == table->next_number_field)
- table->auto_increment_field_not_null= TRUE;
- field->store((char *) tag->value.ptr(), tag->value.length(), cs);
- field->set_has_explicit_value();
- }
}
if (read_info.error)
@@ -1343,45 +1266,13 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
skip_lines--;
continue;
}
-
- if (item)
- {
- /* Have not read any field, thus input file is simply ended */
- if (item == fields_vars.head())
- break;
-
- for ( ; item; item= it++)
- {
- Item_field *real_item= item->field_for_view_update();
- if (item->type() == Item::STRING_ITEM)
- ((Item_user_var_as_out_param *)item)->set_null_value(cs);
- else if (!real_item)
- {
- my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
- DBUG_RETURN(1);
- }
- else
- {
- /*
- QQ: We probably should not throw warning for each field.
- But how about intention to always have the same number
- of warnings in THD::cuted_fields (and get rid of cuted_fields
- in the end ?)
- */
- thd->cuted_fields++;
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_WARN_TOO_FEW_RECORDS,
- ER_THD(thd, ER_WARN_TOO_FEW_RECORDS),
- thd->get_stmt_da()->current_row_for_warning());
- }
- }
- }
+
+ DBUG_ASSERT(!item);
if (thd->killed ||
fill_record_n_invoke_before_triggers(thd, table, set_fields, set_values,
ignore_check_option_errors,
- TRG_EVENT_INSERT) ||
- (table->default_field && table->update_default_fields()))
+ TRG_EVENT_INSERT))
DBUG_RETURN(1);
switch (table_list->view_check_option(thd,
@@ -1392,10 +1283,10 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
case VIEW_CHECK_ERROR:
DBUG_RETURN(-1);
}
-
- if (write_record(thd, table, &info))
- DBUG_RETURN(1);
-
+
+ WSREP_LOAD_DATA_SPLIT(thd, table, info);
+ WRITE_RECORD(thd, table, info);
+
/*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
@@ -1435,67 +1326,47 @@ READ_INFO::unescape(char chr)
*/
-READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
+READ_INFO::READ_INFO(THD *thd, File file_par,
+ const Load_data_param &param,
String &field_term, String &line_start, String &line_term,
String &enclosed_par, int escape, bool get_it_from_net,
bool is_fifo)
- :file(file_par), buffer(NULL), buff_length(tot_length), escape_char(escape),
- found_end_of_line(false), eof(false),
- error(false), line_cuted(false), found_null(false), read_charset(cs)
+ :Load_data_param(param),
+ file(file_par),
+ m_field_term(field_term), m_line_term(line_term), m_line_start(line_start),
+ escape_char(escape), found_end_of_line(false), eof(false),
+ error(false), line_cuted(false), found_null(false)
{
+ data.set_thread_specific();
/*
Field and line terminators must be interpreted as sequence of unsigned char.
Otherwise, non-ascii terminators will be negative on some platforms,
and positive on others (depending on the implementation of char).
*/
- field_term_ptr=
- static_cast<const uchar*>(static_cast<const void*>(field_term.ptr()));
- field_term_length= field_term.length();
- line_term_ptr=
- static_cast<const uchar*>(static_cast<const void*>(line_term.ptr()));
- line_term_length= line_term.length();
level= 0; /* for load xml */
- if (line_start.length() == 0)
- {
- line_start_ptr=0;
- start_of_line= 0;
- }
- else
- {
- line_start_ptr= line_start.ptr();
- line_start_end=line_start_ptr+line_start.length();
- start_of_line= 1;
- }
+ start_of_line= line_start.length() != 0;
/* If field_terminator == line_terminator, don't use line_terminator */
- if (field_term_length == line_term_length &&
- !memcmp(field_term_ptr,line_term_ptr,field_term_length))
- {
- line_term_length=0;
- line_term_ptr= NULL;
- }
- enclosed_char= (enclosed_length=enclosed_par.length()) ?
- (uchar) enclosed_par[0] : INT_MAX;
- field_term_char= field_term_length ? field_term_ptr[0] : INT_MAX;
- line_term_char= line_term_length ? line_term_ptr[0] : INT_MAX;
+ if (m_field_term.eq(m_line_term))
+ m_line_term.reset();
+ enclosed_char= enclosed_par.length() ? (uchar) enclosed_par[0] : INT_MAX;
/* Set of a stack for unget if long terminators */
- uint length= MY_MAX(cs->mbmaxlen, MY_MAX(field_term_length, line_term_length)) + 1;
+ uint length= MY_MAX(charset()->mbmaxlen, MY_MAX(m_field_term.length(),
+ m_line_term.length())) + 1;
set_if_bigger(length,line_start.length());
- stack=stack_pos=(int*) sql_alloc(sizeof(int)*length);
+ stack= stack_pos= (int*) thd->alloc(sizeof(int) * length);
- if (!(buffer=(uchar*) my_malloc(buff_length+1,MYF(MY_WME | MY_THREAD_SPECIFIC))))
- error= 1; /* purecov: inspected */
+ DBUG_ASSERT(m_fixed_length < UINT_MAX32);
+ if (data.reserve((size_t) m_fixed_length))
+ error=1; /* purecov: inspected */
else
{
- end_of_buff=buffer+buff_length;
if (init_io_cache(&cache,(get_it_from_net) ? -1 : file, 0,
(get_it_from_net) ? READ_NET :
(is_fifo ? READ_FIFO : READ_CACHE),0L,1,
MYF(MY_WME | MY_THREAD_SPECIFIC)))
{
- my_free(buffer); /* purecov: inspected */
- buffer= NULL;
error=1;
}
else
@@ -1518,7 +1389,6 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
READ_INFO::~READ_INFO()
{
::end_io_cache(&cache);
- my_free(buffer);
List_iterator<XML_TAG> xmlit(taglist);
XML_TAG *t;
while ((t= xmlit++))
@@ -1526,7 +1396,7 @@ READ_INFO::~READ_INFO()
}
-inline int READ_INFO::terminator(const uchar *ptr,uint length)
+inline bool READ_INFO::terminator(const uchar *ptr, uint length)
{
int chr=0; // Keep gcc happy
uint i;
@@ -1538,11 +1408,11 @@ inline int READ_INFO::terminator(const uchar *ptr,uint length)
}
}
if (i == length)
- return 1;
+ return true;
PUSH(chr);
while (i-- > 1)
PUSH(*--ptr);
- return 0;
+ return false;
}
@@ -1597,7 +1467,6 @@ inline int READ_INFO::terminator(const uchar *ptr,uint length)
int READ_INFO::read_field()
{
int chr,found_enclosed_char;
- uchar *to,*new_buffer;
found_null=0;
if (found_end_of_line)
@@ -1616,11 +1485,11 @@ int READ_INFO::read_field()
found_end_of_line=eof=1;
return 1;
}
- to=buffer;
+ data.length(0);
if (chr == enclosed_char)
{
found_enclosed_char=enclosed_char;
- *to++=(uchar) chr; // If error
+ data.append(chr); // If error
}
else
{
@@ -1631,7 +1500,7 @@ int READ_INFO::read_field()
for (;;)
{
// Make sure we have enough space for the longest multi-byte character.
- while ( to + read_charset->mbmaxlen < end_of_buff)
+ while (data.length() + charset()->mbmaxlen <= data.alloced_length())
{
chr = GET;
if (chr == my_b_EOF)
@@ -1640,7 +1509,7 @@ int READ_INFO::read_field()
{
if ((chr=GET) == my_b_EOF)
{
- *to++= (uchar) escape_char;
+ data.append(escape_char);
goto found_eof;
}
/*
@@ -1652,24 +1521,24 @@ int READ_INFO::read_field()
*/
if (escape_char != enclosed_char || chr == escape_char)
{
- *to++ = (uchar) unescape((char) chr);
+ data.append(unescape((char) chr));
continue;
}
PUSH(chr);
chr= escape_char;
}
#ifdef ALLOW_LINESEPARATOR_IN_STRINGS
- if (chr == line_term_char)
+ if (chr == m_line_term.initial_byte())
#else
- if (chr == line_term_char && found_enclosed_char == INT_MAX)
+ if (chr == m_line_term.initial_byte() && found_enclosed_char == INT_MAX)
#endif
{
- if (terminator(line_term_ptr,line_term_length))
+ if (terminator(m_line_term))
{ // Maybe unexpected linefeed
enclosed=0;
found_end_of_line=1;
- row_start=buffer;
- row_end= to;
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end();
return 0;
}
}
@@ -1677,27 +1546,24 @@ int READ_INFO::read_field()
{
if ((chr=GET) == found_enclosed_char)
{ // Remove dupplicated
- *to++ = (uchar) chr;
+ data.append(chr);
continue;
}
// End of enclosed field if followed by field_term or line_term
- if (chr == my_b_EOF ||
- (chr == line_term_char && terminator(line_term_ptr,
- line_term_length)))
+ if (chr == my_b_EOF || terminator(chr, m_line_term))
{
/* Maybe unexpected linefeed */
enclosed=1;
found_end_of_line=1;
- row_start=buffer+1;
- row_end= to;
+ row_start= (uchar *) data.ptr() + 1;
+ row_end= (uchar *) data.end();
return 0;
}
- if (chr == field_term_char &&
- terminator(field_term_ptr,field_term_length))
+ if (terminator(chr, m_field_term))
{
enclosed=1;
- row_start=buffer+1;
- row_end= to;
+ row_start= (uchar *) data.ptr() + 1;
+ row_end= (uchar *) data.end();
return 0;
}
/*
@@ -1708,56 +1574,33 @@ int READ_INFO::read_field()
/* copy the found term character to 'to' */
chr= found_enclosed_char;
}
- else if (chr == field_term_char && found_enclosed_char == INT_MAX)
+ else if (chr == m_field_term.initial_byte() &&
+ found_enclosed_char == INT_MAX)
{
- if (terminator(field_term_ptr,field_term_length))
+ if (terminator(m_field_term))
{
enclosed=0;
- row_start=buffer;
- row_end= to;
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end();
return 0;
}
}
-#ifdef USE_MB
-#endif
- *to++ = (uchar) chr;
-#if MYSQL_VERSION_ID >= 100200
-#error This 10.0 and 10.1 specific fix should be removed in 10.2
-#else
- if (my_mbcharlen(read_charset, (uchar) chr) > 1)
- {
- /*
- A known MBHEAD found. Try to scan the full multi-byte character.
- Otherwise, a possible following second byte 0x5C would be
- mis-interpreted as an escape on the next iteration.
- (Important for big5, gbk, sjis, cp932).
- */
- String tmp((char *) to - 1, read_charset->mbmaxlen, read_charset);
- tmp.length(1);
- bool eof= read_mbtail(&tmp);
- to+= tmp.length() - 1;
- if (eof)
- goto found_eof;
- }
-#endif
+ data.append(chr);
+ if (use_mb(charset()) && read_mbtail(&data))
+ goto found_eof;
}
/*
** We come here if buffer is too small. Enlarge it and continue
*/
- if (!(new_buffer=(uchar*) my_realloc((char*) buffer,buff_length+1+IO_SIZE,
- MYF(MY_WME | MY_THREAD_SPECIFIC))))
- return (error=1);
- to=new_buffer + (to-buffer);
- buffer=new_buffer;
- buff_length+=IO_SIZE;
- end_of_buff=buffer+buff_length;
+ if (data.reserve(IO_SIZE))
+ return (error= 1);
}
found_eof:
enclosed=0;
found_end_of_line=eof=1;
- row_start=buffer;
- row_end=to;
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end();
return 0;
}
@@ -1779,7 +1622,6 @@ found_eof:
int READ_INFO::read_fixed_length()
{
int chr;
- uchar *to;
if (found_end_of_line)
return 1; // One have to call next_line
@@ -1790,8 +1632,7 @@ int READ_INFO::read_fixed_length()
return 1;
}
- to=row_start=buffer;
- while (to < end_of_buff)
+ for (data.length(0); data.length() < m_fixed_length ; )
{
if ((chr=GET) == my_b_EOF)
goto found_eof;
@@ -1799,105 +1640,129 @@ int READ_INFO::read_fixed_length()
{
if ((chr=GET) == my_b_EOF)
{
- *to++= (uchar) escape_char;
+ data.append(escape_char);
goto found_eof;
}
- *to++ =(uchar) unescape((char) chr);
+ data.append((uchar) unescape((char) chr));
continue;
}
- if (chr == line_term_char)
- {
- if (terminator(line_term_ptr,line_term_length))
- { // Maybe unexpected linefeed
- found_end_of_line=1;
- row_end= to;
- return 0;
- }
+ if (terminator(chr, m_line_term))
+ { // Maybe unexpected linefeed
+ found_end_of_line= true;
+ break;
}
- *to++ = (uchar) chr;
+ data.append(chr);
}
- row_end=to; // Found full line
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end(); // Found full line
return 0;
found_eof:
found_end_of_line=eof=1;
- row_start=buffer;
- row_end=to;
- return to == buffer ? 1 : 0;
+ row_start= (uchar *) data.ptr();
+ row_end= (uchar *) data.end();
+ return data.length() == 0 ? 1 : 0;
}
int READ_INFO::next_line()
{
line_cuted=0;
- start_of_line= line_start_ptr != 0;
+ start_of_line= m_line_start.length() != 0;
if (found_end_of_line || eof)
{
found_end_of_line=0;
return eof;
}
found_end_of_line=0;
- if (!line_term_length)
+ if (!m_line_term.length())
return 0; // No lines
for (;;)
{
- int chr = GET;
-#ifdef USE_MB
- if (my_mbcharlen(read_charset, chr) > 1)
- {
- for (uint i=1;
- chr != my_b_EOF && i<my_mbcharlen(read_charset, chr);
- i++)
- chr = GET;
- if (chr == escape_char)
- continue;
- }
-#endif
- if (chr == my_b_EOF)
- {
- eof=1;
- return 1;
+ int chlen;
+ char buf[MY_CS_MBMAXLEN];
+
+ if (getbyte(&buf[0]))
+ return 1; // EOF
+
+ if (use_mb(charset()) &&
+ (chlen= my_charlen(charset(), buf, buf + 1)) != 1)
+ {
+ uint i;
+ for (i= 1; MY_CS_IS_TOOSMALL(chlen); )
+ {
+ DBUG_ASSERT(i < sizeof(buf));
+ DBUG_ASSERT(chlen != 1);
+ if (getbyte(&buf[i++]))
+ return 1; // EOF
+ chlen= my_charlen(charset(), buf, buf + i);
+ }
+
+ /*
+ Either a complete multi-byte sequence,
+ or a broken byte sequence was found.
+ Check if the sequence is a prefix of the "LINES TERMINATED BY" string.
+ */
+ if ((uchar) buf[0] == m_line_term.initial_byte() &&
+ i <= m_line_term.length() &&
+ !memcmp(buf, m_line_term.ptr(), i))
+ {
+ if (m_line_term.length() == i)
+ {
+ /*
+ We found a "LINES TERMINATED BY" string that consists
+ of a single multi-byte character.
+ */
+ return 0;
+ }
+ /*
+ buf[] is a prefix of "LINES TERMINATED BY".
+ Now check the suffix. Length of the suffix of line_term_ptr
+ that still needs to be checked is (line_term_length - i).
+ Note, READ_INFO::terminator() assumes that the leftmost byte of the
+ argument is already scanned from the file and is checked to
+ be a known prefix (e.g. against line_term.initial_char()).
+ So we need to pass one extra byte.
+ */
+ if (terminator(m_line_term.ptr() + i - 1,
+ m_line_term.length() - i + 1))
+ return 0;
+ }
+ /*
+ Here we have a good multi-byte sequence or a broken byte sequence,
+ and the sequence is not equal to "LINES TERMINATED BY".
+ No need to check for escape_char, because:
+ - multi-byte escape characters in "FIELDS ESCAPED BY" are not
+ supported and are rejected at parse time.
+ - broken single-byte sequences are not recognized as escapes,
+ they are considered to be a part of the data and are converted to
+ question marks.
+ */
+ line_cuted= true;
+ continue;
}
- if (chr == escape_char)
+ if (buf[0] == escape_char)
{
- line_cuted=1;
+ line_cuted= true;
if (GET == my_b_EOF)
- return 1;
+ return 1;
continue;
}
- if (chr == line_term_char && terminator(line_term_ptr,line_term_length))
+ if (terminator(buf[0], m_line_term))
return 0;
- line_cuted=1;
+ line_cuted= true;
}
}
bool READ_INFO::find_start_of_fields()
{
- int chr;
- try_again:
- do
+ for (int chr= GET ; chr != my_b_EOF ; chr= GET)
{
- if ((chr=GET) == my_b_EOF)
- {
- found_end_of_line=eof=1;
- return 1;
- }
- } while ((char) chr != line_start_ptr[0]);
- for (const char *ptr=line_start_ptr+1 ; ptr != line_start_end ; ptr++)
- {
- chr=GET; // Eof will be checked later
- if ((char) chr != *ptr)
- { // Can't be line_start
- PUSH(chr);
- while (--ptr != line_start_ptr)
- { // Restart with next char
- PUSH( *ptr);
- }
- goto try_again;
- }
+ if (terminator(chr, m_line_start))
+ return false;
}
- return 0;
+ return (found_end_of_line= eof= true);
}
@@ -1978,26 +1843,8 @@ int READ_INFO::read_value(int delim, String *val)
int chr;
String tmp;
- for (chr= GET; my_tospace(chr) != delim && chr != my_b_EOF;)
+ for (chr= GET; my_tospace(chr) != delim && chr != my_b_EOF; chr= GET)
{
-#ifdef USE_MB
- if (my_mbcharlen(read_charset, chr) > 1)
- {
- DBUG_PRINT("read_xml",("multi byte"));
- int i, ml= my_mbcharlen(read_charset, chr);
- for (i= 1; i < ml; i++)
- {
- val->append(chr);
- /*
- Don't use my_tospace() in the middle of a multi-byte character
- TODO: check that the multi-byte sequence is valid.
- */
- chr= GET;
- if (chr == my_b_EOF)
- return chr;
- }
- }
-#endif
if(chr == '&')
{
tmp.length(0);
@@ -2017,8 +1864,11 @@ int READ_INFO::read_value(int delim, String *val)
}
}
else
+ {
val->append(chr);
- chr= GET;
+ if (use_mb(charset()) && read_mbtail(val))
+ return my_b_EOF;
+ }
}
return my_tospace(chr);
}
@@ -2087,11 +1937,11 @@ int READ_INFO::read_xml(THD *thd)
}
// row tag should be in ROWS IDENTIFIED BY '<row>' - stored in line_term
- if((tag.length() == line_term_length -2) &&
- (memcmp(tag.ptr(), line_term_ptr + 1, tag.length()) == 0))
+ if((tag.length() == m_line_term.length() - 2) &&
+ (memcmp(tag.ptr(), m_line_term.ptr() + 1, tag.length()) == 0))
{
DBUG_PRINT("read_xml", ("start-of-row: %i %s %s",
- level,tag.c_ptr_safe(), line_term_ptr));
+ level,tag.c_ptr_safe(), m_line_term.ptr()));
}
if(chr == ' ' || chr == '>')
@@ -2158,8 +2008,8 @@ int READ_INFO::read_xml(THD *thd)
chr= my_tospace(GET);
}
- if((tag.length() == line_term_length -2) &&
- (memcmp(tag.ptr(), line_term_ptr + 1, tag.length()) == 0))
+ if((tag.length() == m_line_term.length() - 2) &&
+ (memcmp(tag.ptr(), m_line_term.ptr() + 1, tag.length()) == 0))
{
DBUG_PRINT("read_xml", ("found end-of-row %i %s",
level, tag.c_ptr_safe()));
diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc
index 58443a9a977..a2efa5e072c 100644
--- a/sql/sql_locale.cc
+++ b/sql/sql_locale.cc
@@ -32,7 +32,7 @@ enum err_msgs_index
{
en_US= 0, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT,
ja_JP, ko_KR, no_NO, nn_NO, pl_PL, pt_PT, ro_RO, ru_RU, sr_RS, sk_SK,
- es_ES, sv_SE, uk_UA
+ es_ES, sv_SE, uk_UA, hi_IN
} ERR_MSGS_INDEX;
@@ -61,6 +61,7 @@ MY_LOCALE_ERRMSGS global_errmsgs[]=
{"spanish", NULL},
{"swedish", NULL},
{"ukrainian", NULL},
+ {"hindi", NULL},
{NULL, NULL}
};
@@ -889,7 +890,7 @@ MY_LOCALE my_locale_hi_IN
'.', /* decimal point hi_IN */
',', /* thousands_sep hi_IN */
"\x03", /* grouping hi_IN */
- &global_errmsgs[en_US]
+ &global_errmsgs[hi_IN]
);
/***** LOCALE END hi_IN *****/
diff --git a/sql/sql_locale.h b/sql/sql_locale.h
index 8559bb55cd9..ec2f3d29e15 100644
--- a/sql/sql_locale.h
+++ b/sql/sql_locale.h
@@ -19,7 +19,7 @@
typedef struct my_locale_errmsgs
{
const char *language;
- const char **errmsgs;
+ const char ***errmsgs;
} MY_LOCALE_ERRMSGS;
#include "my_global.h" /* uint */
diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc
index 8cf849b97d0..8c8aee0cb03 100644
--- a/sql/sql_manager.cc
+++ b/sql/sql_manager.cc
@@ -159,7 +159,7 @@ void stop_handle_manager()
if (manager_thread_in_use)
{
mysql_mutex_lock(&LOCK_manager);
- DBUG_PRINT("quit", ("initiate shutdown of handle manager thread: 0x%lx",
+ DBUG_PRINT("quit", ("initiate shutdown of handle manager thread: %lu",
(ulong)manager_thread));
mysql_cond_signal(&COND_manager);
mysql_mutex_unlock(&LOCK_manager);
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 6649c60f827..146c4d2d02e 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -23,7 +23,10 @@
// set_handler_table_locks,
// lock_global_read_lock,
// make_global_read_lock_block_commit
-#include "sql_base.h" // find_temporary_table
+#include "sql_base.h" // open_tables, open_and_lock_tables,
+ // lock_tables, unique_table,
+ // close_thread_tables, is_temporary_table
+ // table_cache.h
#include "sql_cache.h" // QUERY_CACHE_FLAGS_SIZE, query_cache_*
#include "sql_show.h" // mysqld_list_*, mysqld_show_*,
// calc_sum_of_all_status
@@ -92,6 +95,7 @@
#include "transaction.h"
#include "sql_audit.h"
#include "sql_prepare.h"
+#include "sql_cte.h"
#include "debug_sync.h"
#include "probes_mysql.h"
#include "set_var.h"
@@ -110,7 +114,9 @@
#include "wsrep_thd.h"
static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
- Parser_state *parser_state);
+ Parser_state *parser_state,
+ bool is_com_multi,
+ bool is_next_command);
/**
@defgroup Runtime_Environment Runtime Environment
@@ -134,38 +140,263 @@ static bool check_rename_table(THD *, TABLE_LIST *, TABLE_LIST *);
const char *any_db="*any*"; // Special symbol for check_access
-const LEX_STRING command_name[]={
- { C_STRING_WITH_LEN("Sleep") },
- { C_STRING_WITH_LEN("Quit") },
- { C_STRING_WITH_LEN("Init DB") },
- { C_STRING_WITH_LEN("Query") },
- { C_STRING_WITH_LEN("Field List") },
- { C_STRING_WITH_LEN("Create DB") },
- { C_STRING_WITH_LEN("Drop DB") },
- { C_STRING_WITH_LEN("Refresh") },
- { C_STRING_WITH_LEN("Shutdown") },
- { C_STRING_WITH_LEN("Statistics") },
- { C_STRING_WITH_LEN("Processlist") },
- { C_STRING_WITH_LEN("Connect") },
- { C_STRING_WITH_LEN("Kill") },
- { C_STRING_WITH_LEN("Debug") },
- { C_STRING_WITH_LEN("Ping") },
- { C_STRING_WITH_LEN("Time") },
- { C_STRING_WITH_LEN("Delayed insert") },
- { C_STRING_WITH_LEN("Change user") },
- { C_STRING_WITH_LEN("Binlog Dump") },
- { C_STRING_WITH_LEN("Table Dump") },
- { C_STRING_WITH_LEN("Connect Out") },
- { C_STRING_WITH_LEN("Register Slave") },
- { C_STRING_WITH_LEN("Prepare") },
- { C_STRING_WITH_LEN("Execute") },
- { C_STRING_WITH_LEN("Long Data") },
- { C_STRING_WITH_LEN("Close stmt") },
- { C_STRING_WITH_LEN("Reset stmt") },
- { C_STRING_WITH_LEN("Set option") },
- { C_STRING_WITH_LEN("Fetch") },
- { C_STRING_WITH_LEN("Daemon") },
- { C_STRING_WITH_LEN("Error") } // Last command number
+const LEX_STRING command_name[257]={
+ { C_STRING_WITH_LEN("Sleep") }, //0
+ { C_STRING_WITH_LEN("Quit") }, //1
+ { C_STRING_WITH_LEN("Init DB") }, //2
+ { C_STRING_WITH_LEN("Query") }, //3
+ { C_STRING_WITH_LEN("Field List") }, //4
+ { C_STRING_WITH_LEN("Create DB") }, //5
+ { C_STRING_WITH_LEN("Drop DB") }, //6
+ { C_STRING_WITH_LEN("Refresh") }, //7
+ { C_STRING_WITH_LEN("Shutdown") }, //8
+ { C_STRING_WITH_LEN("Statistics") }, //9
+ { C_STRING_WITH_LEN("Processlist") }, //10
+ { C_STRING_WITH_LEN("Connect") }, //11
+ { C_STRING_WITH_LEN("Kill") }, //12
+ { C_STRING_WITH_LEN("Debug") }, //13
+ { C_STRING_WITH_LEN("Ping") }, //14
+ { C_STRING_WITH_LEN("Time") }, //15
+ { C_STRING_WITH_LEN("Delayed insert") }, //16
+ { C_STRING_WITH_LEN("Change user") }, //17
+ { C_STRING_WITH_LEN("Binlog Dump") }, //18
+ { C_STRING_WITH_LEN("Table Dump") }, //19
+ { C_STRING_WITH_LEN("Connect Out") }, //20
+ { C_STRING_WITH_LEN("Register Slave") }, //21
+ { C_STRING_WITH_LEN("Prepare") }, //22
+ { C_STRING_WITH_LEN("Execute") }, //23
+ { C_STRING_WITH_LEN("Long Data") }, //24
+ { C_STRING_WITH_LEN("Close stmt") }, //25
+ { C_STRING_WITH_LEN("Reset stmt") }, //26
+ { C_STRING_WITH_LEN("Set option") }, //27
+ { C_STRING_WITH_LEN("Fetch") }, //28
+ { C_STRING_WITH_LEN("Daemon") }, //29
+ { C_STRING_WITH_LEN("Unimpl get tid") }, //30
+ { C_STRING_WITH_LEN("Reset connection") },//31
+ { 0, 0 }, //32
+ { 0, 0 }, //33
+ { 0, 0 }, //34
+ { 0, 0 }, //35
+ { 0, 0 }, //36
+ { 0, 0 }, //37
+ { 0, 0 }, //38
+ { 0, 0 }, //39
+ { 0, 0 }, //40
+ { 0, 0 }, //41
+ { 0, 0 }, //42
+ { 0, 0 }, //43
+ { 0, 0 }, //44
+ { 0, 0 }, //45
+ { 0, 0 }, //46
+ { 0, 0 }, //47
+ { 0, 0 }, //48
+ { 0, 0 }, //49
+ { 0, 0 }, //50
+ { 0, 0 }, //51
+ { 0, 0 }, //52
+ { 0, 0 }, //53
+ { 0, 0 }, //54
+ { 0, 0 }, //55
+ { 0, 0 }, //56
+ { 0, 0 }, //57
+ { 0, 0 }, //58
+ { 0, 0 }, //59
+ { 0, 0 }, //60
+ { 0, 0 }, //61
+ { 0, 0 }, //62
+ { 0, 0 }, //63
+ { 0, 0 }, //64
+ { 0, 0 }, //65
+ { 0, 0 }, //66
+ { 0, 0 }, //67
+ { 0, 0 }, //68
+ { 0, 0 }, //69
+ { 0, 0 }, //70
+ { 0, 0 }, //71
+ { 0, 0 }, //72
+ { 0, 0 }, //73
+ { 0, 0 }, //74
+ { 0, 0 }, //75
+ { 0, 0 }, //76
+ { 0, 0 }, //77
+ { 0, 0 }, //78
+ { 0, 0 }, //79
+ { 0, 0 }, //80
+ { 0, 0 }, //81
+ { 0, 0 }, //82
+ { 0, 0 }, //83
+ { 0, 0 }, //84
+ { 0, 0 }, //85
+ { 0, 0 }, //86
+ { 0, 0 }, //87
+ { 0, 0 }, //88
+ { 0, 0 }, //89
+ { 0, 0 }, //90
+ { 0, 0 }, //91
+ { 0, 0 }, //92
+ { 0, 0 }, //93
+ { 0, 0 }, //94
+ { 0, 0 }, //95
+ { 0, 0 }, //96
+ { 0, 0 }, //97
+ { 0, 0 }, //98
+ { 0, 0 }, //99
+ { 0, 0 }, //100
+ { 0, 0 }, //101
+ { 0, 0 }, //102
+ { 0, 0 }, //103
+ { 0, 0 }, //104
+ { 0, 0 }, //105
+ { 0, 0 }, //106
+ { 0, 0 }, //107
+ { 0, 0 }, //108
+ { 0, 0 }, //109
+ { 0, 0 }, //110
+ { 0, 0 }, //111
+ { 0, 0 }, //112
+ { 0, 0 }, //113
+ { 0, 0 }, //114
+ { 0, 0 }, //115
+ { 0, 0 }, //116
+ { 0, 0 }, //117
+ { 0, 0 }, //118
+ { 0, 0 }, //119
+ { 0, 0 }, //120
+ { 0, 0 }, //121
+ { 0, 0 }, //122
+ { 0, 0 }, //123
+ { 0, 0 }, //124
+ { 0, 0 }, //125
+ { 0, 0 }, //126
+ { 0, 0 }, //127
+ { 0, 0 }, //128
+ { 0, 0 }, //129
+ { 0, 0 }, //130
+ { 0, 0 }, //131
+ { 0, 0 }, //132
+ { 0, 0 }, //133
+ { 0, 0 }, //134
+ { 0, 0 }, //135
+ { 0, 0 }, //136
+ { 0, 0 }, //137
+ { 0, 0 }, //138
+ { 0, 0 }, //139
+ { 0, 0 }, //140
+ { 0, 0 }, //141
+ { 0, 0 }, //142
+ { 0, 0 }, //143
+ { 0, 0 }, //144
+ { 0, 0 }, //145
+ { 0, 0 }, //146
+ { 0, 0 }, //147
+ { 0, 0 }, //148
+ { 0, 0 }, //149
+ { 0, 0 }, //150
+ { 0, 0 }, //151
+ { 0, 0 }, //152
+ { 0, 0 }, //153
+ { 0, 0 }, //154
+ { 0, 0 }, //155
+ { 0, 0 }, //156
+ { 0, 0 }, //157
+ { 0, 0 }, //158
+ { 0, 0 }, //159
+ { 0, 0 }, //160
+ { 0, 0 }, //161
+ { 0, 0 }, //162
+ { 0, 0 }, //163
+ { 0, 0 }, //164
+ { 0, 0 }, //165
+ { 0, 0 }, //166
+ { 0, 0 }, //167
+ { 0, 0 }, //168
+ { 0, 0 }, //169
+ { 0, 0 }, //170
+ { 0, 0 }, //171
+ { 0, 0 }, //172
+ { 0, 0 }, //173
+ { 0, 0 }, //174
+ { 0, 0 }, //175
+ { 0, 0 }, //176
+ { 0, 0 }, //177
+ { 0, 0 }, //178
+ { 0, 0 }, //179
+ { 0, 0 }, //180
+ { 0, 0 }, //181
+ { 0, 0 }, //182
+ { 0, 0 }, //183
+ { 0, 0 }, //184
+ { 0, 0 }, //185
+ { 0, 0 }, //186
+ { 0, 0 }, //187
+ { 0, 0 }, //188
+ { 0, 0 }, //189
+ { 0, 0 }, //190
+ { 0, 0 }, //191
+ { 0, 0 }, //192
+ { 0, 0 }, //193
+ { 0, 0 }, //194
+ { 0, 0 }, //195
+ { 0, 0 }, //196
+ { 0, 0 }, //197
+ { 0, 0 }, //198
+ { 0, 0 }, //199
+ { 0, 0 }, //200
+ { 0, 0 }, //201
+ { 0, 0 }, //202
+ { 0, 0 }, //203
+ { 0, 0 }, //204
+ { 0, 0 }, //205
+ { 0, 0 }, //206
+ { 0, 0 }, //207
+ { 0, 0 }, //208
+ { 0, 0 }, //209
+ { 0, 0 }, //210
+ { 0, 0 }, //211
+ { 0, 0 }, //212
+ { 0, 0 }, //213
+ { 0, 0 }, //214
+ { 0, 0 }, //215
+ { 0, 0 }, //216
+ { 0, 0 }, //217
+ { 0, 0 }, //218
+ { 0, 0 }, //219
+ { 0, 0 }, //220
+ { 0, 0 }, //221
+ { 0, 0 }, //222
+ { 0, 0 }, //223
+ { 0, 0 }, //224
+ { 0, 0 }, //225
+ { 0, 0 }, //226
+ { 0, 0 }, //227
+ { 0, 0 }, //228
+ { 0, 0 }, //229
+ { 0, 0 }, //230
+ { 0, 0 }, //231
+ { 0, 0 }, //232
+ { 0, 0 }, //233
+ { 0, 0 }, //234
+ { 0, 0 }, //235
+ { 0, 0 }, //236
+ { 0, 0 }, //237
+ { 0, 0 }, //238
+ { 0, 0 }, //239
+ { 0, 0 }, //240
+ { 0, 0 }, //241
+ { 0, 0 }, //242
+ { 0, 0 }, //243
+ { 0, 0 }, //244
+ { 0, 0 }, //245
+ { 0, 0 }, //246
+ { 0, 0 }, //247
+ { 0, 0 }, //248
+ { 0, 0 }, //249
+ { C_STRING_WITH_LEN("Bulk_execute") }, //250
+ { C_STRING_WITH_LEN("Slave_worker") }, //251
+ { C_STRING_WITH_LEN("Slave_IO") }, //252
+ { C_STRING_WITH_LEN("Slave_SQL") }, //253
+ { C_STRING_WITH_LEN("Com_multi") }, //254
+ { C_STRING_WITH_LEN("Error") } // Last command number 255
};
const char *xa_state_names[]={
@@ -190,7 +421,7 @@ static bool some_non_temp_table_to_be_updated(THD *thd, TABLE_LIST *tables)
for (TABLE_LIST *table= tables; table; table= table->next_global)
{
DBUG_ASSERT(table->db && table->table_name);
- if (table->updating && !find_temporary_table(thd, table))
+ if (table->updating && !thd->find_tmp_table_share(table))
return 1;
}
return 0;
@@ -267,7 +498,7 @@ void init_update_queries(void)
memset(server_command_flags, 0, sizeof(server_command_flags));
server_command_flags[COM_STATISTICS]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
- server_command_flags[COM_PING]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK;
+ server_command_flags[COM_PING]= CF_SKIP_QUERY_ID | CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK | CF_NO_COM_MULTI;
server_command_flags[COM_QUIT]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_PROCESS_INFO]= CF_SKIP_WSREP_CHECK;
@@ -277,6 +508,10 @@ void init_update_queries(void)
server_command_flags[COM_TIME]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_INIT_DB]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_END]= CF_SKIP_WSREP_CHECK;
+ for (uint i= COM_MDB_GAP_BEG; i <= COM_MDB_GAP_END; i++)
+ {
+ server_command_flags[i]= CF_SKIP_WSREP_CHECK;
+ }
/*
COM_QUERY, COM_SET_OPTION and COM_STMT_XXX are allowed to pass the early
@@ -292,6 +527,8 @@ void init_update_queries(void)
server_command_flags[COM_STMT_EXECUTE]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_STMT_SEND_LONG_DATA]= CF_SKIP_WSREP_CHECK;
server_command_flags[COM_REGISTER_SLAVE]= CF_SKIP_WSREP_CHECK;
+ server_command_flags[COM_MULTI]= CF_SKIP_WSREP_CHECK | CF_NO_COM_MULTI;
+ server_command_flags[CF_NO_COM_MULTI]= CF_NO_COM_MULTI;
/* Initialize the sql command flags array. */
memset(sql_command_flags, 0, sizeof(sql_command_flags));
@@ -341,17 +578,19 @@ void init_update_queries(void)
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
- CF_UPDATES_DATA;
+ CF_UPDATES_DATA | CF_SP_BULK_SAFE;
sql_command_flags[SQLCOM_UPDATE_MULTI]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
- CF_UPDATES_DATA;
+ CF_UPDATES_DATA | CF_SP_BULK_SAFE;
sql_command_flags[SQLCOM_INSERT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
- CF_INSERTS_DATA;
+ CF_INSERTS_DATA |
+ CF_SP_BULK_SAFE |
+ CF_SP_BULK_OPTIMIZED;
sql_command_flags[SQLCOM_INSERT_SELECT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
@@ -360,7 +599,8 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_DELETE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
- CF_CAN_BE_EXPLAINED;
+ CF_CAN_BE_EXPLAINED |
+ CF_SP_BULK_SAFE;
sql_command_flags[SQLCOM_DELETE_MULTI]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
@@ -369,7 +609,7 @@ void init_update_queries(void)
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
CF_CAN_BE_EXPLAINED |
- CF_INSERTS_DATA;;
+ CF_INSERTS_DATA | CF_SP_BULK_SAFE;
sql_command_flags[SQLCOM_REPLACE_SELECT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE |
@@ -422,6 +662,7 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_SHOW_EXPLAIN]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROCESSLIST]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_GRANTS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_CREATE_USER]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE_DB]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_MASTER_STAT]= CF_STATUS_COMMAND;
@@ -443,6 +684,7 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_CREATE_USER]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_RENAME_USER]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_DROP_USER]= CF_CHANGES_DATA;
+ sql_command_flags[SQLCOM_ALTER_USER]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_CREATE_ROLE]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_GRANT]= CF_CHANGES_DATA;
sql_command_flags[SQLCOM_GRANT_ROLE]= CF_CHANGES_DATA;
@@ -473,6 +715,7 @@ void init_update_queries(void)
CF_CAN_GENERATE_ROW_EVENTS |
CF_OPTIMIZER_TRACE; // (1)
sql_command_flags[SQLCOM_EXECUTE]= CF_CAN_GENERATE_ROW_EVENTS;
+ sql_command_flags[SQLCOM_EXECUTE_IMMEDIATE]= CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_COMPOUND]= CF_CAN_GENERATE_ROW_EVENTS;
/*
@@ -506,6 +749,7 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_CHECKSUM]= CF_REPORT_PROGRESS;
sql_command_flags[SQLCOM_CREATE_USER]|= CF_AUTO_COMMIT_TRANS;
+ sql_command_flags[SQLCOM_ALTER_USER]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_USER]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_RENAME_USER]|= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_CREATE_ROLE]|= CF_AUTO_COMMIT_TRANS;
@@ -601,6 +845,7 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_ALTER_EVENT]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_EVENT]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_USER]|= CF_DISALLOW_IN_RO_TRANS;
+ sql_command_flags[SQLCOM_ALTER_USER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_RENAME_USER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_DROP_USER]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_CREATE_SERVER]|= CF_DISALLOW_IN_RO_TRANS;
@@ -651,7 +896,7 @@ void execute_init_command(THD *thd, LEX_STRING *init_command,
mysql_rwlock_t *var_lock)
{
Vio* save_vio;
- ulong save_client_capabilities;
+ ulonglong save_client_capabilities;
mysql_rwlock_rdlock(var_lock);
if (!init_command->length)
@@ -684,7 +929,7 @@ void execute_init_command(THD *thd, LEX_STRING *init_command,
save_vio= thd->net.vio;
thd->net.vio= 0;
thd->clear_error(1);
- dispatch_command(COM_QUERY, thd, buf, len);
+ dispatch_command(COM_QUERY, thd, buf, len, FALSE, FALSE);
thd->client_capabilities= save_client_capabilities;
thd->net.vio= save_vio;
@@ -802,7 +1047,7 @@ static void handle_bootstrap_impl(THD *thd)
break;
}
- mysql_parse(thd, thd->query(), length, &parser_state);
+ mysql_parse(thd, thd->query(), length, &parser_state, FALSE, FALSE);
bootstrap_error= thd->is_error();
thd->protocol->end_statement();
@@ -859,13 +1104,12 @@ void do_handle_bootstrap(THD *thd)
end:
delete thd;
-#ifndef EMBEDDED_LIBRARY
- thread_safe_decrement32(&thread_count);
- in_bootstrap= FALSE;
-
mysql_mutex_lock(&LOCK_thread_count);
+ in_bootstrap = FALSE;
mysql_cond_broadcast(&COND_thread_count);
mysql_mutex_unlock(&LOCK_thread_count);
+
+#ifndef EMBEDDED_LIBRARY
my_thread_end();
pthread_exit(0);
#endif
@@ -874,7 +1118,7 @@ end:
}
-/* This works because items are allocated with sql_alloc() */
+/* This works because items are allocated on THD::mem_root */
void free_items(Item *item)
{
@@ -889,7 +1133,7 @@ void free_items(Item *item)
}
/**
- This works because items are allocated with sql_alloc().
+ This works because items are allocated on THD::mem_root.
@note The function also handles null pointers (empty list).
*/
void cleanup_items(Item *item)
@@ -900,6 +1144,22 @@ void cleanup_items(Item *item)
DBUG_VOID_RETURN;
}
+static enum enum_server_command fetch_command(THD *thd, char *packet)
+{
+ enum enum_server_command
+ command= (enum enum_server_command) (uchar) packet[0];
+ DBUG_ENTER("fetch_command");
+
+ if (command >= COM_END ||
+ (command >= COM_MDB_GAP_BEG && command <= COM_MDB_GAP_END))
+ command= COM_END; // Wrong command
+
+ DBUG_PRINT("info",("Command on %s = %d (%s)",
+ vio_description(thd->net.vio), command,
+ command_name[command].str));
+ DBUG_RETURN(command);
+}
+
#ifdef WITH_WSREP
static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables)
@@ -1089,14 +1349,8 @@ bool do_command(THD *thd)
/* Do not rely on my_net_read, extra safety against programming errors. */
packet[packet_length]= '\0'; /* safety */
- command= (enum enum_server_command) (uchar) packet[0];
-
- if (command >= COM_END)
- command= COM_END; // Wrong command
- DBUG_PRINT("info",("Command on %s = %d (%s)",
- vio_description(net->vio), command,
- command_name[command].str));
+ command= fetch_command(thd, packet);
#ifdef WITH_WSREP
if (WSREP(thd))
@@ -1128,7 +1382,8 @@ bool do_command(THD *thd)
DBUG_ASSERT(packet_length);
DBUG_ASSERT(!thd->apc_target.is_enabled());
- return_value= dispatch_command(command, thd, packet+1, (uint) (packet_length-1));
+ return_value= dispatch_command(command, thd, packet+1,
+ (uint) (packet_length-1), FALSE, FALSE);
#ifdef WITH_WSREP
if (WSREP(thd))
{
@@ -1147,7 +1402,7 @@ bool do_command(THD *thd)
}
thd->clear_error();
return_value= dispatch_command(command, thd, thd->wsrep_retry_query,
- thd->wsrep_retry_query_len);
+ thd->wsrep_retry_query_len, FALSE, FALSE);
thd->variables.character_set_client = current_charset;
}
@@ -1229,6 +1484,47 @@ static bool deny_updates_if_read_only_option(THD *thd, TABLE_LIST *all_tables)
/**
+ check COM_MULTI packet
+
+ @param thd thread handle
+ @param packet pointer on the packet of commands
+ @param packet_length length of this packet
+
+ @retval 0 - Error
+ @retval # - Number of commands in the batch
+*/
+
+uint maria_multi_check(THD *thd, char *packet, uint packet_length)
+{
+ uint counter= 0;
+ DBUG_ENTER("maria_multi_check");
+ while (packet_length)
+ {
+ char *packet_start= packet;
+ size_t subpacket_length= net_field_length((uchar **)&packet_start);
+ size_t length_length= packet_start - packet;
+ // length of command + 3 bytes where that length was stored
+ DBUG_PRINT("info", ("sub-packet length: %zu + %zu command: %x",
+ subpacket_length, length_length,
+ packet_start[3]));
+
+ if (subpacket_length == 0 ||
+ (subpacket_length + length_length) > packet_length)
+ {
+ my_message(ER_UNKNOWN_COM_ERROR, ER_THD(thd, ER_UNKNOWN_COM_ERROR),
+ MYF(0));
+ DBUG_RETURN(0);
+ }
+
+ counter++;
+ packet= packet_start + subpacket_length;
+ packet_length-= (subpacket_length + length_length);
+ }
+ DBUG_RETURN(counter);
+}
+
+
+/**
Perform one connection-level (COM_XXXX) command.
@param command type of command to perform
@@ -1237,6 +1533,8 @@ static bool deny_updates_if_read_only_option(THD *thd, TABLE_LIST *all_tables)
@param packet_length length of packet + 1 (to show that data is
null-terminated) except for COM_SLEEP, where it
can be zero.
+ @param is_com_multi recursive call from COM_MULTI
+ @param is_next_command there will be more command in the COM_MULTI batch
@todo
set thd->lex->sql_command to SQLCOM_END here.
@@ -1250,15 +1548,24 @@ static bool deny_updates_if_read_only_option(THD *thd, TABLE_LIST *all_tables)
COM_QUIT/COM_SHUTDOWN
*/
bool dispatch_command(enum enum_server_command command, THD *thd,
- char* packet, uint packet_length)
+ char* packet, uint packet_length, bool is_com_multi,
+ bool is_next_command)
{
NET *net= &thd->net;
bool error= 0;
bool do_end_of_statement= true;
DBUG_ENTER("dispatch_command");
- DBUG_PRINT("info", ("command: %d", command));
+ DBUG_PRINT("info", ("command: %d %s", command,
+ (command_name[command].str != 0 ?
+ command_name[command].str :
+ "<?>")));
+ bool drop_more_results= 0;
- inc_thread_running();
+ if (!is_com_multi)
+ inc_thread_running();
+
+ /* keep it withing 1 byte */
+ compile_time_assert(COM_END == 255);
#ifdef WITH_WSREP
if (WSREP(thd))
@@ -1283,7 +1590,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
command != COM_STMT_CLOSE && command != COM_QUIT)
{
mysql_mutex_unlock(&thd->LOCK_thd_data);
- my_error(ER_LOCK_DEADLOCK, MYF(0), "wsrep aborted transaction");
+ my_message(ER_LOCK_DEADLOCK, "Deadlock: wsrep aborted transaction",
+ MYF(0));
WSREP_DEBUG("Deadlock error for: %s", thd->query());
thd->reset_killed();
thd->mysys_var->abort = 0;
@@ -1346,6 +1654,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
beginning of each command.
*/
thd->server_status&= ~SERVER_STATUS_CLEAR_SET;
+ if (is_next_command)
+ {
+ drop_more_results= !MY_TEST(thd->server_status &
+ SERVER_MORE_RESULTS_EXISTS);
+ thd->server_status|= SERVER_MORE_RESULTS_EXISTS;
+ if (is_com_multi)
+ thd->get_stmt_da()->set_skip_flush();
+ }
+
switch (command) {
case COM_INIT_DB:
{
@@ -1370,6 +1687,17 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
}
#endif
+ case COM_RESET_CONNECTION:
+ {
+ thd->status_var.com_other++;
+ thd->change_user();
+ thd->clear_error(); // if errors from rollback
+ /* Restore original charset from client authentication packet.*/
+ if(thd->org_charset)
+ thd->update_charset(thd->org_charset,thd->org_charset,thd->org_charset);
+ my_ok(thd, 0, 0, 0);
+ break;
+ }
case COM_CHANGE_USER:
{
int auth_rc;
@@ -1419,10 +1747,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
decrease_user_connections(thd->user_connect);
thd->user_connect= save_user_connect;
thd->reset_db(save_db, save_db_length);
- thd->variables.character_set_client= save_character_set_client;
- thd->variables.collation_connection= save_collation_connection;
- thd->variables.character_set_results= save_character_set_results;
- thd->update_charset();
+ thd->update_charset(save_character_set_client, save_collation_connection,
+ save_character_set_results);
thd->failed_com_change_user++;
my_sleep(1000000);
}
@@ -1438,6 +1764,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
break;
}
+ case COM_STMT_BULK_EXECUTE:
+ {
+ mysqld_stmt_bulk_execute(thd, packet, packet_length);
+ break;
+ }
case COM_STMT_EXECUTE:
{
mysqld_stmt_execute(thd, packet, packet_length);
@@ -1494,13 +1825,16 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
if (WSREP_ON)
- wsrep_mysql_parse(thd, thd->query(), thd->query_length(), &parser_state);
+ wsrep_mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
+ is_com_multi, is_next_command);
else
- mysql_parse(thd, thd->query(), thd->query_length(), &parser_state);
+ mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
+ is_com_multi, is_next_command);
while (!thd->killed && (parser_state.m_lip.found_semicolon != NULL) &&
! thd->is_error())
{
+ thd->get_stmt_da()->set_skip_flush();
/*
Multiple queries exist, execute them individually
*/
@@ -1581,9 +1915,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* TODO: set thd->lex->sql_command to SQLCOM_END here */
if (WSREP_ON)
- wsrep_mysql_parse(thd, beginning_of_next_stmt, length, &parser_state);
+ wsrep_mysql_parse(thd, beginning_of_next_stmt, length, &parser_state,
+ is_com_multi, is_next_command);
else
- mysql_parse(thd, beginning_of_next_stmt, length, &parser_state);
+ mysql_parse(thd, beginning_of_next_stmt, length, &parser_state,
+ is_com_multi, is_next_command);
}
@@ -1616,7 +1952,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
(The packet is guaranteed to end with an end zero)
*/
arg_end= strend(packet);
- uint arg_length= arg_end - packet;
+ uint arg_length= (uint)(arg_end - packet);
/* Check given table name length. */
if (packet_length - arg_length > NAME_LEN + 1 || arg_length > SAFE_NAME_LEN)
@@ -1635,6 +1971,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
packet= arg_end + 1;
thd->reset_for_next_command(0); // Don't clear errors
+ // thd->reset_for_next_command reset state => restore it
+ if (is_next_command)
+ {
+ thd->server_status|= SERVER_MORE_RESULTS_EXISTS;
+ if (is_com_multi)
+ thd->get_stmt_da()->set_skip_flush();
+ }
+
lex_start(thd);
/* Must be before we init the table list. */
if (lower_case_table_names)
@@ -1667,7 +2011,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->set_query(fields, query_length);
general_log_print(thd, command, "%s %s", table_list.table_name, fields);
- if (open_temporary_tables(thd, &table_list))
+ if (thd->open_temporary_tables(&table_list))
break;
if (check_table_access(thd, SELECT_ACL, &table_list,
@@ -1821,7 +2165,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
DBUG_PRINT("quit",("Got shutdown command for level %u", level));
general_log_print(thd, command, NullS);
my_eof(thd);
- kill_mysql();
+ kill_mysql(thd);
error=TRUE;
break;
}
@@ -1848,7 +2192,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
length= my_snprintf(buff, buff_len - 1,
"Uptime: %lu Threads: %d Questions: %lu "
- "Slow queries: %lu Opens: %lu Flush tables: %lu "
+ "Slow queries: %lu Opens: %lu Flush tables: %lld "
"Open tables: %u Queries per second avg: %u.%03u",
uptime,
(int) thread_count, (ulong) thd->query_id,
@@ -1918,11 +2262,90 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
general_log_print(thd, command, NullS);
my_eof(thd);
break;
+ case COM_MULTI:
+ {
+ uint counter;
+ uint current_com= 0;
+ DBUG_ASSERT(!is_com_multi);
+ if (!(thd->client_capabilities & CLIENT_MULTI_RESULTS))
+ {
+ /* The client does not support multiple result sets being sent back */
+ my_error(ER_COMMULTI_BADCONTEXT, MYF(0));
+ break;
+ }
+
+ if (!(counter= maria_multi_check(thd, packet, packet_length)))
+ break;
+
+ {
+ char *packet_start= packet;
+ /* We have to store next length because it will be destroyed by '\0' */
+ size_t next_subpacket_length= net_field_length((uchar **)&packet_start);
+ size_t next_length_length= packet_start - packet;
+ unsigned char *readbuff= net->buff;
+
+ if (net_allocate_new_packet(net, thd, MYF(0)))
+ break;
+
+ PSI_statement_locker *save_locker= thd->m_statement_psi;
+ sql_digest_state *save_digest= thd->m_digest;
+ thd->m_statement_psi= NULL;
+ thd->m_digest= NULL;
+
+ while (packet_length)
+ {
+ current_com++;
+ size_t subpacket_length= next_subpacket_length + next_length_length;
+ size_t length_length= next_length_length;
+ if (subpacket_length < packet_length)
+ {
+ packet_start= packet + subpacket_length;
+ next_subpacket_length= net_field_length((uchar**)&packet_start);
+ next_length_length= packet_start - (packet + subpacket_length);
+ }
+ /* safety like in do_command() */
+ packet[subpacket_length]= '\0';
+
+ enum enum_server_command subcommand=
+ fetch_command(thd, (packet + length_length));
+
+ if (server_command_flags[subcommand] & CF_NO_COM_MULTI)
+ {
+ my_error(ER_BAD_COMMAND_IN_MULTI, MYF(0),
+ command_name[subcommand].str);
+ goto com_multi_end;
+ }
+
+ if (dispatch_command(subcommand, thd, packet + (1 + length_length),
+ subpacket_length - (1 + length_length), TRUE,
+ (current_com != counter)))
+ {
+ DBUG_ASSERT(thd->is_error());
+ goto com_multi_end;
+ }
+
+ DBUG_ASSERT(subpacket_length <= packet_length);
+ packet+= subpacket_length;
+ packet_length-= subpacket_length;
+ }
+
+com_multi_end:
+ thd->m_statement_psi= save_locker;
+ thd->m_digest= save_digest;
+
+ /* release old buffer */
+ net_flush(net);
+ DBUG_ASSERT(net->buff == net->write_pos); // nothing to send
+ my_free(readbuff);
+ }
+ break;
+ }
case COM_SLEEP:
case COM_CONNECT: // Impossible here
case COM_TIME: // Impossible from client
case COM_DELAYED_INSERT:
case COM_END:
+ case COM_UNIMPLEMENTED:
default:
my_message(ER_UNKNOWN_COM_ERROR, ER_THD(thd, ER_UNKNOWN_COM_ERROR),
MYF(0));
@@ -1961,9 +2384,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd_proc_info(thd, "updating status");
/* Finalize server status flags after executing a command. */
thd->update_server_status();
- thd->protocol->end_statement();
- query_cache_end_of_result(thd);
+ if (command != COM_MULTI)
+ {
+ thd->protocol->end_statement();
+ query_cache_end_of_result(thd);
+ }
}
+ if (drop_more_results)
+ thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
if (!thd->is_error() && !thd->killed_errno())
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_RESULT, 0, 0);
@@ -1988,8 +2416,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->m_statement_psi= NULL;
thd->m_digest= NULL;
- dec_thread_running();
- thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory
+ if (!is_com_multi)
+ {
+ dec_thread_running();
+ thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory
+ }
thd->reset_kill_query(); /* Ensure that killed_errmsg is released */
/*
LEX::m_sql_cmd can point to Sql_cmd allocated on thd->mem_root.
@@ -2161,7 +2592,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
{
DBUG_RETURN(1);
}
- schema_select_lex= new SELECT_LEX();
+ schema_select_lex= new (thd->mem_root) SELECT_LEX();
db.str= schema_select_lex->db= lex->select_lex.db;
schema_select_lex->table_list.first= NULL;
db.length= strlen(db.str);
@@ -2184,7 +2615,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
{
DBUG_ASSERT(table_ident);
TABLE_LIST **query_tables_last= lex->query_tables_last;
- schema_select_lex= new SELECT_LEX();
+ schema_select_lex= new (thd->mem_root) SELECT_LEX();
/* 'parent_lex' is used in init_query() so it must be before it. */
schema_select_lex->parent_lex= lex;
schema_select_lex->init_query();
@@ -2382,27 +2813,80 @@ bool sp_process_definer(THD *thd)
static bool lock_tables_open_and_lock_tables(THD *thd, TABLE_LIST *tables)
{
Lock_tables_prelocking_strategy lock_tables_prelocking_strategy;
+ MDL_deadlock_and_lock_abort_error_handler deadlock_handler;
+ MDL_savepoint mdl_savepoint= thd->mdl_context.mdl_savepoint();
uint counter;
TABLE_LIST *table;
thd->in_lock_tables= 1;
+retry:
+
if (open_tables(thd, &tables, &counter, 0, &lock_tables_prelocking_strategy))
goto err;
- /*
- We allow to change temporary tables even if they were locked for read
- by LOCK TABLES. To avoid a discrepancy between lock acquired at LOCK
- TABLES time and by the statement which is later executed under LOCK TABLES
- we ensure that for temporary tables we always request a write lock (such
- discrepancy can cause problems for the storage engine).
- We don't set TABLE_LIST::lock_type in this case as this might result in
- extra warnings from THD::decide_logging_format() even though binary logging
- is totally irrelevant for LOCK TABLES.
- */
for (table= tables; table; table= table->next_global)
- if (!table->placeholder() && table->table->s->tmp_table)
- table->table->reginfo.lock_type= TL_WRITE;
+ {
+ if (!table->placeholder())
+ {
+ if (table->table->s->tmp_table)
+ {
+ /*
+ We allow to change temporary tables even if they were locked for read
+ by LOCK TABLES. To avoid a discrepancy between lock acquired at LOCK
+ TABLES time and by the statement which is later executed under LOCK
+ TABLES we ensure that for temporary tables we always request a write
+ lock (such discrepancy can cause problems for the storage engine).
+ We don't set TABLE_LIST::lock_type in this case as this might result
+ in extra warnings from THD::decide_logging_format() even though
+ binary logging is totally irrelevant for LOCK TABLES.
+ */
+ table->table->reginfo.lock_type= TL_WRITE;
+ }
+ else if (table->mdl_request.type == MDL_SHARED_READ &&
+ ! table->prelocking_placeholder &&
+ table->table->file->lock_count() == 0)
+ {
+ enum enum_mdl_type lock_type;
+ /*
+ In case when LOCK TABLE ... READ LOCAL was issued for table with
+ storage engine which doesn't support READ LOCAL option and doesn't
+ use THR_LOCK locks we need to upgrade weak SR metadata lock acquired
+ in open_tables() to stronger SRO metadata lock.
+ This is not needed for tables used through stored routines or
+ triggers as we always acquire SRO (or even stronger SNRW) metadata
+ lock for them.
+ */
+ deadlock_handler.init();
+ thd->push_internal_handler(&deadlock_handler);
+
+ lock_type= table->table->mdl_ticket->get_type() == MDL_SHARED_WRITE ?
+ MDL_SHARED_NO_READ_WRITE : MDL_SHARED_READ_ONLY;
+
+ bool result= thd->mdl_context.upgrade_shared_lock(
+ table->table->mdl_ticket,
+ lock_type,
+ thd->variables.lock_wait_timeout);
+
+ thd->pop_internal_handler();
+
+ if (deadlock_handler.need_reopen())
+ {
+ /*
+ Deadlock occurred during upgrade of metadata lock.
+ Let us restart acquring and opening tables for LOCK TABLES.
+ */
+ close_tables_for_reopen(thd, &tables, mdl_savepoint);
+ if (thd->open_temporary_tables(tables))
+ goto err;
+ goto retry;
+ }
+
+ if (result)
+ goto err;
+ }
+ }
+ }
if (lock_tables(thd, tables, counter, 0) ||
thd->locked_tables_list.init_locked_tables(thd))
@@ -2454,6 +2938,13 @@ static bool do_execute_sp(THD *thd, sp_head *sp)
thd->variables.select_limit= HA_POS_ERROR;
/*
+ Reset current_select as it may point to random data as a
+ result of previous parsing.
+ */
+ thd->lex->current_select= NULL;
+ thd->lex->in_sum_func= 0; // For Item_field::fix_fields()
+
+ /*
We never write CALL statements into binlog:
- If the mode is non-prelocked, each statement will be logged
separately.
@@ -2563,6 +3054,9 @@ mysql_execute_command(THD *thd)
thd->get_stmt_da()->opt_clear_warning_info(thd->query_id);
}
+ if (check_dependencies_in_with_clauses(thd->lex->with_clauses_list))
+ DBUG_RETURN(1);
+
#ifdef HAVE_REPLICATION
if (unlikely(thd->slave_thread))
{
@@ -2893,7 +3387,8 @@ mysql_execute_command(THD *thd)
thd->mdl_context.release_transactional_locks();
if (commit_failed)
{
- WSREP_DEBUG("implicit commit failed, MDL released: %lu", thd->thread_id);
+ WSREP_DEBUG("implicit commit failed, MDL released: %lld",
+ (longlong) thd->thread_id);
goto error;
}
}
@@ -2939,7 +3434,7 @@ mysql_execute_command(THD *thd)
*/
if (sql_command_flags[lex->sql_command] & CF_PREOPEN_TMP_TABLES)
{
- if (open_temporary_tables(thd, all_tables))
+ if (thd->open_temporary_tables(all_tables))
goto error;
}
@@ -3042,6 +3537,11 @@ mysql_execute_command(THD *thd)
break;
}
+ case SQLCOM_EXECUTE_IMMEDIATE:
+ {
+ mysql_sql_stmt_execute_immediate(thd);
+ break;
+ }
case SQLCOM_PREPARE:
{
mysql_sql_stmt_prepare(thd);
@@ -3465,8 +3965,8 @@ mysql_execute_command(THD *thd)
{
TABLE_LIST *duplicate;
if ((duplicate= unique_table(thd, lex->query_tables,
- lex->query_tables->next_global,
- CHECK_DUP_FOR_CREATE)))
+ lex->query_tables->next_global,
+ CHECK_DUP_FOR_CREATE | CHECK_DUP_SKIP_TEMP_TABLE)))
{
update_non_unique_table_error(lex->query_tables, "CREATE",
duplicate);
@@ -3544,6 +4044,12 @@ mysql_execute_command(THD *thd)
/* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */
if (create_info.tmp_table())
thd->variables.option_bits|= OPTION_KEEP_LOG;
+ /* in case of create temp tables if @@session_track_state_change is
+ ON then send session state notification in OK packet */
+ if(create_info.options & HA_LEX_CREATE_TMP_TABLE)
+ {
+ SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
+ }
my_ok(thd);
}
}
@@ -3775,7 +4281,7 @@ end_with_restore_list:
DBUG_ASSERT(select_lex->offset_limit == 0);
unit->set_limit(select_lex);
MYSQL_UPDATE_START(thd->query());
- res= (up_result= mysql_update(thd, all_tables,
+ res= up_result= mysql_update(thd, all_tables,
select_lex->item_list,
lex->value_list,
select_lex->where,
@@ -3783,7 +4289,7 @@ end_with_restore_list:
select_lex->order_list.first,
unit->select_limit_cnt,
lex->duplicates, lex->ignore,
- &found, &updated));
+ &found, &updated);
MYSQL_UPDATE_DONE(res, found, updated);
/* mysql_update return 2 if we need to switch to multi-update */
if (up_result != 2)
@@ -3917,7 +4423,8 @@ end_with_restore_list:
*/
if (first_table->lock_type != TL_WRITE_DELAYED)
{
- if ((res= open_temporary_tables(thd, all_tables)))
+ res= (thd->open_temporary_tables(all_tables)) ? TRUE : FALSE;
+ if (res)
break;
}
@@ -4006,7 +4513,7 @@ end_with_restore_list:
unit->set_limit(select_lex);
- if (!(res= open_and_lock_tables(thd, all_tables, TRUE, 0)))
+ if (!(res=open_and_lock_tables(thd, all_tables, TRUE, 0)))
{
MYSQL_INSERT_SELECT_START(thd->query());
/*
@@ -4178,7 +4685,7 @@ end_with_restore_list:
lex->table_count);
if (result)
{
- res= mysql_select(thd, &select_lex->ref_pointer_array,
+ res= mysql_select(thd,
select_lex->get_table_list(),
select_lex->with_wild,
select_lex->item_list,
@@ -4241,7 +4748,7 @@ end_with_restore_list:
{
if (!lex->tmp_table() &&
(!thd->is_current_stmt_binlog_format_row() ||
- !find_temporary_table(thd, table)))
+ !thd->find_temporary_table(table)))
{
WSREP_TO_ISOLATION_BEGIN(NULL, NULL, all_tables);
break;
@@ -4251,6 +4758,13 @@ end_with_restore_list:
/* DDL and binlog write order are protected by metadata locks. */
res= mysql_rm_table(thd, first_table, lex->if_exists(), lex->tmp_table());
+
+ /* when dropping temporary tables if @@session_track_state_change is ON then
+ send the boolean tracker in the OK packet */
+ if(!res && (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE))
+ {
+ SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
+ }
break;
}
case SQLCOM_SHOW_PROCESSLIST:
@@ -4330,7 +4844,8 @@ end_with_restore_list:
goto error;
if (!(res= sql_set_variables(thd, lex_var_list, true)))
{
- my_ok(thd);
+ if (!thd->is_error())
+ my_ok(thd);
}
else
{
@@ -4385,7 +4900,7 @@ end_with_restore_list:
CF_PREOPEN_TMP_TABLES was set and the tables would be pre-opened
in a usual way, they would have been closed.
*/
- if (open_temporary_tables(thd, all_tables))
+ if (thd->open_temporary_tables(all_tables))
goto error;
if (lock_tables_precheck(thd, all_tables))
@@ -4666,6 +5181,7 @@ end_with_restore_list:
my_ok(thd);
break;
}
+ case SQLCOM_ALTER_USER:
case SQLCOM_RENAME_USER:
{
if (check_access(thd, UPDATE_ACL, "mysql", NULL, NULL, 1, 1) &&
@@ -4673,7 +5189,11 @@ end_with_restore_list:
break;
/* Conditionally writes to binlog */
WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL)
- if (!(res= mysql_rename_user(thd, lex->users_list)))
+ if (lex->sql_command == SQLCOM_ALTER_USER)
+ res= mysql_alter_user(thd, lex->users_list);
+ else
+ res= mysql_rename_user(thd, lex->users_list);
+ if (!res)
my_ok(thd);
break;
}
@@ -4962,7 +5482,7 @@ end_with_restore_list:
DBUG_EXECUTE_IF("crash_shutdown", DBUG_SUICIDE(););
if (check_global_access(thd,SHUTDOWN_ACL))
goto error;
- kill_mysql();
+ kill_mysql(thd);
my_ok(thd);
#else
my_error(ER_NOT_SUPPORTED_YET, MYF(0), "embedded server");
@@ -4970,6 +5490,15 @@ end_with_restore_list:
break;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ case SQLCOM_SHOW_CREATE_USER:
+ {
+ LEX_USER *grant_user= lex->grant_user;
+ if (!grant_user)
+ goto error;
+
+ res = mysql_show_create_user(thd, grant_user);
+ break;
+ }
case SQLCOM_SHOW_GRANTS:
{
LEX_USER *grant_user= lex->grant_user;
@@ -5013,7 +5542,8 @@ end_with_restore_list:
if (trans_begin(thd, lex->start_transaction_opt))
{
thd->mdl_context.release_transactional_locks();
- WSREP_DEBUG("BEGIN failed, MDL released: %lu", thd->thread_id);
+ WSREP_DEBUG("BEGIN failed, MDL released: %lld",
+ (longlong) thd->thread_id);
goto error;
}
my_ok(thd);
@@ -5032,7 +5562,8 @@ end_with_restore_list:
thd->mdl_context.release_transactional_locks();
if (commit_failed)
{
- WSREP_DEBUG("COMMIT failed, MDL released: %lu", thd->thread_id);
+ WSREP_DEBUG("COMMIT failed, MDL released: %lld",
+ (longlong) thd->thread_id);
goto error;
}
/* Begin transaction with the same isolation level. */
@@ -5044,8 +5575,7 @@ end_with_restore_list:
else
{
/* Reset the isolation level and access mode if no chaining transaction.*/
- thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
- thd->tx_read_only= thd->variables.tx_read_only;
+ trans_reset_one_shot_chistics(thd);
}
/* Disconnect the current client connection. */
if (tx_release)
@@ -5084,7 +5614,8 @@ end_with_restore_list:
if (rollback_failed)
{
- WSREP_DEBUG("rollback failed, MDL released: %lu", thd->thread_id);
+ WSREP_DEBUG("rollback failed, MDL released: %lld",
+ (longlong) thd->thread_id);
goto error;
}
/* Begin transaction with the same isolation level. */
@@ -5096,8 +5627,7 @@ end_with_restore_list:
else
{
/* Reset the isolation level and access mode if no chaining transaction.*/
- thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
- thd->tx_read_only= thd->variables.tx_read_only;
+ trans_reset_one_shot_chistics(thd);
}
/* Disconnect the current client connection. */
if (tx_release)
@@ -5157,7 +5687,7 @@ end_with_restore_list:
{
if (check_routine_access(thd, ALTER_PROC_ACL, lex->spname->m_db.str,
lex->spname->m_name.str,
- lex->sql_command == SQLCOM_DROP_PROCEDURE, 0))
+ lex->sql_command == SQLCOM_CREATE_PROCEDURE, 0))
goto error;
}
@@ -5563,15 +6093,15 @@ end_with_restore_list:
thd->mdl_context.release_transactional_locks();
if (commit_failed)
{
- WSREP_DEBUG("XA commit failed, MDL released: %lu", thd->thread_id);
+ WSREP_DEBUG("XA commit failed, MDL released: %lld",
+ (longlong) thd->thread_id);
goto error;
}
/*
We've just done a commit, reset transaction
isolation level and access mode to the session default.
*/
- thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
- thd->tx_read_only= thd->variables.tx_read_only;
+ trans_reset_one_shot_chistics(thd);
my_ok(thd);
break;
}
@@ -5581,15 +6111,15 @@ end_with_restore_list:
thd->mdl_context.release_transactional_locks();
if (rollback_failed)
{
- WSREP_DEBUG("XA rollback failed, MDL released: %lu", thd->thread_id);
+ WSREP_DEBUG("XA rollback failed, MDL released: %lld",
+ (longlong) thd->thread_id);
goto error;
}
/*
We've just done a rollback, reset transaction
isolation level and access mode to the session default.
*/
- thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
- thd->tx_read_only= thd->variables.tx_read_only;
+ trans_reset_one_shot_chistics(thd);
my_ok(thd);
break;
}
@@ -5828,6 +6358,9 @@ finish:
{
thd->mdl_context.release_statement_locks();
}
+
+ TRANSACT_TRACKER(add_trx_state_from_thd(thd));
+
WSREP_TO_ISOLATION_END;
#ifdef WITH_WSREP
@@ -5839,8 +6372,8 @@ finish:
! thd->in_active_multi_stmt_transaction() &&
thd->mdl_context.has_transactional_locks())
{
- WSREP_DEBUG("Forcing release of transactional locks for thd %lu",
- thd->thread_id);
+ WSREP_DEBUG("Forcing release of transactional locks for thd: %lld",
+ (longlong) thd->thread_id);
thd->mdl_context.release_transactional_locks();
}
#endif /* WITH_WSREP */
@@ -5862,6 +6395,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
new (thd->mem_root) Item_int(thd,
(ulonglong) thd->variables.select_limit);
}
+
if (!(res= open_and_lock_tables(thd, all_tables, TRUE, 0)))
{
if (lex->describe)
@@ -5958,7 +6492,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
}
}
/* Count number of empty select queries */
- if (!thd->get_sent_row_count())
+ if (!thd->get_sent_row_count() && !res)
status_var_increment(thd->status_var.empty_queries);
else
status_var_add(thd->status_var.rows_sent, thd->get_sent_row_count());
@@ -6042,7 +6576,7 @@ static TABLE *find_temporary_table_for_rename(THD *thd,
}
}
if (!found)
- res= find_temporary_table(thd, table);
+ res= thd->find_temporary_table(table, THD::TMP_TABLE_ANY);
DBUG_RETURN(res);
}
@@ -6154,11 +6688,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
THD_STAGE_INFO(thd, stage_checking_permissions);
if ((!db || !db[0]) && !thd->db && !dont_check_global_grants)
{
- DBUG_PRINT("error",("No database"));
- if (!no_errors)
- my_message(ER_NO_DB_ERROR, ER_THD(thd, ER_NO_DB_ERROR),
- MYF(0)); /* purecov: tested */
- DBUG_RETURN(TRUE); /* purecov: tested */
+ DBUG_RETURN(FALSE); // CTE reference or an error later
}
if ((db != NULL) && (db != any_db))
@@ -6472,7 +7002,7 @@ static bool check_show_access(THD *thd, TABLE_LIST *table)
/*
Open temporary tables to be able to detect them during privilege check.
*/
- if (open_temporary_tables(thd, dst_table))
+ if (thd->open_temporary_tables(dst_table))
return TRUE;
if (check_access(thd, SELECT_ACL, dst_table->db,
@@ -7037,10 +7567,9 @@ void THD::reset_for_next_command(bool do_clear_error)
thd->thread_specific_used= FALSE;
if (opt_bin_log)
- {
reset_dynamic(&thd->user_var_events);
- thd->user_var_events_alloc= thd->mem_root;
- }
+ DBUG_ASSERT(thd->user_var_events_alloc == &thd->main_mem_root);
+
thd->get_stmt_da()->reset_for_next_command();
thd->rand_used= 0;
thd->m_sent_row_count= thd->m_examined_row_count= 0;
@@ -7147,22 +7676,30 @@ mysql_new_select(LEX *lex, bool move_down)
my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO");
DBUG_RETURN(TRUE);
}
+
+ /*
+ This type of query is not possible in the grammar:
+ SELECT 1 FROM t1 PROCEDURE ANALYSE() UNION ... ;
+
+ But this type of query is still possible:
+ (SELECT 1 FROM t1 PROCEDURE ANALYSE()) UNION ... ;
+ and it's not easy to disallow this grammatically,
+ because there can be any parenthesis nest level:
+ (((SELECT 1 FROM t1 PROCEDURE ANALYSE()))) UNION ... ;
+ */
if (lex->proc_list.elements!=0)
{
my_error(ER_WRONG_USAGE, MYF(0), "UNION",
"SELECT ... PROCEDURE ANALYSE()");
DBUG_RETURN(TRUE);
}
- if (lex->current_select->order_list.first && !lex->current_select->braces)
- {
- my_error(ER_WRONG_USAGE, MYF(0), "UNION", "ORDER BY");
- DBUG_RETURN(1);
- }
- if (lex->current_select->explicit_limit && !lex->current_select->braces)
- {
- my_error(ER_WRONG_USAGE, MYF(0), "UNION", "LIMIT");
- DBUG_RETURN(1);
- }
+ // SELECT 1 FROM t1 ORDER BY 1 UNION SELECT 1 FROM t1 -- not possible
+ DBUG_ASSERT(!lex->current_select->order_list.first ||
+ lex->current_select->braces);
+ // SELECT 1 FROM t1 LIMIT 1 UNION SELECT 1 FROM t1; -- not possible
+ DBUG_ASSERT(!lex->current_select->explicit_limit ||
+ lex->current_select->braces);
+
select_lex->include_neighbour(lex->current_select);
SELECT_LEX_UNIT *unit= select_lex->master_unit();
if (!unit->fake_select_lex && unit->add_fake_select_lex(lex->thd))
@@ -7213,7 +7750,7 @@ void create_select_for_variable(const char *var_name)
if ((var= get_system_var(thd, OPT_SESSION, tmp, null_lex_str)))
{
end= strxmov(buff, "@@session.", var_name, NullS);
- var->set_name(buff, end-buff, system_charset_info);
+ var->set_name(thd, buff, (uint)(end-buff), system_charset_info);
add_item_to_list(thd, var);
}
DBUG_VOID_RETURN;
@@ -7232,7 +7769,9 @@ void mysql_init_multi_delete(LEX *lex)
}
static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
- Parser_state *parser_state)
+ Parser_state *parser_state,
+ bool is_com_multi,
+ bool is_next_command)
{
#ifdef WITH_WSREP
bool is_autocommit=
@@ -7262,7 +7801,8 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
WSREP_DEBUG("Retry autocommit query: %s", thd->query());
}
- mysql_parse(thd, rawbuf, length, parser_state);
+ mysql_parse(thd, rawbuf, length, parser_state, is_com_multi,
+ is_next_command);
if (WSREP(thd)) {
/* wsrep BF abort in query exec phase */
@@ -7316,12 +7856,14 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
{
mysql_mutex_unlock(&thd->LOCK_thd_data);
// This does dirty read to wsrep variables but it is only a debug code
- WSREP_DEBUG("%s, thd: %lu is_AC: %d, retry: %lu - %lu SQL: %s",
+ WSREP_DEBUG("%s, thd: %lld is_AC: %d, retry: %lu - %lu SQL: %s",
(thd->wsrep_conflict_state == ABORTED) ?
"BF Aborted" : "cert failure",
- thd->thread_id, is_autocommit, thd->wsrep_retry_counter,
+ (longlong) thd->thread_id, is_autocommit,
+ thd->wsrep_retry_counter,
thd->variables.wsrep_retry_autocommit, thd->query());
- my_error(ER_LOCK_DEADLOCK, MYF(0), "wsrep aborted transaction");
+ my_message(ER_LOCK_DEADLOCK, "Deadlock: wsrep aborted transaction",
+ MYF(0));
mysql_mutex_lock(&thd->LOCK_thd_data);
thd->wsrep_conflict_state= NO_CONFLICT;
@@ -7379,10 +7921,13 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
@param length Length of the query text
@param[out] found_semicolon For multi queries, position of the character of
the next query in the query text.
+ @param is_next_command there will be more command in the COM_MULTI batch
*/
void mysql_parse(THD *thd, char *rawbuf, uint length,
- Parser_state *parser_state)
+ Parser_state *parser_state,
+ bool is_com_multi,
+ bool is_next_command)
{
int error __attribute__((unused));
DBUG_ENTER("mysql_parse");
@@ -7406,6 +7951,12 @@ void mysql_parse(THD *thd, char *rawbuf, uint length,
*/
lex_start(thd);
thd->reset_for_next_command();
+ if (is_next_command)
+ {
+ thd->server_status|= SERVER_MORE_RESULTS_EXISTS;
+ if (is_com_multi)
+ thd->get_stmt_da()->set_skip_flush();
+ }
if (query_cache_send_result_to_client(thd, rawbuf, length) <= 0)
{
@@ -7439,7 +7990,6 @@ void mysql_parse(THD *thd, char *rawbuf, uint length,
and Query_log_event::print() would give ';;' output).
This also helps display only the current query in SHOW
PROCESSLIST.
- Note that we don't need LOCK_thread_count to modify query_length.
*/
if (found_semicolon && (ulong) (found_semicolon - thd->query()))
thd->set_query(thd->query(),
@@ -7481,7 +8031,7 @@ void mysql_parse(THD *thd, char *rawbuf, uint length,
sp_cache_enforce_limit(thd->sp_func_cache, stored_program_cache_size);
thd->end_statement();
thd->cleanup_after_query();
- DBUG_ASSERT(thd->change_list.is_empty());
+ DBUG_ASSERT(thd->Item_change_list::is_empty());
}
else
{
@@ -7537,13 +8087,6 @@ bool mysql_test_parse_for_slave(THD *thd, char *rawbuf, uint length)
#endif
-/** Store position for column in ALTER TABLE .. ADD column. */
-
-void store_position_for_column(const char *name)
-{
- current_thd->lex->last_field->after=(char*) (name);
-}
-
bool
add_proc_to_list(THD* thd, Item *item)
{
@@ -7555,7 +8098,6 @@ add_proc_to_list(THD* thd, Item *item)
item_ptr = (Item**) (order+1);
*item_ptr= item;
order->item=item_ptr;
- order->free_me=0;
thd->lex->proc_list.link_in_list(order, &order->next);
return 0;
}
@@ -7573,8 +8115,7 @@ bool add_to_list(THD *thd, SQL_I_List<ORDER> &list, Item *item,bool asc)
DBUG_RETURN(1);
order->item_ptr= item;
order->item= &order->item_ptr;
- order->asc = asc;
- order->free_me=0;
+ order->direction= (asc ? ORDER::ORDER_ASC : ORDER::ORDER_DESC);
order->used=0;
order->counter_used= 0;
order->fast_field_copier_setup= 0;
@@ -7613,7 +8154,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
List<String> *partition_names,
LEX_STRING *option)
{
- register TABLE_LIST *ptr;
+ TABLE_LIST *ptr;
TABLE_LIST *UNINIT_VAR(previous_table_ref); /* The table preceding the current one. */
char *alias_str;
LEX *lex= thd->lex;
@@ -7680,7 +8221,6 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->derived= table->sel;
if (!ptr->derived && is_infoschema_db(ptr->db, ptr->db_length))
{
- ST_SCHEMA_TABLE *schema_table;
if (ptr->updating &&
/* Special cases which are processed by commands itself */
lex->sql_command != SQLCOM_CHECK &&
@@ -7692,20 +8232,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
INFORMATION_SCHEMA_NAME.str);
DBUG_RETURN(0);
}
+ ST_SCHEMA_TABLE *schema_table;
schema_table= find_schema_table(thd, ptr->table_name);
- if (!schema_table ||
- (schema_table->hidden &&
- ((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
- /*
- this check is used for show columns|keys from I_S hidden table
- */
- lex->sql_command == SQLCOM_SHOW_FIELDS ||
- lex->sql_command == SQLCOM_SHOW_KEYS)))
- {
- my_error(ER_UNKNOWN_TABLE, MYF(0),
- ptr->table_name, INFORMATION_SCHEMA_NAME.str);
- DBUG_RETURN(0);
- }
ptr->schema_table_name= ptr->table_name;
ptr->schema_table= schema_table;
}
@@ -7771,7 +8299,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
lex->add_to_query_tables(ptr);
// Pure table aliases do not need to be locked:
- if (!MY_TEST(table_options & TL_OPTION_ALIAS))
+ if (ptr->db && !(table_options & TL_OPTION_ALIAS))
{
ptr->mdl_request.init(MDL_key::TABLE, ptr->db, ptr->table_name, mdl_type,
MDL_TRANSACTION);
@@ -7991,6 +8519,65 @@ TABLE_LIST *st_select_lex::convert_right_join()
DBUG_RETURN(tab1);
}
+
+void st_select_lex::prepare_add_window_spec(THD *thd)
+{
+ LEX *lex= thd->lex;
+ lex->save_group_list= group_list;
+ lex->save_order_list= order_list;
+ lex->win_ref= NULL;
+ lex->win_frame= NULL;
+ lex->frame_top_bound= NULL;
+ lex->frame_bottom_bound= NULL;
+ group_list.empty();
+ order_list.empty();
+}
+
+bool st_select_lex::add_window_def(THD *thd,
+ LEX_STRING *win_name,
+ LEX_STRING *win_ref,
+ SQL_I_List<ORDER> win_partition_list,
+ SQL_I_List<ORDER> win_order_list,
+ Window_frame *win_frame)
+{
+ SQL_I_List<ORDER> *win_part_list_ptr=
+ new (thd->mem_root) SQL_I_List<ORDER> (win_partition_list);
+ SQL_I_List<ORDER> *win_order_list_ptr=
+ new (thd->mem_root) SQL_I_List<ORDER> (win_order_list);
+ if (!(win_part_list_ptr && win_order_list_ptr))
+ return true;
+ Window_def *win_def= new (thd->mem_root) Window_def(win_name,
+ win_ref,
+ win_part_list_ptr,
+ win_order_list_ptr,
+ win_frame);
+ group_list= thd->lex->save_group_list;
+ order_list= thd->lex->save_order_list;
+ return (win_def == NULL || window_specs.push_back(win_def));
+}
+
+bool st_select_lex::add_window_spec(THD *thd,
+ LEX_STRING *win_ref,
+ SQL_I_List<ORDER> win_partition_list,
+ SQL_I_List<ORDER> win_order_list,
+ Window_frame *win_frame)
+{
+ SQL_I_List<ORDER> *win_part_list_ptr=
+ new (thd->mem_root) SQL_I_List<ORDER> (win_partition_list);
+ SQL_I_List<ORDER> *win_order_list_ptr=
+ new (thd->mem_root) SQL_I_List<ORDER> (win_order_list);
+ if (!(win_part_list_ptr && win_order_list_ptr))
+ return true;
+ Window_spec *win_spec= new (thd->mem_root) Window_spec(win_ref,
+ win_part_list_ptr,
+ win_order_list_ptr,
+ win_frame);
+ group_list= thd->lex->save_group_list;
+ order_list= thd->lex->save_order_list;
+ thd->lex->win_spec= win_spec;
+ return (win_spec == NULL || window_specs.push_back(win_spec));
+}
+
/**
Set lock for all tables in current select level.
@@ -9223,11 +9810,8 @@ bool check_string_char_length(LEX_STRING *str, uint err_msg,
uint max_char_length, CHARSET_INFO *cs,
bool no_error)
{
- int well_formed_error;
- uint res= cs->cset->well_formed_len(cs, str->str, str->str + str->length,
- max_char_length, &well_formed_error);
-
- if (!well_formed_error && str->length == res)
+ Well_formed_prefix prefix(cs, str->str, str->length, max_char_length);
+ if (!prefix.well_formed_error_pos() && str->length == prefix.length())
return FALSE;
if (!no_error)
diff --git a/sql/sql_parse.h b/sql/sql_parse.h
index 602a3e8d788..c1dcc0d3cb9 100644
--- a/sql/sql_parse.h
+++ b/sql/sql_parse.h
@@ -36,6 +36,7 @@ enum enum_mysql_completiontype {
extern "C" int path_starts_from_data_home_dir(const char *dir);
int test_if_data_home_dir(const char *dir);
int error_if_data_home_dir(const char *path, const char *what);
+my_bool net_allocate_new_packet(NET *net, void *thd, uint my_flags);
bool multi_update_precheck(THD *thd, TABLE_LIST *tables);
bool multi_delete_precheck(THD *thd, TABLE_LIST *tables);
@@ -91,7 +92,8 @@ bool is_log_table_write_query(enum enum_sql_command command);
bool alloc_query(THD *thd, const char *packet, uint packet_length);
void mysql_init_select(LEX *lex);
void mysql_parse(THD *thd, char *rawbuf, uint length,
- Parser_state *parser_state);
+ Parser_state *parser_state, bool is_com_multi,
+ bool is_next_command);
bool mysql_new_select(LEX *lex, bool move_down);
void create_select_for_variable(const char *var_name);
void create_table_set_open_action_and_adjust_tables(LEX *lex);
@@ -103,7 +105,8 @@ int mysql_execute_command(THD *thd);
bool do_command(THD *thd);
void do_handle_bootstrap(THD *thd);
bool dispatch_command(enum enum_server_command command, THD *thd,
- char* packet, uint packet_length);
+ char* packet, uint packet_length,
+ bool is_com_multi, bool is_next_command);
void log_slow_statement(THD *thd);
bool append_file_to_dir(THD *thd, const char **filename_ptr,
const char *table_name);
@@ -119,7 +122,6 @@ bool add_proc_to_list(THD *thd, Item *item);
bool push_new_name_resolution_context(THD *thd,
TABLE_LIST *left_op,
TABLE_LIST *right_op);
-void store_position_for_column(const char *name);
void init_update_queries(void);
bool check_simple_select();
Item *normalize_cond(THD *thd, Item *cond);
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 05ef69e5795..153c1e0ff2e 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -83,67 +83,21 @@ using std::min;
/*
Partition related functions declarations and some static constants;
*/
-const LEX_STRING partition_keywords[]=
-{
- { C_STRING_WITH_LEN("HASH") },
- { C_STRING_WITH_LEN("RANGE") },
- { C_STRING_WITH_LEN("LIST") },
- { C_STRING_WITH_LEN("KEY") },
- { C_STRING_WITH_LEN("MAXVALUE") },
- { C_STRING_WITH_LEN("LINEAR ") },
- { C_STRING_WITH_LEN(" COLUMNS") },
- { C_STRING_WITH_LEN("ALGORITHM") }
-
-};
-static const char *part_str= "PARTITION";
-static const char *sub_str= "SUB";
-static const char *by_str= "BY";
-static const char *space_str= " ";
-static const char *equal_str= "=";
-static const char *end_paren_str= ")";
-static const char *begin_paren_str= "(";
-static const char *comma_str= ",";
-
-int get_partition_id_list_col(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_range_col(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_range(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_part_id_charset_func_part(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_part_id_charset_func_subpart(partition_info *part_info,
- uint32 *part_id);
-int get_partition_id_hash_nosub(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_key_nosub(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_linear_hash_nosub(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_linear_key_nosub(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_with_sub(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_hash_sub(partition_info *part_info,
- uint32 *part_id);
-int get_partition_id_key_sub(partition_info *part_info,
- uint32 *part_id);
-int get_partition_id_linear_hash_sub(partition_info *part_info,
- uint32 *part_id);
-int get_partition_id_linear_key_sub(partition_info *part_info,
- uint32 *part_id);
+static int get_partition_id_list_col(partition_info *, uint32 *, longlong *);
+static int get_partition_id_list(partition_info *, uint32 *, longlong *);
+static int get_partition_id_range_col(partition_info *, uint32 *, longlong *);
+static int get_partition_id_range(partition_info *, uint32 *, longlong *);
+static int get_part_id_charset_func_part(partition_info *, uint32 *, longlong *);
+static int get_part_id_charset_func_subpart(partition_info *, uint32 *);
+static int get_partition_id_hash_nosub(partition_info *, uint32 *, longlong *);
+static int get_partition_id_key_nosub(partition_info *, uint32 *, longlong *);
+static int get_partition_id_linear_hash_nosub(partition_info *, uint32 *, longlong *);
+static int get_partition_id_linear_key_nosub(partition_info *, uint32 *, longlong *);
+static int get_partition_id_with_sub(partition_info *, uint32 *, longlong *);
+static int get_partition_id_hash_sub(partition_info *part_info, uint32 *part_id);
+static int get_partition_id_key_sub(partition_info *part_info, uint32 *part_id);
+static int get_partition_id_linear_hash_sub(partition_info *part_info, uint32 *part_id);
+static int get_partition_id_linear_key_sub(partition_info *part_info, uint32 *part_id);
static uint32 get_next_partition_via_walking(PARTITION_ITERATOR*);
static void set_up_range_analysis_info(partition_info *part_info);
static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR*);
@@ -255,7 +209,7 @@ static bool is_name_in_list(char *name, List<char> list_names)
FALSE Success
*/
-bool partition_default_handling(TABLE *table, partition_info *part_info,
+bool partition_default_handling(THD *thd, TABLE *table, partition_info *part_info,
bool is_create_table_ind,
const char *normalized_path)
{
@@ -283,7 +237,7 @@ bool partition_default_handling(TABLE *table, partition_info *part_info,
part_info->num_subparts= num_parts / part_info->num_parts;
}
}
- part_info->set_up_defaults_for_partitioning(table->file,
+ part_info->set_up_defaults_for_partitioning(thd, table->file,
NULL, 0U);
DBUG_RETURN(FALSE);
}
@@ -318,10 +272,10 @@ int get_parts_for_update(const uchar *old_data, uchar *new_data,
DBUG_ENTER("get_parts_for_update");
DBUG_ASSERT(new_data == rec0); // table->record[0]
- set_field_ptr(part_field_array, old_data, rec0);
+ part_info->table->move_fields(part_field_array, old_data, rec0);
error= part_info->get_partition_id(part_info, old_part_id,
&old_func_value);
- set_field_ptr(part_field_array, rec0, old_data);
+ part_info->table->move_fields(part_field_array, rec0, old_data);
if (unlikely(error)) // Should never happen
{
DBUG_ASSERT(0);
@@ -346,10 +300,10 @@ int get_parts_for_update(const uchar *old_data, uchar *new_data,
future use. It will be tested by ensuring that the above
condition is false in one test situation before pushing the code.
*/
- set_field_ptr(part_field_array, new_data, rec0);
+ part_info->table->move_fields(part_field_array, new_data, rec0);
error= part_info->get_partition_id(part_info, new_part_id,
new_func_value);
- set_field_ptr(part_field_array, rec0, new_data);
+ part_info->table->move_fields(part_field_array, rec0, new_data);
if (unlikely(error))
{
DBUG_RETURN(error);
@@ -400,9 +354,9 @@ int get_part_for_delete(const uchar *buf, const uchar *rec0,
else
{
Field **part_field_array= part_info->full_part_field_array;
- set_field_ptr(part_field_array, buf, rec0);
+ part_info->table->move_fields(part_field_array, buf, rec0);
error= part_info->get_partition_id(part_info, part_id, &func_value);
- set_field_ptr(part_field_array, rec0, buf);
+ part_info->table->move_fields(part_field_array, rec0, buf);
if (unlikely(error))
{
DBUG_RETURN(error);
@@ -455,7 +409,7 @@ int get_part_for_delete(const uchar *buf, const uchar *rec0,
function.
*/
-static bool set_up_field_array(TABLE *table,
+static bool set_up_field_array(THD *thd, TABLE *table,
bool is_sub_part)
{
Field **ptr, *field, **field_array;
@@ -492,7 +446,7 @@ static bool set_up_field_array(TABLE *table,
DBUG_RETURN(result);
}
size_field_array= (num_fields+1)*sizeof(Field*);
- field_array= (Field**)sql_calloc(size_field_array);
+ field_array= (Field**) thd->calloc(size_field_array);
if (unlikely(!field_array))
{
mem_alloc_error(size_field_array);
@@ -617,7 +571,7 @@ static bool create_full_part_field_array(THD *thd, TABLE *table,
num_part_fields++;
}
size_field_array= (num_part_fields+1)*sizeof(Field*);
- field_array= (Field**)sql_calloc(size_field_array);
+ field_array= (Field**) thd->calloc(size_field_array);
if (unlikely(!field_array))
{
mem_alloc_error(size_field_array);
@@ -660,9 +614,16 @@ static bool create_full_part_field_array(THD *thd, TABLE *table,
full_part_field_array may be NULL if storage engine supports native
partitioning.
*/
+ table->vcol_set= table->read_set= &part_info->full_part_field_set;
if ((ptr= part_info->full_part_field_array))
for (; *ptr; ptr++)
- bitmap_set_bit(&part_info->full_part_field_set, (*ptr)->field_index);
+ {
+ if ((*ptr)->vcol_info)
+ table->mark_virtual_col(*ptr);
+ else
+ bitmap_fast_test_and_set(table->read_set, (*ptr)->field_index);
+ }
+ table->default_column_bitmaps();
end:
DBUG_RETURN(result);
@@ -804,7 +765,7 @@ static void clear_field_flag(TABLE *table)
*/
-static bool handle_list_of_fields(List_iterator<char> it,
+static bool handle_list_of_fields(THD *thd, List_iterator<char> it,
TABLE *table,
partition_info *part_info,
bool is_sub_part)
@@ -865,7 +826,7 @@ static bool handle_list_of_fields(List_iterator<char> it,
}
}
}
- result= set_up_field_array(table, is_sub_part);
+ result= set_up_field_array(thd, table, is_sub_part);
end:
DBUG_RETURN(result);
}
@@ -961,9 +922,9 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
if (init_lex_with_single_table(thd, table, &lex))
goto end;
+ table->get_fields_in_item_tree= true;
- func_expr->walk(&Item::change_context_processor, 0,
- (uchar*) &lex.select_lex.context);
+ func_expr->walk(&Item::change_context_processor, 0, &lex.select_lex.context);
thd->where= "partition function";
/*
In execution we must avoid the use of thd->change_item_tree since
@@ -988,7 +949,7 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
thd->lex->allow_sum_func= 0;
if (!(error= func_expr->fix_fields(thd, (Item**)&func_expr)))
- func_expr->walk(&Item::vcol_in_partition_func_processor, 0, NULL);
+ func_expr->walk(&Item::post_fix_fields_part_expr_processor, 0, NULL);
/*
Restore agg_field/agg_func and allow_sum_func,
@@ -1018,8 +979,7 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
easier maintenance. This exception should be deprecated at some point
in future so that we always throw an error.
*/
- if (func_expr->walk(&Item::check_valid_arguments_processor,
- 0, NULL))
+ if (func_expr->walk(&Item::check_valid_arguments_processor, 0, NULL))
{
if (is_create_table_ind)
{
@@ -1034,13 +994,10 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
if ((!is_sub_part) && (error= check_signed_flag(part_info)))
goto end;
- result= set_up_field_array(table, is_sub_part);
+ result= set_up_field_array(thd, table, is_sub_part);
end:
end_lex_with_single_table(thd, table, old_lex);
-#if !defined(DBUG_OFF)
- func_expr->walk(&Item::change_context_processor, 0,
- (uchar*) 0);
-#endif
+ func_expr->walk(&Item::change_context_processor, 0, 0);
DBUG_RETURN(result);
}
@@ -1622,7 +1579,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
if (!is_create_table_ind ||
thd->lex->sql_command != SQLCOM_CREATE_TABLE)
{
- if (partition_default_handling(table, part_info,
+ if (partition_default_handling(thd, table, part_info,
is_create_table_ind,
table->s->normalized_path.str))
{
@@ -1641,7 +1598,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
if (part_info->list_of_subpart_fields)
{
List_iterator<char> it(part_info->subpart_field_list);
- if (unlikely(handle_list_of_fields(it, table, part_info, TRUE)))
+ if (unlikely(handle_list_of_fields(thd, it, table, part_info, TRUE)))
goto end;
}
else
@@ -1668,7 +1625,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
if (part_info->list_of_part_fields)
{
List_iterator<char> it(part_info->part_field_list);
- if (unlikely(handle_list_of_fields(it, table, part_info, FALSE)))
+ if (unlikely(handle_list_of_fields(thd, it, table, part_info, FALSE)))
goto end;
}
else
@@ -1690,7 +1647,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
if (part_info->column_list)
{
List_iterator<char> it(part_info->part_field_list);
- if (unlikely(handle_list_of_fields(it, table, part_info, FALSE)))
+ if (unlikely(handle_list_of_fields(thd, it, table, part_info, FALSE)))
goto end;
}
else
@@ -1702,13 +1659,13 @@ bool fix_partition_func(THD *thd, TABLE *table,
part_info->fixed= TRUE;
if (part_info->part_type == RANGE_PARTITION)
{
- error_str= partition_keywords[PKW_RANGE].str;
+ error_str= "HASH";
if (unlikely(part_info->check_range_constants(thd)))
goto end;
}
else if (part_info->part_type == LIST_PARTITION)
{
- error_str= partition_keywords[PKW_LIST].str;
+ error_str= "LIST";
if (unlikely(part_info->check_list_constants(thd)))
goto end;
}
@@ -1786,154 +1743,48 @@ end:
ALTER TABLE commands. Finally it is used for SHOW CREATE TABLES.
*/
-static int add_write(File fptr, const char *buf, uint len)
-{
- uint ret_code= mysql_file_write(fptr, (const uchar*)buf, len, MYF(MY_FNABP));
-
- if (likely(ret_code == 0))
- return 0;
- else
- return 1;
-}
-
-static int add_string_object(File fptr, String *string)
-{
- return add_write(fptr, string->ptr(), string->length());
-}
-
-static int add_string(File fptr, const char *string)
-{
- return add_write(fptr, string, strlen(string));
-}
-
-static int add_string_len(File fptr, const char *string, uint len)
-{
- return add_write(fptr, string, len);
-}
-
-static int add_space(File fptr)
-{
- return add_string(fptr, space_str);
-}
-
-static int add_comma(File fptr)
-{
- return add_string(fptr, comma_str);
-}
-
-static int add_equal(File fptr)
-{
- return add_string(fptr, equal_str);
-}
-
-static int add_end_parenthesis(File fptr)
-{
- return add_string(fptr, end_paren_str);
-}
-
-static int add_begin_parenthesis(File fptr)
-{
- return add_string(fptr, begin_paren_str);
-}
-
-static int add_part_key_word(File fptr, const char *key_string)
-{
- int err= add_string(fptr, key_string);
- err+= add_space(fptr);
- return err;
-}
-
-static int add_partition(File fptr)
-{
- char buff[22];
- strxmov(buff, part_str, space_str, NullS);
- return add_string(fptr, buff);
-}
-
-static int add_subpartition(File fptr)
-{
- int err= add_string(fptr, sub_str);
-
- return err + add_partition(fptr);
-}
-
-static int add_partition_by(File fptr)
-{
- char buff[22];
- strxmov(buff, part_str, space_str, by_str, space_str, NullS);
- return add_string(fptr, buff);
-}
-
-static int add_subpartition_by(File fptr)
-{
- int err= add_string(fptr, sub_str);
-
- return err + add_partition_by(fptr);
-}
-
-static int add_name_string(File fptr, const char *name)
-{
- int err;
- String name_string("", 0, system_charset_info);
- THD *thd= current_thd;
- ulonglong save_sql_mode= thd->variables.sql_mode;
- thd->variables.sql_mode&= ~MODE_ANSI_QUOTES;
- ulonglong save_options= thd->variables.option_bits;
- thd->variables.option_bits&= ~OPTION_QUOTE_SHOW_CREATE;
- append_identifier(thd, &name_string, name, strlen(name));
- thd->variables.sql_mode= save_sql_mode;
- thd->variables.option_bits= save_options;
- err= add_string_object(fptr, &name_string);
- return err;
-}
-
-static int add_part_field_list(File fptr, List<char> field_list)
+static int add_part_field_list(THD *thd, String *str, List<char> field_list)
{
- uint i, num_fields;
int err= 0;
-
+ const char *field_name;
List_iterator<char> part_it(field_list);
- num_fields= field_list.elements;
- i= 0;
- err+= add_begin_parenthesis(fptr);
- while (i < num_fields)
+
+ err+= str->append('(');
+ while ((field_name= part_it++))
{
- err+= add_name_string(fptr, part_it++);
- if (i != (num_fields-1))
- err+= add_comma(fptr);
- i++;
+ err+= append_identifier(thd, str, field_name, strlen(field_name));
+ err+= str->append(',');
}
- err+= add_end_parenthesis(fptr);
+ if (field_list.elements)
+ str->length(str->length()-1);
+ err+= str->append(')');
return err;
}
-static int add_int(File fptr, longlong number)
-{
- char buff[32];
- llstr(number, buff);
- return add_string(fptr, buff);
-}
-
-static int add_uint(File fptr, ulonglong number)
-{
- char buff[32];
- longlong2str(number, buff, 10);
- return add_string(fptr, buff);
-}
-
/*
Must escape strings in partitioned tables frm-files,
parsing it later with mysql_unpack_partition will fail otherwise.
*/
-static int add_quoted_string(File fptr, const char *quotestr)
+
+static int add_keyword_string(String *str, const char *keyword,
+ bool quoted, const char *keystr)
{
- String escapedstr;
- int err= add_string(fptr, "'");
- err+= escapedstr.append_for_single_quote(quotestr);
- err+= add_string(fptr, escapedstr.c_ptr_safe());
- return err + add_string(fptr, "'");
+ int err= str->append(' ');
+ err+= str->append(keyword);
+
+ str->append(STRING_WITH_LEN(" = "));
+ if (quoted)
+ {
+ err+= str->append('\'');
+ err+= str->append_for_single_quote(keystr);
+ err+= str->append('\'');
+ }
+ else
+ err+= str->append(keystr);
+ return err;
}
+
/**
@brief Truncate the partition file name from a path it it exists.
@@ -1966,7 +1817,6 @@ void truncate_partition_filename(char *path)
}
}
-
/**
@brief Output a filepath. Similar to add_keyword_string except it
also converts \ to / on Windows and skips the partition file name at
@@ -1978,15 +1828,9 @@ table. So when the storage engine is asked for the DATA DIRECTORY string
after a restart through Handler::update_create_options(), the storage
engine may include the filename.
*/
-static int add_keyword_path(File fptr, const char *keyword,
+static int add_keyword_path(String *str, const char *keyword,
const char *path)
{
- int err= add_string(fptr, keyword);
-
- err+= add_space(fptr);
- err+= add_equal(fptr);
- err+= add_space(fptr);
-
char temp_path[FN_REFLEN];
strcpy(temp_path, path);
#ifdef __WIN__
@@ -2006,73 +1850,44 @@ static int add_keyword_path(File fptr, const char *keyword,
*/
truncate_partition_filename(temp_path);
- err+= add_quoted_string(fptr, temp_path);
-
- return err + add_space(fptr);
+ return add_keyword_string(str, keyword, true, temp_path);
}
-static int add_keyword_string(File fptr, const char *keyword,
- bool should_use_quotes,
- const char *keystr)
+static int add_keyword_int(String *str, const char *keyword, longlong num)
{
- int err= add_string(fptr, keyword);
-
- err+= add_space(fptr);
- err+= add_equal(fptr);
- err+= add_space(fptr);
- if (should_use_quotes)
- err+= add_quoted_string(fptr, keystr);
- else
- err+= add_string(fptr, keystr);
- return err + add_space(fptr);
-}
-
-static int add_keyword_int(File fptr, const char *keyword, longlong num)
-{
- int err= add_string(fptr, keyword);
-
- err+= add_space(fptr);
- err+= add_equal(fptr);
- err+= add_space(fptr);
- err+= add_int(fptr, num);
- return err + add_space(fptr);
+ int err= str->append(' ');
+ err+= str->append(keyword);
+ str->append(STRING_WITH_LEN(" = "));
+ return err + str->append_longlong(num);
}
-static int add_engine(File fptr, handlerton *engine_type)
-{
- const char *engine_str= ha_resolve_storage_engine_name(engine_type);
- DBUG_PRINT("info", ("ENGINE: %s", engine_str));
- int err= add_string(fptr, "ENGINE = ");
- return err + add_string(fptr, engine_str);
-}
-
-static int add_partition_options(File fptr, partition_element *p_elem)
+static int add_partition_options(String *str, partition_element *p_elem)
{
int err= 0;
- err+= add_space(fptr);
if (p_elem->tablespace_name)
- err+= add_keyword_string(fptr,"TABLESPACE", FALSE,
- p_elem->tablespace_name);
+ err+= add_keyword_string(str,"TABLESPACE", false, p_elem->tablespace_name);
if (p_elem->nodegroup_id != UNDEF_NODEGROUP)
- err+= add_keyword_int(fptr,"NODEGROUP",(longlong)p_elem->nodegroup_id);
+ err+= add_keyword_int(str,"NODEGROUP",(longlong)p_elem->nodegroup_id);
if (p_elem->part_max_rows)
- err+= add_keyword_int(fptr,"MAX_ROWS",(longlong)p_elem->part_max_rows);
+ err+= add_keyword_int(str,"MAX_ROWS",(longlong)p_elem->part_max_rows);
if (p_elem->part_min_rows)
- err+= add_keyword_int(fptr,"MIN_ROWS",(longlong)p_elem->part_min_rows);
+ err+= add_keyword_int(str,"MIN_ROWS",(longlong)p_elem->part_min_rows);
if (!(current_thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE))
{
if (p_elem->data_file_name)
- err+= add_keyword_path(fptr, "DATA DIRECTORY", p_elem->data_file_name);
+ err+= add_keyword_path(str, "DATA DIRECTORY", p_elem->data_file_name);
if (p_elem->index_file_name)
- err+= add_keyword_path(fptr, "INDEX DIRECTORY", p_elem->index_file_name);
+ err+= add_keyword_path(str, "INDEX DIRECTORY", p_elem->index_file_name);
}
if (p_elem->part_comment)
- err+= add_keyword_string(fptr, "COMMENT", TRUE, p_elem->part_comment);
+ err+= add_keyword_string(str, "COMMENT", true, p_elem->part_comment);
if (p_elem->connect_string.length)
- err+= add_keyword_string(fptr, "CONNECTION", TRUE,
+ err+= add_keyword_string(str, "CONNECTION", true,
p_elem->connect_string.str);
- return err + add_engine(fptr,p_elem->engine_type);
+ err += add_keyword_string(str, "ENGINE", false,
+ ha_resolve_storage_engine_name(p_elem->engine_type));
+ return err;
}
@@ -2183,7 +1998,7 @@ static Create_field* get_sql_field(char *field_name,
}
-static int add_column_list_values(File fptr, partition_info *part_info,
+static int add_column_list_values(String *str, partition_info *part_info,
part_elem_value *list_value,
HA_CREATE_INFO *create_info,
Alter_info *alter_info)
@@ -2196,25 +2011,22 @@ static int add_column_list_values(File fptr, partition_info *part_info,
part_info->num_columns > 1U);
if (use_parenthesis)
- err+= add_begin_parenthesis(fptr);
+ err+= str->append('(');
for (i= 0; i < num_elements; i++)
{
part_column_list_val *col_val= &list_value->col_val_array[i];
char *field_name= it++;
if (col_val->max_value)
- err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ err+= str->append(STRING_WITH_LEN("MAXVALUE"));
else if (col_val->null_value)
- err+= add_string(fptr, "NULL");
+ err+= str->append(STRING_WITH_LEN("NULL"));
else
{
- char buffer[MAX_KEY_LENGTH];
- String str(buffer, sizeof(buffer), &my_charset_bin);
Item *item_expr= col_val->item_expression;
if (item_expr->null_value)
- err+= add_string(fptr, "NULL");
+ err+= str->append(STRING_WITH_LEN("NULL"));
else
{
- String *res;
CHARSET_INFO *field_cs;
bool need_cs_check= FALSE;
Item_result result_type= STRING_RESULT;
@@ -2275,27 +2087,28 @@ static int add_column_list_values(File fptr, partition_info *part_info,
}
}
{
- String val_conv;
+ StringBuffer<MAX_KEY_LENGTH> buf;
+ String val_conv, *res;
val_conv.set_charset(system_charset_info);
- res= item_expr->val_str(&str);
+ res= item_expr->val_str(&buf);
if (get_cs_converted_part_value_from_string(current_thd,
item_expr, res,
&val_conv, field_cs,
(bool)(alter_info != NULL)))
return 1;
- err+= add_string_object(fptr, &val_conv);
+ err+= str->append(val_conv);
}
}
}
if (i != (num_elements - 1))
- err+= add_string(fptr, comma_str);
+ err+= str->append(',');
}
if (use_parenthesis)
- err+= add_end_parenthesis(fptr);
+ err+= str->append(')');
return err;
}
-static int add_partition_values(File fptr, partition_info *part_info,
+static int add_partition_values(String *str, partition_info *part_info,
partition_element *p_elem,
HA_CREATE_INFO *create_info,
Alter_info *alter_info)
@@ -2304,48 +2117,57 @@ static int add_partition_values(File fptr, partition_info *part_info,
if (part_info->part_type == RANGE_PARTITION)
{
- err+= add_string(fptr, " VALUES LESS THAN ");
+ err+= str->append(STRING_WITH_LEN(" VALUES LESS THAN "));
if (part_info->column_list)
{
List_iterator<part_elem_value> list_val_it(p_elem->list_val_list);
part_elem_value *list_value= list_val_it++;
- err+= add_begin_parenthesis(fptr);
- err+= add_column_list_values(fptr, part_info, list_value,
+ err+= str->append('(');
+ err+= add_column_list_values(str, part_info, list_value,
create_info, alter_info);
- err+= add_end_parenthesis(fptr);
+ err+= str->append(')');
}
else
{
if (!p_elem->max_value)
{
- err+= add_begin_parenthesis(fptr);
+ err+= str->append('(');
if (p_elem->signed_flag)
- err+= add_int(fptr, p_elem->range_value);
+ err+= str->append_longlong(p_elem->range_value);
else
- err+= add_uint(fptr, p_elem->range_value);
- err+= add_end_parenthesis(fptr);
+ err+= str->append_ulonglong(p_elem->range_value);
+ err+= str->append(')');
}
else
- err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ err+= str->append(STRING_WITH_LEN("MAXVALUE"));
}
}
else if (part_info->part_type == LIST_PARTITION)
{
uint i;
List_iterator<part_elem_value> list_val_it(p_elem->list_val_list);
- err+= add_string(fptr, " VALUES IN ");
+
+ if (p_elem->max_value)
+ {
+ DBUG_ASSERT(part_info->defined_max_value ||
+ current_thd->lex->sql_command == SQLCOM_ALTER_TABLE);
+ err+= str->append(STRING_WITH_LEN(" DEFAULT"));
+ return err;
+ }
+
+ err+= str->append(STRING_WITH_LEN(" VALUES IN "));
uint num_items= p_elem->list_val_list.elements;
- err+= add_begin_parenthesis(fptr);
+ err+= str->append('(');
if (p_elem->has_null_value)
{
- err+= add_string(fptr, "NULL");
+ err+= str->append(STRING_WITH_LEN("NULL"));
if (num_items == 0)
{
- err+= add_end_parenthesis(fptr);
+ err+= str->append(')');
goto end;
}
- err+= add_comma(fptr);
+ err+= str->append(',');
}
i= 0;
do
@@ -2353,19 +2175,19 @@ static int add_partition_values(File fptr, partition_info *part_info,
part_elem_value *list_value= list_val_it++;
if (part_info->column_list)
- err+= add_column_list_values(fptr, part_info, list_value,
+ err+= add_column_list_values(str, part_info, list_value,
create_info, alter_info);
else
{
if (!list_value->unsigned_flag)
- err+= add_int(fptr, list_value->value);
+ err+= str->append_longlong(list_value->value);
else
- err+= add_uint(fptr, list_value->value);
+ err+= str->append_ulonglong(list_value->value);
}
if (i != (num_items-1))
- err+= add_comma(fptr);
+ err+= str->append(',');
} while (++i < num_items);
- err+= add_end_parenthesis(fptr);
+ err+= str->append(')');
}
end:
return err;
@@ -2375,53 +2197,40 @@ end:
/**
Add 'KEY' word, with optional 'ALGORTIHM = N'.
- @param fptr File to write to.
+ @param str String to write to.
@param part_info partition_info holding the used key_algorithm
- @param current_comment_start NULL, or comment string encapsulating the
- PARTITION BY clause.
@return Operation status.
@retval 0 Success
@retval != 0 Failure
*/
-static int add_key_with_algorithm(File fptr, partition_info *part_info,
- const char *current_comment_start)
+static int add_key_with_algorithm(String *str, partition_info *part_info)
{
int err= 0;
- err+= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
+ err+= str->append(STRING_WITH_LEN("KEY "));
- /*
- current_comment_start is given when called from SHOW CREATE TABLE,
- Then only add ALGORITHM = 1, not the default 2 or non-set 0!
- For .frm current_comment_start is NULL, then add ALGORITHM if != 0.
- */
- if (part_info->key_algorithm == partition_info::KEY_ALGORITHM_51 || // SHOW
- (!current_comment_start && // .frm
- (part_info->key_algorithm != partition_info::KEY_ALGORITHM_NONE)))
- {
- /* If we already are within a comment, end that comment first. */
- if (current_comment_start)
- err+= add_string(fptr, "*/ ");
- err+= add_string(fptr, "/*!50611 ");
- err+= add_part_key_word(fptr, partition_keywords[PKW_ALGORITHM].str);
- err+= add_equal(fptr);
- err+= add_space(fptr);
- err+= add_int(fptr, part_info->key_algorithm);
- err+= add_space(fptr);
- err+= add_string(fptr, "*/ ");
- if (current_comment_start)
- {
- /* Skip new line. */
- if (current_comment_start[0] == '\n')
- current_comment_start++;
- err+= add_string(fptr, current_comment_start);
- err+= add_space(fptr);
- }
+ if (part_info->key_algorithm == partition_info::KEY_ALGORITHM_51)
+ {
+ err+= str->append(STRING_WITH_LEN("ALGORITHM = "));
+ err+= str->append_longlong(part_info->key_algorithm);
+ err+= str->append(' ');
}
return err;
}
+char *generate_partition_syntax_for_frm(THD *thd, partition_info *part_info,
+ uint *buf_length,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info)
+{
+ sql_mode_t old_mode= thd->variables.sql_mode;
+ thd->variables.sql_mode &= ~MODE_ANSI_QUOTES;
+ char *res= generate_partition_syntax(thd, part_info, buf_length,
+ true, create_info, alter_info);
+ thd->variables.sql_mode= old_mode;
+ return res;
+}
/*
Generate the partition syntax from the partition data structure.
@@ -2432,8 +2241,6 @@ static int add_key_with_algorithm(File fptr, partition_info *part_info,
generate_partition_syntax()
part_info The partitioning data structure
buf_length A pointer to the returned buffer length
- use_sql_alloc Allocate buffer from sql_alloc if true
- otherwise use my_malloc
show_partition_options Should we display partition options
create_info Info generated by parser
alter_info Info generated by parser
@@ -2450,64 +2257,42 @@ static int add_key_with_algorithm(File fptr, partition_info *part_info,
type ALTER TABLE commands focusing on changing the PARTITION structure
in any fashion.
- The implementation writes the syntax to a temporary file (essentially
- an abstraction of a dynamic array) and if all writes goes well it
- allocates a buffer and writes the syntax into this one and returns it.
-
- As a security precaution the file is deleted before writing into it. This
- means that no other processes on the machine can open and read the file
- while this processing is ongoing.
-
The code is optimised for minimal code size since it is not used in any
common queries.
*/
-char *generate_partition_syntax(partition_info *part_info,
+char *generate_partition_syntax(THD *thd, partition_info *part_info,
uint *buf_length,
- bool use_sql_alloc,
bool show_partition_options,
HA_CREATE_INFO *create_info,
- Alter_info *alter_info,
- const char *current_comment_start)
+ Alter_info *alter_info)
{
uint i,j, tot_num_parts, num_subparts;
partition_element *part_elem;
- ulonglong buffer_length;
- char path[FN_REFLEN];
int err= 0;
List_iterator<partition_element> part_it(part_info->partitions);
- File fptr;
- char *buf= NULL; //Return buffer
+ StringBuffer<1024> str;
DBUG_ENTER("generate_partition_syntax");
- if (unlikely(((fptr= create_temp_file(path,mysql_tmpdir,"psy",
- O_RDWR | O_BINARY | O_TRUNC |
- O_TEMPORARY, MYF(MY_WME)))) < 0))
- DBUG_RETURN(NULL);
-#ifndef __WIN__
- unlink(path);
-#endif
- err+= add_space(fptr);
- err+= add_partition_by(fptr);
+ err+= str.append(STRING_WITH_LEN(" PARTITION BY "));
switch (part_info->part_type)
{
case RANGE_PARTITION:
- err+= add_part_key_word(fptr, partition_keywords[PKW_RANGE].str);
+ err+= str.append(STRING_WITH_LEN("RANGE "));
break;
case LIST_PARTITION:
- err+= add_part_key_word(fptr, partition_keywords[PKW_LIST].str);
+ err+= str.append(STRING_WITH_LEN("LIST "));
break;
case HASH_PARTITION:
if (part_info->linear_hash_ind)
- err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
+ err+= str.append(STRING_WITH_LEN("LINEAR "));
if (part_info->list_of_part_fields)
{
- err+= add_key_with_algorithm(fptr, part_info,
- current_comment_start);
- err+= add_part_field_list(fptr, part_info->part_field_list);
+ err+= add_key_with_algorithm(&str, part_info);
+ err+= add_part_field_list(thd, &str, part_info->part_field_list);
}
else
- err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
+ err+= str.append(STRING_WITH_LEN("HASH "));
break;
default:
DBUG_ASSERT(0);
@@ -2517,51 +2302,45 @@ char *generate_partition_syntax(partition_info *part_info,
}
if (part_info->part_expr)
{
- err+= add_begin_parenthesis(fptr);
- err+= add_string_len(fptr, part_info->part_func_string,
- part_info->part_func_len);
- err+= add_end_parenthesis(fptr);
+ err+= str.append('(');
+ part_info->part_expr->print_for_table_def(&str);
+ err+= str.append(')');
}
else if (part_info->column_list)
{
- err+= add_string(fptr, partition_keywords[PKW_COLUMNS].str);
- err+= add_part_field_list(fptr, part_info->part_field_list);
+ err+= str.append(STRING_WITH_LEN(" COLUMNS"));
+ err+= add_part_field_list(thd, &str, part_info->part_field_list);
}
if ((!part_info->use_default_num_partitions) &&
part_info->use_default_partitions)
{
- err+= add_string(fptr, "\n");
- err+= add_string(fptr, "PARTITIONS ");
- err+= add_int(fptr, part_info->num_parts);
+ err+= str.append(STRING_WITH_LEN("\nPARTITIONS "));
+ err+= str.append_ulonglong(part_info->num_parts);
}
if (part_info->is_sub_partitioned())
{
- err+= add_string(fptr, "\n");
- err+= add_subpartition_by(fptr);
+ err+= str.append(STRING_WITH_LEN("\nSUBPARTITION BY "));
/* Must be hash partitioning for subpartitioning */
if (part_info->linear_hash_ind)
- err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
+ err+= str.append(STRING_WITH_LEN("LINEAR "));
if (part_info->list_of_subpart_fields)
{
- err+= add_key_with_algorithm(fptr, part_info,
- current_comment_start);
- err+= add_part_field_list(fptr, part_info->subpart_field_list);
+ err+= add_key_with_algorithm(&str, part_info);
+ err+= add_part_field_list(thd, &str, part_info->subpart_field_list);
}
else
- err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
+ err+= str.append(STRING_WITH_LEN("HASH "));
if (part_info->subpart_expr)
{
- err+= add_begin_parenthesis(fptr);
- err+= add_string_len(fptr, part_info->subpart_func_string,
- part_info->subpart_func_len);
- err+= add_end_parenthesis(fptr);
+ err+= str.append('(');
+ part_info->subpart_expr->print_for_table_def(&str);
+ err+= str.append(')');
}
if ((!part_info->use_default_num_subpartitions) &&
part_info->use_default_subpartitions)
{
- err+= add_string(fptr, "\n");
- err+= add_string(fptr, "SUBPARTITIONS ");
- err+= add_int(fptr, part_info->num_subparts);
+ err+= str.append(STRING_WITH_LEN("\nSUBPARTITIONS "));
+ err+= str.append_ulonglong(part_info->num_subparts);
}
}
tot_num_parts= part_info->partitions.elements;
@@ -2570,8 +2349,7 @@ char *generate_partition_syntax(partition_info *part_info,
if (!part_info->use_default_partitions)
{
bool first= TRUE;
- err+= add_string(fptr, "\n");
- err+= add_begin_parenthesis(fptr);
+ err+= str.append(STRING_WITH_LEN("\n("));
i= 0;
do
{
@@ -2580,80 +2358,47 @@ char *generate_partition_syntax(partition_info *part_info,
part_elem->part_state != PART_REORGED_DROPPED)
{
if (!first)
- {
- err+= add_comma(fptr);
- err+= add_string(fptr, "\n");
- err+= add_space(fptr);
- }
+ err+= str.append(STRING_WITH_LEN(",\n "));
first= FALSE;
- err+= add_partition(fptr);
- err+= add_name_string(fptr, part_elem->partition_name);
- err+= add_partition_values(fptr, part_info, part_elem,
+ err+= str.append(STRING_WITH_LEN("PARTITION "));
+ err+= append_identifier(thd, &str, part_elem->partition_name,
+ strlen(part_elem->partition_name));
+ err+= add_partition_values(&str, part_info, part_elem,
create_info, alter_info);
if (!part_info->is_sub_partitioned() ||
part_info->use_default_subpartitions)
{
if (show_partition_options)
- err+= add_partition_options(fptr, part_elem);
+ err+= add_partition_options(&str, part_elem);
}
else
{
- err+= add_string(fptr, "\n");
- err+= add_space(fptr);
- err+= add_begin_parenthesis(fptr);
+ err+= str.append(STRING_WITH_LEN("\n ("));
List_iterator<partition_element> sub_it(part_elem->subpartitions);
j= 0;
do
{
part_elem= sub_it++;
- err+= add_subpartition(fptr);
- err+= add_name_string(fptr, part_elem->partition_name);
+ err+= str.append(STRING_WITH_LEN("SUBPARTITION "));
+ err+= append_identifier(thd, &str, part_elem->partition_name,
+ strlen(part_elem->partition_name));
if (show_partition_options)
- err+= add_partition_options(fptr, part_elem);
+ err+= add_partition_options(&str, part_elem);
if (j != (num_subparts-1))
- {
- err+= add_comma(fptr);
- err+= add_string(fptr, "\n");
- err+= add_space(fptr);
- err+= add_space(fptr);
- }
+ err+= str.append(STRING_WITH_LEN(",\n "));
else
- err+= add_end_parenthesis(fptr);
+ err+= str.append(')');
} while (++j < num_subparts);
}
}
if (i == (tot_num_parts-1))
- err+= add_end_parenthesis(fptr);
+ err+= str.append(')');
} while (++i < tot_num_parts);
}
if (err)
- goto close_file;
- buffer_length= mysql_file_seek(fptr, 0L, MY_SEEK_END, MYF(0));
- if (unlikely(buffer_length == MY_FILEPOS_ERROR))
- goto close_file;
- if (unlikely(mysql_file_seek(fptr, 0L, MY_SEEK_SET, MYF(0))
- == MY_FILEPOS_ERROR))
- goto close_file;
- *buf_length= (uint)buffer_length;
- if (use_sql_alloc)
- buf= (char*) sql_alloc(*buf_length+1);
- else
- buf= (char*) my_malloc(*buf_length+1, MYF(MY_WME));
- if (!buf)
- goto close_file;
-
- if (unlikely(mysql_file_read(fptr, (uchar*)buf, *buf_length, MYF(MY_FNABP))))
- {
- if (!use_sql_alloc)
- my_free(buf);
- buf= NULL;
- }
- else
- buf[*buf_length]= 0;
-
-close_file:
- mysql_file_close(fptr, MYF(0));
- DBUG_RETURN(buf);
+ DBUG_RETURN(NULL);
+ *buf_length= str.length();
+ DBUG_RETURN(thd->strmake(str.ptr(), str.length()));
}
@@ -3092,6 +2837,11 @@ int get_partition_id_list_col(partition_info *part_info,
}
}
notfound:
+ if (part_info->defined_max_value)
+ {
+ *part_id= part_info->default_partition_id;
+ DBUG_RETURN(0);
+ }
*part_id= 0;
DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
}
@@ -3145,6 +2895,11 @@ int get_partition_id_list(partition_info *part_info,
}
}
notfound:
+ if (part_info->defined_max_value)
+ {
+ *part_id= part_info->default_partition_id;
+ DBUG_RETURN(0);
+ }
*part_id= 0;
DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
}
@@ -3790,9 +3545,9 @@ static int get_sub_part_id_from_key(const TABLE *table,uchar *buf,
else
{
Field **part_field_array= part_info->subpart_field_array;
- set_field_ptr(part_field_array, buf, rec0);
+ part_info->table->move_fields(part_field_array, buf, rec0);
res= part_info->get_subpartition_id(part_info, part_id);
- set_field_ptr(part_field_array, rec0, buf);
+ part_info->table->move_fields(part_field_array, rec0, buf);
}
DBUG_RETURN(res);
}
@@ -3836,10 +3591,10 @@ bool get_part_id_from_key(const TABLE *table, uchar *buf, KEY *key_info,
else
{
Field **part_field_array= part_info->part_field_array;
- set_field_ptr(part_field_array, buf, rec0);
+ part_info->table->move_fields(part_field_array, buf, rec0);
result= part_info->get_part_partition_id(part_info, part_id,
&func_value);
- set_field_ptr(part_field_array, rec0, buf);
+ part_info->table->move_fields(part_field_array, rec0, buf);
}
DBUG_RETURN(result);
}
@@ -3885,10 +3640,10 @@ void get_full_part_id_from_key(const TABLE *table, uchar *buf,
else
{
Field **part_field_array= part_info->full_part_field_array;
- set_field_ptr(part_field_array, buf, rec0);
+ part_info->table->move_fields(part_field_array, buf, rec0);
result= part_info->get_partition_id(part_info, &part_spec->start_part,
&func_value);
- set_field_ptr(part_field_array, rec0, buf);
+ part_info->table->move_fields(part_field_array, rec0, buf);
}
part_spec->end_part= part_spec->start_part;
if (unlikely(result))
@@ -3938,7 +3693,7 @@ bool verify_data_with_partition(TABLE *table, TABLE *part_table,
bitmap_union(table->read_set, &part_info->full_part_field_set);
old_rec= part_table->record[0];
part_table->record[0]= table->record[0];
- set_field_ptr(part_info->full_part_field_array, table->record[0], old_rec);
+ part_info->table->move_fields(part_info->full_part_field_array, table->record[0], old_rec);
if ((error= file->ha_rnd_init(TRUE)))
{
file->print_error(error, MYF(0));
@@ -3973,7 +3728,7 @@ bool verify_data_with_partition(TABLE *table, TABLE *part_table,
} while (TRUE);
(void) file->ha_rnd_end();
err:
- set_field_ptr(part_info->full_part_field_array, old_rec,
+ part_info->table->move_fields(part_info->full_part_field_array, old_rec,
table->record[0]);
part_table->record[0]= old_rec;
if (error)
@@ -4374,39 +4129,6 @@ bool mysql_unpack_partition(THD *thd,
DBUG_ASSERT(part_info->default_engine_type == default_db_type);
DBUG_ASSERT(part_info->default_engine_type->db_type != DB_TYPE_UNKNOWN);
DBUG_ASSERT(part_info->default_engine_type != partition_hton);
-
- {
- /*
- This code part allocates memory for the serialised item information for
- the partition functions. In most cases this is not needed but if the
- table is used for SHOW CREATE TABLES or ALTER TABLE that modifies
- partition information it is needed and the info is lost if we don't
- save it here so unfortunately we have to do it here even if in most
- cases it is not needed. This is a consequence of that item trees are
- not serialisable.
- */
- uint part_func_len= part_info->part_func_len;
- uint subpart_func_len= part_info->subpart_func_len;
- char *part_func_string= NULL;
- char *subpart_func_string= NULL;
- if ((part_func_len &&
- !((part_func_string= (char*) thd->alloc(part_func_len)))) ||
- (subpart_func_len &&
- !((subpart_func_string= (char*) thd->alloc(subpart_func_len)))))
- {
- mem_alloc_error(part_func_len);
- thd->free_items();
- goto end;
- }
- if (part_func_len)
- memcpy(part_func_string, part_info->part_func_string, part_func_len);
- if (subpart_func_len)
- memcpy(subpart_func_string, part_info->subpart_func_string,
- subpart_func_len);
- part_info->part_func_string= part_func_string;
- part_info->subpart_func_string= subpart_func_string;
- }
-
result= FALSE;
end:
end_lex_with_single_table(thd, table, old_lex);
@@ -4709,13 +4431,31 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
DBUG_RETURN(TRUE);
}
+ partition_info *alt_part_info= thd->lex->part_info;
+ /*
+ This variable is TRUE in very special case when we add only DEFAULT
+ partition to the existing table
+ */
+ bool only_default_value_added=
+ (alt_part_info &&
+ alt_part_info->current_partition &&
+ alt_part_info->current_partition->list_val_list.elements == 1 &&
+ alt_part_info->current_partition->list_val_list.head()->
+ added_items >= 1 &&
+ alt_part_info->current_partition->list_val_list.head()->
+ col_val_array[0].max_value) &&
+ alt_part_info->part_type == LIST_PARTITION &&
+ (alter_info->flags & Alter_info::ALTER_ADD_PARTITION);
+ if (only_default_value_added &&
+ !thd->lex->part_info->num_columns)
+ thd->lex->part_info->num_columns= 1; // to make correct clone
+
/*
One of these is done in handle_if_exists_option():
thd->work_part_info= thd->lex->part_info;
or
thd->work_part_info= NULL;
*/
-
if (thd->work_part_info &&
!(thd->work_part_info= thd->work_part_info->get_clone(thd)))
DBUG_RETURN(TRUE);
@@ -4732,12 +4472,12 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
Alter_info::ALTER_REBUILD_PARTITION))
{
partition_info *tab_part_info;
- partition_info *alt_part_info= thd->work_part_info;
uint flags= 0;
bool is_last_partition_reorged= FALSE;
part_elem_value *tab_max_elem_val= NULL;
part_elem_value *alt_max_elem_val= NULL;
longlong tab_max_range= 0, alt_max_range= 0;
+ alt_part_info= thd->work_part_info;
if (!table->part_info)
{
@@ -4897,14 +4637,16 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
}
}
if ((tab_part_info->column_list &&
- alt_part_info->num_columns != tab_part_info->num_columns) ||
+ alt_part_info->num_columns != tab_part_info->num_columns &&
+ !only_default_value_added) ||
(!tab_part_info->column_list &&
(tab_part_info->part_type == RANGE_PARTITION ||
tab_part_info->part_type == LIST_PARTITION) &&
- alt_part_info->num_columns != 1U) ||
+ alt_part_info->num_columns != 1U &&
+ !only_default_value_added) ||
(!tab_part_info->column_list &&
tab_part_info->part_type == HASH_PARTITION &&
- alt_part_info->num_columns != 0))
+ (alt_part_info->num_columns != 0)))
{
my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
goto err;
@@ -4937,9 +4679,13 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
my_error(ER_NO_BINLOG_ERROR, MYF(0));
goto err;
}
- if (tab_part_info->defined_max_value)
+ if (tab_part_info->defined_max_value &&
+ (tab_part_info->part_type == RANGE_PARTITION ||
+ alt_part_info->defined_max_value))
{
- my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
+ my_error((tab_part_info->part_type == RANGE_PARTITION ?
+ ER_PARTITION_MAXVALUE_ERROR :
+ ER_PARTITION_DEFAULT_ERROR), MYF(0));
goto err;
}
if (num_new_partitions == 0)
@@ -4966,7 +4712,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
}
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
- if (alt_part_info->set_up_defaults_for_partitioning(table->file, 0,
+ if (alt_part_info->set_up_defaults_for_partitioning(thd, table->file, 0,
tab_part_info->num_parts))
{
goto err;
@@ -5158,7 +4904,7 @@ that are reorganised.
{
if (!alt_part_info->use_default_partitions)
{
- DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info));
+ DBUG_PRINT("info", ("part_info: %p", tab_part_info));
tab_part_info->use_default_partitions= FALSE;
}
tab_part_info->use_default_num_partitions= FALSE;
@@ -5385,7 +5131,8 @@ state of p1.
DBUG_ASSERT(!alt_part_info->use_default_partitions);
/* We specified partitions explicitly so don't use defaults anymore. */
tab_part_info->use_default_partitions= FALSE;
- if (alt_part_info->set_up_defaults_for_partitioning(table->file, 0, 0))
+ if (alt_part_info->set_up_defaults_for_partitioning(thd, table->file, 0,
+ 0))
{
goto err;
}
@@ -6626,7 +6373,8 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
}
}
/* Ensure the share is destroyed and reopened. */
- part_info= lpt->part_info->get_clone(thd);
+ if (part_info)
+ part_info= part_info->get_clone(thd);
close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
}
else
@@ -6644,7 +6392,8 @@ err_exclusive_lock:
the table cache.
*/
mysql_lock_remove(thd, thd->lock, table);
- part_info= lpt->part_info->get_clone(thd);
+ if (part_info)
+ part_info= part_info->get_clone(thd);
close_thread_table(thd, &thd->open_tables);
lpt->table_list->table= NULL;
}
@@ -7184,39 +6933,6 @@ err:
}
#endif
-
-/*
- Prepare for calling val_int on partition function by setting fields to
- point to the record where the values of the PF-fields are stored.
-
- SYNOPSIS
- set_field_ptr()
- ptr Array of fields to change ptr
- new_buf New record pointer
- old_buf Old record pointer
-
- DESCRIPTION
- Set ptr in field objects of field array to refer to new_buf record
- instead of previously old_buf. Used before calling val_int and after
- it is used to restore pointers to table->record[0].
- This routine is placed outside of partition code since it can be useful
- also for other programs.
-*/
-
-void set_field_ptr(Field **ptr, const uchar *new_buf,
- const uchar *old_buf)
-{
- my_ptrdiff_t diff= (new_buf - old_buf);
- DBUG_ENTER("set_field_ptr");
-
- do
- {
- (*ptr)->move_field_offset(diff);
- } while (*(++ptr));
- DBUG_VOID_RETURN;
-}
-
-
/*
Prepare for calling val_int on partition function by setting fields to
point to the record where the values of the PF-fields are stored.
@@ -7695,8 +7411,10 @@ int get_part_iter_for_interval_cols_via_map(partition_info *part_info,
uint flags,
PARTITION_ITERATOR *part_iter)
{
+ bool can_match_multiple_values;
uint32 nparts;
get_col_endpoint_func UNINIT_VAR(get_col_endpoint);
+ uint full_length= 0;
DBUG_ENTER("get_part_iter_for_interval_cols_via_map");
if (part_info->part_type == RANGE_PARTITION)
@@ -7706,6 +7424,9 @@ int get_part_iter_for_interval_cols_via_map(partition_info *part_info,
}
else if (part_info->part_type == LIST_PARTITION)
{
+ if (part_info->has_default_partititon() &&
+ part_info->num_parts == 1)
+ DBUG_RETURN(-1); //only DEFAULT partition
get_col_endpoint= get_partition_id_cols_list_for_endpoint;
part_iter->get_next= get_next_partition_id_list;
part_iter->part_info= part_info;
@@ -7714,6 +7435,19 @@ int get_part_iter_for_interval_cols_via_map(partition_info *part_info,
else
assert(0);
+ for (uint32 i= 0; i < part_info->num_columns; i++)
+ full_length+= store_length_array[i];
+
+ can_match_multiple_values= ((flags &
+ (NO_MIN_RANGE | NO_MAX_RANGE | NEAR_MIN |
+ NEAR_MAX)) ||
+ (min_len != max_len) ||
+ (min_len != full_length) ||
+ memcmp(min_value, max_value, min_len));
+ DBUG_ASSERT(can_match_multiple_values || (flags & EQ_RANGE) || flags == 0);
+ if (can_match_multiple_values && part_info->has_default_partititon())
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= TRUE;
+
if (flags & NO_MIN_RANGE)
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
else
@@ -7749,7 +7483,15 @@ int get_part_iter_for_interval_cols_via_map(partition_info *part_info,
nparts);
}
if (part_iter->part_nums.start == part_iter->part_nums.end)
+ {
+ // No matching partition found.
+ if (part_info->has_default_partititon())
+ {
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= TRUE;
+ DBUG_RETURN(1);
+ }
DBUG_RETURN(0);
+ }
DBUG_RETURN(1);
}
@@ -7810,6 +7552,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
(void)min_len;
(void)max_len;
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= FALSE;
if (part_info->part_type == RANGE_PARTITION)
{
@@ -7846,8 +7589,13 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
else
MY_ASSERT_UNREACHABLE();
- can_match_multiple_values= (flags || !min_value || !max_value ||
+ can_match_multiple_values= ((flags &
+ (NO_MIN_RANGE | NO_MAX_RANGE | NEAR_MIN |
+ NEAR_MAX)) ||
memcmp(min_value, max_value, field_len));
+ DBUG_ASSERT(can_match_multiple_values || (flags & EQ_RANGE) || flags == 0);
+ if (can_match_multiple_values && part_info->has_default_partititon())
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= TRUE;
if (can_match_multiple_values &&
(part_info->part_type == RANGE_PARTITION ||
part_info->has_null_value))
@@ -7877,6 +7625,12 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
{
/* The right bound is X <= NULL, i.e. it is a "X IS NULL" interval */
part_iter->part_nums.end= 0;
+ /*
+        This is a query like "SELECT * FROM tbl WHERE col IS NULL",
+        and a partition exists to catch NULL values, so we do not need
+        the DEFAULT partition here.
+ */
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= FALSE;
DBUG_RETURN(1);
}
}
@@ -7900,8 +7654,19 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
/* col = x and F(x) = NULL -> only search NULL partition */
part_iter->part_nums.cur= part_iter->part_nums.start= 0;
part_iter->part_nums.end= 0;
- part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
- DBUG_RETURN(1);
+ /*
+      If a NULL partition exists:
+      for RANGE it is the first partition (and always exists);
+      for LIST, has_null_value indicates that it is present.
+ */
+ if (part_info->part_type == RANGE_PARTITION ||
+ part_info->has_null_value)
+ {
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
+ DBUG_RETURN(1);
+ }
+      // No NULL partition: look the value up in DEFAULT, or it does not exist
+ goto not_found;
}
part_iter->part_nums.cur= part_iter->part_nums.start;
if (check_zero_dates && !part_info->part_expr->null_value)
@@ -7918,7 +7683,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
}
}
if (part_iter->part_nums.start == max_endpoint_val)
- DBUG_RETURN(0); /* No partitions */
+ goto not_found;
}
}
@@ -7955,9 +7720,17 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
}
if (part_iter->part_nums.start >= part_iter->part_nums.end &&
!part_iter->ret_null_part)
- DBUG_RETURN(0); /* No partitions */
+ goto not_found;
}
DBUG_RETURN(1); /* Ok, iterator initialized */
+
+not_found:
+ if (part_info->has_default_partititon())
+ {
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= TRUE;
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0); /* No partitions */
}
@@ -8021,6 +7794,8 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
(void)max_len;
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
+ part_iter->ret_default_part= part_iter->ret_default_part_orig= FALSE;
+
if (is_subpart)
{
field= part_info->subpart_field_array[0];
@@ -8152,6 +7927,9 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
part_iter->ret_null_part= FALSE;
return 0; /* NULL always in first range partition */
}
+  // There is no DEFAULT partition in RANGE partitioning
+ DBUG_ASSERT(!part_iter->ret_default_part);
+
part_iter->part_nums.cur= part_iter->part_nums.start;
part_iter->ret_null_part= part_iter->ret_null_part_orig;
return NOT_A_PARTITION_ID;
@@ -8189,8 +7967,15 @@ uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
part_iter->ret_null_part= FALSE;
return part_iter->part_info->has_null_part_id;
}
+ if (part_iter->ret_default_part)
+ {
+ part_iter->ret_default_part= FALSE;
+ return part_iter->part_info->default_partition_id;
+ }
+ /* Reset partition for next read */
part_iter->part_nums.cur= part_iter->part_nums.start;
part_iter->ret_null_part= part_iter->ret_null_part_orig;
+ part_iter->ret_default_part= part_iter->ret_default_part_orig;
return NOT_A_PARTITION_ID;
}
else
@@ -8311,13 +8096,10 @@ int create_partition_name(char *out, size_t outlen, const char *in1,
end= strxnmov(out, outlen-1, in1, "#P#", transl_part, NullS);
else if (name_variant == TEMP_PART_NAME)
end= strxnmov(out, outlen-1, in1, "#P#", transl_part, "#TMP#", NullS);
- else if (name_variant == RENAMED_PART_NAME)
- end= strxnmov(out, outlen-1, in1, "#P#", transl_part, "#REN#", NullS);
else
{
- DBUG_ASSERT(0);
- out[0]= 0;
- end= out + (outlen-1);
+ DBUG_ASSERT(name_variant == RENAMED_PART_NAME);
+ end= strxnmov(out, outlen-1, in1, "#P#", transl_part, "#REN#", NullS);
}
if (end - out == static_cast<ptrdiff_t>(outlen-1))
{
@@ -8358,14 +8140,11 @@ int create_subpartition_name(char *out, size_t outlen,
else if (name_variant == TEMP_PART_NAME)
end= strxnmov(out, outlen-1, in1, "#P#", transl_part_name,
"#SP#", transl_subpart_name, "#TMP#", NullS);
- else if (name_variant == RENAMED_PART_NAME)
- end= strxnmov(out, outlen-1, in1, "#P#", transl_part_name,
- "#SP#", transl_subpart_name, "#REN#", NullS);
else
{
- DBUG_ASSERT(0);
- out[0]= 0;
- end= out + (outlen-1);
+ DBUG_ASSERT(name_variant == RENAMED_PART_NAME);
+ end= strxnmov(out, outlen-1, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, "#REN#", NullS);
}
if (end - out == static_cast<ptrdiff_t>(outlen-1))
{
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
index 6629537b2ae..d1eb208cbd1 100644
--- a/sql/sql_partition.h
+++ b/sql/sql_partition.h
@@ -178,6 +178,10 @@ typedef struct st_partition_iter
iterator also produce id of the partition that contains NULL value.
*/
bool ret_null_part, ret_null_part_orig;
+ /*
+ We should return DEFAULT partition.
+ */
+ bool ret_default_part, ret_default_part_orig;
struct st_part_num_range
{
uint32 start;
@@ -263,12 +267,15 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
Alter_table_ctx *alter_ctx,
bool *partition_changed,
bool *fast_alter_table);
-char *generate_partition_syntax(partition_info *part_info,
- uint *buf_length, bool use_sql_alloc,
+char *generate_partition_syntax(THD *thd, partition_info *part_info,
+ uint *buf_length,
bool show_partition_options,
HA_CREATE_INFO *create_info,
- Alter_info *alter_info,
- const char *current_comment_start);
+ Alter_info *alter_info);
+char *generate_partition_syntax_for_frm(THD *thd, partition_info *part_info,
+ uint *buf_length,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info);
bool verify_data_with_partition(TABLE *table, TABLE *part_table,
uint32 part_id);
bool compare_partition_options(HA_CREATE_INFO *table_create_info,
@@ -285,10 +292,7 @@ int __attribute__((warn_unused_result))
create_subpartition_name(char *out, size_t outlen, const char *in1, const
char *in2, const char *in3, uint name_variant);
-void set_field_ptr(Field **ptr, const uchar *new_buf, const uchar *old_buf);
void set_key_field_ptr(KEY *key_info, const uchar *new_buf,
const uchar *old_buf);
-extern const LEX_STRING partition_keywords[];
-
#endif /* SQL_PARTITION_INCLUDED */
diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc
index d2fdee934ce..7b2a2c24eff 100644
--- a/sql/sql_partition_admin.cc
+++ b/sql/sql_partition_admin.cc
@@ -1,5 +1,6 @@
/* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2014, SkySQL Ab.
+ Copyright (c) 2016, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -88,8 +89,14 @@ bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd)
/* Not allowed with EXCHANGE PARTITION */
DBUG_ASSERT(!create_info.data_file_name && !create_info.index_file_name);
+ WSREP_TO_ISOLATION_BEGIN_WRTCHK(NULL, NULL, first_table);
DBUG_RETURN(exchange_partition(thd, first_table, &alter_info));
+#ifdef WITH_WSREP
+ wsrep_error_label:
+ /* handle errors in TO_ISOLATION here */
+ DBUG_RETURN(true);
+#endif /* WITH_WSREP */
}
@@ -370,17 +377,13 @@ static bool exchange_name_with_ddl_log(THD *thd,
*/
/* call rename table from table to tmp-name */
DBUG_EXECUTE_IF("exchange_partition_fail_3",
- my_error(ER_ERROR_ON_RENAME, MYF(0),
- name, tmp_name, 0, "n/a");
+ my_error(ER_ERROR_ON_RENAME, MYF(0), name, tmp_name, 0);
error_set= TRUE;
goto err_rename;);
DBUG_EXECUTE_IF("exchange_partition_abort_3", DBUG_SUICIDE(););
if (file->ha_rename_table(name, tmp_name))
{
- char errbuf[MYSYS_STRERROR_SIZE];
- my_strerror(errbuf, sizeof(errbuf), my_errno);
- my_error(ER_ERROR_ON_RENAME, MYF(0), name, tmp_name,
- my_errno, errbuf);
+ my_error(ER_ERROR_ON_RENAME, MYF(0), name, tmp_name, my_errno);
error_set= TRUE;
goto err_rename;
}
@@ -391,17 +394,13 @@ static bool exchange_name_with_ddl_log(THD *thd,
/* call rename table from partition to table */
DBUG_EXECUTE_IF("exchange_partition_fail_5",
- my_error(ER_ERROR_ON_RENAME, MYF(0),
- from_name, name, 0, "n/a");
+ my_error(ER_ERROR_ON_RENAME, MYF(0), from_name, name, 0);
error_set= TRUE;
goto err_rename;);
DBUG_EXECUTE_IF("exchange_partition_abort_5", DBUG_SUICIDE(););
if (file->ha_rename_table(from_name, name))
{
- char errbuf[MYSYS_STRERROR_SIZE];
- my_strerror(errbuf, sizeof(errbuf), my_errno);
- my_error(ER_ERROR_ON_RENAME, MYF(0), from_name, name,
- my_errno, errbuf);
+ my_error(ER_ERROR_ON_RENAME, MYF(0), from_name, name, my_errno);
error_set= TRUE;
goto err_rename;
}
@@ -412,17 +411,13 @@ static bool exchange_name_with_ddl_log(THD *thd,
/* call rename table from tmp-nam to partition */
DBUG_EXECUTE_IF("exchange_partition_fail_7",
- my_error(ER_ERROR_ON_RENAME, MYF(0),
- tmp_name, from_name, 0, "n/a");
+ my_error(ER_ERROR_ON_RENAME, MYF(0), tmp_name, from_name, 0);
error_set= TRUE;
goto err_rename;);
DBUG_EXECUTE_IF("exchange_partition_abort_7", DBUG_SUICIDE(););
if (file->ha_rename_table(tmp_name, from_name))
{
- char errbuf[MYSYS_STRERROR_SIZE];
- my_strerror(errbuf, sizeof(errbuf), my_errno);
- my_error(ER_ERROR_ON_RENAME, MYF(0), tmp_name, from_name,
- my_errno, errbuf);
+ my_error(ER_ERROR_ON_RENAME, MYF(0), tmp_name, from_name, my_errno);
error_set= TRUE;
goto err_rename;
}
@@ -532,24 +527,6 @@ bool Sql_cmd_alter_table_exchange_partition::
&alter_prelocking_strategy))
DBUG_RETURN(true);
-#ifdef WITH_WSREP
- if (WSREP_ON)
- {
- /* Forward declaration */
- TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl);
-
- if ((!thd->is_current_stmt_binlog_format_row() ||
- /* TODO: Do we really need to check for temp tables in this case? */
- !find_temporary_table(thd, table_list)) &&
- wsrep_to_isolation_begin(thd, table_list->db, table_list->table_name,
- NULL))
- {
- WSREP_WARN("ALTER TABLE EXCHANGE PARTITION isolation failure");
- DBUG_RETURN(TRUE);
- }
- }
-#endif /* WITH_WSREP */
-
part_table= table_list->table;
swap_table= swap_table_list->table;
@@ -785,11 +762,9 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd)
DBUG_RETURN(TRUE);
#ifdef WITH_WSREP
- /* Forward declaration */
- TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl);
-
- if (WSREP(thd) && (!thd->is_current_stmt_binlog_format_row() ||
- !find_temporary_table(thd, first_table)) &&
+ if (WSREP(thd) &&
+ (!thd->is_current_stmt_binlog_format_row() ||
+ !thd->find_temporary_table(first_table)) &&
wsrep_to_isolation_begin(
thd, first_table->db, first_table->table_name, NULL)
)
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 1000fc3711a..da83eae8f0a 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -268,6 +268,7 @@ struct st_bookmark
uint name_len;
int offset;
uint version;
+ bool loaded;
char key[1];
};
@@ -777,7 +778,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
if (global_system_variables.log_warnings > 2)
{
struct link_map *lm = (struct link_map*) plugin_dl.handle;
- sql_print_information("Loaded '%s' with offset 0x%lx", dl->str, lm->l_addr);
+ sql_print_information("Loaded '%s' with offset 0x%zx", dl->str, (size_t)lm->l_addr);
}
#endif
@@ -980,8 +981,8 @@ static plugin_ref intern_plugin_lock(LEX *lex, plugin_ref rc,
*plugin= pi;
#endif
pi->ref_count++;
- DBUG_PRINT("lock",("thd: 0x%lx plugin: \"%s\" LOCK ref_count: %d",
- (long) current_thd, pi->name.str, pi->ref_count));
+ DBUG_PRINT("lock",("thd: %p plugin: \"%s\" LOCK ref_count: %d",
+ current_thd, pi->name.str, pi->ref_count));
if (lex)
insert_dynamic(&lex->plugins, (uchar*)&plugin);
@@ -1177,6 +1178,13 @@ err:
DBUG_RETURN(errs > 0 || oks + dupes == 0);
}
+static void plugin_variables_deinit(struct st_plugin_int *plugin)
+{
+
+ for (sys_var *var= plugin->system_vars; var; var= var->next)
+ (*var->test_load)= FALSE;
+ mysql_del_sys_var_chain(plugin->system_vars);
+}
static void plugin_deinitialize(struct st_plugin_int *plugin, bool ref_check)
{
@@ -1228,8 +1236,7 @@ static void plugin_deinitialize(struct st_plugin_int *plugin, bool ref_check)
if (ref_check && plugin->ref_count)
sql_print_error("Plugin '%s' has ref_count=%d after deinitialization.",
plugin->name.str, plugin->ref_count);
-
- mysql_del_sys_var_chain(plugin->system_vars);
+ plugin_variables_deinit(plugin);
}
static void plugin_del(struct st_plugin_int *plugin)
@@ -1334,8 +1341,8 @@ static void intern_plugin_unlock(LEX *lex, plugin_ref plugin)
DBUG_ASSERT(pi->ref_count);
pi->ref_count--;
- DBUG_PRINT("lock",("thd: 0x%lx plugin: \"%s\" UNLOCK ref_count: %d",
- (long) current_thd, pi->name.str, pi->ref_count));
+ DBUG_PRINT("lock",("thd: %p plugin: \"%s\" UNLOCK ref_count: %d",
+ current_thd, pi->name.str, pi->ref_count));
if (pi->state == PLUGIN_IS_DELETED && !pi->ref_count)
reap_needed= true;
@@ -1392,10 +1399,10 @@ static int plugin_initialize(MEM_ROOT *tmp_root, struct st_plugin_int *plugin,
mysql_mutex_unlock(&LOCK_plugin);
- mysql_rwlock_wrlock(&LOCK_system_variables_hash);
+ mysql_prlock_wrlock(&LOCK_system_variables_hash);
if (test_plugin_options(tmp_root, plugin, argc, argv))
state= PLUGIN_IS_DISABLED;
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
if (options_only || state == PLUGIN_IS_DISABLED)
{
@@ -1448,7 +1455,7 @@ static int plugin_initialize(MEM_ROOT *tmp_root, struct st_plugin_int *plugin,
err:
if (ret)
- mysql_del_sys_var_chain(plugin->system_vars);
+ plugin_variables_deinit(plugin);
mysql_mutex_lock(&LOCK_plugin);
plugin->state= state;
@@ -1541,19 +1548,23 @@ int plugin_init(int *argc, char **argv, int flags)
init_alloc_root(&plugin_vars_mem_root, 4096, 4096, MYF(0));
init_alloc_root(&tmp_root, 4096, 4096, MYF(0));
- if (my_hash_init(&bookmark_hash, &my_charset_bin, 16, 0, 0,
+ if (my_hash_init(&bookmark_hash, &my_charset_bin, 32, 0, 0,
get_bookmark_hash_key, NULL, HASH_UNIQUE))
goto err;
+ /*
+    The initial size of 80 dates from 2016-04-27, when there were 71
+    default plugins; big enough to avoid many mallocs even in the future.
+ */
if (my_init_dynamic_array(&plugin_dl_array,
sizeof(struct st_plugin_dl *), 16, 16, MYF(0)) ||
my_init_dynamic_array(&plugin_array,
- sizeof(struct st_plugin_int *), 16, 16, MYF(0)))
+ sizeof(struct st_plugin_int *), 80, 32, MYF(0)))
goto err;
for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
{
- if (my_hash_init(&plugin_hash[i], system_charset_info, 16, 0, 0,
+ if (my_hash_init(&plugin_hash[i], system_charset_info, 32, 0, 0,
get_plugin_hash_key, NULL, HASH_UNIQUE))
goto err;
}
@@ -1765,7 +1776,7 @@ static void plugin_load(MEM_ROOT *tmp_root)
TABLE *table;
READ_RECORD read_record_info;
int error;
- THD *new_thd= new THD;
+ THD *new_thd= new THD(0);
bool result;
DBUG_ENTER("plugin_load");
@@ -1796,7 +1807,8 @@ static void plugin_load(MEM_ROOT *tmp_root)
goto end;
}
- if (init_read_record(&read_record_info, new_thd, table, NULL, 1, 0, FALSE))
+ if (init_read_record(&read_record_info, new_thd, table, NULL, NULL, 1, 0,
+ FALSE))
{
sql_print_error("Could not initialize init_read_record; Plugins not "
"loaded");
@@ -2154,8 +2166,8 @@ bool mysql_install_plugin(THD *thd, const LEX_STRING *name,
See also mysql_uninstall_plugin() and initialize_audit_plugin()
*/
-
- mysql_audit_acquire_plugins(thd, event_class_mask);
+ if (mysql_audit_general_enabled())
+ mysql_audit_acquire_plugins(thd, event_class_mask);
mysql_mutex_lock(&LOCK_plugin);
error= plugin_add(thd->mem_root, name, &dl, REPORT_TO_USER);
@@ -2300,7 +2312,8 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name,
See also mysql_install_plugin() and initialize_audit_plugin()
*/
- mysql_audit_acquire_plugins(thd, event_class_mask);
+ if (mysql_audit_general_enabled())
+ mysql_audit_acquire_plugins(thd, event_class_mask);
mysql_mutex_lock(&LOCK_plugin);
@@ -2785,20 +2798,22 @@ static void update_func_double(THD *thd, struct st_mysql_sys_var *var,
System Variables support
****************************************************************************/
-
-sys_var *find_sys_var(THD *thd, const char *str, uint length)
+sys_var *find_sys_var_ex(THD *thd, const char *str, size_t length,
+ bool throw_error, bool locked)
{
sys_var *var;
sys_var_pluginvar *pi= NULL;
plugin_ref plugin;
- DBUG_ENTER("find_sys_var");
+ DBUG_ENTER("find_sys_var_ex");
+ DBUG_PRINT("enter", ("var '%.*s'", (int)length, str));
- mysql_mutex_lock(&LOCK_plugin);
- mysql_rwlock_rdlock(&LOCK_system_variables_hash);
+ if (!locked)
+ mysql_mutex_lock(&LOCK_plugin);
+ mysql_prlock_rdlock(&LOCK_system_variables_hash);
if ((var= intern_find_sys_var(str, length)) &&
(pi= var->cast_pluginvar()))
{
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
LEX *lex= thd ? thd->lex : 0;
if (!(plugin= intern_plugin_lock(lex, plugin_int_to_ref(pi->plugin))))
var= NULL; /* failed to lock it, it must be uninstalling */
@@ -2811,15 +2826,21 @@ sys_var *find_sys_var(THD *thd, const char *str, uint length)
}
}
else
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
- mysql_mutex_unlock(&LOCK_plugin);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
+ if (!locked)
+ mysql_mutex_unlock(&LOCK_plugin);
- if (!var)
- my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (char*) str);
+ if (!throw_error && !var)
+ my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (int)length, (char*) str);
DBUG_RETURN(var);
}
+sys_var *find_sys_var(THD *thd, const char *str, size_t length)
+{
+ return find_sys_var_ex(thd, str, length, false, false);
+}
+
/*
called by register_var, construct_options and test_plugin_options.
Returns the 'bookmark' for the named variable.
@@ -3034,9 +3055,9 @@ static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock)
if (!thd->variables.dynamic_variables_ptr ||
(uint)offset > thd->variables.dynamic_variables_head)
{
- mysql_rwlock_rdlock(&LOCK_system_variables_hash);
+ mysql_prlock_rdlock(&LOCK_system_variables_hash);
sync_dynamic_session_variables(thd, global_lock);
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
}
DBUG_RETURN((uchar*)thd->variables.dynamic_variables_ptr + offset);
}
@@ -3151,7 +3172,7 @@ static void cleanup_variables(struct system_variables *vars)
st_bookmark *v;
uint idx;
- mysql_rwlock_rdlock(&LOCK_system_variables_hash);
+ mysql_prlock_rdlock(&LOCK_system_variables_hash);
for (idx= 0; idx < bookmark_hash.records; idx++)
{
v= (st_bookmark*) my_hash_element(&bookmark_hash, idx);
@@ -3170,7 +3191,7 @@ static void cleanup_variables(struct system_variables *vars)
*ptr= NULL;
}
}
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
DBUG_ASSERT(vars->table_plugin == NULL);
DBUG_ASSERT(vars->tmp_table_plugin == NULL);
@@ -3235,8 +3256,8 @@ static void plugin_vars_free_values(sys_var *vars)
{
/* Free the string from global_system_variables. */
char **valptr= (char**) piv->real_value_ptr(NULL, OPT_GLOBAL);
- DBUG_PRINT("plugin", ("freeing value for: '%s' addr: 0x%lx",
- var->name.str, (long) valptr));
+ DBUG_PRINT("plugin", ("freeing value for: '%s' addr: %p",
+ var->name.str, valptr));
my_free(*valptr);
*valptr= NULL;
}
@@ -3299,14 +3320,14 @@ uchar* sys_var_pluginvar::real_value_ptr(THD *thd, enum_var_type type)
{
switch (plugin_var->flags & PLUGIN_VAR_TYPEMASK) {
case PLUGIN_VAR_BOOL:
- thd->sys_var_tmp.my_bool_value= option.def_value;
+ thd->sys_var_tmp.my_bool_value= (my_bool)option.def_value;
return (uchar*) &thd->sys_var_tmp.my_bool_value;
case PLUGIN_VAR_INT:
- thd->sys_var_tmp.int_value= option.def_value;
+ thd->sys_var_tmp.int_value= (int)option.def_value;
return (uchar*) &thd->sys_var_tmp.int_value;
case PLUGIN_VAR_LONG:
case PLUGIN_VAR_ENUM:
- thd->sys_var_tmp.long_value= option.def_value;
+ thd->sys_var_tmp.long_value= (long)option.def_value;
return (uchar*) &thd->sys_var_tmp.long_value;
case PLUGIN_VAR_LONGLONG:
case PLUGIN_VAR_SET:
@@ -3924,6 +3945,14 @@ my_bool mark_changed(int, const struct my_option *opt, char *)
}
/**
+  Always FALSE, used to mark global plugin variables as unloaded just to be
+  safe, because we currently have no way to know their true load state.
+
+ TODO: make correct mechanism for global plugin variables
+*/
+static bool static_unload= FALSE;
+
+/**
Create and register system variables supplied from the plugin and
assigns initial values from corresponding command line arguments.
@@ -4000,9 +4029,13 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
tmp_backup[tmp->nbackups++].save(&o->name);
if ((var= find_bookmark(tmp->name.str, o->name, o->flags)))
+ {
varname= var->key + 1;
+ var->loaded= TRUE;
+ }
else
{
+ var= NULL;
len= tmp->name.length + strlen(o->name) + 2;
varname= (char*) alloc_root(mem_root, len);
strxmov(varname, tmp->name.str, "-", o->name, NullS);
@@ -4010,6 +4043,9 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
convert_dash_to_underscore(varname, len-1);
}
v= new (mem_root) sys_var_pluginvar(&chain, varname, tmp, o);
+ v->test_load= (var ? &var->loaded : &static_unload);
+ DBUG_ASSERT(static_unload == FALSE);
+
if (!(o->flags & PLUGIN_VAR_NOCMDOPT))
{
// update app_type, used for I_S.SYSTEM_VARIABLES
@@ -4210,10 +4246,10 @@ int thd_key_create(MYSQL_THD_KEY_T *key)
PLUGIN_VAR_NOSYSVAR | PLUGIN_VAR_NOCMDOPT;
char namebuf[256];
snprintf(namebuf, sizeof(namebuf), "%u", thd_key_no++);
- mysql_rwlock_wrlock(&LOCK_system_variables_hash);
+ mysql_prlock_wrlock(&LOCK_system_variables_hash);
// non-letters in the name as an extra safety
st_bookmark *bookmark= register_var("\a\v\a\t\a\r", namebuf, flags);
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
if (bookmark)
{
*key= bookmark->offset;
diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h
index d11c449962a..7b89246a9f9 100644
--- a/sql/sql_plugin.h
+++ b/sql/sql_plugin.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2005, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009, 2012, Monty Program Ab
+ Copyright (c) 2009, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -53,9 +53,9 @@ extern ulong dlopen_count;
/*
the following flags are valid for plugin_init()
*/
-#define PLUGIN_INIT_SKIP_DYNAMIC_LOADING 1
-#define PLUGIN_INIT_SKIP_PLUGIN_TABLE 2
-#define PLUGIN_INIT_SKIP_INITIALIZATION 4
+#define PLUGIN_INIT_SKIP_DYNAMIC_LOADING 1U
+#define PLUGIN_INIT_SKIP_PLUGIN_TABLE 2U
+#define PLUGIN_INIT_SKIP_INITIALIZATION 4U
#define INITIAL_LEX_PLUGIN_LIST_SIZE 16
@@ -71,12 +71,12 @@ typedef struct st_mysql_show_var SHOW_VAR;
It's a bitmap, because it makes it easier to test
"whether the state is one of those..."
*/
-#define PLUGIN_IS_FREED 1
-#define PLUGIN_IS_DELETED 2
-#define PLUGIN_IS_UNINITIALIZED 4
-#define PLUGIN_IS_READY 8
-#define PLUGIN_IS_DYING 16
-#define PLUGIN_IS_DISABLED 32
+#define PLUGIN_IS_FREED 1U
+#define PLUGIN_IS_DELETED 2U
+#define PLUGIN_IS_UNINITIALIZED 4U
+#define PLUGIN_IS_READY 8U
+#define PLUGIN_IS_DYING 16U
+#define PLUGIN_IS_DISABLED 32U
struct st_ptr_backup {
void **ptr;
@@ -120,6 +120,8 @@ struct st_plugin_int
};
+extern mysql_mutex_t LOCK_plugin;
+
/*
See intern_plugin_lock() for the explanation for the
conditionally defined plugin_ref type
@@ -193,6 +195,9 @@ extern void sync_dynamic_session_variables(THD* thd, bool global_lock);
extern bool plugin_dl_foreach(THD *thd, const LEX_STRING *dl,
plugin_foreach_func *func, void *arg);
+sys_var *find_sys_var_ex(THD *thd, const char *str, size_t length,
+ bool throw_error, bool locked);
+
extern void sync_dynamic_session_variables(THD* thd, bool global_lock);
#endif
diff --git a/sql/sql_plugin_services.ic b/sql/sql_plugin_services.ic
index 427d8937c57..e7de45b5ee2 100644
--- a/sql/sql_plugin_services.ic
+++ b/sql/sql_plugin_services.ic
@@ -125,12 +125,12 @@ static struct thd_rnd_service_st thd_rnd_handler= {
};
static struct base64_service_st base64_handler= {
- base64_needed_encoded_length,
- base64_encode_max_arg_length,
- base64_needed_decoded_length,
- base64_decode_max_arg_length,
- base64_encode,
- base64_decode
+ my_base64_needed_encoded_length,
+ my_base64_encode_max_arg_length,
+ my_base64_needed_decoded_length,
+ my_base64_decode_max_arg_length,
+ my_base64_encode,
+ my_base64_decode
};
static struct thd_error_context_service_st thd_error_context_handler= {
@@ -178,10 +178,13 @@ static struct wsrep_service_st wsrep_handler = {
wsrep_thd_trx_seqno,
wsrep_thd_ws_handle,
wsrep_thd_auto_increment_variables,
+ wsrep_set_load_multi_commit,
+ wsrep_is_load_multi_commit,
wsrep_trx_is_aborting,
wsrep_trx_order_before,
wsrep_unlock_rollback,
- wsrep_set_data_home_dir
+ wsrep_set_data_home_dir,
+ wsrep_thd_is_applier
};
static struct thd_specifics_service_st thd_specifics_handler=
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 9de2f7f34a5..fa9fc6fb9f8 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2002, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2017, MariaDB
+ Copyright (c) 2008, 2019, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -102,6 +102,7 @@ When one supplies long data for a placeholder:
#include "sql_acl.h" // *_ACL
#include "sql_derived.h" // mysql_derived_prepare,
// mysql_handle_derived
+#include "sql_cte.h"
#include "sql_cursor.h"
#include "sql_show.h"
#include "sql_repl.h"
@@ -161,19 +162,27 @@ public:
Select_fetch_protocol_binary result;
Item_param **param_array;
Server_side_cursor *cursor;
+ uchar *packet;
+ uchar *packet_end;
uint param_count;
uint last_errno;
uint flags;
char last_error[MYSQL_ERRMSG_SIZE];
+ my_bool iterations;
+ my_bool start_param;
+ my_bool read_types;
#ifndef EMBEDDED_LIBRARY
bool (*set_params)(Prepared_statement *st, uchar *data, uchar *data_end,
uchar *read_pos, String *expanded_query);
+ bool (*set_bulk_params)(Prepared_statement *st,
+ uchar **read_pos, uchar *data_end, bool reset);
#else
bool (*set_params_data)(Prepared_statement *st, String *expanded_query);
+ /*TODO: add bulk support for builtin server */
#endif
- bool (*set_params_from_vars)(Prepared_statement *stmt,
- List<LEX_STRING>& varnames,
- String *expanded_query);
+ bool (*set_params_from_actual_params)(Prepared_statement *stmt,
+ List<Item> &list,
+ String *expanded_query);
public:
Prepared_statement(THD *thd_arg);
virtual ~Prepared_statement();
@@ -189,20 +198,28 @@ public:
bool execute_loop(String *expanded_query,
bool open_cursor,
uchar *packet_arg, uchar *packet_end_arg);
+ bool execute_bulk_loop(String *expanded_query,
+ bool open_cursor,
+ uchar *packet_arg, uchar *packet_end_arg);
bool execute_server_runnable(Server_runnable *server_runnable);
+ my_bool set_bulk_parameters(bool reset);
+ bool bulk_iterations() { return iterations; };
/* Destroy this statement */
void deallocate();
+ bool execute_immediate(const char *query, uint query_length);
private:
/**
The memory root to allocate parsed tree elements (instances of Item,
SELECT_LEX and other classes).
*/
MEM_ROOT main_mem_root;
+ sql_mode_t m_sql_mode;
private:
bool set_db(const char *db, uint db_length);
bool set_parameters(String *expanded_query,
uchar *packet, uchar *packet_end);
bool execute(String *expanded_query, bool open_cursor);
+ void deallocate_immediate();
bool reprepare();
bool validate_metadata(Prepared_statement *copy);
void swap_prepared_statement(Prepared_statement *copy);
@@ -262,7 +279,7 @@ protected:
virtual bool send_ok(uint server_status, uint statement_warn_count,
ulonglong affected_rows, ulonglong last_insert_id,
- const char *message);
+ const char *message, bool skip_flush);
virtual bool send_eof(uint server_status, uint statement_warn_count);
virtual bool send_error(uint sql_errno, const char *err_msg, const char* sqlstate);
@@ -312,8 +329,14 @@ find_prepared_statement(THD *thd, ulong id)
To strictly separate namespaces of SQL prepared statements and C API
prepared statements find() will return 0 if there is a named prepared
statement with such id.
+
+ LAST_STMT_ID is special value which mean last prepared statement ID
+ (it was made for COM_MULTI to allow prepare and execute a statement
+ in the same command but usage is not limited by COM_MULTI only).
*/
- Statement *stmt= thd->stmt_map.find(id);
+ Statement *stmt= ((id == LAST_STMT_ID) ?
+ thd->last_stmt :
+ thd->stmt_map.find(id));
if (stmt == 0 || stmt->type() != Query_arena::PREPARED_STATEMENT)
return NULL;
@@ -404,7 +427,7 @@ static bool send_prep_stmt(Prepared_statement *stmt,
static ulong get_param_length(uchar **packet, ulong len)
{
- reg1 uchar *pos= *packet;
+ uchar *pos= *packet;
if (len < 1)
return 0;
if (*pos < 251)
@@ -706,55 +729,35 @@ static void setup_one_conversion_function(THD *thd, Item_param *param,
switch (param_type) {
case MYSQL_TYPE_TINY:
param->set_param_func= set_param_tiny;
- param->item_type= Item::INT_ITEM;
- param->item_result_type= INT_RESULT;
break;
case MYSQL_TYPE_SHORT:
param->set_param_func= set_param_short;
- param->item_type= Item::INT_ITEM;
- param->item_result_type= INT_RESULT;
break;
case MYSQL_TYPE_LONG:
param->set_param_func= set_param_int32;
- param->item_type= Item::INT_ITEM;
- param->item_result_type= INT_RESULT;
break;
case MYSQL_TYPE_LONGLONG:
param->set_param_func= set_param_int64;
- param->item_type= Item::INT_ITEM;
- param->item_result_type= INT_RESULT;
break;
case MYSQL_TYPE_FLOAT:
param->set_param_func= set_param_float;
- param->item_type= Item::REAL_ITEM;
- param->item_result_type= REAL_RESULT;
break;
case MYSQL_TYPE_DOUBLE:
param->set_param_func= set_param_double;
- param->item_type= Item::REAL_ITEM;
- param->item_result_type= REAL_RESULT;
break;
case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_NEWDECIMAL:
param->set_param_func= set_param_decimal;
- param->item_type= Item::DECIMAL_ITEM;
- param->item_result_type= DECIMAL_RESULT;
break;
case MYSQL_TYPE_TIME:
param->set_param_func= set_param_time;
- param->item_type= Item::STRING_ITEM;
- param->item_result_type= STRING_RESULT;
break;
case MYSQL_TYPE_DATE:
param->set_param_func= set_param_date;
- param->item_type= Item::STRING_ITEM;
- param->item_result_type= STRING_RESULT;
break;
case MYSQL_TYPE_DATETIME:
case MYSQL_TYPE_TIMESTAMP:
param->set_param_func= set_param_datetime;
- param->item_type= Item::STRING_ITEM;
- param->item_result_type= STRING_RESULT;
break;
case MYSQL_TYPE_TINY_BLOB:
case MYSQL_TYPE_MEDIUM_BLOB:
@@ -766,8 +769,6 @@ static void setup_one_conversion_function(THD *thd, Item_param *param,
thd->variables.character_set_client;
DBUG_ASSERT(thd->variables.character_set_client);
param->value.cs_info.final_character_set_of_str_value= &my_charset_bin;
- param->item_type= Item::STRING_ITEM;
- param->item_result_type= STRING_RESULT;
break;
default:
/*
@@ -796,11 +797,9 @@ static void setup_one_conversion_function(THD *thd, Item_param *param,
Exact value of max_length is not known unless data is converted to
charset of connection, so we have to set it later.
*/
- param->item_type= Item::STRING_ITEM;
- param->item_result_type= STRING_RESULT;
}
}
- param->param_type= (enum enum_field_types) param_type;
+ param->set_handler_by_field_type((enum enum_field_types) param_type);
}
#ifndef EMBEDDED_LIBRARY
@@ -812,8 +811,10 @@ static void setup_one_conversion_function(THD *thd, Item_param *param,
*/
inline bool is_param_long_data_type(Item_param *param)
{
- return ((param->param_type >= MYSQL_TYPE_TINY_BLOB) &&
- (param->param_type <= MYSQL_TYPE_STRING));
+ enum_field_types field_type= param->field_type();
+ return (((field_type >= MYSQL_TYPE_TINY_BLOB) &&
+ (field_type <= MYSQL_TYPE_STRING)) ||
+ field_type == MYSQL_TYPE_VARCHAR);
}
@@ -868,7 +869,7 @@ static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array,
for (Item_param **it= begin; it < end; ++it)
{
Item_param *param= *it;
- if (param->state != Item_param::LONG_DATA_VALUE)
+ if (!param->has_long_data_value())
{
if (is_param_null(null_array, (uint) (it - begin)))
param->set_null();
@@ -877,13 +878,12 @@ static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array,
if (read_pos >= data_end)
DBUG_RETURN(1);
param->set_param_func(param, &read_pos, (uint) (data_end - read_pos));
- if (param->state == Item_param::NO_VALUE)
+ if (param->has_no_value())
DBUG_RETURN(1);
- if (param->limit_clause_param && param->state != Item_param::INT_VALUE)
+ if (param->limit_clause_param && !param->has_int_value())
{
param->set_int(param->val_int(), MY_INT64_NUM_DECIMAL_DIGITS);
- param->item_type= Item::INT_ITEM;
if (!param->unsigned_flag && param->value.integer < 0)
DBUG_RETURN(1);
}
@@ -923,7 +923,8 @@ static bool insert_params(Prepared_statement *stmt, uchar *null_array,
for (Item_param **it= begin; it < end; ++it)
{
Item_param *param= *it;
- if (param->state != Item_param::LONG_DATA_VALUE)
+ param->indicator= STMT_INDICATOR_NONE; // only for bulk parameters
+ if (!param->has_long_data_value())
{
if (is_param_null(null_array, (uint) (it - begin)))
param->set_null();
@@ -932,7 +933,7 @@ static bool insert_params(Prepared_statement *stmt, uchar *null_array,
if (read_pos >= data_end)
DBUG_RETURN(1);
param->set_param_func(param, &read_pos, (uint) (data_end - read_pos));
- if (param->state == Item_param::NO_VALUE)
+ if (param->has_no_value())
DBUG_RETURN(1);
}
}
@@ -946,41 +947,107 @@ static bool insert_params(Prepared_statement *stmt, uchar *null_array,
DBUG_RETURN(1);
if (param->convert_str_value(stmt->thd))
DBUG_RETURN(1); /* out of memory */
+ param->sync_clones();
+ }
+ DBUG_RETURN(0);
+}
+
+
+static bool insert_bulk_params(Prepared_statement *stmt,
+ uchar **read_pos, uchar *data_end,
+ bool reset)
+{
+ Item_param **begin= stmt->param_array;
+ Item_param **end= begin + stmt->param_count;
+
+ DBUG_ENTER("insert_params");
+
+ for (Item_param **it= begin; it < end; ++it)
+ {
+ Item_param *param= *it;
+ if (reset)
+ param->reset();
+ if (!param->has_long_data_value())
+ {
+ param->indicator= (enum_indicator_type) *((*read_pos)++);
+ if ((*read_pos) > data_end)
+ DBUG_RETURN(1);
+ switch (param->indicator)
+ {
+ case STMT_INDICATOR_NONE:
+ if ((*read_pos) >= data_end)
+ DBUG_RETURN(1);
+ param->set_param_func(param, read_pos, (uint) (data_end - (*read_pos)));
+ if (param->has_no_value())
+ DBUG_RETURN(1);
+ if (param->convert_str_value(stmt->thd))
+ DBUG_RETURN(1); /* out of memory */
+ break;
+ case STMT_INDICATOR_NULL:
+ param->set_null();
+ break;
+ case STMT_INDICATOR_DEFAULT:
+ param->set_default();
+ break;
+ case STMT_INDICATOR_IGNORE:
+ param->set_ignore();
+ break;
+ }
+ }
+ else
+ DBUG_RETURN(1); // long is not supported here
+ param->sync_clones();
}
DBUG_RETURN(0);
}
+static bool set_conversion_functions(Prepared_statement *stmt,
+ uchar **data, uchar *data_end)
+{
+ uchar *read_pos= *data;
+ const uint signed_bit= 1 << 15;
+ DBUG_ENTER("set_conversion_functions");
+ /*
+ First execute or types altered by the client, setup the
+ conversion routines for all parameters (one time)
+ */
+ Item_param **it= stmt->param_array;
+ Item_param **end= it + stmt->param_count;
+ THD *thd= stmt->thd;
+ for (; it < end; ++it)
+ {
+ ushort typecode;
+
+ if (read_pos >= data_end)
+ DBUG_RETURN(1);
+
+ typecode= sint2korr(read_pos);
+ read_pos+= 2;
+ (**it).unsigned_flag= MY_TEST(typecode & signed_bit);
+ setup_one_conversion_function(thd, *it, (uchar) (typecode & 0xff));
+ (*it)->sync_clones();
+ }
+ *data= read_pos;
+ DBUG_RETURN(0);
+}
+
static bool setup_conversion_functions(Prepared_statement *stmt,
- uchar **data, uchar *data_end)
+ uchar **data, uchar *data_end,
+ bool bulk_protocol= 0)
{
/* skip null bits */
- uchar *read_pos= *data + (stmt->param_count+7) / 8;
+ uchar *read_pos= *data;
+ if (!bulk_protocol)
+ read_pos+= (stmt->param_count+7) / 8;
DBUG_ENTER("setup_conversion_functions");
if (*read_pos++) //types supplied / first execute
{
- /*
- First execute or types altered by the client, setup the
- conversion routines for all parameters (one time)
- */
- Item_param **it= stmt->param_array;
- Item_param **end= it + stmt->param_count;
- THD *thd= stmt->thd;
- for (; it < end; ++it)
- {
- ushort typecode;
- const uint signed_bit= 1 << 15;
-
- if (read_pos >= data_end)
- DBUG_RETURN(1);
-
- typecode= sint2korr(read_pos);
- read_pos+= 2;
- (**it).unsigned_flag= MY_TEST(typecode & signed_bit);
- setup_one_conversion_function(thd, *it, (uchar) (typecode & ~signed_bit));
- }
+ *data= read_pos;
+ bool res= set_conversion_functions(stmt, data, data_end);
+ DBUG_RETURN(res);
}
*data= read_pos;
DBUG_RETURN(0);
@@ -988,6 +1055,8 @@ static bool setup_conversion_functions(Prepared_statement *stmt,
#else
+//TODO: support bulk parameters
+
/**
Embedded counterparts of parameter assignment routines.
@@ -1012,7 +1081,7 @@ static bool emb_insert_params(Prepared_statement *stmt, String *expanded_query)
{
Item_param *param= *it;
setup_one_conversion_function(thd, param, client_param->buffer_type);
- if (param->state != Item_param::LONG_DATA_VALUE)
+ if (!param->has_long_data_value())
{
if (*client_param->is_null)
param->set_null();
@@ -1024,9 +1093,10 @@ static bool emb_insert_params(Prepared_statement *stmt, String *expanded_query)
client_param->length ?
*client_param->length :
client_param->buffer_length);
- if (param->state == Item_param::NO_VALUE)
+ if (param->has_no_value())
DBUG_RETURN(1);
}
+ param->sync_clones();
}
if (param->convert_str_value(thd))
DBUG_RETURN(1); /* out of memory */
@@ -1048,7 +1118,7 @@ static bool emb_insert_params_with_log(Prepared_statement *stmt, String *query)
{
Item_param *param= *it;
setup_one_conversion_function(thd, param, client_param->buffer_type);
- if (param->state != Item_param::LONG_DATA_VALUE)
+ if (!param->has_long_data_value())
{
if (*client_param->is_null)
param->set_null();
@@ -1060,7 +1130,7 @@ static bool emb_insert_params_with_log(Prepared_statement *stmt, String *query)
client_param->length ?
*client_param->length :
client_param->buffer_length);
- if (param->state == Item_param::NO_VALUE)
+ if (param->has_no_value())
DBUG_RETURN(1);
}
}
@@ -1069,6 +1139,7 @@ static bool emb_insert_params_with_log(Prepared_statement *stmt, String *query)
if (param->convert_str_value(thd))
DBUG_RETURN(1); /* out of memory */
+ param->sync_clones();
}
if (acc.finalize())
DBUG_RETURN(1);
@@ -1124,7 +1195,11 @@ swap_parameter_array(Item_param **param_array_dst,
Item_param **end= param_array_dst + param_count;
for (; dst < end; ++src, ++dst)
+ {
(*dst)->set_param_type_and_swap_value(*src);
+ (*dst)->sync_clones();
+ (*src)->sync_clones();
+ }
}
@@ -1132,78 +1207,71 @@ swap_parameter_array(Item_param **param_array_dst,
Assign prepared statement parameters from user variables.
@param stmt Statement
- @param varnames List of variables. Caller must ensure that number
- of variables in the list is equal to number of statement
+ @param params A list of parameters. Caller must ensure that number
+ of parameters in the list is equal to number of statement
parameters
@param query Ignored
*/
-static bool insert_params_from_vars(Prepared_statement *stmt,
- List<LEX_STRING>& varnames,
- String *query __attribute__((unused)))
+static bool
+insert_params_from_actual_params(Prepared_statement *stmt,
+ List<Item> &params,
+ String *query __attribute__((unused)))
{
Item_param **begin= stmt->param_array;
Item_param **end= begin + stmt->param_count;
- user_var_entry *entry;
- LEX_STRING *varname;
- List_iterator<LEX_STRING> var_it(varnames);
- DBUG_ENTER("insert_params_from_vars");
+ List_iterator<Item> param_it(params);
+ DBUG_ENTER("insert_params_from_actual_params");
for (Item_param **it= begin; it < end; ++it)
{
Item_param *param= *it;
- varname= var_it++;
- entry= (user_var_entry*)my_hash_search(&stmt->thd->user_vars,
- (uchar*) varname->str,
- varname->length);
- if (param->set_from_user_var(stmt->thd, entry) ||
+ Item *ps_param= param_it++;
+ if (ps_param->save_in_param(stmt->thd, param) ||
param->convert_str_value(stmt->thd))
DBUG_RETURN(1);
+ param->sync_clones();
}
DBUG_RETURN(0);
}
/**
- Do the same as insert_params_from_vars but also construct query text for
- binary log.
+ Do the same as insert_params_from_actual_params
+ but also construct query text for binary log.
@param stmt Prepared statement
- @param varnames List of variables. Caller must ensure that number of
- variables in the list is equal to number of statement
+ @param params A list of parameters. Caller must ensure that number of
+ parameters in the list is equal to number of statement
parameters
@param query The query with parameter markers replaced with corresponding
user variables that were used to execute the query.
*/
-static bool insert_params_from_vars_with_log(Prepared_statement *stmt,
- List<LEX_STRING>& varnames,
- String *query)
+static bool
+insert_params_from_actual_params_with_log(Prepared_statement *stmt,
+ List<Item> &params,
+ String *query)
{
Item_param **begin= stmt->param_array;
Item_param **end= begin + stmt->param_count;
- user_var_entry *entry;
- LEX_STRING *varname;
- List_iterator<LEX_STRING> var_it(varnames);
+ List_iterator<Item> param_it(params);
THD *thd= stmt->thd;
Copy_query_with_rewrite acc(thd, stmt->query(), stmt->query_length(), query);
- DBUG_ENTER("insert_params_from_vars_with_log");
+ DBUG_ENTER("insert_params_from_actual_params_with_log");
for (Item_param **it= begin; it < end; ++it)
{
Item_param *param= *it;
- varname= var_it++;
-
- entry= (user_var_entry *) my_hash_search(&thd->user_vars, (uchar*)
- varname->str, varname->length);
+ Item *ps_param= param_it++;
/*
We have to call the setup_one_conversion_function() here to set
the parameter's members that might be needed further
(e.g. value.cs_info.character_set_client is used in the query_val_str()).
*/
- setup_one_conversion_function(thd, param, param->param_type);
- if (param->set_from_user_var(thd, entry))
+ setup_one_conversion_function(thd, param, param->field_type());
+ if (ps_param->save_in_param(thd, param))
DBUG_RETURN(1);
if (acc.append(param))
@@ -1211,6 +1279,8 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt,
if (param->convert_str_value(thd))
DBUG_RETURN(1);
+
+ param->sync_clones();
}
if (acc.finalize())
DBUG_RETURN(1);
@@ -1250,7 +1320,7 @@ static bool mysql_test_insert(Prepared_statement *stmt,
*/
if (table_list->lock_type != TL_WRITE_DELAYED)
{
- if (open_temporary_tables(thd, table_list))
+ if (thd->open_temporary_tables(table_list))
goto error;
}
@@ -1307,7 +1377,8 @@ static bool mysql_test_insert(Prepared_statement *stmt,
my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter);
goto error;
}
- if (setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, NULL, 0))
+ if (setup_fields(thd, Ref_ptr_array(),
+ *values, MARK_COLUMNS_NONE, 0, NULL, 0))
goto error;
}
}
@@ -1397,7 +1468,8 @@ static int mysql_test_update(Prepared_statement *stmt,
table_list->register_want_access(want_privilege);
#endif
thd->lex->select_lex.no_wrap_view_item= TRUE;
- res= setup_fields(thd, 0, select->item_list, MARK_COLUMNS_READ, 0, NULL, 0);
+ res= setup_fields(thd, Ref_ptr_array(),
+ select->item_list, MARK_COLUMNS_READ, 0, NULL, 0);
thd->lex->select_lex.no_wrap_view_item= FALSE;
if (res)
goto error;
@@ -1408,8 +1480,8 @@ static int mysql_test_update(Prepared_statement *stmt,
(SELECT_ACL & ~table_list->table->grant.privilege);
table_list->register_want_access(SELECT_ACL);
#endif
- if (setup_fields(thd, 0, stmt->lex->value_list, MARK_COLUMNS_NONE, 0, NULL,
- 0) ||
+ if (setup_fields(thd, Ref_ptr_array(),
+ stmt->lex->value_list, MARK_COLUMNS_NONE, 0, NULL, 0) ||
check_unique_table(thd, table_list))
goto error;
/* TODO: here we should send types of placeholders to the client. */
@@ -1455,7 +1527,7 @@ static bool mysql_test_delete(Prepared_statement *stmt,
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE");
goto error;
}
- if (!table_list->table || !table_list->table->created)
+ if (!table_list->table || !table_list->table->is_created())
{
my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0),
table_list->view_db.str, table_list->view_name.str);
@@ -1515,7 +1587,7 @@ static int mysql_test_select(Prepared_statement *stmt,
}
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
goto error;
thd->lex->used_tables= 0; // Updated by setup_fields
@@ -1577,9 +1649,10 @@ static bool mysql_test_do_fields(Prepared_statement *stmt,
DBUG_RETURN(TRUE);
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
DBUG_RETURN(TRUE);
- DBUG_RETURN(setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, NULL, 0));
+ DBUG_RETURN(setup_fields(thd, Ref_ptr_array(),
+ *values, MARK_COLUMNS_NONE, 0, NULL, 0));
}
@@ -1608,7 +1681,7 @@ static bool mysql_test_set_fields(Prepared_statement *stmt,
if ((tables &&
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) ||
open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
goto error;
while ((var= it++))
@@ -1645,7 +1718,8 @@ static bool mysql_test_call_fields(Prepared_statement *stmt,
if ((tables &&
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) ||
- open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL, DT_PREPARE))
+ open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
+ DT_INIT | DT_PREPARE))
goto err;
while ((item= it++))
@@ -1772,7 +1846,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt)
if (open_normal_and_derived_tables(stmt->thd, lex->query_tables,
MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
DBUG_RETURN(TRUE);
select_lex->context.resolve_in_select_list= TRUE;
@@ -1793,7 +1867,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt)
*/
if (open_normal_and_derived_tables(stmt->thd, lex->query_tables,
MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE))
+ DT_INIT | DT_PREPARE))
DBUG_RETURN(TRUE);
}
@@ -2026,14 +2100,14 @@ static bool mysql_test_create_view(Prepared_statement *stmt)
Since we can't pre-open temporary tables for SQLCOM_CREATE_VIEW,
(see mysql_create_view) we have to do it here instead.
*/
- if (open_temporary_tables(thd, tables))
+ if (thd->open_temporary_tables(tables))
goto err;
+ lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW;
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE))
+ DT_INIT | DT_PREPARE))
goto err;
- lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW;
res= select_like_stmt_test(stmt, 0, 0);
err:
@@ -2215,10 +2289,8 @@ static int mysql_test_handler_read(Prepared_statement *stmt,
if (!stmt->is_sql_prepare())
{
if (!lex->result && !(lex->result= new (stmt->mem_root) select_send(thd)))
- {
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(select_send));
DBUG_RETURN(1);
- }
+
if (send_prep_stmt(stmt, ha_table->fields.elements) ||
lex->result->send_result_set_metadata(ha_table->fields, Protocol::SEND_EOF) ||
thd->protocol->flush())
@@ -2269,6 +2341,9 @@ static bool check_prepared_statement(Prepared_statement *stmt)
if (tables)
thd->get_stmt_da()->opt_clear_warning_info(thd->query_id);
+ if (check_dependencies_in_with_clauses(thd->lex->with_clauses_list))
+ goto error;
+
if (sql_command_flags[sql_command] & CF_HA_CLOSE)
mysql_ha_rm_tables(thd, tables);
@@ -2278,7 +2353,7 @@ static bool check_prepared_statement(Prepared_statement *stmt)
*/
if (sql_command_flags[sql_command] & CF_PREOPEN_TMP_TABLES)
{
- if (open_temporary_tables(thd, tables))
+ if (thd->open_temporary_tables(tables))
goto error;
}
@@ -2455,6 +2530,7 @@ static bool check_prepared_statement(Prepared_statement *stmt)
case SQLCOM_ALTER_DB_UPGRADE:
case SQLCOM_CHECKSUM:
case SQLCOM_CREATE_USER:
+ case SQLCOM_ALTER_USER:
case SQLCOM_RENAME_USER:
case SQLCOM_DROP_USER:
case SQLCOM_CREATE_ROLE:
@@ -2603,7 +2679,10 @@ void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length)
{
/* Statement map deletes statement on erase */
thd->stmt_map.erase(stmt);
+ thd->clear_last_stmt();
}
+ else
+ thd->set_last_stmt(stmt);
thd->protocol= save_protocol;
@@ -2616,95 +2695,99 @@ end:
}
/**
- Get an SQL statement text from a user variable or from plain text.
+ Get an SQL statement from an item in lex->prepared_stmt_code.
- If the statement is plain text, just assign the
- pointers, otherwise allocate memory in thd->mem_root and copy
- the contents of the variable, possibly with character
- set conversion.
+ This function can return pointers to very different memory classes:
+ - a static string "NULL", if the item returned NULL
+ - the result of prepare_stmt_code->val_str(), if no conversion was needed
+ - a thd->mem_root allocated string with the result of
+ prepare_stmt_code->val_str() converted to @@collation_connection,
+ if conversion was needed
- @param[in] lex main lex
- @param[out] query_len length of the SQL statement (is set only
- in case of success)
+ The caller must dispose the result before the life cycle of "buffer" ends.
+ As soon as buffer's destructor is called, the value is not valid any more!
- @retval
- non-zero success
- @retval
- 0 in case of error (out of memory)
+ mysql_sql_stmt_prepare() and mysql_sql_stmt_execute_immediate()
+ call get_dynamic_sql_string() and then call respectively
+ Prepare_statement::prepare() and Prepare_statment::execute_immediate(),
+ who store the returned result into its permanent location using
+ alloc_query(). "buffer" is still not destructed at that time.
+
+ @param[out] dst the result is stored here
+ @param[inout] buffer
+
+ @retval false on success
+ @retval true on error (out of memory)
*/
-static const char *get_dynamic_sql_string(LEX *lex, uint *query_len)
+bool LEX::get_dynamic_sql_string(LEX_CSTRING *dst, String *buffer)
{
- THD *thd= lex->thd;
- char *query_str= 0;
+ if (prepared_stmt_code->fix_fields(thd, NULL) ||
+ prepared_stmt_code->check_cols(1))
+ return true;
- if (lex->prepared_stmt_code_is_varref)
+ const String *str= prepared_stmt_code->val_str(buffer);
+ if (prepared_stmt_code->null_value)
{
- /* This is PREPARE stmt FROM or EXECUTE IMMEDIATE @var. */
- String str;
- CHARSET_INFO *to_cs= thd->variables.collation_connection;
- bool needs_conversion;
- user_var_entry *entry;
- String *var_value= &str;
- uint32 unused, len;
/*
- Convert @var contents to string in connection character set. Although
- it is known that int/real/NULL value cannot be a valid query we still
- convert it for error messages to be uniform.
+ Prepare source was NULL, so we need to set "str" to
+ something reasonable to get a readable error message during parsing
*/
- if ((entry=
- (user_var_entry*)my_hash_search(&thd->user_vars,
- (uchar*)lex->prepared_stmt_code.str,
- lex->prepared_stmt_code.length))
- && entry->value)
- {
- bool is_var_null;
- var_value= entry->val_str(&is_var_null, &str, NOT_FIXED_DEC);
- /*
- NULL value of variable checked early as entry->value so here
- we can't get NULL in normal conditions
- */
- DBUG_ASSERT(!is_var_null);
- if (!var_value)
- goto end;
- }
- else
- {
- /*
- variable absent or equal to NULL, so we need to set variable to
- something reasonable to get a readable error message during parsing
- */
- str.set(STRING_WITH_LEN("NULL"), &my_charset_latin1);
- }
-
- needs_conversion= String::needs_conversion(var_value->length(),
- var_value->charset(), to_cs,
- &unused);
+ dst->str= "NULL";
+ dst->length= 4;
+ return false;
+ }
- len= (needs_conversion ? var_value->length() * to_cs->mbmaxlen :
- var_value->length());
- if (!(query_str= (char*) alloc_root(thd->mem_root, len+1)))
- goto end;
+ /*
+ Character set conversion notes:
+
+ 1) When PREPARE or EXECUTE IMMEDIATE are used with string literals:
+ PREPARE stmt FROM 'SELECT ''str''';
+ EXECUTE IMMEDIATE 'SELECT ''str''';
+ it's very unlikely that any conversion will happen below, because
+ @@character_set_client and @@collation_connection are normally
+ set to the same CHARSET_INFO pointer.
+
+ In tricky environments when @@collation_connection is set to something
+ different from @@character_set_client, double conversion may happen:
+ - When the parser scans the string literal
+ (sql_yacc.yy rules "prepare_src" -> "expr" -> ... -> "text_literal")
+ it will convert 'str' from @@character_set_client to
+ @@collation_connection.
+ - Then in the code below will convert 'str' from @@collation_connection
+ back to @@character_set_client.
+
+ 2) When PREPARE or EXECUTE IMMEDIATE is used with a user variable,
+ it should work about the same way, because user variables are usually
+ assigned like this:
+ SET @str='str';
+ and thus have the same character set with string literals.
+
+ 3) When PREPARE or EXECUTE IMMEDIATE is used with some
+ more complex expression, conversion will depend on this expression.
+ For example, a concatenation of string literals:
+ EXECUTE IMMEDIATE 'SELECT * FROM'||'t1';
+ should work the same way with just a single literal,
+ so no conversion normally.
+ */
+ CHARSET_INFO *to_cs= thd->variables.character_set_client;
- if (needs_conversion)
+ uint32 unused;
+ if (String::needs_conversion(str->length(), str->charset(), to_cs, &unused))
+ {
+ if (!(dst->str= sql_strmake_with_convert(thd, str->ptr(), str->length(),
+ str->charset(), UINT_MAX32,
+ to_cs, &dst->length)))
{
- uint dummy_errors;
- len= copy_and_convert(query_str, len, to_cs, var_value->ptr(),
- var_value->length(), var_value->charset(),
- &dummy_errors);
+ dst->length= 0;
+ return true;
}
- else
- memcpy(query_str, var_value->ptr(), var_value->length());
- query_str[len]= '\0'; // Safety (mostly for debug)
- *query_len= len;
- }
- else
- {
- query_str= lex->prepared_stmt_code.str;
- *query_len= lex->prepared_stmt_code.length;
+ DBUG_ASSERT(dst->length <= UINT_MAX32);
+ return false;
}
-end:
- return query_str;
+ dst->str= str->ptr();
+ dst->length= str->length();
+ return false;
}
@@ -2727,8 +2810,7 @@ void mysql_sql_stmt_prepare(THD *thd)
LEX *lex= thd->lex;
LEX_STRING *name= &lex->prepared_stmt_name;
Prepared_statement *stmt;
- const char *query;
- uint query_len= 0;
+ LEX_CSTRING query;
DBUG_ENTER("mysql_sql_stmt_prepare");
if ((stmt= (Prepared_statement*) thd->stmt_map.find_by_name(name)))
@@ -2746,7 +2828,12 @@ void mysql_sql_stmt_prepare(THD *thd)
stmt->deallocate();
}
- if (! (query= get_dynamic_sql_string(lex, &query_len)) ||
+ /*
+ It's important for "buffer" not to be destructed before stmt->prepare()!
+ See comments in get_dynamic_sql_string().
+ */
+ StringBuffer<256> buffer;
+ if (lex->get_dynamic_sql_string(&query, &buffer) ||
! (stmt= new Prepared_statement(thd)))
{
DBUG_VOID_RETURN; /* out of memory */
@@ -2767,35 +2854,98 @@ void mysql_sql_stmt_prepare(THD *thd)
DBUG_VOID_RETURN;
}
-#if MYSQL_VERSION_ID < 100200
/*
- Backpoiting MDEV-14603 from 10.2 to 10.1
- Remove the code between #if..#endif when merging.
+ Make sure we call Prepared_statement::prepare() with an empty
+ THD::change_list. It can be non-empty as LEX::get_dynamic_sql_string()
+ calls fix_fields() for the Item containing the PS source,
+ e.g. on character set conversion:
+
+ SET NAMES utf8;
+ DELIMITER $$
+ CREATE PROCEDURE p1()
+ BEGIN
+ PREPARE stmt FROM CONCAT('SELECT ',CONVERT(RAND() USING latin1));
+ EXECUTE stmt;
+ END;
+ $$
+ DELIMITER ;
+ CALL p1();
*/
- Item_change_list change_list_save_point;
- thd->change_list.move_elements_to(&change_list_save_point);
-#endif
+ Item_change_list_savepoint change_list_savepoint(thd);
- if (stmt->prepare(query, query_len))
+ if (stmt->prepare(query.str, (uint) query.length))
{
/* Statement map deletes the statement on erase */
thd->stmt_map.erase(stmt);
}
else
+ {
+ SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
my_ok(thd, 0L, 0L, "Statement prepared");
+ }
+ change_list_savepoint.rollback(thd);
+
+ DBUG_VOID_RETURN;
+}
+
+
+void mysql_sql_stmt_execute_immediate(THD *thd)
+{
+ LEX *lex= thd->lex;
+ Prepared_statement *stmt;
+ LEX_CSTRING query;
+ DBUG_ENTER("mysql_sql_stmt_execute_immediate");
+
+ if (lex->prepared_stmt_params_fix_fields(thd))
+ DBUG_VOID_RETURN;
-#if MYSQL_VERSION_ID < 100200
/*
- Backpoiting MDEV-14603 from 10.2 to 10.1
- Remove the code between #if..#endif when merging.
+ Prepared_statement is quite large,
+ let's allocate it on the heap rather than on the stack.
+
+ It's important for "buffer" not to be destructed
+ before stmt->execute_immediate().
+ See comments in get_dynamic_sql_string().
*/
- thd->rollback_item_tree_changes();
- change_list_save_point.move_elements_to(&thd->change_list);
-#endif
+ StringBuffer<256> buffer;
+ if (lex->get_dynamic_sql_string(&query, &buffer) ||
+ !(stmt= new Prepared_statement(thd)))
+ DBUG_VOID_RETURN; // out of memory
+ // See comments on thd->free_list in mysql_sql_stmt_execute()
+ Item *free_list_backup= thd->free_list;
+ thd->free_list= NULL;
+ /*
+ Make sure we call Prepared_statement::execute_immediate()
+ with an empty THD::change_list. It can be non empty as the above
+ LEX::prepared_stmt_params_fix_fields() and LEX::get_dynamic_str_string()
+ call fix_fields() for the PS source and PS parameter Items and
+ can do Item tree changes, e.g. on character set conversion:
+
+ - Example #1: Item tree changes in get_dynamic_str_string()
+ SET NAMES utf8;
+ CREATE PROCEDURE p1()
+ EXECUTE IMMEDIATE CONCAT('SELECT ',CONVERT(RAND() USING latin1));
+ CALL p1();
+
+ - Example #2: Item tree changes in prepared_stmt_param_fix_fields():
+ SET NAMES utf8;
+ CREATE PROCEDURE p1(a VARCHAR(10) CHARACTER SET utf8)
+ EXECUTE IMMEDIATE 'SELECT ?' USING CONCAT(a, CONVERT(RAND() USING latin1));
+ CALL p1('x');
+ */
+ Item_change_list_savepoint change_list_savepoint(thd);
+ (void) stmt->execute_immediate(query.str, (uint) query.length);
+ change_list_savepoint.rollback(thd);
+ thd->free_items();
+ thd->free_list= free_list_backup;
+
+ stmt->lex->restore_set_statement_var();
+ delete stmt;
DBUG_VOID_RETURN;
}
+
/**
Reinit prepared statement/stored procedure before execution.
@@ -2809,6 +2959,7 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
{
SELECT_LEX *sl= lex->all_selects_list;
DBUG_ENTER("reinit_stmt_before_use");
+ Window_spec *win_spec;
/*
We have to update "thd" pointer in LEX, all its units and in LEX::result,
@@ -2881,8 +3032,16 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
}
/* Fix ORDER list */
for (order= sl->order_list.first; order; order= order->next)
- {
order->item= &order->item_ptr;
+ /* Fix window functions too */
+ List_iterator<Window_spec> it(sl->window_specs);
+
+ while ((win_spec= it++))
+ {
+ for (order= win_spec->partition_list->first; order; order= order->next)
+ order->item= &order->item_ptr;
+ for (order= win_spec->order_list->first; order; order= order->next)
+ order->item= &order->item_ptr;
}
}
if (sl->changed_elements & TOUCHED_SEL_DERIVED)
@@ -2968,6 +3127,14 @@ static void reset_stmt_params(Prepared_statement *stmt)
}
+static void mysql_stmt_execute_common(THD *thd,
+ ulong stmt_id,
+ uchar *packet,
+ uchar *packet_end,
+ ulong cursor_flags,
+ bool iteration,
+ bool types);
+
/**
COM_STMT_EXECUTE handler: execute a previously prepared statement.
@@ -2990,15 +3157,91 @@ void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
uchar *packet= (uchar*)packet_arg; // GCC 4.0.1 workaround
ulong stmt_id= uint4korr(packet);
ulong flags= (ulong) packet[4];
+ uchar *packet_end= packet + packet_length;
+ DBUG_ENTER("mysqld_stmt_execute");
+
+ packet+= 9; /* stmt_id + 5 bytes of flags */
+
+ mysql_stmt_execute_common(thd, stmt_id, packet, packet_end, flags, FALSE,
+ FALSE);
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ COM_STMT_BULK_EXECUTE handler: execute a previously prepared statement.
+
+ If there are any parameters, then replace parameter markers with the
+ data supplied from the client, and then execute the statement.
+ This function uses binary protocol to send a possible result set
+ to the client.
+
+ @param thd current thread
+ @param packet_arg parameter types and data, if any
+ @param packet_length packet length, including the terminator character.
+
+ @return
+ none: in case of success OK packet or a result set is sent to the
+ client, otherwise an error message is set in THD.
+*/
+
+void mysqld_stmt_bulk_execute(THD *thd, char *packet_arg, uint packet_length)
+{
+ uchar *packet= (uchar*)packet_arg; // GCC 4.0.1 workaround
+ ulong stmt_id= uint4korr(packet);
+ uint flags= (uint) uint2korr(packet + 4);
+ uchar *packet_end= packet + packet_length;
+ DBUG_ENTER("mysqld_stmt_execute_bulk");
+
+ if (!(thd->client_capabilities &
+ MARIADB_CLIENT_STMT_BULK_OPERATIONS))
+ {
+ DBUG_PRINT("error",
+ ("An attempt to execute bulk operation without support"));
+ my_error(ER_UNSUPPORTED_PS, MYF(0));
+ }
+ /* Check for implemented parameters */
+ if (flags & (~STMT_BULK_FLAG_CLIENT_SEND_TYPES))
+ {
+ DBUG_PRINT("error", ("unsupported bulk execute flags %x", flags));
+ my_error(ER_UNSUPPORTED_PS, MYF(0));
+ }
+
+ /* stmt id and two bytes of flags */
+ packet+= 4 + 2;
+ mysql_stmt_execute_common(thd, stmt_id, packet, packet_end, 0, TRUE,
+ (flags & STMT_BULK_FLAG_CLIENT_SEND_TYPES));
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Common part of prepared statement execution
+
+ @param thd THD handle
+ @param stmt_id id of the prepared statement
+  @param packet packet with parameters to bind
+ @param packet_end pointer to the byte after parameters end
+ @param cursor_flags cursor flags
+  @param bulk_op whether this is a bulk operation
+  @param read_types flag saying that parameter types must be read
+*/
+
+static void mysql_stmt_execute_common(THD *thd,
+ ulong stmt_id,
+ uchar *packet,
+ uchar *packet_end,
+ ulong cursor_flags,
+ bool bulk_op,
+ bool read_types)
+{
/* Query text for binary, general or slow log, if any of them is open */
String expanded_query;
- uchar *packet_end= packet + packet_length;
Prepared_statement *stmt;
Protocol *save_protocol= thd->protocol;
bool open_cursor;
- DBUG_ENTER("mysqld_stmt_execute");
-
- packet+= 9; /* stmt_id + 5 bytes of flags */
+ DBUG_ENTER("mysqld_stmt_execute_common");
+ DBUG_ASSERT((!read_types) || (read_types && bulk_op));
/* First of all clear possible warnings from the previous command */
thd->reset_for_next_command();
@@ -3010,24 +3253,28 @@ void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
llstr(stmt_id, llbuf), "mysqld_stmt_execute");
DBUG_VOID_RETURN;
}
+ stmt->read_types= read_types;
#if defined(ENABLED_PROFILING)
thd->profiling.set_query_source(stmt->query(), stmt->query_length());
#endif
DBUG_PRINT("exec_query", ("%s", stmt->query()));
- DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
+ DBUG_PRINT("info",("stmt: %p bulk_op %d", stmt, bulk_op));
- open_cursor= MY_TEST(flags & (ulong) CURSOR_TYPE_READ_ONLY);
+ open_cursor= MY_TEST(cursor_flags & (ulong) CURSOR_TYPE_READ_ONLY);
thd->protocol= &thd->protocol_binary;
- stmt->execute_loop(&expanded_query, open_cursor, packet, packet_end);
+ if (!bulk_op)
+ stmt->execute_loop(&expanded_query, open_cursor, packet, packet_end);
+ else
+ stmt->execute_bulk_loop(&expanded_query, open_cursor, packet, packet_end);
thd->protocol= save_protocol;
sp_cache_enforce_limit(thd->sp_proc_cache, stored_program_cache_size);
sp_cache_enforce_limit(thd->sp_func_cache, stored_program_cache_size);
/* Close connection socket; for use with client testing (Bug#43560). */
- DBUG_EXECUTE_IF("close_conn_after_stmt_execute", vio_close(thd->net.vio););
+ DBUG_EXECUTE_IF("close_conn_after_stmt_execute", vio_shutdown(thd->net.vio,SHUT_RD););
DBUG_VOID_RETURN;
}
@@ -3073,11 +3320,19 @@ void mysql_sql_stmt_execute(THD *thd)
DBUG_VOID_RETURN;
}
- DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
+ DBUG_PRINT("info",("stmt: %p", stmt));
+
+ if (lex->prepared_stmt_params_fix_fields(thd))
+ DBUG_VOID_RETURN;
/*
- thd->free_list can already have some Items,
- e.g. for a query like this:
+ thd->free_list can already have some Items.
+
+ Example queries:
+ - SET STATEMENT var=expr FOR EXECUTE stmt;
+ - EXECUTE stmt USING expr;
+
+ E.g. for a query like this:
PREPARE stmt FROM 'INSERT INTO t1 VALUES (@@max_sort_length)';
SET STATEMENT max_sort_length=2048 FOR EXECUTE stmt;
thd->free_list contains a pointer to Item_int corresponding to 2048.
@@ -3091,33 +3346,33 @@ void mysql_sql_stmt_execute(THD *thd)
which calls Query_arena::free_items().
We hide "external" Items, e.g. those created while parsing the
- "SET STATEMENT" part of the query,
+ "SET STATEMENT" or "USING" parts of the query,
so they don't get freed in case of re-prepare.
See MDEV-10702 Crash in SET STATEMENT FOR EXECUTE
*/
Item *free_list_backup= thd->free_list;
thd->free_list= NULL; // Hide the external (e.g. "SET STATEMENT") Items
-
-#if MYSQL_VERSION_ID < 100200
/*
- Backpoiting MDEV-14603 from 10.2 to 10.1
- Remove the code between #if..#endif when merging.
+ Make sure we call Prepared_statement::execute_loop() with an empty
+ THD::change_list. It can be non-empty because the above
+ LEX::prepared_stmt_params_fix_fields() calls fix_fields() for
+ the PS parameter Items and can do some Item tree changes,
+ e.g. on character set conversion:
+
+ SET NAMES utf8;
+ DELIMITER $$
+ CREATE PROCEDURE p1(a VARCHAR(10) CHARACTER SET utf8)
+ BEGIN
+ PREPARE stmt FROM 'SELECT ?';
+ EXECUTE stmt USING CONCAT(a, CONVERT(RAND() USING latin1));
+ END;
+ $$
+ DELIMITER ;
+ CALL p1('x');
*/
- Item_change_list change_list_save_point;
- thd->change_list.move_elements_to(&change_list_save_point);
-#endif
-
+ Item_change_list_savepoint change_list_savepoint(thd);
(void) stmt->execute_loop(&expanded_query, FALSE, NULL, NULL);
-
-#if MYSQL_VERSION_ID < 100200
- /*
- Backpoiting MDEV-14603 from 10.2 to 10.1
- Remove the code between #if..#endif when merging.
- */
- thd->rollback_item_tree_changes();
- change_list_save_point.move_elements_to(&thd->change_list);
-#endif
-
+ change_list_savepoint.rollback(thd);
thd->free_items(); // Free items created by execute_loop()
/*
Now restore the "external" (e.g. "SET STATEMENT") Item list.
@@ -3265,6 +3520,9 @@ void mysqld_stmt_close(THD *thd, char *packet)
stmt->deallocate();
general_log_print(thd, thd->get_command(), NullS);
+ if (thd->last_stmt == stmt)
+ thd->clear_last_stmt();
+
DBUG_VOID_RETURN;
}
@@ -3296,6 +3554,7 @@ void mysql_sql_stmt_close(THD *thd)
else
{
stmt->deallocate();
+ SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
my_ok(thd);
}
}
@@ -3527,14 +3786,21 @@ end:
Prepared_statement::Prepared_statement(THD *thd_arg)
:Statement(NULL, &main_mem_root,
- STMT_INITIALIZED, ++thd_arg->statement_id_counter),
+ STMT_INITIALIZED,
+ ((++thd_arg->statement_id_counter) & STMT_ID_MASK)),
thd(thd_arg),
result(thd_arg),
param_array(0),
cursor(0),
+ packet(0),
+ packet_end(0),
param_count(0),
last_errno(0),
- flags((uint) IS_IN_USE)
+ flags((uint) IS_IN_USE),
+ iterations(0),
+ start_param(0),
+ read_types(0),
+ m_sql_mode(thd->variables.sql_mode)
{
init_sql_alloc(&main_mem_root, thd_arg->variables.query_alloc_block_size,
thd_arg->variables.query_prealloc_size, MYF(MY_THREAD_SPECIFIC));
@@ -3567,19 +3833,23 @@ void Prepared_statement::setup_set_params()
if (replace_params_with_values)
{
- set_params_from_vars= insert_params_from_vars_with_log;
+ set_params_from_actual_params= insert_params_from_actual_params_with_log;
#ifndef EMBEDDED_LIBRARY
set_params= insert_params_with_log;
+ set_bulk_params= insert_bulk_params; // RBR is on for bulk operation
#else
+    //TODO: add support for bulk parameters in the embedded server
set_params_data= emb_insert_params_with_log;
#endif
}
else
{
- set_params_from_vars= insert_params_from_vars;
+ set_params_from_actual_params= insert_params_from_actual_params;
#ifndef EMBEDDED_LIBRARY
set_params= insert_params;
+ set_bulk_params= insert_bulk_params;
#else
+    //TODO: add support for bulk parameters in the embedded server
set_params_data= emb_insert_params;
#endif
}
@@ -3597,8 +3867,8 @@ void Prepared_statement::setup_set_params()
Prepared_statement::~Prepared_statement()
{
DBUG_ENTER("Prepared_statement::~Prepared_statement");
- DBUG_PRINT("enter",("stmt: 0x%lx cursor: 0x%lx",
- (long) this, (long) cursor));
+ DBUG_PRINT("enter",("stmt: %p cursor: %p",
+ this, cursor));
delete cursor;
/*
We have to call free on the items even if cleanup is called as some items,
@@ -3625,7 +3895,7 @@ Query_arena::Type Prepared_statement::type() const
void Prepared_statement::cleanup_stmt()
{
DBUG_ENTER("Prepared_statement::cleanup_stmt");
- DBUG_PRINT("enter",("stmt: 0x%lx", (long) this));
+ DBUG_PRINT("enter",("stmt: %p", this));
thd->restore_set_statement_var();
thd->rollback_item_tree_changes();
cleanup_items(free_list);
@@ -3704,6 +3974,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
Statement stmt_backup;
Query_arena *old_stmt_arena;
DBUG_ENTER("Prepared_statement::prepare");
+ DBUG_ASSERT(m_sql_mode == thd->variables.sql_mode);
/*
If this is an SQLCOM_PREPARE, we also increase Com_prepare_sql.
However, it seems handy if com_stmt_prepare is increased always,
@@ -3770,7 +4041,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
If called from a stored procedure, ensure that we won't rollback
external changes when cleaning up after validation.
*/
- DBUG_ASSERT(thd->change_list.is_empty());
+ DBUG_ASSERT(thd->Item_change_list::is_empty());
/*
Marker used to release metadata locks acquired while the prepared
@@ -3885,8 +4156,8 @@ Prepared_statement::set_parameters(String *expanded_query,
if (is_sql_ps)
{
/* SQL prepared statement */
- res= set_params_from_vars(this, thd->lex->prepared_stmt_params,
- expanded_query);
+ res= set_params_from_actual_params(this, thd->lex->prepared_stmt_params,
+ expanded_query);
}
else if (param_count)
{
@@ -3935,6 +4206,7 @@ Prepared_statement::set_parameters(String *expanded_query,
@retval FALSE successfully executed the statement, perhaps
after having reprepared it a few times.
*/
+const static int MAX_REPREPARE_ATTEMPTS= 3;
bool
Prepared_statement::execute_loop(String *expanded_query,
@@ -3942,10 +4214,10 @@ Prepared_statement::execute_loop(String *expanded_query,
uchar *packet,
uchar *packet_end)
{
- const int MAX_REPREPARE_ATTEMPTS= 3;
Reprepare_observer reprepare_observer;
bool error;
int reprepare_attempt= 0;
+ iterations= FALSE;
/*
- In mysql_sql_stmt_execute() we hide all "external" Items
@@ -4002,8 +4274,9 @@ reexecute:
switch (thd->wsrep_conflict_state)
{
case CERT_FAILURE:
- WSREP_DEBUG("PS execute fail for CERT_FAILURE: thd: %ld err: %d",
- thd->thread_id, thd->get_stmt_da()->sql_errno() );
+ WSREP_DEBUG("PS execute fail for CERT_FAILURE: thd: %lld err: %d",
+ (longlong) thd->thread_id,
+ thd->get_stmt_da()->sql_errno() );
thd->wsrep_conflict_state = NO_CONFLICT;
break;
@@ -4036,6 +4309,204 @@ reexecute:
return error;
}
+my_bool bulk_parameters_set(THD *thd)
+{
+ DBUG_ENTER("bulk_parameters_set");
+ Prepared_statement *stmt= (Prepared_statement *) thd->bulk_param;
+
+ if (stmt && stmt->set_bulk_parameters(FALSE))
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
+
+my_bool bulk_parameters_iterations(THD *thd)
+{
+ Prepared_statement *stmt= (Prepared_statement *) thd->bulk_param;
+ if (!stmt)
+ return FALSE;
+ return stmt->bulk_iterations();
+}
+
+
+my_bool Prepared_statement::set_bulk_parameters(bool reset)
+{
+ DBUG_ENTER("Prepared_statement::set_bulk_parameters");
+ DBUG_PRINT("info", ("iteration: %d", iterations));
+
+ if (iterations)
+ {
+#ifndef EMBEDDED_LIBRARY
+ if ((*set_bulk_params)(this, &packet, packet_end, reset))
+#else
+  // bulk parameters are not supported for embedded, so it will be an error
+#endif
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0),
+ "mysqld_stmt_bulk_execute");
+ reset_stmt_params(this);
+ DBUG_RETURN(true);
+ }
+ if (packet >= packet_end)
+ iterations= FALSE;
+ }
+ start_param= 0;
+ DBUG_RETURN(false);
+}
+
+bool
+Prepared_statement::execute_bulk_loop(String *expanded_query,
+ bool open_cursor,
+ uchar *packet_arg,
+ uchar *packet_end_arg)
+{
+ Reprepare_observer reprepare_observer;
+ bool error= 0;
+ packet= packet_arg;
+ packet_end= packet_end_arg;
+ iterations= TRUE;
+ start_param= true;
+#ifndef DBUG_OFF
+ Item *free_list_state= thd->free_list;
+#endif
+ thd->set_bulk_execution((void *)this);
+ /* Check if we got an error when sending long data */
+ if (state == Query_arena::STMT_ERROR)
+ {
+ my_message(last_errno, last_error, MYF(0));
+ thd->set_bulk_execution(0);
+ return TRUE;
+ }
+ /* Check for non zero parameter count*/
+ if (param_count == 0)
+ {
+ DBUG_PRINT("error", ("Statement with no parameters for bulk execution."));
+ my_error(ER_UNSUPPORTED_PS, MYF(0));
+ thd->set_bulk_execution(0);
+ return TRUE;
+ }
+
+ if (!(sql_command_flags[lex->sql_command] & CF_SP_BULK_SAFE))
+ {
+ DBUG_PRINT("error", ("Command is not supported in bulk execution."));
+ my_error(ER_UNSUPPORTED_PS, MYF(0));
+ thd->set_bulk_execution(0);
+ return TRUE;
+ }
+
+#ifndef EMBEDDED_LIBRARY
+ if (read_types &&
+ set_conversion_functions(this, &packet, packet_end))
+#else
+  // bulk parameters are not supported for embedded, so it will be an error
+#endif
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0),
+ "mysqld_stmt_bulk_execute");
+ reset_stmt_params(this);
+ thd->set_bulk_execution(0);
+ return true;
+ }
+ read_types= FALSE;
+
+#ifdef NOT_YET_FROM_MYSQL_5_6
+ if (unlikely(thd->security_ctx->password_expired &&
+ !lex->is_change_password))
+ {
+ my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
+ thd->set_bulk_execution(0);
+ return true;
+ }
+#endif
+
+ // iterations changed by set_bulk_parameters
+ while ((iterations || start_param) && !error && !thd->is_error())
+ {
+ int reprepare_attempt= 0;
+
+ /*
+      Here we set parameters for non-optimized commands;
+      optimized commands do it inside their internal loop.
+ */
+ if (!(sql_command_flags[lex->sql_command] & CF_SP_BULK_OPTIMIZED))
+ {
+ if (set_bulk_parameters(TRUE))
+ {
+ thd->set_bulk_execution(0);
+ return true;
+ }
+ }
+
+reexecute:
+ /*
+ If the free_list is not empty, we'll wrongly free some externally
+ allocated items when cleaning up after validation of the prepared
+ statement.
+ */
+ DBUG_ASSERT(thd->free_list == free_list_state);
+
+ /*
+ Install the metadata observer. If some metadata version is
+ different from prepare time and an observer is installed,
+ the observer method will be invoked to push an error into
+ the error stack.
+ */
+
+ if (sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE)
+ {
+ reprepare_observer.reset_reprepare_observer();
+ DBUG_ASSERT(thd->m_reprepare_observer == NULL);
+ thd->m_reprepare_observer= &reprepare_observer;
+ }
+
+ error= execute(expanded_query, open_cursor) || thd->is_error();
+
+ thd->m_reprepare_observer= NULL;
+#ifdef WITH_WSREP
+
+ if (WSREP_ON)
+ {
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ switch (thd->wsrep_conflict_state)
+ {
+ case CERT_FAILURE:
+ WSREP_DEBUG("PS execute fail for CERT_FAILURE: thd: %lld err: %d",
+ (longlong) thd->thread_id,
+ thd->get_stmt_da()->sql_errno() );
+ thd->wsrep_conflict_state = NO_CONFLICT;
+ break;
+
+ case MUST_REPLAY:
+ (void) wsrep_replay_transaction(thd);
+ break;
+
+ default:
+ break;
+ }
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ }
+#endif /* WITH_WSREP */
+
+ if ((sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) &&
+ error && !thd->is_fatal_error && !thd->killed &&
+ reprepare_observer.is_invalidated() &&
+ reprepare_attempt++ < MAX_REPREPARE_ATTEMPTS)
+ {
+ DBUG_ASSERT(thd->get_stmt_da()->sql_errno() == ER_NEED_REPREPARE);
+ thd->clear_error();
+
+ error= reprepare();
+
+ if (! error) /* Success */
+ goto reexecute;
+ }
+ }
+ reset_stmt_params(this);
+ thd->set_bulk_execution(0);
+
+ return error;
+}
+
+
bool
Prepared_statement::execute_server_runnable(Server_runnable *server_runnable)
{
@@ -4043,7 +4514,7 @@ Prepared_statement::execute_server_runnable(Server_runnable *server_runnable)
bool error;
Query_arena *save_stmt_arena= thd->stmt_arena;
Item_change_list save_change_list;
- thd->change_list.move_elements_to(&save_change_list);
+ thd->Item_change_list::move_elements_to(&save_change_list);
state= STMT_CONVENTIONAL_EXECUTION;
@@ -4062,7 +4533,7 @@ Prepared_statement::execute_server_runnable(Server_runnable *server_runnable)
thd->restore_backup_statement(this, &stmt_backup);
thd->stmt_arena= save_stmt_arena;
- save_change_list.move_elements_to(&thd->change_list);
+ save_change_list.move_elements_to(thd);
/* Items and memory will freed in destructor */
@@ -4094,6 +4565,7 @@ Prepared_statement::reprepare()
bool error;
Prepared_statement copy(thd);
+ copy.m_sql_mode= m_sql_mode;
copy.set_sql_prepare(); /* To suppress sending metadata to the client. */
@@ -4103,9 +4575,12 @@ Prepared_statement::reprepare()
&cur_db_changed))
return TRUE;
+ sql_mode_t save_sql_mode= thd->variables.sql_mode;
+ thd->variables.sql_mode= m_sql_mode;
error= ((name.str && copy.set_name(&name)) ||
copy.prepare(query(), query_length()) ||
validate_metadata(&copy));
+ thd->variables.sql_mode= save_sql_mode;
if (cur_db_changed)
mysql_change_db(thd, &saved_cur_db_name, TRUE);
@@ -4287,7 +4762,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
If the free_list is not empty, we'll wrongly free some externally
allocated items when cleaning up after execution of this statement.
*/
- DBUG_ASSERT(thd->change_list.is_empty());
+ DBUG_ASSERT(thd->Item_change_list::is_empty());
/*
The only case where we should have items in the thd->free_list is
@@ -4413,7 +4888,19 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
if (error == 0 && this->lex->sql_command == SQLCOM_CALL)
{
if (is_sql_prepare())
+ {
+ /*
+ Here we have the diagnostics area status already set to DA_OK.
+ sent_out_parameters() can raise errors when assigning OUT parameters:
+ DECLARE a DATETIME;
+ EXECUTE IMMEDIATE 'CALL p1(?)' USING a;
+      when the procedure p1 assigns a DATETIME-incompatible value (e.g. 10)
+      to the OUT parameter. Allow overwriting the status (to DA_ERROR).
+ */
+ thd->get_stmt_da()->set_overwrite_status(true);
thd->protocol_text.send_out_parameters(&this->lex->param_list);
+ thd->get_stmt_da()->set_overwrite_status(false);
+ }
else
thd->protocol->send_out_parameters(&this->lex->param_list);
}
@@ -4443,16 +4930,58 @@ error:
}
-/** Common part of DEALLOCATE PREPARE and mysqld_stmt_close. */
+/**
+ Prepare, execute and clean-up a statement.
+ @param query - query text
+ @param length - query text length
+ @retval true - the query was not executed (parse error, wrong parameters)
+ @retval false - the query was prepared and executed
-void Prepared_statement::deallocate()
+ Note, if some error happened during execution, it still returns "false".
+*/
+bool Prepared_statement::execute_immediate(const char *query, uint query_len)
+{
+ DBUG_ENTER("Prepared_statement::execute_immediate");
+ String expanded_query;
+ static LEX_STRING execute_immediate_stmt_name=
+ {(char*) STRING_WITH_LEN("(immediate)") };
+
+ set_sql_prepare();
+ name= execute_immediate_stmt_name; // for DBUG_PRINT etc
+ if (prepare(query, query_len))
+ DBUG_RETURN(true);
+
+ if (param_count != thd->lex->prepared_stmt_params.elements)
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE");
+ deallocate_immediate();
+ DBUG_RETURN(true);
+ }
+
+ (void) execute_loop(&expanded_query, FALSE, NULL, NULL);
+ deallocate_immediate();
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Common part of DEALLOCATE PREPARE, EXECUTE IMMEDIATE, mysqld_stmt_close.
+*/
+void Prepared_statement::deallocate_immediate()
{
/* We account deallocate in the same manner as mysqld_stmt_close */
status_var_increment(thd->status_var.com_stmt_close);
/* It should now be safe to reset CHANGE MASTER parameters */
lex_end_stage2(lex);
+}
+
+/** Common part of DEALLOCATE PREPARE and mysqld_stmt_close. */
+
+void Prepared_statement::deallocate()
+{
+ deallocate_immediate();
/* Statement map calls delete stmt on erase */
thd->stmt_map.erase(this);
}
@@ -4963,7 +5492,7 @@ bool Protocol_local::send_out_parameters(List<Item_param> *sp_params)
bool
Protocol_local::send_ok(uint server_status, uint statement_warn_count,
ulonglong affected_rows, ulonglong last_insert_id,
- const char *message)
+ const char *message, bool skip_flush)
{
/*
Just make sure nothing is sent to the client, we have grabbed
diff --git a/sql/sql_prepare.h b/sql/sql_prepare.h
index b468ac1bf9b..203b37b3b26 100644
--- a/sql/sql_prepare.h
+++ b/sql/sql_prepare.h
@@ -18,6 +18,10 @@
#include "sql_error.h"
+
+#define LAST_STMT_ID 0xFFFFFFFF
+#define STMT_ID_MASK 0x7FFFFFFF
+
class THD;
struct LEX;
@@ -68,15 +72,20 @@ private:
void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length);
void mysqld_stmt_execute(THD *thd, char *packet, uint packet_length);
+void mysqld_stmt_execute_bulk(THD *thd, char *packet, uint packet_length);
+void mysqld_stmt_bulk_execute(THD *thd, char *packet, uint packet_length);
void mysqld_stmt_close(THD *thd, char *packet);
void mysql_sql_stmt_prepare(THD *thd);
void mysql_sql_stmt_execute(THD *thd);
+void mysql_sql_stmt_execute_immediate(THD *thd);
void mysql_sql_stmt_close(THD *thd);
void mysqld_stmt_fetch(THD *thd, char *packet, uint packet_length);
void mysqld_stmt_reset(THD *thd, char *packet);
void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length);
void reinit_stmt_before_use(THD *thd, LEX *lex);
+my_bool bulk_parameters_iterations(THD *thd);
+my_bool bulk_parameters_set(THD *thd);
/**
Execute a fragment of server code in an isolated context, so that
it doesn't leave any effect on THD. THD must have no open tables.
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index f54c66e1d99..81dea4e41b0 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -126,7 +126,7 @@
#define TMP_TABLE_ALL_COLUMNS (1ULL << 12) // SELECT, intern
#define OPTION_WARNINGS (1ULL << 13) // THD, user
#define OPTION_AUTO_IS_NULL (1ULL << 14) // THD, user, binlog
-#define OPTION_FOUND_COMMENT (1ULL << 15) // SELECT, intern, parser
+#define OPTION_NO_CHECK_CONSTRAINT_CHECKS (1ULL << 15)
#define OPTION_SAFE_UPDATES (1ULL << 16) // THD, user
#define OPTION_BUFFER_RESULT (1ULL << 17) // SELECT, user
#define OPTION_BIN_LOG (1ULL << 18) // THD, user
@@ -177,6 +177,7 @@
#define OPTION_SKIP_REPLICATION (1ULL << 37) // THD, user
#define OPTION_RPL_SKIP_PARALLEL (1ULL << 38)
+#define OPTION_FOUND_COMMENT (1ULL << 39) // SELECT, intern, parser
/* The rest of the file is included in the server only */
#ifndef MYSQL_CLIENT
@@ -219,6 +220,7 @@
#define OPTIMIZER_SWITCH_EXTENDED_KEYS (1ULL << 27)
#define OPTIMIZER_SWITCH_EXISTS_TO_IN (1ULL << 28)
#define OPTIMIZER_SWITCH_ORDERBY_EQ_PROP (1ULL << 29)
+#define OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED (1ULL << 30)
#define OPTIMIZER_SWITCH_DEFAULT (OPTIMIZER_SWITCH_INDEX_MERGE | \
OPTIMIZER_SWITCH_INDEX_MERGE_UNION | \
@@ -242,7 +244,9 @@
OPTIMIZER_SWITCH_SEMIJOIN | \
OPTIMIZER_SWITCH_FIRSTMATCH | \
OPTIMIZER_SWITCH_LOOSE_SCAN | \
- OPTIMIZER_SWITCH_EXISTS_TO_IN)
+ OPTIMIZER_SWITCH_EXISTS_TO_IN | \
+ OPTIMIZER_SWITCH_ORDERBY_EQ_PROP | \
+ OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED)
/*
Replication uses 8 bytes to store SQL_MODE in the binary log. The day you
use strictly more than 64 bits by adding one more define above, you should
@@ -319,7 +323,6 @@
/* BINLOG_DUMP options */
#define BINLOG_DUMP_NON_BLOCK 1
-
#define BINLOG_SEND_ANNOTATE_ROWS_EVENT 2
#ifndef MYSQL_CLIENT
@@ -339,6 +342,7 @@ enum enum_parsing_place
IN_WHERE,
IN_ON,
IN_GROUP_BY,
+ IN_ORDER_BY,
PARSING_PLACE_SIZE /* always should be the last */
};
diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc
index 48f7987daf5..f3b62991c5c 100644
--- a/sql/sql_profile.cc
+++ b/sql/sql_profile.cc
@@ -124,7 +124,7 @@ int make_profile_table_for_show(THD *thd, ST_SCHEMA_TABLE *schema_table)
NullS, NullS, field_info->field_name);
if (field)
{
- field->set_name(field_info->old_name,
+ field->set_name(thd, field_info->old_name,
(uint) strlen(field_info->old_name),
system_charset_info);
if (add_item_to_list(thd, field))
@@ -329,13 +329,27 @@ PROFILING::PROFILING()
PROFILING::~PROFILING()
{
+ restart();
+}
+
+/*
+ Restart profiling from scratch
+*/
+
+void PROFILING::restart()
+{
while (! history.is_empty())
delete history.pop();
if (current != NULL)
delete current;
+ /* Ensure that profiling object can be reused */
+ profile_id_counter= 1;
+ current= NULL;
+ last= NULL;
}
+
/**
Throw away the current profile, because it's useless or unwanted
or corrupted.
@@ -675,6 +689,6 @@ int PROFILING::fill_statistics_info(THD *thd_arg, TABLE_LIST *tables, Item *cond
void PROFILING::reset()
{
- enabled= thd->variables.option_bits & OPTION_PROFILING;
+ enabled= (thd->variables.option_bits & OPTION_PROFILING) != 0;
}
#endif /* ENABLED_PROFILING */
diff --git a/sql/sql_profile.h b/sql/sql_profile.h
index 1d770ca1147..38682f3ddec 100644
--- a/sql/sql_profile.h
+++ b/sql/sql_profile.h
@@ -324,6 +324,7 @@ public:
/* ... from INFORMATION_SCHEMA.PROFILING ... */
int fill_statistics_info(THD *thd, TABLE_LIST *tables, Item *cond);
void reset();
+ void restart();
};
# endif /* ENABLED_PROFILING */
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index ab9e7c33a92..ac66f7e9609 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -30,6 +30,7 @@
#include "sql_show.h"
#include "debug_sync.h"
#include "des_key_file.h"
+#include "transaction.h"
static void disable_checkpoints(THD *thd);
@@ -73,7 +74,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
If reload_acl_and_cache() is called from SIGHUP handler we have to
allocate temporary THD for execution of acl_reload()/grant_reload().
*/
- if (!thd && (thd= (tmp_thd= new THD)))
+ if (!thd && (thd= (tmp_thd= new THD(0))))
{
thd->thread_stack= (char*) &tmp_thd;
thd->store_globals();
@@ -602,6 +603,7 @@ bool flush_tables_with_read_lock(THD *thd, TABLE_LIST *all_tables)
return FALSE;
error_reset_bits:
+ trans_rollback_stmt(thd);
close_thread_tables(thd);
thd->variables.option_bits&= ~OPTION_TABLE_LOCK;
error:
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index 4c164a3a621..4e576602a1a 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -27,7 +27,6 @@
#include "sql_table.h" // write_bin_log
#include "sql_view.h" // mysql_frm_type, mysql_rename_view
#include "sql_trigger.h"
-#include "lock.h" // MYSQL_OPEN_SKIP_TEMPORARY
#include "sql_base.h" // tdc_remove_table, lock_table_names,
#include "sql_handler.h" // mysql_ha_rm_tables
#include "sql_statistics.h"
@@ -222,14 +221,14 @@ do_rename_temporary(THD *thd, TABLE_LIST *ren_table, TABLE_LIST *new_table,
new_alias= (lower_case_table_names == 2) ? new_table->alias :
new_table->table_name;
- if (find_temporary_table(thd, new_table))
+ if (thd->find_temporary_table(new_table, THD::TMP_TABLE_ANY))
{
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias);
DBUG_RETURN(1); // This can't be skipped
}
- DBUG_RETURN(rename_temporary_table(thd, ren_table->table,
+ DBUG_RETURN(thd->rename_temporary_table(ren_table->table,
new_table->db, new_alias));
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index f1a1ca6fd2b..3bc1a019f40 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -182,6 +182,7 @@ struct binlog_send_info {
{
error_text[0] = 0;
bzero(&error_gtid, sizeof(error_gtid));
+ until_binlog_state.init();
}
};
@@ -494,7 +495,7 @@ static enum enum_binlog_checksum_alg get_binlog_checksum_value_at_connect(THD *
TODO
- Inform the slave threads that they should sync the position
- in the binary log file with flush_relay_log_info.
+ in the binary log file with Relay_log_info::flush().
Now they sync is done for next read.
*/
@@ -864,7 +865,6 @@ get_binlog_list(MEM_ROOT *memroot)
!(e->name= strmake_root(memroot, fname, length)))
{
mysql_bin_log.unlock_index();
- my_error(ER_OUTOFMEMORY, MYF(0), length + 1 + sizeof(*e));
DBUG_RETURN(NULL);
}
e->next= current_list;
@@ -1562,7 +1562,7 @@ is_until_reached(binlog_send_info *info, ulong *ev_offset,
break;
case GTID_UNTIL_STOP_AFTER_TRANSACTION:
if (event_type != XID_EVENT &&
- (event_type != QUERY_EVENT ||
+ (event_type != QUERY_EVENT || /* QUERY_COMPRESSED_EVENT would never be commit or rollback */
!Query_log_event::peek_is_commit_rollback
(info->packet->ptr()+*ev_offset,
info->packet->length()-*ev_offset,
@@ -1796,7 +1796,7 @@ send_event_to_slave(binlog_send_info *info, Log_event_type event_type,
return NULL;
case GTID_SKIP_TRANSACTION:
if (event_type == XID_EVENT ||
- (event_type == QUERY_EVENT &&
+ (event_type == QUERY_EVENT && /* QUERY_COMPRESSED_EVENT would never be commit or rollback */
Query_log_event::peek_is_commit_rollback(packet->ptr() + ev_offset,
len - ev_offset,
current_checksum_alg)))
@@ -2046,12 +2046,6 @@ static int init_binlog_sender(binlog_send_info *info,
info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG;
return 1;
}
- if (!server_id_supplied)
- {
- info->errmsg= "Misconfigured master - server id was not set";
- info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG;
- return 1;
- }
char search_file_name[FN_REFLEN];
const char *name=search_file_name;
@@ -2112,9 +2106,7 @@ static int init_binlog_sender(binlog_send_info *info,
linfo->pos= *pos;
// note: publish that we use file, before we open it
- mysql_mutex_lock(&LOCK_thread_count);
thd->current_linfo= linfo;
- mysql_mutex_unlock(&LOCK_thread_count);
if (check_start_offset(info, linfo->log_file_name, *pos))
return 1;
@@ -2620,7 +2612,7 @@ static int send_events(binlog_send_info *info, IO_CACHE* log, LOG_INFO* linfo,
Gtid_list_log_event glev(&info->until_binlog_state, 0);
if (reset_transmit_packet(info, info->flags, &ev_offset, &info->errmsg) ||
- fake_gtid_list_event(info, &glev, &info->errmsg, my_b_tell(log)))
+ fake_gtid_list_event(info, &glev, &info->errmsg, (uint32)my_b_tell(log)))
{
info->error= ER_UNKNOWN_ERROR;
return 1;
@@ -2630,7 +2622,7 @@ static int send_events(binlog_send_info *info, IO_CACHE* log, LOG_INFO* linfo,
if (info->until_gtid_state &&
is_until_reached(info, &ev_offset, event_type, &info->errmsg,
- my_b_tell(log)))
+ (uint32)my_b_tell(log)))
{
if (info->errmsg)
{
@@ -2685,7 +2677,7 @@ static int send_one_binlog_file(binlog_send_info *info,
if (end_pos <= 1)
{
/** end of file or error */
- return end_pos;
+ return (int)end_pos;
}
/**
@@ -2849,6 +2841,13 @@ err:
THD_STAGE_INFO(thd, stage_waiting_to_finalize_termination);
RUN_HOOK(binlog_transmit, transmit_stop, (thd, flags));
+ if (info->thd->killed == KILL_SLAVE_SAME_ID)
+ {
+ info->errmsg= "A slave with the same server_uuid/server_id as this slave "
+ "has connected to the master";
+ info->error= ER_SLAVE_SAME_ID;
+ }
+
const bool binlog_open = my_b_inited(&log);
if (file >= 0)
{
@@ -2856,13 +2855,12 @@ err:
mysql_file_close(file, MYF(MY_WME));
}
- mysql_mutex_lock(&LOCK_thread_count);
- thd->current_linfo = 0;
- mysql_mutex_unlock(&LOCK_thread_count);
+ thd->reset_current_linfo();
thd->variables.max_allowed_packet= old_max_allowed_packet;
delete info->fdev;
- if (info->error == ER_MASTER_FATAL_ERROR_READING_BINLOG && binlog_open)
+ if ((info->error == ER_MASTER_FATAL_ERROR_READING_BINLOG ||
+ info->error == ER_SLAVE_SAME_ID) && binlog_open)
{
/*
detailing the fatal error message with coordinates
@@ -2978,7 +2976,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
{
/* connection was deleted while we waited for lock_slave_threads */
mi->unlock_slave_threads();
- my_error(WARN_NO_MASTER_INFO, mi->connection_name.length,
+ my_error(WARN_NO_MASTER_INFO, MYF(0), (int) mi->connection_name.length,
mi->connection_name.str);
DBUG_RETURN(-1);
}
@@ -3018,12 +3016,6 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
if (init_master_info(mi,master_info_file_tmp,relay_log_info_file_tmp, 0,
thread_mask))
slave_errno=ER_MASTER_INFO;
- else if (!server_id_supplied)
- {
- slave_errno= ER_BAD_SLAVE; net_report= 0;
- my_message(slave_errno, "Misconfigured slave: server_id was not set; Fix in config file",
- MYF(0));
- }
else if (!*mi->host)
{
slave_errno= ER_BAD_SLAVE; net_report= 0;
@@ -3242,7 +3234,7 @@ int reset_slave(THD *thd, Master_info* mi)
{
/* connection was deleted while we waited for lock_slave_threads */
mi->unlock_slave_threads();
- my_error(WARN_NO_MASTER_INFO, mi->connection_name.length,
+ my_error(WARN_NO_MASTER_INFO, MYF(0), (int) mi->connection_name.length,
mi->connection_name.str);
DBUG_RETURN(-1);
}
@@ -3275,6 +3267,7 @@ int reset_slave(THD *thd, Master_info* mi)
mi->clear_error();
mi->rli.clear_error();
mi->rli.clear_until_condition();
+ mi->rli.clear_sql_delay();
mi->rli.slave_skip_counter= 0;
// close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0
@@ -3336,10 +3329,8 @@ err:
SYNOPSIS
kill_zombie_dump_threads()
slave_server_id the slave's server id
-
*/
-
void kill_zombie_dump_threads(uint32 slave_server_id)
{
mysql_mutex_lock(&LOCK_thread_count);
@@ -3363,7 +3354,7 @@ void kill_zombie_dump_threads(uint32 slave_server_id)
it will be slow because it will iterate through the list
again. We just to do kill the thread ourselves.
*/
- tmp->awake(KILL_QUERY);
+ tmp->awake(KILL_SLAVE_SAME_ID);
mysql_mutex_unlock(&tmp->LOCK_thd_data);
}
}
@@ -3386,7 +3377,8 @@ static bool get_string_parameter(char *to, const char *from, size_t length,
uint from_numchars= cs->cset->numchars(cs, from, from + from_length);
if (from_numchars > length / cs->mbmaxlen)
{
- my_error(ER_WRONG_STRING_LENGTH, MYF(0), from, name, length / cs->mbmaxlen);
+ my_error(ER_WRONG_STRING_LENGTH, MYF(0), from, name,
+ (int) (length / cs->mbmaxlen));
return 1;
}
memcpy(to, from, from_length+1);
@@ -3455,7 +3447,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
{
/* connection was deleted while we waited for lock_slave_threads */
mi->unlock_slave_threads();
- my_error(WARN_NO_MASTER_INFO, mi->connection_name.length,
+ my_error(WARN_NO_MASTER_INFO, MYF(0), (int) mi->connection_name.length,
mi->connection_name.str);
DBUG_RETURN(TRUE);
}
@@ -3596,6 +3588,9 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
if (lex_mi->ssl != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::LEX_MI_ENABLE);
+ if (lex_mi->sql_delay != -1)
+ mi->rli.set_sql_delay(lex_mi->sql_delay);
+
if (lex_mi->ssl_verify_server_cert != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
mi->ssl_verify_server_cert=
(lex_mi->ssl_verify_server_cert == LEX_MASTER_INFO::LEX_MI_ENABLE);
@@ -3780,7 +3775,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
in-memory value at restart (thus causing errors, as the old relay log does
not exist anymore).
*/
- if (flush_relay_log_info(&mi->rli))
+ if (mi->rli.flush())
ret= 1;
mysql_cond_broadcast(&mi->data_cond);
mysql_mutex_unlock(&mi->rli.data_lock);
@@ -3909,10 +3904,7 @@ bool mysql_show_binlog_events(THD* thd)
goto err;
}
- /* These locks is here to enable syncronization with log_in_use() */
- mysql_mutex_lock(&LOCK_thread_count);
- thd->current_linfo = &linfo;
- mysql_mutex_unlock(&LOCK_thread_count);
+ thd->current_linfo= &linfo;
if ((file=open_binlog(&log, linfo.log_file_name, &errmsg)) < 0)
goto err;
@@ -4044,10 +4036,7 @@ err:
else
my_eof(thd);
- /* These locks is here to enable syncronization with log_in_use() */
- mysql_mutex_lock(&LOCK_thread_count);
- thd->current_linfo= 0;
- mysql_mutex_unlock(&LOCK_thread_count);
+ thd->reset_current_linfo();
thd->variables.max_allowed_packet= old_max_allowed_packet;
DBUG_RETURN(ret);
}
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 37acff3141f..8d9a127bca7 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -36,7 +36,6 @@ struct slave_connection_state;
extern my_bool opt_show_slave_auth_info;
extern char *master_host, *master_info_file;
-extern bool server_id_supplied;
extern int max_binlog_dump_events;
extern my_bool opt_sporadic_binlog_dump_fail;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 62365f48404..3c1cea6be51 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2016 Oracle and/or its affiliates.
- Copyright (c) 2009, 2018 MariaDB Corporation
+ Copyright (c) 2009, 2019 MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -53,6 +53,8 @@
#include "log_slow.h"
#include "sql_derived.h"
#include "sql_statistics.h"
+#include "sql_cte.h"
+#include "sql_window.h"
#include "debug_sync.h" // DEBUG_SYNC
#include <m_ctype.h>
@@ -115,14 +117,7 @@ static int join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2);
static int join_tab_cmp_straight(const void *dummy, const void* ptr1, const void* ptr2);
static int join_tab_cmp_embedded_first(const void *emb, const void* ptr1, const void *ptr2);
C_MODE_END
-/*
- TODO: 'find_best' is here only temporarily until 'greedy_search' is
- tested and approved.
-*/
-static bool find_best(JOIN *join,table_map rest_tables,uint index,
- double record_count,double read_time, uint use_cond_selectivity);
static uint cache_record_length(JOIN *join,uint index);
-bool get_best_combination(JOIN *join);
static store_key *get_store_key(THD *thd,
KEYUSE *keyuse, table_map used_tables,
KEY_PART_INFO *key_part, uchar *key_buff,
@@ -169,8 +164,7 @@ static COND *optimize_cond(JOIN *join, COND *conds,
COND_EQUAL **cond_equal,
int flags= 0);
bool const_expression_in_where(COND *conds,Item *item, Item **comp_item);
-static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table,
- Procedure *proc);
+static int do_select(JOIN *join, Procedure *procedure);
static enum_nested_loop_state evaluate_join_record(JOIN *, JOIN_TAB *, int);
static enum_nested_loop_state
@@ -184,7 +178,6 @@ end_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
static enum_nested_loop_state
end_unique_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
-static int test_if_group_changed(List<Cached_item> &list);
static int join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos);
static int join_read_system(JOIN_TAB *tab);
static int join_read_const(JOIN_TAB *tab);
@@ -237,11 +230,7 @@ static bool list_contains_unique_index(TABLE *table,
bool (*find_func) (Field *, void *), void *data);
static bool find_field_in_item_list (Field *field, void *data);
static bool find_field_in_order_list (Field *field, void *data);
-static int create_sort_index(THD *thd, JOIN *join, ORDER *order,
- ha_rows filesort_limit, ha_rows select_limit,
- bool is_order_by);
-static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
- Item *having);
+int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
Item *having);
static int remove_dup_with_hash_index(THD *thd,TABLE *table,
@@ -250,7 +239,7 @@ static int remove_dup_with_hash_index(THD *thd,TABLE *table,
static bool cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref);
static bool setup_new_fields(THD *thd, List<Item> &fields,
List<Item> &all_fields, ORDER *new_order);
-static ORDER *create_distinct_group(THD *thd, Item **ref_pointer_array,
+static ORDER *create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array,
ORDER *order, List<Item> &fields,
List<Item> &all_fields,
bool *all_order_by_fields_used);
@@ -261,12 +250,12 @@ static void calc_group_buffer(JOIN *join,ORDER *group);
static bool make_group_fields(JOIN *main_join, JOIN *curr_join);
static bool alloc_group_fields(JOIN *join,ORDER *group);
// Create list for using with tempory table
-static bool change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
+static bool change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &new_list1,
List<Item> &new_list2,
uint elements, List<Item> &items);
// Create list for using with tempory table
-static bool change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array,
+static bool change_refs_to_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &new_list1,
List<Item> &new_list2,
uint elements, List<Item> &items);
@@ -291,14 +280,12 @@ JOIN_TAB *next_depth_first_tab(JOIN* join, JOIN_TAB* tab);
static JOIN_TAB *next_breadth_first_tab(JOIN_TAB *first_top_tab,
uint n_top_tabs_count, JOIN_TAB *tab);
-static bool
-find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
- ORDER *order, List<Item> &fields, List<Item> &all_fields,
- bool is_group_field, bool add_to_all_fields);
+static bool find_order_in_list(THD *, Ref_ptr_array, TABLE_LIST *, ORDER *,
+ List<Item> &, List<Item> &, bool, bool, bool);
static double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
table_map rem_tables);
-
+void set_postjoin_aggr_write_func(JOIN_TAB *tab);
#ifndef DBUG_OFF
/*
@@ -347,7 +334,8 @@ bool dbug_user_var_equals_int(THD *thd, const char *name, int value)
}
return FALSE;
}
-#endif
+#endif
+
/**
This handles SELECT with and without UNION.
@@ -357,7 +345,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
ulong setup_tables_done_option)
{
bool res;
- register SELECT_LEX *select_lex = &lex->select_lex;
+ SELECT_LEX *select_lex = &lex->select_lex;
DBUG_ENTER("handle_select");
MYSQL_SELECT_START(thd->query());
@@ -373,7 +361,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
every PS/SP execution new, we will not need reset this flag if
setup_tables_done_option changed for next rexecution
*/
- res= mysql_select(thd, &select_lex->ref_pointer_array,
+ res= mysql_select(thd,
select_lex->table_list.first,
select_lex->with_wild, select_lex->item_list,
select_lex->where,
@@ -464,7 +452,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
bool
fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
- Item **ref_pointer_array)
+ Ref_ptr_array ref_pointer_array)
{
Item_outer_ref *ref;
@@ -477,8 +465,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
List_iterator_fast <Item_outer_ref> ref_it(select->inner_refs_list);
for (ORDER *group= select->join->group_list; group; group= group->next)
{
- (*group->item)->walk(&Item::check_inner_refs_processor,
- TRUE, (uchar *) &ref_it);
+ (*group->item)->walk(&Item::check_inner_refs_processor, TRUE, &ref_it);
}
while ((ref= ref_it++))
@@ -493,10 +480,9 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
existing one. The change will lead to less operations for copying fields,
smaller temporary tables and less data passed through filesort.
*/
- if (ref_pointer_array && !ref->found_in_select_list)
+ if (!ref_pointer_array.is_null() && !ref->found_in_select_list)
{
int el= all_fields.elements;
- DBUG_ASSERT(all_fields.elements <= select->ref_pointer_array_size);
ref_pointer_array[el]= item;
/* Add the field item to the select list of the current select. */
all_fields.push_front(item, thd->mem_root);
@@ -504,7 +490,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
If it's needed reset each Item_ref item that refers this field with
a new reference taken from ref_pointer_array.
*/
- item_ref= ref_pointer_array + el;
+ item_ref= &ref_pointer_array[el];
}
if (ref->in_sum_func)
@@ -542,6 +528,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
if (!ref->fixed && ref->fix_fields(thd, 0))
return TRUE;
thd->lex->used_tables|= item->used_tables();
+ thd->lex->current_select->select_list_tables|= item->used_tables();
}
return false;
}
@@ -624,22 +611,26 @@ void remove_redundant_subquery_clauses(st_select_lex *subq_select_lex)
/**
Function to setup clauses without sum functions.
*/
-inline int setup_without_group(THD *thd, Item **ref_pointer_array,
- TABLE_LIST *tables,
- List<TABLE_LIST> &leaves,
- List<Item> &fields,
- List<Item> &all_fields,
- COND **conds,
- ORDER *order,
- ORDER *group,
- bool *hidden_group_fields,
- uint *reserved)
+static inline int
+setup_without_group(THD *thd, Ref_ptr_array ref_pointer_array,
+ TABLE_LIST *tables,
+ List<TABLE_LIST> &leaves,
+ List<Item> &fields,
+ List<Item> &all_fields,
+ COND **conds,
+ ORDER *order,
+ ORDER *group,
+ List<Window_spec> &win_specs,
+ List<Item_window_func> &win_funcs,
+ bool *hidden_group_fields,
+ uint *reserved)
{
int res;
+ enum_parsing_place save_place;
st_select_lex *const select= thd->lex->current_select;
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
/*
- Need to save the value, so we can turn off only any new non_agg_field_used
+ Need to save the value, so we can turn off only any new non_agg_field_used
additions coming from the WHERE
*/
const bool saved_non_agg_field_used= select->non_agg_field_used();
@@ -659,11 +650,19 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array,
select->set_non_agg_field_used(saved_non_agg_field_used);
thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level;
+
+ save_place= thd->lex->current_select->context_analysis_place;
+ thd->lex->current_select->context_analysis_place= IN_ORDER_BY;
res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields,
order);
thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level);
+ thd->lex->current_select->context_analysis_place= IN_GROUP_BY;
res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields,
group, hidden_group_fields);
+ thd->lex->current_select->context_analysis_place= save_place;
+ thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level;
+ res= res || setup_windows(thd, ref_pointer_array, tables, fields, all_fields,
+ win_specs, win_funcs);
thd->lex->allow_sum_func= save_allow_sum_func;
DBUG_RETURN(res);
}
@@ -687,8 +686,7 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array,
0 on success
*/
int
-JOIN::prepare(Item ***rref_pointer_array,
- TABLE_LIST *tables_init,
+JOIN::prepare(TABLE_LIST *tables_init,
uint wild_num, COND *conds_init, uint og_num,
ORDER *order_init, bool skip_order_by,
ORDER *group_init, Item *having_init,
@@ -718,6 +716,7 @@ JOIN::prepare(Item ***rref_pointer_array,
if (select_lex->handle_derived(thd->lex, DT_PREPARE))
DBUG_RETURN(1);
+ thd->lex->current_select->context_analysis_place= NO_MATTER;
thd->lex->current_select->is_item_list_lookup= 1;
/*
If we have already executed SELECT, then it have not sense to prevent
@@ -800,32 +799,47 @@ JOIN::prepare(Item ***rref_pointer_array,
select_lex != select_lex->master_unit()->global_parameters())
real_og_num+= select_lex->order_list.elements;
- if ((wild_num && setup_wild(thd, tables_list, fields_list, &all_fields,
- wild_num)) ||
- select_lex->setup_ref_array(thd, real_og_num) ||
- setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ,
- &all_fields, &select_lex->pre_fix, 1) ||
- setup_without_group(thd, (*rref_pointer_array), tables_list,
- select_lex->leaf_tables, fields_list,
- all_fields, &conds, order, group_list,
- &hidden_group_fields, &select_lex->select_n_reserved))
- DBUG_RETURN(-1); /* purecov: inspected */
-
- ref_pointer_array= *rref_pointer_array;
+ DBUG_ASSERT(select_lex->hidden_bit_fields == 0);
+ if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num,
+ &select_lex->hidden_bit_fields))
+ DBUG_RETURN(-1);
+ if (select_lex->setup_ref_array(thd, real_og_num))
+ DBUG_RETURN(-1);
+ ref_ptrs= ref_ptr_array_slice(0);
+
+ enum_parsing_place save_place=
+ thd->lex->current_select->context_analysis_place;
+ thd->lex->current_select->context_analysis_place= SELECT_LIST;
+ if (setup_fields(thd, ref_ptrs, fields_list, MARK_COLUMNS_READ,
+ &all_fields, &select_lex->pre_fix, 1))
+ DBUG_RETURN(-1);
+ thd->lex->current_select->context_analysis_place= save_place;
+
+ if (setup_without_group(thd, ref_ptrs, tables_list,
+ select_lex->leaf_tables, fields_list,
+ all_fields, &conds, order, group_list,
+ select_lex->window_specs,
+ select_lex->window_funcs,
+ &hidden_group_fields,
+ &select_lex->select_n_reserved))
+ DBUG_RETURN(-1);
/* Resolve the ORDER BY that was skipped, then remove it. */
if (skip_order_by && select_lex !=
select_lex->master_unit()->global_parameters())
{
+ nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
+ thd->lex->allow_sum_func|= (nesting_map)1 << select_lex->nest_level;
thd->where= "order clause";
for (ORDER *order= select_lex->order_list.first; order; order= order->next)
{
/* Don't add the order items to all fields. Just resolve them to ensure
the query is valid, we'll drop them immediately after. */
- if (find_order_in_list(thd, *rref_pointer_array, tables_list, order,
- fields_list, all_fields, false, false))
+ if (find_order_in_list(thd, ref_ptrs, tables_list, order,
+ fields_list, all_fields, false, false, false))
DBUG_RETURN(-1);
}
+ thd->lex->allow_sum_func= save_allow_sum_func;
select_lex->order_list.empty();
}
@@ -851,7 +865,41 @@ JOIN::prepare(Item ***rref_pointer_array,
if (having_fix_rc || thd->is_error())
DBUG_RETURN(-1); /* purecov: inspected */
thd->lex->allow_sum_func= save_allow_sum_func;
+
+ if (having->with_window_func)
+ {
+ my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0));
+ DBUG_RETURN(-1);
+ }
}
+
+ /*
+ After setting up window functions, we may have discovered additional
+ used tables from the PARTITION BY and ORDER BY list. Update all items
+ that contain window functions.
+ */
+ if (select_lex->have_window_funcs())
+ {
+ List_iterator_fast<Item> it(select_lex->item_list);
+ Item *item;
+ while ((item= it++))
+ {
+ if (item->with_window_func)
+ item->update_used_tables();
+ }
+ }
+
+ With_clause *with_clause=select_lex->get_with_clause();
+ if (with_clause && with_clause->prepare_unreferenced_elements(thd))
+ DBUG_RETURN(1);
+
+ With_element *with_elem= select_lex->get_with_element();
+ if (with_elem &&
+ select_lex->check_unrestricted_recursive(
+ thd->variables.only_standard_compliant_cte))
+ DBUG_RETURN(-1);
+ if (!(select_lex->changed_elements & TOUCHED_SEL_COND))
+ select_lex->check_subqueries_with_recursive_references();
int res= check_and_do_in_subquery_rewrites(this);
@@ -886,14 +934,14 @@ JOIN::prepare(Item ***rref_pointer_array,
real_order= TRUE;
if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM)
- item->split_sum_func(thd, ref_pointer_array, all_fields, 0);
+ item->split_sum_func(thd, ref_ptrs, all_fields, 0);
}
if (!real_order)
order= NULL;
}
if (having && having->with_sum_func)
- having->split_sum_func2(thd, ref_pointer_array, all_fields,
+ having->split_sum_func2(thd, ref_ptrs, all_fields,
&having, SPLIT_SUM_SKIP_REGISTERED);
if (select_lex->inner_sum_func_list)
{
@@ -902,13 +950,13 @@ JOIN::prepare(Item ***rref_pointer_array,
do
{
item_sum= item_sum->next;
- item_sum->split_sum_func2(thd, ref_pointer_array,
+ item_sum->split_sum_func2(thd, ref_ptrs,
all_fields, item_sum->ref_by, 0);
} while (item_sum != end);
}
if (select_lex->inner_refs_list.elements &&
- fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array))
+ fix_inner_refs(thd, all_fields, select_lex, ref_ptrs))
DBUG_RETURN(-1);
if (group_list)
@@ -926,10 +974,9 @@ JOIN::prepare(Item ***rref_pointer_array,
{
Item_field *field= new (thd->mem_root) Item_field(thd, *(Item_field**)ord->item);
int el= all_fields.elements;
- DBUG_ASSERT(all_fields.elements <= select_lex->ref_pointer_array_size);
- ref_pointer_array[el]= field;
+ ref_ptrs[el]= field;
all_fields.push_front(field, thd->mem_root);
- ord->item= ref_pointer_array + el;
+ ord->item= &ref_ptrs[el];
}
}
}
@@ -982,6 +1029,12 @@ JOIN::prepare(Item ***rref_pointer_array,
}
if (thd->lex->derived_tables)
{
+ /*
+ Queries with derived tables and PROCEDURE are not allowed.
+ Many of such queries are disallowed grammatically, but there
+ are still some complex cases:
+ SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE()
+ */
my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE",
thd->lex->derived_tables & DERIVED_VIEW ?
"view" : "subquery");
@@ -989,6 +1042,7 @@ JOIN::prepare(Item ***rref_pointer_array,
}
if (thd->lex->sql_command != SQLCOM_SELECT)
{
+ // EXPLAIN SELECT * FROM t1 PROCEDURE ANALYSE()
my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "non-SELECT");
goto err;
}
@@ -1025,11 +1079,14 @@ bool JOIN::prepare_stage2()
/* Init join struct */
count_field_types(select_lex, &tmp_table_param, all_fields, 0);
- ref_pointer_array_size= all_fields.elements*sizeof(Item*);
this->group= group_list != 0;
if (tmp_table_param.sum_func_count && !group_list)
+ {
implicit_grouping= TRUE;
+ // Result will contain zero or one row - ordering is meaningless
+ order= NULL;
+ }
#ifdef RESTRICTED_GROUP
if (implicit_grouping)
@@ -1060,16 +1117,75 @@ int JOIN::optimize()
{
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
have_query_plan= QEP_AVAILABLE;
+
+ /*
+ explain data must be created on the Explain_query::mem_root. Because it's
+ just a memroot, not an arena, explain data must not contain any Items
+ */
+ MEM_ROOT *old_mem_root= thd->mem_root;
+ Item *old_free_list __attribute__((unused))= thd->free_list;
+ thd->mem_root= thd->lex->explain->mem_root;
save_explain_data(thd->lex->explain, false /* can overwrite */,
need_tmp,
!skip_sort_order && !no_order && (order || group_list),
select_distinct);
+ thd->mem_root= old_mem_root;
+ DBUG_ASSERT(thd->free_list == old_free_list); // no Items were created
+
+ uint select_nr= select_lex->select_number;
+ JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt();
+ for (uint i= 0; i < aggr_tables; i++, curr_tab++)
+ {
+ if (select_nr == INT_MAX)
+ {
+ /* this is a fake_select_lex of a union */
+ select_nr= select_lex->master_unit()->first_select()->select_number;
+ curr_tab->tracker= thd->lex->explain->get_union(select_nr)->
+ get_tmptable_read_tracker();
+ }
+ else
+ {
+ curr_tab->tracker= thd->lex->explain->get_select(select_nr)->
+ get_using_temporary_read_tracker();
+ }
+ }
+
}
optimization_state= JOIN::OPTIMIZATION_DONE;
return res;
}
+int JOIN::init_join_caches()
+{
+ JOIN_TAB *tab;
+
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ tab;
+ tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
+ {
+ TABLE *table= tab->table;
+ if (table->file->keyread_enabled())
+ {
+ if (!(table->file->index_flags(table->file->keyread, 0, 1) & HA_CLUSTERED_INDEX))
+ table->mark_columns_used_by_index(table->file->keyread, table->read_set);
+ }
+ else if ((tab->read_first_record == join_read_first ||
+ tab->read_first_record == join_read_last) &&
+ !tab->filesort && table->covering_keys.is_set(tab->index) &&
+ !table->no_keyread)
+ {
+ table->prepare_for_keyread(tab->index, table->read_set);
+ }
+ if (tab->cache && tab->cache->init(select_options & SELECT_DESCRIBE))
+ revise_cache_usage(tab);
+ else
+ tab->remove_redundant_bnl_scan_conds();
+ }
+ return 0;
+}
+
+
/**
global select optimisation.
@@ -1089,7 +1205,6 @@ JOIN::optimize_inner()
uint no_jbuf_after;
JOIN_TAB *tab;
DBUG_ENTER("JOIN::optimize");
-
do_send_rows = (unit->select_limit_cnt) ? 1 : 0;
DEBUG_SYNC(thd, "before_join_optimize");
@@ -1099,9 +1214,11 @@ JOIN::optimize_inner()
set_allowed_join_cache_types();
need_distinct= TRUE;
- /* Run optimize phase for all derived tables/views used in this SELECT. */
- if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
- DBUG_RETURN(1);
+ /*
+ Needed in case optimizer short-cuts,
+ set properly in make_aggr_tables_info()
+ */
+ fields= &select_lex->item_list;
if (select_lex->first_cond_optimization)
{
@@ -1120,7 +1237,7 @@ JOIN::optimize_inner()
conversion happened (which done in the same way.
*/
if (select_lex->first_cond_optimization &&
- conds && conds->walk(&Item::exists2in_processor, 0, (uchar *)thd))
+ conds && conds->walk(&Item::exists2in_processor, 0, thd))
DBUG_RETURN(1);
/*
TODO
@@ -1128,7 +1245,7 @@ JOIN::optimize_inner()
for (TABLE_LIST *tbl= tables_list; tbl; tbl= tbl->next_local)
{
if (tbl->on_expr &&
- tbl->on_expr->walk(&Item::exists2in_processor, 0, (uchar *)thd))
+ tbl->on_expr->walk(&Item::exists2in_processor, 0, thd))
DBUG_RETURN(1);
}
*/
@@ -1188,7 +1305,11 @@ JOIN::optimize_inner()
/*
The following code will allocate the new items in a permanent
MEMROOT for prepared statements and stored procedures.
+
+ But first we need to ensure that thd->lex->explain is allocated
+ in the execution arena
*/
+ create_explain_query_if_not_exists(thd->lex, thd->mem_root);
Query_arena *arena, backup;
arena= thd->activate_stmt_arena_if_needed(&backup);
@@ -1197,8 +1318,12 @@ JOIN::optimize_inner()
/* Convert all outer joins to inner joins if possible */
conds= simplify_joins(this, join_list, conds, TRUE, FALSE);
- if (select_lex->save_leaf_tables(thd))
+ if (thd->is_error() || select_lex->save_leaf_tables(thd))
+ {
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
DBUG_RETURN(1);
+ }
build_bitmap_for_nested_joins(join_list, 0);
sel->prep_where= conds ? conds->copy_andor_structure(thd) : 0;
@@ -1214,17 +1339,37 @@ JOIN::optimize_inner()
if (conds && conds->has_subquery())
(void) conds->walk(&Item::cleanup_is_expensive_cache_processor,
- 0, (uchar*)0);
+ 0, (void *) 0);
if (having && having->has_subquery())
(void) having->walk(&Item::cleanup_is_expensive_cache_processor,
- 0, (uchar*)0);
+ 0, (void *) 0);
if (setup_jtbm_semi_joins(this, join_list, &conds))
DBUG_RETURN(1);
+ if (select_lex->cond_pushed_into_where)
+ {
+ conds= and_conds(thd, conds, select_lex->cond_pushed_into_where);
+ if (conds && conds->fix_fields(thd, &conds))
+ DBUG_RETURN(1);
+ }
+ if (select_lex->cond_pushed_into_having)
+ {
+ having= and_conds(thd, having, select_lex->cond_pushed_into_having);
+ if (having)
+ {
+ select_lex->having_fix_field= 1;
+ select_lex->having_fix_field_for_pushed_cond= 1;
+ if (having->fix_fields(thd, &having))
+ DBUG_RETURN(1);
+ select_lex->having_fix_field= 0;
+ select_lex->having_fix_field_for_pushed_cond= 0;
+ }
+ }
+
conds= optimize_cond(this, conds, join_list, FALSE,
&cond_value, &cond_equal, OPT_LINK_EQUAL_FIELDS);
-
+
if (thd->is_error())
{
error= 1;
@@ -1232,6 +1377,39 @@ JOIN::optimize_inner()
DBUG_RETURN(1);
}
+ if (optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED))
+ {
+ TABLE_LIST *tbl;
+ List_iterator_fast<TABLE_LIST> li(select_lex->leaf_tables);
+ while ((tbl= li++))
+ {
+ /*
+ Condition pushdown from WHERE is attempted only for
+ materialized derived tables and views.
+ */
+ if (tbl->is_materialized_derived())
+ {
+ /*
+ Do not push conditions from where into materialized inner tables
+ of outer joins: this is not valid.
+ */
+ if (!tbl->is_inner_table_of_outer_join())
+ {
+ if (pushdown_cond_for_derived(thd, conds, tbl))
+ DBUG_RETURN(1);
+ }
+ if (mysql_handle_single_derived(thd->lex, tbl, DT_OPTIMIZE))
+ DBUG_RETURN(1);
+ }
+ }
+ }
+ else
+ {
+ /* Run optimize phase for all derived tables/views used in this SELECT. */
+ if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
+ DBUG_RETURN(1);
+ }
+
{
having= optimize_cond(this, having, join_list, TRUE,
&having_value, &having_equal);
@@ -1335,7 +1513,8 @@ JOIN::optimize_inner()
}
DBUG_PRINT("info",("Select tables optimized away"));
- zero_result_cause= "Select tables optimized away";
+ if (!select_lex->have_window_funcs())
+ zero_result_cause= "Select tables optimized away";
tables_list= 0; // All tables resolved
select_lex->min_max_opt_list.empty();
const_tables= top_join_tab_count= table_count;
@@ -1377,7 +1556,6 @@ JOIN::optimize_inner()
calling make_join_statistics() as this may call get_best_group_min_max()
which needs a simplfied group_list.
*/
- simple_group= 1;
if (group_list && table_count == 1)
{
group_list= remove_const(this, group_list, conds,
@@ -1400,6 +1578,7 @@ JOIN::optimize_inner()
/* Calculate how to do the join */
THD_STAGE_INFO(thd, stage_statistics);
+ result->prepare_to_read_rows();
if (make_join_statistics(this, select_lex->leaf_tables, &keyuse) ||
thd->is_fatal_error)
{
@@ -1465,7 +1644,7 @@ JOIN::optimize_inner()
}
select= make_select(*table, const_table_map,
- const_table_map, conds, 1, &error);
+ const_table_map, conds, (SORT_INFO*) 0, 1, &error);
if (error)
{ /* purecov: inspected */
error= -1; /* purecov: inspected */
@@ -1642,7 +1821,8 @@ JOIN::optimize_inner()
(!join_tab[const_tables].select ||
!join_tab[const_tables].select->quick ||
join_tab[const_tables].select->quick->get_type() !=
- QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX))
+ QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) &&
+ !select_lex->have_window_funcs())
{
if (group && rollup.state == ROLLUP::STATE_NONE &&
list_contains_unique_index(join_tab[const_tables].table,
@@ -1693,11 +1873,13 @@ JOIN::optimize_inner()
}
if (group || tmp_table_param.sum_func_count)
{
- if (! hidden_group_fields && rollup.state == ROLLUP::STATE_NONE)
+ if (! hidden_group_fields && rollup.state == ROLLUP::STATE_NONE
+ && !select_lex->have_window_funcs())
select_distinct=0;
}
else if (select_distinct && table_count - const_tables == 1 &&
- rollup.state == ROLLUP::STATE_NONE)
+ rollup.state == ROLLUP::STATE_NONE &&
+ !select_lex->have_window_funcs())
{
/*
We are only using one table. In this case we change DISTINCT to a
@@ -1719,16 +1901,20 @@ JOIN::optimize_inner()
tab= &join_tab[const_tables];
if (order)
{
- skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1,
- &tab->table->keys_in_use_for_order_by);
+ skip_sort_order=
+ test_if_skip_sort_order(tab, order, select_limit,
+ true, // no_changes
+ &tab->table->keys_in_use_for_order_by);
}
if ((group_list=create_distinct_group(thd, select_lex->ref_pointer_array,
order, fields_list, all_fields,
&all_order_fields_used)))
{
- bool skip_group= (skip_sort_order &&
- test_if_skip_sort_order(tab, group_list, select_limit, 1,
- &tab->table->keys_in_use_for_group_by) != 0);
+ const bool skip_group=
+ skip_sort_order &&
+ test_if_skip_sort_order(tab, group_list, select_limit,
+ true, // no_changes
+ &tab->table->keys_in_use_for_group_by);
count_field_types(select_lex, &tmp_table_param, all_fields, 0);
if ((skip_group && all_order_fields_used) ||
select_limit == HA_POS_ERROR ||
@@ -1757,6 +1943,7 @@ JOIN::optimize_inner()
else if (thd->is_fatal_error) // End of memory
DBUG_RETURN(1);
}
+ simple_group= rollup.state == ROLLUP::STATE_NONE;
if (group)
{
/*
@@ -1780,6 +1967,7 @@ JOIN::optimize_inner()
group_optimized_away= 1;
}
}
+
calc_group_buffer(this, group_list);
send_group_parts= tmp_table_param.group_parts; /* Save org parts */
if (procedure && procedure->group)
@@ -1821,6 +2009,11 @@ JOIN::optimize_inner()
}
need_tmp= test_if_need_tmp_table();
+ //TODO this could probably go in test_if_need_tmp_table.
+ if (this->select_lex->window_specs.elements > 0) {
+ need_tmp= TRUE;
+ simple_order= FALSE;
+ }
/*
If the hint FORCE INDEX FOR ORDER BY/GROUP BY is used for the table
@@ -1844,6 +2037,32 @@ JOIN::optimize_inner()
if (!(select_options & SELECT_DESCRIBE))
init_ftfuncs(thd, select_lex, MY_TEST(order));
+ /*
+ It's necessary to check const part of HAVING cond as
+ there is a chance that some cond parts may become
+ const items after make_join_statistics(for example
+ when Item is a reference to const table field from
+ outer join).
+ This check is performed only for those conditions
+ which do not use aggregate functions. In such case
+ temporary table may not be used and const condition
+ elements may be lost during further having
+ condition transformation in JOIN::exec.
+ */
+ if (having && const_table_map && !having->with_sum_func)
+ {
+ having->update_used_tables();
+ having= having->remove_eq_conds(thd, &select_lex->having_value, true);
+ if (select_lex->having_value == Item::COND_FALSE)
+ {
+ having= new (thd->mem_root) Item_int(thd, (longlong) 0,1);
+ zero_result_cause= "Impossible HAVING noticed after reading const tables";
+ error= 0;
+ select_lex->mark_const_derived(zero_result_cause);
+ goto setup_subq_exit;
+ }
+ }
+
if (optimize_unflattened_subqueries())
DBUG_RETURN(1);
@@ -1870,8 +2089,28 @@ JOIN::optimize_inner()
DBUG_EXECUTE("info",TEST_join(this););
- if (const_tables != table_count)
+ if (!only_const_tables())
{
+ JOIN_TAB *tab= &join_tab[const_tables];
+
+ if (order)
+ {
+ /*
+ Force using of tmp table if sorting by a SP or UDF function due to
+ their expensive and probably non-deterministic nature.
+ */
+ for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next)
+ {
+ Item *item= *tmp_order->item;
+ if (item->is_expensive())
+ {
+ /* Force tmp table without sort */
+ need_tmp=1; simple_order=simple_group=0;
+ break;
+ }
+ }
+ }
+
/*
Because filesort always does a full table scan or a quick range scan
we must add the removed reference to the select for the table.
@@ -1879,72 +2118,300 @@ JOIN::optimize_inner()
as in other cases the join is done before the sort.
*/
if ((order || group_list) &&
- join_tab[const_tables].type != JT_ALL &&
- join_tab[const_tables].type != JT_FT &&
- join_tab[const_tables].type != JT_REF_OR_NULL &&
+ tab->type != JT_ALL &&
+ tab->type != JT_FT &&
+ tab->type != JT_REF_OR_NULL &&
((order && simple_order) || (group_list && simple_group)))
{
- if (add_ref_to_table_cond(thd,&join_tab[const_tables])) {
+ if (add_ref_to_table_cond(thd,tab)) {
DBUG_RETURN(1);
}
}
/*
- Calculate a possible 'limit' of table rows for 'GROUP BY': 'need_tmp'
- implies that there will be more postprocessing so the specified
- 'limit' should not be enforced yet in the call to
- 'test_if_skip_sort_order'.
+ Investigate whether we may use an ordered index as part of either
+ DISTINCT, GROUP BY or ORDER BY execution. An ordered index may be
+ used for only the first of any of these terms to be executed. This
+ is reflected in the order which we check for test_if_skip_sort_order()
+ below. However we do not check for DISTINCT here, as it would have
+ been transformed to a GROUP BY at this stage if it is a candidate for
+ ordered index optimization.
+ If a decision was made to use an ordered index, the availability
+ of such an access path is stored in 'ordered_index_usage' for later
+ use by 'execute' or 'explain'
*/
- const ha_rows limit = need_tmp ? HA_POS_ERROR : unit->select_limit_cnt;
+ DBUG_ASSERT(ordered_index_usage == ordered_index_void);
- if (!(select_options & SELECT_BIG_RESULT) &&
- ((group_list &&
- (!simple_group ||
- !test_if_skip_sort_order(&join_tab[const_tables], group_list,
- limit, 0,
- &join_tab[const_tables].table->
- keys_in_use_for_group_by))) ||
- select_distinct) &&
- tmp_table_param.quick_group && !procedure)
- {
- need_tmp=1; simple_order=simple_group=0; // Force tmp table without sort
- }
- if (order)
+ if (group_list) // GROUP BY honoured first
+ // (DISTINCT was rewritten to GROUP BY if skippable)
{
/*
- Do we need a temporary table due to the ORDER BY not being equal to
- the GROUP BY? The call to test_if_skip_sort_order above tests for the
- GROUP BY clause only and hence is not valid in this case. So the
- estimated number of rows to be read from the first table is not valid.
- We clear it here so that it doesn't show up in EXPLAIN.
- */
- if (need_tmp && (select_options & SELECT_DESCRIBE) != 0)
- join_tab[const_tables].limit= 0;
- /*
- Force using of tmp table if sorting by a SP or UDF function due to
- their expensive and probably non-deterministic nature.
+ When there is SQL_BIG_RESULT do not sort using index for GROUP BY,
+ and thus force sorting on disk unless a group min-max optimization
+ is going to be used as it is applied now only for one table queries
+ with covering indexes.
*/
- for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next)
- {
- Item *item= *tmp_order->item;
- if (item->is_expensive())
+ if (!(select_options & SELECT_BIG_RESULT) ||
+ (tab->select &&
+ tab->select->quick &&
+ tab->select->quick->get_type() ==
+ QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX))
+ {
+ if (simple_group && // GROUP BY is possibly skippable
+ !select_distinct) // .. if not preceded by a DISTINCT
{
- /* Force tmp table without sort */
- need_tmp=1; simple_order=simple_group=0;
- break;
+ /*
+ Calculate a possible 'limit' of table rows for 'GROUP BY':
+ A specified 'LIMIT' is relative to the final resultset.
+ 'need_tmp' implies that there will be more postprocessing
+ so the specified 'limit' should not be enforced yet.
+ */
+ const ha_rows limit = need_tmp ? HA_POS_ERROR : select_limit;
+ if (test_if_skip_sort_order(tab, group_list, limit, false,
+ &tab->table->keys_in_use_for_group_by))
+ {
+ ordered_index_usage= ordered_index_group_by;
+ }
+ }
+
+ /*
+ If we are going to use semi-join LooseScan, it will depend
+ on the selected index scan to be used. If index is not used
+ for the GROUP BY, we risk that sorting is put on the LooseScan
+ table. In order to avoid this, force use of temporary table.
+ TODO: Explain the quick_group part of the test below.
+ */
+ if ((ordered_index_usage != ordered_index_group_by) &&
+ ((tmp_table_param.quick_group && !procedure) ||
+ (tab->emb_sj_nest &&
+ best_positions[const_tables].sj_strategy == SJ_OPT_LOOSE_SCAN)))
+ {
+ need_tmp=1;
+ simple_order= simple_group= false; // Force tmp table without sort
}
}
}
- }
+ else if (order && // ORDER BY wo/ preceding GROUP BY
+ (simple_order || skip_sort_order)) // which is possibly skippable
+ {
+ if (test_if_skip_sort_order(tab, order, select_limit, false,
+ &tab->table->keys_in_use_for_order_by))
+ {
+ ordered_index_usage= ordered_index_order_by;
+ }
+ }
+ }
+
+ if (having)
+ having_is_correlated= MY_TEST(having->used_tables() & OUTER_REF_TABLE_BIT);
+ tmp_having= having;
if ((select_lex->options & OPTION_SCHEMA_TABLE))
optimize_schema_tables_reads(this);
/*
+ The loose index scan access method guarantees that all grouping or
+ duplicate row elimination (for distinct) is already performed
+ during data retrieval, and that all MIN/MAX functions are already
+ computed for each group. Thus all MIN/MAX functions should be
+ treated as regular functions, and there is no need to perform
+ grouping in the main execution loop.
+ Notice that currently loose index scan is applicable only for
+ single table queries, thus it is sufficient to test only the first
+ join_tab element of the plan for its access method.
+ */
+ if (join_tab->is_using_loose_index_scan())
+ {
+ tmp_table_param.precomputed_group_by= TRUE;
+ if (join_tab->is_using_agg_loose_index_scan())
+ {
+ need_distinct= FALSE;
+ tmp_table_param.precomputed_group_by= FALSE;
+ }
+ }
+
+ if (make_aggr_tables_info())
+ DBUG_RETURN(1);
+
+ if (init_join_caches())
+ DBUG_RETURN(1);
+
+ error= 0;
+
+ if (select_options & SELECT_DESCRIBE)
+ goto derived_exit;
+
+ DBUG_RETURN(0);
+
+setup_subq_exit:
+ /* Choose an execution strategy for this JOIN. */
+ if (!tables_list || !table_count)
+ {
+ choose_tableless_subquery_plan();
+
+ /* The output has at most one row */
+ if (group_list)
+ {
+ group_list= NULL;
+ group_optimized_away= 1;
+ rollup.state= ROLLUP::STATE_NONE;
+ }
+ order= NULL;
+ simple_order= TRUE;
+ select_distinct= FALSE;
+
+ if (select_lex->have_window_funcs())
+ {
+ if (!(join_tab= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB))))
+ DBUG_RETURN(1);
+ need_tmp= 1;
+ }
+ if (make_aggr_tables_info())
+ DBUG_RETURN(1);
+ }
+ /*
+ Even with zero matching rows, subqueries in the HAVING clause may
+ need to be evaluated if there are aggregate functions in the query.
+ */
+ if (optimize_unflattened_subqueries())
+ DBUG_RETURN(1);
+ error= 0;
+
+derived_exit:
+
+ select_lex->mark_const_derived(zero_result_cause);
+ DBUG_RETURN(0);
+}
+
+/**
+ Add having condition as a where clause condition of the given temp table.
+
+ @param tab Table to which having condition is added.
+
+ @returns false if success, true if error.
+*/
+
+bool JOIN::add_having_as_table_cond(JOIN_TAB *tab)
+{
+ tmp_having->update_used_tables();
+ table_map used_tables= tab->table->map | OUTER_REF_TABLE_BIT;
+
+ /* If tmp table is not used then consider conditions of const table also */
+ if (!need_tmp)
+ used_tables|= const_table_map;
+
+ DBUG_ENTER("JOIN::add_having_as_table_cond");
+
+ Item* sort_table_cond= make_cond_for_table(thd, tmp_having, used_tables,
+ (table_map) 0, false,
+ false, false);
+ if (sort_table_cond)
+ {
+ if (!tab->select)
+ {
+ if (!(tab->select= new SQL_SELECT))
+ DBUG_RETURN(true);
+ tab->select->head= tab->table;
+ }
+ if (!tab->select->cond)
+ tab->select->cond= sort_table_cond;
+ else
+ {
+ if (!(tab->select->cond=
+ new (thd->mem_root) Item_cond_and(thd,
+ tab->select->cond,
+ sort_table_cond)))
+ DBUG_RETURN(true);
+ }
+ if (tab->pre_idx_push_select_cond)
+ {
+ if (sort_table_cond->type() == Item::COND_ITEM)
+ sort_table_cond= sort_table_cond->copy_andor_structure(thd);
+ if (!(tab->pre_idx_push_select_cond=
+ new (thd->mem_root) Item_cond_and(thd,
+ tab->pre_idx_push_select_cond,
+ sort_table_cond)))
+ DBUG_RETURN(true);
+ }
+ if (tab->select->cond && !tab->select->cond->fixed)
+ tab->select->cond->fix_fields(thd, 0);
+ if (tab->pre_idx_push_select_cond && !tab->pre_idx_push_select_cond->fixed)
+ tab->pre_idx_push_select_cond->fix_fields(thd, 0);
+ tab->select->pre_idx_push_select_cond= tab->pre_idx_push_select_cond;
+ tab->set_select_cond(tab->select->cond, __LINE__);
+ tab->select_cond->top_level_item();
+ DBUG_EXECUTE("where",print_where(tab->select->cond,
+ "select and having",
+ QT_ORDINARY););
+
+ having= make_cond_for_table(thd, tmp_having, ~ (table_map) 0,
+ ~used_tables, false, false, false);
+ DBUG_EXECUTE("where",
+ print_where(having, "having after sort", QT_ORDINARY););
+ }
+
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Set info for aggregation tables
+
+ @details
+ This function finalizes execution plan by taking following actions:
+ .) aggregation temporary tables are created, but not instantiated
+ (this is done during execution).
+ JOIN_TABs for aggregation tables are set appropriately
+ (see JOIN::create_postjoin_aggr_table).
+ .) prepare fields lists (fields, all_fields, ref_pointer_array slices) for
+ each required stage of execution. These fields lists are set for
+ working tables' tabs and for the tab of last table in the join.
+ .) info for sorting/grouping/dups removal is prepared and saved in
+ appropriate tabs. Here is an example:
+
+ @returns
+ false - Ok
+ true - Error
+*/
+
+bool JOIN::make_aggr_tables_info()
+{
+ List<Item> *curr_all_fields= &all_fields;
+ List<Item> *curr_fields_list= &fields_list;
+ JOIN_TAB *curr_tab= join_tab + const_tables;
+ TABLE *exec_tmp_table= NULL;
+ bool distinct= false;
+ bool keep_row_order= false;
+ bool is_having_added_as_table_cond= false;
+ DBUG_ENTER("JOIN::make_aggr_tables_info");
+
+ const bool has_group_by= this->group;
+
+ sort_and_group_aggr_tab= NULL;
+
+ if (group_optimized_away)
+ implicit_grouping= true;
+
+ bool implicit_grouping_with_window_funcs= implicit_grouping &&
+ select_lex->have_window_funcs();
+ bool implicit_grouping_without_tables= implicit_grouping &&
+ !tables_list;
+
+ /*
+ Setup last table to provide fields and all_fields lists to the next
+ node in the plan.
+ */
+ if (join_tab && top_join_tab_count && tables_list)
+ {
+ join_tab[top_join_tab_count - 1].fields= &fields_list;
+ join_tab[top_join_tab_count - 1].all_fields= &all_fields;
+ }
+
+ /*
All optimization is done. Check if we can use the storage engines
group by handler to evaluate the group by
*/
-
- if ((tmp_table_param.sum_func_count || group_list) && !procedure)
+ if (tables_list && (tmp_table_param.sum_func_count || group_list) &&
+ !procedure)
{
/*
At the moment we only support push down for queries where
@@ -1964,24 +2431,39 @@ JOIN::optimize_inner()
Query query= {&all_fields, select_distinct, tables_list, conds,
group_list, order ? order : group_list, having};
group_by_handler *gbh= ht->create_group_by(thd, &query);
+
if (gbh)
{
pushdown_query= new (thd->mem_root) Pushdown_query(select_lex, gbh);
-
/*
We must store rows in the tmp table if we need to do an ORDER BY
or DISTINCT and the storage handler can't handle it.
*/
need_tmp= query.order_by || query.group_by || query.distinct;
- tmp_table_param.hidden_field_count= (all_fields.elements -
- fields_list.elements);
- if (!(exec_tmp_table1=
- create_tmp_table(thd, &tmp_table_param, all_fields, 0,
- query.distinct, 1,
- select_options, HA_POS_ERROR, "",
- !need_tmp, query.order_by || query.group_by)))
+ distinct= query.distinct;
+ keep_row_order= query.order_by || query.group_by;
+
+ order= query.order_by;
+
+ aggr_tables++;
+ curr_tab= join_tab + exec_join_tab_cnt();
+ bzero((void*)curr_tab, sizeof(JOIN_TAB));
+ curr_tab->ref.key= -1;
+ curr_tab->join= this;
+
+ curr_tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param);
+ TABLE* table= create_tmp_table(thd, curr_tab->tmp_table_param,
+ all_fields,
+ NULL, query.distinct,
+ TRUE, select_options, HA_POS_ERROR,
+ "", !need_tmp,
+ query.order_by || query.group_by);
+ if (!table)
DBUG_RETURN(1);
+ curr_tab->aggr= new (thd->mem_root) AGGR_OP(curr_tab);
+ curr_tab->aggr->set_write_func(::end_send);
+ curr_tab->table= table;
/*
Setup reference fields, used by summary functions and group by fields,
to point to the temporary table.
@@ -1990,17 +2472,18 @@ JOIN::optimize_inner()
set_items_ref_array(items1).
*/
init_items_ref_array();
- items1= items0 + all_fields.elements;
+ items1= ref_ptr_array_slice(2);
+ //items1= items0 + all_fields.elements;
if (change_to_use_tmp_fields(thd, items1,
tmp_fields_list1, tmp_all_fields1,
fields_list.elements, all_fields))
DBUG_RETURN(1);
/* Give storage engine access to temporary table */
- gbh->table= exec_tmp_table1;
-
+ gbh->table= table;
pushdown_query->store_data_in_temp_table= need_tmp;
pushdown_query->having= having;
+
/*
Group by and having is calculated by the group_by handler.
Reset the group by and having
@@ -2017,21 +2500,19 @@ JOIN::optimize_inner()
tmp_table_param.field_count+= tmp_table_param.sum_func_count;
tmp_table_param.sum_func_count= 0;
- /* Remember information about the original join */
- original_join_tab= join_tab;
- original_table_count= table_count;
+ fields= curr_fields_list;
- /* Set up one join tab to get sorting to work */
- const_tables= 0;
- table_count= 1;
- join_tab= (JOIN_TAB*) thd->calloc(sizeof(JOIN_TAB));
- join_tab[0].table= exec_tmp_table1;
+ //todo: new:
+ curr_tab->ref_array= &items1;
+ curr_tab->all_fields= &tmp_all_fields1;
+ curr_tab->fields= &tmp_fields_list1;
DBUG_RETURN(thd->is_fatal_error);
}
}
}
+
/*
The loose index scan access method guarantees that all grouping or
duplicate row elimination (for distinct) is already performed
@@ -2043,193 +2524,574 @@ JOIN::optimize_inner()
single table queries, thus it is sufficient to test only the first
join_tab element of the plan for its access method.
*/
- if (join_tab->is_using_loose_index_scan())
+ if (join_tab && top_join_tab_count && tables_list &&
+ join_tab->is_using_loose_index_scan())
+ tmp_table_param.precomputed_group_by=
+ !join_tab->is_using_agg_loose_index_scan();
+
+ group_list_for_estimates= group_list;
+ /* Create a tmp table if distinct or if the sort is too complicated */
+ if (need_tmp)
{
- tmp_table_param.precomputed_group_by= TRUE;
- if (join_tab->is_using_agg_loose_index_scan())
+ aggr_tables++;
+ curr_tab= join_tab + exec_join_tab_cnt();
+ bzero((void*)curr_tab, sizeof(JOIN_TAB));
+ curr_tab->ref.key= -1;
+ if (only_const_tables())
+ first_select= sub_select_postjoin_aggr;
+
+ /*
+ Create temporary table on first execution of this join.
+ (Will be reused if this is a subquery that is executed several times.)
+ */
+ init_items_ref_array();
+
+ ORDER *tmp_group= (ORDER *) 0;
+ if (!simple_group && !procedure && !(test_flags & TEST_NO_KEY_GROUP))
+ tmp_group= group_list;
+
+ tmp_table_param.hidden_field_count=
+ all_fields.elements - fields_list.elements;
+
+ distinct= select_distinct && !group_list &&
+ !select_lex->have_window_funcs();
+ keep_row_order= false;
+ bool save_sum_fields= (group_list && simple_group) ||
+ implicit_grouping_with_window_funcs;
+ if (create_postjoin_aggr_table(curr_tab,
+ &all_fields, tmp_group,
+ save_sum_fields,
+ distinct, keep_row_order))
+ DBUG_RETURN(true);
+ exec_tmp_table= curr_tab->table;
+
+ if (exec_tmp_table->distinct)
+ optimize_distinct();
+
+ /* Change sum_fields reference to calculated fields in tmp_table */
+ items1= ref_ptr_array_slice(2);
+ if ((sort_and_group || curr_tab->table->group ||
+ tmp_table_param.precomputed_group_by) &&
+ !implicit_grouping_without_tables)
+ {
+ if (change_to_use_tmp_fields(thd, items1,
+ tmp_fields_list1, tmp_all_fields1,
+ fields_list.elements, all_fields))
+ DBUG_RETURN(true);
+ }
+ else
{
- need_distinct= FALSE;
- tmp_table_param.precomputed_group_by= FALSE;
+ if (change_refs_to_tmp_fields(thd, items1,
+ tmp_fields_list1, tmp_all_fields1,
+ fields_list.elements, all_fields))
+ DBUG_RETURN(true);
}
- }
+ curr_all_fields= &tmp_all_fields1;
+ curr_fields_list= &tmp_fields_list1;
+ // Need to set them now for correct group_fields setup, reset at the end.
+ set_items_ref_array(items1);
+ curr_tab->ref_array= &items1;
+ curr_tab->all_fields= &tmp_all_fields1;
+ curr_tab->fields= &tmp_fields_list1;
+ set_postjoin_aggr_write_func(curr_tab);
- error= 0;
+ /*
+ If having is not handled here, it will be checked before the row is sent
+ to the client.
+ */
+ if (tmp_having &&
+ (sort_and_group || (exec_tmp_table->distinct && !group_list) ||
+ select_lex->have_window_funcs()))
+ {
+ /*
+ If there is no select distinct and there are no window functions
+ then move the having to table conds of tmp table.
+ NOTE : We cannot apply having after distinct or window functions
+ If columns of having are not part of select distinct,
+ then distinct may remove rows which can satisfy having.
+ In the case of window functions we *must* make sure to not
+ store any rows which don't match HAVING within the temp table,
+ as rows will end up being used during their computation.
+ */
+ if (!select_distinct && !select_lex->have_window_funcs() &&
+ add_having_as_table_cond(curr_tab))
+ DBUG_RETURN(true);
+ is_having_added_as_table_cond= tmp_having != having;
- tmp_having= having;
- if (select_options & SELECT_DESCRIBE)
- goto derived_exit;
- having= 0;
+ /*
+ Having condition which we are not able to add as tmp table conds are
+ kept as before. And, this will be applied before storing the rows in
+ tmp table.
+ */
+ curr_tab->having= having;
+ having= NULL; // Already done
+ }
- DBUG_RETURN(0);
+ tmp_table_param.func_count= 0;
+ tmp_table_param.field_count+= tmp_table_param.func_count;
+ if (sort_and_group || curr_tab->table->group)
+ {
+ tmp_table_param.field_count+= tmp_table_param.sum_func_count;
+ tmp_table_param.sum_func_count= 0;
+ }
-setup_subq_exit:
- /* Choose an execution strategy for this JOIN. */
- if (!tables_list || !table_count)
- choose_tableless_subquery_plan();
- /*
- Even with zero matching rows, subqueries in the HAVING clause may
- need to be evaluated if there are aggregate functions in the query.
- */
- if (optimize_unflattened_subqueries())
- DBUG_RETURN(1);
- error= 0;
+ if (exec_tmp_table->group)
+ { // Already grouped
+ if (!order && !no_order && !skip_sort_order)
+ order= group_list; /* order by group */
+ group_list= NULL;
+ }
-derived_exit:
+ /*
+ If we have different sort & group then we must sort the data by group
+ and copy it to another tmp table
+ This code is also used if we are using distinct something
+ we haven't been able to store in the temporary table yet
+ like SEC_TO_TIME(SUM(...)).
+ */
+ if ((group_list &&
+ (!test_if_subpart(group_list, order) || select_distinct)) ||
+ (select_distinct && tmp_table_param.using_outer_summary_function))
+ { /* Must copy to another table */
+ DBUG_PRINT("info",("Creating group table"));
+
+ calc_group_buffer(this, group_list);
+ count_field_types(select_lex, &tmp_table_param, tmp_all_fields1,
+ select_distinct && !group_list);
+ tmp_table_param.hidden_field_count=
+ tmp_all_fields1.elements - tmp_fields_list1.elements;
+
+ curr_tab++;
+ aggr_tables++;
+ bzero((void*)curr_tab, sizeof(JOIN_TAB));
+ curr_tab->ref.key= -1;
- select_lex->mark_const_derived(zero_result_cause);
- DBUG_RETURN(0);
-}
+ /* group data to new table */
+ /*
+ If the access method is loose index scan then all MIN/MAX
+ functions are precomputed, and should be treated as regular
+ functions. See extended comment above.
+ */
+ if (join_tab->is_using_loose_index_scan())
+ tmp_table_param.precomputed_group_by= TRUE;
+ tmp_table_param.hidden_field_count=
+ curr_all_fields->elements - curr_fields_list->elements;
+ ORDER *dummy= NULL; //TODO can use table->group here also
-/**
- Create and initialize objects neeed for the execution of a query plan.
- Evaluate constant expressions not evaluated during optimization.
-*/
+ if (create_postjoin_aggr_table(curr_tab,
+ curr_all_fields, dummy, true,
+ distinct, keep_row_order))
+ DBUG_RETURN(true);
-int JOIN::init_execution()
-{
- DBUG_ENTER("JOIN::init_execution");
+ if (group_list)
+ {
+ if (!only_const_tables()) // No need to sort a single row
+ {
+ if (add_sorting_to_table(curr_tab - 1, group_list))
+ DBUG_RETURN(true);
+ }
- DBUG_ASSERT(optimization_state == JOIN::OPTIMIZATION_DONE);
- DBUG_ASSERT(!(select_options & SELECT_DESCRIBE));
- initialized= true;
+ if (make_group_fields(this, this))
+ DBUG_RETURN(true);
+ }
- /*
- Enable LIMIT ROWS EXAMINED during query execution if:
- (1) This JOIN is the outermost query (not a subquery or derived table)
- This ensures that the limit is enabled when actual execution begins,
- and not if a subquery is evaluated during optimization of the outer
- query.
- (2) This JOIN is not the result of a UNION. In this case do not apply the
- limit in order to produce the partial query result stored in the
- UNION temp table.
- */
- if (!select_lex->outer_select() && // (1)
- select_lex != select_lex->master_unit()->fake_select_lex) // (2)
- thd->lex->set_limit_rows_examined();
+ // Setup sum funcs only when necessary, otherwise we might break info
+ // for the first table
+ if (group_list || tmp_table_param.sum_func_count)
+ {
+ if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true, true))
+ DBUG_RETURN(true);
+ if (prepare_sum_aggregators(sum_funcs,
+ !join_tab->is_using_agg_loose_index_scan()))
+ DBUG_RETURN(true);
+ group_list= NULL;
+ if (setup_sum_funcs(thd, sum_funcs))
+ DBUG_RETURN(true);
+ }
+ // No sum funcs anymore
+ DBUG_ASSERT(items2.is_null());
- /* Create a tmp table if distinct or if the sort is too complicated */
- if (need_tmp && !exec_tmp_table1)
- {
- DBUG_PRINT("info",("Creating tmp table"));
- THD_STAGE_INFO(thd, stage_creating_tmp_table);
+ items2= ref_ptr_array_slice(3);
+ if (change_to_use_tmp_fields(thd, items2,
+ tmp_fields_list2, tmp_all_fields2,
+ fields_list.elements, tmp_all_fields1))
+ DBUG_RETURN(true);
- init_items_ref_array();
+ curr_fields_list= &tmp_fields_list2;
+ curr_all_fields= &tmp_all_fields2;
+ set_items_ref_array(items2);
+ curr_tab->ref_array= &items2;
+ curr_tab->all_fields= &tmp_all_fields2;
+ curr_tab->fields= &tmp_fields_list2;
+ set_postjoin_aggr_write_func(curr_tab);
- tmp_table_param.hidden_field_count= (all_fields.elements -
- fields_list.elements);
- ORDER *tmp_group= ((!simple_group && !procedure &&
- !(test_flags & TEST_NO_KEY_GROUP)) ? group_list :
- (ORDER*) 0);
- /*
- Pushing LIMIT to the temporary table creation is not applicable
- when there is ORDER BY or GROUP BY or there is no GROUP BY, but
- there are aggregate functions, because in all these cases we need
- all result rows.
- */
- ha_rows tmp_rows_limit= ((order == 0 || skip_sort_order) &&
- !tmp_group &&
- !thd->lex->current_select->with_sum_func) ?
- select_limit : HA_POS_ERROR;
-
- if (!(exec_tmp_table1=
- create_tmp_table(thd, &tmp_table_param, all_fields,
- tmp_group, group_list ? 0 : select_distinct,
- group_list && simple_group,
- select_options, tmp_rows_limit, "")))
- DBUG_RETURN(1);
- explain->ops_tracker.report_tmp_table(exec_tmp_table1);
- /*
- We don't have to store rows in temp table that doesn't match HAVING if:
- - we are sorting the table and writing complete group rows to the
- temp table.
- - We are using DISTINCT without resolving the distinct as a GROUP BY
- on all columns.
-
- If having is not handled here, it will be checked before the row
- is sent to the client.
- */
- if (tmp_having &&
- (sort_and_group || (exec_tmp_table1->distinct && !group_list)))
- having= tmp_having;
-
- /* if group or order on first table, sort first */
- if (group_list && simple_group)
- {
- DBUG_PRINT("info",("Sorting for group"));
- THD_STAGE_INFO(thd, stage_sorting_for_group);
- if (create_sort_index(thd, this, group_list,
- HA_POS_ERROR, HA_POS_ERROR, FALSE) ||
- alloc_group_fields(this, group_list) ||
- make_sum_func_list(all_fields, fields_list, 1) ||
- prepare_sum_aggregators(sum_funcs, need_distinct) ||
- setup_sum_funcs(thd, sum_funcs))
+ tmp_table_param.field_count+= tmp_table_param.sum_func_count;
+ tmp_table_param.sum_func_count= 0;
+ }
+ if (curr_tab->table->distinct)
+ select_distinct= false; /* Each row is unique */
+
+ if (select_distinct && !group_list)
+ {
+ if (having)
{
- DBUG_RETURN(1);
+ curr_tab->having= having;
+ having->update_used_tables();
}
- group_list=0;
+ /*
+ We only need DISTINCT operation if the join is not degenerate.
+ If it is, we must not request DISTINCT processing, because
+ remove_duplicates() assumes there is a preceding computation step (and
+ in the degenerate join, there's none)
+ */
+ if (top_join_tab_count && tables_list)
+ curr_tab->distinct= true;
+
+ having= NULL;
+ select_distinct= false;
+ }
+ /* Clean tmp_table_param for the next tmp table. */
+ tmp_table_param.field_count= tmp_table_param.sum_func_count=
+ tmp_table_param.func_count= 0;
+
+ tmp_table_param.copy_field= tmp_table_param.copy_field_end=0;
+ first_record= sort_and_group=0;
+
+ if (!group_optimized_away || implicit_grouping_with_window_funcs)
+ {
+ group= false;
}
else
{
- if (make_sum_func_list(all_fields, fields_list, 0) ||
- prepare_sum_aggregators(sum_funcs, need_distinct) ||
- setup_sum_funcs(thd, sum_funcs))
- {
- DBUG_RETURN(1);
- }
+ /*
+ If grouping has been optimized away, a temporary table is
+ normally not needed unless we're explicitly requested to create
+ one (e.g. due to a SQL_BUFFER_RESULT hint or INSERT ... SELECT).
+
+ In this case (grouping was optimized away), temp_table was
+ created without a grouping expression and JOIN::exec() will not
+ perform the necessary grouping (by the use of end_send_group()
+ or end_write_group()) if JOIN::group is set to false.
+ */
+ // the temporary table was explicitly requested
+ DBUG_ASSERT(MY_TEST(select_options & OPTION_BUFFER_RESULT));
+ // the temporary table does not have a grouping expression
+ DBUG_ASSERT(!curr_tab->table->group);
+ }
+ calc_group_buffer(this, group_list);
+ count_field_types(select_lex, &tmp_table_param, *curr_all_fields, false);
+ }
+
+ if (group ||
+ (implicit_grouping && !implicit_grouping_with_window_funcs) ||
+ tmp_table_param.sum_func_count)
+ {
+ if (make_group_fields(this, this))
+ DBUG_RETURN(true);
+
+ DBUG_ASSERT(items3.is_null());
- if (!group_list && ! exec_tmp_table1->distinct && order && simple_order)
+ if (items0.is_null())
+ init_items_ref_array();
+ items3= ref_ptr_array_slice(4);
+ setup_copy_fields(thd, &tmp_table_param,
+ items3, tmp_fields_list3, tmp_all_fields3,
+ curr_fields_list->elements, *curr_all_fields);
+
+ curr_fields_list= &tmp_fields_list3;
+ curr_all_fields= &tmp_all_fields3;
+ set_items_ref_array(items3);
+ if (join_tab)
+ {
+ JOIN_TAB *last_tab= join_tab + top_join_tab_count + aggr_tables - 1;
+ // Set grouped fields on the last table
+ last_tab->ref_array= &items3;
+ last_tab->all_fields= &tmp_all_fields3;
+ last_tab->fields= &tmp_fields_list3;
+ }
+ if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true, true))
+ DBUG_RETURN(true);
+ if (prepare_sum_aggregators(sum_funcs,
+ !join_tab ||
+ !join_tab-> is_using_agg_loose_index_scan()))
+ DBUG_RETURN(true);
+ if (setup_sum_funcs(thd, sum_funcs) || thd->is_fatal_error)
+ DBUG_RETURN(true);
+ }
+ if (group_list || order)
+ {
+ DBUG_PRINT("info",("Sorting for send_result_set_metadata"));
+ THD_STAGE_INFO(thd, stage_sorting_result);
+ /* If we have already done the group, add HAVING to sorted table */
+ if (tmp_having && !is_having_added_as_table_cond &&
+ !group_list && !sort_and_group)
+ {
+ if (add_having_as_table_cond(curr_tab))
+ DBUG_RETURN(true);
+ }
+
+ if (group)
+ select_limit= HA_POS_ERROR;
+ else if (!need_tmp)
+ {
+ /*
+ We can abort sorting after thd->select_limit rows if there are no
+ filter conditions for any tables after the sorted one.
+ Filter conditions come in several forms:
+ 1. as a condition item attached to the join_tab, or
+ 2. as a keyuse attached to the join_tab (ref access).
+ */
+ for (uint i= const_tables + 1; i < top_join_tab_count; i++)
{
- DBUG_PRINT("info",("Sorting for order"));
- THD_STAGE_INFO(thd, stage_sorting_for_order);
- if (create_sort_index(thd, this, order,
- HA_POS_ERROR, HA_POS_ERROR, TRUE))
+ JOIN_TAB *const tab= join_tab + i;
+ if (tab->select_cond || // 1
+ (tab->keyuse && !tab->first_inner)) // 2
{
- DBUG_RETURN(1);
+ /* We have to sort all rows */
+ select_limit= HA_POS_ERROR;
+ break;
}
- order=0;
}
}
-
/*
- Optimize distinct when used on some of the tables
- SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.b=t2.b
- In this case we can stop scanning t2 when we have found one t1.a
+ Here we add sorting stage for ORDER BY/GROUP BY clause, if the
+ optimiser chose FILESORT to be faster than INDEX SCAN or there is
+ no suitable index present.
+ OPTION_FOUND_ROWS supersedes LIMIT and is taken into account.
*/
+ DBUG_PRINT("info",("Sorting for order by/group by"));
+ ORDER *order_arg= group_list ? group_list : order;
+ if (top_join_tab_count + aggr_tables > const_tables &&
+ ordered_index_usage !=
+ (group_list ? ordered_index_group_by : ordered_index_order_by) &&
+ curr_tab->type != JT_CONST &&
+ curr_tab->type != JT_EQ_REF) // Don't sort 1 row
+ {
+ // Sort either first non-const table or the last tmp table
+ JOIN_TAB *sort_tab= curr_tab;
+
+ if (add_sorting_to_table(sort_tab, order_arg))
+ DBUG_RETURN(true);
+ /*
+ filesort_limit: Return only this many rows from filesort().
+ We can use select_limit_cnt only if we have no group_by and 1 table.
+ This allows us to use Bounded_queue for queries like:
+ "select SQL_CALC_FOUND_ROWS * from t1 order by b desc limit 1;"
+ m_select_limit == HA_POS_ERROR (we need a full table scan)
+ unit->select_limit_cnt == 1 (we only need one row in the result set)
+ */
+ sort_tab->filesort->limit=
+ (has_group_by || (join_tab + table_count > curr_tab + 1)) ?
+ select_limit : unit->select_limit_cnt;
+ }
+ if (!only_const_tables() &&
+ !join_tab[const_tables].filesort &&
+ !(select_options & SELECT_DESCRIBE))
+ {
+ /*
+ If no IO cache exists for the first table then we are using an
+ INDEX SCAN and no filesort. Thus we should not remove the sorted
+ attribute on the INDEX SCAN.
+ */
+ skip_sort_order= true;
+ }
+ }
+
+ /*
+ Window functions computation step should be attached to the last join_tab
+ that's doing aggregation.
+ The last join_tab reads the data from the temp. table. It also may do
+ - sorting
+ - duplicate value removal
+ Both of these operations are done after window function computation step.
+ */
+ curr_tab= join_tab + total_join_tab_cnt();
+ if (select_lex->window_funcs.elements)
+ {
+ curr_tab->window_funcs_step= new Window_funcs_computation;
+ if (curr_tab->window_funcs_step->setup(thd, &select_lex->window_funcs,
+ curr_tab))
+ DBUG_RETURN(true);
+ /* Count that we're using window functions. */
+ status_var_increment(thd->status_var.feature_window_functions);
+ }
+
+ fields= curr_fields_list;
+ // Reset before execution
+ set_items_ref_array(items0);
+ if (join_tab)
+ join_tab[exec_join_tab_cnt() + aggr_tables - 1].next_select=
+ setup_end_select_func(this, NULL);
+ group= has_group_by;
+
+ DBUG_RETURN(false);
+}
+
+
- if (exec_tmp_table1->distinct)
+bool
+JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List<Item> *table_fields,
+ ORDER *table_group,
+ bool save_sum_fields,
+ bool distinct,
+ bool keep_row_order)
+{
+ DBUG_ENTER("JOIN::create_postjoin_aggr_table");
+ THD_STAGE_INFO(thd, stage_creating_tmp_table);
+
+ /*
+ Pushing LIMIT to the post-join temporary table creation is not applicable
+    when there is an ORDER BY or GROUP BY, or when there is no GROUP BY
+    but there are aggregate functions, because in all these cases we
+    need all result rows.
+ */
+ ha_rows table_rows_limit= ((order == NULL || skip_sort_order) &&
+ !table_group &&
+ !select_lex->with_sum_func) ?
+ select_limit : HA_POS_ERROR;
+
+ tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param);
+ tab->tmp_table_param->skip_create_table= true;
+ TABLE* table= create_tmp_table(thd, tab->tmp_table_param, *table_fields,
+ table_group, distinct,
+ save_sum_fields, select_options, table_rows_limit,
+ "", true, keep_row_order);
+ if (!table)
+ DBUG_RETURN(true);
+ tmp_table_param.using_outer_summary_function=
+ tab->tmp_table_param->using_outer_summary_function;
+ tab->join= this;
+ DBUG_ASSERT(tab > tab->join->join_tab || !top_join_tab_count || !tables_list);
+ if (tab > join_tab)
+ (tab - 1)->next_select= sub_select_postjoin_aggr;
+ tab->aggr= new (thd->mem_root) AGGR_OP(tab);
+ if (!tab->aggr)
+ goto err;
+ tab->table= table;
+ table->reginfo.join_tab= tab;
+
+ /* if group or order on first table, sort first */
+ if ((group_list && simple_group) ||
+ (implicit_grouping && select_lex->have_window_funcs()))
+ {
+ DBUG_PRINT("info",("Sorting for group"));
+ THD_STAGE_INFO(thd, stage_sorting_for_group);
+
+ if (ordered_index_usage != ordered_index_group_by &&
+ !only_const_tables() &&
+ (join_tab + const_tables)->type != JT_CONST && // Don't sort 1 row
+ !implicit_grouping &&
+ add_sorting_to_table(join_tab + const_tables, group_list))
+ goto err;
+
+ if (alloc_group_fields(this, group_list))
+ goto err;
+ if (make_sum_func_list(all_fields, fields_list, true))
+ goto err;
+ if (prepare_sum_aggregators(sum_funcs,
+ !(tables_list &&
+ join_tab->is_using_agg_loose_index_scan())))
+ goto err;
+ if (setup_sum_funcs(thd, sum_funcs))
+ goto err;
+ group_list= NULL;
+ }
+ else
+ {
+ if (make_sum_func_list(all_fields, fields_list, false))
+ goto err;
+ if (prepare_sum_aggregators(sum_funcs,
+ !join_tab->is_using_agg_loose_index_scan()))
+ goto err;
+ if (setup_sum_funcs(thd, sum_funcs))
+ goto err;
+
+ if (!group_list && !table->distinct && order && simple_order &&
+ tab == join_tab + const_tables)
{
- table_map used_tables= select_list_used_tables;
- JOIN_TAB *last_join_tab= join_tab + top_join_tab_count - 1;
- do
- {
- if (used_tables & last_join_tab->table->map ||
- last_join_tab->use_join_cache)
- break;
- last_join_tab->shortcut_for_distinct= true;
- } while (last_join_tab-- != join_tab);
- /* Optimize "select distinct b from t1 order by key_part_1 limit #" */
- if (order && skip_sort_order)
- {
- /* Should always succeed */
- if (test_if_skip_sort_order(&join_tab[const_tables],
- order, unit->select_limit_cnt, 0,
- &join_tab[const_tables].table->
- keys_in_use_for_order_by))
- order=0;
- join_tab[const_tables].update_explain_data(const_tables);
- }
+ DBUG_PRINT("info",("Sorting for order"));
+ THD_STAGE_INFO(thd, stage_sorting_for_order);
+
+ if (ordered_index_usage != ordered_index_order_by &&
+ !only_const_tables() &&
+ add_sorting_to_table(join_tab + const_tables, order))
+ goto err;
+ order= NULL;
}
+ }
+
+ DBUG_RETURN(false);
+
+err:
+ if (table != NULL)
+ free_tmp_table(thd, table);
+ DBUG_RETURN(true);
+}
+
- /* If this join belongs to an uncacheable query save the original join */
- if (select_lex->uncacheable && init_save_join_tab())
- DBUG_RETURN(-1); /* purecov: inspected */
+void
+JOIN::optimize_distinct()
+{
+ for (JOIN_TAB *last_join_tab= join_tab + top_join_tab_count - 1; ;)
+ {
+ if (select_lex->select_list_tables & last_join_tab->table->map ||
+ last_join_tab->use_join_cache)
+ break;
+ last_join_tab->shortcut_for_distinct= true;
+ if (last_join_tab == join_tab)
+ break;
+ --last_join_tab;
}
- DBUG_RETURN(0);
+ /* Optimize "select distinct b from t1 order by key_part_1 limit #" */
+ if (order && skip_sort_order)
+ {
+ /* Should already have been optimized away */
+ DBUG_ASSERT(ordered_index_usage == ordered_index_order_by);
+ if (ordered_index_usage == ordered_index_order_by)
+ {
+ order= NULL;
+ }
+ }
+}
+
+
+/**
+  @brief Add a Filesort object to the given table so its rows are read in sorted order
+
+ @param tab the JOIN_TAB object to attach created Filesort object to
+ @param order List of expressions to sort the table by
+
+ @note This function moves tab->select, if any, to filesort->select
+
+ @return false on success, true on OOM
+*/
+
+bool
+JOIN::add_sorting_to_table(JOIN_TAB *tab, ORDER *order)
+{
+ tab->filesort=
+ new (thd->mem_root) Filesort(order, HA_POS_ERROR, tab->keep_current_rowid,
+ tab->select);
+ if (!tab->filesort)
+ return true;
+ /*
+ Select was moved to filesort->select to force join_init_read_record to use
+ sorted result instead of reading table through select.
+ */
+ if (tab->select)
+ {
+ tab->select= NULL;
+ tab->set_select_cond(NULL, __LINE__);
+ }
+ tab->read_first_record= join_init_read_record;
+ return false;
}
+
+
/**
Setup expression caches for subqueries that need them
@@ -2322,17 +3184,6 @@ bool JOIN::setup_subquery_caches()
}
-/**
- Restore values in temporary join.
-*/
-void JOIN::restore_tmp()
-{
- DBUG_PRINT("info", ("restore_tmp this %p tmp_join %p", this, tmp_join));
- DBUG_ASSERT(tmp_join != this);
- memcpy((void*)tmp_join, this, (size_t) sizeof(JOIN));
-}
-
-
/*
Shrink join buffers used for preceding tables to reduce the occupied space
@@ -2400,29 +3251,29 @@ JOIN::reinit()
unit->offset_limit_cnt= (ha_rows)(select_lex->offset_limit ?
select_lex->offset_limit->val_uint() : 0);
- first_record= 0;
+ first_record= false;
+ group_sent= false;
cleaned= false;
- if (exec_tmp_table1)
+ if (aggr_tables)
{
- exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE);
- exec_tmp_table1->file->ha_delete_all_rows();
- free_io_cache(exec_tmp_table1);
- filesort_free_buffers(exec_tmp_table1,0);
- }
- if (exec_tmp_table2)
- {
- exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE);
- exec_tmp_table2->file->ha_delete_all_rows();
- free_io_cache(exec_tmp_table2);
- filesort_free_buffers(exec_tmp_table2,0);
+ JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt();
+ JOIN_TAB *end_tab= curr_tab + aggr_tables;
+ for ( ; curr_tab < end_tab; curr_tab++)
+ {
+ TABLE *tmp_table= curr_tab->table;
+ if (!tmp_table->is_created())
+ continue;
+ tmp_table->file->extra(HA_EXTRA_RESET_STATE);
+ tmp_table->file->ha_delete_all_rows();
+ }
}
clear_sj_tmp_tables(this);
- if (items0)
+ if (current_ref_ptrs != items0)
+ {
set_items_ref_array(items0);
-
- if (join_tab_save)
- memcpy(join_tab, join_tab_save, sizeof(JOIN_TAB) * table_count);
+ set_group_rpa= false;
+ }
/* need to reset ref access state (see join_read_key) */
if (join_tab)
@@ -2435,9 +3286,6 @@ JOIN::reinit()
}
}
- if (tmp_join)
- restore_tmp();
-
/* Reset of sum functions */
if (sum_funcs)
{
@@ -2462,38 +3310,40 @@ JOIN::reinit()
DBUG_RETURN(0);
}
+
/**
- @brief Save the original join layout
-
- @details Saves the original join layout so it can be reused in
- re-execution and for EXPLAIN.
-
- @return Operation status
- @retval 0 success.
- @retval 1 error occurred.
+ Prepare join result.
+
+ @details Prepare join result prior to join execution or describing.
+ Instantiate derived tables and get schema tables result if necessary.
+
+ @return
+ TRUE An error during derived or schema tables instantiation.
+ FALSE Ok
*/
-bool
-JOIN::init_save_join_tab()
+bool JOIN::prepare_result(List<Item> **columns_list)
{
- if (!(tmp_join= (JOIN*)thd->alloc(sizeof(JOIN))))
- return 1; /* purecov: inspected */
- error= 0; // Ensure that tmp_join.error= 0
- restore_tmp();
- return 0;
-}
+ DBUG_ENTER("JOIN::prepare_result");
+ error= 0;
+ /* Create result tables for materialized views. */
+ if (!zero_result_cause &&
+ select_lex->handle_derived(thd->lex, DT_CREATE))
+ goto err;
-bool
-JOIN::save_join_tab()
-{
- if (!join_tab_save && select_lex->master_unit()->uncacheable)
- {
- if (!(join_tab_save= (JOIN_TAB*)thd->memdup((uchar*) join_tab,
- sizeof(JOIN_TAB) * table_count)))
- return 1;
- }
- return 0;
+ if (result->prepare2())
+ goto err;
+
+ if ((select_lex->options & OPTION_SCHEMA_TABLE) &&
+ get_schema_tables_result(this, PROCESSED_BY_JOIN_EXEC))
+ goto err;
+
+ DBUG_RETURN(FALSE);
+
+err:
+ error= 1;
+ DBUG_RETURN(TRUE);
}
@@ -2502,7 +3352,7 @@ void JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
bool distinct)
{
/*
- If there is SELECT in this statemet with the same number it must be the
+ If there is SELECT in this statement with the same number it must be the
same SELECT
*/
DBUG_ASSERT(select_lex->select_number == UINT_MAX ||
@@ -2545,6 +3395,14 @@ void JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
Explain_union *eu= output->get_union(nr);
explain= &eu->fake_select_lex_explain;
join_tab[0].tracker= eu->get_fake_select_lex_tracker();
+ for (uint i=0 ; i < exec_join_tab_cnt() + aggr_tables; i++)
+ {
+ if (join_tab[i].filesort)
+ {
+ join_tab[i].filesort->tracker=
+ new Filesort_tracker(thd->lex->analyze_stmt);
+ }
+ }
}
}
@@ -2558,7 +3416,6 @@ void JOIN::exec()
dbug_serve_apcs(thd, 1);
);
ANALYZE_START_TRACKING(&explain->time_tracker);
- explain->ops_tracker.report_join_start();
exec_inner();
ANALYZE_STOP_TRACKING(&explain->time_tracker);
@@ -2571,29 +3428,27 @@ void JOIN::exec()
}
-/**
- Exec select.
-
- @todo
- Note, that create_sort_index calls test_if_skip_sort_order and may
- finally replace sorting with index scan if there is a LIMIT clause in
- the query. It's never shown in EXPLAIN!
-
- @todo
- When can we have here thd->net.report_error not zero?
-*/
-
void JOIN::exec_inner()
{
List<Item> *columns_list= &fields_list;
- int tmp_error;
+ DBUG_ENTER("JOIN::exec_inner");
+ DBUG_ASSERT(optimization_state == JOIN::OPTIMIZATION_DONE);
- DBUG_ENTER("JOIN::exec");
+ THD_STAGE_INFO(thd, stage_executing);
- const bool has_group_by= this->group;
+ /*
+ Enable LIMIT ROWS EXAMINED during query execution if:
+ (1) This JOIN is the outermost query (not a subquery or derived table)
+ This ensures that the limit is enabled when actual execution begins, and
+ not if a subquery is evaluated during optimization of the outer query.
+ (2) This JOIN is not the result of a UNION. In this case do not apply the
+ limit in order to produce the partial query result stored in the
+ UNION temp table.
+ */
+ if (!select_lex->outer_select() && // (1)
+ select_lex != select_lex->master_unit()->fake_select_lex) // (2)
+ thd->lex->set_limit_rows_examined();
- THD_STAGE_INFO(thd, stage_executing);
- error= 0;
if (procedure)
{
procedure_fields_list= fields_list;
@@ -2609,18 +3464,22 @@ void JOIN::exec_inner()
if (result->prepare2())
DBUG_VOID_RETURN;
- if (!tables_list && (table_count || !select_lex->with_sum_func))
+ if (!tables_list && (table_count || !select_lex->with_sum_func) &&
+ !select_lex->have_window_funcs())
{ // Only test of functions
if (select_options & SELECT_DESCRIBE)
select_describe(this, FALSE, FALSE, FALSE,
(zero_result_cause?zero_result_cause:"No tables used"));
+
else
{
if (result->send_result_set_metadata(*columns_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
+ Protocol::SEND_NUM_ROWS |
+ Protocol::SEND_EOF))
{
DBUG_VOID_RETURN;
}
+
/*
We have to test for 'conds' here as the WHERE may not be constant
even if we don't have any tables for prepared statements or if
@@ -2664,7 +3523,8 @@ void JOIN::exec_inner()
condtions may be arbitrarily costly, and because the optimize phase
might not have produced a complete executable plan for EXPLAINs.
*/
- if (exec_const_cond && !(select_options & SELECT_DESCRIBE) &&
+ if (!zero_result_cause &&
+ exec_const_cond && !(select_options & SELECT_DESCRIBE) &&
!exec_const_cond->val_int())
zero_result_cause= "Impossible WHERE noticed after reading const tables";
@@ -2679,15 +3539,29 @@ void JOIN::exec_inner()
if (zero_result_cause)
{
- (void) return_zero_rows(this, result, select_lex->leaf_tables,
- *columns_list,
- send_row_on_empty_set(),
- select_options,
- zero_result_cause,
- having ? having : tmp_having, all_fields);
- DBUG_VOID_RETURN;
- }
+ if (select_lex->have_window_funcs() && send_row_on_empty_set())
+ {
+ /*
+ The query produces just one row but it has window functions.
+ The only way to compute the value of window function(s) is to
+ run the entire window function computation step (there is no shortcut).
+ */
+ const_tables= table_count;
+ first_select= sub_select_postjoin_aggr;
+ }
+ else
+ {
+ (void) return_zero_rows(this, result, select_lex->leaf_tables,
+ *columns_list,
+ send_row_on_empty_set(),
+ select_options,
+ zero_result_cause,
+ having ? having : tmp_having, all_fields);
+ DBUG_VOID_RETURN;
+ }
+ }
+
/*
Evaluate all constant expressions with subqueries in the
ORDER/GROUP clauses to make sure that all subqueries return a
@@ -2716,42 +3590,6 @@ void JOIN::exec_inner()
if (select_options & SELECT_DESCRIBE)
{
- /*
- Check if we managed to optimize ORDER BY away and don't use temporary
- table to resolve ORDER BY: in that case, we only may need to do
- filesort for GROUP BY.
- */
- if (!order && !no_order && (!skip_sort_order || !need_tmp))
- {
- /*
- Reset 'order' to 'group_list' and reinit variables describing
- 'order'
- */
- order= group_list;
- simple_order= simple_group;
- skip_sort_order= 0;
- }
- if (order && join_tab)
- {
- bool made_call= false;
- SQL_SELECT *tmp_select= join_tab[const_tables].select;
- if ((order != group_list ||
- !(select_options & SELECT_BIG_RESULT) ||
- (tmp_select && tmp_select->quick &&
- tmp_select->quick->get_type() ==
- QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) &&
- (const_tables == table_count ||
- ((simple_order || skip_sort_order) &&
- (made_call=true) &&
- test_if_skip_sort_order(&join_tab[const_tables], order,
- select_limit, 0,
- &join_tab[const_tables].table->
- keys_in_use_for_query))))
- order=0;
- if (made_call)
- join_tab[const_tables].update_explain_data(const_tables);
- }
- having= tmp_having;
select_describe(this, need_tmp,
order != 0 && !skip_sort_order,
select_distinct,
@@ -2764,540 +3602,31 @@ void JOIN::exec_inner()
select_lex->mark_const_derived(zero_result_cause);
}
- if (!initialized && init_execution())
- DBUG_VOID_RETURN;
-
- JOIN *curr_join= this;
- List<Item> *curr_all_fields= &all_fields;
- List<Item> *curr_fields_list= &fields_list;
- TABLE *curr_tmp_table= 0;
- /*
- curr_join->join_free() will call JOIN::cleanup(full=TRUE). It will not
- be safe to call update_used_tables() after that.
- */
- if (curr_join->tmp_having)
- curr_join->tmp_having->update_used_tables();
-
/*
Initialize examined rows here because the values from all join parts
must be accumulated in examined_row_count. Hence every join
iteration must count from zero.
*/
- curr_join->join_examined_rows= 0;
-
- curr_join->do_select_call_count= 0;
-
- /* Create a tmp table if distinct or if the sort is too complicated */
- if (need_tmp)
- {
- if (tmp_join)
- {
- /*
- We are in a non cacheable sub query. Get the saved join structure
- after optimization.
- (curr_join may have been modified during last exection and we need
- to reset it)
- */
- curr_join= tmp_join;
- }
- curr_tmp_table= exec_tmp_table1;
-
- /* Copy data to the temporary table */
- THD_STAGE_INFO(thd, stage_copying_to_tmp_table);
- DBUG_PRINT("info", ("%s", thd->proc_info));
- if (!curr_join->sort_and_group &&
- curr_join->const_tables != curr_join->table_count)
- {
- JOIN_TAB *first_tab= curr_join->join_tab + curr_join->const_tables;
- first_tab->sorted= MY_TEST(first_tab->loosescan_match_tab);
- }
-
- Procedure *save_proc= curr_join->procedure;
- tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0);
- curr_join->procedure= save_proc;
- if (tmp_error)
- {
- error= tmp_error;
- DBUG_VOID_RETURN;
- }
- curr_tmp_table->file->info(HA_STATUS_VARIABLE);
-
- if (curr_join->having)
- curr_join->having= curr_join->tmp_having= 0; // Allready done
-
- /* Change sum_fields reference to calculated fields in tmp_table */
-#ifdef HAVE_valgrind
- if (curr_join != this)
-#endif
- curr_join->all_fields= *curr_all_fields;
- if (!items1)
- {
- items1= items0 + all_fields.elements;
- if (sort_and_group || curr_tmp_table->group ||
- tmp_table_param.precomputed_group_by)
- {
- if (change_to_use_tmp_fields(thd, items1,
- tmp_fields_list1, tmp_all_fields1,
- fields_list.elements, all_fields))
- DBUG_VOID_RETURN;
- }
- else
- {
- if (change_refs_to_tmp_fields(thd, items1,
- tmp_fields_list1, tmp_all_fields1,
- fields_list.elements, all_fields))
- DBUG_VOID_RETURN;
- }
-#ifdef HAVE_valgrind
- if (curr_join != this)
-#endif
- {
- curr_join->tmp_all_fields1= tmp_all_fields1;
- curr_join->tmp_fields_list1= tmp_fields_list1;
- }
- curr_join->items1= items1;
- }
- curr_all_fields= &tmp_all_fields1;
- curr_fields_list= &tmp_fields_list1;
- curr_join->set_items_ref_array(items1);
-
- if (sort_and_group || curr_tmp_table->group)
- {
- curr_join->tmp_table_param.field_count+=
- curr_join->tmp_table_param.sum_func_count+
- curr_join->tmp_table_param.func_count;
- curr_join->tmp_table_param.sum_func_count=
- curr_join->tmp_table_param.func_count= 0;
- }
- else
- {
- curr_join->tmp_table_param.field_count+=
- curr_join->tmp_table_param.func_count;
- curr_join->tmp_table_param.func_count= 0;
- }
-
- // procedure can't be used inside subselect => we do nothing special for it
- if (procedure)
- procedure->update_refs();
-
- if (curr_tmp_table->group)
- { // Already grouped
- if (!curr_join->order && !curr_join->no_order && !skip_sort_order)
- curr_join->order= curr_join->group_list; /* order by group */
- curr_join->group_list= 0;
- }
-
- /*
- If we have different sort & group then we must sort the data by group
- and copy it to another tmp table
- This code is also used if we are using distinct something
- we haven't been able to store in the temporary table yet
- like SEC_TO_TIME(SUM(...)).
- */
+ join_examined_rows= 0;
- if ((curr_join->group_list && (!test_if_subpart(curr_join->group_list,
- curr_join->order) ||
- curr_join->select_distinct)) ||
- (curr_join->select_distinct &&
- curr_join->tmp_table_param.using_indirect_summary_function))
- { /* Must copy to another table */
- DBUG_PRINT("info",("Creating group table"));
-
- /* Free first data from old join */
-
- /*
- psergey-todo: this is the place of pre-mature JOIN::free call.
- */
- curr_join->join_free();
- if (curr_join->make_simple_join(this, curr_tmp_table))
- DBUG_VOID_RETURN;
- calc_group_buffer(curr_join, group_list);
- count_field_types(select_lex, &curr_join->tmp_table_param,
- curr_join->tmp_all_fields1,
- curr_join->select_distinct && !curr_join->group_list);
- curr_join->tmp_table_param.hidden_field_count=
- (curr_join->tmp_all_fields1.elements-
- curr_join->tmp_fields_list1.elements);
-
- if (exec_tmp_table2)
- curr_tmp_table= exec_tmp_table2;
- else
- {
- /* group data to new table */
-
- /*
- If the access method is loose index scan then all MIN/MAX
- functions are precomputed, and should be treated as regular
- functions. See extended comment in JOIN::exec.
- */
- if (curr_join->join_tab->is_using_loose_index_scan())
- curr_join->tmp_table_param.precomputed_group_by= TRUE;
-
- if (!(curr_tmp_table=
- exec_tmp_table2= create_tmp_table(thd,
- &curr_join->tmp_table_param,
- *curr_all_fields,
- (ORDER*) 0,
- curr_join->select_distinct &&
- !curr_join->group_list,
- 1, curr_join->select_options,
- HA_POS_ERROR, "")))
- DBUG_VOID_RETURN;
- curr_join->exec_tmp_table2= exec_tmp_table2;
- explain->ops_tracker.report_tmp_table(exec_tmp_table2);
- }
- if (curr_join->group_list)
- {
- if (curr_join->join_tab == join_tab && save_join_tab())
- {
- DBUG_VOID_RETURN;
- }
- DBUG_PRINT("info",("Sorting for index"));
- THD_STAGE_INFO(thd, stage_creating_sort_index);
- if (create_sort_index(thd, curr_join, curr_join->group_list,
- HA_POS_ERROR, HA_POS_ERROR, FALSE) ||
- make_group_fields(this, curr_join))
- {
- DBUG_VOID_RETURN;
- }
- sortorder= curr_join->sortorder;
- }
-
- THD_STAGE_INFO(thd, stage_copying_to_group_table);
- DBUG_PRINT("info", ("%s", thd->proc_info));
- if (curr_join != this)
- {
- if (sum_funcs2)
- {
- curr_join->sum_funcs= sum_funcs2;
- curr_join->sum_funcs_end= sum_funcs_end2;
- }
- else
- {
- curr_join->alloc_func_list();
- sum_funcs2= curr_join->sum_funcs;
- sum_funcs_end2= curr_join->sum_funcs_end;
- }
- }
- if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list,
- 1, TRUE) ||
- prepare_sum_aggregators(curr_join->sum_funcs,
- !curr_join->join_tab->is_using_agg_loose_index_scan()))
- DBUG_VOID_RETURN;
- curr_join->group_list= 0;
- if (!curr_join->sort_and_group &&
- curr_join->const_tables != curr_join->table_count)
- {
- JOIN_TAB *first_tab= curr_join->join_tab + curr_join->const_tables;
- first_tab->sorted= MY_TEST(first_tab->loosescan_match_tab);
- }
- tmp_error= -1;
- if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
- (tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table,
- 0)))
- {
- error= tmp_error;
- DBUG_VOID_RETURN;
- }
- end_read_record(&curr_join->join_tab->read_record);
- curr_join->const_tables= curr_join->table_count; // Mark free for cleanup()
- curr_join->join_tab[0].table= 0; // Table is freed
-
- // No sum funcs anymore
- if (!items2)
- {
- items2= items1 + all_fields.elements;
- if (change_to_use_tmp_fields(thd, items2,
- tmp_fields_list2, tmp_all_fields2,
- fields_list.elements, tmp_all_fields1))
- DBUG_VOID_RETURN;
-#ifdef HAVE_valgrind
- /*
- Some GCCs use memcpy() for struct assignment, even for x=x.
- GCC bug 19410: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19410
- */
- if (curr_join != this)
-#endif
- {
- curr_join->tmp_fields_list2= tmp_fields_list2;
- curr_join->tmp_all_fields2= tmp_all_fields2;
- }
- }
- curr_fields_list= &curr_join->tmp_fields_list2;
- curr_all_fields= &curr_join->tmp_all_fields2;
- curr_join->set_items_ref_array(items2);
- curr_join->tmp_table_param.field_count+=
- curr_join->tmp_table_param.sum_func_count;
- curr_join->tmp_table_param.sum_func_count= 0;
- }
- if (curr_tmp_table->distinct)
- curr_join->select_distinct=0; /* Each row is unique */
-
- curr_join->join_free(); /* Free quick selects */
-
- if (curr_join->select_distinct && ! curr_join->group_list)
- {
- THD_STAGE_INFO(thd, stage_removing_duplicates);
- if (remove_duplicates(curr_join, curr_tmp_table,
- *curr_fields_list, curr_join->tmp_having))
- DBUG_VOID_RETURN;
- curr_join->tmp_having=0;
- curr_join->select_distinct=0;
- }
- curr_tmp_table->reginfo.lock_type= TL_UNLOCK;
- // psergey-todo: here is one place where we switch to
- if (curr_join->make_simple_join(this, curr_tmp_table))
- DBUG_VOID_RETURN;
- calc_group_buffer(curr_join, curr_join->group_list);
- count_field_types(select_lex, &curr_join->tmp_table_param,
- *curr_all_fields, 0);
-
- }
- if (procedure)
- count_field_types(select_lex, &curr_join->tmp_table_param,
- *curr_all_fields, 0);
-
- if (curr_join->group || curr_join->implicit_grouping ||
- curr_join->tmp_table_param.sum_func_count ||
- (procedure && (procedure->flags & PROC_GROUP)))
- {
- if (make_group_fields(this, curr_join))
- {
- DBUG_VOID_RETURN;
- }
- if (!items3)
- {
- if (!items0)
- init_items_ref_array();
- items3= ref_pointer_array + (all_fields.elements*4);
- setup_copy_fields(thd, &curr_join->tmp_table_param,
- items3, tmp_fields_list3, tmp_all_fields3,
- curr_fields_list->elements, *curr_all_fields);
- tmp_table_param.save_copy_funcs= curr_join->tmp_table_param.copy_funcs;
- tmp_table_param.save_copy_field= curr_join->tmp_table_param.copy_field;
- tmp_table_param.save_copy_field_end=
- curr_join->tmp_table_param.copy_field_end;
-#ifdef HAVE_valgrind
- if (curr_join != this)
-#endif
- {
- curr_join->tmp_all_fields3= tmp_all_fields3;
- curr_join->tmp_fields_list3= tmp_fields_list3;
- }
- }
- else
- {
- curr_join->tmp_table_param.copy_funcs= tmp_table_param.save_copy_funcs;
- curr_join->tmp_table_param.copy_field= tmp_table_param.save_copy_field;
- curr_join->tmp_table_param.copy_field_end=
- tmp_table_param.save_copy_field_end;
- }
- curr_fields_list= &tmp_fields_list3;
- curr_all_fields= &tmp_all_fields3;
- curr_join->set_items_ref_array(items3);
-
- if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list,
- 1, TRUE) ||
- prepare_sum_aggregators(curr_join->sum_funcs,
- !curr_join->join_tab ||
- !curr_join->join_tab->
- is_using_agg_loose_index_scan()) ||
- setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
- thd->is_fatal_error)
- DBUG_VOID_RETURN;
- }
- if (curr_join->group_list || curr_join->order)
- {
- DBUG_PRINT("info",("Sorting for send_result_set_metadata"));
- THD_STAGE_INFO(thd, stage_sorting_result);
- /* If we have already done the group, add HAVING to sorted table */
- if (curr_join->tmp_having && ! curr_join->group_list &&
- ! curr_join->sort_and_group)
- {
- JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables];
- table_map used_tables= (curr_join->const_table_map |
- curr_table->table->map);
- curr_join->tmp_having->update_used_tables();
-
- Item* sort_table_cond= make_cond_for_table(thd, curr_join->tmp_having,
- used_tables,
- (table_map)0, -1,
- FALSE, FALSE);
- if (sort_table_cond)
- {
- if (!curr_table->select)
- {
- if (!(curr_table->select= new SQL_SELECT))
- DBUG_VOID_RETURN;
- curr_table->select->head= curr_table->table;
- }
- if (!curr_table->select->cond)
- curr_table->select->cond= sort_table_cond;
- else
- {
- if (!(curr_table->select->cond=
- new (thd->mem_root) Item_cond_and(thd, curr_table->select->cond,
- sort_table_cond)))
- DBUG_VOID_RETURN;
- }
- if (curr_table->pre_idx_push_select_cond)
- {
- if (sort_table_cond->type() == Item::COND_ITEM)
- sort_table_cond= sort_table_cond->copy_andor_structure(thd);
- if (!(curr_table->pre_idx_push_select_cond=
- new (thd->mem_root) Item_cond_and(thd, curr_table->pre_idx_push_select_cond,
- sort_table_cond)))
- DBUG_VOID_RETURN;
- }
- if (curr_table->select->cond && !curr_table->select->cond->fixed)
- curr_table->select->cond->fix_fields(thd, 0);
- if (curr_table->pre_idx_push_select_cond &&
- !curr_table->pre_idx_push_select_cond->fixed)
- curr_table->pre_idx_push_select_cond->fix_fields(thd, 0);
-
- curr_table->select->pre_idx_push_select_cond=
- curr_table->pre_idx_push_select_cond;
- curr_table->set_select_cond(curr_table->select->cond, __LINE__);
- curr_table->select_cond->top_level_item();
- DBUG_EXECUTE("where",print_where(curr_table->select->cond,
- "select and having",
- QT_ORDINARY););
- curr_join->tmp_having= make_cond_for_table(thd, curr_join->tmp_having,
- ~ (table_map) 0,
- ~used_tables, -1,
- FALSE, FALSE);
- DBUG_EXECUTE("where",print_where(curr_join->tmp_having,
- "having after sort",
- QT_ORDINARY););
- }
- }
- {
- if (group)
- curr_join->select_limit= HA_POS_ERROR;
- else
- {
- /*
- We can abort sorting after thd->select_limit rows if we there is no
- WHERE clause for any tables after the sorted one.
- */
- JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables+1];
- JOIN_TAB *end_table= &curr_join->join_tab[curr_join->top_join_tab_count];
- for (; curr_table < end_table ; curr_table++)
- {
- /*
- table->keyuse is set in the case there was an original WHERE clause
- on the table that was optimized away.
- */
- if (curr_table->select_cond ||
- (curr_table->keyuse && !curr_table->first_inner))
- {
- /* We have to sort all rows */
- curr_join->select_limit= HA_POS_ERROR;
- break;
- }
- }
- }
- if (curr_join->join_tab == join_tab && save_join_tab())
- {
- DBUG_VOID_RETURN;
- }
- /*
- Here we sort rows for ORDER BY/GROUP BY clause, if the optimiser
- chose FILESORT to be faster than INDEX SCAN or there is no
- suitable index present.
- Note, that create_sort_index calls test_if_skip_sort_order and may
- finally replace sorting with index scan if there is a LIMIT clause in
- the query. XXX: it's never shown in EXPLAIN!
- OPTION_FOUND_ROWS supersedes LIMIT and is taken into account.
- */
- DBUG_PRINT("info",("Sorting for order by/group by"));
- ORDER *order_arg=
- curr_join->group_list ? curr_join->group_list : curr_join->order;
- /*
- filesort_limit: Return only this many rows from filesort().
- We can use select_limit_cnt only if we have no group_by and 1 table.
- This allows us to use Bounded_queue for queries like:
- "select SQL_CALC_FOUND_ROWS * from t1 order by b desc limit 1;"
- select_limit == HA_POS_ERROR (we need a full table scan)
- unit->select_limit_cnt == 1 (we only need one row in the result set)
- */
- const ha_rows filesort_limit_arg=
- (has_group_by || curr_join->table_count > 1)
- ? curr_join->select_limit : unit->select_limit_cnt;
- const ha_rows select_limit_arg=
- select_options & OPTION_FOUND_ROWS
- ? HA_POS_ERROR : unit->select_limit_cnt;
- curr_join->filesort_found_rows= filesort_limit_arg != HA_POS_ERROR;
-
- DBUG_PRINT("info", ("has_group_by %d "
- "curr_join->table_count %d "
- "curr_join->m_select_limit %d "
- "unit->select_limit_cnt %d",
- has_group_by,
- curr_join->table_count,
- (int) curr_join->select_limit,
- (int) unit->select_limit_cnt));
- if (create_sort_index(thd,
- curr_join,
- order_arg,
- filesort_limit_arg,
- select_limit_arg,
- curr_join->group_list ? FALSE : TRUE))
- DBUG_VOID_RETURN;
- sortorder= curr_join->sortorder;
- if (curr_join->const_tables != curr_join->table_count &&
- !curr_join->join_tab[curr_join->const_tables].table->sort.io_cache)
- {
- /*
- If no IO cache exists for the first table then we are using an
- INDEX SCAN and no filesort. Thus we should not remove the sorted
- attribute on the INDEX SCAN.
- */
- skip_sort_order= 1;
- }
- }
- }
/* XXX: When can we have here thd->is_error() not zero? */
if (thd->is_error())
{
error= thd->is_error();
DBUG_VOID_RETURN;
}
- curr_join->having= curr_join->tmp_having;
- curr_join->fields= curr_fields_list;
- curr_join->procedure= procedure;
THD_STAGE_INFO(thd, stage_sending_data);
DBUG_PRINT("info", ("%s", thd->proc_info));
- result->send_result_set_metadata((procedure ? curr_join->procedure_fields_list :
- *curr_fields_list),
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
- error= do_select(curr_join, curr_fields_list, NULL, procedure);
- if (curr_join->order && curr_join->sortorder &&
- curr_join->filesort_found_rows)
- {
- /* Use info provided by filesort. */
- DBUG_ASSERT(curr_join->table_count > curr_join->const_tables);
- JOIN_TAB *tab= curr_join->join_tab + curr_join->const_tables;
- thd->limit_found_rows= tab->records;
- }
-
+ result->send_result_set_metadata(
+ procedure ? procedure_fields_list : *fields,
+ Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
+ error= do_select(this, procedure);
/* Accumulate the counts from all join iterations of all join parts. */
- thd->inc_examined_row_count(curr_join->join_examined_rows);
+ thd->inc_examined_row_count(join_examined_rows);
DBUG_PRINT("counts", ("thd->examined_row_count: %lu",
(ulong) thd->get_examined_row_count()));
- /*
- With EXPLAIN EXTENDED we have to restore original ref_array
- for a derived table which is always materialized.
- We also need to do this when we have temp table(s).
- Otherwise we would not be able to print the query correctly.
- */
- if (items0 && (thd->lex->describe & DESCRIBE_EXTENDED) &&
- (select_lex->linkage == DERIVED_TABLE_TYPE ||
- exec_tmp_table1 || exec_tmp_table2))
- set_items_ref_array(items0);
-
DBUG_VOID_RETURN;
}
@@ -3315,42 +3644,33 @@ JOIN::destroy()
DBUG_ENTER("JOIN::destroy");
select_lex->join= 0;
- if (tmp_join)
+ cond_equal= 0;
+ having_equal= 0;
+
+ cleanup(1);
+
+ if (join_tab)
{
- if (join_tab != tmp_join->join_tab)
+ for (JOIN_TAB *tab= first_linear_tab(this, WITH_BUSH_ROOTS,
+ WITH_CONST_TABLES);
+ tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
- JOIN_TAB *tab;
- for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES);
- tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
+ if (tab->aggr)
{
- tab->cleanup();
+ free_tmp_table(thd, tab->table);
+ delete tab->tmp_table_param;
+ tab->tmp_table_param= NULL;
+ tab->aggr= NULL;
}
+ tab->table= NULL;
}
- tmp_join->tmp_join= 0;
- /*
- We need to clean up tmp_table_param for reusable JOINs (having non-zero
- and different from self tmp_join) because it's not being cleaned up
- anywhere else (as we need to keep the join is reusable).
- */
- tmp_table_param.cleanup();
- tmp_join->tmp_table_param.copy_field= 0;
- cleanup(1);
- DBUG_RETURN(tmp_join->destroy());
}
- cond_equal= 0;
- having_equal= 0;
- cleanup(1);
- /* Cleanup items referencing temporary table columns */
+ /* Cleanup items referencing temporary table columns */
cleanup_item_list(tmp_all_fields1);
cleanup_item_list(tmp_all_fields3);
- if (exec_tmp_table1)
- free_tmp_table(thd, exec_tmp_table1);
- if (exec_tmp_table2)
- free_tmp_table(thd, exec_tmp_table2);
- delete select;
destroy_sj_tmp_tables(this);
- delete_dynamic(&keyuse);
+ delete_dynamic(&keyuse);
delete procedure;
DBUG_RETURN(error);
}
@@ -3415,7 +3735,7 @@ void JOIN::cleanup_item_list(List<Item> &items) const
*/
bool
-mysql_select(THD *thd, Item ***rref_pointer_array,
+mysql_select(THD *thd,
TABLE_LIST *tables, uint wild_num, List<Item> &fields,
COND *conds, uint og_num, ORDER *order, ORDER *group,
Item *having, ORDER *proc_param, ulonglong select_options,
@@ -3450,7 +3770,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array,
}
else
{
- if ((err= join->prepare(rref_pointer_array, tables, wild_num,
+ if ((err= join->prepare( tables, wild_num,
conds, og_num, order, false, group, having,
proc_param, select_lex, unit)))
{
@@ -3474,7 +3794,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array,
DBUG_RETURN(TRUE);
THD_STAGE_INFO(thd, stage_init);
thd->lex->used_tables=0;
- if ((err= join->prepare(rref_pointer_array, tables, wild_num,
+ if ((err= join->prepare(tables, wild_num,
conds, og_num, order, false, group, having, proc_param,
select_lex, unit)))
{
@@ -3630,6 +3950,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->checked_keys.init();
s->needed_reg.init();
table_vector[i]=s->table=table=tables->table;
+ s->tab_list= tables;
table->pos_in_table_list= tables;
error= tables->fetch_number_of_rows();
set_statistics_for_table(join->thd, table);
@@ -4149,6 +4470,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
select= make_select(s->table, found_const_table_map,
found_const_table_map,
*s->on_expr_ref ? *s->on_expr_ref : join->conds,
+ (SORT_INFO*) 0,
1, &error);
if (!select)
goto error;
@@ -4276,7 +4598,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
DEBUG_SYNC(join->thd, "inside_make_join_statistics");
/* Generate an execution plan from the found optimal join order. */
- DBUG_RETURN(join->thd->check_killed() || get_best_combination(join));
+ DBUG_RETURN(join->thd->check_killed() || join->get_best_combination());
error:
/*
@@ -4548,6 +4870,8 @@ static uint get_semi_join_select_list_index(Field *field)
@param num_values Number of values[] that we are comparing against
@param usable_tables Tables which can be used for key optimization
@param sargables IN/OUT Array of found sargable candidates
+ @param row_col_no if it equals n, with n > 0, then the field is
+ compared only against the n-th component of row values
@note
If we are doing a NOT NULL comparison on a NOT NULL field in a outer join
@@ -4561,16 +4885,17 @@ static void
add_key_field(JOIN *join,
KEY_FIELD **key_fields,uint and_level, Item_bool_func *cond,
Field *field, bool eq_func, Item **value, uint num_values,
- table_map usable_tables, SARGABLE_PARAM **sargables)
+ table_map usable_tables, SARGABLE_PARAM **sargables,
+ uint row_col_no= 0)
{
uint optimize= 0;
if (eq_func &&
((join->is_allowed_hash_join_access() &&
field->hash_join_is_possible() &&
!(field->table->pos_in_table_list->is_materialized_derived() &&
- field->table->created)) ||
+ field->table->is_created())) ||
(field->table->pos_in_table_list->is_materialized_derived() &&
- !field->table->created && !(field->flags & BLOB_FLAG))))
+ !field->table->is_created() && !(field->flags & BLOB_FLAG))))
{
optimize= KEY_OPTIMIZE_EQ;
}
@@ -4590,7 +4915,15 @@ add_key_field(JOIN *join,
bool optimizable=0;
for (uint i=0; i<num_values; i++)
{
- table_map value_used_tables= (value[i])->used_tables();
+ Item *curr_val;
+ if (row_col_no && value[i]->real_item()->type() == Item::ROW_ITEM)
+ {
+ Item_row *value_tuple= (Item_row *) (value[i]->real_item());
+ curr_val= value_tuple->element_index(row_col_no - 1);
+ }
+ else
+ curr_val= value[i];
+ table_map value_used_tables= curr_val->used_tables();
used_tables|= value_used_tables;
if (!(value_used_tables & (field->table->map | RAND_TABLE_BIT)))
optimizable=1;
@@ -4628,7 +4961,15 @@ add_key_field(JOIN *join,
bool is_const=1;
for (uint i=0; i<num_values; i++)
{
- if (!(is_const&= value[i]->const_item()))
+ Item *curr_val;
+ if (row_col_no && value[i]->real_item()->type() == Item::ROW_ITEM)
+ {
+ Item_row *value_tuple= (Item_row *) (value[i]->real_item());
+ curr_val= value_tuple->element_index(row_col_no - 1);
+ }
+ else
+ curr_val= value[i];
+ if (!(is_const&= curr_val->const_item()))
break;
}
if (is_const)
@@ -4695,12 +5036,14 @@ add_key_field(JOIN *join,
@param key_fields Pointer to add key, if usable
@param and_level And level, to be stored in KEY_FIELD
@param cond Condition predicate
- @param field Field used in comparision
+ @param field_item Field item used for comparison
@param eq_func True if we used =, <=> or IS NULL
- @param value Value used for comparison with field
- Is NULL for BETWEEN and IN
+ @param value Value used for comparison with field_item
+ @param num_values Number of values[] that we are comparing against
@param usable_tables Tables which can be used for key optimization
@param sargables IN/OUT Array of found sargable candidates
+ @param row_col_no if it equals n, with n > 0, then the field is
+ compared only against the n-th component of row values
@note
If field items f1 and f2 belong to the same multiple equality and
@@ -4715,11 +5058,12 @@ add_key_equal_fields(JOIN *join, KEY_FIELD **key_fields, uint and_level,
Item_bool_func *cond, Item *field_item,
bool eq_func, Item **val,
uint num_values, table_map usable_tables,
- SARGABLE_PARAM **sargables)
+ SARGABLE_PARAM **sargables, uint row_col_no= 0)
{
Field *field= ((Item_field *) (field_item->real_item()))->field;
add_key_field(join, key_fields, and_level, cond, field,
- eq_func, val, num_values, usable_tables, sargables);
+ eq_func, val, num_values, usable_tables, sargables,
+ row_col_no);
Item_equal *item_equal= field_item->get_item_equal();
if (item_equal)
{
@@ -4735,7 +5079,7 @@ add_key_equal_fields(JOIN *join, KEY_FIELD **key_fields, uint and_level,
{
add_key_field(join, key_fields, and_level, cond, equal_field,
eq_func, val, num_values, usable_tables,
- sargables);
+ sargables, row_col_no);
}
}
}
@@ -4917,6 +5261,24 @@ Item_func_in::add_key_fields(JOIN *join, KEY_FIELD **key_fields,
(Item_field*) (args[0]->real_item()), false,
args + 1, arg_count - 1, usable_tables, sargables);
}
+ else if (key_item()->type() == Item::ROW_ITEM &&
+ !(used_tables() & OUTER_REF_TABLE_BIT))
+ {
+ Item_row *key_row= (Item_row *) key_item();
+ Item **key_col= key_row->addr(0);
+ uint row_cols= key_row->cols();
+ for (uint i= 0; i < row_cols; i++, key_col++)
+ {
+ if (is_local_field(*key_col))
+ {
+ Item_field *field_item= (Item_field *)((*key_col)->real_item());
+ add_key_equal_fields(join, key_fields, *and_level, this,
+ field_item, false, args + 1, arg_count - 1,
+ usable_tables, sargables, i + 1);
+ }
+ }
+ }
+
}
@@ -5354,8 +5716,7 @@ void count_cond_for_nj(SELECT_LEX *sel, TABLE_LIST *nested_join_table)
count_cond_for_nj(sel, table);
}
if (nested_join_table->on_expr)
- nested_join_table->on_expr->walk(&Item::count_sargable_conds,
- 0, (uchar*) sel);
+ nested_join_table->on_expr->walk(&Item::count_sargable_conds, 0, sel);
}
@@ -5396,12 +5757,11 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
sel->cond_count= 0;
sel->between_count= 0;
if (cond)
- cond->walk(&Item::count_sargable_conds, 0, (uchar*) sel);
+ cond->walk(&Item::count_sargable_conds, 0, sel);
for (i=0 ; i < tables ; i++)
{
if (*join_tab[i].on_expr_ref)
- (*join_tab[i].on_expr_ref)->walk(&Item::count_sargable_conds,
- 0, (uchar*) sel);
+ (*join_tab[i].on_expr_ref)->walk(&Item::count_sargable_conds, 0, sel);
}
{
List_iterator<TABLE_LIST> li(*join_tab->join->join_list);
@@ -5766,7 +6126,7 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
{ /* Collect all query fields referenced in the GROUP clause. */
for (cur_group= join->group_list; cur_group; cur_group= cur_group->next)
(*cur_group->item)->walk(&Item::collect_item_field_processor, 0,
- (uchar*) &indexed_fields);
+ &indexed_fields);
}
else if (join->select_distinct)
{ /* Collect all query fields referenced in the SELECT clause. */
@@ -5774,10 +6134,10 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
List_iterator<Item> select_items_it(select_items);
Item *item;
while ((item= select_items_it++))
- item->walk(&Item::collect_item_field_processor, 0,
- (uchar*) &indexed_fields);
+ item->walk(&Item::collect_item_field_processor, 0, &indexed_fields);
}
- else if (is_indexed_agg_distinct(join, &indexed_fields))
+ else if (join->tmp_table_param.sum_func_count &&
+ is_indexed_agg_distinct(join, &indexed_fields))
{
join->sort_and_group= 1;
}
@@ -5858,7 +6218,7 @@ double matching_candidates_in_table(JOIN_TAB *s, bool with_found_constraint,
{
TABLE *table= s->table;
double sel= table->cond_selectivity;
- double table_records= table->stat_records();
+ double table_records= (double)table->stat_records();
dbl_records= table_records * sel;
return dbl_records;
}
@@ -5884,7 +6244,7 @@ double matching_candidates_in_table(JOIN_TAB *s, bool with_found_constraint,
if (s->table->quick_condition_rows != s->found_records)
records= s->table->quick_condition_rows;
- dbl_records= records;
+ dbl_records= (double)records;
return dbl_records;
}
@@ -6575,7 +6935,7 @@ static void choose_initial_table_order(JOIN *join)
if ((emb_subq= get_emb_subq(*tab)))
break;
}
- uint n_subquery_tabs= tabs_end - tab;
+ uint n_subquery_tabs= (uint)(tabs_end - tab);
if (!n_subquery_tabs)
DBUG_VOID_RETURN;
@@ -6603,7 +6963,7 @@ static void choose_initial_table_order(JOIN *join)
last_tab_for_subq < subq_tabs_end &&
get_emb_subq(*last_tab_for_subq) == cur_subq_nest;
last_tab_for_subq++) {}
- uint n_subquery_tables= last_tab_for_subq - subq_tab;
+ uint n_subquery_tables= (uint)(last_tab_for_subq - subq_tab);
/*
Walk the original array and find where this subquery would have been
@@ -6621,7 +6981,7 @@ static void choose_initial_table_order(JOIN *join)
if (!need_tables)
{
/* Move away the top-level tables that are after top_level_tab */
- uint top_tail_len= last_top_level_tab - top_level_tab - 1;
+ size_t top_tail_len= last_top_level_tab - top_level_tab - 1;
memmove(top_level_tab + 1 + n_subquery_tables, top_level_tab + 1,
sizeof(JOIN_TAB*)*top_tail_len);
last_top_level_tab += n_subquery_tables;
@@ -6650,10 +7010,6 @@ static void choose_initial_table_order(JOIN *join)
the query
@param join_tables set of the tables in the query
- @todo
- 'MAX_TABLES+2' denotes the old implementation of find_best before
- the greedy version. Will be removed when greedy_search is approved.
-
@retval
FALSE ok
@retval
@@ -6716,27 +7072,13 @@ choose_plan(JOIN *join, table_map join_tables)
}
else
{
- if (search_depth == MAX_TABLES+2)
- { /*
- TODO: 'MAX_TABLES+2' denotes the old implementation of find_best before
- the greedy version. Will be removed when greedy_search is approved.
- */
- join->best_read= DBL_MAX;
- if (find_best(join, join_tables, join->const_tables, 1.0, 0.0,
- use_cond_selectivity))
- {
- DBUG_RETURN(TRUE);
- }
- }
- else
- {
- if (search_depth == 0)
- /* Automatically determine a reasonable value for 'search_depth' */
- search_depth= determine_search_depth(join);
- if (greedy_search(join, join_tables, search_depth, prune_level,
- use_cond_selectivity))
- DBUG_RETURN(TRUE);
- }
+ DBUG_ASSERT(search_depth <= MAX_TABLES + 1);
+ if (search_depth == 0)
+ /* Automatically determine a reasonable value for 'search_depth' */
+ search_depth= determine_search_depth(join);
+ if (greedy_search(join, join_tables, search_depth, prune_level,
+ use_cond_selectivity))
+ DBUG_RETURN(TRUE);
}
/*
@@ -7382,13 +7724,13 @@ double JOIN::get_examined_rows()
{
double examined_rows;
double prev_fanout= 1;
- JOIN_TAB *tab= first_breadth_first_optimization_tab();
+ JOIN_TAB *tab= first_breadth_first_tab();
JOIN_TAB *prev_tab= tab;
- examined_rows= tab->get_examined_rows();
+ examined_rows= (double)tab->get_examined_rows();
- while ((tab= next_breadth_first_tab(first_breadth_first_optimization_tab(),
- top_table_access_tabs_count, tab)))
+ while ((tab= next_breadth_first_tab(first_breadth_first_tab(),
+ top_join_tab_count, tab)))
{
prev_fanout *= prev_tab->records_read;
examined_rows+= tab->get_examined_rows() * prev_fanout;
@@ -7424,7 +7766,7 @@ double table_multi_eq_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
if (!s->keyuse)
return sel;
- Item_equal *item_equal;
+ Item_equal *item_equal;
List_iterator_fast<Item_equal> it(cond_equal->current_level);
TABLE *table= s->table;
table_map table_bit= table->map;
@@ -7606,7 +7948,7 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
*/
if (!is_hash_join_key_no(key))
{
- table_map quick_key_map= (table_map(1) << table->quick_key_parts[key]) - 1;
+ key_part_map quick_key_map= (key_part_map(1) << table->quick_key_parts[key]) - 1;
if (table->quick_rows[key] &&
!(quick_key_map & ~table->const_key_parts[key]))
{
@@ -7683,7 +8025,7 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
}
if (keyparts > 1)
{
- ref_keyuse_steps[keyparts-2]= keyuse - prev_ref_keyuse;
+ ref_keyuse_steps[keyparts-2]= (uint16)(keyuse - prev_ref_keyuse);
prev_ref_keyuse= keyuse;
}
}
@@ -8040,105 +8382,6 @@ best_extension_by_limited_search(JOIN *join,
/**
- @todo
- - TODO: this function is here only temporarily until 'greedy_search' is
- tested and accepted.
-
- RETURN VALUES
- FALSE ok
- TRUE Fatal error
-*/
-static bool
-find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
- double read_time, uint use_cond_selectivity)
-{
- DBUG_ENTER("find_best");
- THD *thd= join->thd;
- if (thd->check_killed())
- DBUG_RETURN(TRUE);
- if (!rest_tables)
- {
- DBUG_PRINT("best",("read_time: %g record_count: %g",read_time,
- record_count));
-
- read_time+=record_count/(double) TIME_FOR_COMPARE;
- if (join->sort_by_table &&
- join->sort_by_table !=
- join->positions[join->const_tables].table->table)
- read_time+=record_count; // We have to make a temp table
- if (read_time < join->best_read)
- {
- memcpy((uchar*) join->best_positions,(uchar*) join->positions,
- sizeof(POSITION)*idx);
- join->best_read= read_time - 0.001;
- }
- DBUG_RETURN(FALSE);
- }
- if (read_time+record_count/(double) TIME_FOR_COMPARE >= join->best_read)
- DBUG_RETURN(FALSE); /* Found better before */
-
- JOIN_TAB *s;
- double best_record_count=DBL_MAX,best_read_time=DBL_MAX;
- bool disable_jbuf= join->thd->variables.join_cache_level == 0;
- for (JOIN_TAB **pos=join->best_ref+idx ; (s=*pos) ; pos++)
- {
- table_map real_table_bit=s->table->map;
- if ((rest_tables & real_table_bit) && !(rest_tables & s->dependent) &&
- (!idx|| !check_interleaving_with_nj(s)))
- {
- double records, best;
- POSITION loose_scan_pos;
- best_access_path(join, s, rest_tables, idx, disable_jbuf, record_count,
- join->positions + idx, &loose_scan_pos);
- records= join->positions[idx].records_read;
- best= join->positions[idx].read_time;
- /*
- Go to the next level only if there hasn't been a better key on
- this level! This will cut down the search for a lot simple cases!
- */
- double current_record_count=record_count*records;
- double current_read_time=read_time+best;
- advance_sj_state(join, rest_tables, idx, &current_record_count,
- &current_read_time, &loose_scan_pos);
-
- double pushdown_cond_selectivity= 1.0;
- if (use_cond_selectivity > 1)
- pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
- rest_tables &
- ~real_table_bit);
- join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
- double partial_join_cardinality= current_record_count *
- pushdown_cond_selectivity;
-
- if (best_record_count > partial_join_cardinality ||
- best_read_time > current_read_time ||
- (idx == join->const_tables && s->table == join->sort_by_table))
- {
- if (best_record_count >= partial_join_cardinality &&
- best_read_time >= current_read_time &&
- (!(s->key_dependent & rest_tables) || records < 2.0))
- {
- best_record_count= partial_join_cardinality;
- best_read_time=current_read_time;
- }
- swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
- if (find_best(join,rest_tables & ~real_table_bit,idx+1,
- partial_join_cardinality,current_read_time,
- use_cond_selectivity))
- DBUG_RETURN(TRUE);
- swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
- }
- restore_prev_nj_state(s);
- restore_prev_sj_state(rest_tables, s, idx);
- if (join->select_options & SELECT_STRAIGHT_JOIN)
- break; // Don't test all combinations
- }
- }
- DBUG_RETURN(FALSE);
-}
-
-
-/**
Find how much space the prevous read not const tables takes in cache.
*/
@@ -8448,6 +8691,7 @@ prev_record_reads(POSITION *positions, uint idx, table_map found_ref)
static JOIN_TAB *next_breadth_first_tab(JOIN_TAB *first_top_tab,
uint n_top_tabs_count, JOIN_TAB *tab)
{
+ n_top_tabs_count += tab->join->aggr_tables;
if (!tab->bush_root_tab)
{
/* We're at top level. Get the next top-level tab */
@@ -8499,7 +8743,9 @@ static JOIN_TAB *next_breadth_first_tab(JOIN_TAB *first_top_tab,
JOIN_TAB *first_explain_order_tab(JOIN* join)
{
JOIN_TAB* tab;
- tab= join->table_access_tabs;
+ tab= join->join_tab;
+ if (!tab)
+ return NULL; /* Can happen when the tables were optimized away */
return (tab->bush_children) ? tab->bush_children->start : tab;
}
@@ -8513,7 +8759,7 @@ JOIN_TAB *next_explain_order_tab(JOIN* join, JOIN_TAB* tab)
/* Move to next tab in the array we're traversing */
tab++;
- if (tab == join->table_access_tabs + join->top_join_tab_count)
+ if (tab == join->join_tab + join->top_join_tab_count)
return NULL; /* Outside SJM nest and reached EOF */
if (tab->bush_children)
@@ -8529,7 +8775,7 @@ JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables const_tbls
JOIN_TAB *tab= join->join_tab;
if (const_tbls == WITHOUT_CONST_TABLES)
{
- if (join->const_tables == join->table_count)
+ if (join->const_tables == join->table_count || !tab)
return NULL;
tab += join->const_tables;
}
@@ -8539,7 +8785,7 @@ JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables const_tbls
JOIN_TAB *next_top_level_tab(JOIN *join, JOIN_TAB *tab)
{
- tab= next_breadth_first_tab(join->first_breadth_first_execution_tab(),
+ tab= next_breadth_first_tab(join->first_breadth_first_tab(),
join->top_join_tab_count, tab);
if (tab && tab->bush_root_tab)
tab= NULL;
@@ -8552,6 +8798,10 @@ JOIN_TAB *first_linear_tab(JOIN *join,
enum enum_with_const_tables const_tbls)
{
JOIN_TAB *first= join->join_tab;
+
+ if (!first)
+ return NULL;
+
if (const_tbls == WITHOUT_CONST_TABLES)
first+= join->const_tables;
@@ -8617,7 +8867,7 @@ JOIN_TAB *next_linear_tab(JOIN* join, JOIN_TAB* tab,
}
/* If no more JOIN_TAB's on the top level */
- if (++tab == join->join_tab + join->top_join_tab_count)
+ if (++tab == join->join_tab + join->top_join_tab_count + join->aggr_tables)
return NULL;
if (include_bush_roots == WITHOUT_BUSH_ROOTS && tab->bush_children)
@@ -8638,7 +8888,7 @@ JOIN_TAB *first_depth_first_tab(JOIN* join)
{
JOIN_TAB* tab;
/* This means we're starting the enumeration */
- if (join->const_tables == join->top_join_tab_count)
+ if (join->const_tables == join->top_join_tab_count || !join->join_tab)
return NULL;
tab= join->join_tab + join->const_tables;
@@ -8711,35 +8961,56 @@ static Item * const null_ptr= NULL;
TRUE Out of memory
*/
-bool
-get_best_combination(JOIN *join)
+bool JOIN::get_best_combination()
{
uint tablenr;
table_map used_tables;
- JOIN_TAB *join_tab,*j;
+ JOIN_TAB *j;
KEYUSE *keyuse;
- uint table_count;
- THD *thd=join->thd;
DBUG_ENTER("get_best_combination");
- table_count=join->table_count;
- if (!(join->join_tab=join_tab=
- (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*table_count)))
+ /*
+ Additional plan nodes for postjoin tmp tables:
+ 1? + // For GROUP BY
+ 1? + // For DISTINCT
+ 1? + // For aggregation functions aggregated in outer query
+ // when used with distinct
+ 1? + // For ORDER BY
+ 1? // buffer result
+ Up to 2 tmp tables are actually used, but it's hard to tell exact number
+ at this stage.
+ */
+ uint aggr_tables= (group_list ? 1 : 0) +
+ (select_distinct ?
+ (tmp_table_param. using_outer_summary_function ? 2 : 1) : 0) +
+ (order ? 1 : 0) +
+ (select_options & (SELECT_BIG_RESULT | OPTION_BUFFER_RESULT) ? 1 : 0) ;
+
+ if (aggr_tables == 0)
+ aggr_tables= 1; /* For group by pushdown */
+
+ if (select_lex->window_specs.elements)
+ aggr_tables++;
+
+ if (aggr_tables > 2)
+ aggr_tables= 2;
+ if (!(join_tab= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*
+ (top_join_tab_count + aggr_tables))))
DBUG_RETURN(TRUE);
- join->full_join=0;
- join->hash_join= FALSE;
+ full_join=0;
+ hash_join= FALSE;
- fix_semijoin_strategies_for_picked_join_order(join);
-
+ fix_semijoin_strategies_for_picked_join_order(this);
+
JOIN_TAB_RANGE *root_range;
if (!(root_range= new (thd->mem_root) JOIN_TAB_RANGE))
DBUG_RETURN(TRUE);
- root_range->start= join->join_tab;
+ root_range->start= join_tab;
/* root_range->end will be set later */
- join->join_tab_ranges.empty();
+ join_tab_ranges.empty();
- if (join->join_tab_ranges.push_back(root_range, thd->mem_root))
+ if (join_tab_ranges.push_back(root_range, thd->mem_root))
DBUG_RETURN(TRUE);
JOIN_TAB *sjm_nest_end= NULL;
@@ -8748,7 +9019,7 @@ get_best_combination(JOIN *join)
for (j=join_tab, tablenr=0 ; tablenr < table_count ; tablenr++,j++)
{
TABLE *form;
- POSITION *cur_pos= &join->best_positions[tablenr];
+ POSITION *cur_pos= &best_positions[tablenr];
if (cur_pos->sj_strategy == SJ_OPT_MATERIALIZE ||
cur_pos->sj_strategy == SJ_OPT_MATERIALIZE_SCAN)
{
@@ -8759,7 +9030,7 @@ get_best_combination(JOIN *join)
in the temptable.
*/
bzero((void*)j, sizeof(JOIN_TAB));
- j->join= join;
+ j->join= this;
j->table= NULL; //temporary way to tell SJM tables from others.
j->ref.key = -1;
j->on_expr_ref= (Item**) &null_ptr;
@@ -8775,12 +9046,12 @@ get_best_combination(JOIN *join)
j->cond_selectivity= 1.0;
JOIN_TAB *jt;
JOIN_TAB_RANGE *jt_range;
- if (!(jt= (JOIN_TAB*)join->thd->alloc(sizeof(JOIN_TAB)*sjm->tables)) ||
+ if (!(jt= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*sjm->tables)) ||
!(jt_range= new JOIN_TAB_RANGE))
DBUG_RETURN(TRUE);
jt_range->start= jt;
jt_range->end= jt + sjm->tables;
- join->join_tab_ranges.push_back(jt_range, join->thd->mem_root);
+ join_tab_ranges.push_back(jt_range, thd->mem_root);
j->bush_children= jt_range;
sjm_nest_end= jt + sjm->tables;
sjm_nest_root= j;
@@ -8788,11 +9059,11 @@ get_best_combination(JOIN *join)
j= jt;
}
- *j= *join->best_positions[tablenr].table;
+ *j= *best_positions[tablenr].table;
j->bush_root_tab= sjm_nest_root;
- form=join->table[tablenr]=j->table;
+ form= table[tablenr]= j->table;
form->reginfo.join_tab=j;
DBUG_PRINT("info",("type: %d", j->type));
if (j->type == JT_CONST)
@@ -8805,33 +9076,33 @@ get_best_combination(JOIN *join)
if (j->type == JT_SYSTEM)
goto loop_end;
- if ( !(keyuse= join->best_positions[tablenr].key))
+ if ( !(keyuse= best_positions[tablenr].key))
{
j->type=JT_ALL;
- if (join->best_positions[tablenr].use_join_buffer &&
- tablenr != join->const_tables)
- join->full_join= 1;
+ if (best_positions[tablenr].use_join_buffer &&
+ tablenr != const_tables)
+ full_join= 1;
}
- /*if (join->best_positions[tablenr].sj_strategy == SJ_OPT_LOOSE_SCAN)
+ /*if (best_positions[tablenr].sj_strategy == SJ_OPT_LOOSE_SCAN)
{
DBUG_ASSERT(!keyuse || keyuse->key ==
- join->best_positions[tablenr].loosescan_picker.loosescan_key);
- j->index= join->best_positions[tablenr].loosescan_picker.loosescan_key;
+ best_positions[tablenr].loosescan_picker.loosescan_key);
+ j->index= best_positions[tablenr].loosescan_picker.loosescan_key;
}*/
if ((j->type == JT_REF || j->type == JT_EQ_REF) &&
is_hash_join_key_no(j->ref.key))
- join->hash_join= TRUE;
+ hash_join= TRUE;
loop_end:
/*
Save records_read in JOIN_TAB so that select_describe()/etc don't have
to access join->best_positions[].
*/
- j->records_read= join->best_positions[tablenr].records_read;
- j->cond_selectivity= join->best_positions[tablenr].cond_selectivity;
- join->map2table[j->table->tablenr]= j;
+ j->records_read= best_positions[tablenr].records_read;
+ j->cond_selectivity= best_positions[tablenr].cond_selectivity;
+ map2table[j->table->tablenr]= j;
/* If we've reached the end of sjm nest, switch back to main sequence */
if (j + 1 == sjm_nest_end)
@@ -8853,24 +9124,18 @@ get_best_combination(JOIN *join)
used_tables|= j->table->map;
if (j->type != JT_CONST && j->type != JT_SYSTEM)
{
- if ((keyuse= join->best_positions[tablenr].key) &&
- create_ref_for_key(join, j, keyuse, TRUE, used_tables))
+ if ((keyuse= best_positions[tablenr].key) &&
+ create_ref_for_key(this, j, keyuse, TRUE, used_tables))
DBUG_RETURN(TRUE); // Something went wrong
}
if (j->last_leaf_in_bush)
j= j->bush_root_tab;
}
- join->top_join_tab_count= join->join_tab_ranges.head()->end -
- join->join_tab_ranges.head()->start;
- /*
- Save pointers to select join tabs for SHOW EXPLAIN
- */
- join->table_access_tabs= join->join_tab;
- join->top_table_access_tabs_count= join->top_join_tab_count;
-
+ top_join_tab_count= (uint)(join_tab_ranges.head()->end -
+ join_tab_ranges.head()->start);
- update_depend_map(join);
+ update_depend_map(this);
DBUG_RETURN(0);
}
@@ -9273,124 +9538,6 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables,
keyuse->val, FALSE);
}
-/**
- @details Initialize a JOIN as a query execution plan
- that accesses a single table via a table scan.
-
- @param parent contains JOIN_TAB and TABLE object buffers for this join
- @param tmp_table temporary table
-
- @retval FALSE success
- @retval TRUE error occurred
-*/
-bool
-JOIN::make_simple_join(JOIN *parent, TABLE *temp_table)
-{
- DBUG_ENTER("JOIN::make_simple_join");
-
- /*
- Reuse TABLE * and JOIN_TAB if already allocated by a previous call
- to this function through JOIN::exec (may happen for sub-queries).
- */
- if (!parent->join_tab_reexec &&
- !(parent->join_tab_reexec= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB))))
- DBUG_RETURN(TRUE); /* purecov: inspected */
-
- // psergey-todo: here, save the pointer for original join_tabs.
- join_tab= parent->join_tab_reexec;
- table= &parent->table_reexec[0]; parent->table_reexec[0]= temp_table;
- table_count= top_join_tab_count= 1;
-
- const_tables= 0;
- const_table_map= 0;
- eliminated_tables= 0;
- tmp_table_param.field_count= tmp_table_param.sum_func_count=
- tmp_table_param.func_count= 0;
- /*
- We need to destruct the copy_field (allocated in create_tmp_table())
- before setting it to 0 if the join is not "reusable".
- */
- if (!tmp_join || tmp_join != this)
- tmp_table_param.cleanup();
- else
- {
- /*
- Free data buffered in copy_fields, but keep data pointed by copy_field
- around for next iteration (possibly stored in save_copy_fields).
-
- It would be logically simpler to not clear copy_field
- below, but as we have loops that runs over copy_field to
- copy_field_end that should not be done anymore, it's simpler to
- just clear the pointers.
-
- Another option would be to just clear copy_field_end and not run
- the loops if this is not set or to have tmp_table_param.cleanup()
- to run cleanup on save_copy_field if copy_field is not set.
- */
- tmp_table_param.free_copy_field_data();
- tmp_table_param.copy_field= tmp_table_param.copy_field_end=0;
- }
- first_record= sort_and_group=0;
- send_records= (ha_rows) 0;
-
- if (group_optimized_away && !tmp_table_param.precomputed_group_by)
- {
- /*
- If grouping has been optimized away, a temporary table is
- normally not needed unless we're explicitly requested to create
- one (e.g. due to a SQL_BUFFER_RESULT hint or INSERT ... SELECT).
-
- In this case (grouping was optimized away), temp_table was
- created without a grouping expression and JOIN::exec() will not
- perform the necessary grouping (by the use of end_send_group()
- or end_write_group()) if JOIN::group is set to false.
-
- There is one exception: if the loose index scan access method is
- used to read into the temporary table, grouping and aggregate
- functions are handled.
- */
- // the temporary table was explicitly requested
- DBUG_ASSERT(MY_TEST(select_options & OPTION_BUFFER_RESULT));
- // the temporary table does not have a grouping expression
- DBUG_ASSERT(!temp_table->group);
- }
- else
- group= false;
-
- row_limit= unit->select_limit_cnt;
- do_send_rows= row_limit ? 1 : 0;
-
- bzero((void*)join_tab, sizeof(JOIN_TAB));
- join_tab->table=temp_table;
- join_tab->set_select_cond(NULL, __LINE__);
- join_tab->type= JT_ALL; /* Map through all records */
- join_tab->keys.init();
- join_tab->keys.set_all(); /* test everything in quick */
- join_tab->ref.key = -1;
- join_tab->shortcut_for_distinct= false;
- join_tab->read_first_record= join_init_read_record;
- join_tab->join= this;
- join_tab->ref.key_parts= 0;
-
- uint select_nr= select_lex->select_number;
- if (select_nr == INT_MAX)
- {
- /* this is a fake_select_lex of a union */
- select_nr= select_lex->master_unit()->first_select()->select_number;
- join_tab->tracker= thd->lex->explain->get_union(select_nr)->
- get_tmptable_read_tracker();
- }
- else
- {
- join_tab->tracker= thd->lex->explain->get_select(select_nr)->
- get_using_temporary_read_tracker();
- }
- bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record));
- temp_table->status=0;
- temp_table->null_row=0;
- DBUG_RETURN(FALSE);
-}
-
inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2)
{
@@ -9814,6 +9961,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
tab= next_depth_first_tab(join, tab))
{
bool is_hj;
+
/*
first_inner is the X in queries like:
SELECT * FROM t1 LEFT OUTER JOIN (t2 JOIN t3) ON X
@@ -10274,8 +10422,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
tmp_cond= new (thd->mem_root) Item_func_trig_cond(thd, tmp_cond,
&first_inner_tab->
not_null_compl);
- DBUG_PRINT("info", ("Item_func_trig_cond 0x%lx",
- (ulong) tmp_cond));
+ DBUG_PRINT("info", ("Item_func_trig_cond %p",
+ tmp_cond));
if (tmp_cond)
tmp_cond->quick_fix_field();
/* Add the predicate to other pushed down predicates */
@@ -10283,8 +10431,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
*sel_cond_ref= !(*sel_cond_ref) ?
tmp_cond :
new (thd->mem_root) Item_cond_and(thd, *sel_cond_ref, tmp_cond);
- DBUG_PRINT("info", ("Item_cond_and 0x%lx",
- (ulong)(*sel_cond_ref)));
+ DBUG_PRINT("info", ("Item_cond_and %p",
+ (*sel_cond_ref)));
if (!(*sel_cond_ref))
DBUG_RETURN(1);
(*sel_cond_ref)->quick_fix_field();
@@ -10563,7 +10711,7 @@ static uint make_join_orderinfo(JOIN *join)
if (join->need_tmp)
return join->table_count;
tab= join->get_sort_by_join_tab();
- return tab ? tab-join->join_tab : join->table_count;
+ return tab ? (uint)(tab-join->join_tab) : join->table_count;
}
/*
@@ -10594,11 +10742,10 @@ void set_join_cache_denial(JOIN_TAB *join_tab)
if (join_tab->cache->prev_cache)
join_tab->cache->prev_cache->next_cache= 0;
/*
- No need to do the same for next_cache since cache denial is done
- backwards starting from the latest cache in the linked list (see
- revise_cache_usage()).
+ Same for the next_cache
*/
- DBUG_ASSERT(!join_tab->cache->next_cache);
+ if (join_tab->cache->next_cache)
+ join_tab->cache->next_cache->prev_cache= 0;
join_tab->cache->free();
join_tab->cache= 0;
@@ -10938,6 +11085,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
uint bufsz= 4096;
JOIN_CACHE *prev_cache=0;
JOIN *join= tab->join;
+ MEM_ROOT *root= join->thd->mem_root;
uint cache_level= tab->used_join_cache_level;
bool force_unlinked_cache=
!(join->allowed_join_cache_types & JOIN_CACHE_INCREMENTAL_BIT);
@@ -11065,8 +11213,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
case JT_ALL:
if (cache_level == 1)
prev_cache= 0;
- if ((tab->cache= new JOIN_CACHE_BNL(join, tab, prev_cache)) &&
- !tab->cache->init(options & SELECT_DESCRIBE))
+ if ((tab->cache= new (root) JOIN_CACHE_BNL(join, tab, prev_cache)))
{
tab->icp_other_tables_ok= FALSE;
return (2 - MY_TEST(!prev_cache));
@@ -11100,8 +11247,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
goto no_join_cache;
if (cache_level == 3)
prev_cache= 0;
- if ((tab->cache= new JOIN_CACHE_BNLH(join, tab, prev_cache)) &&
- !tab->cache->init(options & SELECT_DESCRIBE))
+ if ((tab->cache= new (root) JOIN_CACHE_BNLH(join, tab, prev_cache)))
{
tab->icp_other_tables_ok= FALSE;
return (4 - MY_TEST(!prev_cache));
@@ -11121,8 +11267,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
{
if (cache_level == 5)
prev_cache= 0;
- if ((tab->cache= new JOIN_CACHE_BKA(join, tab, flags, prev_cache)) &&
- !tab->cache->init(options & SELECT_DESCRIBE))
+ if ((tab->cache= new (root) JOIN_CACHE_BKA(join, tab, flags, prev_cache)))
return (6 - MY_TEST(!prev_cache));
goto no_join_cache;
}
@@ -11130,10 +11275,9 @@ uint check_join_cache_usage(JOIN_TAB *tab,
{
if (cache_level == 7)
prev_cache= 0;
- if ((tab->cache= new JOIN_CACHE_BKAH(join, tab, flags, prev_cache)) &&
- !tab->cache->init(options & SELECT_DESCRIBE))
+ if ((tab->cache= new (root) JOIN_CACHE_BKAH(join, tab, flags, prev_cache)))
{
- tab->idx_cond_fact_out= FALSE;
+ tab->idx_cond_fact_out= FALSE;
return (8 - MY_TEST(!prev_cache));
}
goto no_join_cache;
@@ -11273,8 +11417,8 @@ void JOIN_TAB::remove_redundant_bnl_scan_conds()
select->cond is not processed separately. This method assumes it is always
the same as select_cond.
*/
- DBUG_ASSERT(!select || !select->cond ||
- (select->cond == select_cond));
+ if (select && select->cond != select_cond)
+ return;
if (is_cond_and(select_cond))
{
@@ -11450,20 +11594,18 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
case JT_SYSTEM: // Only happens with left join
case JT_CONST: // Only happens with left join
/* Only happens with outer joins */
- tab->read_first_record= tab->type == JT_SYSTEM ?
- join_read_system :join_read_const;
- if (table->covering_keys.is_set(tab->ref.key) &&
- !table->no_keyread)
- table->enable_keyread();
+ tab->read_first_record= tab->type == JT_SYSTEM ? join_read_system
+ : join_read_const;
+ if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread)
+ table->file->ha_start_keyread(tab->ref.key);
else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
case JT_EQ_REF:
tab->read_record.unlock_row= join_read_key_unlock_row;
/* fall through */
- if (table->covering_keys.is_set(tab->ref.key) &&
- !table->no_keyread)
- table->enable_keyread();
+ if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread)
+ table->file->ha_start_keyread(tab->ref.key);
else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
@@ -11476,9 +11618,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
}
delete tab->quick;
tab->quick=0;
- if (table->covering_keys.is_set(tab->ref.key) &&
- !table->no_keyread)
- table->enable_keyread();
+ if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread)
+ table->file->ha_start_keyread(tab->ref.key);
else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
@@ -11541,7 +11682,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
if (tab->select && tab->select->quick &&
tab->select->quick->index != MAX_KEY && //not index_merge
table->covering_keys.is_set(tab->select->quick->index))
- table->enable_keyread();
+ table->file->ha_start_keyread(tab->select->quick->index);
else if (!table->covering_keys.is_clear_all() &&
!(tab->select && tab->select->quick))
{ // Only read index tree
@@ -11570,7 +11711,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
}
}
if (tab->select && tab->select->quick &&
- tab->select->quick->index != MAX_KEY && ! tab->table->key_read)
+ tab->select->quick->index != MAX_KEY &&
+ !tab->table->file->keyread_enabled())
push_index_cond(tab, tab->select->quick->index);
}
break;
@@ -11586,7 +11728,6 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
/* purecov: end */
}
- tab->remove_redundant_bnl_scan_conds();
DBUG_EXECUTE("where",
char buff[256];
String str(buff,sizeof(buff),system_charset_info);
@@ -11595,8 +11736,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
str.append(" final_pushdown_cond");
print_where(tab->select_cond, str.c_ptr_safe(), QT_ORDINARY););
}
- uint n_top_tables= join->join_tab_ranges.head()->end -
- join->join_tab_ranges.head()->start;
+ uint n_top_tables= (uint)(join->join_tab_ranges.head()->end -
+ join->join_tab_ranges.head()->start);
join->join_tab[n_top_tables - 1].next_select=0; /* Set by do_select */
@@ -11695,7 +11836,13 @@ bool error_if_full_join(JOIN *join)
void JOIN_TAB::cleanup()
{
DBUG_ENTER("JOIN_TAB::cleanup");
- DBUG_PRINT("enter", ("table %s.%s",
+
+ if (tab_list && tab_list->is_with_table_recursive_reference() &&
+ tab_list->with->is_cleaned())
+ DBUG_VOID_RETURN;
+
+ DBUG_PRINT("enter", ("tab: %p table %s.%s",
+ this,
(table ? table->s->db.str : "?"),
(table ? table->s->table_name.str : "?")));
delete select;
@@ -11708,9 +11855,21 @@ void JOIN_TAB::cleanup()
cache= 0;
}
limit= 0;
+ // Free select that was created for filesort outside of create_sort_index
+ if (filesort && filesort->select && !filesort->own_select)
+ delete filesort->select;
+ delete filesort;
+ filesort= NULL;
+ /* Skip non-existing derived tables/views result tables */
+ if (table &&
+ (table->s->tmp_table != INTERNAL_TMP_TABLE || table->is_created()))
+ {
+ table->file->ha_end_keyread();
+ table->file->ha_index_or_rnd_end();
+ }
if (table)
{
- table->disable_keyread();
+ table->file->ha_end_keyread();
table->file->ha_index_or_rnd_end();
preread_init_done= FALSE;
if (table->pos_in_table_list &&
@@ -11760,7 +11919,7 @@ void JOIN_TAB::cleanup()
double JOIN_TAB::scan_time()
{
double res;
- if (table->created)
+ if (table->is_created())
{
if (table->is_filled_at_execution())
{
@@ -11799,9 +11958,10 @@ double JOIN_TAB::scan_time()
ha_rows JOIN_TAB::get_examined_rows()
{
double examined_rows;
+ SQL_SELECT *sel= filesort? filesort->select : this->select;
- if (select && select->quick && use_quick != 2)
- examined_rows= select->quick->records;
+ if (sel && sel->quick && use_quick != 2)
+ examined_rows= (double)sel->quick->records;
else if (type == JT_NEXT || type == JT_ALL ||
type == JT_HASH || type ==JT_HASH_NEXT)
{
@@ -11811,19 +11971,19 @@ ha_rows JOIN_TAB::get_examined_rows()
@todo This estimate is wrong, a LIMIT query may examine much more rows
than the LIMIT itself.
*/
- examined_rows= limit;
+ examined_rows= (double)limit;
}
else
{
if (table->is_filled_at_execution())
- examined_rows= records;
+ examined_rows= (double)records;
else
{
/*
handler->info(HA_STATUS_VARIABLE) has been called in
make_join_statistics()
*/
- examined_rows= table->stat_records();
+ examined_rows= (double)table->stat_records();
}
}
}
@@ -11850,7 +12010,8 @@ bool JOIN_TAB::preread_init()
}
/* Materialize derived table/view. */
- if (!derived->get_unit()->executed &&
+ if ((!derived->get_unit()->executed ||
+ derived->is_recursive_with_table()) &&
mysql_handle_single_derived(join->thd->lex,
derived, DT_CREATE | DT_FILL))
return TRUE;
@@ -12088,35 +12249,12 @@ void JOIN::cleanup(bool full)
table_count= original_table_count;
}
- if (table)
+ if (join_tab)
{
JOIN_TAB *tab;
- /*
- Only a sorted table may be cached. This sorted table is always the
- first non const table in join->table
- */
- if (table_count > const_tables) // Test for not-const tables
- {
- JOIN_TAB *first_tab= first_top_level_tab(this, WITHOUT_CONST_TABLES);
- if (first_tab->table)
- {
- free_io_cache(first_tab->table);
- filesort_free_buffers(first_tab->table, full);
- }
- }
+
if (full)
{
- JOIN_TAB *sort_tab= first_linear_tab(this, WITH_BUSH_ROOTS,
- WITHOUT_CONST_TABLES);
- if (pre_sort_join_tab)
- {
- if (sort_tab && sort_tab->select == pre_sort_join_tab->select)
- {
- pre_sort_join_tab->select= NULL;
- }
- else
- clean_pre_sort_join_tab();
- }
/*
Call cleanup() on join tabs used by the join optimization
(join->join_tab may now be pointing to result of make_simple_join
@@ -12126,51 +12264,66 @@ void JOIN::cleanup(bool full)
w/o tables: they don't have some members initialized and
WALK_OPTIMIZATION_TABS may not work correctly for them.
*/
- if (table_count)
+ if (top_join_tab_count && tables_list)
{
- for (tab= first_breadth_first_optimization_tab(); tab;
- tab= next_breadth_first_tab(first_breadth_first_optimization_tab(),
- top_table_access_tabs_count, tab))
- tab->cleanup();
-
- /* We've walked optimization tabs, do execution ones too. */
- if (first_breadth_first_execution_tab() !=
- first_breadth_first_optimization_tab())
+ for (tab= first_breadth_first_tab(); tab;
+ tab= next_breadth_first_tab(first_breadth_first_tab(),
+ top_join_tab_count, tab))
{
- for (tab= first_breadth_first_execution_tab(); tab;
- tab= next_breadth_first_tab(first_breadth_first_execution_tab(),
- top_join_tab_count, tab))
- tab->cleanup();
+ tab->cleanup();
+ delete tab->filesort_result;
+ tab->filesort_result= NULL;
}
}
cleaned= true;
+ //psergey2: added (Q: why not in the above loop?)
+ {
+ JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt();
+ for (uint i= 0; i < aggr_tables; i++, curr_tab++)
+ {
+ if (curr_tab->aggr)
+ {
+ free_tmp_table(thd, curr_tab->table);
+ delete curr_tab->tmp_table_param;
+ curr_tab->tmp_table_param= NULL;
+ curr_tab->aggr= NULL;
+
+ delete curr_tab->filesort_result;
+ curr_tab->filesort_result= NULL;
+ }
+ }
+ aggr_tables= 0; // psergey3
+ }
}
else
{
for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); tab;
tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
- if (tab->table)
+ if (!tab->table)
+ continue;
+ DBUG_PRINT("info", ("close index: %s.%s alias: %s",
+ tab->table->s->db.str,
+ tab->table->s->table_name.str,
+ tab->table->alias.c_ptr()));
+ if (tab->table->is_created())
{
- DBUG_PRINT("info", ("close index: %s.%s alias: %s",
- tab->table->s->db.str,
- tab->table->s->table_name.str,
- tab->table->alias.c_ptr()));
tab->table->file->ha_index_or_rnd_end();
+ if (tab->aggr)
+ {
+ int tmp= 0;
+ if ((tmp= tab->table->file->extra(HA_EXTRA_NO_CACHE)))
+ tab->table->file->print_error(tmp, MYF(0));
+ }
}
+ delete tab->filesort_result;
+ tab->filesort_result= NULL;
}
}
}
if (full)
{
cleanup_empty_jtbm_semi_joins(this, join_list);
- /*
- Ensure that the following delete_elements() would not be called
- twice for the same list.
- */
- if (tmp_join && tmp_join != this &&
- tmp_join->group_fields == this->group_fields)
- tmp_join->group_fields.empty();
// Run Cached_item DTORs!
group_fields.delete_elements();
@@ -12186,14 +12339,6 @@ void JOIN::cleanup(bool full)
pointer to tmp_table_param.copy_field from tmp_join, because it will
be removed in tmp_table_param.cleanup().
*/
- if (tmp_join &&
- tmp_join != this &&
- tmp_join->tmp_table_param.copy_field ==
- tmp_table_param.copy_field)
- {
- tmp_join->tmp_table_param.copy_field=
- tmp_join->tmp_table_param.save_copy_field= 0;
- }
tmp_table_param.cleanup();
delete pushdown_query;
@@ -12215,6 +12360,12 @@ void JOIN::cleanup(bool full)
}
}
}
+ /* Restore ref array to original state */
+ if (current_ref_ptrs != items0)
+ {
+ set_items_ref_array(items0);
+ set_group_rpa= false;
+ }
DBUG_VOID_RETURN;
}
@@ -12390,8 +12541,8 @@ static ORDER *
remove_const(JOIN *join,ORDER *first_order, COND *cond,
bool change_list, bool *simple_order)
{
- *simple_order= 1;
- if (join->table_count == join->const_tables)
+ *simple_order= join->rollup.state == ROLLUP::STATE_NONE;
+ if (join->only_const_tables())
return change_list ? 0 : first_order; // No need to sort
ORDER *order,**prev_ptr, *tmp_order;
@@ -12493,8 +12644,37 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
can be used without tmp. table.
*/
bool can_subst_to_first_table= false;
+ bool first_is_in_sjm_nest= false;
+ if (first_is_base_table)
+ {
+ TABLE_LIST *tbl_for_first=
+ join->join_tab[join->const_tables].table->pos_in_table_list;
+ first_is_in_sjm_nest= tbl_for_first->sj_mat_info &&
+ tbl_for_first->sj_mat_info->is_used;
+ }
+ /*
+ Currently we do not employ the optimization that uses multiple
+ equalities for ORDER BY to remove tmp table in the case when
+ the first table happens to be the result of materialization of
+ a semi-join nest ( <=> first_is_in_sjm_nest == true).
+
+ When a semi-join nest is materialized and scanned to look for
+ possible matches in the remaining tables for every its row
+ the fields from the result of materialization are copied
+ into the record buffers of tables from the semi-join nest.
+ So these copies are used to access the remaining tables rather
+ than the fields from the result of materialization.
+
+ Unfortunately now this so-called 'copy back' technique is
+ supported only if the rows are scanned with the rr_sequential
+ function, but not with other rr_* functions that are employed
+ when the result of materialization is required to be sorted.
+
+ TODO: either to support 'copy back' technique for the above case,
+ or to get rid of this technique altogether.
+ */
if (optimizer_flag(join->thd, OPTIMIZER_SWITCH_ORDERBY_EQ_PROP) &&
- first_is_base_table &&
+ first_is_base_table && !first_is_in_sjm_nest &&
order->item[0]->real_item()->type() == Item::FIELD_ITEM &&
join->cond_equal)
{
@@ -12693,9 +12873,9 @@ static void clear_tables(JOIN *join)
class COND_CMP :public ilink {
public:
- static void *operator new(size_t size)
+ static void *operator new(size_t size, MEM_ROOT *mem_root)
{
- return (void*) sql_alloc((uint) size);
+ return alloc_root(mem_root, size);
}
static void operator delete(void *ptr __attribute__((unused)),
size_t size __attribute__((unused)))
@@ -13399,7 +13579,8 @@ COND *Item_func_eq::build_equal_items(THD *thd,
List_iterator_fast<Item_equal> it(cond_equal.current_level);
while ((item_equal= it++))
{
- item_equal->fix_length_and_dec();
+ if (item_equal->fix_length_and_dec())
+ return NULL;
item_equal->update_used_tables();
set_if_bigger(thd->lex->current_select->max_equal_elems,
item_equal->n_field_items());
@@ -13598,16 +13779,16 @@ static int compare_fields_by_table_order(Item *field1,
Item_field *f1= (Item_field *) (field1->real_item());
Item_field *f2= (Item_field *) (field2->real_item());
if (field1->const_item() || f1->const_item())
- return 1;
- if (field2->const_item() || f2->const_item())
return -1;
- if (f2->used_tables() & OUTER_REF_TABLE_BIT)
- {
+ if (field2->const_item() || f2->const_item())
+ return 1;
+ if (f1->used_tables() & OUTER_REF_TABLE_BIT)
+ {
outer_ref= 1;
cmp= -1;
}
- if (f1->used_tables() & OUTER_REF_TABLE_BIT)
- {
+ if (f2->used_tables() & OUTER_REF_TABLE_BIT)
+ {
outer_ref= 1;
cmp++;
}
@@ -13631,10 +13812,12 @@ static int compare_fields_by_table_order(Item *field1,
tab2= tab2->bush_root_tab;
}
- cmp= tab2 - tab1;
+ cmp= (int)(tab1 - tab2);
if (!cmp)
{
+ /* Fields f1, f2 belong to the same table */
+
JOIN_TAB *tab= idx[f1->field->table->tablenr];
uint keyno= MAX_KEY;
if (tab->ref.key_parts)
@@ -13643,31 +13826,38 @@ static int compare_fields_by_table_order(Item *field1,
keyno = tab->select->quick->index;
if (keyno != MAX_KEY)
{
- if (f2->field->part_of_key.is_set(keyno))
- cmp= -1;
if (f1->field->part_of_key.is_set(keyno))
+ cmp= -1;
+ if (f2->field->part_of_key.is_set(keyno))
cmp++;
+ /*
+ Here:
+ if both f1, f2 are components of the key tab->ref.key then cmp==0,
+ if only f1 is a component of the key then cmp==-1 (f1 is better),
+ if only f2 is a component of the key then cmp==1, (f2 is better),
+ if none of f1,f1 is component of the key cmp==0.
+ */
if (!cmp)
{
KEY *key_info= tab->table->key_info + keyno;
for (uint i= 0; i < key_info->user_defined_key_parts; i++)
{
Field *fld= key_info->key_part[i].field;
- if (fld->eq(f2->field))
+ if (fld->eq(f1->field))
{
- cmp= -1;
+ cmp= -1; // f1 is better
break;
}
- if (fld->eq(f1->field))
+ if (fld->eq(f2->field))
{
- cmp= 1;
+ cmp= 1; // f2 is better
break;
}
}
}
}
- else
- cmp= f2->field->field_index-f1->field->field_index;
+ if (!cmp)
+ cmp= f1->field->field_index-f2->field->field_index;
}
return cmp < 0 ? -1 : (cmp ? 1 : 0);
}
@@ -14318,7 +14508,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
{
cond->marker=1;
COND_CMP *tmp2;
- if ((tmp2=new COND_CMP(and_father,func)))
+ if ((tmp2= new (thd->mem_root) COND_CMP(and_father, func)))
save_list->push_back(tmp2);
}
/*
@@ -14350,7 +14540,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
thd->change_item_tree(args + 1, value);
cond->marker=1;
COND_CMP *tmp2;
- if ((tmp2=new COND_CMP(and_father,func)))
+ if ((tmp2=new (thd->mem_root) COND_CMP(and_father, func)))
save_list->push_back(tmp2);
}
if (functype != Item_func::LIKE_FUNC)
@@ -15995,7 +16185,6 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field,
else if (org_field->type() == FIELD_TYPE_DOUBLE)
((Field_double *) new_field)->not_fixed= TRUE;
new_field->vcol_info= 0;
- new_field->stored_in_db= TRUE;
new_field->cond_selectivity= 1.0;
new_field->next_equal_field= NULL;
new_field->option_list= NULL;
@@ -16073,6 +16262,8 @@ Field *Item::create_tmp_field(bool group, TABLE *table, uint convert_int_length)
update the record in the original table.
If modify_item is 0 then fill_record() will
update the temporary table
+ @param convert_blob_length If >0 create a varstring(convert_blob_length)
+ field instead of blob.
@retval
0 on error
@@ -16295,6 +16486,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
case Item::NULL_ITEM:
case Item::VARBIN_ITEM:
case Item::CACHE_ITEM:
+ case Item::WINDOW_FUNC_ITEM: // psergey-winfunc:
case Item::EXPR_CACHE_ITEM:
case Item::PARAM_ITEM:
if (make_copy_field)
@@ -16323,12 +16515,12 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
a tmp_set bitmap to be used by things like filesort.
*/
-void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
+void
+setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count)
{
- uint field_count= table->s->fields;
uint bitmap_size= bitmap_buffer_size(field_count);
- DBUG_ASSERT(table->s->vfields == 0 && table->def_vcol_set == 0);
+ DBUG_ASSERT(table->s->virtual_fields == 0 && table->def_vcol_set == 0);
my_bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
FALSE);
@@ -16341,6 +16533,9 @@ void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
bitmaps+= bitmap_size;
my_bitmap_init(&table->cond_set,
(my_bitmap_map*) bitmaps, field_count, FALSE);
+ bitmaps+= bitmap_size;
+ my_bitmap_init(&table->has_value_set,
+ (my_bitmap_map*) bitmaps, field_count, FALSE);
/* write_set and all_set are copies of read_set */
table->def_write_set= table->def_read_set;
table->s->all_set= table->def_read_set;
@@ -16349,6 +16544,13 @@ void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
}
+void
+setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
+{
+ setup_tmp_table_column_bitmaps(table, bitmaps, table->s->fields);
+}
+
+
/**
Create a temp table according to a field list.
@@ -16436,7 +16638,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
{
/* if we run out of slots or we are not using tempool */
sprintf(path, "%s%lx_%lx_%x", tmp_file_prefix,current_pid,
- thd->thread_id, thd->tmp_table++);
+ (ulong) thd->thread_id, thd->tmp_table++);
}
/*
@@ -16509,7 +16711,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
&tmpname, (uint) strlen(path)+1,
&group_buff, (group && ! using_unique_constraint ?
param->group_length : 0),
- &bitmaps, bitmap_buffer_size(field_count)*5,
+ &bitmaps, bitmap_buffer_size(field_count)*6,
NullS))
{
if (temp_pool_slot != MY_BIT_NONE)
@@ -16547,13 +16749,12 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
table->in_use= thd;
table->quick_keys.init();
table->covering_keys.init();
- table->merge_keys.init();
table->intersect_keys.init();
table->keys_in_use_for_query.init();
table->no_rows_with_nulls= param->force_not_null_cols;
table->s= share;
- init_tmp_table_share(thd, share, "", 0, tmpname, tmpname);
+ init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
share->blob_field= blob_field;
share->table_charset= param->table_charset;
share->primary_key= MAX_KEY; // Indicate no primary key
@@ -16566,14 +16767,19 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
reclength= string_total_length= 0;
blob_count= string_count= null_count= hidden_null_count= group_null_items= 0;
- param->using_indirect_summary_function=0;
+ param->using_outer_summary_function= 0;
List_iterator_fast<Item> li(fields);
Item *item;
Field **tmp_from_field=from_field;
while ((item=li++))
{
- Item::Type type=item->type();
+ Item::Type type= item->type();
+ if (type == Item::COPY_STR_ITEM)
+ {
+ item= ((Item_copy *)item)->get_item();
+ type= item->type();
+ }
if (not_all_columns)
{
if (item->with_sum_func && type != Item::SUM_FUNC_ITEM)
@@ -16588,7 +16794,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
function. We need to know this if someone is going to use
DISTINCT on the result.
*/
- param->using_indirect_summary_function=1;
+ param->using_outer_summary_function=1;
continue;
}
}
@@ -16779,7 +16985,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (blob_count || using_unique_constraint
|| (thd->variables.big_tables && !(select_options & SELECT_SMALL_RESULT))
|| (select_options & TMP_TABLE_FORCE_MYISAM)
- || thd->variables.tmp_table_size == 0)
+ || thd->variables.tmp_memory_table_size == 0)
{
share->db_plugin= ha_lock_engine(0, TMP_ENGINE_HTON);
table->file= get_new_handler(share, &table->mem_root,
@@ -16841,7 +17047,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
share->default_values= table->record[1]+alloc_length;
}
copy_func[0]=0; // End marker
- param->func_count= copy_func - param->items_to_copy;
+ param->func_count= (uint)(copy_func - param->items_to_copy);
setup_tmp_table_column_bitmaps(table, bitmaps);
@@ -16943,14 +17149,14 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
param->recinfo= recinfo; // Pointer to after last field
store_record(table,s->default_values); // Make empty default record
- if (thd->variables.tmp_table_size == ~ (ulonglong) 0) // No limit
+ if (thd->variables.tmp_memory_table_size == ~ (ulonglong) 0) // No limit
share->max_rows= ~(ha_rows) 0;
else
share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
- MY_MIN(thd->variables.tmp_table_size,
+ MY_MIN(thd->variables.tmp_memory_table_size,
thd->variables.max_heap_table_size) :
- thd->variables.tmp_table_size) /
- share->reclength);
+ thd->variables.tmp_memory_table_size) /
+ share->reclength);
set_if_bigger(share->max_rows,1); // For dummy start options
/*
Push the LIMIT clause to the temporary table creation, so that we
@@ -17042,8 +17248,6 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
cur_group->buff++; // Pointer to field data
group_buff++; // Skipp null flag
}
- /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */
- key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL;
group_buff+= cur_group->field->pack_length();
}
keyinfo->key_length+= key_part_info->length;
@@ -17204,13 +17408,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (!do_not_open)
{
- if (share->db_type() == TMP_ENGINE_HTON)
- {
- if (create_internal_tmp_table(table, param->keyinfo, param->start_recinfo,
- &param->recinfo, select_options))
- goto err;
- }
- if (open_tmp_table(table))
+ if (instantiate_tmp_table(table, param->keyinfo, param->start_recinfo,
+ &param->recinfo, select_options))
goto err;
}
@@ -17233,148 +17432,115 @@ err:
/****************************************************************************/
-/**
- Create a reduced TABLE object with properly set up Field list from a
- list of field definitions.
-
- The created table doesn't have a table handler associated with
- it, has no keys, no group/distinct, no copy_funcs array.
- The sole purpose of this TABLE object is to use the power of Field
- class to read/write data to/from table->record[0]. Then one can store
- the record in any container (RB tree, hash, etc).
- The table is created in THD mem_root, so are the table's fields.
- Consequently, if you don't BLOB fields, you don't need to free it.
-
- @param thd connection handle
- @param field_list list of column definitions
+void *Virtual_tmp_table::operator new(size_t size, THD *thd) throw()
+{
+ return (Virtual_tmp_table *) alloc_root(thd->mem_root, size);
+}
- @return
- 0 if out of memory, TABLE object in case of success
-*/
-TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list)
+bool Virtual_tmp_table::init(uint field_count)
{
- uint field_count= field_list.elements;
- uint blob_count= 0;
- Field **field;
- Create_field *cdef; /* column definition */
- uint record_length= 0;
- uint null_count= 0; /* number of columns which may be null */
- uint null_pack_length; /* NULL representation array length */
uint *blob_field;
uchar *bitmaps;
- TABLE *table;
- TABLE_SHARE *share;
-
- if (!multi_alloc_root(thd->mem_root,
- &table, sizeof(*table),
- &share, sizeof(*share),
+ if (!multi_alloc_root(in_use->mem_root,
+ &s, sizeof(*s),
&field, (field_count + 1) * sizeof(Field*),
- &blob_field, (field_count+1) *sizeof(uint),
- &bitmaps, bitmap_buffer_size(field_count)*5,
+ &blob_field, (field_count + 1) * sizeof(uint),
+ &bitmaps, bitmap_buffer_size(field_count) * 6,
NullS))
- return 0;
+ return true;
+ s->reset();
+ s->blob_field= blob_field;
+ setup_tmp_table_column_bitmaps(this, bitmaps, field_count);
+ m_alloced_field_count= field_count;
+ return false;
+};
- table->reset();
- table->field= field;
- table->s= share;
- table->temp_pool_slot= MY_BIT_NONE;
- share->reset();
- share->blob_field= blob_field;
- share->fields= field_count;
- setup_tmp_table_column_bitmaps(table, bitmaps);
+bool Virtual_tmp_table::add(List<Column_definition> &field_list)
+{
/* Create all fields and calculate the total length of record */
- List_iterator_fast<Create_field> it(field_list);
- while ((cdef= it++))
- {
- *field= make_field(share, thd->mem_root, 0, cdef->length,
- (uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
- f_maybe_null(cdef->pack_flag) ? 1 : 0,
- cdef->pack_flag, cdef->sql_type, cdef->charset,
- cdef->geom_type, cdef->srid, cdef->unireg_check,
- cdef->interval, cdef->field_name);
- if (!*field)
- goto error;
- (*field)->init(table);
- record_length+= (*field)->pack_length();
- if (! ((*field)->flags & NOT_NULL_FLAG))
- null_count++;
-
- if ((*field)->flags & BLOB_FLAG)
- share->blob_field[blob_count++]= (uint) (field - table->field);
-
- field++;
+ Column_definition *cdef; /* column definition */
+ List_iterator_fast<Column_definition> it(field_list);
+ for ( ; (cdef= it++); )
+ {
+ Field *tmp;
+ if (!(tmp= cdef->make_field(s, in_use->mem_root, 0,
+ (uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
+ f_maybe_null(cdef->pack_flag) ? 1 : 0,
+ cdef->field_name)))
+ return true;
+ add(tmp);
}
- *field= NULL; /* mark the end of the list */
- share->blob_field[blob_count]= 0; /* mark the end of the list */
- share->blob_fields= blob_count;
+ return false;
+}
- null_pack_length= (null_count + 7)/8;
- share->reclength= record_length + null_pack_length;
- share->rec_buff_length= ALIGN_SIZE(share->reclength + 1);
- table->record[0]= (uchar*) thd->alloc(share->rec_buff_length);
- if (!table->record[0])
- goto error;
- if (null_pack_length)
- {
- table->null_flags= (uchar*) table->record[0];
- share->null_fields= null_count;
- share->null_bytes= share->null_bytes_for_compare= null_pack_length;
- }
+void Virtual_tmp_table::setup_field_pointers()
+{
+ uchar *null_pos= record[0];
+ uchar *field_pos= null_pos + s->null_bytes;
+ uint null_bit= 1;
- table->in_use= thd; /* field->reset() may access table->in_use */
+ for (Field **cur_ptr= field; *cur_ptr; ++cur_ptr)
{
- /* Set up field pointers */
- uchar *null_pos= table->record[0];
- uchar *field_pos= null_pos + share->null_bytes;
- uint null_bit= 1;
-
- for (field= table->field; *field; ++field)
+ Field *cur_field= *cur_ptr;
+ if ((cur_field->flags & NOT_NULL_FLAG))
+ cur_field->move_field(field_pos);
+ else
{
- Field *cur_field= *field;
- if ((cur_field->flags & NOT_NULL_FLAG))
- cur_field->move_field(field_pos);
- else
+ cur_field->move_field(field_pos, (uchar*) null_pos, null_bit);
+ null_bit<<= 1;
+ if (null_bit == (uint)1 << 8)
{
- cur_field->move_field(field_pos, (uchar*) null_pos, null_bit);
- null_bit<<= 1;
- if (null_bit == (uint)1 << 8)
- {
- ++null_pos;
- null_bit= 1;
- }
+ ++null_pos;
+ null_bit= 1;
}
- if (cur_field->type() == MYSQL_TYPE_BIT &&
- cur_field->key_type() == HA_KEYTYPE_BIT)
+ }
+ if (cur_field->type() == MYSQL_TYPE_BIT &&
+ cur_field->key_type() == HA_KEYTYPE_BIT)
+ {
+ /* This is a Field_bit since key_type is HA_KEYTYPE_BIT */
+ static_cast<Field_bit*>(cur_field)->set_bit_ptr(null_pos, null_bit);
+ null_bit+= cur_field->field_length & 7;
+ if (null_bit > 7)
{
- /* This is a Field_bit since key_type is HA_KEYTYPE_BIT */
- static_cast<Field_bit*>(cur_field)->set_bit_ptr(null_pos, null_bit);
- null_bit+= cur_field->field_length & 7;
- if (null_bit > 7)
- {
- null_pos++;
- null_bit-= 8;
- }
+ null_pos++;
+ null_bit-= 8;
}
- cur_field->reset();
-
- field_pos+= cur_field->pack_length();
}
+ cur_field->reset();
+ field_pos+= cur_field->pack_length();
}
- return table;
-error:
- for (field= table->field; *field; ++field)
- delete *field; /* just invokes field destructor */
- return 0;
+}
+
+
+bool Virtual_tmp_table::open()
+{
+ // Make sure that we added all the fields we planned to:
+ DBUG_ASSERT(s->fields == m_alloced_field_count);
+ field[s->fields]= NULL; // mark the end of the list
+ s->blob_field[s->blob_fields]= 0; // mark the end of the list
+
+ uint null_pack_length= (s->null_fields + 7) / 8; // NULL-bit array length
+ s->reclength+= null_pack_length;
+ s->rec_buff_length= ALIGN_SIZE(s->reclength + 1);
+ if (!(record[0]= (uchar*) in_use->alloc(s->rec_buff_length)))
+ return true;
+ if (null_pack_length)
+ {
+ null_flags= (uchar*) record[0];
+ s->null_bytes= s->null_bytes_for_compare= null_pack_length;
+ }
+ setup_field_pointers();
+ return false;
}
bool open_tmp_table(TABLE *table)
{
int error;
- if ((error= table->file->ha_open(table, table->s->table_name.str, O_RDWR,
+ if ((error= table->file->ha_open(table, table->s->path.str, O_RDWR,
HA_OPEN_TMP_TABLE |
HA_OPEN_INTERNAL_TABLE)))
{
@@ -17382,11 +17548,11 @@ bool open_tmp_table(TABLE *table)
table->db_stat= 0;
return 1;
}
- table->db_stat= HA_OPEN_KEYFILE+HA_OPEN_RNDFILE;
+ table->db_stat= HA_OPEN_KEYFILE;
(void) table->file->extra(HA_EXTRA_QUICK); /* Faster */
- if (!table->created)
+ if (!table->is_created())
{
- table->created= TRUE;
+ table->set_created();
table->in_use->inc_status_created_tmp_tables();
}
@@ -17528,10 +17694,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
}
bzero((char*) &create_info,sizeof(create_info));
-
- /* Use long data format, to ensure we never get a 'table is full' error */
- if (!(options & SELECT_SMALL_RESULT))
- create_info.data_file_length= ~(ulonglong) 0;
+ create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
/*
The logic for choosing the record format:
@@ -17568,18 +17731,14 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
Emulate behaviour by making column not-nullable when creating the
table.
*/
- uint cols= (*recinfo-start_recinfo);
+ uint cols= (uint)(*recinfo-start_recinfo);
start_recinfo[cols-1].null_bit= 0;
}
}
- if ((error= maria_create(share->table_name.str,
- file_type,
- share->keys, &keydef,
- (uint) (*recinfo-start_recinfo),
- start_recinfo,
- share->uniques, &uniquedef,
- &create_info,
+ if ((error= maria_create(share->path.str, file_type, share->keys, &keydef,
+ (uint) (*recinfo-start_recinfo), start_recinfo,
+ share->uniques, &uniquedef, &create_info,
create_flags)))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
@@ -17592,7 +17751,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
table->in_use->inc_status_created_tmp_tables();
table->in_use->query_plan_flags|= QPLAN_TMP_DISK;
share->db_record_offset= 1;
- table->created= TRUE;
+ table->set_created();
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
@@ -17727,15 +17886,11 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
MI_CREATE_INFO create_info;
bzero((char*) &create_info,sizeof(create_info));
+ create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
- if (!(options & SELECT_SMALL_RESULT))
- create_info.data_file_length= ~(ulonglong) 0;
-
- if ((error=mi_create(share->table_name.str, share->keys, &keydef,
- (uint) (*recinfo-start_recinfo),
- start_recinfo,
- share->uniques, &uniquedef,
- &create_info,
+ if ((error=mi_create(share->path.str, share->keys, &keydef,
+ (uint) (*recinfo-start_recinfo), start_recinfo,
+ share->uniques, &uniquedef, &create_info,
HA_CREATE_TMP_TABLE | HA_CREATE_INTERNAL_TABLE |
((share->db_create_options & HA_OPTION_PACK_RECORD) ?
HA_PACK_RECORD : 0)
@@ -17749,7 +17904,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
table->in_use->inc_status_created_tmp_tables();
table->in_use->query_plan_flags|= QPLAN_TMP_DISK;
share->db_record_offset= 1;
- table->created= TRUE;
+ table->set_created();
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
@@ -17889,7 +18044,7 @@ err_killed:
(void) table->file->ha_rnd_end();
(void) new_table.file->ha_close();
err1:
- new_table.file->ha_delete_table(new_table.s->table_name.str);
+ new_table.file->ha_delete_table(new_table.s->path.str);
err2:
delete new_table.file;
thd_proc_info(thd, save_proc_info);
@@ -17910,20 +18065,19 @@ free_tmp_table(THD *thd, TABLE *entry)
save_proc_info=thd->proc_info;
THD_STAGE_INFO(thd, stage_removing_tmp_table);
- if (entry->file && entry->created)
+ if (entry->file && entry->is_created())
{
entry->file->ha_index_or_rnd_end();
if (entry->db_stat)
- entry->file->ha_drop_table(entry->s->table_name.str);
+ entry->file->ha_drop_table(entry->s->path.str);
else
- entry->file->ha_delete_table(entry->s->table_name.str);
+ entry->file->ha_delete_table(entry->s->path.str);
delete entry->file;
}
/* free blobs */
for (Field **ptr=entry->field ; *ptr ; ptr++)
(*ptr)->free();
- free_io_cache(entry);
if (entry->temp_pool_slot != MY_BIT_NONE)
bitmap_lock_clear_bit(&temp_pool, entry->temp_pool_slot);
@@ -17939,81 +18093,101 @@ free_tmp_table(THD *thd, TABLE *entry)
/**
- @details
- Rows produced by a join sweep may end up in a temporary table or be sent
- to a client. Setup the function of the nested loop join algorithm which
- handles final fully constructed and matched records.
+ @brief
+ Set write_func of AGGR_OP object
- @param join join to setup the function for.
+ @param join_tab JOIN_TAB of the corresponding tmp table
- @return
- end_select function to use. This function can't fail.
+ @details
+ Sets up write_func according to how the AGGR_OP object that is
+ attached to the given join_tab will be used in the query.
*/
-Next_select_func setup_end_select_func(JOIN *join)
+void set_postjoin_aggr_write_func(JOIN_TAB *tab)
{
- TABLE *table= join->tmp_table;
- TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param;
- Next_select_func end_select;
+ JOIN *join= tab->join;
+ TABLE *table= tab->table;
+ AGGR_OP *aggr= tab->aggr;
+ TMP_TABLE_PARAM *tmp_tbl= tab->tmp_table_param;
- /* Set up select_end */
- if (table)
+ DBUG_ASSERT(table && aggr);
+
+ if (table->group && tmp_tbl->sum_func_count &&
+ !tmp_tbl->precomputed_group_by)
{
- if (table->group && tmp_tbl->sum_func_count &&
- !tmp_tbl->precomputed_group_by)
- {
- if (table->s->keys)
- {
- DBUG_PRINT("info",("Using end_update"));
- end_select=end_update;
- }
- else
- {
- DBUG_PRINT("info",("Using end_unique_update"));
- end_select=end_unique_update;
- }
- }
- else if (join->sort_and_group && !tmp_tbl->precomputed_group_by)
+ /*
+ Note for MyISAM tmp tables: if uniques is true keys won't be
+ created.
+ */
+ if (table->s->keys && !table->s->uniques)
{
- DBUG_PRINT("info",("Using end_write_group"));
- end_select=end_write_group;
+ DBUG_PRINT("info",("Using end_update"));
+ aggr->set_write_func(end_update);
}
else
{
- DBUG_PRINT("info",("Using end_write"));
- end_select=end_write;
- if (tmp_tbl->precomputed_group_by)
- {
- /*
- A preceding call to create_tmp_table in the case when loose
- index scan is used guarantees that
- TMP_TABLE_PARAM::items_to_copy has enough space for the group
- by functions. It is OK here to use memcpy since we copy
- Item_sum pointers into an array of Item pointers.
- */
- memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count,
- join->sum_funcs,
- sizeof(Item*)*tmp_tbl->sum_func_count);
- tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0;
- }
+ DBUG_PRINT("info",("Using end_unique_update"));
+ aggr->set_write_func(end_unique_update);
}
}
+ else if (join->sort_and_group && !tmp_tbl->precomputed_group_by &&
+ !join->sort_and_group_aggr_tab && join->tables_list)
+ {
+ DBUG_PRINT("info",("Using end_write_group"));
+ aggr->set_write_func(end_write_group);
+ join->sort_and_group_aggr_tab= tab;
+ }
else
{
- /*
- Choose method for presenting result to user. Use end_send_group
- if the query requires grouping (has a GROUP BY clause and/or one or
- more aggregate functions). Use end_send if the query should not
- be grouped.
- */
- if ((join->sort_and_group ||
- (join->procedure && join->procedure->flags & PROC_GROUP)) &&
- !tmp_tbl->precomputed_group_by)
- end_select= end_send_group;
- else
- end_select= end_send;
+ DBUG_PRINT("info",("Using end_write"));
+ aggr->set_write_func(end_write);
+ if (tmp_tbl->precomputed_group_by)
+ {
+ /*
+ A preceding call to create_tmp_table in the case when loose
+ index scan is used guarantees that
+ TMP_TABLE_PARAM::items_to_copy has enough space for the group
+ by functions. It is OK here to use memcpy since we copy
+ Item_sum pointers into an array of Item pointers.
+ */
+ memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count,
+ join->sum_funcs,
+ sizeof(Item*)*tmp_tbl->sum_func_count);
+ tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0;
+ }
}
- return end_select;
+}
+
+
+/**
+ @details
+ Rows produced by a join sweep may end up in a temporary table or be sent
+ to a client. Set the function of the nested loop join algorithm which
+ handles final fully constructed and matched records.
+
+ @param join join to setup the function for.
+
+ @return
+ end_select function to use. This function can't fail.
+*/
+
+Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab)
+{
+ TMP_TABLE_PARAM *tmp_tbl= tab ? tab->tmp_table_param : &join->tmp_table_param;
+
+ /*
+ Choose method for presenting result to user. Use end_send_group
+ if the query requires grouping (has a GROUP BY clause and/or one or
+ more aggregate functions). Use end_send if the query should not
+ be grouped.
+ */
+ if (join->sort_and_group && !tmp_tbl->precomputed_group_by)
+ {
+ DBUG_PRINT("info",("Using end_send_group"));
+ return end_send_group;
+ }
+ DBUG_PRINT("info",("Using end_send"));
+ return end_send;
}
@@ -18029,19 +18203,13 @@ Next_select_func setup_end_select_func(JOIN *join)
*/
static int
-do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
+do_select(JOIN *join, Procedure *procedure)
{
int rc= 0;
enum_nested_loop_state error= NESTED_LOOP_OK;
- JOIN_TAB *UNINIT_VAR(join_tab);
DBUG_ENTER("do_select");
-
- join->procedure=procedure;
- join->tmp_table= table; /* Save for easy recursion */
- join->fields= fields;
- join->do_select_call_count++;
- if (join->pushdown_query && join->do_select_call_count == 1)
+ if (join->pushdown_query)
{
/* Select fields are in the temporary table */
join->fields= &join->tmp_fields_list1;
@@ -18049,34 +18217,34 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
join->set_items_ref_array(join->items1);
/* The storage engine will take care of the group by query result */
int res= join->pushdown_query->execute(join);
- DBUG_RETURN(res);
- }
- if (table)
- {
- (void) table->file->extra(HA_EXTRA_WRITE_CACHE);
- empty_record(table);
- if (table->group && join->tmp_table_param.sum_func_count &&
- table->s->keys && !table->file->inited)
+ if (res)
+ DBUG_RETURN(res);
+
+ if (join->pushdown_query->store_data_in_temp_table)
{
- rc= table->file->ha_index_init(0, 0);
- if (rc)
- {
- table->file->print_error(rc, MYF(0));
- DBUG_RETURN(-1);
- }
+ JOIN_TAB *last_tab= join->join_tab + join->table_count -
+ join->exec_join_tab_cnt();
+ last_tab->next_select= end_send;
+
+ enum_nested_loop_state state= last_tab->aggr->end_send();
+ if (state >= NESTED_LOOP_OK)
+ state= sub_select(join, last_tab, true);
+
+ if (state < NESTED_LOOP_OK)
+ res= 1;
+
+ if (join->result->send_eof())
+ res= 1;
}
+ DBUG_RETURN(res);
}
- /* Set up select_end */
- Next_select_func end_select= setup_end_select_func(join);
- if (join->table_count)
- {
- join->join_tab[join->top_join_tab_count - 1].next_select= end_select;
- join_tab=join->join_tab+join->const_tables;
- }
+
+ join->procedure= procedure;
join->duplicate_rows= join->send_records=0;
- if (join->table_count == join->const_tables)
+ if (join->only_const_tables() && !join->need_tmp)
{
+ Next_select_func end_select= setup_end_select_func(join, NULL);
/*
HAVING will be checked after processing aggregate functions,
But WHERE should checked here (we alredy have read tables).
@@ -18088,8 +18256,9 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
DBUG_ASSERT(join->outer_ref_cond == NULL);
if (!join->pseudo_bits_cond || join->pseudo_bits_cond->val_int())
{
+ // HAVING will be checked by end_select
error= (*end_select)(join, 0, 0);
- if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT)
+ if (error >= NESTED_LOOP_OK)
error= (*end_select)(join, 0, 1);
/*
@@ -18105,7 +18274,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
if (!join->having || join->having->val_int())
{
List<Item> *columns_list= (procedure ? &join->procedure_fields_list :
- fields);
+ join->fields);
rc= join->result->send_data(*columns_list) > 0;
}
}
@@ -18119,8 +18288,6 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
}
else
{
- DBUG_ASSERT(join->table_count);
-
DBUG_EXECUTE_IF("show_explain_probe_do_select",
if (dbug_user_var_equals_int(join->thd,
"show_explain_probe_select_id",
@@ -18128,15 +18295,14 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
dbug_serve_apcs(join->thd, 1);
);
+ JOIN_TAB *join_tab= join->join_tab +
+ (join->tables_list ? join->const_tables : 0);
if (join->outer_ref_cond && !join->outer_ref_cond->val_int())
error= NESTED_LOOP_NO_MORE_ROWS;
else
- error= sub_select(join,join_tab,0);
- if ((error == NESTED_LOOP_OK || error == NESTED_LOOP_NO_MORE_ROWS) &&
- join->thd->killed != ABORT_QUERY)
- error= sub_select(join,join_tab,1);
- if (error == NESTED_LOOP_QUERY_LIMIT)
- error= NESTED_LOOP_OK; /* select_limit used */
+ error= join->first_select(join,join_tab,0);
+ if (error >= NESTED_LOOP_OK && join->thd->killed != ABORT_QUERY)
+ error= join->first_select(join,join_tab,1);
}
join->thd->limit_found_rows= join->send_records - join->duplicate_rows;
@@ -18144,23 +18310,37 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
if (error == NESTED_LOOP_NO_MORE_ROWS || join->thd->killed == ABORT_QUERY)
error= NESTED_LOOP_OK;
- if (table)
+ /*
+ For "order by with limit", we cannot rely on send_records, but need
+ to use the rowcount read originally into the join_tab applying the
+ filesort. There cannot be any post-filtering conditions, nor any
+ following join_tabs in this case, so this rowcount properly represents
+ the correct number of qualifying rows.
+ */
+ if (join->order)
{
- int tmp, new_errno= 0;
- if ((tmp=table->file->extra(HA_EXTRA_NO_CACHE)))
+ // Save # of found records prior to cleanup
+ JOIN_TAB *sort_tab;
+ JOIN_TAB *join_tab= join->join_tab;
+ uint const_tables= join->const_tables;
+
+ // Take record count from first non constant table or from last tmp table
+ if (join->aggr_tables > 0)
+ sort_tab= join_tab + join->top_join_tab_count + join->aggr_tables - 1;
+ else
{
- DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed"));
- new_errno= tmp;
+ DBUG_ASSERT(!join->only_const_tables());
+ sort_tab= join_tab + const_tables;
}
- if ((tmp=table->file->ha_index_or_rnd_end()))
+ if (sort_tab->filesort &&
+ join->select_options & OPTION_FOUND_ROWS &&
+ sort_tab->filesort->sortorder &&
+ sort_tab->filesort->limit != HA_POS_ERROR)
{
- DBUG_PRINT("error",("ha_index_or_rnd_end() failed"));
- new_errno= tmp;
+ join->thd->limit_found_rows= sort_tab->records;
}
- if (new_errno)
- table->file->print_error(new_errno,MYF(0));
}
- else
+
{
/*
The following will unlock all cursors if the command wasn't an
@@ -18174,11 +18354,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
Sic: this branch works even if rc != 0, e.g. when
send_data above returns an error.
*/
- if (!table) // If sending data to client
- {
- if (join->result->send_eof())
- rc= 1; // Don't send error
- }
+ if (join->result->send_eof())
+ rc= 1; // Don't send error
DBUG_PRINT("info",("%ld records output", (long) join->send_records));
}
else
@@ -18189,7 +18366,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
DBUG_PRINT("error",("Error: do_select() failed"));
}
#endif
- DBUG_RETURN(join->thd->is_error() ? -1 : rc);
+ rc= join->thd->is_error() ? -1 : rc;
+ DBUG_RETURN(rc);
}
@@ -18206,6 +18384,105 @@ int rr_sequential_and_unpack(READ_RECORD *info)
}
+/**
+ @brief
+ Instantiates temporary table
+
+ @param table Table object that describes the table to be
+ instantiated
+ @param keyinfo Description of the index (there is always one index)
+ @param start_recinfo Column descriptions
+ @param recinfo INOUT End of column descriptions
+ @param options Option bits
+
+ @details
+ Creates tmp table and opens it.
+
+ @return
+ FALSE - OK
+ TRUE - Error
+*/
+
+bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
+ TMP_ENGINE_COLUMNDEF *start_recinfo,
+ TMP_ENGINE_COLUMNDEF **recinfo,
+ ulonglong options)
+{
+ if (table->s->db_type() == TMP_ENGINE_HTON)
+ {
+ if (create_internal_tmp_table(table, keyinfo, start_recinfo, recinfo,
+ options))
+ return TRUE;
+ // Make empty record so random data is not written to disk
+ empty_record(table);
+ }
+ if (open_tmp_table(table))
+ return TRUE;
+
+ return FALSE;
+}
+
+
+/**
+ @brief
+ Accumulate rows of the result of an aggregation operation in a tmp table
+
+ @param join pointer to the structure providing all context info for the query
+ @param join_tab the JOIN_TAB object to which the operation is attached
+ @param end_records TRUE <=> all records were accumulated, send them further
+
+ @details
+ This function accumulates records of the aggregation operation for
+ the node join_tab from the execution plan in a tmp table. To add a new
+ record the function calls join_tab->aggr->put_record().
+ When there are no more records to save — in this
+ case the end_of_records argument == true — the function tells the operation to
+ send records further by calling aggr->send_records().
+ When all records are sent this function passes the 'end_of_records' signal
+ further by calling sub_select() with the end_of_records argument set to
+ true. After that aggr->end_send() is called to tell the operation that
+ it can end the internal buffer scan.
+
+ @note
+ This function is not expected to be called when dynamic range scan is
+ used to scan join_tab because range scans aren't used for tmp tables.
+
+ @return
+ return one of enum_nested_loop_state.
+*/
+
+enum_nested_loop_state
+sub_select_postjoin_aggr(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
+{
+ enum_nested_loop_state rc;
+ AGGR_OP *aggr= join_tab->aggr;
+
+ /* This function cannot be called if join_tab has no associated aggregation */
+ DBUG_ASSERT(aggr != NULL);
+
+ DBUG_ENTER("sub_select_aggr_tab");
+
+ if (join->thd->killed)
+ {
+ /* The user has aborted the execution of the query */
+ join->thd->send_kill_message();
+ DBUG_RETURN(NESTED_LOOP_KILLED);
+ }
+
+ if (end_of_records)
+ {
+ rc= aggr->end_send();
+ if (rc >= NESTED_LOOP_OK)
+ rc= sub_select(join, join_tab, end_of_records);
+ DBUG_RETURN(rc);
+ }
+
+ rc= aggr->put_record();
+
+ DBUG_RETURN(rc);
+}
+
+
/*
Fill the join buffer with partial records, retrieve all full matches for
them
@@ -18259,7 +18536,8 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
if (end_of_records)
{
rc= cache->join_records(FALSE);
- if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS)
+ if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS ||
+ rc == NESTED_LOOP_QUERY_LIMIT)
rc= sub_select(join, join_tab, end_of_records);
DBUG_RETURN(rc);
}
@@ -18286,7 +18564,8 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
without it. If it's not the case remove it.
*/
rc= cache->join_records(TRUE);
- if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS)
+ if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS ||
+ rc == NESTED_LOOP_QUERY_LIMIT)
rc= sub_select(join, join_tab, end_of_records);
DBUG_RETURN(rc);
}
@@ -18373,7 +18652,7 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
is the same as the value of the predicate, otherwise it's just returns
true.
To carry out a return to a nested loop level of join table t the pointer
- to t is remembered in the field 'return_tab' of the join structure.
+ to t is remembered in the field 'return_tab' of the join structure.
Consider the following query:
@code
SELECT * FROM t1,
@@ -18436,7 +18715,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
int error;
enum_nested_loop_state rc= NESTED_LOOP_OK;
READ_RECORD *info= &join_tab->read_record;
-
+
+
for (SJ_TMP_TABLE *flush_dups_table= join_tab->flush_weedout_table;
flush_dups_table;
flush_dups_table= flush_dups_table->next_flush_table)
@@ -18512,7 +18792,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
skip_over= FALSE;
}
- if (join_tab->keep_current_rowid)
+ if (join_tab->keep_current_rowid && !error)
join_tab->table->file->position(join_tab->table->record[0]);
rc= evaluate_join_record(join, join_tab, error);
@@ -18527,7 +18807,6 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
DBUG_RETURN(rc);
}
-
/**
@brief Process one row of the nested loop join.
@@ -18570,9 +18849,6 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
join_tab->tracker->r_rows++;
- if (join_tab->table->vfield)
- update_virtual_fields(join->thd, join_tab->table);
-
if (select_cond)
{
select_cond_result= MY_TEST(select_cond->val_int());
@@ -18589,6 +18865,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
condition is true => a match is found.
*/
join_tab->tracker->r_rows_after_where++;
+
bool found= 1;
while (join_tab->first_unmatched && found)
{
@@ -18924,15 +19201,15 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos)
}
else
{
- if (!table->key_read && table->covering_keys.is_set(tab->ref.key) &&
- !table->no_keyread &&
+ if (/*!table->file->key_read && */
+ table->covering_keys.is_set(tab->ref.key) && !table->no_keyread &&
(int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY)
{
- table->enable_keyread();
+ table->file->ha_start_keyread(tab->ref.key);
tab->index= tab->ref.key;
}
error=join_read_const(tab);
- table->disable_keyread();
+ table->file->ha_end_keyread();
if (error)
{
tab->info= ET_UNIQUE_ROW_NOT_FOUND;
@@ -19020,8 +19297,6 @@ join_read_system(JOIN_TAB *tab)
empty_record(table); // Make empty record
return -1;
}
- if (table->vfield)
- update_virtual_fields(tab->join->thd, table);
store_record(table,record[1]);
}
else if (!table->status) // Only happens with left join
@@ -19067,8 +19342,6 @@ join_read_const(JOIN_TAB *tab)
return report_error(table, error);
return -1;
}
- if (table->vfield)
- update_virtual_fields(tab->join->thd, table);
store_record(table,record[1]);
}
else if (!(table->status & ~STATUS_NULL_ROW)) // Only happens with left join
@@ -19383,12 +19656,31 @@ bool test_if_use_dynamic_range_scan(JOIN_TAB *join_tab)
int join_init_read_record(JOIN_TAB *tab)
{
+ /*
+ Note: the query plan tree for the below operations is constructed in
+ save_agg_explain_data.
+ */
+ if (tab->distinct && tab->remove_duplicates()) // Remove duplicates.
+ return 1;
+ if (tab->filesort && tab->sort_table()) // Sort table.
+ return 1;
+
+ DBUG_EXECUTE_IF("kill_join_init_read_record",
+ tab->join->thd->set_killed(KILL_QUERY););
if (tab->select && tab->select->quick && tab->select->quick->reset())
+ {
+ /* Ensures error status is propagated back to client */
+ report_error(tab->table,
+ tab->join->thd->killed ? HA_ERR_QUERY_INTERRUPTED : HA_ERR_OUT_OF_MEM);
return 1;
- if (!tab->preread_init_done && tab->preread_init())
+ }
+ /* make sure we won't get ER_QUERY_INTERRUPTED from any code below */
+ DBUG_EXECUTE_IF("kill_join_init_read_record",
+ tab->join->thd->reset_killed(););
+ if (!tab->preread_init_done && tab->preread_init())
return 1;
if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
- tab->select,1,1, FALSE))
+ tab->select, tab->filesort_result, 1,1, FALSE))
return 1;
return (*tab->read_record.read_record)(&tab->read_record);
}
@@ -19406,7 +19698,7 @@ join_read_record_no_init(JOIN_TAB *tab)
save_copy_end= tab->read_record.copy_field_end;
init_read_record(&tab->read_record, tab->join->thd, tab->table,
- tab->select,1,1, FALSE);
+ tab->select, tab->filesort_result, 1, 1, FALSE);
tab->read_record.copy_field= save_copy;
tab->read_record.copy_field_end= save_copy_end;
@@ -19415,6 +19707,25 @@ join_read_record_no_init(JOIN_TAB *tab)
return (*tab->read_record.read_record)(&tab->read_record);
}
+
+/*
+ Helper function for sorting table with filesort.
+*/
+
+bool
+JOIN_TAB::sort_table()
+{
+ int rc;
+ DBUG_PRINT("info",("Sorting for index"));
+ THD_STAGE_INFO(join->thd, stage_creating_sort_index);
+ DBUG_ASSERT(join->ordered_index_usage != (filesort->order == join->order ?
+ JOIN::ordered_index_order_by :
+ JOIN::ordered_index_group_by));
+ rc= create_sort_index(join->thd, join, this, NULL);
+ return (rc != 0);
+}
+
+
static int
join_read_first(JOIN_TAB *tab)
{
@@ -19422,9 +19733,9 @@ join_read_first(JOIN_TAB *tab)
TABLE *table=tab->table;
DBUG_ENTER("join_read_first");
- if (table->covering_keys.is_set(tab->index) && !table->no_keyread &&
- !table->key_read)
- table->enable_keyread();
+ DBUG_ASSERT(table->no_keyread ||
+ !table->covering_keys.is_set(tab->index) ||
+ table->file->keyread == tab->index);
tab->table->status=0;
tab->read_record.read_record=join_read_next;
tab->read_record.table=table;
@@ -19462,9 +19773,9 @@ join_read_last(JOIN_TAB *tab)
int error= 0;
DBUG_ENTER("join_read_first");
- if (table->covering_keys.is_set(tab->index) && !table->no_keyread &&
- !table->key_read)
- table->enable_keyread();
+ DBUG_ASSERT(table->no_keyread ||
+ !table->covering_keys.is_set(tab->index) ||
+ table->file->keyread == tab->index);
tab->table->status=0;
tab->read_record.read_record=join_read_prev;
tab->read_record.table=table;
@@ -19588,16 +19899,19 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
bool end_of_records)
{
DBUG_ENTER("end_send");
+ /*
+ When all tables are const this function is called with join_tab == NULL.
+ This function shouldn't be called for the first join_tab as it needs
+ to get its fields from the previous tab.
+ */
+ DBUG_ASSERT(join_tab == NULL || join_tab != join->join_tab);
+ //TODO pass fields via argument
+ List<Item> *fields= join_tab ? (join_tab-1)->fields : join->fields;
+
if (!end_of_records)
{
if (join->table_count &&
- (join->join_tab->is_using_loose_index_scan() ||
- /*
- When order by used a loose scan as its input, the quick select may
- be attached to pre_sort_join_tab.
- */
- (join->pre_sort_join_tab &&
- join->pre_sort_join_tab->is_using_loose_index_scan())))
+ join->join_tab->is_using_loose_index_scan())
{
/* Copy non-aggregated fields when loose index scan is used. */
copy_fields(&join->tmp_table_param);
@@ -19614,7 +19928,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
/* result < 0 if row was not accepted and should not be counted */
- if ((error= join->result->send_data(*join->fields)))
+ if ((error= join->result->send_data(*fields)))
{
if (error > 0)
DBUG_RETURN(NESTED_LOOP_ERROR);
@@ -19628,13 +19942,15 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
!join->do_send_rows)
{
/*
- If filesort is used for sorting, stop after select_limit_cnt+1
- records are read. Because of optimization in some cases it can
- provide only select_limit_cnt+1 records.
+ If we have used Priority Queue for optimizing order by with limit,
+ then stop here, there are no more records to consume.
+ When this optimization is used, end_send is called on the next
+ join_tab.
*/
- if (join->order && join->sortorder &&
- join->filesort_found_rows &&
- join->select_options & OPTION_FOUND_ROWS)
+ if (join->order &&
+ join->select_options & OPTION_FOUND_ROWS &&
+ join_tab > join->join_tab &&
+ (join_tab - 1)->filesort && (join_tab - 1)->filesort->using_pq)
{
DBUG_PRINT("info", ("filesort NESTED_LOOP_QUERY_LIMIT"));
DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
@@ -19646,7 +19962,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (join->select_options & OPTION_FOUND_ROWS)
{
JOIN_TAB *jt=join->join_tab;
- if ((join->table_count == 1) && !join->tmp_table && !join->sort_and_group
+ if ((join->table_count == 1) && !join->sort_and_group
&& !join->send_group_parts && !join->having && !jt->select_cond &&
!(jt->select && jt->select->quick) &&
(jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
@@ -19655,12 +19971,9 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
/* Join over all rows in table; Return number of found rows */
TABLE *table=jt->table;
- join->select_options ^= OPTION_FOUND_ROWS;
- if (table->sort.record_pointers ||
- (table->sort.io_cache && my_b_inited(table->sort.io_cache)))
+ if (jt->filesort_result) // If filesort was used
{
- /* Using filesort */
- join->send_records= table->sort.found_records;
+ join->send_records= jt->filesort_result->found_rows;
}
else
{
@@ -19711,13 +20024,21 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int idx= -1;
enum_nested_loop_state ok_code= NESTED_LOOP_OK;
+ List<Item> *fields= join_tab ? (join_tab-1)->fields : join->fields;
DBUG_ENTER("end_send_group");
+ if (!join->items3.is_null() && !join->set_group_rpa)
+ {
+ join->set_group_rpa= true;
+ join->set_items_ref_array(join->items3);
+ }
+
if (!join->first_record || end_of_records ||
(idx=test_if_group_changed(join->group_fields)) >= 0)
{
- if (join->first_record ||
- (end_of_records && !join->group && !join->group_optimized_away))
+ if (!join->group_sent &&
+ (join->first_record ||
+ (end_of_records && !join->group && !join->group_optimized_away)))
{
if (join->procedure)
join->procedure->end_group();
@@ -19731,7 +20052,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
else
{
if (join->do_send_rows)
- error=join->procedure->send_row(*join->fields) ? 1 : 0;
+ error=join->procedure->send_row(*fields) ? 1 : 0;
join->send_records++;
}
if (end_of_records && join->procedure->end_of_records())
@@ -19743,11 +20064,8 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
List_iterator_fast<Item> it(*join->fields);
Item *item;
- DBUG_PRINT("info", ("no matching rows"));
-
- /* No matching rows for group function */
- join->clear();
- join->no_rows_in_result_called= 1;
+ /* No matching rows for group function */
+ join->clear();
while ((item= it++))
item->no_rows_in_result();
@@ -19758,7 +20076,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if (join->do_send_rows)
{
- error= join->result->send_data(*join->fields);
+ error=join->result->send_data(*fields);
if (error < 0)
{
/* Duplicate row, don't count */
@@ -19767,6 +20085,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
}
join->send_records++;
+ join->group_sent= true;
}
if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0)
{
@@ -19818,6 +20137,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_ERROR);
if (join->procedure)
join->procedure->add();
+ join->group_sent= false;
DBUG_RETURN(ok_code);
}
}
@@ -19834,16 +20154,16 @@ static enum_nested_loop_state
end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
bool end_of_records)
{
- TABLE *table=join->tmp_table;
+ TABLE *const table= join_tab->table;
DBUG_ENTER("end_write");
if (!end_of_records)
{
- copy_fields(&join->tmp_table_param);
- if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+ copy_fields(join_tab->tmp_table_param);
+ if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if (!join->having || join->having->val_int())
+ if (!join_tab->having || join_tab->having->val_int())
{
int error;
join->found_records++;
@@ -19853,15 +20173,16 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
goto end;
bool is_duplicate;
if (create_internal_tmp_table_from_heap(join->thd, table,
- join->tmp_table_param.start_recinfo,
- &join->tmp_table_param.recinfo,
+ join_tab->tmp_table_param->start_recinfo,
+ &join_tab->tmp_table_param->recinfo,
error, 1, &is_duplicate))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
if (is_duplicate)
goto end;
table->s->uniques=0; // To ensure rows are the same
}
- if (++join->send_records >= join->tmp_table_param.end_write_records &&
+ if (++join_tab->send_records >=
+ join_tab->tmp_table_param->end_write_records &&
join->do_send_rows)
{
if (!(join->select_options & OPTION_FOUND_ROWS))
@@ -19896,7 +20217,7 @@ static enum_nested_loop_state
end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
bool end_of_records)
{
- TABLE *table=join->tmp_table;
+ TABLE *const table= join_tab->table;
ORDER *group;
int error;
DBUG_ENTER("end_update");
@@ -19905,16 +20226,16 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_OK);
join->found_records++;
- copy_fields(&join->tmp_table_param); // Groups are copied twice.
+ copy_fields(join_tab->tmp_table_param); // Groups are copied twice.
/* Make a key of group index */
for (group=table->group ; group ; group=group->next)
{
Item *item= *group->item;
if (group->fast_field_copier_setup != group->field)
{
- DBUG_PRINT("info", ("new setup 0x%lx -> 0x%lx",
- (ulong)group->fast_field_copier_setup,
- (ulong)group->field));
+ DBUG_PRINT("info", ("new setup %p -> %p",
+ group->fast_field_copier_setup,
+ group->field));
group->fast_field_copier_setup= group->field;
group->fast_field_copier_func=
item->setup_fast_field_copier(group->field);
@@ -19925,7 +20246,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
group->buff[-1]= (char) group->field->is_null();
}
if (!table->file->ha_index_read_map(table->record[1],
- join->tmp_table_param.group_buff,
+ join_tab->tmp_table_param->group_buff,
HA_WHOLE_KEY,
HA_READ_KEY_EXACT))
{ /* Update old record */
@@ -19941,13 +20262,13 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
init_tmptable_sum_functions(join->sum_funcs);
- if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+ if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
if ((error= table->file->ha_write_tmp_row(table->record[0])))
{
if (create_internal_tmp_table_from_heap(join->thd, table,
- join->tmp_table_param.start_recinfo,
- &join->tmp_table_param.recinfo,
+ join_tab->tmp_table_param->start_recinfo,
+ &join_tab->tmp_table_param->recinfo,
error, 0, NULL))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
/* Change method to update rows */
@@ -19957,9 +20278,9 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_ERROR);
}
- join->join_tab[join->top_join_tab_count-1].next_select=end_unique_update;
+ join_tab->aggr->set_write_func(end_unique_update);
}
- join->send_records++;
+ join_tab->send_records++;
end:
if (join->thd->check_killed())
{
@@ -19976,7 +20297,7 @@ static enum_nested_loop_state
end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
bool end_of_records)
{
- TABLE *table=join->tmp_table;
+ TABLE *table= join_tab->table;
int error;
DBUG_ENTER("end_unique_update");
@@ -19984,12 +20305,12 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_OK);
init_tmptable_sum_functions(join->sum_funcs);
- copy_fields(&join->tmp_table_param); // Groups are copied twice.
- if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+ copy_fields(join_tab->tmp_table_param); // Groups are copied twice.
+ if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
if (!(error= table->file->ha_write_tmp_row(table->record[0])))
- join->send_records++; // New group
+ join_tab->send_records++; // New group
else
{
if ((int) table->file->get_dup_key(error) < 0)
@@ -20035,7 +20356,7 @@ enum_nested_loop_state
end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
bool end_of_records)
{
- TABLE *table=join->tmp_table;
+ TABLE *table= join_tab->table;
int idx= -1;
DBUG_ENTER("end_write_group");
@@ -20049,27 +20370,30 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
int send_group_parts= join->send_group_parts;
if (idx < send_group_parts)
{
- if (!join->first_record)
- {
- /* No matching rows for group function */
- join->clear();
- }
+ if (!join->first_record)
+ {
+ /* No matching rows for group function */
+ join->clear();
+ }
copy_sum_funcs(join->sum_funcs,
join->sum_funcs_end[send_group_parts]);
- if (!join->having || join->having->val_int())
+ if (!join_tab->having || join_tab->having->val_int())
{
int error= table->file->ha_write_tmp_row(table->record[0]);
if (error &&
create_internal_tmp_table_from_heap(join->thd, table,
- join->tmp_table_param.start_recinfo,
- &join->tmp_table_param.recinfo,
- error, 0, NULL))
+ join_tab->tmp_table_param->start_recinfo,
+ &join_tab->tmp_table_param->recinfo,
+ error, 0, NULL))
DBUG_RETURN(NESTED_LOOP_ERROR);
}
if (join->rollup.state != ROLLUP::STATE_NONE)
{
- if (join->rollup_write_data((uint) (idx+1), table))
+ if (join->rollup_write_data((uint) (idx+1),
+ join_tab->tmp_table_param, table))
+ {
DBUG_RETURN(NESTED_LOOP_ERROR);
+ }
}
if (end_of_records)
goto end;
@@ -20084,8 +20408,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
if (idx < (int) join->send_group_parts)
{
- copy_fields(&join->tmp_table_param);
- if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+ copy_fields(join_tab->tmp_table_param);
+ if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
DBUG_RETURN(NESTED_LOOP_ERROR);
if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
DBUG_RETURN(NESTED_LOOP_ERROR);
@@ -20291,7 +20615,8 @@ make_cond_for_table_from_pred(THD *thd, Item *root_cond, Item *cond,
the new parent Item. This should not be expensive because all
children of Item_cond_and should be fixed by now.
*/
- new_cond->fix_fields(thd, 0);
+ if (new_cond->fix_fields(thd, 0))
+ return (COND*) 0;
new_cond->used_tables_cache=
((Item_cond_and*) cond)->used_tables_cache &
tables;
@@ -20623,7 +20948,7 @@ static int test_if_order_by_key(JOIN *join,
(1) this is an extended key
(2) we've reached its end
*/
- key_parts= (key_part - table->key_info[idx].key_part);
+ key_parts= (uint)(key_part - table->key_info[idx].key_part);
if (have_pk_suffix &&
reverse == 0 && // all were =const so far
key_parts == table->key_info[idx].ext_key_parts &&
@@ -20658,9 +20983,11 @@ static int test_if_order_by_key(JOIN *join,
if (key_part->field != field || !field->part_of_sortkey.is_set(idx))
DBUG_RETURN(0);
+ const ORDER::enum_order keypart_order=
+ (key_part->key_part_flag & HA_REVERSE_SORT) ?
+ ORDER::ORDER_DESC : ORDER::ORDER_ASC;
/* set flag to 1 if we can use read-next on key, else to -1 */
- flag= ((order->asc == !(key_part->key_part_flag & HA_REVERSE_SORT)) ?
- 1 : -1);
+ flag= (order->direction == keypart_order) ? 1 : -1;
if (reverse && flag != reverse)
DBUG_RETURN(0);
reverse=flag; // Remember if reverse
@@ -20671,7 +20998,7 @@ static int test_if_order_by_key(JOIN *join,
key_parts= (uint) (key_part - table->key_info[idx].key_part);
if (reverse == -1 &&
- !(table->file->index_flags(idx, user_defined_kp, 1) & HA_READ_PREV))
+ !(table->file->index_flags(idx, user_defined_kp-1, 1) & HA_READ_PREV))
reverse= 0; // Index can't be used
if (have_pk_suffix && reverse == -1)
@@ -21336,13 +21663,10 @@ check_reverse_order:
If ref_key used index tree reading only ('Using index' in EXPLAIN),
and best_key doesn't, then revert the decision.
*/
- if (!table->covering_keys.is_set(best_key))
- table->disable_keyread();
+ if (table->covering_keys.is_set(best_key))
+ table->file->ha_start_keyread(best_key);
else
- {
- if (!table->key_read)
- table->enable_keyread();
- }
+ table->file->ha_end_keyread();
if (!quick_created)
{
@@ -21372,7 +21696,7 @@ check_reverse_order:
tab->ref.key_parts= 0;
if (select_limit < table->stat_records())
tab->limit= select_limit;
- table->disable_keyread();
+ table->file->ha_end_keyread();
}
}
else if (tab->type != JT_ALL || tab->select->quick)
@@ -21496,21 +21820,15 @@ use_filesort:
create_sort_index()
thd Thread handler
join Join with table to sort
- order How table should be sorted
- filesort_limit Max number of rows that needs to be sorted
- select_limit Max number of rows in final output
- Used to decide if we should use index or not
- is_order_by true if we are sorting on ORDER BY, false if GROUP BY
- Used to decide if we should use index or not
-
-
+ join_tab What table to sort
+ fsort Filesort object. NULL means "use tab->filesort".
+
IMPLEMENTATION
- If there is an index that can be used, the first non-const join_tab in
'join' is modified to use this index.
- If no index, create with filesort() an index file that can be used to
retrieve rows in order (should be done with 'read_record').
- The sorted data is stored in tab->table and will be freed when calling
- free_io_cache(tab->table).
+ The sorted data is stored in tab->filesort
RETURN VALUES
0 ok
@@ -21518,152 +21836,69 @@ use_filesort:
1 No records
*/
-static int
-create_sort_index(THD *thd, JOIN *join, ORDER *order,
- ha_rows filesort_limit, ha_rows select_limit,
- bool is_order_by)
-{
- uint length= 0;
- ha_rows examined_rows;
- ha_rows found_rows;
- ha_rows filesort_retval= HA_POS_ERROR;
+int
+create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort)
+{
TABLE *table;
SQL_SELECT *select;
- JOIN_TAB *tab;
- int err= 0;
bool quick_created= FALSE;
+ SORT_INFO *file_sort= 0;
DBUG_ENTER("create_sort_index");
- if (join->table_count == join->const_tables)
- DBUG_RETURN(0); // One row, no need to sort
- tab= join->join_tab + join->const_tables;
- table= tab->table;
- select= tab->select;
-
- JOIN_TAB *save_pre_sort_join_tab= NULL;
- if (join->pre_sort_join_tab)
- {
- /*
- we've already been in this function, and stashed away the
- original access method in join->pre_sort_join_tab, restore it
- now.
- */
-
- /* First, restore state of the handler */
- if (join->pre_sort_index != MAX_KEY)
- {
- if (table->file->ha_index_or_rnd_end())
- goto err;
- if (join->pre_sort_idx_pushed_cond)
- {
- table->file->idx_cond_push(join->pre_sort_index,
- join->pre_sort_idx_pushed_cond);
- }
- }
- else
- {
- if (table->file->ha_index_or_rnd_end() ||
- table->file->ha_rnd_init(TRUE))
- goto err;
- }
-
- /* Second, restore access method parameters */
- tab->records= join->pre_sort_join_tab->records;
- tab->select= join->pre_sort_join_tab->select;
- tab->select_cond= join->pre_sort_join_tab->select_cond;
- tab->type= join->pre_sort_join_tab->type;
- tab->read_first_record= join->pre_sort_join_tab->read_first_record;
-
- save_pre_sort_join_tab= join->pre_sort_join_tab;
- join->pre_sort_join_tab= NULL;
- }
- else
- {
- /*
- Save index #, save index condition. Do it right now, because MRR may
- */
- if (table->file->inited == handler::INDEX)
- {
- join->pre_sort_index= table->file->active_index;
- join->pre_sort_idx_pushed_cond= table->file->pushed_idx_cond;
- // no need to save key_read
- }
- else
- join->pre_sort_index= MAX_KEY;
- }
-
- /* Currently ORDER BY ... LIMIT is not supported in subqueries. */
- DBUG_ASSERT(join->group_list || !join->is_in_subquery());
+ if (fsort == NULL)
+ fsort= tab->filesort;
- /*
- When there is SQL_BIG_RESULT do not sort using index for GROUP BY,
- and thus force sorting on disk unless a group min-max optimization
- is going to be used as it is applied now only for one table queries
- with covering indexes.
- The expections is if we are already using the index for GROUP BY
- (in which case sort would be free) or ORDER and GROUP BY are different.
- */
- if ((order != join->group_list ||
- !(join->select_options & SELECT_BIG_RESULT) ||
- (select && select->quick &&
- select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) &&
- test_if_skip_sort_order(tab,order,select_limit,0,
- is_order_by ? &table->keys_in_use_for_order_by :
- &table->keys_in_use_for_group_by))
- {
- tab->update_explain_data(join->const_tables);
- DBUG_RETURN(0);
- }
- tab->update_explain_data(join->const_tables);
-
- for (ORDER *ord= join->order; ord; ord= ord->next)
- length++;
- if (!(join->sortorder=
- make_unireg_sortorder(thd, join, tab->table->map, order, &length,
- join->sortorder)))
- {
- goto err; /* purecov: inspected */
- }
-
- table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_WME | MY_ZEROFILL|
- MY_THREAD_SPECIFIC));
+ table= tab->table;
+ select= fsort->select;
+
table->status=0; // May be wrong if quick_select
if (!tab->preread_init_done && tab->preread_init())
goto err;
// If table has a range, move it to select
- if (select && !select->quick && tab->ref.key >= 0)
+ if (select && tab->ref.key >= 0)
{
- if (tab->quick)
+ if (!select->quick)
{
- select->quick=tab->quick;
- tab->quick=0;
+ if (tab->quick)
+ {
+ select->quick= tab->quick;
+ tab->quick= NULL;
/*
We can only use 'Only index' if quick key is same as ref_key
and in index_merge 'Only index' cannot be used
*/
if (((uint) tab->ref.key != select->quick->index))
- table->disable_keyread();
+ table->file->ha_end_keyread();
+ }
+ else
+ {
+ /*
+ We have a ref on a const; Change this to a range that filesort
+ can use.
+ For impossible ranges (like when doing a lookup on NULL on a NOT NULL
+ field), quick will contain an empty record set.
+ */
+ if (!(select->quick= (tab->type == JT_FT ?
+ get_ft_select(thd, table, tab->ref.key) :
+ get_quick_select_for_ref(thd, table, &tab->ref,
+ tab->found_records))))
+ goto err;
+ quick_created= TRUE;
+ }
+ fsort->own_select= true;
}
else
{
- /*
- We have a ref on a const; Change this to a range that filesort
- can use.
- For impossible ranges (like when doing a lookup on NULL on a NOT NULL
- field, quick will contain an empty record set.
- */
- if (!(select->quick= (tab->type == JT_FT ?
- get_ft_select(thd, table, tab->ref.key) :
- get_quick_select_for_ref(thd, table, &tab->ref,
- tab->found_records))))
- goto err;
- quick_created= TRUE;
+ DBUG_ASSERT(tab->type == JT_REF || tab->type == JT_EQ_REF);
+ // Update ref value
+ if ((cp_buffer_from_ref(thd, table, &tab->ref) && thd->is_fatal_error))
+ goto err; // out of memory
}
}
+
/* Fill schema tables with data before filesort if it's necessary */
if ((join->select_lex->options & OPTION_SCHEMA_TABLE) &&
get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX))
@@ -21671,58 +21906,34 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
if (table->s->tmp_table)
table->file->info(HA_STATUS_VARIABLE); // Get record count
- filesort_retval= filesort(thd, table, join->sortorder, length,
- select, filesort_limit, 0,
- &examined_rows, &found_rows,
- join->explain->ops_tracker.report_sorting(thd));
- table->sort.found_records= filesort_retval;
- tab->records= join->select_options & OPTION_FOUND_ROWS ? found_rows : filesort_retval;
+ file_sort= filesort(thd, table, fsort, fsort->tracker, join, tab->table->map);
+ DBUG_ASSERT(tab->filesort_result == 0);
+ tab->filesort_result= file_sort;
+ tab->records= 0;
+ if (file_sort)
+ {
+ tab->records= join->select_options & OPTION_FOUND_ROWS ?
+ file_sort->found_rows : file_sort->return_rows;
+ tab->join->join_examined_rows+= file_sort->examined_rows;
+ }
if (quick_created)
{
/* This will delete the quick select. */
select->cleanup();
}
+
+ table->file->ha_end_keyread();
+ if (tab->type == JT_FT)
+ table->file->ft_end();
+ else
+ table->file->ha_index_or_rnd_end();
- if (!join->pre_sort_join_tab)
- {
- if (save_pre_sort_join_tab)
- join->pre_sort_join_tab= save_pre_sort_join_tab;
- else if (!(join->pre_sort_join_tab= (JOIN_TAB*)thd->alloc(sizeof(JOIN_TAB))))
- goto err;
- }
-
- *(join->pre_sort_join_tab)= *tab;
-
- tab->select=NULL;
- tab->set_select_cond(NULL, __LINE__);
- tab->type=JT_ALL; // Read with normal read_record
- tab->read_first_record= join_init_read_record;
- tab->table->file->ha_index_or_rnd_end();
-
- if (err)
- goto err;
-
- tab->join->join_examined_rows+= examined_rows;
- DBUG_RETURN(filesort_retval == HA_POS_ERROR);
+ DBUG_RETURN(file_sort == 0);
err:
DBUG_RETURN(-1);
}
-void JOIN::clean_pre_sort_join_tab()
-{
- //TABLE *table= pre_sort_join_tab->table;
- /*
- Note: we can come here for fake_select_lex object. That object will have
- the table already deleted by st_select_lex_unit::cleanup().
- We rely on that fake_select_lex didn't have quick select.
- */
- if (pre_sort_join_tab->select && pre_sort_join_tab->select->quick)
- {
- pre_sort_join_tab->select->cleanup();
- }
-}
-
/**
Compare fields from table->record[0] and table->record[1],
@@ -21786,22 +21997,28 @@ static void free_blobs(Field **ptr)
Rows that do not satisfy 'having' condition are also removed.
*/
-static int
-remove_duplicates(JOIN *join, TABLE *table, List<Item> &fields, Item *having)
+bool
+JOIN_TAB::remove_duplicates()
+
{
- int error;
+ bool error;
ulong keylength= 0;
uint field_count;
+ List<Item> *fields= (this-1)->fields;
THD *thd= join->thd;
DBUG_ENTER("remove_duplicates");
- join->explain->ops_tracker.report_duplicate_removal();
+
+ DBUG_ASSERT(join->aggr_tables > 0 && table->s->tmp_table != NO_TMP_TABLE);
+ THD_STAGE_INFO(join->thd, stage_removing_duplicates);
+
+ //join->explain->ops_tracker.report_duplicate_removal();
table->reginfo.lock_type=TL_WRITE;
/* Calculate how many saved fields there is in list */
field_count=0;
- List_iterator<Item> it(fields);
+ List_iterator<Item> it(*fields);
Item *item;
while ((item=it++))
{
@@ -21812,7 +22029,7 @@ remove_duplicates(JOIN *join, TABLE *table, List<Item> &fields, Item *having)
if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having)
{ // only const items with no OPTION_FOUND_ROWS
join->unit->select_limit_cnt= 1; // Only send first row
- DBUG_RETURN(0);
+ DBUG_RETURN(false);
}
Field **first_field=table->field+table->s->fields - field_count;
@@ -21827,7 +22044,6 @@ remove_duplicates(JOIN *join, TABLE *table, List<Item> &fields, Item *having)
if (thd->killed == ABORT_QUERY)
thd->reset_killed();
- free_io_cache(table); // Safety
table->file->info(HA_STATUS_VARIABLE);
if (table->s->db_type() == heap_hton ||
(!table->s->blob_fields &&
@@ -21924,9 +22140,11 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
file->extra(HA_EXTRA_NO_CACHE);
+ (void) file->ha_rnd_end();
DBUG_RETURN(0);
err:
file->extra(HA_EXTRA_NO_CACHE);
+ (void) file->ha_rnd_end();
if (error)
file->print_error(error,MYF(0));
DBUG_RETURN(1);
@@ -22041,92 +22259,9 @@ err:
}
-SORT_FIELD *make_unireg_sortorder(THD *thd, JOIN *join,
- table_map first_table_bit,
- ORDER *order, uint *length,
- SORT_FIELD *sortorder)
-{
- uint count;
- SORT_FIELD *sort,*pos;
- DBUG_ENTER("make_unireg_sortorder");
-
- count=0;
- for (ORDER *tmp = order; tmp; tmp=tmp->next)
- count++;
- if (!sortorder)
- sortorder= (SORT_FIELD*) thd->alloc(sizeof(SORT_FIELD) *
- (MY_MAX(count, *length) + 1));
- pos= sort= sortorder;
-
- if (!pos)
- DBUG_RETURN(0);
-
- for (;order;order=order->next,pos++)
- {
- Item *first= order->item[0];
- /*
- It is possible that the query plan is to read table t1, while the
- sort criteria actually has "ORDER BY t2.col" and the WHERE clause has
- a multi-equality(t1.col, t2.col, ...).
- The optimizer detects such cases (grep for
- UseMultipleEqualitiesToRemoveTempTable to see where), but doesn't
- perform equality substitution in the order->item. We need to do the
- substitution here ourselves.
- */
- table_map item_map= first->used_tables();
- if (join && (item_map & ~join->const_table_map) &&
- !(item_map & first_table_bit) && join->cond_equal &&
- first->get_item_equal())
- {
- /*
- Ok, this is the case descibed just above. Get the first element of the
- multi-equality.
- */
- Item_equal *item_eq= first->get_item_equal();
- first= item_eq->get_first(NO_PARTICULAR_TAB, NULL);
- }
-
- Item *const item= first, *const real_item= item->real_item();
- pos->field= 0; pos->item= 0;
- if (real_item->type() == Item::FIELD_ITEM)
- {
- // Could be a field, or Item_direct_view_ref wrapping a field
- DBUG_ASSERT(item->type() == Item::FIELD_ITEM ||
- (item->type() == Item::REF_ITEM &&
- static_cast<Item_ref*>(item)->ref_type() ==
- Item_ref::VIEW_REF));
- pos->field= static_cast<Item_field*>(real_item)->field;
- }
- else if (real_item->type() == Item::SUM_FUNC_ITEM &&
- !real_item->const_item())
- {
- // Aggregate, or Item_aggregate_ref
- DBUG_ASSERT(item->type() == Item::SUM_FUNC_ITEM ||
- (item->type() == Item::REF_ITEM &&
- static_cast<Item_ref*>(item)->ref_type() ==
- Item_ref::AGGREGATE_REF));
- pos->field= item->get_tmp_table_field();
- }
- else if (real_item->type() == Item::COPY_STR_ITEM)
- { // Blob patch
- pos->item= static_cast<Item_copy*>(real_item)->get_item();
- }
- else
- pos->item= item;
- pos->reverse=! order->asc;
- DBUG_ASSERT(pos->field != NULL || pos->item != NULL);
- }
- *length=count;
- DBUG_RETURN(sort);
-}
-
-
/*
eq_ref: Create the lookup key and check if it is the same as saved key
-
-
-
SYNOPSIS
cmp_buffer_with_ref()
tab Join tab of the accessed table
@@ -22226,6 +22361,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
@param[in] add_to_all_fields If the item is to be added to all_fields and
ref_pointer_array, this flag can be set to
false to stop the automatic insertion.
+ @param[in] from_window_spec If true then order is from a window spec
@retval
FALSE if OK
@@ -22234,9 +22370,11 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
*/
static bool
-find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
+find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array,
+ TABLE_LIST *tables,
ORDER *order, List<Item> &fields, List<Item> &all_fields,
- bool is_group_field, bool add_to_all_fields)
+ bool is_group_field, bool add_to_all_fields,
+ bool from_window_spec)
{
Item *order_item= *order->item; /* The item from the GROUP/ORDER caluse. */
Item::Type order_item_type;
@@ -22249,7 +22387,8 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
Local SP variables may be int but are expressions, not positions.
(And they can't be used before fix_fields is called for them).
*/
- if (order_item->type() == Item::INT_ITEM && order_item->basic_const_item())
+ if (order_item->type() == Item::INT_ITEM && order_item->basic_const_item() &&
+ !from_window_spec)
{ /* Order by position */
uint count;
if (order->counter_used)
@@ -22262,7 +22401,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
order_item->full_name(), thd->where);
return TRUE;
}
- thd->change_item_tree((Item**)&order->item, (Item*)(ref_pointer_array + count - 1));
+ thd->change_item_tree((Item **)&order->item, (Item *)&ref_pointer_array[count - 1]);
order->in_field_list= 1;
order->counter= count;
order->counter_used= 1;
@@ -22322,7 +22461,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
'shadowed' a table field with the same name, the table field will be
chosen over the derived field.
*/
- order->item= ref_pointer_array + counter;
+ order->item= &ref_pointer_array[counter];
order->in_field_list=1;
return FALSE;
}
@@ -22341,6 +22480,18 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
thd->where);
}
}
+ else if (from_window_spec)
+ {
+ Item **found_item= find_item_in_list(order_item, all_fields, &counter,
+ REPORT_EXCEPT_NOT_FOUND, &resolution,
+ all_fields.elements - fields.elements);
+ if (found_item != not_found_item)
+ {
+ order->item= &ref_pointer_array[all_fields.elements-1-counter];
+ order->in_field_list= 0;
+ return FALSE;
+ }
+ }
order->in_field_list=0;
/*
@@ -22364,8 +22515,6 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
return FALSE;
uint el= all_fields.elements;
- DBUG_ASSERT(all_fields.elements <=
- thd->lex->current_select->ref_pointer_array_size);
/* Add new field to field list. */
all_fields.push_front(order_item, thd->mem_root);
ref_pointer_array[el]= order_item;
@@ -22380,7 +22529,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
if (order_item->type() == Item::SUM_FUNC_ITEM)
((Item_sum *)order_item)->ref_by= all_fields.head_ref();
- order->item= ref_pointer_array + el;
+ order->item= &ref_pointer_array[el];
return FALSE;
}
@@ -22392,15 +22541,24 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
the field list.
*/
-int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
- List<Item> &fields, List<Item> &all_fields, ORDER *order)
-{
+int setup_order(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
+ List<Item> &fields, List<Item> &all_fields, ORDER *order,
+ bool from_window_spec)
+{
+ enum_parsing_place context_analysis_place=
+ thd->lex->current_select->context_analysis_place;
thd->where="order clause";
for (; order; order=order->next)
{
if (find_order_in_list(thd, ref_pointer_array, tables, order, fields,
- all_fields, FALSE, true))
+ all_fields, false, true, from_window_spec))
+ return 1;
+ if ((*order->item)->with_window_func &&
+ context_analysis_place != IN_ORDER_BY)
+ {
+ my_error(ER_WINDOW_FUNCTION_IN_WINDOW_SPEC, MYF(0));
return 1;
+ }
}
return 0;
}
@@ -22409,18 +22567,19 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
/**
Intitialize the GROUP BY list.
- @param thd Thread handler
- @param ref_pointer_array We store references to all fields that was
+ @param thd Thread handler
+ @param ref_pointer_array We store references to all fields that was
not in 'fields' here.
- @param fields All fields in the select part. Any item in
+ @param fields All fields in the select part. Any item in
'order' that is part of these list is replaced
by a pointer to this fields.
- @param all_fields Total list of all unique fields used by the
+ @param all_fields Total list of all unique fields used by the
select. All items in 'order' that was not part
of fields will be added first to this list.
- @param order The fields we should do GROUP BY on.
- @param hidden_group_fields Pointer to flag that is set to 1 if we added
+ @param order The fields we should do GROUP/PARTITION BY on
+ @param hidden_group_fields Pointer to flag that is set to 1 if we added
any fields to all_fields.
+ @param from_window_spec If true then list is from a window spec
@todo
change ER_WRONG_FIELD_WITH_GROUP to more detailed
@@ -22433,10 +22592,12 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
*/
int
-setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
+setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
List<Item> &fields, List<Item> &all_fields, ORDER *order,
- bool *hidden_group_fields)
+ bool *hidden_group_fields, bool from_window_spec)
{
+ enum_parsing_place context_analysis_place=
+ thd->lex->current_select->context_analysis_place;
*hidden_group_fields=0;
ORDER *ord;
@@ -22446,23 +22607,28 @@ setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
uint org_fields=all_fields.elements;
thd->where="group statement";
- enum_parsing_place save_place= thd->lex->current_select->parsing_place;
- thd->lex->current_select->parsing_place= IN_GROUP_BY;
for (ord= order; ord; ord= ord->next)
{
if (find_order_in_list(thd, ref_pointer_array, tables, ord, fields,
- all_fields, TRUE, true))
+ all_fields, true, true, from_window_spec))
return 1;
(*ord->item)->marker= UNDEF_POS; /* Mark found */
- if ((*ord->item)->with_sum_func)
+ if ((*ord->item)->with_sum_func && context_analysis_place == IN_GROUP_BY)
{
my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*ord->item)->full_name());
return 1;
}
+ if ((*ord->item)->with_window_func)
+ {
+ if (context_analysis_place == IN_GROUP_BY)
+ my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0));
+ else
+ my_error(ER_WINDOW_FUNCTION_IN_WINDOW_SPEC, MYF(0));
+ return 1;
+ }
}
- thd->lex->current_select->parsing_place= save_place;
-
- if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY)
+ if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY &&
+ context_analysis_place == IN_GROUP_BY)
{
/*
Don't allow one to use fields that is not used in GROUP BY
@@ -22568,14 +22734,16 @@ setup_new_fields(THD *thd, List<Item> &fields,
*/
ORDER *
-create_distinct_group(THD *thd, Item **ref_pointer_array,
+create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array,
ORDER *order_list, List<Item> &fields,
List<Item> &all_fields,
bool *all_order_by_fields_used)
{
List_iterator<Item> li(fields);
- Item *item, **orig_ref_pointer_array= ref_pointer_array;
+ Item *item;
+ Ref_ptr_array orig_ref_pointer_array= ref_pointer_array;
ORDER *order,*group,**prev;
+ uint idx= 0;
*all_order_by_fields_used= 1;
while ((item=li++))
@@ -22622,16 +22790,14 @@ create_distinct_group(THD *thd, Item **ref_pointer_array,
Because HEAP tables can't index BIT fields we need to use an
additional hidden field for grouping because later it will be
converted to a LONG field. Original field will remain of the
- BIT type and will be returned to a client.
+ BIT type and will be returned to a client.
*/
Item_field *new_item= new (thd->mem_root) Item_field(thd, (Item_field*)item);
int el= all_fields.elements;
- DBUG_ASSERT(all_fields.elements <=
- thd->lex->current_select->ref_pointer_array_size);
orig_ref_pointer_array[el]= new_item;
all_fields.push_front(new_item, thd->mem_root);
- ord->item= orig_ref_pointer_array + el;
- }
+ ord->item=&orig_ref_pointer_array[el];
+ }
else
{
/*
@@ -22639,14 +22805,14 @@ create_distinct_group(THD *thd, Item **ref_pointer_array,
simple indexing of ref_pointer_array (order in the array and in the
list are same)
*/
- ord->item= ref_pointer_array;
+ ord->item= &ref_pointer_array[idx];
}
- ord->asc=1;
+ ord->direction= ORDER::ORDER_ASC;
*prev=ord;
prev= &ord->next;
}
next_item:
- ref_pointer_array++;
+ idx++;
}
*prev=0;
return group;
@@ -22718,7 +22884,7 @@ test_if_subpart(ORDER *a,ORDER *b)
for (; a && b; a=a->next,b=b->next)
{
if ((*a->item)->eq(*b->item,1))
- a->asc=b->asc;
+ a->direction=b->direction;
else
return 0;
}
@@ -22897,9 +23063,9 @@ make_group_fields(JOIN *main_join, JOIN *curr_join)
/**
- Get a list of buffers for saveing last group.
+ Get a list of buffers for saving last group.
- Groups are saved in reverse order for easyer check loop.
+ Groups are saved in reverse order for easier check loop.
*/
static bool
@@ -22950,8 +23116,13 @@ int test_if_item_cache_changed(List<Cached_item> &list)
}
+/*
+ @return
+ -1 - Group not changed
+ value>=0 - Number of the component where the group changed
+*/
-static int
+int
test_if_group_changed(List<Cached_item> &list)
{
DBUG_ENTER("test_if_group_changed");
@@ -23000,7 +23171,7 @@ test_if_group_changed(List<Cached_item> &list)
bool
setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
- Item **ref_pointer_array,
+ Ref_ptr_array ref_pointer_array,
List<Item> &res_selected_fields, List<Item> &res_all_fields,
uint elements, List<Item> &all_fields)
{
@@ -23229,7 +23400,8 @@ bool JOIN::alloc_func_list()
1 error
*/
-bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_result_set_metadata,
+bool JOIN::make_sum_func_list(List<Item> &field_list,
+ List<Item> &send_result_set_metadata,
bool before_group_by, bool recompute)
{
List_iterator_fast<Item> it(field_list);
@@ -23284,7 +23456,7 @@ bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_result_se
*/
static bool
-change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
+change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &res_selected_fields,
List<Item> &res_all_fields,
uint elements, List<Item> &all_fields)
@@ -23321,14 +23493,6 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
Item_field *new_field= new (thd->mem_root) Item_temptable_field(thd, field);
if (!suv || !new_field)
DBUG_RETURN(true); // Fatal error
- /*
- We are replacing the argument of Item_func_set_user_var after
- its value has been read. The argument's null_value should be
- set by now, so we must set it explicitly for the replacement
- argument since the null_value may be read without any
- preceeding call to val_*().
- */
- new_field->update_null_value();
List<Item> list;
list.push_back(new_field, thd->mem_root);
suv->set_arguments(thd, list);
@@ -23364,7 +23528,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
str.length(0);
str.extra_allocation(1024);
item->print(&str, QT_ORDINARY);
- item_field->name= sql_strmake(str.ptr(),str.length());
+ item_field->name= thd->strmake(str.ptr(),str.length());
}
#endif
}
@@ -23402,7 +23566,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
*/
static bool
-change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array,
+change_refs_to_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &res_selected_fields,
List<Item> &res_all_fields, uint elements,
List<Item> &all_fields)
@@ -23415,8 +23579,11 @@ change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array,
uint i, border= all_fields.elements - elements;
for (i= 0; (item= it++); i++)
{
- res_all_fields.push_back(new_item= item->get_tmp_table_item(thd),
- thd->mem_root);
+ if (item->type() == Item::SUM_FUNC_ITEM && item->const_item())
+ new_item= item;
+ else
+ new_item= item->get_tmp_table_item(thd);
+ res_all_fields.push_back(new_item, thd->mem_root);
ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]=
new_item;
}
@@ -23558,6 +23725,9 @@ copy_funcs(Item **func_ptr, const THD *thd)
Item *func;
for (; (func = *func_ptr) ; func_ptr++)
{
+ if (func->type() == Item::FUNC_ITEM &&
+ ((Item_func *) func)->with_window_func)
+ continue;
func->save_in_result_field(1);
/*
Need to check the THD error state because Item::val_xxx() don't
@@ -23626,8 +23796,8 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
join_tab->set_select_cond(cond, __LINE__);
}
- else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond, 0,
- &error)))
+ else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond,
+ (SORT_INFO*) 0, 0, &error)))
join_tab->set_select_cond(cond, __LINE__);
DBUG_RETURN(error ? TRUE : FALSE);
@@ -23755,17 +23925,23 @@ bool JOIN::rollup_init()
*/
tmp_table_param.group_parts= send_group_parts;
- if (!(rollup.null_items= (Item_null_result**) thd->alloc((sizeof(Item*) +
- sizeof(Item**) +
- sizeof(List<Item>) +
- ref_pointer_array_size)
- * send_group_parts )))
- return 1;
-
- rollup.fields= (List<Item>*) (rollup.null_items + send_group_parts);
- rollup.ref_pointer_arrays= (Item***) (rollup.fields + send_group_parts);
+ Item_null_result **null_items=
+ static_cast<Item_null_result**>(thd->alloc(sizeof(Item*)*send_group_parts));
+
+ rollup.null_items= Item_null_array(null_items, send_group_parts);
+ rollup.ref_pointer_arrays=
+ static_cast<Ref_ptr_array*>
+ (thd->alloc((sizeof(Ref_ptr_array) +
+ all_fields.elements * sizeof(Item*)) * send_group_parts));
+ rollup.fields=
+ static_cast<List<Item>*>(thd->alloc(sizeof(List<Item>) * send_group_parts));
+
+ if (!null_items || !rollup.ref_pointer_arrays || !rollup.fields)
+ return true;
+
ref_array= (Item**) (rollup.ref_pointer_arrays+send_group_parts);
+
/*
Prepare space for field list for the different levels
These will be filled up in rollup_make_fields()
@@ -23775,7 +23951,7 @@ bool JOIN::rollup_init()
rollup.null_items[i]= new (thd->mem_root) Item_null_result(thd);
List<Item> *rollup_fields= &rollup.fields[i];
rollup_fields->empty();
- rollup.ref_pointer_arrays[i]= ref_array;
+ rollup.ref_pointer_arrays[i]= Ref_ptr_array(ref_array, all_fields.elements);
ref_array+= all_fields.elements;
}
for (i= 0 ; i < send_group_parts; i++)
@@ -23922,11 +24098,12 @@ bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields,
bool real_fields= 0;
Item *item;
List_iterator<Item> new_it(rollup.fields[pos]);
- Item **ref_array_start= rollup.ref_pointer_arrays[pos];
+ Ref_ptr_array ref_array_start= rollup.ref_pointer_arrays[pos];
ORDER *start_group;
/* Point to first hidden field */
- Item **ref_array= ref_array_start + fields_arg.elements-1;
+ uint ref_array_ix= fields_arg.elements-1;
+
/* Remember where the sum functions ends for the previous level */
sum_funcs_end[pos+1]= *func;
@@ -23943,7 +24120,7 @@ bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields,
if (item == first_field)
{
real_fields= 1; // End of hidden fields
- ref_array= ref_array_start;
+ ref_array_ix= 0;
}
if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item() &&
@@ -23987,15 +24164,15 @@ bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields,
}
}
}
- *ref_array= item;
+ ref_array_start[ref_array_ix]= item;
if (real_fields)
{
(void) new_it++; // Point to next item
new_it.replace(item); // Replace previous
- ref_array++;
+ ref_array_ix++;
}
else
- ref_array--;
+ ref_array_ix--;
}
}
sum_funcs_end[0]= *func; // Point to last function
@@ -24028,9 +24205,7 @@ int JOIN::rollup_send_data(uint idx)
{
int res= 0;
/* Get reference pointers to sum functions in place */
- memcpy((char*) ref_pointer_array,
- (char*) rollup.ref_pointer_arrays[i],
- ref_pointer_array_size);
+ copy_ref_ptr_array(ref_ptrs, rollup.ref_pointer_arrays[i]);
if ((!having || having->val_int()))
{
if (send_records < unit->select_limit_cnt && do_send_rows &&
@@ -24041,7 +24216,7 @@ int JOIN::rollup_send_data(uint idx)
}
}
/* Restore ref_pointer_array */
- set_items_ref_array(current_ref_pointer_array);
+ set_items_ref_array(current_ref_ptrs);
return 0;
}
@@ -24065,15 +24240,13 @@ int JOIN::rollup_send_data(uint idx)
1 if write_data_failed()
*/
-int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
+int JOIN::rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param_arg, TABLE *table_arg)
{
uint i;
for (i= send_group_parts ; i-- > idx ; )
{
/* Get reference pointers to sum functions in place */
- memcpy((char*) ref_pointer_array,
- (char*) rollup.ref_pointer_arrays[i],
- ref_pointer_array_size);
+ copy_ref_ptr_array(ref_ptrs, rollup.ref_pointer_arrays[i]);
if ((!having || having->val_int()))
{
int write_error;
@@ -24088,15 +24261,15 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
if ((write_error= table_arg->file->ha_write_tmp_row(table_arg->record[0])))
{
if (create_internal_tmp_table_from_heap(thd, table_arg,
- tmp_table_param.start_recinfo,
- &tmp_table_param.recinfo,
+ tmp_table_param_arg->start_recinfo,
+ &tmp_table_param_arg->recinfo,
write_error, 0, NULL))
return 1;
}
}
}
/* Restore ref_pointer_array */
- set_items_ref_array(current_ref_pointer_array);
+ set_items_ref_array(current_ref_ptrs);
return 0;
}
@@ -24219,33 +24392,9 @@ int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table,
}
-/*
- TODO: this function is only applicable for the first non-const optimization
- join tab.
-*/
-
-void JOIN_TAB::update_explain_data(uint idx)
-{
- if (this == join->first_breadth_first_optimization_tab() + join->const_tables &&
- join->select_lex->select_number != INT_MAX &&
- join->select_lex->select_number != UINT_MAX)
- {
- Explain_table_access *eta= new (join->thd->mem_root)
- Explain_table_access(join->thd->mem_root);
- save_explain_data(eta, join->const_table_map, join->select_distinct,
- join->first_breadth_first_optimization_tab());
-
- Explain_select *sel= join->thd->lex->explain->
- get_select(join->select_lex->select_number);
- idx -= my_count_bits(join->eliminated_tables);
- sel->replace_table(idx, eta);
- }
-}
-
-
void JOIN_TAB::save_explain_data(Explain_table_access *eta,
table_map prefix_tables,
- bool distinct, JOIN_TAB *first_top_tab)
+ bool distinct_arg, JOIN_TAB *first_top_tab)
{
int quick_type;
CHARSET_INFO *cs= system_charset_info;
@@ -24261,6 +24410,21 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
explain_plan= eta;
eta->key.clear();
eta->quick_info= NULL;
+
+ SQL_SELECT *tab_select;
+ /*
+ We assume that if this table does pre-sorting, then it doesn't do filtering
+ with SQL_SELECT.
+ */
+ DBUG_ASSERT(!(select && filesort));
+ tab_select= (filesort)? filesort->select : select;
+
+ if (filesort)
+ {
+ eta->pre_join_sort= new (thd->mem_root) Explain_aggr_filesort(thd->mem_root,
+ thd->lex->analyze_stmt,
+ filesort);
+ }
tracker= &eta->tracker;
jbuf_tracker= &eta->jbuf_tracker;
@@ -24338,9 +24502,9 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
/* "type" column */
enum join_type tab_type= type;
if ((type == JT_ALL || type == JT_HASH) &&
- select && select->quick && use_quick != 2)
+ tab_select && tab_select->quick && use_quick != 2)
{
- cur_quick= select->quick;
+ cur_quick= tab_select->quick;
quick_type= cur_quick->get_type();
if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) ||
(quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT) ||
@@ -24375,9 +24539,9 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
In STRAIGHT_JOIN queries, there can be join tabs with JT_CONST type
that still have quick selects.
*/
- if (select && select->quick && tab_type != JT_CONST)
+ if (tab_select && tab_select->quick && tab_type != JT_CONST)
{
- eta->quick_info= select->quick->get_explain(thd->mem_root);
+ eta->quick_info= tab_select->quick->get_explain(thd->mem_root);
}
if (key_info) /* 'index' or 'ref' access */
@@ -24454,7 +24618,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
}
else
{
- double examined_rows= get_examined_rows();
+ double examined_rows= (double)get_examined_rows();
eta->rows_set= true;
eta->rows= (ha_rows) examined_rows;
@@ -24475,7 +24639,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
}
/* Build "Extra" field and save it */
- key_read=table->key_read;
+ key_read= table->file->keyread_enabled();
if ((tab_type == JT_NEXT || tab_type == JT_CONST) &&
table->covering_keys.is_set(index))
key_read=1;
@@ -24501,7 +24665,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
uint keyno= MAX_KEY;
if (ref.key_parts)
keyno= ref.key;
- else if (select && cur_quick)
+ else if (tab_select && cur_quick)
keyno = cur_quick->index;
if (keyno != MAX_KEY && keyno == table->file->pushed_idx_cond_keyno &&
@@ -24523,7 +24687,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
{
eta->push_extra(ET_USING);
}
- if (select)
+ if (tab_select)
{
if (use_quick == 2)
{
@@ -24533,7 +24697,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
eta->range_checked_fer->
append_possible_keys_stat(thd->mem_root, table, keys);
}
- else if (select->cond ||
+ else if (tab_select->cond ||
(cache_select && cache_select->cond))
{
const COND *pushed_cond= table->file->pushed_cond;
@@ -24546,7 +24710,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
}
else
{
- eta->where_cond= select->cond;
+ eta->where_cond= tab_select->cond;
eta->cache_cond= cache_select? cache_select->cond : NULL;
eta->push_extra(ET_USING_WHERE);
}
@@ -24578,7 +24742,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
if (quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
{
QUICK_GROUP_MIN_MAX_SELECT *qgs=
- (QUICK_GROUP_MIN_MAX_SELECT *) select->quick;
+ (QUICK_GROUP_MIN_MAX_SELECT *) tab_select->quick;
eta->push_extra(ET_USING_INDEX_FOR_GROUP_BY);
eta->loose_scan_is_scanning= qgs->loose_scan_is_scanning();
}
@@ -24590,14 +24754,15 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
if (quick_type == QUICK_SELECT_I::QS_TYPE_RANGE)
{
- explain_append_mrr_info((QUICK_RANGE_SELECT*)(select->quick),
+ explain_append_mrr_info((QUICK_RANGE_SELECT*)(tab_select->quick),
&eta->mrr_type);
if (eta->mrr_type.length() > 0)
eta->push_extra(ET_USING_MRR);
}
- if (distinct & test_all_bits(prefix_tables, join->select_list_used_tables))
+ if (shortcut_for_distinct)
eta->push_extra(ET_DISTINCT);
+
if (loosescan_match_tab)
{
eta->push_extra(ET_LOOSESCAN);
@@ -24657,7 +24822,8 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
In case this is a derived table, here we remember the number of
subselect that used to produce it.
*/
- eta->derived_select_number= table->derived_select_number;
+ if (!(table_list && table_list->is_with_table_recursive_reference()))
+ eta->derived_select_number= table->derived_select_number;
/* The same for non-merged semi-joins */
eta->non_merged_sjm_number = get_non_merged_semijoin_select();
@@ -24665,21 +24831,76 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
/*
+ Walk through join->aggr_tables and save aggregation/grouping query plan into
+ an Explain_select object
+*/
+
+void save_agg_explain_data(JOIN *join, Explain_select *xpl_sel)
+{
+ JOIN_TAB *join_tab=join->join_tab + join->exec_join_tab_cnt();
+ Explain_aggr_node *prev_node;
+ Explain_aggr_node *node= xpl_sel->aggr_tree;
+ bool is_analyze= join->thd->lex->analyze_stmt;
+ THD *thd= join->thd;
+
+ for (uint i= 0; i < join->aggr_tables; i++, join_tab++)
+ {
+ // Each aggregate means a temp.table
+ prev_node= node;
+ node= new (thd->mem_root) Explain_aggr_tmp_table;
+ node->child= prev_node;
+
+ if (join_tab->window_funcs_step)
+ {
+ Explain_aggr_node *new_node=
+ join_tab->window_funcs_step->save_explain_plan(thd->mem_root,
+ is_analyze);
+ if (new_node)
+ {
+ prev_node=node;
+ node= new_node;
+ node->child= prev_node;
+ }
+ }
+
+ /* The below matches execution in join_init_read_record() */
+ if (join_tab->distinct)
+ {
+ prev_node= node;
+ node= new (thd->mem_root) Explain_aggr_remove_dups;
+ node->child= prev_node;
+ }
+
+ if (join_tab->filesort)
+ {
+ Explain_aggr_filesort *eaf =
+ new (thd->mem_root) Explain_aggr_filesort(thd->mem_root, is_analyze, join_tab->filesort);
+ prev_node= node;
+ node= eaf;
+ node->child= prev_node;
+ }
+ }
+ xpl_sel->aggr_tree= node;
+}
+
+
+/*
Save Query Plan Footprint
@note
Currently, this function may be called multiple times
*/
-int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
- bool need_order, bool distinct,
+int JOIN::save_explain_data_intern(Explain_query *output,
+ bool need_tmp_table_arg,
+ bool need_order_arg, bool distinct_arg,
const char *message)
{
JOIN *join= this; /* Legacy: this code used to be a non-member function */
int cur_error= 0;
DBUG_ENTER("JOIN::save_explain_data_intern");
- DBUG_PRINT("info", ("Select 0x%lx, type %s, message %s",
- (ulong)join->select_lex, join->select_lex->type,
+ DBUG_PRINT("info", ("Select %p, type %s, message %s",
+ join->select_lex, join->select_lex->type,
message ? message : "NULL"));
DBUG_ASSERT(have_query_plan == QEP_AVAILABLE);
/* fake_select_lex is created/printed by Explain_union */
@@ -24687,9 +24908,8 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
/* There should be no attempts to save query plans for merged selects */
DBUG_ASSERT(!join->select_lex->master_unit()->derived ||
- join->select_lex->master_unit()->derived->is_materialized_derived());
-
- explain= NULL;
+ join->select_lex->master_unit()->derived->is_materialized_derived() ||
+ join->select_lex->master_unit()->derived->is_with_table());
/* Don't log this into the slow query log */
@@ -24707,12 +24927,13 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
explain->select_id= join->select_lex->select_number;
explain->select_type= join->select_lex->type;
explain->using_temporary= need_tmp;
- explain->using_filesort= need_order;
+ explain->using_filesort= need_order_arg;
/* Setting explain->message means that all other members are invalid */
explain->message= message;
if (select_lex->master_unit()->derived)
explain->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
+ save_agg_explain_data(this, explain);
output->add_node(explain);
}
else if (pushdown_query)
@@ -24724,7 +24945,7 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
explain->select_id= select_lex->select_number;
explain->select_type= select_lex->type;
explain->using_temporary= need_tmp;
- explain->using_filesort= need_order;
+ explain->using_filesort= need_order_arg;
explain->message= "Storage engine handles GROUP BY";
if (select_lex->master_unit()->derived)
@@ -24744,21 +24965,18 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
xpl_sel->select_type= join->select_lex->type;
if (select_lex->master_unit()->derived)
xpl_sel->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
-
- if (need_tmp_table)
- xpl_sel->using_temporary= true;
-
- if (need_order)
- xpl_sel->using_filesort= true;
+
+ save_agg_explain_data(this, xpl_sel);
xpl_sel->exec_const_cond= exec_const_cond;
+ xpl_sel->outer_ref_cond= outer_ref_cond;
if (tmp_having)
xpl_sel->having= tmp_having;
else
xpl_sel->having= having;
xpl_sel->having_value= having_value;
- JOIN_TAB* const first_top_tab= join->first_breadth_first_optimization_tab();
+ JOIN_TAB* const first_top_tab= join->first_breadth_first_tab();
JOIN_TAB* prev_bush_root_tab= NULL;
Explain_basic_join *cur_parent= xpl_sel;
@@ -24777,13 +24995,6 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
}
- if (join->table_access_tabs == join->join_tab &&
- tab == (first_top_tab + join->const_tables) && pre_sort_join_tab)
- {
- saved_join_tab= tab;
- tab= pre_sort_join_tab;
- }
-
Explain_table_access *eta= (new (output->mem_root)
Explain_table_access(output->mem_root));
@@ -24814,7 +25025,7 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
prev_bush_root_tab= tab->bush_root_tab;
cur_parent->add_table(eta, output);
- tab->save_explain_data(eta, used_tables, distinct, first_top_tab);
+ tab->save_explain_data(eta, used_tables, distinct_arg, first_top_tab);
if (saved_join_tab)
tab= saved_join_tab;
@@ -24834,11 +25045,14 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
(1) they are not parts of ON clauses that were eliminated by table
elimination.
(2) they are not merged derived tables
+ (3) they are not hanging CTEs (they are needed for execution)
*/
if (!(tmp_unit->item && tmp_unit->item->eliminated) && // (1)
(!tmp_unit->derived ||
- tmp_unit->derived->is_materialized_derived())) // (2)
- {
+ tmp_unit->derived->is_materialized_derived()) && // (2)
+ !(tmp_unit->with_element &&
+ (!tmp_unit->derived || !tmp_unit->derived->derived_result))) // (3)
+ {
explain->add_child(tmp_unit->first_select()->select_number);
}
}
@@ -24874,14 +25088,6 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
DBUG_ENTER("select_describe");
/* Update the QPF with latest values of using_temporary, using_filesort */
- Explain_select *explain_sel;
- uint select_nr= join->select_lex->select_number;
- if ((explain_sel= thd->lex->explain->get_select(select_nr)))
- {
- explain_sel->using_temporary= need_tmp_table;
- explain_sel->using_filesort= need_order;
- }
-
for (SELECT_LEX_UNIT *unit= join->select_lex->first_inner_unit();
unit;
unit= unit->next_unit())
@@ -24906,9 +25112,12 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
Save plans for child subqueries, when
(1) they are not parts of eliminated WHERE/ON clauses.
(2) they are not VIEWs that were "merged for INSERT".
+ (3) they are not hanging CTEs (they are needed for execution)
*/
- if (!(unit->item && unit->item->eliminated) && // (1)
- !(unit->derived && unit->derived->merged_for_insert)) // (2)
+ if (!(unit->item && unit->item->eliminated) && // (1)
+ !(unit->derived && unit->derived->merged_for_insert) && // (2)
+ !(unit->with_element &&
+ (!unit->derived || !unit->derived->derived_result))) // (3)
{
if (mysql_explain_union(thd, unit, result))
DBUG_VOID_RETURN;
@@ -24932,7 +25141,7 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
if (unit->is_union())
{
- if (unit->union_needs_tmp_table())
+ if (unit->union_needs_tmp_table() && unit->fake_select_lex)
{
unit->fake_select_lex->select_number= FAKE_SELECT_LEX_ID; // just for initialization
unit->fake_select_lex->type= "UNION RESULT";
@@ -24945,18 +25154,17 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
{
thd->lex->current_select= first;
unit->set_limit(unit->global_parameters());
- res= mysql_select(thd, &first->ref_pointer_array,
- first->table_list.first,
- first->with_wild, first->item_list,
- first->where,
- first->order_list.elements +
- first->group_list.elements,
- first->order_list.first,
- first->group_list.first,
- first->having,
- thd->lex->proc_list.first,
- first->options | thd->variables.option_bits | SELECT_DESCRIBE,
- result, unit, first);
+ res= mysql_select(thd,
+ first->table_list.first,
+ first->with_wild, first->item_list,
+ first->where,
+ first->order_list.elements + first->group_list.elements,
+ first->order_list.first,
+ first->group_list.first,
+ first->having,
+ thd->lex->proc_list.first,
+ first->options | thd->variables.option_bits | SELECT_DESCRIBE,
+ result, unit, first);
}
DBUG_RETURN(res || thd->is_error());
}
@@ -24989,7 +25197,8 @@ static void print_table_array(THD *thd,
continue;
}
- if (curr->outer_join)
+ /* JOIN_TYPE_OUTER is just a marker unrelated to real join */
+ if (curr->outer_join & (JOIN_TYPE_LEFT|JOIN_TYPE_RIGHT))
{
/* MySQL converts right to left joins */
str->append(STRING_WITH_LEN(" left join "));
@@ -25000,6 +25209,7 @@ static void print_table_array(THD *thd,
str->append(STRING_WITH_LEN(" semi join "));
else
str->append(STRING_WITH_LEN(" join "));
+
curr->print(thd, eliminated_tables, str, query_type);
if (curr->on_expr)
{
@@ -25223,11 +25433,19 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
}
else if (derived)
{
- // A derived table
- str->append('(');
- derived->print(str, query_type);
- str->append(')');
- cmp_name= ""; // Force printing of alias
+ if (!is_with_table())
+ {
+ // A derived table
+ str->append('(');
+ derived->print(str, query_type);
+ str->append(')');
+ cmp_name= ""; // Force printing of alias
+ }
+ else
+ {
+ append_identifier(thd, str, table_name, table_name_length);
+ cmp_name= table_name;
+ }
}
else
{
@@ -25771,8 +25989,8 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
Start from quick select's rows and cost. These are always cheaper than
full index scan/cost.
*/
- double best_rows= table->quick_rows[keynr];
- double best_cost= table->quick_costs[keynr];
+ double best_rows= (double)table->quick_rows[keynr];
+ double best_cost= (double)table->quick_costs[keynr];
/*
Check if ref(const) access was possible on this index.
@@ -25806,7 +26024,7 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
if (ref_rows > 0)
{
- double tmp= ref_rows;
+ double tmp= (double)ref_rows;
/* Reuse the cost formula from best_access_path: */
set_if_smaller(tmp, (double) tab->join->thd->variables.max_seeks_for_key);
if (table->covering_keys.is_set(keynr))
@@ -25817,7 +26035,7 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
if (tmp < best_cost)
{
best_cost= tmp;
- best_rows= ref_rows;
+ best_rows= (double)ref_rows;
}
}
}
@@ -25930,7 +26148,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
if (join)
{
- uint tablenr= tab - join->join_tab;
+ uint tablenr= (uint)(tab - join->join_tab);
read_time= join->best_positions[tablenr].read_time;
for (uint i= tablenr+1; i < join->table_count; i++)
fanout*= join->best_positions[i].records_read; // fanout is always >= 1
@@ -26341,6 +26559,239 @@ err:
DBUG_RETURN(0);
}
+/****************************************************************************
+ AGGR_OP implementation
+****************************************************************************/
+
+/**
+ @brief Instantiate tmp table for aggregation and start index scan if needed
+ @todo Tmp table always would be created, even for empty result. Extend
+ executor to avoid tmp table creation when no rows were written
+ into tmp table.
+ @return
+ true error
+ false ok
+*/
+
+bool
+AGGR_OP::prepare_tmp_table()
+{
+ TABLE *table= join_tab->table;
+ JOIN *join= join_tab->join;
+ int rc= 0;
+
+ if (!join_tab->table->is_created())
+ {
+ if (instantiate_tmp_table(table, join_tab->tmp_table_param->keyinfo,
+ join_tab->tmp_table_param->start_recinfo,
+ &join_tab->tmp_table_param->recinfo,
+ join->select_options))
+ return true;
+ (void) table->file->extra(HA_EXTRA_WRITE_CACHE);
+ empty_record(table);
+ }
+ /* If it wasn't already, start index scan for grouping using table index. */
+ if (!table->file->inited && table->group &&
+ join_tab->tmp_table_param->sum_func_count && table->s->keys)
+ rc= table->file->ha_index_init(0, 0);
+ else
+ {
+ /* Start index scan in scanning mode */
+ rc= table->file->ha_rnd_init(true);
+ }
+ if (rc)
+ {
+ table->file->print_error(rc, MYF(0));
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ @brief Prepare table if necessary and call write_func to save record
+
+ @param end_of_records the end_of_record signal to pass to the writer
+
+ @return return one of enum_nested_loop_state.
+*/
+
+enum_nested_loop_state
+AGGR_OP::put_record(bool end_of_records)
+{
+  // Lazy tmp table creation/initialization
+ if (!join_tab->table->file->inited)
+ if (prepare_tmp_table())
+ return NESTED_LOOP_ERROR;
+ enum_nested_loop_state rc= (*write_func)(join_tab->join, join_tab,
+ end_of_records);
+ return rc;
+}
+
+
+/**
+ @brief Finish rnd/index scan after accumulating records, switch ref_array,
+ and send accumulated records further.
+ @return return one of enum_nested_loop_state.
+*/
+
+enum_nested_loop_state
+AGGR_OP::end_send()
+{
+ enum_nested_loop_state rc= NESTED_LOOP_OK;
+ TABLE *table= join_tab->table;
+ JOIN *join= join_tab->join;
+
+ // All records were stored, send them further
+ int tmp, new_errno= 0;
+
+ if ((rc= put_record(true)) < NESTED_LOOP_OK)
+ return rc;
+
+ if ((tmp= table->file->extra(HA_EXTRA_NO_CACHE)))
+ {
+ DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed"));
+ new_errno= tmp;
+ }
+ if ((tmp= table->file->ha_index_or_rnd_end()))
+ {
+ DBUG_PRINT("error",("ha_index_or_rnd_end() failed"));
+ new_errno= tmp;
+ }
+ if (new_errno)
+ {
+ table->file->print_error(new_errno,MYF(0));
+ return NESTED_LOOP_ERROR;
+ }
+
+ // Update ref array
+ join_tab->join->set_items_ref_array(*join_tab->ref_array);
+ bool keep_last_filesort_result = join_tab->filesort ? false : true;
+ if (join_tab->window_funcs_step)
+ {
+ if (join_tab->window_funcs_step->exec(join, keep_last_filesort_result))
+ return NESTED_LOOP_ERROR;
+ }
+
+ table->reginfo.lock_type= TL_UNLOCK;
+
+ bool in_first_read= true;
+ while (rc == NESTED_LOOP_OK)
+ {
+ int error;
+ if (in_first_read)
+ {
+ in_first_read= false;
+ error= join_init_read_record(join_tab);
+ }
+ else
+ error= join_tab->read_record.read_record(&join_tab->read_record);
+
+ if (error > 0 || (join->thd->is_error())) // Fatal error
+ rc= NESTED_LOOP_ERROR;
+ else if (error < 0)
+ break;
+ else if (join->thd->killed) // Aborted by user
+ {
+ join->thd->send_kill_message();
+ rc= NESTED_LOOP_KILLED;
+ }
+ else
+ {
+ /*
+ In case we have window functions present, an extra step is required
+ to compute all the fields from the temporary table.
+ In case we have a compound expression such as: expr + expr,
+ where one of the terms has a window function inside it, only
+ after computing window function values we actually know the true
+ final result of the compounded expression.
+
+ Go through all the func items and save their values once again in the
+ corresponding temp table fields. Do this for each row in the table.
+ */
+ if (join_tab->window_funcs_step)
+ {
+ Item **func_ptr= join_tab->tmp_table_param->items_to_copy;
+ Item *func;
+ for (; (func = *func_ptr) ; func_ptr++)
+ {
+ if (func->with_window_func)
+ func->save_in_result_field(true);
+ }
+ }
+ rc= evaluate_join_record(join, join_tab, 0);
+ }
+ }
+
+ if (keep_last_filesort_result)
+ {
+ delete join_tab->filesort_result;
+ join_tab->filesort_result= NULL;
+ }
+
+  // Finish rnd scan after sending records
+ if (join_tab->table->file->inited)
+ join_tab->table->file->ha_rnd_end();
+
+ return rc;
+}
+
+
+/**
+ @brief
+ Remove marked top conjuncts of a condition
+
+ @param thd The thread handle
+ @param cond The condition which subformulas are to be removed
+
+ @details
+ The function removes all top conjuncts marked with the flag
+ FULL_EXTRACTION_FL from the condition 'cond'. The resulting
+  formula is returned as the result of the function.
+  If 'cond' is marked with such a flag the function returns 0.
+  The function clears the extraction flags for the removed
+  formulas.
+
+ @retval
+ condition without removed subformulas
+ 0 if the whole 'cond' is removed
+*/
+
+Item *remove_pushed_top_conjuncts(THD *thd, Item *cond)
+{
+ if (cond->get_extraction_flag() == FULL_EXTRACTION_FL)
+ {
+ cond->clear_extraction_flag();
+ return 0;
+ }
+ if (cond->type() == Item::COND_ITEM)
+ {
+ if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
+ {
+ List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
+ Item *item;
+ while ((item= li++))
+ {
+ if (item->get_extraction_flag() == FULL_EXTRACTION_FL)
+ {
+ item->clear_extraction_flag();
+ li.remove();
+ }
+ }
+ switch (((Item_cond*) cond)->argument_list()->elements)
+ {
+ case 0:
+ return 0;
+ case 1:
+ return ((Item_cond*) cond)->argument_list()->head();
+ default:
+ return cond;
+ }
+ }
+ }
+ return cond;
+}
+
/**
@} (end of group Query_Optimizer)
*/
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 266fe7a7066..5eea5937ea6 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -2,7 +2,7 @@
#define SQL_SELECT_INCLUDED
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2008, 2015, MariaDB
+ Copyright (c) 2008, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -32,11 +32,13 @@
#include "sql_array.h" /* Array */
#include "records.h" /* READ_RECORD */
#include "opt_range.h" /* SQL_SELECT, QUICK_SELECT_I */
+#include "filesort.h"
+typedef struct st_join_table JOIN_TAB;
/* Values in optimize */
-#define KEY_OPTIMIZE_EXISTS 1
-#define KEY_OPTIMIZE_REF_OR_NULL 2
-#define KEY_OPTIMIZE_EQ 4
+#define KEY_OPTIMIZE_EXISTS 1U
+#define KEY_OPTIMIZE_REF_OR_NULL 2U
+#define KEY_OPTIMIZE_EQ 4U
inline uint get_hash_join_key_no() { return MAX_KEY; }
@@ -176,17 +178,17 @@ enum sj_strategy_enum
};
/* Values for JOIN_TAB::packed_info */
-#define TAB_INFO_HAVE_VALUE 1
-#define TAB_INFO_USING_INDEX 2
-#define TAB_INFO_USING_WHERE 4
-#define TAB_INFO_FULL_SCAN_ON_NULL 8
+#define TAB_INFO_HAVE_VALUE 1U
+#define TAB_INFO_USING_INDEX 2U
+#define TAB_INFO_USING_WHERE 4U
+#define TAB_INFO_FULL_SCAN_ON_NULL 8U
typedef enum_nested_loop_state
(*Next_select_func)(JOIN *, struct st_join_table *, bool);
-Next_select_func setup_end_select_func(JOIN *join);
+Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab);
int rr_sequential(READ_RECORD *info);
int rr_sequential_and_unpack(READ_RECORD *info);
-
+Item *remove_pushed_top_conjuncts(THD *thd, Item *cond);
#include "sql_explain.h"
@@ -197,9 +199,12 @@ int rr_sequential_and_unpack(READ_RECORD *info);
class JOIN_CACHE;
class SJ_TMP_TABLE;
class JOIN_TAB_RANGE;
+class AGGR_OP;
+class Filesort;
typedef struct st_join_table {
TABLE *table;
+ TABLE_LIST *tab_list;
KEYUSE *keyuse; /**< pointer to first used key */
KEY *hj_key; /**< descriptor of the used best hash join key
not supported by any index */
@@ -258,6 +263,7 @@ typedef struct st_join_table {
*/
uint packed_info;
+ // READ_RECORD::Setup_func materialize_table;
READ_RECORD::Setup_func read_first_record;
Next_select_func next_select;
READ_RECORD read_record;
@@ -344,6 +350,7 @@ typedef struct st_join_table {
*/
Item *cache_idx_cond;
SQL_SELECT *cache_select;
+ AGGR_OP *aggr;
JOIN *join;
/*
Embedding SJ-nest (may be not the direct parent), or NULL if none.
@@ -410,6 +417,46 @@ typedef struct st_join_table {
/* NestedOuterJoins: Bitmap of nested joins this table is part of */
nested_join_map embedding_map;
+ /* Tmp table info */
+ TMP_TABLE_PARAM *tmp_table_param;
+
+ /* Sorting related info */
+ Filesort *filesort;
+ SORT_INFO *filesort_result;
+
+ /*
+ Non-NULL value means this join_tab must do window function computation
+ before reading.
+ */
+ Window_funcs_computation* window_funcs_step;
+
+ /**
+ List of topmost expressions in the select list. The *next* JOIN TAB
+ in the plan should use it to obtain correct values. Same applicable to
+ all_fields. These lists are needed because after tmp tables functions
+ will be turned to fields. These variables are pointing to
+ tmp_fields_list[123]. Valid only for tmp tables and the last non-tmp
+ table in the query plan.
+ @see JOIN::make_aggr_tables_info()
+ */
+ List<Item> *fields;
+ /** List of all expressions in the select list */
+ List<Item> *all_fields;
+ /*
+ Pointer to the ref array slice which to switch to before sending
+ records. Valid only for tmp tables.
+ */
+ Ref_ptr_array *ref_array;
+
+ /** Number of records saved in tmp table */
+ ha_rows send_records;
+
+ /** HAVING condition for checking prior saving a record into tmp table*/
+ Item *having;
+
+ /** TRUE <=> remove duplicates on this table. */
+ bool distinct;
+
/*
Semi-join strategy to be used for this join table. This is a copy of
POSITION::sj_strategy field. This field is set up by the
@@ -424,9 +471,9 @@ typedef struct st_join_table {
void cleanup();
inline bool is_using_loose_index_scan()
{
- return (select && select->quick &&
- (select->quick->get_type() ==
- QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));
+ const SQL_SELECT *sel= filesort ? filesort->select : select;
+ return (sel && sel->quick &&
+ (sel->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));
}
bool is_using_agg_loose_index_scan ()
{
@@ -563,16 +610,22 @@ typedef struct st_join_table {
void save_explain_data(Explain_table_access *eta, table_map prefix_tables,
bool distinct, struct st_join_table *first_top_tab);
- void update_explain_data(uint idx);
+ bool use_order() const; ///< Use ordering provided by chosen index?
+ bool sort_table();
+ bool remove_duplicates();
+
} JOIN_TAB;
#include "sql_join_cache.h"
-enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool
- end_of_records);
-enum_nested_loop_state sub_select(JOIN *join,JOIN_TAB *join_tab, bool
- end_of_records);
+enum_nested_loop_state
+sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
+enum_nested_loop_state
+sub_select(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
+enum_nested_loop_state
+sub_select_postjoin_aggr(JOIN *join, JOIN_TAB *join_tab, bool end_of_records);
+
enum_nested_loop_state
end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
bool end_of_records);
@@ -867,12 +920,14 @@ typedef struct st_position
Sj_materialization_picker sjmat_picker;
} POSITION;
+typedef Bounds_checked_array<Item_null_result*> Item_null_array;
+
typedef struct st_rollup
{
enum State { STATE_NONE, STATE_INITED, STATE_READY };
State state;
- Item_null_result **null_items;
- Item ***ref_pointer_arrays;
+ Item_null_array null_items;
+ Ref_ptr_array *ref_pointer_arrays;
List<Item> *fields;
} ROLLUP;
@@ -886,6 +941,56 @@ public:
class Pushdown_query;
+/**
+ @brief
+ Class to perform postjoin aggregation operations
+
+ @details
+ The result records are obtained on the put_record() call.
+ The aggregation process is determined by the write_func, it could be:
+ end_write Simply store all records in tmp table.
+ end_write_group Perform grouping using join->group_fields,
+ records are expected to be sorted.
+ end_update Perform grouping using the key generated on tmp
+ table. Input records aren't expected to be sorted.
+ Tmp table uses the heap engine
+ end_update_unique Same as above, but the engine is myisam.
+
+ Lazy table initialization is used - the table will be instantiated and
+ rnd/index scan started on the first put_record() call.
+
+*/
+
+class AGGR_OP :public Sql_alloc
+{
+public:
+ JOIN_TAB *join_tab;
+
+ AGGR_OP(JOIN_TAB *tab) : join_tab(tab), write_func(NULL)
+ {};
+
+ enum_nested_loop_state put_record() { return put_record(false); };
+ /*
+ Send the result of operation further (to a next operation/client)
+ This function is called after all records were put into tmp table.
+
+ @return return one of enum_nested_loop_state values.
+ */
+ enum_nested_loop_state end_send();
+ /** write_func setter */
+ void set_write_func(Next_select_func new_write_func)
+ {
+ write_func= new_write_func;
+ }
+
+private:
+ /** Write function that would be used for saving records in tmp table. */
+ Next_select_func write_func;
+ enum_nested_loop_state put_record(bool end_of_records);
+ bool prepare_tmp_table();
+};
+
+
class JOIN :public Sql_alloc
{
private:
@@ -954,33 +1059,11 @@ protected:
public:
JOIN_TAB *join_tab, **best_ref;
-
- /*
- Saved join_tab for pre_sorting. create_sort_index() will save here..
- */
- JOIN_TAB *pre_sort_join_tab;
- uint pre_sort_index;
- Item *pre_sort_idx_pushed_cond;
- void clean_pre_sort_join_tab();
/* List of fields that aren't under an aggregate function */
List<Item_field> non_agg_fields;
- /*
- For "Using temporary+Using filesort" queries, JOIN::join_tab can point to
- either:
- 1. array of join tabs describing how to run the select, or
- 2. array of single join tab describing read from the temporary table.
-
- SHOW EXPLAIN code needs to read/show #1. This is why two next members are
- there for saving it.
- */
- JOIN_TAB *table_access_tabs;
- uint top_table_access_tabs_count;
-
JOIN_TAB **map2table; ///< mapping between table indexes and JOIN_TABs
- JOIN_TAB *join_tab_save; ///< saved join_tab for subquery reexecution
-
List<JOIN_TAB_RANGE> join_tab_ranges;
/*
@@ -1011,14 +1094,9 @@ public:
We keep it here so that it is saved/restored with JOIN::restore_tmp.
*/
uint top_join_tab_count;
+ uint aggr_tables; ///< Number of post-join tmp tables
uint send_group_parts;
/*
- This counts how many times do_select() was invoked for this JOIN.
- It's used to restrict Pushdown_query::execute() only to the first
- do_select() invocation.
- */
- uint do_select_call_count;
- /*
True if the query has GROUP BY.
(that is, if group_by != NULL. when DISTINCT is converted into GROUP BY, it
will set this, too. It is not clear why we need a separate var from
@@ -1132,6 +1210,7 @@ public:
*/
table_map complex_firstmatch_tables;
+ Next_select_func first_select;
/*
The cost of best complete join plan found so far during optimization,
after optimization phase - cost of picked join order (not taking into
@@ -1147,9 +1226,6 @@ public:
double join_record_count;
List<Item> *fields;
List<Cached_item> group_fields, group_fields_cache;
- TABLE *tmp_table;
- /// used to store 2 possible tmp table of SELECT
- TABLE *exec_tmp_table1, *exec_tmp_table2;
THD *thd;
Item_sum **sum_funcs, ***sum_funcs_end;
/** second copy of sumfuncs (for queries with 2 temporary tables */
@@ -1158,6 +1234,8 @@ public:
Item *having;
Item *tmp_having; ///< To store having when processed temporary table
Item *having_history; ///< Store having for explain
+ ORDER *group_list_for_estimates;
+ bool having_is_correlated;
ulonglong select_options;
/*
Bitmap of allowed types of the join caches that
@@ -1196,26 +1274,6 @@ public:
*/
bool filesort_found_rows;
- /**
- Copy of this JOIN to be used with temporary tables.
-
- tmp_join is used when the JOIN needs to be "reusable" (e.g. in a
- subquery that gets re-executed several times) and we know will use
- temporary tables for materialization. The materialization to a
- temporary table overwrites the JOIN structure to point to the
- temporary table after the materialization is done. This is where
- tmp_join is used : it's a copy of the JOIN before the
- materialization and is used in restoring before re-execution by
- overwriting the current JOIN structure with the saved copy.
- Because of this we should pay extra care of not freeing up helper
- structures that are referenced by the original contents of the
- JOIN. We can check for this by making sure the "current" join is
- not the temporary copy, e.g. !tmp_join || tmp_join != join
-
- We should free these sub-structures at JOIN::destroy() if the
- "current" join has a copy is not that copy.
- */
- JOIN *tmp_join;
ROLLUP rollup; ///< Used with rollup
bool mixed_implicit_grouping;
@@ -1237,6 +1295,19 @@ public:
GROUP/ORDER BY.
*/
bool simple_order, simple_group;
+
+ /*
+ ordered_index_usage is set if an ordered index access
+ should be used instead of a filesort when computing
+ ORDER/GROUP BY.
+ */
+ enum
+ {
+ ordered_index_void, // No ordered index avail.
+ ordered_index_group_by, // Use index for GROUP BY
+ ordered_index_order_by // Use index for ORDER BY
+ } ordered_index_usage;
+
/**
Is set only in case if we have a GROUP BY clause
and no ORDER BY after constant elimination of 'order'.
@@ -1289,10 +1360,19 @@ public:
List<Item> exec_const_order_group_cond;
SQL_SELECT *select; ///<created in optimisation phase
JOIN_TAB *return_tab; ///<used only for outer joins
- Item **ref_pointer_array; ///<used pointer reference for this select
- // Copy of above to be used with different lists
- Item **items0, **items1, **items2, **items3, **current_ref_pointer_array;
- uint ref_pointer_array_size; ///< size of above in bytes
+
+ /*
+ Used pointer reference for this select.
+ select_lex->ref_pointer_array contains five "slices" of the same length:
+ |========|========|========|========|========|
+ ref_ptrs items0 items1 items2 items3
+ */
+ Ref_ptr_array ref_ptrs;
+ // Copy of the initial slice above, to be used with different lists
+ Ref_ptr_array items0, items1, items2, items3;
+ // Used by rollup, to restore ref_ptrs after overwriting it.
+ Ref_ptr_array current_ref_ptrs;
+
const char *zero_result_cause; ///< not 0 if exec must return zero result
bool union_part; ///< this subselect is part of union
@@ -1320,20 +1400,12 @@ public:
/* SJM nests that are executed with SJ-Materialization strategy */
List<SJ_MATERIALIZATION_INFO> sjm_info_list;
- /*
- storage for caching buffers allocated during query execution.
- These buffers allocations need to be cached as the thread memory pool is
- cleared only at the end of the execution of the whole query and not caching
- allocations that occur in repetition at execution time will result in
- excessive memory usage.
- Note: make_simple_join always creates an execution plan that accesses
- a single table, thus it is sufficient to have a one-element array for
- table_reexec.
- */
- SORT_FIELD *sortorder; // make_unireg_sortorder()
- TABLE *table_reexec[1]; // make_simple_join()
- JOIN_TAB *join_tab_reexec; // make_simple_join()
- /* end of allocation caching storage */
+ /** TRUE <=> ref_pointer_array is set to items3. */
+ bool set_group_rpa;
+ /** Exec time only: TRUE <=> current group has been sent */
+ bool group_sent;
+
+ JOIN_TAB *sort_and_group_aggr_tab;
JOIN(THD *thd_arg, List<Item> &fields_arg, ulonglong select_options_arg,
select_result *result_arg)
@@ -1345,12 +1417,13 @@ public:
void init(THD *thd_arg, List<Item> &fields_arg, ulonglong select_options_arg,
select_result *result_arg)
{
- join_tab= join_tab_save= 0;
+ join_tab= 0;
table= 0;
table_count= 0;
top_join_tab_count= 0;
const_tables= 0;
const_table_map= 0;
+ aggr_tables= 0;
eliminated_tables= 0;
join_list= 0;
implicit_grouping= FALSE;
@@ -1360,25 +1433,21 @@ public:
duplicate_rows= send_records= 0;
found_records= 0;
fetch_limit= HA_POS_ERROR;
- join_examined_rows= 0;
- exec_tmp_table1= 0;
- exec_tmp_table2= 0;
- sortorder= 0;
- table_reexec[0]= 0;
- join_tab_reexec= 0;
thd= thd_arg;
sum_funcs= sum_funcs2= 0;
procedure= 0;
having= tmp_having= having_history= 0;
+ having_is_correlated= false;
+ group_list_for_estimates= 0;
select_options= select_options_arg;
result= result_arg;
lock= thd_arg->lock;
select_lex= 0; //for safety
- tmp_join= 0;
select_distinct= MY_TEST(select_options & SELECT_DISTINCT);
no_order= 0;
simple_order= 0;
simple_group= 0;
+ ordered_index_usage= ordered_index_void;
need_distinct= 0;
skip_sort_order= 0;
need_tmp= 0;
@@ -1386,8 +1455,11 @@ public:
error= 0;
select= 0;
return_tab= 0;
- ref_pointer_array= items0= items1= items2= items3= 0;
- ref_pointer_array_size= 0;
+ ref_ptrs.reset();
+ items0.reset();
+ items1.reset();
+ items2.reset();
+ items3.reset();
zero_result_cause= 0;
optimization_state= JOIN::NOT_OPTIMIZED;
have_query_plan= QEP_NOT_PRESENT_YET;
@@ -1401,8 +1473,6 @@ public:
positions= best_positions= 0;
pushdown_query= 0;
original_join_tab= 0;
- do_select_call_count= 0;
-
explain= NULL;
all_fields= fields_arg;
@@ -1415,23 +1485,33 @@ public:
rollup.state= ROLLUP::STATE_NONE;
no_const_tables= FALSE;
+ first_select= sub_select;
+ set_group_rpa= false;
+ group_sent= 0;
+
outer_ref_cond= pseudo_bits_cond= NULL;
in_to_exists_where= NULL;
in_to_exists_having= NULL;
- pre_sort_join_tab= NULL;
emb_sjm_nest= NULL;
sjm_lookup_tables= 0;
sjm_scan_tables= 0;
+ }
- /*
- The following is needed because JOIN::cleanup(true) may be called for
- joins for which JOIN::optimize was aborted with an error before a proper
- query plan was produced
- */
- table_access_tabs= NULL;
+ /* True if the plan guarantees that it will be returned zero or one row */
+ bool only_const_tables() { return const_tables == table_count; }
+ /* Number of tables actually joined at the top level */
+ uint exec_join_tab_cnt() { return tables_list ? top_join_tab_count : 0; }
+
+ /*
+ Number of tables in the join which also includes the temporary tables
+ created for GROUP BY, DISTINCT , WINDOW FUNCTION etc.
+ */
+ uint total_join_tab_cnt()
+ {
+ return exec_join_tab_cnt() + aggr_tables - 1;
}
- int prepare(Item ***rref_pointer_array, TABLE_LIST *tables, uint wind_num,
+ int prepare(TABLE_LIST *tables, uint wind_num,
COND *conds, uint og_num, ORDER *order, bool skip_order_by,
ORDER *group, Item *having, ORDER *proc_param, SELECT_LEX *select,
SELECT_LEX_UNIT *unit);
@@ -1441,26 +1521,55 @@ public:
int reinit();
int init_execution();
void exec();
+
void exec_inner();
+ bool prepare_result(List<Item> **columns_list);
int destroy();
void restore_tmp();
bool alloc_func_list();
bool flatten_subqueries();
bool optimize_unflattened_subqueries();
bool optimize_constant_subqueries();
+ int init_join_caches();
bool make_sum_func_list(List<Item> &all_fields, List<Item> &send_fields,
bool before_group_by, bool recompute= FALSE);
- inline void set_items_ref_array(Item **ptr)
+ /// Initializes a slice, see comments for ref_ptrs above.
+ Ref_ptr_array ref_ptr_array_slice(size_t slice_num)
{
- memcpy((char*) ref_pointer_array, (char*) ptr, ref_pointer_array_size);
- current_ref_pointer_array= ptr;
+ size_t slice_sz= select_lex->ref_pointer_array.size() / 5U;
+ DBUG_ASSERT(select_lex->ref_pointer_array.size() % 5 == 0);
+ DBUG_ASSERT(slice_num < 5U);
+ return Ref_ptr_array(&select_lex->ref_pointer_array[slice_num * slice_sz],
+ slice_sz);
}
- inline void init_items_ref_array()
+
+ /**
+ Overwrites one slice with the contents of another slice.
+ In the normal case, dst and src have the same size().
+ However: the rollup slices may have smaller size than slice_sz.
+ */
+ void copy_ref_ptr_array(Ref_ptr_array dst_arr, Ref_ptr_array src_arr)
+ {
+ DBUG_ASSERT(dst_arr.size() >= src_arr.size());
+ void *dest= dst_arr.array();
+ const void *src= src_arr.array();
+ memcpy(dest, src, src_arr.size() * src_arr.element_size());
+ }
+
+ /// Overwrites 'ref_ptrs' and remembers the source as 'current'.
+ void set_items_ref_array(Ref_ptr_array src_arr)
+ {
+ copy_ref_ptr_array(ref_ptrs, src_arr);
+ current_ref_ptrs= src_arr;
+ }
+
+ /// Initializes 'items0' and remembers that it is 'current'.
+ void init_items_ref_array()
{
- items0= ref_pointer_array + all_fields.elements;
- memcpy(items0, ref_pointer_array, ref_pointer_array_size);
- current_ref_pointer_array= items0;
+ items0= ref_ptr_array_slice(1);
+ copy_ref_ptr_array(items0, ref_ptrs);
+ current_ref_ptrs= items0;
}
bool rollup_init();
@@ -1468,19 +1577,11 @@ public:
bool rollup_make_fields(List<Item> &all_fields, List<Item> &fields,
Item_sum ***func);
int rollup_send_data(uint idx);
- int rollup_write_data(uint idx, TABLE *table);
- /**
- Release memory and, if possible, the open tables held by this execution
- plan (and nested plans). It's used to release some tables before
- the end of execution in order to increase concurrency and reduce
- memory consumption.
- */
+ int rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param, TABLE *table);
void join_free();
/** Cleanup this JOIN, possibly for reuse */
void cleanup(bool full);
void clear();
- bool save_join_tab();
- bool init_save_join_tab();
bool send_row_on_empty_set()
{
return (do_send_rows && implicit_grouping && !group_optimized_away &&
@@ -1499,6 +1600,8 @@ public:
return (table_map(1) << table_count) - 1;
}
void drop_unused_derived_keys();
+ bool get_best_combination();
+ bool add_sorting_to_table(JOIN_TAB *tab, ORDER *order);
inline void eval_select_list_used_tables();
/*
Return the table for which an index scan can be used to satisfy
@@ -1560,16 +1663,45 @@ public:
int save_explain_data_intern(Explain_query *output, bool need_tmp_table,
bool need_order, bool distinct,
const char *message);
- JOIN_TAB *first_breadth_first_optimization_tab() { return table_access_tabs; }
- JOIN_TAB *first_breadth_first_execution_tab() { return join_tab; }
+ JOIN_TAB *first_breadth_first_tab() { return join_tab; }
private:
/**
+ Create a temporary table to be used for processing DISTINCT/ORDER
+ BY/GROUP BY.
+
+ @note Will modify JOIN object wrt sort/group attributes
+
+ @param tab the JOIN_TAB object to attach created table to
+ @param tmp_table_fields List of items that will be used to define
+ column types of the table.
+ @param tmp_table_group Group key to use for temporary table, NULL if none.
+ @param save_sum_fields If true, do not replace Item_sum items in
+ @c tmp_fields list with Item_field items referring
+ to fields in temporary table.
+
+ @returns false on success, true on failure
+ */
+ bool create_postjoin_aggr_table(JOIN_TAB *tab, List<Item> *tmp_table_fields,
+ ORDER *tmp_table_group,
+ bool save_sum_fields,
+ bool distinct,
+ bool keep_row_ordermake);
+ /**
+ Optimize distinct when used on a subset of the tables.
+
+ E.g.,: SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.b=t2.b
+ In this case we can stop scanning t2 when we have found one t1.a
+ */
+ void optimize_distinct();
+
+ /**
TRUE if the query contains an aggregate function but has no GROUP
BY clause.
*/
bool implicit_grouping;
- bool make_simple_join(JOIN *join, TABLE *tmp_table);
void cleanup_item_list(List<Item> &items) const;
+ bool add_having_as_table_cond(JOIN_TAB *tab);
+ bool make_aggr_tables_info();
};
enum enum_with_bush_roots { WITH_BUSH_ROOTS, WITHOUT_BUSH_ROOTS};
@@ -1594,7 +1726,7 @@ extern const char *join_type_str[];
void count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param,
List<Item> &fields, bool reset_with_sum_func);
bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
- Item **ref_pointer_array,
+ Ref_ptr_array ref_pointer_array,
List<Item> &new_list1, List<Item> &new_list2,
uint elements, List<Item> &fields);
void copy_fields(TMP_TABLE_PARAM *param);
@@ -1649,7 +1781,7 @@ public:
enum store_key_result result;
THD *thd= to_field->table->in_use;
enum_check_fields saved_count_cuted_fields= thd->count_cuted_fields;
- ulonglong sql_mode= thd->variables.sql_mode;
+ sql_mode_t orig_sql_mode= thd->variables.sql_mode;
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
@@ -1658,7 +1790,7 @@ public:
result= copy_inner();
thd->count_cuted_fields= saved_count_cuted_fields;
- thd->variables.sql_mode= sql_mode;
+ thd->variables.sql_mode= orig_sql_mode;
return result;
}
@@ -1833,23 +1965,20 @@ bool error_if_full_join(JOIN *join);
int report_error(TABLE *table, int error);
int safe_index_read(JOIN_TAB *tab);
int get_quick_record(SQL_SELECT *select);
-SORT_FIELD *make_unireg_sortorder(THD *thd, JOIN *join,
- table_map first_table_map,
- ORDER *order, uint *length,
- SORT_FIELD *sortorder);
-int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
- List<Item> &fields, List <Item> &all_fields, ORDER *order);
-int setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
+int setup_order(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
+ List<Item> &fields, List <Item> &all_fields, ORDER *order,
+ bool from_window_spec= false);
+int setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
List<Item> &fields, List<Item> &all_fields, ORDER *order,
- bool *hidden_group_fields);
+ bool *hidden_group_fields, bool from_window_spec= false);
bool fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
- Item **ref_pointer_array);
+ Ref_ptr_array ref_pointer_array);
int join_read_key2(THD *thd, struct st_join_table *tab, TABLE *table,
struct st_table_ref *table_ref);
bool handle_select(THD *thd, LEX *lex, select_result *result,
ulong setup_tables_done_option);
-bool mysql_select(THD *thd, Item ***rref_pointer_array,
+bool mysql_select(THD *thd,
TABLE_LIST *tables, uint wild_num, List<Item> &list,
COND *conds, uint og_num, ORDER *order, ORDER *group,
Item *having, ORDER *proc_param, ulonglong select_type,
@@ -1873,7 +2002,193 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
All methods presume that there is at least one field to change.
*/
-TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list);
+
+class Virtual_tmp_table: public TABLE
+{
+ /**
+ Destruct collected fields. This method is called on errors only,
+ when we could not make the virtual temporary table completely,
+ e.g. when some of the fields could not be created or added.
+
+ This is needed to avoid memory leaks, as some fields can be BLOB
+ variants and thus can have String onboard. Strings must be destructed
+ as they store data on the heap (not on MEM_ROOT).
+ */
+ void destruct_fields()
+ {
+ for (uint i= 0; i < s->fields; i++)
+ delete field[i]; // to invoke the field destructor
+ s->fields= 0; // safety
+ }
+
+protected:
+ /**
+ The number of the fields that are going to be in the table.
+ We remember the number of the fields at init() time, and
+ at open() we check that all of the fields were really added.
+ */
+ uint m_alloced_field_count;
+
+ /**
+ Setup field pointers and null-bit pointers.
+ */
+ void setup_field_pointers();
+
+public:
+ /**
+ Create a new empty virtual temporary table on the thread mem_root.
+ After creation, the caller must:
+ - call init()
+ - populate the table with new fields using add().
+ - call open().
+ @param thd - Current thread.
+ */
+ static void *operator new(size_t size, THD *thd) throw();
+ static void operator delete(void *ptr, size_t size) {TRASH_FREE(ptr, size);}
+
+ Virtual_tmp_table(THD *thd) : m_alloced_field_count(0)
+ {
+ reset();
+ temp_pool_slot= MY_BIT_NONE;
+ in_use= thd;
+ }
+
+ ~Virtual_tmp_table()
+ {
+ if (s)
+ destruct_fields();
+ }
+
+ /**
+ Allocate components for the given number of fields.
+ - fields[]
+ - s->blob_fields[],
+ - bitmaps: def_read_set, def_write_set, tmp_set, eq_join_set, cond_set.
+ @param field_count - The number of fields we plan to add to the table.
+ @returns false - on success.
+ @returns true - on error.
+ */
+ bool init(uint field_count);
+
+ /**
+ Add one Field to the end of the field array, update members:
+ s->reclength, s->fields, s->blob_fields, s->null_fields.
+ */
+ bool add(Field *new_field)
+ {
+ DBUG_ASSERT(s->fields < m_alloced_field_count);
+ new_field->init(this);
+ field[s->fields]= new_field;
+ s->reclength+= new_field->pack_length();
+ if (!(new_field->flags & NOT_NULL_FLAG))
+ s->null_fields++;
+ if (new_field->flags & BLOB_FLAG)
+ {
+ // Note, s->blob_fields was incremented in Field_blob::Field_blob
+ DBUG_ASSERT(s->blob_fields);
+ DBUG_ASSERT(s->blob_fields <= m_alloced_field_count);
+ s->blob_field[s->blob_fields - 1]= s->fields;
+ }
+ s->fields++;
+ return false;
+ }
+
+ /**
+ Add fields from a Column_definition list
+ @returns false - on success.
+ @returns true - on error.
+ */
+ bool add(List<Column_definition> &field_list);
+
+ /**
+ Open a virtual table for read/write:
+ - Setup end markers in TABLE::field and TABLE_SHARE::blob_fields,
+ - Allocate a buffer in TABLE::record[0].
+ - Set field pointers (Field::ptr, Field::null_pos, Field::null_bit) to
+ the allocated record.
+ This method is called when all of the fields have been added to the table.
+ After calling this method the table is ready for read and write operations.
+ @return false - on success
+ @return true - on error (e.g. could not allocate the record buffer).
+ */
+ bool open();
+};
+
+
+/**
+ Create a reduced TABLE object with properly set up Field list from a
+ list of field definitions.
+
+ The created table doesn't have a table handler associated with
+ it, has no keys, no group/distinct, no copy_funcs array.
+ The sole purpose of this TABLE object is to use the power of Field
+ class to read/write data to/from table->record[0]. Then one can store
+ the record in any container (RB tree, hash, etc).
+ The table is created in THD mem_root, so are the table's fields.
+ Consequently, if you don't have BLOB fields, you don't need to free it.
+
+ @param thd connection handle
+ @param field_list list of column definitions
+
+ @return
+ 0 if out of memory, or a
+ TABLE object ready for read and write in case of success
+*/
+
+inline TABLE *
+create_virtual_tmp_table(THD *thd, List<Column_definition> &field_list)
+{
+ Virtual_tmp_table *table;
+ if (!(table= new(thd) Virtual_tmp_table(thd)))
+ return NULL;
+
+ /*
+ If "simulate_create_virtual_tmp_table_out_of_memory" debug option
+ is enabled, we now enable "simulate_out_of_memory". This effectively
+ makes table->init() fail on OOM inside multi_alloc_root().
+ This is done to test that ~Virtual_tmp_table() called from the "delete"
+ below correctly handles OOM.
+ */
+ DBUG_EXECUTE_IF("simulate_create_virtual_tmp_table_out_of_memory",
+ DBUG_SET("+d,simulate_out_of_memory"););
+
+ if (table->init(field_list.elements) ||
+ table->add(field_list) ||
+ table->open())
+ {
+ delete table;
+ return NULL;
+ }
+ return table;
+}
+
+
+/**
+ Create a new virtual temporary table consisting of a single field.
+ SUM(DISTINCT expr) and similar numeric aggregate functions use this.
+ @param thd - Current thread
+ @param field - The field that will be added into the table.
+ @return NULL - On error.
+ @return !NULL - A pointer to the created table that is ready
+ for read and write.
+*/
+inline TABLE *
+create_virtual_tmp_table(THD *thd, Field *field)
+{
+ Virtual_tmp_table *table;
+ DBUG_ASSERT(field);
+ if (!(table= new(thd) Virtual_tmp_table(thd)))
+ return NULL;
+ if (table->init(1) ||
+ table->add(field) ||
+ table->open())
+ {
+ delete table;
+ return NULL;
+ }
+ return table;
+}
+
int test_if_item_cache_changed(List<Cached_item> &list);
int join_init_read_record(JOIN_TAB *tab);
@@ -1956,6 +2271,10 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options);
+bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
+ TMP_ENGINE_COLUMNDEF *start_recinfo,
+ TMP_ENGINE_COLUMNDEF **recinfo,
+ ulonglong options);
bool open_tmp_table(TABLE *table);
void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps);
double prev_record_reads(POSITION *positions, uint idx, table_map found_ref);
@@ -1992,4 +2311,11 @@ public:
int execute(JOIN *join);
};
+bool test_if_order_compatible(SQL_I_List<ORDER> &a, SQL_I_List<ORDER> &b);
+int test_if_group_changed(List<Cached_item> &list);
+int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort);
+
+JOIN_TAB *first_explain_order_tab(JOIN* join);
+JOIN_TAB *next_explain_order_tab(JOIN* join, JOIN_TAB* tab);
+
#endif /* SQL_SELECT_INCLUDED */
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index 0138c3e5a3b..ca721b9e366 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -162,7 +162,7 @@ bool servers_init(bool dont_read_servers_table)
/*
To be able to run this from boot, we allocate a temporary THD
*/
- if (!(thd=new THD))
+ if (!(thd=new THD(0)))
DBUG_RETURN(TRUE);
thd->thread_stack= (char*) &thd;
thd->store_globals();
@@ -205,8 +205,8 @@ static bool servers_load(THD *thd, TABLE_LIST *tables)
free_root(&mem, MYF(0));
init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
- if (init_read_record(&read_record_info,thd,table=tables[0].table,NULL,1,0,
- FALSE))
+ if (init_read_record(&read_record_info,thd,table=tables[0].table, NULL, NULL,
+ 1,0, FALSE))
DBUG_RETURN(1);
while (!(read_record_info.read_record(&read_record_info)))
{
@@ -350,8 +350,8 @@ get_server_from_table_to_cache(TABLE *table)
DBUG_PRINT("info", ("server->socket %s", server->socket));
if (my_hash_insert(&servers_cache, (uchar*) server))
{
- DBUG_PRINT("info", ("had a problem inserting server %s at %lx",
- server->server_name, (long unsigned int) server));
+ DBUG_PRINT("info", ("had a problem inserting server %s at %p",
+ server->server_name, server));
// error handling needed here
DBUG_RETURN(TRUE);
}
@@ -431,13 +431,13 @@ insert_server_record_into_cache(FOREIGN_SERVER *server)
We succeded in insertion of the server to the table, now insert
the server to the cache
*/
- DBUG_PRINT("info", ("inserting server %s at %lx, length %d",
- server->server_name, (long unsigned int) server,
+ DBUG_PRINT("info", ("inserting server %s at %p, length %d",
+ server->server_name, server,
server->server_name_length));
if (my_hash_insert(&servers_cache, (uchar*) server))
{
- DBUG_PRINT("info", ("had a problem inserting server %s at %lx",
- server->server_name, (long unsigned int) server));
+ DBUG_PRINT("info", ("had a problem inserting server %s at %p",
+ server->server_name, server));
// error handling needed here
error= 1;
}
@@ -804,8 +804,8 @@ int update_server_record_in_cache(FOREIGN_SERVER *existing,
*/
if (my_hash_insert(&servers_cache, (uchar*)altered))
{
- DBUG_PRINT("info", ("had a problem inserting server %s at %lx",
- altered->server_name, (long unsigned int) altered));
+ DBUG_PRINT("info", ("had a problem inserting server %s at %p",
+ altered->server_name,altered));
error= ER_OUT_OF_RESOURCES;
}
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 46914ea14c4..a29a6871b78 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -39,7 +39,6 @@
#include "tztime.h" // struct Time_zone
#include "sql_acl.h" // TABLE_ACLS, check_grant, DB_ACLS, acl_get,
// check_grant_db
-#include "filesort.h" // filesort_free_buffers
#include "sp.h"
#include "sp_head.h"
#include "sp_pcontext.h"
@@ -93,6 +92,20 @@ enum enum_i_s_events_fields
#define USERNAME_WITH_HOST_CHAR_LENGTH (USERNAME_CHAR_LENGTH + HOSTNAME_LENGTH + 2)
+
+static const LEX_STRING trg_action_time_type_names[]=
+{
+ { C_STRING_WITH_LEN("BEFORE") },
+ { C_STRING_WITH_LEN("AFTER") }
+};
+
+static const LEX_STRING trg_event_type_names[]=
+{
+ { C_STRING_WITH_LEN("INSERT") },
+ { C_STRING_WITH_LEN("UPDATE") },
+ { C_STRING_WITH_LEN("DELETE") }
+};
+
#ifndef NO_EMBEDDED_ACCESS_CHECKS
static const char *grant_names[]={
"select","insert","update","delete","create","drop","reload","shutdown",
@@ -774,6 +787,57 @@ static void dispose_db_dir(void *ptr)
}
+/*
+ Append an element into @@ignore_db_dirs
+
+ This is a function to be called after regular option processing has been
+ finalized.
+*/
+
+void ignore_db_dirs_append(const char *dirname_arg)
+{
+ char *new_entry_buf;
+ LEX_STRING *new_entry;
+ size_t len= strlen(dirname_arg);
+
+ if (!my_multi_malloc(0,
+ &new_entry, sizeof(LEX_STRING),
+ &new_entry_buf, len + 1,
+ NullS))
+ return;
+
+ memcpy(new_entry_buf, dirname_arg, len+1);
+ new_entry->str = new_entry_buf;
+ new_entry->length= len;
+
+ if (my_hash_insert(&ignore_db_dirs_hash, (uchar *)new_entry))
+ {
+ // Either the name is already there or out-of-memory.
+ my_free(new_entry);
+ return;
+ }
+
+ // Append the name to the option string.
+ size_t curlen= strlen(opt_ignore_db_dirs);
+ // Add one for comma and one for \0.
+ size_t newlen= curlen + len + 1 + 1;
+ char *new_db_dirs;
+ if (!(new_db_dirs= (char*)my_malloc(newlen ,MYF(0))))
+ {
+ // This is not a critical condition
+ return;
+ }
+
+ memcpy(new_db_dirs, opt_ignore_db_dirs, curlen);
+ if (curlen != 0)
+ new_db_dirs[curlen]=',';
+ memcpy(new_db_dirs + (curlen + ((curlen!=0)?1:0)), dirname_arg, len+1);
+
+ if (opt_ignore_db_dirs)
+ my_free(opt_ignore_db_dirs);
+ opt_ignore_db_dirs= new_db_dirs;
+}
+
bool
ignore_db_dirs_process_additions()
{
@@ -1069,7 +1133,7 @@ public:
}
bool handle_condition(THD *thd, uint sql_errno, const char * /* sqlstate */,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char *message, Sql_condition ** /* cond_hdl */)
{
/*
@@ -1169,7 +1233,7 @@ mysqld_show_create_get_fields(THD *thd, TABLE_LIST *table_list,
Temporary tables should be opened for SHOW CREATE TABLE, but not
for SHOW CREATE VIEW.
*/
- if (open_temporary_tables(thd, table_list))
+ if (thd->open_temporary_tables(table_list))
goto exit;
/*
@@ -1206,7 +1270,7 @@ mysqld_show_create_get_fields(THD *thd, TABLE_LIST *table_list,
bool open_error=
open_tables(thd, &table_list, &counter,
MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL) ||
- mysql_handle_derived(lex, DT_PREPARE);
+ mysql_handle_derived(lex, DT_INIT | DT_PREPARE);
thd->pop_internal_handler();
if (open_error && (thd->killed || thd->is_error()))
goto exit;
@@ -1464,7 +1528,7 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
if (open_normal_and_derived_tables(thd, table_list,
MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
DBUG_VOID_RETURN;
table= table_list->table;
@@ -1511,14 +1575,13 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
static const char *require_quotes(const char *name, uint name_length)
{
- uint length;
bool pure_digit= TRUE;
const char *end= name + name_length;
for (; name < end ; name++)
{
uchar chr= (uchar) *name;
- length= my_mbcharlen(system_charset_info, chr);
+ int length= my_charlen(system_charset_info, name, end);
if (length == 1 && !system_charset_info->ident_map[chr])
return name;
if (length == 1 && (chr < '0' || chr > '9'))
@@ -1576,24 +1639,25 @@ append_identifier(THD *thd, String *packet, const char *name, uint length)
if (packet->append(&quote_char, 1, quote_charset))
return true;
- for (name_end= name+length ; name < name_end ; name+= length)
+ for (name_end= name+length ; name < name_end ; )
{
uchar chr= (uchar) *name;
- length= my_mbcharlen(system_charset_info, chr);
+ int char_length= my_charlen(system_charset_info, name, name_end);
/*
- my_mbcharlen can return 0 on a wrong multibyte
+ my_charlen can return 0 and negative numbers on a wrong multibyte
sequence. It is possible when upgrading from 4.0,
and identifier contains some accented characters.
The manual says it does not work. So we'll just
- change length to 1 not to hang in the endless loop.
+ change char_length to 1 not to hang in the endless loop.
*/
- if (!length)
- length= 1;
- if (length == 1 && chr == (uchar) quote_char &&
+ if (char_length <= 0)
+ char_length= 1;
+ if (char_length == 1 && chr == (uchar) quote_char &&
packet->append(&quote_char, 1, quote_charset))
return true;
- if (packet->append(name, length, system_charset_info))
+ if (packet->append(name, char_length, system_charset_info))
return true;
+ name+= char_length;
}
return packet->append(&quote_char, 1, quote_charset);
}
@@ -1684,9 +1748,11 @@ static bool print_on_update_clause(Field *field, String *val, bool lcase)
val->append(STRING_WITH_LEN("on update "));
else
val->append(STRING_WITH_LEN("ON UPDATE "));
- val->append(STRING_WITH_LEN("CURRENT_TIMESTAMP"));
+ val->append(STRING_WITH_LEN("current_timestamp"));
if (field->decimals() > 0)
val->append_parenthesized(field->decimals());
+ else
+ val->append(STRING_WITH_LEN("()"));
return true;
}
return false;
@@ -1697,64 +1763,62 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value,
bool quoted)
{
bool has_default;
- bool has_now_default;
enum enum_field_types field_type= field->type();
- /*
- We are using CURRENT_TIMESTAMP instead of NOW because it is
- more standard
- */
- has_now_default= field->has_insert_default_function();
-
- has_default= (field_type != FIELD_TYPE_BLOB &&
- !(field->flags & NO_DEFAULT_VALUE_FLAG) &&
- field->unireg_check != Field::NEXT_NUMBER &&
- !((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
- && has_now_default));
+ has_default= (field->default_value ||
+ (!(field->flags & NO_DEFAULT_VALUE_FLAG) &&
+ field->unireg_check != Field::NEXT_NUMBER));
def_value->length(0);
if (has_default)
{
- if (has_now_default)
+ StringBuffer<MAX_FIELD_WIDTH> str(field->charset());
+ if (field->default_value)
{
- def_value->append(STRING_WITH_LEN("CURRENT_TIMESTAMP"));
- if (field->decimals() > 0)
- def_value->append_parenthesized(field->decimals());
+ field->default_value->print(&str);
+ if (field->default_value->expr->need_parentheses_in_default())
+ {
+ def_value->set_charset(&my_charset_utf8mb4_general_ci);
+ def_value->append('(');
+ def_value->append(str);
+ def_value->append(')');
+ }
+ else
+ def_value->append(str);
}
else if (!field->is_null())
{ // Not null by default
- char tmp[MAX_FIELD_WIDTH];
- String type(tmp, sizeof(tmp), field->charset());
if (field_type == MYSQL_TYPE_BIT)
{
- longlong dec= field->val_int();
- char *ptr= longlong2str(dec, tmp + 2, 2);
- uint32 length= (uint32) (ptr - tmp);
- tmp[0]= 'b';
- tmp[1]= '\'';
- tmp[length]= '\'';
- type.length(length + 1);
+ str.qs_append('b');
+ str.qs_append('\'');
+ str.qs_append(field->val_int(), 2);
+ str.qs_append('\'');
quoted= 0;
}
else
- field->val_str(&type);
- if (type.length())
{
- String def_val;
+ field->val_str(&str);
+ if (!field->str_needs_quotes())
+ quoted= 0;
+ }
+ if (str.length())
+ {
+ StringBuffer<MAX_FIELD_WIDTH> def_val;
uint dummy_errors;
/* convert to system_charset_info == utf8 */
- def_val.copy(type.ptr(), type.length(), field->charset(),
+ def_val.copy(str.ptr(), str.length(), field->charset(),
system_charset_info, &dummy_errors);
if (quoted)
append_unescaped(def_value, def_val.ptr(), def_val.length());
else
- def_value->append(def_val.ptr(), def_val.length());
+ def_value->append(def_val);
}
else if (quoted)
- def_value->append(STRING_WITH_LEN("''"));
+ def_value->set(STRING_WITH_LEN("''"), system_charset_info);
}
else if (field->maybe_null() && quoted)
- def_value->append(STRING_WITH_LEN("NULL")); // Null as default
+ def_value->set(STRING_WITH_LEN("NULL"), system_charset_info); // Null as default
else
return 0;
@@ -1841,8 +1905,8 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
List<Item> field_list;
char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], def_value_buf[MAX_FIELD_WIDTH];
const char *alias;
- String type(tmp, sizeof(tmp), system_charset_info);
- String def_value(def_value_buf, sizeof(def_value_buf), system_charset_info);
+ String type;
+ String def_value;
Field **ptr,*field;
uint primary_key;
KEY *key_info;
@@ -1935,12 +1999,8 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(STRING_WITH_LEN(" "));
append_identifier(thd,packet,field->field_name, strlen(field->field_name));
packet->append(' ');
- // check for surprises from the previous call to Field::sql_type()
- if (type.ptr() != tmp)
- type.set(tmp, sizeof(tmp), system_charset_info);
- else
- type.set_charset(system_charset_info);
+ type.set(tmp, sizeof(tmp), system_charset_info);
field->sql_type(type);
packet->append(type.ptr(), type.length(), system_charset_info);
@@ -1964,13 +2024,13 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
if (field->vcol_info)
{
- packet->append(STRING_WITH_LEN(" AS ("));
- packet->append(field->vcol_info->expr_str.str,
- field->vcol_info->expr_str.length,
- system_charset_info);
+ StringBuffer<MAX_FIELD_WIDTH> str(&my_charset_utf8mb4_general_ci);
+ field->vcol_info->print(&str);
+ packet->append(STRING_WITH_LEN(" GENERATED ALWAYS AS ("));
+ packet->append(str);
packet->append(STRING_WITH_LEN(")"));
- if (field->stored_in_db)
- packet->append(STRING_WITH_LEN(" PERSISTENT"));
+ if (field->vcol_info->stored_in_db)
+ packet->append(STRING_WITH_LEN(" STORED"));
else
packet->append(STRING_WITH_LEN(" VIRTUAL"));
}
@@ -1987,6 +2047,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(STRING_WITH_LEN(" NULL"));
}
+ def_value.set(def_value_buf, sizeof(def_value_buf), system_charset_info);
if (get_field_default_value(thd, field, &def_value, 1))
{
packet->append(STRING_WITH_LEN(" DEFAULT "));
@@ -2004,6 +2065,14 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
!(sql_mode & MODE_NO_FIELD_OPTIONS))
packet->append(STRING_WITH_LEN(" AUTO_INCREMENT"));
}
+ if (field->check_constraint)
+ {
+ StringBuffer<MAX_FIELD_WIDTH> str(&my_charset_utf8mb4_general_ci);
+ field->check_constraint->print(&str);
+ packet->append(STRING_WITH_LEN(" CHECK ("));
+ packet->append(str);
+ packet->append(STRING_WITH_LEN(")"));
+ }
if (field->comment.length)
{
@@ -2093,6 +2162,28 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
file->free_foreign_key_create_info(for_str);
}
+ /* Add table level check constraints */
+ if (share->table_check_constraints)
+ {
+ for (uint i= share->field_check_constraints;
+ i < share->table_check_constraints ; i++)
+ {
+ StringBuffer<MAX_FIELD_WIDTH> str(&my_charset_utf8mb4_general_ci);
+ Virtual_column_info *check= table->check_constraints[i];
+ check->print(&str);
+
+ packet->append(STRING_WITH_LEN(",\n "));
+ if (check->name.length)
+ {
+ packet->append(STRING_WITH_LEN("CONSTRAINT "));
+ append_identifier(thd, packet, check->name.str, check->name.length);
+ }
+ packet->append(STRING_WITH_LEN(" CHECK ("));
+ packet->append(str);
+ packet->append(STRING_WITH_LEN(")"));
+ }
+ }
+
packet->append(STRING_WITH_LEN("\n)"));
if (show_table_options)
{
@@ -2246,20 +2337,14 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
*/
uint part_syntax_len;
char *part_syntax;
- String comment_start;
- table->part_info->set_show_version_string(&comment_start);
- if ((part_syntax= generate_partition_syntax(table->part_info,
+ if ((part_syntax= generate_partition_syntax(thd, table->part_info,
&part_syntax_len,
- FALSE,
show_table_options,
- NULL, NULL,
- comment_start.c_ptr())))
+ NULL, NULL)))
{
- packet->append(comment_start);
- if (packet->append(part_syntax, part_syntax_len) ||
- packet->append(STRING_WITH_LEN(" */")))
+ packet->append('\n');
+ if (packet->append(part_syntax, part_syntax_len))
error= 1;
- my_free(part_syntax);
}
}
}
@@ -2459,7 +2544,7 @@ public:
size_t size __attribute__((unused)))
{ TRASH_FREE(ptr, size); }
- ulong thread_id;
+ my_thread_id thread_id;
uint32 os_thread_id;
ulonglong start_time;
uint command;
@@ -2633,7 +2718,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
while ((thd_info=thread_infos.get()))
{
protocol->prepare_for_resend();
- protocol->store((ulonglong) thd_info->thread_id);
+ protocol->store(thd_info->thread_id);
protocol->store(thd_info->user, system_charset_info);
protocol->store(thd_info->host, system_charset_info);
protocol->store(thd_info->db, system_charset_info);
@@ -2705,7 +2790,7 @@ int select_result_explain_buffer::send_data(List<Item> &items)
DBUG_ENTER("select_result_explain_buffer::send_data");
/*
- Switch to the recieveing thread, so that we correctly count memory used
+ Switch to the receiving thread, so that we correctly count memory used
by it. This is needed as it's the receiving thread that will free the
memory.
*/
@@ -2907,7 +2992,7 @@ int fill_show_explain(THD *thd, TABLE_LIST *table, COND *cond)
}
else
{
- my_error(ER_NO_SUCH_THREAD, MYF(0), thread_id);
+ my_error(ER_NO_SUCH_THREAD, MYF(0), (ulong) thread_id);
DBUG_RETURN(1);
}
}
@@ -3124,7 +3209,7 @@ int add_status_vars(SHOW_VAR *list)
if (status_vars_inited)
mysql_mutex_lock(&LOCK_show_status);
if (!all_status_vars.buffer && // array is not allocated yet - do it now
- my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 200, 20, MYF(0)))
+ my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 250, 50, MYF(0)))
{
res= 1;
goto err;
@@ -3241,6 +3326,132 @@ void remove_status_vars(SHOW_VAR *list)
}
+/**
+ @brief Returns the value of a system or a status variable.
+
+ @param thd [in] The handle of the current THD.
+ @param variable [in] Details of the variable.
+ @param value_type [in] Variable type.
+ @param show_type [in] Variable show type.
+ @param charset [out] Character set of the value.
+ @param buff [in,out] Buffer to store the value.
+ (Needs to have enough memory
+ to hold the value of variable.)
+ @param length [out] Length of the value.
+
+ @return Pointer to the value buffer.
+*/
+
+const char* get_one_variable(THD *thd,
+ const SHOW_VAR *variable,
+ enum_var_type value_type, SHOW_TYPE show_type,
+ system_status_var *status_var,
+ const CHARSET_INFO **charset, char *buff,
+ size_t *length)
+{
+ void *value= variable->value;
+ const char *pos= buff;
+ const char *end= buff;
+
+
+ if (show_type == SHOW_SYS)
+ {
+ sys_var *var= (sys_var *) value;
+ show_type= var->show_type();
+ value= var->value_ptr(thd, value_type, &null_lex_str);
+ *charset= var->charset(thd);
+ }
+
+ /*
+ note that value may be == buff. All SHOW_xxx code below
+ should still work in this case
+ */
+ switch (show_type) {
+ case SHOW_DOUBLE_STATUS:
+ value= ((char *) status_var + (intptr) value);
+ /* fall through */
+ case SHOW_DOUBLE:
+ /* 6 is the default precision for '%f' in sprintf() */
+ end= buff + my_fcvt(*(double *) value, 6, buff, NULL);
+ break;
+ case SHOW_LONG_STATUS:
+ value= ((char *) status_var + (intptr) value);
+ /* fall through */
+ case SHOW_ULONG:
+ case SHOW_LONG_NOFLUSH: // the difference lies in refresh_status()
+ end= int10_to_str(*(long*) value, buff, 10);
+ break;
+ case SHOW_LONGLONG_STATUS:
+ value= ((char *) status_var + (intptr) value);
+ /* fall through */
+ case SHOW_ULONGLONG:
+ end= longlong10_to_str(*(longlong*) value, buff, 10);
+ break;
+ case SHOW_HA_ROWS:
+ end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10);
+ break;
+ case SHOW_BOOL:
+ end= strmov(buff, *(bool*) value ? "ON" : "OFF");
+ break;
+ case SHOW_MY_BOOL:
+ end= strmov(buff, *(my_bool*) value ? "ON" : "OFF");
+ break;
+ case SHOW_UINT:
+ end= int10_to_str((long) *(uint*) value, buff, 10);
+ break;
+ case SHOW_SINT:
+ end= int10_to_str((long) *(int*) value, buff, -10);
+ break;
+ case SHOW_SLONG:
+ end= int10_to_str(*(long*) value, buff, -10);
+ break;
+ case SHOW_SLONGLONG:
+ end= longlong10_to_str(*(longlong*) value, buff, -10);
+ break;
+ case SHOW_HAVE:
+ {
+ SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value;
+ pos= show_comp_option_name[(int) tmp];
+ end= strend(pos);
+ break;
+ }
+ case SHOW_CHAR:
+ {
+ if (!(pos= (char*)value))
+ pos= "";
+ end= strend(pos);
+ break;
+ }
+ case SHOW_CHAR_PTR:
+ {
+ if (!(pos= *(char**) value))
+ pos= "";
+
+ end= strend(pos);
+ break;
+ }
+ case SHOW_LEX_STRING:
+ {
+ LEX_STRING *ls=(LEX_STRING*)value;
+ if (!(pos= ls->str))
+ end= pos= "";
+ else
+ end= pos + ls->length;
+ break;
+ }
+ case SHOW_UNDEF:
+ break; // Return empty string
+ case SHOW_SYS: // Cannot happen
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+
+ *length= (size_t) (end - pos);
+ return pos;
+}
+
+
static bool show_status_array(THD *thd, const char *wild,
SHOW_VAR *variables,
enum enum_var_type scope,
@@ -3265,7 +3476,7 @@ static bool show_status_array(THD *thd, const char *wild,
prefix_end=strnmov(name_buffer, prefix, sizeof(name_buffer)-1);
if (*prefix)
*prefix_end++= '_';
- len=name_buffer + sizeof(name_buffer) - prefix_end;
+ len=(int)(name_buffer + sizeof(name_buffer) - prefix_end);
#ifdef WITH_WSREP
bool is_wsrep_var= FALSE;
@@ -3283,7 +3494,7 @@ static bool show_status_array(THD *thd, const char *wild,
for (; variables->name; variables++)
{
- bool wild_checked= 0;
+ bool wild_checked= false;
strnmov(prefix_end, variables->name, len);
name_buffer[sizeof(name_buffer)-1]=0; /* Safety */
@@ -3349,113 +3560,25 @@ static bool show_status_array(THD *thd, const char *wild,
else
{
if ((wild_checked ||
- (wild && wild[0] && wild_case_compare(system_charset_info,
- name_buffer, wild))) &&
+ !(wild && wild[0] && wild_case_compare(system_charset_info,
+ name_buffer, wild))) &&
(!cond || cond->val_int()))
{
- void *value=var->value;
- const char *pos, *end; // We assign a lot of const's
+ const char *pos; // We assign a lot of const's
+ size_t length;
if (show_type == SHOW_SYS)
- {
- sys_var *var= (sys_var *) value;
- show_type= var->show_type();
mysql_mutex_lock(&LOCK_global_system_variables);
- value= var->value_ptr(thd, scope, &null_lex_str);
- charset= var->charset(thd);
- }
+ pos= get_one_variable(thd, var, scope, show_type, status_var,
+ &charset, buff, &length);
- pos= end= buff;
- /*
- note that value may be == buff. All SHOW_xxx code below
- should still work in this case
- */
- switch (show_type) {
- case SHOW_DOUBLE_STATUS:
- value= ((char *) status_var + (intptr) value);
- /* fall through */
- case SHOW_DOUBLE:
- /* 6 is the default precision for '%f' in sprintf() */
- end= buff + my_fcvt(*(double *) value, 6, buff, NULL);
- break;
- case SHOW_LONG_STATUS:
- value= ((char *) status_var + (intptr) value);
- /* fall through */
- case SHOW_ULONG:
- case SHOW_LONG_NOFLUSH: // the difference lies in refresh_status()
- end= int10_to_str(*(long*) value, buff, 10);
- break;
- case SHOW_LONGLONG_STATUS:
- value= ((char *) status_var + (intptr) value);
- /* fall through */
- case SHOW_ULONGLONG:
- end= longlong10_to_str(*(longlong*) value, buff, 10);
- break;
- case SHOW_HA_ROWS:
- end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10);
- break;
- case SHOW_BOOL:
- end= strmov(buff, *(bool*) value ? "ON" : "OFF");
- break;
- case SHOW_MY_BOOL:
- end= strmov(buff, *(my_bool*) value ? "ON" : "OFF");
- break;
- case SHOW_UINT:
- end= int10_to_str((long) *(uint*) value, buff, 10);
- break;
- case SHOW_SINT:
- end= int10_to_str((long) *(int*) value, buff, -10);
- break;
- case SHOW_SLONG:
- end= int10_to_str(*(long*) value, buff, -10);
- break;
- case SHOW_SLONGLONG:
- end= longlong10_to_str(*(longlong*) value, buff, -10);
- break;
- case SHOW_HAVE:
- {
- SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value;
- pos= show_comp_option_name[(int) tmp];
- end= strend(pos);
- break;
- }
- case SHOW_CHAR:
- {
- if (!(pos= (char*)value))
- pos= "";
- end= strend(pos);
- break;
- }
- case SHOW_CHAR_PTR:
- {
- if (!(pos= *(char**) value))
- pos= "";
-
- end= strend(pos);
- break;
- }
- case SHOW_LEX_STRING:
- {
- LEX_STRING *ls=(LEX_STRING*)value;
- if (!(pos= ls->str))
- end= pos= "";
- else
- end= pos + ls->length;
- break;
- }
- case SHOW_UNDEF:
- break; // Return empty string
- case SHOW_SYS: // Cannot happen
- default:
- DBUG_ASSERT(0);
- break;
- }
- table->field[1]->store(pos, (uint32) (end - pos), charset);
+ table->field[1]->store(pos, (uint32) length, charset);
+ thd->count_cuted_fields= CHECK_FIELD_IGNORE;
table->field[1]->set_notnull();
-
- if (var->type == SHOW_SYS)
+ if (show_type == SHOW_SYS)
mysql_mutex_unlock(&LOCK_global_system_variables);
+
if (schema_table_store_record(thd, table))
{
res= TRUE;
@@ -3621,7 +3744,7 @@ bool get_lookup_value(THD *thd, Item_func *item_func,
/* Lookup value is database name */
if (!cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1),
(uchar *) item_field->field_name,
- strlen(item_field->field_name), 0))
+ strlen(item_field->field_name)))
{
thd->make_lex_string(&lookup_field_vals->db_value,
tmp_str->ptr(), tmp_str->length());
@@ -3630,7 +3753,7 @@ bool get_lookup_value(THD *thd, Item_func *item_func,
else if (!cs->coll->strnncollsp(cs, (uchar *) field_name2,
strlen(field_name2),
(uchar *) item_field->field_name,
- strlen(item_field->field_name), 0))
+ strlen(item_field->field_name)))
{
thd->make_lex_string(&lookup_field_vals->table_value,
tmp_str->ptr(), tmp_str->length());
@@ -3725,10 +3848,10 @@ bool uses_only_table_name_fields(Item *item, TABLE_LIST *table)
if (table->table != item_field->field->table ||
(cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1),
(uchar *) item_field->field_name,
- strlen(item_field->field_name), 0) &&
+ strlen(item_field->field_name)) &&
cs->coll->strnncollsp(cs, (uchar *) field_name2, strlen(field_name2),
(uchar *) item_field->field_name,
- strlen(item_field->field_name), 0)))
+ strlen(item_field->field_name))))
return 0;
}
else if (item->type() == Item::EXPR_CACHE_ITEM)
@@ -4274,13 +4397,13 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys,
*/
lex->sql_command= SQLCOM_SHOW_FIELDS;
thd->force_read_stats= get_schema_table_idx(schema_table) == SCH_STATISTICS;
- result= (open_temporary_tables(thd, table_list) ||
+ result= (thd->open_temporary_tables(table_list) ||
open_normal_and_derived_tables(thd, table_list,
(MYSQL_OPEN_IGNORE_FLUSH |
MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL |
(can_deadlock ?
MYSQL_OPEN_FAIL_ON_MDL_CONFLICT : 0)),
- DT_PREPARE | DT_CREATE));
+ DT_INIT | DT_PREPARE | DT_CREATE));
(void) read_statistics_for_tables_if_needed(thd, table_list);
thd->force_read_stats= false;
@@ -4345,6 +4468,7 @@ end:
all tables open within this Open_tables_state.
*/
thd->temporary_tables= NULL;
+
close_thread_tables(thd);
/*
Release metadata lock we might have acquired.
@@ -4623,7 +4747,7 @@ static int fill_schema_table_from_frm(THD *thd, TABLE *table,
goto end;
}
- share= tdc_acquire_share_shortlived(thd, &table_list, GTS_TABLE | GTS_VIEW);
+ share= tdc_acquire_share(thd, &table_list, GTS_TABLE | GTS_VIEW);
if (!share)
{
if (thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE ||
@@ -4677,7 +4801,7 @@ static int fill_schema_table_from_frm(THD *thd, TABLE *table,
table_list.view= (LEX*) share->is_view;
res= schema_table->process_table(thd, &table_list, table,
res, db_name, table_name);
- free_root(&tbl.mem_root, MYF(0));
+ closefrm(&tbl);
}
@@ -4715,16 +4839,14 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* msg,
Sql_condition ** cond_hdl)
{
- if (sql_errno == ER_PARSE_ERROR ||
- sql_errno == ER_TRG_NO_DEFINER ||
- sql_errno == ER_TRG_NO_CREATION_CTX)
+ if (sql_errno == ER_TRG_NO_DEFINER || sql_errno == ER_TRG_NO_CREATION_CTX)
return true;
- if (level != Sql_condition::WARN_LEVEL_ERROR)
+ if (*level != Sql_condition::WARN_LEVEL_ERROR)
return false;
if (!thd->get_stmt_da()->is_error())
@@ -5078,10 +5200,11 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
if (share->tmp_table == SYSTEM_TMP_TABLE)
table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs);
- else if (share->tmp_table)
- table->field[3]->store(STRING_WITH_LEN("LOCAL TEMPORARY"), cs);
else
+ {
+ DBUG_ASSERT(share->tmp_table == NO_TMP_TABLE);
table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), cs);
+ }
for (int i= 4; i < 20; i++)
{
@@ -5358,7 +5481,7 @@ static void store_column_type(TABLE *table, Field *field, CHARSET_INFO *cs,
*/
tmp_buff= strchr(column_type.c_ptr_safe(), ' ');
table->field[offset]->store(column_type.ptr(),
- (tmp_buff ? tmp_buff - column_type.ptr() :
+ (tmp_buff ? (uint)(tmp_buff - column_type.ptr()) :
column_type.length()), cs);
is_blob= (field->type() == MYSQL_TYPE_BLOB);
@@ -5463,6 +5586,7 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
TABLE *show_table;
Field **ptr, *field;
int count;
+ bool quoted_defaults= lex->sql_command != SQLCOM_SHOW_FIELDS;
DBUG_ENTER("get_schema_column_record");
if (res)
@@ -5532,7 +5656,7 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
cs);
table->field[4]->store((longlong) count, TRUE);
- if (get_field_default_value(thd, field, &type, 0))
+ if (get_field_default_value(thd, field, &type, quoted_defaults))
{
table->field[5]->store(type.ptr(), type.length(), cs);
table->field[5]->set_notnull();
@@ -5551,13 +5675,24 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
table->field[17]->store(STRING_WITH_LEN("auto_increment"), cs);
if (print_on_update_clause(field, &type, true))
table->field[17]->store(type.ptr(), type.length(), cs);
+
if (field->vcol_info)
{
- if (field->stored_in_db)
- table->field[17]->store(STRING_WITH_LEN("PERSISTENT"), cs);
+ String gen_s(tmp,sizeof(tmp), system_charset_info);
+ gen_s.length(0);
+ field->vcol_info->print(&gen_s);
+ table->field[21]->store(gen_s.ptr(), gen_s.length(), cs);
+ table->field[21]->set_notnull();
+ table->field[20]->store(STRING_WITH_LEN("ALWAYS"), cs);
+
+ if (field->vcol_info->stored_in_db)
+ table->field[17]->store(STRING_WITH_LEN("STORED GENERATED"), cs);
else
- table->field[17]->store(STRING_WITH_LEN("VIRTUAL"), cs);
+ table->field[17]->store(STRING_WITH_LEN("VIRTUAL GENERATED"), cs);
}
+ else
+ table->field[20]->store(STRING_WITH_LEN("NEVER"), cs);
+
table->field[19]->store(field->comment.str, field->comment.length, cs);
if (schema_table_store_record(thd, table))
DBUG_RETURN(1);
@@ -5837,7 +5972,6 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
if (sp)
{
Field *field;
- Create_field *field_def;
String tmp_string;
if (routine_type == TYPE_ENUM_FUNCTION)
{
@@ -5849,14 +5983,7 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
get_field(thd->mem_root, proc_table->field[MYSQL_PROC_MYSQL_TYPE],
&tmp_string);
table->field[15]->store(tmp_string.ptr(), tmp_string.length(), cs);
- field_def= &sp->m_return_field_def;
- field= make_field(&share, thd->mem_root,
- (uchar*) 0, field_def->length,
- (uchar*) "", 0, field_def->pack_flag,
- field_def->sql_type, field_def->charset,
- field_def->geom_type, field_def->srid, Field::NONE,
- field_def->interval, "");
-
+ field= sp->m_return_field_def.make_field(&share, thd->mem_root, "");
field->table= &tbl;
tbl.in_use= thd;
store_column_type(table, field, cs, 6);
@@ -5875,7 +6002,6 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
{
const char *tmp_buff;
sp_variable *spvar= spcont->find_variable(i);
- field_def= &spvar->field_def;
switch (spvar->mode) {
case sp_variable::MODE_IN:
tmp_buff= "IN";
@@ -5904,12 +6030,8 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
&tmp_string);
table->field[15]->store(tmp_string.ptr(), tmp_string.length(), cs);
- field= make_field(&share, thd->mem_root, (uchar*) 0, field_def->length,
- (uchar*) "", 0, field_def->pack_flag,
- field_def->sql_type, field_def->charset,
- field_def->geom_type, field_def->srid, Field::NONE,
- field_def->interval, spvar->name.str);
-
+ field= spvar->field_def.make_field(&share, thd->mem_root,
+ spvar->name.str);
field->table= &tbl;
tbl.in_use= thd;
store_column_type(table, field, cs, 6);
@@ -5996,18 +6118,11 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
TABLE_SHARE share;
TABLE tbl;
Field *field;
- Create_field *field_def= &sp->m_return_field_def;
bzero((char*) &tbl, sizeof(TABLE));
(void) build_table_filename(path, sizeof(path), "", "", "", 0);
init_tmp_table_share(thd, &share, "", 0, "", path);
- field= make_field(&share, thd->mem_root, (uchar*) 0,
- field_def->length,
- (uchar*) "", 0, field_def->pack_flag,
- field_def->sql_type, field_def->charset,
- field_def->geom_type, field_def->srid, Field::NONE,
- field_def->interval, "");
-
+ field= sp->m_return_field_def.make_field(&share, thd->mem_root, "");
field->table= &tbl;
tbl.in_use= thd;
store_column_type(table, field, cs, 5);
@@ -6354,7 +6469,7 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
table->field[5]->store(STRING_WITH_LEN("NO"), cs);
}
- definer_len= (strxmov(definer, tables->definer.user.str, "@",
+ definer_len= (uint)(strxmov(definer, tables->definer.user.str, "@",
tables->definer.host.str, NullS) - definer);
table->field[6]->store(definer, definer_len, cs);
if (tables->view_suid)
@@ -6401,6 +6516,40 @@ bool store_constraints(THD *thd, TABLE *table, LEX_STRING *db_name,
return schema_table_store_record(thd, table);
}
+static int get_check_constraints_record(THD *thd, TABLE_LIST *tables,
+ TABLE *table, bool res,
+ LEX_STRING *db_name,
+ LEX_STRING *table_name)
+{
+ DBUG_ENTER("get_check_constraints_record");
+ if(res)
+ {
+ if (thd->is_error())
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
+ thd->clear_error();
+ DBUG_RETURN(0);
+ }
+ if(!tables->view)
+ {
+ StringBuffer<MAX_FIELD_WIDTH> str(system_charset_info);
+ for (uint i= 0; i < tables->table->s->table_check_constraints; i++)
+ {
+ Virtual_column_info *check= tables->table->check_constraints[i];
+ table->field[0]->store(STRING_WITH_LEN("def"), system_charset_info);
+ table->field[3]->store(check->name.str, check->name.length,
+ system_charset_info);
+ str.length(0);
+ check->print(&str);
+ table->field[4]->store(str.ptr(), str.length(), system_charset_info);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
+ }
+ }
+
+ DBUG_RETURN(0);
+}
static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
@@ -6447,6 +6596,19 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
}
}
+ // Table check constraints
+ for ( uint i = 0; i < show_table->s->table_check_constraints; i++ )
+ {
+ Virtual_column_info *check = show_table->check_constraints[ i ];
+
+ if ( store_constraints( thd, table, db_name, table_name, check->name.str,
+ check->name.length,
+ STRING_WITH_LEN( "CHECK" ) ) )
+ {
+ DBUG_RETURN( 1 );
+ }
+ }
+
show_table->file->get_foreign_key_list(thd, &f_key_list);
FOREIGN_KEY_INFO *f_key_info;
List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list);
@@ -6463,43 +6625,56 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
}
-static bool store_trigger(THD *thd, TABLE *table, LEX_STRING *db_name,
- LEX_STRING *table_name, LEX_STRING *trigger_name,
- enum trg_event_type event,
- enum trg_action_time_type timing,
- LEX_STRING *trigger_stmt,
- ulong sql_mode,
- LEX_STRING *definer_buffer,
- LEX_STRING *client_cs_name,
- LEX_STRING *connection_cl_name,
- LEX_STRING *db_cl_name)
+static bool store_trigger(THD *thd, Trigger *trigger,
+ TABLE *table, LEX_STRING *db_name,
+ LEX_STRING *table_name)
{
CHARSET_INFO *cs= system_charset_info;
LEX_STRING sql_mode_rep;
+ MYSQL_TIME timestamp;
+ char definer_holder[USER_HOST_BUFF_SIZE];
+ LEX_STRING definer_buffer, trigger_stmt, trigger_body;
+ definer_buffer.str= definer_holder;
+
+ trigger->get_trigger_info(&trigger_stmt, &trigger_body, &definer_buffer);
restore_record(table, s->default_values);
table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
- table->field[2]->store(trigger_name->str, trigger_name->length, cs);
- table->field[3]->store(trg_event_type_names[event].str,
- trg_event_type_names[event].length, cs);
+ table->field[2]->store(trigger->name.str, trigger->name.length, cs);
+ table->field[3]->store(trg_event_type_names[trigger->event].str,
+ trg_event_type_names[trigger->event].length, cs);
table->field[4]->store(STRING_WITH_LEN("def"), cs);
table->field[5]->store(db_name->str, db_name->length, cs);
table->field[6]->store(table_name->str, table_name->length, cs);
- table->field[9]->store(trigger_stmt->str, trigger_stmt->length, cs);
+ table->field[7]->store(trigger->action_order);
+ table->field[9]->store(trigger_body.str, trigger_body.length, cs);
table->field[10]->store(STRING_WITH_LEN("ROW"), cs);
- table->field[11]->store(trg_action_time_type_names[timing].str,
- trg_action_time_type_names[timing].length, cs);
+ table->field[11]->store(trg_action_time_type_names[trigger->action_time].str,
+ trg_action_time_type_names[trigger->action_time].length, cs);
table->field[14]->store(STRING_WITH_LEN("OLD"), cs);
table->field[15]->store(STRING_WITH_LEN("NEW"), cs);
- sql_mode_string_representation(thd, sql_mode, &sql_mode_rep);
+ if (trigger->create_time)
+ {
+ table->field[16]->set_notnull();
+ thd->variables.time_zone->gmt_sec_to_TIME(&timestamp,
+ (my_time_t)(trigger->create_time/100));
+ /* timestamp is with 6 digits */
+ timestamp.second_part= (trigger->create_time % 100) * 10000;
+ ((Field_temporal_with_date*) table->field[16])->store_time_dec(&timestamp,
+ 2);
+ }
+
+ sql_mode_string_representation(thd, trigger->sql_mode, &sql_mode_rep);
table->field[17]->store(sql_mode_rep.str, sql_mode_rep.length, cs);
- table->field[18]->store(definer_buffer->str, definer_buffer->length, cs);
- table->field[19]->store(client_cs_name->str, client_cs_name->length, cs);
- table->field[20]->store(connection_cl_name->str,
- connection_cl_name->length, cs);
- table->field[21]->store(db_cl_name->str, db_cl_name->length, cs);
+ table->field[18]->store(definer_buffer.str, definer_buffer.length, cs);
+ table->field[19]->store(trigger->client_cs_name.str,
+ trigger->client_cs_name.length, cs);
+ table->field[20]->store(trigger->connection_cl_name.str,
+ trigger->connection_cl_name.length, cs);
+ table->field[21]->store(trigger->db_cl_name.str,
+ trigger->db_cl_name.length, cs);
return schema_table_store_record(thd, table);
}
@@ -6536,35 +6711,16 @@ static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables,
{
for (timing= 0; timing < (int)TRG_ACTION_MAX; timing++)
{
- LEX_STRING trigger_name;
- LEX_STRING trigger_stmt;
- ulong sql_mode;
- char definer_holder[USER_HOST_BUFF_SIZE];
- LEX_STRING definer_buffer;
- LEX_STRING client_cs_name;
- LEX_STRING connection_cl_name;
- LEX_STRING db_cl_name;
-
- definer_buffer.str= definer_holder;
- if (triggers->get_trigger_info(thd, (enum trg_event_type) event,
- (enum trg_action_time_type)timing,
- &trigger_name, &trigger_stmt,
- &sql_mode,
- &definer_buffer,
- &client_cs_name,
- &connection_cl_name,
- &db_cl_name))
- continue;
-
- if (store_trigger(thd, table, db_name, table_name, &trigger_name,
- (enum trg_event_type) event,
- (enum trg_action_time_type) timing, &trigger_stmt,
- sql_mode,
- &definer_buffer,
- &client_cs_name,
- &connection_cl_name,
- &db_cl_name))
- DBUG_RETURN(1);
+ Trigger *trigger;
+ for (trigger= triggers->
+ get_trigger((enum trg_event_type) event,
+ (enum trg_action_time_type) timing) ;
+ trigger;
+ trigger= trigger->next)
+ {
+ if (store_trigger(thd, trigger, table, db_name, table_name))
+ DBUG_RETURN(1);
+ }
}
}
}
@@ -6841,7 +6997,7 @@ get_partition_column_description(THD *thd,
{
part_column_list_val *col_val= &list_value->col_val_array[i];
if (col_val->max_value)
- tmp_str.append(partition_keywords[PKW_MAXVALUE].str);
+ tmp_str.append(STRING_WITH_LEN("MAXVALUE"));
else if (col_val->null_value)
tmp_str.append("NULL");
else
@@ -6918,27 +7074,21 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
case LIST_PARTITION:
tmp_res.length(0);
if (part_info->part_type == RANGE_PARTITION)
- tmp_res.append(partition_keywords[PKW_RANGE].str,
- partition_keywords[PKW_RANGE].length);
+ tmp_res.append(STRING_WITH_LEN("RANGE"));
else
- tmp_res.append(partition_keywords[PKW_LIST].str,
- partition_keywords[PKW_LIST].length);
+ tmp_res.append(STRING_WITH_LEN("LIST"));
if (part_info->column_list)
- tmp_res.append(partition_keywords[PKW_COLUMNS].str,
- partition_keywords[PKW_COLUMNS].length);
+ tmp_res.append(STRING_WITH_LEN(" COLUMNS"));
table->field[7]->store(tmp_res.ptr(), tmp_res.length(), cs);
break;
case HASH_PARTITION:
tmp_res.length(0);
if (part_info->linear_hash_ind)
- tmp_res.append(partition_keywords[PKW_LINEAR].str,
- partition_keywords[PKW_LINEAR].length);
+ tmp_res.append(STRING_WITH_LEN("LINEAR "));
if (part_info->list_of_part_fields)
- tmp_res.append(partition_keywords[PKW_KEY].str,
- partition_keywords[PKW_KEY].length);
+ tmp_res.append(STRING_WITH_LEN("KEY"));
else
- tmp_res.append(partition_keywords[PKW_HASH].str,
- partition_keywords[PKW_HASH].length);
+ tmp_res.append(STRING_WITH_LEN("HASH"));
table->field[7]->store(tmp_res.ptr(), tmp_res.length(), cs);
break;
default:
@@ -6951,8 +7101,9 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
/* Partition expression */
if (part_info->part_expr)
{
- table->field[9]->store(part_info->part_func_string,
- part_info->part_func_len, cs);
+ StringBuffer<STRING_BUFFER_USUAL_SIZE> str(cs);
+ part_info->part_expr->print_for_table_def(&str);
+ table->field[9]->store(str.ptr(), str.length(), str.charset());
}
else if (part_info->list_of_part_fields)
{
@@ -6966,22 +7117,20 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
/* Subpartition method */
tmp_res.length(0);
if (part_info->linear_hash_ind)
- tmp_res.append(partition_keywords[PKW_LINEAR].str,
- partition_keywords[PKW_LINEAR].length);
+ tmp_res.append(STRING_WITH_LEN("LINEAR "));
if (part_info->list_of_subpart_fields)
- tmp_res.append(partition_keywords[PKW_KEY].str,
- partition_keywords[PKW_KEY].length);
+ tmp_res.append(STRING_WITH_LEN("KEY"));
else
- tmp_res.append(partition_keywords[PKW_HASH].str,
- partition_keywords[PKW_HASH].length);
+ tmp_res.append(STRING_WITH_LEN("HASH"));
table->field[8]->store(tmp_res.ptr(), tmp_res.length(), cs);
table->field[8]->set_notnull();
/* Subpartition expression */
if (part_info->subpart_expr)
{
- table->field[10]->store(part_info->subpart_func_string,
- part_info->subpart_func_len, cs);
+ StringBuffer<STRING_BUFFER_USUAL_SIZE> str(cs);
+ part_info->subpart_expr->print_for_table_def(&str);
+ table->field[10]->store(str.ptr(), str.length(), str.charset());
}
else if (part_info->list_of_subpart_fields)
{
@@ -7022,8 +7171,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
if (part_elem->range_value != LONGLONG_MAX)
table->field[11]->store((longlong) part_elem->range_value, FALSE);
else
- table->field[11]->store(partition_keywords[PKW_MAXVALUE].str,
- partition_keywords[PKW_MAXVALUE].length, cs);
+ table->field[11]->store(STRING_WITH_LEN("MAXVALUE"), cs);
}
table->field[11]->set_notnull();
}
@@ -7338,7 +7486,7 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond)
COND *partial_cond= make_cond_for_info_schema(thd, cond, tables);
- mysql_rwlock_rdlock(&LOCK_system_variables_hash);
+ mysql_prlock_rdlock(&LOCK_system_variables_hash);
/*
Avoid recursive LOCK_system_variables_hash acquisition in
@@ -7353,7 +7501,7 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond)
res= show_status_array(thd, wild, enumerate_sys_vars(thd, sorted_vars, scope),
scope, NULL, "", tables->table,
upper_case_names, partial_cond);
- mysql_rwlock_unlock(&LOCK_system_variables_hash);
+ mysql_prlock_unlock(&LOCK_system_variables_hash);
DBUG_RETURN(res);
}
@@ -7648,6 +7796,7 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
strlen(fields_info->field_name),
fields_info->field_type)))
DBUG_RETURN(0);
+ item->decimals= fields_info->field_length;
break;
case MYSQL_TYPE_FLOAT:
case MYSQL_TYPE_DOUBLE:
@@ -7677,7 +7826,7 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
item->max_length+= 1;
if (item->decimals > 0)
item->max_length+= 1;
- item->set_name(fields_info->field_name,
+ item->set_name(thd, fields_info->field_name,
strlen(fields_info->field_name), cs);
break;
case MYSQL_TYPE_TINY_BLOB:
@@ -7700,7 +7849,7 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
{
DBUG_RETURN(0);
}
- item->set_name(fields_info->field_name,
+ item->set_name(thd, fields_info->field_name,
strlen(fields_info->field_name), cs);
break;
}
@@ -7760,7 +7909,7 @@ static int make_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
Item_field(thd, context, NullS, NullS, field_info->field_name);
if (field)
{
- field->set_name(field_info->old_name,
+ field->set_name(thd, field_info->old_name,
strlen(field_info->old_name),
system_charset_info);
if (add_item_to_list(thd, field))
@@ -7795,7 +7944,7 @@ int make_schemata_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
buffer.append(lex->wild->ptr());
buffer.append(')');
}
- field->set_name(buffer.ptr(), buffer.length(), system_charset_info);
+ field->set_name(thd, buffer.ptr(), buffer.length(), system_charset_info);
}
return 0;
}
@@ -7822,15 +7971,15 @@ int make_table_names_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
NullS, NullS, field_info->field_name);
if (add_item_to_list(thd, field))
return 1;
- field->set_name(buffer.ptr(), buffer.length(), system_charset_info);
+ field->set_name(thd, buffer.ptr(), buffer.length(), system_charset_info);
if (thd->lex->verbose)
{
- field->set_name(buffer.ptr(), buffer.length(), system_charset_info);
+ field->set_name(thd, buffer.ptr(), buffer.length(), system_charset_info);
field_info= &schema_table->fields_info[3];
field= new (thd->mem_root) Item_field(thd, context, NullS, NullS, field_info->field_name);
if (add_item_to_list(thd, field))
return 1;
- field->set_name(field_info->old_name, strlen(field_info->old_name),
+ field->set_name(thd, field_info->old_name, strlen(field_info->old_name),
system_charset_info);
}
return 0;
@@ -7855,7 +8004,7 @@ int make_columns_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
NullS, NullS, field_info->field_name);
if (field)
{
- field->set_name(field_info->old_name,
+ field->set_name(thd, field_info->old_name,
strlen(field_info->old_name),
system_charset_info);
if (add_item_to_list(thd, field))
@@ -7880,7 +8029,7 @@ int make_character_sets_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
NullS, NullS, field_info->field_name);
if (field)
{
- field->set_name(field_info->old_name,
+ field->set_name(thd, field_info->old_name,
strlen(field_info->old_name),
system_charset_info);
if (add_item_to_list(thd, field))
@@ -7905,7 +8054,7 @@ int make_proc_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
NullS, NullS, field_info->field_name);
if (field)
{
- field->set_name(field_info->old_name,
+ field->set_name(thd, field_info->old_name,
strlen(field_info->old_name),
system_charset_info);
if (add_item_to_list(thd, field))
@@ -8259,8 +8408,6 @@ bool get_schema_tables_result(JOIN *join,
table_list->table->file->extra(HA_EXTRA_NO_CACHE);
table_list->table->file->extra(HA_EXTRA_RESET_STATE);
table_list->table->file->ha_delete_all_rows();
- free_io_cache(table_list->table);
- filesort_free_buffers(table_list->table,1);
table_list->table->null_row= 0;
}
else
@@ -8519,10 +8666,13 @@ ST_FIELD_INFO columns_fields_info[]=
OPEN_FRM_ONLY},
{"COLUMN_TYPE", 65535, MYSQL_TYPE_STRING, 0, 0, "Type", OPEN_FRM_ONLY},
{"COLUMN_KEY", 3, MYSQL_TYPE_STRING, 0, 0, "Key", OPEN_FRM_ONLY},
- {"EXTRA", 27, MYSQL_TYPE_STRING, 0, 0, "Extra", OPEN_FRM_ONLY},
+ {"EXTRA", 30, MYSQL_TYPE_STRING, 0, 0, "Extra", OPEN_FRM_ONLY},
{"PRIVILEGES", 80, MYSQL_TYPE_STRING, 0, 0, "Privileges", OPEN_FRM_ONLY},
{"COLUMN_COMMENT", COLUMN_COMMENT_MAXLEN, MYSQL_TYPE_STRING, 0, 0,
"Comment", OPEN_FRM_ONLY},
+ {"IS_GENERATED", 6, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
+ {"GENERATION_EXPRESSION", MAX_FIELD_VARCHARLENGTH, MYSQL_TYPE_STRING, 0, 1,
+ 0, OPEN_FRM_ONLY},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
};
@@ -8857,7 +9007,8 @@ ST_FIELD_INFO triggers_fields_info[]=
OPEN_FRM_ONLY},
{"ACTION_REFERENCE_OLD_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"ACTION_REFERENCE_NEW_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
- {"CREATED", 0, MYSQL_TYPE_DATETIME, 0, 1, "Created", OPEN_FRM_ONLY},
+ /* 2 here indicates 2 decimals */
+ {"CREATED", 2, MYSQL_TYPE_DATETIME, 0, 1, "Created", OPEN_FRM_ONLY},
{"SQL_MODE", 32*256, MYSQL_TYPE_STRING, 0, 0, "sql_mode", OPEN_FRM_ONLY},
{"DEFINER", DEFINER_CHAR_LENGTH, MYSQL_TYPE_STRING, 0, 0, "Definer", OPEN_FRM_ONLY},
{"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
@@ -8961,7 +9112,7 @@ ST_FIELD_INFO processlist_fields_info[]=
{"MAX_STAGE", 2, MYSQL_TYPE_TINY, 0, 0, "Max_stage", SKIP_OPEN_TABLE},
{"PROGRESS", 703, MYSQL_TYPE_DECIMAL, 0, 0, "Progress",
SKIP_OPEN_TABLE},
- {"MEMORY_USED", 7, MYSQL_TYPE_LONG, 0, 0, "Memory_used", SKIP_OPEN_TABLE},
+ {"MEMORY_USED", 7, MYSQL_TYPE_LONGLONG, 0, 0, "Memory_used", SKIP_OPEN_TABLE},
{"EXAMINED_ROWS", 7, MYSQL_TYPE_LONG, 0, 0, "Examined_rows", SKIP_OPEN_TABLE},
{"QUERY_ID", 4, MYSQL_TYPE_LONGLONG, 0, 0, 0, SKIP_OPEN_TABLE},
{"INFO_BINARY", PROCESS_LIST_INFO_WIDTH, MYSQL_TYPE_BLOB, 0, 1,
@@ -8994,7 +9145,7 @@ ST_FIELD_INFO plugin_fields_info[]=
ST_FIELD_INFO files_fields_info[]=
{
{"FILE_ID", 4, MYSQL_TYPE_LONGLONG, 0, 0, 0, SKIP_OPEN_TABLE},
- {"FILE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"FILE_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
{"FILE_TYPE", 20, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLESPACE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
SKIP_OPEN_TABLE},
@@ -9214,6 +9365,15 @@ ST_FIELD_INFO spatial_ref_sys_fields_info[]=
};
#endif /*HAVE_SPATIAL*/
+ST_FIELD_INFO check_constraints_fields_info[]=
+{
+ {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
+ {"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
+ {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
+ {"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
+ {"CHECK_CLAUSE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE }
+};
/*
Description of ST_FIELD_INFO in table.h
@@ -9230,6 +9390,8 @@ ST_SCHEMA_TABLE schema_tables[]=
fill_schema_applicable_roles, 0, 0, -1, -1, 0, 0},
{"CHARACTER_SETS", charsets_fields_info, 0,
fill_schema_charsets, make_character_sets_old_format, 0, -1, -1, 0, 0},
+ {"CHECK_CONSTRAINTS", check_constraints_fields_info, 0, get_all_tables, 0,
+ get_check_constraints_record, 1, 2, 0, OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"COLLATIONS", collation_fields_info, 0,
fill_schema_collation, make_old_format, 0, -1, -1, 0, 0},
{"COLLATION_CHARACTER_SET_APPLICABILITY", coll_charset_app_fields_info,
@@ -9388,35 +9550,35 @@ int finalize_schema_table(st_plugin_int *plugin)
DBUG_RETURN(0);
}
+/*
+ This is used to create a timestamp field
+*/
+
+MYSQL_TIME zero_time={ 0,0,0,0,0,0,0,0, MYSQL_TIMESTAMP_TIME };
/**
Output trigger information (SHOW CREATE TRIGGER) to the client.
@param thd Thread context.
- @param triggers List of triggers for the table.
- @param trigger_idx Index of the trigger to dump.
+ @param trigger Trigger to dump
@return Operation status
@retval TRUE Error.
@retval FALSE Success.
*/
-static bool show_create_trigger_impl(THD *thd,
- Table_triggers_list *triggers,
- int trigger_idx)
+static bool show_create_trigger_impl(THD *thd, Trigger *trigger)
{
int ret_code;
Protocol *p= thd->protocol;
List<Item> fields;
- LEX_STRING trg_name;
- ulonglong trg_sql_mode;
- LEX_STRING trg_sql_mode_str;
+ LEX_STRING trg_sql_mode_str, trg_body;
LEX_STRING trg_sql_original_stmt;
- LEX_STRING trg_client_cs_name;
- LEX_STRING trg_connection_cl_name;
- LEX_STRING trg_db_cl_name;
+ LEX_STRING trg_definer;
CHARSET_INFO *trg_client_cs;
MEM_ROOT *mem_root= thd->mem_root;
+ char definer_holder[USER_HOST_BUFF_SIZE];
+ trg_definer.str= definer_holder;
/*
TODO: Check privileges here. This functionality will be added by
@@ -9430,20 +9592,12 @@ static bool show_create_trigger_impl(THD *thd,
/* Prepare trigger "object". */
- triggers->get_trigger_info(thd,
- trigger_idx,
- &trg_name,
- &trg_sql_mode,
- &trg_sql_original_stmt,
- &trg_client_cs_name,
- &trg_connection_cl_name,
- &trg_db_cl_name);
-
- sql_mode_string_representation(thd, trg_sql_mode, &trg_sql_mode_str);
+ trigger->get_trigger_info(&trg_sql_original_stmt, &trg_body, &trg_definer);
+ sql_mode_string_representation(thd, trigger->sql_mode, &trg_sql_mode_str);
/* Resolve trigger client character set. */
- if (resolve_charset(trg_client_cs_name.str, NULL, &trg_client_cs))
+ if (resolve_charset(trigger->client_cs_name.str, NULL, &trg_client_cs))
return TRUE;
/* Send header. */
@@ -9485,6 +9639,11 @@ static bool show_create_trigger_impl(THD *thd,
MY_CS_NAME_SIZE),
mem_root);
+ Item_datetime_literal *tmp= (new (mem_root)
+ Item_datetime_literal(thd, &zero_time, 2));
+ tmp->set_name(thd, STRING_WITH_LEN("Created"), system_charset_info);
+ fields.push_back(tmp, mem_root);
+
if (p->send_result_set_metadata(&fields,
Protocol::SEND_NUM_ROWS |
Protocol::SEND_EOF))
@@ -9494,8 +9653,8 @@ static bool show_create_trigger_impl(THD *thd,
p->prepare_for_resend();
- p->store(trg_name.str,
- trg_name.length,
+ p->store(trigger->name.str,
+ trigger->name.length,
system_charset_info);
p->store(trg_sql_mode_str.str,
@@ -9506,18 +9665,30 @@ static bool show_create_trigger_impl(THD *thd,
trg_sql_original_stmt.length,
trg_client_cs);
- p->store(trg_client_cs_name.str,
- trg_client_cs_name.length,
+ p->store(trigger->client_cs_name.str,
+ trigger->client_cs_name.length,
system_charset_info);
- p->store(trg_connection_cl_name.str,
- trg_connection_cl_name.length,
+ p->store(trigger->connection_cl_name.str,
+ trigger->connection_cl_name.length,
system_charset_info);
- p->store(trg_db_cl_name.str,
- trg_db_cl_name.length,
+ p->store(trigger->db_cl_name.str,
+ trigger->db_cl_name.length,
system_charset_info);
+ if (trigger->create_time)
+ {
+ MYSQL_TIME timestamp;
+ thd->variables.time_zone->gmt_sec_to_TIME(&timestamp,
+ (my_time_t)(trigger->create_time/100));
+ timestamp.second_part= (trigger->create_time % 100) * 10000;
+ p->store(&timestamp, 2);
+ }
+ else
+ p->store_null();
+
+
ret_code= p->write();
if (!ret_code)
@@ -9609,7 +9780,7 @@ bool show_create_trigger(THD *thd, const sp_name *trg_name)
TABLE_LIST *lst= get_trigger_table(thd, trg_name);
uint num_tables; /* NOTE: unused, only to pass to open_tables(). */
Table_triggers_list *triggers;
- int trigger_idx;
+ Trigger *trigger;
bool error= TRUE;
if (!lst)
@@ -9650,9 +9821,9 @@ bool show_create_trigger(THD *thd, const sp_name *trg_name)
goto exit;
}
- trigger_idx= triggers->find_trigger_by_name(&trg_name->m_name);
+ trigger= triggers->find_trigger(&trg_name->m_name, 0);
- if (trigger_idx < 0)
+ if (!trigger)
{
my_error(ER_TRG_CORRUPTED_FILE, MYF(0),
(const char *) trg_name->m_db.str,
@@ -9661,7 +9832,7 @@ bool show_create_trigger(THD *thd, const sp_name *trg_name)
goto exit;
}
- error= show_create_trigger_impl(thd, triggers, trigger_idx);
+ error= show_create_trigger_impl(thd, trigger);
/*
NOTE: if show_create_trigger_impl() failed, that means we could not
@@ -9845,7 +10016,7 @@ char *thd_get_error_context_description(THD *thd, char *buffer,
mysql_mutex_lock(&LOCK_thread_count);
len= my_snprintf(header, sizeof(header),
- "MySQL thread id %lu, OS thread handle 0x%lx, query id %lu",
+ "MySQL thread id %lu, OS thread handle %lu, query id %lu",
thd->thread_id, (ulong) thd->real_id, (ulong) thd->query_id);
str.length(0);
str.append(header, len);
diff --git a/sql/sql_show.h b/sql/sql_show.h
index dbae2a42b39..e93b855450c 100644
--- a/sql/sql_show.h
+++ b/sql/sql_show.h
@@ -131,6 +131,12 @@ bool get_schema_tables_result(JOIN *join,
enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table);
TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list);
+const char* get_one_variable(THD *thd, const SHOW_VAR *variable,
+ enum_var_type value_type, SHOW_TYPE show_type,
+ system_status_var *status_var,
+ const CHARSET_INFO **charset, char *buff,
+ size_t *length);
+
/* These functions were under INNODB_COMPATIBILITY_HOOKS */
int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
THD *find_thread_by_id(longlong id, bool query_id= false);
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index d30ddfb6eec..6c97ad7e9ab 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -16,15 +16,13 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#include "m_string.h" /* memset */
-#include "my_global.h" /* uchar */
#include "my_base.h" /* ha_rows */
#include "my_sys.h" /* qsort2_cmp */
#include "queues.h"
typedef struct st_buffpek BUFFPEK;
-typedef struct st_sort_field SORT_FIELD;
+struct SORT_FIELD;
class Field;
struct TABLE;
@@ -71,7 +69,6 @@ public:
uint rec_length; // Length of sorted records.
uint sort_length; // Length of sorted columns.
uint ref_length; // Length of record ref.
- uint addon_length; // Length of added packed fields.
uint res_length; // Length of records in final sorted file/buffer.
uint max_keys_per_buffer; // Max keys / buffer.
uint min_dupl_count;
@@ -81,6 +78,8 @@ public:
SORT_FIELD *local_sortorder;
SORT_FIELD *end;
SORT_ADDON_FIELD *addon_field; // Descriptors for companion fields.
+ LEX_STRING addon_buf; // Buffer & length of added packed fields.
+
uchar *unique_buff;
bool not_killable;
char* tmp_buffer;
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index b435971a4d6..3c26f58073d 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -28,6 +28,7 @@
#include "key.h"
#include "sql_statistics.h"
#include "opt_range.h"
+#include "uniques.h"
#include "my_atomic.h"
#include "sql_show.h"
#include "sql_partition.h"
@@ -843,7 +844,7 @@ public:
else
{
stat_field->set_notnull();
- stat_field->store(table->collected_stats->cardinality);
+ stat_field->store(table->collected_stats->cardinality,true);
}
}
@@ -1055,20 +1056,24 @@ public:
switch (i) {
case COLUMN_STAT_MIN_VALUE:
if (table_field->type() == MYSQL_TYPE_BIT)
- stat_field->store(table_field->collected_stats->min_value->val_int());
+ stat_field->store(table_field->collected_stats->min_value->val_int(),true);
else
{
table_field->collected_stats->min_value->val_str(&val);
- stat_field->store(val.ptr(), val.length(), &my_charset_bin);
+ uint32 length= Well_formed_prefix(val.charset(), val.ptr(),
+ MY_MIN(val.length(), stat_field->field_length)).length();
+ stat_field->store(val.ptr(), length, &my_charset_bin);
}
break;
case COLUMN_STAT_MAX_VALUE:
if (table_field->type() == MYSQL_TYPE_BIT)
- stat_field->store(table_field->collected_stats->max_value->val_int());
+ stat_field->store(table_field->collected_stats->max_value->val_int(),true);
else
{
table_field->collected_stats->max_value->val_str(&val);
- stat_field->store(val.ptr(), val.length(), &my_charset_bin);
+ uint32 length= Well_formed_prefix(val.charset(), val.ptr(),
+ MY_MIN(val.length(), stat_field->field_length)).length();
+ stat_field->store(val.ptr(), length, &my_charset_bin);
}
break;
case COLUMN_STAT_NULLS_RATIO:
@@ -1632,7 +1637,7 @@ public:
of the parameters to be passed to the constructor of the Unique object.
*/
- Count_distinct_field(Field *field, uint max_heap_table_size)
+ Count_distinct_field(Field *field, size_t max_heap_table_size)
{
table_field= field;
tree_key_length= field->pack_length();
@@ -1730,7 +1735,7 @@ class Count_distinct_field_bit: public Count_distinct_field
{
public:
- Count_distinct_field_bit(Field *field, uint max_heap_table_size)
+ Count_distinct_field_bit(Field *field, size_t max_heap_table_size)
{
table_field= field;
tree_key_length= sizeof(ulonglong);
@@ -1802,8 +1807,9 @@ private:
public:
bool is_single_comp_pk;
+ bool is_partial_fields_present;
- Index_prefix_calc(TABLE *table, KEY *key_info)
+ Index_prefix_calc(THD *thd, TABLE *table, KEY *key_info)
: index_table(table), index_info(key_info)
{
uint i;
@@ -1813,7 +1819,7 @@ public:
prefixes= 0;
LINT_INIT_STRUCT(calc_state);
- is_single_comp_pk= FALSE;
+ is_partial_fields_present= is_single_comp_pk= FALSE;
uint pk= table->s->primary_key;
if ((uint) (table->key_info - key_info) == pk &&
table->key_info[pk].user_defined_key_parts == 1)
@@ -1824,9 +1830,9 @@ public:
}
if ((calc_state=
- (Prefix_calc_state *) sql_alloc(sizeof(Prefix_calc_state)*key_parts)))
+ (Prefix_calc_state *) thd->alloc(sizeof(Prefix_calc_state)*key_parts)))
{
- uint keyno= key_info-table->key_info;
+ uint keyno= (uint)(key_info-table->key_info);
for (i= 0, state= calc_state; i < key_parts; i++, state++)
{
/*
@@ -1835,10 +1841,14 @@ public:
calculating the values of 'avg_frequency' for prefixes.
*/
if (!key_info->key_part[i].field->part_of_key.is_set(keyno))
+ {
+ is_partial_fields_present= TRUE;
break;
+ }
if (!(state->last_prefix=
- new Cached_item_field(key_info->key_part[i].field)))
+ new (thd->mem_root) Cached_item_field(thd,
+ key_info->key_part[i].field)))
break;
state->entry_count= state->prefix_count= 0;
prefixes++;
@@ -2442,7 +2452,7 @@ int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share,
inline
void Column_statistics_collected::init(THD *thd, Field *table_field)
{
- uint max_heap_table_size= thd->variables.max_heap_table_size;
+ size_t max_heap_table_size= (size_t)thd->variables.max_heap_table_size;
TABLE *table= table_field->table;
uint pk= table->s->primary_key;
@@ -2625,7 +2635,7 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
if (key_info->flags & (HA_FULLTEXT|HA_SPATIAL))
DBUG_RETURN(rc);
- Index_prefix_calc index_prefix_calc(table, key_info);
+ Index_prefix_calc index_prefix_calc(thd, table, key_info);
DEBUG_SYNC(table->in_use, "statistics_collection_start1");
DEBUG_SYNC(table->in_use, "statistics_collection_start2");
@@ -2636,9 +2646,13 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
DBUG_RETURN(rc);
}
- table->key_read= 1;
- table->file->extra(HA_EXTRA_KEYREAD);
-
+ /*
+ Request "only index read" in case of absence of fields which are
+ partially in the index to avoid problems with partitioning (for example)
+ which want to get whole field value.
+ */
+ if (!index_prefix_calc.is_partial_fields_present)
+ table->file->ha_start_keyread(index);
table->file->ha_index_init(index, TRUE);
rc= table->file->ha_index_first(table->record[0]);
while (rc != HA_ERR_END_OF_FILE)
@@ -2652,7 +2666,7 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
index_prefix_calc.add();
rc= table->file->ha_index_next(table->record[0]);
}
- table->key_read= 0;
+ table->file->ha_end_keyread();
table->file->ha_index_end();
rc= (rc == HA_ERR_END_OF_FILE && !thd->killed) ? 0 : 1;
@@ -3052,7 +3066,7 @@ int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
}
}
}
-
+
table->stats_is_read= TRUE;
DBUG_RETURN(0);
@@ -3778,14 +3792,14 @@ double get_column_avg_frequency(Field * field)
*/
if (!table->s->field)
{
- res= table->stat_records();
+ res= (double)table->stat_records();
return res;
}
- Column_statistics *col_stats= table->s->field[field->field_index]->read_stats;
+ Column_statistics *col_stats= field->read_stats;
if (!col_stats)
- res= table->stat_records();
+ res= (double)table->stat_records();
else
res= col_stats->get_avg_frequency();
return res;
@@ -3810,7 +3824,10 @@ double get_column_avg_frequency(Field * field)
using the statistical data from the table column_stats.
@retval
- The required estimate of the rows in the column range
+ - The required estimate of the rows in the column range
+ - If there is some kind of error, this function should return DBL_MAX (and
+ not HA_POS_ERROR as that is an integer constant).
+
*/
double get_column_range_cardinality(Field *field,
@@ -3820,8 +3837,8 @@ double get_column_range_cardinality(Field *field,
{
double res;
TABLE *table= field->table;
- Column_statistics *col_stats= table->field[field->field_index]->read_stats;
- double tab_records= table->stat_records();
+ Column_statistics *col_stats= field->read_stats;
+ double tab_records= (double)table->stat_records();
if (!col_stats)
return tab_records;
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
index 6530c2d6c8f..8600db8890d 100644
--- a/sql/sql_statistics.h
+++ b/sql/sql_statistics.h
@@ -21,7 +21,7 @@ enum enum_use_stat_tables_mode
{
NEVER,
COMPLEMENTARY,
- PEFERABLY,
+ PREFERABLY,
} Use_stat_tables_mode;
typedef
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index c22e33182c6..615de8b545a 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -1,4 +1,5 @@
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2016, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -31,9 +32,9 @@
** String functions
*****************************************************************************/
-bool String::real_alloc(uint32 length)
+bool String::real_alloc(size_t length)
{
- uint32 arg_length= ALIGN_SIZE(length + 1);
+ size_t arg_length= ALIGN_SIZE(length + 1);
DBUG_ASSERT(arg_length > length);
if (arg_length <= length)
return TRUE; /* Overflow */
@@ -45,7 +46,8 @@ bool String::real_alloc(uint32 length)
(thread_specific ?
MY_THREAD_SPECIFIC : 0)))))
return TRUE;
- Alloced_length=arg_length;
+ DBUG_ASSERT(length < UINT_MAX32);
+ Alloced_length=(uint32) arg_length;
alloced=1;
}
Ptr[0]=0;
@@ -80,7 +82,7 @@ bool String::real_alloc(uint32 length)
@retval true An error occurred when attempting to allocate memory.
*/
-bool String::realloc_raw(uint32 alloc_length)
+bool String::realloc_raw(size_t alloc_length)
{
if (Alloced_length <= alloc_length)
{
@@ -112,7 +114,8 @@ bool String::realloc_raw(uint32 alloc_length)
else
return TRUE; // Signal error
Ptr= new_ptr;
- Alloced_length= len;
+ DBUG_ASSERT(len < UINT_MAX32);
+ Alloced_length= (uint32)len;
}
return FALSE;
}
@@ -136,10 +139,10 @@ bool String::set_real(double num,uint decimals, CHARSET_INFO *cs)
size_t len;
str_charset=cs;
- if (decimals >= NOT_FIXED_DEC)
+ if (decimals >= FLOATING_POINT_DECIMALS)
{
len= my_gcvt(num, MY_GCVT_ARG_DOUBLE, sizeof(buff) - 1, buff, NULL);
- return copy(buff, len, &my_charset_latin1, cs, &dummy_errors);
+ return copy(buff, (uint)len, &my_charset_latin1, cs, &dummy_errors);
}
len= my_fcvt(num, decimals, buff, NULL);
return copy(buff, (uint32) len, &my_charset_latin1, cs,
@@ -179,26 +182,45 @@ bool String::copy(const String &str)
return FALSE;
}
-bool String::copy(const char *str,uint32 arg_length, CHARSET_INFO *cs)
+bool String::copy(const char *str,size_t arg_length, CHARSET_INFO *cs)
{
+ DBUG_ASSERT(arg_length < UINT_MAX32);
if (alloc(arg_length))
return TRUE;
- if (Ptr == str && arg_length == str_length)
+ if (Ptr == str && arg_length == uint32(str_length))
{
/*
This can happen in some cases. This code is here mainly to avoid
warnings from valgrind, but can also be an indication of error.
*/
- DBUG_PRINT("warning", ("Copying string on itself: %p %u",
+ DBUG_PRINT("warning", ("Copying string on itself: %p %zu",
str, arg_length));
}
- else if ((str_length=arg_length))
+ else if ((str_length=uint32(arg_length)))
memcpy(Ptr,str,arg_length);
Ptr[arg_length]=0;
str_charset=cs;
return FALSE;
}
+/*
+ Copy string, where strings may overlap.
+ Same as String::copy, but use memmove instead of memcpy to avoid warnings
+ from valgrind
+*/
+
+bool String::copy_or_move(const char *str,size_t arg_length, CHARSET_INFO *cs)
+{
+ DBUG_ASSERT(arg_length < UINT_MAX32);
+ if (alloc(arg_length))
+ return TRUE;
+ if ((str_length=uint32(arg_length)))
+ memmove(Ptr,str,arg_length);
+ Ptr[arg_length]=0;
+ str_charset=cs;
+ return FALSE;
+}
+
/*
Checks that the source string can be just copied to the destination string
@@ -332,8 +354,9 @@ bool String::set_or_copy_aligned(const char *str,uint32 arg_length,
/* How many bytes are in incomplete character */
uint32 offset= (arg_length % cs->mbminlen);
- if (!offset) /* All characters are complete, just copy */
+ if (!offset)
{
+ /* All characters are complete, just use given string */
set(str, arg_length, cs);
return FALSE;
}
@@ -448,8 +471,10 @@ bool String::append(const String &s)
Append an ASCII string to the a string of the current character set
*/
-bool String::append(const char *s,uint32 arg_length)
+bool String::append(const char *s,size_t size)
{
+ DBUG_ASSERT(size <= UINT_MAX32);
+ uint32 arg_length= (uint32) size;
if (!arg_length)
return FALSE;
@@ -488,6 +513,14 @@ bool String::append(const char *s)
return append(s, (uint) strlen(s));
}
+bool String::append_longlong(longlong val)
+{
+ if (realloc(str_length+MAX_BIGINT_WIDTH+2))
+ return TRUE;
+ char *end= (char*) longlong10_to_str(val, (char*) Ptr + str_length, -10);
+ str_length= (uint32)(end - Ptr);
+ return FALSE;
+}
bool String::append_ulonglong(ulonglong val)
@@ -495,7 +528,7 @@ bool String::append_ulonglong(ulonglong val)
if (realloc(str_length+MAX_BIGINT_WIDTH+2))
return TRUE;
char *end= (char*) longlong10_to_str(val, (char*) Ptr + str_length, 10);
- str_length= end - Ptr;
+ str_length= (uint32) (end - Ptr);
return FALSE;
}
@@ -591,7 +624,7 @@ bool String::append_with_prefill(const char *s,uint32 arg_length,
uint32 String::numchars() const
{
- return str_charset->cset->numchars(str_charset, Ptr, Ptr+str_length);
+ return (uint32) str_charset->cset->numchars(str_charset, Ptr, Ptr+str_length);
}
int String::charpos(longlong i,uint32 offset)
@@ -608,8 +641,8 @@ int String::strstr(const String &s,uint32 offset)
if (!s.length())
return ((int) offset); // Empty string is always found
- register const char *str = Ptr+offset;
- register const char *search=s.ptr();
+ const char *str = Ptr+offset;
+ const char *search=s.ptr();
const char *end=Ptr+str_length-s.length()+1;
const char *search_end=s.ptr()+s.length();
skip:
@@ -617,7 +650,7 @@ skip:
{
if (*str++ == *search)
{
- register char *i,*j;
+ char *i,*j;
i=(char*) str; j=(char*) search+1;
while (j != search_end)
if (*i++ != *j++) goto skip;
@@ -638,8 +671,8 @@ int String::strrstr(const String &s,uint32 offset)
{
if (!s.length())
return offset; // Empty string is always found
- register const char *str = Ptr+offset-1;
- register const char *search=s.ptr()+s.length()-1;
+ const char *str = Ptr+offset-1;
+ const char *search=s.ptr()+s.length()-1;
const char *end=Ptr+s.length()-2;
const char *search_end=s.ptr()-1;
@@ -648,7 +681,7 @@ skip:
{
if (*str-- == *search)
{
- register char *i,*j;
+ char *i,*j;
i=(char*) str; j=(char*) search-1;
while (j != search_end)
if (*i-- != *j--) goto skip;
@@ -720,7 +753,7 @@ void String::qs_append(const char *str, uint32 len)
void String::qs_append(double d)
{
char *buff = Ptr + str_length;
- str_length+= my_gcvt(d, MY_GCVT_ARG_DOUBLE, FLOATING_POINT_BUFFER - 1, buff,
+ str_length+= (uint32) my_gcvt(d, MY_GCVT_ARG_DOUBLE, FLOATING_POINT_BUFFER - 1, buff,
NULL);
}
@@ -741,7 +774,7 @@ void String::qs_append(int i)
void String::qs_append(ulonglong i)
{
char *buff= Ptr + str_length;
- char *end= longlong10_to_str(i, buff,10);
+ char *end= longlong10_to_str(i, buff, 10);
str_length+= (int) (end-buff);
}
@@ -768,7 +801,7 @@ int sortcmp(const String *s,const String *t, CHARSET_INFO *cs)
{
return cs->coll->strnncollsp(cs,
(uchar *) s->ptr(),s->length(),
- (uchar *) t->ptr(),t->length(), 0);
+ (uchar *) t->ptr(),t->length());
}
@@ -1025,11 +1058,11 @@ String_copier::well_formed_copy(CHARSET_INFO *to_cs,
my_charset_same(from_cs, to_cs))
{
m_cannot_convert_error_pos= NULL;
- return to_cs->cset->copy_fix(to_cs, to, to_length, from, from_length,
- nchars, &m_native_copy_status);
+ return (uint) to_cs->cset->copy_fix(to_cs, to, to_length, from, from_length,
+ nchars, this);
}
- return my_convert_fix(to_cs, to, to_length, from_cs, from, from_length,
- nchars, this);
+ return (uint) my_convert_fix(to_cs, to, to_length, from_cs, from, from_length,
+ nchars, this, this);
}
@@ -1164,5 +1197,5 @@ uint convert_to_printable(char *to, size_t to_len,
memcpy(dots, STRING_WITH_LEN("...\0"));
else
*t= '\0';
- return t - to;
+ return (uint) (t - to);
}
diff --git a/sql/sql_string.h b/sql/sql_string.h
index ba28dbeea26..7a1701f6ef3 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -3,7 +3,7 @@
/*
Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2008, 2013, Monty Program Ab.
+ Copyright (c) 2008, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,11 +27,13 @@
#include "m_ctype.h" /* my_charset_bin */
#include "my_sys.h" /* alloc_root, my_free, my_realloc */
#include "m_string.h" /* TRASH */
+#include "sql_list.h"
class String;
typedef struct st_io_cache IO_CACHE;
typedef struct st_mem_root MEM_ROOT;
+#include "pack.h"
int sortcmp(const String *a,const String *b, CHARSET_INFO *cs);
String *copy_if_not_alloced(String *a,String *b,uint32 arg_length);
inline uint32 copy_and_convert(char *to, uint32 to_length,
@@ -43,13 +45,48 @@ inline uint32 copy_and_convert(char *to, uint32 to_length,
}
-class String_copier: private MY_STRCONV_STATUS
+class String_copy_status: protected MY_STRCOPY_STATUS
{
public:
const char *source_end_pos() const
- { return m_native_copy_status.m_source_end_pos; }
+ { return m_source_end_pos; }
const char *well_formed_error_pos() const
- { return m_native_copy_status.m_well_formed_error_pos; }
+ { return m_well_formed_error_pos; }
+};
+
+
+class Well_formed_prefix_status: public String_copy_status
+{
+public:
+ Well_formed_prefix_status(CHARSET_INFO *cs,
+ const char *str, const char *end, size_t nchars)
+ { cs->cset->well_formed_char_length(cs, str, end, nchars, this); }
+};
+
+
+class Well_formed_prefix: public Well_formed_prefix_status
+{
+ const char *m_str; // The beginning of the string
+public:
+ Well_formed_prefix(CHARSET_INFO *cs, const char *str, const char *end,
+ size_t nchars)
+ :Well_formed_prefix_status(cs, str, end, nchars), m_str(str)
+ { }
+ Well_formed_prefix(CHARSET_INFO *cs, const char *str, size_t length,
+ size_t nchars)
+ :Well_formed_prefix_status(cs, str, str + length, nchars), m_str(str)
+ { }
+ Well_formed_prefix(CHARSET_INFO *cs, const char *str, size_t length)
+ :Well_formed_prefix_status(cs, str, str + length, length), m_str(str)
+ { }
+ size_t length() const { return m_source_end_pos - m_str; }
+};
+
+
+class String_copier: public String_copy_status,
+ protected MY_STRCONV_STATUS
+{
+public:
const char *cannot_convert_error_pos() const
{ return m_cannot_convert_error_pos; }
const char *most_important_error_pos() const
@@ -61,12 +98,12 @@ public:
Convert a string between character sets.
"dstcs" and "srccs" cannot be &my_charset_bin.
*/
- uint convert_fix(CHARSET_INFO *dstcs, char *dst, uint dst_length,
- CHARSET_INFO *srccs, const char *src, uint src_length,
- uint nchars)
+ size_t convert_fix(CHARSET_INFO *dstcs, char *dst, uint dst_length,
+ CHARSET_INFO *srccs, const char *src, uint src_length,
+ uint nchars)
{
return my_convert_fix(dstcs, dst, dst_length,
- srccs, src, src_length, nchars, this);
+ srccs, src, src_length, nchars, this, this);
}
/*
Copy a string. Fix bad bytes/characters to '?'.
@@ -93,7 +130,7 @@ uint convert_to_printable(char *to, size_t to_len,
const char *from, size_t from_len,
CHARSET_INFO *from_cs, size_t nbytes= 0);
-class String
+class String : public Sql_alloc
{
char *Ptr;
uint32 str_length,Alloced_length, extra_alloc;
@@ -143,16 +180,6 @@ public:
alloced= thread_specific= 0;
str_charset=str.str_charset;
}
- static void *operator new(size_t size, MEM_ROOT *mem_root) throw ()
- { return (void*) alloc_root(mem_root, (uint) size); }
- static void operator delete(void *ptr_arg, size_t size)
- {
- (void) ptr_arg;
- (void) size;
- TRASH_FREE(ptr_arg, size);
- }
- static void operator delete(void *, MEM_ROOT *)
- { /* never called */ }
~String() { free(); }
/* Mark variable thread specific it it's not allocated already */
@@ -318,22 +345,22 @@ public:
Ptr=0;
str_length=0; /* Safety */
}
- inline bool alloc(uint32 arg_length)
+ inline bool alloc(size_t arg_length)
{
if (arg_length < Alloced_length)
return 0;
return real_alloc(arg_length);
}
- bool real_alloc(uint32 arg_length); // Empties old string
- bool realloc_raw(uint32 arg_length);
- bool realloc(uint32 arg_length)
+ bool real_alloc(size_t arg_length); // Empties old string
+ bool realloc_raw(size_t arg_length);
+ bool realloc(size_t arg_length)
{
if (realloc_raw(arg_length))
return TRUE;
Ptr[arg_length]=0; // This make other funcs shorter
return FALSE;
}
- bool realloc_with_extra(uint32 arg_length)
+ bool realloc_with_extra(size_t arg_length)
{
if (extra_alloc < 4096)
extra_alloc= extra_alloc*2+128;
@@ -342,7 +369,7 @@ public:
Ptr[arg_length]=0; // This make other funcs shorter
return FALSE;
}
- bool realloc_with_extra_if_needed(uint32 arg_length)
+ bool realloc_with_extra_if_needed(size_t arg_length)
{
if (arg_length < Alloced_length)
{
@@ -352,14 +379,16 @@ public:
return realloc_with_extra(arg_length);
}
// Shrink the buffer, but only if it is allocated on the heap.
- inline void shrink(uint32 arg_length)
+ inline void shrink(size_t arg_length)
{
if (!is_alloced())
return;
if (ALIGN_SIZE(arg_length+1) < Alloced_length)
{
char *new_ptr;
- if (!(new_ptr=(char*) my_realloc(Ptr,arg_length,MYF(0))))
+ if (!(new_ptr=(char*)
+ my_realloc(Ptr, arg_length,MYF((thread_specific ?
+ MY_THREAD_SPECIFIC : 0)))))
{
Alloced_length = 0;
real_alloc(arg_length);
@@ -367,7 +396,7 @@ public:
else
{
Ptr=new_ptr;
- Alloced_length=arg_length;
+ Alloced_length=(uint32)arg_length;
}
}
}
@@ -390,7 +419,8 @@ public:
bool copy(); // Alloc string if not alloced
bool copy(const String &s); // Allocate new string
- bool copy(const char *s,uint32 arg_length, CHARSET_INFO *cs); // Allocate new string
+ bool copy(const char *s,size_t arg_length, CHARSET_INFO *cs); // Allocate new string
+ bool copy_or_move(const char *s,size_t arg_length, CHARSET_INFO *cs);
static bool needs_conversion(uint32 arg_length,
CHARSET_INFO *cs_from, CHARSET_INFO *cs_to,
uint32 *offset);
@@ -428,13 +458,12 @@ public:
}
bool append(const String &s);
bool append(const char *s);
- bool append(const LEX_STRING *ls)
- {
- return append(ls->str, ls->length);
- }
- bool append(const char *s, uint32 arg_length);
- bool append(const char *s, uint32 arg_length, CHARSET_INFO *cs);
+ bool append(const LEX_STRING *ls) { return append(ls->str, (uint32) ls->length); }
+ bool append(const LEX_CSTRING *ls) { return append(ls->str, (uint32) ls->length); }
+ bool append(const char *s, size_t size);
+ bool append(const char *s, uint arg_length, CHARSET_INFO *cs);
bool append_ulonglong(ulonglong val);
+ bool append_longlong(longlong val);
bool append(IO_CACHE* file, uint32 arg_length);
bool append_with_prefill(const char *s, uint32 arg_length,
uint32 full_length, char fill_char);
@@ -495,6 +524,11 @@ public:
{
Ptr[str_length++] = c;
}
+ void q_append2b(const uint32 n)
+ {
+ int2store(Ptr + str_length, n);
+ str_length += 2;
+ }
void q_append(const uint32 n)
{
int4store(Ptr + str_length, n);
@@ -510,10 +544,11 @@ public:
float8store(Ptr + str_length, *d);
str_length += 8;
}
- void q_append(const char *data, uint32 data_len)
+ void q_append(const char *data, size_t data_len)
{
memcpy(Ptr + str_length, data, data_len);
- str_length += data_len;
+ DBUG_ASSERT(str_length <= UINT_MAX32 - data_len);
+ str_length += (uint)data_len;
}
void write_at_position(int position, uint32 value)
@@ -543,6 +578,12 @@ public:
qs_append((ulonglong)i);
}
void qs_append(ulonglong i);
+ void qs_append(longlong i, int radix)
+ {
+ char *buff= Ptr + str_length;
+ char *end= ll2str(i, buff, radix, 0);
+ str_length+= (int) (end-buff);
+ }
/* Inline (general) functions used by the protocol functions */
@@ -559,6 +600,7 @@ public:
return Ptr+ old_length; /* Area to use */
}
+
inline bool append(const char *s, uint32 arg_length, uint32 step_alloc)
{
uint32 new_length= arg_length + str_length;
@@ -585,7 +627,9 @@ public:
}
bool append_for_single_quote(const char *st)
{
- return append_for_single_quote(st, strlen(st));
+ size_t len= strlen(st);
+ DBUG_ASSERT(len < UINT_MAX32);
+ return append_for_single_quote(st, (uint32) len);
}
/* Swap two string objects. Efficient way to exchange data without memcpy. */
@@ -597,9 +641,7 @@ public:
}
uint well_formed_length() const
{
- int dummy_error;
- return charset()->cset->well_formed_len(charset(), ptr(), ptr() + length(),
- length(), &dummy_error);
+ return (uint) Well_formed_prefix(charset(), ptr(), length()).length();
}
bool is_ascii() const
{
@@ -623,6 +665,20 @@ public:
{
return !sortcmp(this, other, cs);
}
+ void q_net_store_length(ulonglong length)
+ {
+ DBUG_ASSERT(Alloced_length >= (str_length + net_length_size(length)));
+ char *pos= (char *) net_store_length((uchar *)(Ptr + str_length), length);
+ str_length= uint32(pos - Ptr);
+ }
+ void q_net_store_data(const uchar *from, size_t length)
+ {
+ DBUG_ASSERT(length < UINT_MAX32);
+ DBUG_ASSERT(Alloced_length >= (str_length + length +
+ net_length_size(length)));
+ q_net_store_length(length);
+ q_append((const char *)from, (uint32) length);
+ }
};
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 57333e7dc30..bb6945c7e01 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -25,20 +25,18 @@
#include "sql_table.h"
#include "sql_parse.h" // test_if_data_home_dir
#include "sql_cache.h" // query_cache_*
-#include "sql_base.h" // open_table_uncached, lock_table_names
+#include "sql_base.h" // lock_table_names
#include "lock.h" // mysql_unlock_tables
#include "strfunc.h" // find_type2, find_set
#include "sql_truncate.h" // regenerate_locked_table
#include "sql_partition.h" // mem_alloc_error,
- // generate_partition_syntax,
// partition_info
// NOT_A_PARTITION_ID
#include "sql_db.h" // load_db_opt_by_name
#include "sql_time.h" // make_truncated_value_warning
#include "records.h" // init_read_record, end_read_record
#include "filesort.h" // filesort_free_buffers
-#include "sql_select.h" // setup_order,
- // make_unireg_sortorder
+#include "sql_select.h" // setup_order
#include "sql_handler.h" // mysql_ha_rm_tables
#include "discover.h" // readfrm
#include "my_pthread.h" // pthread_mutex_t
@@ -56,6 +54,7 @@
#include "transaction.h"
#include "sql_audit.h"
+
#ifdef __WIN__
#include <io.h>
#endif
@@ -63,7 +62,11 @@
const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
-static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
+static char *make_unique_key_name(THD *thd, const char *field_name, KEY *start,
+ KEY *end);
+static void make_unique_constraint_name(THD *thd, LEX_STRING *name,
+ List<Virtual_column_info> *vcol,
+ uint *nr);
static int copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
List<Create_field> &create, bool ignore,
uint order_num, ORDER *order,
@@ -71,7 +74,7 @@ static int copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
Alter_info::enum_enable_or_disable keys_onoff,
Alter_table_ctx *alter_ctx);
-static bool prepare_blob_field(THD *thd, Create_field *sql_field);
+static bool prepare_blob_field(THD *thd, Column_definition *sql_field);
static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *,
uint *, handler *, KEY **, uint *, int);
static uint blob_length_by_type(enum_field_types type);
@@ -89,7 +92,7 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
{
uint res;
uint errors;
- const char *conv_name;
+ const char *conv_name, *conv_name_end;
char tmp_name[FN_REFLEN];
char conv_string[FN_REFLEN];
int quote;
@@ -110,22 +113,24 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
{
DBUG_PRINT("error", ("strconvert of '%s' failed with %u (errors: %u)", conv_name, res, errors));
conv_name= name;
+ conv_name_end= name + name_len;
}
else
{
DBUG_PRINT("info", ("conv '%s' -> '%s'", conv_name, conv_string));
conv_name= conv_string;
+ conv_name_end= conv_string + res;
}
- quote = thd ? get_quote_char_for_identifier(thd, conv_name, res - 1) : '"';
+ quote = thd ? get_quote_char_for_identifier(thd, conv_name, res - 1) : '`';
if (quote != EOF && (end_p - to_p > 2))
{
*(to_p++)= (char) quote;
while (*conv_name && (end_p - to_p - 1) > 0)
{
- uint length= my_mbcharlen(system_charset_info, *conv_name);
- if (!length)
+ int length= my_charlen(system_charset_info, conv_name, conv_name_end);
+ if (length <= 0)
length= 1;
if (length == 1 && *conv_name == (char) quote)
{
@@ -225,7 +230,7 @@ uint explain_filename(THD* thd,
{
db_name= table_name;
/* calculate the length */
- db_name_len= tmp_p - db_name;
+ db_name_len= (int)(tmp_p - db_name);
tmp_p++;
table_name= tmp_p;
}
@@ -247,7 +252,7 @@ uint explain_filename(THD* thd,
case 's':
if ((tmp_p[1] == 'P' || tmp_p[1] == 'p') && tmp_p[2] == '#')
{
- part_name_len= tmp_p - part_name - 1;
+ part_name_len= (int)(tmp_p - part_name - 1);
subpart_name= tmp_p + 3;
tmp_p+= 3;
}
@@ -279,7 +284,7 @@ uint explain_filename(THD* thd,
}
if (part_name)
{
- table_name_len= part_name - table_name - 3;
+ table_name_len= (int)(part_name - table_name - 3);
if (subpart_name)
subpart_name_len= strlen(subpart_name);
else
@@ -352,7 +357,7 @@ uint explain_filename(THD* thd,
to_p= strnmov(to_p, " */", end_p - to_p);
}
DBUG_PRINT("exit", ("to '%s'", to));
- DBUG_RETURN(to_p - to);
+ DBUG_RETURN((uint)(to_p - to));
}
@@ -548,7 +553,7 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db,
pos= strxnmov(pos, end - pos, tbbuff, ext, NullS);
DBUG_PRINT("exit", ("buff: '%s'", buff));
- DBUG_RETURN(pos - buff);
+ DBUG_RETURN((uint)(pos - buff));
}
@@ -572,7 +577,7 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
DBUG_ENTER("build_tmptable_filename");
char *p= strnmov(buff, mysql_tmpdir, bufflen);
- my_snprintf(p, bufflen - (p - buff), "/%s%lx_%lx_%x",
+ my_snprintf(p, bufflen - (p - buff), "/%s%lx_%llx_%x",
tmp_file_prefix, current_pid,
thd->thread_id, thd->tmp_table++);
@@ -1168,6 +1173,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
action in the log entry by stepping up the phase.
*/
}
+ /* fall through */
case DDL_LOG_RENAME_ACTION:
{
error= TRUE;
@@ -1639,7 +1645,7 @@ void execute_ddl_log_recovery()
/*
To be able to run this from boot, we allocate a temporary THD
*/
- if (!(thd=new THD))
+ if (!(thd=new THD(0)))
DBUG_VOID_RETURN;
thd->thread_stack= (char*) &thd;
thd->store_globals();
@@ -1813,15 +1819,10 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
partition_info *part_info= lpt->table->part_info;
if (part_info)
{
- if (!(part_syntax_buf= generate_partition_syntax(part_info,
- &syntax_len,
- TRUE, TRUE,
- lpt->create_info,
- lpt->alter_info,
- NULL)))
- {
+ part_syntax_buf= generate_partition_syntax_for_frm(lpt->thd, part_info,
+ &syntax_len, lpt->create_info, lpt->alter_info);
+ if (!part_syntax_buf)
DBUG_RETURN(TRUE);
- }
part_info->part_info_string= part_syntax_buf;
part_info->part_info_len= syntax_len;
}
@@ -1897,12 +1898,9 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
{
TABLE_SHARE *share= lpt->table->s;
char *tmp_part_syntax_str;
- if (!(part_syntax_buf= generate_partition_syntax(part_info,
- &syntax_len,
- TRUE, TRUE,
- lpt->create_info,
- lpt->alter_info,
- NULL)))
+ part_syntax_buf= generate_partition_syntax_for_frm(lpt->thd,
+ part_info, &syntax_len, lpt->create_info, lpt->alter_info);
+ if (!part_syntax_buf)
{
error= 1;
goto err;
@@ -2028,7 +2026,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
LEX_STRING db_name= { table->db, table->db_length };
LEX_STRING table_name= { table->table_name, table->table_name_length };
if (table->open_type == OT_BASE_ONLY ||
- !find_temporary_table(thd, table))
+ !thd->find_temporary_table(table))
(void) delete_statistics_for_table(thd, &db_name, &table_name);
}
}
@@ -2131,7 +2129,7 @@ static uint32 comment_length(THD *thd, uint32 comment_pos,
for (query+= 3; query < query_end; query++)
{
if (query[-1] == '*' && query[0] == '/')
- return (char*) query - *comment_start + 1;
+ return (uint32)((char*) query - *comment_start + 1);
}
return 0;
}
@@ -2270,9 +2268,9 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
size_t db_length= table->db_length;
handlerton *table_type= 0;
- DBUG_PRINT("table", ("table_l: '%s'.'%s' table: 0x%lx s: 0x%lx",
- table->db, table->table_name, (long) table->table,
- table->table ? (long) table->table->s : (long) -1));
+ DBUG_PRINT("table", ("table_l: '%s'.'%s' table: %p s: %p",
+ table->db, table->table_name, table->table,
+ table->table ? table->table->s : NULL));
/*
If we are in locked tables mode and are dropping a temporary table,
@@ -2281,25 +2279,20 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
*/
DBUG_ASSERT(!(thd->locked_tables_mode &&
table->open_type != OT_BASE_ONLY &&
- find_temporary_table(thd, table) &&
+ thd->find_temporary_table(table) &&
table->mdl_request.ticket != NULL));
- /*
- drop_temporary_table may return one of the following error codes:
- . 0 - a temporary table was successfully dropped.
- . 1 - a temporary table was not found.
- . -1 - a temporary table is used by an outer statement.
- */
if (table->open_type == OT_BASE_ONLY || !is_temporary_table(table))
error= 1;
else
{
table_creation_was_logged= table->table->s->table_creation_was_logged;
- if ((error= drop_temporary_table(thd, table->table, &is_trans)) == -1)
+ if (thd->drop_temporary_table(table->table, &is_trans, true))
{
- DBUG_ASSERT(thd->in_sub_stmt);
+ error= 1;
goto err;
}
+ error= 0;
table->table= 0;
}
@@ -2520,13 +2513,8 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
mysql_audit_drop_table(thd, table);
}
- DBUG_PRINT("table", ("table: 0x%lx s: 0x%lx", (long) table->table,
- table->table ? (long) table->table->s : (long) -1));
-
- DBUG_EXECUTE_IF("bug43138",
- my_printf_error(ER_BAD_TABLE_ERROR,
- ER_THD(thd, ER_BAD_TABLE_ERROR), MYF(0),
- table->table_name););
+ DBUG_PRINT("table", ("table: %p s: %p", table->table,
+ table->table ? table->table->s : NULL));
}
DEBUG_SYNC(thd, "rm_table_no_locks_before_binlog");
thd->thread_specific_used|= (trans_tmp_table_deleted ||
@@ -2537,12 +2525,9 @@ err:
{
DBUG_ASSERT(errors);
if (errors == 1 && was_view)
- my_printf_error(ER_IT_IS_A_VIEW, ER_THD(thd, ER_IT_IS_A_VIEW), MYF(0),
- wrong_tables.c_ptr_safe());
+ my_error(ER_IT_IS_A_VIEW, MYF(0), wrong_tables.c_ptr_safe());
else if (errors > 1 || !thd->is_error())
- my_printf_error(ER_BAD_TABLE_ERROR, ER_THD(thd, ER_BAD_TABLE_ERROR),
- MYF(0),
- wrong_tables.c_ptr_safe());
+ my_error(ER_BAD_TABLE_ERROR, MYF(0), wrong_tables.c_ptr_safe());
error= 1;
}
@@ -2722,7 +2707,7 @@ bool quick_rm_table(THD *thd, handlerton *base, const char *db,
bool error= 0;
DBUG_ENTER("quick_rm_table");
- uint path_length= table_path ?
+ size_t path_length= table_path ?
(strxnmov(path, sizeof(path) - 1, table_path, reg_ext, NullS) - path) :
build_table_filename(path, sizeof(path)-1, db, table_name, reg_ext, flags);
if (mysql_file_delete(key_file_frm, path, MYF(0)))
@@ -2905,11 +2890,12 @@ void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval,
1 Error
*/
-int prepare_create_field(Create_field *sql_field,
+int prepare_create_field(Column_definition *sql_field,
uint *blob_columns,
- longlong table_flags)
+ ulonglong table_flags)
{
- unsigned int dup_val_count;
+ uint dup_val_count;
+ uint decimals= sql_field->decimals;
DBUG_ENTER("prepare_create_field");
/*
@@ -2929,15 +2915,13 @@ int prepare_create_field(Create_field *sql_field,
if (sql_field->charset->state & MY_CS_BINSORT)
sql_field->pack_flag|=FIELDFLAG_BINARY;
sql_field->length=8; // Unireg field length
- sql_field->unireg_check=Field::BLOB_FIELD;
(*blob_columns)++;
break;
case MYSQL_TYPE_GEOMETRY:
#ifdef HAVE_SPATIAL
if (!(table_flags & HA_CAN_GEOMETRY))
{
- my_printf_error(ER_CHECK_NOT_IMPLEMENTED, ER(ER_CHECK_NOT_IMPLEMENTED),
- MYF(0), "GEOMETRY");
+ my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "GEOMETRY");
DBUG_RETURN(1);
}
sql_field->pack_flag=FIELDFLAG_GEOM |
@@ -2946,11 +2930,10 @@ int prepare_create_field(Create_field *sql_field,
if (sql_field->charset->state & MY_CS_BINSORT)
sql_field->pack_flag|=FIELDFLAG_BINARY;
sql_field->length=8; // Unireg field length
- sql_field->unireg_check=Field::BLOB_FIELD;
(*blob_columns)++;
break;
#else
- my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED), MYF(0),
+ my_error(ER_FEATURE_DISABLED, MYF(0),
sym_group_geom.name, sym_group_geom.needed_define);
DBUG_RETURN(1);
#endif /*HAVE_SPATIAL*/
@@ -2965,8 +2948,7 @@ int prepare_create_field(Create_field *sql_field,
if ((sql_field->length / sql_field->charset->mbmaxlen) >
MAX_FIELD_CHARLENGTH)
{
- my_printf_error(ER_TOO_BIG_FIELDLENGTH, ER(ER_TOO_BIG_FIELDLENGTH),
- MYF(0), sql_field->field_name,
+ my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name,
static_cast<ulong>(MAX_FIELD_CHARLENGTH));
DBUG_RETURN(1);
}
@@ -2983,7 +2965,6 @@ int prepare_create_field(Create_field *sql_field,
FIELDFLAG_INTERVAL;
if (sql_field->charset->state & MY_CS_BINSORT)
sql_field->pack_flag|=FIELDFLAG_BINARY;
- sql_field->unireg_check=Field::INTERVAL_FIELD;
if (check_duplicates_in_interval("ENUM",sql_field->field_name,
sql_field->interval,
sql_field->charset, &dup_val_count))
@@ -2994,7 +2975,6 @@ int prepare_create_field(Create_field *sql_field,
FIELDFLAG_BITFIELD;
if (sql_field->charset->state & MY_CS_BINSORT)
sql_field->pack_flag|=FIELDFLAG_BINARY;
- sql_field->unireg_check=Field::BIT_FIELD;
if (check_duplicates_in_interval("SET",sql_field->field_name,
sql_field->interval,
sql_field->charset, &dup_val_count))
@@ -3027,11 +3007,20 @@ int prepare_create_field(Create_field *sql_field,
FIELDFLAG_DECIMAL) |
(sql_field->flags & ZEROFILL_FLAG ?
FIELDFLAG_ZEROFILL : 0) |
- (sql_field->decimals << FIELDFLAG_DEC_SHIFT));
+ (decimals << FIELDFLAG_DEC_SHIFT));
break;
+ case MYSQL_TYPE_FLOAT:
+ case MYSQL_TYPE_DOUBLE:
+ /*
+ User specified FLOAT() or DOUBLE() without precision. Change to
+ FLOATING_POINT_DECIMALS to keep things compatible with earlier MariaDB
+ versions.
+ */
+ if (decimals >= FLOATING_POINT_DECIMALS)
+ decimals= FLOATING_POINT_DECIMALS;
+ /* fall through */
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_TIMESTAMP2:
- /* fall-through */
default:
sql_field->pack_flag=(FIELDFLAG_NUMBER |
(sql_field->flags & UNSIGNED_FLAG ? 0 :
@@ -3039,7 +3028,7 @@ int prepare_create_field(Create_field *sql_field,
(sql_field->flags & ZEROFILL_FLAG ?
FIELDFLAG_ZEROFILL : 0) |
f_settype((uint) sql_field->sql_type) |
- (sql_field->decimals << FIELDFLAG_DEC_SHIFT));
+ (decimals << FIELDFLAG_DEC_SHIFT));
break;
}
if (!(sql_field->flags & NOT_NULL_FLAG) ||
@@ -3092,19 +3081,20 @@ CHARSET_INFO* get_sql_field_charset(Create_field *sql_field,
@param column_definitions The list of column definitions, in the physical
order in which they appear in the table.
- */
+*/
+
void promote_first_timestamp_column(List<Create_field> *column_definitions)
{
- List_iterator<Create_field> it(*column_definitions);
+ List_iterator_fast<Create_field> it(*column_definitions);
Create_field *column_definition;
while ((column_definition= it++) != NULL)
{
- if (is_timestamp_type(column_definition->sql_type) || // TIMESTAMP
+ if (is_timestamp_type(column_definition->sql_type) || // TIMESTAMP
column_definition->unireg_check == Field::TIMESTAMP_OLD_FIELD) // Legacy
{
if ((column_definition->flags & NOT_NULL_FLAG) != 0 && // NOT NULL,
- column_definition->def == NULL && // no constant default,
+ column_definition->default_value == NULL && // no constant default,
column_definition->unireg_check == Field::NONE && // no function default
column_definition->vcol_info == NULL)
{
@@ -3142,8 +3132,8 @@ static void check_duplicate_key(THD *thd, Key *key, KEY *key_info,
if (!key->key_create_info.check_for_duplicate_indexes || key->generated)
return;
- List_iterator<Key> key_list_iterator(*key_list);
- List_iterator<Key_part_spec> key_column_iterator(key->columns);
+ List_iterator_fast<Key> key_list_iterator(*key_list);
+ List_iterator_fast<Key_part_spec> key_column_iterator(key->columns);
Key *k;
while ((k= key_list_iterator++))
@@ -3167,7 +3157,7 @@ static void check_duplicate_key(THD *thd, Key *key, KEY *key_info,
Check that the keys have identical columns in the same order.
*/
- List_iterator<Key_part_spec> k_column_iterator(k->columns);
+ List_iterator_fast<Key_part_spec> k_column_iterator(k->columns);
bool all_columns_are_identical= true;
@@ -3242,7 +3232,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
KEY_PART_INFO *key_part_info;
int field_no,dup_no;
int select_field_pos,auto_increment=0;
- List_iterator<Create_field> it(alter_info->create_list);
+ List_iterator_fast<Create_field> it(alter_info->create_list);
List_iterator<Create_field> it2(alter_info->create_list);
uint total_uneven_bit_length= 0;
int select_field_count= C_CREATE_SELECT(create_table_mode);
@@ -3285,41 +3275,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
!(sql_field->charset= find_bin_collation(sql_field->charset)))
DBUG_RETURN(TRUE);
- /*
- Convert the default value from client character
- set into the column character set if necessary.
- */
- if (sql_field->def &&
- save_cs != sql_field->def->collation.collation &&
- (sql_field->sql_type == MYSQL_TYPE_VAR_STRING ||
- sql_field->sql_type == MYSQL_TYPE_STRING ||
- sql_field->sql_type == MYSQL_TYPE_SET ||
- sql_field->sql_type == MYSQL_TYPE_ENUM))
- {
- /*
- Starting from 5.1 we work here with a copy of Create_field
- created by the caller, not with the instance that was
- originally created during parsing. It's OK to create
- a temporary item and initialize with it a member of the
- copy -- this item will be thrown away along with the copy
- at the end of execution, and thus not introduce a dangling
- pointer in the parsed tree of a prepared statement or a
- stored procedure statement.
- */
- sql_field->def= sql_field->def->safe_charset_converter(thd, save_cs);
-
- if (sql_field->def == NULL)
- {
- /* Could not convert */
- my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
- DBUG_RETURN(TRUE);
- }
- }
-
- /* Virtual fields are always NULL */
- if (sql_field->vcol_info)
- sql_field->flags&= ~NOT_NULL_FLAG;
-
if (sql_field->sql_type == MYSQL_TYPE_SET ||
sql_field->sql_type == MYSQL_TYPE_ENUM)
{
@@ -3385,36 +3340,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (sql_field->sql_type == MYSQL_TYPE_SET)
{
uint32 field_length;
- if (sql_field->def != NULL)
- {
- char *not_used;
- uint not_used2;
- bool not_found= 0;
- String str, *def= sql_field->def->val_str(&str);
- if (def == NULL) /* SQL "NULL" maps to NULL */
- {
- if ((sql_field->flags & NOT_NULL_FLAG) != 0)
- {
- my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
- DBUG_RETURN(TRUE);
- }
-
- /* else, NULL is an allowed value */
- (void) find_set(interval, NULL, 0,
- cs, &not_used, &not_used2, &not_found);
- }
- else /* not NULL */
- {
- (void) find_set(interval, def->ptr(), def->length(),
- cs, &not_used, &not_used2, &not_found);
- }
-
- if (not_found)
- {
- my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
- DBUG_RETURN(TRUE);
- }
- }
calculate_interval_lengths(cs, interval, &dummy, &field_length);
sql_field->length= field_length + (interval->count - 1);
}
@@ -3422,29 +3347,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
{
uint32 field_length;
DBUG_ASSERT(sql_field->sql_type == MYSQL_TYPE_ENUM);
- if (sql_field->def != NULL)
- {
- String str, *def= sql_field->def->val_str(&str);
- if (def == NULL) /* SQL "NULL" maps to NULL */
- {
- if ((sql_field->flags & NOT_NULL_FLAG) != 0)
- {
- my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
- DBUG_RETURN(TRUE);
- }
-
- /* else, the defaults yield the correct length for NULLs. */
- }
- else /* not NULL */
- {
- def->length(cs->cset->lengthsp(cs, def->ptr(), def->length()));
- if (find_type2(interval, def->ptr(), def->length(), cs) == 0) /* not found */
- {
- my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
- DBUG_RETURN(TRUE);
- }
- }
- }
calculate_interval_lengths(cs, interval, &field_length, &dummy);
sql_field->length= field_length;
}
@@ -3464,6 +3366,79 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (prepare_blob_field(thd, sql_field))
DBUG_RETURN(TRUE);
+ /*
+ Convert the default value from client character
+ set into the column character set if necessary.
+ We can only do this for constants as we have not yet run fix_fields.
+ */
+ if (sql_field->default_value &&
+ sql_field->default_value->expr->basic_const_item() &&
+ (!sql_field->field ||
+ sql_field->field->default_value != sql_field->default_value) &&
+ save_cs != sql_field->default_value->expr->collation.collation &&
+ (sql_field->sql_type == MYSQL_TYPE_VAR_STRING ||
+ sql_field->sql_type == MYSQL_TYPE_STRING ||
+ sql_field->sql_type == MYSQL_TYPE_SET ||
+ sql_field->sql_type == MYSQL_TYPE_TINY_BLOB ||
+ sql_field->sql_type == MYSQL_TYPE_MEDIUM_BLOB ||
+ sql_field->sql_type == MYSQL_TYPE_LONG_BLOB ||
+ sql_field->sql_type == MYSQL_TYPE_BLOB ||
+ sql_field->sql_type == MYSQL_TYPE_ENUM))
+ {
+ Item *item;
+ if (!(item= sql_field->default_value->expr->
+ safe_charset_converter(thd, save_cs)))
+ {
+ /* Could not convert */
+ my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
+ DBUG_RETURN(TRUE);
+ }
+ /* Fix for prepare statement */
+ thd->change_item_tree(&sql_field->default_value->expr, item);
+ }
+
+ /* Virtual fields are always NULL */
+ if (sql_field->vcol_info)
+ sql_field->flags&= ~NOT_NULL_FLAG;
+
+ if (sql_field->default_value &&
+ sql_field->default_value->expr->basic_const_item() &&
+ (sql_field->sql_type == MYSQL_TYPE_SET ||
+ sql_field->sql_type == MYSQL_TYPE_ENUM))
+ {
+ StringBuffer<MAX_FIELD_WIDTH> str;
+ String *def= sql_field->default_value->expr->val_str(&str);
+ bool not_found;
+ if (def == NULL) /* SQL "NULL" maps to NULL */
+ {
+ not_found= sql_field->flags & NOT_NULL_FLAG;
+ }
+ else
+ {
+ not_found= false;
+ if (sql_field->sql_type == MYSQL_TYPE_SET)
+ {
+ char *not_used;
+ uint not_used2;
+ find_set(sql_field->interval, def->ptr(), def->length(),
+ sql_field->charset, &not_used, &not_used2, &not_found);
+ }
+ else /* MYSQL_TYPE_ENUM */
+ {
+ def->length(sql_field->charset->cset->lengthsp(sql_field->charset,
+ def->ptr(), def->length()));
+ not_found= !find_type2(sql_field->interval, def->ptr(),
+ def->length(), sql_field->charset);
+ }
+ }
+
+ if (not_found)
+ {
+ my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
+ DBUG_RETURN(TRUE);
+ }
+ }
+
if (!(sql_field->flags & NOT_NULL_FLAG))
null_fields++;
@@ -3501,7 +3476,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
file->ha_table_flags() & HA_CAN_BIT_FIELD)
total_uneven_bit_length-= sql_field->length & 7;
- sql_field->def= dup_field->def;
+ sql_field->default_value= dup_field->default_value;
sql_field->sql_type= dup_field->sql_type;
/*
@@ -3536,7 +3511,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
sql_field->create_length_to_internal_length();
sql_field->interval= dup_field->interval;
sql_field->vcol_info= dup_field->vcol_info;
- sql_field->stored_in_db= dup_field->stored_in_db;
it2.remove(); // Remove first (create) definition
select_field_pos--;
break;
@@ -3578,14 +3552,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
(virtual fields) and update their offset later
(see the next loop).
*/
- if (sql_field->stored_in_db)
+ if (sql_field->stored_in_db())
record_offset+= sql_field->pack_length;
}
/* Update virtual fields' offset*/
it.rewind();
while ((sql_field=it++))
{
- if (!sql_field->stored_in_db)
+ if (!sql_field->stored_in_db())
{
sql_field->offset= record_offset;
record_offset+= sql_field->pack_length;
@@ -3616,7 +3590,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
therefore mark it as unsafe.
*/
if (select_field_count > 0 && auto_increment)
- thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_SELECT_AUTOINC);
+ thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_SELECT_AUTOINC);
/* Create keys */
@@ -3856,9 +3830,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (!my_strcasecmp(system_charset_info,
column->field_name.str, dup_column->field_name.str))
{
- my_printf_error(ER_DUP_FIELDNAME,
- ER_THD(thd, ER_DUP_FIELDNAME),MYF(0),
- column->field_name.str);
+ my_error(ER_DUP_FIELDNAME, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
}
@@ -3934,16 +3906,21 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
}
#endif
- if (!sql_field->stored_in_db)
+ if (sql_field->vcol_info)
{
- /* Key fields must always be physically stored. */
- my_error(ER_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (key->type == Key::PRIMARY && sql_field->vcol_info)
- {
- my_error(ER_PRIMARY_KEY_BASED_ON_VIRTUAL_COLUMN, MYF(0));
- DBUG_RETURN(TRUE);
+ if (key->type == Key::PRIMARY)
+ {
+ my_error(ER_PRIMARY_KEY_BASED_ON_GENERATED_COLUMN, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (sql_field->vcol_info->flags & VCOL_NOT_STRICTLY_DETERMINISTIC)
+ {
+ /* use check_expression() to report an error */
+ check_expression(sql_field->vcol_info, sql_field->field_name,
+ VCOL_GENERATED_STORED);
+ DBUG_ASSERT(thd->is_error());
+ DBUG_RETURN(TRUE);
+ }
}
if (!(sql_field->flags & NOT_NULL_FLAG))
{
@@ -3996,7 +3973,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key->type == Key::MULTIPLE)
{
/* not a critical problem */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TOO_LONG_KEY,
ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
@@ -4093,7 +4070,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
primary_key=1;
}
else if (!(key_name= key->name.str))
- key_name=make_unique_key_name(sql_field->field_name,
+ key_name=make_unique_key_name(thd, sql_field->field_name,
*key_info_buffer, key_info);
if (check_if_keyname_exists(key_name, *key_info_buffer, key_info))
{
@@ -4163,7 +4140,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
it is NOT NULL, not an AUTO_INCREMENT field, not a TIMESTAMP and not
updated trough a NOW() function.
*/
- if (!sql_field->def &&
+ if (!sql_field->default_value &&
!sql_field->has_default_function() &&
(sql_field->flags & NOT_NULL_FLAG) &&
(!is_timestamp_type(sql_field->sql_type) ||
@@ -4174,7 +4151,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
if (thd->variables.sql_mode & MODE_NO_ZERO_DATE &&
- !sql_field->def && !sql_field->vcol_info &&
+ !sql_field->default_value && !sql_field->vcol_info &&
is_timestamp_type(sql_field->sql_type) &&
!opt_explicit_defaults_for_timestamp &&
(sql_field->flags & NOT_NULL_FLAG) &&
@@ -4199,6 +4176,46 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
}
+ /* Check table level constraints */
+ create_info->check_constraint_list= &alter_info->check_constraint_list;
+ {
+ uint nr= 1;
+ List_iterator_fast<Virtual_column_info> c_it(alter_info->check_constraint_list);
+ Virtual_column_info *check;
+ while ((check= c_it++))
+ {
+ if (!check->name.length)
+ make_unique_constraint_name(thd, &check->name,
+ &alter_info->check_constraint_list,
+ &nr);
+ {
+ /* Check that there's no repeating constraint names. */
+ List_iterator_fast<Virtual_column_info>
+ dup_it(alter_info->check_constraint_list);
+ Virtual_column_info *dup_check;
+ while ((dup_check= dup_it++) && dup_check != check)
+ {
+ if (check->name.length == dup_check->name.length &&
+ my_strcasecmp(system_charset_info,
+ check->name.str, dup_check->name.str) == 0)
+ {
+ my_error(ER_DUP_CONSTRAINT_NAME, MYF(0), "CHECK", check->name.str);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+
+ if (check_string_char_length(&check->name, 0, NAME_CHAR_LEN,
+ system_charset_info, 1))
+ {
+ my_error(ER_TOO_LONG_IDENT, MYF(0), check->name.str);
+ DBUG_RETURN(TRUE);
+ }
+ if (check_expression(check, check->name.str, VCOL_CHECK_TABLE))
+ DBUG_RETURN(TRUE);
+ }
+ }
+
/* Give warnings for not supported table options */
#if defined(WITH_ARIA_STORAGE_ENGINE)
extern handlerton *maria_hton;
@@ -4267,7 +4284,7 @@ bool validate_comment_length(THD *thd, LEX_STRING *comment, size_t max_len,
create_info Table create information
DESCRIPTION
- If the table character set was not given explicitely,
+ If the table character set was not given explicitly,
let's fetch the database default character set and
apply it to the table.
*/
@@ -4304,7 +4321,7 @@ static void set_table_default_charset(THD *thd,
In this case the error is given
*/
-static bool prepare_blob_field(THD *thd, Create_field *sql_field)
+static bool prepare_blob_field(THD *thd, Column_definition *sql_field)
{
DBUG_ENTER("prepare_blob_field");
@@ -4314,7 +4331,7 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field)
/* Convert long VARCHAR columns to TEXT or BLOB */
char warn_buff[MYSQL_ERRMSG_SIZE];
- if (sql_field->def || thd->is_strict_mode())
+ if (thd->is_strict_mode())
{
my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name,
static_cast<ulong>(MAX_FIELD_VARCHARLENGTH /
@@ -4339,7 +4356,7 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field)
sql_field->sql_type == FIELD_TYPE_MEDIUM_BLOB)
{
/* The user has given a length to the blob column */
- sql_field->sql_type= get_blob_type_from_length(sql_field->length);
+ sql_field->sql_type= get_blob_type_from_length((ulong)sql_field->length);
sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0);
}
sql_field->length= 0;
@@ -4363,7 +4380,7 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field)
*/
-void sp_prepare_create_field(THD *thd, Create_field *sql_field)
+void sp_prepare_create_field(THD *thd, Column_definition *sql_field)
{
if (sql_field->sql_type == MYSQL_TYPE_SET ||
sql_field->sql_type == MYSQL_TYPE_ENUM)
@@ -4393,7 +4410,7 @@ void sp_prepare_create_field(THD *thd, Create_field *sql_field)
FIELDFLAG_TREAT_BIT_AS_CHAR;
}
sql_field->create_length_to_internal_length();
- DBUG_ASSERT(sql_field->def == 0);
+ DBUG_ASSERT(sql_field->default_value == 0);
/* Can't go wrong as sql_field->def is not defined */
(void) prepare_blob_field(thd, sql_field);
}
@@ -4542,12 +4559,9 @@ handler *mysql_create_frm_image(THD *thd,
We reverse the partitioning parser and generate a standard format
for syntax stored in frm file.
*/
- if (!(part_syntax_buf= generate_partition_syntax(part_info,
- &syntax_len,
- TRUE, TRUE,
- create_info,
- alter_info,
- NULL)))
+ part_syntax_buf= generate_partition_syntax_for_frm(thd, part_info,
+ &syntax_len, create_info, alter_info);
+ if (!part_syntax_buf)
goto err;
part_info->part_info_string= part_syntax_buf;
part_info->part_info_len= syntax_len;
@@ -4673,7 +4687,8 @@ err:
which was created.
@param[out] key_count Number of keys in table which was created.
- If one creates a temporary table, this is automatically opened
+ If one creates a temporary table, it is automatically opened and its
+ TABLE_SHARE is added to THD::all_temp_tables list.
Note that this function assumes that caller already have taken
exclusive metadata lock on table being created or used some other
@@ -4733,20 +4748,22 @@ int create_table_impl(THD *thd,
/* Check if table exists */
if (create_info->tmp_table())
{
- TABLE *tmp_table;
- if (find_and_use_temporary_table(thd, db, table_name, &tmp_table))
- goto err;
+ /*
+ If a table exists, it must have been pre-opened. Try looking for one
+ in-use in THD::all_temp_tables list of TABLE_SHAREs.
+ */
+ TABLE *tmp_table= thd->find_temporary_table(db, table_name);
+
if (tmp_table)
{
bool table_creation_was_logged= tmp_table->s->table_creation_was_logged;
if (options.or_replace())
{
- bool tmp;
/*
We are using CREATE OR REPLACE on an existing temporary table
Remove the old table so that we can re-create it.
*/
- if (drop_temporary_table(thd, tmp_table, &tmp))
+ if (thd->drop_temporary_table(tmp_table, NULL, true))
goto err;
}
else if (options.if_not_exists())
@@ -4879,7 +4896,12 @@ int create_table_impl(THD *thd,
file= mysql_create_frm_image(thd, orig_db, orig_table_name, create_info,
alter_info, create_table_mode, key_info,
key_count, frm);
- if (!file)
+ /*
+ TODO: remove this check of thd->is_error() (now it intercept
+ errors in some val_*() methods and brings a single place for
+ such error interception).
+ */
+ if (!file || thd->is_error())
goto err;
if (rea_create_table(thd, frm, path, db, table_name, create_info,
file, frm_only))
@@ -4889,17 +4911,12 @@ int create_table_impl(THD *thd,
create_info->table= 0;
if (!frm_only && create_info->tmp_table())
{
- /*
- Open a table (skipping table cache) and add it into
- THD::temporary_tables list.
- */
-
- TABLE *table= open_table_uncached(thd, create_info->db_type, frm, path,
- db, table_name, true, true);
+ TABLE *table= thd->create_and_open_tmp_table(create_info->db_type, frm,
+ path, db, table_name, true);
if (!table)
{
- (void) rm_temporary_table(create_info->db_type, path);
+ (void) thd->rm_temporary_table(create_info->db_type, path);
goto err;
}
@@ -4931,7 +4948,7 @@ int create_table_impl(THD *thd,
open_table_from_share(thd, &share, "", 0, (uint) READ_ALL,
0, &table, true));
if (!result)
- (void) closefrm(&table, 0);
+ (void) closefrm(&table);
free_table_share(&share);
@@ -4990,7 +5007,8 @@ int mysql_create_table_no_lock(THD *thd,
// Check if we hit FN_REFLEN bytes along with file extension.
if (length+reg_ext_length > FN_REFLEN)
{
- my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), sizeof(path)-1, path);
+ my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), (int) sizeof(path)-1,
+ path);
return true;
}
}
@@ -5141,7 +5159,7 @@ check_if_keyname_exists(const char *name, KEY *start, KEY *end)
static char *
-make_unique_key_name(const char *field_name,KEY *start,KEY *end)
+make_unique_key_name(THD *thd, const char *field_name,KEY *start,KEY *end)
{
char buff[MAX_FIELD_NAME],*buff_end;
@@ -5159,11 +5177,43 @@ make_unique_key_name(const char *field_name,KEY *start,KEY *end)
*buff_end= '_';
int10_to_str(i, buff_end+1, 10);
if (!check_if_keyname_exists(buff,start,end))
- return sql_strdup(buff);
+ return thd->strdup(buff);
}
return (char*) "not_specified"; // Should never happen
}
+/**
+ Make an unique name for constraints without a name
+*/
+
+static void make_unique_constraint_name(THD *thd, LEX_STRING *name,
+ List<Virtual_column_info> *vcol,
+ uint *nr)
+{
+ char buff[MAX_FIELD_NAME], *end;
+ List_iterator_fast<Virtual_column_info> it(*vcol);
+
+ end=strmov(buff, "CONSTRAINT_");
+ for (;;)
+ {
+ Virtual_column_info *check;
+ char *real_end= int10_to_str((*nr)++, end, 10);
+ it.rewind();
+ while ((check= it++))
+ {
+ if (check->name.str &&
+ !my_strcasecmp(system_charset_info, buff, check->name.str))
+ break;
+ }
+ if (!check) // Found unique name
+ {
+ name->length= (size_t) (real_end - buff);
+ name->str= thd->strmake(buff, name->length);
+ return;
+ }
+ }
+}
+
/****************************************************************************
** Alter a table definition
@@ -5222,7 +5272,7 @@ mysql_rename_table(handlerton *base, const char *old_db,
// Check if we hit FN_REFLEN bytes along with file extension.
if (length+reg_ext_length > FN_REFLEN)
{
- my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), sizeof(to)-1, to);
+ my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), (int) sizeof(to)-1, to);
DBUG_RETURN(TRUE);
}
@@ -5885,7 +5935,19 @@ drop_create_field:
}
}
}
- else /* Alter_drop::KEY */
+ else if (drop->type == Alter_drop::CHECK_CONSTRAINT)
+ {
+ for (uint i=table->s->field_check_constraints; i < table->s->table_check_constraints; i++)
+ {
+ if (my_strcasecmp(system_charset_info, drop->name,
+ table->check_constraints[i]->name.str) == 0)
+ {
+ remove_drop= FALSE;
+ break;
+ }
+ }
+ }
+ else /* Alter_drop::KEY and Alter_drop::FOREIGN_KEY */
{
uint n_key;
if (drop->type != Alter_drop::FOREIGN_KEY)
@@ -5942,7 +6004,7 @@ drop_create_field:
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_CANT_DROP_FIELD_OR_KEY,
ER_THD(thd, ER_CANT_DROP_FIELD_OR_KEY),
- drop->name);
+ drop->type_name(), drop->name);
drop_it.remove();
}
else
@@ -6029,7 +6091,7 @@ drop_create_field:
Key_part_spec *kp;
if ((kp= part_it++))
chkname= kp->field_name.str;
- if (keyname == NULL)
+ if (chkname == NULL)
continue;
}
if (key->type == chk_key->type &&
@@ -6132,6 +6194,39 @@ remove_key:
}
#endif /*WITH_PARTITION_STORAGE_ENGINE*/
+ /* ADD CONSTRAINT IF NOT EXISTS. */
+ {
+ List_iterator<Virtual_column_info> it(alter_info->check_constraint_list);
+ Virtual_column_info *check;
+ TABLE_SHARE *share= table->s;
+ uint c;
+ while ((check=it++))
+ {
+ if (!(check->flags & Alter_info::CHECK_CONSTRAINT_IF_NOT_EXISTS) &&
+ check->name.length)
+ continue;
+ check->flags= 0;
+ for (c= share->field_check_constraints;
+ c < share->table_check_constraints ; c++)
+ {
+ Virtual_column_info *dup= table->check_constraints[c];
+ if (dup->name.length == check->name.length &&
+ my_strcasecmp(system_charset_info,
+ check->name.str, dup->name.str) == 0)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_DUP_CONSTRAINT_NAME, ER_THD(thd, ER_DUP_CONSTRAINT_NAME),
+ "CHECK", check->name.str);
+ it.remove();
+ if (alter_info->check_constraint_list.elements == 0)
+ alter_info->flags&= ~Alter_info::ALTER_ADD_CHECK_CONSTRAINT;
+
+ break;
+ }
+ }
+ }
+ }
+
DBUG_VOID_RETURN;
}
@@ -6207,7 +6302,7 @@ static int compare_uint(const uint *s, const uint *t)
static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
Alter_inplace_info *ha_alter_info)
{
- Field **f_ptr, *field;
+ Field **f_ptr, *field, *old_field;
List_iterator_fast<Create_field> new_field_it;
Create_field *new_field;
KEY_PART_INFO *key_part, *new_part;
@@ -6223,11 +6318,6 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
alter_info->key_list.elements)))
DBUG_RETURN(true);
- /* First we setup ha_alter_flags based on what was detected by parser. */
- if (alter_info->flags & Alter_info::ALTER_ADD_COLUMN)
- ha_alter_info->handler_flags|= Alter_inplace_info::ADD_COLUMN;
- if (alter_info->flags & Alter_info::ALTER_DROP_COLUMN)
- ha_alter_info->handler_flags|= Alter_inplace_info::DROP_COLUMN;
/*
Comparing new and old default values of column is cumbersome.
So instead of using such a comparison for detecting if default
@@ -6265,13 +6355,17 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
/* Check for: ALTER TABLE FORCE, ALTER TABLE ENGINE and OPTIMIZE TABLE. */
if (alter_info->flags & Alter_info::ALTER_RECREATE)
ha_alter_info->handler_flags|= Alter_inplace_info::RECREATE_TABLE;
+ if (alter_info->flags & Alter_info::ALTER_ADD_CHECK_CONSTRAINT)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_ADD_CHECK_CONSTRAINT;
+ if (alter_info->flags & Alter_info::ALTER_DROP_CHECK_CONSTRAINT)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_DROP_CHECK_CONSTRAINT;
/*
If we altering table with old VARCHAR fields we will be automatically
upgrading VARCHAR column types.
*/
if (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar)
- ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE;
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_STORED_COLUMN_TYPE;
/*
Go through fields in old version of table and detect changes to them.
@@ -6285,20 +6379,23 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
about nature of changes than those provided from parser.
*/
bool maybe_alter_vcol= false;
- for (f_ptr= table->field; (field= *f_ptr); f_ptr++)
+ uint field_stored_index= 0;
+ for (f_ptr= table->field; (field= *f_ptr); f_ptr++,
+ field_stored_index+= field->stored_in_db())
{
/* Clear marker for renamed or dropped field
which we are going to set later. */
field->flags&= ~(FIELD_IS_RENAMED | FIELD_IS_DROPPED);
/* Use transformed info to evaluate flags for storage engine. */
- uint new_field_index= 0;
+ uint new_field_index= 0, new_field_stored_index= 0;
new_field_it.init(alter_info->create_list);
while ((new_field= new_field_it++))
{
if (new_field->field == field)
break;
new_field_index++;
+ new_field_stored_index+= new_field->stored_in_db();
}
if (new_field)
@@ -6313,7 +6410,12 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
{
case IS_EQUAL_NO:
/* New column type is incompatible with old one. */
- ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE;
+ if (field->stored_in_db())
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_STORED_COLUMN_TYPE;
+ else
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_VIRTUAL_COLUMN_TYPE;
if (table->s->tmp_table == NO_TMP_TABLE)
{
delete_statistics_for_column(thd, table, field);
@@ -6358,22 +6460,59 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
default:
DBUG_ASSERT(0);
/* Safety. */
- ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE;
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_STORED_COLUMN_TYPE;
}
- /*
- Check if the column is computed and either
- is stored or is used in the partitioning expression.
- */
- if (field->vcol_info &&
- (field->stored_in_db || field->vcol_info->is_in_partitioning_expr()))
+ if (field->vcol_info || new_field->vcol_info)
{
- if (is_equal == IS_EQUAL_NO ||
- !new_field->vcol_info ||
- !field->vcol_info->is_equal(new_field->vcol_info))
- ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL;
- else
- maybe_alter_vcol= true;
+ /* base <-> virtual or stored <-> virtual */
+ if (field->stored_in_db() != new_field->stored_in_db())
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_STORED_COLUMN_TYPE |
+ Alter_inplace_info::ALTER_VIRTUAL_COLUMN_TYPE;
+ if (field->vcol_info && new_field->vcol_info)
+ {
+ bool value_changes= is_equal == IS_EQUAL_NO;
+ Alter_inplace_info::HA_ALTER_FLAGS alter_expr;
+ if (field->stored_in_db())
+ alter_expr= Alter_inplace_info::ALTER_STORED_GCOL_EXPR;
+ else
+ alter_expr= Alter_inplace_info::ALTER_VIRTUAL_GCOL_EXPR;
+ if (!field->vcol_info->is_equal(new_field->vcol_info))
+ {
+ ha_alter_info->handler_flags|= alter_expr;
+ value_changes= true;
+ }
+
+ if ((ha_alter_info->handler_flags & Alter_inplace_info::ALTER_COLUMN_DEFAULT)
+ && !(ha_alter_info->handler_flags & alter_expr))
+ { /*
+ a DEFAULT value of some column was changed. See if this vcol
+ uses DEFAULT() function. The check is kind of expensive, so don't
+ do it if ALTER_COLUMN_VCOL is already set.
+ */
+ if (field->vcol_info->expr->walk(
+ &Item::check_func_default_processor, 0, 0))
+ {
+ ha_alter_info->handler_flags|= alter_expr;
+ value_changes= true;
+ }
+ }
+
+ if (field->vcol_info->is_in_partitioning_expr() ||
+ field->flags & PART_KEY_FLAG)
+ {
+ if (value_changes)
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_COLUMN_VCOL;
+ else
+ maybe_alter_vcol= true;
+ }
+ }
+ else /* base <-> stored */
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_STORED_COLUMN_TYPE;
}
/* Check if field was renamed */
@@ -6406,8 +6545,18 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
/*
Detect changes in column order.
*/
- if (field->field_index != new_field_index)
- ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_ORDER;
+ if (field->stored_in_db())
+ {
+ if (field_stored_index != new_field_stored_index)
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_STORED_COLUMN_ORDER;
+ }
+ else
+ {
+ if (field->field_index != new_field_index)
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_VIRTUAL_COLUMN_ORDER;
+ }
/* Detect changes in storage type of column */
if (new_field->field_storage_type() != field->field_storage_type())
@@ -6430,27 +6579,28 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
}
else
{
- /*
- Field is not present in new version of table and therefore was dropped.
- Corresponding storage engine flag should be already set.
- */
- DBUG_ASSERT(ha_alter_info->handler_flags & Alter_inplace_info::DROP_COLUMN);
+ // Field is not present in new version of table and therefore was dropped.
field->flags|= FIELD_IS_DROPPED;
+ if (field->stored_in_db())
+ ha_alter_info->handler_flags|= Alter_inplace_info::DROP_STORED_COLUMN;
+ else
+ ha_alter_info->handler_flags|= Alter_inplace_info::DROP_VIRTUAL_COLUMN;
}
}
if (maybe_alter_vcol)
{
/*
- No virtual column was altered, but perhaps one of the other columns was,
- and that column was part of the vcol expression?
- We don't detect this correctly (FIXME), so let's just say that a vcol
- *might* be affected if any other column was altered.
+ What if one of the normal columns was altered and it was part of some
+ virtual column expression? Currently we don't detect this correctly
+ (FIXME), so let's just say that a vcol *might* be affected if any other
+ column was altered.
*/
if (ha_alter_info->handler_flags &
- ( Alter_inplace_info::ALTER_COLUMN_TYPE
- | Alter_inplace_info::ALTER_COLUMN_NOT_NULLABLE
- | Alter_inplace_info::ALTER_COLUMN_OPTION ))
+ ( Alter_inplace_info::ALTER_STORED_COLUMN_TYPE
+ | Alter_inplace_info::ALTER_VIRTUAL_COLUMN_TYPE
+ | Alter_inplace_info::ALTER_COLUMN_NOT_NULLABLE
+ | Alter_inplace_info::ALTER_COLUMN_OPTION ))
ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL;
}
@@ -6459,18 +6609,17 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
{
if (! new_field->field)
{
- /*
- Field is not present in old version of table and therefore was added.
- Again corresponding storage engine flag should be already set.
- */
- DBUG_ASSERT(ha_alter_info->handler_flags & Alter_inplace_info::ADD_COLUMN);
-
- if (new_field->vcol_info &&
- (new_field->stored_in_db || new_field->vcol_info->is_in_partitioning_expr()))
- {
- ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL;
- }
- break;
+ // Field is not present in old version of table and therefore was added.
+ if (new_field->vcol_info)
+ if (new_field->stored_in_db())
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ADD_STORED_GENERATED_COLUMN;
+ else
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ADD_VIRTUAL_COLUMN;
+ else
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ADD_STORED_BASE_COLUMN;
}
}
@@ -6478,6 +6627,7 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
Go through keys and check if the original ones are compatible
with new table.
*/
+ uint old_field_len= 0;
KEY *table_key;
KEY *table_key_end= table->key_info + table->s->keys;
KEY *new_key;
@@ -6547,17 +6697,35 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
key_part < end;
key_part++, new_part++)
{
+ new_field= get_field_by_index(alter_info, new_part->fieldnr);
+ old_field= table->field[key_part->fieldnr - 1];
/*
+ If there is a change in index length due to column expansion
+ like varchar(X) changed to varchar(X + N) and has a compatible
+ packed data representation, we mark it for fast/INPLACE change
+ in index definition. InnoDB supports INPLACE for this cases
+
Key definition has changed if we are using a different field or
- if the used key part length is different. It makes sense to
- check lengths first as in case when fields differ it is likely
- that lengths differ too and checking fields is more expensive
- in general case.
+ if the used key part length is different.
*/
- if (key_part->length != new_part->length)
- goto index_changed;
+ old_field_len= old_field->pack_length();
- new_field= get_field_by_index(alter_info, new_part->fieldnr);
+ if (old_field->type() == MYSQL_TYPE_VARCHAR)
+ {
+ old_field_len= (old_field->pack_length()
+ - ((Field_varstring*) old_field)->length_bytes);
+ }
+
+ if (key_part->length == old_field_len &&
+ key_part->length < new_part->length &&
+ (key_part->field->is_equal((Create_field*) new_field)
+ == IS_EQUAL_PACK_LENGTH))
+ {
+ ha_alter_info->handler_flags |=
+ Alter_inplace_info::ALTER_COLUMN_INDEX_LENGTH;
+ }
+ else if (key_part->length != new_part->length)
+ goto index_changed;
/*
For prefix keys KEY_PART_INFO::field points to cloned Field
@@ -6594,7 +6762,7 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
table_key;
ha_alter_info->index_add_buffer
[ha_alter_info->index_add_count++]=
- new_key - ha_alter_info->key_info_buffer;
+ (uint)(new_key - ha_alter_info->key_info_buffer);
/* Mark all old fields which are used in newly created index. */
DBUG_PRINT("info", ("index changed: '%s'", table_key->name));
}
@@ -6618,7 +6786,7 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
/* Key not found. Add the offset of the key to the add buffer. */
ha_alter_info->index_add_buffer
[ha_alter_info->index_add_count++]=
- new_key - ha_alter_info->key_info_buffer;
+ (uint)(new_key - ha_alter_info->key_info_buffer);
DBUG_PRINT("info", ("index added: '%s'", new_key->name));
}
else
@@ -6997,6 +7165,14 @@ static bool is_inplace_alter_impossible(TABLE *table,
if (!table->s->mysql_version)
DBUG_RETURN(true);
+ /*
+ If we are using a MySQL 5.7 table with virtual fields, ALTER TABLE must
+ recreate the table as we need to rewrite generated fields
+ */
+ if (table->s->mysql_version > 50700 && table->s->mysql_version < 100000 &&
+ table->s->virtual_fields)
+ DBUG_RETURN(TRUE);
+
DBUG_RETURN(false);
}
@@ -7046,6 +7222,7 @@ static bool mysql_inplace_alter_table(THD *thd,
HA_CREATE_INFO *create_info= ha_alter_info->create_info;
Alter_info *alter_info= ha_alter_info->alter_info;
bool reopen_tables= false;
+ bool res;
DBUG_ENTER("mysql_inplace_alter_table");
@@ -7180,11 +7357,12 @@ static bool mysql_inplace_alter_table(THD *thd,
DEBUG_SYNC(thd, "alter_table_inplace_after_lock_downgrade");
THD_STAGE_INFO(thd, stage_alter_inplace);
- if (table->file->ha_inplace_alter_table(altered_table,
- ha_alter_info))
- {
+ /* We can abort alter table for any table type */
+ thd->abort_on_warning= !ha_alter_info->ignore && thd->is_strict_mode();
+ res= table->file->ha_inplace_alter_table(altered_table, ha_alter_info);
+ thd->abort_on_warning= false;
+ if (res)
goto rollback;
- }
// Upgrade to EXCLUSIVE before commit.
if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME))
@@ -7220,15 +7398,21 @@ static bool mysql_inplace_alter_table(THD *thd,
HA_EXTRA_NOT_USED,
NULL);
table_list->table= table= NULL;
- close_temporary_table(thd, altered_table, true, false);
+
+ thd->drop_temporary_table(altered_table, NULL, false);
/*
Replace the old .FRM with the new .FRM, but keep the old name for now.
Rename to the new name (if needed) will be handled separately below.
+
+ TODO: remove this check of thd->is_error() (now it intercept
+ errors in some val_*() methods and brings a single place for
+ such error interception).
*/
if (mysql_rename_table(db_type, alter_ctx->new_db, alter_ctx->tmp_name,
alter_ctx->db, alter_ctx->alias,
- FN_FROM_IS_TMP | NO_HA_TABLE))
+ FN_FROM_IS_TMP | NO_HA_TABLE) ||
+ thd->is_error())
{
// Since changes were done in-place, we can't revert them.
(void) quick_rm_table(thd, db_type,
@@ -7310,7 +7494,7 @@ static bool mysql_inplace_alter_table(THD *thd,
thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0);
/* QQ; do something about metadata locks ? */
}
- close_temporary_table(thd, altered_table, true, false);
+ thd->drop_temporary_table(altered_table, NULL, false);
// Delete temporary .frm/.par
(void) quick_rm_table(thd, create_info->db_type, alter_ctx->new_db,
alter_ctx->tmp_name, FN_IS_TMP | NO_HA_TABLE);
@@ -7406,14 +7590,17 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
List_iterator<Create_field> find_it(new_create_list);
List_iterator<Create_field> field_it(new_create_list);
List<Key_part_spec> key_parts;
+ List<Virtual_column_info> new_constraint_list;
uint db_create_options= (table->s->db_create_options
& ~(HA_OPTION_PACK_RECORD));
+ Item::func_processor_rename column_rename_param;
uint used_fields;
KEY *key_info=table->key_info;
bool rc= TRUE;
bool modified_primary_key= FALSE;
Create_field *def;
Field **f_ptr,*field;
+ MY_BITMAP *dropped_fields= NULL; // if it's NULL - no dropped fields
DBUG_ENTER("mysql_prepare_alter_table");
/*
@@ -7456,6 +7643,13 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (!(used_fields & HA_CREATE_USED_CONNECTION))
create_info->connect_string= table->s->connect_string;
+ column_rename_param.db_name.str= table->s->db.str;
+ column_rename_param.db_name.length= table->s->db.length;
+ column_rename_param.table_name.str= table->s->table_name.str;
+ column_rename_param.table_name.length= table->s->table_name.length;
+ if (column_rename_param.fields.copy(&alter_info->create_list, thd->mem_root))
+ DBUG_RETURN(1); // OOM
+
restore_record(table, s->default_values); // Empty record for DEFAULT
if ((create_info->fields_option_struct= (ha_field_option_struct**)
@@ -7470,6 +7664,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
/*
First collect all fields from table which isn't in drop_list
*/
+ bitmap_clear_all(&table->tmp_set);
for (f_ptr=table->field ; (field= *f_ptr) ; f_ptr++)
{
Alter_drop *drop;
@@ -7480,25 +7675,44 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
while ((drop=drop_it++))
{
if (drop->type == Alter_drop::COLUMN &&
- !my_strcasecmp(system_charset_info,field->field_name, drop->name))
- {
- /* Reset auto_increment value if it was dropped */
- if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER &&
- !(used_fields & HA_CREATE_USED_AUTO))
- {
- create_info->auto_increment_value=0;
- create_info->used_fields|=HA_CREATE_USED_AUTO;
- }
+ !my_strcasecmp(system_charset_info, field->field_name, drop->name))
break;
- }
}
if (drop)
{
+ /* Reset auto_increment value if it was dropped */
+ if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER &&
+ !(used_fields & HA_CREATE_USED_AUTO))
+ {
+ create_info->auto_increment_value=0;
+ create_info->used_fields|=HA_CREATE_USED_AUTO;
+ }
if (table->s->tmp_table == NO_TMP_TABLE)
(void) delete_statistics_for_column(thd, table, field);
drop_it.remove();
+ dropped_fields= &table->tmp_set;
+ bitmap_set_bit(dropped_fields, field->field_index);
continue;
}
+
+ /*
+ If we are doing a rename of a column, update all references in virtual
+ column expressions, constraints and defaults to use the new column name
+ */
+ if (alter_info->flags & Alter_info::ALTER_RENAME_COLUMN)
+ {
+ if (field->vcol_info)
+ field->vcol_info->expr->walk(&Item::rename_fields_processor, 1,
+ &column_rename_param);
+ if (field->check_constraint)
+ field->check_constraint->expr->walk(&Item::rename_fields_processor, 1,
+ &column_rename_param);
+ if (field->default_value)
+ field->default_value->expr->walk(&Item::rename_fields_processor, 1,
+ &column_rename_param);
+ table->m_needs_reopen= 1; // because new column name is on thd->mem_root
+ }
+
/* Check if field is changed */
def_it.rewind();
while ((def=def_it++))
@@ -7516,9 +7730,9 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
of the list for now. Their positions will be corrected later.
*/
new_create_list.push_back(def, thd->mem_root);
- if (field->stored_in_db != def->stored_in_db)
+ if (field->stored_in_db() != def->stored_in_db())
{
- my_error(ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN, MYF(0));
+ my_error(ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN, MYF(0));
goto err;
}
if (!def->after)
@@ -7550,12 +7764,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
if (alter)
{
- if (def->sql_type == MYSQL_TYPE_BLOB)
- {
- my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0), def->change);
- goto err;
- }
- if ((def->def=alter->def)) // Use new default
+ if ((def->default_value= alter->default_value))
def->flags&= ~NO_DEFAULT_VALUE_FLAG;
else
def->flags|= NO_DEFAULT_VALUE_FLAG;
@@ -7664,12 +7873,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
if (alter)
{
- if (def->sql_type == MYSQL_TYPE_BLOB)
- {
- my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0), def->change);
- goto err;
- }
- if ((def->def=alter->def)) // Use new default
+ if ((def->default_value= alter->default_value)) // Use new default
def->flags&= ~NO_DEFAULT_VALUE_FLAG;
else
def->flags|= NO_DEFAULT_VALUE_FLAG;
@@ -7727,6 +7931,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
continue;
}
+ const char *dropped_key_part= NULL;
KEY_PART_INFO *key_part= key_info->key_part;
key_parts.empty();
bool delete_index_stat= FALSE;
@@ -7756,6 +7961,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (table->s->primary_key == i)
modified_primary_key= TRUE;
delete_index_stat= TRUE;
+ dropped_key_part= key_part_name;
continue; // Field is removed
}
key_part_length= key_part->length;
@@ -7838,6 +8044,11 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
key_type= Key::PRIMARY;
else
key_type= Key::UNIQUE;
+ if (dropped_key_part)
+ {
+ my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), dropped_key_part);
+ goto err;
+ }
}
else if (key_info->flags & HA_FULLTEXT)
key_type= Key::FULLTEXT;
@@ -7868,6 +8079,58 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
}
+ /* Add all table level constraints which are not in the drop list */
+ if (table->s->table_check_constraints)
+ {
+ TABLE_SHARE *share= table->s;
+
+ for (uint i= share->field_check_constraints;
+ i < share->table_check_constraints ; i++)
+ {
+ Virtual_column_info *check= table->check_constraints[i];
+ Alter_drop *drop;
+ drop_it.rewind();
+ while ((drop=drop_it++))
+ {
+ if (drop->type == Alter_drop::CHECK_CONSTRAINT &&
+ !my_strcasecmp(system_charset_info, check->name.str, drop->name))
+ {
+ drop_it.remove();
+ break;
+ }
+ }
+ /* see if the constraint depends on *only* on dropped fields */
+ if (!drop && dropped_fields)
+ {
+ table->default_column_bitmaps();
+ bitmap_clear_all(table->read_set);
+ check->expr->walk(&Item::register_field_in_read_map, 1, 0);
+ if (bitmap_is_subset(table->read_set, dropped_fields))
+ drop= (Alter_drop*)1;
+ else if (bitmap_is_overlapping(dropped_fields, table->read_set))
+ {
+ bitmap_intersect(table->read_set, dropped_fields);
+ uint field_nr= bitmap_get_first_set(table->read_set);
+ my_error(ER_BAD_FIELD_ERROR, MYF(0),
+ table->field[field_nr]->field_name, "CHECK");
+ goto err;
+ }
+ }
+ if (!drop)
+ {
+ if (alter_info->flags & Alter_info::ALTER_RENAME_COLUMN)
+ {
+ check->expr->walk(&Item::rename_fields_processor, 1,
+ &column_rename_param);
+ table->m_needs_reopen= 1; // because new column name is on thd->mem_root
+ }
+ new_constraint_list.push_back(check, thd->mem_root);
+ }
+ }
+ }
+ /* Add new constraints */
+ new_constraint_list.append(&alter_info->check_constraint_list);
+
if (alter_info->drop_list.elements)
{
Alter_drop *drop;
@@ -7876,8 +8139,9 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
switch (drop->type) {
case Alter_drop::KEY:
case Alter_drop::COLUMN:
- my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0),
- alter_info->drop_list.head()->name);
+ case Alter_drop::CHECK_CONSTRAINT:
+ my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), drop->type_name(),
+ alter_info->drop_list.head()->name);
goto err;
case Alter_drop::FOREIGN_KEY:
// Leave the DROP FOREIGN KEY names in the alter_info->drop_list.
@@ -7885,12 +8149,6 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
}
}
- if (alter_info->alter_list.elements)
- {
- my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0),
- alter_info->alter_list.head()->name);
- goto err;
- }
if (!create_info->comment.str)
{
@@ -7923,6 +8181,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
rc= FALSE;
alter_info->create_list.swap(new_create_list);
alter_info->key_list.swap(new_key_list);
+ alter_info->check_constraint_list.swap(new_constraint_list);
err:
DBUG_RETURN(rc);
}
@@ -8169,11 +8428,14 @@ static bool fk_prepare_copy_alter_table(THD *thd, TABLE *table,
DBUG_RETURN(true);
case FK_COLUMN_DROPPED:
{
- char buff[NAME_LEN*2+2];
- strxnmov(buff, sizeof(buff)-1, f_key->foreign_db->str, ".",
- f_key->foreign_table->str, NullS);
+ StringBuffer<NAME_LEN*2+2> buff(system_charset_info);
+ LEX_STRING *db= f_key->foreign_db, *tbl= f_key->foreign_table;
+
+ append_identifier(thd, &buff, db->str, db->length);
+ buff.append('.');
+ append_identifier(thd, &buff, tbl->str,tbl->length);
my_error(ER_FK_COLUMN_CANNOT_DROP_CHILD, MYF(0), bad_column_name,
- f_key->foreign_id->str, buff);
+ f_key->foreign_id->str, buff.c_ptr());
DBUG_RETURN(true);
}
default:
@@ -8245,6 +8507,72 @@ static bool fk_prepare_copy_alter_table(THD *thd, TABLE *table,
DBUG_RETURN(false);
}
+/**
+ Rename temporary table and/or turn indexes on/off without touching .FRM.
+ Its a variant of simple_rename_or_index_change() to be used exclusively
+ for temporary tables.
+
+ @param thd Thread handler
+ @param table_list TABLE_LIST for the table to change
+ @param keys_onoff ENABLE or DISABLE KEYS?
+ @param alter_ctx ALTER TABLE runtime context.
+
+ @return Operation status
+ @retval false Success
+ @retval true Failure
+*/
+static bool
+simple_tmp_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
+ Alter_info::enum_enable_or_disable keys_onoff,
+ Alter_table_ctx *alter_ctx)
+{
+ DBUG_ENTER("simple_tmp_rename_or_index_change");
+
+ TABLE *table= table_list->table;
+ bool error= false;
+
+ DBUG_ASSERT(table->s->tmp_table);
+
+ if (keys_onoff != Alter_info::LEAVE_AS_IS)
+ {
+ THD_STAGE_INFO(thd, stage_manage_keys);
+ error= alter_table_manage_keys(table, table->file->indexes_are_disabled(),
+ keys_onoff);
+ }
+
+ if (!error && alter_ctx->is_table_renamed())
+ {
+ THD_STAGE_INFO(thd, stage_rename);
+
+ /*
+ If THD::rename_temporary_table() fails, there is no need to rename it
+ back to the original name (unlike the case for non-temporary tables),
+ as it was an allocation error and the table was not renamed.
+ */
+ error= thd->rename_temporary_table(table, alter_ctx->new_db,
+ alter_ctx->new_alias);
+ }
+
+ if (!error)
+ {
+ int res= 0;
+ /*
+ We do not replicate alter table statement on temporary tables under
+ ROW-based replication.
+ */
+ if (!thd->is_current_stmt_binlog_format_row())
+ {
+ res= write_bin_log(thd, true, thd->query(), thd->query_length());
+ }
+ if (res != 0)
+ error= true;
+ else
+ my_ok(thd);
+ }
+
+ DBUG_RETURN(error);
+}
+
/**
Rename table and/or turn indexes on/off without touching .FRM
@@ -8281,6 +8609,7 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
if (lock_tables(thd, table_list, alter_ctx->tables_opened, 0))
DBUG_RETURN(true);
+ THD_STAGE_INFO(thd, stage_manage_keys);
error= alter_table_manage_keys(table,
table->file->indexes_are_disabled(),
keys_onoff);
@@ -8501,7 +8830,12 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
if (table->s->tmp_table != NO_TMP_TABLE)
{
- if (find_temporary_table(thd, alter_ctx.new_db, alter_ctx.new_name))
+ /*
+ Check whether a temporary table exists with same requested new name.
+ If such table exists, there must be a corresponding TABLE_SHARE in
+ THD::all_temp_tables list.
+ */
+ if (thd->find_tmp_table_share(alter_ctx.new_db, alter_ctx.new_name))
{
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alter_ctx.new_alias);
DBUG_RETURN(true);
@@ -8640,6 +8974,64 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
THD_STAGE_INFO(thd, stage_setup);
+ if (alter_info->flags & Alter_info::ALTER_DROP_CHECK_CONSTRAINT)
+ {
+ /*
+ ALTER TABLE DROP CONSTRAINT
+ should be replaced with ... DROP [FOREIGN] KEY
+ if the constraint is the FOREIGN KEY or UNIQUE one.
+ */
+
+ List_iterator<Alter_drop> drop_it(alter_info->drop_list);
+ Alter_drop *drop;
+ List <FOREIGN_KEY_INFO> fk_child_key_list;
+ table->file->get_foreign_key_list(thd, &fk_child_key_list);
+
+ alter_info->flags&= ~Alter_info::ALTER_DROP_CHECK_CONSTRAINT;
+
+ while ((drop= drop_it++))
+ {
+ if (drop->type == Alter_drop::CHECK_CONSTRAINT)
+ {
+ {
+ /* Test if there is a FOREIGN KEY with this name. */
+ FOREIGN_KEY_INFO *f_key;
+ List_iterator<FOREIGN_KEY_INFO> fk_key_it(fk_child_key_list);
+
+ while ((f_key= fk_key_it++))
+ {
+ if (my_strcasecmp(system_charset_info, f_key->foreign_id->str,
+ drop->name) == 0)
+ {
+ drop->type= Alter_drop::FOREIGN_KEY;
+ alter_info->flags|= Alter_info::DROP_FOREIGN_KEY;
+ goto do_continue;
+ }
+ }
+ }
+
+ {
+ /* Test if there is an UNIQUE with this name. */
+ uint n_key;
+
+ for (n_key=0; n_key < table->s->keys; n_key++)
+ {
+ if ((table->key_info[n_key].flags & HA_NOSAME) &&
+ my_strcasecmp(system_charset_info,
+ drop->name, table->key_info[n_key].name) == 0)
+ {
+ drop->type= Alter_drop::KEY;
+ alter_info->flags|= Alter_info::ALTER_DROP_INDEX;
+ goto do_continue;
+ }
+ }
+ }
+ }
+ alter_info->flags|= Alter_info::ALTER_DROP_CHECK_CONSTRAINT;
+do_continue:;
+ }
+ }
+
handle_if_exists_options(thd, table, alter_info);
/*
@@ -8654,29 +9046,48 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
thd->get_stmt_da()->current_statement_warn_count());
my_ok(thd, 0L, 0L, alter_ctx.tmp_name);
- if (write_bin_log(thd, true, thd->query(), thd->query_length()))
- DBUG_RETURN(true);
+ /* We don't replicate alter table statement on temporary tables */
+ if (table->s->tmp_table == NO_TMP_TABLE ||
+ !thd->is_current_stmt_binlog_format_row())
+ {
+ if (write_bin_log(thd, true, thd->query(), thd->query_length()))
+ DBUG_RETURN(true);
+ }
DBUG_RETURN(false);
}
+ /*
+ Test if we are only doing RENAME or KEYS ON/OFF. This works
+ as we are testing if flags == 0 above.
+ */
if (!(alter_info->flags & ~(Alter_info::ALTER_RENAME |
Alter_info::ALTER_KEYS_ONOFF)) &&
alter_info->requested_algorithm !=
- Alter_info::ALTER_TABLE_ALGORITHM_COPY &&
- !table->s->tmp_table) // no need to touch frm
+ Alter_info::ALTER_TABLE_ALGORITHM_COPY) // No need to touch frm.
{
- // This requires X-lock, no other lock levels supported.
- if (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_DEFAULT &&
- alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE)
+ bool res;
+
+ if (!table->s->tmp_table)
{
- my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0),
- "LOCK=NONE/SHARED", "LOCK=EXCLUSIVE");
- DBUG_RETURN(true);
+ // This requires X-lock, no other lock levels supported.
+ if (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_DEFAULT &&
+ alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE)
+ {
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0),
+ "LOCK=NONE/SHARED", "LOCK=EXCLUSIVE");
+ DBUG_RETURN(true);
+ }
+ res= simple_rename_or_index_change(thd, table_list,
+ alter_info->keys_onoff,
+ &alter_ctx);
+ }
+ else
+ {
+ res= simple_tmp_rename_or_index_change(thd, table_list,
+ alter_info->keys_onoff,
+ &alter_ctx);
}
- bool res= simple_rename_or_index_change(thd, table_list,
- alter_info->keys_onoff,
- &alter_ctx);
DBUG_RETURN(res);
}
@@ -8767,11 +9178,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
alter_info->requested_algorithm !=
Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)
|| is_inplace_alter_impossible(table, create_info, alter_info)
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- || (partition_changed &&
- !(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION))
-#endif
- )
+ || IF_PARTITIONING((partition_changed &&
+ !(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)), 0))
{
if (alter_info->requested_algorithm ==
Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)
@@ -8939,11 +9347,11 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
// We assume that the table is non-temporary.
DBUG_ASSERT(!table->s->tmp_table);
- if (!(altered_table= open_table_uncached(thd, new_db_type, &frm,
- alter_ctx.get_tmp_path(),
- alter_ctx.new_db,
- alter_ctx.tmp_name,
- true, false)))
+ if (!(altered_table=
+ thd->create_and_open_tmp_table(new_db_type, &frm,
+ alter_ctx.get_tmp_path(),
+ alter_ctx.new_db, alter_ctx.tmp_name,
+ false)))
goto err_new_table_cleanup;
/* Set markers for fields in TABLE object for altered table. */
@@ -8957,7 +9365,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
altered_table->column_bitmaps_set_no_signal(&altered_table->s->all_set,
&altered_table->s->all_set);
restore_record(altered_table, s->default_values); // Create empty record
- if (altered_table->default_field && altered_table->update_default_fields())
+ /* Check that we can call default functions with default field values */
+ altered_table->reset_default_fields();
+ if (altered_table->default_field &&
+ altered_table->update_default_fields(0, 1))
goto err_new_table_cleanup;
// Ask storage engine whether to use copy or in-place
@@ -8983,7 +9394,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
ha_alter_info.report_unsupported_error("LOCK=NONE/SHARED",
"LOCK=EXCLUSIVE");
- close_temporary_table(thd, altered_table, true, false);
+ thd->drop_temporary_table(altered_table, NULL, false);
goto err_new_table_cleanup;
}
break;
@@ -8994,7 +9405,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
Alter_info::ALTER_TABLE_LOCK_NONE)
{
ha_alter_info.report_unsupported_error("LOCK=NONE", "LOCK=SHARED");
- close_temporary_table(thd, altered_table, true, false);
+ thd->drop_temporary_table(altered_table, NULL, false);
goto err_new_table_cleanup;
}
break;
@@ -9008,7 +9419,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
ha_alter_info.report_unsupported_error("ALGORITHM=INPLACE",
"ALGORITHM=COPY");
- close_temporary_table(thd, altered_table, true, false);
+ thd->drop_temporary_table(altered_table, NULL, false);
goto err_new_table_cleanup;
}
// COPY with LOCK=NONE is not supported, no point in trying.
@@ -9016,7 +9427,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
Alter_info::ALTER_TABLE_LOCK_NONE)
{
ha_alter_info.report_unsupported_error("LOCK=NONE", "LOCK=SHARED");
- close_temporary_table(thd, altered_table, true, false);
+ thd->drop_temporary_table(altered_table, NULL, false);
goto err_new_table_cleanup;
}
// Otherwise use COPY
@@ -9024,7 +9435,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
break;
case HA_ALTER_ERROR:
default:
- close_temporary_table(thd, altered_table, true, false);
+ thd->drop_temporary_table(altered_table, NULL, false);
goto err_new_table_cleanup;
}
@@ -9043,7 +9454,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
else
{
- close_temporary_table(thd, altered_table, true, false);
+ thd->drop_temporary_table(altered_table, NULL, false);
}
}
@@ -9087,46 +9498,27 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
goto err_new_table_cleanup;
if (ha_create_table(thd, alter_ctx.get_tmp_path(),
- alter_ctx.new_db, alter_ctx.tmp_name,
+ alter_ctx.new_db, alter_ctx.new_name,
create_info, &frm))
goto err_new_table_cleanup;
/* Mark that we have created table in storage engine. */
no_ha_table= false;
- if (create_info->tmp_table())
- {
- if (!open_table_uncached(thd, new_db_type, &frm,
- alter_ctx.get_tmp_path(),
- alter_ctx.new_db, alter_ctx.tmp_name,
- true, true))
- goto err_new_table_cleanup;
- }
+ new_table=
+ thd->create_and_open_tmp_table(new_db_type, &frm, alter_ctx.get_tmp_path(),
+ alter_ctx.new_db, alter_ctx.new_name, true);
+ if (!new_table)
+ goto err_new_table_cleanup;
/* Open the table since we need to copy the data. */
if (table->s->tmp_table != NO_TMP_TABLE)
{
- TABLE_LIST tbl;
- tbl.init_one_table(alter_ctx.new_db, strlen(alter_ctx.new_db),
- alter_ctx.tmp_name, strlen(alter_ctx.tmp_name),
- alter_ctx.tmp_name, TL_READ_NO_INSERT);
- /* Table is in thd->temporary_tables */
- if (open_temporary_table(thd, &tbl))
- goto err_new_table_cleanup;
- new_table= tbl.table;
- DBUG_ASSERT(new_table);
+ /* in case of alter temp table send the tracker in OK packet */
+ SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL);
}
else
{
- /* table is a normal table: Create temporary table in same directory */
- /* Open our intermediate table. */
- new_table= open_table_uncached(thd, new_db_type, &frm,
- alter_ctx.get_tmp_path(),
- alter_ctx.new_db, alter_ctx.tmp_name,
- true, true);
- if (!new_table)
- goto err_new_table_cleanup;
-
/*
Normally, an attempt to modify an FK parent table will cause
FK children to be prelocked, so the table-being-altered cannot
@@ -9173,6 +9565,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
}
}
+
/*
Note: In case of MERGE table, we do not attach children. We do not
copy data for MERGE tables. Only the children have data.
@@ -9237,10 +9630,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
new_table->s->table_creation_was_logged=
table->s->table_creation_was_logged;
/* Remove link to old table and rename the new one */
- close_temporary_table(thd, table, true, true);
+ thd->drop_temporary_table(table, NULL, true);
/* Should pass the 'new_name' as we store table name in the cache */
- if (rename_temporary_table(thd, new_table,
- alter_ctx.new_db, alter_ctx.new_name))
+ if (thd->rename_temporary_table(new_table, alter_ctx.new_db,
+ alter_ctx.new_name))
goto err_new_table_cleanup;
/* We don't replicate alter table statement on temporary tables */
if (!thd->is_current_stmt_binlog_format_row() &&
@@ -9252,10 +9645,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
/*
Close the intermediate table that will be the new table, but do
- not delete it! Even altough MERGE tables do not have their children
- attached here it is safe to call close_temporary_table().
+ not delete it! Even though MERGE tables do not have their children
+ attached here it is safe to call THD::drop_temporary_table().
*/
- close_temporary_table(thd, new_table, true, false);
+ thd->drop_temporary_table(new_table, NULL, false);
new_table= NULL;
DEBUG_SYNC(thd, "alter_table_before_rename_result_table");
@@ -9395,17 +9788,6 @@ end_temporary:
err_new_table_cleanup:
my_free(const_cast<uchar*>(frm.str));
- if (new_table)
- {
- /* close_temporary_table() frees the new_table pointer. */
- close_temporary_table(thd, new_table, true, true);
- }
- else
- (void) quick_rm_table(thd, new_db_type,
- alter_ctx.new_db, alter_ctx.tmp_name,
- (FN_IS_TMP | (no_ha_table ? NO_HA_TABLE : 0)),
- alter_ctx.get_tmp_path());
-
/*
No default value was provided for a DATE/DATETIME field, the
current sql_mode doesn't allow the '0000-00-00' value and
@@ -9437,14 +9819,29 @@ err_new_table_cleanup:
thd->abort_on_warning= true;
make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
f_val, strlength(f_val), t_type,
+ new_table->s,
alter_ctx.datetime_field->field_name);
thd->abort_on_warning= save_abort_on_warning;
}
+ if (new_table)
+ {
+ thd->drop_temporary_table(new_table, NULL, true);
+ }
+ else
+ (void) quick_rm_table(thd, new_db_type,
+ alter_ctx.new_db, alter_ctx.tmp_name,
+ (FN_IS_TMP | (no_ha_table ? NO_HA_TABLE : 0)),
+ alter_ctx.get_tmp_path());
+
+
DBUG_RETURN(true);
err_with_mdl_after_alter:
/* the table was altered. binlog the operation */
+ DBUG_ASSERT(!(mysql_bin_log.is_open() &&
+ thd->is_current_stmt_binlog_format_row() &&
+ (create_info->tmp_table())));
write_bin_log(thd, true, thd->query(), thd->query_length());
err_with_mdl:
@@ -9521,17 +9918,15 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
int error= 1;
Copy_field *copy= NULL, *copy_end;
ha_rows found_count= 0, delete_count= 0;
- uint length= 0;
- SORT_FIELD *sortorder;
+ SORT_INFO *file_sort= 0;
READ_RECORD info;
TABLE_LIST tables;
List<Item> fields;
List<Item> all_fields;
- ha_rows examined_rows;
- ha_rows found_rows;
bool auto_increment_field_copied= 0;
bool cleanup_done= 0;
- ulonglong save_sql_mode= thd->variables.sql_mode;
+ bool init_read_record_done= 0;
+ sql_mode_t save_sql_mode= thd->variables.sql_mode;
ulonglong prev_insert_id, time_to_report_progress;
Field **dfield_ptr= to->default_field;
DBUG_ENTER("copy_data_between_tables");
@@ -9559,6 +9954,11 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff);
+ /* Set read map for all fields in from table */
+ from->default_column_bitmaps();
+ bitmap_set_all(from->read_set);
+ from->file->column_bitmaps_signal();
+
/* We can abort alter table for any table type */
thd->abort_on_warning= !ignore && thd->is_strict_mode();
@@ -9596,7 +9996,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
Old fields keep their current values, and therefore should not be
present in the set of autoupdate fields.
*/
- if ((*ptr)->has_insert_default_function())
+ if ((*ptr)->default_value)
{
*(dfield_ptr++)= *ptr;
++to->s->default_fields;
@@ -9612,17 +10012,17 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
to->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX)
{
char warn_buff[MYSQL_ERRMSG_SIZE];
+ bool save_abort_on_warning= thd->abort_on_warning;
+ thd->abort_on_warning= false;
my_snprintf(warn_buff, sizeof(warn_buff),
"ORDER BY ignored as there is a user-defined clustered index"
" in the table '%-.192s'", from->s->table_name.str);
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
warn_buff);
+ thd->abort_on_warning= save_abort_on_warning;
}
else
{
- from->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL |
- MY_THREAD_SPECIFIC));
bzero((char *) &tables, sizeof(tables));
tables.table= from;
tables.alias= tables.table_name= from->s->table_name.str;
@@ -9630,16 +10030,14 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
THD_STAGE_INFO(thd, stage_sorting);
Filesort_tracker dummy_tracker(false);
+ Filesort fsort(order, HA_POS_ERROR, true, NULL);
+
if (thd->lex->select_lex.setup_ref_array(thd, order_num) ||
setup_order(thd, thd->lex->select_lex.ref_pointer_array,
- &tables, fields, all_fields, order) ||
- !(sortorder= make_unireg_sortorder(thd, NULL, 0, order, &length, NULL)) ||
- (from->sort.found_records= filesort(thd, from, sortorder, length,
- NULL, HA_POS_ERROR,
- true,
- &examined_rows, &found_rows,
- &dummy_tracker)) ==
- HA_POS_ERROR)
+ &tables, fields, all_fields, order))
+ goto err;
+
+ if (!(file_sort= filesort(thd, from, &fsort, &dummy_tracker)))
goto err;
}
thd_progress_next_stage(thd);
@@ -9648,19 +10046,24 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
THD_STAGE_INFO(thd, stage_copy_to_tmp_table);
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
- to->mark_virtual_columns_for_write(TRUE);
- if (init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE))
+ /* Add virtual columns to vcol_set to ensure they are updated */
+ if (to->vfield)
+ to->mark_virtual_columns_for_write(TRUE);
+ if (init_read_record(&info, thd, from, (SQL_SELECT *) 0, file_sort, 1, 1,
+ FALSE))
goto err;
+ init_read_record_done= 1;
if (ignore && !alter_ctx->fk_error_if_delete_row)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->get_stmt_da()->reset_current_row_for_warning();
restore_record(to, s->default_values); // Create empty record
- if (to->default_field && to->update_default_fields())
- goto err;
+ to->reset_default_fields();
thd->progress.max_counter= from->file->records();
time_to_report_progress= MY_HOW_OFTEN_TO_WRITE/10;
+ if (!ignore) /* for now, InnoDB needs the undo log for ALTER IGNORE */
+ to->file->extra(HA_EXTRA_BEGIN_ALTER_COPY);
while (!(error=info.read_record(&info)))
{
@@ -9670,8 +10073,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
error= 1;
break;
}
- if (from->vfield)
- update_virtual_fields(thd, from);
if (++thd->progress.counter >= time_to_report_progress)
{
time_to_report_progress+= MY_HOW_OFTEN_TO_WRITE/10;
@@ -9685,26 +10086,32 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
error= 1;
break;
}
- if (to->next_number_field)
- {
- if (auto_increment_field_copied)
- to->auto_increment_field_not_null= TRUE;
- else
- to->next_number_field->reset();
- }
-
+
for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++)
{
copy_ptr->do_copy(copy_ptr);
}
prev_insert_id= to->file->next_insert_id;
+ if (to->default_field)
+ to->update_default_fields(0, ignore);
if (to->vfield)
- update_virtual_fields(thd, to, VCOL_UPDATE_FOR_WRITE);
+ to->update_virtual_fields(to->file, VCOL_UPDATE_FOR_WRITE);
+
+ /* This will set thd->is_error() if fatal failure */
+ if (to->verify_constraints(ignore) == VIEW_CHECK_SKIP)
+ continue;
if (thd->is_error())
{
error= 1;
break;
}
+ if (to->next_number_field)
+ {
+ if (auto_increment_field_copied)
+ to->auto_increment_field_not_null= TRUE;
+ else
+ to->next_number_field->reset();
+ }
error=to->file->ha_write_row(to->record[0]);
to->auto_increment_field_not_null= FALSE;
if (error)
@@ -9766,22 +10173,25 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
found_count++;
thd->get_stmt_da()->inc_current_row_for_warning();
}
- end_read_record(&info);
- free_io_cache(from);
THD_STAGE_INFO(thd, stage_enabling_keys);
thd_progress_next_stage(thd);
- if (error > 0)
+ if (error > 0 && !from->s->tmp_table)
{
/* We are going to drop the temporary table */
to->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
}
if (to->file->ha_end_bulk_insert() && error <= 0)
{
- to->file->print_error(my_errno,MYF(0));
+ /* Give error, if not already given */
+ if (!thd->is_error())
+ to->file->print_error(my_errno,MYF(0));
error= 1;
}
+ if (!ignore)
+ to->file->extra(HA_EXTRA_END_ALTER_COPY);
+
cleanup_done= 1;
to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
@@ -9789,12 +10199,17 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
error= 1;
err:
+ /* Free resources */
+ if (init_read_record_done)
+ end_read_record(&info);
+ delete [] copy;
+ delete file_sort;
+
thd->variables.sql_mode= save_sql_mode;
thd->abort_on_warning= 0;
*copied= found_count;
*deleted=delete_count;
to->file->ha_release_auto_increment();
- delete [] copy;
if (!cleanup_done)
{
@@ -9806,7 +10221,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
if (to->file->ha_external_lock(thd,F_UNLCK))
error=1;
- if (error < 0 && to->file->extra(HA_EXTRA_PREPARE_FOR_RENAME))
+ if (error < 0 && !from->s->tmp_table &&
+ to->file->extra(HA_EXTRA_PREPARE_FOR_RENAME))
error= 1;
thd_progress_end(thd);
DBUG_RETURN(error > 0 ? -1 : 0);
@@ -9859,6 +10275,18 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy)
}
+static void flush_checksum(ha_checksum *row_crc, uchar **checksum_start,
+ size_t *checksum_length)
+{
+ if (*checksum_start)
+ {
+ *row_crc= my_checksum(*row_crc, *checksum_start, *checksum_length);
+ *checksum_start= NULL;
+ *checksum_length= 0;
+ }
+}
+
+
bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
HA_CHECK_OPT *check_opt)
{
@@ -9911,7 +10339,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
/* Allow to open real tables only. */
table->required_type= FRMTYPE_TABLE;
- if (open_temporary_tables(thd, table) ||
+ if (thd->open_temporary_tables(table) ||
open_and_lock_tables(thd, table, FALSE, 0))
{
t= NULL;
@@ -9935,13 +10363,13 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
if (!(check_opt->flags & T_EXTEND) &&
(((t->file->ha_table_flags() & HA_HAS_OLD_CHECKSUM) && thd->variables.old_mode) ||
((t->file->ha_table_flags() & HA_HAS_NEW_CHECKSUM) && !thd->variables.old_mode)))
- protocol->store((ulonglong)t->file->checksum());
+ protocol->store((ulonglong)t->file->checksum());
else if (check_opt->flags & T_QUICK)
- protocol->store_null();
+ protocol->store_null();
else
{
- /* calculating table's checksum */
- ha_checksum crc= 0;
+ /* calculating table's checksum */
+ ha_checksum crc= 0;
DBUG_ASSERT(t->s->last_null_bit_pos < 8);
uchar null_mask= (t->s->last_null_bit_pos ?
(256 - (1 << t->s->last_null_bit_pos)):
@@ -9949,12 +10377,12 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
t->use_all_columns();
- if (t->file->ha_rnd_init(1))
- protocol->store_null();
- else
- {
- for (;;)
- {
+ if (t->file->ha_rnd_init(1))
+ protocol->store_null();
+ else
+ {
+ for (;;)
+ {
if (thd->killed)
{
/*
@@ -9965,7 +10393,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
thd->protocol->remove_last_row();
goto err;
}
- ha_checksum row_crc= 0;
+ ha_checksum row_crc= 0;
int error= t->file->ha_rnd_next(t->record[0]);
if (unlikely(error))
{
@@ -9973,22 +10401,27 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
continue;
break;
}
- if (t->s->null_bytes)
+ if (t->s->null_bytes)
{
/* fix undefined null bits */
t->record[0][t->s->null_bytes-1] |= null_mask;
if (!(t->s->db_create_options & HA_OPTION_PACK_RECORD))
t->record[0][0] |= 1;
- row_crc= my_checksum(row_crc, t->record[0], t->s->null_bytes);
+ row_crc= my_checksum(row_crc, t->record[0], t->s->null_bytes);
}
- for (uint i= 0; i < t->s->fields; i++ )
- {
- Field *f= t->field[i];
+ uchar *checksum_start= NULL;
+ size_t checksum_length= 0;
+ for (uint i= 0; i < t->s->fields; i++ )
+ {
+ Field *f= t->field[i];
if (! thd->variables.old_mode && f->is_real_null(0))
+ {
+ flush_checksum(&row_crc, &checksum_start, &checksum_length);
continue;
+ }
/*
BLOB and VARCHAR have pointers in their field, we must convert
to string; GEOMETRY is implemented on top of BLOB.
@@ -10000,6 +10433,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
case MYSQL_TYPE_GEOMETRY:
case MYSQL_TYPE_BIT:
{
+ flush_checksum(&row_crc, &checksum_start, &checksum_length);
String tmp;
f->val_str(&tmp);
row_crc= my_checksum(row_crc, (uchar*) tmp.ptr(),
@@ -10007,16 +10441,20 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
break;
}
default:
- row_crc= my_checksum(row_crc, f->ptr, f->pack_length());
+ if (!checksum_start)
+ checksum_start= f->ptr;
+ DBUG_ASSERT(checksum_start + checksum_length == f->ptr);
+ checksum_length+= f->pack_length();
break;
- }
- }
+ }
+ }
+ flush_checksum(&row_crc, &checksum_start, &checksum_length);
- crc+= row_crc;
- }
- protocol->store((ulonglong)crc);
+ crc+= row_crc;
+ }
+ protocol->store((ulonglong)crc);
t->file->ha_rnd_end();
- }
+ }
}
trans_rollback_stmt(thd);
close_thread_tables(thd);
@@ -10088,7 +10526,7 @@ bool check_engine(THD *thd, const char *db_name,
if (no_substitution)
{
const char *engine_name= ha_resolve_storage_engine_name(req_engine);
- my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), engine_name, engine_name);
+ my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), engine_name);
DBUG_RETURN(TRUE);
}
*new_engine= enf_engine;
diff --git a/sql/sql_table.h b/sql/sql_table.h
index 474fe9cd90b..5abb25d1ea5 100644
--- a/sql/sql_table.h
+++ b/sql/sql_table.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2006, 2014, Oracle and/or its affiliates.
- Copyright (c) 2011, 2014, Monty Program Ab.
+ Copyright (c) 2011, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -23,6 +23,7 @@
class Alter_info;
class Alter_table_ctx;
+class Column_definition;
class Create_field;
struct TABLE_LIST;
class THD;
@@ -250,10 +251,10 @@ bool quick_rm_table(THD *thd, handlerton *base, const char *db,
const char *table_name, uint flags,
const char *table_path=0);
void close_cached_table(THD *thd, TABLE *table);
-void sp_prepare_create_field(THD *thd, Create_field *sql_field);
-int prepare_create_field(Create_field *sql_field,
+void sp_prepare_create_field(THD *thd, Column_definition *sql_field);
+int prepare_create_field(Column_definition *sql_field,
uint *blob_columns,
- longlong table_flags);
+ ulonglong table_flags);
CHARSET_INFO* get_sql_field_charset(Create_field *sql_field,
HA_CREATE_INFO *create_info);
bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags);
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 8e7525893eb..b7b70f0f55c 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -73,22 +73,22 @@ print_where(COND *cond,const char *info, enum_query_type query_type)
DBUG_UNLOCK_FILE;
}
+#ifdef EXTRA_DEBUG
/* This is for debugging purposes */
-
-
static my_bool print_cached_tables_callback(TDC_element *element,
void *arg __attribute__((unused)))
{
TABLE *entry;
mysql_mutex_lock(&element->LOCK_table_share);
- TDC_element::All_share_tables_list::Iterator it(element->all_tables);
+ All_share_tables_list::Iterator it(element->all_tables);
while ((entry= it++))
{
THD *in_use= entry->in_use;
- printf("%-14.14s %-32s%6ld%8ld%6d %s\n",
- entry->s->db.str, entry->s->table_name.str, element->version,
- in_use ? in_use->thread_id : 0,
+ printf("%-14.14s %-32s%6lu%8ld%6d %s\n",
+ entry->s->db.str, entry->s->table_name.str,
+ (ulong) element->version,
+ in_use ? (long) in_use->thread_id : (long) 0,
entry->db_stat ? 1 : 0,
in_use ? lock_descriptions[(int)entry->reginfo.lock_type] :
"Not in use");
@@ -107,11 +107,13 @@ static void print_cached_tables(void)
tdc_iterate(0, (my_hash_walk_action) print_cached_tables_callback, NULL, true);
- printf("\nCurrent refresh version: %ld\n", tdc_refresh_version());
+ printf("\nCurrent refresh version: %ld\n",
+ (long) tdc_refresh_version());
fflush(stdout);
/* purecov: end */
return;
}
+#endif
void TEST_filesort(SORT_FIELD *sortorder,uint s_length)
@@ -171,7 +173,7 @@ TEST_join(JOIN *join)
in order not to garble the tabular output below.
*/
String ref_key_parts[MAX_TABLES];
- int tables_in_range= jt_range->end - jt_range->start;
+ int tables_in_range= (int)(jt_range->end - jt_range->start);
for (i= 0; i < tables_in_range; i++)
{
JOIN_TAB *tab= jt_range->start + i;
@@ -432,7 +434,7 @@ static void push_locks_into_array(DYNAMIC_ARRAY *ar, THR_LOCK_DATA *data,
if (table && table->s->tmp_table == NO_TMP_TABLE)
{
TABLE_LOCK_INFO table_lock_info;
- table_lock_info.thread_id= table->in_use->thread_id;
+ table_lock_info.thread_id= (ulong)table->in_use->thread_id;
memcpy(table_lock_info.table_name, table->s->table_cache_key.str,
table->s->table_cache_key.length);
table_lock_info.table_name[strlen(table_lock_info.table_name)]='.';
@@ -560,15 +562,17 @@ void mysql_print_status()
{
char current_dir[FN_REFLEN];
STATUS_VAR tmp;
+ uint count;
- calc_sum_of_all_status(&tmp);
+ count= calc_sum_of_all_status(&tmp);
printf("\nStatus information:\n\n");
(void) my_getwd(current_dir, sizeof(current_dir),MYF(0));
printf("Current dir: %s\n", current_dir);
- printf("Running threads: %d Stack size: %ld\n", thread_count,
+ printf("Running threads: %d Cached threads: %lu Stack size: %ld\n",
+ count, cached_thread_count,
(long) my_thread_stack_size);
+#ifdef EXTRA_DEBUG
thr_print_locks(); // Write some debug info
-#ifndef DBUG_OFF
print_cached_tables();
#endif
/* Print key cache status */
@@ -613,29 +617,35 @@ Next alarm time: %lu\n",
display_table_locks();
#ifdef HAVE_MALLINFO
struct mallinfo info= mallinfo();
+ char llbuff[10][22];
printf("\nMemory status:\n\
-Non-mmapped space allocated from system: %d\n\
-Number of free chunks: %d\n\
-Number of fastbin blocks: %d\n\
-Number of mmapped regions: %d\n\
-Space in mmapped regions: %d\n\
-Maximum total allocated space: %d\n\
-Space available in freed fastbin blocks: %d\n\
-Total allocated space: %d\n\
-Total free space: %d\n\
-Top-most, releasable space: %d\n\
-Estimated memory (with thread stack): %ld\n",
- (int) info.arena ,
- (int) info.ordblks,
- (int) info.smblks,
- (int) info.hblks,
- (int) info.hblkhd,
- (int) info.usmblks,
- (int) info.fsmblks,
- (int) info.uordblks,
- (int) info.fordblks,
- (int) info.keepcost,
- (long) (thread_count * my_thread_stack_size + info.hblkhd + info.arena));
+Non-mmapped space allocated from system: %s\n\
+Number of free chunks: %lu\n\
+Number of fastbin blocks: %lu\n\
+Number of mmapped regions: %lu\n\
+Space in mmapped regions: %s\n\
+Maximum total allocated space: %s\n\
+Space available in freed fastbin blocks: %s\n\
+Total allocated space: %s\n\
+Total free space: %s\n\
+Top-most, releasable space: %s\n\
+Estimated memory (with thread stack): %s\n\
+Global memory allocated by server: %s\n\
+Memory allocated by threads: %s\n",
+ llstr(info.arena, llbuff[0]),
+ (ulong) info.ordblks,
+ (ulong) info.smblks,
+ (ulong) info.hblks,
+ llstr(info.hblkhd, llbuff[1]),
+ llstr(info.usmblks, llbuff[2]),
+ llstr(info.fsmblks, llbuff[3]),
+ llstr(info.uordblks, llbuff[4]),
+ llstr(info.fordblks, llbuff[5]),
+ llstr(info.keepcost, llbuff[6]),
+ llstr((count + cached_thread_count)* my_thread_stack_size + info.hblkhd + info.arena, llbuff[7]),
+ llstr(tmp.global_memory_used, llbuff[8]),
+ llstr(tmp.local_memory_used, llbuff[9]));
+
#endif
#ifdef HAVE_EVENT_SCHEDULER
diff --git a/sql/sql_test.h b/sql/sql_test.h
index 3c1ee188eeb..867582a9569 100644
--- a/sql/sql_test.h
+++ b/sql/sql_test.h
@@ -22,7 +22,7 @@ class JOIN;
struct TABLE_LIST;
typedef class Item COND;
typedef class st_select_lex SELECT_LEX;
-typedef struct st_sort_field SORT_FIELD;
+struct SORT_FIELD;
#ifndef DBUG_OFF
void print_where(COND *cond,const char *info, enum_query_type query_type);
diff --git a/sql/sql_time.cc b/sql/sql_time.cc
index c4d875e4178..bba8c974ccb 100644
--- a/sql/sql_time.cc
+++ b/sql/sql_time.cc
@@ -223,7 +223,7 @@ check_date_with_warn(const MYSQL_TIME *ltime, ulonglong fuzzy_date,
{
ErrConvTime str(ltime);
make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &str, ts_type, 0);
+ &str, ts_type, 0, 0);
return true;
}
return false;
@@ -240,7 +240,7 @@ adjust_time_range_with_warn(MYSQL_TIME *ltime, uint dec)
return true;
if (warnings)
make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &str, MYSQL_TIMESTAMP_TIME, NullS);
+ &str, MYSQL_TIMESTAMP_TIME, 0, NullS);
return false;
}
@@ -274,7 +274,7 @@ to_ascii(CHARSET_INFO *cs,
*dst++= static_cast<char>(wc);
}
*dst= '\0';
- return dst - dst0;
+ return (uint)(dst - dst0);
}
@@ -329,7 +329,7 @@ str_to_datetime_with_warn(CHARSET_INFO *cs,
ret_val ? Sql_condition::WARN_LEVEL_WARN :
Sql_condition::time_warn_level(status.warnings),
str, length, flags & TIME_TIME_ONLY ?
- MYSQL_TIMESTAMP_TIME : l_time->time_type, NullS);
+ MYSQL_TIMESTAMP_TIME : l_time->time_type, 0, NullS);
DBUG_EXECUTE_IF("str_to_datetime_warn",
push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_YES, str););
@@ -353,7 +353,7 @@ str_to_datetime_with_warn(CHARSET_INFO *cs,
static bool number_to_time_with_warn(bool neg, ulonglong nr, ulong sec_part,
MYSQL_TIME *ltime, ulonglong fuzzydate,
const ErrConv *str,
- const char *field_name)
+ const TABLE_SHARE *s, const char *field_name)
{
int was_cut;
longlong res;
@@ -387,14 +387,15 @@ static bool number_to_time_with_warn(bool neg, ulonglong nr, ulong sec_part,
Sql_condition::WARN_LEVEL_WARN, str,
res < 0 ? MYSQL_TIMESTAMP_ERROR
: mysql_type_to_time_type(f_type),
- field_name);
+ s, field_name);
}
return res < 0;
}
bool double_to_datetime_with_warn(double value, MYSQL_TIME *ltime,
- ulonglong fuzzydate, const char *field_name)
+ ulonglong fuzzydate,
+ const TABLE_SHARE *s, const char *field_name)
{
const ErrConvDouble str(value);
bool neg= value < 0;
@@ -408,28 +409,30 @@ bool double_to_datetime_with_warn(double value, MYSQL_TIME *ltime,
longlong nr= static_cast<ulonglong>(floor(value));
uint sec_part= static_cast<ulong>((value - floor(value))*TIME_SECOND_PART_FACTOR);
return number_to_time_with_warn(neg, nr, sec_part, ltime, fuzzydate, &str,
- field_name);
+ s, field_name);
}
bool decimal_to_datetime_with_warn(const my_decimal *value, MYSQL_TIME *ltime,
- ulonglong fuzzydate, const char *field_name)
+ ulonglong fuzzydate,
+ const TABLE_SHARE *s, const char *field_name)
{
const ErrConvDecimal str(value);
ulonglong nr;
ulong sec_part;
bool neg= my_decimal2seconds(value, &nr, &sec_part);
return number_to_time_with_warn(neg, nr, sec_part, ltime, fuzzydate, &str,
- field_name);
+ s, field_name);
}
bool int_to_datetime_with_warn(bool neg, ulonglong value, MYSQL_TIME *ltime,
- ulonglong fuzzydate, const char *field_name)
+ ulonglong fuzzydate,
+ const TABLE_SHARE *s, const char *field_name)
{
- const ErrConvInteger str(neg ? -value : value, !neg);
+ const ErrConvInteger str(neg ? - (longlong) value : (longlong) value, !neg);
return number_to_time_with_warn(neg, value, 0, ltime,
- fuzzydate, &str, field_name);
+ fuzzydate, &str, s, field_name);
}
@@ -856,7 +859,7 @@ void make_truncated_value_warning(THD *thd,
Sql_condition::enum_warning_level level,
const ErrConv *sval,
timestamp_type time_type,
- const char *field_name)
+ const TABLE_SHARE *s, const char *field_name)
{
char warn_buff[MYSQL_ERRMSG_SIZE];
const char *type_str;
@@ -875,10 +878,21 @@ void make_truncated_value_warning(THD *thd,
break;
}
if (field_name)
+ {
+ const char *db_name= s->db.str;
+ const char *table_name= s->table_name.str;
+
+ if (!db_name)
+ db_name= "";
+ if (!table_name)
+ table_name= "";
+
cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
ER_THD(thd, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- type_str, sval->ptr(), field_name,
+ type_str, sval->ptr(),
+ db_name, table_name, field_name,
(ulong) thd->get_stmt_da()->current_row_for_warning());
+ }
else
{
if (time_type > MYSQL_TIMESTAMP_ERROR)
@@ -1205,7 +1219,7 @@ make_date_with_warn(MYSQL_TIME *ltime, ulonglong fuzzy_date,
/* e.g. negative time */
ErrConvTime str(ltime);
make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &str, ts_type, 0);
+ &str, ts_type, 0, 0);
return true;
}
if ((ltime->time_type= ts_type) == MYSQL_TIMESTAMP_DATE)
@@ -1369,7 +1383,7 @@ time_to_datetime_with_warn(THD *thd,
{
ErrConvTime str(from);
make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- &str, MYSQL_TIMESTAMP_DATETIME, 0);
+ &str, MYSQL_TIMESTAMP_DATETIME, 0, 0);
return true;
}
return false;
diff --git a/sql/sql_time.h b/sql/sql_time.h
index e0cab5cfa66..260e6e36268 100644
--- a/sql/sql_time.h
+++ b/sql/sql_time.h
@@ -44,13 +44,13 @@ bool str_to_datetime_with_warn(CHARSET_INFO *cs, const char *str,
ulonglong flags);
bool double_to_datetime_with_warn(double value, MYSQL_TIME *ltime,
ulonglong fuzzydate,
- const char *name);
+ const TABLE_SHARE *s, const char *name);
bool decimal_to_datetime_with_warn(const my_decimal *value, MYSQL_TIME *ltime,
ulonglong fuzzydate,
- const char *name);
+ const TABLE_SHARE *s, const char *name);
bool int_to_datetime_with_warn(bool neg, ulonglong value, MYSQL_TIME *ltime,
ulonglong fuzzydate,
- const char *name);
+ const TABLE_SHARE *s, const char *name);
bool time_to_datetime(THD *thd, const MYSQL_TIME *tm, MYSQL_TIME *dt);
bool time_to_datetime_with_warn(THD *thd,
@@ -120,15 +120,15 @@ void make_truncated_value_warning(THD *thd,
Sql_condition::enum_warning_level level,
const ErrConv *str_val,
timestamp_type time_type,
- const char *field_name);
+ const TABLE_SHARE *s, const char *field_name);
static inline void make_truncated_value_warning(THD *thd,
Sql_condition::enum_warning_level level, const char *str_val,
uint str_length, timestamp_type time_type,
- const char *field_name)
+ const TABLE_SHARE *s, const char *field_name)
{
const ErrConvString str(str_val, str_length, &my_charset_bin);
- make_truncated_value_warning(thd, level, &str, time_type, field_name);
+ make_truncated_value_warning(thd, level, &str, time_type, s, field_name);
}
extern DATE_TIME_FORMAT *date_time_format_make(timestamp_type format_type,
@@ -194,7 +194,7 @@ inline bool parse_date_time_format(timestamp_type format_type,
{
return parse_date_time_format(format_type,
date_time_format->format.str,
- date_time_format->format.length,
+ (uint) date_time_format->format.length,
date_time_format);
}
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index f6dd48131bf..a39e0845a19 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -25,7 +25,7 @@
#include "sql_parse.h" // parse_sql
#include "parse_file.h"
#include "sp.h"
-#include "sql_base.h" // find_temporary_table
+#include "sql_base.h"
#include "sql_show.h" // append_definer, append_identifier
#include "sql_table.h" // build_table_filename,
// check_n_cut_mysql50_prefix
@@ -35,30 +35,17 @@
#include "sp_cache.h" // sp_invalidate_cache
#include <mysys_err.h>
-/*************************************************************************/
-
-template <class T>
-inline T *alloc_type(MEM_ROOT *m)
+LEX_STRING *make_lex_string(LEX_STRING *lex_str, const char* str, uint length,
+ MEM_ROOT *mem_root)
{
- return (T *) alloc_root(m, sizeof (T));
-}
-
-/*
- NOTE: Since alloc_type() is declared as inline, alloc_root() calls should
- be inlined by the compiler. So, implementation of alloc_root() is not
- needed. However, let's put the implementation in object file just in case
- of stupid MS or other old compilers.
-*/
-
-template LEX_STRING *alloc_type<LEX_STRING>(MEM_ROOT *m);
-template ulonglong *alloc_type<ulonglong>(MEM_ROOT *m);
-
-inline LEX_STRING *alloc_lex_string(MEM_ROOT *m)
-{
- return alloc_type<LEX_STRING>(m);
+ if (!(lex_str->str= strmake_root(mem_root, str, length)))
+ return 0;
+ lex_str->length= length;
+ return lex_str;
}
/*************************************************************************/
+
/**
Trigger_creation_ctx -- creation context of triggers.
*/
@@ -74,7 +61,12 @@ public:
const LEX_STRING *connection_cl_name,
const LEX_STRING *db_cl_name);
-public:
+ Trigger_creation_ctx(CHARSET_INFO *client_cs,
+ CHARSET_INFO *connection_cl,
+ CHARSET_INFO *db_cl)
+ :Stored_program_creation_ctx(client_cs, connection_cl, db_cl)
+ { }
+
virtual Stored_program_creation_ctx *clone(MEM_ROOT *mem_root)
{
return new (mem_root) Trigger_creation_ctx(m_client_cs,
@@ -88,16 +80,9 @@ protected:
return new Trigger_creation_ctx(thd);
}
-private:
Trigger_creation_ctx(THD *thd)
:Stored_program_creation_ctx(thd)
{ }
-
- Trigger_creation_ctx(CHARSET_INFO *client_cs,
- CHARSET_INFO *connection_cl,
- CHARSET_INFO *db_cl)
- :Stored_program_creation_ctx(client_cs, connection_cl, db_cl)
- { }
};
/**************************************************************************
@@ -221,6 +206,11 @@ static File_option triggers_file_parameters[]=
my_offsetof(class Table_triggers_list, db_cl_names),
FILE_OPTIONS_STRLIST
},
+ {
+ { C_STRING_WITH_LEN("created") },
+ my_offsetof(class Table_triggers_list, create_times),
+ FILE_OPTIONS_ULLLIST
+ },
{ { 0, 0 }, 0, FILE_OPTIONS_STRING }
};
@@ -235,9 +225,12 @@ File_option sql_modes_parameters=
This must be kept up to date whenever a new option is added to the list
above, as it specifies the number of required parameters of the trigger in
.trg file.
+ This defines the maximum number of parameters that is read. If there are
+ more parameters in the file they are ignored. A smaller number of
+ parameters is regarded as ok.
*/
-static const int TRG_NUM_REQUIRED_PARAMETERS= 6;
+static const int TRG_NUM_REQUIRED_PARAMETERS= 7;
/*
Structure representing contents of .TRN file which are used to support
@@ -265,20 +258,6 @@ static File_option trigname_file_parameters[]=
};
-const LEX_STRING trg_action_time_type_names[]=
-{
- { C_STRING_WITH_LEN("BEFORE") },
- { C_STRING_WITH_LEN("AFTER") }
-};
-
-const LEX_STRING trg_event_type_names[]=
-{
- { C_STRING_WITH_LEN("INSERT") },
- { C_STRING_WITH_LEN("UPDATE") },
- { C_STRING_WITH_LEN("DELETE") }
-};
-
-
class Handle_old_incorrect_sql_modes_hook: public Unknown_key_hook
{
private:
@@ -316,7 +295,7 @@ private:
Also, if possible, grabs name of the trigger being parsed so it can be
used to correctly drop problematic trigger.
*/
-class Deprecated_trigger_syntax_handler : public Internal_error_handler
+class Deprecated_trigger_syntax_handler : public Internal_error_handler
{
private:
@@ -330,7 +309,7 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- Sql_condition::enum_warning_level level,
+ Sql_condition::enum_warning_level *level,
const char* message,
Sql_condition ** cond_hdl)
{
@@ -356,6 +335,38 @@ public:
};
+Trigger::~Trigger()
+{
+ delete body;
+}
+
+
+/**
+ Call a Table_triggers_list function for all triggers
+
+ @return 0 ok
+ @return # Something went wrong. A pointer to the trigger that
+ malfunctioned is returned
+*/
+
+Trigger* Table_triggers_list::for_all_triggers(Triggers_processor func,
+ void *arg)
+{
+ for (uint i= 0; i < (uint)TRG_EVENT_MAX; i++)
+ {
+ for (uint j= 0; j < (uint)TRG_ACTION_MAX; j++)
+ {
+ for (Trigger *trigger= get_trigger(i,j) ;
+ trigger ;
+ trigger= trigger->next)
+ if ((trigger->*func)(arg))
+ return trigger;
+ }
+ }
+ return 0;
+}
+
+
/**
Create or drop trigger for table.
@@ -379,6 +390,7 @@ public:
@retval
TRUE error
*/
+
bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
{
/*
@@ -394,7 +406,6 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
bool lock_upgrade_done= FALSE;
MDL_ticket *mdl_ticket= NULL;
Query_tables_list backup;
-
DBUG_ENTER("mysql_create_or_drop_trigger");
/* Charset of the buffer for statement must be system one. */
@@ -513,7 +524,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
DBUG_ASSERT(tables->next_global == 0);
/* We do not allow creation of triggers on temporary tables. */
- if (create && find_temporary_table(thd, tables))
+ if (create && thd->find_tmp_table_share(tables))
{
my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias);
goto end;
@@ -523,7 +534,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
tables->required_type= FRMTYPE_TABLE;
/*
Also prevent DROP TRIGGER from opening temporary table which might
- shadow base table on which trigger to be dropped is defined.
+ shadow the subject table on which trigger to be dropped is defined.
*/
tables->open_type= OT_BASE_ONLY;
@@ -592,9 +603,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
end:
if (!result)
- {
result= write_bin_log(thd, TRUE, stmt_query.ptr(), stmt_query.length());
- }
/*
If we are under LOCK TABLES we should restore original state of
@@ -619,8 +628,8 @@ WSREP_ERROR_LABEL:
/**
- Build stmt_query to write it in the bin-log
- and get the trigger definer.
+ Build stmt_query to write it in the bin-log, the statement to write in
+ the trigger file and the trigger definer.
@param thd current thread context (including trigger definition in
LEX)
@@ -628,7 +637,8 @@ WSREP_ERROR_LABEL:
trigger is created.
@param[out] stmt_query after successful return, this string contains
well-formed statement for creation this trigger.
-
+ @param[out] trigger_def query to be stored in trigger file. As stmt_query,
+ but without "OR REPLACE" and no FOLLOWS/PRECEDES.
@param[out] trg_definer The triggger definer.
@param[out] trg_definer_holder Used as a buffer for definer.
@@ -640,12 +650,16 @@ WSREP_ERROR_LABEL:
simultaneously NULL-strings (non-SUID/old trigger) or valid strings
(SUID/new trigger).
*/
+
static void build_trig_stmt_query(THD *thd, TABLE_LIST *tables,
- String *stmt_query,
+ String *stmt_query, String *trigger_def,
LEX_STRING *trg_definer,
char trg_definer_holder[])
{
+ LEX_STRING stmt_definition;
LEX *lex= thd->lex;
+ uint prefix_trimmed, suffix_trimmed;
+ size_t original_length;
/*
Create a query with the full trigger definition.
@@ -653,6 +667,8 @@ static void build_trig_stmt_query(THD *thd, TABLE_LIST *tables,
*/
stmt_query->append(STRING_WITH_LEN("CREATE "));
+ trigger_def->copy(*stmt_query);
+
if (lex->create_info.or_replace())
stmt_query->append(STRING_WITH_LEN("OR REPLACE "));
@@ -661,18 +677,42 @@ static void build_trig_stmt_query(THD *thd, TABLE_LIST *tables,
/* SUID trigger */
lex->definer->set_lex_string(trg_definer, trg_definer_holder);
append_definer(thd, stmt_query, &lex->definer->user, &lex->definer->host);
+ append_definer(thd, trigger_def, &lex->definer->user, &lex->definer->host);
}
else
{
*trg_definer= empty_lex_str;
}
- LEX_STRING stmt_definition;
- stmt_definition.str= (char*) thd->lex->stmt_definition_begin;
- stmt_definition.length= thd->lex->stmt_definition_end -
- thd->lex->stmt_definition_begin;
- trim_whitespace(thd->charset(), &stmt_definition);
+
+ /* Create statement for binary logging */
+ stmt_definition.str= (char*) lex->stmt_definition_begin;
+ stmt_definition.length= (lex->stmt_definition_end -
+ lex->stmt_definition_begin);
+ original_length= stmt_definition.length;
+ trim_whitespace(thd->charset(), &stmt_definition, &prefix_trimmed);
+ suffix_trimmed= original_length - stmt_definition.length - prefix_trimmed;
+
stmt_query->append(stmt_definition.str, stmt_definition.length);
+
+ /* Create statement for storing trigger (without trigger order) */
+ if (lex->trg_chistics.ordering_clause == TRG_ORDER_NONE)
+ trigger_def->append(stmt_definition.str, stmt_definition.length);
+ else
+ {
+ /* Copy data before FOLLOWS/PRECEDES trigger_name */
+ trigger_def->append(stmt_definition.str,
+ (lex->trg_chistics.ordering_clause_begin -
+ lex->stmt_definition_begin) - prefix_trimmed);
+ /* Copy data after FOLLOWS/PRECEDES trigger_name */
+ trigger_def->append(stmt_definition.str +
+ (lex->trg_chistics.ordering_clause_end -
+ lex->stmt_definition_begin)
+ - prefix_trimmed,
+ (lex->stmt_definition_end -
+ lex->trg_chistics.ordering_clause_end) -
+ suffix_trimmed);
+ }
}
@@ -699,6 +739,7 @@ static void build_trig_stmt_query(THD *thd, TABLE_LIST *tables,
@retval
True error
*/
+
bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
String *stmt_query)
{
@@ -706,51 +747,27 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
TABLE *table= tables->table;
char file_buff[FN_REFLEN], trigname_buff[FN_REFLEN];
LEX_STRING file, trigname_file;
- LEX_STRING *trg_def;
- ulonglong *trg_sql_mode;
char trg_definer_holder[USER_HOST_BUFF_SIZE];
- LEX_STRING *trg_definer;
Item_trigger_field *trg_field;
struct st_trigname trigname;
- LEX_STRING *trg_client_cs_name;
- LEX_STRING *trg_connection_cl_name;
- LEX_STRING *trg_db_cl_name;
- sp_head *trg_body= bodies[lex->trg_chistics.event]
- [lex->trg_chistics.action_time];
+ String trigger_definition;
+ Trigger *trigger= 0;
+ bool trigger_dropped= 0;
+ DBUG_ENTER("create_trigger");
if (check_for_broken_triggers())
- return true;
+ DBUG_RETURN(true);
/* Trigger must be in the same schema as target table. */
if (my_strcasecmp(table_alias_charset, table->s->db.str,
lex->spname->m_db.str))
{
my_error(ER_TRG_IN_WRONG_SCHEMA, MYF(0));
- return true;
- }
-
- /*
- We don't allow creation of several triggers of the same type yet.
- If a trigger with the same type already exists:
- a. Throw a ER_NOT_SUPPORTED_YET error,
- if the old and the new trigger names are different;
- b. Or continue, if the old and the new trigger names are the same:
- - either to recreate the trigger on "CREATE OR REPLACE"
- - or send a "already exists" warning on "CREATE IF NOT EXISTS"
- - or send an "alredy exists" error on normal CREATE.
- */
- if (trg_body != 0 &&
- my_strcasecmp(table_alias_charset,
- trg_body->m_name.str, lex->spname->m_name.str))
- {
- my_error(ER_NOT_SUPPORTED_YET, MYF(0),
- "multiple triggers with the same action time"
- " and event for one table");
- return true;
+ DBUG_RETURN(true);
}
if (sp_process_definer(thd))
- return true;
+ DBUG_RETURN(true);
/*
Let us check if all references to fields in old/new versions of row in
@@ -779,7 +796,20 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
if (!trg_field->fixed &&
trg_field->fix_fields(thd, (Item **)0))
- return true;
+ DBUG_RETURN(true);
+ }
+
+ /* Ensure anchor trigger exists */
+ if (lex->trg_chistics.ordering_clause != TRG_ORDER_NONE)
+ {
+ if (!(trigger= find_trigger(&lex->trg_chistics.anchor_trigger_name, 0)) ||
+ trigger->event != lex->trg_chistics.event ||
+ trigger->action_time != lex->trg_chistics.action_time)
+ {
+ my_error(ER_REFERENCED_TRG_DOES_NOT_EXIST, MYF(0),
+ lex->trg_chistics.anchor_trigger_name.str);
+ DBUG_RETURN(true);
+ }
}
/*
@@ -803,10 +833,13 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
if (lex->create_info.or_replace())
{
String drop_trg_query;
- drop_trg_query.append("DROP TRIGGER ");
- drop_trg_query.append(lex->spname->m_name.str);
- if (drop_trigger(thd, tables, &drop_trg_query))
- return 1;
+ /*
+ The following can fail if the trigger is for another table or
+ there exists a .TRN file but there was no trigger for it in
+ the .TRG file
+ */
+ if (unlikely(drop_trigger(thd, tables, &drop_trg_query)))
+ DBUG_RETURN(true);
}
else if (lex->create_info.if_not_exists())
{
@@ -815,54 +848,48 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
ER_THD(thd, ER_TRG_ALREADY_EXISTS),
trigname_buff);
LEX_STRING trg_definer_tmp;
- build_trig_stmt_query(thd, tables, stmt_query,
+ String trigger_def;
+
+ /*
+ Log query with IF NOT EXISTS to binary log. This is in line with
+ CREATE TABLE IF NOT EXISTS.
+ */
+ build_trig_stmt_query(thd, tables, stmt_query, &trigger_def,
&trg_definer_tmp, trg_definer_holder);
- return false;
+ DBUG_RETURN(false);
}
else
{
my_error(ER_TRG_ALREADY_EXISTS, MYF(0));
- return true;
+ DBUG_RETURN(true);
}
}
trigname.trigger_table.str= tables->table_name;
trigname.trigger_table.length= tables->table_name_length;
- if (sql_create_definition_file(NULL, &trigname_file, &trigname_file_type,
- (uchar*)&trigname, trigname_file_parameters))
- return true;
-
/*
- Soon we will invalidate table object and thus Table_triggers_list object
- so don't care about place to which trg_def->ptr points and other
- invariants (e.g. we don't bother to update names_list)
-
- QQ: Hmm... probably we should not care about setting up active thread
- mem_root too.
+ We are not using lex->sphead here as an argument to Trigger() as we are
+ going to access lex->sphead later in build_trig_stmt_query()
*/
- if (!(trg_def= alloc_lex_string(&table->mem_root)) ||
- definitions_list.push_back(trg_def, &table->mem_root) ||
-
- !(trg_sql_mode= alloc_type<ulonglong>(&table->mem_root)) ||
- definition_modes_list.push_back(trg_sql_mode, &table->mem_root) ||
+ if (!(trigger= new (&table->mem_root) Trigger(this, 0)))
+ goto err_without_cleanup;
- !(trg_definer= alloc_lex_string(&table->mem_root)) ||
- definers_list.push_back(trg_definer, &table->mem_root) ||
-
- !(trg_client_cs_name= alloc_lex_string(&table->mem_root)) ||
- client_cs_names.push_back(trg_client_cs_name, &table->mem_root) ||
+ /* Create trigger_name.TRN file to ensure trigger name is unique */
+ if (sql_create_definition_file(NULL, &trigname_file, &trigname_file_type,
+ (uchar*)&trigname, trigname_file_parameters))
+ goto err_without_cleanup;
- !(trg_connection_cl_name= alloc_lex_string(&table->mem_root)) ||
- connection_cl_names.push_back(trg_connection_cl_name, &table->mem_root) ||
+ /* Populate the trigger object */
- !(trg_db_cl_name= alloc_lex_string(&table->mem_root)) ||
- db_cl_names.push_back(trg_db_cl_name, &table->mem_root))
- {
- goto err_with_cleanup;
- }
+ trigger->sql_mode= thd->variables.sql_mode;
+ /* Time with 2 decimals, like in MySQL 5.7 */
+ trigger->create_time= ((ulonglong) thd->query_start())*100 + thd->query_start_sec_part()/10000;
+ build_trig_stmt_query(thd, tables, stmt_query, &trigger_definition,
+ &trigger->definer, trg_definer_holder);
- *trg_sql_mode= thd->variables.sql_mode;
+ trigger->definition.str= trigger_definition.c_ptr();
+ trigger->definition.length= trigger_definition.length();
/*
Fill character set information:
@@ -870,34 +897,105 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
- connection collation contains pair {character set, collation};
- database collation contains pair {character set, collation};
*/
-
- lex_string_set(trg_client_cs_name, thd->charset()->csname);
-
- lex_string_set(trg_connection_cl_name,
+ lex_string_set(&trigger->client_cs_name, thd->charset()->csname);
+ lex_string_set(&trigger->connection_cl_name,
thd->variables.collation_connection->name);
-
- lex_string_set(trg_db_cl_name,
+ lex_string_set(&trigger->db_cl_name,
get_default_db_collation(thd, tables->db)->name);
- build_trig_stmt_query(thd, tables, stmt_query,
- trg_definer, trg_definer_holder);
+ /* Add trigger in its correct place */
+ add_trigger(lex->trg_chistics.event,
+ lex->trg_chistics.action_time,
+ lex->trg_chistics.ordering_clause,
+ &lex->trg_chistics.anchor_trigger_name,
+ trigger);
- trg_def->str= stmt_query->c_ptr_safe();
- trg_def->length= stmt_query->length();
-
- /* Create trigger definition file. */
+ /* Create trigger definition file .TRG */
+ if (unlikely(create_lists_needed_for_files(thd->mem_root)))
+ goto err_with_cleanup;
if (!sql_create_definition_file(NULL, &file, &triggers_file_type,
(uchar*)this, triggers_file_parameters))
- return false;
+ DBUG_RETURN(false);
err_with_cleanup:
+ /* Delete .TRN file */
mysql_file_delete(key_file_trn, trigname_buff, MYF(MY_WME));
- return true;
+
+err_without_cleanup:
+ delete trigger; // Safety, not critical
+
+ if (trigger_dropped)
+ {
+ String drop_trg_query;
+ drop_trg_query.append("DROP TRIGGER /* generated by failed CREATE TRIGGER */ ");
+ drop_trg_query.append(lex->spname->m_name.str);
+ /*
+ We dropped an existing trigger but were not able to recreate it
+ because of an internal error. Ensure it's also dropped on the slave.
+ */
+ write_bin_log(thd, FALSE, drop_trg_query.ptr(), drop_trg_query.length());
+ }
+ DBUG_RETURN(true);
}
/**
+ Empty all lists used to load and create the .TRG file
+*/
+
+void Table_triggers_list::empty_lists()
+{
+ definitions_list.empty();
+ definition_modes_list.empty();
+ definers_list.empty();
+ client_cs_names.empty();
+ connection_cl_names.empty();
+ db_cl_names.empty();
+ create_times.empty();
+}
+
+
+/**
+ Create list of all trigger parameters for sql_create_definition_file()
+*/
+
+struct create_lists_param
+{
+ MEM_ROOT *root;
+};
+
+
+bool Table_triggers_list::create_lists_needed_for_files(MEM_ROOT *root)
+{
+ create_lists_param param;
+
+ empty_lists();
+ param.root= root;
+
+ return for_all_triggers(&Trigger::add_to_file_list, &param);
+}
+
+
+bool Trigger::add_to_file_list(void* param_arg)
+{
+ create_lists_param *param= (create_lists_param*) param_arg;
+ MEM_ROOT *mem_root= param->root;
+
+ if (base->definitions_list.push_back(&definition, mem_root) ||
+ base->definition_modes_list.push_back(&sql_mode, mem_root) ||
+ base->definers_list.push_back(&definer, mem_root) ||
+ base->client_cs_names.push_back(&client_cs_name, mem_root) ||
+ base->connection_cl_names.push_back(&connection_cl_name, mem_root) ||
+ base->db_cl_names.push_back(&db_cl_name, mem_root) ||
+ base->create_times.push_back(&create_time, mem_root))
+ return 1;
+ return 0;
+}
+
+
+
+/**
Deletes the .TRG file for a table.
@param path char buffer of size FN_REFLEN to be used
@@ -954,17 +1052,57 @@ static bool rm_trigname_file(char *path, const char *db,
TRUE Error
*/
-static bool save_trigger_file(Table_triggers_list *triggers, const char *db,
- const char *table_name)
+bool Table_triggers_list::save_trigger_file(THD *thd, const char *db,
+ const char *table_name)
{
char file_buff[FN_REFLEN];
LEX_STRING file;
+ if (create_lists_needed_for_files(thd->mem_root))
+ return true;
+
file.length= build_table_filename(file_buff, FN_REFLEN - 1, db, table_name,
TRG_EXT, 0);
file.str= file_buff;
return sql_create_definition_file(NULL, &file, &triggers_file_type,
- (uchar*)triggers, triggers_file_parameters);
+ (uchar*) this, triggers_file_parameters);
+}
+
+
+/**
+ Find a trigger with a given name
+
+ @param name Name of trigger
+ @param remove_from_list If set, remove trigger if found
+*/
+
+Trigger *Table_triggers_list::find_trigger(const LEX_STRING *name,
+ bool remove_from_list)
+{
+ for (uint i= 0; i < (uint)TRG_EVENT_MAX; i++)
+ {
+ for (uint j= 0; j < (uint)TRG_ACTION_MAX; j++)
+ {
+ Trigger **parent, *trigger;
+
+ for (parent= &triggers[i][j];
+ (trigger= *parent);
+ parent= &trigger->next)
+ {
+ if (my_strcasecmp(table_alias_charset,
+ trigger->name.str, name->str) == 0)
+ {
+ if (remove_from_list)
+ {
+ *parent= trigger->next;
+ count--;
+ }
+ return trigger;
+ }
+ }
+ }
+ }
+ return 0;
}
@@ -989,81 +1127,65 @@ static bool save_trigger_file(Table_triggers_list *triggers, const char *db,
@retval
True error
*/
+
bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables,
String *stmt_query)
{
- const char *sp_name= thd->lex->spname->m_name.str; // alias
-
- LEX_STRING *name;
+ const LEX_STRING *sp_name= &thd->lex->spname->m_name; // alias
char path[FN_REFLEN];
+ Trigger *trigger;
- List_iterator_fast<LEX_STRING> it_name(names_list);
+ stmt_query->set(thd->query(), thd->query_length(), stmt_query->charset());
- List_iterator<ulonglong> it_mod(definition_modes_list);
- List_iterator<LEX_STRING> it_def(definitions_list);
- List_iterator<LEX_STRING> it_definer(definers_list);
- List_iterator<LEX_STRING> it_client_cs_name(client_cs_names);
- List_iterator<LEX_STRING> it_connection_cl_name(connection_cl_names);
- List_iterator<LEX_STRING> it_db_cl_name(db_cl_names);
-
- stmt_query->append(thd->query(), thd->query_length());
-
- while ((name= it_name++))
+ /* Find and delete trigger from list */
+ if (!(trigger= find_trigger(sp_name, true)))
{
- it_def++;
- it_mod++;
- it_definer++;
- it_client_cs_name++;
- it_connection_cl_name++;
- it_db_cl_name++;
-
- if (my_strcasecmp(table_alias_charset, sp_name, name->str) == 0)
- {
- /*
- Again we don't care much about other things required for
- clean trigger removing since table will be reopened anyway.
- */
- it_def.remove();
- it_mod.remove();
- it_definer.remove();
- it_client_cs_name.remove();
- it_connection_cl_name.remove();
- it_db_cl_name.remove();
-
- if (definitions_list.is_empty())
- {
- /*
- TODO: Probably instead of removing .TRG file we should move
- to archive directory but this should be done as part of
- parse_file.cc functionality (because we will need it
- elsewhere).
- */
- if (rm_trigger_file(path, tables->db, tables->table_name))
- return 1;
- }
- else
- {
- if (save_trigger_file(this, tables->db, tables->table_name))
- return 1;
- }
+ my_message(ER_TRG_DOES_NOT_EXIST, ER_THD(thd, ER_TRG_DOES_NOT_EXIST),
+ MYF(0));
+ return 1;
+ }
- if (rm_trigname_file(path, tables->db, sp_name))
- return 1;
- return 0;
- }
+ if (!count) // If no more triggers
+ {
+ /*
+ TODO: Probably instead of removing .TRG file we should move
+ to archive directory but this should be done as part of
+ parse_file.cc functionality (because we will need it
+ elsewhere).
+ */
+ if (rm_trigger_file(path, tables->db, tables->table_name))
+ return 1;
+ }
+ else
+ {
+ if (save_trigger_file(thd, tables->db, tables->table_name))
+ return 1;
}
- my_message(ER_TRG_DOES_NOT_EXIST, ER_THD(thd, ER_TRG_DOES_NOT_EXIST),
- MYF(0));
- return 1;
+ if (rm_trigname_file(path, tables->db, sp_name->str))
+ return 1;
+
+ delete trigger;
+ return 0;
}
Table_triggers_list::~Table_triggers_list()
{
- for (int i= 0; i < (int)TRG_EVENT_MAX; i++)
- for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
- delete bodies[i][j];
+ DBUG_ENTER("Table_triggers_list::~Table_triggers_list");
+
+ for (uint i= 0; i < (uint)TRG_EVENT_MAX; i++)
+ {
+ for (uint j= 0; j < (uint)TRG_ACTION_MAX; j++)
+ {
+ Trigger *next, *trigger;
+ for (trigger= get_trigger(i,j) ; trigger ; trigger= next)
+ {
+ next= trigger->next;
+ delete trigger;
+ }
+ }
+ }
/* Free blobs used in insert */
if (record0_field)
@@ -1073,6 +1195,8 @@ Table_triggers_list::~Table_triggers_list()
if (record1_field)
for (Field **fld_ptr= record1_field; *fld_ptr; fld_ptr++)
delete *fld_ptr;
+
+ DBUG_VOID_RETURN;
}
@@ -1088,13 +1212,14 @@ Table_triggers_list::~Table_triggers_list()
@retval
True error
*/
+
bool Table_triggers_list::prepare_record_accessors(TABLE *table)
{
Field **fld, **trg_fld;
- if ((bodies[TRG_EVENT_INSERT][TRG_ACTION_BEFORE] ||
- bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE])
- && (table->s->stored_fields != table->s->null_fields))
+ if ((has_triggers(TRG_EVENT_INSERT,TRG_ACTION_BEFORE) ||
+ has_triggers(TRG_EVENT_UPDATE,TRG_ACTION_BEFORE)) &&
+ (table->s->stored_fields != table->s->null_fields))
{
int null_bytes= (table->s->fields - table->s->null_fields + 7)/8;
@@ -1134,10 +1259,10 @@ bool Table_triggers_list::prepare_record_accessors(TABLE *table)
else
record0_field= table->field;
- if (bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE] ||
- bodies[TRG_EVENT_UPDATE][TRG_ACTION_AFTER] ||
- bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
- bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER])
+ if (has_triggers(TRG_EVENT_UPDATE,TRG_ACTION_BEFORE) ||
+ has_triggers(TRG_EVENT_UPDATE,TRG_ACTION_AFTER) ||
+ has_triggers(TRG_EVENT_DELETE,TRG_ACTION_BEFORE) ||
+ has_triggers(TRG_EVENT_DELETE,TRG_ACTION_AFTER))
{
if (!(record1_field= (Field **)alloc_root(&table->mem_root,
(table->s->fields + 1) *
@@ -1188,7 +1313,6 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
LEX_STRING path;
File_parser *parser;
LEX_STRING save_db;
-
DBUG_ENTER("Table_triggers_list::check_n_load");
path.length= build_table_filename(path_buff, FN_REFLEN - 1,
@@ -1199,195 +1323,55 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
if (access(path_buff, F_OK))
DBUG_RETURN(0);
- /*
- File exists so we got to load triggers.
- FIXME: A lot of things to do here e.g. how about other funcs and being
- more paranoical ?
- */
+ /* File exists so we got to load triggers */
if ((parser= sql_parse_prepare(&path, &table->mem_root, 1)))
{
if (is_equal(&triggers_file_type, parser->type()))
{
- Table_triggers_list *triggers=
- new (&table->mem_root) Table_triggers_list(table);
Handle_old_incorrect_sql_modes_hook sql_modes_hook(path.str);
+ LEX_STRING *trg_create_str;
+ ulonglong *trg_sql_mode, *trg_create_time;
+ Trigger *trigger;
+ Table_triggers_list *trigger_list=
+ new (&table->mem_root) Table_triggers_list(table);
+ if (unlikely(!trigger_list))
+ goto error;
- if (!triggers)
- DBUG_RETURN(1);
-
- /*
- We don't have the following attributes in old versions of .TRG file, so
- we should initialize the list for safety:
- - sql_modes;
- - definers;
- - character sets (client, connection, database);
- */
- triggers->definition_modes_list.empty();
- triggers->definers_list.empty();
- triggers->client_cs_names.empty();
- triggers->connection_cl_names.empty();
- triggers->db_cl_names.empty();
-
- if (parser->parse((uchar*)triggers, &table->mem_root,
+ if (parser->parse((uchar*)trigger_list, &table->mem_root,
triggers_file_parameters,
TRG_NUM_REQUIRED_PARAMETERS,
&sql_modes_hook))
- DBUG_RETURN(1);
-
- List_iterator_fast<LEX_STRING> it(triggers->definitions_list);
- LEX_STRING *trg_create_str;
- ulonglong *trg_sql_mode;
-
- if (triggers->definition_modes_list.is_empty() &&
- !triggers->definitions_list.is_empty())
- {
- /*
- It is old file format => we should fill list of sql_modes.
+ goto error;
- We use one mode (current) for all triggers, because we have not
- information about mode in old format.
- */
- if (!(trg_sql_mode= alloc_type<ulonglong>(&table->mem_root)))
- {
- DBUG_RETURN(1); // EOM
- }
- *trg_sql_mode= global_system_variables.sql_mode;
- while (it++)
- {
- if (triggers->definition_modes_list.push_back(trg_sql_mode,
- &table->mem_root))
- {
- DBUG_RETURN(1); // EOM
- }
- }
- it.rewind();
- }
+ List_iterator_fast<LEX_STRING> it(trigger_list->definitions_list);
- if (triggers->definers_list.is_empty() &&
- !triggers->definitions_list.is_empty())
+ if (!trigger_list->definitions_list.is_empty() &&
+ (trigger_list->client_cs_names.is_empty() ||
+ trigger_list->connection_cl_names.is_empty() ||
+ trigger_list->db_cl_names.is_empty()))
{
- /*
- It is old file format => we should fill list of definers.
-
- If there is no definer information, we should not switch context to
- definer when checking privileges. I.e. privileges for such triggers
- are checked for "invoker" rather than for "definer".
- */
-
- LEX_STRING *trg_definer;
-
- if (!(trg_definer= alloc_lex_string(&table->mem_root)))
- DBUG_RETURN(1); // EOM
-
- trg_definer->str= (char*) "";
- trg_definer->length= 0;
-
- while (it++)
- {
- if (triggers->definers_list.push_back(trg_definer,
- &table->mem_root))
- {
- DBUG_RETURN(1); // EOM
- }
- }
-
- it.rewind();
- }
-
- if (!triggers->definitions_list.is_empty() &&
- (triggers->client_cs_names.is_empty() ||
- triggers->connection_cl_names.is_empty() ||
- triggers->db_cl_names.is_empty()))
- {
- /*
- It is old file format => we should fill lists of character sets.
- */
-
- LEX_STRING *trg_client_cs_name;
- LEX_STRING *trg_connection_cl_name;
- LEX_STRING *trg_db_cl_name;
-
- if (!triggers->client_cs_names.is_empty() ||
- !triggers->connection_cl_names.is_empty() ||
- !triggers->db_cl_names.is_empty())
- {
- my_error(ER_TRG_CORRUPTED_FILE, MYF(0),
- (const char *) db,
- (const char *) table_name);
-
- DBUG_RETURN(1); // EOM
- }
-
+ /* We will later use the current character sets */
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRG_NO_CREATION_CTX,
ER_THD(thd, ER_TRG_NO_CREATION_CTX),
(const char*) db,
(const char*) table_name);
-
- if (!(trg_client_cs_name= alloc_lex_string(&table->mem_root)) ||
- !(trg_connection_cl_name= alloc_lex_string(&table->mem_root)) ||
- !(trg_db_cl_name= alloc_lex_string(&table->mem_root)))
- {
- DBUG_RETURN(1); // EOM
- }
-
- /*
- Backward compatibility: assume that the query is in the current
- character set.
- */
-
- lex_string_set(trg_client_cs_name,
- thd->variables.character_set_client->csname);
-
- lex_string_set(trg_connection_cl_name,
- thd->variables.collation_connection->name);
-
- lex_string_set(trg_db_cl_name,
- thd->variables.collation_database->name);
-
- while (it++)
- {
- if (triggers->client_cs_names.push_back(trg_client_cs_name,
- &table->mem_root) ||
-
- triggers->connection_cl_names.push_back(trg_connection_cl_name,
- &table->mem_root) ||
-
- triggers->db_cl_names.push_back(trg_db_cl_name,
- &table->mem_root))
- {
- DBUG_RETURN(1); // EOM
- }
- }
-
- it.rewind();
}
- DBUG_ASSERT(triggers->definition_modes_list.elements ==
- triggers->definitions_list.elements);
- DBUG_ASSERT(triggers->definers_list.elements ==
- triggers->definitions_list.elements);
- DBUG_ASSERT(triggers->client_cs_names.elements ==
- triggers->definitions_list.elements);
- DBUG_ASSERT(triggers->connection_cl_names.elements ==
- triggers->definitions_list.elements);
- DBUG_ASSERT(triggers->db_cl_names.elements ==
- triggers->definitions_list.elements);
-
- table->triggers= triggers;
+ table->triggers= trigger_list;
status_var_increment(thd->status_var.feature_trigger);
- List_iterator_fast<ulonglong> itm(triggers->definition_modes_list);
- List_iterator_fast<LEX_STRING> it_definer(triggers->definers_list);
- List_iterator_fast<LEX_STRING> it_client_cs_name(triggers->client_cs_names);
- List_iterator_fast<LEX_STRING> it_connection_cl_name(triggers->connection_cl_names);
- List_iterator_fast<LEX_STRING> it_db_cl_name(triggers->db_cl_names);
+ List_iterator_fast<ulonglong> itm(trigger_list->definition_modes_list);
+ List_iterator_fast<LEX_STRING> it_definer(trigger_list->definers_list);
+ List_iterator_fast<LEX_STRING> it_client_cs_name(trigger_list->client_cs_names);
+ List_iterator_fast<LEX_STRING> it_connection_cl_name(trigger_list->connection_cl_names);
+ List_iterator_fast<LEX_STRING> it_db_cl_name(trigger_list->db_cl_names);
+ List_iterator_fast<ulonglong> it_create_times(trigger_list->create_times);
LEX *old_lex= thd->lex;
LEX lex;
sp_rcontext *save_spcont= thd->spcont;
- ulonglong save_sql_mode= thd->variables.sql_mode;
- LEX_STRING *on_table_name;
+ sql_mode_t save_sql_mode= thd->variables.sql_mode;
thd->lex= &lex;
@@ -1397,30 +1381,55 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
while ((trg_create_str= it++))
{
sp_head *sp;
- trg_sql_mode= itm++;
- LEX_STRING *trg_definer= it_definer++;
+ sql_mode_t sql_mode;
+ LEX_STRING *trg_definer;
+ Trigger_creation_ctx *creation_ctx;
+
+ /*
+ If this is an old file format, sql_mode may not be filled in.
+ We use one mode (the current one) for all triggers, because we have
+ no information about the mode in the old format.
+ */
+ sql_mode= ((trg_sql_mode= itm++) ? *trg_sql_mode :
+ (ulonglong) global_system_variables.sql_mode);
- thd->variables.sql_mode= (ulong)*trg_sql_mode;
+ trg_create_time= it_create_times++; // May be NULL if old file
+ trg_definer= it_definer++; // May be NULL if old file
+
+ thd->variables.sql_mode= sql_mode;
Parser_state parser_state;
if (parser_state.init(thd, trg_create_str->str, trg_create_str->length))
goto err_with_lex_cleanup;
- Trigger_creation_ctx *creation_ctx=
- Trigger_creation_ctx::create(thd,
- db,
- table_name,
- it_client_cs_name++,
- it_connection_cl_name++,
- it_db_cl_name++);
+ if (!trigger_list->client_cs_names.is_empty())
+ creation_ctx= Trigger_creation_ctx::create(thd,
+ db,
+ table_name,
+ it_client_cs_name++,
+ it_connection_cl_name++,
+ it_db_cl_name++);
+ else
+ {
+ /* Old file with not stored character sets. Use current */
+ creation_ctx= new
+ Trigger_creation_ctx(thd->variables.character_set_client,
+ thd->variables.collation_connection,
+ thd->variables.collation_database);
+ }
lex_start(thd);
thd->spcont= NULL;
+ /* The following is for catching parse errors */
+ lex.trg_chistics.event= TRG_EVENT_MAX;
+ lex.trg_chistics.action_time= TRG_ACTION_MAX;
Deprecated_trigger_syntax_handler error_handler;
thd->push_internal_handler(&error_handler);
+
bool parse_error= parse_sql(thd, & parser_state, creation_ctx);
thd->pop_internal_handler();
+ DBUG_ASSERT(!parse_error || lex.sphead == 0);
/*
Not strictly necessary to invoke this method here, since we know
@@ -1432,68 +1441,73 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
*/
lex.set_trg_event_type_for_tables();
+ if (lex.sphead)
+ lex.sphead->set_info(0, 0, &lex.sp_chistics, sql_mode);
+
+ if (unlikely(!(trigger= (new (&table->mem_root)
+ Trigger(trigger_list, lex.sphead)))))
+ goto err_with_lex_cleanup;
+ lex.sphead= NULL; /* Prevent double cleanup. */
+
+ sp= trigger->body;
+
+ trigger->sql_mode= sql_mode;
+ trigger->definition= *trg_create_str;
+ trigger->create_time= trg_create_time ? *trg_create_time : 0;
+ trigger->name= sp ? sp->m_name : empty_lex_str;
+ trigger->on_table_name.str= (char*) lex.raw_trg_on_table_name_begin;
+ trigger->on_table_name.length= (lex.raw_trg_on_table_name_end -
+ lex.raw_trg_on_table_name_begin);
+
+ /* Copy pointers to character sets to make trigger easier to use */
+ lex_string_set(&trigger->client_cs_name,
+ creation_ctx->get_client_cs()->csname);
+ lex_string_set(&trigger->connection_cl_name,
+ creation_ctx->get_connection_cl()->name);
+ lex_string_set(&trigger->db_cl_name,
+ creation_ctx->get_db_cl()->name);
+
+ /* event can only be TRG_EVENT_MAX in case of fatal parse errors */
+ if (lex.trg_chistics.event != TRG_EVENT_MAX)
+ trigger_list->add_trigger(lex.trg_chistics.event,
+ lex.trg_chistics.action_time,
+ TRG_ORDER_NONE,
+ &lex.trg_chistics.anchor_trigger_name,
+ trigger);
+
if (parse_error)
{
- if (!triggers->m_has_unparseable_trigger)
- triggers->set_parse_error_message(error_handler.get_error_message());
+ LEX_STRING *name;
+
+ /*
+ In case of errors, disable all triggers for the table, but keep
+ the wrong trigger around to allow the user to fix it
+ */
+ if (!trigger_list->m_has_unparseable_trigger)
+ trigger_list->set_parse_error_message(error_handler.get_error_message());
/* Currently sphead is always set to NULL in case of a parse error */
DBUG_ASSERT(lex.sphead == 0);
- if (error_handler.get_trigger_name())
- {
- LEX_STRING *trigger_name;
- const LEX_STRING *orig_trigger_name= error_handler.get_trigger_name();
-
- if (!(trigger_name= alloc_lex_string(&table->mem_root)) ||
- !(trigger_name->str= strmake_root(&table->mem_root,
- orig_trigger_name->str,
- orig_trigger_name->length)))
- goto err_with_lex_cleanup;
-
- trigger_name->length= orig_trigger_name->length;
+ lex_end(&lex);
- if (triggers->names_list.push_back(trigger_name,
- &table->mem_root))
- goto err_with_lex_cleanup;
- }
- else
+ if ((name= error_handler.get_trigger_name()))
{
- /*
- The Table_triggers_list is not constructed as a list of
- trigger objects as one would expect, but rather of lists of
- properties of equal length. Thus, even if we don't get the
- trigger name, we still fill all in all the lists with
- placeholders as we might otherwise create a skew in the
- lists. Obviously, this has to be refactored.
- */
- LEX_STRING *empty= alloc_lex_string(&table->mem_root);
- if (!empty)
- goto err_with_lex_cleanup;
-
- empty->str= const_cast<char*>("");
- empty->length= 0;
- if (triggers->names_list.push_back(empty, &table->mem_root))
+ if (!(make_lex_string(&trigger->name, name->str,
+ name->length, &table->mem_root)))
goto err_with_lex_cleanup;
}
- lex_end(&lex);
+ trigger->definer= ((!trg_definer || !trg_definer->length) ?
+ empty_lex_str : *trg_definer);
continue;
}
- lex.sphead->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode);
-
- int event= lex.trg_chistics.event;
- int action_time= lex.trg_chistics.action_time;
-
- sp= triggers->bodies[event][action_time]= lex.sphead;
- lex.sphead= NULL; /* Prevent double cleanup. */
-
- sp->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode);
+ sp->set_info(0, 0, &lex.sp_chistics, sql_mode);
sp->set_creation_ctx(creation_ctx);
- if (!trg_definer->length)
+ if (!trg_definer || !trg_definer->length)
{
/*
This trigger was created/imported from the previous version of
- MySQL, which does not support triggers definers. We should emit
+ MySQL, which does not support trigger definers. We should emit
warning here.
*/
@@ -1509,34 +1523,26 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
*/
sp->set_definer((char*) "", 0);
+ trigger->definer= empty_lex_str;
/*
- Triggers without definer information are executed under the
+ Triggers without definer information are executed under the
authorization of the invoker.
*/
sp->m_chistics->suid= SP_IS_NOT_SUID;
}
else
+ {
sp->set_definer(trg_definer->str, trg_definer->length);
+ trigger->definer= *trg_definer;
+ }
- if (triggers->names_list.push_back(&sp->m_name, &table->mem_root))
- goto err_with_lex_cleanup;
-
- if (!(on_table_name= alloc_lex_string(&table->mem_root)))
- goto err_with_lex_cleanup;
-
- on_table_name->str= (char*) lex.raw_trg_on_table_name_begin;
- on_table_name->length= lex.raw_trg_on_table_name_end
- - lex.raw_trg_on_table_name_begin;
-
- if (triggers->on_table_names_list.push_back(on_table_name, &table->mem_root))
- goto err_with_lex_cleanup;
#ifndef DBUG_OFF
/*
Let us check that we correctly update trigger definitions when we
- rename tables with triggers.
-
+ rename tables with triggers.
+
In special cases like "RENAME TABLE `#mysql50#somename` TO `somename`"
or "ALTER DATABASE `#mysql50#somename` UPGRADE DATA DIRECTORY NAME"
we might be given table or database name with "#mysql50#" prefix (and
@@ -1561,11 +1567,9 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
/*
Gather all Item_trigger_field objects representing access to fields
in old/new versions of row in trigger into lists containing all such
- objects for the triggers with same action and timing.
+ objects for the triggers with the same action and timing.
*/
- triggers->trigger_fields[lex.trg_chistics.event]
- [lex.trg_chistics.action_time]=
- lex.trg_table_fields.first;
+ trigger->trigger_fields= lex.trg_table_fields.first;
/*
Also let us bind these objects to Field objects in table being
opened.
@@ -1580,8 +1584,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
trg_field= trg_field->next_trg_field)
{
trg_field->setup_field(thd, table,
- &triggers->subject_table_grants[lex.trg_chistics.event]
- [lex.trg_chistics.action_time]);
+ &trigger->subject_table_grants);
}
lex_end(&lex);
@@ -1591,43 +1594,88 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
- if (!names_only && triggers->prepare_record_accessors(table))
- DBUG_RETURN(1);
+ if (!names_only && trigger_list->prepare_record_accessors(table))
+ goto error;
+ /* Ensure no one is accidentally using the temporary load lists */
+ trigger_list->empty_lists();
DBUG_RETURN(0);
err_with_lex_cleanup:
- // QQ: anything else ?
lex_end(&lex);
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
thd->reset_db(save_db.str, save_db.length);
- DBUG_RETURN(1);
+ /* Fall through to error */
}
+ }
+error:
+ if (!thd->is_error())
+ {
/*
We don't care about this error message much because .TRG files will
be merged into .FRM anyway.
*/
my_error(ER_WRONG_OBJECT, MYF(0),
table_name, TRG_EXT + 1, "TRIGGER");
- DBUG_RETURN(1);
}
-
DBUG_RETURN(1);
}
/**
+ Add trigger in the correct position according to ordering clause
+ Also update action order
+
+ If the anchor trigger doesn't exist, add the new trigger last.
+*/
+
+void Table_triggers_list::add_trigger(trg_event_type event,
+ trg_action_time_type action_time,
+ trigger_order_type ordering_clause,
+ LEX_STRING *anchor_trigger_name,
+ Trigger *trigger)
+{
+ Trigger **parent= &triggers[event][action_time];
+ uint position= 0;
+
+ for ( ; *parent ; parent= &(*parent)->next, position++)
+ {
+ if (ordering_clause != TRG_ORDER_NONE &&
+ !my_strcasecmp(table_alias_charset, anchor_trigger_name->str,
+ (*parent)->name.str))
+ {
+ if (ordering_clause == TRG_ORDER_FOLLOWS)
+ {
+ parent= &(*parent)->next; // Add after this one
+ position++;
+ }
+ break;
+ }
+ }
+
+ /* Add trigger where parent points to */
+ trigger->next= *parent;
+ *parent= trigger;
+
+ /* Update action_orders and position */
+ trigger->event= event;
+ trigger->action_time= action_time;
+ trigger->action_order= ++position;
+ while ((trigger= trigger->next))
+ trigger->action_order= ++position;
+
+ count++;
+}
+
+
+/**
Obtains and returns trigger metadata.
@param thd current thread context
- @param event trigger event type
- @param time_type trigger action time
- @param trigger_name returns name of trigger
@param trigger_stmt returns statement of trigger
- @param sql_mode returns sql_mode of trigger
@param definer returns definer/creator of trigger. The caller is
responsible to allocate enough space for storing
definer information.
@@ -1638,106 +1686,34 @@ err_with_lex_cleanup:
True error
*/
-bool Table_triggers_list::get_trigger_info(THD *thd, trg_event_type event,
- trg_action_time_type time_type,
- LEX_STRING *trigger_name,
- LEX_STRING *trigger_stmt,
- ulong *sql_mode,
- LEX_STRING *definer,
- LEX_STRING *client_cs_name,
- LEX_STRING *connection_cl_name,
- LEX_STRING *db_cl_name)
+void Trigger::get_trigger_info(LEX_STRING *trigger_stmt,
+ LEX_STRING *trigger_body, LEX_STRING *definer)
{
- sp_head *body;
DBUG_ENTER("get_trigger_info");
- if ((body= bodies[event][time_type]))
- {
- Stored_program_creation_ctx *creation_ctx=
- bodies[event][time_type]->get_creation_ctx();
-
- *trigger_name= body->m_name;
- *trigger_stmt= body->m_body_utf8;
- *sql_mode= body->m_sql_mode;
-
- if (body->m_chistics->suid == SP_IS_NOT_SUID)
- {
- definer->str[0]= 0;
- definer->length= 0;
- }
- else
- {
- definer->length= strxmov(definer->str, body->m_definer_user.str, "@",
- body->m_definer_host.str, NullS) - definer->str;
- }
- lex_string_set(client_cs_name,
- creation_ctx->get_client_cs()->csname);
-
- lex_string_set(connection_cl_name,
- creation_ctx->get_connection_cl()->name);
-
- lex_string_set(db_cl_name,
- creation_ctx->get_db_cl()->name);
-
- DBUG_RETURN(0);
+ *trigger_stmt= definition;
+ if (!body)
+ {
+ /* Parse error */
+ *trigger_body= definition;
+ *definer= empty_lex_str;
+ DBUG_VOID_RETURN;
}
- DBUG_RETURN(1);
-}
-
+ *trigger_body= body->m_body_utf8;
-void Table_triggers_list::get_trigger_info(THD *thd,
- int trigger_idx,
- LEX_STRING *trigger_name,
- ulonglong *sql_mode,
- LEX_STRING *sql_original_stmt,
- LEX_STRING *client_cs_name,
- LEX_STRING *connection_cl_name,
- LEX_STRING *db_cl_name)
-{
- List_iterator_fast<LEX_STRING> it_trigger_name(names_list);
- List_iterator_fast<ulonglong> it_sql_mode(definition_modes_list);
- List_iterator_fast<LEX_STRING> it_sql_orig_stmt(definitions_list);
- List_iterator_fast<LEX_STRING> it_client_cs_name(client_cs_names);
- List_iterator_fast<LEX_STRING> it_connection_cl_name(connection_cl_names);
- List_iterator_fast<LEX_STRING> it_db_cl_name(db_cl_names);
-
- for (int i = 0; i < trigger_idx; ++i)
+ if (body->m_chistics->suid == SP_IS_NOT_SUID)
{
- it_trigger_name.next_fast();
- it_sql_mode.next_fast();
- it_sql_orig_stmt.next_fast();
-
- it_client_cs_name.next_fast();
- it_connection_cl_name.next_fast();
- it_db_cl_name.next_fast();
+ *definer= empty_lex_str;
}
-
- *trigger_name= *(it_trigger_name++);
- *sql_mode= *(it_sql_mode++);
- *sql_original_stmt= *(it_sql_orig_stmt++);
-
- *client_cs_name= *(it_client_cs_name++);
- *connection_cl_name= *(it_connection_cl_name++);
- *db_cl_name= *(it_db_cl_name++);
-}
-
-
-int Table_triggers_list::find_trigger_by_name(const LEX_STRING *trg_name)
-{
- List_iterator_fast<LEX_STRING> it(names_list);
-
- for (int i = 0; ; ++i)
+ else
{
- LEX_STRING *cur_name= it++;
-
- if (!cur_name)
- return -1;
-
- if (strcmp(cur_name->str, trg_name->str) == 0)
- return i;
+ definer->length= strxmov(definer->str, body->m_definer_user.str, "@",
+ body->m_definer_host.str, NullS) - definer->str;
}
+ DBUG_VOID_RETURN;
}
+
/**
Find trigger's table from trigger identifier and add it to
the statement table list.
@@ -1828,38 +1804,37 @@ bool Table_triggers_list::drop_all_triggers(THD *thd, char *db, char *name)
}
if (table.triggers)
{
- LEX_STRING *trigger;
- List_iterator_fast<LEX_STRING> it_name(table.triggers->names_list);
-
- while ((trigger= it_name++))
+ for (uint i= 0; i < (uint)TRG_EVENT_MAX; i++)
{
- /*
- Trigger, which body we failed to parse during call
- Table_triggers_list::check_n_load(), might be missing name.
- Such triggers have zero-length name and are skipped here.
- */
- if (trigger->length == 0)
- continue;
- if (rm_trigname_file(path, db, trigger->str))
+ for (uint j= 0; j < (uint)TRG_ACTION_MAX; j++)
{
- /*
- Instead of immediately bailing out with error if we were unable
- to remove .TRN file we will try to drop other files.
- */
- result= 1;
- continue;
+ Trigger *trigger;
+ for (trigger= table.triggers->get_trigger(i,j) ;
+ trigger ;
+ trigger= trigger->next)
+ {
+ /*
+ Trigger, which body we failed to parse during call
+ Table_triggers_list::check_n_load(), might be missing name.
+ Such triggers have zero-length name and are skipped here.
+ */
+ if (trigger->name.length &&
+ rm_trigname_file(path, db, trigger->name.str))
+ {
+ /*
+ Instead of immediately bailing out with error if we were unable
+ to remove .TRN file we will try to drop other files.
+ */
+ result= 1;
+ }
+ }
}
}
-
if (rm_trigger_file(path, db, name))
- {
result= 1;
- goto end;
- }
+ delete table.triggers;
}
end:
- if (table.triggers)
- delete table.triggers;
free_root(&table.mem_root, MYF(0));
DBUG_RETURN(result);
}
@@ -1881,6 +1856,16 @@ end:
TRUE Failure
*/
+struct change_table_name_param
+{
+ THD *thd;
+ const char *old_db_name;
+ const char *new_db_name;
+ LEX_STRING *new_table_name;
+ Trigger *stopper;
+};
+
+
bool
Table_triggers_list::change_table_name_in_triggers(THD *thd,
const char *old_db_name,
@@ -1888,57 +1873,23 @@ Table_triggers_list::change_table_name_in_triggers(THD *thd,
LEX_STRING *old_table_name,
LEX_STRING *new_table_name)
{
+ struct change_table_name_param param;
+ sql_mode_t save_sql_mode= thd->variables.sql_mode;
char path_buff[FN_REFLEN];
- LEX_STRING *def, *on_table_name, new_def;
- ulonglong save_sql_mode= thd->variables.sql_mode;
- List_iterator_fast<LEX_STRING> it_def(definitions_list);
- List_iterator_fast<LEX_STRING> it_on_table_name(on_table_names_list);
- List_iterator_fast<ulonglong> it_mode(definition_modes_list);
- size_t on_q_table_name_len, before_on_len;
- String buff;
- DBUG_ASSERT(definitions_list.elements == on_table_names_list.elements &&
- definitions_list.elements == definition_modes_list.elements);
+ param.thd= thd;
+ param.new_table_name= new_table_name;
- while ((def= it_def++))
- {
- on_table_name= it_on_table_name++;
- thd->variables.sql_mode= (ulong) *(it_mode++);
-
- /* Construct CREATE TRIGGER statement with new table name. */
- buff.length(0);
-
- /* WARNING: 'on_table_name' is supposed to point inside 'def' */
- DBUG_ASSERT(on_table_name->str > def->str);
- DBUG_ASSERT(on_table_name->str < (def->str + def->length));
- before_on_len= on_table_name->str - def->str;
-
- buff.append(def->str, before_on_len);
- buff.append(STRING_WITH_LEN("ON "));
- append_identifier(thd, &buff, new_table_name->str, new_table_name->length);
- buff.append(STRING_WITH_LEN(" "));
- on_q_table_name_len= buff.length() - before_on_len;
- buff.append(on_table_name->str + on_table_name->length,
- def->length - (before_on_len + on_table_name->length));
- /*
- It is OK to allocate some memory on table's MEM_ROOT since this
- table instance will be thrown out at the end of rename anyway.
- */
- new_def.str= (char*) memdup_root(&trigger_table->mem_root, buff.ptr(),
- buff.length());
- new_def.length= buff.length();
- on_table_name->str= new_def.str + before_on_len;
- on_table_name->length= on_q_table_name_len;
- *def= new_def;
- }
+ for_all_triggers(&Trigger::change_table_name, &param);
thd->variables.sql_mode= save_sql_mode;
if (thd->is_fatal_error)
return TRUE; /* OOM */
- if (save_trigger_file(this, new_db_name, new_table_name->str))
+ if (save_trigger_file(thd, new_db_name, new_table_name->str))
return TRUE;
+
if (rm_trigger_file(path_buff, old_db_name, old_table_name->str))
{
(void) rm_trigger_file(path_buff, new_db_name, new_table_name->str);
@@ -1948,6 +1899,47 @@ Table_triggers_list::change_table_name_in_triggers(THD *thd,
}
+bool Trigger::change_table_name(void* param_arg)
+{
+ change_table_name_param *param= (change_table_name_param*) param_arg;
+ THD *thd= param->thd;
+ LEX_STRING *new_table_name= param->new_table_name;
+
+ LEX_STRING *def= &definition, new_def;
+ size_t on_q_table_name_len, before_on_len;
+ String buff;
+
+ thd->variables.sql_mode= sql_mode;
+
+ /* Construct CREATE TRIGGER statement with new table name. */
+ buff.length(0);
+
+ /* WARNING: 'on_table_name' is supposed to point inside 'def' */
+ DBUG_ASSERT(on_table_name.str > def->str);
+ DBUG_ASSERT(on_table_name.str < (def->str + def->length));
+ before_on_len= on_table_name.str - def->str;
+
+ buff.append(def->str, before_on_len);
+ buff.append(STRING_WITH_LEN("ON "));
+ append_identifier(thd, &buff, new_table_name->str, new_table_name->length);
+ buff.append(STRING_WITH_LEN(" "));
+ on_q_table_name_len= buff.length() - before_on_len;
+ buff.append(on_table_name.str + on_table_name.length,
+ def->length - (before_on_len + on_table_name.length));
+ /*
+ It is OK to allocate some memory on table's MEM_ROOT since this
+ table instance will be thrown out at the end of rename anyway.
+ */
+ new_def.str= (char*) memdup_root(&base->trigger_table->mem_root, buff.ptr(),
+ buff.length());
+ new_def.length= buff.length();
+ on_table_name.str= new_def.str + before_on_len;
+ on_table_name.length= on_q_table_name_len;
+ definition= new_def;
+ return 0;
+}
+
+
/**
Iterate though Table_triggers_list::names_list list and update
.TRN files after renaming triggers' subject table.
@@ -1965,42 +1957,56 @@ Table_triggers_list::change_table_name_in_triggers(THD *thd,
for which update failed.
*/
-LEX_STRING*
+Trigger *
Table_triggers_list::change_table_name_in_trignames(const char *old_db_name,
const char *new_db_name,
LEX_STRING *new_table_name,
- LEX_STRING *stopper)
+ Trigger *trigger)
+{
+ struct change_table_name_param param;
+ param.old_db_name= old_db_name;
+ param.new_db_name= new_db_name;
+ param.new_table_name= new_table_name;
+ param.stopper= trigger;
+
+ return for_all_triggers(&Trigger::change_on_table_name, &param);
+}
+
+
+bool Trigger::change_on_table_name(void* param_arg)
{
+ change_table_name_param *param= (change_table_name_param*) param_arg;
+
char trigname_buff[FN_REFLEN];
struct st_trigname trigname;
LEX_STRING trigname_file;
- LEX_STRING *trigger;
- List_iterator_fast<LEX_STRING> it_name(names_list);
- while ((trigger= it_name++) != stopper)
+ if (param->stopper == this)
+ return 0; // Stop processing
+
+ trigname_file.length= build_table_filename(trigname_buff, FN_REFLEN-1,
+ param->new_db_name, name.str,
+ TRN_EXT, 0);
+ trigname_file.str= trigname_buff;
+
+ trigname.trigger_table= *param->new_table_name;
+
+ if (base->create_lists_needed_for_files(current_thd->mem_root))
+ return true;
+
+ if (sql_create_definition_file(NULL, &trigname_file, &trigname_file_type,
+ (uchar*)&trigname, trigname_file_parameters))
+ return true;
+
+ /* Remove stale .TRN file in case of database upgrade */
+ if (param->old_db_name)
{
- trigname_file.length= build_table_filename(trigname_buff, FN_REFLEN-1,
- new_db_name, trigger->str,
- TRN_EXT, 0);
- trigname_file.str= trigname_buff;
-
- trigname.trigger_table= *new_table_name;
-
- if (sql_create_definition_file(NULL, &trigname_file, &trigname_file_type,
- (uchar*)&trigname, trigname_file_parameters))
- return trigger;
-
- /* Remove stale .TRN file in case of database upgrade */
- if (old_db_name)
+ if (rm_trigname_file(trigname_buff, param->old_db_name, name.str))
{
- if (rm_trigname_file(trigname_buff, old_db_name, trigger->str))
- {
- (void) rm_trigname_file(trigname_buff, new_db_name, trigger->str);
- return trigger;
- }
+ (void) rm_trigname_file(trigname_buff, param->new_db_name, name.str);
+ return 1;
}
}
-
return 0;
}
@@ -2034,8 +2040,8 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
{
TABLE table;
bool result= 0;
- bool upgrading50to51= FALSE;
- LEX_STRING *err_trigname;
+ bool upgrading50to51= FALSE;
+ Trigger *err_trigger;
DBUG_ENTER("change_table_name");
table.reset();
@@ -2070,7 +2076,7 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
moving table with them between two schemas raises too many questions.
(E.g. what should happen if in new schema we already have trigger
with same name ?).
-
+
In case of "ALTER DATABASE `#mysql50#db1` UPGRADE DATA DIRECTORY NAME"
we will be given table name with "#mysql50#" prefix
To remove this prefix we use check_n_cut_mysql50_prefix().
@@ -2078,7 +2084,7 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
if (my_strcasecmp(table_alias_charset, db, new_db))
{
char dbname[SAFE_NAME_LEN + 1];
- if (check_n_cut_mysql50_prefix(db, dbname, sizeof(dbname)) &&
+ if (check_n_cut_mysql50_prefix(db, dbname, sizeof(dbname)) &&
!my_strcasecmp(table_alias_charset, dbname, new_db))
{
upgrading50to51= TRUE;
@@ -2097,8 +2103,8 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
result= 1;
goto end;
}
- if ((err_trigname= table.triggers->change_table_name_in_trignames(
- upgrading50to51 ? db : NULL,
+ if ((err_trigger= table.triggers->
+ change_table_name_in_trignames( upgrading50to51 ? db : NULL,
new_db, &new_table_name, 0)))
{
/*
@@ -2109,7 +2115,7 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
*/
(void) table.triggers->change_table_name_in_trignames(
upgrading50to51 ? new_db : NULL, db,
- &old_table_name, err_trigname);
+ &old_table_name, err_trigger);
(void) table.triggers->change_table_name_in_triggers(
thd, db, new_db,
&new_table_name, &old_table_name);
@@ -2117,7 +2123,7 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
goto end;
}
}
-
+
end:
delete table.triggers;
free_root(&table.mem_root, MYF(0));
@@ -2148,17 +2154,15 @@ bool Table_triggers_list::process_triggers(THD *thd,
{
bool err_status;
Sub_statement_state statement_state;
- sp_head *sp_trigger= bodies[event][time_type];
+ Trigger *trigger;
SELECT_LEX *save_current_select;
if (check_for_broken_triggers())
- return true;
+ return TRUE;
- if (sp_trigger == NULL)
+ if (!(trigger= get_trigger(event, time_type)))
return FALSE;
- status_var_increment(thd->status_var.executed_triggers);
-
if (old_row_is_record1)
{
old_field= record1_field;
@@ -2185,12 +2189,16 @@ bool Table_triggers_list::process_triggers(THD *thd,
in case of failure during trigger execution.
*/
save_current_select= thd->lex->current_select;
- thd->lex->current_select= NULL;
- err_status=
- sp_trigger->execute_trigger(thd,
- &trigger_table->s->db,
- &trigger_table->s->table_name,
- &subject_table_grants[event][time_type]);
+
+ do {
+ thd->lex->current_select= NULL;
+ err_status=
+ trigger->body->execute_trigger(thd,
+ &trigger_table->s->db,
+ &trigger_table->s->table_name,
+ &trigger->subject_table_grants);
+ status_var_increment(thd->status_var.executed_triggers);
+ } while (!err_status && (trigger= trigger->next));
thd->lex->current_select= save_current_select;
thd->restore_sub_statement_state(&statement_state);
@@ -2228,11 +2236,15 @@ add_tables_and_routines_for_triggers(THD *thd,
{
for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
{
- /* We can have only one trigger per action type currently */
- sp_head *trigger= table_list->table->triggers->bodies[i][j];
+ Trigger *triggers= table_list->table->triggers->get_trigger(i,j);
- if (trigger)
+ for ( ; triggers ; triggers= triggers->next)
{
+ sp_head *trigger= triggers->body;
+
+ if (!triggers->body) // Parse error
+ continue;
+
MDL_key key(MDL_key::TRIGGER, trigger->m_db.str, trigger->m_name.str);
if (sp_add_used_routine(prelocking_ctx, thd->stmt_arena,
@@ -2255,37 +2267,6 @@ add_tables_and_routines_for_triggers(THD *thd,
/**
- Check if any of the marked fields are used in the trigger.
-
- @param used_fields Bitmap over fields to check
- @param event_type Type of event triggers for which we are going to inspect
- @param action_time Type of trigger action time we are going to inspect
-*/
-
-bool Table_triggers_list::is_fields_updated_in_trigger(MY_BITMAP *used_fields,
- trg_event_type event_type,
- trg_action_time_type action_time)
-{
- Item_trigger_field *trg_field;
- sp_head *sp= bodies[event_type][action_time];
- DBUG_ASSERT(used_fields->n_bits == trigger_table->s->fields);
-
- for (trg_field= sp->m_trg_table_fields.first; trg_field;
- trg_field= trg_field->next_trg_field)
- {
- /* We cannot check fields which does not present in table. */
- if (trg_field->field_idx != (uint)-1)
- {
- if (bitmap_is_set(used_fields, trg_field->field_idx) &&
- trg_field->get_settable_routine_parameter())
- return true;
- }
- }
- return false;
-}
-
-
-/**
Mark fields of subject table which we read/set in its triggers
as such.
@@ -2302,25 +2283,34 @@ void Table_triggers_list::mark_fields_used(trg_event_type event)
{
int action_time;
Item_trigger_field *trg_field;
+ DBUG_ENTER("Table_triggers_list::mark_fields_used");
for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++)
{
- for (trg_field= trigger_fields[event][action_time]; trg_field;
- trg_field= trg_field->next_trg_field)
+ for (Trigger *trigger= get_trigger(event,action_time);
+ trigger ;
+ trigger= trigger->next)
{
- /* We cannot mark fields which does not present in table. */
- if (trg_field->field_idx != (uint)-1)
+ for (trg_field= trigger->trigger_fields;
+ trg_field;
+ trg_field= trg_field->next_trg_field)
{
- bitmap_set_bit(trigger_table->read_set, trg_field->field_idx);
- if (trg_field->get_settable_routine_parameter())
- bitmap_set_bit(trigger_table->write_set, trg_field->field_idx);
- if (trigger_table->field[trg_field->field_idx]->vcol_info)
- trigger_table->mark_virtual_col(trigger_table->
- field[trg_field->field_idx]);
+ /* We cannot mark fields which does not present in table. */
+ if (trg_field->field_idx != (uint)-1)
+ {
+ DBUG_PRINT("info", ("marking field: %d", trg_field->field_idx));
+ bitmap_set_bit(trigger_table->read_set, trg_field->field_idx);
+ if (trg_field->get_settable_routine_parameter())
+ bitmap_set_bit(trigger_table->write_set, trg_field->field_idx);
+ if (trigger_table->field[trg_field->field_idx]->vcol_info)
+ trigger_table->mark_virtual_col(trigger_table->
+ field[trg_field->field_idx]);
+ }
}
}
}
trigger_table->file->column_bitmaps_signal();
+ DBUG_VOID_RETURN;
}
@@ -2531,4 +2521,3 @@ bool load_table_name_for_trigger(THD *thd,
DBUG_RETURN(FALSE);
}
-
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index fa858a0582b..9d1c79cc7cf 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -3,6 +3,7 @@
/*
Copyright (c) 2004, 2011, Oracle and/or its affiliates.
+ Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -51,22 +52,92 @@ enum trg_action_time_type
TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER= 1, TRG_ACTION_MAX
};
+enum trigger_order_type
+{
+ TRG_ORDER_NONE= 0,
+ TRG_ORDER_FOLLOWS= 1,
+ TRG_ORDER_PRECEDES= 2
+};
+
+
+struct st_trg_execution_order
+{
+ /**
+ FOLLOWS or PRECEDES as specified in the CREATE TRIGGER statement.
+ */
+ enum trigger_order_type ordering_clause;
+
+ /**
+ Trigger name referenced in the FOLLOWS/PRECEDES clause of the
+ CREATE TRIGGER statement.
+ */
+ LEX_STRING anchor_trigger_name;
+};
-/**
- This class holds all information about triggers of table.
- TODO: Will it be merged into TABLE in the future ?
+class Table_triggers_list;
+
+/**
+ The trigger object
*/
-class Table_triggers_list: public Sql_alloc
+class Trigger :public Sql_alloc
{
- /** Triggers as SPs grouped by event, action_time */
- sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
+public:
+ Trigger(Table_triggers_list *base_arg, sp_head *code):
+ base(base_arg), body(code), next(0), trigger_fields(0), action_order(0)
+ {
+ bzero((char *)&subject_table_grants, sizeof(subject_table_grants));
+ }
+ ~Trigger();
+ Table_triggers_list *base;
+ sp_head *body;
+ Trigger *next; /* Next trigger of same type */
+
/**
Heads of the lists linking items for all fields used in triggers
grouped by event and action_time.
*/
- Item_trigger_field *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX];
+ Item_trigger_field *trigger_fields;
+ LEX_STRING name;
+ LEX_STRING on_table_name; /* Raw table name */
+ LEX_STRING definition;
+ LEX_STRING definer;
+
+ /* Character sets used */
+ LEX_STRING client_cs_name;
+ LEX_STRING connection_cl_name;
+ LEX_STRING db_cl_name;
+
+ GRANT_INFO subject_table_grants;
+ sql_mode_t sql_mode;
+ /* Store create time. Can't be mysql_time_t as this holds also sub seconds */
+ ulonglong create_time;
+ trg_event_type event;
+ trg_action_time_type action_time;
+ uint action_order;
+
+ bool is_fields_updated_in_trigger(MY_BITMAP *used_fields);
+ void get_trigger_info(LEX_STRING *stmt, LEX_STRING *body,
+ LEX_STRING *definer);
+ /* Functions executed over each active trigger */
+ bool change_on_table_name(void* param_arg);
+ bool change_table_name(void* param_arg);
+ bool add_to_file_list(void* param_arg);
+};
+
+typedef bool (Trigger::*Triggers_processor)(void *arg);
+
+/**
+ This class holds all information about triggers of table.
+*/
+
+class Table_triggers_list: public Sql_alloc
+{
+ friend class Trigger;
+
+ /* Points to first trigger for a certain type */
+ Trigger *triggers[TRG_EVENT_MAX][TRG_ACTION_MAX];
/**
Copy of TABLE::Field array which all fields made nullable
(using extra_null_bitmap, if needed). Used for NEW values in
@@ -90,22 +161,6 @@ class Table_triggers_list: public Sql_alloc
/* TABLE instance for which this triggers list object was created */
TABLE *trigger_table;
- /**
- Names of triggers.
- Should correspond to order of triggers on definitions_list,
- used in CREATE/DROP TRIGGER for looking up trigger by name.
- */
- List<LEX_STRING> names_list;
- /**
- List of "ON table_name" parts in trigger definitions, used for
- updating trigger definitions during RENAME TABLE.
- */
- List<LEX_STRING> on_table_names_list;
-
- /**
- Grant information for each trigger (pair: subject table, trigger definer).
- */
- GRANT_INFO subject_table_grants[TRG_EVENT_MAX][TRG_ACTION_MAX];
/**
This flag indicates that one of the triggers was not parsed successfully,
@@ -127,6 +182,7 @@ class Table_triggers_list: public Sql_alloc
the trigger file.
*/
char m_parse_error_message[MYSQL_ERRMSG_SIZE];
+ uint count; /* Number of triggers */
public:
/**
@@ -138,6 +194,8 @@ public:
List of sql modes for triggers
*/
List<ulonglong> definition_modes_list;
+ /** Create times for triggers */
+ List<ulonglong> create_times;
List<LEX_STRING> definers_list;
@@ -152,11 +210,9 @@ public:
Table_triggers_list(TABLE *table_arg)
:record0_field(0), extra_null_bitmap(0), record1_field(0),
trigger_table(table_arg),
- m_has_unparseable_trigger(false)
+ m_has_unparseable_trigger(false), count(0)
{
- bzero((char *)bodies, sizeof(bodies));
- bzero((char *)trigger_fields, sizeof(trigger_fields));
- bzero((char *)&subject_table_grants, sizeof(subject_table_grants));
+ bzero((char *) triggers, sizeof(triggers));
}
~Table_triggers_list();
@@ -165,26 +221,9 @@ public:
bool process_triggers(THD *thd, trg_event_type event,
trg_action_time_type time_type,
bool old_row_is_record1);
-
- bool get_trigger_info(THD *thd, trg_event_type event,
- trg_action_time_type time_type,
- LEX_STRING *trigger_name, LEX_STRING *trigger_stmt,
- ulong *sql_mode,
- LEX_STRING *definer,
- LEX_STRING *client_cs_name,
- LEX_STRING *connection_cl_name,
- LEX_STRING *db_cl_name);
-
- void get_trigger_info(THD *thd,
- int trigger_idx,
- LEX_STRING *trigger_name,
- ulonglong *sql_mode,
- LEX_STRING *sql_original_stmt,
- LEX_STRING *client_cs_name,
- LEX_STRING *connection_cl_name,
- LEX_STRING *db_cl_name);
-
- int find_trigger_by_name(const LEX_STRING *trigger_name);
+ void empty_lists();
+ bool create_lists_needed_for_files(MEM_ROOT *root);
+ bool save_trigger_file(THD *thd, const char *db, const char *table_name);
static bool check_n_load(THD *thd, const char *db, const char *table_name,
TABLE *table, bool names_only);
@@ -194,15 +233,32 @@ public:
const char *old_table,
const char *new_db,
const char *new_table);
+ void add_trigger(trg_event_type event_type,
+ trg_action_time_type action_time,
+ trigger_order_type ordering_clause,
+ LEX_STRING *anchor_trigger_name,
+ Trigger *trigger);
+ Trigger *get_trigger(trg_event_type event_type,
+ trg_action_time_type action_time)
+ {
+ return triggers[event_type][action_time];
+ }
+ /* Simpler version of the above, to avoid casts in the code */
+ Trigger *get_trigger(uint event_type, uint action_time)
+ {
+ return get_trigger((trg_event_type) event_type,
+ (trg_action_time_type) action_time);
+ }
+
bool has_triggers(trg_event_type event_type,
trg_action_time_type action_time)
{
- return (bodies[event_type][action_time] != NULL);
+ return get_trigger(event_type,action_time) != 0;
}
bool has_delete_triggers()
{
- return (bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
- bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER]);
+ return (has_triggers(TRG_EVENT_DELETE,TRG_ACTION_BEFORE) ||
+ has_triggers(TRG_EVENT_DELETE,TRG_ACTION_AFTER));
}
void mark_fields_used(trg_event_type event);
@@ -215,24 +271,24 @@ public:
Query_tables_list *prelocking_ctx,
TABLE_LIST *table_list);
- bool is_fields_updated_in_trigger(MY_BITMAP *used_fields,
- trg_event_type event_type,
- trg_action_time_type action_time);
-
Field **nullable_fields() { return record0_field; }
void reset_extra_null_bitmap()
{
- int null_bytes= (trigger_table->s->stored_fields -
- trigger_table->s->null_fields + 7)/8;
+ size_t null_bytes= (trigger_table->s->stored_fields -
+ trigger_table->s->null_fields + 7)/8;
bzero(extra_null_bitmap, null_bytes);
}
+ Trigger *find_trigger(const LEX_STRING *name, bool remove_from_list);
+
+ Trigger* for_all_triggers(Triggers_processor func, void *arg);
+
private:
bool prepare_record_accessors(TABLE *table);
- LEX_STRING* change_table_name_in_trignames(const char *old_db_name,
- const char *new_db_name,
- LEX_STRING *new_table_name,
- LEX_STRING *stopper);
+ Trigger *change_table_name_in_trignames(const char *old_db_name,
+ const char *new_db_name,
+ LEX_STRING *new_table_name,
+ Trigger *trigger);
bool change_table_name_in_triggers(THD *thd,
const char *old_db_name,
const char *new_db_name,
@@ -257,9 +313,6 @@ inline Field **TABLE::field_to_fill()
}
-extern const LEX_STRING trg_action_time_type_names[];
-extern const LEX_STRING trg_event_type_names[];
-
bool add_table_for_trigger(THD *thd,
const sp_name *trg_name,
bool continue_if_not_exist,
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index 7d52419ae18..d172dee56b6 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -16,6 +16,9 @@
#include "sql_type.h"
#include "sql_const.h"
+#include "sql_class.h"
+#include "item.h"
+#include "log.h"
static Type_handler_tiny type_handler_tiny;
static Type_handler_short type_handler_short;
@@ -27,9 +30,13 @@ static Type_handler_bit type_handler_bit;
static Type_handler_float type_handler_float;
static Type_handler_double type_handler_double;
static Type_handler_time type_handler_time;
+static Type_handler_time2 type_handler_time2;
static Type_handler_date type_handler_date;
+static Type_handler_newdate type_handler_newdate;
static Type_handler_datetime type_handler_datetime;
+static Type_handler_datetime2 type_handler_datetime2;
static Type_handler_timestamp type_handler_timestamp;
+static Type_handler_timestamp2 type_handler_timestamp2;
static Type_handler_olddecimal type_handler_olddecimal;
static Type_handler_newdecimal type_handler_newdecimal;
static Type_handler_null type_handler_null;
@@ -39,7 +46,11 @@ static Type_handler_tiny_blob type_handler_tiny_blob;
static Type_handler_medium_blob type_handler_medium_blob;
static Type_handler_long_blob type_handler_long_blob;
static Type_handler_blob type_handler_blob;
+#ifdef HAVE_SPATIAL
static Type_handler_geometry type_handler_geometry;
+#endif
+static Type_handler_enum type_handler_enum;
+static Type_handler_set type_handler_set;
/**
@@ -111,8 +122,7 @@ Type_handler_hybrid_field_type::Type_handler_hybrid_field_type()
const Type_handler *
-Type_handler_hybrid_field_type::get_handler_by_field_type(enum_field_types type)
- const
+Type_handler::get_handler_by_field_type(enum_field_types type)
{
switch (type) {
case MYSQL_TYPE_DECIMAL: return &type_handler_olddecimal;
@@ -127,7 +137,7 @@ Type_handler_hybrid_field_type::get_handler_by_field_type(enum_field_types type)
case MYSQL_TYPE_FLOAT: return &type_handler_float;
case MYSQL_TYPE_DOUBLE: return &type_handler_double;
case MYSQL_TYPE_NULL: return &type_handler_null;
- case MYSQL_TYPE_VARCHAR: return &type_handler_varchar;
+ case MYSQL_TYPE_VARCHAR: return &type_handler_varchar;
case MYSQL_TYPE_TINY_BLOB: return &type_handler_tiny_blob;
case MYSQL_TYPE_MEDIUM_BLOB: return &type_handler_medium_blob;
case MYSQL_TYPE_LONG_BLOB: return &type_handler_long_blob;
@@ -136,17 +146,467 @@ Type_handler_hybrid_field_type::get_handler_by_field_type(enum_field_types type)
case MYSQL_TYPE_STRING: return &type_handler_string;
case MYSQL_TYPE_ENUM: return &type_handler_varchar; // Map to VARCHAR
case MYSQL_TYPE_SET: return &type_handler_varchar; // Map to VARCHAR
- case MYSQL_TYPE_GEOMETRY: return &type_handler_geometry;
+ case MYSQL_TYPE_GEOMETRY:
+#ifdef HAVE_SPATIAL
+ return &type_handler_geometry;
+#else
+ return NULL;
+#endif
+ case MYSQL_TYPE_TIMESTAMP: return &type_handler_timestamp2;// Map to timestamp2
+ case MYSQL_TYPE_TIMESTAMP2: return &type_handler_timestamp2;
+ case MYSQL_TYPE_DATE: return &type_handler_newdate; // Map to newdate
+ case MYSQL_TYPE_TIME: return &type_handler_time2; // Map to time2
+ case MYSQL_TYPE_TIME2: return &type_handler_time2;
+ case MYSQL_TYPE_DATETIME: return &type_handler_datetime2; // Map to datetime2
+ case MYSQL_TYPE_DATETIME2: return &type_handler_datetime2;
+ case MYSQL_TYPE_NEWDATE:
+ /*
+ NEWDATE is actually a real_type(), not a field_type(),
+ but it's used around the code in field_type() context.
+ We should probably clean up the code not to use MYSQL_TYPE_NEWDATE
+ in field_type() context and add DBUG_ASSERT(0) here.
+ */
+ return &type_handler_newdate;
+ };
+ DBUG_ASSERT(0);
+ return &type_handler_string;
+}
+
+
+const Type_handler *
+Type_handler::get_handler_by_real_type(enum_field_types type)
+{
+ switch (type) {
+ case MYSQL_TYPE_DECIMAL: return &type_handler_olddecimal;
+ case MYSQL_TYPE_NEWDECIMAL: return &type_handler_newdecimal;
+ case MYSQL_TYPE_TINY: return &type_handler_tiny;
+ case MYSQL_TYPE_SHORT: return &type_handler_short;
+ case MYSQL_TYPE_LONG: return &type_handler_long;
+ case MYSQL_TYPE_LONGLONG: return &type_handler_longlong;
+ case MYSQL_TYPE_INT24: return &type_handler_int24;
+ case MYSQL_TYPE_YEAR: return &type_handler_year;
+ case MYSQL_TYPE_BIT: return &type_handler_bit;
+ case MYSQL_TYPE_FLOAT: return &type_handler_float;
+ case MYSQL_TYPE_DOUBLE: return &type_handler_double;
+ case MYSQL_TYPE_NULL: return &type_handler_null;
+ case MYSQL_TYPE_VARCHAR: return &type_handler_varchar;
+ case MYSQL_TYPE_TINY_BLOB: return &type_handler_tiny_blob;
+ case MYSQL_TYPE_MEDIUM_BLOB: return &type_handler_medium_blob;
+ case MYSQL_TYPE_LONG_BLOB: return &type_handler_long_blob;
+ case MYSQL_TYPE_BLOB: return &type_handler_blob;
+ case MYSQL_TYPE_VAR_STRING:
+ /*
+ VAR_STRING is actually a field_type(), not a real_type(),
+ but it's used around the code in real_type() context.
+ We should clean up the code and add DBUG_ASSERT(0) here.
+ */
+ return &type_handler_string;
+ case MYSQL_TYPE_STRING: return &type_handler_string;
+ case MYSQL_TYPE_ENUM: return &type_handler_enum;
+ case MYSQL_TYPE_SET: return &type_handler_set;
+ case MYSQL_TYPE_GEOMETRY:
+#ifdef HAVE_SPATIAL
+ return &type_handler_geometry;
+#else
+ return NULL;
+#endif
case MYSQL_TYPE_TIMESTAMP: return &type_handler_timestamp;
- case MYSQL_TYPE_TIMESTAMP2: return &type_handler_timestamp;
+ case MYSQL_TYPE_TIMESTAMP2: return &type_handler_timestamp2;
case MYSQL_TYPE_DATE: return &type_handler_date;
case MYSQL_TYPE_TIME: return &type_handler_time;
- case MYSQL_TYPE_TIME2: return &type_handler_time;
+ case MYSQL_TYPE_TIME2: return &type_handler_time2;
case MYSQL_TYPE_DATETIME: return &type_handler_datetime;
- case MYSQL_TYPE_DATETIME2: return &type_handler_datetime;
- case MYSQL_TYPE_NEWDATE: return &type_handler_date;
+ case MYSQL_TYPE_DATETIME2: return &type_handler_datetime2;
+ case MYSQL_TYPE_NEWDATE: return &type_handler_newdate;
};
DBUG_ASSERT(0);
return &type_handler_string;
}
+
+/**
+ Create a DOUBLE field by default.
+*/
+Field *
+Type_handler::make_num_distinct_aggregator_field(MEM_ROOT *mem_root,
+ const Item *item) const
+{
+ return new(mem_root)
+ Field_double(NULL, item->max_length,
+ (uchar *) (item->maybe_null ? "" : 0),
+ item->maybe_null ? 1 : 0, Field::NONE,
+ item->name, item->decimals, 0, item->unsigned_flag);
+}
+
+
+Field *
+Type_handler_float::make_num_distinct_aggregator_field(MEM_ROOT *mem_root,
+ const Item *item)
+ const
+{
+ return new(mem_root)
+ Field_float(NULL, item->max_length,
+ (uchar *) (item->maybe_null ? "" : 0),
+ item->maybe_null ? 1 : 0, Field::NONE,
+ item->name, item->decimals, 0, item->unsigned_flag);
+}
+
+
+Field *
+Type_handler_decimal_result::make_num_distinct_aggregator_field(
+ MEM_ROOT *mem_root,
+ const Item *item)
+ const
+{
+ DBUG_ASSERT(item->decimals <= DECIMAL_MAX_SCALE);
+ return new (mem_root)
+ Field_new_decimal(NULL, item->max_length,
+ (uchar *) (item->maybe_null ? "" : 0),
+ item->maybe_null ? 1 : 0, Field::NONE,
+ item->name, item->decimals, 0, item->unsigned_flag);
+}
+
+
+Field *
+Type_handler_int_result::make_num_distinct_aggregator_field(MEM_ROOT *mem_root,
+ const Item *item)
+ const
+{
+ /**
+ Make a longlong field for all INT-alike types. It could create
+ smaller fields for TINYINT, SMALLINT, MEDIUMINT, INT though.
+ */
+ return new(mem_root)
+ Field_longlong(NULL, item->max_length,
+ (uchar *) (item->maybe_null ? "" : 0),
+ item->maybe_null ? 1 : 0, Field::NONE,
+ item->name, 0, item->unsigned_flag);
+}
+
+
+/***********************************************************************/
+
+#define TMPNAME ""
+
+Field *Type_handler_tiny::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ /*
+ As we don't know if the integer was signed or not on the master,
+ assume we have same sign on master and slave. This is true when not
+ using conversions so it should be true also when using conversions.
+ */
+ bool unsigned_flag= ((Field_num*) target)->unsigned_flag;
+ return new (table->in_use->mem_root)
+ Field_tiny(NULL, 4 /*max_length*/, (uchar *) "", 1, Field::NONE,
+ TMPNAME, 0/*zerofill*/, unsigned_flag);
+}
+
+
+Field *Type_handler_short::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ bool unsigned_flag= ((Field_num*) target)->unsigned_flag;
+ return new (table->in_use->mem_root)
+ Field_short(NULL, 6 /*max_length*/, (uchar *) "", 1, Field::NONE,
+ TMPNAME, 0/*zerofill*/, unsigned_flag);
+}
+
+
+Field *Type_handler_int24::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ bool unsigned_flag= ((Field_num*) target)->unsigned_flag;
+ return new (table->in_use->mem_root)
+ Field_medium(NULL, 9 /*max_length*/, (uchar *) "", 1, Field::NONE,
+ TMPNAME, 0/*zerofill*/, unsigned_flag);
+}
+
+
+Field *Type_handler_long::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ bool unsigned_flag= ((Field_num*) target)->unsigned_flag;
+ return new (table->in_use->mem_root)
+ Field_long(NULL, 11 /*max_length*/, (uchar *) "", 1, Field::NONE,
+ TMPNAME, 0/*zerofill*/, unsigned_flag);
+}
+
+
+Field *Type_handler_longlong::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ bool unsigned_flag= ((Field_num*) target)->unsigned_flag;
+ return new (table->in_use->mem_root)
+ Field_longlong(NULL, 20 /*max_length*/,(uchar *) "", 1, Field::NONE,
+ TMPNAME, 0/*zerofill*/, unsigned_flag);
+}
+
+
+
+Field *Type_handler_float::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new (table->in_use->mem_root)
+ Field_float(NULL, 12 /*max_length*/, (uchar *) "", 1, Field::NONE,
+ TMPNAME, 0/*dec*/, 0/*zerofill*/, 0/*unsigned_flag*/);
+}
+
+
+Field *Type_handler_double::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new (table->in_use->mem_root)
+ Field_double(NULL, 22 /*max_length*/, (uchar *) "", 1, Field::NONE,
+ TMPNAME, 0/*dec*/, 0/*zerofill*/, 0/*unsigned_flag*/);
+}
+
+
+Field *Type_handler_newdecimal::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ int precision= metadata >> 8;
+ uint decimals= metadata & 0x00ff;
+ uint32 max_length= my_decimal_precision_to_length(precision, decimals, false);
+ DBUG_ASSERT(decimals <= DECIMAL_MAX_SCALE);
+ return new (table->in_use->mem_root)
+ Field_new_decimal(NULL, max_length, (uchar *) "", 1, Field::NONE,
+ TMPNAME, decimals, 0/*zerofill*/, 0/*unsigned*/);
+}
+
+
+Field *Type_handler_olddecimal::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ sql_print_error("In RBR mode, Slave received incompatible DECIMAL field "
+ "(old-style decimal field) from Master while creating "
+ "conversion table. Please consider changing datatype on "
+ "Master to new style decimal by executing ALTER command for"
+ " column Name: %s.%s.%s.",
+ target->table->s->db.str,
+ target->table->s->table_name.str,
+ target->field_name);
+ return NULL;
+}
+
+
+Field *Type_handler_year::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new(table->in_use->mem_root)
+ Field_year(NULL, 4, (uchar *) "", 1, Field::NONE, TMPNAME);
+}
+
+
+Field *Type_handler_null::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new(table->in_use->mem_root)
+ Field_null(NULL, 0, Field::NONE, TMPNAME, target->charset());
+}
+
+
+Field *Type_handler_timestamp::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new_Field_timestamp(table->in_use->mem_root, NULL, (uchar *) "", 1,
+ Field::NONE, TMPNAME, table->s, target->decimals());
+}
+
+
+Field *Type_handler_timestamp2::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new(table->in_use->mem_root)
+ Field_timestampf(NULL, (uchar *) "", 1, Field::NONE,
+ TMPNAME, table->s, metadata);
+}
+
+
+Field *Type_handler_newdate::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new(table->in_use->mem_root)
+ Field_newdate(NULL, (uchar *) "", 1, Field::NONE, TMPNAME);
+}
+
+
+Field *Type_handler_date::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new(table->in_use->mem_root)
+ Field_date(NULL, (uchar *) "", 1, Field::NONE, TMPNAME);
+}
+
+
+Field *Type_handler_time::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new_Field_time(table->in_use->mem_root, NULL, (uchar *) "", 1,
+ Field::NONE, TMPNAME, target->decimals());
+}
+
+
+Field *Type_handler_time2::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new(table->in_use->mem_root)
+ Field_timef(NULL, (uchar *) "", 1, Field::NONE, TMPNAME, metadata);
+}
+
+
+Field *Type_handler_datetime::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new_Field_datetime(table->in_use->mem_root, NULL, (uchar *) "", 1,
+ Field::NONE, TMPNAME, target->decimals());
+}
+
+
+Field *Type_handler_datetime2::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new(table->in_use->mem_root)
+ Field_datetimef(NULL, (uchar *) "", 1,
+ Field::NONE, TMPNAME, metadata);
+}
+
+
+Field *Type_handler_bit::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ DBUG_ASSERT((metadata & 0xff) <= 7);
+ uint32 max_length= 8 * (metadata >> 8U) + (metadata & 0x00ff);
+ return new(table->in_use->mem_root)
+ Field_bit_as_char(NULL, max_length, (uchar *) "", 1,
+ Field::NONE, TMPNAME);
+}
+
+
+Field *Type_handler_string::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ /* This is taken from Field_string::unpack. */
+ uint32 max_length= (((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0x00ff);
+ return new(table->in_use->mem_root)
+ Field_string(NULL, max_length, (uchar *) "", 1,
+ Field::NONE, TMPNAME, target->charset());
+}
+
+
+Field *Type_handler_varchar::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ return new(table->in_use->mem_root)
+ Field_varstring(NULL, metadata, HA_VARCHAR_PACKLENGTH(metadata),
+ (uchar *) "", 1, Field::NONE, TMPNAME,
+ table->s, target->charset());
+}
+
+
+Field *Type_handler_blob_common::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ uint pack_length= metadata & 0x00ff;
+ if (pack_length < 1 || pack_length > 4)
+ return NULL; // Broken binary log?
+ return new(table->in_use->mem_root)
+ Field_blob(NULL, (uchar *) "", 1, Field::NONE, TMPNAME,
+ table->s, pack_length, target->charset());
+}
+
+
+#ifdef HAVE_SPATIAL
+Field *Type_handler_geometry::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ DBUG_ASSERT(target->type() == MYSQL_TYPE_GEOMETRY);
+ /*
+ We do not do not update feature_gis statistics here:
+ status_var_increment(target->table->in_use->status_var.feature_gis);
+ as this is only a temporary field.
+ The statistics was already incremented when "target" was created.
+ */
+ return new(table->in_use->mem_root)
+ Field_geom(NULL, (uchar *) "", 1, Field::NONE, TMPNAME, table->s, 4,
+ ((const Field_geom*) target)->geom_type,
+ ((const Field_geom*) target)->srid);
+}
+#endif
+
+Field *Type_handler_enum::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ DBUG_ASSERT(target->type() == MYSQL_TYPE_STRING);
+ DBUG_ASSERT(target->real_type() == MYSQL_TYPE_ENUM);
+ return new(table->in_use->mem_root)
+ Field_enum(NULL, target->field_length,
+ (uchar *) "", 1, Field::NONE, TMPNAME,
+ metadata & 0x00ff/*pack_length()*/,
+ ((const Field_enum*) target)->typelib, target->charset());
+}
+
+
+Field *Type_handler_set::make_conversion_table_field(TABLE *table,
+ uint metadata,
+ const Field *target)
+ const
+{
+ DBUG_ASSERT(target->type() == MYSQL_TYPE_STRING);
+ DBUG_ASSERT(target->real_type() == MYSQL_TYPE_SET);
+ return new(table->in_use->mem_root)
+ Field_set(NULL, target->field_length,
+ (uchar *) "", 1, Field::NONE, TMPNAME,
+ metadata & 0x00ff/*pack_length()*/,
+ ((const Field_enum*) target)->typelib, target->charset());
+}
diff --git a/sql/sql_type.h b/sql/sql_type.h
index f5a42e8d97d..8141b7ef45f 100644
--- a/sql/sql_type.h
+++ b/sql/sql_type.h
@@ -23,12 +23,26 @@
#include "mysqld.h"
+class Field;
+class Item;
+class Type_std_attributes;
+class Sort_param;
+struct TABLE;
+struct SORT_FIELD_ATTR;
+
class Type_handler
{
protected:
const Type_handler *string_type_handler(uint max_octet_length) const;
+ void make_sort_key_longlong(uchar *to,
+ bool maybe_null, bool null_value,
+ bool unsigned_flag,
+ longlong value) const;
public:
+ static const Type_handler *get_handler_by_field_type(enum_field_types type);
+ static const Type_handler *get_handler_by_real_type(enum_field_types type);
virtual enum_field_types field_type() const= 0;
+ virtual enum_field_types real_field_type() const { return field_type(); }
virtual Item_result result_type() const= 0;
virtual Item_result cmp_type() const= 0;
virtual const Type_handler*
@@ -36,6 +50,48 @@ public:
CHARSET_INFO *cs) const
{ return this; }
virtual ~Type_handler() {}
+ /**
+ Makes a temporary table Field to handle numeric aggregate functions,
+ e.g. SUM(DISTINCT expr), AVG(DISTINCT expr), etc.
+ */
+ virtual Field *make_num_distinct_aggregator_field(MEM_ROOT *,
+ const Item *) const;
+ /**
+ Makes a temporary table Field to handle RBR replication type conversion.
+ @param TABLE - The conversion table the field is going to be added to.
+ It's used to access to table->in_use->mem_root,
+ to create the new field on the table memory root,
+ as well as to increment statistics in table->share
+ (e.g. table->s->blob_count).
+ @param metadata - Metadata from the binary log.
+ @param target - The field in the target table on the slave.
+
+ Note, the data types of "target" and of "this" are not necessarily
+ always the same, in general case it's possible that:
+ this->field_type() != target->field_type()
+ and/or
+ this->real_type() != target->real_type()
+
+ This method decodes metadata according to this->real_type()
+ and creates a new field also according to this->real_type().
+
+ In some cases it lurks into "target", to get some extra information, e.g.:
+ - unsigned_flag for numeric fields
+ - charset() for string fields
+ - typelib and field_length for SET and ENUM
+ - geom_type and srid for GEOMETRY
+ This information is not available in the binary log, so
+ we assume that these fields are the same on the master and on the slave.
+ */
+ virtual Field *make_conversion_table_field(TABLE *TABLE,
+ uint metadata,
+ const Field *target) const= 0;
+ virtual void make_sort_key(uchar *to, Item *item,
+ const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const= 0;
+ virtual void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const= 0;
};
@@ -47,6 +103,11 @@ public:
Item_result result_type() const { return REAL_RESULT; }
Item_result cmp_type() const { return REAL_RESULT; }
virtual ~Type_handler_real_result() {}
+ void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const;
+ void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const;
};
@@ -56,6 +117,12 @@ public:
Item_result result_type() const { return DECIMAL_RESULT; }
Item_result cmp_type() const { return DECIMAL_RESULT; }
virtual ~Type_handler_decimal_result() {};
+ Field *make_num_distinct_aggregator_field(MEM_ROOT *, const Item *) const;
+ void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const;
+ void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const;
};
@@ -65,6 +132,12 @@ public:
Item_result result_type() const { return INT_RESULT; }
Item_result cmp_type() const { return INT_RESULT; }
virtual ~Type_handler_int_result() {}
+ Field *make_num_distinct_aggregator_field(MEM_ROOT *, const Item *) const;
+ void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const;
+ void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const;
};
@@ -74,6 +147,11 @@ public:
Item_result result_type() const { return STRING_RESULT; }
Item_result cmp_type() const { return TIME_RESULT; }
virtual ~Type_handler_temporal_result() {}
+ void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const;
+ void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const;
};
@@ -86,6 +164,11 @@ public:
const Type_handler *
type_handler_adjusted_to_max_octet_length(uint max_octet_length,
CHARSET_INFO *cs) const;
+ void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const;
+ void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const;
};
@@ -114,6 +197,8 @@ class Type_handler_tiny: public Type_handler_int_result
public:
virtual ~Type_handler_tiny() {}
enum_field_types field_type() const { return MYSQL_TYPE_TINY; }
+ Field *make_conversion_table_field(TABLE *TABLE, uint metadata,
+ const Field *target) const;
};
@@ -122,6 +207,8 @@ class Type_handler_short: public Type_handler_int_result
public:
virtual ~Type_handler_short() {}
enum_field_types field_type() const { return MYSQL_TYPE_SHORT; }
+ Field *make_conversion_table_field(TABLE *TABLE, uint metadata,
+ const Field *target) const;
};
@@ -130,6 +217,8 @@ class Type_handler_long: public Type_handler_int_result
public:
virtual ~Type_handler_long() {}
enum_field_types field_type() const { return MYSQL_TYPE_LONG; }
+ Field *make_conversion_table_field(TABLE *TABLE, uint metadata,
+ const Field *target) const;
};
@@ -138,6 +227,8 @@ class Type_handler_longlong: public Type_handler_int_result
public:
virtual ~Type_handler_longlong() {}
enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+ Field *make_conversion_table_field(TABLE *TABLE, uint metadata,
+ const Field *target) const;
};
@@ -146,6 +237,8 @@ class Type_handler_int24: public Type_handler_int_result
public:
virtual ~Type_handler_int24() {}
enum_field_types field_type() const { return MYSQL_TYPE_INT24; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -154,6 +247,8 @@ class Type_handler_year: public Type_handler_int_result
public:
virtual ~Type_handler_year() {}
enum_field_types field_type() const { return MYSQL_TYPE_YEAR; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -162,6 +257,8 @@ class Type_handler_bit: public Type_handler_int_result
public:
virtual ~Type_handler_bit() {}
enum_field_types field_type() const { return MYSQL_TYPE_BIT; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -170,6 +267,9 @@ class Type_handler_float: public Type_handler_real_result
public:
virtual ~Type_handler_float() {}
enum_field_types field_type() const { return MYSQL_TYPE_FLOAT; }
+ Field *make_num_distinct_aggregator_field(MEM_ROOT *, const Item *) const;
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -178,6 +278,8 @@ class Type_handler_double: public Type_handler_real_result
public:
virtual ~Type_handler_double() {}
enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -186,6 +288,19 @@ class Type_handler_time: public Type_handler_temporal_result
public:
virtual ~Type_handler_time() {}
enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
+};
+
+
+class Type_handler_time2: public Type_handler_temporal_result
+{
+public:
+ virtual ~Type_handler_time2() {}
+ enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
+ enum_field_types real_field_type() const { return MYSQL_TYPE_TIME2; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -194,6 +309,18 @@ class Type_handler_date: public Type_handler_temporal_result
public:
virtual ~Type_handler_date() {}
enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
+};
+
+
+class Type_handler_newdate: public Type_handler_temporal_result
+{
+public:
+ virtual ~Type_handler_newdate() {}
+ enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -202,6 +329,19 @@ class Type_handler_datetime: public Type_handler_temporal_result
public:
virtual ~Type_handler_datetime() {}
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
+};
+
+
+class Type_handler_datetime2: public Type_handler_temporal_result
+{
+public:
+ virtual ~Type_handler_datetime2() {}
+ enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
+ enum_field_types real_field_type() const { return MYSQL_TYPE_DATETIME2; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -210,6 +350,19 @@ class Type_handler_timestamp: public Type_handler_temporal_result
public:
virtual ~Type_handler_timestamp() {}
enum_field_types field_type() const { return MYSQL_TYPE_TIMESTAMP; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
+};
+
+
+class Type_handler_timestamp2: public Type_handler_temporal_result
+{
+public:
+ virtual ~Type_handler_timestamp2() {}
+ enum_field_types field_type() const { return MYSQL_TYPE_TIMESTAMP; }
+ enum_field_types real_field_type() const { return MYSQL_TYPE_TIMESTAMP2; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -218,6 +371,8 @@ class Type_handler_olddecimal: public Type_handler_decimal_result
public:
virtual ~Type_handler_olddecimal() {}
enum_field_types field_type() const { return MYSQL_TYPE_DECIMAL; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -226,6 +381,8 @@ class Type_handler_newdecimal: public Type_handler_decimal_result
public:
virtual ~Type_handler_newdecimal() {}
enum_field_types field_type() const { return MYSQL_TYPE_NEWDECIMAL; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -234,6 +391,8 @@ class Type_handler_null: public Type_handler_string_result
public:
virtual ~Type_handler_null() {}
enum_field_types field_type() const { return MYSQL_TYPE_NULL; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -242,6 +401,8 @@ class Type_handler_string: public Type_handler_string_result
public:
virtual ~Type_handler_string() {}
enum_field_types field_type() const { return MYSQL_TYPE_STRING; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -250,10 +411,21 @@ class Type_handler_varchar: public Type_handler_string_result
public:
virtual ~Type_handler_varchar() {}
enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
-class Type_handler_tiny_blob: public Type_handler_string_result
+class Type_handler_blob_common: public Type_handler_string_result
+{
+public:
+ virtual ~Type_handler_blob_common() { }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
+};
+
+
+class Type_handler_tiny_blob: public Type_handler_blob_common
{
public:
virtual ~Type_handler_tiny_blob() {}
@@ -261,7 +433,7 @@ public:
};
-class Type_handler_medium_blob: public Type_handler_string_result
+class Type_handler_medium_blob: public Type_handler_blob_common
{
public:
virtual ~Type_handler_medium_blob() {}
@@ -269,7 +441,7 @@ public:
};
-class Type_handler_long_blob: public Type_handler_string_result
+class Type_handler_long_blob: public Type_handler_blob_common
{
public:
virtual ~Type_handler_long_blob() {}
@@ -277,7 +449,7 @@ public:
};
-class Type_handler_blob: public Type_handler_string_result
+class Type_handler_blob: public Type_handler_blob_common
{
public:
virtual ~Type_handler_blob() {}
@@ -285,11 +457,37 @@ public:
};
+#ifdef HAVE_SPATIAL
class Type_handler_geometry: public Type_handler_string_result
{
public:
virtual ~Type_handler_geometry() {}
enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
+};
+#endif
+
+
+class Type_handler_enum: public Type_handler_string_result
+{
+public:
+ virtual ~Type_handler_enum() {}
+ enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
+ virtual enum_field_types real_field_type() const { return MYSQL_TYPE_ENUM; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
+};
+
+
+class Type_handler_set: public Type_handler_string_result
+{
+public:
+ virtual ~Type_handler_set() {}
+ enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
+ virtual enum_field_types real_field_type() const { return MYSQL_TYPE_SET; }
+ Field *make_conversion_table_field(TABLE *, uint metadata,
+ const Field *target) const;
};
@@ -306,9 +504,11 @@ class Type_handler_hybrid_field_type: public Type_handler
{
const Type_handler *m_type_handler;
const Type_handler *get_handler_by_result_type(Item_result type) const;
- const Type_handler *get_handler_by_field_type(enum_field_types type) const;
public:
Type_handler_hybrid_field_type();
+ Type_handler_hybrid_field_type(const Type_handler *handler)
+ :m_type_handler(handler)
+ { }
Type_handler_hybrid_field_type(enum_field_types type)
:m_type_handler(get_handler_by_field_type(type))
{ }
@@ -316,8 +516,16 @@ public:
:m_type_handler(other->m_type_handler)
{ }
enum_field_types field_type() const { return m_type_handler->field_type(); }
+ enum_field_types real_field_type() const
+ {
+ return m_type_handler->real_field_type();
+ }
Item_result result_type() const { return m_type_handler->result_type(); }
Item_result cmp_type() const { return m_type_handler->cmp_type(); }
+ void set_handler(const Type_handler *other)
+ {
+ m_type_handler= other;
+ }
const Type_handler *set_handler_by_result_type(Item_result type)
{
return (m_type_handler= get_handler_by_result_type(type));
@@ -335,6 +543,10 @@ public:
{
return (m_type_handler= get_handler_by_field_type(type));
}
+ const Type_handler *set_handler_by_real_type(enum_field_types type)
+ {
+ return (m_type_handler= get_handler_by_real_type(type));
+ }
const Type_handler *
type_handler_adjusted_to_max_octet_length(uint max_octet_length,
CHARSET_INFO *cs) const
@@ -343,6 +555,42 @@ public:
m_type_handler->type_handler_adjusted_to_max_octet_length(max_octet_length,
cs);
}
+ Field *make_num_distinct_aggregator_field(MEM_ROOT *mem_root,
+ const Item *item) const
+ {
+ return m_type_handler->make_num_distinct_aggregator_field(mem_root, item);
+ }
+ Field *make_conversion_table_field(TABLE *table, uint metadata,
+ const Field *target) const
+ {
+ return m_type_handler->make_conversion_table_field(table, metadata, target);
+ }
+ void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const
+ {
+ m_type_handler->make_sort_key(to, item, sort_field, param);
+ }
+ void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const
+ {
+ m_type_handler->sortlength(thd, item, attr);
+ }
+
};
+
+/**
+ This class is used for Item_type_holder, which preserves real_type.
+*/
+class Type_handler_hybrid_real_field_type:
+ public Type_handler_hybrid_field_type
+{
+public:
+ Type_handler_hybrid_real_field_type(enum_field_types type)
+ :Type_handler_hybrid_field_type(get_handler_by_real_type(type))
+ { }
+};
+
+
#endif /* SQL_TYPE_H_INCLUDED */
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 4ccd4948b58..05698ce82cc 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -154,7 +154,7 @@ void udf_init()
mysql_rwlock_init(key_rwlock_THR_LOCK_udf, &THR_LOCK_udf);
init_sql_alloc(&mem, UDF_ALLOC_BLOCK_SIZE, 0, MYF(0));
- THD *new_thd = new THD;
+ THD *new_thd = new THD(0);
if (!new_thd ||
my_hash_init(&udf_hash,system_charset_info,32,0,0,get_hash_key, NULL, 0))
{
@@ -180,7 +180,8 @@ void udf_init()
}
table= tables.table;
- if (init_read_record(&read_record_info, new_thd, table, NULL,1,0,FALSE))
+ if (init_read_record(&read_record_info, new_thd, table, NULL, NULL, 1, 0,
+ FALSE))
{
sql_print_error("Could not initialize init_read_record; udf's not "
"loaded");
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 72926a26e13..94b8c662a06 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -28,6 +28,8 @@
#include "sql_cursor.h"
#include "sql_base.h" // fill_record
#include "filesort.h" // filesort_free_buffers
+#include "sql_view.h"
+#include "sql_cte.h"
bool mysql_union(THD *thd, LEX *lex, select_result *result,
SELECT_LEX_UNIT *unit, ulong setup_tables_done_option)
@@ -99,6 +101,28 @@ int select_union::send_data(List<Item> &values)
}
+int select_union_recursive::send_data(List<Item> &values)
+{
+ int rc= select_union::send_data(values);
+
+ if (write_err != HA_ERR_FOUND_DUPP_KEY &&
+ write_err != HA_ERR_FOUND_DUPP_UNIQUE)
+ {
+ int err;
+ if ((err= incr_table->file->ha_write_tmp_row(table->record[0])))
+ {
+ bool is_duplicate;
+ rc= create_internal_tmp_table_from_heap(thd, incr_table,
+ tmp_table_param.start_recinfo,
+ &tmp_table_param.recinfo,
+ err, 1, &is_duplicate);
+ }
+ }
+
+ return rc;
+}
+
+
bool select_union::send_eof()
{
return 0;
@@ -161,7 +185,7 @@ select_union::create_result_table(THD *thd_arg, List<Item> *column_types,
table->keys_in_use_for_query.clear_all();
for (uint i=0; i < table->s->fields; i++)
- table->field[i]->flags &= ~PART_KEY_FLAG;
+ table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
if (create_table)
{
@@ -171,9 +195,53 @@ select_union::create_result_table(THD *thd_arg, List<Item> *column_types,
return FALSE;
}
+bool
+select_union_recursive::create_result_table(THD *thd_arg,
+ List<Item> *column_types,
+ bool is_union_distinct,
+ ulonglong options,
+ const char *alias,
+ bool bit_fields_as_long,
+ bool create_table,
+ bool keep_row_order)
+{
+ if (select_union::create_result_table(thd_arg, column_types,
+ is_union_distinct, options,
+ "", bit_fields_as_long,
+ create_table, keep_row_order))
+ return true;
+
+ if (! (incr_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
+ (ORDER*) 0, false, 1,
+ options, HA_POS_ERROR, "",
+ true, keep_row_order)))
+ return true;
+
+ incr_table->keys_in_use_for_query.clear_all();
+ for (uint i=0; i < table->s->fields; i++)
+ incr_table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
+
+ TABLE *rec_table= 0;
+ if (! (rec_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
+ (ORDER*) 0, false, 1,
+ options, HA_POS_ERROR, alias,
+ true, keep_row_order)))
+ return true;
+
+ rec_table->keys_in_use_for_query.clear_all();
+ for (uint i=0; i < table->s->fields; i++)
+ rec_table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
+
+ if (rec_tables.push_back(rec_table))
+ return true;
+
+ return false;
+}
+
/**
- Reset and empty the temporary table that stores the materialized query result.
+ Reset and empty the temporary table that stores the materialized query
+ result.
@note The cleanup performed here is exactly the same as for the two temp
tables of JOIN - exec_tmp_table_[1 | 2].
@@ -183,11 +251,47 @@ void select_union::cleanup()
{
table->file->extra(HA_EXTRA_RESET_STATE);
table->file->ha_delete_all_rows();
- free_io_cache(table);
- filesort_free_buffers(table,0);
}
+void select_union_recursive::cleanup()
+{
+ if (table)
+ {
+ select_union::cleanup();
+ free_tmp_table(thd, table);
+ }
+
+ if (incr_table)
+ {
+ if (incr_table->is_created())
+ {
+ incr_table->file->extra(HA_EXTRA_RESET_STATE);
+ incr_table->file->ha_delete_all_rows();
+ }
+ free_tmp_table(thd, incr_table);
+ }
+
+ List_iterator<TABLE> it(rec_tables);
+ TABLE *tab;
+ while ((tab= it++))
+ {
+ if (tab->is_created())
+ {
+ tab->file->extra(HA_EXTRA_RESET_STATE);
+ tab->file->ha_delete_all_rows();
+ }
+ /*
+ The table will be closed later in close_thread_tables(),
+ because it might be used in the statements like
+ ANALYZE WITH r AS (...) SELECT * from r
+ where r is defined through recursion.
+ */
+ tab->next= thd->rec_tables;
+ thd->rec_tables= tab;
+ }
+}
+
/**
Replace the current result with new_result and prepare it.
@@ -327,25 +431,47 @@ st_select_lex_unit::init_prepare_fake_select_lex(THD *thd_arg,
order=order->next)
{
(*order->item)->walk(&Item::change_context_processor, 0,
- (uchar*) &fake_select_lex->context);
+ &fake_select_lex->context);
(*order->item)->walk(&Item::set_fake_select_as_master_processor, 0,
- (uchar*) fake_select_lex);
+ fake_select_lex);
}
}
+
+
bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
ulong additional_options)
{
SELECT_LEX *lex_select_save= thd_arg->lex->current_select;
SELECT_LEX *sl, *first_sl= first_select();
+ bool is_recursive= with_element && with_element->is_recursive;
+ bool is_rec_result_table_created= false;
select_result *tmp_result;
bool is_union_select;
bool instantiate_tmp_table= false;
DBUG_ENTER("st_select_lex_unit::prepare");
- DBUG_ASSERT(thd == thd_arg && thd == current_thd);
+ DBUG_ASSERT(thd == thd_arg);
+ DBUG_ASSERT(thd == current_thd);
+
+ if (is_recursive && (sl= first_sl->next_select()))
+ {
+ SELECT_LEX *next_sl;
+ for ( ; ; sl= next_sl)
+ {
+ next_sl= sl->next_select();
+ if (!next_sl)
+ break;
+ if (next_sl->with_all_modifier != sl->with_all_modifier)
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "mix of ALL and DISTINCT UNION operations in recursive CTE spec");
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
- describe= MY_TEST(additional_options & SELECT_DESCRIBE);
+ describe= additional_options & SELECT_DESCRIBE;
/*
Save fake_select_lex in case we don't need it for anything but
@@ -390,7 +516,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
/* Global option */
- if (is_union_select)
+ if (is_union_select || is_recursive)
{
if (is_union() && !union_needs_tmp_table())
{
@@ -406,8 +532,27 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
}
else
{
- if (!(tmp_result= union_result=
- new (thd_arg->mem_root) select_union(thd_arg)))
+ if (!is_recursive)
+ union_result= new (thd_arg->mem_root) select_union(thd_arg);
+ else
+ {
+ with_element->rec_result=
+ new (thd_arg->mem_root) select_union_recursive(thd_arg);
+ union_result= with_element->rec_result;
+ if (fake_select_lex)
+ {
+ if (fake_select_lex->order_list.first ||
+ fake_select_lex->explicit_limit)
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "global ORDER_BY/LIMIT in recursive CTE spec");
+ goto err;
+ }
+ fake_select_lex->cleanup();
+ fake_select_lex= NULL;
+ }
+ }
+ if (!(tmp_result= union_result))
goto err; /* purecov: inspected */
instantiate_tmp_table= true;
}
@@ -416,9 +561,9 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
tmp_result= sel_result;
sl->context.resolve_in_select_list= TRUE;
-
+
for (;sl; sl= sl->next_select())
- {
+ {
bool can_skip_order_by;
sl->options|= SELECT_NO_UNLOCK;
JOIN *join= new JOIN(thd_arg, sl->item_list,
@@ -438,8 +583,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
can_skip_order_by= is_union_select && !(sl->braces && sl->explicit_limit);
- saved_error= join->prepare(&sl->ref_pointer_array,
- sl->table_list.first,
+ saved_error= join->prepare(sl->table_list.first,
sl->with_wild,
sl->where,
(can_skip_order_by ? 0 :
@@ -476,10 +620,17 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
Use items list of underlaid select for derived tables to preserve
information about fields lengths and exact types
*/
- if (!is_union_select)
+ if (!is_union_select && !is_recursive)
types= first_sl->item_list;
else if (sl == first_sl)
{
+ if (with_element)
+ {
+ if (with_element->rename_columns_of_derived_unit(thd, this))
+ goto err;
+ if (check_duplicate_names(thd, sl->item_list, 0))
+ goto err;
+ }
types.empty();
List_iterator_fast<Item> it(sl->item_list);
Item *item_tmp;
@@ -514,15 +665,45 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
ER_THD(thd, ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT),MYF(0));
goto err;
}
- List_iterator_fast<Item> it(sl->item_list);
- List_iterator_fast<Item> tp(types);
- Item *type, *item_tmp;
- while ((type= tp++, item_tmp= it++))
+ if (!is_rec_result_table_created)
{
- if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp))
- DBUG_RETURN(TRUE);
+ List_iterator_fast<Item> it(sl->item_list);
+ List_iterator_fast<Item> tp(types);
+ Item *type, *item_tmp;
+ while ((type= tp++, item_tmp= it++))
+ {
+ if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp))
+ DBUG_RETURN(TRUE);
+ }
}
}
+ if (is_recursive)
+ {
+ if (!with_element->is_anchor(sl))
+ sl->uncacheable|= UNCACHEABLE_UNITED;
+ if(!is_rec_result_table_created &&
+ (!sl->next_select() ||
+ sl->next_select() == with_element->first_recursive))
+ {
+ ulonglong create_options;
+ create_options= (first_sl->options | thd_arg->variables.option_bits |
+ TMP_TABLE_ALL_COLUMNS);
+ if (union_result->create_result_table(thd, &types,
+ MY_TEST(union_distinct),
+ create_options, derived->alias,
+ false,
+ instantiate_tmp_table, false))
+ goto err;
+ if (!derived->table)
+ {
+ derived->table= with_element->rec_result->rec_tables.head();
+ if (derived->derived_result)
+ derived->derived_result->table= derived->table;
+ }
+ with_element->mark_as_with_prepared_anchor();
+ is_rec_result_table_created= true;
+ }
+ }
}
/*
@@ -548,7 +729,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
while ((type= tp++))
{
- if (type->result_type() == STRING_RESULT &&
+ if (type->cmp_type() == STRING_RESULT &&
type->collation.derivation == DERIVATION_NONE)
{
my_error(ER_CANT_AGGREGATE_NCOLLATIONS, MYF(0), "UNION");
@@ -576,8 +757,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
ORDER *ord;
Item_func::Functype ft= Item_func::FT_FUNC;
for (ord= global_parameters()->order_list.first; ord; ord= ord->next)
- if ((*ord->item)->walk (&Item::find_function_processor, FALSE,
- (uchar *) &ft))
+ if ((*ord->item)->walk (&Item::find_function_processor, FALSE, &ft))
{
my_error (ER_CANT_USE_OPTION_HERE, MYF(0), "MATCH()");
goto err;
@@ -596,9 +776,11 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
if (global_parameters()->ftfunc_list->elements)
create_options= create_options | TMP_TABLE_FORCE_MYISAM;
- if (union_result->create_result_table(thd, &types, MY_TEST(union_distinct),
- create_options, "", false,
- instantiate_tmp_table))
+
+ if (!is_recursive &&
+ union_result->create_result_table(thd, &types, MY_TEST(union_distinct),
+ create_options, "", false,
+ instantiate_tmp_table, false))
goto err;
if (fake_select_lex && !fake_select_lex->first_cond_optimization)
{
@@ -683,13 +865,10 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
DBUG_RETURN(TRUE);
}
saved_error= fake_select_lex->join->
- prepare(&fake_select_lex->ref_pointer_array,
- fake_select_lex->table_list.first,
- 0, 0,
- global_parameters()->order_list.elements, // og_num
- global_parameters()->order_list.first, // order
- false, NULL, NULL, NULL,
- fake_select_lex, this);
+ prepare(fake_select_lex->table_list.first, 0, 0,
+ global_parameters()->order_list.elements, // og_num
+ global_parameters()->order_list.first, // order
+ false, NULL, NULL, NULL, fake_select_lex, this);
fake_select_lex->table_list.empty();
}
}
@@ -720,6 +899,10 @@ bool st_select_lex_unit::optimize()
if (optimized && !uncacheable && !describe)
DBUG_RETURN(FALSE);
+ if (with_element && with_element->is_recursive && optimize_started)
+ DBUG_RETURN(FALSE);
+ optimize_started= true;
+
if (uncacheable || !item || !item->assigned() || describe)
{
if (item)
@@ -730,7 +913,7 @@ bool st_select_lex_unit::optimize()
{
item->assigned(0); // We will reinit & rexecute unit
item->reset();
- if (table->created)
+ if (table->is_created())
{
table->file->ha_delete_all_rows();
table->file->info(HA_STATUS_VARIABLE);
@@ -801,7 +984,8 @@ bool st_select_lex_unit::exec()
if (executed && !uncacheable && !describe)
DBUG_RETURN(FALSE);
executed= 1;
- if (!(uncacheable & ~UNCACHEABLE_EXPLAIN) && item)
+ if (!(uncacheable & ~UNCACHEABLE_EXPLAIN) && item &&
+ !item->with_recursive_reference)
item->make_const();
saved_error= optimize();
@@ -816,7 +1000,7 @@ bool st_select_lex_unit::exec()
if (uncacheable || !item || !item->assigned() || describe)
{
- if (!fake_select_lex)
+ if (!fake_select_lex && !(with_element && with_element->is_recursive))
union_result->cleanup();
for (SELECT_LEX *sl= select_cursor; sl; sl= sl->next_select())
{
@@ -857,7 +1041,7 @@ bool st_select_lex_unit::exec()
{
records_at_start= table->file->stats.records;
sl->join->exec();
- if (sl == union_distinct)
+ if (sl == union_distinct && !(with_element && with_element->is_recursive))
{
// This is UNION DISTINCT, so there should be a fake_select_lex
DBUG_ASSERT(fake_select_lex != NULL);
@@ -974,13 +1158,13 @@ bool st_select_lex_unit::exec()
Don't add more sum_items if we have already done JOIN::prepare
for this (with a different join object)
*/
- if (!fake_select_lex->ref_pointer_array)
+ if (fake_select_lex->ref_pointer_array.is_null())
fake_select_lex->n_child_sum_items+= global_parameters()->n_sum_items;
if (!was_executed)
save_union_explain_part2(thd->lex->explain);
- saved_error= mysql_select(thd, &fake_select_lex->ref_pointer_array,
+ saved_error= mysql_select(thd,
&result_table_list,
0, item_list, NULL,
global_parameters()->order_list.elements,
@@ -1003,7 +1187,7 @@ bool st_select_lex_unit::exec()
to reset them back, we re-do all of the actions (yes it is ugly):
*/ // psergey-todo: is the above really necessary anymore??
join->init(thd, item_list, fake_select_lex->options, result);
- saved_error= mysql_select(thd, &fake_select_lex->ref_pointer_array,
+ saved_error= mysql_select(thd,
&result_table_list,
0, item_list, NULL,
global_parameters()->order_list.elements,
@@ -1039,6 +1223,125 @@ err:
}
+/**
+ @brief
+ Execute the union of the specification of a recursive with table
+
+ @details
+ The method is performed only for the units that are specifications
+ of a recursive with table T. If the specification contains an anchor
+ part then the first call of this method executes only this part
+ while the following calls execute the recursive part. If there are
+ no anchors each call executes the whole unit.
+ Before the execution the method cleans up the temporary table
+ to where the new rows of the recursive table are sent.
+ After the execution of the unit these rows are copied to the
+ temporary tables created for recursive references of T.
+ If the specification of T is restricted (standards compliant)
+ then these temporary tables are cleaned up before new rows
+ are copied into them.
+
+ @retval
+ false on success
+ true on failure
+*/
+
+bool st_select_lex_unit::exec_recursive()
+{
+ st_select_lex *lex_select_save= thd->lex->current_select;
+ st_select_lex *start= with_element->first_recursive;
+ TABLE *incr_table= with_element->rec_result->incr_table;
+ st_select_lex *end= NULL;
+ bool is_unrestricted= with_element->is_unrestricted();
+ List_iterator_fast<TABLE> li(with_element->rec_result->rec_tables);
+ TMP_TABLE_PARAM *tmp_table_param= &with_element->rec_result->tmp_table_param;
+ ha_rows examined_rows= 0;
+ bool was_executed= executed;
+ TABLE *rec_table;
+
+ DBUG_ENTER("st_select_lex_unit::exec_recursive");
+
+ executed= 1;
+ create_explain_query_if_not_exists(thd->lex, thd->mem_root);
+ if (!was_executed)
+ save_union_explain(thd->lex->explain);
+
+ if (with_element->level == 0)
+ {
+ if (!incr_table->is_created() &&
+ instantiate_tmp_table(incr_table,
+ tmp_table_param->keyinfo,
+ tmp_table_param->start_recinfo,
+ &tmp_table_param->recinfo,
+ 0))
+ DBUG_RETURN(1);
+ incr_table->file->extra(HA_EXTRA_WRITE_CACHE);
+ incr_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ start= first_select();
+ if (with_element->with_anchor)
+ end= with_element->first_recursive;
+ }
+ else if ((saved_error= incr_table->file->ha_delete_all_rows()))
+ goto err;
+
+ for (st_select_lex *sl= start ; sl != end; sl= sl->next_select())
+ {
+ thd->lex->current_select= sl;
+ set_limit(sl);
+ sl->join->exec();
+ saved_error= sl->join->error;
+ if (!saved_error)
+ {
+ examined_rows+= thd->get_examined_row_count();
+ thd->set_examined_row_count(0);
+ if (union_result->flush())
+ {
+ thd->lex->current_select= lex_select_save;
+ DBUG_RETURN(1);
+ }
+ }
+ if (saved_error)
+ {
+ thd->lex->current_select= lex_select_save;
+ goto err;
+
+ }
+ }
+
+ thd->inc_examined_row_count(examined_rows);
+
+ incr_table->file->info(HA_STATUS_VARIABLE);
+ if (with_element->level && incr_table->file->stats.records == 0)
+ with_element->set_as_stabilized();
+ else
+ with_element->level++;
+
+ while ((rec_table= li++))
+ {
+ saved_error=
+ incr_table->insert_all_rows_into_tmp_table(thd, rec_table,
+ tmp_table_param,
+ !is_unrestricted);
+ if (!with_element->rec_result->first_rec_table_to_update)
+ with_element->rec_result->first_rec_table_to_update= rec_table;
+ if (with_element->level == 1 && rec_table->reginfo.join_tab)
+ rec_table->reginfo.join_tab->preread_init_done= true;
+ }
+ for (Item_subselect *sq= with_element->sq_with_rec_ref.first;
+ sq;
+ sq= sq->next_with_rec_ref)
+ {
+ sq->reset();
+ sq->engine->force_reexecution();
+ }
+
+ thd->lex->current_select= lex_select_save;
+err:
+ thd->lex->set_limit_rows_examined();
+ DBUG_RETURN(saved_error);
+}
+
+
bool st_select_lex_unit::cleanup()
{
int error= 0;
@@ -1048,35 +1351,44 @@ bool st_select_lex_unit::cleanup()
{
DBUG_RETURN(FALSE);
}
- cleaned= 1;
-
- if (union_result)
+ /*
+ When processing a PS/SP or an EXPLAIN command cleanup of a unit can
+ be performed immediately when the unit is reached in the cleanup
+ traversal initiated by the cleanup of the main unit.
+ */
+ if (!thd->stmt_arena->is_stmt_prepare() && !thd->lex->describe &&
+ with_element && with_element->is_recursive && union_result)
{
- delete union_result;
- union_result=0; // Safety
- if (table)
- free_tmp_table(thd, table);
- table= 0; // Safety
+ select_union_recursive *result= with_element->rec_result;
+ if (++result->cleanup_count == with_element->rec_outer_references)
+ {
+ /*
+ Perform cleanup for with_element and for all with elements
+ mutually recursive with it.
+ */
+ cleaned= 1;
+ with_element->get_next_mutually_recursive()->spec->cleanup();
+ }
+ else
+ {
+ /*
+ Just increment by 1 cleanup_count for with_element and
+ for all with elements mutually recursive with it.
+ */
+ With_element *with_elem= with_element;
+ while ((with_elem= with_elem->get_next_mutually_recursive()) !=
+ with_element)
+ with_elem->rec_result->cleanup_count++;
+ DBUG_RETURN(FALSE);
+ }
}
+ cleaned= 1;
for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select())
error|= sl->cleanup();
-
+
if (fake_select_lex)
{
- JOIN *join;
- if ((join= fake_select_lex->join))
- {
- join->tables_list= 0;
- join->table_count= 0;
- join->top_join_tab_count= 0;
- if (join->tmp_join && join->tmp_join != join)
- {
- join->tmp_join->tables_list= 0;
- join->tmp_join->table_count= 0;
- join->tmp_join->top_join_tab_count= 0;
- }
- }
error|= fake_select_lex->cleanup();
/*
There are two cases when we should clean order items:
@@ -1098,6 +1410,28 @@ bool st_select_lex_unit::cleanup()
}
}
+ if (with_element && with_element->is_recursive)
+ {
+ if (union_result)
+ {
+ ((select_union_recursive *) union_result)->cleanup();
+ delete union_result;
+ union_result= 0;
+ }
+ with_element->mark_as_cleaned();
+ }
+ else
+ {
+ if (union_result)
+ {
+ delete union_result;
+ union_result=0; // Safety
+ if (table)
+ free_tmp_table(thd, table);
+ table= 0; // Safety
+ }
+ }
+
DBUG_RETURN(error);
}
@@ -1105,6 +1439,9 @@ bool st_select_lex_unit::cleanup()
void st_select_lex_unit::reinit_exec_mechanism()
{
prepared= optimized= executed= 0;
+ optimize_started= 0;
+ if (with_element && with_element->is_recursive)
+ with_element->reset_recursive_for_exec();
}
@@ -1141,7 +1478,9 @@ bool st_select_lex_unit::change_result(select_result_interceptor *new_result,
Get column type information for this unit.
SYNOPSIS
- st_select_lex_unit::get_unit_column_types()
+ st_select_lex_unit::get_column_types()
+ @param for_cursor if true return the list of the fields
+ retrieved by the cursor
DESCRIPTION
For a single-select the column types are taken
@@ -1155,7 +1494,7 @@ bool st_select_lex_unit::change_result(select_result_interceptor *new_result,
st_select_lex_unit::prepare()
*/
-List<Item> *st_select_lex_unit::get_unit_column_types()
+List<Item> *st_select_lex_unit::get_column_types(bool for_cursor)
{
SELECT_LEX *sl= first_select();
bool is_procedure= MY_TEST(sl->join->procedure);
@@ -1175,7 +1514,7 @@ List<Item> *st_select_lex_unit::get_unit_column_types()
return &types;
}
- return &sl->item_list;
+ return for_cursor ? sl->join->fields : &sl->item_list;
}
@@ -1208,6 +1547,7 @@ bool st_select_lex::cleanup()
}
inner_refs_list.empty();
exclude_from_table_unique_test= FALSE;
+ hidden_bit_fields= 0;
DBUG_RETURN(error);
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 78aa059f64f..80ecd820046 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -80,7 +80,7 @@ bool compare_record(const TABLE *table)
{
if (field->real_maybe_null())
{
- uchar null_byte_index= field->null_ptr - table->record[0];
+ uchar null_byte_index= (uchar)(field->null_ptr - table->record[0]);
if (((table->record[0][null_byte_index]) & field->null_bit) !=
((table->record[1][null_byte_index]) & field->null_bit))
@@ -95,8 +95,8 @@ bool compare_record(const TABLE *table)
/*
The storage engine has read all columns, so it's safe to compare all bits
- including those not in the write_set. This is cheaper than the field-by-field
- comparison done above.
+ including those not in the write_set. This is cheaper than the
+ field-by-field comparison done above.
*/
if (table->s->can_cmp_whole_record)
return cmp_record(table,record[1]);
@@ -191,7 +191,7 @@ static void prepare_record_for_error_message(int error, TABLE *table)
/* Create unique_map with all fields used by that index. */
my_bitmap_init(&unique_map, unique_map_buf, table->s->fields, FALSE);
- table->mark_columns_used_by_index_no_reset(keynr, &unique_map);
+ table->mark_columns_used_by_index(keynr, &unique_map);
/* Subtract read_set and write_set. */
bitmap_subtract(&unique_map, table->read_set);
@@ -254,7 +254,7 @@ int mysql_update(THD *thd,
ha_rows *found_return, ha_rows *updated_return)
{
bool using_limit= limit != HA_POS_ERROR;
- bool safe_update= MY_TEST(thd->variables.option_bits & OPTION_SAFE_UPDATES);
+ bool safe_update= thd->variables.option_bits & OPTION_SAFE_UPDATES;
bool used_key_is_modified= FALSE, transactional_table, will_batch;
bool can_compare_record;
int res;
@@ -270,6 +270,7 @@ int mysql_update(THD *thd,
key_map old_covering_keys;
TABLE *table;
SQL_SELECT *select= NULL;
+ SORT_INFO *file_sort= 0;
READ_RECORD info;
SELECT_LEX *select_lex= &thd->lex->select_lex;
ulonglong id;
@@ -342,7 +343,8 @@ int mysql_update(THD *thd,
if (table_list->is_view())
unfix_fields(fields);
- if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
+ if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
+ fields, MARK_COLUMNS_WRITE, 0, 0))
DBUG_RETURN(1); /* purecov: inspected */
if (table_list->view && check_fields(thd, fields))
{
@@ -353,15 +355,13 @@ int mysql_update(THD *thd,
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
DBUG_RETURN(1);
}
- if (table->default_field)
- table->mark_default_fields_for_write();
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* Check values */
table_list->grant.want_privilege= table->grant.want_privilege=
(SELECT_ACL & ~table->grant.privilege);
#endif
- if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, NULL, 0))
+ if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_READ, 0, NULL, 0))
{
free_underlaid_joins(thd, select_lex);
DBUG_RETURN(1); /* purecov: inspected */
@@ -373,7 +373,7 @@ int mysql_update(THD *thd,
switch_to_nullable_trigger_fields(fields, table);
switch_to_nullable_trigger_fields(values, table);
- /* Apply the IN=>EXISTS transformation to all subqueries and optimize them. */
+ /* Apply the IN=>EXISTS transformation to all subqueries and optimize them */
if (select_lex->optimize_unflattened_subqueries(false))
DBUG_RETURN(TRUE);
@@ -394,14 +394,6 @@ int mysql_update(THD *thd,
}
}
- /*
- If a timestamp field settable on UPDATE is present then to avoid wrong
- update force the table handler to retrieve write-only fields to be able
- to compare records and detect data change.
- */
- if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) &&
- table->default_field && table->has_default_function(true))
- bitmap_union(table->read_set, table->write_set);
// Don't count on usage of 'only index' when calculating which key to use
table->covering_keys.clear_all();
@@ -422,7 +414,7 @@ int mysql_update(THD *thd,
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
set_statistics_for_table(thd, table);
- select= make_select(table, 0, 0, conds, 0, &error);
+ select= make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
if (error || !limit || thd->is_error() ||
(select && select->check_quick(thd, safe_update, limit)))
{
@@ -544,16 +536,9 @@ int mysql_update(THD *thd,
/*
We can't update table directly; We must first search after all
matching rows before updating the table!
- */
- MY_BITMAP *save_read_set= table->read_set;
- MY_BITMAP *save_write_set= table->write_set;
- if (query_plan.index < MAX_KEY && old_covering_keys.is_set(query_plan.index))
- table->add_read_columns_used_by_index(query_plan.index);
- else
- table->use_all_columns();
-
- /* note: We avoid sorting if we sort on the used index */
+ note: We avoid sorting if we sort on the used index
+ */
if (query_plan.using_filesort)
{
/*
@@ -561,28 +546,15 @@ int mysql_update(THD *thd,
to update
NOTE: filesort will call table->prepare_for_position()
*/
- uint length= 0;
- SORT_FIELD *sortorder;
- ha_rows examined_rows;
- ha_rows found_rows;
-
- table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL |
- MY_THREAD_SPECIFIC));
+ Filesort fsort(order, limit, true, select);
+
Filesort_tracker *fs_tracker=
thd->lex->explain->get_upd_del_plan()->filesort_tracker;
- if (!(sortorder=make_unireg_sortorder(thd, NULL, 0, order, &length, NULL)) ||
- (table->sort.found_records= filesort(thd, table, sortorder, length,
- select, limit,
- true,
- &examined_rows, &found_rows,
- fs_tracker))
- == HA_POS_ERROR)
- {
+ if (!(file_sort= filesort(thd, table, &fsort, fs_tracker)))
goto err;
- }
- thd->inc_examined_row_count(examined_rows);
+ thd->inc_examined_row_count(file_sort->examined_rows);
+
/*
Filesort has already found and selected the rows we want to update,
so we don't need the where clause
@@ -592,6 +564,14 @@ int mysql_update(THD *thd,
}
else
{
+ MY_BITMAP *save_read_set= table->read_set;
+ MY_BITMAP *save_write_set= table->write_set;
+
+ if (query_plan.index < MAX_KEY && old_covering_keys.is_set(query_plan.index))
+ table->prepare_for_keyread(query_plan.index);
+ else
+ table->use_all_columns();
+
/*
We are doing a search on a key that is updated. In this case
we go trough the matching rows, save a pointer to them and
@@ -624,7 +604,7 @@ int mysql_update(THD *thd,
*/
if (query_plan.index == MAX_KEY || (select && select->quick))
- error= init_read_record(&info, thd, table, select, 0, 1, FALSE);
+ error= init_read_record(&info, thd, table, select, NULL, 0, 1, FALSE);
else
error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
reverse);
@@ -641,8 +621,6 @@ int mysql_update(THD *thd,
while (!(error=info.read_record(&info)) && !thd->killed)
{
explain->buf_tracker.on_record_read();
- if (table->vfield)
- update_virtual_fields(thd, table, VCOL_UPDATE_FOR_READ);
thd->inc_examined_row_count(1);
if (!select || (error= select->skip_record(thd)) > 0)
{
@@ -666,8 +644,9 @@ int mysql_update(THD *thd,
else
{
/*
- Don't try unlocking the row if skip_record reported an error since in
- this case the transaction might have been rolled back already.
+ Don't try unlocking the row if skip_record reported an
+ error since in this case the transaction might have been
+ rolled back already.
*/
if (error < 0)
{
@@ -703,13 +682,12 @@ int mysql_update(THD *thd,
if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
error=1; /* purecov: inspected */
select->file=tempfile; // Read row ptrs from this file
- // select->file was copied, update self-references.
- setup_io_cache(&select->file);
if (error >= 0)
goto err;
+
+ table->file->ha_end_keyread();
+ table->column_bitmaps_set(save_read_set, save_write_set);
}
- table->disable_keyread();
- table->column_bitmaps_set(save_read_set, save_write_set);
}
if (ignore)
@@ -718,7 +696,7 @@ int mysql_update(THD *thd,
if (select && select->quick && select->quick->reset())
goto err;
table->file->try_semi_consistent_read(1);
- if (init_read_record(&info, thd, table, select, 0, 1, FALSE))
+ if (init_read_record(&info, thd, table, select, file_sort, 0, 1, FALSE))
goto err;
updated= found= 0;
@@ -759,8 +737,6 @@ int mysql_update(THD *thd,
while (!(error=info.read_record(&info)) && !thd->killed)
{
explain->tracker.on_record_read();
- if (table->vfield)
- update_virtual_fields(thd, table, VCOL_UPDATE_FOR_READ);
thd->inc_examined_row_count(1);
if (!select || select->skip_record(thd) > 0)
{
@@ -777,7 +753,7 @@ int mysql_update(THD *thd,
if (!can_compare_record || compare_record(table))
{
- if (table->default_field && table->update_default_fields())
+ if (table->default_field && table->update_default_fields(1, ignore))
{
error= 1;
break;
@@ -1024,6 +1000,7 @@ int mysql_update(THD *thd,
}
DBUG_ASSERT(transactional_table || !updated || thd->transaction.stmt.modified_non_trans_table);
free_underlaid_joins(thd, select_lex);
+ delete file_sort;
/* If LAST_INSERT_ID(X) was used, report X */
id= thd->arg_of_last_insert_id_function ?
@@ -1057,8 +1034,9 @@ int mysql_update(THD *thd,
err:
delete select;
+ delete file_sort;
free_underlaid_joins(thd, select_lex);
- table->disable_keyread();
+ table->file->ha_end_keyread();
thd->abort_on_warning= 0;
DBUG_RETURN(1);
@@ -1246,10 +1224,8 @@ bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
{
// Partitioned key is updated
my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
- tl->belong_to_view ? tl->belong_to_view->alias
- : tl->alias,
- tl2->belong_to_view ? tl2->belong_to_view->alias
- : tl2->alias);
+ tl->top_table()->alias,
+ tl2->top_table()->alias);
return true;
}
@@ -1267,10 +1243,8 @@ bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
{
// Clustered primary key is updated
my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
- tl->belong_to_view ? tl->belong_to_view->alias
- : tl->alias,
- tl2->belong_to_view ? tl2->belong_to_view->alias
- : tl2->alias);
+ tl->top_table()->alias,
+ tl2->top_table()->alias);
return true;
}
}
@@ -1432,7 +1406,8 @@ int mysql_multi_update_prepare(THD *thd)
if (lex->select_lex.handle_derived(thd->lex, DT_MERGE))
DBUG_RETURN(TRUE);
- if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
+ if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
+ *fields, MARK_COLUMNS_WRITE, 0, 0))
DBUG_RETURN(TRUE);
for (tl= table_list; tl ; tl= tl->next_local)
@@ -1470,11 +1445,13 @@ int mysql_multi_update_prepare(THD *thd)
{
if (!tl->single_table_updatable() || check_key_in_view(thd, tl))
{
- my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE");
+ my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
+ tl->top_table()->alias, "UPDATE");
DBUG_RETURN(TRUE);
}
- DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
+ DBUG_PRINT("info",("setting table `%s` for update",
+ tl->top_table()->alias));
/*
If table will be updated we should not downgrade lock for it and
leave it as is.
@@ -1620,7 +1597,7 @@ bool mysql_multi_update(THD *thd,
thd->abort_on_warning= !ignore && thd->is_strict_mode();
List<Item> total_list;
- res= mysql_select(thd, &select_lex->ref_pointer_array,
+ res= mysql_select(thd,
table_list, select_lex->with_wild,
total_list,
conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL,
@@ -1716,7 +1693,8 @@ int multi_update::prepare(List<Item> &not_used_values,
reference tables
*/
- int error= setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, NULL, 0);
+ int error= setup_fields(thd, Ref_ptr_array(),
+ *values, MARK_COLUMNS_READ, 0, NULL, 0);
ti.rewind();
while ((table_ref= ti++))
@@ -1729,17 +1707,8 @@ int multi_update::prepare(List<Item> &not_used_values,
{
table->read_set= &table->def_read_set;
bitmap_union(table->read_set, &table->tmp_set);
- /*
- If a timestamp field settable on UPDATE is present then to avoid wrong
- update force the table handler to retrieve write-only fields to be able
- to compare records and detect data change.
- */
- if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) &&
- table->default_field && table->has_default_function(true))
- bitmap_union(table->read_set, table->write_set);
}
}
-
if (error)
DBUG_RETURN(1);
@@ -1832,6 +1801,21 @@ void multi_update::update_used_tables()
}
}
+void multi_update::prepare_to_read_rows()
+{
+ /*
+ Update column maps now. It cannot be done in ::prepare() before the
+ optimizer, because the optimizer might reset them (in
+ SELECT_LEX::update_used_tables()); it cannot be done in
+ ::initialize_tables() after the optimizer, because the optimizer
+ might read rows from const tables.
+ */
+
+ for (TABLE_LIST *tl= update_tables; tl; tl= tl->next_local)
+ tl->table->mark_columns_needed_for_update();
+}
+
+
/*
Check if table is safe to update on fly
@@ -1948,12 +1932,10 @@ multi_update::initialize_tables(JOIN *join)
{
if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
{
- table->mark_columns_needed_for_update();
table_to_update= table; // Update table on the fly
continue;
}
}
- table->mark_columns_needed_for_update();
table->prepare_for_position();
/*
@@ -2043,7 +2025,7 @@ loop_end:
/* Make an unique key over the first field to avoid duplicated updates */
bzero((char*) &group, sizeof(group));
- group.asc= 1;
+ group.direction= ORDER::ORDER_ASC;
group.item= (Item**) temp_fields.head_ref();
tmp_param->quick_group=1;
@@ -2130,11 +2112,11 @@ int multi_update::send_data(List<Item> &not_used_values)
table->status|= STATUS_UPDATED;
store_record(table,record[1]);
- if (fill_record_n_invoke_before_triggers(thd, table, *fields_for_table[offset],
+ if (fill_record_n_invoke_before_triggers(thd, table,
+ *fields_for_table[offset],
*values_for_table[offset], 0,
TRG_EVENT_UPDATE))
DBUG_RETURN(1);
-
/*
Reset the table->auto_increment_field_not_null as it is valid for
only one row.
@@ -2145,7 +2127,7 @@ int multi_update::send_data(List<Item> &not_used_values)
{
int error;
- if (table->default_field && table->update_default_fields())
+ if (table->default_field && table->update_default_fields(1, ignore))
DBUG_RETURN(1);
if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -2329,6 +2311,26 @@ int multi_update::do_updates()
do_update= 0; // Don't retry this function
if (!found)
DBUG_RETURN(0);
+
+ /*
+ Update read_set to include all fields that virtual columns may depend on.
+ Usually they're already in the read_set, but if the previous access
+ method was keyread, only the virtual column itself will be in read_set,
+ not its dependencies
+ */
+ while(TABLE *tbl= check_opt_it++)
+ {
+ if (tbl->vcol_set)
+ {
+ bitmap_clear_all(tbl->vcol_set);
+ for (Field **vf= tbl->vfield; *vf; vf++)
+ {
+ if (bitmap_is_set(tbl->read_set, (*vf)->field_index))
+ tbl->mark_virtual_col(*vf);
+ }
+ }
+ }
+
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
{
bool can_compare_record;
@@ -2346,6 +2348,17 @@ int multi_update::do_updates()
goto err;
}
table->file->extra(HA_EXTRA_NO_CACHE);
+ /*
+ We have to clear the base record, if we have virtual indexed
+ blob fields, as some storage engines will access the blob fields
+ to calculate the keys to see if they have changed. Without
+ clearing them, the blob pointers will contain random values which can
+ cause a crash.
+ This is a workaround for engines that access columns not present in
+ either read or write set.
+ */
+ if (table->vfield)
+ empty_record(table);
check_opt_it.rewind();
while(TABLE *tbl= check_opt_it++)
@@ -2415,6 +2428,11 @@ int multi_update::do_updates()
field_num++;
} while ((tbl= check_opt_it++));
+ if (table->vfield &&
+ table->update_virtual_fields(table->file,
+ VCOL_UPDATE_INDEXED_FOR_UPDATE))
+ goto err2;
+
table->status|= STATUS_UPDATED;
store_record(table,record[1]);
@@ -2435,10 +2453,11 @@ int multi_update::do_updates()
if (!can_compare_record || compare_record(table))
{
int error;
- if (table->default_field && (error= table->update_default_fields()))
+ if (table->default_field &&
+ (error= table->update_default_fields(1, ignore)))
goto err2;
if (table->vfield &&
- update_virtual_fields(thd, table, VCOL_UPDATE_FOR_WRITE))
+ table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE))
goto err2;
if ((error= cur_table->view_check_option(thd, ignore)) !=
VIEW_CHECK_OK)
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 4d7c3de9337..0cff47437c8 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2004, 2013, Oracle and/or its affiliates.
- Copyright (c) 2011, 2015, MariaDB
+ Copyright (c) 2011, 2016, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -35,6 +35,7 @@
#include "sp_cache.h"
#include "datadict.h" // dd_frm_is_view()
#include "sql_derived.h"
+#include "sql_cte.h" // check_dependencies_in_with_clauses()
#define MD5_BUFF_LENGTH 33
@@ -58,7 +59,7 @@ static int mysql_register_view(THD *, TABLE_LIST *, enum_view_create_mode);
NAME_LEN, it is truncated.
*/
-static void make_unique_view_field_name(Item *target,
+static void make_unique_view_field_name(THD *thd, Item *target,
List<Item> &item_list,
Item *last_element)
{
@@ -96,7 +97,7 @@ static void make_unique_view_field_name(Item *target,
}
target->orig_name= target->name;
- target->set_name(buff, name_len, system_charset_info);
+ target->set_name(thd, buff, name_len, system_charset_info);
}
@@ -123,7 +124,7 @@ static void make_unique_view_field_name(Item *target,
isn't allowed
*/
-bool check_duplicate_names(List<Item> &item_list, bool gen_unique_view_name)
+bool check_duplicate_names(THD *thd, List<Item> &item_list, bool gen_unique_view_name)
{
Item *item;
List_iterator_fast<Item> it(item_list);
@@ -144,9 +145,9 @@ bool check_duplicate_names(List<Item> &item_list, bool gen_unique_view_name)
if (!gen_unique_view_name)
goto err;
if (item->is_autogenerated_name)
- make_unique_view_field_name(item, item_list, item);
+ make_unique_view_field_name(thd, item, item_list, item);
else if (check->is_autogenerated_name)
- make_unique_view_field_name(check, item_list, item);
+ make_unique_view_field_name(thd, check, item_list, item);
else
goto err;
}
@@ -167,7 +168,7 @@ err:
@param item_list List of Items which should be checked
*/
-static void make_valid_column_names(List<Item> &item_list)
+void make_valid_column_names(THD *thd, List<Item> &item_list)
{
Item *item;
uint name_len;
@@ -181,7 +182,7 @@ static void make_valid_column_names(List<Item> &item_list)
continue;
name_len= my_snprintf(buff, NAME_LEN, "Name_exp_%u", column_no);
item->orig_name= item->name;
- item->set_name(buff, name_len, system_charset_info);
+ item->set_name(thd, buff, name_len, system_charset_info);
}
DBUG_VOID_RETURN;
@@ -216,7 +217,7 @@ fill_defined_view_parts (THD *thd, TABLE_LIST *view)
decoy= *view;
decoy.mdl_request.key.mdl_key_init(&view->mdl_request.key);
- if (tdc_open_view(thd, &decoy, decoy.alias, OPEN_VIEW_NO_PARSE))
+ if (tdc_open_view(thd, &decoy, OPEN_VIEW_NO_PARSE))
return TRUE;
if (!lex->definer)
@@ -428,6 +429,13 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
lex->link_first_table_back(view, link_to_local);
view->open_type= OT_BASE_ONLY;
+
+ if (check_dependencies_in_with_clauses(lex->with_clauses_list))
+ {
+ res= TRUE;
+ goto err;
+ }
+
WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
/*
@@ -439,7 +447,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
view->mdl_request.set_type(MDL_EXCLUSIVE);
}
- if (open_temporary_tables(thd, lex->query_tables) ||
+ if (thd->open_temporary_tables(lex->query_tables) ||
open_and_lock_tables(thd, lex->query_tables, TRUE, 0))
{
view= lex->unlink_first_table(&link_to_local);
@@ -549,16 +557,16 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
}
while ((item= it++, name= nm++))
{
- item->set_name(name->str, (uint) name->length, system_charset_info);
+ item->set_name(thd, name->str, (uint) name->length, system_charset_info);
item->is_autogenerated_name= FALSE;
}
}
/* Check if the auto generated column names are conforming. */
for (sl= select_lex; sl; sl= sl->next_select())
- make_valid_column_names(sl->item_list);
+ make_valid_column_names(thd, sl->item_list);
- if (check_duplicate_names(select_lex->item_list, 1))
+ if (check_duplicate_names(thd, select_lex->item_list, 1))
{
res= TRUE;
goto err;
@@ -635,7 +643,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
if (!res && mysql_bin_log.is_open())
{
- String buff;
+ StringBuffer<128> buff(thd->variables.character_set_client);
+ DBUG_ASSERT(buff.charset()->mbminlen == 1);
const LEX_STRING command[3]=
{{ C_STRING_WITH_LEN("CREATE ") },
{ C_STRING_WITH_LEN("ALTER ") },
@@ -905,7 +914,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
view_query.length(0);
is_query.length(0);
{
- ulong sql_mode= thd->variables.sql_mode & MODE_ANSI_QUOTES;
+ sql_mode_t sql_mode= thd->variables.sql_mode & MODE_ANSI_QUOTES;
thd->variables.sql_mode&= ~MODE_ANSI_QUOTES;
lex->unit.print(&view_query, enum_query_type(QT_VIEW_INTERNAL |
@@ -1142,7 +1151,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
bool result, view_is_mergeable;
TABLE_LIST *UNINIT_VAR(view_main_select_tables);
DBUG_ENTER("mysql_make_view");
- DBUG_PRINT("info", ("table: 0x%lx (%s)", (ulong) table, table->table_name));
+ DBUG_PRINT("info", ("table: %p (%s)", table, table->table_name));
if (table->required_type == FRMTYPE_TABLE)
{
@@ -1337,7 +1346,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
view_select= &lex->select_lex;
view_select->select_number= ++thd->lex->stmt_lex->current_select_number;
- ulonglong saved_mode= thd->variables.sql_mode;
+ sql_mode_t saved_mode= thd->variables.sql_mode;
/* switch off modes which can prevent normal parsing of VIEW
- MODE_REAL_AS_FLOAT affect only CREATE TABLE parsing
+ MODE_PIPES_AS_CONCAT affect expression parsing
@@ -1387,6 +1396,9 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
TABLE_LIST *tbl;
Security_context *security_ctx= 0;
+ if (check_dependencies_in_with_clauses(thd->lex->with_clauses_list))
+ goto err;
+
/*
Check rights to run commands (ANALYZE SELECT, EXPLAIN SELECT &
SHOW CREATE) which show underlying tables.
@@ -1615,6 +1627,8 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
sl->context.error_processor_data= (void *)table;
}
+ view_select->master_unit()->is_view= true;
+
/*
check MERGE algorithm ability
- algorithm is not explicit TEMPORARY TABLE
diff --git a/sql/sql_view.h b/sql/sql_view.h
index ce83dc656ad..b9eb92198f8 100644
--- a/sql/sql_view.h
+++ b/sql/sql_view.h
@@ -51,12 +51,17 @@ int view_repair(THD *thd, TABLE_LIST *view, HA_CHECK_OPT *check_opt);
extern TYPELIB updatable_views_with_limit_typelib;
-bool check_duplicate_names(List<Item>& item_list, bool gen_unique_view_names);
+bool check_duplicate_names(THD *thd, List<Item>& item_list,
+ bool gen_unique_view_names);
bool mysql_rename_view(THD *thd, const char *new_db, const char *new_name,
TABLE_LIST *view);
+void make_valid_column_names(THD *thd, List<Item> &item_list);
+
#define VIEW_ANY_ACL (SELECT_ACL | UPDATE_ACL | INSERT_ACL | DELETE_ACL)
extern const LEX_STRING view_type;
+void make_valid_column_names(List<Item> &item_list);
+
#endif /* SQL_VIEW_INCLUDED */
diff --git a/sql/sql_window.cc b/sql/sql_window.cc
new file mode 100644
index 00000000000..310cf5bfd91
--- /dev/null
+++ b/sql/sql_window.cc
@@ -0,0 +1,3069 @@
+#include "sql_select.h"
+#include "sql_list.h"
+#include "item_windowfunc.h"
+#include "filesort.h"
+#include "sql_base.h"
+#include "sql_window.h"
+#include "my_dbug.h"
+
+
+bool
+Window_spec::check_window_names(List_iterator_fast<Window_spec> &it)
+{
+ if (window_names_are_checked)
+ return false;
+ char *name= this->name();
+ char *ref_name= window_reference();
+ it.rewind();
+ Window_spec *win_spec;
+ while((win_spec= it++) && win_spec != this)
+ {
+ char *win_spec_name= win_spec->name();
+ if (!win_spec_name)
+ break;
+ if (name && my_strcasecmp(system_charset_info, name, win_spec_name) == 0)
+ {
+ my_error(ER_DUP_WINDOW_NAME, MYF(0), name);
+ return true;
+ }
+ if (ref_name &&
+ my_strcasecmp(system_charset_info, ref_name, win_spec_name) == 0)
+ {
+ if (partition_list->elements)
+ {
+ my_error(ER_PARTITION_LIST_IN_REFERENCING_WINDOW_SPEC, MYF(0),
+ ref_name);
+ return true;
+ }
+ if (win_spec->order_list->elements && order_list->elements)
+ {
+ my_error(ER_ORDER_LIST_IN_REFERENCING_WINDOW_SPEC, MYF(0), ref_name);
+ return true;
+ }
+ if (win_spec->window_frame)
+ {
+ my_error(ER_WINDOW_FRAME_IN_REFERENCED_WINDOW_SPEC, MYF(0), ref_name);
+ return true;
+ }
+ referenced_win_spec= win_spec;
+ if (partition_list->elements == 0)
+ partition_list= win_spec->partition_list;
+ if (order_list->elements == 0)
+ order_list= win_spec->order_list;
+ }
+ }
+ if (ref_name && !referenced_win_spec)
+ {
+ my_error(ER_WRONG_WINDOW_SPEC_NAME, MYF(0), ref_name);
+ return true;
+ }
+ window_names_are_checked= true;
+ return false;
+}
+
+void
+Window_spec::print(String *str, enum_query_type query_type)
+{
+ str->append('(');
+ if (partition_list->first)
+ {
+ str->append(STRING_WITH_LEN(" partition by "));
+ st_select_lex::print_order(str, partition_list->first, query_type);
+ }
+ if (order_list->first)
+ {
+ str->append(STRING_WITH_LEN(" order by "));
+ st_select_lex::print_order(str, order_list->first, query_type);
+ }
+ if (window_frame)
+ window_frame->print(str, query_type);
+ str->append(')');
+}
+
+bool
+Window_frame::check_frame_bounds()
+{
+ if ((top_bound->is_unbounded() &&
+ top_bound->precedence_type == Window_frame_bound::FOLLOWING) ||
+ (bottom_bound->is_unbounded() &&
+ bottom_bound->precedence_type == Window_frame_bound::PRECEDING) ||
+ (top_bound->precedence_type == Window_frame_bound::CURRENT &&
+ bottom_bound->precedence_type == Window_frame_bound::PRECEDING) ||
+ (bottom_bound->precedence_type == Window_frame_bound::CURRENT &&
+ top_bound->precedence_type == Window_frame_bound::FOLLOWING))
+ {
+ my_error(ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS, MYF(0));
+ return true;
+ }
+
+ return false;
+}
+
+
+void
+Window_frame::print(String *str, enum_query_type query_type)
+{
+ switch (units) {
+ case UNITS_ROWS:
+ str->append(STRING_WITH_LEN(" rows "));
+ break;
+ case UNITS_RANGE:
+ str->append(STRING_WITH_LEN(" range "));
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ str->append(STRING_WITH_LEN("between "));
+ top_bound->print(str, query_type);
+ str->append(STRING_WITH_LEN(" and "));
+ bottom_bound->print(str, query_type);
+
+ if (exclusion != EXCL_NONE)
+ {
+ str->append(STRING_WITH_LEN(" exclude "));
+ switch (exclusion) {
+ case EXCL_CURRENT_ROW:
+ str->append(STRING_WITH_LEN(" current row "));
+ break;
+ case EXCL_GROUP:
+ str->append(STRING_WITH_LEN(" group "));
+ break;
+ case EXCL_TIES:
+ str->append(STRING_WITH_LEN(" ties "));
+ break;
+ default:
+ DBUG_ASSERT(0);
+ ;
+ }
+ }
+}
+
+
+void
+Window_frame_bound::print(String *str, enum_query_type query_type)
+{
+ if (precedence_type == CURRENT)
+ {
+ str->append(STRING_WITH_LEN(" current row "));
+ return;
+ }
+ if (is_unbounded())
+ str->append(STRING_WITH_LEN(" unbounded "));
+ else
+ offset->print(str ,query_type);
+ switch (precedence_type) {
+ case PRECEDING:
+ str->append(STRING_WITH_LEN(" preceding "));
+ break;
+ case FOLLOWING:
+ str->append(STRING_WITH_LEN(" following "));
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+}
+
+/*
+ Setup window functions in a select
+*/
+
+int
+setup_windows(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
+ List<Item> &fields, List<Item> &all_fields,
+ List<Window_spec> &win_specs, List<Item_window_func> &win_funcs)
+{
+ Window_spec *win_spec;
+ DBUG_ENTER("setup_windows");
+ List_iterator<Window_spec> it(win_specs);
+
+ /*
+ Move all unnamed specifications after the named ones.
+ We could have avoided it if we had built two separate lists for
+ named and unnamed specifications.
+ */
+ Query_arena *arena, backup;
+ arena= thd->activate_stmt_arena_if_needed(&backup);
+ uint i = 0;
+ uint elems= win_specs.elements;
+ while ((win_spec= it++) && i++ < elems)
+ {
+ if (win_spec->name() == NULL)
+ {
+ it.remove();
+ win_specs.push_back(win_spec);
+ }
+ }
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+
+ it.rewind();
+
+ List_iterator_fast<Window_spec> itp(win_specs);
+
+ while ((win_spec= it++))
+ {
+ bool hidden_group_fields;
+ if (win_spec->check_window_names(itp) ||
+ setup_group(thd, ref_pointer_array, tables, fields, all_fields,
+ win_spec->partition_list->first, &hidden_group_fields,
+ true) ||
+ setup_order(thd, ref_pointer_array, tables, fields, all_fields,
+ win_spec->order_list->first, true) ||
+ (win_spec->window_frame &&
+ win_spec->window_frame->check_frame_bounds()))
+ {
+ DBUG_RETURN(1);
+ }
+
+ if (win_spec->window_frame &&
+ win_spec->window_frame->exclusion != Window_frame::EXCL_NONE)
+ {
+ my_error(ER_FRAME_EXCLUSION_NOT_SUPPORTED, MYF(0));
+ DBUG_RETURN(1);
+ }
+ /*
+ For "win_func() OVER (ORDER BY order_list RANGE BETWEEN ...)",
+      - ORDER BY order_list must not be omitted
+ - the list must have a single element.
+ */
+ if (win_spec->window_frame &&
+ win_spec->window_frame->units == Window_frame::UNITS_RANGE)
+ {
+ if (win_spec->order_list->elements != 1)
+ {
+ my_error(ER_RANGE_FRAME_NEEDS_SIMPLE_ORDERBY, MYF(0));
+ DBUG_RETURN(1);
+ }
+
+ /*
+ "The declared type of SK shall be numeric, datetime, or interval"
+ we don't support datetime or interval, yet.
+ */
+ Item_result rtype= win_spec->order_list->first->item[0]->result_type();
+ if (rtype != REAL_RESULT && rtype != INT_RESULT &&
+ rtype != DECIMAL_RESULT)
+ {
+ my_error(ER_WRONG_TYPE_FOR_RANGE_FRAME, MYF(0));
+ DBUG_RETURN(1);
+ }
+
+ /*
+ "The declared type of UVS shall be numeric if the declared type of SK
+ is numeric; otherwise, it shall be an interval type that may be added
+ to or subtracted from the declared type of SK"
+ */
+ Window_frame_bound *bounds[]= {win_spec->window_frame->top_bound,
+ win_spec->window_frame->bottom_bound,
+ NULL};
+ for (Window_frame_bound **pbound= &bounds[0]; *pbound; pbound++)
+ {
+ if (!(*pbound)->is_unbounded() &&
+ ((*pbound)->precedence_type == Window_frame_bound::FOLLOWING ||
+ (*pbound)->precedence_type == Window_frame_bound::PRECEDING))
+ {
+ Item_result rtype= (*pbound)->offset->result_type();
+ if (rtype != REAL_RESULT && rtype != INT_RESULT &&
+ rtype != DECIMAL_RESULT)
+ {
+ my_error(ER_WRONG_TYPE_FOR_RANGE_FRAME, MYF(0));
+ DBUG_RETURN(1);
+ }
+ }
+ }
+ }
+
+ /* "ROWS PRECEDING|FOLLOWING $n" must have a numeric $n */
+ if (win_spec->window_frame &&
+ win_spec->window_frame->units == Window_frame::UNITS_ROWS)
+ {
+ Window_frame_bound *bounds[]= {win_spec->window_frame->top_bound,
+ win_spec->window_frame->bottom_bound,
+ NULL};
+ for (Window_frame_bound **pbound= &bounds[0]; *pbound; pbound++)
+ {
+ if (!(*pbound)->is_unbounded() &&
+ ((*pbound)->precedence_type == Window_frame_bound::FOLLOWING ||
+ (*pbound)->precedence_type == Window_frame_bound::PRECEDING))
+ {
+ Item *offset= (*pbound)->offset;
+ if (offset->result_type() != INT_RESULT)
+ {
+ my_error(ER_WRONG_TYPE_FOR_ROWS_FRAME, MYF(0));
+ DBUG_RETURN(1);
+ }
+ }
+ }
+ }
+ }
+
+ DBUG_RETURN(0);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Sorting window functions to minimize the number of table scans
+// performed during the computation of these functions
+/////////////////////////////////////////////////////////////////////////////
+
+#define CMP_LT -2 // Less than
+#define CMP_LT_C -1 // Less than and compatible
+#define CMP_EQ 0 // Equal to
+#define CMP_GT_C 1 // Greater than and compatible
+#define CMP_GT 2 // Greater than
+
+static
+int compare_order_elements(ORDER *ord1, ORDER *ord2)
+{
+ if (*ord1->item == *ord2->item && ord1->direction == ord2->direction)
+ return CMP_EQ;
+ Item *item1= (*ord1->item)->real_item();
+ Item *item2= (*ord2->item)->real_item();
+ DBUG_ASSERT(item1->type() == Item::FIELD_ITEM &&
+ item2->type() == Item::FIELD_ITEM);
+ ptrdiff_t cmp= ((Item_field *) item1)->field - ((Item_field *) item2)->field;
+ if (cmp == 0)
+ {
+ if (ord1->direction == ord2->direction)
+ return CMP_EQ;
+ return ord1->direction > ord2->direction ? CMP_GT : CMP_LT;
+ }
+ else
+ return cmp > 0 ? CMP_GT : CMP_LT;
+}
+
+static
+int compare_order_lists(SQL_I_List<ORDER> *part_list1,
+ SQL_I_List<ORDER> *part_list2)
+{
+ if (part_list1 == part_list2)
+ return CMP_EQ;
+ ORDER *elem1= part_list1->first;
+ ORDER *elem2= part_list2->first;
+ for ( ; elem1 && elem2; elem1= elem1->next, elem2= elem2->next)
+ {
+ int cmp;
+    // remove all constants as we don't need them for comparison
+ while(elem1 && ((*elem1->item)->real_item())->const_item())
+ {
+ elem1= elem1->next;
+ continue;
+ }
+
+ while(elem2 && ((*elem2->item)->real_item())->const_item())
+ {
+ elem2= elem2->next;
+ continue;
+ }
+
+ if (!elem1 || !elem2)
+ break;
+
+ if ((cmp= compare_order_elements(elem1, elem2)))
+ return cmp;
+ }
+ if (elem1)
+ return CMP_GT_C;
+ if (elem2)
+ return CMP_LT_C;
+ return CMP_EQ;
+}
+
+
+static
+int compare_window_frame_bounds(Window_frame_bound *win_frame_bound1,
+ Window_frame_bound *win_frame_bound2,
+ bool is_bottom_bound)
+{
+ int res;
+ if (win_frame_bound1->precedence_type != win_frame_bound2->precedence_type)
+ {
+ res= win_frame_bound1->precedence_type > win_frame_bound2->precedence_type ?
+ CMP_GT : CMP_LT;
+ if (is_bottom_bound)
+ res= -res;
+ return res;
+ }
+
+ if (win_frame_bound1->is_unbounded() && win_frame_bound2->is_unbounded())
+ return CMP_EQ;
+
+ if (!win_frame_bound1->is_unbounded() && !win_frame_bound2->is_unbounded())
+ {
+ if (win_frame_bound1->offset->eq(win_frame_bound2->offset, true))
+ return CMP_EQ;
+ else
+ {
+ res= strcmp(win_frame_bound1->offset->name,
+ win_frame_bound2->offset->name);
+ res= res > 0 ? CMP_GT : CMP_LT;
+ if (is_bottom_bound)
+ res= -res;
+ return res;
+ }
+ }
+
+ /*
+ Here we have:
+    win_frame_bound1->is_unbounded() != win_frame_bound2->is_unbounded()
+ */
+ return is_bottom_bound != win_frame_bound1->is_unbounded() ? CMP_LT : CMP_GT;
+}
+
+
+static
+int compare_window_frames(Window_frame *win_frame1,
+ Window_frame *win_frame2)
+{
+ int cmp;
+
+ if (win_frame1 == win_frame2)
+ return CMP_EQ;
+
+ if (!win_frame1)
+ return CMP_LT;
+
+ if (!win_frame2)
+ return CMP_GT;
+
+ if (win_frame1->units != win_frame2->units)
+ return win_frame1->units > win_frame2->units ? CMP_GT : CMP_LT;
+
+ cmp= compare_window_frame_bounds(win_frame1->top_bound,
+ win_frame2->top_bound,
+ false);
+ if (cmp)
+ return cmp;
+
+ cmp= compare_window_frame_bounds(win_frame1->bottom_bound,
+ win_frame2->bottom_bound,
+ true);
+ if (cmp)
+ return cmp;
+
+ if (win_frame1->exclusion != win_frame2->exclusion)
+ return win_frame1->exclusion > win_frame2->exclusion ? CMP_GT_C : CMP_LT_C;
+
+ return CMP_EQ;
+}
+
+static
+int compare_window_spec_joined_lists(Window_spec *win_spec1,
+ Window_spec *win_spec2)
+{
+ win_spec1->join_partition_and_order_lists();
+ win_spec2->join_partition_and_order_lists();
+ int cmp= compare_order_lists(win_spec1->partition_list,
+ win_spec2->partition_list);
+ win_spec1->disjoin_partition_and_order_lists();
+ win_spec2->disjoin_partition_and_order_lists();
+ return cmp;
+}
+
+
+static
+int compare_window_funcs_by_window_specs(Item_window_func *win_func1,
+ Item_window_func *win_func2,
+ void *arg)
+{
+ int cmp;
+ Window_spec *win_spec1= win_func1->window_spec;
+ Window_spec *win_spec2= win_func2->window_spec;
+ if (win_spec1 == win_spec2)
+ return CMP_EQ;
+ cmp= compare_order_lists(win_spec1->partition_list,
+ win_spec2->partition_list);
+ if (cmp == CMP_EQ)
+ {
+ /*
+ Partition lists contain the same elements.
+ Let's use only one of the lists.
+ */
+ if (!win_spec1->name() && win_spec2->name())
+ win_spec1->partition_list= win_spec2->partition_list;
+ else
+ win_spec2->partition_list= win_spec1->partition_list;
+
+ cmp= compare_order_lists(win_spec1->order_list,
+ win_spec2->order_list);
+
+ if (cmp != CMP_EQ)
+ return cmp;
+
+ /*
+ Order lists contain the same elements.
+ Let's use only one of the lists.
+ */
+ if (!win_spec1->name() && win_spec2->name())
+ win_spec1->order_list= win_spec2->order_list;
+ else
+ win_spec2->order_list= win_spec1->order_list;
+
+ cmp= compare_window_frames(win_spec1->window_frame,
+ win_spec2->window_frame);
+
+ if (cmp != CMP_EQ)
+ return cmp;
+
+ /* Window frames are equal. Let's use only one of them. */
+ if (!win_spec1->name() && win_spec2->name())
+ win_spec1->window_frame= win_spec2->window_frame;
+ else
+ win_spec2->window_frame= win_spec1->window_frame;
+
+ return CMP_EQ;
+ }
+
+ if (cmp == CMP_GT || cmp == CMP_LT)
+ return cmp;
+
+  /* one of the partition lists is the proper beginning of the other */
+ cmp= compare_window_spec_joined_lists(win_spec1, win_spec2);
+
+ if (CMP_LT_C <= cmp && cmp <= CMP_GT_C)
+ cmp= win_spec1->partition_list->elements <
+ win_spec2->partition_list->elements ? CMP_GT_C : CMP_LT_C;
+
+ return cmp;
+}
+
+
+#define SORTORDER_CHANGE_FLAG 1
+#define PARTITION_CHANGE_FLAG 2
+#define FRAME_CHANGE_FLAG 4
+
+typedef int (*Item_window_func_cmp)(Item_window_func *f1,
+ Item_window_func *f2,
+ void *arg);
+/*
+ @brief
+ Sort window functions so that those that can be computed together are
+ adjacent.
+
+ @detail
+ Sort window functions by their
+ - required sorting order,
+ - partition list,
+ - window frame compatibility.
+
+ The changes between the groups are marked by setting item_window_func->marker.
+*/
+
+static
+void order_window_funcs_by_window_specs(List<Item_window_func> *win_func_list)
+{
+ if (win_func_list->elements == 0)
+ return;
+
+ bubble_sort<Item_window_func>(win_func_list,
+ compare_window_funcs_by_window_specs,
+ NULL);
+
+ List_iterator_fast<Item_window_func> it(*win_func_list);
+ Item_window_func *prev= it++;
+ prev->marker= SORTORDER_CHANGE_FLAG |
+ PARTITION_CHANGE_FLAG |
+ FRAME_CHANGE_FLAG;
+ Item_window_func *curr;
+ while ((curr= it++))
+ {
+ Window_spec *win_spec_prev= prev->window_spec;
+ Window_spec *win_spec_curr= curr->window_spec;
+ curr->marker= 0;
+ if (!(win_spec_prev->partition_list == win_spec_curr->partition_list &&
+ win_spec_prev->order_list == win_spec_curr->order_list))
+ {
+ int cmp;
+ if (win_spec_prev->partition_list == win_spec_curr->partition_list)
+ cmp= compare_order_lists(win_spec_prev->order_list,
+ win_spec_curr->order_list);
+ else
+ cmp= compare_window_spec_joined_lists(win_spec_prev, win_spec_curr);
+ if (!(CMP_LT_C <= cmp && cmp <= CMP_GT_C))
+ {
+ curr->marker= SORTORDER_CHANGE_FLAG |
+ PARTITION_CHANGE_FLAG |
+ FRAME_CHANGE_FLAG;
+ }
+ else if (win_spec_prev->partition_list != win_spec_curr->partition_list)
+ {
+ curr->marker|= PARTITION_CHANGE_FLAG | FRAME_CHANGE_FLAG;
+ }
+ }
+ else if (win_spec_prev->window_frame != win_spec_curr->window_frame)
+ curr->marker|= FRAME_CHANGE_FLAG;
+
+ prev= curr;
+ }
+}
+
+
+/////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////
+// Window Frames support
+/////////////////////////////////////////////////////////////////////////////
+
+// note: make rr_from_pointers static again when not need it here anymore
+int rr_from_pointers(READ_RECORD *info);
+
+
+/////////////////////////////////////////////////////////////////////////////
+
+
+/*
+ A cursor over a sequence of rowids. One can
+ - Move to next rowid
+ - jump to given number in the sequence
+ - Know the number of the current rowid (i.e. how many rowids have been read)
+*/
+
+class Rowid_seq_cursor
+{
+public:
+ Rowid_seq_cursor() : io_cache(NULL), ref_buffer(0) {}
+ virtual ~Rowid_seq_cursor()
+ {
+ if (ref_buffer)
+ my_free(ref_buffer);
+ if (io_cache)
+ {
+ end_slave_io_cache(io_cache);
+ my_free(io_cache);
+ io_cache= NULL;
+ }
+ }
+
+private:
+ /* Length of one rowid element */
+ size_t ref_length;
+
+ /* If io_cache=!NULL, use it */
+ IO_CACHE *io_cache;
+ uchar *ref_buffer; /* Buffer for the last returned rowid */
+ ha_rows rownum; /* Number of the rowid that is about to be returned */
+ ha_rows current_ref_buffer_rownum;
+ bool ref_buffer_valid;
+
+ /* The following are used when we are reading from an array of pointers */
+ uchar *cache_start;
+ uchar *cache_pos;
+ uchar *cache_end;
+public:
+
+ void init(READ_RECORD *info)
+ {
+ ref_length= info->ref_length;
+ if (info->read_record == rr_from_pointers)
+ {
+ io_cache= NULL;
+ cache_start= info->cache_pos;
+ cache_pos= info->cache_pos;
+ cache_end= info->cache_end;
+ }
+ else
+ {
+ //DBUG_ASSERT(info->read_record == rr_from_tempfile);
+ rownum= 0;
+ io_cache= (IO_CACHE*)my_malloc(sizeof(IO_CACHE), MYF(0));
+ init_slave_io_cache(info->io_cache, io_cache);
+
+ ref_buffer= (uchar*)my_malloc(ref_length, MYF(0));
+ ref_buffer_valid= false;
+ }
+ }
+
+ virtual int next()
+ {
+ /* Allow multiple next() calls in EOF state. */
+ if (at_eof())
+ return -1;
+
+ if (io_cache)
+ {
+ rownum++;
+ }
+ else
+ {
+ cache_pos+= ref_length;
+ DBUG_ASSERT(cache_pos <= cache_end);
+ }
+ return 0;
+ }
+
+ virtual int prev()
+ {
+ if (io_cache)
+ {
+ if (rownum == 0)
+ return -1;
+
+ rownum--;
+ return 0;
+ }
+ else
+ {
+ /* Allow multiple prev() calls when positioned at the start. */
+ if (cache_pos == cache_start)
+ return -1;
+ cache_pos-= ref_length;
+ DBUG_ASSERT(cache_pos >= cache_start);
+ return 0;
+ }
+ }
+
+ ha_rows get_rownum() const
+ {
+ if (io_cache)
+ return rownum;
+ else
+ return (cache_pos - cache_start) / ref_length;
+ }
+
+ void move_to(ha_rows row_number)
+ {
+ if (io_cache)
+ {
+ rownum= row_number;
+ }
+ else
+ {
+ cache_pos= MY_MIN(cache_end, cache_start + row_number * ref_length);
+ DBUG_ASSERT(cache_pos <= cache_end);
+ }
+ }
+
+protected:
+ bool at_eof()
+ {
+ if (io_cache)
+ {
+ return rownum * ref_length >= io_cache->end_of_file;
+ }
+ else
+ return (cache_pos == cache_end);
+ }
+
+ bool get_curr_rowid(uchar **row_id)
+ {
+ if (io_cache)
+ {
+ DBUG_ASSERT(!at_eof());
+ if (!ref_buffer_valid || current_ref_buffer_rownum != rownum)
+ {
+ seek_io_cache(io_cache, rownum * ref_length);
+ if (my_b_read(io_cache,ref_buffer,ref_length))
+ {
+ /* Error reading from file. */
+ return true;
+ }
+ ref_buffer_valid= true;
+ current_ref_buffer_rownum = rownum;
+ }
+ *row_id = ref_buffer;
+ return false;
+ }
+ else
+ {
+ *row_id= cache_pos;
+ return false;
+ }
+ }
+};
+
+
+/*
+ Cursor which reads from rowid sequence and also retrieves table rows.
+*/
+
+class Table_read_cursor : public Rowid_seq_cursor
+{
+public:
+ virtual ~Table_read_cursor() {}
+
+ void init(READ_RECORD *info)
+ {
+ Rowid_seq_cursor::init(info);
+ table= info->table;
+ record= info->record;
+ }
+
+ virtual int fetch()
+ {
+ if (at_eof())
+ return -1;
+
+ uchar* curr_rowid;
+ if (get_curr_rowid(&curr_rowid))
+ return -1;
+ return table->file->ha_rnd_pos(record, curr_rowid);
+ }
+
+private:
+  /* The table that is accessed by this cursor. */
+ TABLE *table;
+ /* Buffer where to store the table's record data. */
+ uchar *record;
+
+ // TODO(spetrunia): should move_to() also read row here?
+};
+
+
+/*
+ A cursor which only moves within a partition. The scan stops at the partition
+ end, and it needs an explicit command to move to the next partition.
+
+ This cursor can not move backwards.
+*/
+
+class Partition_read_cursor : public Table_read_cursor
+{
+public:
+ Partition_read_cursor(THD *thd, SQL_I_List<ORDER> *partition_list) :
+ bound_tracker(thd, partition_list) {}
+
+ void init(READ_RECORD *info)
+ {
+ Table_read_cursor::init(info);
+ bound_tracker.init();
+ end_of_partition= false;
+ }
+
+ /*
+ Informs the cursor that we need to move into the next partition.
+ The next partition is provided in two ways:
+ - in table->record[0]..
+ - rownum parameter has the row number.
+ */
+ void on_next_partition(ha_rows rownum)
+ {
+ /* Remember the sort key value from the new partition */
+ move_to(rownum);
+ bound_tracker.check_if_next_group();
+ end_of_partition= false;
+
+ }
+
+ /*
+ This returns -1 when end of partition was reached.
+ */
+ int next()
+ {
+ int res;
+ if (end_of_partition)
+ return -1;
+
+ if ((res= Table_read_cursor::next()) ||
+ (res= fetch()))
+ {
+ /* TODO(cvicentiu) This does not consider table read failures.
+ Perhaps assuming end of table like this is fine in that case. */
+
+ /* This row is the final row in the table. To maintain semantics
+ that cursors always point to the last valid row, move back one step,
+ but mark end_of_partition as true. */
+ Table_read_cursor::prev();
+ end_of_partition= true;
+ return res;
+ }
+
+ if (bound_tracker.compare_with_cache())
+ {
+ /* This row is part of a new partition, don't move
+         forward any more until we get informed of a new partition. */
+ Table_read_cursor::prev();
+ end_of_partition= true;
+ return -1;
+ }
+ return 0;
+ }
+
+private:
+ Group_bound_tracker bound_tracker;
+ bool end_of_partition;
+};
+
+/////////////////////////////////////////////////////////////////////////////
+
+/*
+ Window frame bound cursor. Abstract interface.
+
+ @detail
+ The cursor moves within the partition that the current row is in.
+ It may be ahead or behind the current row.
+
+ The cursor also assumes that the current row moves forward through the
+ partition and will move to the next adjacent partition after this one.
+
+ List of all cursor classes:
+ Frame_cursor
+ Frame_range_n_top
+ Frame_range_n_bottom
+
+ Frame_range_current_row_top
+ Frame_range_current_row_bottom
+
+ Frame_n_rows_preceding
+ Frame_n_rows_following
+
+ Frame_rows_current_row_top = Frame_n_rows_preceding(0)
+ Frame_rows_current_row_bottom
+
+ // These handle both RANGE and ROWS-type bounds
+ Frame_unbounded_preceding
+ Frame_unbounded_following
+
+ // This is not used as a frame bound, it counts rows in the partition:
+ Frame_unbounded_following_set_count : public Frame_unbounded_following
+
+ @todo
+ - if we want to allocate this on the MEM_ROOT we should make sure
+ it is not re-allocated for every subquery execution.
+*/
+
+class Frame_cursor : public Sql_alloc
+{
+public:
+ Frame_cursor() : sum_functions(), perform_no_action(false) {}
+
+ virtual void init(READ_RECORD *info) {};
+
+ bool add_sum_func(Item_sum* item)
+ {
+ return sum_functions.push_back(item);
+ }
+ /*
+ Current row has moved to the next partition and is positioned on the first
+ row there. Position the frame bound accordingly.
+
+ @param first - TRUE means this is the first partition
+ @param item - Put or remove rows from there.
+
+ @detail
+ - if first==false, the caller guarantees that tbl->record[0] points at the
+ first row in the new partition.
+ - if first==true, we are just starting in the first partition and no such
+ guarantee is provided.
+
+ - The callee may move tbl->file and tbl->record[0] to point to some other
+ row.
+ */
+ virtual void pre_next_partition(ha_rows rownum) {};
+ virtual void next_partition(ha_rows rownum)=0;
+
+ /*
+ The current row has moved one row forward.
+ Move this frame bound accordingly, and update the value of aggregate
+ function as necessary.
+ */
+ virtual void pre_next_row() {};
+ virtual void next_row()=0;
+
+ virtual bool is_outside_computation_bounds() const { return false; };
+
+ virtual ~Frame_cursor() {}
+
+ /*
+ Regular frame cursors add or remove values from the sum functions they
+ manage. By calling this method, they will only perform the required
+ movement within the table, but no adding/removing will happen.
+ */
+ void set_no_action()
+ {
+ perform_no_action= true;
+ }
+
+ /* Retrieves the row number that this cursor currently points at. */
+ virtual ha_rows get_curr_rownum() const= 0;
+
+protected:
+ inline void add_value_to_items()
+ {
+ if (perform_no_action)
+ return;
+
+ List_iterator_fast<Item_sum> it(sum_functions);
+ Item_sum *item_sum;
+ while ((item_sum= it++))
+ {
+ item_sum->add();
+ }
+ }
+
+ inline void remove_value_from_items()
+ {
+ if (perform_no_action)
+ return;
+
+ List_iterator_fast<Item_sum> it(sum_functions);
+ Item_sum *item_sum;
+ while ((item_sum= it++))
+ {
+ item_sum->remove();
+ }
+ }
+
+ /* Clear all sum functions handled by this cursor. */
+ void clear_sum_functions()
+ {
+ List_iterator_fast<Item_sum> iter_sum_func(sum_functions);
+ Item_sum *sum_func;
+ while ((sum_func= iter_sum_func++))
+ {
+ sum_func->clear();
+ }
+ }
+
+ /* Sum functions that this cursor handles. */
+ List<Item_sum> sum_functions;
+
+private:
+ bool perform_no_action;
+};
+
+/*
+ A class that owns cursor objects associated with a specific window function.
+*/
+class Cursor_manager
+{
+public:
+ bool add_cursor(Frame_cursor *cursor)
+ {
+ return cursors.push_back(cursor);
+ }
+
+ void initialize_cursors(READ_RECORD *info)
+ {
+ List_iterator_fast<Frame_cursor> iter(cursors);
+ Frame_cursor *fc;
+ while ((fc= iter++))
+ fc->init(info);
+ }
+
+ void notify_cursors_partition_changed(ha_rows rownum)
+ {
+ List_iterator_fast<Frame_cursor> iter(cursors);
+ Frame_cursor *cursor;
+ while ((cursor= iter++))
+ cursor->pre_next_partition(rownum);
+
+ iter.rewind();
+ while ((cursor= iter++))
+ cursor->next_partition(rownum);
+ }
+
+ void notify_cursors_next_row()
+ {
+ List_iterator_fast<Frame_cursor> iter(cursors);
+ Frame_cursor *cursor;
+ while ((cursor= iter++))
+ cursor->pre_next_row();
+
+ iter.rewind();
+ while ((cursor= iter++))
+ cursor->next_row();
+ }
+
+ ~Cursor_manager() { cursors.delete_elements(); }
+
+private:
+ /* List of the cursors that this manager owns. */
+ List<Frame_cursor> cursors;
+};
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+// RANGE-type frames
+//////////////////////////////////////////////////////////////////////////////
+
+/*
+ Frame_range_n_top handles the top end of RANGE-type frame.
+
+ That is, it handles:
+ RANGE BETWEEN n PRECEDING AND ...
+ RANGE BETWEEN n FOLLOWING AND ...
+
+ Top of the frame doesn't need to check for partition end, since bottom will
+ reach it before.
+*/
+
+class Frame_range_n_top : public Frame_cursor
+{
+ Partition_read_cursor cursor;
+
+ Cached_item_item *range_expr;
+
+ Item *n_val;
+ Item *item_add;
+
+ const bool is_preceding;
+
+ bool end_of_partition;
+
+ /*
+ 1 when order_list uses ASC ordering
+ -1 when order_list uses DESC ordering
+ */
+ int order_direction;
+public:
+ Frame_range_n_top(THD *thd,
+ SQL_I_List<ORDER> *partition_list,
+ SQL_I_List<ORDER> *order_list,
+ bool is_preceding_arg, Item *n_val_arg) :
+ cursor(thd, partition_list), n_val(n_val_arg), item_add(NULL),
+ is_preceding(is_preceding_arg)
+ {
+ DBUG_ASSERT(order_list->elements == 1);
+ Item *src_expr= order_list->first->item[0];
+ if (order_list->first->direction == ORDER::ORDER_ASC)
+ order_direction= 1;
+ else
+ order_direction= -1;
+
+ range_expr= (Cached_item_item*) new_Cached_item(thd, src_expr, FALSE);
+
+ bool use_minus= is_preceding;
+ if (order_direction == -1)
+ use_minus= !use_minus;
+
+ if (use_minus)
+ item_add= new (thd->mem_root) Item_func_minus(thd, src_expr, n_val);
+ else
+ item_add= new (thd->mem_root) Item_func_plus(thd, src_expr, n_val);
+
+ item_add->fix_fields(thd, &item_add);
+ }
+
+ void init(READ_RECORD *info)
+ {
+ cursor.init(info);
+ }
+
+ void pre_next_partition(ha_rows rownum)
+ {
+ // Save the value of FUNC(current_row)
+ range_expr->fetch_value_from(item_add);
+
+ cursor.on_next_partition(rownum);
+ end_of_partition= false;
+ }
+
+ void next_partition(ha_rows rownum)
+ {
+ walk_till_non_peer();
+ }
+
+ void pre_next_row()
+ {
+ if (end_of_partition)
+ return;
+ range_expr->fetch_value_from(item_add);
+ }
+
+ void next_row()
+ {
+ if (end_of_partition)
+ return;
+ /*
+ Ok, our cursor is at the first row R where
+ (prev_row + n) >= R
+ We need to check about the current row.
+ */
+ walk_till_non_peer();
+ }
+
+ ha_rows get_curr_rownum() const
+ {
+ return cursor.get_rownum();
+ }
+
+ bool is_outside_computation_bounds() const
+ {
+ if (end_of_partition)
+ return true;
+ return false;
+ }
+
+private:
+ void walk_till_non_peer()
+ {
+ if (cursor.fetch()) // ERROR
+ return;
+ // Current row is not a peer.
+ if (order_direction * range_expr->cmp_read_only() <= 0)
+ return;
+ remove_value_from_items();
+
+ int res;
+ while (!(res= cursor.next()))
+ {
+ /* Note, no need to fetch the value explicitly here. The partition
+ read cursor will fetch it to check if the partition has changed.
+ TODO(cvicentiu) make this piece of information not necessary by
+ reimplementing Partition_read_cursor.
+ */
+ if (order_direction * range_expr->cmp_read_only() <= 0)
+ break;
+ remove_value_from_items();
+ }
+ if (res)
+ end_of_partition= true;
+ }
+
+};
+
+
+/*
+ Frame_range_n_bottom handles bottom end of RANGE-type frame.
+
+ That is, it handles frame bounds in form:
+ RANGE BETWEEN ... AND n PRECEDING
+ RANGE BETWEEN ... AND n FOLLOWING
+
+ Bottom end moves first so it needs to check for partition end
+  (todo: unless it's PRECEDING and in that case it doesn't)
+ (todo: factor out common parts with Frame_range_n_top into
+ a common ancestor)
+*/
+
+class Frame_range_n_bottom: public Frame_cursor
+{
+ Partition_read_cursor cursor;
+
+ Cached_item_item *range_expr;
+
+ Item *n_val;
+ Item *item_add;
+
+ const bool is_preceding;
+
+ bool end_of_partition;
+
+ /*
+ 1 when order_list uses ASC ordering
+ -1 when order_list uses DESC ordering
+ */
+ int order_direction;
+public:
+ Frame_range_n_bottom(THD *thd,
+ SQL_I_List<ORDER> *partition_list,
+ SQL_I_List<ORDER> *order_list,
+ bool is_preceding_arg, Item *n_val_arg) :
+ cursor(thd, partition_list), n_val(n_val_arg), item_add(NULL),
+ is_preceding(is_preceding_arg), added_values(false)
+ {
+ DBUG_ASSERT(order_list->elements == 1);
+ Item *src_expr= order_list->first->item[0];
+
+ if (order_list->first->direction == ORDER::ORDER_ASC)
+ order_direction= 1;
+ else
+ order_direction= -1;
+
+ range_expr= (Cached_item_item*) new_Cached_item(thd, src_expr, FALSE);
+
+ bool use_minus= is_preceding;
+ if (order_direction == -1)
+ use_minus= !use_minus;
+
+ if (use_minus)
+ item_add= new (thd->mem_root) Item_func_minus(thd, src_expr, n_val);
+ else
+ item_add= new (thd->mem_root) Item_func_plus(thd, src_expr, n_val);
+
+ item_add->fix_fields(thd, &item_add);
+ }
+
+ void init(READ_RECORD *info)
+ {
+ cursor.init(info);
+ }
+
+ void pre_next_partition(ha_rows rownum)
+ {
+ // Save the value of FUNC(current_row)
+ range_expr->fetch_value_from(item_add);
+
+ cursor.on_next_partition(rownum);
+ end_of_partition= false;
+ added_values= false;
+ }
+
+ void next_partition(ha_rows rownum)
+ {
+ cursor.move_to(rownum);
+ walk_till_non_peer();
+ }
+
+ void pre_next_row()
+ {
+ if (end_of_partition)
+ return;
+ range_expr->fetch_value_from(item_add);
+ }
+
+ void next_row()
+ {
+ if (end_of_partition)
+ return;
+ /*
+ Ok, our cursor is at the first row R where
+ (prev_row + n) >= R
+ We need to check about the current row.
+ */
+ walk_till_non_peer();
+ }
+
+ bool is_outside_computation_bounds() const
+ {
+ if (!added_values)
+ return true;
+ return false;
+ }
+
+ ha_rows get_curr_rownum() const
+ {
+ if (end_of_partition)
+ return cursor.get_rownum(); // Cursor does not pass over partition bound.
+ else
+ return cursor.get_rownum() - 1; // Cursor is placed on first non peer.
+ }
+
private:
  /* Whether any row has been added into the sum functions for this frame. */
  bool added_values;

  /*
    Advance the cursor, adding each visited row to the attached sum
    functions, until the first row that is no longer within range
    (order_direction * cmp < 0) or the end of the partition is reached.
  */
  void walk_till_non_peer()
  {
    cursor.fetch();
    // Current row is not a peer.
    if (order_direction * range_expr->cmp_read_only() < 0)
      return;

    add_value_to_items(); // Add current row.
    added_values= true;
    int res;
    while (!(res= cursor.next()))
    {
      if (order_direction * range_expr->cmp_read_only() < 0)
        break;
      add_value_to_items();
    }
    if (res)
      end_of_partition= true;
  }
};
+
+
+/*
+ RANGE BETWEEN ... AND CURRENT ROW, bottom frame bound for CURRENT ROW
+ ...
+ | peer1
+ | peer2 <----- current_row
+ | peer3
+ +-peer4 <----- the cursor points here. peer4 itself is included.
+ nonpeer1
+ nonpeer2
+
  This bound moves in front of the current_row. It should be the first row
+ that is still a peer of the current row.
+*/
+
+class Frame_range_current_row_bottom: public Frame_cursor
+{
+ Partition_read_cursor cursor;
+
+ Group_bound_tracker peer_tracker;
+
+ bool dont_move;
+public:
+ Frame_range_current_row_bottom(THD *thd,
+ SQL_I_List<ORDER> *partition_list,
+ SQL_I_List<ORDER> *order_list) :
+ cursor(thd, partition_list), peer_tracker(thd, order_list)
+ {
+ }
+
+ void init(READ_RECORD *info)
+ {
+ cursor.init(info);
+ peer_tracker.init();
+ }
+
+ void pre_next_partition(ha_rows rownum)
+ {
+ // Save the value of the current_row
+ peer_tracker.check_if_next_group();
+ cursor.on_next_partition(rownum);
+ // Add the current row now because our cursor has already seen it
+ add_value_to_items();
+ }
+
+ void next_partition(ha_rows rownum)
+ {
+ walk_till_non_peer();
+ }
+
+ void pre_next_row()
+ {
+ dont_move= !peer_tracker.check_if_next_group();
+ }
+
+ void next_row()
+ {
+ // Check if our cursor is pointing at a peer of the current row.
+ // If not, move forward until that becomes true
+ if (dont_move)
+ {
+ /*
+ Our current is not a peer of the current row.
+ No need to move the bound.
+ */
+ return;
+ }
+ walk_till_non_peer();
+ }
+
+ ha_rows get_curr_rownum() const
+ {
+ return cursor.get_rownum();
+ }
+
+private:
+ void walk_till_non_peer()
+ {
+ /*
+ Walk forward until we've met first row that's not a peer of the current
+ row
+ */
+ while (!cursor.next())
+ {
+ if (peer_tracker.compare_with_cache())
+ {
+ cursor.prev(); // Move to our peer.
+ break;
+ }
+
+ add_value_to_items();
+ }
+ }
+};
+
+
+/*
+ RANGE BETWEEN CURRENT ROW AND .... Top CURRENT ROW, RANGE-type frame bound
+
+ nonpeer1
+ nonpeer2
+ +-peer1 <----- the cursor points here. peer1 itself is included.
+ | peer2
+ | peer3 <----- current_row
+ | peer4
+ ...
+
+ It moves behind the current_row. It is located right after the first peer of
+ the current_row.
+*/
+
class Frame_range_current_row_top : public Frame_cursor
{
  // NOTE(review): bound_tracker is constructed and init()-ed but never read
  // in this class — confirm whether it can be removed.
  Group_bound_tracker bound_tracker;

  Table_read_cursor cursor;
  /* Tracks peer groups w.r.t. the window's ORDER BY list. */
  Group_bound_tracker peer_tracker;

  /* True when the current row started a new peer group, so the bound moves. */
  bool move;
public:
  Frame_range_current_row_top(THD *thd,
                              SQL_I_List<ORDER> *partition_list,
                              SQL_I_List<ORDER> *order_list) :
    bound_tracker(thd, partition_list), cursor(), peer_tracker(thd, order_list),
    move(false)
  {}

  void init(READ_RECORD *info)
  {
    bound_tracker.init();

    cursor.init(info);
    peer_tracker.init();
  }

  void pre_next_partition(ha_rows rownum)
  {
    // Fetch the value from the first row
    peer_tracker.check_if_next_group();
    cursor.move_to(rownum);
  }

  void next_partition(ha_rows rownum) {}

  void pre_next_row()
  {
    // Check if the new current_row is a peer of the row that our cursor is
    // pointing to.
    move= peer_tracker.check_if_next_group();
  }

  /*
    When the current row opens a new peer group, remove all rows of the
    previous peer group(s) from the sum functions and advance onto the
    first peer of the new group.
  */
  void next_row()
  {
    if (move)
    {
      /*
        Our cursor is pointing at the first row that was a peer of the previous
        current row. Or, it was the first row in the partition.
      */
      if (cursor.fetch())
        return;

      // todo: need the following check ?
      if (!peer_tracker.compare_with_cache())
        return;
      remove_value_from_items();

      do
      {
        if (cursor.next() || cursor.fetch())
          return;
        if (!peer_tracker.compare_with_cache())
          return;
        remove_value_from_items();
      }
      while (1);
    }
  }

  ha_rows get_curr_rownum() const
  {
    return cursor.get_rownum();
  }
};
+
+
+/////////////////////////////////////////////////////////////////////////////
+// UNBOUNDED frame bounds (shared between RANGE and ROWS)
+/////////////////////////////////////////////////////////////////////////////
+
+/*
+ UNBOUNDED PRECEDING frame bound
+*/
+class Frame_unbounded_preceding : public Frame_cursor
+{
+public:
+ Frame_unbounded_preceding(THD *thd,
+ SQL_I_List<ORDER> *partition_list,
+ SQL_I_List<ORDER> *order_list)
+ {}
+
+ void init(READ_RECORD *info) {}
+
+ void next_partition(ha_rows rownum)
+ {
+ /*
+ UNBOUNDED PRECEDING frame end just stays on the first row of the
+ partition. We are top of the frame, so we don't need to update the sum
+ function.
+ */
+ curr_rownum= rownum;
+ }
+
+ void next_row()
+ {
+ /* Do nothing, UNBOUNDED PRECEDING frame end doesn't move. */
+ }
+
+ ha_rows get_curr_rownum() const
+ {
+ return curr_rownum;
+ }
+
+private:
+ ha_rows curr_rownum;
+};
+
+
+/*
+ UNBOUNDED FOLLOWING frame bound
+*/
+
class Frame_unbounded_following : public Frame_cursor
{
protected:
  /* Scans the partition, stopping at partition boundaries. */
  Partition_read_cursor cursor;

public:
  Frame_unbounded_following(THD *thd,
                            SQL_I_List<ORDER> *partition_list,
                            SQL_I_List<ORDER> *order_list) :
    cursor(thd, partition_list) {}

  void init(READ_RECORD *info)
  {
    cursor.init(info);
  }

  void pre_next_partition(ha_rows rownum)
  {
    cursor.on_next_partition(rownum);
  }

  /* Add every row of the new partition into the sum functions at once. */
  void next_partition(ha_rows rownum)
  {
    /* Activate the first row */
    cursor.fetch();
    add_value_to_items();

    /* Walk to the end of the partition, updating the SUM function */
    while (!cursor.next())
    {
      add_value_to_items();
    }
  }

  void next_row()
  {
    /* Do nothing, UNBOUNDED FOLLOWING frame end doesn't move */
  }

  ha_rows get_curr_rownum() const
  {
    return cursor.get_rownum();
  }
};
+
+
/*
  Variant of Frame_unbounded_following that, instead of aggregating rows,
  counts the rows in each partition and pushes that count into every
  attached sum function (used by window functions needing partition size).
*/
class Frame_unbounded_following_set_count : public Frame_unbounded_following
{
public:
  Frame_unbounded_following_set_count(
      THD *thd,
      SQL_I_List<ORDER> *partition_list, SQL_I_List<ORDER> *order_list) :
    Frame_unbounded_following(thd, partition_list, order_list) {}

  void next_partition(ha_rows rownum)
  {
    ha_rows num_rows_in_partition= 0;
    if (cursor.fetch())
      return;
    num_rows_in_partition++;

    /* Walk to the end of the partition, find how many rows there are. */
    while (!cursor.next())
      num_rows_in_partition++;

    /*
      Every attached item is static_cast to the row-count interface —
      presumably guaranteed by the caller; confirm at registration site.
    */
    List_iterator_fast<Item_sum> it(sum_functions);
    Item_sum* item;
    while ((item= it++))
    {
      Item_sum_window_with_row_count* item_with_row_count =
        static_cast<Item_sum_window_with_row_count *>(item);
      item_with_row_count->set_row_count(num_rows_in_partition);
    }
  }

  // NOTE(review): identical to the base-class implementation — confirm
  // whether this override can be removed.
  ha_rows get_curr_rownum() const
  {
    return cursor.get_rownum();
  }
};
+
+/////////////////////////////////////////////////////////////////////////////
+// ROWS-type frame bounds
+/////////////////////////////////////////////////////////////////////////////
+/*
+ ROWS $n PRECEDING frame bound
+
+*/
class Frame_n_rows_preceding : public Frame_cursor
{
  /* Whether this is top of the frame or bottom */
  const bool is_top_bound;
  /* The N in "ROWS N PRECEDING". */
  const ha_rows n_rows;

  /* Number of rows that we need to skip before our cursor starts moving */
  ha_rows n_rows_behind;

  Table_read_cursor cursor;
public:
  Frame_n_rows_preceding(bool is_top_bound_arg, ha_rows n_rows_arg) :
    is_top_bound(is_top_bound_arg), n_rows(n_rows_arg), n_rows_behind(0)
  {}

  void init(READ_RECORD *info)
  {
    cursor.init(info);
  }

  void next_partition(ha_rows rownum)
  {
    /*
      Position our cursor to point at the first row in the new partition
      (for rownum=0, it is already there, otherwise, it lags behind)
    */
    cursor.move_to(rownum);
    /* Cursor is in the same spot as current row. */
    n_rows_behind= 0;

    /*
      Suppose the bound is ROWS 2 PRECEDING, and current row is row#n:
        ...
        n-3
        n-2 --- bound row
        n-1
         n  --- current_row
        ...
      The bound should point at row #(n-2). Bounds are inclusive, so
        - bottom bound should add row #(n-2) into the window function
        - top bound should remove row (#n-3) from the window function.
    */
    move_cursor_if_possible();

  }

  void next_row()
  {
    n_rows_behind++;
    move_cursor_if_possible();
  }

  bool is_outside_computation_bounds() const
  {
    /* As a bottom boundary, rows have not yet been added. */
    if (!is_top_bound && n_rows - n_rows_behind)
      return true;
    return false;
  }

  ha_rows get_curr_rownum() const
  {
    return cursor.get_rownum();
  }

private:
  /*
    Advance the cursor by at most one row, once the current row has moved
    n_rows ahead of it. Bottom bound adds the newly reached row; top bound
    removes the row it leaves behind.
  */
  void move_cursor_if_possible()
  {
    longlong rows_difference= n_rows - n_rows_behind;
    if (rows_difference > 0) /* We still have to wait. */
      return;

    /* The cursor points to the first row in the frame. */
    if (rows_difference == 0)
    {
      if (!is_top_bound)
      {
        cursor.fetch();
        add_value_to_items();
      }
      /* For top bound we don't have to remove anything as nothing was added. */
      return;
    }

    /* We need to catch up by one row. */
    DBUG_ASSERT(rows_difference == -1);

    if (is_top_bound)
    {
      cursor.fetch();
      remove_value_from_items();
      cursor.next();
    }
    else
    {
      cursor.next();
      cursor.fetch();
      add_value_to_items();
    }
    /* We've advanced one row. We are no longer behind. */
    n_rows_behind--;
  }
};
+
+
+/*
+ ROWS ... CURRENT ROW, Bottom bound.
+
+ This case is moved to separate class because here we don't need to maintain
+ our own cursor, or check for partition bound.
+*/
+
+class Frame_rows_current_row_bottom : public Frame_cursor
+{
+public:
+
+ Frame_rows_current_row_bottom() : curr_rownum(0) {}
+
+ void pre_next_partition(ha_rows rownum)
+ {
+ add_value_to_items();
+ curr_rownum= rownum;
+ }
+
+ void next_partition(ha_rows rownum) {}
+
+ void pre_next_row()
+ {
+ /* Temp table's current row is current_row. Add it to the window func */
+ add_value_to_items();
+ }
+
+ void next_row()
+ {
+ curr_rownum++;
+ };
+
+ ha_rows get_curr_rownum() const
+ {
+ return curr_rownum;
+ }
+
+private:
+ ha_rows curr_rownum;
+};
+
+
+/*
+ ROWS-type CURRENT ROW, top bound.
+
+ This serves for processing "ROWS BETWEEN CURRENT ROW AND ..." frames.
+
+ n-1
+ n --+ --- current_row, and top frame bound
+ n+1 |
+ ... |
+
+ when the current_row moves to row #n, this frame bound should remove the
+ row #(n-1) from the window function.
+
+ In other words, we need what "ROWS PRECEDING 0" provides.
+*/
/* Top bound at CURRENT ROW for ROWS frames: exactly "ROWS 0 PRECEDING". */
class Frame_rows_current_row_top: public Frame_n_rows_preceding

{
public:
  Frame_rows_current_row_top() :
    Frame_n_rows_preceding(true /*top*/, 0 /* n_rows */)
  {}
};
+
+
+/*
+ ROWS $n FOLLOWING frame bound.
+*/
+
class Frame_n_rows_following : public Frame_cursor
{
  /* Whether this is top of the frame or bottom */
  const bool is_top_bound;
  /* The N in "ROWS N FOLLOWING". */
  const ha_rows n_rows;

  Partition_read_cursor cursor;
  /* True once the cursor has run off the end of the current partition. */
  bool at_partition_end;
public:
  Frame_n_rows_following(THD *thd,
                         SQL_I_List<ORDER> *partition_list,
                         SQL_I_List<ORDER> *order_list,
                         bool is_top_bound_arg, ha_rows n_rows_arg) :
    is_top_bound(is_top_bound_arg), n_rows(n_rows_arg),
    cursor(thd, partition_list)
  {
  }

  void init(READ_RECORD *info)
  {
    cursor.init(info);
    at_partition_end= false;
  }

  void pre_next_partition(ha_rows rownum)
  {
    at_partition_end= false;

    cursor.on_next_partition(rownum);
  }

  /* Move our cursor to be n_rows ahead.  */
  void next_partition(ha_rows rownum)
  {
    if (is_top_bound)
      next_part_top(rownum);
    else
      next_part_bottom(rownum);
  }

  void next_row()
  {
    if (is_top_bound)
      next_row_top();
    else
      next_row_bottom();
  }

  bool is_outside_computation_bounds() const
  {
    /*
      The top bound can go over the current partition. In this case,
      the sum function has 0 values added to it.
    */
    if (at_partition_end && is_top_bound)
      return true;
    return false;
  }

  ha_rows get_curr_rownum() const
  {
    return cursor.get_rownum();
  }

private:
  /* Top bound: pre-remove the first n_rows rows of the new partition. */
  void next_part_top(ha_rows rownum)
  {
    for (ha_rows i= 0; i < n_rows; i++)
    {
      if (cursor.fetch())
        break;
      remove_value_from_items();
      if (cursor.next())
        at_partition_end= true;
    }
  }

  /* Bottom bound: pre-add the first n_rows+1 rows of the new partition. */
  void next_part_bottom(ha_rows rownum)
  {
    if (cursor.fetch())
      return;
    add_value_to_items();

    for (ha_rows i= 0; i < n_rows; i++)
    {
      if (cursor.next())
      {
        at_partition_end= true;
        break;
      }
      add_value_to_items();
    }
    return;
  }

  /* Top bound, per row: drop the row the cursor leaves, then advance. */
  void next_row_top()
  {
    if (cursor.fetch()) // PART END OR FAILURE
    {
      at_partition_end= true;
      return;
    }
    remove_value_from_items();
    if (cursor.next())
    {
      at_partition_end= true;
      return;
    }
  }

  /* Bottom bound, per row: advance, then include the newly reached row. */
  void next_row_bottom()
  {
    if (at_partition_end)
      return;

    if (cursor.next())
    {
      at_partition_end= true;
      return;
    }

    add_value_to_items();

  }
};
+
+/*
+ A cursor that performs a table scan between two indices. The indices
+ are provided by the two cursors representing the top and bottom bound
+ of the window function's frame definition.
+
+ Each scan clears the sum function.
+
+ NOTE:
+ The cursor does not alter the top and bottom cursors.
+ This type of cursor is expensive computational wise. This is only to be
+ used when the sum functions do not support removal.
+*/
class Frame_scan_cursor : public Frame_cursor
{
public:
  Frame_scan_cursor(const Frame_cursor &top_bound,
                    const Frame_cursor &bottom_bound) :
    top_bound(top_bound), bottom_bound(bottom_bound) {}

  void init(READ_RECORD *info)
  {
    cursor.init(info);
  }

  void pre_next_partition(ha_rows rownum)
  {
    /* TODO(cvicentiu) Sum functions get cleared on next partition anyway during
       the window function computation algorithm. Either perform this only in
       cursors, or remove it from pre_next_partition.
    */
    curr_rownum= rownum;
    clear_sum_functions();
  }

  void next_partition(ha_rows rownum)
  {
    compute_values_for_current_row();
  }

  /* Each row starts from scratch: wipe the previous row's aggregation. */
  void pre_next_row()
  {
    clear_sum_functions();
  }

  void next_row()
  {
    curr_rownum++;
    compute_values_for_current_row();
  }

  ha_rows get_curr_rownum() const
  {
    return curr_rownum;
  }

private:
  const Frame_cursor &top_bound;
  const Frame_cursor &bottom_bound;
  Table_read_cursor cursor;
  /* Current row number; set in pre_next_partition() before first use. */
  ha_rows curr_rownum;

  /* Scan the rows between the top bound and bottom bound. Add all the values
     between them, top bound row and bottom bound row inclusive. */
  void compute_values_for_current_row()
  {
    if (top_bound.is_outside_computation_bounds() ||
        bottom_bound.is_outside_computation_bounds())
      return;

    ha_rows start_rownum= top_bound.get_curr_rownum();
    ha_rows bottom_rownum= bottom_bound.get_curr_rownum();
    DBUG_PRINT("info", ("COMPUTING (%llu %llu)", start_rownum, bottom_rownum));

    cursor.move_to(start_rownum);

    for (ha_rows idx= start_rownum; idx <= bottom_rownum; idx++)
    {
      if (cursor.fetch()) //EOF
        break;
      add_value_to_items();
      if (cursor.next()) // EOF
        break;
    }
  }
};
+
+/* A cursor that follows a target cursor. Each time a new row is added,
+ the window functions are cleared and only have the row at which the target
+ is point at added to them.
+
+ The window functions are cleared if the bounds or the position cursors are
+ outside computational bounds.
+*/
class Frame_positional_cursor : public Frame_cursor
{
 public:
  /* Follow position_cursor exactly (no offset, no clamping bounds). */
  Frame_positional_cursor(const Frame_cursor &position_cursor) :
    position_cursor(position_cursor), top_bound(NULL),
    bottom_bound(NULL), offset(NULL), overflowed(false),
    negative_offset(false) {}

  /* Follow position_cursor shifted by offset, clamped to [top, bottom]. */
  Frame_positional_cursor(const Frame_cursor &position_cursor,
                          const Frame_cursor &top_bound,
                          const Frame_cursor &bottom_bound,
                          Item &offset,
                          bool negative_offset) :
    position_cursor(position_cursor), top_bound(&top_bound),
    bottom_bound(&bottom_bound), offset(&offset),
    negative_offset(negative_offset) {}

  void init(READ_RECORD *info)
  {
    cursor.init(info);
  }

  void pre_next_partition(ha_rows rownum)
  {
    /* The offset is dependent on the current row values. We can only get
     * it here accurately. When fetching other rows, it changes. */
    save_offset_value();
  }

  void next_partition(ha_rows rownum)
  {
    save_positional_value();
  }

  void pre_next_row()
  {
    /* The offset is dependent on the current row values. We can only get
     * it here accurately. When fetching other rows, it changes. */
    save_offset_value();
  }

  void next_row()
  {
    save_positional_value();
  }

  ha_rows get_curr_rownum() const
  {
    return position_cursor.get_curr_rownum();
  }

private:
  /* Check if our position (the `position` member, set by
   * get_current_position()) is within bounds. */
  bool position_is_within_bounds()
  {
    if (!offset)
      return !position_cursor.is_outside_computation_bounds();

    if (overflowed)
      return false;

    /* No valid bound to compare to. */
    if (position_cursor.is_outside_computation_bounds() ||
        top_bound->is_outside_computation_bounds() ||
        bottom_bound->is_outside_computation_bounds())
      return false;

    /* We are over the bound. */
    if (position < top_bound->get_curr_rownum())
      return false;
    if (position > bottom_bound->get_curr_rownum())
      return false;

    return true;
  }

  /* Get the current position, accounting for the offset value, if present.
     Sets `overflowed` when adding the offset wraps the unsigned position.
  */
  void get_current_position()
  {
    position = position_cursor.get_curr_rownum();
    overflowed= false;
    if (offset)
    {
      /* Unsigned wrap-around detection: adding a negative offset must
         decrease position; adding a positive one must increase it. */
      if (offset_value < 0 &&
          position + offset_value > position)
      {
        overflowed= true;
      }
      if (offset_value > 0 &&
          position + offset_value < position)
      {
        overflowed= true;
      }
      position += offset_value;
    }
  }

  /* Evaluate the offset item against the current row (negated for LAG). */
  void save_offset_value()
  {
    if (offset)
      offset_value= offset->val_int() * (negative_offset ? -1 : 1);
    else
      offset_value= 0;
  }

  /* Fetch the row at the computed position into the sum functions, or
     clear them when the position falls outside the valid bounds. */
  void save_positional_value()
  {
    get_current_position();
    if (!position_is_within_bounds())
      clear_sum_functions();
    else
    {
      cursor.move_to(position);
      cursor.fetch();
      add_value_to_items();
    }
  }

  const Frame_cursor &position_cursor;
  const Frame_cursor *top_bound;
  const Frame_cursor *bottom_bound;
  Item *offset;
  Table_read_cursor cursor;
  ha_rows position;
  longlong offset_value;
  bool overflowed;

  bool negative_offset;
};
+
+
+/*
+ Get a Frame_cursor for a frame bound. This is a "factory function".
+*/
Frame_cursor *get_frame_cursor(THD *thd, Window_spec *spec, bool is_top_bound)
{
  Window_frame *frame= spec->window_frame;
  if (!frame)
  {
    /*
      The docs say this about the lack of frame clause:

        Let WD be a window structure descriptor.
        ...
        If WD has no window framing clause, then
        Case:
        i) If the window ordering clause of WD is not present, then WF is the
           window partition of R.
        ii) Otherwise, WF consists of all rows of the partition of R that
           precede R or are peers of R in the window ordering of the window
           partition defined by the window ordering clause.

      For case #ii, the frame bounds essentially are "RANGE BETWEEN UNBOUNDED
      PRECEDING AND CURRENT ROW".
      For the case #i, without ordering clause all rows are considered peers,
      so again the same frame bounds can be used.
    */
    if (is_top_bound)
      return new Frame_unbounded_preceding(thd,
                                           spec->partition_list,
                                           spec->order_list);
    else
      return new Frame_range_current_row_bottom(thd,
                                                spec->partition_list,
                                                spec->order_list);
  }

  Window_frame_bound *bound= is_top_bound? frame->top_bound :
                                           frame->bottom_bound;

  if (bound->precedence_type == Window_frame_bound::PRECEDING ||
      bound->precedence_type == Window_frame_bound::FOLLOWING)
  {
    bool is_preceding= (bound->precedence_type ==
                        Window_frame_bound::PRECEDING);

    if (bound->offset == NULL) /* this is UNBOUNDED */
    {
      /* The following serve both RANGE and ROWS: */
      if (is_preceding)
        return new Frame_unbounded_preceding(thd,
                                             spec->partition_list,
                                             spec->order_list);

      return new Frame_unbounded_following(thd,
                                           spec->partition_list,
                                           spec->order_list);
    }

    if (frame->units == Window_frame::UNITS_ROWS)
    {
      ha_rows n_rows= bound->offset->val_int();
      /* These should be handled in the parser */
      DBUG_ASSERT(!bound->offset->null_value);
      DBUG_ASSERT((longlong) n_rows >= 0);
      if (is_preceding)
        return new Frame_n_rows_preceding(is_top_bound, n_rows);

      return new Frame_n_rows_following(
          thd, spec->partition_list, spec->order_list,
          is_top_bound, n_rows);
    }
    else
    {
      if (is_top_bound)
        return new Frame_range_n_top(
            thd, spec->partition_list, spec->order_list,
            is_preceding, bound->offset);

      return new Frame_range_n_bottom(thd,
                                      spec->partition_list, spec->order_list,
                                      is_preceding, bound->offset);
    }
  }

  if (bound->precedence_type == Window_frame_bound::CURRENT)
  {
    if (frame->units == Window_frame::UNITS_ROWS)
    {
      if (is_top_bound)
        return new Frame_rows_current_row_top;

      return new Frame_rows_current_row_bottom;
    }
    else
    {
      if (is_top_bound)
        return new Frame_range_current_row_top(
            thd, spec->partition_list, spec->order_list);

      return new Frame_range_current_row_bottom(
          thd, spec->partition_list, spec->order_list);
    }
  }
  /* Unhandled precedence type; callers receive NULL in that case. */
  return NULL;
}
+
/*
  Create and register the cursors for window functions that do not follow
  the generic frame-bounds model: CUME_DIST, LEAD/LAG, FIRST_VALUE,
  LAST_VALUE, NTH_VALUE — plus a default pairing for the remaining
  frame-prohibited functions (see the callers' is_frame_prohibited()).
*/
static
void add_special_frame_cursors(THD *thd, Cursor_manager *cursor_manager,
                               Item_window_func *window_func)
{
  Window_spec *spec= window_func->window_spec;
  Item_sum *item_sum= window_func->window_func();
  DBUG_PRINT("info", ("Get arg count: %d", item_sum->get_arg_count()));
  Frame_cursor *fc;
  switch (item_sum->sum_func())
  {
    case Item_sum::CUME_DIST_FUNC:
      /* CUME_DIST aggregates from partition start through current peers. */
      fc= new Frame_unbounded_preceding(thd,
                                        spec->partition_list,
                                        spec->order_list);
      fc->add_sum_func(item_sum);
      cursor_manager->add_cursor(fc);
      fc= new Frame_range_current_row_bottom(thd,
                                             spec->partition_list,
                                             spec->order_list);
      fc->add_sum_func(item_sum);
      cursor_manager->add_cursor(fc);
      break;
    case Item_sum::LEAD_FUNC:
    case Item_sum::LAG_FUNC:
    {
      /*
        LEAD/LAG fetch a single row offset from the current row (arg 1 is
        the offset; negated for LAG), clamped to the whole partition.
      */
      Frame_cursor *bottom_bound= new Frame_unbounded_following(thd,
                                                                spec->partition_list,
                                                                spec->order_list);
      Frame_cursor *top_bound= new Frame_unbounded_preceding(thd,
                                                             spec->partition_list,
                                                             spec->order_list);
      Frame_cursor *current_row_pos= new Frame_rows_current_row_bottom;
      cursor_manager->add_cursor(bottom_bound);
      cursor_manager->add_cursor(top_bound);
      cursor_manager->add_cursor(current_row_pos);
      DBUG_ASSERT(item_sum->fixed);
      bool negative_offset= item_sum->sum_func() == Item_sum::LAG_FUNC;
      fc= new Frame_positional_cursor(*current_row_pos,
                                      *top_bound, *bottom_bound,
                                      *item_sum->get_arg(1),
                                      negative_offset);
      fc->add_sum_func(item_sum);
      cursor_manager->add_cursor(fc);
      break;
    }
    case Item_sum::FIRST_VALUE_FUNC:
    {
      /* FIRST_VALUE: the frame's top-bound row itself (offset 0). */
      Frame_cursor *bottom_bound= get_frame_cursor(thd, spec, false);
      Frame_cursor *top_bound= get_frame_cursor(thd, spec, true);
      cursor_manager->add_cursor(bottom_bound);
      cursor_manager->add_cursor(top_bound);
      DBUG_ASSERT(item_sum->fixed);
      Item *offset_item= new (thd->mem_root) Item_int(thd, 0);
      offset_item->fix_fields(thd, &offset_item);
      fc= new Frame_positional_cursor(*top_bound,
                                      *top_bound, *bottom_bound,
                                      *offset_item, false);
      fc->add_sum_func(item_sum);
      cursor_manager->add_cursor(fc);
      break;
    }
    case Item_sum::LAST_VALUE_FUNC:
    {
      /* LAST_VALUE: the frame's bottom-bound row itself (offset 0). */
      Frame_cursor *bottom_bound= get_frame_cursor(thd, spec, false);
      Frame_cursor *top_bound= get_frame_cursor(thd, spec, true);
      cursor_manager->add_cursor(bottom_bound);
      cursor_manager->add_cursor(top_bound);
      DBUG_ASSERT(item_sum->fixed);
      Item *offset_item= new (thd->mem_root) Item_int(thd, 0);
      offset_item->fix_fields(thd, &offset_item);
      fc= new Frame_positional_cursor(*bottom_bound,
                                      *top_bound, *bottom_bound,
                                      *offset_item, false);
      fc->add_sum_func(item_sum);
      cursor_manager->add_cursor(fc);
      break;
    }
    case Item_sum::NTH_VALUE_FUNC:
    {
      /* NTH_VALUE: row at (top bound + arg1 - 1); N is 1-based. */
      Frame_cursor *bottom_bound= get_frame_cursor(thd, spec, false);
      Frame_cursor *top_bound= get_frame_cursor(thd, spec, true);
      cursor_manager->add_cursor(bottom_bound);
      cursor_manager->add_cursor(top_bound);
      DBUG_ASSERT(item_sum->fixed);
      Item *int_item= new (thd->mem_root) Item_int(thd, 1);
      Item *offset_func= new (thd->mem_root)
                             Item_func_minus(thd, item_sum->get_arg(1),
                                             int_item);
      offset_func->fix_fields(thd, &offset_func);
      fc= new Frame_positional_cursor(*top_bound,
                                      *top_bound, *bottom_bound,
                                      *offset_func, false);
      fc->add_sum_func(item_sum);
      cursor_manager->add_cursor(fc);
      break;
    }
    default:
      /* Ranking functions: UNBOUNDED PRECEDING .. CURRENT ROW (ROWS). */
      fc= new Frame_unbounded_preceding(
              thd, spec->partition_list, spec->order_list);
      fc->add_sum_func(item_sum);
      cursor_manager->add_cursor(fc);

      fc= new Frame_rows_current_row_bottom;
      fc->add_sum_func(item_sum);
      cursor_manager->add_cursor(fc);
  }
}
+
+
+static bool is_computed_with_remove(Item_sum::Sumfunctype sum_func)
+{
+ switch (sum_func)
+ {
+ case Item_sum::CUME_DIST_FUNC:
+ case Item_sum::ROW_NUMBER_FUNC:
+ case Item_sum::RANK_FUNC:
+ case Item_sum::DENSE_RANK_FUNC:
+ case Item_sum::NTILE_FUNC:
+ case Item_sum::FIRST_VALUE_FUNC:
+ case Item_sum::LAST_VALUE_FUNC:
+ return false;
+ default:
+ return true;
+ }
+}
+/*
+ Create required frame cursors for the list of window functions.
+ Register all functions to their appropriate cursors.
+ If the window functions share the same frame specification,
+ those window functions will be registered to the same cursor.
+*/
void get_window_functions_required_cursors(
  THD *thd,
  List<Item_window_func>& window_functions,
  List<Cursor_manager> *cursor_managers)
{
  List_iterator_fast<Item_window_func> it(window_functions);
  Item_window_func* item_win_func;
  Item_sum *sum_func;
  while ((item_win_func= it++))
  {
    Cursor_manager *cursor_manager = new Cursor_manager();
    sum_func = item_win_func->window_func();
    Frame_cursor *fc;
    /*
      Some window functions require the partition size for computing values.
      Add a cursor that retrieves it as the first one in the list if necessary.
    */
    if (item_win_func->requires_partition_size())
    {
      fc= new Frame_unbounded_following_set_count(thd,
              item_win_func->window_spec->partition_list,
              item_win_func->window_spec->order_list);
      fc->add_sum_func(sum_func);
      cursor_manager->add_cursor(fc);
    }

    /*
      If it is not a regular window function that follows frame specifications,
      and/or specific cursors are required. ROW_NUM, RANK, NTILE and others
      follow such rules. Check is_frame_prohibited check for the full list.

      TODO(cvicentiu) This approach is messy. Every time a function allows
      computation in a certain way, we have to add an extra method to this
      factory function. It is better to have window functions output
      their own cursors, as needed. This way, the logic is bound
      only to the implementation of said window function. Regular aggregate
      functions can keep the default frame generating code, overwrite it or
      add to it.
    */
    if (item_win_func->is_frame_prohibited() ||
        item_win_func->requires_special_cursors())
    {
      add_special_frame_cursors(thd, cursor_manager, item_win_func);
      cursor_managers->push_back(cursor_manager);
      continue;
    }

    Frame_cursor *frame_bottom= get_frame_cursor(thd,
        item_win_func->window_spec, false);
    Frame_cursor *frame_top= get_frame_cursor(thd,
        item_win_func->window_spec, true);

    frame_bottom->add_sum_func(sum_func);
    frame_top->add_sum_func(sum_func);

    /*
      The order of these cursors is important. A sum function
      must first add values (via frame_bottom) then remove them via
      frame_top. Removing items first doesn't make sense in the case of all
      window functions.
    */
    cursor_manager->add_cursor(frame_bottom);
    cursor_manager->add_cursor(frame_top);
    if (is_computed_with_remove(sum_func->sum_func()) &&
        !sum_func->supports_removal())
    {
      /*
        The aggregate cannot "un-add" rows: disable the sliding bounds and
        rescan the whole frame for every row instead.
      */
      frame_bottom->set_no_action();
      frame_top->set_no_action();
      Frame_cursor *scan_cursor= new Frame_scan_cursor(*frame_top,
                                                       *frame_bottom);
      scan_cursor->add_sum_func(sum_func);
      cursor_manager->add_cursor(scan_cursor);

    }
    cursor_managers->push_back(cursor_manager);
  }
}
+
+/**
+ Helper function that takes a list of window functions and writes
+ their values in the current table record.
+*/
+static
+bool save_window_function_values(List<Item_window_func>& window_functions,
+ TABLE *tbl, uchar *rowid_buf)
+{
+ List_iterator_fast<Item_window_func> iter(window_functions);
+ tbl->file->ha_rnd_pos(tbl->record[0], rowid_buf);
+ store_record(tbl, record[1]);
+ while (Item_window_func *item_win= iter++)
+ item_win->save_in_field(item_win->result_field, true);
+
+ int err= tbl->file->ha_update_row(tbl->record[1], tbl->record[0]);
+ if (err && err != HA_ERR_RECORD_IS_THE_SAME)
+ return true;
+
+ return false;
+}
+
+/*
+ TODO(cvicentiu) update this comment to reflect the new execution.
+
+ Streamed window function computation with window frames.
+
+ We make a single pass over the ordered temp.table, but we're using three
+ cursors:
+ - current row - the row that we're computing window func value for)
+ - start_bound - the start of the frame
+ - bottom_bound - the end of the frame
+
+ All three cursors move together.
+
+ @todo
+ Provided bounds have their 'cursors'... is it better to re-clone their
+ cursors or re-position them onto the current row?
+
+ @detail
+ ROWS BETWEEN 3 PRECEDING -- frame start
+ AND 3 FOLLOWING -- frame end
+
+ /------ frame end (aka BOTTOM)
+ Dataset start |
+ --------====*=======[*]========*========-------->> dataset end
+ | \
+ | +-------- current row
+ |
+ \-------- frame start ("TOP")
+
+ - frame_end moves forward and adds rows into the aggregate function.
+ - frame_start follows behind and removes rows from the aggregate function.
+ - current_row is the row where the value of aggregate function is stored.
+
+ @TODO: Only the first cursor needs to check for run-out-of-partition
+ condition (Others can catch up by counting rows?)
+
+*/
+bool compute_window_func(THD *thd,
+ List<Item_window_func>& window_functions,
+ List<Cursor_manager>& cursor_managers,
+ TABLE *tbl,
+ SORT_INFO *filesort_result)
+{
+ List_iterator_fast<Item_window_func> iter_win_funcs(window_functions);
+ List_iterator_fast<Cursor_manager> iter_cursor_managers(cursor_managers);
+ uint err;
+
+ READ_RECORD info;
+
+ if (init_read_record(&info, current_thd, tbl, NULL/*select*/, filesort_result,
+ 0, 1, FALSE))
+ return true;
+
+ Cursor_manager *cursor_manager;
+ while ((cursor_manager= iter_cursor_managers++))
+ cursor_manager->initialize_cursors(&info);
+
+ /* One partition tracker for each window function. */
+ List<Group_bound_tracker> partition_trackers;
+ Item_window_func *win_func;
+ while ((win_func= iter_win_funcs++))
+ {
+ Group_bound_tracker *tracker= new Group_bound_tracker(thd,
+ win_func->window_spec->partition_list);
+ // TODO(cvicentiu) This should be removed and placed in constructor.
+ tracker->init();
+ partition_trackers.push_back(tracker);
+ }
+
+ List_iterator_fast<Group_bound_tracker> iter_part_trackers(partition_trackers);
+ ha_rows rownum= 0;
+ uchar *rowid_buf= (uchar*) my_malloc(tbl->file->ref_length, MYF(0));
+
+ while (true)
+ {
+ if ((err= info.read_record(&info)))
+ break; // End of file.
+
+ /* Remember current row so that we can restore it before computing
+ each window function. */
+ tbl->file->position(tbl->record[0]);
+ memcpy(rowid_buf, tbl->file->ref, tbl->file->ref_length);
+
+ iter_win_funcs.rewind();
+ iter_part_trackers.rewind();
+ iter_cursor_managers.rewind();
+
+ Group_bound_tracker *tracker;
+ while ((win_func= iter_win_funcs++) &&
+ (tracker= iter_part_trackers++) &&
+ (cursor_manager= iter_cursor_managers++))
+ {
+ if (tracker->check_if_next_group() || (rownum == 0))
+ {
+ /* TODO(cvicentiu)
+ Clearing window functions should happen through cursors. */
+ win_func->window_func()->clear();
+ cursor_manager->notify_cursors_partition_changed(rownum);
+ }
+ else
+ {
+ cursor_manager->notify_cursors_next_row();
+ }
+ /* Return to current row after notifying cursors for each window
+ function. */
+ tbl->file->ha_rnd_pos(tbl->record[0], rowid_buf);
+ }
+
+ /* We now have computed values for each window function. They can now
+ be saved in the current row. */
+ save_window_function_values(window_functions, tbl, rowid_buf);
+
+ rownum++;
+ }
+
+ my_free(rowid_buf);
+ partition_trackers.delete_elements();
+ end_read_record(&info);
+
+ return false;
+}
+
/* Make a list that is a concatenation of two lists of ORDER elements */
+
+static ORDER* concat_order_lists(MEM_ROOT *mem_root, ORDER *list1, ORDER *list2)
+{
+ if (!list1)
+ {
+ list1= list2;
+ list2= NULL;
+ }
+
+ ORDER *res= NULL; // first element in the new list
+ ORDER *prev= NULL; // last element in the new list
+ ORDER *cur_list= list1; // this goes through list1, list2
+ while (cur_list)
+ {
+ for (ORDER *cur= cur_list; cur; cur= cur->next)
+ {
+ ORDER *copy= (ORDER*)alloc_root(mem_root, sizeof(ORDER));
+ memcpy(copy, cur, sizeof(ORDER));
+ if (prev)
+ prev->next= copy;
+ prev= copy;
+ if (!res)
+ res= copy;
+ }
+
+ cur_list= (cur_list == list1)? list2: NULL;
+ }
+
+ if (prev)
+ prev->next= NULL;
+
+ return res;
+}
+
+bool Window_func_runner::add_function_to_run(Item_window_func *win_func)
+{
+
+ Item_sum *sum_func= win_func->window_func();
+ sum_func->setup_window_func(current_thd, win_func->window_spec);
+
+ Item_sum::Sumfunctype type= win_func->window_func()->sum_func();
+
+ switch (type)
+ {
+ /* Distinct is not yet supported. */
+ case Item_sum::GROUP_CONCAT_FUNC:
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "GROUP_CONCAT() aggregate as window function");
+ return true;
+ case Item_sum::SUM_DISTINCT_FUNC:
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "SUM(DISTINCT) aggregate as window function");
+ return true;
+ case Item_sum::AVG_DISTINCT_FUNC:
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "AVG(DISTINCT) aggregate as window function");
+ return true;
+ case Item_sum::COUNT_DISTINCT_FUNC:
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "COUNT(DISTINCT) aggregate as window function");
+ return true;
+ default:
+ break;
+ }
+
+ return window_functions.push_back(win_func);
+}
+
+
+/*
+ Compute the value of window function for all rows.
+*/
+bool Window_func_runner::exec(THD *thd, TABLE *tbl, SORT_INFO *filesort_result)
+{
+ List_iterator_fast<Item_window_func> it(window_functions);
+ Item_window_func *win_func;
+ while ((win_func= it++))
+ {
+ win_func->set_phase_to_computation();
+ // TODO(cvicentiu) Setting the aggregator should probably be done during
+ // setup of Window_funcs_sort.
+ win_func->window_func()->set_aggregator(Aggregator::SIMPLE_AGGREGATOR);
+ }
+ it.rewind();
+
+ List<Cursor_manager> cursor_managers;
+ get_window_functions_required_cursors(thd, window_functions,
+ &cursor_managers);
+
+ /* Go through the sorted array and compute the window function */
+ bool is_error= compute_window_func(thd,
+ window_functions,
+ cursor_managers,
+ tbl, filesort_result);
+ while ((win_func= it++))
+ {
+ win_func->set_phase_to_retrieval();
+ }
+
+ cursor_managers.delete_elements();
+
+ return is_error;
+}
+
+
+bool Window_funcs_sort::exec(JOIN *join, bool keep_filesort_result)
+{
+ THD *thd= join->thd;
+ JOIN_TAB *join_tab= join->join_tab + join->total_join_tab_cnt();
+
+ /* Sort the table based on the most specific sorting criteria of
+ the window functions. */
+ if (create_sort_index(thd, join, join_tab, filesort))
+ return true;
+
+ TABLE *tbl= join_tab->table;
+ SORT_INFO *filesort_result= join_tab->filesort_result;
+
+ bool is_error= runner.exec(thd, tbl, filesort_result);
+
+ if (!keep_filesort_result)
+ {
+ delete join_tab->filesort_result;
+ join_tab->filesort_result= NULL;
+ }
+ return is_error;
+}
+
+
+bool Window_funcs_sort::setup(THD *thd, SQL_SELECT *sel,
+ List_iterator<Item_window_func> &it,
+ JOIN_TAB *join_tab)
+{
+ Window_spec *spec;
+ Item_window_func *win_func= it.peek();
+ Item_window_func *win_func_with_longest_order= NULL;
+ int longest_order_elements= -1;
+
+ /* The iterator should point to a valid function at the start of execution. */
+ DBUG_ASSERT(win_func);
+ do
+ {
+ spec= win_func->window_spec;
+ int win_func_order_elements= spec->partition_list->elements +
+ spec->order_list->elements;
+ if (win_func_order_elements > longest_order_elements)
+ {
+ win_func_with_longest_order= win_func;
+ longest_order_elements= win_func_order_elements;
+ }
+ if (runner.add_function_to_run(win_func))
+ return true;
+ it++;
+ win_func= it.peek();
+ } while (win_func && !(win_func->marker & SORTORDER_CHANGE_FLAG));
+
+ /*
+ The sort criteria must be taken from the last win_func in the group of
+ adjacent win_funcs that do not have SORTORDER_CHANGE_FLAG. This is
+ because the sort order must be the most specific sorting criteria defined
+ within the window function group. This ensures that we sort the table
+ in a way that the result is valid for all window functions belonging to
+ this Window_funcs_sort.
+ */
+ spec= win_func_with_longest_order->window_spec;
+
+ ORDER* sort_order= concat_order_lists(thd->mem_root,
+ spec->partition_list->first,
+ spec->order_list->first);
+ if (sort_order == NULL) // No partition or order by clause.
+ {
+ /* TODO(cvicentiu) This is used as a way to allow an empty OVER ()
+ clause for window functions. However, a better approach is
+ to not call Filesort at all in this case and just read whatever order
+ the temporary table has.
+ Due to cursors not working for out_of_memory cases (yet!), we have to run
+ filesort to generate a sort buffer of the results.
+ In this case we sort by the first field of the temporary table.
+ We should have this field available, even if it is a window_function
+ field. We don't care of the particular sorting result in this case.
+ */
+ ORDER *order= (ORDER *)alloc_root(thd->mem_root, sizeof(ORDER));
+ memset(order, 0, sizeof(*order));
+ Item *item= new (thd->mem_root) Item_field(thd, join_tab->table->field[0]);
+ order->item= (Item **)alloc_root(thd->mem_root, 2 * sizeof(Item *));
+ order->item[1]= NULL;
+ order->item[0]= item;
+ order->field= join_tab->table->field[0];
+ sort_order= order;
+ }
+ filesort= new (thd->mem_root) Filesort(sort_order, HA_POS_ERROR, true, NULL);
+
+ /* Apply the same condition that the subsequent sort has. */
+ filesort->select= sel;
+
+ return false;
+}
+
+
+bool Window_funcs_computation::setup(THD *thd,
+ List<Item_window_func> *window_funcs,
+ JOIN_TAB *tab)
+{
+ order_window_funcs_by_window_specs(window_funcs);
+
+ SQL_SELECT *sel= NULL;
+ /*
+ If the tmp table is filtered during sorting
+ (ex: SELECT with HAVING && ORDER BY), we must make sure to keep the
+ filtering conditions when we perform sorting for window function
+ computation.
+ */
+ if (tab->filesort && tab->filesort->select)
+ {
+ sel= tab->filesort->select;
+ DBUG_ASSERT(!sel->quick);
+ }
+
+ Window_funcs_sort *srt;
+ List_iterator<Item_window_func> iter(*window_funcs);
+ while (iter.peek())
+ {
+ if (!(srt= new Window_funcs_sort()) ||
+ srt->setup(thd, sel, iter, tab))
+ {
+ return true;
+ }
+ win_func_sorts.push_back(srt, thd->mem_root);
+ }
+ return false;
+}
+
+
+bool Window_funcs_computation::exec(JOIN *join, bool keep_last_filesort_result)
+{
+ List_iterator<Window_funcs_sort> it(win_func_sorts);
+ Window_funcs_sort *srt;
+ uint counter= 0; /* Count how many sorts we've executed. */
+ /* Execute each sort */
+ while ((srt = it++))
+ {
+ counter++;
+ bool keep_filesort_result= keep_last_filesort_result &&
+ counter == win_func_sorts.elements;
+ if (srt->exec(join, keep_filesort_result))
+ return true;
+ }
+ return false;
+}
+
+
+void Window_funcs_computation::cleanup()
+{
+ List_iterator<Window_funcs_sort> it(win_func_sorts);
+ Window_funcs_sort *srt;
+ while ((srt = it++))
+ {
+ srt->cleanup();
+ delete srt;
+ }
+}
+
+
+Explain_aggr_window_funcs*
+Window_funcs_computation::save_explain_plan(MEM_ROOT *mem_root,
+ bool is_analyze)
+{
+ Explain_aggr_window_funcs *xpl= new Explain_aggr_window_funcs;
+ List_iterator<Window_funcs_sort> it(win_func_sorts);
+ Window_funcs_sort *srt;
+ while ((srt = it++))
+ {
+ Explain_aggr_filesort *eaf=
+ new Explain_aggr_filesort(mem_root, is_analyze, srt->filesort);
+ xpl->sorts.push_back(eaf, mem_root);
+ }
+ return xpl;
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Unneeded comments (will be removed when we develop a replacement for
+// the feature that was attempted here)
+/////////////////////////////////////////////////////////////////////////////
+ /*
+ TODO Get this code to set can_compute_window_function during preparation,
+ not during execution.
+
+ The reason for this is the following:
+ Our single scan optimization for window functions without tmp table,
+ is valid, if and only if, we only need to perform one sorting operation,
+ via filesort. The cases where we need to perform one sorting operation only:
+
+ * A select with only one window function.
+ * A select with multiple window functions, but they must have their
+ partition and order by clauses compatible. This means that one ordering
+ is acceptable for both window functions.
+
+ For example:
+ partition by a, b, c; order by d, e results in sorting by a b c d e.
+ partition by a; order by d results in sorting by a d.
+
+ This kind of sorting is compatible. The less specific partition does
+ not care for the order of b and c columns so it is valid if we sort
+ by those in case of equality over a.
+
+ partition by a, b; order by d, e results in sorting by a b d e
+ partition by a; order by e results in sorting by a e
+
+ This sorting is incompatible due to the order by clause. The partition by
+ clause is compatible, (partition by a) is a prefix for (partition by a, b)
+ However, order by e is not a prefix for order by d, e, thus it is not
+ compatible.
+
+ The rule for having compatible sorting is thus:
+ Each partition order must contain the other window functions partitions
+ prefixes, or be a prefix itself. This must hold true for all partitions.
+ Analog for the order by clause.
+ */
+#if 0
+ List<Item_window_func> window_functions;
+ SQL_I_List<ORDER> largest_partition;
+ SQL_I_List<ORDER> largest_order_by;
+ bool can_compute_window_live = !need_tmp;
+ // Construct the window_functions item list and check if they can be
+ // computed using only one sorting.
+ //
+ // TODO: Perhaps group functions into compatible sorting bins
+ // to minimize the number of sorting passes required to compute all of them.
+ while ((item= it++))
+ {
+ if (item->type() == Item::WINDOW_FUNC_ITEM)
+ {
+ Item_window_func *item_win = (Item_window_func *) item;
+ window_functions.push_back(item_win);
+ if (!can_compute_window_live)
+ continue; // No point checking since we have to perform multiple sorts.
+ Window_spec *spec = item_win->window_spec;
+ // Having an empty partition list on one window function and a
+ // not empty list on a separate window function causes the sorting
+ // to be incompatible.
+ //
+ // Example:
+ // over (partition by a, order by x) && over (order by x).
+ //
+ // The first function requires an ordering by a first and then by x,
+      // while the second function requires an ordering by x first.
+ // The same restriction is not required for the order by clause.
+ if (largest_partition.elements && !spec->partition_list.elements)
+ {
+ can_compute_window_live= FALSE;
+ continue;
+ }
+ can_compute_window_live= test_if_order_compatible(largest_partition,
+ spec->partition_list);
+ if (!can_compute_window_live)
+ continue;
+
+ can_compute_window_live= test_if_order_compatible(largest_order_by,
+ spec->order_list);
+ if (!can_compute_window_live)
+ continue;
+
+ if (largest_partition.elements < spec->partition_list.elements)
+ largest_partition = spec->partition_list;
+ if (largest_order_by.elements < spec->order_list.elements)
+ largest_order_by = spec->order_list;
+ }
+ }
+ if (can_compute_window_live && window_functions.elements && table_count == 1)
+ {
+ ha_rows examined_rows = 0;
+ ha_rows found_rows = 0;
+ ha_rows filesort_retval;
+ SORT_FIELD *s_order= (SORT_FIELD *) my_malloc(sizeof(SORT_FIELD) *
+ (largest_partition.elements + largest_order_by.elements) + 1,
+ MYF(MY_WME | MY_ZEROFILL | MY_THREAD_SPECIFIC));
+
+ size_t pos= 0;
+ for (ORDER* curr = largest_partition.first; curr; curr=curr->next, pos++)
+ s_order[pos].item = *curr->item;
+
+ for (ORDER* curr = largest_order_by.first; curr; curr=curr->next, pos++)
+ s_order[pos].item = *curr->item;
+
+ table[0]->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
+ MYF(MY_WME | MY_ZEROFILL|
+ MY_THREAD_SPECIFIC));
+
+
+ filesort_retval= filesort(thd, table[0], s_order,
+ (largest_partition.elements + largest_order_by.elements),
+ this->select, HA_POS_ERROR, FALSE,
+ &examined_rows, &found_rows,
+ this->explain->ops_tracker.report_sorting(thd));
+ table[0]->sort.found_records= filesort_retval;
+
+ join_tab->read_first_record = join_init_read_record;
+ join_tab->records= found_rows;
+
+ my_free(s_order);
+ }
+ else
+#endif
+
+
diff --git a/sql/sql_window.h b/sql/sql_window.h
new file mode 100644
index 00000000000..e0c1563e5bb
--- /dev/null
+++ b/sql/sql_window.h
@@ -0,0 +1,235 @@
+
+#ifndef SQL_WINDOW_INCLUDED
+#define SQL_WINDOW_INCLUDED
+
+#include "my_global.h"
+#include "item.h"
+#include "filesort.h"
+#include "records.h"
+
+class Item_window_func;
+
+/*
+ Window functions module.
+
+ Each instance of window function has its own element in SELECT_LEX::window_specs.
+*/
+
+
+class Window_frame_bound : public Sql_alloc
+{
+
+public:
+
+ enum Bound_precedence_type
+ {
+ PRECEDING,
+ CURRENT, // Used for CURRENT ROW window frame bounds
+ FOLLOWING
+ };
+
+ Bound_precedence_type precedence_type;
+
+
+ /*
+ For UNBOUNDED PRECEDING / UNBOUNDED FOLLOWING window frame bounds
+ precedence type is seto to PRECEDING / FOLLOWING and
+ offset is set to NULL.
+ The offset is not meaningful with precedence type CURRENT
+ */
+ Item *offset;
+
+ Window_frame_bound(Bound_precedence_type prec_type,
+ Item *offset_val)
+ : precedence_type(prec_type), offset(offset_val) {}
+
+ bool is_unbounded() { return offset == NULL; }
+
+ void print(String *str, enum_query_type query_type);
+
+};
+
+
+class Window_frame : public Sql_alloc
+{
+
+public:
+
+ enum Frame_units
+ {
+ UNITS_ROWS,
+ UNITS_RANGE
+ };
+
+ enum Frame_exclusion
+ {
+ EXCL_NONE,
+ EXCL_CURRENT_ROW,
+ EXCL_GROUP,
+ EXCL_TIES
+ };
+
+ Frame_units units;
+
+ Window_frame_bound *top_bound;
+
+ Window_frame_bound *bottom_bound;
+
+ Frame_exclusion exclusion;
+
+ Window_frame(Frame_units win_frame_units,
+ Window_frame_bound *win_frame_top_bound,
+ Window_frame_bound *win_frame_bottom_bound,
+ Frame_exclusion win_frame_exclusion)
+ : units(win_frame_units), top_bound(win_frame_top_bound),
+ bottom_bound(win_frame_bottom_bound), exclusion(win_frame_exclusion) {}
+
+ bool check_frame_bounds();
+
+ void print(String *str, enum_query_type query_type);
+
+};
+
+class Window_spec : public Sql_alloc
+{
+ bool window_names_are_checked;
+ public:
+ virtual ~Window_spec() {}
+
+ LEX_STRING *window_ref;
+
+ SQL_I_List<ORDER> *partition_list;
+
+ SQL_I_List<ORDER> *order_list;
+
+ Window_frame *window_frame;
+
+ Window_spec *referenced_win_spec;
+
+ Window_spec(LEX_STRING *win_ref,
+ SQL_I_List<ORDER> *part_list,
+ SQL_I_List<ORDER> *ord_list,
+ Window_frame *win_frame)
+ : window_names_are_checked(false), window_ref(win_ref),
+ partition_list(part_list), order_list(ord_list),
+ window_frame(win_frame), referenced_win_spec(NULL) {}
+
+ virtual char *name() { return NULL; }
+
+ bool check_window_names(List_iterator_fast<Window_spec> &it);
+
+ char *window_reference() { return window_ref ? window_ref->str : NULL; }
+
+ void join_partition_and_order_lists()
+ {
+ *(partition_list->next)= order_list->first;
+ }
+
+ void disjoin_partition_and_order_lists()
+ {
+ *(partition_list->next)= NULL;
+ }
+
+ void print(String *str, enum_query_type query_type);
+
+};
+
+class Window_def : public Window_spec
+{
+ public:
+
+ LEX_STRING *window_name;
+
+ Window_def(LEX_STRING *win_name,
+ LEX_STRING *win_ref,
+ SQL_I_List<ORDER> *part_list,
+ SQL_I_List<ORDER> *ord_list,
+ Window_frame *win_frame)
+ : Window_spec(win_ref, part_list, ord_list, win_frame),
+ window_name(win_name) {}
+
+ char *name() { return window_name->str; }
+
+};
+
+int setup_windows(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
+ List<Item> &fields, List<Item> &all_fields,
+ List<Window_spec> &win_specs, List<Item_window_func> &win_funcs);
+
+
+//////////////////////////////////////////////////////////////////////////////
+// Classes that make window functions computation a part of SELECT's query plan
+//////////////////////////////////////////////////////////////////////////////
+
+class Frame_cursor;
+/*
+ This handles computation of one window function.
+
+  Currently, we make a separate filesort() call for each window function.
+*/
+
+class Window_func_runner : public Sql_alloc
+{
+public:
+ /* Add the function to be computed during the execution pass */
+ bool add_function_to_run(Item_window_func *win_func);
+
+ /* Compute and fill the fields in the table. */
+ bool exec(THD *thd, TABLE *tbl, SORT_INFO *filesort_result);
+
+private:
+ /* A list of window functions for which this Window_func_runner will compute
+ values during the execution phase. */
+ List<Item_window_func> window_functions;
+};
+
+
+/*
+ Represents a group of window functions that require the same sorting of
+ rows and so share the filesort() call.
+
+*/
+
+class Window_funcs_sort : public Sql_alloc
+{
+public:
+ bool setup(THD *thd, SQL_SELECT *sel, List_iterator<Item_window_func> &it,
+ st_join_table *join_tab);
+ bool exec(JOIN *join, bool keep_filesort_result);
+ void cleanup() { delete filesort; }
+
+ friend class Window_funcs_computation;
+
+private:
+ Window_func_runner runner;
+
+ /* Window functions can be computed over this sorting */
+ Filesort *filesort;
+};
+
+
+struct st_join_table;
+class Explain_aggr_window_funcs;
+
+/*
+ This is a "window function computation phase": a single object of this class
+ takes care of computing all window functions in a SELECT.
+
+  - JOIN optimizer is expected to call setup() during query optimization.
+ - JOIN::exec() should call exec() once it has collected join output in a
+ temporary table.
+*/
+
+class Window_funcs_computation : public Sql_alloc
+{
+ List<Window_funcs_sort> win_func_sorts;
+public:
+ bool setup(THD *thd, List<Item_window_func> *window_funcs, st_join_table *tab);
+ bool exec(JOIN *join, bool keep_last_filesort_result);
+
+ Explain_aggr_window_funcs *save_explain_plan(MEM_ROOT *mem_root, bool is_analyze);
+ void cleanup();
+};
+
+
+#endif /* SQL_WINDOW_INCLUDED */
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 5111f0690ab..5b8c3c6c7ec 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -54,6 +54,9 @@
#include "sql_handler.h" // Sql_cmd_handler_*
#include "sql_signal.h"
#include "sql_get_diagnostics.h" // Sql_cmd_get_diagnostics
+#include "sql_cte.h"
+#include "sql_window.h"
+#include "item_windowfunc.h"
#include "event_parse_data.h"
#include "create_options.h"
#include <myisam.h>
@@ -131,8 +134,7 @@ static void my_parse_error_intern(THD *thd, const char *err_text,
/* Push an error into the error stack */
ErrConvString err(yytext, strlen(yytext),
thd->variables.character_set_client);
- my_printf_error(ER_PARSE_ERROR, ER_THD(thd, ER_PARSE_ERROR), MYF(0),
- err_text, err.ptr(), lip->yylineno);
+ my_error(ER_PARSE_ERROR, MYF(0), err_text, err.ptr(), lip->yylineno);
}
@@ -349,7 +351,7 @@ int case_stmt_action_when(LEX *lex, Item *when, bool simple)
*/
return !MY_TEST(i) ||
- sp->push_backpatch(i, ctx->push_label(thd, empty_lex_str, 0)) ||
+ sp->push_backpatch(thd, i, ctx->push_label(thd, empty_lex_str, 0)) ||
sp->add_cont_backpatch(i) ||
sp->add_instr(i);
}
@@ -383,7 +385,7 @@ int case_stmt_action_then(LEX *lex)
(jump from instruction 4 to 12, 7 to 12 ... in the example)
*/
- return sp->push_backpatch(i, ctx->last_label());
+ return sp->push_backpatch(lex->thd, i, ctx->last_label());
}
static bool
@@ -467,7 +469,8 @@ set_local_variable(THD *thd, sp_variable *spv, Item *val)
sp_set= new (thd->mem_root)
sp_instr_set(lex->sphead->instructions(), lex->spcont,
- spv->offset, it, spv->type, lex, TRUE);
+ spv->offset, it, spv->sql_type(),
+ lex, TRUE);
return (sp_set == NULL || lex->sphead->add_instr(sp_set));
}
@@ -562,11 +565,12 @@ create_item_for_sp_var(THD *thd, LEX_STRING name, sp_variable *spvar,
DBUG_ASSERT(spc && spvar);
/* Position and length of the SP variable name in the query. */
- pos_in_q= start_in_q - lex->sphead->m_tmp_query;
- len_in_q= end_in_q - start_in_q;
+ pos_in_q= (uint)(start_in_q - lex->sphead->m_tmp_query);
+ len_in_q= (uint)(end_in_q - start_in_q);
item= new (thd->mem_root)
- Item_splocal(thd, name, spvar->offset, spvar->type, pos_in_q, len_in_q);
+ Item_splocal(thd, name, spvar->offset, spvar->sql_type(),
+ pos_in_q, len_in_q);
#ifndef DBUG_OFF
if (item)
@@ -703,47 +707,13 @@ bool add_select_to_union_list(LEX *lex, bool is_union_distinct,
return TRUE;
mysql_init_select(lex);
lex->current_select->linkage=UNION_TYPE;
+ lex->current_select->with_all_modifier= !is_union_distinct;
if (is_union_distinct) /* UNION DISTINCT - remember position */
lex->current_select->master_unit()->union_distinct=
lex->current_select;
return FALSE;
}
-/**
- @brief Initializes a SELECT_LEX for a query within parentheses (aka
- braces).
-
- @return false if successful, true if an error was reported. In the latter
- case parsing should stop.
- */
-bool setup_select_in_parentheses(LEX *lex)
-{
- SELECT_LEX * sel= lex->current_select;
- /*
- if (sel->set_braces(1))
- {
- my_parse_error(lex->thd, ER_SYNTAX_ERROR);
- return TRUE;
- }
- */
- DBUG_ASSERT(sel->braces);
- if (sel->linkage == UNION_TYPE &&
- !sel->master_unit()->first_select()->braces &&
- sel->master_unit()->first_select()->linkage ==
- UNION_TYPE)
- {
- my_parse_error(lex->thd, ER_SYNTAX_ERROR);
- return TRUE;
- }
- if (sel->linkage == UNION_TYPE &&
- sel->olap != UNSPECIFIED_OLAP_TYPE &&
- sel->master_unit()->fake_select_lex)
- {
- my_error(ER_WRONG_USAGE, MYF(0), "CUBE/ROLLUP", "ORDER BY");
- return TRUE;
- }
- return FALSE;
-}
static bool add_create_index_prepare(LEX *lex, Table_ident *table)
{
@@ -885,7 +855,7 @@ static void add_key_to_list(LEX *lex, LEX_STRING *field_name,
lex->alter_info.key_list.push_back(key, mem_root);
}
-void LEX::init_last_field(Create_field *field, const char *field_name,
+void LEX::init_last_field(Column_definition *field, const char *field_name,
CHARSET_INFO *cs)
{
last_field= field;
@@ -893,28 +863,25 @@ void LEX::init_last_field(Create_field *field, const char *field_name,
field->field_name= field_name;
/* reset LEX fields that are used in Create_field::set_and_check() */
- length= 0;
- dec= 0;
charset= cs;
}
-void LEX::set_last_field_type(enum enum_field_types field_type)
+void LEX::set_last_field_type(const Lex_field_type_st &type)
{
- last_field->sql_type= field_type;
- last_field->create_if_not_exists= check_exists;
+ last_field->sql_type= type.field_type();
last_field->charset= charset;
- if (length)
+ if (type.length())
{
int err;
- last_field->length= my_strtoll10(length, NULL, &err);
+ last_field->length= my_strtoll10(type.length(), NULL, &err);
if (err)
last_field->length= ~0ULL; // safety
}
else
last_field->length= 0;
- last_field->decimals= dec ? (uint)atoi(dec) : 0;
+ last_field->decimals= type.dec() ? (uint)atoi(type.dec()) : 0;
}
bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin)
@@ -944,6 +911,19 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin)
MYSQL_YYABORT; \
} while(0)
+Virtual_column_info *add_virtual_expression(THD *thd, Item *expr)
+{
+ Virtual_column_info *v= new (thd->mem_root) Virtual_column_info();
+ if (!v)
+ {
+ mem_alloc_error(sizeof(Virtual_column_info));
+ return 0;
+ }
+ v->expr= expr;
+ v->utf8= 0; /* connection charset */
+ return v;
+}
+
%}
%union {
int num;
@@ -956,8 +936,13 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin)
LEX_SYMBOL symbol;
struct sys_var_with_base variable;
struct { int vars, conds, hndlrs, curs; } spblock;
+ Lex_length_and_dec_st Lex_length_and_dec;
+ Lex_cast_type_st Lex_cast_type;
+ Lex_field_type_st Lex_field_type;
+ Lex_dyncol_type_st Lex_dyncol_type;
/* pointers */
+ Create_field *create_field;
CHARSET_INFO *charset;
Condition_information_item *cond_info_item;
DYNCALL_CREATE_DEF *dyncol_def;
@@ -974,11 +959,13 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin)
List<Item> *item_list;
List<Statement_information_item> *stmt_info_list;
List<String> *string_list;
+ List<LEX_STRING> *lex_str_list;
Statement_information_item *stmt_info_item;
String *string;
TABLE_LIST *table_list;
Table_ident *table;
char *simple_string;
+ const char *const_simple_string;
chooser_compare_func_creator boolfunc2creator;
class my_var *myvar;
class sp_condition_value *spcondvalue;
@@ -986,13 +973,18 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin)
class sp_label *splabel;
class sp_name *spname;
class sp_variable *spvar;
+ class With_clause *with_clause;
+ class Virtual_column_info *virtual_column;
+
handlerton *db_type;
st_select_lex *select_lex;
struct p_elem_val *p_elem_value;
+ class Window_frame *window_frame;
+ class Window_frame_bound *window_frame_bound;
udf_func *udf;
+ st_trg_execution_order trg_execution_order;
/* enums */
- enum Cast_target cast_type;
enum Condition_information_item::Name cond_info_item_name;
enum enum_diag_condition_item_name diag_condition_item_name;
enum Diagnostics_information::Which_area diag_area;
@@ -1015,6 +1007,10 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin)
enum sp_variable::enum_mode spvar_mode;
enum thr_lock_type lock_type;
enum enum_mysql_timestamp_type date_time_type;
+ enum Window_frame_bound::Bound_precedence_type bound_precedence_type;
+ enum Window_frame::Frame_units frame_units;
+ enum Window_frame::Frame_exclusion frame_exclusion;
+ enum trigger_order_type trigger_action_order_type;
DDL_options_st object_ddl_options;
}
@@ -1026,15 +1022,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%parse-param { THD *thd }
%lex-param { THD *thd }
/*
- Currently there are 160 shift/reduce conflicts.
+ Currently there are 102 shift/reduce conflicts.
We should not introduce new conflicts any more.
*/
-%expect 162
+%expect 102
/*
Comments for TOKENS.
For each token, please include in the same line a comment that contains
the following tags:
+ SQL-2011-N : Non Reserved keyword as per SQL-2011
SQL-2003-R : Reserved keyword as per SQL-2003
SQL-2003-N : Non Reserved keyword as per SQL-2003
SQL-1999-R : Reserved keyword as per SQL-1999
@@ -1152,6 +1149,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token CREATE /* SQL-2003-R */
%token CROSS /* SQL-2003-R */
%token CUBE_SYM /* SQL-2003-R */
+%token CUME_DIST_SYM
%token CURDATE /* MYSQL-FUNC */
%token CURRENT_SYM /* SQL-2003-R */
%token CURRENT_USER /* SQL-2003-R */
@@ -1181,8 +1179,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token DEFINER_SYM
%token DELAYED_SYM
%token DELAY_KEY_WRITE_SYM
-%token DELETE_SYM /* SQL-2003-R */
%token DELETE_DOMAIN_ID_SYM
+%token DELETE_SYM /* SQL-2003-R */
+%token DENSE_RANK_SYM
%token DESC /* SQL-2003-N */
%token DESCRIBE /* SQL-2003-R */
%token DES_KEY_FILE
@@ -1223,6 +1222,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token EVERY_SYM /* SQL-2003-N */
%token EXCHANGE_SYM
%token EXAMINED_SYM
+%token EXCLUDE_SYM /* SQL-2011-N */
%token EXECUTE_SYM /* SQL-2003-R */
%token EXISTS /* SQL-2003-R */
%token EXIT_SYM
@@ -1236,11 +1236,14 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token FAULTS_SYM
%token FETCH_SYM /* SQL-2003-R */
%token FILE_SYM
+%token FIRST_VALUE_SYM /* SQL-2011 */
%token FIRST_SYM /* SQL-2003-N */
%token FIXED_SYM
%token FLOAT_NUM
%token FLOAT_SYM /* SQL-2003-R */
%token FLUSH_SYM
+%token FOLLOWS_SYM /* MYSQL trigger*/
+%token FOLLOWING_SYM /* SQL-2011-N */
%token FORCE_SYM
%token FOREIGN /* SQL-2003-R */
%token FOR_SYM /* SQL-2003-R */
@@ -1262,6 +1265,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token GRANTS
%token GROUP_SYM /* SQL-2003-R */
%token GROUP_CONCAT_SYM
+%token LAG_SYM /* SQL-2011 */
+%token LEAD_SYM /* SQL-2011 */
%token HANDLER_SYM
%token HARD_SYM
%token HASH_SYM
@@ -1284,6 +1289,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token IGNORE_DOMAIN_IDS_SYM
%token IGNORE_SYM
%token IGNORE_SERVER_IDS_SYM
+%token IMMEDIATE_SYM /* SQL-2003-R */
%token IMPORT
%token INDEXES
%token INDEX_SYM
@@ -1307,6 +1313,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token ISSUER_SYM
%token ITERATE_SYM
%token JOIN_SYM /* SQL-2003-R */
+%token JSON_SYM
%token KEYS
%token KEY_BLOCK_SIZE
%token KEY_SYM /* SQL-2003-N */
@@ -1342,6 +1349,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token LOOP_SYM
%token LOW_PRIORITY
%token MASTER_CONNECT_RETRY_SYM
+%token MASTER_DELAY_SYM
%token MASTER_GTID_POS_SYM
%token MASTER_HOST_SYM
%token MASTER_LOG_FILE_SYM
@@ -1415,10 +1423,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token NO_SYM /* SQL-2003-R */
%token NO_WAIT_SYM
%token NO_WRITE_TO_BINLOG
+%token NTILE_SYM
%token NULL_SYM /* SQL-2003-R */
%token NUM
%token NUMBER_SYM /* SQL-2003-N */
%token NUMERIC_SYM /* SQL-2003-R */
+%token NTH_VALUE_SYM /* SQL-2011 */
%token NVARCHAR_SYM
%token OFFSET_SYM
%token OLD_PASSWORD_SYM
@@ -1435,9 +1445,11 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token ORDER_SYM /* SQL-2003-R */
%token OR_OR_SYM /* OPERATOR */
%token OR_SYM /* SQL-2003-R */
+%token OTHERS_SYM /* SQL-2011-N */
%token OUTER
%token OUTFILE
%token OUT_SYM /* SQL-2003-R */
+%token OVER_SYM
%token OWNER_SYM
%token PACK_KEYS_SYM
%token PAGE_SYM
@@ -1450,6 +1462,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token PARTITIONS_SYM
%token PARTITIONING_SYM
%token PASSWORD_SYM
+%token PERCENT_RANK_SYM
%token PERSISTENT_SYM
%token PHASE_SYM
%token PLUGINS_SYM
@@ -1458,6 +1471,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token POLYGON
%token PORT_SYM
%token POSITION_SYM /* SQL-2003-N */
+%token PRECEDES_SYM /* MYSQL */
+%token PRECEDING_SYM /* SQL-2011-N */
%token PRECISION /* SQL-2003-R */
%token PREPARE_SYM /* SQL-2003-R */
%token PRESERVE_SYM
@@ -1475,6 +1490,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token QUERY_SYM
%token QUICK
%token RANGE_SYM /* SQL-2003-R */
+%token RANK_SYM
%token READS_SYM /* SQL-2003-R */
%token READ_ONLY_SYM
%token READ_SYM /* SQL-2003-N */
@@ -1482,6 +1498,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token REAL /* SQL-2003-R */
%token REBUILD_SYM
%token RECOVER_SYM
+%token RECURSIVE_SYM
%token REDOFILE_SYM
%token REDO_BUFFER_SIZE_SYM
%token REDUNDANT_SYM
@@ -1520,10 +1537,11 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token ROLLBACK_SYM /* SQL-2003-R */
%token ROLLUP_SYM /* SQL-2003-R */
%token ROUTINE_SYM /* SQL-2003-N */
-%token ROWS_SYM /* SQL-2003-R */
-%token ROW_FORMAT_SYM
%token ROW_SYM /* SQL-2003-R */
+%token ROWS_SYM /* SQL-2003-R */
%token ROW_COUNT_SYM /* SQL-2003-N */
+%token ROW_FORMAT_SYM
+%token ROW_NUMBER_SYM
%token RTREE_SYM
%token SAVEPOINT_SYM /* SQL-2003-R */
%token SCHEDULE_SYM
@@ -1587,6 +1605,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token STD_SYM
%token STOP_SYM
%token STORAGE_SYM
+%token STORED_SYM
%token STRAIGHT_JOIN
%token STRING_SYM
%token SUBCLASS_ORIGIN_SYM /* SQL-2003-N */
@@ -1614,6 +1633,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token TEXT_SYM
%token THAN_SYM
%token THEN_SYM /* SQL-2003-R */
+%token TIES_SYM /* SQL-2011-N */
%token TIMESTAMP /* SQL-2003-R */
%token TIMESTAMP_ADD
%token TIMESTAMP_DIFF
@@ -1634,6 +1654,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token TYPE_SYM /* SQL-2003-N */
%token UDF_RETURNS_SYM
%token ULONGLONG_NUM
+%token UNBOUNDED_SYM /* SQL-2011-N */
%token UNCOMMITTED_SYM /* SQL-2003-N */
%token UNDEFINED_SYM
%token UNDERSCORE_CHARSET
@@ -1675,6 +1696,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token WEIGHT_STRING_SYM
%token WHEN_SYM /* SQL-2003-R */
%token WHERE /* SQL-2003-R */
+%token WINDOW_SYM
%token WHILE_SYM
%token WITH /* SQL-2003-R */
%token WITH_CUBE_SYM /* INTERNAL */
@@ -1719,7 +1741,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
IDENT_sys TEXT_STRING_sys TEXT_STRING_literal
NCHAR_STRING opt_component key_cache_name
sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem ident_or_empty
- opt_constraint constraint opt_ident
+ opt_constraint constraint opt_ident ident_table_alias
%type <lex_str_ptr>
opt_table_alias
@@ -1729,13 +1751,25 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
table_ident_opt_wild create_like
%type <simple_string>
- remember_name remember_end opt_db remember_tok_start
+ remember_name remember_end opt_db
+ remember_tok_start remember_tok_end
wild_and_where
+ field_length opt_field_length opt_field_length_default_1
+
+%type <const_simple_string>
+ opt_place
%type <string>
text_string hex_or_bin_String opt_gconcat_separator
-%type <field_type> type_with_opt_collate int_type real_type field_type
+%type <field_type> int_type real_type
+
+%type <Lex_field_type> type_with_opt_collate field_type
+
+%type <Lex_dyncol_type> opt_dyncol_type dyncol_type
+ numeric_dyncol_type temporal_dyncol_type string_dyncol_type
+
+%type <create_field> field_spec column_def
%type <geom_type> spatial_type
@@ -1744,17 +1778,17 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
udf_type opt_local opt_no_write_to_binlog
opt_temporary all_or_any opt_distinct
opt_ignore_leaves fulltext_options union_option
- opt_not opt_union_order_or_limit
- union_opt select_derived_init transaction_access_mode_types
+ opt_not
+ select_derived_init transaction_access_mode_types
opt_natural_language_mode opt_query_expansion
opt_ev_status opt_ev_on_completion ev_on_completion opt_ev_comment
ev_alter_on_schedule_completion opt_ev_rename_to opt_ev_sql_stmt
- optional_flush_tables_arguments opt_dyncol_type dyncol_type
+ optional_flush_tables_arguments
opt_time_precision kill_type kill_option int_num
opt_default_time_precision
case_stmt_body opt_bin_mod
opt_if_exists_table_element opt_if_not_exists_table_element
- opt_into opt_procedure_clause
+ opt_recursive
%type <object_ddl_options>
create_or_replace
@@ -1793,15 +1827,18 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
literal text_literal insert_ident order_ident temporal_literal
simple_ident expr opt_expr opt_else sum_expr in_sum_expr
variable variable_aux bool_pri
- predicate bit_expr
- table_wild simple_expr udf_expr
+ predicate bit_expr parenthesized_expr
+ table_wild simple_expr column_default_non_parenthesized_expr udf_expr
expr_or_default set_expr_or_default
- geometry_function
- signed_literal now_or_signed_literal opt_escape
+ geometry_function signed_literal expr_or_literal
+ opt_escape
sp_opt_default
simple_ident_nospvar simple_ident_q
field_or_var limit_option
part_func_expr
+ window_func_expr
+ window_func
+ simple_window_func
function_call_keyword
function_call_nonkeyword
function_call_generic
@@ -1837,9 +1874,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <table_list>
join_table_list join_table
table_factor table_ref esc_table_ref
+ table_primary_ident table_primary_derived
select_derived derived_table_list
select_derived_union
-
+ derived_query_specification
%type <date_time_type> date_time_type;
%type <interval> interval
@@ -1853,9 +1891,11 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <ha_rkey_mode> handler_rkey_mode
-%type <cast_type> cast_type
+%type <Lex_cast_type> cast_type cast_type_numeric cast_type_temporal
+
+%type <Lex_length_and_dec> precision opt_precision float_options
-%type <symbol> keyword keyword_sp
+%type <symbol> keyword keyword_sp keyword_alias
%type <lex_user> user grant_user grant_role user_or_role current_role
admin_option_for_role user_maybe_role
@@ -1875,8 +1915,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <variable> internal_variable_name
%type <select_lex> subselect
- get_select_lex query_specification
+ get_select_lex get_select_lex_derived
+ query_specification
+ query_term_union_not_ready
+ query_term_union_ready
query_expression_body
+ select_paren_derived
%type <boolfunc2creator> comp_op
@@ -1886,6 +1930,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <myvar> select_outvar
+%type <virtual_column> opt_check_constraint check_constraint virtual_column_func
+ column_default_expr
+
%type <NONE>
analyze_stmt_command
query verb_clause create change select do drop insert replace insert2
@@ -1899,7 +1946,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
persistent_column_stat_spec persistent_index_stat_spec
table_column_list table_index_list table_index_name
check start checksum
- field_list field_list_item field_spec kill column_def key_def
+ field_list field_list_item kill key_def constraint_def
keycache_list keycache_list_or_parts assign_to_keycache
assign_to_keycache_parts
preload_list preload_list_or_parts preload_keys preload_keys_parts
@@ -1907,13 +1954,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
opt_limit_clause delete_limit_clause fields opt_values values
procedure_list procedure_list2 procedure_item
field_def handler opt_generated_always
- opt_precision opt_ignore opt_column opt_restrict
- grant revoke set lock unlock string_list field_options field_option
- field_opt_list opt_binary table_lock_list table_lock
+ opt_ignore opt_column opt_restrict
+ grant revoke set lock unlock string_list field_options
+ opt_binary table_lock_list table_lock
ref_list opt_match_clause opt_on_update_delete use
opt_delete_options opt_delete_option varchar nchar nvarchar
opt_outer table_list table_name table_alias_ref_list table_alias_ref
- opt_place
opt_attribute opt_attribute_list attribute column_list column_list_id
opt_column_list grant_privileges grant_ident grant_list grant_option
object_privilege object_privilege_list user_list user_and_role_list
@@ -1926,9 +1972,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
handler_rkey_function handler_read_or_scan
single_multi table_wild_list table_wild_one opt_wild
union_clause union_list
- precision subselect_start opt_and charset
+ subselect_start opt_and charset
subselect_end select_var_list select_var_list_init help
- field_length opt_field_length
opt_extended_describe shutdown
opt_format_json
prepare prepare_src execute deallocate
@@ -1951,12 +1996,14 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
definer_opt no_definer definer get_diagnostics
parse_vcol_expr vcol_opt_specifier vcol_opt_attribute
vcol_opt_attribute_list vcol_attribute
+ opt_serial_attribute opt_serial_attribute_list serial_attribute
explainable_command
opt_delete_gtid_domain
END_OF_INPUT
%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
%type <NONE> sp_proc_stmt_statement sp_proc_stmt_return
+ sp_proc_stmt_in_returns_clause
%type <NONE> sp_proc_stmt_compound_ok
%type <NONE> sp_proc_stmt_if
%type <NONE> sp_labeled_control sp_unlabeled_control
@@ -1982,6 +2029,9 @@ END_OF_INPUT
%type <NONE> signal_stmt resignal_stmt
%type <diag_condition_item_name> signal_condition_information_item_name
+%type <trg_execution_order> trigger_follows_precedes_clause;
+%type <trigger_action_order_type> trigger_action_order;
+
%type <diag_area> which_area;
%type <diag_info> diagnostics_information;
%type <stmt_info_item> statement_information_item;
@@ -1991,12 +2041,27 @@ END_OF_INPUT
%type <cond_info_item_name> condition_information_item_name;
%type <cond_info_list> condition_information;
+%type <NONE> opt_window_clause window_def_list window_def window_spec
+%type <lex_str_ptr> window_name
+%type <NONE> opt_window_ref opt_window_frame_clause
+%type <frame_units> window_frame_units;
+%type <NONE> window_frame_extent;
+%type <frame_exclusion> opt_window_frame_exclusion;
+%type <window_frame_bound> window_frame_start window_frame_bound;
+
+
%type <NONE>
'-' '+' '*' '/' '%' '(' ')'
',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM
THEN_SYM WHEN_SYM DIV_SYM MOD_SYM OR2_SYM AND_AND_SYM DELETE_SYM
ROLE_SYM
+%type <with_clause> opt_with_clause with_clause
+
+%type <lex_str_ptr> query_name
+
+%type <lex_str_list> opt_with_column_list
+
%%
@@ -2152,23 +2217,20 @@ prepare:
PREPARE_SYM ident FROM prepare_src
{
LEX *lex= thd->lex;
+ if (lex->table_or_sp_used())
+ my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
+ "PREPARE..FROM"));
lex->sql_command= SQLCOM_PREPARE;
lex->prepared_stmt_name= $2;
}
;
prepare_src:
- TEXT_STRING_sys
- {
- LEX *lex= thd->lex;
- lex->prepared_stmt_code= $1;
- lex->prepared_stmt_code_is_varref= FALSE;
- }
- | '@' ident_or_text
+ { Lex->expr_allows_subselect= false; }
+ expr
{
- LEX *lex= thd->lex;
- lex->prepared_stmt_code= $2;
- lex->prepared_stmt_code_is_varref= TRUE;
+ Lex->prepared_stmt_code= $2;
+ Lex->expr_allows_subselect= true;
}
;
@@ -2181,11 +2243,27 @@ execute:
}
execute_using
{}
+ | EXECUTE_SYM IMMEDIATE_SYM prepare_src
+ {
+ if (Lex->table_or_sp_used())
+ my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
+ "EXECUTE IMMEDIATE"));
+ Lex->sql_command= SQLCOM_EXECUTE_IMMEDIATE;
+ }
+ execute_using
+ {}
;
execute_using:
/* nothing */
- | USING execute_var_list
+ | USING { Lex->expr_allows_subselect= false; }
+ execute_var_list
+ {
+ if (Lex->table_or_sp_used())
+ my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
+ "EXECUTE..USING"));
+ Lex->expr_allows_subselect= true;
+ }
;
execute_var_list:
@@ -2194,12 +2272,9 @@ execute_var_list:
;
execute_var_ident:
- '@' ident_or_text
+ expr_or_default
{
- LEX *lex=Lex;
- LEX_STRING *lexstr= (LEX_STRING*)thd->memdup(&$2, sizeof(LEX_STRING));
- if (!lexstr || lex->prepared_stmt_params.push_back(lexstr,
- thd->mem_root))
+ if (Lex->prepared_stmt_params.push_back($1, thd->mem_root))
MYSQL_YYABORT;
}
;
@@ -2257,6 +2332,16 @@ master_def:
{
Lex->mi.connect_retry = $3;
}
+ | MASTER_DELAY_SYM '=' ulong_num
+ {
+ if ($3 > MASTER_DELAY_MAX)
+ {
+ my_error(ER_MASTER_DELAY_VALUE_OUT_OF_RANGE, MYF(0),
+ (uint) $3, (uint) MASTER_DELAY_MAX);
+ }
+ else
+ Lex->mi.sql_delay = $3;
+ }
| MASTER_SSL_SYM '=' ulong_num
{
Lex->mi.ssl= $3 ?
@@ -2389,9 +2474,9 @@ master_file_def:
If the user specified a value < BIN_LOG_HEADER_SIZE, adjust it
instead of causing subsequent errors.
We need to do it in this file, because only there we know that
- MASTER_LOG_POS has been explicitely specified. On the contrary
+ MASTER_LOG_POS has been explicitly specified. On the contrary
in change_master() (sql_repl.cc) we cannot distinguish between 0
- (MASTER_LOG_POS explicitely specified as 0) and 0 (unspecified),
+ (MASTER_LOG_POS explicitly specified as 0) and 0 (unspecified),
whereas we want to distinguish (specified 0 means "read the binlog
from 0" (4 in fact), unspecified means "don't change the position
(keep the preceding value)").
@@ -2540,6 +2625,7 @@ create:
}
view_or_trigger_or_sp_or_event { }
| create_or_replace USER_SYM opt_if_not_exists clear_privileges grant_list
+ opt_require_clause opt_resource_options
{
if (Lex->set_command_with_check(SQLCOM_CREATE_USER, $1 | $3))
MYSQL_YYABORT;
@@ -2590,6 +2676,7 @@ server_option:
{
MYSQL_YYABORT_UNLESS(Lex->server_options.host.str == 0);
Lex->server_options.host= $2;
+ my_casedn_str(system_charset_info, Lex->server_options.host.str);
}
| DATABASE TEXT_STRING_sys
{
@@ -2928,9 +3015,7 @@ sp_param_name_and_type:
LEX *lex= Lex;
sp_variable *spvar= $<spvar>2;
- spvar->type= $3;
- if (lex->sphead->fill_field_definition(thd, lex, $3,
- lex->last_field))
+ if (lex->sphead->fill_field_definition(thd, lex, lex->last_field))
{
MYSQL_YYABORT;
}
@@ -3017,7 +3102,6 @@ sp_decl:
LEX *lex= Lex;
sp_pcontext *pctx= lex->spcont;
uint num_vars= pctx->context_var_count();
- enum enum_field_types var_type= $4;
Item *dflt_value_item= $5;
if (!dflt_value_item)
@@ -3040,11 +3124,10 @@ sp_decl:
if (!last)
spvar->field_def= *lex->last_field;
- spvar->type= var_type;
spvar->default_value= dflt_value_item;
spvar->field_def.field_name= spvar->name.str;
- if (lex->sphead->fill_field_definition(thd, lex, var_type,
+ if (lex->sphead->fill_field_definition(thd, lex,
&spvar->field_def))
{
MYSQL_YYABORT;
@@ -3054,10 +3137,10 @@ sp_decl:
/* The last instruction is responsible for freeing LEX. */
- sp_instr_set *is= new (thd->mem_root)
- sp_instr_set(lex->sphead->instructions(),
+ sp_instr_set *is= new (lex->thd->mem_root)
+ sp_instr_set(lex->sphead->instructions(),
pctx, var_idx, dflt_value_item,
- var_type, lex, last);
+ $4.field_type(), lex, last);
if (is == NULL || lex->sphead->add_instr(is))
MYSQL_YYABORT;
}
@@ -3101,10 +3184,10 @@ sp_decl:
/* For continue handlers, mark end of handler scope. */
if ($2 == sp_handler::CONTINUE &&
- sp->push_backpatch(i, ctx->last_label()))
+ sp->push_backpatch(thd, i, ctx->last_label()))
MYSQL_YYABORT;
- if (sp->push_backpatch(i, ctx->push_label(thd, empty_lex_str, 0)))
+ if (sp->push_backpatch(thd, i, ctx->push_label(thd, empty_lex_str, 0)))
MYSQL_YYABORT;
}
sp_hcond_list sp_proc_stmt
@@ -3129,7 +3212,7 @@ sp_decl:
sp_instr_hreturn(sp->instructions(), ctx);
if (i == NULL ||
sp->add_instr(i) ||
- sp->push_backpatch(i, lex->spcont->last_label())) /* Block end */
+ sp->push_backpatch(thd, i, lex->spcont->last_label())) /* Block end */
MYSQL_YYABORT;
}
lex->sphead->backpatch(hlab);
@@ -3594,18 +3677,31 @@ sp_opt_default:
| DEFAULT expr { $$ = $2; }
;
-sp_proc_stmt:
- sp_proc_stmt_statement
- | sp_proc_stmt_return
+/*
+ ps_proc_stmt_in_returns_clause is a statement that is allowed
+ in the RETURNS clause of a stored function definition directly,
+ without the BEGIN..END block.
+ It should not include any syntax structures starting with '(', to avoid
+ shift/reduce conflicts with the rule "field_type" and its sub-rules
+ that scan an optional length, like CHAR(1) or YEAR(4).
+ See MDEV-9166.
+*/
+sp_proc_stmt_in_returns_clause:
+ sp_proc_stmt_return
| sp_labeled_block
| sp_unlabeled_block
| sp_labeled_control
+ | sp_proc_stmt_compound_ok
+ ;
+
+sp_proc_stmt:
+ sp_proc_stmt_in_returns_clause
+ | sp_proc_stmt_statement
| sp_proc_stmt_leave
| sp_proc_stmt_iterate
| sp_proc_stmt_open
| sp_proc_stmt_fetch
| sp_proc_stmt_close
- | sp_proc_stmt_compound_ok
;
sp_proc_stmt_compound_ok:
@@ -3748,7 +3844,7 @@ sp_proc_stmt_leave:
i= new (thd->mem_root) sp_instr_jump(ip, ctx);
if (i == NULL)
MYSQL_YYABORT;
- sp->push_backpatch(i, lab); /* Jumping forward */
+ sp->push_backpatch(thd, i, lab); /* Jumping forward */
sp->add_instr(i);
}
;
@@ -3898,7 +3994,7 @@ sp_if:
sp_instr_jump_if_not *i= new (thd->mem_root)
sp_instr_jump_if_not(ip, ctx, $2, lex);
if (i == NULL ||
- sp->push_backpatch(i, ctx->push_label(thd, empty_lex_str, 0)) ||
+ sp->push_backpatch(thd, i, ctx->push_label(thd, empty_lex_str, 0)) ||
sp->add_cont_backpatch(i) ||
sp->add_instr(i))
MYSQL_YYABORT;
@@ -3915,7 +4011,7 @@ sp_if:
sp->add_instr(i))
MYSQL_YYABORT;
sp->backpatch(ctx->pop_label());
- sp->push_backpatch(i, ctx->push_label(thd, empty_lex_str, 0));
+ sp->push_backpatch(thd, i, ctx->push_label(thd, empty_lex_str, 0));
}
sp_elseifs
{
@@ -4200,7 +4296,7 @@ while_body:
sp_instr_jump_if_not(ip, lex->spcont, $1, lex);
if (i == NULL ||
/* Jumping forward */
- sp->push_backpatch(i, lex->spcont->last_label()) ||
+ sp->push_backpatch(thd, i, lex->spcont->last_label()) ||
sp->new_cont_backpatch(i) ||
sp->add_instr(i))
MYSQL_YYABORT;
@@ -4712,7 +4808,11 @@ create_body:
conflict that prevents the rule above from parsing a syntax like
CREATE TABLE t1 (SELECT 1);
*/
- | '(' create_select ')' { Select->set_braces(1);} union_opt {}
+ | '(' create_select_query_specification ')'
+ | '(' create_select_query_specification ')'
+ { Select->set_braces(1);} union_list {}
+ | '(' create_select_query_specification ')'
+ { Select->set_braces(1);} union_order_or_limit {}
| create_like
{
@@ -4733,12 +4833,27 @@ create_like:
opt_create_select:
/* empty */ {}
- | opt_duplicate opt_as create_select
- { Select->set_braces(0);}
- union_clause {}
- | opt_duplicate opt_as '(' create_select ')'
- { Select->set_braces(1);}
- union_opt {}
+ | opt_duplicate opt_as create_select_query_expression
+ ;
+
+create_select_query_expression:
+ opt_with_clause SELECT_SYM create_select_part2 opt_table_expression
+ create_select_part4
+ {
+ Select->set_braces(0);
+ Select->set_with_clause($1);
+ }
+ union_clause
+ | opt_with_clause SELECT_SYM create_select_part2
+ create_select_part3_union_not_ready create_select_part4
+ {
+ Select->set_with_clause($1);
+ }
+ | '(' create_select_query_specification ')'
+ | '(' create_select_query_specification ')'
+ { Select->set_braces(1);} union_list {}
+ | '(' create_select_query_specification ')'
+ { Select->set_braces(1);} union_order_or_limit {}
;
opt_create_partitioning:
@@ -4757,7 +4872,7 @@ opt_create_partitioning:
/*
This part of the parser is about handling of the partition information.
- It's first version was written by Mikael Ronström with lots of answers to
+ It's first version was written by Mikael Ronstrm with lots of answers to
questions provided by Antony Curtis.
The partition grammar can be called from three places.
@@ -4834,7 +4949,9 @@ partition_entry:
;
partition:
- BY part_type_def opt_num_parts opt_sub_part part_defs
+ BY
+ { Lex->safe_to_cache_query= 1; }
+ part_type_def opt_num_parts opt_sub_part part_defs
;
part_type_def:
@@ -4923,7 +5040,7 @@ part_func:
'(' remember_name part_func_expr remember_end ')'
{
partition_info *part_info= Lex->part_info;
- if (part_info->set_part_expr($2+1, $3, $4, FALSE))
+ if (part_info->set_part_expr(thd, $2 + 1, $3, $4, FALSE))
{ MYSQL_YYABORT; }
part_info->num_columns= 1;
part_info->column_list= FALSE;
@@ -4933,7 +5050,7 @@ part_func:
sub_part_func:
'(' remember_name part_func_expr remember_end ')'
{
- if (Lex->part_info->set_part_expr($2+1, $3, $4, TRUE))
+ if (Lex->part_info->set_part_expr(thd, $2 + 1, $3, $4, TRUE))
{ MYSQL_YYABORT; }
}
;
@@ -4991,11 +5108,7 @@ sub_part_field_item:
part_func_expr:
bit_expr
{
- LEX *lex= Lex;
- bool not_corr_func;
- not_corr_func= !lex->safe_to_cache_query;
- lex->safe_to_cache_query= 1;
- if (not_corr_func)
+ if (!Lex->safe_to_cache_query)
{
my_parse_error(thd, ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR);
MYSQL_YYABORT;
@@ -5131,6 +5244,27 @@ opt_part_values:
part_info->part_type= LIST_PARTITION;
}
part_values_in {}
+ | DEFAULT
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ if (! lex->is_partition_management())
+ {
+ if (part_info->part_type != LIST_PARTITION)
+ my_yyabort_error((ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "LIST", "DEFAULT"));
+ }
+ else
+ part_info->part_type= LIST_PARTITION;
+ if (part_info->init_column_part(thd))
+ {
+ MYSQL_YYABORT;
+ }
+ if (part_info->add_max_value(thd))
+ {
+ MYSQL_YYABORT;
+ }
+ }
;
part_func_max:
@@ -5415,8 +5549,15 @@ opt_part_option:
End of partition parser part
*/
-create_select:
- SELECT_SYM
+create_select_query_specification:
+ opt_with_clause SELECT_SYM create_select_part2 create_select_part3
+ create_select_part4
+ {
+ Select->set_with_clause($1);
+ }
+ ;
+
+create_select_part2:
{
LEX *lex=Lex;
if (lex->sql_command == SQLCOM_INSERT)
@@ -5435,7 +5576,20 @@ create_select:
{
Select->parsing_place= NO_MATTER;
}
- table_expression
+ ;
+
+create_select_part3:
+ opt_table_expression
+ | create_select_part3_union_not_ready
+ ;
+
+create_select_part3_union_not_ready:
+ table_expression order_or_limit
+ | order_or_limit
+ ;
+
+create_select_part4:
+ opt_select_lock_type
{
/*
The following work only with the local list, the global list
@@ -5849,13 +6003,16 @@ field_list:
;
field_list_item:
- column_def
+ column_def { }
| key_def
+ | constraint_def
;
column_def:
- field_spec opt_check_constraint
+ field_spec
+ { $$= $1; }
| field_spec references
+ { $$= $1; }
;
key_def:
@@ -5939,16 +6096,36 @@ key_def:
/* Only used for ALTER TABLE. Ignored otherwise. */
lex->alter_info.flags|= Alter_info::ADD_FOREIGN_KEY;
}
- | opt_constraint check_constraint { }
- ;
+ ;
+
+constraint_def:
+ opt_constraint check_constraint
+ {
+ Lex->add_constraint(&$1, $2, FALSE);
+ }
+ ;
opt_check_constraint:
- /* empty */
- | check_constraint
+ /* empty */ { $$= (Virtual_column_info*) 0; }
+ | check_constraint { $$= $1;}
;
check_constraint:
CHECK_SYM '(' expr ')'
+ {
+ Virtual_column_info *v=
+ add_virtual_expression(thd, $3);
+ if (!v)
+ {
+ MYSQL_YYABORT;
+ }
+ $$= v;
+ }
+ ;
+
+opt_constraint_no_id:
+ /* Empty */ {}
+ | CONSTRAINT {}
;
opt_constraint:
@@ -5974,29 +6151,59 @@ field_spec:
MYSQL_YYABORT;
lex->init_last_field(f, $1.str, NULL);
+ $<create_field>$= f;
}
- field_type { Lex->set_last_field_type($3); }
- field_def
+ field_type_or_serial opt_check_constraint
{
LEX *lex=Lex;
- Create_field *f= lex->last_field;
+ $$= $<create_field>2;
+
+ $$->check_constraint= $4;
- if (f->check(thd))
+ if ($$->check(thd))
MYSQL_YYABORT;
- lex->alter_info.create_list.push_back(f, thd->mem_root);
+ lex->alter_info.create_list.push_back($$, thd->mem_root);
- if (f->flags & PRI_KEY_FLAG)
+ $$->create_if_not_exists= Lex->check_exists;
+ if ($$->flags & PRI_KEY_FLAG)
add_key_to_list(lex, &$1, Key::PRIMARY, Lex->check_exists);
- else if (f->flags & (UNIQUE_FLAG | UNIQUE_KEY_FLAG))
+ else if ($$->flags & UNIQUE_KEY_FLAG)
add_key_to_list(lex, &$1, Key::UNIQUE, Lex->check_exists);
}
;
+field_type_or_serial:
+ field_type { Lex->set_last_field_type($1); } field_def
+ | SERIAL_SYM
+ {
+ Lex_field_type_st type;
+ type.set(MYSQL_TYPE_LONGLONG);
+ Lex->set_last_field_type(type);
+ Lex->last_field->flags|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG
+ | UNSIGNED_FLAG | UNIQUE_KEY_FLAG;
+ }
+ opt_serial_attribute
+ ;
+
+opt_serial_attribute:
+ /* empty */ {}
+ | opt_serial_attribute_list {}
+ ;
+
+opt_serial_attribute_list:
+ opt_serial_attribute_list serial_attribute {}
+ | serial_attribute
+ ;
+
+
field_def:
opt_attribute
- | opt_generated_always AS
- '(' virtual_column_func ')'
+ | opt_generated_always AS virtual_column_func
+ {
+ Lex->last_field->vcol_info= $3;
+ Lex->last_field->flags&= ~NOT_NULL_FLAG; // undo automatic NOT NULL for timestamps
+ }
vcol_opt_specifier vcol_opt_attribute
;
@@ -6018,6 +6225,10 @@ vcol_opt_specifier:
{
Lex->last_field->vcol_info->set_stored_in_db_flag(TRUE);
}
+ | STORED_SYM
+ {
+ Lex->last_field->vcol_info->set_stored_in_db_flag(TRUE);
+ }
;
vcol_opt_attribute:
@@ -6034,136 +6245,143 @@ vcol_attribute:
UNIQUE_SYM
{
LEX *lex=Lex;
- lex->last_field->flags|= UNIQUE_FLAG;
+ lex->last_field->flags|= UNIQUE_KEY_FLAG;
lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| UNIQUE_SYM KEY_SYM
{
LEX *lex=Lex;
- lex->last_field->flags|= UNIQUE_FLAG;
+ lex->last_field->flags|= UNIQUE_KEY_FLAG;
lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| COMMENT_SYM TEXT_STRING_sys { Lex->last_field->comment= $2; }
;
parse_vcol_expr:
- PARSE_VCOL_EXPR_SYM '(' virtual_column_func ')'
+ PARSE_VCOL_EXPR_SYM
{
/*
"PARSE_VCOL_EXPR" can only be used by the SQL server
when reading a '*.frm' file.
Prevent the end user from invoking this command.
*/
- if (!Lex->parse_vcol_expr)
- my_yyabort_error((ER_SYNTAX_ERROR, MYF(0)));
+ MYSQL_YYABORT_UNLESS(Lex->parse_vcol_expr);
+ }
+ expr
+ {
+ Virtual_column_info *v= add_virtual_expression(thd, $3);
+ if (!v)
+ MYSQL_YYABORT;
+ Lex->last_field->vcol_info= v;
}
;
+parenthesized_expr:
+ subselect
+ {
+ $$= new (thd->mem_root) Item_singlerow_subselect(thd, $1);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | expr
+ | expr ',' expr_list
+ {
+ $3->push_front($1, thd->mem_root);
+ $$= new (thd->mem_root) Item_row(thd, *$3);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
virtual_column_func:
- remember_name expr remember_end
+ '(' parenthesized_expr ')'
{
- Virtual_column_info *v= new (thd->mem_root) Virtual_column_info();
+ Virtual_column_info *v=
+ add_virtual_expression(thd, $2);
if (!v)
{
- mem_alloc_error(sizeof(Virtual_column_info));
MYSQL_YYABORT;
}
- uint expr_len= (uint)($3 - $1) - 1;
- v->expr_str.str= (char* ) thd->memdup($1 + 1, expr_len);
- v->expr_str.length= expr_len;
- v->expr_item= $2;
- Lex->last_field->vcol_info= v;
+ $$= v;
+ }
+ ;
+
+expr_or_literal: column_default_non_parenthesized_expr | signed_literal ;
+
+column_default_expr:
+ virtual_column_func
+ | expr_or_literal
+ {
+ if (!($$= add_virtual_expression(thd, $1)))
+ MYSQL_YYABORT;
}
;
field_type:
- int_type opt_field_length field_options { $$=$1; }
- | real_type opt_precision field_options { $$=$1; }
+ int_type opt_field_length field_options { $$.set($1, $2); }
+ | real_type opt_precision field_options { $$.set($1, $2); }
| FLOAT_SYM float_options field_options
{
- $$=MYSQL_TYPE_FLOAT;
- if (Lex->length && !Lex->dec)
+ $$.set(MYSQL_TYPE_FLOAT, $2);
+ if ($2.length() && !$2.dec())
{
int err;
- ulonglong tmp_length= my_strtoll10(Lex->length, NULL, &err);
+ ulonglong tmp_length= my_strtoll10($2.length(), NULL, &err);
if (err || tmp_length > PRECISION_FOR_DOUBLE)
my_yyabort_error((ER_WRONG_FIELD_SPEC, MYF(0),
Lex->last_field->field_name));
if (tmp_length > PRECISION_FOR_FLOAT)
- $$= MYSQL_TYPE_DOUBLE;
- Lex->length= 0;
+ $$.set(MYSQL_TYPE_DOUBLE);
+ else
+ $$.set(MYSQL_TYPE_FLOAT);
}
}
- | BIT_SYM
+ | BIT_SYM opt_field_length_default_1
{
- Lex->length= (char*) "1";
- $$=MYSQL_TYPE_BIT;
- }
- | BIT_SYM field_length
- {
- $$=MYSQL_TYPE_BIT;
+ $$.set(MYSQL_TYPE_BIT, $2);
}
| BOOL_SYM
{
- Lex->length= (char*) "1";
- $$=MYSQL_TYPE_TINY;
+ $$.set(MYSQL_TYPE_TINY, "1");
}
| BOOLEAN_SYM
{
- Lex->length= (char*) "1";
- $$=MYSQL_TYPE_TINY;
- }
- | char field_length opt_binary
- {
- $$=MYSQL_TYPE_STRING;
+ $$.set(MYSQL_TYPE_TINY, "1");
}
- | char opt_binary
+ | char opt_field_length_default_1 opt_binary
{
- Lex->length= (char*) "1";
- $$=MYSQL_TYPE_STRING;
+ $$.set(MYSQL_TYPE_STRING, $2);
}
- | nchar field_length opt_bin_mod
+ | nchar opt_field_length_default_1 opt_bin_mod
{
- $$=MYSQL_TYPE_STRING;
+ $$.set(MYSQL_TYPE_STRING, $2);
bincmp_collation(national_charset_info, $3);
}
- | nchar opt_bin_mod
- {
- Lex->length= (char*) "1";
- $$=MYSQL_TYPE_STRING;
- bincmp_collation(national_charset_info, $2);
- }
- | BINARY field_length
+ | BINARY opt_field_length_default_1
{
Lex->charset=&my_charset_bin;
- $$=MYSQL_TYPE_STRING;
- }
- | BINARY
- {
- Lex->length= (char*) "1";
- Lex->charset=&my_charset_bin;
- $$=MYSQL_TYPE_STRING;
+ $$.set(MYSQL_TYPE_STRING, $2);
}
| varchar field_length opt_binary
{
- $$= MYSQL_TYPE_VARCHAR;
+ $$.set(MYSQL_TYPE_VARCHAR, $2);
}
| nvarchar field_length opt_bin_mod
{
- $$= MYSQL_TYPE_VARCHAR;
+ $$.set(MYSQL_TYPE_VARCHAR, $2);
bincmp_collation(national_charset_info, $3);
}
| VARBINARY field_length
{
Lex->charset=&my_charset_bin;
- $$= MYSQL_TYPE_VARCHAR;
+ $$.set(MYSQL_TYPE_VARCHAR, $2);
}
| YEAR_SYM opt_field_length field_options
{
- if (Lex->length)
+ if ($2)
{
errno= 0;
- ulong length= strtoul(Lex->length, NULL, 10);
+ ulong length= strtoul($2, NULL, 10);
if (errno == 0 && length <= MAX_FIELD_BLOBLENGTH && length != 4)
{
char buff[sizeof("YEAR()") + MY_INT64_NUM_DECIMAL_DIGITS + 1];
@@ -6174,18 +6392,18 @@ field_type:
buff, "YEAR(4)");
}
}
- $$=MYSQL_TYPE_YEAR;
+ $$.set(MYSQL_TYPE_YEAR, $2);
}
| DATE_SYM
- { $$=MYSQL_TYPE_DATE; }
+ { $$.set(MYSQL_TYPE_DATE); }
| TIME_SYM opt_field_length
- { $$= opt_mysql56_temporal_format ?
- MYSQL_TYPE_TIME2 : MYSQL_TYPE_TIME; }
+ { $$.set(opt_mysql56_temporal_format ?
+ MYSQL_TYPE_TIME2 : MYSQL_TYPE_TIME, $2); }
| TIMESTAMP opt_field_length
{
if (thd->variables.sql_mode & MODE_MAXDB)
- $$= opt_mysql56_temporal_format ?
- MYSQL_TYPE_DATETIME2 : MYSQL_TYPE_DATETIME;
+ $$.set(opt_mysql56_temporal_format ?
+ MYSQL_TYPE_DATETIME2 : MYSQL_TYPE_DATETIME, $2);
else
{
/*
@@ -6194,29 +6412,29 @@ field_type:
*/
if (!opt_explicit_defaults_for_timestamp)
Lex->last_field->flags|= NOT_NULL_FLAG;
- $$= opt_mysql56_temporal_format ? MYSQL_TYPE_TIMESTAMP2
- : MYSQL_TYPE_TIMESTAMP;
+ $$.set(opt_mysql56_temporal_format ? MYSQL_TYPE_TIMESTAMP2
+ : MYSQL_TYPE_TIMESTAMP, $2);
}
}
| DATETIME opt_field_length
- { $$= opt_mysql56_temporal_format ?
- MYSQL_TYPE_DATETIME2 : MYSQL_TYPE_DATETIME; }
+ { $$.set(opt_mysql56_temporal_format ?
+ MYSQL_TYPE_DATETIME2 : MYSQL_TYPE_DATETIME, $2); }
| TINYBLOB
{
Lex->charset=&my_charset_bin;
- $$=MYSQL_TYPE_TINY_BLOB;
+ $$.set(MYSQL_TYPE_TINY_BLOB);
}
| BLOB_SYM opt_field_length
{
Lex->charset=&my_charset_bin;
- $$=MYSQL_TYPE_BLOB;
+ $$.set(MYSQL_TYPE_BLOB, $2);
}
| spatial_type float_options srid_option
{
#ifdef HAVE_SPATIAL
Lex->charset=&my_charset_bin;
Lex->last_field->geom_type= $1;
- $$=MYSQL_TYPE_GEOMETRY;
+ $$.set(MYSQL_TYPE_GEOMETRY, $2);
#else
my_yyabort_error((ER_FEATURE_DISABLED, MYF(0), sym_group_geom.name,
sym_group_geom.needed_define));
@@ -6225,57 +6443,51 @@ field_type:
| MEDIUMBLOB
{
Lex->charset=&my_charset_bin;
- $$=MYSQL_TYPE_MEDIUM_BLOB;
+ $$.set(MYSQL_TYPE_MEDIUM_BLOB);
}
| LONGBLOB
{
Lex->charset=&my_charset_bin;
- $$=MYSQL_TYPE_LONG_BLOB;
+ $$.set(MYSQL_TYPE_LONG_BLOB);
}
| LONG_SYM VARBINARY
{
Lex->charset=&my_charset_bin;
- $$=MYSQL_TYPE_MEDIUM_BLOB;
+ $$.set(MYSQL_TYPE_MEDIUM_BLOB);
}
| LONG_SYM varchar opt_binary
- { $$=MYSQL_TYPE_MEDIUM_BLOB; }
+ { $$.set(MYSQL_TYPE_MEDIUM_BLOB); }
| TINYTEXT opt_binary
- { $$=MYSQL_TYPE_TINY_BLOB; }
+ { $$.set(MYSQL_TYPE_TINY_BLOB); }
| TEXT_SYM opt_field_length opt_binary
- { $$=MYSQL_TYPE_BLOB; }
+ { $$.set(MYSQL_TYPE_BLOB, $2); }
| MEDIUMTEXT opt_binary
- { $$=MYSQL_TYPE_MEDIUM_BLOB; }
+ { $$.set(MYSQL_TYPE_MEDIUM_BLOB); }
| LONGTEXT opt_binary
- { $$=MYSQL_TYPE_LONG_BLOB; }
+ { $$.set(MYSQL_TYPE_LONG_BLOB); }
| DECIMAL_SYM float_options field_options
- { $$=MYSQL_TYPE_NEWDECIMAL;}
+ { $$.set(MYSQL_TYPE_NEWDECIMAL, $2);}
| NUMERIC_SYM float_options field_options
- { $$=MYSQL_TYPE_NEWDECIMAL;}
+ { $$.set(MYSQL_TYPE_NEWDECIMAL, $2);}
| FIXED_SYM float_options field_options
- { $$=MYSQL_TYPE_NEWDECIMAL;}
+ { $$.set(MYSQL_TYPE_NEWDECIMAL, $2);}
| ENUM '(' string_list ')' opt_binary
- { $$=MYSQL_TYPE_ENUM; }
+ { $$.set(MYSQL_TYPE_ENUM); }
| SET '(' string_list ')' opt_binary
- { $$=MYSQL_TYPE_SET; }
+ { $$.set(MYSQL_TYPE_SET); }
| LONG_SYM opt_binary
- { $$=MYSQL_TYPE_MEDIUM_BLOB; }
- | SERIAL_SYM
+ { $$.set(MYSQL_TYPE_MEDIUM_BLOB); }
+ | JSON_SYM
{
- $$=MYSQL_TYPE_LONGLONG;
- Lex->last_field->flags|= (AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNSIGNED_FLAG |
- UNIQUE_FLAG);
+ Lex->charset= &my_charset_utf8mb4_bin;
+ $$.set(MYSQL_TYPE_LONG_BLOB);
}
;
spatial_type:
GEOMETRY_SYM { $$= Field::GEOM_GEOMETRY; }
| GEOMETRYCOLLECTION { $$= Field::GEOM_GEOMETRYCOLLECTION; }
- | POINT_SYM
- {
- Lex->length= const_cast<char*>(STRINGIFY_ARG
- (MAX_LEN_GEOM_POINT_FIELD));
- $$= Field::GEOM_POINT;
- }
+ | POINT_SYM { $$= Field::GEOM_POINT; }
| MULTIPOINT { $$= Field::GEOM_MULTIPOINT; }
| LINESTRING { $$= Field::GEOM_LINESTRING; }
| MULTILINESTRING { $$= Field::GEOM_MULTILINESTRING; }
@@ -6336,53 +6548,41 @@ srid_option:
;
float_options:
- /* empty */
- { Lex->dec=Lex->length= (char*)0; }
- | field_length
- { Lex->dec= (char*)0; }
- | precision
- {}
+ /* empty */ { $$.set(0, 0); }
+ | field_length { $$.set($1, 0); }
+ | precision { $$= $1; }
;
precision:
- '(' NUM ',' NUM ')'
- {
- LEX *lex=Lex;
- lex->length=$2.str;
- lex->dec=$4.str;
- }
+ '(' NUM ',' NUM ')' { $$.set($2.str, $4.str); }
;
field_options:
/* empty */ {}
- | field_opt_list {}
- ;
-
-field_opt_list:
- field_opt_list field_option {}
- | field_option {}
- ;
-
-field_option:
- SIGNED_SYM {}
+ | SIGNED_SYM {}
| UNSIGNED { Lex->last_field->flags|= UNSIGNED_FLAG;}
| ZEROFILL { Lex->last_field->flags|= UNSIGNED_FLAG | ZEROFILL_FLAG; }
+ | UNSIGNED ZEROFILL { Lex->last_field->flags|= UNSIGNED_FLAG | ZEROFILL_FLAG; }
+ | ZEROFILL UNSIGNED { Lex->last_field->flags|= UNSIGNED_FLAG | ZEROFILL_FLAG; }
;
field_length:
- '(' LONG_NUM ')' { Lex->length= $2.str; }
- | '(' ULONGLONG_NUM ')' { Lex->length= $2.str; }
- | '(' DECIMAL_NUM ')' { Lex->length= $2.str; }
- | '(' NUM ')' { Lex->length= $2.str; };
+ '(' LONG_NUM ')' { $$= $2.str; }
+ | '(' ULONGLONG_NUM ')' { $$= $2.str; }
+ | '(' DECIMAL_NUM ')' { $$= $2.str; }
+ | '(' NUM ')' { $$= $2.str; };
opt_field_length:
- /* empty */ { Lex->length=(char*) 0; /* use default length */ }
- | field_length { }
- ;
+ /* empty */ { $$= (char*) 0; /* use default length */ }
+ | field_length { $$= $1; }
+
+opt_field_length_default_1:
+ /* empty */ { $$= (char*) "1"; }
+ | field_length { $$= $1; }
opt_precision:
- /* empty */ {}
- | precision {}
+ /* empty */ { $$.set(0, 0); }
+ | precision { $$= $1; }
;
opt_attribute:
@@ -6397,8 +6597,7 @@ opt_attribute_list:
attribute:
NULL_SYM { Lex->last_field->flags&= ~ NOT_NULL_FLAG; }
- | not NULL_SYM { Lex->last_field->flags|= NOT_NULL_FLAG; }
- | DEFAULT now_or_signed_literal { Lex->last_field->def= $2; }
+ | DEFAULT column_default_expr { Lex->last_field->default_value= $2; }
| ON UPDATE_SYM NOW_SYM opt_default_time_precision
{
Item *item= new (thd->mem_root) Item_func_now_local(thd, $4);
@@ -6410,28 +6609,9 @@ attribute:
| SERIAL_SYM DEFAULT VALUE_SYM
{
LEX *lex=Lex;
- lex->last_field->flags|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNIQUE_FLAG;
- lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
- }
- | opt_primary KEY_SYM
- {
- LEX *lex=Lex;
- lex->last_field->flags|= PRI_KEY_FLAG | NOT_NULL_FLAG;
+ lex->last_field->flags|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNIQUE_KEY_FLAG;
lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
- | UNIQUE_SYM
- {
- LEX *lex=Lex;
- lex->last_field->flags|= UNIQUE_FLAG;
- lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
- }
- | UNIQUE_SYM KEY_SYM
- {
- LEX *lex=Lex;
- lex->last_field->flags|= UNIQUE_KEY_FLAG;
- lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
- }
- | COMMENT_SYM TEXT_STRING_sys { Lex->last_field->comment= $2; }
| COLLATE_SYM collation_name
{
if (Lex->charset && !my_charset_same(Lex->charset,$2))
@@ -6439,6 +6619,18 @@ attribute:
$2->name,Lex->charset->csname));
Lex->last_field->charset= $2;
}
+ | serial_attribute
+ ;
+
+serial_attribute:
+ not NULL_SYM { Lex->last_field->flags|= NOT_NULL_FLAG; }
+ | opt_primary KEY_SYM
+ {
+ LEX *lex=Lex;
+ lex->last_field->flags|= PRI_KEY_FLAG | NOT_NULL_FLAG;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
+ }
+ | vcol_attribute
| IDENT_sys equal TEXT_STRING_sys
{
if ($3.length > ENGINE_OPTION_MAX_LENGTH)
@@ -6483,18 +6675,6 @@ type_with_opt_collate:
}
;
-
-now_or_signed_literal:
- NOW_SYM opt_default_time_precision
- {
- $$= new (thd->mem_root) Item_func_now_local(thd, $2);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
- | signed_literal
- { $$=$1; }
- ;
-
charset:
CHAR_SYM SET {}
| CHARSET {}
@@ -7115,6 +7295,13 @@ alter:
lex->sql_command= SQLCOM_ALTER_SERVER;
lex->server_options.reset($3);
} OPTIONS_SYM '(' server_options_list ')' { }
+ /* ALTER USER foo is allowed for MySQL compatibility. */
+ | ALTER opt_if_exists USER_SYM clear_privileges grant_list
+ opt_require_clause opt_resource_options
+ {
+ Lex->create_info.set($2);
+ Lex->sql_command= SQLCOM_ALTER_USER;
+ }
;
ev_alter_on_schedule_completion:
@@ -7172,7 +7359,7 @@ alter_commands:
| remove_partitioning
| partitioning
/*
- This part was added for release 5.1 by Mikael Ronström.
+ This part was added for release 5.1 by Mikael Ronström.
From here we insert a number of commands to manage the partitions of a
partitioned table such as adding partitions, dropping partitions,
reorganising partitions in various manners. In future releases the list
@@ -7402,6 +7589,7 @@ alter_list_item:
add_column column_def opt_place
{
Lex->create_last_non_select_table= Lex->last_table();
+ $2->after= $3;
}
| ADD key_def
{
@@ -7413,19 +7601,31 @@ alter_list_item:
Lex->alter_info.flags|= Alter_info::ALTER_ADD_COLUMN |
Alter_info::ALTER_ADD_INDEX;
}
+ | ADD constraint_def
+ {
+ Lex->alter_info.flags|= Alter_info::ALTER_ADD_CHECK_CONSTRAINT;
+ }
+ | ADD CONSTRAINT IF_SYM not EXISTS field_ident check_constraint
+ {
+ Lex->alter_info.flags|= Alter_info::ALTER_ADD_CHECK_CONSTRAINT;
+ Lex->add_constraint(&$6, $7, TRUE);
+ }
| CHANGE opt_column opt_if_exists_table_element field_ident
field_spec opt_place
{
- Lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN;
+ Lex->alter_info.flags|= (Alter_info::ALTER_CHANGE_COLUMN |
+ Alter_info::ALTER_RENAME_COLUMN);
Lex->create_last_non_select_table= Lex->last_table();
- Lex->last_field->change= $4.str;
+ $5->change= $4.str;
+ $5->after= $6;
}
| MODIFY_SYM opt_column opt_if_exists_table_element
field_spec opt_place
{
Lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN;
Lex->create_last_non_select_table= Lex->last_table();
- Lex->last_field->change= Lex->last_field->field_name;
+ $4->change= $4->field_name;
+ $4->after= $5;
}
| DROP opt_column opt_if_exists_table_element field_ident opt_restrict
{
@@ -7437,6 +7637,17 @@ alter_list_item:
lex->alter_info.drop_list.push_back(ad, thd->mem_root);
lex->alter_info.flags|= Alter_info::ALTER_DROP_COLUMN;
}
+ | DROP CONSTRAINT opt_if_exists_table_element field_ident
+ {
+ LEX *lex=Lex;
+ Alter_drop *ad= (new (thd->mem_root)
+ Alter_drop(Alter_drop::CHECK_CONSTRAINT,
+ $4.str, $3));
+ if (ad == NULL)
+ MYSQL_YYABORT;
+ lex->alter_info.drop_list.push_back(ad, thd->mem_root);
+ lex->alter_info.flags|= Alter_info::ALTER_DROP_CHECK_CONSTRAINT;
+ }
| DROP FOREIGN KEY_SYM opt_if_exists_table_element field_ident
{
LEX *lex=Lex;
@@ -7447,7 +7658,7 @@ alter_list_item:
lex->alter_info.drop_list.push_back(ad, thd->mem_root);
lex->alter_info.flags|= Alter_info::DROP_FOREIGN_KEY;
}
- | DROP PRIMARY_SYM KEY_SYM
+ | DROP opt_constraint_no_id PRIMARY_SYM KEY_SYM
{
LEX *lex=Lex;
Alter_drop *ad= (new (thd->mem_root)
@@ -7480,7 +7691,7 @@ alter_list_item:
lex->alter_info.keys_onoff= Alter_info::ENABLE;
lex->alter_info.flags|= Alter_info::ALTER_KEYS_ONOFF;
}
- | ALTER opt_column field_ident SET DEFAULT signed_literal
+ | ALTER opt_column field_ident SET DEFAULT column_default_expr
{
LEX *lex=Lex;
Alter_column *ac= new (thd->mem_root) Alter_column($3.str,$6);
@@ -7493,7 +7704,7 @@ alter_list_item:
{
LEX *lex=Lex;
Alter_column *ac= (new (thd->mem_root)
- Alter_column($3.str, (Item*) 0));
+ Alter_column($3.str, (Virtual_column_info*) 0));
if (ac == NULL)
MYSQL_YYABORT;
lex->alter_info.alter_list.push_back(ac, thd->mem_root);
@@ -7625,15 +7836,15 @@ opt_restrict:
;
opt_place:
- /* empty */ {}
+ /* empty */ { $$= NULL; }
| AFTER_SYM ident
{
- store_position_for_column($2.str);
+ $$= $2.str;
Lex->alter_info.flags |= Alter_info::ALTER_COLUMN_ORDER;
}
| FIRST_SYM
{
- store_position_for_column(first_keyword);
+ $$= first_keyword;
Lex->alter_info.flags |= Alter_info::ALTER_COLUMN_ORDER;
}
;
@@ -7660,6 +7871,7 @@ slave:
LEX *lex=Lex;
lex->sql_command = SQLCOM_SLAVE_ALL_START;
lex->type = 0;
+ /* If you change this code don't forget to update STOP SLAVE too */
}
{}
| STOP_SYM SLAVE optional_connection_name slave_thread_opts
@@ -7775,7 +7987,7 @@ checksum:
{
LEX *lex=Lex;
lex->sql_command = SQLCOM_CHECKSUM;
- /* Will be overriden during execution. */
+ /* Will be overridden during execution. */
YYPS->m_lock_type= TL_UNLOCK;
}
table_list opt_checksum_type
@@ -7801,7 +8013,7 @@ repair:
lex->no_write_to_binlog= $2;
lex->check_opt.init();
lex->alter_info.reset();
- /* Will be overriden during execution. */
+ /* Will be overridden during execution. */
YYPS->m_lock_type= TL_UNLOCK;
}
repair_table_or_view
@@ -7843,7 +8055,7 @@ analyze:
lex->no_write_to_binlog= $2;
lex->check_opt.init();
lex->alter_info.reset();
- /* Will be overriden during execution. */
+ /* Will be overridden during execution. */
YYPS->m_lock_type= TL_UNLOCK;
}
analyze_table_list
@@ -7974,7 +8186,7 @@ check: CHECK_SYM
lex->sql_command = SQLCOM_CHECK;
lex->check_opt.init();
lex->alter_info.reset();
- /* Will be overriden during execution. */
+ /* Will be overridden during execution. */
YYPS->m_lock_type= TL_UNLOCK;
}
check_view_or_table
@@ -8021,7 +8233,7 @@ optimize:
lex->no_write_to_binlog= $2;
lex->check_opt.init();
lex->alter_info.reset();
- /* Will be overriden during execution. */
+ /* Will be overridden during execution. */
YYPS->m_lock_type= TL_UNLOCK;
}
table_list
@@ -8209,17 +8421,26 @@ opt_ignore_leaves:
select:
- select_init
+ opt_with_clause select_init
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SELECT;
+ lex->current_select->set_with_clause($1);
}
;
-/* Need select_init2 for subselects. */
select_init:
- SELECT_SYM select_init2
- | '(' select_paren ')' union_opt
+ SELECT_SYM select_options_and_item_list select_init3
+ | '(' select_paren ')'
+ | '(' select_paren ')' union_list
+ | '(' select_paren ')' union_order_or_limit
+ ;
+
+union_list_part2:
+ SELECT_SYM select_options_and_item_list select_init3_union_query_term
+ | '(' select_paren_union_query_term ')'
+ | '(' select_paren_union_query_term ')' union_list
+ | '(' select_paren_union_query_term ')' union_order_or_limit
;
select_paren:
@@ -8230,69 +8451,151 @@ select_paren:
*/
Lex->current_select->set_braces(true);
}
- SELECT_SYM select_part2
+ SELECT_SYM select_options_and_item_list select_part3
+ opt_select_lock_type
{
- if (setup_select_in_parentheses(Lex))
- MYSQL_YYABORT;
+ DBUG_ASSERT(Lex->current_select->braces);
}
| '(' select_paren ')'
;
+select_paren_union_query_term:
+ {
+ /*
+ In order to correctly parse UNION's global ORDER BY we need to
+ set braces before parsing the clause.
+ */
+ Lex->current_select->set_braces(true);
+ }
+ SELECT_SYM select_options_and_item_list select_part3_union_query_term
+ opt_select_lock_type
+ {
+ DBUG_ASSERT(Lex->current_select->braces);
+ }
+ | '(' select_paren_union_query_term ')'
+ ;
+
+select_paren_view:
+ {
+ /*
+ In order to correctly parse UNION's global ORDER BY we need to
+ set braces before parsing the clause.
+ */
+ Lex->current_select->set_braces(true);
+ }
+ SELECT_SYM select_options_and_item_list select_part3_view
+ opt_select_lock_type
+ {
+ DBUG_ASSERT(Lex->current_select->braces);
+ }
+ | '(' select_paren_view ')'
+ ;
+
/* The equivalent of select_paren for nested queries. */
select_paren_derived:
{
Lex->current_select->set_braces(true);
}
SELECT_SYM select_part2_derived
- table_expression
+ opt_table_expression
+ opt_order_clause
+ opt_limit_clause
+ opt_select_lock_type
{
- if (setup_select_in_parentheses(Lex))
- MYSQL_YYABORT;
+ DBUG_ASSERT(Lex->current_select->braces);
+ $$= Lex->current_select->master_unit()->first_select();
}
- | '(' select_paren_derived ')'
+ | '(' select_paren_derived ')' { $$= $2; }
;
-select_init2:
- select_part2
+select_init3:
+ opt_table_expression
+ opt_select_lock_type
{
- LEX *lex= Lex;
/* Parentheses carry no meaning here */
- lex->current_select->set_braces(false);
+ Lex->current_select->set_braces(false);
}
union_clause
+ | select_part3_union_not_ready
+ opt_select_lock_type
+ {
+ /* Parentheses carry no meaning here */
+ Lex->current_select->set_braces(false);
+ }
;
-/*
- Theoretically we can merge all 3 right hand sides of the select_part2
- rule into one, however such a transformation adds one shift/reduce
- conflict more.
-*/
-select_part2:
- select_options_and_item_list
- opt_order_clause
- opt_limit_clause
+
+select_init3_union_query_term:
+ opt_table_expression
opt_select_lock_type
- | select_options_and_item_list into opt_select_lock_type
- | select_options_and_item_list
- opt_into
- from_clause
- opt_where_clause
- opt_group_clause
- opt_having_clause
- opt_order_clause
- opt_limit_clause
- opt_procedure_clause
- opt_into
+ {
+ /* Parentheses carry no meaning here */
+ Lex->current_select->set_braces(false);
+ }
+ union_clause
+ | select_part3_union_not_ready_noproc
opt_select_lock_type
{
- if ($2 && $10) /* double "INTO" clause */
- my_yyabort_error((ER_WRONG_USAGE, MYF(0), "INTO", "INTO"));
+ /* Parentheses carry no meaning here */
+ Lex->current_select->set_braces(false);
+ }
+ ;
- if ($9 && ($2 || $10)) /* "INTO" with "PROCEDURE ANALYSE" */
- my_yyabort_error((ER_WRONG_USAGE, MYF(0), "PROCEDURE", "INTO"));
+
+select_init3_view:
+ opt_table_expression opt_select_lock_type
+ {
+ Lex->current_select->set_braces(false);
+ }
+ | opt_table_expression opt_select_lock_type
+ {
+ Lex->current_select->set_braces(false);
+ }
+ union_list_view
+ | order_or_limit opt_select_lock_type
+ {
+ Lex->current_select->set_braces(false);
+ }
+ | table_expression order_or_limit opt_select_lock_type
+ {
+ Lex->current_select->set_braces(false);
}
;
+/*
+ The SELECT parts after select_item_list that cannot be followed by UNION.
+*/
+
+select_part3:
+ opt_table_expression
+ | select_part3_union_not_ready
+ ;
+
+select_part3_union_query_term:
+ opt_table_expression
+ | select_part3_union_not_ready_noproc
+ ;
+
+select_part3_view:
+ opt_table_expression
+ | order_or_limit
+ | table_expression order_or_limit
+ ;
+
+select_part3_union_not_ready:
+ select_part3_union_not_ready_noproc
+ | table_expression procedure_clause
+ | table_expression order_or_limit procedure_clause
+ ;
+
+select_part3_union_not_ready_noproc:
+ order_or_limit
+ | into opt_table_expression opt_order_clause opt_limit_clause
+ | table_expression into
+ | table_expression order_or_limit
+ | table_expression order_or_limit into
+ ;
+
select_options_and_item_list:
{
LEX *lex= Lex;
@@ -8307,24 +8610,25 @@ select_options_and_item_list:
}
;
+
+/**
+ <table expression>, as in the SQL standard.
+*/
table_expression:
- opt_from_clause
+ from_clause
opt_where_clause
opt_group_clause
opt_having_clause
- opt_order_clause
- opt_limit_clause
- opt_procedure_clause
- opt_select_lock_type
+ opt_window_clause
;
-from_clause:
- FROM table_reference_list
+opt_table_expression:
+ /* Empty */
+ | table_expression
;
-opt_from_clause:
- /* empty */
- | from_clause
+from_clause:
+ FROM table_reference_list
;
table_reference_list:
@@ -8446,11 +8750,11 @@ select_item:
check_column_name($4.str))
my_yyabort_error((ER_WRONG_COLUMN_NAME, MYF(0), $4.str));
$2->is_autogenerated_name= FALSE;
- $2->set_name($4.str, $4.length, system_charset_info);
+ $2->set_name(thd, $4.str, $4.length, system_charset_info);
}
else if (!$2->name)
{
- $2->set_name($1, (uint) ($3 - $1), thd->charset());
+ $2->set_name(thd, $1, (uint) ($3 - $1), thd->charset());
}
}
;
@@ -8461,6 +8765,12 @@ remember_tok_start:
}
;
+remember_tok_end:
+ {
+ $$= (char*) YYLIP->get_tok_end();
+ }
+ ;
+
remember_name:
{
$$= (char*) YYLIP->get_cpp_tok_start();
@@ -8723,8 +9033,7 @@ predicate:
Item_func_in *item= new (thd->mem_root) Item_func_in(thd, *$7);
if (item == NULL)
MYSQL_YYABORT;
- item->negate();
- $$= item;
+ $$= item->neg_transformer(thd);
}
| bit_expr BETWEEN_SYM bit_expr AND_SYM predicate
{
@@ -8738,8 +9047,7 @@ predicate:
item= new (thd->mem_root) Item_func_between(thd, $1, $4, $6);
if (item == NULL)
MYSQL_YYABORT;
- item->negate();
- $$= item;
+ $$= item->neg_transformer(thd);
}
| bit_expr SOUNDS_SYM LIKE bit_expr
{
@@ -8764,9 +9072,7 @@ predicate:
Lex->escape_used);
if (item == NULL)
MYSQL_YYABORT;
- $$= new (thd->mem_root) Item_func_not(thd, item);
- if ($$ == NULL)
- MYSQL_YYABORT;
+ $$= item->neg_transformer(thd);
}
| bit_expr REGEXP bit_expr
{
@@ -8911,92 +9217,44 @@ all_or_any:
opt_dyncol_type:
/* empty */
{
- LEX *lex= Lex;
- $$= DYN_COL_NULL; /* automatic type */
- lex->charset= NULL;
- lex->length= lex->dec= 0;
+ $$.set(DYN_COL_NULL); /* automatic type */
+ Lex->charset= NULL;
}
| AS dyncol_type { $$= $2; }
;
dyncol_type:
- INT_SYM
- {
- LEX *lex= Lex;
- $$= DYN_COL_INT;
- lex->charset= NULL;
- lex->length= lex->dec= 0;
- }
- | UNSIGNED INT_SYM
- {
- LEX *lex= Lex;
- $$= DYN_COL_UINT;
- lex->charset= NULL;
- lex->length= lex->dec= 0;
- }
- | DOUBLE_SYM
- {
- LEX *lex= Lex;
- $$= DYN_COL_DOUBLE;
- lex->charset= NULL;
- lex->length= lex->dec= 0;
- }
- | REAL
- {
- LEX *lex= Lex;
- $$= DYN_COL_DOUBLE;
- lex->charset= NULL;
- lex->length= lex->dec= 0;
- }
- | FLOAT_SYM
- {
- LEX *lex= Lex;
- $$= DYN_COL_DOUBLE;
- lex->charset= NULL;
- lex->length= lex->dec= 0;
- }
- | DECIMAL_SYM float_options
- {
- $$= DYN_COL_DECIMAL;
- Lex->charset= NULL;
- }
- | char
+ numeric_dyncol_type { $$= $1; Lex->charset= NULL; }
+ | temporal_dyncol_type { $$= $1; Lex->charset= NULL; }
+ | string_dyncol_type { $$= $1; }
+ ;
+
+numeric_dyncol_type:
+ INT_SYM { $$.set(DYN_COL_INT); }
+ | UNSIGNED INT_SYM { $$.set(DYN_COL_UINT); }
+ | DOUBLE_SYM { $$.set(DYN_COL_DOUBLE); }
+ | REAL { $$.set(DYN_COL_DOUBLE); }
+ | FLOAT_SYM { $$.set(DYN_COL_DOUBLE); }
+ | DECIMAL_SYM float_options { $$.set(DYN_COL_DECIMAL, $2); }
+ ;
+
+temporal_dyncol_type:
+ DATE_SYM { $$.set(DYN_COL_DATE); }
+ | TIME_SYM opt_field_length { $$.set(DYN_COL_TIME, 0, $2); }
+ | DATETIME opt_field_length { $$.set(DYN_COL_DATETIME, 0, $2); }
+ ;
+
+string_dyncol_type:
+ char
{ Lex->charset= thd->variables.collation_connection; }
opt_binary
{
- LEX *lex= Lex;
- $$= DYN_COL_STRING;
- lex->length= lex->dec= 0;
+ $$.set(DYN_COL_STRING);
}
| nchar
{
- LEX *lex= Lex;
- $$= DYN_COL_STRING;
- lex->charset= national_charset_info;
- lex->length= lex->dec= 0;
- }
- | DATE_SYM
- {
- LEX *lex= Lex;
- $$= DYN_COL_DATE;
- lex->charset= NULL;
- lex->length= lex->dec= 0;
- }
- | TIME_SYM opt_field_length
- {
- LEX *lex= Lex;
- $$= DYN_COL_TIME;
- lex->charset= NULL;
- lex->dec= lex->length;
- lex->length= 0;
- }
- | DATETIME opt_field_length
- {
- LEX *lex= Lex;
- $$= DYN_COL_DATETIME;
- lex->charset= NULL;
- lex->dec= lex->length;
- lex->length= 0;
+ $$.set(DYN_COL_STRING);
+ Lex->charset= national_charset_info;
}
;
@@ -9010,14 +9268,14 @@ dyncall_create_element:
MYSQL_YYABORT;
$$->key= $1;
$$->value= $3;
- $$->type= (DYNAMIC_COLUMN_TYPE)$4;
+ $$->type= (DYNAMIC_COLUMN_TYPE)$4.dyncol_type();
$$->cs= lex->charset;
- if (lex->length)
- $$->len= strtoul(lex->length, NULL, 10);
+ if ($4.length())
+ $$->len= strtoul($4.length(), NULL, 10);
else
$$->len= 0;
- if (lex->dec)
- $$->frac= strtoul(lex->dec, NULL, 10);
+ if ($4.dec())
+ $$->frac= strtoul($4.dec(), NULL, 10);
else
$$->len= 0;
}
@@ -9037,70 +9295,45 @@ dyncall_create_list:
}
;
-simple_expr:
+/*
+ Expressions that the parser allows in a column DEFAULT clause
+ without parentheses. These expressions cannot end with a COLLATE clause.
+
+ If we allowed any "expr" in DEFAULT clause, there would be a confusion
+ in queries like this:
+ CREATE TABLE t1 (a TEXT DEFAULT 'a' COLLATE latin1_bin);
+ It would be not clear what COLLATE stands for:
+ - the collation of the column `a`, or
+ - the collation of the string literal 'a'
+
+ This restriction allows parsing the above query unambiguously:
+ COLLATE belongs to the column rather than the literal.
+ If one needs COLLATE to belong to the literal, parentheses must be used:
+ CREATE TABLE t1 (a TEXT DEFAULT ('a' COLLATE latin1_bin));
+ Note: the COLLATE clause is rather meaningless here, but the query
+ is syntactically correct.
+
+ Note, some of the expressions are not actually allowed in DEFAULT,
+ e.g. sum_expr, window_func_expr, ROW(...), VALUES().
+ We could move them to simple_expr, but that would make
+ these two queries return a different error messages:
+ CREATE TABLE t1 (a INT DEFAULT AVG(1));
+ CREATE TABLE t1 (a INT DEFAULT (AVG(1)));
+ The first query would return "syntax error".
+ Currently both return:
+ Function or expression 'avg(' is not allowed for 'DEFAULT' ...
+*/
+column_default_non_parenthesized_expr:
simple_ident
| function_call_keyword
| function_call_nonkeyword
| function_call_generic
| function_call_conflict
- | simple_expr COLLATE_SYM ident_or_text %prec NEG
- {
- Item *i1= new (thd->mem_root) Item_string(thd, $3.str,
- $3.length,
- thd->charset());
- if (i1 == NULL)
- MYSQL_YYABORT;
- $$= new (thd->mem_root) Item_func_set_collation(thd, $1, i1);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
| literal
| param_marker { $$= $1; }
| variable
| sum_expr
- | simple_expr OR_OR_SYM simple_expr
- {
- $$= new (thd->mem_root) Item_func_concat(thd, $1, $3);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
- | '+' simple_expr %prec NEG
- {
- $$= $2;
- }
- | '-' simple_expr %prec NEG
- {
- $$= new (thd->mem_root) Item_func_neg(thd, $2);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
- | '~' simple_expr %prec NEG
- {
- $$= new (thd->mem_root) Item_func_bit_neg(thd, $2);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
- | not2 simple_expr %prec NEG
- {
- $$= negate_expression(thd, $2);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
- | '(' subselect ')'
- {
- $$= new (thd->mem_root) Item_singlerow_subselect(thd, $2);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
- | '(' expr ')'
- { $$= $2; }
- | '(' expr ',' expr_list ')'
- {
- $4->push_front($2, thd->mem_root);
- $$= new (thd->mem_root) Item_row(thd, *$4);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
+ | window_func_expr
| ROW_SYM '(' expr ',' expr_list ')'
{
$5->push_front($3, thd->mem_root);
@@ -9148,17 +9381,10 @@ simple_expr:
Select->add_ftfunc_to_list(thd, i1);
$$= i1;
}
- | BINARY simple_expr %prec NEG
- {
- $$= create_func_cast(thd, $2, ITEM_CAST_CHAR, NULL, NULL,
- &my_charset_bin);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
| CAST_SYM '(' expr AS cast_type ')'
{
LEX *lex= Lex;
- $$= create_func_cast(thd, $3, $5, lex->length, lex->dec,
+ $$= create_func_cast(thd, $3, $5.type(), $5.length(), $5.dec(),
lex->charset);
if ($$ == NULL)
MYSQL_YYABORT;
@@ -9171,7 +9397,7 @@ simple_expr:
}
| CONVERT_SYM '(' expr ',' cast_type ')'
{
- $$= create_func_cast(thd, $3, $5, Lex->length, Lex->dec,
+ $$= create_func_cast(thd, $3, $5.type(), $5.length(), $5.dec(),
Lex->charset);
if ($$ == NULL)
MYSQL_YYABORT;
@@ -9199,6 +9425,57 @@ simple_expr:
if ($$ == NULL)
MYSQL_YYABORT;
}
+ ;
+
+simple_expr:
+ column_default_non_parenthesized_expr
+ | simple_expr COLLATE_SYM ident_or_text %prec NEG
+ {
+ Item *i1= new (thd->mem_root) Item_string(thd, $3.str,
+ $3.length,
+ thd->charset());
+ if (i1 == NULL)
+ MYSQL_YYABORT;
+ $$= new (thd->mem_root) Item_func_set_collation(thd, $1, i1);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | '(' parenthesized_expr ')' { $$= $2; }
+ | BINARY simple_expr %prec NEG
+ {
+ $$= create_func_cast(thd, $2, ITEM_CAST_CHAR, NULL, NULL,
+ &my_charset_bin);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | simple_expr OR_OR_SYM simple_expr
+ {
+ $$= new (thd->mem_root) Item_func_concat(thd, $1, $3);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | '+' simple_expr %prec NEG
+ {
+ $$= $2;
+ }
+ | '-' simple_expr %prec NEG
+ {
+ $$= $2->neg(thd);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | '~' simple_expr %prec NEG
+ {
+ $$= new (thd->mem_root) Item_func_bit_neg(thd, $2);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | not2 simple_expr %prec NEG
+ {
+ $$= negate_expression(thd, $2);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
| INTERVAL_SYM expr interval '+' expr %prec INTERVAL_SYM
/* we cannot put interval before - */
{
@@ -9607,8 +9884,8 @@ function_call_nonkeyword:
COLUMN_GET_SYM '(' expr ',' expr AS cast_type ')'
{
LEX *lex= Lex;
- $$= create_func_dyncol_get(thd, $3, $5, $7,
- lex->length, lex->dec,
+ $$= create_func_dyncol_get(thd, $3, $5, $7.type(),
+ $7.length(), $7.dec(),
lex->charset);
if ($$ == NULL)
MYSQL_YYABORT;
@@ -9670,8 +9947,23 @@ function_call_conflict:
if ($$ == NULL)
MYSQL_YYABORT;
}
- | LAST_VALUE '(' expr_list ')'
+ /* LAST_VALUE here conflicts with the definition for window functions.
+ We have these 2 separate rules to remove the shift/reduce conflict.
+ */
+ | LAST_VALUE '(' expr ')'
{
+ List<Item> *list= new (thd->mem_root) List<Item>;
+ if (list == NULL)
+ MYSQL_YYABORT;
+ list->push_back($3, thd->mem_root);
+
+ $$= new (thd->mem_root) Item_func_last_value(thd, *list);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | LAST_VALUE '(' expr_list ',' expr ')'
+ {
+ $3->push_back($5, thd->mem_root);
$$= new (thd->mem_root) Item_func_last_value(thd, *$3);
if ($$ == NULL)
MYSQL_YYABORT;
@@ -9743,15 +10035,7 @@ function_call_conflict:
}
| WEEK_SYM '(' expr ')'
{
- Item *i1;
- LEX_STRING name= {C_STRING_WITH_LEN("default_week_format")};
- if (!(i1= get_system_var(thd, OPT_SESSION,
- name, null_lex_str)))
- MYSQL_YYABORT;
- i1->set_name((const char *)
- STRING_WITH_LEN("@@default_week_format"),
- system_charset_info);
- $$= new (thd->mem_root) Item_func_week(thd, $3, i1);
+ $$= new (thd->mem_root) Item_func_week(thd, $3);
if ($$ == NULL)
MYSQL_YYABORT;
}
@@ -10031,7 +10315,7 @@ udf_expr:
if ($4.str)
{
$2->is_autogenerated_name= FALSE;
- $2->set_name($4.str, $4.length, system_charset_info);
+ $2->set_name(thd, $4.str, $4.length, system_charset_info);
}
/*
A field has to have its proper name in order for name
@@ -10041,7 +10325,7 @@ udf_expr:
*/
else if ($2->type() != Item::FIELD_ITEM &&
$2->type() != Item::REF_ITEM /* For HAVING */ )
- $2->set_name($1, (uint) ($3 - $1), thd->charset());
+ $2->set_name(thd, $1, (uint) ($3 - $1), thd->charset());
$$= $2;
}
;
@@ -10185,6 +10469,152 @@ sum_expr:
}
;
+window_func_expr:
+ window_func OVER_SYM window_name
+ {
+ $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, $3);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ if (Select->add_window_func((Item_window_func *) $$))
+ MYSQL_YYABORT;
+ }
+ |
+ window_func OVER_SYM window_spec
+ {
+ LEX *lex= Lex;
+ if (Select->add_window_spec(thd, lex->win_ref,
+ Select->group_list,
+ Select->order_list,
+ lex->win_frame))
+ MYSQL_YYABORT;
+ $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1,
+ thd->lex->win_spec);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ if (Select->add_window_func((Item_window_func *) $$))
+ MYSQL_YYABORT;
+ }
+ ;
+
+window_func:
+ simple_window_func
+ |
+ sum_expr
+ {
+ ((Item_sum *) $1)->mark_as_window_func_sum_expr();
+ }
+ ;
+
+simple_window_func:
+ ROW_NUMBER_SYM '(' ')'
+ {
+ $$= new (thd->mem_root) Item_sum_row_number(thd);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ RANK_SYM '(' ')'
+ {
+ $$= new (thd->mem_root) Item_sum_rank(thd);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ DENSE_RANK_SYM '(' ')'
+ {
+ $$= new (thd->mem_root) Item_sum_dense_rank(thd);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ PERCENT_RANK_SYM '(' ')'
+ {
+ $$= new (thd->mem_root) Item_sum_percent_rank(thd);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ CUME_DIST_SYM '(' ')'
+ {
+ $$= new (thd->mem_root) Item_sum_cume_dist(thd);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ NTILE_SYM '(' expr ')'
+ {
+ $$= new (thd->mem_root) Item_sum_ntile(thd, $3);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ FIRST_VALUE_SYM '(' expr ')'
+ {
+ $$= new (thd->mem_root) Item_sum_first_value(thd, $3);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ LAST_VALUE '(' expr ')'
+ {
+ $$= new (thd->mem_root) Item_sum_last_value(thd, $3);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ NTH_VALUE_SYM '(' expr ',' expr ')'
+ {
+ $$= new (thd->mem_root) Item_sum_nth_value(thd, $3, $5);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ LEAD_SYM '(' expr ')'
+ {
+ /* No second argument defaults to 1. */
+ Item* item_offset= new (thd->mem_root) Item_uint(thd, 1);
+ if (item_offset == NULL)
+ MYSQL_YYABORT;
+ $$= new (thd->mem_root) Item_sum_lead(thd, $3, item_offset);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ LEAD_SYM '(' expr ',' expr ')'
+ {
+ $$= new (thd->mem_root) Item_sum_lead(thd, $3, $5);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ LAG_SYM '(' expr ')'
+ {
+ /* No second argument defaults to 1. */
+ Item* item_offset= new (thd->mem_root) Item_uint(thd, 1);
+ if (item_offset == NULL)
+ MYSQL_YYABORT;
+ $$= new (thd->mem_root) Item_sum_lag(thd, $3, item_offset);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ |
+ LAG_SYM '(' expr ',' expr ')'
+ {
+ $$= new (thd->mem_root) Item_sum_lag(thd, $3, $5);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+window_name:
+ ident
+ {
+ $$= (LEX_STRING *) thd->memdup(&$1, sizeof(LEX_STRING));
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
variable:
'@'
{
@@ -10248,19 +10678,7 @@ opt_gconcat_separator:
opt_gorder_clause:
/* empty */
- | ORDER_SYM BY
- {
- LEX *lex= Lex;
- SELECT_LEX *sel= lex->current_select;
- if (sel->linkage != GLOBAL_OPTIONS_TYPE &&
- sel->olap != UNSPECIFIED_OLAP_TYPE &&
- (sel->linkage != UNION_TYPE || sel->braces))
- {
- my_yyabort_error((ER_WRONG_USAGE, MYF(0),
- "CUBE/ROLLUP", "ORDER BY"));
- }
- }
- gorder_list;
+ | ORDER_SYM BY gorder_list;
;
gorder_list:
@@ -10289,43 +10707,35 @@ in_sum_expr:
cast_type:
BINARY opt_field_length
- { $$=ITEM_CAST_CHAR; Lex->charset= &my_charset_bin; Lex->dec= 0; }
+ { $$.set(ITEM_CAST_CHAR, $2); Lex->charset= &my_charset_bin; }
| CHAR_SYM opt_field_length
{ Lex->charset= thd->variables.collation_connection; }
opt_binary
- { $$=ITEM_CAST_CHAR; Lex->dec= 0; }
+ { $$.set(ITEM_CAST_CHAR, $2); }
| NCHAR_SYM opt_field_length
- { $$=ITEM_CAST_CHAR; Lex->charset= national_charset_info; Lex->dec=0; }
- | INT_SYM
- { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
- | SIGNED_SYM
- { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
- | SIGNED_SYM INT_SYM
- { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
- | UNSIGNED
- { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
- | UNSIGNED INT_SYM
- { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
- | DATE_SYM
- { $$=ITEM_CAST_DATE; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; }
- | TIME_SYM opt_field_length
- {
- $$=ITEM_CAST_TIME;
- LEX *lex= Lex;
- lex->charset= NULL; lex->dec= lex->length; lex->length= (char*)0;
- }
- | DATETIME opt_field_length
{
- $$=ITEM_CAST_DATETIME;
- LEX *lex= Lex;
- lex->charset= NULL; lex->dec= lex->length; lex->length= (char*)0;
- }
- | DECIMAL_SYM float_options
- { $$=ITEM_CAST_DECIMAL; Lex->charset= NULL; }
- | DOUBLE_SYM
- { Lex->charset= NULL; Lex->length= Lex->dec= 0;}
- opt_precision
- { $$=ITEM_CAST_DOUBLE; }
+ Lex->charset= national_charset_info;
+ $$.set(ITEM_CAST_CHAR, $2, 0);
+ }
+ | cast_type_numeric { $$= $1; Lex->charset= NULL; }
+ | cast_type_temporal { $$= $1; Lex->charset= NULL; }
+ ;
+
+cast_type_numeric:
+ INT_SYM { $$.set(ITEM_CAST_SIGNED_INT); }
+ | SIGNED_SYM { $$.set(ITEM_CAST_SIGNED_INT); }
+ | SIGNED_SYM INT_SYM { $$.set(ITEM_CAST_SIGNED_INT); }
+ | UNSIGNED { $$.set(ITEM_CAST_UNSIGNED_INT); }
+ | UNSIGNED INT_SYM { $$.set(ITEM_CAST_UNSIGNED_INT); }
+ | DECIMAL_SYM float_options { $$.set(ITEM_CAST_DECIMAL, $2); }
+ | DOUBLE_SYM opt_precision { $$.set(ITEM_CAST_DOUBLE, $2); }
+ ;
+
+cast_type_temporal:
+ DATE_SYM { $$.set(ITEM_CAST_DATE); }
+ | TIME_SYM opt_field_length { $$.set(ITEM_CAST_TIME, 0, $2); }
+ | DATETIME opt_field_length { $$.set(ITEM_CAST_DATETIME, 0, $2); }
+ ;
opt_expr_list:
/* empty */ { $$= NULL; }
@@ -10397,7 +10807,7 @@ when_list:
/* Equivalent to <table reference> in the SQL:2003 standard. */
/* Warning - may return NULL in case of incomplete SELECT */
table_ref:
- table_factor { $$=$1; }
+ table_factor { $$= $1; }
| join_table
{
LEX *lex= Lex;
@@ -10598,6 +11008,11 @@ use_partition:
*/
/* Warning - may return NULL in case of incomplete SELECT */
table_factor:
+ table_primary_ident
+ | table_primary_derived
+ ;
+
+table_primary_ident:
{
SELECT_LEX *sel= Select;
sel->table_join_options= 0;
@@ -10613,43 +11028,28 @@ table_factor:
MYSQL_YYABORT;
Select->add_joined_table($$);
}
- | select_derived_init get_select_lex select_derived2
- {
- LEX *lex= Lex;
- SELECT_LEX *sel= lex->current_select;
- if ($1)
- {
- if (sel->set_braces(1))
- {
- my_parse_error(thd, ER_SYNTAX_ERROR);
- MYSQL_YYABORT;
- }
- }
- if ($2->init_nested_join(thd))
- MYSQL_YYABORT;
- $$= 0;
- /* incomplete derived tables return NULL, we must be
- nested in select_derived rule to be here. */
- }
- /*
- Represents a flattening of the following rules from the SQL:2003
- standard. This sub-rule corresponds to the sub-rule
- <table primary> ::= ... | <derived table> [ AS ] <correlation name>
-
- The following rules have been flattened into query_expression_body
- (since we have no <with clause>).
-
- <derived table> ::= <table subquery>
- <table subquery> ::= <subquery>
- <subquery> ::= <left paren> <query expression> <right paren>
- <query expression> ::= [ <with clause> ] <query expression body>
-
- For the time being we use the non-standard rule
- select_derived_union which is a compromise between the standard
- and our parser. Possibly this rule could be replaced by our
- query_expression_body.
- */
- | '(' get_select_lex select_derived_union ')' opt_table_alias
+ ;
+
+
+
+/*
+ Represents a flattening of the following rules from the SQL:2003
+ standard. This sub-rule corresponds to the sub-rule
+ <table primary> ::= ... | <derived table> [ AS ] <correlation name>
+
+ <derived table> ::= <table subquery>
+ <table subquery> ::= <subquery>
+ <subquery> ::= <left paren> <query expression> <right paren>
+ <query expression> ::= [ <with clause> ] <query expression body>
+
+ For the time being we use the non-standard rule
+ select_derived_union which is a compromise between the standard
+ and our parser. Possibly this rule could be replaced by our
+ query_expression_body.
+*/
+
+table_primary_derived:
+ '(' get_select_lex select_derived_union ')' opt_table_alias
{
/* Use $2 instead of Lex->current_select as derived table will
alter value of Lex->current_select. */
@@ -10712,6 +11112,25 @@ table_factor:
!$$->derived->first_select()->next_select())
$$->select_lex->add_where_field($$->derived->first_select());
}
+ /* Represents derived table with WITH clause */
+ | '(' get_select_lex subselect_start
+ with_clause query_expression_body
+ subselect_end ')' opt_table_alias
+ {
+ LEX *lex=Lex;
+ SELECT_LEX *sel= $2;
+ SELECT_LEX_UNIT *unit= $5->master_unit();
+ Table_ident *ti= new (thd->mem_root) Table_ident(unit);
+ if (ti == NULL)
+ MYSQL_YYABORT;
+ $5->set_with_clause($4);
+ lex->current_select= sel;
+ if (!($$= sel->add_table_to_list(lex->thd,
+ ti, $8, 0,
+ TL_READ, MDL_SHARED_READ)))
+ MYSQL_YYABORT;
+ sel->add_joined_table($$);
+ }
;
/*
@@ -10734,47 +11153,45 @@ table_factor:
subqueries have their own union rules.
*/
select_derived_union:
- select_derived opt_union_order_or_limit
+ select_derived
+ | select_derived union_order_or_limit
{
- if ($1 && $2)
+ if ($1)
{
my_parse_error(thd, ER_SYNTAX_ERROR);
MYSQL_YYABORT;
}
}
- | select_derived_union
- UNION_SYM
- union_option
+ | select_derived union_head_non_top
{
- if (add_select_to_union_list(Lex, (bool)$3, FALSE))
- MYSQL_YYABORT;
- }
- query_specification
- {
- /*
- Remove from the name resolution context stack the context of the
- last select in the union.
- */
- Lex->pop_context();
-
- if ($1 != NULL)
+ if ($1)
{
my_parse_error(thd, ER_SYNTAX_ERROR);
MYSQL_YYABORT;
}
}
- ;
+ union_list_derived_part2
+ | derived_query_specification opt_select_lock_type
+ | derived_query_specification order_or_limit opt_select_lock_type
+ | derived_query_specification opt_select_lock_type union_list_derived
+ ;
+
+union_list_derived_part2:
+ query_term_union_not_ready { Lex->pop_context(); }
+ | query_term_union_ready { Lex->pop_context(); }
+ | query_term_union_ready { Lex->pop_context(); } union_list_derived
+ ;
+
+union_list_derived:
+ union_head_non_top union_list_derived_part2
+ ;
+
/* The equivalent of select_init2 for nested queries. */
select_init2_derived:
select_part2_derived
{
- LEX *lex= Lex;
- if (lex->current_select->set_braces(0))
- {
- my_parse_error(thd, ER_SYNTAX_ERROR);
- MYSQL_YYABORT;
- }
+ Select->set_braces(0);
}
;
@@ -10795,19 +11212,15 @@ select_part2_derived:
/* handle contents of parentheses in join expression */
select_derived:
- get_select_lex
+ get_select_lex_derived derived_table_list
{
- if ($1->init_nested_join(thd))
- MYSQL_YYABORT;
- }
- derived_table_list
- {
- /* for normal joins, $3 != NULL and end_nested_join() != NULL,
+ LEX *lex= Lex;
+ /* for normal joins, $2 != NULL and end_nested_join() != NULL,
for derived tables, both must equal NULL */
- if (!($$= $1->end_nested_join(thd)) && $3)
+ if (!($$= $1->end_nested_join(lex->thd)) && $2)
MYSQL_YYABORT;
- if (!$3 && $$)
+ if (!$2 && $$)
{
my_parse_error(thd, ER_SYNTAX_ERROR);
MYSQL_YYABORT;
@@ -10815,6 +11228,20 @@ select_derived:
}
;
+/*
+ Similar to query_specification, but for derived tables.
+ Example: the inner parenthesized SELECT in this query:
+ SELECT * FROM (SELECT * FROM t1);
+*/
+derived_query_specification:
+ SELECT_SYM select_derived_init select_derived2
+ {
+ if ($2)
+ Select->set_braces(1);
+ $$= NULL;
+ }
+ ;
+
select_derived2:
{
LEX *lex= Lex;
@@ -10836,30 +11263,27 @@ select_derived2:
{
Select->parsing_place= NO_MATTER;
}
- table_expression
+ opt_table_expression
;
get_select_lex:
/* Empty */ { $$= Select; }
;
-select_derived_init:
- SELECT_SYM
+get_select_lex_derived:
+ get_select_lex
{
LEX *lex= Lex;
+ if ($1->init_nested_join(lex->thd))
+ MYSQL_YYABORT;
+ }
+ ;
- if (! lex->parsing_options.allows_derived)
- my_yyabort_error((ER_VIEW_SELECT_DERIVED, MYF(0)));
+select_derived_init:
+ {
+ LEX *lex= Lex;
- SELECT_LEX *sel= lex->current_select;
- TABLE_LIST *embedding;
- if (!sel->embedding || sel->end_nested_join(thd))
- {
- /* we are not in parentheses */
- my_parse_error(thd, ER_SYNTAX_ERROR);
- MYSQL_YYABORT;
- }
- embedding= Select->embedding;
+ TABLE_LIST *embedding= lex->current_select->embedding;
$$= embedding &&
!embedding->nested_join->join_list.elements;
/* return true if we are deeply nested */
@@ -10997,7 +11421,7 @@ table_alias:
opt_table_alias:
/* empty */ { $$=0; }
- | table_alias ident
+ | table_alias ident_table_alias
{
$$= (LEX_STRING*) thd->memdup(&$2,sizeof(LEX_STRING));
if ($$ == NULL)
@@ -11112,6 +11536,155 @@ olap_opt:
;
/*
+ optional window clause in select
+*/
+
+opt_window_clause:
+ /* empty */
+ {}
+ | WINDOW_SYM
+ window_def_list
+ {}
+ ;
+
+window_def_list:
+ window_def_list ',' window_def
+ | window_def
+ ;
+
+window_def:
+ window_name AS window_spec
+ {
+ LEX *lex= Lex;
+ if (Select->add_window_def(thd, $1, lex->win_ref,
+ Select->group_list,
+ Select->order_list,
+ lex->win_frame ))
+ MYSQL_YYABORT;
+ }
+ ;
+
+window_spec:
+ '('
+ { Select->prepare_add_window_spec(thd); }
+ opt_window_ref opt_window_partition_clause
+ opt_window_order_clause opt_window_frame_clause
+ ')'
+ ;
+
+opt_window_ref:
+ /* empty */ {}
+ | ident
+ {
+ thd->lex->win_ref= (LEX_STRING *) thd->memdup(&$1, sizeof(LEX_STRING));
+ if (thd->lex->win_ref == NULL)
+ MYSQL_YYABORT;
+ }
+
+opt_window_partition_clause:
+ /* empty */ { }
+ | PARTITION_SYM BY group_list
+ ;
+
+opt_window_order_clause:
+ /* empty */ { }
+ | ORDER_SYM BY order_list
+ ;
+
+opt_window_frame_clause:
+ /* empty */ {}
+ | window_frame_units window_frame_extent opt_window_frame_exclusion
+ {
+ LEX *lex= Lex;
+ lex->win_frame=
+ new (thd->mem_root) Window_frame($1,
+ lex->frame_top_bound,
+ lex->frame_bottom_bound,
+ $3);
+ if (lex->win_frame == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+window_frame_units:
+ ROWS_SYM { $$= Window_frame::UNITS_ROWS; }
+ | RANGE_SYM { $$= Window_frame::UNITS_RANGE; }
+ ;
+
+window_frame_extent:
+ window_frame_start
+ {
+ LEX *lex= Lex;
+ lex->frame_top_bound= $1;
+ lex->frame_bottom_bound=
+ new (thd->mem_root)
+ Window_frame_bound(Window_frame_bound::CURRENT, NULL);
+ if (lex->frame_bottom_bound == NULL)
+ MYSQL_YYABORT;
+ }
+ | BETWEEN_SYM window_frame_bound AND_SYM window_frame_bound
+ {
+ LEX *lex= Lex;
+ lex->frame_top_bound= $2;
+ lex->frame_bottom_bound= $4;
+ }
+ ;
+
+window_frame_start:
+ UNBOUNDED_SYM PRECEDING_SYM
+ {
+ $$= new (thd->mem_root)
+ Window_frame_bound(Window_frame_bound::PRECEDING, NULL);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | CURRENT_SYM ROW_SYM
+ {
+ $$= new (thd->mem_root)
+ Window_frame_bound(Window_frame_bound::CURRENT, NULL);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | literal PRECEDING_SYM
+ {
+ $$= new (thd->mem_root)
+ Window_frame_bound(Window_frame_bound::PRECEDING, $1);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+window_frame_bound:
+ window_frame_start { $$= $1; }
+ | UNBOUNDED_SYM FOLLOWING_SYM
+ {
+ $$= new (thd->mem_root)
+ Window_frame_bound(Window_frame_bound::FOLLOWING, NULL);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | literal FOLLOWING_SYM
+ {
+ $$= new (thd->mem_root)
+ Window_frame_bound(Window_frame_bound::FOLLOWING, $1);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+opt_window_frame_exclusion:
+ /* empty */ { $$= Window_frame::EXCL_NONE; }
+ | EXCLUDE_SYM CURRENT_SYM ROW_SYM
+ { $$= Window_frame::EXCL_CURRENT_ROW; }
+ | EXCLUDE_SYM GROUP_SYM
+ { $$= Window_frame::EXCL_GROUP; }
+ | EXCLUDE_SYM TIES_SYM
+ { $$= Window_frame::EXCL_TIES; }
+ | EXCLUDE_SYM NO_SYM OTHERS_SYM
+ { $$= Window_frame::EXCL_NONE; }
+ ;
+
+/*
Order by statement in ALTER TABLE
*/
@@ -11276,10 +11849,18 @@ limit_option:
sp_pcontext *spc = lex->spcont;
if (spc && (spv = spc->find_variable($1, false)))
{
+ uint pos_in_query= 0;
+ uint len_in_query= 0;
+ if (!lex->clone_spec_offset)
+ {
+ pos_in_query= (uint)(lip->get_tok_start() -
+ lex->sphead->m_tmp_query);
+ len_in_query= (uint)(lip->get_ptr() -
+ lip->get_tok_start());
+ }
splocal= new (thd->mem_root)
- Item_splocal(thd, $1, spv->offset, spv->type,
- lip->get_tok_start() - lex->sphead->m_tmp_query,
- lip->get_ptr() - lip->get_tok_start());
+ Item_splocal(thd, $1, spv->offset, spv->sql_type(),
+ pos_in_query, len_in_query);
if (splocal == NULL)
MYSQL_YYABORT;
#ifndef DBUG_OFF
@@ -11396,17 +11977,13 @@ choice:
| DEFAULT { $$= HA_CHOICE_UNDEF; }
;
-opt_procedure_clause:
- /* empty */ { $$= false; }
- | PROCEDURE_SYM ident /* Procedure name */
+procedure_clause:
+ PROCEDURE_SYM ident /* Procedure name */
{
LEX *lex=Lex;
- if (! lex->parsing_options.allows_select_procedure)
- my_yyabort_error((ER_VIEW_SELECT_CLAUSE, MYF(0), "PROCEDURE"));
+ DBUG_ASSERT(&lex->select_lex == lex->current_select);
- if (&lex->select_lex != lex->current_select)
- my_yyabort_error((ER_WRONG_USAGE, MYF(0), "PROCEDURE", "subquery"));
lex->proc_list.elements=0;
lex->proc_list.first=0;
lex->proc_list.next= &lex->proc_list.first;
@@ -11431,7 +12008,6 @@ opt_procedure_clause:
{
/* Subqueries are allowed from now.*/
Lex->expr_allows_subselect= true;
- $$= true;
}
;
@@ -11451,7 +12027,7 @@ procedure_item:
if (add_proc_to_list(thd, $2))
MYSQL_YYABORT;
if (!$2->name)
- $2->set_name($1, (uint) ($3 - $1), thd->charset());
+ $2->set_name(thd, $1, (uint) ($3 - $1), thd->charset());
}
;
@@ -11502,24 +12078,14 @@ select_outvar:
if (!Lex->spcont || !(t= Lex->spcont->find_variable($1, false)))
my_yyabort_error((ER_SP_UNDECLARED_VAR, MYF(0), $1.str));
$$ = Lex->result ? (new (thd->mem_root)
- my_var_sp($1, t->offset, t->type,
+ my_var_sp($1, t->offset, t->sql_type(),
Lex->sphead)) :
NULL;
}
;
-opt_into:
- /* empty */ { $$= false; }
- | into { $$= true; }
- ;
-
into:
- INTO
- {
- if (! Lex->parsing_options.allows_select_into)
- my_yyabort_error((ER_VIEW_SELECT_CLAUSE, MYF(0), "INTO"));
- }
- into_destination
+ INTO into_destination
;
into_destination:
@@ -11876,12 +12442,7 @@ fields:
insert_values:
VALUES values_list {}
| VALUE_SYM values_list {}
- | create_select
- { Select->set_braces(0);}
- union_clause {}
- | '(' create_select ')'
- { Select->set_braces(1);}
- union_opt {}
+ | create_select_query_expression {}
;
values_list:
@@ -11954,6 +12515,12 @@ expr_or_default:
if ($$ == NULL)
MYSQL_YYABORT;
}
+ | IGNORE_SYM
+ {
+ $$= new (thd->mem_root) Item_ignore_value(thd, Lex->current_context());
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
;
opt_insert_update:
@@ -12497,6 +13064,18 @@ show_param:
lex->sql_command= SQLCOM_SHOW_CREATE_TRIGGER;
lex->spname= $3;
}
+ | CREATE USER_SYM
+ {
+ Lex->sql_command= SQLCOM_SHOW_CREATE_USER;
+ if (!(Lex->grant_user= (LEX_USER*)thd->alloc(sizeof(LEX_USER))))
+ MYSQL_YYABORT;
+ Lex->grant_user->user= current_user;
+ }
+ | CREATE USER_SYM user
+ {
+ Lex->sql_command= SQLCOM_SHOW_CREATE_USER;
+ Lex->grant_user= $3;
+ }
| PROCEDURE_SYM STATUS_SYM wild_and_where
{
LEX *lex= Lex;
@@ -13178,7 +13757,7 @@ load_data_set_elem:
if (lex->update_list.push_back($1, thd->mem_root) ||
lex->value_list.push_back($4, thd->mem_root))
MYSQL_YYABORT;
- $4->set_name_no_truncate($3, (uint) ($5 - $3), thd->charset());
+ $4->set_name_no_truncate(thd, $3, (uint) ($5 - $3), thd->charset());
}
;
@@ -13303,20 +13882,28 @@ param_marker:
LEX *lex= thd->lex;
Lex_input_stream *lip= YYLIP;
Item_param *item;
+ bool rc;
if (! lex->parsing_options.allows_variable)
my_yyabort_error((ER_VIEW_SELECT_VARIABLE, MYF(0)));
- const char *query_start= lex->sphead ? lex->sphead->m_tmp_query
- : thd->query();
- item= new (thd->mem_root) Item_param(thd, lip->get_tok_start() -
- query_start);
- if (!($$= item) || lex->param_list.push_back(item, thd->mem_root))
+ const char *query_start= lex->sphead && !lex->clone_spec_offset ?
+ lex->sphead->m_tmp_query : lip->get_buf();
+ item= new (thd->mem_root) Item_param(thd,
+ (uint)(lip->get_tok_start() -
+ query_start));
+ if (!($$= item))
+ MYSQL_YYABORT;
+ if (!lex->clone_spec_offset)
+ rc= lex->param_list.push_back(item, thd->mem_root);
+ else
+ rc= item->add_as_clone(thd);
+ if (rc)
my_yyabort_error((ER_OUT_OF_RESOURCES, MYF(0)));
+
}
;
signed_literal:
- literal { $$ = $1; }
- | '+' NUM_literal { $$ = $2; }
+ '+' NUM_literal { $$ = $2; }
| '-' NUM_literal
{
$2->max_length++;
@@ -13344,13 +13931,13 @@ literal:
}
| FALSE_SYM
{
- $$= new (thd->mem_root) Item_int(thd, (char*) "FALSE",0,1);
+ $$= new (thd->mem_root) Item_bool(thd, (char*) "FALSE",0);
if ($$ == NULL)
MYSQL_YYABORT;
}
| TRUE_SYM
{
- $$= new (thd->mem_root) Item_int(thd, (char*) "TRUE",1,1);
+ $$= new (thd->mem_root) Item_bool(thd, (char*) "TRUE",1);
if ($$ == NULL)
MYSQL_YYABORT;
}
@@ -13458,8 +14045,104 @@ temporal_literal:
;
+opt_with_clause:
+ /*empty */ { $$= 0; }
+ | with_clause
+ {
+ $$= $1;
+ }
+ ;
+
+
+with_clause:
+ WITH opt_recursive
+ {
+ With_clause *with_clause=
+ new With_clause($2, Lex->curr_with_clause);
+ if (with_clause == NULL)
+ MYSQL_YYABORT;
+ Lex->derived_tables|= DERIVED_WITH;
+ Lex->curr_with_clause= with_clause;
+ with_clause->add_to_list(Lex->with_clauses_list_last_next);
+ }
+ with_list
+ {
+ $$= Lex->curr_with_clause;
+ Lex->curr_with_clause= Lex->curr_with_clause->pop();
+ }
+ ;
+
+
+opt_recursive:
+ /*empty*/ { $$= 0; }
+ | RECURSIVE_SYM { $$= 1; }
+ ;
+
+
+with_list:
+ with_list_element
+ | with_list ',' with_list_element
+ ;
+
+
+with_list_element:
+ query_name
+ opt_with_column_list
+ {
+ $2= new List<LEX_STRING> (Lex->with_column_list);
+ if ($2 == NULL)
+ MYSQL_YYABORT;
+ Lex->with_column_list.empty();
+ }
+ AS '(' remember_tok_start subselect remember_tok_end ')'
+ {
+ LEX *lex= thd->lex;
+ const char *query_start= lex->sphead ? lex->sphead->m_tmp_query
+ : thd->query();
+ char *spec_start= $6 + 1;
+ With_element *elem= new With_element($1, *$2, $7->master_unit());
+ if (elem == NULL || Lex->curr_with_clause->add_with_element(elem))
+ MYSQL_YYABORT;
+ if (elem->set_unparsed_spec(thd, spec_start, $8,
+ (uint) (spec_start - query_start)))
+ MYSQL_YYABORT;
+ }
+ ;
+
+
+opt_with_column_list:
+ /* empty */
+ { $$= NULL; }
+ | '(' with_column_list ')'
+ { $$= NULL; }
+ ;
+
+
+with_column_list:
+ ident
+ {
+ Lex->with_column_list.push_back((LEX_STRING*)
+ thd->memdup(&$1, sizeof(LEX_STRING)));
+ }
+ | with_column_list ',' ident
+ {
+ Lex->with_column_list.push_back((LEX_STRING*)
+ thd->memdup(&$3, sizeof(LEX_STRING)));
+ }
+ ;
+
+
+query_name:
+ ident
+ {
+ $$= (LEX_STRING *) thd->memdup(&$1, sizeof(LEX_STRING));
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
/**********************************************************************
** Creating different items.
**********************************************************************/
@@ -13511,10 +14194,18 @@ simple_ident:
my_yyabort_error((ER_VIEW_SELECT_VARIABLE, MYF(0)));
Item_splocal *splocal;
+ uint pos_in_query= 0;
+ uint len_in_query= 0;
+ if (!lex->clone_spec_offset)
+ {
+ pos_in_query= (uint)(lip->get_tok_start_prev() -
+ lex->sphead->m_tmp_query);
+ len_in_query= (uint)(lip->get_tok_end() -
+ lip->get_tok_start_prev());
+ }
splocal= new (thd->mem_root)
- Item_splocal(thd, $1, spv->offset, spv->type,
- lip->get_tok_start_prev() - lex->sphead->m_tmp_query,
- lip->get_tok_end() - lip->get_tok_start_prev());
+ Item_splocal(thd, $1, spv->offset, spv->sql_type(),
+ pos_in_query, len_in_query);
if (splocal == NULL)
MYSQL_YYABORT;
#ifndef DBUG_OFF
@@ -13767,10 +14458,7 @@ IDENT_sys:
if (thd->charset_is_system_charset)
{
CHARSET_INFO *cs= system_charset_info;
- int dummy_error;
- uint wlen= cs->cset->well_formed_len(cs, $1.str,
- $1.str+$1.length,
- $1.length, &dummy_error);
+ uint wlen= Well_formed_prefix(cs, $1.str, $1.length).length();
if (wlen < $1.length)
{
ErrConvString err($1.str, $1.length, &my_charset_bin);
@@ -13830,6 +14518,16 @@ TEXT_STRING_filesystem:
MYSQL_YYABORT;
}
}
+
+ident_table_alias:
+ IDENT_sys { $$= $1; }
+ | keyword_alias
+ {
+ $$.str= thd->strmake($1.str, $1.length);
+ if ($$.str == NULL)
+ MYSQL_YYABORT;
+ $$.length= $1.length;
+ }
;
ident:
@@ -13924,12 +14622,13 @@ user: user_maybe_role
}
;
-/* Keyword that we allow for identifiers (except SP labels) */
-keyword:
+/* Keywords which we allow as table aliases. */
+keyword_alias:
keyword_sp {}
| ASCII_SYM {}
| BACKUP_SYM {}
| BEGIN_SYM {}
+ | BINLOG_SYM {}
| BYTE_SYM {}
| CACHE_SYM {}
| CHARSET {}
@@ -13948,8 +14647,11 @@ keyword:
| DO_SYM {}
| END {}
| EXAMINED_SYM {}
+ | EXCLUDE_SYM {}
| EXECUTE_SYM {}
| FLUSH_SYM {}
+ | FOLLOWS_SYM {}
+ | FOLLOWING_SYM {}
| FORMAT_SYM {}
| GET_SYM {}
| HANDLER_SYM {}
@@ -13961,9 +14663,12 @@ keyword:
| OPEN_SYM {}
| OPTION {}
| OPTIONS_SYM {}
+ | OTHERS_SYM {}
| OWNER_SYM {}
| PARSER_SYM {}
| PORT_SYM {}
+ | PRECEDES_SYM {}
+ | PRECEDING_SYM {}
| PREPARE_SYM {}
| REMOVE_SYM {}
| REPAIR {}
@@ -13981,14 +14686,21 @@ keyword:
| SONAME_SYM {}
| START_SYM {}
| STOP_SYM {}
+ | STORED_SYM {}
+ | TIES_SYM {}
| TRUNCATE_SYM {}
| UNICODE_SYM {}
| UNINSTALL_SYM {}
+ | UNBOUNDED_SYM {}
| WRAPPER_SYM {}
| XA_SYM {}
| UPGRADE_SYM {}
;
+
+/* Keyword that we allow for identifiers (except SP labels) */
+keyword: keyword_alias | WINDOW_SYM {};
+
/*
* Keywords that we allow for labels in SPs.
* Anything that's the beginning of a statement or characteristics
@@ -14013,7 +14725,6 @@ keyword_sp:
| AUTO_SYM {}
| AVG_ROW_LENGTH {}
| AVG_SYM {}
- | BINLOG_SYM {}
| BIT_SYM {}
| BLOCK_SYM {}
| BOOL_SYM {}
@@ -14105,6 +14816,7 @@ keyword_sp:
| ID_SYM {}
| IDENTIFIED_SYM {}
| IGNORE_SERVER_IDS_SYM {}
+ | IMMEDIATE_SYM {} /* SQL-2003-R */
| INVOKER_SYM {}
| IMPORT {}
| INDEXES {}
@@ -14113,6 +14825,7 @@ keyword_sp:
| IPC_SYM {}
| ISOLATION {}
| ISSUER_SYM {}
+ | JSON_SYM {}
| INSERT_METHOD {}
| KEY_BLOCK_SIZE {}
| LAST_VALUE {}
@@ -14139,6 +14852,7 @@ keyword_sp:
| MASTER_PASSWORD_SYM {}
| MASTER_SERVER_ID_SYM {}
| MASTER_CONNECT_RETRY_SYM {}
+ | MASTER_DELAY_SYM {}
| MASTER_SSL_SYM {}
| MASTER_SSL_CA_SYM {}
| MASTER_SSL_CAPATH_SYM {}
@@ -14232,7 +14946,6 @@ keyword_sp:
| ROLE_SYM {}
| ROLLUP_SYM {}
| ROUTINE_SYM {}
- | ROWS_SYM {}
| ROW_COUNT_SYM {}
| ROW_FORMAT_SYM {}
| ROW_SYM {}
@@ -14359,8 +15072,9 @@ set_stmt_option_value_following_option_type_list:
*/
option_value_following_option_type
| set_stmt_option_value_following_option_type_list ',' option_value_following_option_type
+ ;
-// Start of option value list
+/* Start of option value list */
start_option_value_list:
option_value_no_option_type
{
@@ -14385,7 +15099,7 @@ start_option_value_list:
;
-// Start of option value list, option_type was given
+/* Start of option value list, option_type was given */
start_option_value_list_following_option_type:
option_value_following_option_type
{
@@ -14400,13 +15114,13 @@ start_option_value_list_following_option_type:
}
;
-// Remainder of the option value list after first option value.
+/* Remainder of the option value list after first option value. */
option_value_list_continued:
/* empty */
| ',' option_value_list
;
-// Repeating list of option values after first option value.
+/* Repeating list of option values after first option value. */
option_value_list:
{
sp_create_assignment_lex(thd, yychar == YYEMPTY);
@@ -14427,7 +15141,7 @@ option_value_list:
}
;
-// Wrapper around option values following the first option value in the stmt.
+/* Wrapper around option values following the first option value in the stmt. */
option_value:
option_type
{
@@ -14457,7 +15171,7 @@ opt_var_ident_type:
| SESSION_SYM '.' { $$=OPT_SESSION; }
;
-// Option values with preceding option_type.
+/* Option values with preceding option_type. */
option_value_following_option_type:
internal_variable_name equal set_expr_or_default
{
@@ -14481,7 +15195,7 @@ option_value_following_option_type:
}
;
-// Option values without preceding option_type.
+/* Option values without preceding option_type. */
option_value_no_option_type:
internal_variable_name equal set_expr_or_default
{
@@ -14525,7 +15239,7 @@ option_value_no_option_type:
struct sys_var_with_base tmp= $4;
if (tmp.var == trg_new_row_fake_var)
{
- my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), "NEW");
+ my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), 3, "NEW");
MYSQL_YYABORT;
}
/* Lookup if necessary: must be a system variable. */
@@ -15044,14 +15758,14 @@ grant:
grant_command:
grant_privileges ON opt_table grant_ident TO_SYM grant_list
- require_clause grant_options
+ opt_require_clause opt_grant_options
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_GRANT;
lex->type= 0;
}
| grant_privileges ON FUNCTION_SYM grant_ident TO_SYM grant_list
- require_clause grant_options
+ opt_require_clause opt_grant_options
{
LEX *lex= Lex;
if (lex->columns.elements)
@@ -15063,7 +15777,7 @@ grant_command:
lex->type= TYPE_ENUM_FUNCTION;
}
| grant_privileges ON PROCEDURE_SYM grant_ident TO_SYM grant_list
- require_clause grant_options
+ opt_require_clause opt_grant_options
{
LEX *lex= Lex;
if (lex->columns.elements)
@@ -15400,7 +16114,7 @@ column_list_id:
}
;
-require_clause:
+opt_require_clause:
/* empty */
| REQUIRE_SYM require_list
{
@@ -15420,24 +16134,8 @@ require_clause:
}
;
-grant_options:
- /* empty */ {}
- | WITH grant_option_list
- ;
-
-opt_grant_option:
- /* empty */ {}
- | WITH GRANT OPTION { Lex->grant |= GRANT_ACL;}
- ;
-
-grant_option_list:
- grant_option_list grant_option {}
- | grant_option {}
- ;
-
-grant_option:
- GRANT OPTION { Lex->grant |= GRANT_ACL;}
- | MAX_QUERIES_PER_HOUR ulong_num
+resource_option:
+ MAX_QUERIES_PER_HOUR ulong_num
{
LEX *lex=Lex;
lex->mqh.questions=$2;
@@ -15469,6 +16167,37 @@ grant_option:
}
;
+resource_option_list:
+ resource_option_list resource_option {}
+ | resource_option {}
+ ;
+
+opt_resource_options:
+ /* empty */ {}
+ | WITH resource_option_list
+ ;
+
+
+opt_grant_options:
+ /* empty */ {}
+ | WITH grant_option_list {}
+ ;
+
+opt_grant_option:
+ /* empty */ {}
+ | WITH GRANT OPTION { Lex->grant |= GRANT_ACL;}
+ ;
+
+grant_option_list:
+ grant_option_list grant_option {}
+ | grant_option {}
+ ;
+
+grant_option:
+ GRANT OPTION { Lex->grant |= GRANT_ACL;}
+ | resource_option {}
+ ;
+
begin:
BEGIN_SYM
{
@@ -15582,7 +16311,7 @@ union_list:
if (add_select_to_union_list(Lex, (bool)$2, TRUE))
MYSQL_YYABORT;
}
- select_init
+ union_list_part2
{
/*
Remove from the name resolution context stack the context of the
@@ -15592,16 +16321,18 @@ union_list:
}
;
-union_opt:
- opt_union_order_or_limit
- | union_list { $$= 1; }
+union_list_view:
+ UNION_SYM union_option
+ {
+ if (add_select_to_union_list(Lex, (bool)$2, TRUE))
+ MYSQL_YYABORT;
+ }
+ query_expression_body_view
+ {
+ Lex->pop_context();
+ }
;
-opt_union_order_or_limit:
- /* Empty */ { $$= 0; }
- | union_order_or_limit { $$= 1; }
- ;
-
union_order_or_limit:
{
LEX *lex= thd->lex;
@@ -15628,45 +16359,61 @@ order_or_limit:
| limit_clause
;
+/*
+ Start a UNION, for non-top level query expressions.
+*/
+union_head_non_top:
+ UNION_SYM union_option
+ {
+ if (add_select_to_union_list(Lex, (bool)$2, FALSE))
+ MYSQL_YYABORT;
+ }
+ ;
+
union_option:
/* empty */ { $$=1; }
| DISTINCT { $$=1; }
| ALL { $$=0; }
;
+/*
+ Corresponds to the SQL Standard
+ <query specification> ::=
+ SELECT [ <set quantifier> ] <select list> <table expression>
+
+ Notes:
+ - We allow more options in addition to <set quantifier>
+ - <table expression> is optional in MariaDB
+*/
query_specification:
- SELECT_SYM select_init2_derived
- table_expression
- {
- $$= Lex->current_select->master_unit()->first_select();
- }
- | '(' select_paren_derived ')'
- opt_union_order_or_limit
+ SELECT_SYM select_init2_derived opt_table_expression
{
$$= Lex->current_select->master_unit()->first_select();
}
;
+query_term_union_not_ready:
+ query_specification order_or_limit opt_select_lock_type { $$= $1; }
+ | '(' select_paren_derived ')' union_order_or_limit { $$= $2; }
+ ;
+
+query_term_union_ready:
+ query_specification opt_select_lock_type { $$= $1; }
+ | '(' select_paren_derived ')' { $$= $2; }
+ ;
+
query_expression_body:
- query_specification
- | query_expression_body
- UNION_SYM union_option
- {
- if (add_select_to_union_list(Lex, (bool)$3, FALSE))
- MYSQL_YYABORT;
- }
- query_specification
- {
- Lex->pop_context();
- $$= $1;
- }
+ query_term_union_not_ready { $$= $1; }
+ | query_term_union_ready { $$= $1; }
+ | query_term_union_ready union_list_derived { $$= $1; }
;
/* Corresponds to <query expression> in the SQL:2003 standard. */
subselect:
- subselect_start query_expression_body subselect_end
+ subselect_start opt_with_clause query_expression_body subselect_end
{
- $$= $2;
+ $3->set_with_clause($2);
+ $$= $3;
}
;
@@ -15890,29 +16637,32 @@ view_select:
{
LEX *lex= Lex;
lex->parsing_options.allows_variable= FALSE;
- lex->parsing_options.allows_select_into= FALSE;
- lex->parsing_options.allows_select_procedure= FALSE;
- lex->parsing_options.allows_derived= FALSE;
lex->create_view_select.str= (char *) YYLIP->get_cpp_ptr();
}
- view_select_aux view_check_option
+ opt_with_clause query_expression_body_view view_check_option
{
LEX *lex= Lex;
- uint len= YYLIP->get_cpp_ptr() - lex->create_view_select.str;
+ size_t len= YYLIP->get_cpp_ptr() - lex->create_view_select.str;
+ uint not_used;
void *create_view_select= thd->memdup(lex->create_view_select.str, len);
lex->create_view_select.length= len;
lex->create_view_select.str= (char *) create_view_select;
- trim_whitespace(thd->charset(), &lex->create_view_select);
+ trim_whitespace(thd->charset(), &lex->create_view_select,
+ &not_used);
lex->parsing_options.allows_variable= TRUE;
- lex->parsing_options.allows_select_into= TRUE;
- lex->parsing_options.allows_select_procedure= TRUE;
- lex->parsing_options.allows_derived= TRUE;
+ lex->current_select->set_with_clause($2);
}
;
-view_select_aux:
- SELECT_SYM select_init2
- | '(' select_paren ')' union_opt
+/*
+ SQL Standard <query expression body> for VIEWs.
+ Does not include INTO and PROCEDURE clauses.
+*/
+query_expression_body_view:
+ SELECT_SYM select_options_and_item_list select_init3_view
+ | '(' select_paren_view ')'
+ | '(' select_paren_view ')' union_order_or_limit
+ | '(' select_paren_view ')' union_list_view
;
view_check_option:
@@ -15932,6 +16682,28 @@ view_check_option:
**************************************************************************/
+trigger_action_order:
+ FOLLOWS_SYM
+ { $$= TRG_ORDER_FOLLOWS; }
+ | PRECEDES_SYM
+ { $$= TRG_ORDER_PRECEDES; }
+ ;
+
+trigger_follows_precedes_clause:
+ /* empty */
+ {
+ $$.ordering_clause= TRG_ORDER_NONE;
+ $$.anchor_trigger_name.str= NULL;
+ $$.anchor_trigger_name.length= 0;
+ }
+ |
+ trigger_action_order ident_or_text
+ {
+ $$.ordering_clause= $1;
+ $$.anchor_trigger_name= $2;
+ }
+ ;
+
trigger_tail:
TRIGGER_SYM
remember_name
@@ -15956,7 +16728,11 @@ trigger_tail:
}
EACH_SYM
ROW_SYM
- { /* $17 */
+ {
+ Lex->trg_chistics.ordering_clause_begin= YYLIP->get_cpp_ptr();
+ }
+ trigger_follows_precedes_clause /* $18 */
+ { /* $19 */
LEX *lex= thd->lex;
Lex_input_stream *lip= YYLIP;
@@ -15967,14 +16743,16 @@ trigger_tail:
lex->ident.str= $9;
lex->ident.length= $13 - $9;
lex->spname= $5;
+ (*static_cast<st_trg_execution_order*>(&lex->trg_chistics))= ($18);
+ lex->trg_chistics.ordering_clause_end= lip->get_cpp_ptr();
if (!make_sp_head(thd, $5, TYPE_ENUM_TRIGGER))
MYSQL_YYABORT;
- lex->sphead->set_body_start(thd, lip->get_cpp_ptr());
+ lex->sphead->set_body_start(thd, lip->get_cpp_tok_start());
}
- sp_proc_stmt /* $18 */
- { /* $19 */
+ sp_proc_stmt /* $20 */
+ { /* $21 */
LEX *lex= Lex;
sp_head *sp= lex->sphead;
@@ -16063,8 +16841,7 @@ sf_tail:
}
type_with_opt_collate /* $11 */
{ /* $12 */
- if (Lex->sphead->fill_field_definition(thd, Lex, $11,
- Lex->last_field))
+ if (Lex->sphead->fill_field_definition(thd, Lex, Lex->last_field))
MYSQL_YYABORT;
}
sp_c_chistics /* $13 */
@@ -16074,7 +16851,7 @@ sf_tail:
lex->sphead->set_body_start(thd, lip->get_cpp_tok_start());
}
- sp_proc_stmt /* $15 */
+ sp_proc_stmt_in_returns_clause /* $15 */
{
LEX *lex= thd->lex;
sp_head *sp= lex->sphead;
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index b8100e05ce5..e1b4dc8227f 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -158,7 +158,7 @@ uint find_type2(const TYPELIB *typelib, const char *x, uint length,
int pos;
const char *j;
DBUG_ENTER("find_type2");
- DBUG_PRINT("enter",("x: '%.*s' lib: 0x%lx", length, x, (long) typelib));
+ DBUG_PRINT("enter",("x: '%.*s' lib: %p", length, x, typelib));
if (!typelib->count)
{
@@ -337,9 +337,9 @@ int find_string_in_array(LEX_STRING * const haystack, LEX_STRING * const needle,
const LEX_STRING *pos;
for (pos= haystack; pos->str; pos++)
if (!cs->coll->strnncollsp(cs, (uchar *) pos->str, pos->length,
- (uchar *) needle->str, needle->length, 0))
+ (uchar *) needle->str, needle->length))
{
- return (pos - haystack);
+ return (int)(pos - haystack);
}
return -1;
}
diff --git a/sql/structs.h b/sql/structs.h
index 2ab102d82f9..378b8c387e0 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -2,6 +2,7 @@
#define STRUCTS_INCLUDED
/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -64,9 +65,11 @@ typedef struct st_keyfile_info { /* used with ha_info() */
typedef struct st_key_part_info { /* Info about a key part */
- Field *field;
- uint offset; /* offset in record (from 0) */
- uint null_offset; /* Offset to null_bit in record */
+ Field *field; /* the Field object for the indexed
+ prefix of the original table Field.
+ NOT necessarily the original Field */
+ uint offset; /* Offset in record (from 0) */
+ uint null_offset; /* Offset to null_bit in record */
/* Length of key part in bytes, excluding NULL flag and length bytes */
uint16 length;
/*
@@ -77,9 +80,8 @@ typedef struct st_key_part_info { /* Info about a key part */
*/
uint16 store_length;
uint16 key_type;
- /* Fieldnr begins counting from 1 */
- uint16 fieldnr; /* Fieldnum in UNIREG */
- uint16 key_part_flag; /* 0 or HA_REVERSE_SORT */
+ uint16 fieldnr; /* Fieldnr begins counting from 1 */
+ uint16 key_part_flag; /* 0 or HA_REVERSE_SORT */
uint8 type;
uint8 null_bit; /* Position to null_bit */
} KEY_PART_INFO ;
@@ -330,26 +332,26 @@ typedef struct st_index_stats
/* Bits in form->update */
-#define REG_MAKE_DUPP 1 /* Make a copy of record when read */
-#define REG_NEW_RECORD 2 /* Write a new record if not found */
-#define REG_UPDATE 4 /* Uppdate record */
-#define REG_DELETE 8 /* Delete found record */
-#define REG_PROG 16 /* User is updating database */
-#define REG_CLEAR_AFTER_WRITE 32
-#define REG_MAY_BE_UPDATED 64
-#define REG_AUTO_UPDATE 64 /* Used in D-forms for scroll-tables */
-#define REG_OVERWRITE 128
-#define REG_SKIP_DUP 256
+#define REG_MAKE_DUPP 1U /* Make a copy of record when read */
+#define REG_NEW_RECORD 2U /* Write a new record if not found */
+#define REG_UPDATE 4U /* Uppdate record */
+#define REG_DELETE 8U /* Delete found record */
+#define REG_PROG 16U /* User is updating database */
+#define REG_CLEAR_AFTER_WRITE 32U
+#define REG_MAY_BE_UPDATED 64U
+#define REG_AUTO_UPDATE 64U /* Used in D-forms for scroll-tables */
+#define REG_OVERWRITE 128U
+#define REG_SKIP_DUP 256U
/* Bits in form->status */
-#define STATUS_NO_RECORD (1+2) /* Record isn't usably */
-#define STATUS_GARBAGE 1
-#define STATUS_NOT_FOUND 2 /* No record in database when needed */
-#define STATUS_NO_PARENT 4 /* Parent record wasn't found */
-#define STATUS_NOT_READ 8 /* Record isn't read */
-#define STATUS_UPDATED 16 /* Record is updated by formula */
-#define STATUS_NULL_ROW 32 /* table->null_row is set */
-#define STATUS_DELETED 64
+#define STATUS_NO_RECORD (1U+2U) /* Record isn't usable */
+#define STATUS_GARBAGE 1U
+#define STATUS_NOT_FOUND 2U /* No record in database when needed */
+#define STATUS_NO_PARENT 4U /* Parent record wasn't found */
+#define STATUS_NOT_READ 8U /* Record isn't read */
+#define STATUS_UPDATED 16U /* Record is updated by formula */
+#define STATUS_NULL_ROW 32U /* table->null_row is set */
+#define STATUS_DELETED 64U
/*
Such interval is "discrete": it is the set of
@@ -557,4 +559,110 @@ public:
};
+struct Lex_length_and_dec_st
+{
+private:
+ const char *m_length;
+ const char *m_dec;
+public:
+ void set(const char *length, const char *dec)
+ {
+ m_length= length;
+ m_dec= dec;
+ }
+ const char *length() const { return m_length; }
+ const char *dec() const { return m_dec; }
+};
+
+
+struct Lex_field_type_st: public Lex_length_and_dec_st
+{
+private:
+ enum_field_types m_type;
+ void set(enum_field_types type, const char *length, const char *dec)
+ {
+ m_type= type;
+ Lex_length_and_dec_st::set(length, dec);
+ }
+public:
+ void set(enum_field_types type, Lex_length_and_dec_st length_and_dec)
+ {
+ m_type= type;
+ Lex_length_and_dec_st::operator=(length_and_dec);
+ }
+ void set(enum_field_types type, const char *length)
+ {
+ set(type, length, 0);
+ }
+ void set(enum_field_types type)
+ {
+ set(type, 0, 0);
+ }
+ enum_field_types field_type() const { return m_type; }
+};
+
+
+struct Lex_dyncol_type_st: public Lex_length_and_dec_st
+{
+private:
+ int m_type; // enum_dynamic_column_type is not visible here, so use int
+public:
+ void set(int type, const char *length, const char *dec)
+ {
+ m_type= type;
+ Lex_length_and_dec_st::set(length, dec);
+ }
+ void set(int type, Lex_length_and_dec_st length_and_dec)
+ {
+ m_type= type;
+ Lex_length_and_dec_st::operator=(length_and_dec);
+ }
+ void set(int type, const char *length)
+ {
+ set(type, length, 0);
+ }
+ void set(int type)
+ {
+ set(type, 0, 0);
+ }
+ int dyncol_type() const { return m_type; }
+};
+
+
+class Load_data_param
+{
+protected:
+ CHARSET_INFO *m_charset; // Character set of the file
+ ulonglong m_fixed_length; // Sum of target field lengths for fixed format
+ bool m_is_fixed_length;
+ bool m_use_blobs;
+public:
+ Load_data_param(CHARSET_INFO *cs, bool is_fixed_length):
+ m_charset(cs),
+ m_fixed_length(0),
+ m_is_fixed_length(is_fixed_length),
+ m_use_blobs(false)
+ { }
+ bool add_outvar_field(THD *thd, const Field *field);
+ bool add_outvar_user_var(THD *thd);
+ CHARSET_INFO *charset() const { return m_charset; }
+ bool is_fixed_length() const { return m_is_fixed_length; }
+ bool use_blobs() const { return m_use_blobs; }
+};
+
+
+class Load_data_outvar
+{
+public:
+ virtual ~Load_data_outvar() {}
+ virtual bool load_data_set_null(THD *thd, const Load_data_param *param)= 0;
+ virtual bool load_data_set_value(THD *thd, const char *pos, uint length,
+ const Load_data_param *param)= 0;
+ virtual bool load_data_set_no_data(THD *thd, const Load_data_param *param)= 0;
+ virtual void load_data_print_for_log_event(THD *thd, class String *to) const= 0;
+ virtual bool load_data_add_outvar(THD *thd, Load_data_param *param) const= 0;
+ virtual uint load_data_fixed_length() const= 0;
+};
+
+
#endif /* STRUCTS_INCLUDED */
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 223015e81c2..2a793646c7f 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -1,7 +1,7 @@
/* Copyright (c) 2002, 2015, Oracle and/or its affiliates.
- Copyright (c) 2012, 2018, MariaDB
+ Copyright (c) 2012, 2018, MariaDB Corporation.
- This program is free software; you can redistribute it and/or modify
+ This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
@@ -62,6 +62,7 @@
#include "sql_repl.h"
#include "opt_range.h"
#include "rpl_parallel.h"
+#include <ssl_compat.h>
/*
The rule for this file: everything should be 'static'. When a sys_var
@@ -283,7 +284,7 @@ static Sys_var_long Sys_pfs_events_stages_history_size(
/**
Variable performance_schema_max_statement_classes.
The default number of statement classes is the sum of:
- - COM_END for all regular "statement/com/...",
+ - (COM_END - mariadb gap) for all regular "statement/com/...",
- 1 for "statement/com/new_packet", for unknown enum_server_command
- 1 for "statement/com/Error", for invalid enum_server_command
- SQLCOM_END for all regular "statement/sql/...",
@@ -295,7 +296,8 @@ static Sys_var_ulong Sys_pfs_max_statement_classes(
"Maximum number of statement instruments.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_statement_class_sizing),
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256),
- DEFAULT((ulong) SQLCOM_END + (ulong) COM_END + 4),
+ DEFAULT((ulong) SQLCOM_END +
+ (ulong) (COM_END -(COM_MDB_GAP_END - COM_MDB_GAP_BEG + 1)) + 4),
BLOCK_SIZE(1));
static Sys_var_long Sys_pfs_events_statements_history_long_size(
@@ -502,17 +504,29 @@ static bool binlog_format_check(sys_var *self, THD *thd, set_var *var)
/*
MariaDB Galera does not support STATEMENT or MIXED binlog format currently.
*/
- if (WSREP(thd) && var->save_result.ulonglong_value != BINLOG_FORMAT_ROW)
+ if ((WSREP(thd) || opt_support_flashback) &&
+ var->save_result.ulonglong_value != BINLOG_FORMAT_ROW)
{
// Push a warning to the error log.
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
- "MariaDB Galera does not support binlog format: %s",
+ "MariaDB Galera and flashback do not support binlog format: %s",
binlog_format_names[var->save_result.ulonglong_value]);
+ /*
+ We allow setting up binlog_format other than ROW for session scope when
+ wsrep/flashback is enabled. This is done for two reasons:
+ 1. User might want to run pt-table-checksum.
+ 2. SuperUser knows what they are doing :-)
+ For reference: MDEV-7322
+ */
if (var->type == OPT_GLOBAL)
{
- WSREP_ERROR("MariaDB Galera does not support binlog format: %s",
- binlog_format_names[var->save_result.ulonglong_value]);
+ if (WSREP(thd))
+ WSREP_ERROR("MariaDB Galera does not support binlog format: %s",
+ binlog_format_names[var->save_result.ulonglong_value]);
+ else
+ my_error(ER_FLASHBACK_NOT_SUPPORTED,MYF(0),"binlog_format",
+ binlog_format_names[var->save_result.ulonglong_value]);
return true;
}
}
@@ -530,7 +544,8 @@ static bool binlog_format_check(sys_var *self, THD *thd, set_var *var)
switching @@SESSION.binlog_format from MIXED to STATEMENT when there are
open temp tables and we are logging in row format.
*/
- if (thd->temporary_tables && var->type == OPT_SESSION &&
+ if (thd->has_thd_temporary_tables() &&
+ var->type == OPT_SESSION &&
var->save_result.ulonglong_value == BINLOG_FORMAT_STMT &&
((thd->variables.binlog_format == BINLOG_FORMAT_MIXED &&
thd->is_current_stmt_binlog_format_row()) ||
@@ -565,7 +580,7 @@ static Sys_var_enum Sys_binlog_format(
"UDFs) or the UUID() function; for those, row-based binary logging is "
"automatically used.",
SESSION_VAR(binlog_format), CMD_LINE(REQUIRED_ARG, OPT_BINLOG_FORMAT),
- binlog_format_names, DEFAULT(BINLOG_FORMAT_STMT),
+ binlog_format_names, DEFAULT(BINLOG_FORMAT_MIXED),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(binlog_format_check),
ON_UPDATE(fix_binlog_format_after_update));
@@ -712,6 +727,8 @@ static Sys_var_struct Sys_character_set_client(
offsetof(CHARSET_INFO, csname), DEFAULT(&default_charset_info),
NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_cs_client),
ON_UPDATE(fix_thd_charset));
+// exported pointer, used externally to check for changes to this variable
+export sys_var *Sys_character_set_client_ptr= &Sys_character_set_client;
static Sys_var_struct Sys_character_set_connection(
"character_set_connection", "The character set used for "
@@ -721,6 +738,8 @@ static Sys_var_struct Sys_character_set_connection(
offsetof(CHARSET_INFO, csname), DEFAULT(&default_charset_info),
NO_MUTEX_GUARD, IN_BINLOG, ON_CHECK(check_charset_not_null),
ON_UPDATE(fix_thd_charset));
+// exported pointer, used externally to check for changes to this variable
+export sys_var *Sys_character_set_connection_ptr= &Sys_character_set_connection;
static Sys_var_struct Sys_character_set_results(
"character_set_results", "The character set used for returning "
@@ -728,6 +747,8 @@ static Sys_var_struct Sys_character_set_results(
SESSION_VAR(character_set_results), NO_CMD_LINE,
offsetof(CHARSET_INFO, csname), DEFAULT(&default_charset_info),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_charset));
+// exported pointer, used externally to check for changes to this variable
+export sys_var *Sys_character_set_results_ptr= &Sys_character_set_results;
static Sys_var_struct Sys_character_set_filesystem(
"character_set_filesystem", "The filesystem character set",
@@ -1204,7 +1225,7 @@ static Sys_var_ulong Sys_lock_wait_timeout(
"lock_wait_timeout",
"Timeout in seconds to wait for a lock before returning an error.",
SESSION_VAR(lock_wait_timeout), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(1, LONG_TIMEOUT), DEFAULT(LONG_TIMEOUT), BLOCK_SIZE(1));
+ VALID_RANGE(1, LONG_TIMEOUT), DEFAULT(24 * 60 * 60), BLOCK_SIZE(1));
#ifdef HAVE_MLOCKALL
static Sys_var_mybool Sys_locked_in_memory(
@@ -1218,6 +1239,18 @@ static Sys_var_mybool Sys_log_bin(
"log_bin", "Whether the binary log is enabled",
READ_ONLY GLOBAL_VAR(opt_bin_log), NO_CMD_LINE, DEFAULT(FALSE));
+static Sys_var_mybool Sys_log_bin_compress(
+ "log_bin_compress", "Whether the binary log can be compressed",
+ GLOBAL_VAR(opt_bin_log_compress), CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+
+/* the min length is 10, which means that Begin/Commit/Rollback would never be compressed! */
+static Sys_var_uint Sys_log_bin_compress_min_len(
+ "log_bin_compress_min_len",
+ "Minimum length of sql statement(in statement mode) or record(in row mode)"
+ "that can be compressed.",
+ GLOBAL_VAR(opt_bin_log_compress_min_len),
+ CMD_LINE(OPT_ARG), VALID_RANGE(10, 1024), DEFAULT(256), BLOCK_SIZE(1));
+
static Sys_var_mybool Sys_trust_function_creators(
"log_bin_trust_function_creators",
"If set to FALSE (the default), then when --log-bin is used, creation "
@@ -1251,13 +1284,13 @@ static Sys_var_mybool Sys_log_slow_admin_statements(
"Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to "
"the slow log if it is open.",
GLOBAL_VAR(opt_log_slow_admin_statements),
- CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+ CMD_LINE(OPT_ARG), DEFAULT(TRUE));
static Sys_var_mybool Sys_log_slow_slave_statements(
"log_slow_slave_statements",
"Log slow statements executed by slave thread to the slow log if it is open.",
GLOBAL_VAR(opt_log_slow_slave_statements),
- CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+ CMD_LINE(OPT_ARG), DEFAULT(TRUE));
static Sys_var_ulong Sys_log_warnings(
"log_warnings",
@@ -1265,7 +1298,7 @@ static Sys_var_ulong Sys_log_warnings(
"Value can be between 0 and 11. Higher values mean more verbosity",
SESSION_VAR(log_warnings),
CMD_LINE(OPT_ARG, 'W'),
- VALID_RANGE(0, UINT_MAX), DEFAULT(1), BLOCK_SIZE(1));
+ VALID_RANGE(0, UINT_MAX), DEFAULT(2), BLOCK_SIZE(1));
static bool update_cached_long_query_time(sys_var *self, THD *thd,
enum_var_type type)
@@ -1282,8 +1315,8 @@ static bool update_cached_long_query_time(sys_var *self, THD *thd,
static Sys_var_double Sys_long_query_time(
"long_query_time",
"Log all queries that have taken more than long_query_time seconds "
- "to execute to file. The argument will be treated as a decimal value "
- "with microsecond precision",
+ "to execute to the slow query log file. The argument will be treated "
+ "as a decimal value with microsecond precision",
SESSION_VAR(long_query_time_double),
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, LONG_TIMEOUT), DEFAULT(10),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
@@ -1385,7 +1418,7 @@ static Sys_var_ulong Sys_max_allowed_packet(
"max_allowed_packet",
"Max packet length to send to or receive from the server",
SESSION_VAR(max_allowed_packet), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(1024, 1024*1024*1024), DEFAULT(4*1024*1024),
+ VALID_RANGE(1024, 1024*1024*1024), DEFAULT(16*1024*1024),
BLOCK_SIZE(1024), NO_MUTEX_GUARD, NOT_IN_BINLOG,
ON_CHECK(check_max_allowed_packet));
@@ -1496,7 +1529,7 @@ static Sys_var_ulonglong Sys_max_heap_table_size(
"max_heap_table_size",
"Don't allow creation of heap tables bigger than this",
SESSION_VAR(max_heap_table_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(16384, (ulonglong)~(intptr)0), DEFAULT(16*1024*1024),
+ VALID_RANGE(16384, SIZE_T_MAX), DEFAULT(16*1024*1024),
BLOCK_SIZE(1024));
static ulong mdl_locks_cache_size;
@@ -1513,16 +1546,11 @@ static Sys_var_ulong Sys_metadata_locks_hash_instances(
VALID_RANGE(1, 1024), DEFAULT(8),
BLOCK_SIZE(1));
-/*
- "pseudo_thread_id" variable used in the test suite to detect 32/64bit
- systems. If you change it to something else then ulong then fix the tests
- in mysql-test/include/have_32bit.inc and have_64bit.inc.
-*/
-static Sys_var_ulong Sys_pseudo_thread_id(
+static Sys_var_ulonglong Sys_pseudo_thread_id(
"pseudo_thread_id",
"This variable is for internal server use",
SESSION_ONLY(pseudo_thread_id),
- NO_CMD_LINE, VALID_RANGE(0, ULONG_MAX), DEFAULT(0),
+ NO_CMD_LINE, VALID_RANGE(0, ULONGLONG_MAX), DEFAULT(0),
BLOCK_SIZE(1), NO_MUTEX_GUARD, IN_BINLOG,
ON_CHECK(check_has_super));
@@ -1860,6 +1888,8 @@ static Sys_var_last_gtid Sys_last_gtid(
"or the empty string if none.",
READ_ONLY sys_var::ONLY_SESSION, NO_CMD_LINE);
+export sys_var *Sys_last_gtid_ptr= &Sys_last_gtid; // for check changing
+
uchar *
Sys_var_last_gtid::session_value_ptr(THD *thd, const LEX_STRING *base)
@@ -1870,8 +1900,9 @@ Sys_var_last_gtid::session_value_ptr(THD *thd, const LEX_STRING *base)
bool first= true;
str.length(0);
- if ((thd->last_commit_gtid.seq_no > 0 &&
- rpl_slave_state_tostring_helper(&str, &thd->last_commit_gtid, &first)) ||
+ rpl_gtid gtid= thd->get_last_commit_gtid();
+ if ((gtid.seq_no > 0 &&
+ rpl_slave_state_tostring_helper(&str, &gtid, &first)) ||
!(p= thd->strmake(str.ptr(), str.length())))
{
my_error(ER_OUT_OF_RESOURCES, MYF(0));
@@ -1913,6 +1944,15 @@ static Sys_var_ulong Sys_slave_parallel_threads(
NOT_IN_BINLOG, ON_CHECK(check_slave_parallel_threads),
ON_UPDATE(fix_slave_parallel_threads));
+/* Alias for @@slave_parallel_threads to match what MySQL 5.7 uses. */
+static Sys_var_ulong Sys_slave_parallel_workers(
+ "slave_parallel_workers",
+ "Alias for slave_parallel_threads",
+ GLOBAL_VAR(opt_slave_parallel_threads), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0,16383), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD,
+ NOT_IN_BINLOG, ON_CHECK(check_slave_parallel_threads),
+ ON_UPDATE(fix_slave_parallel_threads));
+
static bool
check_slave_domain_parallel_threads(sys_var *self, THD *thd, set_var *var)
@@ -1982,7 +2022,7 @@ Sys_var_slave_parallel_mode::global_update(THD *thd, set_var *var)
if (mi->rli.slave_running)
{
my_error(ER_SLAVE_MUST_STOP, MYF(0),
- mi->connection_name.length, mi->connection_name.str);
+ (int) mi->connection_name.length, mi->connection_name.str);
res= true;
}
else
@@ -2172,6 +2212,12 @@ static Sys_var_uint Sys_max_prepared_stmt_count(
VALID_RANGE(0, UINT_MAX32), DEFAULT(16382), BLOCK_SIZE(1),
&PLock_prepared_stmt_count);
+static Sys_var_ulong Sys_max_recursive_iterations(
+ "max_recursive_iterations",
+ "Maximum number of iterations when executing recursive queries",
+ SESSION_VAR(max_recursive_iterations), CMD_LINE(OPT_ARG),
+ VALID_RANGE(0, UINT_MAX), DEFAULT(UINT_MAX), BLOCK_SIZE(1));
+
static Sys_var_ulong Sys_max_sort_length(
"max_sort_length",
"The number of bytes to use when sorting BLOB or TEXT values (only "
@@ -2328,10 +2374,10 @@ export sys_var *Sys_old_passwords_ptr= &Sys_old_passwords; // for sql_acl.cc
static Sys_var_ulong Sys_open_files_limit(
"open_files_limit",
"If this is not 0, then mysqld will use this value to reserve file "
- "descriptors to use with setrlimit(). If this value is 0 then mysqld "
- "will reserve max_connections*5 or max_connections + table_cache*2 "
- "(whichever is larger) number of file descriptors",
- READ_ONLY GLOBAL_VAR(open_files_limit), CMD_LINE(REQUIRED_ARG),
+ "descriptors to use with setrlimit(). If this value is 0 or autoset "
+ "then mysqld will reserve max_connections*5 or max_connections + "
+ "table_cache*2 (whichever is larger) number of file descriptors",
+ AUTO_SET READ_ONLY GLOBAL_VAR(open_files_limit), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, OS_FILE_LIMIT), DEFAULT(0), BLOCK_SIZE(1));
/// @todo change to enum
@@ -2373,17 +2419,6 @@ static Sys_var_ulong Sys_optimizer_use_condition_selectivity(
SESSION_VAR(optimizer_use_condition_selectivity), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, 5), DEFAULT(1), BLOCK_SIZE(1));
-/** Warns about deprecated value 63 */
-static bool fix_optimizer_search_depth(sys_var *self, THD *thd,
- enum_var_type type)
-{
- SV *sv= type == OPT_GLOBAL ? &global_system_variables : &thd->variables;
- if (sv->optimizer_search_depth == MAX_TABLES+2)
- WARN_DEPRECATED(thd, 10, 2, "optimizer-search-depth=63",
- "a search depth less than 63");
- return false;
-}
-
static Sys_var_ulong Sys_optimizer_search_depth(
"optimizer_search_depth",
"Maximum depth of search performed by the query optimizer. Values "
@@ -2391,13 +2426,10 @@ static Sys_var_ulong Sys_optimizer_search_depth(
"query plans, but take longer to compile a query. Values smaller "
"than the number of tables in a relation result in faster "
"optimization, but may produce very bad query plans. If set to 0, "
- "the system will automatically pick a reasonable value; if set to "
- "63, the optimizer will switch to the original find_best search. "
- "NOTE: The value 63 and its associated behaviour is deprecated.",
+ "the system will automatically pick a reasonable value.",
SESSION_VAR(optimizer_search_depth), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, MAX_TABLES+2), DEFAULT(MAX_TABLES+1), BLOCK_SIZE(1),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
- ON_UPDATE(fix_optimizer_search_depth));
+ VALID_RANGE(0, MAX_TABLES+1), DEFAULT(MAX_TABLES+1), BLOCK_SIZE(1),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(0));
/* this is used in the sigsegv handler */
export const char *optimizer_switch_names[]=
@@ -2424,6 +2456,7 @@ export const char *optimizer_switch_names[]=
"extended_keys",
"exists_to_in",
"orderby_uses_equalities",
+ "condition_pushdown_for_derived",
"default",
NullS
};
@@ -2477,7 +2510,7 @@ static Sys_var_uint Sys_protocol_version(
"protocol_version",
"The version of the client/server protocol used by the MariaDB server",
READ_ONLY GLOBAL_VAR(protocol_version), CMD_LINE_HELP_ONLY,
- VALID_RANGE(0, ~0), DEFAULT(PROTOCOL_VERSION), BLOCK_SIZE(1));
+ VALID_RANGE(0, ~0U), DEFAULT(PROTOCOL_VERSION), BLOCK_SIZE(1));
static Sys_var_proxy_user Sys_proxy_user(
"proxy_user", "The proxy user account name used when logging in",
@@ -2599,6 +2632,16 @@ static Sys_var_ulong Sys_div_precincrement(
SESSION_VAR(div_precincrement), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, DECIMAL_MAX_SCALE), DEFAULT(4), BLOCK_SIZE(1));
+static Sys_var_uint Sys_eq_range_index_dive_limit(
+ "eq_range_index_dive_limit",
+ "The optimizer will use existing index statistics instead of "
+ "doing index dives for equality ranges if the number of equality "
+ "ranges for the index is larger than or equal to this number. "
+ "If set to 0, index dives are always used.",
+ SESSION_VAR(eq_range_index_dive_limit), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, UINT_MAX32), DEFAULT(0),
+ BLOCK_SIZE(1));
+
static Sys_var_ulong Sys_range_alloc_block_size(
"range_alloc_block_size",
"Allocation block size for storing ranges during optimization",
@@ -2763,7 +2806,7 @@ static Sys_var_enum Sys_thread_handling(
#ifdef HAVE_QUERY_CACHE
static bool fix_query_cache_size(sys_var *self, THD *thd, enum_var_type type)
{
- ulong new_cache_size= query_cache.resize(query_cache_size);
+ ulong new_cache_size= query_cache.resize((ulong)query_cache_size);
/*
Note: query_cache_size is a global variable reflecting the
requested cache size. See also query_cache_size_arg
@@ -2890,7 +2933,6 @@ static bool fix_server_id(sys_var *self, THD *thd, enum_var_type type)
{
if (type == OPT_GLOBAL)
{
- server_id_supplied = 1;
thd->variables.server_id= global_system_variables.server_id;
/*
Historically, server_id was a global variable that is exported to
@@ -2907,7 +2949,7 @@ static Sys_var_ulong Sys_server_id(
"Uniquely identifies the server instance in the community of "
"replication partners",
SESSION_VAR(server_id), CMD_LINE(REQUIRED_ARG, OPT_SERVER_ID),
- VALID_RANGE(0, UINT_MAX32), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD,
+ VALID_RANGE(1, UINT_MAX32), DEFAULT(1), BLOCK_SIZE(1), NO_MUTEX_GUARD,
NOT_IN_BINLOG, ON_CHECK(check_has_super), ON_UPDATE(fix_server_id));
static Sys_var_mybool Sys_slave_compressed_protocol(
@@ -3026,7 +3068,7 @@ static Sys_var_ulonglong Sys_sort_buffer(
VALID_RANGE(MIN_SORT_MEMORY, SIZE_T_MAX), DEFAULT(MAX_SORT_MEMORY),
BLOCK_SIZE(1));
-export ulonglong expand_sql_mode(ulonglong sql_mode)
+export sql_mode_t expand_sql_mode(sql_mode_t sql_mode)
{
if (sql_mode & MODE_ANSI)
{
@@ -3080,7 +3122,7 @@ export ulonglong expand_sql_mode(ulonglong sql_mode)
static bool check_sql_mode(sys_var *self, THD *thd, set_var *var)
{
var->save_result.ulonglong_value=
- expand_sql_mode(var->save_result.ulonglong_value);
+ (ulonglong) expand_sql_mode(var->save_result.ulonglong_value);
return false;
}
static bool fix_sql_mode(sys_var *self, THD *thd, enum_var_type type)
@@ -3117,7 +3159,8 @@ static const char *sql_mode_names[]=
"PAD_CHAR_TO_FULL_LENGTH",
0
};
-export bool sql_mode_string_representation(THD *thd, ulonglong sql_mode,
+
+export bool sql_mode_string_representation(THD *thd, sql_mode_t sql_mode,
LEX_STRING *ls)
{
set_to_string(thd, ls, sql_mode, sql_mode_names);
@@ -3133,7 +3176,9 @@ static Sys_var_set Sys_sql_mode(
"Sets the sql mode",
SESSION_VAR(sql_mode), CMD_LINE(REQUIRED_ARG),
sql_mode_names,
- DEFAULT(MODE_NO_ENGINE_SUBSTITUTION |
+ DEFAULT(MODE_STRICT_TRANS_TABLES |
+ MODE_ERROR_FOR_DIVISION_BY_ZERO |
+ MODE_NO_ENGINE_SUBSTITUTION |
MODE_NO_AUTO_CREATE_USER),
NO_MUTEX_GUARD, NOT_IN_BINLOG,
ON_CHECK(check_sql_mode), ON_UPDATE(fix_sql_mode));
@@ -3201,6 +3246,12 @@ static Sys_var_charptr Sys_ssl_crlpath(
READ_ONLY GLOBAL_VAR(opt_ssl_crlpath), SSL_OPT(OPT_SSL_CRLPATH),
IN_FS_CHARSET, DEFAULT(0));
+static Sys_var_mybool Sys_standard_compliant_cte(
+ "standard_compliant_cte",
+ "Allow only CTEs compliant to SQL standard",
+ SESSION_VAR(only_standard_compliant_cte), CMD_LINE(OPT_ARG),
+ DEFAULT(TRUE));
+
// why ENUM and not BOOL ?
static const char *updatable_views_with_limit_names[]= {"NO", "YES", 0};
@@ -3254,18 +3305,21 @@ static Sys_var_ulong Sys_table_cache_size(
BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_table_open_cache));
+static Sys_var_uint Sys_table_cache_instances(
+ "table_open_cache_instances", "Maximum number of table cache instances",
+ READ_ONLY GLOBAL_VAR(tc_instances), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(1, 64), DEFAULT(8), BLOCK_SIZE(1));
+
static Sys_var_ulong Sys_thread_cache_size(
"thread_cache_size",
- "How many threads we should keep in a cache for reuse",
+ "How many threads we should keep in a cache for reuse. These are freed after 5 minutes of idle time",
GLOBAL_VAR(thread_cache_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, 16384), DEFAULT(0), BLOCK_SIZE(1));
+ VALID_RANGE(0, 16384), DEFAULT(256), BLOCK_SIZE(1));
#ifdef HAVE_POOL_OF_THREADS
static bool fix_tp_max_threads(sys_var *, THD *, enum_var_type)
{
-#ifdef _WIN32
tp_set_max_threads(threadpool_max_threads);
-#endif
return false;
}
@@ -3278,8 +3332,6 @@ static bool fix_tp_min_threads(sys_var *, THD *, enum_var_type)
}
#endif
-
-#ifndef _WIN32
static bool check_threadpool_size(sys_var *self, THD *thd, set_var *var)
{
ulonglong v= var->save_result.ulonglong_value;
@@ -3304,7 +3356,6 @@ static bool fix_threadpool_stall_limit(sys_var*, THD*, enum_var_type)
tp_set_threadpool_stall_limit(threadpool_stall_limit);
return false;
}
-#endif
#ifdef _WIN32
static Sys_var_uint Sys_threadpool_min_threads(
@@ -3315,7 +3366,24 @@ static Sys_var_uint Sys_threadpool_min_threads(
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_tp_min_threads)
);
-#else
+
+static const char *threadpool_mode_names[]={ "windows", "generic", 0 };
+static Sys_var_enum Sys_threadpool_mode(
+ "thread_pool_mode",
+ "Chose implementation of the threadpool",
+ READ_ONLY GLOBAL_VAR(threadpool_mode), CMD_LINE(REQUIRED_ARG),
+ threadpool_mode_names, DEFAULT(TP_MODE_WINDOWS)
+ );
+#endif
+
+static const char *threadpool_priority_names[]={ "high", "low", "auto", 0 };
+static Sys_var_enum Sys_thread_pool_priority(
+ "thread_pool_priority",
+ "Threadpool priority. High priority connections usually start executing earlier than low priority."
+ "If priority set to 'auto', the the actual priority(low or high) is determined based on whether or not connection is inside transaction.",
+ SESSION_VAR(threadpool_priority), CMD_LINE(REQUIRED_ARG),
+ threadpool_priority_names, DEFAULT(TP_PRIORITY_AUTO));
+
static Sys_var_uint Sys_threadpool_idle_thread_timeout(
"thread_pool_idle_timeout",
"Timeout in seconds for an idle thread in the thread pool."
@@ -3350,15 +3418,22 @@ static Sys_var_uint Sys_threadpool_stall_limit(
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_threadpool_stall_limit)
);
-#endif /* !WIN32 */
+
static Sys_var_uint Sys_threadpool_max_threads(
"thread_pool_max_threads",
"Maximum allowed number of worker threads in the thread pool",
GLOBAL_VAR(threadpool_max_threads), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(1, 65536), DEFAULT(1000), BLOCK_SIZE(1),
+ VALID_RANGE(1, 65536), DEFAULT(65536), BLOCK_SIZE(1),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_tp_max_threads)
);
+
+static Sys_var_uint Sys_threadpool_threadpool_prio_kickup_timer(
+ "thread_pool_prio_kickup_timer",
+ "The number of milliseconds before a dequeued low-priority statement is moved to the high-priority queue",
+ GLOBAL_VAR(threadpool_prio_kickup_timer), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, UINT_MAX), DEFAULT(1000), BLOCK_SIZE(1)
+);
#endif /* HAVE_POOL_OF_THREADS */
/**
@@ -3410,6 +3485,20 @@ bool Sys_var_tx_read_only::session_update(THD *thd, set_var *var)
{
// @see Sys_var_tx_isolation::session_update() above for the rules.
thd->tx_read_only= var->save_result.ulonglong_value;
+
+#ifndef EMBEDDED_LIBRARY
+ if (thd->variables.session_track_transaction_info > TX_TRACK_NONE)
+ {
+ Transaction_state_tracker *tst= (Transaction_state_tracker *)
+ thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER);
+
+ if (var->type == OPT_DEFAULT)
+ tst->set_read_flags(thd,
+ thd->tx_read_only ? TX_READ_ONLY : TX_READ_WRITE);
+ else
+ tst->set_read_flags(thd, TX_READ_INHERIT);
+ }
+#endif //EMBEDDED_LIBRARY
}
return false;
}
@@ -3425,12 +3514,30 @@ static Sys_var_tx_read_only Sys_tx_read_only(
static Sys_var_ulonglong Sys_tmp_table_size(
"tmp_table_size",
+ "Alias for tmp_memory_table_size. "
"If an internal in-memory temporary table exceeds this size, MariaDB "
"will automatically convert it to an on-disk MyISAM or Aria table.",
- SESSION_VAR(tmp_table_size), CMD_LINE(REQUIRED_ARG),
+ SESSION_VAR(tmp_memory_table_size), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(1024, (ulonglong)~(intptr)0), DEFAULT(16*1024*1024),
+ BLOCK_SIZE(1));
+
+static Sys_var_ulonglong Sys_tmp_memory_table_size(
+ "tmp_memory_table_size",
+ "If an internal in-memory temporary table exceeds this size, MariaDB "
+ "will automatically convert it to an on-disk MyISAM or Aria table. "
+ "Same as tmp_table_size.",
+ SESSION_VAR(tmp_memory_table_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1024, (ulonglong)~(intptr)0), DEFAULT(16*1024*1024),
BLOCK_SIZE(1));
+static Sys_var_ulonglong Sys_tmp_disk_table_size(
+ "tmp_disk_table_size",
+ "Max size for data for an internal temporary on-disk MyISAM or Aria table.",
+ SESSION_VAR(tmp_disk_table_size), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(1024, (ulonglong)~(intptr)0),
+ DEFAULT((ulonglong)~(intptr)0),
+ BLOCK_SIZE(1));
+
static Sys_var_mybool Sys_timed_mutexes(
"timed_mutexes",
"Specify whether to time mutexes. Deprecated, has no effect.",
@@ -3438,7 +3545,6 @@ static Sys_var_mybool Sys_timed_mutexes(
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), ON_UPDATE(NULL),
DEPRECATED(""));
-static char *server_version_ptr;
static Sys_var_charptr Sys_version(
"version", "Server version number. It may also include a suffix "
"with configuration or build information. -debug indicates "
@@ -3473,21 +3579,32 @@ static Sys_var_charptr Sys_version_compile_os(
CMD_LINE_HELP_ONLY,
IN_SYSTEM_CHARSET, DEFAULT(SYSTEM_TYPE));
+static char *guess_malloc_library()
+{
+ if (strcmp(MALLOC_LIBRARY, "system") == 0)
+ {
+#ifdef HAVE_DLOPEN
+ typedef int (*mallctl_type)(const char*, void*, size_t*, void*, size_t);
+ mallctl_type mallctl_func;
+ mallctl_func= (mallctl_type)dlsym(RTLD_DEFAULT, "mallctl");
+ if (mallctl_func)
+ {
+ static char buf[128];
+ char *ver;
+ size_t len = sizeof(ver);
+ mallctl_func("version", &ver, &len, NULL, 0);
+ strxnmov(buf, sizeof(buf)-1, "jemalloc ", ver, NULL);
+ return buf;
+ }
+#endif
+ }
+ return const_cast<char*>(MALLOC_LIBRARY);
+}
static char *malloc_library;
static Sys_var_charptr Sys_malloc_library(
"version_malloc_library", "Version of the used malloc library",
READ_ONLY GLOBAL_VAR(malloc_library), CMD_LINE_HELP_ONLY,
- IN_SYSTEM_CHARSET, DEFAULT(MALLOC_LIBRARY));
-
-#ifdef HAVE_YASSL
-#include <openssl/ssl.h>
-#define SSL_LIBRARY "YaSSL " YASSL_VERSION
-#elif HAVE_OPENSSL
-#include <openssl/crypto.h>
-#define SSL_LIBRARY SSLeay_version(SSLEAY_VERSION)
-#else
-#error No SSL?
-#endif
+ IN_SYSTEM_CHARSET, DEFAULT(guess_malloc_library()));
static char *ssl_library;
static Sys_var_charptr Sys_ssl_library(
@@ -3595,7 +3712,8 @@ static bool fix_autocommit(sys_var *self, THD *thd, enum_var_type type)
{
thd->variables.option_bits&= ~OPTION_AUTOCOMMIT;
thd->mdl_context.release_transactional_locks();
- WSREP_DEBUG("autocommit, MDL TRX lock released: %lu", thd->thread_id);
+ WSREP_DEBUG("autocommit, MDL TRX lock released: %lld",
+ (longlong) thd->thread_id);
return true;
}
/*
@@ -3794,6 +3912,12 @@ static Sys_var_bit Sys_unique_checks(
REVERSE(OPTION_RELAXED_UNIQUE_CHECKS),
DEFAULT(TRUE), NO_MUTEX_GUARD, IN_BINLOG);
+static Sys_var_bit Sys_no_check_constraint(
+ "check_constraint_checks", "check_constraint_checks",
+ SESSION_VAR(option_bits), NO_CMD_LINE,
+ REVERSE(OPTION_NO_CHECK_CONSTRAINT_CHECKS),
+ DEFAULT(TRUE), NO_MUTEX_GUARD, IN_BINLOG);
+
#ifdef ENABLED_PROFILING
static bool update_profiling(sys_var *self, THD *thd, enum_var_type type)
{
@@ -4016,9 +4140,9 @@ static Sys_var_ulong Sys_default_week_format(
static Sys_var_ulonglong Sys_group_concat_max_len(
"group_concat_max_len",
- "The maximum length of the result of function GROUP_CONCAT()",
+ "The maximum length of the result of function GROUP_CONCAT()",
SESSION_VAR(group_concat_max_len), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(4, SIZE_T_MAX), DEFAULT(1024), BLOCK_SIZE(1));
+ VALID_RANGE(4, SIZE_T_MAX), DEFAULT(1024*1024), BLOCK_SIZE(1));
static char *glob_hostname_ptr;
static Sys_var_charptr Sys_hostname(
@@ -4414,7 +4538,7 @@ bool Sys_var_rpl_filter::global_update(THD *thd, set_var *var)
if (mi->rli.slave_running)
{
my_error(ER_SLAVE_MUST_STOP, MYF(0),
- mi->connection_name.length,
+ (int) mi->connection_name.length,
mi->connection_name.str);
result= true;
}
@@ -4617,7 +4741,7 @@ static bool update_slave_skip_counter(sys_var *self, THD *thd, Master_info *mi)
{
if (mi->rli.slave_running)
{
- my_error(ER_SLAVE_MUST_STOP, MYF(0), mi->connection_name.length,
+ my_error(ER_SLAVE_MUST_STOP, MYF(0), (int) mi->connection_name.length,
mi->connection_name.str);
return true;
}
@@ -4658,7 +4782,7 @@ static Sys_var_multi_source_ulonglong Sys_slave_skip_counter(
static bool update_max_relay_log_size(sys_var *self, THD *thd, Master_info *mi)
{
mi->rli.max_relay_log_size= thd->variables.max_relay_log_size;
- mi->rli.relay_log.set_max_size(mi->rli.max_relay_log_size);
+ mi->rli.relay_log.set_max_size((ulong)mi->rli.max_relay_log_size);
return false;
}
@@ -4678,6 +4802,12 @@ static Sys_var_charptr Sys_slave_skip_errors(
READ_ONLY GLOBAL_VAR(opt_slave_skip_errors), CMD_LINE(REQUIRED_ARG),
IN_SYSTEM_CHARSET, DEFAULT(0));
+static Sys_var_ulonglong Sys_read_binlog_speed_limit(
+ "read_binlog_speed_limit", "Maximum speed(KB/s) to read binlog from"
+ " master (0 = no limit)",
+ GLOBAL_VAR(opt_read_binlog_speed_limit), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, ULONG_MAX), DEFAULT(0), BLOCK_SIZE(1));
+
static Sys_var_ulonglong Sys_relay_log_space_limit(
"relay_log_space_limit", "Maximum space to use for all relay logs",
READ_ONLY GLOBAL_VAR(relay_log_space_limit), CMD_LINE(REQUIRED_ARG),
@@ -4757,8 +4887,7 @@ static bool check_locale(sys_var *self, THD *thd, set_var *var)
mysql_mutex_lock(&LOCK_error_messages);
res= (!locale->errmsgs->errmsgs &&
read_texts(ERRMSG_FILE, locale->errmsgs->language,
- &locale->errmsgs->errmsgs,
- ER_ERROR_LAST - ER_ERROR_FIRST + 1));
+ &locale->errmsgs->errmsgs));
mysql_mutex_unlock(&LOCK_error_messages);
if (res)
{
@@ -5297,7 +5426,7 @@ int default_regex_flags_pcre(const THD *thd)
int i, res;
for (i= res= 0; default_regex_flags_to_pcre[i]; i++)
{
- if (src & (1 << i))
+ if (src & (1ULL << i))
res|= default_regex_flags_to_pcre[i];
}
return res;
@@ -5343,7 +5472,7 @@ static Sys_var_ulong Sys_rowid_merge_buff_size(
"rowid_merge_buff_size",
"The size of the buffers used [NOT] IN evaluation via partial matching",
SESSION_VAR(rowid_merge_buff_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, ((ulonglong)~(intptr)0)/2), DEFAULT(8*1024*1024),
+ VALID_RANGE(0, LONG_MAX), DEFAULT(8*1024*1024),
BLOCK_SIZE(1));
static Sys_var_mybool Sys_userstat(
@@ -5358,7 +5487,7 @@ static Sys_var_mybool Sys_binlog_annotate_row_events(
"Tells the master to annotate RBR events with the statement that "
"caused these events",
SESSION_VAR(binlog_annotate_row_events), CMD_LINE(OPT_ARG),
- DEFAULT(FALSE));
+ DEFAULT(TRUE));
#ifdef HAVE_REPLICATION
static Sys_var_mybool Sys_replicate_annotate_row_events(
@@ -5366,7 +5495,7 @@ static Sys_var_mybool Sys_replicate_annotate_row_events(
"Tells the slave to write annotate rows events received from the master "
"to its own binary log. Ignored if log_slave_updates is not set",
READ_ONLY GLOBAL_VAR(opt_replicate_annotate_row_events),
- CMD_LINE(OPT_ARG), DEFAULT(0));
+ CMD_LINE(OPT_ARG), DEFAULT(TRUE));
#endif
static Sys_var_ulonglong Sys_join_buffer_space_limit(
@@ -5569,3 +5698,78 @@ static Sys_var_ulonglong Sys_max_thread_mem(
"session variable MEM_USED", SESSION_VAR(max_mem_used),
CMD_LINE(REQUIRED_ARG), VALID_RANGE(8192, ULONGLONG_MAX),
DEFAULT(LONGLONG_MAX), BLOCK_SIZE(1));
+
+#ifndef EMBEDDED_LIBRARY
+
+static Sys_var_sesvartrack Sys_track_session_sys_vars(
+ "session_track_system_variables",
+ "Track changes in registered system variables. "
+ "For compatibility with MySQL defaults this variable should be set to "
+ "\"autocommit, character_set_client, character_set_connection, "
+ "character_set_results, time_zone\"",
+ CMD_LINE(REQUIRED_ARG), IN_SYSTEM_CHARSET,
+ DEFAULT(""),
+ NO_MUTEX_GUARD);
+
+static bool update_session_track_schema(sys_var *self, THD *thd,
+ enum_var_type type)
+{
+ DBUG_ENTER("update_session_track_schema");
+ DBUG_RETURN(thd->session_tracker.get_tracker(CURRENT_SCHEMA_TRACKER)->
+ update(thd, NULL));
+}
+
+static Sys_var_mybool Sys_session_track_schema(
+ "session_track_schema",
+ "Track changes to the default schema.",
+ SESSION_VAR(session_track_schema),
+ CMD_LINE(OPT_ARG), DEFAULT(TRUE),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(0),
+ ON_UPDATE(update_session_track_schema));
+
+
+static bool update_session_track_tx_info(sys_var *self, THD *thd,
+ enum_var_type type)
+{
+ DBUG_ENTER("update_session_track_tx_info");
+ DBUG_RETURN(thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER)->
+ update(thd, NULL));
+}
+
+static const char *session_track_transaction_info_names[]=
+ { "OFF", "STATE", "CHARACTERISTICS", NullS };
+
+static Sys_var_enum Sys_session_track_transaction_info(
+ "session_track_transaction_info",
+ "Track changes to the transaction attributes. OFF to disable; "
+ "STATE to track just transaction state (Is there an active transaction? "
+ "Does it have any data? etc.); CHARACTERISTICS to track transaction "
+ "state and report all statements needed to start a transaction with"
+ "the same characteristics (isolation level, read only/read write,"
+ "snapshot - but not any work done / data modified within the "
+ "transaction).",
+ SESSION_VAR(session_track_transaction_info),
+ CMD_LINE(REQUIRED_ARG), session_track_transaction_info_names,
+ DEFAULT(0), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
+ ON_UPDATE(update_session_track_tx_info));
+
+
+static bool update_session_track_state_change(sys_var *self, THD *thd,
+ enum_var_type type)
+{
+ DBUG_ENTER("update_session_track_state_change");
+ DBUG_RETURN(thd->session_tracker.get_tracker(SESSION_STATE_CHANGE_TRACKER)->
+ update(thd, NULL));
+}
+
+static Sys_var_mybool Sys_session_track_state_change(
+ "session_track_state_change",
+ "Track changes to the session state.",
+ SESSION_VAR(session_track_state_change),
+ CMD_LINE(OPT_ARG), DEFAULT(FALSE),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(0),
+ ON_UPDATE(update_session_track_state_change));
+
+#endif //EMBEDDED_LIBRARY
diff --git a/sql/sys_vars.ic b/sql/sys_vars.ic
index cbc10c85351..ffd08608e19 100644
--- a/sql/sys_vars.ic
+++ b/sql/sys_vars.ic
@@ -142,9 +142,10 @@ public:
option.min_value= min_val;
option.max_value= max_val;
option.block_size= block_size;
- option.u_max_value= (uchar**)max_var_ptr();
- if (max_var_ptr())
- *max_var_ptr()= max_val;
+ if ((option.u_max_value= (uchar**) max_var_ptr()))
+ {
+ *((T*) option.u_max_value)= max_val;
+ }
global_var(T)= def_val;
SYSVAR_ASSERT(size == sizeof(T));
@@ -176,8 +177,8 @@ public:
var->save_result.ulonglong_value=
getopt_ull_limit_value(uv, &option, &unused);
- if (max_var_ptr() && (T)var->save_result.ulonglong_value > *max_var_ptr())
- var->save_result.ulonglong_value= *max_var_ptr();
+ if (max_var_ptr() && (T)var->save_result.ulonglong_value > get_max_var())
+ var->save_result.ulonglong_value= get_max_var();
fixed= fixed || var->save_result.ulonglong_value != uv;
}
@@ -193,8 +194,8 @@ public:
var->save_result.longlong_value=
getopt_ll_limit_value(v, &option, &unused);
- if (max_var_ptr() && (T)var->save_result.longlong_value > *max_var_ptr())
- var->save_result.longlong_value= *max_var_ptr();
+ if (max_var_ptr() && (T)var->save_result.longlong_value > get_max_var())
+ var->save_result.longlong_value= get_max_var();
fixed= fixed || var->save_result.longlong_value != v;
}
@@ -216,11 +217,7 @@ public:
void global_save_default(THD *thd, set_var *var)
{ var->save_result.ulonglong_value= option.def_value; }
private:
- T *max_var_ptr()
- {
- return scope() == SESSION ? (T*)(((uchar*)&max_system_variables) + offset)
- : 0;
- }
+ T get_max_var() { return *((T*) max_var_ptr()); }
uchar *default_value_ptr(THD *thd) { return (uchar*) &option.def_value; }
};
@@ -234,25 +231,25 @@ typedef Sys_var_integer<long, GET_LONG, SHOW_SLONG> Sys_var_long;
template<> uchar *Sys_var_int::default_value_ptr(THD *thd)
{
- thd->sys_var_tmp.int_value= option.def_value;
+ thd->sys_var_tmp.int_value= (int)option.def_value;
return (uchar*) &thd->sys_var_tmp.int_value;
}
template<> uchar *Sys_var_uint::default_value_ptr(THD *thd)
{
- thd->sys_var_tmp.uint_value= option.def_value;
+ thd->sys_var_tmp.uint_value= (uint)option.def_value;
return (uchar*) &thd->sys_var_tmp.uint_value;
}
template<> uchar *Sys_var_long::default_value_ptr(THD *thd)
{
- thd->sys_var_tmp.long_value= option.def_value;
+ thd->sys_var_tmp.long_value= (long)option.def_value;
return (uchar*) &thd->sys_var_tmp.long_value;
}
template<> uchar *Sys_var_ulong::default_value_ptr(THD *thd)
{
- thd->sys_var_tmp.ulong_value= option.def_value;
+ thd->sys_var_tmp.ulong_value= (ulong)option.def_value;
return (uchar*) &thd->sys_var_tmp.ulong_value;
}
@@ -264,6 +261,9 @@ class Sys_var_typelib: public sys_var
{
protected:
TYPELIB typelib;
+ virtual bool check_maximum(THD *thd, set_var *var,
+ const char *c_val, longlong i_val)
+ { return FALSE; }
public:
Sys_var_typelib(const char *name_arg,
const char *comment, int flag_args, ptrdiff_t off,
@@ -299,17 +299,14 @@ public:
return true;
else
var->save_result.ulonglong_value--;
- }
- else
- {
- longlong tmp=var->value->val_int();
- if (tmp < 0 || tmp >= typelib.count)
- return true;
- else
- var->save_result.ulonglong_value= tmp;
+ return check_maximum(thd, var, res->ptr(), 0);
}
- return false;
+ longlong tmp=var->value->val_int();
+ if (tmp < 0 || tmp >= typelib.count)
+ return true;
+ var->save_result.ulonglong_value= tmp;
+ return check_maximum(thd, var, 0, tmp);
}
};
@@ -344,10 +341,28 @@ public:
substitute)
{
option.var_type|= GET_ENUM;
+ option.min_value= 0;
+ option.max_value= ULONG_MAX;
global_var(ulong)= def_val;
+ if ((option.u_max_value= (uchar**)max_var_ptr()))
+ {
+ *((ulong *) option.u_max_value)= ULONG_MAX;
+ }
SYSVAR_ASSERT(def_val < typelib.count);
SYSVAR_ASSERT(size == sizeof(ulong));
}
+ bool check_maximum(THD *thd, set_var *var,
+ const char *c_val, longlong i_val)
+ {
+ if (!max_var_ptr() ||
+ var->save_result.ulonglong_value <= get_max_var())
+ return FALSE;
+ var->save_result.ulonglong_value= get_max_var();
+
+ return c_val ? throw_bounds_warning(thd, name.str, c_val) :
+ throw_bounds_warning(thd, name.str, TRUE,
+ var->value->unsigned_flag, i_val);
+ }
bool session_update(THD *thd, set_var *var)
{
session_var(thd, ulong)= static_cast<ulong>(var->save_result.ulonglong_value);
@@ -369,7 +384,9 @@ public:
uchar *global_value_ptr(THD *thd, const LEX_STRING *base)
{ return valptr(thd, global_var(ulong)); }
uchar *default_value_ptr(THD *thd)
- { return valptr(thd, option.def_value); }
+ { return valptr(thd, (ulong)option.def_value); }
+
+ ulong get_max_var() { return *((ulong *) max_var_ptr()); }
};
/**
@@ -416,7 +433,7 @@ public:
{ var->save_result.ulonglong_value= option.def_value; }
uchar *default_value_ptr(THD *thd)
{
- thd->sys_var_tmp.my_bool_value= option.def_value;
+ thd->sys_var_tmp.my_bool_value=(my_bool) option.def_value;
return (uchar*) &thd->sys_var_tmp.my_bool_value;
}
};
@@ -437,10 +454,10 @@ public:
does not destroy individual members of SV, there's no way to free
allocated string variables for every thread.
*/
-class Sys_var_charptr: public sys_var
+class Sys_var_charptr_base: public sys_var
{
public:
- Sys_var_charptr(const char *name_arg,
+ Sys_var_charptr_base(const char *name_arg,
const char *comment, int flag_args, ptrdiff_t off, size_t size,
CMD_LINE getopt,
enum charset_enum is_os_charset_arg,
@@ -462,8 +479,6 @@ public:
*/
option.var_type|= (flags & ALLOCATED) ? GET_STR_ALLOC : GET_STR;
global_var(const char*)= def_val;
- SYSVAR_ASSERT(scope() == GLOBAL);
- SYSVAR_ASSERT(size == sizeof(char *));
}
void cleanup()
{
@@ -502,31 +517,35 @@ public:
}
bool do_check(THD *thd, set_var *var)
{ return do_string_check(thd, var, charset(thd)); }
- bool session_update(THD *thd, set_var *var)
- {
- DBUG_ASSERT(FALSE);
- return true;
- }
- bool global_update(THD *thd, set_var *var)
+ bool session_update(THD *thd, set_var *var)= 0;
+ char *global_update_prepare(THD *thd, set_var *var)
{
char *new_val, *ptr= var->save_result.string_value.str;
size_t len=var->save_result.string_value.length;
if (ptr)
{
new_val= (char*)my_memdup(ptr, len+1, MYF(MY_WME));
- if (!new_val) return true;
+ if (!new_val) return 0;
new_val[len]=0;
}
else
new_val= 0;
+ return new_val;
+ }
+ void global_update_finish(char *new_val)
+ {
if (flags & ALLOCATED)
my_free(global_var(char*));
flags|= ALLOCATED;
global_var(char*)= new_val;
- return false;
}
- void session_save_default(THD *thd, set_var *var)
- { DBUG_ASSERT(FALSE); }
+ bool global_update(THD *thd, set_var *var)
+ {
+ char *new_val= global_update_prepare(thd, var);
+ global_update_finish(new_val);
+ return (new_val == 0 && var->save_result.string_value.str != 0);
+ }
+ void session_save_default(THD *thd, set_var *var)= 0;
void global_save_default(THD *thd, set_var *var)
{
char *ptr= (char*)(intptr)option.def_value;
@@ -535,6 +554,104 @@ public:
}
};
+class Sys_var_charptr: public Sys_var_charptr_base
+{
+public:
+ Sys_var_charptr(const char *name_arg,
+ const char *comment, int flag_args, ptrdiff_t off, size_t size,
+ CMD_LINE getopt,
+ enum charset_enum is_os_charset_arg,
+ const char *def_val, PolyLock *lock=0,
+ enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG,
+ on_check_function on_check_func=0,
+ on_update_function on_update_func=0,
+ const char *substitute=0) :
+ Sys_var_charptr_base(name_arg, comment, flag_args, off, size, getopt,
+ is_os_charset_arg, def_val, lock, binlog_status_arg,
+ on_check_func, on_update_func, substitute)
+ {
+ SYSVAR_ASSERT(scope() == GLOBAL);
+ SYSVAR_ASSERT(size == sizeof(char *));
+ }
+
+ bool session_update(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(FALSE);
+ return true;
+ }
+ void session_save_default(THD *thd, set_var *var)
+ { DBUG_ASSERT(FALSE); }
+};
+
+#ifndef EMBEDDED_LIBRARY
+class Sys_var_sesvartrack: public Sys_var_charptr_base
+{
+public:
+ Sys_var_sesvartrack(const char *name_arg,
+ const char *comment,
+ CMD_LINE getopt,
+ enum charset_enum is_os_charset_arg,
+ const char *def_val, PolyLock *lock) :
+ Sys_var_charptr_base(name_arg, comment,
+ SESSION_VAR(session_track_system_variables),
+ getopt, is_os_charset_arg, def_val, lock,
+ VARIABLE_NOT_IN_BINLOG, 0, 0, 0)
+ {}
+ bool do_check(THD *thd, set_var *var)
+ {
+ if (Sys_var_charptr_base::do_check(thd, var) ||
+ sysvartrack_validate_value(thd, var->save_result.string_value.str,
+ var->save_result.string_value.length))
+ return TRUE;
+ return FALSE;
+ }
+ bool global_update(THD *thd, set_var *var)
+ {
+ char *new_val= global_update_prepare(thd, var);
+ if (new_val)
+ {
+ if (sysvartrack_reprint_value(thd, new_val,
+ var->save_result.string_value.length))
+ new_val= 0;
+ }
+ global_update_finish(new_val);
+ return (new_val == 0 && var->save_result.string_value.str != 0);
+ }
+ bool session_update(THD *thd, set_var *var)
+ {
+ return sysvartrack_update(thd, var);
+ }
+ void session_save_default(THD *thd, set_var *var)
+ {
+ var->save_result.string_value.str= global_var(char*);
+ var->save_result.string_value.length=
+ strlen(var->save_result.string_value.str);
+ /* parse and feel list with default values */
+ if (thd)
+ {
+ bool res=
+ sysvartrack_validate_value(thd,
+ var->save_result.string_value.str,
+ var->save_result.string_value.length);
+ DBUG_ASSERT(res == 0);
+ }
+ }
+ uchar *session_value_ptr(THD *thd, const LEX_STRING *base)
+ {
+ DBUG_ASSERT(thd != NULL);
+ size_t len= sysvartrack_value_len(thd);
+ char *res= (char *)thd->alloc(len + sizeof(char *));
+ if (res)
+ {
+ char *buf= res + sizeof(char *);
+ *((char**) res)= buf;
+ sysvartrack_value_construct(thd, buf, len);
+ }
+ return (uchar *)res;
+ }
+};
+#endif //EMBEDDED_LIBRARY
+
class Sys_var_proxy_user: public sys_var
{
@@ -1234,12 +1351,30 @@ public:
substitute)
{
option.var_type|= GET_SET;
+ option.min_value= 0;
+ option.max_value= ~0ULL;
global_var(ulonglong)= def_val;
+ if ((option.u_max_value= (uchar**)max_var_ptr()))
+ {
+ *((ulonglong*) option.u_max_value)= ~0ULL;
+ }
SYSVAR_ASSERT(typelib.count > 0);
SYSVAR_ASSERT(typelib.count <= 64);
SYSVAR_ASSERT(def_val <= my_set_bits(typelib.count));
SYSVAR_ASSERT(size == sizeof(ulonglong));
}
+ bool check_maximum(THD *thd, set_var *var,
+ const char *c_val, longlong i_val)
+ {
+ if (!max_var_ptr() ||
+ (var->save_result.ulonglong_value & ~(get_max_var())) == 0)
+ return FALSE;
+ var->save_result.ulonglong_value&= get_max_var();
+
+ return c_val ? throw_bounds_warning(thd, name.str, c_val) :
+ throw_bounds_warning(thd, name.str, TRUE,
+ var->value->unsigned_flag, i_val);
+ }
bool do_check(THD *thd, set_var *var)
{
char buff[STRING_BUFFER_USUAL_SIZE];
@@ -1247,41 +1382,37 @@ public:
if (var->value->result_type() == STRING_RESULT)
{
+ char *error;
+ uint error_len;
+ bool not_used;
+
if (!(res=var->value->val_str_ascii(&str)))
return true;
- else
- {
- char *error;
- uint error_len;
- bool not_used;
- var->save_result.ulonglong_value=
- find_set(&typelib, res->ptr(), res->length(), NULL,
- &error, &error_len, &not_used);
- /*
- note, we only issue an error if error_len > 0.
- That is even while empty (zero-length) values are considered
- errors by find_set(), these errors are ignored here
- */
- if (error_len)
- {
- ErrConvString err(error, error_len, res->charset());
- my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name.str, err.ptr());
- return true;
- }
- }
- }
- else
- {
- longlong tmp=var->value->val_int();
- if ((tmp < 0 && ! var->value->unsigned_flag)
- || (ulonglong)tmp > my_set_bits(typelib.count))
+ var->save_result.ulonglong_value=
+ find_set(&typelib, res->ptr(), res->length(), NULL,
+ &error, &error_len, &not_used);
+ /*
+ note, we only issue an error if error_len > 0.
+ That is even while empty (zero-length) values are considered
+ errors by find_set(), these errors are ignored here
+ */
+ if (error_len)
+ {
+ ErrConvString err(error, error_len, res->charset());
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name.str, err.ptr());
return true;
- else
- var->save_result.ulonglong_value= tmp;
+ }
+ return check_maximum(thd, var, res->ptr(), 0);
}
- return false;
+ longlong tmp=var->value->val_int();
+ if ((tmp < 0 && ! var->value->unsigned_flag)
+ || (ulonglong)tmp > my_set_bits(typelib.count))
+ return true;
+
+ var->save_result.ulonglong_value= tmp;
+ return check_maximum(thd, var, 0, tmp);
}
bool session_update(THD *thd, set_var *var)
{
@@ -1305,6 +1436,8 @@ public:
{ return valptr(thd, global_var(ulonglong)); }
uchar *default_value_ptr(THD *thd)
{ return valptr(thd, option.def_value); }
+
+ ulonglong get_max_var() { return *((ulonglong*) max_var_ptr()); }
};
/**
@@ -1434,6 +1567,9 @@ public:
};
#if defined(ENABLED_DEBUG_SYNC)
+
+#include "debug_sync.h"
+
/**
The class for @@debug_sync session-only variable
*/
@@ -1462,14 +1598,19 @@ public:
String str(buff, sizeof(buff), system_charset_info), *res;
if (!(res=var->value->val_str(&str)))
- var->save_result.string_value.str= const_cast<char*>("");
+ var->save_result.string_value= empty_lex_str;
else
- var->save_result.string_value.str= thd->strmake(res->ptr(), res->length());
+ {
+ if (!thd->make_lex_string(&var->save_result.string_value,
+ res->ptr(), res->length()))
+ return true;
+ }
return false;
}
bool session_update(THD *thd, set_var *var)
{
- return debug_sync_update(thd, var->save_result.string_value.str);
+ return debug_sync_update(thd, var->save_result.string_value.str,
+ var->save_result.string_value.length);
}
bool global_update(THD *thd, set_var *var)
{
@@ -1487,7 +1628,6 @@ public:
}
uchar *session_value_ptr(THD *thd, const LEX_STRING *base)
{
- extern uchar *debug_sync_value_ptr(THD *thd);
return debug_sync_value_ptr(thd);
}
uchar *global_value_ptr(THD *thd, const LEX_STRING *base)
@@ -1970,7 +2110,47 @@ public:
if (var->type == OPT_SESSION && Sys_var_enum::session_update(thd, var))
return TRUE;
if (var->type == OPT_DEFAULT || !thd->in_active_multi_stmt_transaction())
+ {
+#ifndef EMBEDDED_LIBRARY
+ Transaction_state_tracker *tst= NULL;
+
+ if (thd->variables.session_track_transaction_info > TX_TRACK_NONE)
+ tst= (Transaction_state_tracker *)
+ thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER);
+#endif //EMBEDDED_LIBRARY
+
thd->tx_isolation= (enum_tx_isolation) var->save_result.ulonglong_value;
+
+#ifndef EMBEDDED_LIBRARY
+ if (var->type == OPT_DEFAULT)
+ {
+ enum enum_tx_isol_level l;
+ switch (thd->tx_isolation) {
+ case ISO_READ_UNCOMMITTED:
+ l= TX_ISOL_UNCOMMITTED;
+ break;
+ case ISO_READ_COMMITTED:
+ l= TX_ISOL_COMMITTED;
+ break;
+ case ISO_REPEATABLE_READ:
+ l= TX_ISOL_REPEATABLE;
+ break;
+ case ISO_SERIALIZABLE:
+ l= TX_ISOL_SERIALIZABLE;
+ break;
+ default:
+ DBUG_ASSERT(0);
+ return TRUE;
+ }
+ if (tst)
+ tst->set_isol_level(thd, l);
+ }
+ else if (tst)
+ {
+ tst->set_isol_level(thd, TX_ISOL_INHERIT);
+ }
+#endif //EMBEDDED_LIBRARY
+ }
return FALSE;
}
};
diff --git a/sql/sys_vars_shared.h b/sql/sys_vars_shared.h
index ff050f63064..dfc020a187c 100644
--- a/sql/sys_vars_shared.h
+++ b/sql/sys_vars_shared.h
@@ -28,6 +28,7 @@
#include <sql_priv.h>
#include "set_var.h"
+extern bool throw_bounds_warning(THD *thd, const char *name,const char *v);
extern bool throw_bounds_warning(THD *thd, const char *name,
bool fixed, bool is_unsigned, longlong v);
extern bool throw_bounds_warning(THD *thd, const char *name, bool fixed,
diff --git a/sql/table.cc b/sql/table.cc
index 31f0d255847..2cf21c889e2 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -23,7 +23,6 @@
#include "key.h" // find_ref_key
#include "sql_table.h" // build_table_filename,
// primary_key_name
-#include "sql_trigger.h"
#include "sql_parse.h" // free_items
#include "strfunc.h" // unhex_type2
#include "sql_partition.h" // mysql_unpack_partition,
@@ -31,6 +30,7 @@
#include "sql_acl.h" // *_ACL, acl_getroot_no_password
#include "sql_base.h"
#include "create_options.h"
+#include "sql_trigger.h"
#include <m_ctype.h>
#include "my_md5.h"
#include "my_bit.h"
@@ -40,6 +40,16 @@
#include "discover.h"
#include "mdl.h" // MDL_wait_for_graph_visitor
#include "sql_view.h"
+#include "rpl_filter.h"
+#include "sql_cte.h"
+
+/* For MySQL 5.7 virtual fields */
+#define MYSQL57_GENERATED_FIELD 128
+#define MYSQL57_GCOL_HEADER_SIZE 4
+
+static Virtual_column_info * unpack_vcol_info_from_frm(THD *, MEM_ROOT *,
+ TABLE *, String *, Virtual_column_info **, bool *);
+static bool check_vcol_forward_refs(Field *, Virtual_column_info *);
/* INFORMATION_SCHEMA name */
LEX_STRING INFORMATION_SCHEMA_NAME= {C_STRING_WITH_LEN("information_schema")};
@@ -60,7 +70,9 @@ LEX_STRING SLOW_LOG_NAME= {C_STRING_WITH_LEN("slow_log")};
Keyword added as a prefix when parsing the defining expression for a
virtual column read from the column definition saved in the frm file
*/
-LEX_STRING parse_vcol_keyword= { C_STRING_WITH_LEN("PARSE_VCOL_EXPR ") };
+static LEX_STRING parse_vcol_keyword= { C_STRING_WITH_LEN("PARSE_VCOL_EXPR ") };
+
+static int64 last_table_id;
/* Functions defined in this file */
@@ -118,10 +130,7 @@ Default_object_creation_ctx::create_backup_ctx(THD *thd) const
void Default_object_creation_ctx::change_env(THD *thd) const
{
- thd->variables.character_set_client= m_client_cs;
- thd->variables.collation_connection= m_connection_cl;
-
- thd->update_charset();
+ thd->update_charset(m_client_cs, m_connection_cl);
}
/**************************************************************************
@@ -276,7 +285,8 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db, const LEX_STRING *name)
SYNOPSIS
alloc_table_share()
- TABLE_LIST Take database and table name from there
+ db Database name
+ table_name Table name
key Table cache key (db \0 table_name \0...)
key_length Length of key
@@ -316,7 +326,8 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
share->normalized_path.length= path_length;
share->table_category= get_table_category(& share->db, & share->table_name);
share->open_errno= ENOENT;
- share->cached_row_logging_check= -1;
+ /* The following will be fixed in open_table_from_share */
+ share->cached_row_logging_check= 1;
init_sql_alloc(&share->stats_cb.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
@@ -325,7 +336,20 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
&share->LOCK_share, MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_TABLE_SHARE_LOCK_ha_data,
&share->LOCK_ha_data, MY_MUTEX_INIT_FAST);
- tdc_assign_new_table_id(share);
+
+ DBUG_EXECUTE_IF("simulate_big_table_id",
+ if (last_table_id < UINT_MAX32)
+ last_table_id= UINT_MAX32 - 1;);
+ /*
+ There is one reserved number that cannot be used. Remember to
+ change this when 6-byte global table id's are introduced.
+ */
+ do
+ {
+ share->table_map_id=(ulong) my_atomic_add64_explicit(&last_table_id, 1,
+ MY_MEMORY_ORDER_RELAXED);
+ } while (unlikely(share->table_map_id == ~0UL ||
+ share->table_map_id == 0));
}
DBUG_RETURN(share);
}
@@ -339,7 +363,7 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
thd thread handle
share Share to fill
key Table_cache_key, as generated from tdc_create_key.
- must start with db name.
+ must start with db name.
key_length Length of key
table_name Table name
path Path to file (possible in lower case) without .frm
@@ -379,9 +403,9 @@ void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key,
share->path.str= (char*) path;
share->normalized_path.str= (char*) path;
share->path.length= share->normalized_path.length= strlen(path);
- share->frm_version= FRM_VER_TRUE_VARCHAR;
+ share->frm_version= FRM_VER_CURRENT;
- share->cached_row_logging_check= -1;
+ share->cached_row_logging_check= 0; // No row logging
/*
table_map_id is also used for MERGE tables to suppress repeated
@@ -522,7 +546,7 @@ inline bool is_system_table_name(const char *name, uint length)
my_tolower(ci, name[1]) == 'n' &&
my_tolower(ci, name[2]) == 'n' &&
my_tolower(ci, name[3]) == 'o')) ||
-
+
/* mysql.event table */
(my_tolower(ci, name[0]) == 'e' &&
my_tolower(ci, name[1]) == 'v' &&
@@ -697,8 +721,8 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
When in the future we support others schemes of extending of
secondary keys with components of the primary key we'll have
- to change the type of this flag for an enumeration type.
- */
+ to change the type of this flag for an enumeration type.
+ */
for (i=0 ; i < keys ; i++, keyinfo++)
{
@@ -895,6 +919,236 @@ static uint upgrade_collation(ulong mysql_version, uint cs_number)
}
+/*
+ In MySQL 5.7 the null bits for not stored virtual fields are last.
+ Calculate the position for these bits
+*/
+
+static void mysql57_calculate_null_position(TABLE_SHARE *share,
+ uchar **null_pos,
+ uint *null_bit_pos,
+ const uchar *strpos,
+ const uchar *vcol_screen_pos)
+{
+ uint field_pack_length= 17;
+
+ for (uint i=0 ; i < share->fields; i++, strpos+= field_pack_length)
+ {
+ uint field_length, pack_flag;
+ enum_field_types field_type;
+
+ if ((strpos[10] & MYSQL57_GENERATED_FIELD))
+ {
+ /* Skip virtual (not stored) generated field */
+ bool stored_in_db= vcol_screen_pos[3];
+ vcol_screen_pos+= (uint2korr(vcol_screen_pos + 1) +
+ MYSQL57_GCOL_HEADER_SIZE);
+ if (! stored_in_db)
+ continue;
+ }
+ field_length= uint2korr(strpos+3);
+ pack_flag= uint2korr(strpos+8);
+ field_type= (enum_field_types) (uint) strpos[13];
+ if (field_type == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag))
+ {
+ if (((*null_bit_pos)+= field_length & 7) > 7)
+ {
+ (*null_pos)++;
+ (*null_bit_pos)-= 8;
+ }
+ }
+ if (f_maybe_null(pack_flag))
+ {
+ if (!((*null_bit_pos)= ((*null_bit_pos) + 1) & 7))
+ (*null_pos)++;
+ }
+ }
+}
+
+
+/** Parse TABLE_SHARE::vcol_defs
+
+ unpack_vcol_info_from_frm
+ 5.7
+ byte 1 = 1
+ byte 2,3 = expr length
+ byte 4 = stored_in_db
+ expression
+ 10.1-
+ byte 1 = 1 | 2
+ byte 2 = sql_type ; but TABLE::init_from_binary_frm_image()
+ byte 3 = stored_in_db ; has put expr_length here
+ [byte 4] = optional interval_id for sql_type (if byte 1 == 2)
+ expression
+ 10.2+
+ byte 1 = type
+ byte 2,3 = field_number
+ byte 4,5 = length of expression
+ byte 6 = length of name
+ name
+ expression
+*/
+bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
+ bool *error_reported)
+{
+ CHARSET_INFO *save_character_set_client= thd->variables.character_set_client;
+ CHARSET_INFO *save_collation= thd->variables.collation_connection;
+ Query_arena *backup_stmt_arena_ptr= thd->stmt_arena;
+ const uchar *pos= table->s->vcol_defs.str;
+ const uchar *end= pos + table->s->vcol_defs.length;
+ Field **field_ptr= table->field - 1;
+ Field **vfield_ptr= table->vfield;
+ Field **dfield_ptr= table->default_field;
+ Virtual_column_info **check_constraint_ptr= table->check_constraints;
+ sql_mode_t saved_mode= thd->variables.sql_mode;
+ Query_arena backup_arena;
+ Virtual_column_info *vcol= 0;
+ StringBuffer<MAX_FIELD_WIDTH> expr_str;
+ bool res= 1;
+ DBUG_ENTER("parse_vcol_defs");
+
+ if (check_constraint_ptr)
+ memcpy(table->check_constraints + table->s->field_check_constraints,
+ table->s->check_constraints,
+ table->s->table_check_constraints * sizeof(Virtual_column_info*));
+
+ DBUG_ASSERT(table->expr_arena == NULL);
+ /*
+ We need to use CONVENTIONAL_EXECUTION here to ensure that
+ any new items created by fix_fields() are not reverted.
+ */
+ table->expr_arena= new (alloc_root(mem_root, sizeof(Query_arena)))
+ Query_arena(mem_root, Query_arena::STMT_CONVENTIONAL_EXECUTION);
+ if (!table->expr_arena)
+ DBUG_RETURN(1);
+
+ thd->set_n_backup_active_arena(table->expr_arena, &backup_arena);
+ thd->stmt_arena= table->expr_arena;
+ thd->update_charset(&my_charset_utf8mb4_general_ci, table->s->table_charset);
+ expr_str.append(&parse_vcol_keyword);
+ thd->variables.sql_mode &= ~MODE_NO_BACKSLASH_ESCAPES;
+
+ while (pos < end)
+ {
+ uint type, expr_length;
+ if (table->s->frm_version >= FRM_VER_EXPRESSSIONS)
+ {
+ uint field_nr, name_length;
+ /* see pack_expression() for how data is stored */
+ type= pos[0];
+ field_nr= uint2korr(pos+1);
+ expr_length= uint2korr(pos+3);
+ name_length= pos[5];
+ pos+= FRM_VCOL_NEW_HEADER_SIZE + name_length;
+ field_ptr= table->field + field_nr;
+ }
+ else
+ {
+ /*
+ see below in ::init_from_binary_frm_image for how data is stored
+ in versions below 10.2 (that includes 5.7 too)
+ */
+ while (*++field_ptr && !(*field_ptr)->vcol_info) /* no-op */;
+ if (!*field_ptr)
+ {
+ open_table_error(table->s, OPEN_FRM_CORRUPTED, 1);
+ goto end;
+ }
+ type= (*field_ptr)->vcol_info->stored_in_db
+ ? VCOL_GENERATED_STORED : VCOL_GENERATED_VIRTUAL;
+ expr_length= uint2korr(pos+1);
+ if (table->s->mysql_version > 50700 && table->s->mysql_version < 100000)
+ pos+= 4; // MySQL from 5.7
+ else
+ pos+= pos[0] == 2 ? 4 : 3; // MariaDB from 5.2 to 10.1
+ }
+
+ expr_str.length(parse_vcol_keyword.length);
+ expr_str.append((char*)pos, expr_length);
+ thd->where= vcol_type_name(static_cast<enum_vcol_info_type>(type));
+
+ switch (type) {
+ case VCOL_GENERATED_VIRTUAL:
+ case VCOL_GENERATED_STORED:
+ vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
+ &((*field_ptr)->vcol_info), error_reported);
+ *(vfield_ptr++)= *field_ptr;
+ break;
+ case VCOL_DEFAULT:
+ vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
+ &((*field_ptr)->default_value),
+ error_reported);
+ *(dfield_ptr++)= *field_ptr;
+ if (vcol && (vcol->flags & (VCOL_NON_DETERMINISTIC | VCOL_SESSION_FUNC)))
+ table->s->non_determinstic_insert= true;
+ break;
+ case VCOL_CHECK_FIELD:
+ vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
+ &((*field_ptr)->check_constraint),
+ error_reported);
+ *check_constraint_ptr++= (*field_ptr)->check_constraint;
+ break;
+ case VCOL_CHECK_TABLE:
+ vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
+ check_constraint_ptr, error_reported);
+ check_constraint_ptr++;
+ break;
+ }
+ if (!vcol)
+ goto end;
+ pos+= expr_length;
+ }
+
+ /* Now, initialize CURRENT_TIMESTAMP fields */
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ Field *field= *field_ptr;
+ if (field->has_default_now_unireg_check())
+ {
+ expr_str.length(parse_vcol_keyword.length);
+ expr_str.append(STRING_WITH_LEN("current_timestamp("));
+ expr_str.append_ulonglong(field->decimals());
+ expr_str.append(')');
+ vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
+ &((*field_ptr)->default_value),
+ error_reported);
+ *(dfield_ptr++)= *field_ptr;
+ if (!field->default_value->expr)
+ goto end;
+ }
+ else if (field->has_update_default_function() && !field->default_value)
+ *(dfield_ptr++)= *field_ptr;
+ }
+
+ if (vfield_ptr)
+ *vfield_ptr= 0;
+
+ if (dfield_ptr)
+ *dfield_ptr= 0;
+
+ if (check_constraint_ptr)
+ *check_constraint_ptr= 0;
+
+ /* Check that expressions aren't referring to not yet initialized fields */
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ Field *field= *field_ptr;
+ if (check_vcol_forward_refs(field, field->vcol_info) ||
+ check_vcol_forward_refs(field, field->check_constraint) ||
+ check_vcol_forward_refs(field, field->default_value))
+ goto end;
+ }
+
+ res=0;
+end:
+ thd->restore_active_arena(table->expr_arena, &backup_arena);
+ thd->stmt_arena= backup_stmt_arena_ptr;
+ if (save_character_set_client)
+ thd->update_charset(save_character_set_client, save_collation);
+ thd->variables.sql_mode= saved_mode;
+ DBUG_RETURN(res);
+}
+
/**
Read data from a binary .frm file image into a TABLE_SHARE
@@ -919,14 +1173,13 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
uint new_frm_ver, field_pack_length, new_field_pack_flag;
uint interval_count, interval_parts, read_length, int_length;
uint db_create_options, keys, key_parts, n_length;
- uint com_length, null_bit_pos;
- uint extra_rec_buf_length;
+ uint com_length, null_bit_pos, UNINIT_VAR(mysql57_vcol_null_bit_pos), bitmap_count;
uint i;
- bool use_hash;
+ bool use_hash, mysql57_null_bits= 0;
char *keynames, *names, *comment_pos;
const uchar *forminfo, *extra2;
const uchar *frm_image_end = frm_image + frm_length;
- uchar *record, *null_flags, *null_pos;
+ uchar *record, *null_flags, *null_pos, *UNINIT_VAR(mysql57_vcol_null_pos);
const uchar *disk_buff, *strpos;
ulong pos, record_offset;
ulong rec_buff_length;
@@ -939,7 +1192,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
my_bitmap_map *bitmaps;
bool null_bits_are_used;
uint vcol_screen_length, UNINIT_VAR(options_len);
- char *vcol_screen_pos;
+ uchar *vcol_screen_pos;
const uchar *options= 0;
uint UNINIT_VAR(gis_options_len);
const uchar *gis_options= 0;
@@ -947,14 +1200,13 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
uint len;
uint ext_key_parts= 0;
plugin_ref se_plugin= 0;
- keyinfo= &first_keyinfo;
- share->ext_key_parts= 0;
- MEM_ROOT **root_ptr, *old_root;
+ MEM_ROOT *old_root= thd->mem_root;
+ Virtual_column_info **table_check_constraints;
DBUG_ENTER("TABLE_SHARE::init_from_binary_frm_image");
- root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
- old_root= *root_ptr;
- *root_ptr= &share->mem_root;
+ keyinfo= &first_keyinfo;
+ share->ext_key_parts= 0;
+ thd->mem_root= &share->mem_root;
if (write && write_frm_image(frm_image, frm_length))
goto err;
@@ -962,6 +1214,16 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (frm_length < FRM_HEADER_SIZE + FRM_FORMINFO_SIZE)
goto err;
+ share->frm_version= frm_image[2];
+ /*
+ Check if .frm file created by MySQL 5.0. In this case we want to
+ display CHAR fields as CHAR and not as VARCHAR.
+ We do it this way as we want to keep the old frm version to enable
+ MySQL 4.1 to read these files.
+ */
+ if (share->frm_version == FRM_VER_TRUE_VARCHAR -1 && frm_image[33] == 5)
+ share->frm_version= FRM_VER_TRUE_VARCHAR;
+
new_field_pack_flag= frm_image[27];
new_frm_ver= (frm_image[2] - FRM_VER);
field_pack_length= new_frm_ver < 2 ? 11 : 17;
@@ -1050,16 +1312,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (forminfo + FRM_FORMINFO_SIZE >= frm_image_end)
goto err;
- share->frm_version= frm_image[2];
- /*
- Check if .frm file created by MySQL 5.0. In this case we want to
- display CHAR fields as CHAR and not as VARCHAR.
- We do it this way as we want to keep the old frm version to enable
- MySQL 4.1 to read these files.
- */
- if (share->frm_version == FRM_VER_TRUE_VARCHAR -1 && frm_image[33] == 5)
- share->frm_version= FRM_VER_TRUE_VARCHAR;
-
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (frm_image[61] && !share->default_part_plugin)
{
@@ -1087,7 +1339,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
uint cs_new= upgrade_collation(share->mysql_version, cs_org);
if (cs_org != cs_new)
share->incompatible_version|= HA_CREATE_USED_CHARSET;
-
+
share->avg_row_length= uint4korr(frm_image+34);
share->transactional= (ha_choice)
enum_value_with_check(thd, share, "transactional", frm_image[39] & 3, HA_CHOICE_MAX);
@@ -1101,6 +1353,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->null_field_first= 1;
share->stats_sample_pages= uint2korr(frm_image+42);
share->stats_auto_recalc= (enum_stats_auto_recalc)(frm_image[44]);
+ share->table_check_constraints= uint2korr(frm_image+45);
}
if (!share->table_charset)
{
@@ -1338,8 +1591,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (share->db_plugin && !plugin_equals(share->db_plugin, se_plugin))
goto err; // wrong engine (someone changed the frm under our feet?)
- extra_rec_buf_length= uint2korr(frm_image+59);
- rec_buff_length= ALIGN_SIZE(share->reclength + 1 + extra_rec_buf_length);
+ rec_buff_length= ALIGN_SIZE(share->reclength + 1);
share->rec_buff_length= rec_buff_length;
if (!(record= (uchar *) alloc_root(&share->mem_root, rec_buff_length)))
goto err; /* purecov: inspected */
@@ -1359,8 +1611,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->null_fields= uint2korr(forminfo+282);
com_length= uint2korr(forminfo+284);
vcol_screen_length= uint2korr(forminfo+286);
- share->vfields= 0;
- share->default_fields= 0;
+ share->virtual_fields= share->default_expressions=
+ share->field_check_constraints= share->default_fields= 0;
share->stored_fields= share->fields;
if (forminfo[46] != (uchar)255)
{
@@ -1371,42 +1623,41 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d vcol_screen_length: %d", interval_count,interval_parts, keys,n_length,int_length, com_length, vcol_screen_length));
+ if (!multi_alloc_root(&share->mem_root,
+ &share->field, (uint)(share->fields+1)*sizeof(Field*),
+ &share->intervals, (uint)interval_count*sizeof(TYPELIB),
+ &share->check_constraints, (uint) share->table_check_constraints * sizeof(Virtual_column_info*),
+ &interval_array, (uint) (share->fields+interval_parts+ keys+3)*sizeof(char *),
+ &names, (uint) (n_length+int_length),
+ &comment_pos, (uint) com_length,
+ &vcol_screen_pos, vcol_screen_length,
+ NullS))
- if (!(field_ptr = (Field **)
- alloc_root(&share->mem_root,
- (uint) ((share->fields+1)*sizeof(Field*)+
- interval_count*sizeof(TYPELIB)+
- (share->fields+interval_parts+
- keys+3)*sizeof(char *)+
- (n_length+int_length+com_length+
- vcol_screen_length)))))
- goto err; /* purecov: inspected */
+ goto err;
- share->field= field_ptr;
+ field_ptr= share->field;
+ table_check_constraints= share->check_constraints;
read_length=(uint) (share->fields * field_pack_length +
pos+ (uint) (n_length+int_length+com_length+
vcol_screen_length));
strpos= disk_buff+pos;
- share->intervals= (TYPELIB*) (field_ptr+share->fields+1);
- interval_array= (const char **) (share->intervals+interval_count);
- names= (char*) (interval_array+share->fields+interval_parts+keys+3);
if (!interval_count)
share->intervals= 0; // For better debugging
- memcpy((char*) names, strpos+(share->fields*field_pack_length),
- (uint) (n_length+int_length));
- comment_pos= names+(n_length+int_length);
+
+ share->vcol_defs.str= vcol_screen_pos;
+ share->vcol_defs.length= vcol_screen_length;
+
+ memcpy(names, strpos+(share->fields*field_pack_length), n_length+int_length);
memcpy(comment_pos, disk_buff+read_length-com_length-vcol_screen_length,
com_length);
- vcol_screen_pos= names+(n_length+int_length+com_length);
memcpy(vcol_screen_pos, disk_buff+read_length-vcol_screen_length,
vcol_screen_length);
fix_type_pointers(&interval_array, &share->fieldnames, 1, &names);
if (share->fieldnames.count != share->fields)
goto err;
- fix_type_pointers(&interval_array, share->intervals, interval_count,
- &names);
+ fix_type_pointers(&interval_array, share->intervals, interval_count, &names);
{
/* Set ENUM and SET lengths */
@@ -1468,6 +1719,21 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->fields,0,0,
(my_hash_get_key) get_field_name,0,0);
+ if (share->mysql_version >= 50700 && share->mysql_version < 100000 &&
+ vcol_screen_length)
+ {
+ /*
+ MySQL 5.7 stores the null bits for not stored fields last.
+ Calculate the position for them.
+ */
+ mysql57_null_bits= 1;
+ mysql57_vcol_null_pos= null_pos;
+ mysql57_vcol_null_bit_pos= null_bit_pos;
+ mysql57_calculate_null_position(share, &mysql57_vcol_null_pos,
+ &mysql57_vcol_null_bit_pos,
+ strpos, vcol_screen_pos);
+ }
+
for (i=0 ; i < share->fields; i++, strpos+=field_pack_length, field_ptr++)
{
uint pack_flag, interval_nr, unireg_type, recpos, field_length;
@@ -1478,8 +1744,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
Field::geometry_type geom_type= Field::GEOM_GEOMETRY;
LEX_STRING comment;
Virtual_column_info *vcol_info= 0;
- bool fld_stored_in_db= TRUE;
uint gis_length, gis_decimals, srid= 0;
+ Field::utype unireg_check;
if (new_frm_ver >= 3)
{
@@ -1536,7 +1802,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
{
if (!interval_nr) // Expect non-null expression
goto err;
- /*
+ /*
+ MariaDB version 10.0 version.
The interval_id byte in the .frm file stores the length of the
expression statement for a virtual column.
*/
@@ -1556,38 +1823,59 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
comment_pos+= comment_length;
}
+ if (unireg_type & MYSQL57_GENERATED_FIELD)
+ {
+ unireg_type&= MYSQL57_GENERATED_FIELD;
+
+ /*
+ MySQL 5.7 generated fields
+
+ byte 1 = 1
+ byte 2,3 = expr length
+ byte 4 = stored_in_db
+ byte 5.. = expr
+ */
+ if ((uint)(vcol_screen_pos)[0] != 1)
+ goto err;
+ vcol_info= new (&share->mem_root) Virtual_column_info();
+ vcol_info_length= uint2korr(vcol_screen_pos + 1);
+ DBUG_ASSERT(vcol_info_length);
+ vcol_info->stored_in_db= vcol_screen_pos[3];
+ vcol_info->utf8= 0;
+ vcol_screen_pos+= vcol_info_length + MYSQL57_GCOL_HEADER_SIZE;;
+ share->virtual_fields++;
+ vcol_info_length= 0;
+ }
+
if (vcol_info_length)
{
/*
+ Old virtual field information before 10.2
+
Get virtual column data stored in the .frm file as follows:
byte 1 = 1 | 2
byte 2 = sql_type
- byte 3 = flags (as of now, 0 - no flags, 1 - field is physically stored)
- [byte 4] = optional interval_id for sql_type (only if byte 1 == 2)
+ byte 3 = flags. 1 for stored_in_db
+ [byte 4] = optional interval_id for sql_type (if byte 1 == 2)
next byte ... = virtual column expression (text data)
*/
- vcol_info= new Virtual_column_info();
+
+ vcol_info= new (&share->mem_root) Virtual_column_info();
bool opt_interval_id= (uint)vcol_screen_pos[0] == 2;
field_type= (enum_field_types) (uchar) vcol_screen_pos[1];
if (opt_interval_id)
interval_nr= (uint)vcol_screen_pos[3];
else if ((uint)vcol_screen_pos[0] != 1)
goto err;
-
- fld_stored_in_db= (bool) (uint) vcol_screen_pos[2];
+ bool stored= vcol_screen_pos[2] & 1;
+ vcol_info->stored_in_db= stored;
+ vcol_info->set_vcol_type(stored ? VCOL_GENERATED_STORED : VCOL_GENERATED_VIRTUAL);
vcol_expr_length= vcol_info_length -
- (uint)(FRM_VCOL_HEADER_SIZE(opt_interval_id));
- if (!(vcol_info->expr_str.str=
- (char *)memdup_root(&share->mem_root,
- vcol_screen_pos +
- (uint) FRM_VCOL_HEADER_SIZE(opt_interval_id),
- vcol_expr_length)))
- goto err;
- if (opt_interval_id)
- interval_nr= (uint) vcol_screen_pos[3];
- vcol_info->expr_str.length= vcol_expr_length;
+ (uint)(FRM_VCOL_OLD_HEADER_SIZE(opt_interval_id));
+ vcol_info->utf8= 0; // before 10.2.1 the charset was unknown
+ int2store(vcol_screen_pos+1, vcol_expr_length); // for parse_vcol_defs()
vcol_screen_pos+= vcol_info_length;
- share->vfields++;
+ share->virtual_fields++;
}
}
else
@@ -1606,7 +1894,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
/*
Try to choose the best 4.1 type:
- for 4.0 "CHAR(N) BINARY" or "VARCHAR(N) BINARY"
- try to find a binary collation for character set.
+ try to find a binary collation for character set.
- for other types (e.g. BLOB) just use my_charset_bin.
*/
if (!f_is_blob(pack_flag))
@@ -1624,13 +1912,17 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
bzero((char*) &comment, sizeof(comment));
}
+ /* Remove >32 decimals from old files */
+ if (share->mysql_version < 100200)
+ pack_flag&= ~FIELDFLAG_LONG_DECIMAL;
+
if (interval_nr && charset->mbminlen > 1)
{
/* Unescape UCS2 intervals from HEX notation */
TYPELIB *interval= share->intervals + interval_nr - 1;
unhex_type2(interval);
}
-
+
#ifndef TO_BE_DELETED_ON_PRODUCTION
if (field_type == MYSQL_TYPE_NEWDECIMAL && !share->mysql_version)
{
@@ -1658,27 +1950,35 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
#endif
+ if (mysql57_null_bits && vcol_info && !vcol_info->stored_in_db)
+ {
+ swap_variables(uchar*, null_pos, mysql57_vcol_null_pos);
+ swap_variables(uint, null_bit_pos, mysql57_vcol_null_bit_pos);
+ }
+
+ /* Convert pre-10.2.2 timestamps to use Field::default_value */
+ unireg_check= (Field::utype) MTYP_TYPENR(unireg_type);
*field_ptr= reg_field=
- make_field(share, &share->mem_root, record+recpos,
- (uint32) field_length,
- null_pos, null_bit_pos,
- pack_flag,
- field_type,
- charset,
- geom_type, srid,
- (Field::utype) MTYP_TYPENR(unireg_type),
- (interval_nr ?
- share->intervals+interval_nr-1 :
- (TYPELIB*) 0),
+ make_field(share, &share->mem_root, record+recpos, (uint32) field_length,
+ null_pos, null_bit_pos, pack_flag, field_type, charset,
+ geom_type, srid, unireg_check,
+ (interval_nr ? share->intervals+interval_nr-1 : NULL),
share->fieldnames.type_names[i]);
if (!reg_field) // Not supported field type
goto err;
+ if (unireg_check == Field::TIMESTAMP_DNUN_FIELD ||
+ unireg_check == Field::TIMESTAMP_DN_FIELD)
+ {
+ reg_field->default_value= new (&share->mem_root) Virtual_column_info();
+ reg_field->default_value->set_vcol_type(VCOL_DEFAULT);
+ reg_field->default_value->stored_in_db= 1;
+ share->default_expressions++;
+ }
reg_field->field_index= i;
reg_field->comment=comment;
reg_field->vcol_info= vcol_info;
- reg_field->stored_in_db= fld_stored_in_db;
if (field_type == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag))
{
null_bits_are_used= 1;
@@ -1693,6 +1993,19 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (!(null_bit_pos= (null_bit_pos + 1) & 7))
null_pos++;
}
+
+ if (vcol_info)
+ {
+ vcol_info->name.str= const_cast<char*>(reg_field->field_name);
+ vcol_info->name.length = strlen(reg_field->field_name);
+ if (mysql57_null_bits && !vcol_info->stored_in_db)
+ {
+ /* MySQL 5.7 has null bits last */
+ swap_variables(uchar*, null_pos, mysql57_vcol_null_pos);
+ swap_variables(uint, null_bit_pos, mysql57_vcol_null_bit_pos);
+ }
+ }
+
if (f_no_default(pack_flag))
reg_field->flags|= NO_DEFAULT_VALUE_FLAG;
@@ -1701,21 +2014,32 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (use_hash && my_hash_insert(&share->name_hash, (uchar*) field_ptr))
goto err;
- if (!reg_field->stored_in_db)
+ if (!reg_field->stored_in_db())
{
share->stored_fields--;
if (share->stored_rec_length>=recpos)
share->stored_rec_length= recpos-1;
}
- if (reg_field->has_insert_default_function() ||
- reg_field->has_update_default_function())
- ++share->default_fields;
+ if (reg_field->has_update_default_function())
+ {
+ has_update_default_function= 1;
+ if (!reg_field->default_value)
+ share->default_fields++;
+ }
}
*field_ptr=0; // End marker
/* Sanity checks: */
DBUG_ASSERT(share->fields>=share->stored_fields);
DBUG_ASSERT(share->reclength>=share->stored_rec_length);
+ if (mysql57_null_bits)
+ {
+ /* We want to store the value for the last bits */
+ swap_variables(uchar*, null_pos, mysql57_vcol_null_pos);
+ swap_variables(uint, null_bit_pos, mysql57_vcol_null_bit_pos);
+ DBUG_ASSERT((null_pos + (null_bit_pos + 7) / 8) <= share->field[0]->ptr);
+ }
+
/* Fix key->name and key_part->field */
if (key_parts)
{
@@ -1787,6 +2111,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
}
+ key_first_info= keyinfo;
for (uint key=0 ; key < keys ; key++,keyinfo++)
{
uint usable_parts= 0;
@@ -1804,9 +2129,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo->name_length+1);
}
- if (!key)
- key_first_info= keyinfo;
-
if (ext_key_parts > share->key_parts && key)
{
KEY_PART_INFO *new_key_part= (keyinfo-1)->key_part +
@@ -1886,7 +2208,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo->key_part= new_key_part;
}
}
-
+
/* Fix fulltext keys for old .frm files */
if (share->key_info[key].flags & HA_FULLTEXT)
share->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
@@ -2060,6 +2382,99 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
null_length, 255);
}
+ /* Handle virtual expressions */
+ if (vcol_screen_length && share->frm_version >= FRM_VER_EXPRESSSIONS)
+ {
+ uchar *vcol_screen_end= vcol_screen_pos + vcol_screen_length;
+
+ /* Skip header */
+ vcol_screen_pos+= FRM_VCOL_NEW_BASE_SIZE;
+ share->vcol_defs.str+= FRM_VCOL_NEW_BASE_SIZE;
+ share->vcol_defs.length-= FRM_VCOL_NEW_BASE_SIZE;
+
+ /*
+ Read virtual columns, default values and check constraints
+ See pack_expression() for how data is stored
+ */
+ while (vcol_screen_pos < vcol_screen_end)
+ {
+ Virtual_column_info *vcol_info;
+ uint type= (uint) vcol_screen_pos[0];
+ uint field_nr= uint2korr(vcol_screen_pos+1);
+ uint expr_length= uint2korr(vcol_screen_pos+3);
+ uint name_length= (uint) vcol_screen_pos[5];
+
+ if (!(vcol_info= new (&share->mem_root) Virtual_column_info()))
+ goto err;
+
+ /* The following can only be true for check_constraints */
+
+ if (field_nr != UINT_MAX16)
+ {
+ DBUG_ASSERT(field_nr < share->fields);
+ reg_field= share->field[field_nr];
+ }
+ else
+ {
+ reg_field= 0;
+ DBUG_ASSERT(name_length);
+ }
+
+ vcol_screen_pos+= FRM_VCOL_NEW_HEADER_SIZE;
+ vcol_info->set_vcol_type((enum_vcol_info_type) type);
+ vcol_info->name.length= name_length;
+ if (name_length)
+ vcol_info->name.str= strmake_root(&share->mem_root,
+ (char*)vcol_screen_pos, name_length);
+ else
+ {
+ vcol_info->name.str= const_cast<char*>(reg_field->field_name);
+ vcol_info->name.length = strlen(reg_field->field_name);
+ }
+ vcol_screen_pos+= name_length + expr_length;
+
+ switch (type) {
+ case VCOL_GENERATED_VIRTUAL:
+ {
+ uint recpos;
+ reg_field->vcol_info= vcol_info;
+ share->virtual_fields++;
+ share->stored_fields--;
+ if (reg_field->flags & BLOB_FLAG)
+ share->virtual_not_stored_blob_fields++;
+ /* Correct stored_rec_length as non stored fields are last */
+ recpos= (uint) (reg_field->ptr - record);
+ if (share->stored_rec_length >= recpos)
+ share->stored_rec_length= recpos-1;
+ break;
+ }
+ case VCOL_GENERATED_STORED:
+ vcol_info->stored_in_db= 1;
+ DBUG_ASSERT(!reg_field->vcol_info);
+ reg_field->vcol_info= vcol_info;
+ share->virtual_fields++;
+ break;
+ case VCOL_DEFAULT:
+ vcol_info->stored_in_db= 1;
+ DBUG_ASSERT(!reg_field->default_value);
+ reg_field->default_value= vcol_info;
+ share->default_expressions++;
+ break;
+ case VCOL_CHECK_FIELD:
+ DBUG_ASSERT(!reg_field->check_constraint);
+ reg_field->check_constraint= vcol_info;
+ share->field_check_constraints++;
+ break;
+ case VCOL_CHECK_TABLE:
+ *(table_check_constraints++)= vcol_info;
+ break;
+ }
+ }
+ }
+ DBUG_ASSERT((uint) (table_check_constraints - share->check_constraints) ==
+ (uint) (share->table_check_constraints -
+ share->field_check_constraints));
+
if (options)
{
DBUG_ASSERT(options_len);
@@ -2102,7 +2517,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
the correct null_bytes can now be set, since bitfields have been taken
into account
*/
- share->null_bytes= (null_pos - (uchar*) null_flags +
+ share->null_bytes= (uint)(null_pos - (uchar*) null_flags +
(null_bit_pos + 7) / 8);
share->last_null_bit_pos= null_bit_pos;
share->null_bytes_for_compare= null_bits_are_used ? share->null_bytes : 0;
@@ -2111,11 +2526,33 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->column_bitmap_size= bitmap_buffer_size(share->fields);
+ bitmap_count= 1;
+ if (share->table_check_constraints)
+ {
+ feature_check_constraint++;
+ if (!(share->check_set= (MY_BITMAP*)
+ alloc_root(&share->mem_root, sizeof(*share->check_set))))
+ goto err;
+ bitmap_count++;
+ }
if (!(bitmaps= (my_bitmap_map*) alloc_root(&share->mem_root,
- share->column_bitmap_size)))
+ share->column_bitmap_size *
+ bitmap_count)))
goto err;
my_bitmap_init(&share->all_set, bitmaps, share->fields, FALSE);
bitmap_set_all(&share->all_set);
+ if (share->check_set)
+ {
+ /*
+ Bitmap for fields used by CHECK constraint. Will be filled up
+ at first usage of table.
+ */
+ my_bitmap_init(share->check_set,
+ (my_bitmap_map*) ((uchar*) bitmaps +
+ share->column_bitmap_size),
+ share->fields, FALSE);
+ bitmap_clear_all(share->check_set);
+ }
delete handler_file;
#ifndef DBUG_OFF
@@ -2126,7 +2563,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->db_plugin= se_plugin;
share->error= OPEN_FRM_OK;
thd->status_var.opened_shares++;
- *root_ptr= old_root;
+ thd->mem_root= old_root;
DBUG_RETURN(0);
err:
@@ -2139,7 +2576,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (!thd->is_error())
open_table_error(share, OPEN_FRM_CORRUPTED, share->open_errno);
- *root_ptr= old_root;
+ thd->mem_root= old_root;
DBUG_RETURN(HA_ERR_NOT_A_TABLE);
}
@@ -2186,7 +2623,7 @@ static bool sql_unusable_for_discovery(THD *thd, handlerton *engine,
int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
const char *sql, size_t sql_length)
{
- ulonglong saved_mode= thd->variables.sql_mode;
+ sql_mode_t saved_mode= thd->variables.sql_mode;
CHARSET_INFO *old_cs= thd->variables.character_set_client;
Parser_state parser_state;
bool error;
@@ -2200,7 +2637,6 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
handlerton *hton= plugin_hton(db_plugin);
LEX_CUSTRING frm= {0,0};
LEX_STRING db_backup= { thd->db, thd->db_length };
-
DBUG_ENTER("TABLE_SHARE::init_from_sql_statement_string");
/*
@@ -2310,28 +2746,63 @@ void TABLE_SHARE::free_frm_image(const uchar *frm)
}
-/*
- @brief
- Clear GET_FIXED_FIELDS_FLAG in all fields of a table
+static bool fix_vcol_expr(THD *thd, Virtual_column_info *vcol)
+{
+ DBUG_ENTER("fix_vcol_expr");
- @param
- table The table for whose fields the flags are to be cleared
+ const enum enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
- @note
- This routine is used for error handling purposes.
+ int error= vcol->expr->fix_fields(thd, &vcol->expr);
- @return
- none
-*/
+ thd->mark_used_columns= save_mark_used_columns;
+
+ if (unlikely(error))
+ {
+ StringBuffer<MAX_FIELD_WIDTH> str;
+ vcol->print(&str);
+ my_error(ER_ERROR_EVALUATING_EXPRESSION, MYF(0), str.c_ptr_safe());
+ DBUG_RETURN(1);
+ }
+
+ DBUG_RETURN(0);
+}
+
+/** rerun fix_fields for vcols that returns time- or session- dependent values
-static void clear_field_flag(TABLE *table)
+ @note this is done for all vcols for INSERT/UPDATE/DELETE,
+ and only as needed for SELECTs.
+*/
+bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol)
{
- Field **ptr;
- DBUG_ENTER("clear_field_flag");
+ DBUG_ENTER("fix_session_vcol_expr");
+ if (!(vcol->flags & (VCOL_TIME_FUNC|VCOL_SESSION_FUNC)))
+ DBUG_RETURN(0);
- for (ptr= table->field; *ptr; ptr++)
- (*ptr)->flags&= (~GET_FIXED_FIELDS_FLAG);
- DBUG_VOID_RETURN;
+ vcol->expr->walk(&Item::cleanup_excluding_fields_processor, 0, 0);
+ DBUG_ASSERT(!vcol->expr->fixed);
+ DBUG_RETURN(fix_vcol_expr(thd, vcol));
+}
+
+
+/** invoke fix_session_vcol_expr for a vcol
+
+ @note this is called for generated column or a DEFAULT expression from
+ their corresponding fix_fields on SELECT.
+*/
+bool fix_session_vcol_expr_for_read(THD *thd, Field *field,
+ Virtual_column_info *vcol)
+{
+ DBUG_ENTER("fix_session_vcol_expr_for_read");
+ TABLE_LIST *tl= field->table->pos_in_table_list;
+ if (!tl || tl->lock_type >= TL_WRITE_ALLOW_WRITE)
+ DBUG_RETURN(0);
+ Security_context *save_security_ctx= thd->security_ctx;
+ if (tl->security_ctx)
+ thd->security_ctx= tl->security_ctx;
+ bool res= fix_session_vcol_expr(thd, vcol);
+ thd->security_ctx= save_security_ctx;
+ DBUG_RETURN(res);
}
@@ -2339,264 +2810,174 @@ static void clear_field_flag(TABLE *table)
@brief
Perform semantic analysis of the defining expression for a virtual column
- @param
- thd The thread object
- @param
- table The table containing the virtual column
- @param
- vcol_field The virtual field whose defining expression is to be analyzed
+ @param thd The thread object
+ @param table The table containing the virtual column
+ @param field Field if this is a DEFAULT or AS, otherwise NULL
+ @param vcol The Virtual_column object
+
@details
The function performs semantic analysis of the defining expression for
the virtual column vcol_field. The expression is used to compute the
values of this column.
- @note
- The function exploits the fact that the fix_fields method sets the flag
- GET_FIXED_FIELDS_FLAG for all fields in the item tree.
- This flag must always be unset before returning from this function
- since it is used for other purposes as well.
-
@retval
TRUE An error occurred, something was wrong with the function
@retval
FALSE Otherwise
*/
-bool fix_vcol_expr(THD *thd,
- TABLE *table,
- Field *vcol_field)
+static bool fix_and_check_vcol_expr(THD *thd, TABLE *table,
+ Virtual_column_info *vcol)
{
- Virtual_column_info *vcol_info= vcol_field->vcol_info;
- Item* func_expr= vcol_info->expr_item;
- bool result= TRUE;
- TABLE_LIST tables;
- int error= 0;
- const char *save_where;
- Field **ptr, *field;
- enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
+ Item* func_expr= vcol->expr;
+ DBUG_ENTER("fix_and_check_vcol_expr");
+ DBUG_PRINT("info", ("vcol: %p", vcol));
DBUG_ASSERT(func_expr);
- DBUG_ENTER("fix_vcol_expr");
- thd->mark_used_columns= MARK_COLUMNS_NONE;
+ if (func_expr->fixed)
+ DBUG_RETURN(0); // nothing to do
- save_where= thd->where;
- thd->where= "virtual column function";
+ if (fix_vcol_expr(thd, vcol))
+ DBUG_RETURN(1);
- /* Fix fields referenced to by the virtual column function */
- if (!func_expr->fixed)
- error= func_expr->fix_fields(thd, &vcol_info->expr_item);
- /* fix_fields could change the expression */
- func_expr= vcol_info->expr_item;
- /* Number of columns will be checked later */
+ if (vcol->flags)
+ DBUG_RETURN(0); // already checked, no need to do it again
- if (unlikely(error))
- {
- DBUG_PRINT("info",
- ("Field in virtual column expression does not belong to the table"));
- goto end;
- }
- thd->where= save_where;
+ /* fix_fields could've changed the expression */
+ func_expr= vcol->expr;
+
+ /* this was checked in check_expression(), but the frm could be mangled... */
if (unlikely(func_expr->result_type() == ROW_RESULT))
{
- my_error(ER_ROW_EXPR_FOR_VCOL, MYF(0));
- goto end;
+ my_error(ER_OPERAND_COLUMNS, MYF(0), 1);
+ DBUG_RETURN(1);
}
-#ifdef PARANOID
+
/*
Walk through the Item tree checking if all items are valid
- to be part of the virtual column
+ to be part of the virtual column
*/
- error= func_expr->walk(&Item::check_vcol_func_processor, 0, NULL);
- if (error)
+ Item::vcol_func_processor_result res;
+ res.errors= 0;
+
+ int error= func_expr->walk(&Item::check_vcol_func_processor, 0, &res);
+ if (error || (res.errors & VCOL_IMPOSSIBLE))
{
- my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), field_name);
- goto end;
+ // this can only happen if the frm was corrupted
+ my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name,
+ vcol->get_vcol_type_name(), vcol->name.str);
+ DBUG_RETURN(1);
}
-#endif
- if (unlikely(func_expr->const_item()))
+ else if (res.errors & VCOL_AUTO_INC)
{
- my_error(ER_CONST_EXPR_IN_VCOL, MYF(0));
- goto end;
- }
- /* Ensure that this virtual column is not based on another virtual field. */
- ptr= table->field;
- while ((field= *(ptr++)))
- {
- if ((field->flags & GET_FIXED_FIELDS_FLAG) &&
- (field->vcol_info))
- {
- my_error(ER_VCOL_BASED_ON_VCOL, MYF(0));
- goto end;
- }
- }
- result= FALSE;
+ /*
+ An auto_increment field may not be used in an expression for
+ a check constraint, a default value or a generated column
-end:
+ Note that this error condition is not detected during parsing
+ of the statement because the field item does not have a field
+ pointer at that time
+ */
+ myf warn= table->s->frm_version < FRM_VER_EXPRESSSIONS ? ME_JUST_WARNING : 0;
+ my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(warn),
+ "AUTO_INCREMENT", vcol->get_vcol_type_name(), res.name);
+ if (!warn)
+ DBUG_RETURN(1);
+ }
+ vcol->flags= res.errors;
- /* Clear GET_FIXED_FIELDS_FLAG for the fields of the table */
- clear_field_flag(table);
+ if (vcol->flags & VCOL_SESSION_FUNC)
+ table->s->vcols_need_refixing= true;
- table->get_fields_in_item_tree= FALSE;
- thd->mark_used_columns= save_mark_used_columns;
- table->map= 0; //Restore old value
-
- DBUG_RETURN(result);
+ DBUG_RETURN(0);
}
+
/*
@brief
Unpack the definition of a virtual column from its linear representation
- @param
- thd The thread object
- @param
- mem_root The mem_root object where to allocated memory
- @param
- table The table containing the virtual column
- @param
- field The field for the virtual
- @param
- vcol_expr The string representation of the defining expression
- @param[out]
- error_reported The flag to inform the caller that no other error
- messages are to be generated
+ @param thd The thread object
+ @param mem_root Where to allocate memory
+ @param table The table containing the virtual column
+ @param field Field if this is a DEFAULT or AS, otherwise NULL
+ @param vcol The Virtual_column object
+ @param[out] error_reported Flag to inform the caller that no
+ other error messages are to be generated
@details
- The function takes string representation 'vcol_expr' of the defining
- expression for the virtual field 'field' of the table 'table' and
- parses it, building an item object for it. The pointer to this item is
- placed into in field->vcol_info.expr_item. After this the function performs
- semantic analysis of the item by calling the the function fix_vcol_expr.
- Since the defining expression is part of the table definition the item for
- it is created in table->memroot within the special arena TABLE::expr_arena.
+
+ The function takes string expression from the 'vcol' object of the
+ table 'table' and parses it, building an item object for it. The
+ pointer to this item is placed into in a Virtual_column_info object
+ that is created. After this the function performs
+ semantic analysis of the item by calling the the function
+ fix_and_check_vcol_expr(). Since the defining expression is part of the table
+ definition the item for it is created in table->memroot within the
+ special arena TABLE::expr_arena or in the thd memroot for INSERT DELAYED
@note
- Before passing 'vcol_expr" to the parser the function embraces it in
- parenthesis and prepands it a special keyword.
+ Before passing 'vcol_expr' to the parser the function wraps it in
+ parentheses and prepends a special keyword.
- @retval
- FALSE If a success
- @retval
- TRUE Otherwise
+ @retval Virtual_column_info* Success
+ @retval NULL Error
*/
-bool unpack_vcol_info_from_frm(THD *thd,
- MEM_ROOT *mem_root,
- TABLE *table,
- Field *field,
- LEX_STRING *vcol_expr,
- bool *error_reported)
+
+static Virtual_column_info *
+unpack_vcol_info_from_frm(THD *thd, MEM_ROOT *mem_root, TABLE *table,
+ String *expr_str, Virtual_column_info **vcol_ptr,
+ bool *error_reported)
{
- bool rc;
- char *vcol_expr_str;
- int str_len;
- CHARSET_INFO *old_character_set_client;
- Query_arena *backup_stmt_arena_ptr;
- Query_arena backup_arena;
- Query_arena *vcol_arena= 0;
Create_field vcol_storage; // placeholder for vcol_info
Parser_state parser_state;
+ Virtual_column_info *vcol= *vcol_ptr, *vcol_info= 0;
LEX *old_lex= thd->lex;
LEX lex;
+ bool error;
DBUG_ENTER("unpack_vcol_info_from_frm");
- DBUG_ASSERT(vcol_expr);
-
- old_character_set_client= thd->variables.character_set_client;
- backup_stmt_arena_ptr= thd->stmt_arena;
- /*
- Step 1: Construct the input string for the parser.
- The string to be parsed has to be of the following format:
- "PARSE_VCOL_EXPR (<expr_string_from_frm>)".
- */
+ DBUG_ASSERT(vcol->expr == NULL);
- if (!(vcol_expr_str= (char*) alloc_root(mem_root,
- vcol_expr->length +
- parse_vcol_keyword.length + 3)))
- {
- DBUG_RETURN(TRUE);
- }
- memcpy(vcol_expr_str,
- (char*) parse_vcol_keyword.str,
- parse_vcol_keyword.length);
- str_len= parse_vcol_keyword.length;
- memcpy(vcol_expr_str + str_len, "(", 1);
- str_len++;
- memcpy(vcol_expr_str + str_len,
- (char*) vcol_expr->str,
- vcol_expr->length);
- str_len+= vcol_expr->length;
- memcpy(vcol_expr_str + str_len, ")", 1);
- str_len++;
- memcpy(vcol_expr_str + str_len, "\0", 1);
- str_len++;
-
- if (parser_state.init(thd, vcol_expr_str, str_len))
- goto err;
-
- /*
- Step 2: Setup thd for parsing.
- */
- vcol_arena= table->expr_arena;
- if (!vcol_arena)
- {
- /*
- We need to use CONVENTIONAL_EXECUTION here to ensure that
- any new items created by fix_fields() are not reverted.
- */
- Query_arena expr_arena(mem_root,
- Query_arena::STMT_CONVENTIONAL_EXECUTION);
- if (!(vcol_arena= (Query_arena *) alloc_root(mem_root,
- sizeof(Query_arena))))
- goto err;
- *vcol_arena= expr_arena;
- table->expr_arena= vcol_arena;
- }
- thd->set_n_backup_active_arena(vcol_arena, &backup_arena);
- thd->stmt_arena= vcol_arena;
+ if (parser_state.init(thd, expr_str->c_ptr_safe(), expr_str->length()))
+ goto end;
if (init_lex_with_single_table(thd, table, &lex))
- goto err;
+ goto end;
- lex.parse_vcol_expr= TRUE;
+ lex.parse_vcol_expr= true;
lex.last_field= &vcol_storage;
- /*
- Step 3: Use the parser to build an Item object from vcol_expr_str.
- */
- if (parse_sql(thd, &parser_state, NULL))
- {
- goto err;
- }
- /* From now on use vcol_info generated by the parser. */
- field->vcol_info= vcol_storage.vcol_info;
+ error= parse_sql(thd, &parser_state, NULL);
+ if (error)
+ goto end;
- /* Validate the Item tree. */
- if (fix_vcol_expr(thd, table, field))
+ vcol_storage.vcol_info->set_vcol_type(vcol->get_vcol_type());
+ vcol_storage.vcol_info->stored_in_db= vcol->stored_in_db;
+ vcol_storage.vcol_info->name= vcol->name;
+ vcol_storage.vcol_info->utf8= vcol->utf8;
+ if (!fix_and_check_vcol_expr(thd, table, vcol_storage.vcol_info))
{
- *error_reported= TRUE;
- field->vcol_info= 0;
- goto err;
+ *vcol_ptr= vcol_info= vcol_storage.vcol_info; // Expression ok
+ DBUG_ASSERT(vcol_info->expr);
+ goto end;
}
- rc= FALSE;
- goto end;
+ *error_reported= TRUE;
-err:
- rc= TRUE;
- thd->free_items();
end:
- thd->stmt_arena= backup_stmt_arena_ptr;
- if (vcol_arena)
- thd->restore_active_arena(vcol_arena, &backup_arena);
end_lex_with_single_table(thd, table, old_lex);
- thd->variables.character_set_client= old_character_set_client;
- DBUG_RETURN(rc);
+ DBUG_RETURN(vcol_info);
}
-/*
- Read data from a binary .frm file from MySQL 3.23 - 5.0 into TABLE_SHARE
-*/
+static bool check_vcol_forward_refs(Field *field, Virtual_column_info *vcol)
+{
+ bool res= vcol &&
+ vcol->expr->walk(&Item::check_field_expression_processor, 0,
+ field);
+ return res;
+}
/*
Open a table based on a TABLE_SHARE
@@ -2630,13 +3011,16 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
{
enum open_frm_error error;
uint records, i, bitmap_size, bitmap_count;
+ size_t tmp_length;
+ const char *tmp_alias;
bool error_reported= FALSE;
uchar *record, *bitmaps;
- Field **field_ptr, **UNINIT_VAR(vfield_ptr), **UNINIT_VAR(dfield_ptr);
+ Field **field_ptr;
uint8 save_context_analysis_only= thd->lex->context_analysis_only;
+ TABLE_SHARE::enum_v_keys check_set_initialized= share->check_set_initialized;
DBUG_ENTER("open_table_from_share");
- DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str,
- share->table_name.str, (long) outparam));
+ DBUG_PRINT("enter",("name: '%s.%s' form: %p", share->db.str,
+ share->table_name.str, outparam));
thd->lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_VIEW; // not a view
@@ -2656,11 +3040,17 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
}
init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
- if (outparam->alias.copy(alias, strlen(alias), table_alias_charset))
+ /*
+ We have to store the original alias in mem_root as constraints and virtual
+ functions may store pointers to it
+ */
+ tmp_length= strlen(alias);
+ if (!(tmp_alias= strmake_root(&outparam->mem_root, alias, tmp_length)))
goto err;
+
+ outparam->alias.set(tmp_alias, tmp_length, table_alias_charset);
outparam->quick_keys.init();
outparam->covering_keys.init();
- outparam->merge_keys.init();
outparam->intersect_keys.init();
outparam->keys_in_use_for_query.init();
@@ -2689,7 +3079,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
records++;
if (!(record= (uchar*) alloc_root(&outparam->mem_root,
- share->rec_buff_length * records)))
+ share->rec_buff_length * records)))
goto err; /* purecov: inspected */
MEM_NOACCESS(record, share->rec_buff_length * records);
@@ -2731,6 +3121,8 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
}
(*field_ptr)= 0; // End marker
+ DEBUG_SYNC(thd, "TABLE_after_field_clone");
+
if (share->found_next_number_field)
outparam->found_next_number_field=
outparam->field[(uint) (share->found_next_number_field - share->field)];
@@ -2761,7 +3153,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
key_info->key_part= key_part;
key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
- key_info->user_defined_key_parts) ;
+ key_info->user_defined_key_parts) ;
for ( ; key_part < key_part_end; key_part++)
{
Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
@@ -2786,54 +3178,39 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
/*
Process virtual and default columns, if any.
*/
- if (share->vfields)
- {
- if (!(vfield_ptr = (Field **) alloc_root(&outparam->mem_root,
- (uint) ((share->vfields+1)*
- sizeof(Field*)))))
+ if (share->virtual_fields || share->default_fields ||
+ share->default_expressions || share->table_check_constraints)
+ {
+ Field **vfield_ptr, **dfield_ptr;
+ Virtual_column_info **check_constraint_ptr;
+
+ if (!multi_alloc_root(&outparam->mem_root,
+ &vfield_ptr, (uint) ((share->virtual_fields + 1)*
+ sizeof(Field*)),
+ &dfield_ptr, (uint) ((share->default_fields +
+ share->default_expressions +1)*
+ sizeof(Field*)),
+ &check_constraint_ptr,
+ (uint) ((share->table_check_constraints +
+ share->field_check_constraints + 1)*
+ sizeof(Virtual_column_info*)),
+ NullS))
goto err;
-
- outparam->vfield= vfield_ptr;
- }
-
- if (share->default_fields)
- {
- if (!(dfield_ptr = (Field **) alloc_root(&outparam->mem_root,
- (uint) ((share->default_fields+1)*
- sizeof(Field*)))))
- goto err;
-
- outparam->default_field= dfield_ptr;
- }
-
- if (share->vfields || share->default_fields)
- {
- /* Reuse the same loop both for virtual and default fields. */
- for (field_ptr= outparam->field; *field_ptr; field_ptr++)
+ if (share->virtual_fields)
+ outparam->vfield= vfield_ptr;
+ if (share->default_fields + share->default_expressions)
+ outparam->default_field= dfield_ptr;
+ if (share->table_check_constraints || share->field_check_constraints)
+ outparam->check_constraints= check_constraint_ptr;
+
+ if (parse_vcol_defs(thd, &outparam->mem_root, outparam, &error_reported))
{
- if (share->vfields && (*field_ptr)->vcol_info)
- {
- if (unpack_vcol_info_from_frm(thd,
- &outparam->mem_root,
- outparam,
- *field_ptr,
- &(*field_ptr)->vcol_info->expr_str,
- &error_reported))
- {
- error= OPEN_FRM_CORRUPTED;
- goto err;
- }
- *(vfield_ptr++)= *field_ptr;
- }
- if (share->default_fields &&
- ((*field_ptr)->has_insert_default_function() ||
- (*field_ptr)->has_update_default_function()))
- *(dfield_ptr++)= *field_ptr;
+ error= OPEN_FRM_CORRUPTED;
+ goto err;
}
- if (share->vfields)
- *vfield_ptr= 0; // End marker
- if (share->default_fields)
- *dfield_ptr= 0; // End marker
+
+ /* Update to use trigger fields */
+ switch_defaults_to_nullable_trigger_fields(outparam);
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -2904,7 +3281,7 @@ partititon_err:
#endif
/* Check virtual columns against table's storage engine. */
- if (share->vfields &&
+ if (share->virtual_fields &&
(outparam->file &&
!(outparam->file->ha_table_flags() & HA_CAN_VIRTUAL_COLUMNS)))
{
@@ -2917,14 +3294,10 @@ partititon_err:
/* Allocate bitmaps */
bitmap_size= share->column_bitmap_size;
- bitmap_count= 6;
- if (share->vfields)
- {
- if (!(outparam->def_vcol_set= (MY_BITMAP*)
- alloc_root(&outparam->mem_root, sizeof(*outparam->def_vcol_set))))
- goto err;
+ bitmap_count= 7;
+ if (share->virtual_fields)
bitmap_count++;
- }
+
if (!(bitmaps= (uchar*) alloc_root(&outparam->mem_root,
bitmap_size * bitmap_count)))
goto err;
@@ -2935,13 +3308,21 @@ partititon_err:
my_bitmap_init(&outparam->def_write_set,
(my_bitmap_map*) bitmaps, share->fields, FALSE);
bitmaps+= bitmap_size;
- if (share->vfields)
+
+ /* Don't allocate vcol_bitmap if we don't need it */
+ if (share->virtual_fields)
{
- /* Don't allocate vcol_bitmap if we don't need it */
+ if (!(outparam->def_vcol_set= (MY_BITMAP*)
+ alloc_root(&outparam->mem_root, sizeof(*outparam->def_vcol_set))))
+ goto err;
my_bitmap_init(outparam->def_vcol_set,
(my_bitmap_map*) bitmaps, share->fields, FALSE);
bitmaps+= bitmap_size;
}
+
+ my_bitmap_init(&outparam->has_value_set,
+ (my_bitmap_map*) bitmaps, share->fields, FALSE);
+ bitmaps+= bitmap_size;
my_bitmap_init(&outparam->tmp_set,
(my_bitmap_map*) bitmaps, share->fields, FALSE);
bitmaps+= bitmap_size;
@@ -2960,13 +3341,8 @@ partititon_err:
/* The table struct is now initialized; Open the table */
if (db_stat)
{
- if (db_stat & HA_OPEN_TEMPORARY)
- ha_open_flags|= HA_OPEN_TMP_TABLE;
- else if ((db_stat & HA_WAIT_IF_LOCKED) ||
- (specialflag & SPECIAL_WAIT_IF_LOCKED))
+ if (specialflag & SPECIAL_WAIT_IF_LOCKED)
ha_open_flags|= HA_OPEN_WAIT_IF_LOCKED;
- else if (db_stat & (HA_ABORT_IF_LOCKED | HA_GET_INFO))
- ha_open_flags|= HA_OPEN_ABORT_IF_LOCKED;
else
ha_open_flags|= HA_OPEN_IGNORE_IF_LOCKED;
@@ -2996,11 +3372,23 @@ partititon_err:
if (share->db_type()->discover_table &&
(ha_err == ENOENT || ha_err == HA_ERR_NO_SUCH_TABLE))
error= OPEN_FRM_DISCOVER;
-
+
goto err;
}
}
+ outparam->mark_columns_used_by_virtual_fields();
+ if (!check_set_initialized &&
+ share->check_set_initialized == TABLE_SHARE::V_KEYS)
+ {
+ // copy PART_INDIRECT_KEY_FLAG that was set meanwhile by *some* thread
+ for (uint i= 0 ; i < share->fields ; i++)
+ {
+ if (share->field[i]->flags & PART_INDIRECT_KEY_FLAG)
+ outparam->field[i]->flags|= PART_INDIRECT_KEY_FLAG;
+ }
+ }
+
if (share->table_category == TABLE_CATEGORY_LOG)
{
outparam->no_replicate= TRUE;
@@ -3017,6 +3405,9 @@ partititon_err:
outparam->no_replicate= FALSE;
}
+ if (outparam->no_replicate || !binlog_filter->db_ok(outparam->s->db.str))
+ outparam->s->cached_row_logging_check= 0; // No row based replication
+
/* Increment the opened_tables counter, only when open flags set. */
if (db_stat)
thd->status_var.opened_tables++;
@@ -3035,6 +3426,8 @@ partititon_err:
outparam->file= 0; // For easier error checking
outparam->db_stat=0;
thd->lex->context_analysis_only= save_context_analysis_only;
+ if (outparam->expr_arena)
+ outparam->expr_arena->free_items();
free_root(&outparam->mem_root, MYF(0)); // Safe to call on bzero'd root
outparam->alias.free();
DBUG_RETURN (error);
@@ -3047,21 +3440,16 @@ partititon_err:
SYNOPSIS
closefrm()
table TABLE object to free
- free_share Is 1 if we also want to free table_share
*/
-int closefrm(register TABLE *table, bool free_share)
+int closefrm(TABLE *table)
{
int error=0;
DBUG_ENTER("closefrm");
- DBUG_PRINT("enter", ("table: 0x%lx", (long) table));
+ DBUG_PRINT("enter", ("table: %p", table));
if (table->db_stat)
- {
- if (table->s->deleting)
- table->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
error=table->file->ha_close();
- }
table->alias.free();
if (table->expr_arena)
table->expr_arena->free_items();
@@ -3084,13 +3472,6 @@ int closefrm(register TABLE *table, bool free_share)
table->part_info= 0;
}
#endif
- if (free_share)
- {
- if (table->s->tmp_table == NO_TMP_TABLE)
- tdc_release_share(table->s);
- else
- free_table_share(table->s);
- }
free_root(&table->mem_root, MYF(0));
DBUG_RETURN(error);
}
@@ -3098,7 +3479,7 @@ int closefrm(register TABLE *table, bool free_share)
/* Deallocate temporary blob storage */
-void free_blobs(register TABLE *table)
+void free_blobs(TABLE *table)
{
uint *ptr, *end;
for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
@@ -3375,7 +3756,8 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
/* header */
fileinfo[0]=(uchar) 254;
fileinfo[1]= 1;
- fileinfo[2]= FRM_VER + 3 + MY_TEST(create_info->varchar);
+ fileinfo[2]= (create_info->expression_length == 0 ? FRM_VER_TRUE_VARCHAR :
+ FRM_VER_EXPRESSSIONS);
DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
@@ -3426,15 +3808,15 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
/* Bytes 41-46 were for RAID support; now reused for other purposes */
fileinfo[41]= (uchar) (csid >> 8);
int2store(fileinfo+42, create_info->stats_sample_pages & 0xffff);
- fileinfo[44]= (uchar) create_info->stats_auto_recalc;
- fileinfo[45]= 0;
- fileinfo[46]= 0;
+ fileinfo[44]= (uchar) create_info->stats_auto_recalc;
+ int2store(fileinfo+45, (create_info->check_constraint_list->elements+
+ create_info->field_check_constraints));
int4store(fileinfo+47, key_length);
tmp= MYSQL_VERSION_ID; // Store to avoid warning from int4store
int4store(fileinfo+51, tmp);
int4store(fileinfo+55, create_info->extra_size);
/*
- 59-60 is reserved for extra_rec_buf_length,
+ 59-60 is unused since 10.2.4
61 for default_part_db_type
*/
int2store(fileinfo+62, create_info->key_block_size);
@@ -3452,6 +3834,7 @@ void update_create_info_from_table(HA_CREATE_INFO *create_info, TABLE *table)
create_info->table_options= share->db_create_options;
create_info->avg_row_length= share->avg_row_length;
create_info->row_type= share->row_type;
+ create_info->key_block_size= share->key_block_size;
create_info->default_table_charset= share->table_charset;
create_info->table_charset= 0;
create_info->comment= share->comment;
@@ -3488,11 +3871,11 @@ rename_file_ext(const char * from,const char * to,const char * ext)
bool get_field(MEM_ROOT *mem, Field *field, String *res)
{
- char buff[MAX_FIELD_WIDTH], *to;
- String str(buff,sizeof(buff),&my_charset_bin);
+ char *to;
+ StringBuffer<MAX_FIELD_WIDTH> str;
bool rc;
THD *thd= field->get_thd();
- ulonglong sql_mode_backup= thd->variables.sql_mode;
+ sql_mode_t sql_mode_backup= thd->variables.sql_mode;
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
field->val_str(&str);
@@ -3772,13 +4155,13 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def)
is backward compatible.
*/
}
- char buffer[1024];
+ else
+ {
+ StringBuffer<1024> sql_type(system_charset_info);
+ sql_type.extra_allocation(256); // Allocate min 256 characters at once
for (i=0 ; i < table_def->count; i++, field_def++)
{
- String sql_type(buffer, sizeof(buffer), system_charset_info);
sql_type.length(0);
- /* Allocate min 256 characters at once */
- sql_type.extra_allocation(256);
if (i < table->s->fields)
{
Field *field= table->field[i];
@@ -3859,6 +4242,7 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def)
error= TRUE;
}
}
+ }
if (table_def->primary_key_parts)
{
@@ -3977,7 +4361,7 @@ bool TABLE_SHARE::visit_subgraph(Wait_for_flush *wait_for_flush,
tdc->all_tables_refs++;
mysql_mutex_unlock(&tdc->LOCK_table_share);
- TDC_element::All_share_tables_list::Iterator tables_it(tdc->all_tables);
+ All_share_tables_list::Iterator tables_it(tdc->all_tables);
/*
In case of multiple searches running in parallel, avoid going
@@ -4130,7 +4514,7 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
s->table_name.str,
tl->alias);
/* Fix alias if table name changes. */
- if (strcmp(alias.c_ptr(), tl->alias))
+ if (!alias.alloced_length() || strcmp(alias.c_ptr(), tl->alias))
alias.copy(tl->alias, strlen(tl->alias), alias.charset());
tablenr= thd->current_tablenr++;
@@ -4167,7 +4551,7 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
(*f_ptr)->cond_selectivity= 1.0;
}
- DBUG_ASSERT(key_read == 0);
+ DBUG_ASSERT(!file->keyread_enabled());
restore_record(this, s->default_values);
@@ -4370,7 +4754,7 @@ bool TABLE_LIST::setup_underlying(THD *thd)
if (!view || (!field_translation && merge_underlying_list))
{
SELECT_LEX *select= get_single_select();
-
+
if (create_field_translation(thd))
DBUG_RETURN(TRUE);
@@ -4486,13 +4870,14 @@ bool TABLE_LIST::single_table_updatable()
{
if (!updatable)
return false;
- if (view_tables && view_tables->elements == 1)
+ if (view && view->select_lex.table_list.elements == 1)
{
/*
We need to check deeply only single table views. Multi-table views
will be turned to multi-table updates and then checked by leaf tables
*/
- return view_tables->head()->single_table_updatable();
+ return (((TABLE_LIST *)view->select_lex.table_list.first)->
+ single_table_updatable());
}
return true;
}
@@ -4637,7 +5022,7 @@ bool TABLE_LIST::prep_check_option(THD *thd, uint8 check_opt_type)
void TABLE_LIST::hide_view_error(THD *thd)
{
- if (thd->killed || thd->get_internal_handler())
+ if ((thd->killed && !thd->is_error())|| thd->get_internal_handler())
return;
/* Hide "Unknown column" or "Unknown function" error */
DBUG_ASSERT(thd->is_error());
@@ -4723,7 +5108,7 @@ void TABLE_LIST::cleanup_items()
/*
- check CHECK OPTION condition
+ check CHECK OPTION condition both for view and underlying table
SYNOPSIS
TABLE_LIST::view_check_option()
@@ -4735,10 +5120,12 @@ void TABLE_LIST::cleanup_items()
VIEW_CHECK_SKIP FAILED, but continue
*/
+
int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure)
{
if (check_option)
{
+ /* VIEW's CHECK OPTION CLAUSE */
Counting_error_handler ceh;
thd->push_internal_handler(&ceh);
bool res= check_option->val_int() == 0;
@@ -4748,20 +5135,63 @@ int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure)
if (res)
{
TABLE_LIST *main_view= top_table();
- if (ignore_failure)
+ const char *name_db= (main_view->view ? main_view->view_db.str :
+ main_view->db);
+ const char *name_table= (main_view->view ? main_view->view_name.str :
+ main_view->table_name);
+ my_error(ER_VIEW_CHECK_FAILED, MYF(ignore_failure ? ME_JUST_WARNING : 0),
+ name_db, name_table);
+ return ignore_failure ? VIEW_CHECK_SKIP : VIEW_CHECK_ERROR;
+ }
+ }
+ return table->verify_constraints(ignore_failure);
+}
+
+
+int TABLE::verify_constraints(bool ignore_failure)
+{
+ /*
+ We have to check is_error() first as we are checking it for each
+ constraint to catch fatal warnings.
+ */
+ if (in_use->is_error())
+ return (VIEW_CHECK_ERROR);
+
+ /* go through check option clauses for fields and table */
+ if (check_constraints &&
+ !(in_use->variables.option_bits & OPTION_NO_CHECK_CONSTRAINT_CHECKS))
+ {
+ for (Virtual_column_info **chk= check_constraints ; *chk ; chk++)
+ {
+ /*
+ yes! NULL is ok.
+ see 4.23.3.4 Table check constraints, part 2, SQL:2016
+ */
+ if (((*chk)->expr->val_int() == 0 && !(*chk)->expr->null_value) ||
+ in_use->is_error())
{
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_VIEW_CHECK_FAILED,
- ER_THD(thd, ER_VIEW_CHECK_FAILED),
- main_view->view_db.str, main_view->view_name.str);
- return(VIEW_CHECK_SKIP);
+ StringBuffer<MAX_FIELD_WIDTH> field_error(system_charset_info);
+ enum_vcol_info_type vcol_type= (*chk)->get_vcol_type();
+ DBUG_ASSERT(vcol_type == VCOL_CHECK_TABLE ||
+ vcol_type == VCOL_CHECK_FIELD);
+ if (vcol_type == VCOL_CHECK_FIELD)
+ {
+ field_error.append(s->table_name.str);
+ field_error.append(".");
+ }
+ field_error.append((*chk)->name.str);
+ my_error(ER_CONSTRAINT_FAILED,
+ MYF(ignore_failure ? ME_JUST_WARNING : 0), field_error.c_ptr(),
+ s->db.str, s->table_name.str);
+ return ignore_failure ? VIEW_CHECK_SKIP : VIEW_CHECK_ERROR;
}
- my_error(ER_VIEW_CHECK_FAILED, MYF(0), main_view->view_db.str,
- main_view->view_name.str);
- return(VIEW_CHECK_ERROR);
}
}
- return(VIEW_CHECK_OK);
+ /*
+ We have to check in_use() as checking constraints may have generated
+ warnings that should be treated as errors
+ */
+ return(!in_use->is_error() ? VIEW_CHECK_OK : VIEW_CHECK_ERROR);
}
@@ -5180,7 +5610,8 @@ void TABLE_LIST::set_check_merged()
It is not simple to check all, but at least this should be checked:
this select is not excluded or the exclusion came from above.
*/
- DBUG_ASSERT(!derived->first_select()->exclude_from_table_unique_test ||
+ DBUG_ASSERT(derived->is_excluded() ||
+ !derived->first_select()->exclude_from_table_unique_test ||
derived->outer_select()->
exclude_from_table_unique_test);
}
@@ -5193,6 +5624,7 @@ void TABLE_LIST::set_check_materialized()
if (view)
derived= &view->unit;
DBUG_ASSERT(derived);
+ DBUG_ASSERT(!derived->is_excluded());
if (!derived->first_select()->exclude_from_table_unique_test)
derived->set_unique_exclude();
else
@@ -5426,9 +5858,10 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
{
DBUG_RETURN(field);
}
+ Name_resolution_context *context= view->view ? &view->view->select_lex.context :
+ &thd->lex->select_lex.context;
Item *item= (new (thd->mem_root)
- Item_direct_view_ref(thd, &view->view->select_lex.context,
- field_ref, view->alias,
+ Item_direct_view_ref(thd, context, field_ref, view->alias,
name, view));
if (!item)
return NULL;
@@ -5549,6 +5982,8 @@ const char *Field_iterator_table_ref::get_table_name()
{
if (table_ref->view)
return table_ref->view_name.str;
+ if (table_ref->is_derived())
+ return table_ref->table->s->table_name.str;
else if (table_ref->is_natural_join)
return natural_join_it.column_ref()->table_name();
@@ -5651,8 +6086,8 @@ Field_iterator_table_ref::get_or_create_column_ref(THD *thd, TABLE_LIST *parent_
/* The field belongs to a merge view or information schema table. */
Field_translator *translated_field= view_field_it.field_translator();
nj_col= new Natural_join_column(translated_field, table_ref);
- field_count= table_ref->field_translation_end -
- table_ref->field_translation;
+ field_count= (uint)(table_ref->field_translation_end -
+ table_ref->field_translation);
}
else
{
@@ -5748,12 +6183,12 @@ void TABLE::clear_column_bitmaps()
Reset column read/write usage. It's identical to:
bitmap_clear_all(&table->def_read_set);
bitmap_clear_all(&table->def_write_set);
- if (s->vfields) bitmap_clear_all(table->def_vcol_set);
+ if (s->virtual_fields) bitmap_clear_all(table->def_vcol_set);
The code assumes that the bitmaps are allocated after each other, as
guaranteed by open_table_from_share()
*/
bzero((char*) def_read_set.bitmap,
- s->column_bitmap_size * (s->vfields ? 3 : 2));
+ s->column_bitmap_size * (s->virtual_fields ? 3 : 2));
column_bitmaps_set(&def_read_set, &def_write_set, def_vcol_set);
rpl_write_set= 0; // Safety
}
@@ -5783,50 +6218,34 @@ void TABLE::prepare_for_position()
}
-/*
- Mark that only fields from one key is used
-
- NOTE:
- This changes the bitmap to use the tmp bitmap
- After this, you can't access any other columns in the table until
- bitmaps are reset, for example with TABLE::clear_column_bitmaps()
- or TABLE::restore_column_maps_after_mark_index()
-*/
-
-void TABLE::mark_columns_used_by_index(uint index)
+MY_BITMAP *TABLE::prepare_for_keyread(uint index, MY_BITMAP *map)
{
- MY_BITMAP *bitmap= &tmp_set;
- DBUG_ENTER("TABLE::mark_columns_used_by_index");
-
- enable_keyread();
- bitmap_clear_all(bitmap);
- mark_columns_used_by_index_no_reset(index, bitmap);
- column_bitmaps_set(bitmap, bitmap);
- DBUG_VOID_RETURN;
+ MY_BITMAP *backup= read_set;
+ DBUG_ENTER("TABLE::prepare_for_keyread");
+ if (!no_keyread)
+ file->ha_start_keyread(index);
+ if (map != read_set || !(file->index_flags(index, 0, 1) & HA_CLUSTERED_INDEX))
+ {
+ mark_columns_used_by_index(index, map);
+ column_bitmaps_set(map);
+ }
+ DBUG_RETURN(backup);
}
/*
- Add fields used by a specified index to the table's read_set.
-
- NOTE:
- The original state can be restored with
- restore_column_maps_after_mark_index().
+ Mark that only fields from one key are used. Useful before keyread.
*/
-void TABLE::add_read_columns_used_by_index(uint index)
+void TABLE::mark_columns_used_by_index(uint index, MY_BITMAP *bitmap)
{
- MY_BITMAP *bitmap= &tmp_set;
- DBUG_ENTER("TABLE::add_read_columns_used_by_index");
+ DBUG_ENTER("TABLE::mark_columns_used_by_index");
- enable_keyread();
- bitmap_copy(bitmap, read_set);
+ bitmap_clear_all(bitmap);
mark_columns_used_by_index_no_reset(index, bitmap);
- column_bitmaps_set(bitmap, write_set);
DBUG_VOID_RETURN;
}
-
/*
Restore to use normal column maps after key read
@@ -5838,12 +6257,11 @@ void TABLE::add_read_columns_used_by_index(uint index)
when calling mark_columns_used_by_index
*/
-void TABLE::restore_column_maps_after_mark_index()
+void TABLE::restore_column_maps_after_keyread(MY_BITMAP *backup)
{
DBUG_ENTER("TABLE::restore_column_maps_after_mark_index");
-
- disable_keyread();
- default_column_bitmaps();
+ file->ha_end_keyread();
+ read_set= backup;
file->column_bitmaps_signal();
DBUG_VOID_RETURN;
}
@@ -5853,21 +6271,15 @@ void TABLE::restore_column_maps_after_mark_index()
mark columns used by key, but don't reset other fields
*/
-void TABLE::mark_columns_used_by_index_no_reset(uint index,
- MY_BITMAP *bitmap)
+void TABLE::mark_columns_used_by_index_no_reset(uint index, MY_BITMAP *bitmap)
{
KEY_PART_INFO *key_part= key_info[index].key_part;
- KEY_PART_INFO *key_part_end= (key_part +
- key_info[index].user_defined_key_parts);
+ KEY_PART_INFO *key_part_end= (key_part + key_info[index].user_defined_key_parts);
for (;key_part != key_part_end; key_part++)
- {
bitmap_set_bit(bitmap, key_part->fieldnr-1);
- if (key_part->field->vcol_info &&
- key_part->field->vcol_info->expr_item)
- key_part->field->vcol_info->
- expr_item->walk(&Item::register_field_in_bitmap,
- 1, (uchar *) bitmap);
- }
+ if (file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX &&
+ s->primary_key != MAX_KEY && s->primary_key != index)
+ mark_columns_used_by_index_no_reset(s->primary_key, bitmap);
}
@@ -5914,6 +6326,7 @@ void TABLE::mark_auto_increment_column()
void TABLE::mark_columns_needed_for_delete()
{
+ bool need_signal= false;
mark_columns_per_binlog_row_image();
if (triggers)
@@ -5923,10 +6336,15 @@ void TABLE::mark_columns_needed_for_delete()
Field **reg_field;
for (reg_field= field ; *reg_field ; reg_field++)
{
- if ((*reg_field)->flags & PART_KEY_FLAG)
- bitmap_set_bit(read_set, (*reg_field)->field_index);
+ Field *cur_field= *reg_field;
+ if (cur_field->flags & (PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG))
+ {
+ bitmap_set_bit(read_set, cur_field->field_index);
+ if (cur_field->vcol_info)
+ bitmap_set_bit(vcol_set, cur_field->field_index);
+ }
}
- file->column_bitmaps_signal();
+ need_signal= true;
}
if (file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE)
{
@@ -5940,9 +6358,17 @@ void TABLE::mark_columns_needed_for_delete()
else
{
mark_columns_used_by_index_no_reset(s->primary_key, read_set);
- file->column_bitmaps_signal();
+ need_signal= true;
}
}
+ if (check_constraints)
+ {
+ mark_check_constraint_columns_for_read();
+ need_signal= true;
+ }
+
+ if (need_signal)
+ file->column_bitmaps_signal();
}
@@ -5966,23 +6392,43 @@ void TABLE::mark_columns_needed_for_delete()
void TABLE::mark_columns_needed_for_update()
{
- DBUG_ENTER("mark_columns_needed_for_update");
+ DBUG_ENTER("TABLE::mark_columns_needed_for_update");
+ bool need_signal= false;
mark_columns_per_binlog_row_image();
if (triggers)
triggers->mark_fields_used(TRG_EVENT_UPDATE);
+ if (default_field)
+ mark_default_fields_for_write(FALSE);
+ if (vfield)
+ need_signal|= mark_virtual_columns_for_write(FALSE);
if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
{
- /* Mark all used key columns for read */
- Field **reg_field;
- for (reg_field= field ; *reg_field ; reg_field++)
+ KEY *end= key_info + s->keys;
+ for (KEY *k= key_info; k < end; k++)
{
- /* Merge keys is all keys that had a column refered to in the query */
- if (merge_keys.is_overlapping((*reg_field)->part_of_key))
- bitmap_set_bit(read_set, (*reg_field)->field_index);
+ KEY_PART_INFO *kpend= k->key_part + k->ext_key_parts;
+ bool any_written= false, all_read= true;
+ for (KEY_PART_INFO *kp= k->key_part; kp < kpend; kp++)
+ {
+ int idx= kp->fieldnr - 1;
+ any_written|= bitmap_is_set(write_set, idx);
+ all_read&= bitmap_is_set(read_set, idx);
+ }
+ if (any_written && !all_read)
+ {
+ for (KEY_PART_INFO *kp= k->key_part; kp < kpend; kp++)
+ {
+ int idx= kp->fieldnr - 1;
+ if (bitmap_fast_test_and_set(read_set, idx))
+ continue;
+ if (field[idx]->vcol_info)
+ mark_virtual_col(field[idx]);
+ }
+ }
}
- file->column_bitmaps_signal();
+ need_signal= true;
}
if (file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_DELETE)
{
@@ -5996,11 +6442,28 @@ void TABLE::mark_columns_needed_for_update()
else
{
mark_columns_used_by_index_no_reset(s->primary_key, read_set);
- file->column_bitmaps_signal();
+ need_signal= true;
}
}
- /* Mark all virtual columns needed for update */
- mark_virtual_columns_for_write(FALSE);
+ if (check_constraints)
+ {
+ mark_check_constraint_columns_for_read();
+ need_signal= true;
+ }
+
+ /*
+ If a timestamp field settable on UPDATE is present then to avoid wrong
+ update force the table handler to retrieve write-only fields to be able
+ to compare records and detect data change.
+ */
+ if ((file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) &&
+ default_field && s->has_update_default_function)
+ {
+ bitmap_union(read_set, write_set);
+ need_signal= true;
+ }
+ if (need_signal)
+ file->column_bitmaps_signal();
DBUG_VOID_RETURN;
}
@@ -6014,6 +6477,7 @@ void TABLE::mark_columns_needed_for_update()
void TABLE::mark_columns_needed_for_insert()
{
+ DBUG_ENTER("mark_columns_needed_for_insert");
mark_columns_per_binlog_row_image();
if (triggers)
@@ -6029,8 +6493,14 @@ void TABLE::mark_columns_needed_for_insert()
}
if (found_next_number_field)
mark_auto_increment_column();
+ if (default_field)
+ mark_default_fields_for_write(TRUE);
/* Mark virtual columns for insert */
- mark_virtual_columns_for_write(TRUE);
+ if (vfield)
+ mark_virtual_columns_for_write(TRUE);
+ if (check_constraints)
+ mark_check_constraint_columns_for_read();
+ DBUG_VOID_RETURN;
}
/*
@@ -6137,14 +6607,18 @@ void TABLE::mark_columns_per_binlog_row_image()
mark_columns_used_by_index_no_reset(s->primary_key, read_set);
/* Only write columns that have changed */
rpl_write_set= write_set;
- if (default_field)
- mark_default_fields_for_write(rpl_write_set);
break;
default:
DBUG_ASSERT(FALSE);
}
}
+ /*
+ We have to ensure that all virtual columns that are part of read set
+ are calculated.
+ */
+ if (vcol_set)
+ bitmap_union(vcol_set, read_set);
file->column_bitmaps_signal();
}
@@ -6175,9 +6649,9 @@ bool TABLE::mark_virtual_col(Field *field)
DBUG_ASSERT(field->vcol_info);
if (!(res= bitmap_fast_test_and_set(vcol_set, field->field_index)))
{
- Item *vcol_item= field->vcol_info->expr_item;
+ Item *vcol_item= field->vcol_info->expr;
DBUG_ASSERT(vcol_item);
- vcol_item->walk(&Item::register_field_in_read_map, 1, (uchar *) 0);
+ vcol_item->walk(&Item::register_field_in_read_map, 1, 0);
}
return res;
}
@@ -6185,20 +6659,21 @@ bool TABLE::mark_virtual_col(Field *field)
/*
@brief Mark virtual columns for update/insert commands
-
- @param insert_fl <-> virtual columns are marked for insert command
+
+ @param insert_fl true if virtual columns are marked for insert command
+ For the moment this is not used, may be used in future.
@details
The function marks virtual columns used in a update/insert commands
in the vcol_set bitmap.
For an insert command a virtual column is always marked in write_set if
it is a stored column.
- If a virtual column is from write_set it is always marked in vcol_set.
+ If a virtual column is from write_set it is always marked in vcol_set.
If a stored virtual column is not from write_set but it is computed
through columns from write_set it is also marked in vcol_set, and,
besides, it is added to write_set.
- @return void
+ @return whether a bitmap was updated
@note
Let table t1 have columns a,b,c and let column c be a stored virtual
@@ -6208,76 +6683,107 @@ bool TABLE::mark_virtual_col(Field *field)
column b will be placed into read_set.
If column c was a virtual column, but not a stored virtual column
then it would not be added to any of the sets. Column b would not
- be added to read_set either.
+ be added to read_set either.
*/
-void TABLE::mark_virtual_columns_for_write(bool insert_fl)
+bool TABLE::mark_virtual_columns_for_write(bool insert_fl
+ __attribute__((unused)))
{
Field **vfield_ptr, *tmp_vfield;
- bool bitmap_updated= FALSE;
-
- if (!vfield)
- return;
-
- if (!vfield)
- return;
+ bool bitmap_updated= false;
+ DBUG_ENTER("mark_virtual_columns_for_write");
for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++)
{
tmp_vfield= *vfield_ptr;
if (bitmap_is_set(write_set, tmp_vfield->field_index))
- bitmap_updated= mark_virtual_col(tmp_vfield);
- else if (tmp_vfield->stored_in_db)
+ bitmap_updated|= mark_virtual_col(tmp_vfield);
+ else if (tmp_vfield->vcol_info->stored_in_db ||
+ (tmp_vfield->flags & (PART_KEY_FLAG | FIELD_IN_PART_FUNC_FLAG |
+ PART_INDIRECT_KEY_FLAG)))
{
- bool mark_fl= insert_fl;
- if (!mark_fl)
- {
- MY_BITMAP *save_read_set;
- Item *vcol_item= tmp_vfield->vcol_info->expr_item;
- DBUG_ASSERT(vcol_item);
- bitmap_clear_all(&tmp_set);
- save_read_set= read_set;
- read_set= &tmp_set;
- vcol_item->walk(&Item::register_field_in_read_map, 1, (uchar *) 0);
- read_set= save_read_set;
- bitmap_intersect(&tmp_set, write_set);
- mark_fl= !bitmap_is_clear_all(&tmp_set);
- }
- if (mark_fl)
- {
- bitmap_set_bit(write_set, tmp_vfield->field_index);
- mark_virtual_col(tmp_vfield);
- bitmap_updated= TRUE;
- }
- }
+ bitmap_set_bit(write_set, tmp_vfield->field_index);
+ mark_virtual_col(tmp_vfield);
+ bitmap_updated= true;
+ }
}
if (bitmap_updated)
file->column_bitmaps_signal();
+ DBUG_RETURN(bitmap_updated);
}
+/*
+ Mark fields used by check constraints into s->check_set.
+ Mark all fields used in an expression that is part of an index
+ with PART_INDIRECT_KEY_FLAG
-/**
- Check if a table has a default function either for INSERT or UPDATE-like
- operation
- @retval true there is a default function
- @retval false there is no default function
+ This is done once for the TABLE_SHARE the first time the table is opened.
+ The marking must be done non-destructively to handle the case when
+ this could be run in parallely by two threads
*/
-bool TABLE::has_default_function(bool is_update)
+void TABLE::mark_columns_used_by_virtual_fields(void)
{
- Field **dfield_ptr, *dfield;
- bool res= false;
- for (dfield_ptr= default_field; *dfield_ptr; dfield_ptr++)
+ MY_BITMAP *save_read_set;
+ Field **vfield_ptr;
+ TABLE_SHARE::enum_v_keys v_keys= TABLE_SHARE::NO_V_KEYS;
+
+ /* If there is virtual fields are already initialized */
+ if (s->check_set_initialized)
+ return;
+
+ if (s->tmp_table == NO_TMP_TABLE)
+ mysql_mutex_lock(&s->LOCK_share);
+ if (s->check_set)
{
- dfield= (*dfield_ptr);
- if (is_update)
- res= dfield->has_update_default_function();
- else
- res= dfield->has_insert_default_function();
- if (res)
- return res;
+ /* Mark fields used by check constraint */
+ save_read_set= read_set;
+ read_set= s->check_set;
+
+ for (Virtual_column_info **chk= check_constraints ; *chk ; chk++)
+ (*chk)->expr->walk(&Item::register_field_in_read_map, 1, 0);
+ read_set= save_read_set;
}
- return res;
+
+ /*
+ mark all fields that part of a virtual indexed field with
+ PART_INDIRECT_KEY_FLAG. This is used to ensure that all fields
+ that are part of an index exits before write/delete/update.
+
+ As this code is only executed once per open share, it's reusing
+ existing functionality instead of adding an extra argument to
+ add_field_to_set_processor or adding another processor.
+ */
+ if (vfield)
+ {
+ for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++)
+ {
+ if ((*vfield_ptr)->flags & PART_KEY_FLAG)
+ (*vfield_ptr)->vcol_info->expr->walk(&Item::add_field_to_set_processor,
+ 1, this);
+ }
+ for (uint i= 0 ; i < s->fields ; i++)
+ {
+ if (bitmap_is_set(&tmp_set, i))
+ {
+ s->field[i]->flags|= PART_INDIRECT_KEY_FLAG;
+ v_keys= TABLE_SHARE::V_KEYS;
+ }
+ }
+ bitmap_clear_all(&tmp_set);
+ }
+ s->check_set_initialized= v_keys;
+ if (s->tmp_table == NO_TMP_TABLE)
+ mysql_mutex_unlock(&s->LOCK_share);
+}
+
+/* Add fields used by CHECK CONSTRAINT to read map */
+
+void TABLE::mark_check_constraint_columns_for_read(void)
+{
+ bitmap_union(read_set, s->check_set);
+ if (vcol_set)
+ bitmap_union(vcol_set, s->check_set);
}
@@ -6285,18 +6791,80 @@ bool TABLE::has_default_function(bool is_update)
Add all fields that have a default function to the table write set.
*/
-void TABLE::mark_default_fields_for_write(MY_BITMAP* bset)
+void TABLE::mark_default_fields_for_write(bool is_insert)
+{
+ DBUG_ENTER("mark_default_fields_for_write");
+ Field **field_ptr, *field;
+ for (field_ptr= default_field; *field_ptr; field_ptr++)
+ {
+ field= (*field_ptr);
+ if (is_insert && field->default_value)
+ {
+ bitmap_set_bit(write_set, field->field_index);
+ field->default_value->expr->
+ walk(&Item::register_field_in_read_map, 1, 0);
+ }
+ else if (!is_insert && field->has_update_default_function())
+ bitmap_set_bit(write_set, field->field_index);
+ }
+ DBUG_VOID_RETURN;
+}
+
+void TABLE::move_fields(Field **ptr, const uchar *to, const uchar *from)
{
- Field **dfield_ptr, *dfield;
- enum_sql_command cmd= in_use->lex->sql_command;
- for (dfield_ptr= default_field; *dfield_ptr; dfield_ptr++)
+ my_ptrdiff_t diff= to - from;
+ if (diff)
{
- dfield= (*dfield_ptr);
- if (((sql_command_flags[cmd] & CF_INSERTS_DATA) &&
- dfield->has_insert_default_function()) ||
- ((sql_command_flags[cmd] & CF_UPDATES_DATA) &&
- dfield->has_update_default_function()))
- bitmap_set_bit(bset, dfield->field_index);
+ do
+ {
+ (*ptr)->move_field_offset(diff);
+ } while (*(++ptr));
+ }
+}
+
+
+/*
+ Store all allocated virtual fields blob values
+ Used by InnoDB when calculating virtual fields for it's own internal
+ records
+*/
+
+void TABLE::remember_blob_values(String *blob_storage)
+{
+ Field **vfield_ptr;
+ for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++)
+ {
+ if ((*vfield_ptr)->type() == MYSQL_TYPE_BLOB &&
+ !(*vfield_ptr)->vcol_info->stored_in_db)
+ {
+ Field_blob *blob= ((Field_blob*) *vfield_ptr);
+ memcpy((void*) blob_storage, (void*) &blob->value, sizeof(blob->value));
+ blob_storage++;
+ blob->value.release();
+ }
+ }
+}
+
+
+/*
+ Restore all allocated virtual fields blob values
+ Used by InnoDB when calculating virtual fields for it's own internal
+ records
+*/
+
+void TABLE::restore_blob_values(String *blob_storage)
+{
+ Field **vfield_ptr;
+ for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++)
+ {
+ if ((*vfield_ptr)->type() == MYSQL_TYPE_BLOB &&
+ !(*vfield_ptr)->vcol_info->stored_in_db)
+ {
+ Field_blob *blob= ((Field_blob*) *vfield_ptr);
+ blob->value.free();
+ memcpy((void*) &blob->value, (void*) blob_storage, sizeof(blob->value));
+ blob_storage++;
+ }
}
}
@@ -6666,7 +7234,7 @@ void TABLE_LIST::reinit_before_use(THD *thd)
*/
Item_subselect *TABLE_LIST::containing_subselect()
-{
+{
return (select_lex ? select_lex->master_unit()->item : 0);
}
@@ -6696,7 +7264,7 @@ Item_subselect *TABLE_LIST::containing_subselect()
is equivalent to
USE INDEX (i1,i2)
and means "consider only i1 and i2".
-
+
Similarly
USE INDEX () USE INDEX (i1)
is equivalent to
@@ -6705,7 +7273,7 @@ Item_subselect *TABLE_LIST::containing_subselect()
It is OK to have the same index several times, e.g. "USE INDEX (i1,i1)" is
not an error.
-
+
Different kind of hints (USE/FORCE/IGNORE) are processed in the following
order:
1. All indexes in USE (or FORCE) INDEX are added to the mask.
@@ -6824,8 +7392,8 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl)
}
/*
- TODO: get rid of tbl->force_index (on if any FORCE INDEX is specified) and
- create tbl->force_index_join instead.
+ TODO: get rid of tbl->force_index (on if any FORCE INDEX is specified)
+ and create tbl->force_index_join instead.
Then use the correct force_index_XX instead of the global one.
*/
if (!index_join[INDEX_HINT_FORCE].is_clear_all() ||
@@ -6855,21 +7423,27 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl)
}
-size_t max_row_length(TABLE *table, const uchar *data)
+size_t max_row_length(TABLE *table, MY_BITMAP const *cols, const uchar *data)
{
TABLE_SHARE *table_s= table->s;
size_t length= table_s->reclength + 2 * table_s->fields;
uint *const beg= table_s->blob_field;
uint *const end= beg + table_s->blob_fields;
+ my_ptrdiff_t const rec_offset= (my_ptrdiff_t) (data - table->record[0]);
+ DBUG_ENTER("max_row_length");
for (uint *ptr= beg ; ptr != end ; ++ptr)
{
- Field_blob* const blob= (Field_blob*) table->field[*ptr];
- length+= blob->get_length((const uchar*)
- (data + blob->offset(table->record[0]))) +
- HA_KEY_BLOB_LENGTH;
+ Field * const field= table->field[*ptr];
+ if (bitmap_is_set(cols, field->field_index) &&
+ !field->is_null(rec_offset))
+ {
+ Field_blob * const blob= (Field_blob*) field;
+ length+= blob->get_length(rec_offset) + 8; /* max blob store length */
+ }
}
- return length;
+ DBUG_PRINT("exit", ("length: %lld", (longlong) length));
+ DBUG_RETURN(length);
}
@@ -6943,19 +7517,32 @@ bool is_simple_order(ORDER *order)
return TRUE;
}
+class Turn_errors_to_warnings_handler : public Internal_error_handler
+{
+public:
+ Turn_errors_to_warnings_handler() {}
+ bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ Sql_condition::enum_warning_level *level,
+ const char* msg,
+ Sql_condition ** cond_hdl)
+ {
+ *cond_hdl= NULL;
+ if (*level == Sql_condition::WARN_LEVEL_ERROR)
+ *level= Sql_condition::WARN_LEVEL_WARN;
+ return(0);
+ }
+};
+
/*
@brief Compute values for virtual columns used in query
- @param thd Thread handle
- @param table The TABLE object
- @param vcol_update_mode Specifies what virtual column are computed
+ @param update_mode Specifies what virtual column are computed
@details
The function computes the values of the virtual columns of the table and
stores them in the table record buffer.
- Only fields from vcol_set are computed: all of them, if vcol_update_mode is
- set to VCOL_UPDATE_FOR_WRITE, and, only those with the stored_in_db flag
- set to false, if vcol_update_mode is equal to VCOL_UPDATE_FOR_READ.
@retval
0 Success
@@ -6963,36 +7550,131 @@ bool is_simple_order(ORDER *order)
>0 Error occurred when storing a virtual field value
*/
-int update_virtual_fields(THD *thd, TABLE *table,
- enum enum_vcol_update_mode vcol_update_mode)
+int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode)
{
- DBUG_ENTER("update_virtual_fields");
- Field **vfield_ptr, *vfield;
- int error __attribute__ ((unused))= 0;
- DBUG_ASSERT(table && table->vfield);
-
+ DBUG_ENTER("TABLE::update_virtual_fields");
+ DBUG_PRINT("enter", ("update_mode: %d", update_mode));
+ Field **vfield_ptr, *vf;
Query_arena backup_arena;
- thd->set_n_backup_active_arena(table->expr_arena, &backup_arena);
+ Turn_errors_to_warnings_handler Suppress_errors;
+ int error;
+ bool handler_pushed= 0, update_all_columns= 1;
+ DBUG_ASSERT(vfield);
+
+ if (h->keyread_enabled())
+ DBUG_RETURN(0);
+
+ error= 0;
+ in_use->set_n_backup_active_arena(expr_arena, &backup_arena);
+
+ /* When reading or deleting row, ignore errors from virtual columns */
+ if (update_mode == VCOL_UPDATE_FOR_READ ||
+ update_mode == VCOL_UPDATE_FOR_DELETE ||
+ update_mode == VCOL_UPDATE_INDEXED)
+ {
+ in_use->push_internal_handler(&Suppress_errors);
+ handler_pushed= 1;
+ }
+ else if (update_mode == VCOL_UPDATE_FOR_REPLACE &&
+ in_use->is_current_stmt_binlog_format_row() &&
+ in_use->variables.binlog_row_image != BINLOG_ROW_IMAGE_MINIMAL)
+ {
+ /*
+ If we are doing a replace with not minimal binary logging, we have to
+ calculate all virtual columns.
+ */
+ update_all_columns= 1;
+ }
/* Iterate over virtual fields in the table */
- for (vfield_ptr= table->vfield; *vfield_ptr; vfield_ptr++)
+ for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++)
{
- vfield= (*vfield_ptr);
- DBUG_ASSERT(vfield->vcol_info && vfield->vcol_info->expr_item);
- if (bitmap_is_set(table->vcol_set, vfield->field_index) &&
- (vcol_update_mode == VCOL_UPDATE_FOR_WRITE || !vfield->stored_in_db))
+ vf= (*vfield_ptr);
+ Virtual_column_info *vcol_info= vf->vcol_info;
+ DBUG_ASSERT(vcol_info);
+ DBUG_ASSERT(vcol_info->expr);
+
+ bool update= 0, swap_values= 0;
+ switch (update_mode) {
+ case VCOL_UPDATE_FOR_READ:
+ update= (!vcol_info->stored_in_db &&
+ bitmap_is_set(vcol_set, vf->field_index));
+ swap_values= 1;
+ break;
+ case VCOL_UPDATE_FOR_DELETE:
+ case VCOL_UPDATE_FOR_WRITE:
+ update= bitmap_is_set(vcol_set, vf->field_index);
+ break;
+ case VCOL_UPDATE_FOR_REPLACE:
+ update= ((!vcol_info->stored_in_db &&
+ (vf->flags & (PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG)) &&
+ bitmap_is_set(vcol_set, vf->field_index)) ||
+ update_all_columns);
+ if (update && (vf->flags & BLOB_FLAG))
+ {
+ /*
+ The row has been read into record[1] and Field_blob::value
+ contains the value for record[0]. Swap value and read_value
+ to ensure that the virtual column data for the read row will
+ be in read_value at the end of this function
+ */
+ ((Field_blob*) vf)->swap_value_and_read_value();
+ /* Ensure we call swap_value_and_read_value() after update */
+ swap_values= 1;
+ }
+ break;
+ case VCOL_UPDATE_INDEXED:
+ case VCOL_UPDATE_INDEXED_FOR_UPDATE:
+ /* Read indexed fields that was not updated in VCOL_UPDATE_FOR_READ */
+ update= (!vcol_info->stored_in_db &&
+ (vf->flags & (PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG)) &&
+ !bitmap_is_set(vcol_set, vf->field_index));
+ swap_values= 1;
+ break;
+ }
+
+ if (update)
{
+ int field_error __attribute__((unused)) = 0;
/* Compute the actual value of the virtual fields */
- error= vfield->vcol_info->expr_item->save_in_field(vfield, 0);
- DBUG_PRINT("info", ("field '%s' - updated", vfield->field_name));
+ if (vcol_info->expr->save_in_field(vf, 0))
+ field_error= error= 1;
+ DBUG_PRINT("info", ("field '%s' - updated error: %d",
+ vf->field_name, field_error));
+ if (swap_values && (vf->flags & BLOB_FLAG))
+ {
+ /*
+ Remember the read value to allow other update_virtual_field() calls
+ for the same blob field for the row to be updated.
+ Field_blob->read_value always contains the virtual column data for
+ any read row.
+ */
+ ((Field_blob*) vf)->swap_value_and_read_value();
+ }
}
else
{
- DBUG_PRINT("info", ("field '%s' - skipped", vfield->field_name));
+ DBUG_PRINT("info", ("field '%s' - skipped", vf->field_name));
}
}
- thd->restore_active_arena(table->expr_arena, &backup_arena);
- DBUG_RETURN(0);
+ if (handler_pushed)
+ in_use->pop_internal_handler();
+ in_use->restore_active_arena(expr_arena, &backup_arena);
+
+ /* Return 1 only of we got a fatal error, not a warning */
+ DBUG_RETURN(in_use->is_error());
+}
+
+int TABLE::update_virtual_field(Field *vf)
+{
+ Query_arena backup_arena;
+ DBUG_ENTER("TABLE::update_virtual_field");
+ in_use->set_n_backup_active_arena(expr_arena, &backup_arena);
+ bitmap_clear_all(&tmp_set);
+ vf->vcol_info->expr->walk(&Item::update_vcol_processor, 0, &tmp_set);
+ vf->vcol_info->expr->save_in_field(vf, 0);
+ in_use->restore_active_arena(expr_arena, &backup_arena);
+ DBUG_RETURN(in_use->is_error());
}
@@ -7006,47 +7688,65 @@ int update_virtual_fields(THD *thd, TABLE *table,
definition and the current operation one or the other kind of update
function is evaluated.
+ @param update_command True if command was an update else insert
+ @param ignore_errors True if we should ignore errors
+
@retval
0 Success
@retval
- >0 Error occurred when storing a virtual field value
+ >0 Error occurred when storing a virtual field value and
+ ignore_errors == 0. If set then an error was generated.
*/
-int TABLE::update_default_fields()
+int TABLE::update_default_fields(bool update_command, bool ignore_errors)
{
- DBUG_ENTER("update_default_fields");
- Field **dfield_ptr, *dfield;
+ Query_arena backup_arena;
+ Field **field_ptr;
int res= 0;
- enum_sql_command cmd= in_use->lex->sql_command;
-
+ DBUG_ENTER("TABLE::update_default_fields");
DBUG_ASSERT(default_field);
+ in_use->set_n_backup_active_arena(expr_arena, &backup_arena);
+
/* Iterate over fields with default functions in the table */
- for (dfield_ptr= default_field; *dfield_ptr; dfield_ptr++)
+ for (field_ptr= default_field; *field_ptr ; field_ptr++)
{
- dfield= (*dfield_ptr);
+ Field *field= (*field_ptr);
/*
- If an explicit default value for a filed overrides the default,
+ If an explicit default value for a field overrides the default,
do not update the field with its automatic default value.
*/
- if (!(dfield->flags & HAS_EXPLICIT_VALUE))
+ if (!field->has_explicit_value())
{
- if (sql_command_flags[cmd] & CF_INSERTS_DATA)
- res= dfield->evaluate_insert_default_function();
- if (sql_command_flags[cmd] & CF_UPDATES_DATA)
- res= dfield->evaluate_update_default_function();
- if (res)
- DBUG_RETURN(res);
+ if (!update_command)
+ {
+ if (field->default_value &&
+ (field->default_value->flags || field->flags & BLOB_FLAG))
+ res|= (field->default_value->expr->save_in_field(field, 0) < 0);
+ }
+ else
+ res|= field->evaluate_update_default_function();
+ if (!ignore_errors && res)
+ {
+ my_error(ER_CALCULATING_DEFAULT_VALUE, MYF(0), field->field_name);
+ break;
+ }
+ res= 0;
}
}
+ in_use->restore_active_arena(expr_arena, &backup_arena);
DBUG_RETURN(res);
}
+/**
+ Reset markers that fields are being updated
+*/
+
void TABLE::reset_default_fields()
{
- if (default_field)
- for (Field **df= default_field; *df; df++)
- (*df)->flags&= ~HAS_EXPLICIT_VALUE;
+ DBUG_ENTER("reset_default_fields");
+ bitmap_clear_all(&has_value_set);
+ DBUG_VOID_RETURN;
}
/*
@@ -7155,7 +7855,7 @@ bool TABLE::validate_default_values_of_unset_fields(THD *thd) const
/*
We're here if:
- validate_value_in_record_with_warn() failed and
- strict mode converted WARN to ERROR
+ strict mo validate_default_values_of_unset_fieldsde converted WARN to ERROR
- or the connection was killed, or closed unexpectedly
*/
DBUG_RETURN(true);
@@ -7166,6 +7866,71 @@ bool TABLE::validate_default_values_of_unset_fields(THD *thd) const
}
+bool TABLE::insert_all_rows_into_tmp_table(THD *thd,
+ TABLE *tmp_table,
+ TMP_TABLE_PARAM *tmp_table_param,
+ bool with_cleanup)
+{
+ int write_err= 0;
+
+ DBUG_ENTER("TABLE::insert_all_rows_into_tmp_table");
+
+ if (with_cleanup)
+ {
+ if ((write_err= tmp_table->file->ha_delete_all_rows()))
+ goto err;
+ }
+
+ if (file->indexes_are_disabled())
+ tmp_table->file->ha_disable_indexes(HA_KEY_SWITCH_ALL);
+ file->ha_index_or_rnd_end();
+
+ if (file->ha_rnd_init_with_error(1))
+ DBUG_RETURN(1);
+
+ if (tmp_table->no_rows)
+ tmp_table->file->extra(HA_EXTRA_NO_ROWS);
+ else
+ {
+ /* update table->file->stats.records */
+ file->info(HA_STATUS_VARIABLE);
+ tmp_table->file->ha_start_bulk_insert(file->stats.records);
+ }
+
+ while (!file->ha_rnd_next(tmp_table->record[0]))
+ {
+ write_err= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
+ if (write_err)
+ {
+ bool is_duplicate;
+ if (tmp_table->file->is_fatal_error(write_err, HA_CHECK_DUP) &&
+ create_internal_tmp_table_from_heap(thd, tmp_table,
+ tmp_table_param->start_recinfo,
+ &tmp_table_param->recinfo,
+ write_err, 1, &is_duplicate))
+ DBUG_RETURN(1);
+
+ }
+ if (thd->check_killed())
+ {
+ thd->send_kill_message();
+ goto err_killed;
+ }
+ }
+ if (!tmp_table->no_rows && tmp_table->file->ha_end_bulk_insert())
+ goto err;
+ DBUG_RETURN(0);
+
+err:
+ DBUG_PRINT("error",("Got error: %d",write_err));
+ file->print_error(write_err, MYF(0));
+err_killed:
+ (void) file->ha_rnd_end();
+ DBUG_RETURN(1);
+}
+
+
+
/*
@brief Reset const_table flag
@@ -7206,20 +7971,24 @@ void TABLE_LIST::reset_const_table()
bool TABLE_LIST::handle_derived(LEX *lex, uint phases)
{
- SELECT_LEX_UNIT *unit;
+ SELECT_LEX_UNIT *unit= get_unit();
DBUG_ENTER("handle_derived");
DBUG_PRINT("enter", ("phases: 0x%x", phases));
- if ((unit= get_unit()))
+
+ if (unit)
{
- for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select())
- if (sl->handle_derived(lex, phases))
- DBUG_RETURN(TRUE);
- DBUG_RETURN(mysql_handle_single_derived(lex, this, phases));
+ if (!is_with_table_recursive_reference())
+ {
+ for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select())
+ if (sl->handle_derived(lex, phases))
+ DBUG_RETURN(TRUE);
+ }
+ if (mysql_handle_single_derived(lex, this, phases))
+ DBUG_RETURN(TRUE);
}
DBUG_RETURN(FALSE);
}
-
/**
@brief
Return unit of this derived table/view
@@ -7325,8 +8094,10 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view)
/* A subquery might be forced to be materialized due to a side-effect. */
if (!is_materialized_derived() && first_select->is_mergeable() &&
optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) &&
+ !thd->lex->can_not_use_merged() &&
!(thd->lex->sql_command == SQLCOM_UPDATE_MULTI ||
- thd->lex->sql_command == SQLCOM_DELETE_MULTI))
+ thd->lex->sql_command == SQLCOM_DELETE_MULTI) &&
+ !is_recursive_with_table())
set_merged_derived();
else
set_materialized_derived();
@@ -7347,7 +8118,9 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view)
*/
if (is_merged_derived())
{
- if (is_view() || unit->prepared)
+ if (is_view() ||
+ (unit->prepared &&
+ !(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_VIEW)))
create_field_translation(thd);
}
@@ -7376,7 +8149,7 @@ int TABLE_LIST::fetch_number_of_rows()
{
if (jtbm_subselect->is_jtbm_merged)
{
- table->file->stats.records= jtbm_subselect->jtbm_record_count;
+ table->file->stats.records= (ha_rows)jtbm_subselect->jtbm_record_count;
set_if_bigger(table->file->stats.records, 2);
table->used_stat_records= table->file->stats.records;
}
@@ -7497,6 +8270,12 @@ void TABLE_LIST::set_lock_type(THD *thd, enum thr_lock_type lock)
}
}
+bool TABLE_LIST::is_with_table()
+{
+ return derived && derived->with_element;
+}
+
+
uint TABLE_SHARE::actual_n_key_parts(THD *thd)
{
return use_ext_keys &&
@@ -7513,6 +8292,201 @@ double KEY::actual_rec_per_key(uint i)
read_stats->get_avg_frequency(i) : (double) rec_per_key[i]);
}
+
+/**
+ @brief
+ Mark subformulas of a condition unusable for the condition pushed into table
+
+ @param cond The condition whose subformulas are to be marked
+
+ @details
+ This method recursively traverses the AND-OR condition cond and for each subformula
+ of the codition it checks whether it can be usable for the extraction of a condition
+ that can be pushed into this table. The subformulas that are not usable are
+ marked with the flag NO_EXTRACTION_FL.
+ @note
+ This method is called before any call of TABLE_LIST::build_pushable_cond_for_table.
+ The flag NO_EXTRACTION_FL set in a subformula allows to avoid building clone
+ for the subformula when extracting the pushable condition.
+*/
+
+void TABLE_LIST::check_pushable_cond_for_table(Item *cond)
+{
+ table_map tab_map= table->map;
+ cond->clear_extraction_flag();
+ if (cond->type() == Item::COND_ITEM)
+ {
+ bool and_cond= ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC;
+ List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
+ uint count= 0;
+ Item *item;
+ while ((item=li++))
+ {
+ check_pushable_cond_for_table(item);
+ if (item->get_extraction_flag() != NO_EXTRACTION_FL)
+ count++;
+ else if (!and_cond)
+ break;
+ }
+ if ((and_cond && count == 0) || item)
+ {
+ cond->set_extraction_flag(NO_EXTRACTION_FL);
+ if (and_cond)
+ li.rewind();
+ while ((item= li++))
+ item->clear_extraction_flag();
+ }
+ }
+ else if (!cond->excl_dep_on_table(tab_map))
+ cond->set_extraction_flag(NO_EXTRACTION_FL);
+}
+
+
+/**
+ @brief
+ Build condition extractable from the given one depended only on this table
+
+ @param thd The thread handle
+ @param cond The condition from which the pushable one is to be extracted
+
+ @details
+ For the given condition cond this method finds out what condition depended
+ only on this table can be extracted from cond. If such condition C exists
+ the method builds the item for it.
+ The method uses the flag NO_EXTRACTION_FL set by the preliminary call of
+ the method TABLE_LIST::check_pushable_cond_for_table to figure out whether
+ a subformula depends only on this table or not.
+ @note
+ The built condition C is always implied by the condition cond
+ (cond => C). The method tries to build the most restictive such
+ condition (i.e. for any other condition C' such that cond => C'
+ we have C => C').
+ @note
+ The build item is not ready for usage: substitution for the field items
+ has to be done and it has to be re-fixed.
+
+ @retval
+ the built condition pushable into this table if such a condition exists
+ NULL if there is no such a condition
+*/
+
+Item* TABLE_LIST::build_pushable_cond_for_table(THD *thd, Item *cond)
+{
+ table_map tab_map= table->map;
+ bool is_multiple_equality= cond->type() == Item::FUNC_ITEM &&
+ ((Item_func*) cond)->functype() == Item_func::MULT_EQUAL_FUNC;
+ if (cond->get_extraction_flag() == NO_EXTRACTION_FL)
+ return 0;
+ if (cond->type() == Item::COND_ITEM)
+ {
+ bool cond_and= false;
+ Item_cond *new_cond;
+ if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
+ {
+ cond_and= true;
+ new_cond=new (thd->mem_root) Item_cond_and(thd);
+ }
+ else
+ new_cond= new (thd->mem_root) Item_cond_or(thd);
+ if (!new_cond)
+ return 0;
+ List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
+ Item *item;
+ bool is_fix_needed= false;
+ while ((item=li++))
+ {
+ if (item->get_extraction_flag() == NO_EXTRACTION_FL)
+ {
+ if (!cond_and)
+ return 0;
+ continue;
+ }
+ Item *fix= build_pushable_cond_for_table(thd, item);
+ if (!fix && !cond_and)
+ return 0;
+ if (!fix)
+ continue;
+
+ if (fix->type() == Item::COND_ITEM &&
+ ((Item_cond*) fix)->functype() == Item_func::COND_AND_FUNC)
+ is_fix_needed= true;
+
+ new_cond->argument_list()->push_back(fix, thd->mem_root);
+ }
+ if (is_fix_needed && new_cond->fix_fields(thd, 0))
+ return 0;
+
+ switch (new_cond->argument_list()->elements)
+ {
+ case 0:
+ return 0;
+ case 1:
+ return new_cond->argument_list()->head();
+ default:
+ return new_cond;
+ }
+ }
+ else if (is_multiple_equality)
+ {
+ if (!(cond->used_tables() & tab_map))
+ return 0;
+ Item *new_cond= NULL;
+ int i= 0;
+ Item_equal *item_equal= (Item_equal *) cond;
+ Item *left_item = item_equal->get_const();
+ Item_equal_fields_iterator it(*item_equal);
+ Item *item;
+ if (!left_item)
+ {
+ while ((item=it++))
+ if (item->used_tables() == tab_map)
+ {
+ left_item= item;
+ break;
+ }
+ }
+ if (!left_item)
+ return 0;
+ while ((item=it++))
+ {
+ if (!(item->used_tables() == tab_map))
+ continue;
+ Item_func_eq *eq= 0;
+ Item *left_item_clone= left_item->build_clone(thd, thd->mem_root);
+ Item *right_item_clone= item->build_clone(thd, thd->mem_root);
+ if (left_item_clone && right_item_clone)
+ {
+ left_item_clone->set_item_equal(NULL);
+ right_item_clone->set_item_equal(NULL);
+ eq= new (thd->mem_root) Item_func_eq(thd, right_item_clone,
+ left_item_clone);
+ }
+ if (eq)
+ {
+ i++;
+ switch (i)
+ {
+ case 1:
+ new_cond= eq;
+ break;
+ case 2:
+ new_cond= new (thd->mem_root) Item_cond_and(thd, new_cond, eq);
+ break;
+ default:
+ ((Item_cond_and*)new_cond)->argument_list()->push_back(eq,
+ thd->mem_root);
+ }
+ }
+ }
+ if (new_cond)
+ new_cond->fix_fields(thd, &new_cond);
+ return new_cond;
+ }
+ else if (cond->get_extraction_flag() != NO_EXTRACTION_FL)
+ return cond->build_clone(thd, thd->mem_root);
+ return 0;
+}
+
LEX_CSTRING *fk_option_name(enum_fk_option opt)
{
static LEX_CSTRING names[]=
diff --git a/sql/table.h b/sql/table.h
index afe3220c943..c3ce5b8af51 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1,7 +1,7 @@
#ifndef TABLE_INCLUDED
#define TABLE_INCLUDED
/* Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2009, 2018, MariaDB
+ Copyright (c) 2009, 2019, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -49,7 +49,11 @@ class ACL_internal_schema_access;
class ACL_internal_table_access;
class Field;
class Table_statistics;
-class TDC_element;
+class With_element;
+struct TDC_element;
+class Virtual_column_info;
+class Table_triggers_list;
+class TMP_TABLE_PARAM;
/*
Used to identify NESTED_JOIN structures within a join (applicable only to
@@ -213,8 +217,13 @@ typedef struct st_order {
Field *fast_field_copier_setup;
int counter; /* position in SELECT list, correct
only if counter_used is true*/
- bool asc; /* true if ascending */
- bool free_me; /* true if item isn't shared */
+ enum enum_order {
+ ORDER_NOT_RELEVANT,
+ ORDER_ASC,
+ ORDER_DESC
+ };
+
+ enum_order direction; /* Requested direction of ordering */
bool in_field_list; /* true if in select field list */
bool counter_used; /* parameter was counter of columns */
Field *field; /* If tmp-table group */
@@ -318,61 +327,14 @@ enum release_type { RELEASE_NORMAL, RELEASE_WAIT_FOR_DROP };
enum enum_vcol_update_mode
{
VCOL_UPDATE_FOR_READ= 0,
- VCOL_UPDATE_FOR_WRITE
-};
-
-class Filesort_info
-{
- /// Buffer for sorting keys.
- Filesort_buffer filesort_buffer;
-
-public:
- IO_CACHE *io_cache; /* If sorted through filesort */
- uchar *buffpek; /* Buffer for buffpek structures */
- uint buffpek_len; /* Max number of buffpeks in the buffer */
- uchar *addon_buf; /* Pointer to a buffer if sorted with fields */
- size_t addon_length; /* Length of the buffer */
- struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
- void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *); /* To unpack back */
- uchar *record_pointers; /* If sorted in memory */
- ha_rows found_records; /* How many records in sort */
-
- /** Sort filesort_buffer */
- void sort_buffer(Sort_param *param, uint count)
- { filesort_buffer.sort_buffer(param, count); }
-
- /**
- Accessors for Filesort_buffer (which @c).
- */
- uchar *get_record_buffer(uint idx)
- { return filesort_buffer.get_record_buffer(idx); }
-
- uchar **get_sort_keys()
- { return filesort_buffer.get_sort_keys(); }
-
- uchar **alloc_sort_buffer(uint num_records, uint record_length)
- { return filesort_buffer.alloc_sort_buffer(num_records, record_length); }
-
- bool check_sort_buffer_properties(uint num_records, uint record_length)
- {
- return filesort_buffer.check_sort_buffer_properties(num_records,
- record_length);
- }
-
- void free_sort_buffer()
- { filesort_buffer.free_sort_buffer(); }
-
- void init_record_pointers()
- { filesort_buffer.init_record_pointers(); }
-
- size_t sort_buffer_size() const
- { return filesort_buffer.sort_buffer_size(); }
+ VCOL_UPDATE_FOR_WRITE,
+ VCOL_UPDATE_FOR_DELETE,
+ VCOL_UPDATE_INDEXED,
+ VCOL_UPDATE_INDEXED_FOR_UPDATE,
+ VCOL_UPDATE_FOR_REPLACE
};
-class Field_blob;
-class Table_triggers_list;
-
/**
Category of table found in the table share.
*/
@@ -490,9 +452,6 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db,
const LEX_STRING *name);
-struct TABLE_share;
-struct All_share_tables;
-
typedef struct st_table_field_type
{
LEX_STRING name;
@@ -637,7 +596,9 @@ struct TABLE_SHARE
Field **field;
Field **found_next_number_field;
KEY *key_info; /* data of keys in database */
+ Virtual_column_info **check_constraints;
 uint *blob_field; /* Index to blobs in Field array */
+ LEX_CUSTRING vcol_defs; /* definitions of generated columns */
TABLE_STATISTICS_CB stats_cb;
@@ -645,6 +606,7 @@ struct TABLE_SHARE
LEX_STRING comment; /* Comment about table */
CHARSET_INFO *table_charset; /* Default charset of string fields */
+ MY_BITMAP *check_set; /* Fields used by check constraint */
MY_BITMAP all_set;
/*
Key which is used for looking-up table in table cache and in the list
@@ -700,17 +662,25 @@ struct TABLE_SHARE
the record then this value is 0.
*/
uint null_bytes_for_compare;
- uint fields; /* Number of fields */
- /* Number of stored fields, generated-only virtual fields are not included */
- uint stored_fields;
+ uint fields; /* number of fields */
+ /* number of stored fields, purely virtual not included */
+ uint stored_fields;
+ uint virtual_fields; /* number of purely virtual fields */
+ /* number of purely virtual not stored blobs */
+ uint virtual_not_stored_blob_fields;
+ uint null_fields; /* number of null fields */
+ uint blob_fields; /* number of blob fields */
+ uint varchar_fields; /* number of varchar fields */
+ uint default_fields; /* number of default fields */
+
+ uint default_expressions;
+ uint table_check_constraints, field_check_constraints;
+
uint rec_buff_length; /* Size of table->record[] buffer */
uint keys, key_parts;
uint ext_key_parts; /* Total number of key parts in extended keys */
uint max_key_length, max_unique_length, total_key_length;
uint uniques; /* Number of UNIQUE index */
- uint null_fields; /* number of null fields */
- uint blob_fields; /* number of blob fields */
- uint varchar_fields; /* number of varchar fields */
uint db_create_options; /* Create options from database */
uint db_options_in_use; /* Options in use */
uint db_record_offset; /* if HA_REC_IN_SEQ */
@@ -724,17 +694,21 @@ struct TABLE_SHARE
uint open_errno; /* error from open_table_def() */
uint column_bitmap_size;
uchar frm_version;
- uint vfields; /* Number of computed (virtual) fields */
- uint default_fields; /* Number of default fields */
+
+ enum enum_v_keys { NOT_INITIALIZED=0, NO_V_KEYS, V_KEYS };
+ enum_v_keys check_set_initialized;
+
bool use_ext_keys; /* Extended keys can be used */
bool null_field_first;
bool system; /* Set if system table (one record) */
bool crypted; /* If .frm file is crypted */
bool crashed;
bool is_view;
- bool deleting; /* going to delete this table */
bool can_cmp_whole_record;
bool table_creation_was_logged;
+ bool non_determinstic_insert;
+ bool vcols_need_refixing;
+ bool has_update_default_function;
ulong table_map_id; /* for row-based replication */
/*
@@ -750,7 +724,6 @@ struct TABLE_SHARE
*/
const File_parser *view_def;
-
/*
Cache for row-based replication table share checks that does not
need to be repeated. Possible values are: -1 when cache value is
@@ -1029,7 +1002,7 @@ public:
@param length string length
@retval Pointer to the copied string.
- @retval 0 if an error occured.
+ @retval 0 if an error occurred.
*/
char *store(const char *from, uint length)
{
@@ -1074,14 +1047,14 @@ private:
One should use methods of I_P_List template instead.
*/
TABLE *share_all_next, **share_all_prev;
+ TABLE *global_free_next, **global_free_prev;
friend struct All_share_tables;
+ friend struct Table_cache_instance;
public:
+ uint32 instance; /** Table cache instance this TABLE belongs to */
THD *in_use; /* Which thread uses this */
- /* Time when table was released to table cache. Valid for unused tables. */
- ulonglong tc_time;
- Field **field; /* Pointer to fields */
uchar *record[2]; /* Pointer to records */
uchar *write_row_record; /* Used as optimisation in
@@ -1092,7 +1065,7 @@ public:
needed by the query without reading the row.
*/
key_map covering_keys;
- key_map quick_keys, merge_keys,intersect_keys;
+ key_map quick_keys, intersect_keys;
/*
A set of keys that can be used in the query that references this
table.
@@ -1111,11 +1084,12 @@ public:
key_map keys_in_use_for_order_by;
KEY *key_info; /* data of keys in database */
+ Field **field; /* Pointer to fields */
+ Field **vfield; /* Pointer to virtual fields*/
+ Field **default_field; /* Fields with non-constant DEFAULT */
Field *next_number_field; /* Set if next_number is activated */
Field *found_next_number_field; /* Set on open */
- Field **vfield; /* Pointer to virtual fields*/
- /* Fields that are updated automatically on INSERT or UPDATE. */
- Field **default_field;
+ Virtual_column_info **check_constraints;
/* Table's triggers, 0 if there are no of them */
Table_triggers_list *triggers;
@@ -1139,6 +1113,8 @@ public:
MY_BITMAP *read_set, *write_set, *rpl_write_set;
/* Set if using virtual fields */
MY_BITMAP *vcol_set, *def_vcol_set;
+ /* On INSERT: fields that the user specified a value for */
+ MY_BITMAP has_value_set;
/*
The ID of the query that opened and is using this table. Has different
@@ -1279,11 +1255,6 @@ public:
*/
bool keep_row_order;
- /**
- If set, the optimizer has found that row retrieval should access index
- tree only.
- */
- bool key_read;
bool no_keyread;
/**
If set, indicate that the table is not replicated by the server.
@@ -1305,7 +1276,9 @@ public:
bool alias_name_used; /* true if table_name is alias */
bool get_fields_in_item_tree; /* Signal to fix_field */
bool m_needs_reopen;
+private:
bool created; /* For tmp tables. TRUE <=> tmp table was actually created.*/
+public:
#ifdef HAVE_REPLICATION
/* used in RBR Triggers */
bool master_had_triggers;
@@ -1320,7 +1293,6 @@ public:
*/
Blob_mem_storage *blob_storage;
GRANT_INFO grant;
- Filesort_info sort;
/*
The arena which the items for expressions from the table definition
are associated with.
@@ -1345,23 +1317,29 @@ public:
void reset_item_list(List<Item> *item_list) const;
void clear_column_bitmaps(void);
void prepare_for_position(void);
+ MY_BITMAP *prepare_for_keyread(uint index, MY_BITMAP *map);
+ MY_BITMAP *prepare_for_keyread(uint index)
+ { return prepare_for_keyread(index, &tmp_set); }
+ void mark_columns_used_by_index(uint index, MY_BITMAP *map);
void mark_columns_used_by_index_no_reset(uint index, MY_BITMAP *map);
- void mark_columns_used_by_index(uint index);
- void add_read_columns_used_by_index(uint index);
- void restore_column_maps_after_mark_index();
+ void restore_column_maps_after_keyread(MY_BITMAP *backup);
void mark_auto_increment_column(void);
void mark_columns_needed_for_update(void);
void mark_columns_needed_for_delete(void);
void mark_columns_needed_for_insert(void);
void mark_columns_per_binlog_row_image(void);
bool mark_virtual_col(Field *field);
- void mark_virtual_columns_for_write(bool insert_fl);
- void mark_default_fields_for_write(MY_BITMAP* bset);
- inline void mark_default_fields_for_write()
+ bool mark_virtual_columns_for_write(bool insert_fl);
+ void mark_default_fields_for_write(bool insert_fl);
+ void mark_columns_used_by_virtual_fields(void);
+ void mark_check_constraint_columns_for_read(void);
+ int verify_constraints(bool ignore_failure);
+ inline void column_bitmaps_set(MY_BITMAP *read_set_arg)
{
- mark_default_fields_for_write(write_set);
+ read_set= read_set_arg;
+ if (file)
+ file->column_bitmaps_signal();
}
- bool has_default_function(bool is_update);
inline void column_bitmaps_set(MY_BITMAP *read_set_arg,
MY_BITMAP *write_set_arg)
{
@@ -1423,39 +1401,44 @@ public:
map= map_arg;
tablenr= tablenr_arg;
}
- inline void enable_keyread()
+
+ /// Return true if table is instantiated, and false otherwise.
+ bool is_created() const { return created; }
+
+ /**
+ Set the table as "created", and enable flags in storage engine
+ that could not be enabled without an instantiated table.
+ */
+ void set_created()
{
- DBUG_ENTER("enable_keyread");
- DBUG_ASSERT(key_read == 0);
- key_read= 1;
- file->extra(HA_EXTRA_KEYREAD);
- DBUG_VOID_RETURN;
+ if (created)
+ return;
+ if (file->keyread_enabled())
+ file->extra(HA_EXTRA_KEYREAD);
+ created= true;
}
+
/*
Returns TRUE if the table is filled at execution phase (and so, the
optimizer must not do anything that depends on the contents of the table,
like range analysis or constant table detection)
*/
bool is_filled_at_execution();
- inline void disable_keyread()
- {
- DBUG_ENTER("disable_keyread");
- if (key_read)
- {
- key_read= 0;
- file->extra(HA_EXTRA_NO_KEYREAD);
- }
- DBUG_VOID_RETURN;
- }
bool update_const_key_parts(COND *conds);
my_ptrdiff_t default_values_offset() const
{ return (my_ptrdiff_t) (s->default_values - record[0]); }
+ void move_fields(Field **ptr, const uchar *to, const uchar *from);
+ void remember_blob_values(String *blob_storage);
+ void restore_blob_values(String *blob_storage);
+
uint actual_n_key_parts(KEY *keyinfo);
ulong actual_key_flags(KEY *keyinfo);
- int update_default_fields();
+ int update_virtual_field(Field *vf);
+ int update_virtual_fields(handler *h, enum_vcol_update_mode update_mode);
+ int update_default_fields(bool update, bool ignore_errors);
void reset_default_fields();
inline ha_rows stat_records() { return used_stat_records; }
@@ -1465,6 +1448,11 @@ public:
inline Field **field_to_fill();
bool validate_default_values_of_unset_fields(THD *thd) const;
+
+ bool insert_all_rows_into_tmp_table(THD *thd,
+ TABLE *tmp_table,
+ TMP_TABLE_PARAM *tmp_table_param,
+ bool with_cleanup);
};
@@ -1485,7 +1473,6 @@ struct TABLE_share
}
};
-
struct All_share_tables
{
static inline TABLE **next_ptr(TABLE *l)
@@ -1498,6 +1485,7 @@ struct All_share_tables
}
};
+typedef I_P_List <TABLE, All_share_tables> All_share_tables_list;
enum enum_schema_table_state
{
@@ -1526,13 +1514,13 @@ typedef struct st_foreign_key_info
LEX_CSTRING *fk_option_name(enum_fk_option opt);
bool fk_modifies_child(enum_fk_option opt);
-#define MY_I_S_MAYBE_NULL 1
-#define MY_I_S_UNSIGNED 2
+#define MY_I_S_MAYBE_NULL 1U
+#define MY_I_S_UNSIGNED 2U
-#define SKIP_OPEN_TABLE 0 // do not open table
-#define OPEN_FRM_ONLY 1 // open FRM file only
-#define OPEN_FULL_TABLE 2 // open FRM,MYD, MYI files
+#define SKIP_OPEN_TABLE 0U // do not open table
+#define OPEN_FRM_ONLY 1U // open FRM file only
+#define OPEN_FULL_TABLE 2U // open FRM,MYD, MYI files
typedef struct st_field_info
{
@@ -1596,27 +1584,27 @@ class IS_table_read_plan;
Types of derived tables. The ending part is a bitmap of phases that are
applicable to a derived table of the type.
*/
-#define DTYPE_ALGORITHM_UNDEFINED 0
-#define DTYPE_VIEW 1
-#define DTYPE_TABLE 2
-#define DTYPE_MERGE 4
-#define DTYPE_MATERIALIZE 8
-#define DTYPE_MULTITABLE 16
-#define DTYPE_MASK 19
+#define DTYPE_ALGORITHM_UNDEFINED 0U
+#define DTYPE_VIEW 1U
+#define DTYPE_TABLE 2U
+#define DTYPE_MERGE 4U
+#define DTYPE_MATERIALIZE 8U
+#define DTYPE_MULTITABLE 16U
+#define DTYPE_MASK (DTYPE_VIEW|DTYPE_TABLE|DTYPE_MULTITABLE)
/*
Phases of derived tables/views handling, see sql_derived.cc
Values are used as parts of a bitmap attached to derived table types.
*/
-#define DT_INIT 1
-#define DT_PREPARE 2
-#define DT_OPTIMIZE 4
-#define DT_MERGE 8
-#define DT_MERGE_FOR_INSERT 16
-#define DT_CREATE 32
-#define DT_FILL 64
-#define DT_REINIT 128
-#define DT_PHASES 8
+#define DT_INIT 1U
+#define DT_PREPARE 2U
+#define DT_OPTIMIZE 4U
+#define DT_MERGE 8U
+#define DT_MERGE_FOR_INSERT 16U
+#define DT_CREATE 32U
+#define DT_FILL 64U
+#define DT_REINIT 128U
+#define DT_PHASES 8U
/* Phases that are applicable to all derived tables. */
#define DT_COMMON (DT_INIT + DT_PREPARE + DT_REINIT + DT_OPTIMIZE)
/* Phases that are applicable only to materialized derived tables. */
@@ -1636,13 +1624,13 @@ class IS_table_read_plan;
representation for backward compatibility.
*/
-#define VIEW_ALGORITHM_UNDEFINED_FRM 0
-#define VIEW_ALGORITHM_MERGE_FRM 1
-#define VIEW_ALGORITHM_TMPTABLE_FRM 2
+#define VIEW_ALGORITHM_UNDEFINED_FRM 0U
+#define VIEW_ALGORITHM_MERGE_FRM 1U
+#define VIEW_ALGORITHM_TMPTABLE_FRM 2U
-#define JOIN_TYPE_LEFT 1
-#define JOIN_TYPE_RIGHT 2
-#define JOIN_TYPE_OUTER 4 /* Marker that this is an outer join */
+#define JOIN_TYPE_LEFT 1U
+#define JOIN_TYPE_RIGHT 2U
+#define JOIN_TYPE_OUTER 4U /* Marker that this is an outer join */
#define VIEW_SUID_INVOKER 0
#define VIEW_SUID_DEFINER 1
@@ -1782,6 +1770,8 @@ struct TABLE_LIST
else
mdl_type= MDL_SHARED_READ;
+ DBUG_ASSERT(!db_name_arg || strlen(db_name_arg) == db_length_arg);
+ DBUG_ASSERT(!table_name_arg || strlen(table_name_arg) == table_name_length_arg);
reset();
db= (char*) db_name_arg;
db_length= db_length_arg;
@@ -1816,6 +1806,7 @@ struct TABLE_LIST
*last_ptr= &next_global;
}
+
/*
List of tables local to a subquery (used by SQL_I_List). Considers
views as leaves (unlike 'next_leaf' below). Created at parse time
@@ -1895,7 +1886,7 @@ struct TABLE_LIST
/* Index names in a "... JOIN ... USE/IGNORE INDEX ..." clause. */
List<Index_hint> *index_hints;
TABLE *table; /* opened table */
- ulonglong table_id; /* table id (from binlog) for opened table */
+ ulonglong table_id; /* table id (from binlog) for opened table */
/*
select_result for derived table to pass it from table creation to table
filling procedure
@@ -1953,6 +1944,10 @@ struct TABLE_LIST
derived tables. Use TABLE_LIST::is_anonymous_derived_table().
*/
st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */
+ With_element *with; /* With element defining this table (if any) */
+ /* Bitmap of the defining with element */
+ table_map with_internal_reference_map;
+ bool block_handle_derived;
ST_SCHEMA_TABLE *schema_table; /* Information_schema table */
st_select_lex *schema_select_lex;
/*
@@ -2116,8 +2111,6 @@ struct TABLE_LIST
*/
bool is_fqtn;
- bool deleting; /* going to delete this table */
-
/* TRUE <=> derived table should be filled right after optimization. */
bool fill_me;
/* TRUE <=> view/DT is merged. */
@@ -2170,9 +2163,16 @@ struct TABLE_LIST
/* TRUE <=> this table is a const one and was optimized away. */
bool optimized_away;
+ /**
+ TRUE <=> already materialized. Valid only for materialized derived
+ tables/views.
+ */
+ bool materialized;
/* I_S: Flags to open_table (e.g. OPEN_TABLE_ONLY or OPEN_VIEW_ONLY) */
uint i_s_requested_object;
+ bool prohibit_cond_pushdown;
+
/*
I_S: how to read the tables (SKIP_OPEN_TABLE/OPEN_FRM_ONLY/OPEN_FULL_TABLE)
*/
@@ -2315,6 +2315,11 @@ struct TABLE_LIST
{
return (derived_type & DTYPE_TABLE);
}
+ bool is_with_table();
+ bool is_recursive_with_table();
+ bool is_with_table_recursive_reference();
+ bool fill_recursive(THD *thd);
+
inline void set_view()
{
derived_type= DTYPE_VIEW;
@@ -2362,6 +2367,7 @@ struct TABLE_LIST
{
derived_type|= DTYPE_MULTITABLE;
}
+ bool set_as_with_table(THD *thd, With_element *with_elem);
void reset_const_table();
bool handle_derived(LEX *lex, uint phases);
@@ -2405,6 +2411,8 @@ struct TABLE_LIST
return false;
}
void set_lock_type(THD* thd, enum thr_lock_type lock);
+ void check_pushable_cond_for_table(Item *cond);
+ Item *build_pushable_cond_for_table(THD *thd, Item *cond);
void remove_join_columns()
{
@@ -2691,7 +2699,7 @@ enum get_table_share_flags {
GTS_FORCE_DISCOVERY = 16
};
-size_t max_row_length(TABLE *table, const uchar *data);
+size_t max_row_length(TABLE *table, MY_BITMAP const *cols, const uchar *data);
void init_mdl_requests(TABLE_LIST *table_list);
@@ -2699,9 +2707,11 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
const char *alias, uint db_stat, uint prgflag,
uint ha_open_flags, TABLE *outparam,
bool is_create_table);
-bool unpack_vcol_info_from_frm(THD *thd, MEM_ROOT *mem_root,
- TABLE *table, Field *field,
- LEX_STRING *vcol_expr, bool *error_reported);
+bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol);
+bool fix_session_vcol_expr_for_read(THD *thd, Field *field,
+ Virtual_column_info *vcol);
+bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
+ bool *error_reported);
TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
const char *key, uint key_length);
void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key,
@@ -2725,7 +2735,7 @@ bool get_field(MEM_ROOT *mem, Field *field, class String *res);
bool validate_comment_length(THD *thd, LEX_STRING *comment, size_t max_len,
uint err_code, const char *name);
-int closefrm(TABLE *table, bool free_share);
+int closefrm(TABLE *table);
void free_blobs(TABLE *table);
void free_field_buffers_larger_than(TABLE *table, uint32 size);
ulong get_form_pos(File file, uchar *head, TYPELIB *save_names);
@@ -2769,15 +2779,6 @@ inline bool is_infoschema_db(const char *name)
TYPELIB *typelib(MEM_ROOT *mem_root, List<String> &strings);
-/**
- return true if the table was created explicitly.
-*/
-inline bool is_user_table(TABLE * table)
-{
- const char *name= table->s->table_name.str;
- return strncmp(name, tmp_file_prefix, tmp_file_prefix_length);
-}
-
inline void mark_as_null_row(TABLE *table)
{
table->null_row=1;
diff --git a/sql/table_cache.cc b/sql/table_cache.cc
index b3cf6cd2892..8682662e924 100644
--- a/sql/table_cache.cc
+++ b/sql/table_cache.cc
@@ -36,8 +36,6 @@
- get number of TABLE objects in cache (tc_records())
Dependencies:
- - intern_close_table(): frees TABLE object
- - kill_delayed_threads_for_table()
- close_cached_tables(): flush tables on shutdown
- alloc_table_share()
- free_table_share()
@@ -57,21 +55,22 @@
/** Configuration. */
ulong tdc_size; /**< Table definition cache threshold for LRU eviction. */
ulong tc_size; /**< Table cache threshold for LRU eviction. */
+uint32 tc_instances;
+static uint32 tc_active_instances= 1;
+static uint32 tc_contention_warning_reported;
/** Data collections. */
static LF_HASH tdc_hash; /**< Collection of TABLE_SHARE objects. */
/** Collection of unused TABLE_SHARE objects. */
+static
I_P_List <TDC_element,
I_P_List_adapter<TDC_element, &TDC_element::next, &TDC_element::prev>,
I_P_List_null_counter,
I_P_List_fast_push_back<TDC_element> > unused_shares;
-static int64 tdc_version; /* Increments on each reload */
-static int64 last_table_id;
+static tdc_version_t tdc_version; /* Increments on each reload */
static bool tdc_inited;
-static int32 tc_count; /**< Number of TABLE objects in table cache. */
-
/**
Protects unused shares list.
@@ -84,31 +83,20 @@ static int32 tc_count; /**< Number of TABLE objects in table cache. */
static mysql_mutex_t LOCK_unused_shares;
#ifdef HAVE_PSI_INTERFACE
-PSI_mutex_key key_LOCK_unused_shares, key_TABLE_SHARE_LOCK_table_share;
+static PSI_mutex_key key_LOCK_unused_shares, key_TABLE_SHARE_LOCK_table_share,
+ key_LOCK_table_cache;
static PSI_mutex_info all_tc_mutexes[]=
{
{ &key_LOCK_unused_shares, "LOCK_unused_shares", PSI_FLAG_GLOBAL },
- { &key_TABLE_SHARE_LOCK_table_share, "TABLE_SHARE::tdc.LOCK_table_share", 0 }
+ { &key_TABLE_SHARE_LOCK_table_share, "TABLE_SHARE::tdc.LOCK_table_share", 0 },
+ { &key_LOCK_table_cache, "LOCK_table_cache", 0 }
};
-PSI_cond_key key_TABLE_SHARE_COND_release;
+static PSI_cond_key key_TABLE_SHARE_COND_release;
static PSI_cond_info all_tc_conds[]=
{
{ &key_TABLE_SHARE_COND_release, "TABLE_SHARE::tdc.COND_release", 0 }
};
-
-
-static void init_tc_psi_keys(void)
-{
- const char *category= "sql";
- int count;
-
- count= array_elements(all_tc_mutexes);
- mysql_mutex_register(category, all_tc_mutexes, count);
-
- count= array_elements(all_tc_conds);
- mysql_cond_register(category, all_tc_conds, count);
-}
#endif
@@ -128,6 +116,114 @@ static int fix_thd_pins(THD *thd)
part of table definition cache.
*/
+struct Table_cache_instance
+{
+ /**
+ Protects free_tables (TABLE::global_free_next and TABLE::global_free_prev),
+ records, Share_free_tables::List (TABLE::prev and TABLE::next),
+ TABLE::in_use.
+ */
+ mysql_mutex_t LOCK_table_cache;
+ I_P_List <TABLE, I_P_List_adapter<TABLE, &TABLE::global_free_next,
+ &TABLE::global_free_prev>,
+ I_P_List_null_counter, I_P_List_fast_push_back<TABLE> >
+ free_tables;
+ ulong records;
+ uint mutex_waits;
+ uint mutex_nowaits;
+ /** Avoid false sharing between instances */
+ char pad[CPU_LEVEL1_DCACHE_LINESIZE];
+
+ Table_cache_instance(): records(0), mutex_waits(0), mutex_nowaits(0)
+ {
+ mysql_mutex_init(key_LOCK_table_cache, &LOCK_table_cache,
+ MY_MUTEX_INIT_FAST);
+ }
+
+ ~Table_cache_instance()
+ {
+ mysql_mutex_destroy(&LOCK_table_cache);
+ DBUG_ASSERT(free_tables.is_empty());
+ DBUG_ASSERT(records == 0);
+ }
+
+ /**
+ Lock table cache mutex and check contention.
+
+ Instance is considered contested if more than 20% of mutex acquisitions
+ can't be served immediately. Up to 100 000 probes may be performed to avoid
+ instance activation on short sporadic peaks. 100 000 is estimated maximum
+ number of queries one instance can serve in one second.
+
+ These numbers work well on a 2 socket / 20 core / 40 threads Intel Broadwell
+ system, that is expected number of instances is activated within reasonable
+ warmup time. It may have to be adjusted for other systems.
+
+ Only TABLE object acquisition is instrumented. We intentionally avoid this
+ overhead on TABLE object release. All other table cache mutex acquisitions
+ are considered out of hot path and are not instrumented either.
+ */
+ void lock_and_check_contention(uint32 n_instances, uint32 instance)
+ {
+ if (mysql_mutex_trylock(&LOCK_table_cache))
+ {
+ mysql_mutex_lock(&LOCK_table_cache);
+ if (++mutex_waits == 20000)
+ {
+ if (n_instances < tc_instances)
+ {
+ if (my_atomic_cas32_weak_explicit((int32*) &tc_active_instances,
+ (int32*) &n_instances,
+ (int32) n_instances + 1,
+ MY_MEMORY_ORDER_RELAXED,
+ MY_MEMORY_ORDER_RELAXED))
+ {
+ sql_print_information("Detected table cache mutex contention at instance %d: "
+ "%d%% waits. Additional table cache instance "
+ "activated. Number of instances after "
+ "activation: %d.",
+ instance + 1,
+ mutex_waits * 100 / (mutex_nowaits + mutex_waits),
+ n_instances + 1);
+ }
+ }
+ else if (!my_atomic_fas32_explicit((int32*) &tc_contention_warning_reported,
+ 1, MY_MEMORY_ORDER_RELAXED))
+ {
+ sql_print_warning("Detected table cache mutex contention at instance %d: "
+ "%d%% waits. Additional table cache instance "
+ "cannot be activated: consider raising "
+ "table_open_cache_instances. Number of active "
+ "instances: %d.",
+ instance + 1,
+ mutex_waits * 100 / (mutex_nowaits + mutex_waits),
+ n_instances);
+ }
+ mutex_waits= 0;
+ mutex_nowaits= 0;
+ }
+ }
+ else if (++mutex_nowaits == 80000)
+ {
+ mutex_waits= 0;
+ mutex_nowaits= 0;
+ }
+ }
+};
+
+
+static Table_cache_instance *tc;
+
+
+static void intern_close_table(TABLE *table)
+{
+ delete table->triggers;
+ DBUG_ASSERT(table->file);
+ closefrm(table);
+ tdc_release_share(table->s);
+ my_free(table);
+}
+
/**
Get number of TABLE objects (used and unused) in table cache.
@@ -135,21 +231,65 @@ static int fix_thd_pins(THD *thd)
uint tc_records(void)
{
- return my_atomic_load32_explicit(&tc_count, MY_MEMORY_ORDER_RELAXED);
+ ulong total= 0;
+ for (ulong i= 0; i < tc_instances; i++)
+ {
+ mysql_mutex_lock(&tc[i].LOCK_table_cache);
+ total+= tc[i].records;
+ mysql_mutex_unlock(&tc[i].LOCK_table_cache);
+ }
+ return total;
}
/**
Remove TABLE object from table cache.
-
- - decrement tc_count
- - remove object from TABLE_SHARE::tdc.all_tables
*/
static void tc_remove_table(TABLE *table)
{
- my_atomic_add32_explicit(&tc_count, -1, MY_MEMORY_ORDER_RELAXED);
- table->s->tdc->all_tables.remove(table);
+ TDC_element *element= table->s->tdc;
+
+ mysql_mutex_lock(&element->LOCK_table_share);
+ /* Wait for MDL deadlock detector to complete traversing tdc.all_tables. */
+ while (element->all_tables_refs)
+ mysql_cond_wait(&element->COND_release, &element->LOCK_table_share);
+ element->all_tables.remove(table);
+ mysql_mutex_unlock(&element->LOCK_table_share);
+
+ intern_close_table(table);
+}
+
+
+static void tc_remove_all_unused_tables(TDC_element *element,
+ Share_free_tables::List *purge_tables,
+ bool mark_flushed)
+{
+ TABLE *table;
+
+ /*
+ Mark share flushed in order to ensure that it gets
+ automatically deleted once it is no longer referenced.
+
+ Note that code in TABLE_SHARE::wait_for_old_version() assumes that
+ marking share flushed is followed by purge of unused table
+ shares.
+ */
+ if (mark_flushed)
+ element->flushed= true;
+ for (ulong i= 0; i < tc_instances; i++)
+ {
+ mysql_mutex_lock(&tc[i].LOCK_table_cache);
+ while ((table= element->free_tables[i].list.pop_front()))
+ {
+ tc[i].records--;
+ tc[i].free_tables.remove(table);
+ DBUG_ASSERT(element->all_tables_refs == 0);
+ element->all_tables.remove(table);
+ purge_tables->push_front(table);
+ }
+ mysql_mutex_unlock(&tc[i].LOCK_table_cache);
+ }
}
@@ -170,24 +310,15 @@ static void tc_remove_table(TABLE *table)
struct tc_purge_arg
{
- TDC_element::TABLE_list purge_tables;
+ Share_free_tables::List purge_tables;
bool mark_flushed;
};
static my_bool tc_purge_callback(TDC_element *element, tc_purge_arg *arg)
{
- TABLE *table;
-
mysql_mutex_lock(&element->LOCK_table_share);
- element->wait_for_mdl_deadlock_detector();
- if (arg->mark_flushed)
- element->flushed= true;
- while ((table= element->free_tables.pop_front()))
- {
- tc_remove_table(table);
- arg->purge_tables.push_front(table);
- }
+ tc_remove_all_unused_tables(element, &arg->purge_tables, arg->mark_flushed);
mysql_mutex_unlock(&element->LOCK_table_share);
return FALSE;
}
@@ -221,80 +352,70 @@ void tc_purge(bool mark_flushed)
- free evicted object
*/
-struct tc_add_table_arg
-{
- char key[MAX_DBKEY_LENGTH];
- uint key_length;
- ulonglong purge_time;
-};
-
-
-my_bool tc_add_table_callback(TDC_element *element, tc_add_table_arg *arg)
+void tc_add_table(THD *thd, TABLE *table)
{
- TABLE *table;
+ uint32 i= thd->thread_id % my_atomic_load32_explicit((int32*) &tc_active_instances,
+ MY_MEMORY_ORDER_RELAXED);
+ TABLE *LRU_table= 0;
+ TDC_element *element= table->s->tdc;
+ DBUG_ASSERT(table->in_use == thd);
+ table->instance= i;
mysql_mutex_lock(&element->LOCK_table_share);
- if ((table= element->free_tables_back()) && table->tc_time < arg->purge_time)
+ /* Wait for MDL deadlock detector to complete traversing tdc.all_tables. */
+ while (element->all_tables_refs)
+ mysql_cond_wait(&element->COND_release, &element->LOCK_table_share);
+ element->all_tables.push_front(table);
+ mysql_mutex_unlock(&element->LOCK_table_share);
+
+ mysql_mutex_lock(&tc[i].LOCK_table_cache);
+ if (tc[i].records == tc_size && (LRU_table= tc[i].free_tables.pop_front()))
{
- memcpy(arg->key, element->m_key, element->m_key_length);
- arg->key_length= element->m_key_length;
- arg->purge_time= table->tc_time;
+ LRU_table->s->tdc->free_tables[i].list.remove(LRU_table);
+ /* Needed if MDL deadlock detector chimes in before tc_remove_table() */
+ LRU_table->in_use= thd;
}
- mysql_mutex_unlock(&element->LOCK_table_share);
- return FALSE;
+ else
+ tc[i].records++;
+ mysql_mutex_unlock(&tc[i].LOCK_table_cache);
+
+ if (LRU_table)
+ tc_remove_table(LRU_table);
}
-void tc_add_table(THD *thd, TABLE *table)
-{
- bool need_purge;
- DBUG_ASSERT(table->in_use == thd);
- mysql_mutex_lock(&table->s->tdc->LOCK_table_share);
- table->s->tdc->wait_for_mdl_deadlock_detector();
- table->s->tdc->all_tables.push_front(table);
- mysql_mutex_unlock(&table->s->tdc->LOCK_table_share);
+/**
+ Acquire TABLE object from table cache.
- /* If we have too many TABLE instances around, try to get rid of them */
- need_purge= my_atomic_add32_explicit(&tc_count, 1, MY_MEMORY_ORDER_RELAXED) >=
- (int32) tc_size;
+ @pre share must be protected against removal.
- if (need_purge)
- {
- tc_add_table_arg argument;
- argument.purge_time= ULONGLONG_MAX;
- tdc_iterate(thd, (my_hash_walk_action) tc_add_table_callback, &argument);
+ Acquired object cannot be evicted or acquired again.
- if (argument.purge_time != ULONGLONG_MAX)
- {
- TDC_element *element= (TDC_element*) lf_hash_search(&tdc_hash,
- thd->tdc_hash_pins,
- argument.key,
- argument.key_length);
- if (element)
- {
- TABLE *entry;
- mysql_mutex_lock(&element->LOCK_table_share);
- lf_hash_search_unpin(thd->tdc_hash_pins);
- element->wait_for_mdl_deadlock_detector();
-
- /*
- It may happen that oldest table was acquired meanwhile. In this case
- just go ahead, number of objects in table cache will normalize
- eventually.
- */
- if ((entry= element->free_tables_back()) &&
- entry->tc_time == argument.purge_time)
- {
- element->free_tables.remove(entry);
- tc_remove_table(entry);
- mysql_mutex_unlock(&element->LOCK_table_share);
- intern_close_table(entry);
- }
- else
- mysql_mutex_unlock(&element->LOCK_table_share);
- }
- }
+ @return TABLE object, or NULL if no unused objects.
+*/
+
+static TABLE *tc_acquire_table(THD *thd, TDC_element *element)
+{
+ uint32 n_instances=
+ my_atomic_load32_explicit((int32*) &tc_active_instances,
+ MY_MEMORY_ORDER_RELAXED);
+ uint32 i= thd->thread_id % n_instances;
+ TABLE *table;
+
+ tc[i].lock_and_check_contention(n_instances, i);
+ table= element->free_tables[i].list.pop_front();
+ if (table)
+ {
+ DBUG_ASSERT(!table->in_use);
+ table->in_use= thd;
+ /* The ex-unused table must be fully functional. */
+ DBUG_ASSERT(table->db_stat && table->file);
+ /* The children must be detached from the table. */
+ DBUG_ASSERT(!table->file->extra(HA_EXTRA_IS_ATTACHED_CHILDREN));
+ tc[i].free_tables.remove(table);
}
+ mysql_mutex_unlock(&tc[i].LOCK_table_cache);
+ return table;
}
@@ -324,42 +445,44 @@ void tc_add_table(THD *thd, TABLE *table)
@retval false object released
*/
-bool tc_release_table(TABLE *table)
+void tc_release_table(TABLE *table)
{
+ uint32 i= table->instance;
DBUG_ASSERT(table->in_use);
DBUG_ASSERT(table->file);
DBUG_ASSERT(!table->pos_in_locked_tables);
- if (table->needs_reopen() || tc_records() > tc_size)
+ mysql_mutex_lock(&tc[i].LOCK_table_cache);
+ if (table->needs_reopen() || table->s->tdc->flushed ||
+ tc[i].records > tc_size)
{
- mysql_mutex_lock(&table->s->tdc->LOCK_table_share);
- goto purge;
+ tc[i].records--;
+ mysql_mutex_unlock(&tc[i].LOCK_table_cache);
+ tc_remove_table(table);
}
+ else
+ {
+ table->in_use= 0;
+ table->s->tdc->free_tables[i].list.push_front(table);
+ tc[i].free_tables.push_back(table);
+ mysql_mutex_unlock(&tc[i].LOCK_table_cache);
+ }
+}
- table->tc_time= my_interval_timer();
- mysql_mutex_lock(&table->s->tdc->LOCK_table_share);
- if (table->s->tdc->flushed)
- goto purge;
- /*
- in_use doesn't really need mutex protection, but must be reset after
- checking tdc.flushed and before this table appears in free_tables.
- Resetting in_use is needed only for print_cached_tables() and
- list_open_tables().
- */
- table->in_use= 0;
- /* Add table to the list of unused TABLE objects for this share. */
- table->s->tdc->free_tables.push_front(table);
- mysql_mutex_unlock(&table->s->tdc->LOCK_table_share);
- return false;
-
-purge:
- table->s->tdc->wait_for_mdl_deadlock_detector();
- tc_remove_table(table);
- mysql_mutex_unlock(&table->s->tdc->LOCK_table_share);
- table->in_use= 0;
- intern_close_table(table);
- return true;
+static void tdc_assert_clean_share(TDC_element *element)
+{
+ DBUG_ASSERT(element->share == 0);
+ DBUG_ASSERT(element->ref_count == 0);
+ DBUG_ASSERT(element->m_flush_tickets.is_empty());
+ DBUG_ASSERT(element->all_tables.is_empty());
+#ifndef DBUG_OFF
+ for (ulong i= 0; i < tc_instances; i++)
+ DBUG_ASSERT(element->free_tables[i].list.is_empty());
+#endif
+ DBUG_ASSERT(element->all_tables_refs == 0);
+ DBUG_ASSERT(element->next == 0);
+ DBUG_ASSERT(element->prev == 0);
}
@@ -405,7 +528,7 @@ static void tdc_delete_share_from_hash(TDC_element *element)
pins= lf_hash_get_pins(&tdc_hash);
DBUG_ASSERT(pins); // What can we do about it?
- element->assert_clean_share();
+ tdc_assert_clean_share(element);
lf_hash_delete(&tdc_hash, pins, element->m_key, element->m_key_length);
if (!thd)
lf_hash_put_pins(pins);
@@ -415,26 +538,88 @@ static void tdc_delete_share_from_hash(TDC_element *element)
/**
+ Prepare table share for use with table definition cache.
+*/
+
+static void lf_alloc_constructor(uchar *arg)
+{
+ TDC_element *element= (TDC_element*) (arg + LF_HASH_OVERHEAD);
+ DBUG_ENTER("lf_alloc_constructor");
+ mysql_mutex_init(key_TABLE_SHARE_LOCK_table_share,
+ &element->LOCK_table_share, MY_MUTEX_INIT_FAST);
+ mysql_cond_init(key_TABLE_SHARE_COND_release, &element->COND_release, 0);
+ element->m_flush_tickets.empty();
+ element->all_tables.empty();
+ for (ulong i= 0; i < tc_instances; i++)
+ element->free_tables[i].list.empty();
+ element->all_tables_refs= 0;
+ element->share= 0;
+ element->ref_count= 0;
+ element->next= 0;
+ element->prev= 0;
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Release table definition cache specific resources of table share.
+*/
+
+static void lf_alloc_destructor(uchar *arg)
+{
+ TDC_element *element= (TDC_element*) (arg + LF_HASH_OVERHEAD);
+ DBUG_ENTER("lf_alloc_destructor");
+ tdc_assert_clean_share(element);
+ mysql_cond_destroy(&element->COND_release);
+ mysql_mutex_destroy(&element->LOCK_table_share);
+ DBUG_VOID_RETURN;
+}
+
+
+static void tdc_hash_initializer(LF_HASH *hash __attribute__((unused)),
+ TDC_element *element, LEX_STRING *key)
+{
+ memcpy(element->m_key, key->str, key->length);
+ element->m_key_length= key->length;
+ tdc_assert_clean_share(element);
+}
+
+
+static uchar *tdc_hash_key(const TDC_element *element, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= element->m_key_length;
+ return (uchar*) element->m_key;
+}
+
+
+/**
Initialize table definition cache.
*/
-void tdc_init(void)
+bool tdc_init(void)
{
DBUG_ENTER("tdc_init");
#ifdef HAVE_PSI_INTERFACE
- init_tc_psi_keys();
+ mysql_mutex_register("sql", all_tc_mutexes, array_elements(all_tc_mutexes));
+ mysql_cond_register("sql", all_tc_conds, array_elements(all_tc_conds));
#endif
+ /* Extra instance is allocated to avoid false sharing */
+ if (!(tc= new Table_cache_instance[tc_instances + 1]))
+ DBUG_RETURN(true);
tdc_inited= true;
mysql_mutex_init(key_LOCK_unused_shares, &LOCK_unused_shares,
MY_MUTEX_INIT_FAST);
tdc_version= 1L; /* Increments on each reload */
- lf_hash_init(&tdc_hash, sizeof(TDC_element), LF_HASH_UNIQUE, 0, 0,
- (my_hash_get_key) TDC_element::key,
+ lf_hash_init(&tdc_hash, sizeof(TDC_element) +
+ sizeof(Share_free_tables) * (tc_instances - 1),
+ LF_HASH_UNIQUE, 0, 0,
+ (my_hash_get_key) tdc_hash_key,
&my_charset_bin);
- tdc_hash.alloc.constructor= TDC_element::lf_alloc_constructor;
- tdc_hash.alloc.destructor= TDC_element::lf_alloc_destructor;
- tdc_hash.initializer= (lf_hash_initializer) TDC_element::lf_hash_initializer;
- DBUG_VOID_RETURN;
+ tdc_hash.alloc.constructor= lf_alloc_constructor;
+ tdc_hash.alloc.destructor= lf_alloc_destructor;
+ tdc_hash.initializer= (lf_hash_initializer) tdc_hash_initializer;
+ DBUG_RETURN(false);
}
@@ -476,6 +661,7 @@ void tdc_deinit(void)
tdc_inited= false;
lf_hash_destroy(&tdc_hash);
mysql_mutex_destroy(&LOCK_unused_shares);
+ delete [] tc;
}
DBUG_VOID_RETURN;
}
@@ -586,10 +772,9 @@ void tdc_unlock_share(TDC_element *element)
tdc_acquire_share()
thd Thread handle
- table_list Table that should be opened
- key Table cache key
- key_length Length of key
+ tl Table that should be opened
flags operation: what to open table or view
+ out_table TABLE for the requested table
IMPLEMENTATION
Get a table definition from the table definition cache.
@@ -600,13 +785,14 @@ void tdc_unlock_share(TDC_element *element)
# Share for table
*/
-TABLE_SHARE *tdc_acquire_share(THD *thd, const char *db, const char *table_name,
- const char *key, uint key_length,
- my_hash_value_type hash_value, uint flags,
+TABLE_SHARE *tdc_acquire_share(THD *thd, TABLE_LIST *tl, uint flags,
TABLE **out_table)
{
TABLE_SHARE *share;
TDC_element *element;
+ const char *key;
+ uint key_length= get_table_def_key(tl, &key);
+ my_hash_value_type hash_value= tl->mdl_request.key.tc_hash_value();
bool was_unused;
DBUG_ENTER("tdc_acquire_share");
@@ -630,7 +816,7 @@ retry:
lf_hash_search_unpin(thd->tdc_hash_pins);
DBUG_ASSERT(element);
- if (!(share= alloc_table_share(db, table_name, key, key_length)))
+ if (!(share= alloc_table_share(tl->db, tl->table_name, key, key_length)))
{
lf_hash_delete(&tdc_hash, thd->tdc_hash_pins, key, key_length);
DBUG_RETURN(0);
@@ -666,7 +852,7 @@ retry:
if (out_table && (flags & GTS_TABLE))
{
- if ((*out_table= element->acquire_table(thd)))
+ if ((*out_table= tc_acquire_table(thd, element)))
{
lf_hash_search_unpin(thd->tdc_hash_pins);
DBUG_ASSERT(!(flags & GTS_NOLOCK));
@@ -728,8 +914,8 @@ retry:
}
end:
- DBUG_PRINT("exit", ("share: 0x%lx ref_count: %u",
- (ulong) share, share->tdc->ref_count));
+ DBUG_PRINT("exit", ("share: %p ref_count: %u",
+ share, share->tdc->ref_count));
if (flags & GTS_NOLOCK)
{
tdc_release_share(share);
@@ -760,8 +946,8 @@ void tdc_release_share(TABLE_SHARE *share)
mysql_mutex_lock(&share->tdc->LOCK_table_share);
DBUG_PRINT("enter",
- ("share: 0x%lx table: %s.%s ref_count: %u version: %lu",
- (ulong) share, share->db.str, share->table_name.str,
+ ("share: %p table: %s.%s ref_count: %u version: %lld",
+ share, share->db.str, share->table_name.str,
share->tdc->ref_count, share->tdc->version));
DBUG_ASSERT(share->tdc->ref_count);
@@ -802,6 +988,47 @@ void tdc_release_share(TABLE_SHARE *share)
/**
+ Auxiliary function which allows to kill delayed threads for
+ particular table identified by its share.
+
+ @param share Table share.
+
+ @pre Caller should have TABLE_SHARE::tdc.LOCK_table_share mutex.
+*/
+
+static void kill_delayed_threads_for_table(TDC_element *element)
+{
+ All_share_tables_list::Iterator it(element->all_tables);
+ TABLE *tab;
+
+ mysql_mutex_assert_owner(&element->LOCK_table_share);
+
+ if (!delayed_insert_threads)
+ return;
+
+ while ((tab= it++))
+ {
+ THD *in_use= tab->in_use;
+
+ DBUG_ASSERT(in_use && tab->s->tdc->flushed);
+ if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
+ ! in_use->killed)
+ {
+ in_use->killed= KILL_SYSTEM_THREAD;
+ mysql_mutex_lock(&in_use->mysys_var->mutex);
+ if (in_use->mysys_var->current_cond)
+ {
+ mysql_mutex_lock(in_use->mysys_var->current_mutex);
+ mysql_cond_broadcast(in_use->mysys_var->current_cond);
+ mysql_mutex_unlock(in_use->mysys_var->current_mutex);
+ }
+ mysql_mutex_unlock(&in_use->mysys_var->mutex);
+ }
+ }
+}
+
+
+/**
Remove all or some (depending on parameter) instances of TABLE and
TABLE_SHARE from the table definition cache.
@@ -844,7 +1071,7 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
const char *db, const char *table_name,
bool kill_delayed_threads)
{
- I_P_List <TABLE, TABLE_share> purge_tables;
+ Share_free_tables::List purge_tables;
TABLE *table;
TDC_element *element;
uint my_refs= 1;
@@ -883,37 +1110,22 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
element->ref_count++;
- element->wait_for_mdl_deadlock_detector();
- /*
- Mark share flushed in order to ensure that it gets
- automatically deleted once it is no longer referenced.
-
- Note that code in TABLE_SHARE::wait_for_old_version() assumes that
- marking share flushed is followed by purge of unused table
- shares.
- */
- if (remove_type != TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE)
- element->flushed= true;
+ tc_remove_all_unused_tables(element, &purge_tables,
+ remove_type != TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE);
- while ((table= element->free_tables.pop_front()))
- {
- tc_remove_table(table);
- purge_tables.push_front(table);
- }
if (kill_delayed_threads)
kill_delayed_threads_for_table(element);
if (remove_type == TDC_RT_REMOVE_NOT_OWN ||
remove_type == TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE)
{
- TDC_element::All_share_tables_list::Iterator it(element->all_tables);
+ All_share_tables_list::Iterator it(element->all_tables);
while ((table= it++))
{
- my_refs++;
- DBUG_ASSERT(table->in_use == thd);
+ if (table->in_use == thd)
+ my_refs++;
}
}
- DBUG_ASSERT(element->all_tables.is_empty() || remove_type != TDC_RT_REMOVE_ALL);
mysql_mutex_unlock(&element->LOCK_table_share);
while ((table= purge_tables.pop_front()))
@@ -945,6 +1157,17 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
mysql_mutex_lock(&element->LOCK_table_share);
while (element->ref_count > my_refs)
mysql_cond_wait(&element->COND_release, &element->LOCK_table_share);
+ DBUG_ASSERT(element->all_tables.is_empty() ||
+ remove_type != TDC_RT_REMOVE_ALL);
+#ifndef DBUG_OFF
+ if (remove_type == TDC_RT_REMOVE_NOT_OWN ||
+ remove_type == TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE)
+ {
+ All_share_tables_list::Iterator it(element->all_tables);
+ while ((table= it++))
+ DBUG_ASSERT(table->in_use == thd);
+ }
+#endif
mysql_mutex_unlock(&element->LOCK_table_share);
}
@@ -969,8 +1192,7 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
*/
int tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name,
- ulong wait_timeout, uint deadlock_weight,
- ulong refresh_version)
+ ulong wait_timeout, uint deadlock_weight, tdc_version_t refresh_version)
{
TDC_element *element;
@@ -989,16 +1211,16 @@ int tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name,
}
-ulong tdc_refresh_version(void)
+tdc_version_t tdc_refresh_version(void)
{
- return my_atomic_load64_explicit(&tdc_version, MY_MEMORY_ORDER_RELAXED);
+ return (tdc_version_t)my_atomic_load64_explicit(&tdc_version, MY_MEMORY_ORDER_RELAXED);
}
-ulong tdc_increment_refresh_version(void)
+tdc_version_t tdc_increment_refresh_version(void)
{
- ulong v= my_atomic_add64_explicit(&tdc_version, 1, MY_MEMORY_ORDER_RELAXED);
- DBUG_PRINT("tcache", ("incremented global refresh_version to: %lu", v));
+ tdc_version_t v= (tdc_version_t)my_atomic_add64_explicit(&tdc_version, 1, MY_MEMORY_ORDER_RELAXED);
+ DBUG_PRINT("tcache", ("incremented global refresh_version to: %lld", v));
return v + 1;
}
@@ -1094,59 +1316,3 @@ int tdc_iterate(THD *thd, my_hash_walk_action action, void *argument,
}
return res;
}
-
-
-/*
- Function to assign a new table map id to a table share.
-
- PARAMETERS
-
- share - Pointer to table share structure
-
- DESCRIPTION
-
- We are intentionally not checking that share->mutex is locked
- since this function should only be called when opening a table
- share and before it is entered into the table definition cache
- (meaning that it cannot be fetched by another thread, even
- accidentally).
-
- PRE-CONDITION(S)
-
- share is non-NULL
- last_table_id_lock initialized (tdc_inited)
-
- POST-CONDITION(S)
-
- share->table_map_id is given a value that with a high certainty is
- not used by any other table (the only case where a table id can be
- reused is on wrap-around, which means more than 4 billion table
- share opens have been executed while one table was open all the
- time).
-
- share->table_map_id is not ~0UL.
-*/
-
-void tdc_assign_new_table_id(TABLE_SHARE *share)
-{
- ulong tid;
- DBUG_ENTER("assign_new_table_id");
- DBUG_ASSERT(share);
- DBUG_ASSERT(tdc_inited);
-
- DBUG_EXECUTE_IF("simulate_big_table_id",
- if (last_table_id < UINT_MAX32)
- last_table_id= UINT_MAX32 - 1;);
- /*
- There is one reserved number that cannot be used. Remember to
- change this when 6-byte global table id's are introduced.
- */
- do
- {
- tid= my_atomic_add64_explicit(&last_table_id, 1, MY_MEMORY_ORDER_RELAXED);
- } while (unlikely(tid == ~0UL || tid == 0));
-
- share->table_map_id= tid;
- DBUG_PRINT("info", ("table_id= %lu", share->table_map_id));
- DBUG_VOID_RETURN;
-}
diff --git a/sql/table_cache.h b/sql/table_cache.h
index 2c5b0fc45a2..2e5bb3428dc 100644
--- a/sql/table_cache.h
+++ b/sql/table_cache.h
@@ -1,3 +1,5 @@
+#ifndef TABLE_CACHE_H_INCLUDED
+#define TABLE_CACHE_H_INCLUDED
/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
Copyright (c) 2010, 2011 Monty Program Ab
Copyright (C) 2013 Sergey Vojtovich and MariaDB Foundation
@@ -16,25 +18,27 @@
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#ifdef HAVE_PSI_INTERFACE
-extern PSI_mutex_key key_TABLE_SHARE_LOCK_table_share;
-extern PSI_cond_key key_TABLE_SHARE_COND_release;
-#endif
+struct Share_free_tables
+{
+ typedef I_P_List <TABLE, TABLE_share> List;
+ List list;
+ /** Avoid false sharing between instances */
+ char pad[CPU_LEVEL1_DCACHE_LINESIZE];
+};
+
+typedef int64 tdc_version_t;
+#define TDC_VERSION_MAX INT_MAX64
-class TDC_element
+struct TDC_element
{
-public:
uchar m_key[NAME_LEN + 1 + NAME_LEN + 1];
uint m_key_length;
- ulong version;
+ tdc_version_t version;
bool flushed;
TABLE_SHARE *share;
- typedef I_P_List <TABLE, TABLE_share> TABLE_list;
- typedef I_P_List <TABLE, All_share_tables> All_share_tables_list;
/**
- Protects ref_count, m_flush_tickets, all_tables, free_tables, flushed,
- all_tables_refs.
+ Protects ref_count, m_flush_tickets, all_tables, flushed, all_tables_refs.
*/
mysql_mutex_t LOCK_table_share;
mysql_cond_t COND_release;
@@ -50,139 +54,9 @@ public:
for this share.
*/
All_share_tables_list all_tables;
- TABLE_list free_tables;
-
- TDC_element() {}
-
- TDC_element(const char *key_arg, uint key_length) : m_key_length(key_length)
- {
- memcpy(m_key, key_arg, key_length);
- }
-
-
- void assert_clean_share()
- {
- DBUG_ASSERT(share == 0);
- DBUG_ASSERT(ref_count == 0);
- DBUG_ASSERT(m_flush_tickets.is_empty());
- DBUG_ASSERT(all_tables.is_empty());
- DBUG_ASSERT(free_tables.is_empty());
- DBUG_ASSERT(all_tables_refs == 0);
- DBUG_ASSERT(next == 0);
- DBUG_ASSERT(prev == 0);
- }
-
-
- /**
- Acquire TABLE object from table cache.
-
- @pre share must be protected against removal.
-
- Acquired object cannot be evicted or acquired again.
-
- @return TABLE object, or NULL if no unused objects.
- */
-
- TABLE *acquire_table(THD *thd)
- {
- TABLE *table;
-
- mysql_mutex_lock(&LOCK_table_share);
- table= free_tables.pop_front();
- if (table)
- {
- DBUG_ASSERT(!table->in_use);
- table->in_use= thd;
- /* The ex-unused table must be fully functional. */
- DBUG_ASSERT(table->db_stat && table->file);
- /* The children must be detached from the table. */
- DBUG_ASSERT(!table->file->extra(HA_EXTRA_IS_ATTACHED_CHILDREN));
- }
- mysql_mutex_unlock(&LOCK_table_share);
- return table;
- }
-
-
- /**
- Get last element of free_tables.
- */
-
- TABLE *free_tables_back()
- {
- TABLE_list::Iterator it(free_tables);
- TABLE *entry, *last= 0;
- while ((entry= it++))
- last= entry;
- return last;
- }
-
-
- /**
- Wait for MDL deadlock detector to complete traversing tdc.all_tables.
-
- Must be called before updating TABLE_SHARE::tdc.all_tables.
- */
-
- void wait_for_mdl_deadlock_detector()
- {
- while (all_tables_refs)
- mysql_cond_wait(&COND_release, &LOCK_table_share);
- }
-
-
- /**
- Prepeare table share for use with table definition cache.
- */
-
- static void lf_alloc_constructor(uchar *arg)
- {
- TDC_element *element= (TDC_element*) (arg + LF_HASH_OVERHEAD);
- DBUG_ENTER("lf_alloc_constructor");
- mysql_mutex_init(key_TABLE_SHARE_LOCK_table_share,
- &element->LOCK_table_share, MY_MUTEX_INIT_FAST);
- mysql_cond_init(key_TABLE_SHARE_COND_release, &element->COND_release, 0);
- element->m_flush_tickets.empty();
- element->all_tables.empty();
- element->free_tables.empty();
- element->all_tables_refs= 0;
- element->share= 0;
- element->ref_count= 0;
- element->next= 0;
- element->prev= 0;
- DBUG_VOID_RETURN;
- }
-
-
- /**
- Release table definition cache specific resources of table share.
- */
-
- static void lf_alloc_destructor(uchar *arg)
- {
- TDC_element *element= (TDC_element*) (arg + LF_HASH_OVERHEAD);
- DBUG_ENTER("lf_alloc_destructor");
- element->assert_clean_share();
- mysql_cond_destroy(&element->COND_release);
- mysql_mutex_destroy(&element->LOCK_table_share);
- DBUG_VOID_RETURN;
- }
-
-
- static void lf_hash_initializer(LF_HASH *hash __attribute__((unused)),
- TDC_element *element, LEX_STRING *key)
- {
- memcpy(element->m_key, key->str, key->length);
- element->m_key_length= key->length;
- element->assert_clean_share();
- }
-
-
- static uchar *key(const TDC_element *element, size_t *length,
- my_bool not_used __attribute__((unused)))
- {
- *length= element->m_key_length;
- return (uchar*) element->m_key;
- }
+ /** Avoid false sharing between TDC_element and free_tables */
+ char pad[CPU_LEVEL1_DCACHE_LINESIZE];
+ Share_free_tables free_tables[1];
};
@@ -196,8 +70,9 @@ enum enum_tdc_remove_table_type
extern ulong tdc_size;
extern ulong tc_size;
+extern uint32 tc_instances;
-extern void tdc_init(void);
+extern bool tdc_init(void);
extern void tdc_start_shutdown(void);
extern void tdc_deinit(void);
extern ulong tdc_records(void);
@@ -205,29 +80,27 @@ extern void tdc_purge(bool all);
extern TDC_element *tdc_lock_share(THD *thd, const char *db,
const char *table_name);
extern void tdc_unlock_share(TDC_element *element);
-extern TABLE_SHARE *tdc_acquire_share(THD *thd, const char *db,
- const char *table_name,
- const char *key, uint key_length,
- my_hash_value_type hash_value,
- uint flags, TABLE **out_table);
+extern TABLE_SHARE *tdc_acquire_share(THD *thd, TABLE_LIST *tl, uint flags,
+ TABLE **out_table= 0);
extern void tdc_release_share(TABLE_SHARE *share);
extern bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
const char *db, const char *table_name,
bool kill_delayed_threads);
+
+
extern int tdc_wait_for_old_version(THD *thd, const char *db,
const char *table_name,
ulong wait_timeout, uint deadlock_weight,
- ulong refresh_version= ULONG_MAX);
-extern ulong tdc_refresh_version(void);
-extern ulong tdc_increment_refresh_version(void);
-extern void tdc_assign_new_table_id(TABLE_SHARE *share);
+ tdc_version_t refresh_version= TDC_VERSION_MAX);
+extern tdc_version_t tdc_refresh_version(void);
+extern tdc_version_t tdc_increment_refresh_version(void);
extern int tdc_iterate(THD *thd, my_hash_walk_action action, void *argument,
bool no_dups= false);
extern uint tc_records(void);
extern void tc_purge(bool mark_flushed= false);
extern void tc_add_table(THD *thd, TABLE *table);
-extern bool tc_release_table(TABLE *table);
+extern void tc_release_table(TABLE *table);
/**
Create a table cache key for non-temporary table.
@@ -249,50 +122,4 @@ inline uint tdc_create_key(char *key, const char *db, const char *table_name)
return (uint) (strmake(strmake(key, db, NAME_LEN) + 1, table_name,
NAME_LEN) - key + 1);
}
-
-/**
- Convenience helper: call tdc_acquire_share() without out_table.
-*/
-
-static inline TABLE_SHARE *tdc_acquire_share(THD *thd, const char *db,
- const char *table_name,
- const char *key,
- uint key_length, uint flags)
-{
- return tdc_acquire_share(thd, db, table_name, key, key_length,
- my_hash_sort(&my_charset_bin, (uchar*) key,
- key_length), flags, 0);
-}
-
-
-/**
- Convenience helper: call tdc_acquire_share() without precomputed cache key.
-*/
-
-static inline TABLE_SHARE *tdc_acquire_share(THD *thd, const char *db,
- const char *table_name, uint flags)
-{
- char key[MAX_DBKEY_LENGTH];
- uint key_length;
- key_length= tdc_create_key(key, db, table_name);
- return tdc_acquire_share(thd, db, table_name, key, key_length, flags);
-}
-
-
-/**
- Convenience helper: call tdc_acquire_share() reusing the MDL cache key.
-
- @note lifetime of the returned TABLE_SHARE is limited by the
- lifetime of the TABLE_LIST object!!!
-*/
-
-uint get_table_def_key(const TABLE_LIST *table_list, const char **key);
-
-static inline TABLE_SHARE *tdc_acquire_share_shortlived(THD *thd, TABLE_LIST *tl,
- uint flags)
-{
- const char *key;
- uint key_length= get_table_def_key(tl, &key);
- return tdc_acquire_share(thd, tl->db, tl->table_name, key, key_length,
- tl->mdl_request.key.tc_hash_value(), flags, 0);
-}
+#endif /* TABLE_CACHE_H_INCLUDED */
diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc
new file mode 100644
index 00000000000..ed23dae06d2
--- /dev/null
+++ b/sql/temporary_tables.cc
@@ -0,0 +1,1513 @@
+/*
+ Copyright (c) 2016 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/**
+ All methods pertaining to temporary tables.
+*/
+
+#include "sql_acl.h" /* TMP_TABLE_ACLS */
+#include "sql_base.h" /* tdc_create_key */
+#include "lock.h" /* mysql_lock_remove */
+#include "log_event.h" /* Query_log_event */
+#include "sql_show.h" /* append_identifier */
+#include "sql_handler.h" /* mysql_ha_rm_temporary_tables */
+#include "rpl_rli.h" /* rpl_group_info */
+
+#define IS_USER_TABLE(A) ((A->tmp_table == TRANSACTIONAL_TMP_TABLE) || \
+ (A->tmp_table == NON_TRANSACTIONAL_TMP_TABLE))
+
+/**
+ Check whether temporary tables exist. The decision is made based on the
+ existence of TMP_TABLE_SHAREs in Open_tables_state::temporary_tables list.
+
+ @return true Temporary tables exist
+ false No temporary tables exist
+*/
+bool THD::has_thd_temporary_tables()
+{
+ DBUG_ENTER("THD::has_thd_temporary_tables");
+ bool result= (temporary_tables && !temporary_tables->is_empty());
+ DBUG_RETURN(result);
+}
+
+
+/**
+ Create a temporary table, open it and return the TABLE handle.
+
+ @param hton [IN] Handlerton
+ @param frm [IN] Binary frm image
+ @param path [IN] File path (without extension)
+ @param db [IN] Schema name
+ @param table_name [IN] Table name
+ @param open_in_engine [IN] Whether open table in SE
+
+
+ @return Success A pointer to table object
+ Failure NULL
+*/
+TABLE *THD::create_and_open_tmp_table(handlerton *hton,
+ LEX_CUSTRING *frm,
+ const char *path,
+ const char *db,
+ const char *table_name,
+ bool open_in_engine)
+{
+ DBUG_ENTER("THD::create_and_open_tmp_table");
+
+ TMP_TABLE_SHARE *share;
+ TABLE *table= NULL;
+
+ if ((share= create_temporary_table(hton, frm, path, db, table_name)))
+ {
+ table= open_temporary_table(share, table_name, open_in_engine);
+
+ /*
+ Failed to open a temporary table instance. As we are not passing
+ the created TMP_TABLE_SHARE to the caller, we must remove it from
+ the list and free it here.
+ */
+ if (!table)
+ {
+ /* Remove the TABLE_SHARE from the list of temporary tables. */
+ temporary_tables->remove(share);
+
+ /* Free the TMP_TABLE_SHARE. */
+ free_tmp_table_share(share, false);
+ }
+ }
+
+ DBUG_RETURN(table);
+}
+
+
+/**
+ Check whether an open table with db/table name is in use.
+
+ @param db [IN] Database name
+ @param table_name [IN] Table name
+ @param state [IN] State of temp table to open
+
+ @return Success Pointer to first used table instance.
+ Failure NULL
+*/
+TABLE *THD::find_temporary_table(const char *db,
+ const char *table_name,
+ Temporary_table_state state)
+{
+ DBUG_ENTER("THD::find_temporary_table");
+
+ TABLE *table;
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+ bool locked;
+
+ if (!has_temporary_tables())
+ {
+ DBUG_RETURN(NULL);
+ }
+
+ key_length= create_tmp_table_def_key(key, db, table_name);
+
+ locked= lock_temporary_tables();
+ table= find_temporary_table(key, key_length, state);
+ if (locked)
+ {
+ DBUG_ASSERT(m_tmp_tables_locked);
+ unlock_temporary_tables();
+ }
+
+ DBUG_RETURN(table);
+}
+
+
+/**
+ Check whether an open table specified in TABLE_LIST is in use.
+
+ @param tl [IN] TABLE_LIST
+
+ @return Success Pointer to first used table instance.
+ Failure NULL
+*/
+TABLE *THD::find_temporary_table(const TABLE_LIST *tl,
+ Temporary_table_state state)
+{
+ DBUG_ENTER("THD::find_temporary_table");
+ TABLE *table= find_temporary_table(tl->get_db_name(), tl->get_table_name(),
+ state);
+ DBUG_RETURN(table);
+}
+
+
+/**
+ Check whether a temporary table exists with the specified key.
+ The key, in this case, is not the usual key used for temporary tables.
+ It does not contain server_id & pseudo_thread_id. This function is
+ essentially used to check whether there is any temporary table
+ which _shadows_ a base table.
+ (see: Query_cache::send_result_to_client())
+
+ @return Success A pointer to table share object
+ Failure NULL
+*/
+TMP_TABLE_SHARE *THD::find_tmp_table_share_w_base_key(const char *key,
+ uint key_length)
+{
+ DBUG_ENTER("THD::find_tmp_table_share_w_base_key");
+
+ TMP_TABLE_SHARE *share;
+ TMP_TABLE_SHARE *result= NULL;
+ bool locked;
+
+ if (!has_temporary_tables())
+ {
+ DBUG_RETURN(NULL);
+ }
+
+ locked= lock_temporary_tables();
+
+ All_tmp_tables_list::Iterator it(*temporary_tables);
+ while ((share= it++))
+ {
+ if ((share->table_cache_key.length - TMP_TABLE_KEY_EXTRA) == key_length
+ && !memcmp(share->table_cache_key.str, key, key_length))
+ {
+ result= share;
+ }
+ }
+
+ if (locked)
+ {
+ DBUG_ASSERT(m_tmp_tables_locked);
+ unlock_temporary_tables();
+ }
+
+ DBUG_RETURN(result);
+}
+
+
+/**
+ Lookup the TMP_TABLE_SHARE using the given db/table_name. The server_id and
+ pseudo_thread_id used to generate the table definition key are taken from THD
+ (see create_tmp_table_def_key()). Return NULL if none found.
+
+ @return Success A pointer to table share object
+ Failure NULL
+*/
+TMP_TABLE_SHARE *THD::find_tmp_table_share(const char *db,
+ const char *table_name)
+{
+ DBUG_ENTER("THD::find_tmp_table_share");
+
+ TMP_TABLE_SHARE *share;
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+
+ key_length= create_tmp_table_def_key(key, db, table_name);
+ share= find_tmp_table_share(key, key_length);
+
+ DBUG_RETURN(share);
+}
+
+
+/**
+ Lookup TMP_TABLE_SHARE using the specified TABLE_LIST element.
+ Return NULL if none found.
+
+ @param tl [IN] Table
+
+ @return Success A pointer to table share object
+ Failure NULL
+*/
+TMP_TABLE_SHARE *THD::find_tmp_table_share(const TABLE_LIST *tl)
+{
+ DBUG_ENTER("THD::find_tmp_table_share");
+ TMP_TABLE_SHARE *share= find_tmp_table_share(tl->get_db_name(),
+ tl->get_table_name());
+ DBUG_RETURN(share);
+}
+
+
+/**
+ Lookup TMP_TABLE_SHARE using the specified table definition key.
+ Return NULL if none found.
+
+ @return Success A pointer to table share object
+ Failure NULL
+*/
+TMP_TABLE_SHARE *THD::find_tmp_table_share(const char *key, uint key_length)
+{
+ DBUG_ENTER("THD::find_tmp_table_share");
+
+ TMP_TABLE_SHARE *share;
+ TMP_TABLE_SHARE *result= NULL;
+ bool locked;
+
+ if (!has_temporary_tables())
+ {
+ DBUG_RETURN(NULL);
+ }
+
+ locked= lock_temporary_tables();
+
+ All_tmp_tables_list::Iterator it(*temporary_tables);
+ while ((share= it++))
+ {
+ if (share->table_cache_key.length == key_length &&
+ !(memcmp(share->table_cache_key.str, key, key_length)))
+ {
+ result= share;
+ break;
+ }
+ }
+
+ if (locked)
+ {
+ DBUG_ASSERT(m_tmp_tables_locked);
+ unlock_temporary_tables();
+ }
+
+ DBUG_RETURN(result);
+}
+
+
+/**
+ Find a temporary table specified by TABLE_LIST instance in the open table
+ list and prepare its TABLE instance for use. If
+
+ This function tries to resolve this table in the list of temporary tables
+ of this thread. Temporary tables are thread-local and "shadow" base
+ tables with the same name.
+
+ @note In most cases one should use THD::open_tables() instead
+ of this call.
+
+ @note One should finalize process of opening temporary table for table
+ list element by calling open_and_process_table(). This function
+ is responsible for table version checking and handling of merge
+ tables.
+
+ @note We used to check global_read_lock before opening temporary tables.
+ However, that limitation was artificial and is removed now.
+
+ @param tl [IN] TABLE_LIST
+
+ @return Error status.
+ @retval false On success. If a temporary table exists
+ for the given key, tl->table is set.
+ @retval true On error. my_error() has been called.
+*/
+bool THD::open_temporary_table(TABLE_LIST *tl)
+{
+ DBUG_ENTER("THD::open_temporary_table");
+ DBUG_PRINT("enter", ("table: '%s'.'%s'", tl->db, tl->table_name));
+
+ TMP_TABLE_SHARE *share;
+ TABLE *table= NULL;
+
+ /*
+ Code in open_table() assumes that TABLE_LIST::table can be non-zero only
+ for pre-opened temporary tables.
+ */
+ DBUG_ASSERT(tl->table == NULL);
+
+ /*
+ This function should not be called for cases when derived or I_S
+ tables can be met since table list elements for such tables can
+ have invalid db or table name.
+ Instead THD::open_tables() should be used.
+ */
+ DBUG_ASSERT(!tl->derived && !tl->schema_table);
+
+ if (tl->open_type == OT_BASE_ONLY || !has_temporary_tables())
+ {
+ DBUG_PRINT("info", ("skip_temporary is set or no temporary tables"));
+ DBUG_RETURN(false);
+ }
+
+ /*
+ Temporary tables are not safe for parallel replication. They were
+ designed to be visible to one thread only, so have no table locking.
+ Thus there is no protection against two conflicting transactions
+ committing in parallel and things like that.
+
+ So for now, anything that uses temporary tables will be serialised
+ with anything before it, when using parallel replication.
+ */
+
+ if (rgi_slave &&
+ rgi_slave->is_parallel_exec &&
+ find_temporary_table(tl) &&
+ wait_for_prior_commit())
+ DBUG_RETURN(true);
+
+ /*
+ First check if there is a reusable open table available in the
+ open table list.
+ */
+ if (find_and_use_tmp_table(tl, &table))
+ {
+ DBUG_RETURN(true); /* Error */
+ }
+
+ /*
+ No reusable table was found. We will have to open a new instance.
+ */
+ if (!table && (share= find_tmp_table_share(tl)))
+ {
+ table= open_temporary_table(share, tl->get_table_name(), true);
+ }
+
+ if (!table)
+ {
+ if (tl->open_type == OT_TEMPORARY_ONLY &&
+ tl->open_strategy == TABLE_LIST::OPEN_NORMAL)
+ {
+ my_error(ER_NO_SUCH_TABLE, MYF(0), tl->db, tl->table_name);
+ DBUG_RETURN(true);
+ }
+ DBUG_RETURN(false);
+ }
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (tl->partition_names)
+ {
+ /* Partitioned temporary tables is not supported. */
+ DBUG_ASSERT(!table->part_info);
+ my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(true);
+ }
+#endif
+
+ table->query_id= query_id;
+ thread_specific_used= true;
+
+ /* It is neither a derived table nor non-updatable view. */
+ tl->updatable= true;
+ tl->table= table;
+
+ table->init(this, tl);
+
+ DBUG_PRINT("info", ("Using temporary table"));
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Pre-open temporary tables corresponding to table list elements.
+
+ @note One should finalize process of opening temporary tables
+ by calling open_tables(). This function is responsible
+ for table version checking and handling of merge tables.
+
+ @param tl [IN] TABLE_LIST
+
+ @return false On success. If a temporary table exists
+ for the given element, tl->table is set.
+ true On error. my_error() has been called.
+*/
+bool THD::open_temporary_tables(TABLE_LIST *tl)
+{
+ DBUG_ENTER("THD::open_temporary_tables");
+
+ TABLE_LIST *first_not_own= lex->first_not_own_table();
+
+ for (TABLE_LIST *table= tl; table && table != first_not_own;
+ table= table->next_global)
+ {
+ if (table->derived || table->schema_table)
+ {
+ /*
+ Derived and I_S tables will be handled by a later call to open_tables().
+ */
+ continue;
+ }
+
+ if (open_temporary_table(table))
+ {
+ DBUG_RETURN(true);
+ }
+ }
+
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Close all temporary tables created by 'CREATE TEMPORARY TABLE' for this
+ thread and create one DROP TEMPORARY TABLE binlog event for each
+ pseudo-thread.
+
+ Temporary tables created in a sql slave are closed by
+ Relay_log_info::close_temporary_tables().
+
+ @return false Success
+ true Failure
+*/
+bool THD::close_temporary_tables()
+{
+ DBUG_ENTER("THD::close_temporary_tables");
+
+ TMP_TABLE_SHARE *share;
+ TABLE *table;
+
+ bool error= false;
+
+ if (!has_thd_temporary_tables())
+ {
+ if (temporary_tables)
+ {
+ my_free(temporary_tables);
+ temporary_tables= NULL;
+ }
+ DBUG_RETURN(false);
+ }
+
+ DBUG_ASSERT(!rgi_slave);
+
+ /*
+ Ensure we don't have open HANDLERs for tables we are about to close.
+ This is necessary when THD::close_temporary_tables() is called as
+ part of execution of BINLOG statement (e.g. for format description event).
+ */
+ mysql_ha_rm_temporary_tables(this);
+
+ /* Close all open temporary tables. */
+ All_tmp_tables_list::Iterator it(*temporary_tables);
+ while ((share= it++))
+ {
+ /* Traverse the table list. */
+ while ((table= share->all_tmp_tables.pop_front()))
+ {
+ table->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
+ free_temporary_table(table);
+ }
+ }
+
+ // Write DROP TEMPORARY TABLE query log events to binary log.
+ if (mysql_bin_log.is_open())
+ {
+ error= log_events_and_free_tmp_shares();
+ }
+ else
+ {
+ while ((share= temporary_tables->pop_front()))
+ {
+ free_tmp_table_share(share, true);
+ }
+ }
+
+ /* By now, there mustn't be any elements left in the list. */
+ DBUG_ASSERT(temporary_tables->is_empty());
+
+ my_free(temporary_tables);
+ temporary_tables= NULL;
+
+ DBUG_RETURN(error);
+}
+
+
+/**
+ Rename a temporary table.
+
+ @param table [IN] Table handle
+ @param db [IN] New schema name
+ @param table_name [IN] New table name
+
+ @return false Success
+ true Error
+*/
+bool THD::rename_temporary_table(TABLE *table,
+ const char *db,
+ const char *table_name)
+{
+ DBUG_ENTER("THD::rename_temporary_table");
+
+ char *key;
+ uint key_length;
+
+ TABLE_SHARE *share= table->s;
+
+ if (!(key= (char *) alloc_root(&share->mem_root, MAX_DBKEY_LENGTH)))
+ {
+ DBUG_RETURN(true);
+ }
+
+ /*
+ Temporary tables are renamed by simply changing their table definition key.
+ */
+ key_length= create_tmp_table_def_key(key, db, table_name);
+ share->set_table_cache_key(key, key_length);
+
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Drop a temporary table.
+
+ Try to locate the table in the list of open temporary tables.
+ If the table is found:
+ - If the table is locked with LOCK TABLES or by prelocking,
+ unlock it and remove it from the list of locked tables
+ (THD::lock). Currently only transactional temporary tables
+ are locked.
+ - Close the temporary table, remove its .FRM.
+ - Remove the table share from the list of temporary table shares.
+
+ This function is used to drop user temporary tables, as well as
+ internal tables created in CREATE TEMPORARY TABLE ... SELECT
+ or ALTER TABLE.
+
+ @param table [IN] Temporary table to be deleted
+ @param is_trans [OUT] Is set to the type of the table:
+ transactional (e.g. innodb) as true or
+ non-transactional (e.g. myisam) as false.
+  @param delete_table  [IN]                   Whether to delete the table files?
+
+ @return false Table was dropped
+ true Error
+*/
+bool THD::drop_temporary_table(TABLE *table, bool *is_trans, bool delete_table)
+{
+ DBUG_ENTER("THD::drop_temporary_table");
+
+ TMP_TABLE_SHARE *share;
+ TABLE *tab;
+ bool result= false;
+ bool locked;
+
+ DBUG_ASSERT(table);
+ DBUG_PRINT("tmptable", ("Dropping table: '%s'.'%s'",
+ table->s->db.str, table->s->table_name.str));
+
+ locked= lock_temporary_tables();
+
+ share= tmp_table_share(table);
+
+ /* Table might be in use by some outer statement. */
+ All_share_tables_list::Iterator it(share->all_tmp_tables);
+ while ((tab= it++))
+ {
+ if (tab != table && tab->query_id != 0)
+ {
+      /* Found a table instance in use. This table cannot be dropped. */
+ my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr());
+ result= true;
+ goto end;
+ }
+ }
+
+ if (is_trans)
+ *is_trans= table->file->has_transactions();
+
+ /*
+ Iterate over the list of open tables and close them.
+ */
+ while ((tab= share->all_tmp_tables.pop_front()))
+ {
+ /*
+ We need to set the THD as it may be different in case of
+ parallel replication
+ */
+ tab->in_use= this;
+ if (delete_table)
+ tab->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
+ free_temporary_table(tab);
+ }
+
+ DBUG_ASSERT(temporary_tables);
+
+ /* Remove the TABLE_SHARE from the list of temporary tables. */
+ temporary_tables->remove(share);
+
+ /* Free the TABLE_SHARE and/or delete the files. */
+ free_tmp_table_share(share, delete_table);
+
+end:
+ if (locked)
+ {
+ DBUG_ASSERT(m_tmp_tables_locked);
+ unlock_temporary_tables();
+ }
+
+ DBUG_RETURN(result);
+}
+
+
+/**
+ Delete the temporary table files.
+
+ @param base [IN] Handlerton for table to be deleted.
+ @param path [IN] Path to the table to be deleted (i.e. path
+ to its .frm without an extension).
+
+ @return false Success
+ true Error
+*/
+bool THD::rm_temporary_table(handlerton *base, const char *path)
+{
+ DBUG_ENTER("THD::rm_temporary_table");
+
+ bool error= false;
+ handler *file;
+ char frm_path[FN_REFLEN + 1];
+
+ strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS);
+ if (mysql_file_delete(key_file_frm, frm_path, MYF(0)))
+ {
+ error= true;
+ }
+ file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base);
+ if (file && file->ha_delete_table(path))
+ {
+ error= true;
+ sql_print_warning("Could not remove temporary table: '%s', error: %d",
+ path, my_errno);
+ }
+
+ delete file;
+ DBUG_RETURN(error);
+}
+
+
+/**
+ Mark all temporary tables which were used by the current statement or
+ sub-statement as free for reuse, but only if the query_id can be cleared.
+
+ @remark For temp tables associated with a open SQL HANDLER the query_id
+ is not reset until the HANDLER is closed.
+*/
+void THD::mark_tmp_tables_as_free_for_reuse()
+{
+ DBUG_ENTER("THD::mark_tmp_tables_as_free_for_reuse");
+
+ TMP_TABLE_SHARE *share;
+ TABLE *table;
+ bool locked;
+
+ if (query_id == 0)
+ {
+ /*
+ Thread has not executed any statement and has not used any
+ temporary tables.
+ */
+ DBUG_VOID_RETURN;
+ }
+
+ if (!has_temporary_tables())
+ {
+ DBUG_VOID_RETURN;
+ }
+
+ locked= lock_temporary_tables();
+
+ All_tmp_tables_list::Iterator it(*temporary_tables);
+ while ((share= it++))
+ {
+ All_share_tables_list::Iterator tables_it(share->all_tmp_tables);
+ while ((table= tables_it++))
+ {
+ if ((table->query_id == query_id) && !table->open_by_handler)
+ {
+ mark_tmp_table_as_free_for_reuse(table);
+ }
+ }
+ }
+
+ if (locked)
+ {
+ DBUG_ASSERT(m_tmp_tables_locked);
+ unlock_temporary_tables();
+ }
+
+ if (rgi_slave)
+ {
+ /*
+      Temporary tables are shared with other SQL execution threads.
+ As a safety measure, clear the pointer to the common area.
+ */
+ temporary_tables= NULL;
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Reset a single temporary table. Effectively this "closes" one temporary
+ table in a session.
+
+ @param table Temporary table
+
+ @return void
+*/
+void THD::mark_tmp_table_as_free_for_reuse(TABLE *table)
+{
+ DBUG_ENTER("THD::mark_tmp_table_as_free_for_reuse");
+
+ DBUG_ASSERT(table->s->tmp_table);
+
+ table->query_id= 0;
+ table->file->ha_reset();
+
+ /* Detach temporary MERGE children from temporary parent. */
+ DBUG_ASSERT(table->file);
+ table->file->extra(HA_EXTRA_DETACH_CHILDREN);
+
+ /*
+    Reset temporary table lock type to its default value (TL_WRITE).
+
+ Statements such as INSERT INTO .. SELECT FROM tmp, CREATE TABLE
+ .. SELECT FROM tmp and UPDATE may under some circumstances modify
+ the lock type of the tables participating in the statement. This
+ isn't a problem for non-temporary tables since their lock type is
+ reset at every open, but the same does not occur for temporary
+ tables for historical reasons.
+
+ Furthermore, the lock type of temporary tables is not really that
+ important because they can only be used by one query at a time.
+ Nonetheless, it's safer from a maintenance point of view to reset
+ the lock type of this singleton TABLE object as to not cause problems
+ when the table is reused.
+
+    Even under LOCK TABLES mode it's okay to reset the lock type as
+ LOCK TABLES is allowed (but ignored) for a temporary table.
+ */
+ table->reginfo.lock_type= TL_WRITE;
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Remove and return the specified table's TABLE_SHARE from the temporary
+ tables list.
+
+ @param table [IN] Table
+
+ @return TMP_TABLE_SHARE of the specified table.
+*/
+TMP_TABLE_SHARE *THD::save_tmp_table_share(TABLE *table)
+{
+ DBUG_ENTER("THD::save_tmp_table_share");
+
+ TMP_TABLE_SHARE *share;
+
+ lock_temporary_tables();
+ DBUG_ASSERT(temporary_tables);
+ share= tmp_table_share(table);
+ temporary_tables->remove(share);
+ unlock_temporary_tables();
+
+ DBUG_RETURN(share);
+}
+
+
+/**
+ Add the specified TMP_TABLE_SHARE to the temporary tables list.
+
+ @param share [IN] Table share
+
+ @return void
+*/
+void THD::restore_tmp_table_share(TMP_TABLE_SHARE *share)
+{
+ DBUG_ENTER("THD::restore_tmp_table_share");
+
+ lock_temporary_tables();
+ DBUG_ASSERT(temporary_tables);
+ temporary_tables->push_front(share);
+ unlock_temporary_tables();
+
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ If its a replication slave, report whether slave temporary tables
+ exist (Relay_log_info::save_temporary_tables) or report about THD
+ temporary table (Open_tables_state::temporary_tables) otherwise.
+
+  @return true                        Temporary tables exist
+          false                       No temporary table exists
+*/
+inline bool THD::has_temporary_tables()
+{
+ DBUG_ENTER("THD::has_temporary_tables");
+ bool result= (rgi_slave
+ ? (rgi_slave->rli->save_temporary_tables &&
+ !rgi_slave->rli->save_temporary_tables->is_empty())
+ : (has_thd_temporary_tables()));
+ DBUG_RETURN(result);
+}
+
+
+/**
+ Create a table definition key.
+
+ @param key [OUT] Buffer for the key to be created (must
+                               be of size MAX_DBKEY_LENGTH)
+ @param db [IN] Database name
+ @param table_name [IN] Table name
+
+ @return Key length.
+
+ @note
+    The table key is created from:
+ db + \0
+ table_name + \0
+
+ Additionally, we add the following to make each temporary table unique on
+ the slave.
+
+ 4 bytes of master thread id
+ 4 bytes of pseudo thread id
+*/
+uint THD::create_tmp_table_def_key(char *key, const char *db,
+ const char *table_name)
+{
+ DBUG_ENTER("THD::create_tmp_table_def_key");
+
+ uint key_length;
+
+ key_length= tdc_create_key(key, db, table_name);
+ int4store(key + key_length, variables.server_id);
+ int4store(key + key_length + 4, variables.pseudo_thread_id);
+ key_length += TMP_TABLE_KEY_EXTRA;
+
+ DBUG_RETURN(key_length);
+}
+
+
+/**
+ Create a temporary table.
+
+ @param hton [IN] Handlerton
+ @param frm [IN] Binary frm image
+ @param path [IN] File path (without extension)
+ @param db [IN] Schema name
+ @param table_name [IN] Table name
+
+ @return Success A pointer to table share object
+ Failure NULL
+*/
+TMP_TABLE_SHARE *THD::create_temporary_table(handlerton *hton,
+ LEX_CUSTRING *frm,
+ const char *path,
+ const char *db,
+ const char *table_name)
+{
+ DBUG_ENTER("THD::create_temporary_table");
+
+ TMP_TABLE_SHARE *share;
+ char key_cache[MAX_DBKEY_LENGTH];
+ char *saved_key_cache;
+ char *tmp_path;
+ uint key_length;
+ bool locked;
+ int res;
+
+ /* Temporary tables are not safe for parallel replication. */
+ if (rgi_slave &&
+ rgi_slave->is_parallel_exec &&
+ wait_for_prior_commit())
+ DBUG_RETURN(NULL);
+
+ /* Create the table definition key for the temporary table. */
+ key_length= create_tmp_table_def_key(key_cache, db, table_name);
+
+ if (!(share= (TMP_TABLE_SHARE *) my_malloc(sizeof(TMP_TABLE_SHARE) +
+ strlen(path) + 1 + key_length,
+ MYF(MY_WME))))
+ {
+ DBUG_RETURN(NULL); /* Out of memory */
+ }
+
+ tmp_path= (char *)(share + 1);
+ saved_key_cache= strmov(tmp_path, path) + 1;
+ memcpy(saved_key_cache, key_cache, key_length);
+
+ init_tmp_table_share(this, share, saved_key_cache, key_length,
+ strend(saved_key_cache) + 1, tmp_path);
+
+ share->db_plugin= ha_lock_engine(this, hton);
+
+ /*
+ Prefer using frm image over file. The image might not be available in
+ ALTER TABLE, when the discovering engine took over the ownership (see
+ TABLE::read_frm_image).
+ */
+ res= (frm->str)
+ ? share->init_from_binary_frm_image(this, false, frm->str, frm->length)
+ : open_table_def(this, share, GTS_TABLE | GTS_USE_DISCOVERY);
+
+ if (res)
+ {
+ /*
+ No need to lock share->mutex as this is not needed for temporary tables.
+ */
+ free_table_share(share);
+ my_free(share);
+ DBUG_RETURN(NULL);
+ }
+
+ share->m_psi= PSI_CALL_get_table_share(true, share);
+
+ locked= lock_temporary_tables();
+
+ /* Initialize the all_tmp_tables list. */
+ share->all_tmp_tables.empty();
+
+ /*
+ We need to alloc & initialize temporary_tables if this happens
+ to be the very first temporary table.
+ */
+ if (!temporary_tables)
+ {
+ if ((temporary_tables=
+ (All_tmp_tables_list *) my_malloc(sizeof(All_tmp_tables_list),
+ MYF(MY_WME))))
+ {
+ temporary_tables->empty();
+ }
+ else
+ {
+ DBUG_RETURN(NULL); /* Out of memory */
+ }
+ }
+
+ /* Add share to the head of the temporary table share list. */
+ temporary_tables->push_front(share);
+
+ if (locked)
+ {
+ DBUG_ASSERT(m_tmp_tables_locked);
+ unlock_temporary_tables();
+ }
+
+ DBUG_RETURN(share);
+}
+
+
+/**
+ Find a table with the specified key.
+
+ @param key [IN] Key
+ @param key_length [IN] Key length
+ @param state [IN] Open table state to look for
+
+ @return Success Pointer to the table instance.
+ Failure NULL
+*/
+TABLE *THD::find_temporary_table(const char *key, uint key_length,
+ Temporary_table_state state)
+{
+ DBUG_ENTER("THD::find_temporary_table");
+
+ TMP_TABLE_SHARE *share;
+ TABLE *table;
+ TABLE *result= NULL;
+ bool locked;
+
+ locked= lock_temporary_tables();
+
+ All_tmp_tables_list::Iterator it(*temporary_tables);
+ while ((share= it++))
+ {
+ if (share->table_cache_key.length == key_length &&
+ !(memcmp(share->table_cache_key.str, key, key_length)))
+ {
+ /* A matching TMP_TABLE_SHARE is found. */
+ All_share_tables_list::Iterator tables_it(share->all_tmp_tables);
+
+ bool found= false;
+ while (!found && (table= tables_it++))
+ {
+ switch (state)
+ {
+ case TMP_TABLE_IN_USE: found= table->query_id > 0; break;
+ case TMP_TABLE_NOT_IN_USE: found= table->query_id == 0; break;
+ case TMP_TABLE_ANY: found= true; break;
+ }
+ }
+ if (table && unlikely(table->m_needs_reopen))
+ {
+ share->all_tmp_tables.remove(table);
+ free_temporary_table(table);
+ it.rewind();
+ continue;
+ }
+ result= table;
+ break;
+ }
+ }
+
+ if (locked)
+ {
+ DBUG_ASSERT(m_tmp_tables_locked);
+ unlock_temporary_tables();
+ }
+
+ DBUG_RETURN(result);
+}
+
+
+
+/**
+ Open a table from the specified TABLE_SHARE with the given alias.
+
+ @param share [IN] Table share
+ @param alias [IN] Table alias
+ @param open_in_engine [IN] Whether open table in SE
+
+ @return Success A pointer to table object
+ Failure NULL
+*/
+TABLE *THD::open_temporary_table(TMP_TABLE_SHARE *share,
+ const char *alias,
+ bool open_in_engine)
+{
+ DBUG_ENTER("THD::open_temporary_table");
+
+ TABLE *table;
+
+ if (!(table= (TABLE *) my_malloc(sizeof(TABLE), MYF(MY_WME))))
+ {
+ DBUG_RETURN(NULL); /* Out of memory */
+ }
+
+ if (open_table_from_share(this, share, alias,
+ open_in_engine ? (uint)HA_OPEN_KEYFILE : 0,
+ EXTRA_RECORD, ha_open_options, table,
+ open_in_engine ? false : true))
+ {
+ my_free(table);
+ DBUG_RETURN(NULL);
+ }
+
+ table->reginfo.lock_type= TL_WRITE; /* Simulate locked */
+ table->grant.privilege= TMP_TABLE_ACLS;
+ share->tmp_table= (table->file->has_transactions() ?
+ TRANSACTIONAL_TMP_TABLE : NON_TRANSACTIONAL_TMP_TABLE);
+
+ table->pos_in_table_list= 0;
+ table->query_id= query_id;
+
+ /* Add table to the head of table list. */
+ share->all_tmp_tables.push_front(table);
+
+ /* Increment Slave_open_temp_table_definitions status variable count. */
+ if (rgi_slave)
+ {
+ thread_safe_increment32(&slave_open_temp_tables);
+ }
+
+ DBUG_PRINT("tmptable", ("Opened table: '%s'.'%s'%p", table->s->db.str,
+ table->s->table_name.str, table));
+ DBUG_RETURN(table);
+}
+
+
+/**
+ Find a reusable table in the open table list using the specified TABLE_LIST.
+
+ @param tl [IN] Table list
+ @param out_table [OUT] Pointer to the requested TABLE object
+
+ @return Success false
+ Failure true
+*/
+bool THD::find_and_use_tmp_table(const TABLE_LIST *tl, TABLE **out_table)
+{
+ DBUG_ENTER("THD::find_and_use_tmp_table");
+
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+ bool result;
+
+ key_length= create_tmp_table_def_key(key, tl->get_db_name(),
+ tl->get_table_name());
+ result= use_temporary_table(find_temporary_table(key, key_length,
+ TMP_TABLE_NOT_IN_USE),
+ out_table);
+ DBUG_RETURN(result);
+}
+
+/**
+ Mark table as in-use.
+
+ @param table [IN] Table to be marked in-use
+ @param out_table [OUT] Pointer to the specified table
+
+ @return false Success
+ true Error
+*/
+bool THD::use_temporary_table(TABLE *table, TABLE **out_table)
+{
+ DBUG_ENTER("THD::use_temporary_table");
+
+ *out_table= table;
+
+ /* The following can happen if find_temporary_table() returns NULL */
+ if (!table)
+ DBUG_RETURN(false);
+
+ /*
+ Temporary tables are not safe for parallel replication. They were
+ designed to be visible to one thread only, so have no table locking.
+ Thus there is no protection against two conflicting transactions
+ committing in parallel and things like that.
+
+ So for now, anything that uses temporary tables will be serialised
+ with anything before it, when using parallel replication.
+
+ TODO: We might be able to introduce a reference count or something
+ on temp tables, and have slave worker threads wait for it to reach
+ zero before being allowed to use the temp table. Might not be worth
+ it though, as statement-based replication using temporary tables is
+ in any case rather fragile.
+ */
+ if (rgi_slave &&
+ rgi_slave->is_parallel_exec &&
+ wait_for_prior_commit())
+ DBUG_RETURN(true);
+
+ /*
+ We need to set the THD as it may be different in case of
+ parallel replication
+ */
+ table->in_use= this;
+
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Close a temporary table.
+
+ @param table [IN] Table handle
+
+ @return void
+*/
+void THD::close_temporary_table(TABLE *table)
+{
+ DBUG_ENTER("THD::close_temporary_table");
+
+ DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'%p alias: '%s'",
+ table->s->db.str, table->s->table_name.str,
+ table, table->alias.c_ptr()));
+
+ closefrm(table);
+ my_free(table);
+
+ if (rgi_slave)
+ {
+ /* Natural invariant of temporary_tables */
+ DBUG_ASSERT(slave_open_temp_tables || !temporary_tables);
+ /* Decrement Slave_open_temp_table_definitions status variable count. */
+ thread_safe_decrement32(&slave_open_temp_tables);
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Write query log events with "DROP TEMPORARY TABLES .." for each pseudo
+ thread to the binary log.
+
+ @return false Success
+ true Error
+*/
+bool THD::log_events_and_free_tmp_shares()
+{
+ DBUG_ENTER("THD::log_events_and_free_tmp_shares");
+
+ DBUG_ASSERT(!rgi_slave);
+
+ TMP_TABLE_SHARE *share;
+ TMP_TABLE_SHARE *sorted;
+ TMP_TABLE_SHARE *prev_sorted;
+ // Assume thd->variables.option_bits has OPTION_QUOTE_SHOW_CREATE.
+ bool was_quote_show= true;
+ bool error= false;
+ bool found_user_tables= false;
+ // Better add "IF EXISTS" in case a RESET MASTER has been done.
+ const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
+ char buf[FN_REFLEN];
+
+ String s_query(buf, sizeof(buf), system_charset_info);
+ s_query.copy(stub, sizeof(stub) - 1, system_charset_info);
+
+ /*
+ Insertion sort of temporary tables by pseudo_thread_id to build ordered
+ list of sublists of equal pseudo_thread_id.
+ */
+ All_tmp_tables_list::Iterator it_sorted(*temporary_tables);
+ All_tmp_tables_list::Iterator it_unsorted(*temporary_tables);
+ uint sorted_count= 0;
+ while((share= it_unsorted++))
+ {
+ if (IS_USER_TABLE(share))
+ {
+ prev_sorted= NULL;
+
+ if (!found_user_tables) found_user_tables= true;
+
+ for (uint i= 0; i < sorted_count; i ++)
+ {
+ sorted= it_sorted ++;
+
+ if (!IS_USER_TABLE(sorted) ||
+ (tmpkeyval(sorted) > tmpkeyval(share)))
+ {
+ /*
+ Insert this share before the current element in
+ the sorted part of the list.
+ */
+ temporary_tables->remove(share);
+
+ if (prev_sorted)
+ {
+ temporary_tables->insert_after(prev_sorted, share);
+ }
+ else
+ {
+ temporary_tables->push_front(share);
+ }
+ break;
+ }
+ prev_sorted= sorted;
+ }
+ it_sorted.rewind();
+ }
+ sorted_count ++;
+ }
+
+ /*
+ We always quote db & table names.
+ */
+ if (found_user_tables &&
+ !(was_quote_show= MY_TEST(variables.option_bits &
+ OPTION_QUOTE_SHOW_CREATE)))
+ {
+ variables.option_bits |= OPTION_QUOTE_SHOW_CREATE;
+ }
+
+ /*
+ Scan sorted temporary tables to generate sequence of DROP.
+ */
+ share= temporary_tables->pop_front();
+ while (share)
+ {
+ if (IS_USER_TABLE(share))
+ {
+ bool save_thread_specific_used= thread_specific_used;
+ my_thread_id save_pseudo_thread_id= variables.pseudo_thread_id;
+ char db_buf[FN_REFLEN];
+ String db(db_buf, sizeof(db_buf), system_charset_info);
+
+ /*
+ Set pseudo_thread_id to be that of the processed table.
+ */
+ variables.pseudo_thread_id= tmpkeyval(share);
+
+ db.copy(share->db.str, share->db.length, system_charset_info);
+ /*
+ Reset s_query() if changed by previous loop.
+ */
+ s_query.length(sizeof(stub) - 1);
+
+ /*
+ Loop forward through all tables that belong to a common database
+ within the sublist of common pseudo_thread_id to create single
+ DROP query.
+ */
+ for (;
+ share && IS_USER_TABLE(share) &&
+ tmpkeyval(share) == variables.pseudo_thread_id &&
+ share->db.length == db.length() &&
+ memcmp(share->db.str, db.ptr(), db.length()) == 0;
+ /* Get the next TABLE_SHARE in the list. */
+ share= temporary_tables->pop_front())
+ {
+ /*
+ We are going to add ` around the table names and possible more
+ due to special characters.
+ */
+ append_identifier(this, &s_query, share->table_name.str,
+ share->table_name.length);
+ s_query.append(',');
+ rm_temporary_table(share->db_type(), share->path.str);
+ free_table_share(share);
+ my_free(share);
+ }
+
+ clear_error();
+ CHARSET_INFO *cs_save= variables.character_set_client;
+ variables.character_set_client= system_charset_info;
+ thread_specific_used= true;
+
+ Query_log_event qinfo(this, s_query.ptr(),
+ s_query.length() - 1 /* to remove trailing ',' */,
+ false, true, false, 0);
+ qinfo.db= db.ptr();
+ qinfo.db_len= db.length();
+ variables.character_set_client= cs_save;
+
+ get_stmt_da()->set_overwrite_status(true);
+ transaction.stmt.mark_dropped_temp_table();
+ if ((error= (mysql_bin_log.write(&qinfo) || error)))
+ {
+ /*
+ If we're here following THD::cleanup, thence the connection
+ has been closed already. So lets print a message to the
+ error log instead of pushing yet another error into the
+ stmt_da.
+
+ Also, we keep the error flag so that we propagate the error
+ up in the stack. This way, if we're the SQL thread we notice
+ that THD::close_tables failed. (Actually, the SQL
+ thread only calls THD::close_tables while applying
+ old Start_log_event_v3 events.)
+ */
+ sql_print_error("Failed to write the DROP statement for "
+ "temporary tables to binary log");
+ }
+
+ get_stmt_da()->set_overwrite_status(false);
+ variables.pseudo_thread_id= save_pseudo_thread_id;
+ thread_specific_used= save_thread_specific_used;
+ }
+ else
+ {
+ free_tmp_table_share(share, true);
+ /* Get the next TABLE_SHARE in the list. */
+ share= temporary_tables->pop_front();
+ }
+ }
+
+ if (!was_quote_show)
+ {
+ /*
+ Restore option.
+ */
+ variables.option_bits&= ~OPTION_QUOTE_SHOW_CREATE;
+ }
+
+ DBUG_RETURN(error);
+}
+
+
+/**
+ Delete the files and free the specified table share.
+
+ @param share [IN] TABLE_SHARE to free
+ @param delete_table [IN] Whether to delete the table files?
+
+ @return void
+*/
+void THD::free_tmp_table_share(TMP_TABLE_SHARE *share, bool delete_table)
+{
+ DBUG_ENTER("THD::free_tmp_table_share");
+
+ if (delete_table)
+ {
+ rm_temporary_table(share->db_type(), share->path.str);
+ }
+ free_table_share(share);
+ my_free(share);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Free the specified table object.
+
+ @param table [IN] Table object to free.
+
+ @return void
+*/
+void THD::free_temporary_table(TABLE *table)
+{
+ DBUG_ENTER("THD::free_temporary_table");
+
+ /*
+ If LOCK TABLES list is not empty and contains this table, unlock the table
+ and remove the table from this list.
+ */
+ mysql_lock_remove(this, lock, table);
+
+ close_temporary_table(table);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ On replication slave, acquire the Relay_log_info's data_lock and use slave
+ temporary tables.
+
+ @return true Lock acquired
+ false Lock wasn't acquired
+*/
+bool THD::lock_temporary_tables()
+{
+ DBUG_ENTER("THD::lock_temporary_tables");
+
+ /* Do not proceed if a lock has already been taken. */
+ if (m_tmp_tables_locked)
+ {
+ DBUG_RETURN(false);
+ }
+
+ if (rgi_slave)
+ {
+ mysql_mutex_lock(&rgi_slave->rli->data_lock);
+ temporary_tables= rgi_slave->rli->save_temporary_tables;
+ m_tmp_tables_locked= true;
+ }
+
+ DBUG_RETURN(m_tmp_tables_locked);
+}
+
+
+/**
+ On replication slave, release the Relay_log_info::data_lock previously
+ acquired to use slave temporary tables.
+
+ @return void
+*/
+void THD::unlock_temporary_tables()
+{
+ DBUG_ENTER("THD::unlock_temporary_tables");
+
+ if (!m_tmp_tables_locked)
+ {
+ DBUG_VOID_RETURN;
+ }
+
+ if (rgi_slave)
+ {
+ rgi_slave->rli->save_temporary_tables= temporary_tables;
+ temporary_tables= NULL; /* Safety */
+ mysql_mutex_unlock(&rgi_slave->rli->data_lock);
+ m_tmp_tables_locked= false;
+ }
+
+ DBUG_VOID_RETURN;
+}
+
diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc
index b82d29e51f4..cbed769a424 100644
--- a/sql/thr_malloc.cc
+++ b/sql/thr_malloc.cc
@@ -66,56 +66,7 @@ void init_sql_alloc(MEM_ROOT *mem_root, uint block_size, uint pre_alloc,
}
-#ifndef MYSQL_CLIENT
-void *sql_alloc(size_t Size)
-{
- MEM_ROOT *root= *my_pthread_getspecific_ptr(MEM_ROOT**,THR_MALLOC);
- return alloc_root(root,Size);
-}
-#endif
-
-
-void *sql_calloc(size_t size)
-{
- void *ptr;
- if ((ptr=sql_alloc(size)))
- bzero(ptr,size);
- return ptr;
-}
-
-
-char *sql_strdup(const char *str)
-{
- size_t len= strlen(str)+1;
- char *pos;
- if ((pos= (char*) sql_alloc(len)))
- memcpy(pos,str,len);
- return pos;
-}
-
-
-char *sql_strmake(const char *str, size_t len)
-{
- char *pos;
- if ((pos= (char*) sql_alloc(len+1)))
- {
- memcpy(pos,str,len);
- pos[len]=0;
- }
- return pos;
-}
-
-
-void* sql_memdup(const void *ptr, size_t len)
-{
- void *pos;
- if ((pos= sql_alloc(len)))
- memcpy(pos,ptr,len);
- return pos;
-}
-
-
-char *sql_strmake_with_convert(const char *str, size_t arg_length,
+char *sql_strmake_with_convert(THD *thd, const char *str, size_t arg_length,
CHARSET_INFO *from_cs,
size_t max_res_length,
CHARSET_INFO *to_cs, size_t *result_length)
@@ -125,7 +76,7 @@ char *sql_strmake_with_convert(const char *str, size_t arg_length,
max_res_length--; // Reserve place for end null
set_if_smaller(new_length, max_res_length);
- if (!(pos= (char*) sql_alloc(new_length+1)))
+ if (!(pos= (char*) thd->alloc(new_length + 1)))
return pos; // Error
if ((from_cs == &my_charset_bin) || (to_cs == &my_charset_bin))
diff --git a/sql/thr_malloc.h b/sql/thr_malloc.h
index 0b17c5cdaf1..fc23c87c9fd 100644
--- a/sql/thr_malloc.h
+++ b/sql/thr_malloc.h
@@ -22,12 +22,7 @@ typedef struct st_mem_root MEM_ROOT;
void init_sql_alloc(MEM_ROOT *root, uint block_size, uint pre_alloc_size,
myf my_flags);
-void *sql_alloc(size_t);
-void *sql_calloc(size_t);
-char *sql_strdup(const char *str);
-char *sql_strmake(const char *str, size_t len);
-void *sql_memdup(const void * ptr, size_t size);
-char *sql_strmake_with_convert(const char *str, size_t arg_length,
+char *sql_strmake_with_convert(THD *thd, const char *str, size_t arg_length,
CHARSET_INFO *from_cs,
size_t max_res_length,
CHARSET_INFO *to_cs, size_t *result_length);
diff --git a/sql/threadpool.h b/sql/threadpool.h
index 719a3878ebd..ba17dc042c2 100644
--- a/sql/threadpool.h
+++ b/sql/threadpool.h
@@ -23,29 +23,19 @@ extern uint threadpool_max_size;
extern uint threadpool_stall_limit; /* time interval in 10 ms units for stall checks*/
extern uint threadpool_max_threads; /* Maximum threads in pool */
extern uint threadpool_oversubscribe; /* Maximum active threads in group */
+extern uint threadpool_prio_kickup_timer; /* Time before low prio item gets prio boost */
+#ifdef _WIN32
+extern uint threadpool_mode; /* Thread pool implementation , windows or generic */
+#define TP_MODE_WINDOWS 0
+#define TP_MODE_GENERIC 1
+#endif
+struct TP_connection;
+extern void tp_callback(TP_connection *c);
+extern void tp_timeout_handler(TP_connection *c);
-/* Common thread pool routines, suitable for different implementations */
-extern void threadpool_cleanup_connection(THD *thd);
-extern void threadpool_remove_connection(THD *thd);
-extern int threadpool_process_request(THD *thd);
-extern int threadpool_add_connection(THD *thd);
-/*
- Functions used by scheduler.
- OS-specific implementations are in
- threadpool_unix.cc or threadpool_win.cc
-*/
-extern bool tp_init();
-extern void tp_add_connection(THD*);
-extern void tp_wait_begin(THD *, int);
-extern void tp_wait_end(THD*);
-extern void tp_post_kill_notification(THD *thd);
-extern void tp_end(void);
-
-/* Used in SHOW for threadpool_idle_thread_count */
-extern int tp_get_idle_thread_count();
/*
Threadpool statistics
@@ -64,9 +54,103 @@ extern void tp_set_min_threads(uint val);
extern void tp_set_max_threads(uint val);
extern void tp_set_threadpool_size(uint val);
extern void tp_set_threadpool_stall_limit(uint val);
+extern int tp_get_idle_thread_count();
+extern int tp_get_thread_count();
/* Activate threadpool scheduler */
extern void tp_scheduler(void);
extern int show_threadpool_idle_threads(THD *thd, SHOW_VAR *var, char *buff,
enum enum_var_type scope);
+
+enum TP_PRIORITY {
+ TP_PRIORITY_HIGH,
+ TP_PRIORITY_LOW,
+ TP_PRIORITY_AUTO
+};
+
+
+enum TP_STATE
+{
+ TP_STATE_IDLE,
+ TP_STATE_RUNNING,
+};
+
+/*
+ Connection structure, encapsulates THD + structures for asynchronous
+ IO and pool.
+
+ Platform specific parts are specified in subclasses called connection_t,
+ inside threadpool_win.cc and threadpool_unix.cc
+*/
+
+struct TP_connection
+{
+ THD* thd;
+ CONNECT* connect;
+ TP_STATE state;
+ TP_PRIORITY priority;
+ TP_connection(CONNECT *c) :
+ thd(0),
+ connect(c),
+ state(TP_STATE_IDLE),
+ priority(TP_PRIORITY_HIGH)
+ {}
+
+ virtual ~TP_connection()
+ {};
+
+ /* Initialize io structures windows threadpool, epoll etc */
+ virtual int init() = 0;
+
+ virtual void set_io_timeout(int sec) = 0;
+
+ /* Read for the next client command (async) with specified timeout */
+ virtual int start_io() = 0;
+
+ virtual void wait_begin(int type)= 0;
+ virtual void wait_end() = 0;
+
+};
+
+
+struct TP_pool
+{
+ virtual ~TP_pool(){};
+ virtual int init()= 0;
+ virtual TP_connection *new_connection(CONNECT *)= 0;
+ virtual void add(TP_connection *c)= 0;
+ virtual int set_max_threads(uint){ return 0; }
+ virtual int set_min_threads(uint){ return 0; }
+ virtual int set_pool_size(uint){ return 0; }
+ virtual int set_idle_timeout(uint){ return 0; }
+ virtual int set_oversubscribe(uint){ return 0; }
+ virtual int set_stall_limit(uint){ return 0; }
+ virtual int get_thread_count() { return tp_stats.num_worker_threads; }
+ virtual int get_idle_thread_count(){ return 0; }
+};
+
+#ifdef _WIN32
+struct TP_pool_win:TP_pool
+{
+ TP_pool_win();
+ virtual int init();
+ virtual ~TP_pool_win();
+ virtual TP_connection *new_connection(CONNECT *c);
+ virtual void add(TP_connection *);
+ virtual int set_max_threads(uint);
+ virtual int set_min_threads(uint);
+};
+#endif
+
+struct TP_pool_generic :TP_pool
+{
+ TP_pool_generic();
+ ~TP_pool_generic();
+ virtual int init();
+ virtual TP_connection *new_connection(CONNECT *c);
+ virtual void add(TP_connection *);
+ virtual int set_pool_size(uint);
+ virtual int set_stall_limit(uint);
+ virtual int get_idle_thread_count();
+};
diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc
index b99346ee106..598951da406 100644
--- a/sql/threadpool_common.cc
+++ b/sql/threadpool_common.cc
@@ -34,14 +34,25 @@ uint threadpool_max_size;
uint threadpool_stall_limit;
uint threadpool_max_threads;
uint threadpool_oversubscribe;
+uint threadpool_mode;
+uint threadpool_prio_kickup_timer;
/* Stats */
TP_STATISTICS tp_stats;
+static void threadpool_remove_connection(THD *thd);
+static int threadpool_process_request(THD *thd);
+static THD* threadpool_add_connection(CONNECT *connect, void *scheduler_data);
+
extern "C" pthread_key(struct st_my_thread_var*, THR_KEY_mysys);
extern bool do_command(THD*);
+static inline TP_connection *get_TP_connection(THD *thd)
+{
+ return (TP_connection *)thd->event_scheduler.data;
+}
+
/*
Worker threads contexts, and THD contexts.
=========================================
@@ -86,7 +97,6 @@ struct Worker_thread_context
#endif
pthread_setspecific(THR_KEY_mysys,mysys_var);
pthread_setspecific(THR_THD, 0);
- pthread_setspecific(THR_MALLOC, 0);
}
};
@@ -129,7 +139,7 @@ static inline void set_thd_idle(THD *thd)
/*
Attach/associate the connection with the OS thread,
*/
-static bool thread_attach(THD* thd)
+static void thread_attach(THD* thd)
{
pthread_setspecific(THR_KEY_mysys,thd->mysys_var);
thd->thread_stack=(char*)&thd;
@@ -138,16 +148,81 @@ static bool thread_attach(THD* thd)
PSI_THREAD_CALL(set_thread)(thd->event_scheduler.m_psi);
#endif
mysql_socket_set_thread_owner(thd->net.vio->mysql_socket);
- return 0;
+}
+
+/*
+ Determine connection priority, using current
+ transaction state and 'threadpool_priority' variable value.
+*/
+static TP_PRIORITY get_priority(TP_connection *c)
+{
+ DBUG_ASSERT(c->thd == current_thd);
+ TP_PRIORITY prio= (TP_PRIORITY)c->thd->variables.threadpool_priority;
+ if (prio == TP_PRIORITY_AUTO)
+ {
+ return c->thd->transaction.is_active() ? TP_PRIORITY_HIGH : TP_PRIORITY_LOW;
+ }
+ return prio;
}
-int threadpool_add_connection(THD *thd)
+void tp_callback(TP_connection *c)
{
- int retval=1;
+ DBUG_ASSERT(c);
+
Worker_thread_context worker_context;
worker_context.save();
+ THD *thd= c->thd;
+
+ c->state = TP_STATE_RUNNING;
+
+ if (!thd)
+ {
+ /* No THD, need to login first. */
+ DBUG_ASSERT(c->connect);
+ thd= c->thd= threadpool_add_connection(c->connect, c);
+ if (!thd)
+ {
+ /* Bail out on connect error.*/
+ goto error;
+ }
+ c->connect= 0;
+ }
+ else if (threadpool_process_request(thd))
+ {
+ /* QUIT or an error occurred. */
+ goto error;
+ }
+
+ /* Set priority */
+ c->priority= get_priority(c);
+
+ /* Read next command from client. */
+ c->set_io_timeout(thd->variables.net_wait_timeout);
+ c->state= TP_STATE_IDLE;
+ if (c->start_io())
+ goto error;
+
+ worker_context.restore();
+ return;
+
+error:
+ c->thd= 0;
+ delete c;
+
+ if (thd)
+ {
+ threadpool_remove_connection(thd);
+ }
+ worker_context.restore();
+}
+
+
+static THD* threadpool_add_connection(CONNECT *connect, void *scheduler_data)
+{
+ THD *thd= NULL;
+
/*
Create a new connection context: mysys_thread_var and PSI thread
Store them in THD.
@@ -155,13 +230,28 @@ int threadpool_add_connection(THD *thd)
pthread_setspecific(THR_KEY_mysys, 0);
my_thread_init();
- thd->mysys_var= (st_my_thread_var *)pthread_getspecific(THR_KEY_mysys);
- if (!thd->mysys_var)
+ st_my_thread_var* mysys_var= (st_my_thread_var *)pthread_getspecific(THR_KEY_mysys);
+ if (!mysys_var ||!(thd= connect->create_thd(NULL)))
{
/* Out of memory? */
- worker_context.restore();
- return 1;
+ connect->close_and_delete();
+ if (mysys_var)
+ {
+#ifdef HAVE_PSI_INTERFACE
+ /*
+ current PSI is still from worker thread.
+ Set to 0, to avoid premature cleanup by my_thread_end
+ */
+ if (PSI_server) PSI_server->set_thread(0);
+#endif
+ my_thread_end();
+ }
+ return NULL;
}
+ delete connect;
+ add_to_active_threads(thd);
+ thd->mysys_var= mysys_var;
+ thd->event_scheduler.data= scheduler_data;
/* Create new PSI thread for use with the THD. */
#ifdef HAVE_PSI_THREAD_INTERFACE
@@ -191,55 +281,57 @@ int threadpool_add_connection(THD *thd)
if (!thd_is_connection_alive(thd))
goto end;
- retval= 0;
thd->skip_wait_timeout= true;
set_thd_idle(thd);
+ return thd;
end:
- worker_context.restore();
- return retval;
+ threadpool_remove_connection(thd);
+ return NULL;
}
-/*
- threadpool_cleanup_connection() does the bulk of connection shutdown work.
- Usually called from threadpool_remove_connection(), but rarely it might
- be called also in the main polling thread if connection initialization fails.
-*/
-void threadpool_cleanup_connection(THD *thd)
+
+static void threadpool_remove_connection(THD *thd)
{
+ thread_attach(thd);
+ thd->event_scheduler.data= 0;
thd->net.reading_or_writing = 0;
end_connection(thd);
close_connection(thd, 0);
unlink_thd(thd);
- mysql_cond_broadcast(&COND_thread_count);
-}
-
+ delete thd;
-void threadpool_remove_connection(THD *thd)
-{
- Worker_thread_context worker_context;
- worker_context.save();
- thread_attach(thd);
-
- threadpool_cleanup_connection(thd);
/*
Free resources associated with this connection:
mysys thread_var and PSI thread.
*/
my_thread_end();
+}
- worker_context.restore();
+
+/*
+ Ensure that proper error message is sent to client,
+ and "aborted" message appears in the log in case of
+ wait timeout.
+
+ See also timeout handling in net_serv.cc
+*/
+static void handle_wait_timeout(THD *thd)
+{
+ thd->get_stmt_da()->reset_diagnostics_area();
+ thd->reset_killed();
+ my_error(ER_NET_READ_INTERRUPTED, MYF(0));
+ thd->net.last_errno= ER_NET_READ_INTERRUPTED;
+ thd->net.error= 2;
}
+
/**
Process a single client request or a single batch.
*/
-int threadpool_process_request(THD *thd)
+static int threadpool_process_request(THD *thd)
{
int retval= 0;
- Worker_thread_context worker_context;
- worker_context.save();
-
thread_attach(thd);
if (thd->killed >= KILL_CONNECTION)
@@ -249,12 +341,15 @@ int threadpool_process_request(THD *thd)
or KILL command. Return error.
*/
retval= 1;
+ if(thd->killed == KILL_WAIT_TIMEOUT)
+ handle_wait_timeout(thd);
goto end;
}
/*
- In the loop below, the flow is essentially the copy of thead-per-connections
+ In the loop below, the flow is essentially the copy of
+ thread-per-connections
logic, see do_handle_one_connection() in sql_connect.c
The goal is to execute a single query, thus the loop is normally executed
@@ -289,23 +384,148 @@ int threadpool_process_request(THD *thd)
}
end:
- worker_context.restore();
return retval;
}
+
+/* Dummy functions, do nothing */
+
+static bool tp_init_new_connection_thread()
+{
+ return 0;
+}
+
+static bool tp_end_thread(THD *, bool)
+{
+ return 0;
+}
+
+static TP_pool *pool;
+
+static bool tp_init()
+{
+
+#ifdef _WIN32
+ if (threadpool_mode == TP_MODE_WINDOWS)
+ pool= new (std::nothrow) TP_pool_win;
+ else
+ pool= new (std::nothrow) TP_pool_generic;
+#else
+ pool= new (std::nothrow) TP_pool_generic;
+#endif
+ if (!pool)
+ return true;
+ if (pool->init())
+ {
+ delete pool;
+ pool= 0;
+ return true;
+ }
+ return false;
+}
+
+static void tp_add_connection(CONNECT *connect)
+{
+ TP_connection *c= pool->new_connection(connect);
+ DBUG_EXECUTE_IF("simulate_failed_connection_1", delete c ; c= 0;);
+ if (c)
+ pool->add(c);
+ else
+ connect->close_and_delete();
+}
+
+int tp_get_idle_thread_count()
+{
+ return pool? pool->get_idle_thread_count(): 0;
+}
+
+int tp_get_thread_count()
+{
+ return pool ? pool->get_thread_count() : 0;
+}
+
+void tp_set_min_threads(uint val)
+{
+ if (pool)
+ pool->set_min_threads(val);
+}
+
+
+void tp_set_max_threads(uint val)
+{
+ if (pool)
+ pool->set_max_threads(val);
+}
+
+void tp_set_threadpool_size(uint val)
+{
+ if (pool)
+ pool->set_pool_size(val);
+}
+
+
+void tp_set_threadpool_stall_limit(uint val)
+{
+ if (pool)
+ pool->set_stall_limit(val);
+}
+
+
+void tp_timeout_handler(TP_connection *c)
+{
+ if (c->state != TP_STATE_IDLE)
+ return;
+ THD *thd=c->thd;
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ thd->set_killed(KILL_WAIT_TIMEOUT);
+ c->priority= TP_PRIORITY_HIGH;
+ post_kill_notification(thd);
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+}
+
+
+static void tp_wait_begin(THD *thd, int type)
+{
+ TP_connection *c = get_TP_connection(thd);
+ if (c)
+ c->wait_begin(type);
+}
+
+
+static void tp_wait_end(THD *thd)
+{
+ TP_connection *c = get_TP_connection(thd);
+ if (c)
+ c->wait_end();
+}
+
+
+static void tp_end()
+{
+ delete pool;
+}
+
+static void tp_post_kill_notification(THD *thd)
+{
+ TP_connection *c= get_TP_connection(thd);
+ if (c)
+ c->priority= TP_PRIORITY_HIGH;
+ post_kill_notification(thd);
+}
+
static scheduler_functions tp_scheduler_functions=
{
0, // max_threads
NULL,
NULL,
tp_init, // init
- NULL, // init_new_connection_thread
+ tp_init_new_connection_thread, // init_new_connection_thread
tp_add_connection, // add_connection
tp_wait_begin, // thd_wait_begin
tp_wait_end, // thd_wait_end
- post_kill_notification, // post_kill_notification
- NULL, // end_thread
+ tp_post_kill_notification, // post kill notification
+ tp_end_thread, // Dummy function
tp_end // end
};
diff --git a/sql/threadpool_unix.cc b/sql/threadpool_generic.cc
index f1133b22cf5..400d072df3c 100644
--- a/sql/threadpool_unix.cc
+++ b/sql/threadpool_generic.cc
@@ -22,6 +22,25 @@
#ifdef HAVE_POOL_OF_THREADS
+#ifdef _WIN32
+/* AIX may define this, too? */
+#define HAVE_IOCP
+#endif
+
+#ifdef HAVE_IOCP
+#define OPTIONAL_IO_POLL_READ_PARAM this
+#else
+#define OPTIONAL_IO_POLL_READ_PARAM 0
+#endif
+
+#ifdef _WIN32
+typedef HANDLE TP_file_handle;
+#else
+typedef int TP_file_handle;
+#define INVALID_HANDLE_VALUE -1
+#endif
+
+
#include <sql_connect.h>
#include <mysqld.h>
#include <debug_sync.h>
@@ -38,10 +57,26 @@ typedef struct kevent native_event;
#elif defined (__sun)
#include <port.h>
typedef port_event_t native_event;
+#elif defined (HAVE_IOCP)
+typedef OVERLAPPED_ENTRY native_event;
#else
#error threadpool is not available on this platform
#endif
+#ifdef _MSC_VER
+#pragma warning (disable : 4312)
+#endif
+
+static void io_poll_close(TP_file_handle fd)
+{
+#ifdef _WIN32
+ CloseHandle(fd);
+#else
+ close(fd);
+#endif
+}
+
+
/** Maximum number of native events a listener can read in one go */
#define MAX_EVENTS 1024
@@ -108,35 +143,52 @@ typedef I_P_List<worker_thread_t, I_P_List_adapter<worker_thread_t,
>
worker_list_t;
-struct connection_t
+struct TP_connection_generic:public TP_connection
{
+ TP_connection_generic(CONNECT *c);
+ ~TP_connection_generic();
+
+ virtual int init(){ return 0; };
+ virtual void set_io_timeout(int sec);
+ virtual int start_io();
+ virtual void wait_begin(int type);
+ virtual void wait_end();
- THD *thd;
thread_group_t *thread_group;
- connection_t *next_in_queue;
- connection_t **prev_in_queue;
+ TP_connection_generic *next_in_queue;
+ TP_connection_generic **prev_in_queue;
ulonglong abs_wait_timeout;
- bool logged_in;
+ ulonglong dequeue_time;
+ TP_file_handle fd;
bool bound_to_poll_descriptor;
- bool waiting;
+ int waiting;
+#ifdef HAVE_IOCP
+ OVERLAPPED overlapped;
+#endif
+#ifdef _WIN32
+ enum_vio_type vio_type;
+#endif
};
-typedef I_P_List<connection_t,
- I_P_List_adapter<connection_t,
- &connection_t::next_in_queue,
- &connection_t::prev_in_queue>,
+
+typedef I_P_List<TP_connection_generic,
+ I_P_List_adapter<TP_connection_generic,
+ &TP_connection_generic::next_in_queue,
+ &TP_connection_generic::prev_in_queue>,
I_P_List_null_counter,
- I_P_List_fast_push_back<connection_t> >
+ I_P_List_fast_push_back<TP_connection_generic> >
connection_queue_t;
+const int NQUEUES=2; /* We have high and low priority queues*/
+
struct thread_group_t
{
mysql_mutex_t mutex;
- connection_queue_t queue;
+ connection_queue_t queues[NQUEUES];
worker_list_t waiting_threads;
worker_thread_t *listener;
pthread_attr_t *pthread_attr;
- int pollfd;
+ TP_file_handle pollfd;
int thread_count;
int active_thread_count;
int connection_count;
@@ -146,9 +198,8 @@ struct thread_group_t
ulonglong last_thread_creation_time;
int shutdown_pipe[2];
bool shutdown;
- bool stalled;
-
-} MY_ALIGNED(512);
+ bool stalled;
+} MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE);
static thread_group_t *all_groups;
static uint group_count;
@@ -174,15 +225,13 @@ struct pool_timer_t
static pool_timer_t pool_timer;
-static void queue_put(thread_group_t *thread_group, connection_t *connection);
+static void queue_put(thread_group_t *thread_group, TP_connection_generic *connection);
+static void queue_put(thread_group_t *thread_group, native_event *ev, int cnt);
static int wake_thread(thread_group_t *thread_group);
-static void handle_event(connection_t *connection);
static int wake_or_create_thread(thread_group_t *thread_group);
static int create_worker(thread_group_t *thread_group);
static void *worker_main(void *param);
static void check_stall(thread_group_t *thread_group);
-static void connection_abort(connection_t *connection);
-static void set_wait_timeout(connection_t *connection);
static void set_next_timeout_check(ulonglong abstime);
static void print_pool_blocked_message(bool);
@@ -193,12 +242,12 @@ static void print_pool_blocked_message(bool);
This maps to different APIs on different Unixes.
Supported are currently Linux with epoll, Solaris with event ports,
- OSX and BSD with kevent. All those API's are used with one-shot flags
+ OSX and BSD with kevent, Windows with IOCP. All those APIs are used with one-shot flags
(the event is signalled once client has written something into the socket,
then socket is removed from the "poll-set" until the command is finished,
and we need to re-arm/re-register socket)
- No implementation for poll/select/AIO is currently provided.
+ No implementation for poll/select is currently provided.
The API closely resembles all of the above mentioned platform APIs
and consists of following functions.
@@ -207,21 +256,21 @@ static void print_pool_blocked_message(bool);
Creates an io_poll descriptor
On Linux: epoll_create()
- - io_poll_associate_fd(int poll_fd, int fd, void *data)
+ - io_poll_associate_fd(int poll_fd, TP_file_handle fd, void *data, void *opt)
Associate file descriptor with io poll descriptor
On Linux : epoll_ctl(..EPOLL_CTL_ADD))
- - io_poll_disassociate_fd(int pollfd, int fd)
+ - io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
Associate file descriptor with io poll descriptor
On Linux: epoll_ctl(..EPOLL_CTL_DEL)
- - io_poll_start_read(int poll_fd,int fd, void *data)
+ - io_poll_start_read(int poll_fd,int fd, void *data, void *opt)
The same as io_poll_associate_fd(), but cannot be used before
io_poll_associate_fd() was called.
On Linux : epoll_ctl(..EPOLL_CTL_MOD)
- - io_poll_wait (int pollfd, native_event *native_events, int maxevents,
+ - io_poll_wait (TP_file_handle pollfd, native_event *native_events, int maxevents,
int timeout_ms)
wait until one or more descriptors added with io_poll_associate_fd()
@@ -238,13 +287,13 @@ static void print_pool_blocked_message(bool);
/* Early 2.6 kernel did not have EPOLLRDHUP */
#define EPOLLRDHUP 0
#endif
-static int io_poll_create()
+static TP_file_handle io_poll_create()
{
return epoll_create(1);
}
-int io_poll_associate_fd(int pollfd, int fd, void *data)
+int io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data, void*)
{
struct epoll_event ev;
ev.data.u64= 0; /* Keep valgrind happy */
@@ -255,7 +304,7 @@ int io_poll_associate_fd(int pollfd, int fd, void *data)
-int io_poll_start_read(int pollfd, int fd, void *data)
+int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *data, void *)
{
struct epoll_event ev;
ev.data.u64= 0; /* Keep valgrind happy */
@@ -264,7 +313,7 @@ int io_poll_start_read(int pollfd, int fd, void *data)
return epoll_ctl(pollfd, EPOLL_CTL_MOD, fd, &ev);
}
-int io_poll_disassociate_fd(int pollfd, int fd)
+int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
{
struct epoll_event ev;
return epoll_ctl(pollfd, EPOLL_CTL_DEL, fd, &ev);
@@ -276,7 +325,7 @@ int io_poll_disassociate_fd(int pollfd, int fd)
NOTE - in case of EINTR, it restarts with original timeout. Since we use
either infinite or 0 timeouts, this is not critical
*/
-int io_poll_wait(int pollfd, native_event *native_events, int maxevents,
+int io_poll_wait(TP_file_handle pollfd, native_event *native_events, int maxevents,
int timeout_ms)
{
int ret;
@@ -309,12 +358,12 @@ static void *native_event_get_userdata(native_event *event)
#endif
-int io_poll_create()
+TP_file_handle io_poll_create()
{
return kqueue();
}
-int io_poll_start_read(int pollfd, int fd, void *data)
+int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *data,void *)
{
struct kevent ke;
MY_EV_SET(&ke, fd, EVFILT_READ, EV_ADD|EV_ONESHOT,
@@ -323,16 +372,16 @@ int io_poll_start_read(int pollfd, int fd, void *data)
}
-int io_poll_associate_fd(int pollfd, int fd, void *data)
+int io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data,void *)
{
struct kevent ke;
MY_EV_SET(&ke, fd, EVFILT_READ, EV_ADD|EV_ONESHOT,
0, 0, data);
- return io_poll_start_read(pollfd,fd, data);
+ return io_poll_start_read(pollfd,fd, data, 0);
}
-int io_poll_disassociate_fd(int pollfd, int fd)
+int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
{
struct kevent ke;
MY_EV_SET(&ke,fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
@@ -340,7 +389,7 @@ int io_poll_disassociate_fd(int pollfd, int fd)
}
-int io_poll_wait(int pollfd, struct kevent *events, int maxevents, int timeout_ms)
+int io_poll_wait(TP_file_handle pollfd, struct kevent *events, int maxevents, int timeout_ms)
{
struct timespec ts;
int ret;
@@ -365,27 +414,27 @@ static void* native_event_get_userdata(native_event *event)
#elif defined (__sun)
-static int io_poll_create()
+static TP_file_handle io_poll_create()
{
return port_create();
}
-int io_poll_start_read(int pollfd, int fd, void *data)
+int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *data, void *)
{
return port_associate(pollfd, PORT_SOURCE_FD, fd, POLLIN, data);
}
-static int io_poll_associate_fd(int pollfd, int fd, void *data)
+static int io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data, void *)
{
- return io_poll_start_read(pollfd, fd, data);
+ return io_poll_start_read(pollfd, fd, data, 0);
}
-int io_poll_disassociate_fd(int pollfd, int fd)
+int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
{
return port_dissociate(pollfd, PORT_SOURCE_FD, fd);
}
-int io_poll_wait(int pollfd, native_event *events, int maxevents, int timeout_ms)
+int io_poll_wait(TP_file_handle pollfd, native_event *events, int maxevents, int timeout_ms)
{
struct timespec ts;
int ret;
@@ -409,23 +458,122 @@ static void* native_event_get_userdata(native_event *event)
{
return event->portev_user;
}
+
+#elif defined(HAVE_IOCP)
+
+
+static TP_file_handle io_poll_create()
+{
+ return CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 0);
+}
+
+
+int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *, void *opt)
+{
+ static char c;
+ TP_connection_generic *con= (TP_connection_generic *)opt;
+ OVERLAPPED *overlapped= &con->overlapped;
+ if (con->vio_type == VIO_TYPE_NAMEDPIPE)
+ {
+ if (ReadFile(fd, &c, 0, NULL, overlapped))
+ return 0;
+ }
+ else
+ {
+ WSABUF buf;
+ buf.buf= &c;
+ buf.len= 0;
+ DWORD flags=0;
+
+ if (WSARecv((SOCKET)fd, &buf, 1,NULL, &flags,overlapped, NULL) == 0)
+ return 0;
+ }
+
+ if (GetLastError() == ERROR_IO_PENDING)
+ return 0;
+
+ return 1;
+}
+
+
+static int io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data, void *opt)
+{
+ HANDLE h= CreateIoCompletionPort(fd, pollfd, (ULONG_PTR)data, 0);
+ if (!h)
+ return -1;
+ return io_poll_start_read(pollfd,fd, 0, opt);
+}
+
+
+int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
+{
+ /* Not possible to unbind/rebind file descriptor in IOCP. */
+ return 0;
+}
+
+
+int io_poll_wait(TP_file_handle pollfd, native_event *events, int maxevents, int timeout_ms)
+{
+ ULONG n;
+ BOOL ok = GetQueuedCompletionStatusEx(pollfd, events,
+ maxevents, &n, timeout_ms, FALSE);
+
+ return ok ? (int)n : -1;
+}
+
+
+static void* native_event_get_userdata(native_event *event)
+{
+ return (void *)event->lpCompletionKey;
+}
#endif
/* Dequeue element from a workqueue */
-static connection_t *queue_get(thread_group_t *thread_group)
+static TP_connection_generic *queue_get(thread_group_t *thread_group)
{
DBUG_ENTER("queue_get");
thread_group->queue_event_count++;
- connection_t *c= thread_group->queue.front();
- if (c)
+ TP_connection_generic *c;
+ for (int i=0; i < NQUEUES;i++)
{
- thread_group->queue.remove(c);
+ c= thread_group->queues[i].pop_front();
+ if (c)
+ DBUG_RETURN(c);
}
- DBUG_RETURN(c);
+ DBUG_RETURN(0);
}
+static bool is_queue_empty(thread_group_t *thread_group)
+{
+ for (int i=0; i < NQUEUES; i++)
+ {
+ if (!thread_group->queues[i].is_empty())
+ return false;
+ }
+ return true;
+}
+
+
+static void queue_init(thread_group_t *thread_group)
+{
+ for (int i=0; i < NQUEUES; i++)
+ {
+ thread_group->queues[i].empty();
+ }
+}
+
+static void queue_put(thread_group_t *thread_group, native_event *ev, int cnt)
+{
+ ulonglong now= pool_timer.current_microtime;
+ for(int i=0; i < cnt; i++)
+ {
+ TP_connection_generic *c = (TP_connection_generic *)native_event_get_userdata(&ev[i]);
+ c->dequeue_time= now;
+ thread_group->queues[c->priority].push_back(c);
+ }
+}
/*
Handle wait timeout :
@@ -449,7 +597,7 @@ static void timeout_check(pool_timer_t *timer)
if (thd->net.reading_or_writing != 1)
continue;
- connection_t *connection= (connection_t *)thd->event_scheduler.data;
+ TP_connection_generic *connection= (TP_connection_generic *)thd->event_scheduler.data;
if (!connection)
{
/*
@@ -461,11 +609,7 @@ static void timeout_check(pool_timer_t *timer)
if(connection->abs_wait_timeout < timer->current_microtime)
{
- /* Wait timeout exceeded, kill connection. */
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->set_killed(KILL_CONNECTION);
- post_kill_notification(thd);
- mysql_mutex_unlock(&thd->LOCK_thd_data);
+ tp_timeout_handler(connection);
}
else
{
@@ -544,10 +688,23 @@ static void* timer_thread(void *param)
void check_stall(thread_group_t *thread_group)
{
- if (mysql_mutex_trylock(&thread_group->mutex) != 0)
+ mysql_mutex_lock(&thread_group->mutex);
+
+ /*
+ Bump priority for the low priority connections that spent too much
+ time in low prio queue.
+ */
+ TP_connection_generic *c;
+ for (;;)
{
- /* Something happens. Don't disturb */
- return;
+ c= thread_group->queues[TP_PRIORITY_LOW].front();
+ if (c && pool_timer.current_microtime - c->dequeue_time > 1000ULL * threadpool_prio_kickup_timer)
+ {
+ thread_group->queues[TP_PRIORITY_LOW].remove(c);
+ thread_group->queues[TP_PRIORITY_HIGH].push_back(c);
+ }
+ else
+ break;
}
/*
@@ -592,7 +749,7 @@ void check_stall(thread_group_t *thread_group)
do wait and indicate that via thd_wait_begin/end callbacks, thread creation
will be faster.
*/
- if (!thread_group->queue.is_empty() && !thread_group->queue_event_count)
+ if (!is_queue_empty(thread_group) && !thread_group->queue_event_count)
{
thread_group->stalled= true;
wake_or_create_thread(thread_group);
@@ -635,11 +792,11 @@ static void stop_timer(pool_timer_t *timer)
@return a ready connection, or NULL on shutdown
*/
-static connection_t * listener(worker_thread_t *current_thread,
+static TP_connection_generic * listener(worker_thread_t *current_thread,
thread_group_t *thread_group)
{
DBUG_ENTER("listener");
- connection_t *retval= NULL;
+ TP_connection_generic *retval= NULL;
for(;;)
{
@@ -706,28 +863,17 @@ static connection_t * listener(worker_thread_t *current_thread,
and wake a worker.
NOTE: Currently nothing is done to detect or prevent long queuing times.
- A solutionc for the future would be to give up "one active thread per
+ A solution for the future would be to give up "one active thread per
group" principle, if events stay in the queue for too long, and just wake
more workers.
*/
- bool listener_picks_event= thread_group->queue.is_empty();
-
- /*
- If listener_picks_event is set, listener thread will handle first event,
- and put the rest into the queue. If listener_pick_event is not set, all
- events go to the queue.
- */
- for(int i=(listener_picks_event)?1:0; i < cnt ; i++)
- {
- connection_t *c= (connection_t *)native_event_get_userdata(&ev[i]);
- thread_group->queue.push_back(c);
- }
-
+ bool listener_picks_event=is_queue_empty(thread_group);
+ queue_put(thread_group, ev, cnt);
if (listener_picks_event)
{
/* Handle the first event. */
- retval= (connection_t *)native_event_get_userdata(&ev[0]);
+ retval= queue_get(thread_group);
mysql_mutex_unlock(&thread_group->mutex);
break;
}
@@ -811,7 +957,7 @@ static int create_worker(thread_group_t *thread_group)
if (!err)
{
thread_group->last_thread_creation_time=microsecond_interval_timer();
- thread_created++;
+ statistic_increment(thread_created,&LOCK_status);
add_thread_count(thread_group, 1);
}
else
@@ -910,10 +1056,10 @@ int thread_group_init(thread_group_t *thread_group, pthread_attr_t* thread_attr)
DBUG_ENTER("thread_group_init");
thread_group->pthread_attr = thread_attr;
mysql_mutex_init(key_group_mutex, &thread_group->mutex, NULL);
- thread_group->pollfd= -1;
+ thread_group->pollfd= INVALID_HANDLE_VALUE;
thread_group->shutdown_pipe[0]= -1;
thread_group->shutdown_pipe[1]= -1;
- thread_group->queue.empty();
+ queue_init(thread_group);
DBUG_RETURN(0);
}
@@ -921,11 +1067,12 @@ int thread_group_init(thread_group_t *thread_group, pthread_attr_t* thread_attr)
void thread_group_destroy(thread_group_t *thread_group)
{
mysql_mutex_destroy(&thread_group->mutex);
- if (thread_group->pollfd != -1)
+ if (thread_group->pollfd != INVALID_HANDLE_VALUE)
{
- close(thread_group->pollfd);
- thread_group->pollfd= -1;
+ io_poll_close(thread_group->pollfd);
+ thread_group->pollfd= INVALID_HANDLE_VALUE;
}
+#ifndef HAVE_IOCP
for(int i=0; i < 2; i++)
{
if(thread_group->shutdown_pipe[i] != -1)
@@ -934,6 +1081,8 @@ void thread_group_destroy(thread_group_t *thread_group)
thread_group->shutdown_pipe[i]= -1;
}
}
+#endif
+
if (my_atomic_add32(&shutdown_group_count, -1) == 1)
my_free(all_groups);
}
@@ -956,7 +1105,32 @@ static int wake_thread(thread_group_t *thread_group)
DBUG_RETURN(1); /* no thread in waiter list => missed wakeup */
}
+/*
+ Wake listener thread (during shutdown)
+ Self-pipe trick is used in most cases, except IOCP.
+*/
+static int wake_listener(thread_group_t *thread_group)
+{
+#ifndef HAVE_IOCP
+ if (pipe(thread_group->shutdown_pipe))
+ {
+ return -1;
+ }
+ /* Wake listener */
+ if (io_poll_associate_fd(thread_group->pollfd,
+ thread_group->shutdown_pipe[0], NULL, NULL))
+ {
+ return -1;
+ }
+ char c= 0;
+ if (write(thread_group->shutdown_pipe[1], &c, 1) < 0)
+ return -1;
+#else
+ PostQueuedCompletionStatus(thread_group->pollfd, 0, 0, 0);
+#endif
+ return 0;
+}
/**
Initiate shutdown for thread group.
@@ -980,28 +1154,13 @@ static void thread_group_close(thread_group_t *thread_group)
thread_group->shutdown= true;
thread_group->listener= NULL;
- if (pipe(thread_group->shutdown_pipe))
- {
- goto end;
- }
-
- /* Wake listener */
- if (io_poll_associate_fd(thread_group->pollfd,
- thread_group->shutdown_pipe[0], NULL))
- {
- goto end;
- }
- {
- char c= 0;
- if (write(thread_group->shutdown_pipe[1], &c, 1) < 0)
- goto end;
- }
+ wake_listener(thread_group);
+
/* Wake all workers. */
while(wake_thread(thread_group) == 0)
{
}
-end:
mysql_mutex_unlock(&thread_group->mutex);
DBUG_VOID_RETURN;
@@ -1016,18 +1175,16 @@ end:
*/
-static void queue_put(thread_group_t *thread_group, connection_t *connection)
+static void queue_put(thread_group_t *thread_group, TP_connection_generic *connection)
{
DBUG_ENTER("queue_put");
- mysql_mutex_lock(&thread_group->mutex);
- thread_group->queue.push_back(connection);
+ connection->dequeue_time= pool_timer.current_microtime;
+ thread_group->queues[connection->priority].push_back(connection);
if (thread_group->active_thread_count == 0)
wake_or_create_thread(thread_group);
- mysql_mutex_unlock(&thread_group->mutex);
-
DBUG_VOID_RETURN;
}
@@ -1062,18 +1219,19 @@ static bool too_many_threads(thread_group_t *thread_group)
NULL is returned if timeout has expired,or on shutdown.
*/
-connection_t *get_event(worker_thread_t *current_thread,
+TP_connection_generic *get_event(worker_thread_t *current_thread,
thread_group_t *thread_group, struct timespec *abstime)
{
DBUG_ENTER("get_event");
- connection_t *connection = NULL;
- int err=0;
+ TP_connection_generic *connection = NULL;
+
mysql_mutex_lock(&thread_group->mutex);
DBUG_ASSERT(thread_group->active_thread_count >= 0);
for(;;)
{
+ int err=0;
bool oversubscribed = too_many_threads(thread_group);
if (thread_group->shutdown)
break;
@@ -1101,22 +1259,27 @@ connection_t *get_event(worker_thread_t *current_thread,
thread_group->listener= NULL;
break;
}
-
+
+
/*
Last thing we try before going to sleep is to
- pick a single event via epoll, without waiting (timeout 0)
+ non-blocking event poll, i.e with timeout = 0.
+ If this returns events, pick one
*/
if (!oversubscribed)
{
- native_event nev;
- if (io_poll_wait(thread_group->pollfd,&nev,1, 0) == 1)
+
+ native_event ev[MAX_EVENTS];
+ int cnt = io_poll_wait(thread_group->pollfd, ev, MAX_EVENTS, 0);
+ if (cnt > 0)
{
- thread_group->io_event_count++;
- connection = (connection_t *)native_event_get_userdata(&nev);
+ queue_put(thread_group, ev, cnt);
+ connection= queue_get(thread_group);
break;
}
}
+
/* And now, finally sleep */
current_thread->woken = false; /* wake() sets this to true */
@@ -1174,9 +1337,9 @@ void wait_begin(thread_group_t *thread_group)
DBUG_ASSERT(thread_group->active_thread_count >=0);
DBUG_ASSERT(thread_group->connection_count > 0);
-
+
if ((thread_group->active_thread_count == 0) &&
- (thread_group->queue.is_empty() || !thread_group->listener))
+ (is_queue_empty(thread_group) || !thread_group->listener))
{
/*
Group might stall while this thread waits, thus wake
@@ -1203,103 +1366,47 @@ void wait_end(thread_group_t *thread_group)
}
-/**
- Allocate/initialize a new connection structure.
-*/
-connection_t *alloc_connection(THD *thd)
+
+TP_connection * TP_pool_generic::new_connection(CONNECT *c)
{
- DBUG_ENTER("alloc_connection");
-
- connection_t* connection = (connection_t *)my_malloc(sizeof(connection_t),0);
- if (connection)
- {
- connection->thd = thd;
- connection->waiting= false;
- connection->logged_in= false;
- connection->bound_to_poll_descriptor= false;
- connection->abs_wait_timeout= ULONGLONG_MAX;
- }
- DBUG_RETURN(connection);
+ return new (std::nothrow) TP_connection_generic(c);
}
-
-
/**
Add a new connection to thread pool..
*/
-void tp_add_connection(THD *thd)
+void TP_pool_generic::add(TP_connection *c)
{
DBUG_ENTER("tp_add_connection");
-
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
- connection_t *connection= alloc_connection(thd);
- if (connection)
- {
- thd->event_scheduler.data= connection;
-
- /* Assign connection to a group. */
- thread_group_t *group=
- &all_groups[thd->thread_id%group_count];
-
- connection->thread_group=group;
-
- mysql_mutex_lock(&group->mutex);
- group->connection_count++;
- mysql_mutex_unlock(&group->mutex);
-
- /*
- Add connection to the work queue.Actual logon
- will be done by a worker thread.
- */
- queue_put(group, connection);
- }
- else
- {
- /* Allocation failed */
- threadpool_cleanup_connection(thd);
- }
- DBUG_VOID_RETURN;
-}
-
-/**
- Terminate connection.
-*/
-
-static void connection_abort(connection_t *connection)
-{
- DBUG_ENTER("connection_abort");
- thread_group_t *group= connection->thread_group;
-
- threadpool_remove_connection(connection->thd);
-
- mysql_mutex_lock(&group->mutex);
- group->connection_count--;
- mysql_mutex_unlock(&group->mutex);
-
- my_free(connection);
+ TP_connection_generic *connection=(TP_connection_generic *)c;
+ thread_group_t *thread_group= connection->thread_group;
+ /*
+ Add connection to the work queue. Actual logon
+ will be done by a worker thread.
+ */
+ mysql_mutex_lock(&thread_group->mutex);
+ queue_put(thread_group, connection);
+ mysql_mutex_unlock(&thread_group->mutex);
DBUG_VOID_RETURN;
}
+
/**
MySQL scheduler callback: wait begin
*/
-void tp_wait_begin(THD *thd, int type)
+void TP_connection_generic::wait_begin(int type)
{
- DBUG_ENTER("tp_wait_begin");
- DBUG_ASSERT(thd);
- connection_t *connection = (connection_t *)thd->event_scheduler.data;
- if (connection)
- {
- DBUG_ASSERT(!connection->waiting);
- connection->waiting= true;
- wait_begin(connection->thread_group);
- }
+ DBUG_ENTER("wait_begin");
+
+ DBUG_ASSERT(!waiting);
+ waiting++;
+ if (waiting == 1)
+ ::wait_begin(thread_group);
DBUG_VOID_RETURN;
}
@@ -1308,18 +1415,13 @@ void tp_wait_begin(THD *thd, int type)
MySQL scheduler callback: wait end
*/
-void tp_wait_end(THD *thd)
+void TP_connection_generic::wait_end()
{
- DBUG_ENTER("tp_wait_end");
- DBUG_ASSERT(thd);
-
- connection_t *connection = (connection_t *)thd->event_scheduler.data;
- if (connection)
- {
- DBUG_ASSERT(connection->waiting);
- connection->waiting = false;
- wait_end(connection->thread_group);
- }
+ DBUG_ENTER("wait_end");
+ DBUG_ASSERT(waiting);
+ waiting--;
+ if (waiting == 0)
+ ::wait_end(thread_group);
DBUG_VOID_RETURN;
}
@@ -1336,12 +1438,51 @@ static void set_next_timeout_check(ulonglong abstime)
DBUG_VOID_RETURN;
}
+TP_connection_generic::TP_connection_generic(CONNECT *c):
+ TP_connection(c),
+ thread_group(0),
+ next_in_queue(0),
+ prev_in_queue(0),
+ abs_wait_timeout(ULONGLONG_MAX),
+ bound_to_poll_descriptor(false),
+ waiting(false)
+#ifdef HAVE_IOCP
+, overlapped()
+#endif
+{
+ DBUG_ASSERT(c->vio);
+
+#ifdef _WIN32
+ vio_type= c->vio->type;
+ fd= (vio_type == VIO_TYPE_NAMEDPIPE) ?
+ c->vio->hPipe: (TP_file_handle)mysql_socket_getfd(c->vio->mysql_socket);
+#else
+ fd= mysql_socket_getfd(c->vio->mysql_socket);
+#endif
+
+ /* Assign connection to a group. */
+ thread_group_t *group=
+ &all_groups[c->thread_id%group_count];
+
+ thread_group=group;
+
+ mysql_mutex_lock(&group->mutex);
+ group->connection_count++;
+ mysql_mutex_unlock(&group->mutex);
+}
+
+TP_connection_generic::~TP_connection_generic()
+{
+ mysql_mutex_lock(&thread_group->mutex);
+ thread_group->connection_count--;
+ mysql_mutex_unlock(&thread_group->mutex);
+}
/**
Set wait timeout for connection.
*/
-static void set_wait_timeout(connection_t *c)
+void TP_connection_generic::set_io_timeout(int timeout_sec)
{
DBUG_ENTER("set_wait_timeout");
/*
@@ -1352,11 +1493,11 @@ static void set_wait_timeout(connection_t *c)
one tick interval.
*/
- c->abs_wait_timeout= pool_timer.current_microtime +
+ abs_wait_timeout= pool_timer.current_microtime +
1000LL*pool_timer.tick_interval +
- 1000000LL*c->thd->variables.net_wait_timeout;
+ 1000000LL*timeout_sec;
- set_next_timeout_check(c->abs_wait_timeout);
+ set_next_timeout_check(abs_wait_timeout);
DBUG_VOID_RETURN;
}
@@ -1368,12 +1509,11 @@ static void set_wait_timeout(connection_t *c)
after thread_pool_size setting.
*/
-static int change_group(connection_t *c,
+static int change_group(TP_connection_generic *c,
thread_group_t *old_group,
thread_group_t *new_group)
{
int ret= 0;
- int fd= mysql_socket_getfd(c->thd->net.vio->mysql_socket);
DBUG_ASSERT(c->thread_group == old_group);
@@ -1381,7 +1521,7 @@ static int change_group(connection_t *c,
mysql_mutex_lock(&old_group->mutex);
if (c->bound_to_poll_descriptor)
{
- io_poll_disassociate_fd(old_group->pollfd,fd);
+ io_poll_disassociate_fd(old_group->pollfd,c->fd);
c->bound_to_poll_descriptor= false;
}
c->thread_group->connection_count--;
@@ -1399,10 +1539,9 @@ static int change_group(connection_t *c,
}
-static int start_io(connection_t *connection)
-{
- int fd = mysql_socket_getfd(connection->thd->net.vio->mysql_socket);
-
+int TP_connection_generic::start_io()
+{
+#ifndef HAVE_IOCP
/*
Usually, connection will stay in the same group for the entire
connection's life. However, we do allow group_count to
@@ -1414,55 +1553,25 @@ static int start_io(connection_t *connection)
on thread_id and current group count, and migrate if necessary.
*/
thread_group_t *group =
- &all_groups[connection->thd->thread_id%group_count];
+ &all_groups[thd->thread_id%group_count];
- if (group != connection->thread_group)
+ if (group != thread_group)
{
- if (change_group(connection, connection->thread_group, group))
+ if (change_group(this, thread_group, group))
return -1;
}
-
+#endif
+
/*
Bind to poll descriptor if not yet done.
*/
- if (!connection->bound_to_poll_descriptor)
+ if (!bound_to_poll_descriptor)
{
- connection->bound_to_poll_descriptor= true;
- return io_poll_associate_fd(group->pollfd, fd, connection);
+ bound_to_poll_descriptor= true;
+ return io_poll_associate_fd(thread_group->pollfd, fd, this, OPTIONAL_IO_POLL_READ_PARAM);
}
- return io_poll_start_read(group->pollfd, fd, connection);
-}
-
-
-
-static void handle_event(connection_t *connection)
-{
-
- DBUG_ENTER("handle_event");
- int err;
-
- if (!connection->logged_in)
- {
- err= threadpool_add_connection(connection->thd);
- connection->logged_in= true;
- }
- else
- {
- err= threadpool_process_request(connection->thd);
- }
-
- if(err)
- goto end;
-
- set_wait_timeout(connection);
- err= start_io(connection);
-
-end:
- if (err)
- connection_abort(connection);
-
- DBUG_VOID_RETURN;
+ return io_poll_start_read(thread_group->pollfd, fd, this, OPTIONAL_IO_POLL_READ_PARAM);
}
@@ -1490,14 +1599,14 @@ static void *worker_main(void *param)
/* Run event loop */
for(;;)
{
- connection_t *connection;
+ TP_connection_generic *connection;
struct timespec ts;
set_timespec(ts,threadpool_idle_timeout);
connection = get_event(&this_thread, thread_group, &ts);
if (!connection)
break;
this_thread.event_count++;
- handle_event(connection);
+ tp_callback(connection);
}
/* Thread shutdown: cleanup per-worker-thread structure. */
@@ -1518,30 +1627,33 @@ static void *worker_main(void *param)
}
-bool tp_init()
+TP_pool_generic::TP_pool_generic()
+{}
+
+int TP_pool_generic::init()
{
- DBUG_ENTER("tp_init");
+ DBUG_ENTER("TP_pool_generic::TP_pool_generic");
threadpool_max_size= MY_MAX(threadpool_size, 128);
all_groups= (thread_group_t *)
my_malloc(sizeof(thread_group_t) * threadpool_max_size, MYF(MY_WME|MY_ZEROFILL));
if (!all_groups)
{
threadpool_max_size= 0;
- DBUG_RETURN(1);
+ sql_print_error("Allocation failed");
+ DBUG_RETURN(-1);
}
- threadpool_started= true;
scheduler_init();
-
+ threadpool_started= true;
for (uint i= 0; i < threadpool_max_size; i++)
{
thread_group_init(&all_groups[i], get_connection_attrib());
}
- tp_set_threadpool_size(threadpool_size);
+ set_pool_size(threadpool_size);
if(group_count == 0)
{
/* Something went wrong */
sql_print_error("Can't set threadpool size to %d",threadpool_size);
- DBUG_RETURN(1);
+ DBUG_RETURN(-1);
}
PSI_register(mutex);
PSI_register(cond);
@@ -1552,8 +1664,7 @@ bool tp_init()
DBUG_RETURN(0);
}
-
-void tp_end()
+TP_pool_generic::~TP_pool_generic()
{
DBUG_ENTER("tp_end");
@@ -1572,45 +1683,41 @@ void tp_end()
/** Ensure that poll descriptors are created when threadpool_size changes */
-
-void tp_set_threadpool_size(uint size)
+int TP_pool_generic::set_pool_size(uint size)
{
bool success= true;
- if (!threadpool_started)
- return;
-
+
for(uint i=0; i< size; i++)
{
thread_group_t *group= &all_groups[i];
mysql_mutex_lock(&group->mutex);
- if (group->pollfd == -1)
+ if (group->pollfd == INVALID_HANDLE_VALUE)
{
group->pollfd= io_poll_create();
- success= (group->pollfd >= 0);
+ success= (group->pollfd != INVALID_HANDLE_VALUE);
if(!success)
{
sql_print_error("io_poll_create() failed, errno=%d\n", errno);
- break;
}
}
- mysql_mutex_unlock(&all_groups[i].mutex);
+ mysql_mutex_unlock(&group->mutex);
if (!success)
{
group_count= i;
- return;
+ return -1;
}
}
group_count= size;
+ return 0;
}
-void tp_set_threadpool_stall_limit(uint limit)
+int TP_pool_generic::set_stall_limit(uint limit)
{
- if (!threadpool_started)
- return;
mysql_mutex_lock(&(pool_timer.mutex));
pool_timer.tick_interval= limit;
mysql_mutex_unlock(&(pool_timer.mutex));
mysql_cond_signal(&(pool_timer.cond));
+ return 0;
}
@@ -1621,10 +1728,10 @@ void tp_set_threadpool_stall_limit(uint limit)
Don't do any locking, it is not required for stats.
*/
-int tp_get_idle_thread_count()
+int TP_pool_generic::get_idle_thread_count()
{
int sum=0;
- for (uint i= 0; i < threadpool_max_size && all_groups[i].pollfd >= 0; i++)
+ for (uint i= 0; i < threadpool_max_size && all_groups[i].pollfd != INVALID_HANDLE_VALUE; i++)
{
sum+= (all_groups[i].thread_count - all_groups[i].active_thread_count);
}
diff --git a/sql/threadpool_win.cc b/sql/threadpool_win.cc
index cb44687f154..855b9b38d78 100644
--- a/sql/threadpool_win.cc
+++ b/sql/threadpool_win.cc
@@ -32,16 +32,6 @@
#include <windows.h>
-/*
- Threadpool API is not available on XP. We still want to compile a single
- version on Windows, but use the latest functionality if available.
- We cannot use threadpool functionality directly, since executable won't
- start on XP and loader will complain about missing symbols.
-
- We solve using the usual way it is done on Windows, i.e with dynamic loading.
- We'll need to load a lot of function, and make this less painful with the
- WEAK_SYMBOL macro below
-*/
/*
WEAK_SYMBOL(return_type, function_name, argument_type1,..,argument_typeN)
@@ -61,107 +51,10 @@
static pFN_##function my_##function = (pFN_##function) \
(GetProcAddress(GetModuleHandle("kernel32"),#function))
-WEAK_SYMBOL(VOID, CancelThreadpoolIo, PTP_IO);
-#define CancelThreadpoolIo my_CancelThreadpoolIo
-
-WEAK_SYMBOL(VOID, CloseThreadpool, PTP_POOL);
-#define CloseThreadpool my_CloseThreadpool
-
-WEAK_SYMBOL(VOID, CloseThreadpoolIo, PTP_IO);
-#define CloseThreadpoolIo my_CloseThreadpoolIo
-
-WEAK_SYMBOL(VOID, CloseThreadpoolTimer,PTP_TIMER);
-#define CloseThreadpoolTimer my_CloseThreadpoolTimer
-
-WEAK_SYMBOL(VOID, CloseThreadpoolWait,PTP_WAIT);
-#define CloseThreadpoolWait my_CloseThreadpoolWait
-
-WEAK_SYMBOL(PTP_POOL, CreateThreadpool,PVOID);
-#define CreateThreadpool my_CreateThreadpool
-
-WEAK_SYMBOL(PTP_IO, CreateThreadpoolIo, HANDLE, PTP_WIN32_IO_CALLBACK, PVOID ,
- PTP_CALLBACK_ENVIRON);
-#define CreateThreadpoolIo my_CreateThreadpoolIo
-
-WEAK_SYMBOL(PTP_TIMER, CreateThreadpoolTimer, PTP_TIMER_CALLBACK ,
- PVOID pv, PTP_CALLBACK_ENVIRON pcbe);
-#define CreateThreadpoolTimer my_CreateThreadpoolTimer
-
-WEAK_SYMBOL(PTP_WAIT, CreateThreadpoolWait, PTP_WAIT_CALLBACK, PVOID,
- PTP_CALLBACK_ENVIRON);
-#define CreateThreadpoolWait my_CreateThreadpoolWait
-
-WEAK_SYMBOL(VOID, DisassociateCurrentThreadFromCallback, PTP_CALLBACK_INSTANCE);
-#define DisassociateCurrentThreadFromCallback my_DisassociateCurrentThreadFromCallback
-
-WEAK_SYMBOL(DWORD, FlsAlloc, PFLS_CALLBACK_FUNCTION);
-#define FlsAlloc my_FlsAlloc
-
-WEAK_SYMBOL(PVOID, FlsGetValue, DWORD);
-#define FlsGetValue my_FlsGetValue
-
-WEAK_SYMBOL(BOOL, FlsSetValue, DWORD, PVOID);
-#define FlsSetValue my_FlsSetValue
-
-WEAK_SYMBOL(VOID, SetThreadpoolThreadMaximum, PTP_POOL, DWORD);
-#define SetThreadpoolThreadMaximum my_SetThreadpoolThreadMaximum
-WEAK_SYMBOL(BOOL, SetThreadpoolThreadMinimum, PTP_POOL, DWORD);
-#define SetThreadpoolThreadMinimum my_SetThreadpoolThreadMinimum
-
-WEAK_SYMBOL(VOID, SetThreadpoolTimer, PTP_TIMER, PFILETIME,DWORD,DWORD);
-#define SetThreadpoolTimer my_SetThreadpoolTimer
-
-WEAK_SYMBOL(VOID, SetThreadpoolWait, PTP_WAIT,HANDLE,PFILETIME);
-#define SetThreadpoolWait my_SetThreadpoolWait
-
-WEAK_SYMBOL(VOID, StartThreadpoolIo, PTP_IO);
-#define StartThreadpoolIo my_StartThreadpoolIo
-
-WEAK_SYMBOL(VOID, WaitForThreadpoolIoCallbacks,PTP_IO, BOOL);
-#define WaitForThreadpoolIoCallbacks my_WaitForThreadpoolIoCallbacks
-
-WEAK_SYMBOL(VOID, WaitForThreadpoolTimerCallbacks, PTP_TIMER, BOOL);
-#define WaitForThreadpoolTimerCallbacks my_WaitForThreadpoolTimerCallbacks
-
-WEAK_SYMBOL(VOID, WaitForThreadpoolWaitCallbacks, PTP_WAIT, BOOL);
-#define WaitForThreadpoolWaitCallbacks my_WaitForThreadpoolWaitCallbacks
-
-WEAK_SYMBOL(BOOL, SetFileCompletionNotificationModes, HANDLE, UCHAR);
-#define SetFileCompletionNotificationModes my_SetFileCompletionNotificationModes
-
-WEAK_SYMBOL(BOOL, TrySubmitThreadpoolCallback, PTP_SIMPLE_CALLBACK pfns,
- PVOID pv,PTP_CALLBACK_ENVIRON pcbe);
-#define TrySubmitThreadpoolCallback my_TrySubmitThreadpoolCallback
-
-WEAK_SYMBOL(PTP_WORK, CreateThreadpoolWork, PTP_WORK_CALLBACK pfnwk, PVOID pv,
- PTP_CALLBACK_ENVIRON pcbe);
-#define CreateThreadpoolWork my_CreateThreadpoolWork
-
-WEAK_SYMBOL(VOID, SubmitThreadpoolWork,PTP_WORK pwk);
-#define SubmitThreadpoolWork my_SubmitThreadpoolWork
-
-WEAK_SYMBOL(VOID, CloseThreadpoolWork, PTP_WORK pwk);
-#define CloseThreadpoolWork my_CloseThreadpoolWork
-
-WEAK_SYMBOL(BOOL, CallbackMayRunLong, PTP_CALLBACK_INSTANCE pci);
-#define CallbackMayRunLong my_CallbackMayRunLong
-
-#if _MSC_VER >= 1600
-/* Stack size manipulation available only on Win7+ /declarations in VS10 */
WEAK_SYMBOL(BOOL, SetThreadpoolStackInformation, PTP_POOL,
PTP_POOL_STACK_INFORMATION);
#define SetThreadpoolStackInformation my_SetThreadpoolStackInformation
-#else /* _MSC_VER < 1600 */
-#define SetThreadpoolCallbackPriority(env,prio)
-typedef enum _TP_CALLBACK_PRIORITY {
- TP_CALLBACK_PRIORITY_HIGH,
- TP_CALLBACK_PRIORITY_NORMAL,
- TP_CALLBACK_PRIORITY_LOW,
- TP_CALLBACK_PRIORITY_INVALID
-} TP_CALLBACK_PRIORITY;
-#endif
-
/* Log a warning */
static void tp_log_warning(const char *msg, const char *fct)
@@ -171,8 +64,9 @@ static void tp_log_warning(const char *msg, const char *fct)
}
-PTP_POOL pool;
-DWORD fls;
+static PTP_POOL pool;
+static TP_CALLBACK_ENVIRON callback_environ;
+static DWORD fls;
static bool skip_completion_port_on_success = false;
@@ -192,13 +86,16 @@ static void CALLBACK timer_callback(PTP_CALLBACK_INSTANCE instance,
static void CALLBACK io_completion_callback(PTP_CALLBACK_INSTANCE instance,
PVOID context, PVOID overlapped, ULONG io_result, ULONG_PTR nbytes, PTP_IO io);
+
+static void CALLBACK work_callback(PTP_CALLBACK_INSTANCE instance, PVOID context, PTP_WORK work);
+
static void CALLBACK shm_read_callback(PTP_CALLBACK_INSTANCE instance,
PVOID Context, PTP_WAIT wait,TP_WAIT_RESULT wait_result);
static void CALLBACK shm_close_callback(PTP_CALLBACK_INSTANCE instance,
PVOID Context, PTP_WAIT wait,TP_WAIT_RESULT wait_result);
-static void check_thread_init();
+static void pre_callback(PVOID context, PTP_CALLBACK_INSTANCE instance);
/* Get current time as Windows time */
static ulonglong now()
@@ -208,72 +105,86 @@ static ulonglong now()
return current_time;
}
-/*
- Connection structure, encapsulates THD + structures for asynchronous
- IO and pool.
-*/
-
-struct connection_t
+struct TP_connection_win:public TP_connection
{
- THD *thd;
+public:
+ TP_connection_win(CONNECT*);
+ ~TP_connection_win();
+ virtual int init();
+ virtual int start_io();
+ virtual void set_io_timeout(int sec);
+ virtual void wait_begin(int type);
+ virtual void wait_end();
+
+ ulonglong timeout;
+ enum_vio_type vio_type;
HANDLE handle;
OVERLAPPED overlapped;
- /* absolute time for wait timeout (as Windows time) */
- volatile ulonglong timeout;
- TP_CALLBACK_ENVIRON callback_environ;
+ PTP_CALLBACK_INSTANCE callback_instance;
PTP_IO io;
PTP_TIMER timer;
PTP_WAIT shm_read;
- /* Callback instance, used to inform treadpool about long callbacks */
- PTP_CALLBACK_INSTANCE callback_instance;
- bool logged_in;
+ PTP_WORK work;
+ bool long_callback;
+
};
+struct TP_connection *new_TP_connection(CONNECT *connect)
+{
+ TP_connection *c = new (std::nothrow) TP_connection_win(connect);
+ if (!c || c->init())
+ {
+ delete c;
+ return 0;
+ }
+ return c;
+}
+
+void TP_pool_win::add(TP_connection *c)
+{
+ SubmitThreadpoolWork(((TP_connection_win *)c)->work);
+}
+
-void init_connection(connection_t *connection)
+TP_connection_win::TP_connection_win(CONNECT *c) :
+ TP_connection(c),
+ timeout(ULONGLONG_MAX),
+ callback_instance(0),
+ io(0),
+ shm_read(0),
+ timer(0),
+ work(0)
{
- connection->logged_in = false;
- connection->handle= 0;
- connection->io= 0;
- connection->shm_read= 0;
- connection->timer= 0;
- connection->logged_in = false;
- connection->timeout= ULONGLONG_MAX;
- connection->callback_instance= 0;
- memset(&connection->overlapped, 0, sizeof(OVERLAPPED));
- InitializeThreadpoolEnvironment(&connection->callback_environ);
- SetThreadpoolCallbackPool(&connection->callback_environ, pool);
- connection->thd = 0;
}
+#define CHECK_ALLOC_ERROR(op) if (!(op)) {tp_log_warning("Allocation failed", #op); DBUG_ASSERT(0); return -1; }
-int init_io(connection_t *connection, THD *thd)
+int TP_connection_win::init()
{
- connection->thd= thd;
- Vio *vio = thd->net.vio;
- switch(vio->type)
+
+ memset(&overlapped, 0, sizeof(OVERLAPPED));
+ Vio *vio = connect->vio;
+ switch ((vio_type = vio->type))
{
- case VIO_TYPE_SSL:
- case VIO_TYPE_TCPIP:
- connection->handle= (HANDLE)mysql_socket_getfd(connection->thd->net.vio->mysql_socket);
- break;
- case VIO_TYPE_NAMEDPIPE:
- connection->handle= (HANDLE)vio->hPipe;
- break;
- case VIO_TYPE_SHARED_MEMORY:
- connection->shm_read= CreateThreadpoolWait(shm_read_callback, connection,
- &connection->callback_environ);
- if (!connection->shm_read)
- {
- tp_log_warning("Allocation failed", "CreateThreadpoolWait");
- return -1;
- }
- break;
- default:
- abort();
+ case VIO_TYPE_SSL:
+ case VIO_TYPE_TCPIP:
+ handle= (HANDLE)mysql_socket_getfd(vio->mysql_socket);
+ break;
+ case VIO_TYPE_NAMEDPIPE:
+ handle= (HANDLE)vio->hPipe;
+ break;
+ case VIO_TYPE_SHARED_MEMORY:
+ handle= vio->event_server_wrote;
+ break;
+ default:
+ abort();
}
- if (connection->handle)
+ if (vio_type == VIO_TYPE_SHARED_MEMORY)
+ {
+ CHECK_ALLOC_ERROR(shm_read= CreateThreadpoolWait(shm_read_callback, this, &callback_environ));
+ }
+ else
{
/* Performance tweaks (s. MSDN documentation)*/
UCHAR flags= FILE_SKIP_SET_EVENT_ON_HANDLE;
@@ -281,25 +192,13 @@ int init_io(connection_t *connection, THD *thd)
{
flags |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS;
}
- (void)SetFileCompletionNotificationModes(connection->handle, flags);
-
+ (void)SetFileCompletionNotificationModes(handle, flags);
/* Assign io completion callback */
- connection->io= CreateThreadpoolIo(connection->handle,
- io_completion_callback, connection, &connection->callback_environ);
- if(!connection->io)
- {
- tp_log_warning("Allocation failed", "CreateThreadpoolWait");
- return -1;
- }
- }
- connection->timer= CreateThreadpoolTimer(timer_callback, connection,
- &connection->callback_environ);
- if (!connection->timer)
- {
- tp_log_warning("Allocation failed", "CreateThreadpoolWait");
- return -1;
+ CHECK_ALLOC_ERROR(io= CreateThreadpoolIo(handle, io_completion_callback, this, &callback_environ));
}
+ CHECK_ALLOC_ERROR(timer= CreateThreadpoolTimer(timer_callback, this, &callback_environ));
+ CHECK_ALLOC_ERROR(work= CreateThreadpoolWork(work_callback, this, &callback_environ));
return 0;
}
@@ -307,9 +206,8 @@ int init_io(connection_t *connection, THD *thd)
/*
Start asynchronous read
*/
-int start_io(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
+int TP_connection_win::start_io()
{
- /* Start async read */
DWORD num_bytes = 0;
static char c;
WSABUF buf;
@@ -319,33 +217,20 @@ int start_io(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
DWORD last_error= 0;
int retval;
- Vio *vio= connection->thd->net.vio;
-
- if (vio->type == VIO_TYPE_SHARED_MEMORY)
- {
- SetThreadpoolWait(connection->shm_read, vio->event_server_wrote, NULL);
- return 0;
- }
- if (vio->type == VIO_CLOSED)
+ if (shm_read)
{
- return -1;
+ SetThreadpoolWait(shm_read, handle, NULL);
+ return 0;
}
-
- DBUG_ASSERT(vio->type == VIO_TYPE_TCPIP ||
- vio->type == VIO_TYPE_SSL ||
- vio->type == VIO_TYPE_NAMEDPIPE);
-
- OVERLAPPED *overlapped= &connection->overlapped;
- PTP_IO io= connection->io;
StartThreadpoolIo(io);
- if (vio->type == VIO_TYPE_TCPIP || vio->type == VIO_TYPE_SSL)
+ if (vio_type == VIO_TYPE_TCPIP || vio_type == VIO_TYPE_SSL)
{
/* Start async io (sockets). */
- if (WSARecv(mysql_socket_getfd(vio->mysql_socket) , &buf, 1, &num_bytes, &flags,
- overlapped, NULL) == 0)
+ if (WSARecv((SOCKET)handle , &buf, 1, &num_bytes, &flags,
+ &overlapped, NULL) == 0)
{
- retval= last_error= 0;
+ retval= last_error= 0;
}
else
{
@@ -356,7 +241,7 @@ int start_io(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
else
{
/* Start async io (named pipe) */
- if (ReadFile(vio->hPipe, &c, 0, &num_bytes ,overlapped))
+ if (ReadFile(handle, &c, 0, &num_bytes,&overlapped))
{
retval= last_error= 0;
}
@@ -377,7 +262,7 @@ int start_io(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
if(skip_completion_port_on_success)
{
CancelThreadpoolIo(io);
- io_completion_callback(instance, connection, overlapped, last_error,
+ io_completion_callback(callback_instance, this, &overlapped, last_error,
num_bytes, io);
}
return 0;
@@ -393,81 +278,81 @@ int start_io(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
return -1;
}
-
-int login(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
-{
- if (threadpool_add_connection(connection->thd) == 0
- && init_io(connection, connection->thd) == 0
- && start_io(connection, instance) == 0)
- {
- return 0;
- }
- return -1;
-}
-
/*
- Recalculate wait timeout, maybe reset timer.
+ Recalculate wait timeout, maybe reset timer.
*/
-void set_wait_timeout(connection_t *connection, ulonglong old_timeout)
+void TP_connection_win::set_io_timeout(int timeout_sec)
{
- ulonglong new_timeout = now() +
- 10000000LL*connection->thd->variables.net_wait_timeout;
+ ulonglong old_timeout= timeout;
+ ulonglong new_timeout = now() + 10000000LL * timeout_sec;
if (new_timeout < old_timeout)
{
- SetThreadpoolTimer(connection->timer, (PFILETIME) &new_timeout, 0, 1000);
+ SetThreadpoolTimer(timer, (PFILETIME)&new_timeout, 0, 1000);
}
- connection->timeout = new_timeout;
+ /* new_timeout > old_timeout case is handled by expiring timer. */
+ timeout = new_timeout;
}
-/* Connection destructor */
-void destroy_connection(connection_t *connection, PTP_CALLBACK_INSTANCE instance)
+TP_connection_win::~TP_connection_win()
{
- if (instance)
- DisassociateCurrentThreadFromCallback(instance);
- if (connection->io)
- {
- WaitForThreadpoolIoCallbacks(connection->io, TRUE);
- CloseThreadpoolIo(connection->io);
- }
+ if (io)
+ CloseThreadpoolIo(io);
- if(connection->shm_read)
- {
- WaitForThreadpoolWaitCallbacks(connection->shm_read, TRUE);
- CloseThreadpoolWait(connection->shm_read);
- }
+ if (shm_read)
+ CloseThreadpoolWait(shm_read);
- if(connection->timer)
+ if (work)
+ CloseThreadpoolWork(work);
+
+ if (timer)
{
- SetThreadpoolTimer(connection->timer, 0, 0, 0);
- WaitForThreadpoolTimerCallbacks(connection->timer, TRUE);
- CloseThreadpoolTimer(connection->timer);
+ WaitForThreadpoolTimerCallbacks(timer, TRUE);
+ CloseThreadpoolTimer(timer);
}
-
- if (connection->thd)
+}
+
+void TP_connection_win::wait_begin(int type)
+{
+
+ /*
+ Signal to the threadpool whenever callback can run long. Currently, binlog
+ waits are a good candidate, its waits are really long
+ */
+ if (type == THD_WAIT_BINLOG)
{
- threadpool_remove_connection(connection->thd);
+ if (!long_callback)
+ {
+ CallbackMayRunLong(callback_instance);
+ long_callback= true;
+ }
}
-
- DestroyThreadpoolEnvironment(&connection->callback_environ);
}
-
+void TP_connection_win::wait_end()
+{
+ /* Do we need to do anything ? */
+}
/*
This function should be called first whenever a callback is invoked in the
threadpool, does my_thread_init() if not yet done
*/
extern ulong thread_created;
-static void check_thread_init()
+static void pre_callback(PVOID context, PTP_CALLBACK_INSTANCE instance)
{
if (FlsGetValue(fls) == NULL)
{
+ /* Running in new worker thread*/
FlsSetValue(fls, (void *)1);
- thread_created++;
+ statistic_increment(thread_created, &LOCK_status);
InterlockedIncrement((volatile long *)&tp_stats.num_worker_threads);
+ my_thread_init();
}
+ TP_connection_win *c = (TP_connection_win *)context;
+ c->callback_instance = instance;
+ c->long_callback = false;
}
@@ -480,68 +365,16 @@ static VOID WINAPI thread_destructor(void *data)
if(data)
{
InterlockedDecrement((volatile long *)&tp_stats.num_worker_threads);
+ my_thread_end();
}
}
-/* Scheduler callback : init */
-bool tp_init(void)
-{
- fls= FlsAlloc(thread_destructor);
- pool= CreateThreadpool(NULL);
- if(!pool)
- {
- sql_print_error("Can't create threadpool. "
- "CreateThreadpool() failed with %d. Likely cause is memory pressure",
- GetLastError());
- exit(1);
- }
-
- if (threadpool_max_threads)
- {
- SetThreadpoolThreadMaximum(pool,threadpool_max_threads);
- }
-
- if (threadpool_min_threads)
- {
- if (!SetThreadpoolThreadMinimum(pool, threadpool_min_threads))
- {
- tp_log_warning( "Can't set threadpool minimum threads",
- "SetThreadpoolThreadMinimum");
- }
- }
- /*
- Control stack size (OS must be Win7 or later, plus corresponding SDK)
- */
-#if _MSC_VER >=1600
- if (SetThreadpoolStackInformation)
- {
- TP_POOL_STACK_INFORMATION stackinfo;
- stackinfo.StackCommit = 0;
- stackinfo.StackReserve = (SIZE_T)my_thread_stack_size;
- if (!SetThreadpoolStackInformation(pool, &stackinfo))
- {
- tp_log_warning("Can't set threadpool stack size",
- "SetThreadpoolStackInformation");
- }
- }
-#endif
-
- return 0;
-}
-
-
-/**
- Scheduler callback : Destroy the scheduler.
-*/
-void tp_end(void)
+static inline void tp_callback(PTP_CALLBACK_INSTANCE instance, PVOID context)
{
- if(pool)
- {
- SetThreadpoolThreadMaximum(pool, 0);
- CloseThreadpool(pool);
- }
+ pre_callback(context, instance);
+ tp_callback((TP_connection *)context);
}
@@ -551,83 +384,42 @@ void tp_end(void)
static VOID CALLBACK io_completion_callback(PTP_CALLBACK_INSTANCE instance,
PVOID context, PVOID overlapped, ULONG io_result, ULONG_PTR nbytes, PTP_IO io)
{
- if(instance)
- {
- check_thread_init();
- }
-
- connection_t *connection = (connection_t*)context;
-
- if (io_result != ERROR_SUCCESS)
- goto error;
-
- THD *thd= connection->thd;
- ulonglong old_timeout = connection->timeout;
- connection->timeout = ULONGLONG_MAX;
- connection->callback_instance= instance;
- if (threadpool_process_request(connection->thd))
- goto error;
-
- set_wait_timeout(connection, old_timeout);
- if(start_io(connection, instance))
- goto error;
-
- return;
-
-error:
- /* Some error has occurred. */
-
- destroy_connection(connection, instance);
- free(connection);
+ TP_connection_win *c= (TP_connection_win *)context;
+ /*
+ Execute high priority connections immediately.
+ 'Yield' in case of low priority connections, i.e SubmitThreadpoolWork (with the same callback)
+ which makes Windows threadpool place the items at the end of its internal work queue.
+ */
+ if (c->priority == TP_PRIORITY_HIGH)
+ tp_callback(instance, context);
+ else
+ SubmitThreadpoolWork(c->work);
}
-/* Simple callback for login */
-static void CALLBACK login_callback(PTP_CALLBACK_INSTANCE instance,
- PVOID context, PTP_WORK work)
-{
- if(instance)
- {
- check_thread_init();
- }
-
- connection_t *connection =(connection_t *)context;
- if (login(connection, instance) != 0)
- {
- destroy_connection(connection, instance);
- free(connection);
- }
-}
-
/*
Timer callback.
Invoked when connection times out (wait_timeout)
*/
-static VOID CALLBACK timer_callback(PTP_CALLBACK_INSTANCE instance,
+static VOID CALLBACK timer_callback(PTP_CALLBACK_INSTANCE instance,
PVOID parameter, PTP_TIMER timer)
{
- check_thread_init();
-
- connection_t *con= (connection_t*)parameter;
- ulonglong timeout= con->timeout;
-
- if (timeout <= now())
+ TP_connection_win *c = (TP_connection_win *)parameter;
+ if (c->timeout <= now())
{
- con->thd->set_killed(KILL_CONNECTION);
- if(con->thd->net.vio)
- vio_shutdown(con->thd->net.vio, SD_BOTH);
+ tp_timeout_handler(c);
}
- else if(timeout != ULONGLONG_MAX)
+ else
{
- /*
- Reset timer.
- There is a tiny possibility of a race condition, since the value of timeout
- could have changed to smaller value in the thread doing io callback.
+ /*
+ Reset timer.
+ There is a tiny possibility of a race condition, since the value of timeout
+ could have changed to smaller value in the thread doing io callback.
- Given the relative unimportance of the wait timeout, we accept race
+ Given the relative unimportance of the wait timeout, we accept race
condition.
- */
- SetThreadpoolTimer(timer, (PFILETIME)&timeout, 0, 1000);
+ */
+ SetThreadpoolTimer(timer, (PFILETIME)&c->timeout, 0, 1000);
}
}
@@ -636,10 +428,11 @@ static VOID CALLBACK timer_callback(PTP_CALLBACK_INSTANCE instance,
Shared memory read callback.
Invoked when read event is set on connection.
*/
+
static void CALLBACK shm_read_callback(PTP_CALLBACK_INSTANCE instance,
PVOID context, PTP_WAIT wait,TP_WAIT_RESULT wait_result)
{
- connection_t *con= (connection_t *)context;
+ TP_connection_win *c= (TP_connection_win *)context;
/* Disarm wait. */
SetThreadpoolWait(wait, NULL, NULL);
@@ -648,100 +441,106 @@ static void CALLBACK shm_read_callback(PTP_CALLBACK_INSTANCE instance,
and the current state is "not set". Thus we need to reset the event again,
or vio_read will hang.
*/
- HANDLE h = con->thd->net.vio->event_server_wrote;
- SetEvent(h);
- io_completion_callback(instance, context, NULL, 0, 0 , 0);
+ SetEvent(c->handle);
+ tp_callback(instance, context);
}
-/*
- Notify the thread pool about a new connection.
- NOTE: LOCK_thread_count is locked on entry. This function must unlock it.
-*/
-void tp_add_connection(THD *thd)
+static void CALLBACK work_callback(PTP_CALLBACK_INSTANCE instance, PVOID context, PTP_WORK work)
+{
+ tp_callback(instance, context);
+}
+
+TP_pool_win::TP_pool_win()
+{}
+
+int TP_pool_win::init()
{
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
+ fls= FlsAlloc(thread_destructor);
+ pool= CreateThreadpool(NULL);
- connection_t *con = (connection_t *)malloc(sizeof(connection_t));
- if(!con)
+ if (!pool)
{
- tp_log_warning("Allocation failed", "tp_add_connection");
- threadpool_cleanup_connection(thd);
- return;
+ sql_print_error("Can't create threadpool. "
+ "CreateThreadpool() failed with %d. Likely cause is memory pressure",
+ GetLastError());
+ return -1;
}
- init_connection(con);
- con->thd= thd;
- thd->event_scheduler.data= con;
+ InitializeThreadpoolEnvironment(&callback_environ);
+ SetThreadpoolCallbackPool(&callback_environ, pool);
- /* Try to login asynchronously, using threads in the pool */
- PTP_WORK wrk = CreateThreadpoolWork(login_callback,con, &con->callback_environ);
- if (wrk)
+ if (threadpool_max_threads)
{
- SubmitThreadpoolWork(wrk);
- CloseThreadpoolWork(wrk);
+ SetThreadpoolThreadMaximum(pool, threadpool_max_threads);
}
- else
+
+ if (threadpool_min_threads)
{
- /* Likely memory pressure */
- threadpool_cleanup_connection(thd);
+ if (!SetThreadpoolThreadMinimum(pool, threadpool_min_threads))
+ {
+ tp_log_warning("Can't set threadpool minimum threads",
+ "SetThreadpoolThreadMinimum");
+ }
}
-}
-
-
-/**
- Sets the number of idle threads the thread pool maintains in anticipation of new
- requests.
-*/
-void tp_set_min_threads(uint val)
-{
- if (pool)
- SetThreadpoolThreadMinimum(pool, val);
-}
-
-void tp_set_max_threads(uint val)
-{
- if (pool)
- SetThreadpoolThreadMaximum(pool, val);
-}
-
-void tp_wait_begin(THD *thd, int type)
-{
- DBUG_ASSERT(thd);
/*
- Signal to the threadpool whenever callback can run long. Currently, binlog
- waits are a good candidate, its waits are really long
+ Control stack size (OS must be Win7 or later)
*/
- if (type == THD_WAIT_BINLOG)
+ if (SetThreadpoolStackInformation)
{
- connection_t *connection= (connection_t *)thd->event_scheduler.data;
- if(connection && connection->callback_instance)
+ TP_POOL_STACK_INFORMATION stackinfo;
+ stackinfo.StackCommit = 0;
+ stackinfo.StackReserve = (SIZE_T)my_thread_stack_size;
+ if (!SetThreadpoolStackInformation(pool, &stackinfo))
{
- CallbackMayRunLong(connection->callback_instance);
- /*
- Reset instance, to avoid calling CallbackMayRunLong twice within
- the same callback (it is an error according to docs).
- */
- connection->callback_instance= 0;
+ tp_log_warning("Can't set threadpool stack size",
+ "SetThreadpoolStackInformation");
}
}
+ return 0;
}
-void tp_wait_end(THD *thd)
+
+/**
+ Scheduler callback : Destroy the scheduler.
+*/
+TP_pool_win::~TP_pool_win()
{
- /* Do we need to do anything ? */
+ if (!pool)
+ return;
+ DestroyThreadpoolEnvironment(&callback_environ);
+ SetThreadpoolThreadMaximum(pool, 0);
+ CloseThreadpool(pool);
+ if (!tp_stats.num_worker_threads)
+ FlsFree(fls);
}
-
-
/**
- Number of idle threads in pool.
- This info is not available in Windows implementation,
- thus function always returns 0.
+ Sets the number of idle threads the thread pool maintains in anticipation of new
+ requests.
*/
-int tp_get_idle_thread_count()
+int TP_pool_win::set_min_threads(uint val)
+{
+ SetThreadpoolThreadMinimum(pool, val);
+ return 0;
+}
+
+int TP_pool_win::set_max_threads(uint val)
{
+ SetThreadpoolThreadMaximum(pool, val);
return 0;
}
+
+TP_connection *TP_pool_win::new_connection(CONNECT *connect)
+{
+ TP_connection *c= new (std::nothrow) TP_connection_win(connect);
+ if (!c )
+ return 0;
+ if (c->init())
+ {
+ delete c;
+ return 0;
+ }
+ return c;
+}
diff --git a/sql/transaction.cc b/sql/transaction.cc
index 1744feea151..0f1cd377198 100644
--- a/sql/transaction.cc
+++ b/sql/transaction.cc
@@ -25,6 +25,44 @@
#include "debug_sync.h" // DEBUG_SYNC
#include "sql_acl.h"
+
+#ifndef EMBEDDED_LIBRARY
+/**
+ Helper: Tell tracker (if any) that transaction ended.
+*/
+static void trans_track_end_trx(THD *thd)
+{
+ if (thd->variables.session_track_transaction_info > TX_TRACK_NONE)
+ {
+ ((Transaction_state_tracker *)
+ thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER))->end_trx(thd);
+ }
+}
+#else
+#define trans_track_end_trx(A) do{}while(0)
+#endif //EMBEDDED_LIBRARY
+
+
+/**
+ Helper: transaction ended, SET TRANSACTION one-shot variables
+ revert to session values. Let the transaction state tracker know.
+*/
+void trans_reset_one_shot_chistics(THD *thd)
+{
+#ifndef EMBEDDED_LIBRARY
+ if (thd->variables.session_track_transaction_info > TX_TRACK_NONE)
+ {
+ Transaction_state_tracker *tst= (Transaction_state_tracker *)
+ thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER);
+
+ tst->set_read_flags(thd, TX_READ_INHERIT);
+ tst->set_isol_level(thd, TX_ISOL_INHERIT);
+ }
+#endif //EMBEDDED_LIBRARY
+ thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
+ thd->tx_read_only= thd->variables.tx_read_only;
+}
+
/* Conditions under which the transaction state must not change. */
static bool trans_check(THD *thd)
{
@@ -125,11 +163,20 @@ static bool xa_trans_force_rollback(THD *thd)
bool trans_begin(THD *thd, uint flags)
{
int res= FALSE;
+#ifndef EMBEDDED_LIBRARY
+ Transaction_state_tracker *tst= NULL;
+#endif //EMBEDDED_LIBRARY
DBUG_ENTER("trans_begin");
if (trans_check(thd))
DBUG_RETURN(TRUE);
+#ifndef EMBEDDED_LIBRARY
+ if (thd->variables.session_track_transaction_info > TX_TRACK_NONE)
+ tst= (Transaction_state_tracker *)
+ thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER);
+#endif //EMBEDDED_LIBRARY
+
thd->locked_tables_list.unlock_locked_tables(thd);
DBUG_ASSERT(!thd->locked_tables_mode);
@@ -171,7 +218,13 @@ bool trans_begin(THD *thd, uint flags)
DBUG_ASSERT(!((flags & MYSQL_START_TRANS_OPT_READ_ONLY) &&
(flags & MYSQL_START_TRANS_OPT_READ_WRITE)));
if (flags & MYSQL_START_TRANS_OPT_READ_ONLY)
+ {
thd->tx_read_only= true;
+#ifndef EMBEDDED_LIBRARY
+ if (tst)
+ tst->set_read_flags(thd, TX_READ_ONLY);
+#endif //EMBEDDED_LIBRARY
+ }
else if (flags & MYSQL_START_TRANS_OPT_READ_WRITE)
{
/*
@@ -188,6 +241,14 @@ bool trans_begin(THD *thd, uint flags)
DBUG_RETURN(true);
}
thd->tx_read_only= false;
+ /*
+ This flags that tx_read_only was set explicitly, rather than
+ just from the session's default.
+ */
+#ifndef EMBEDDED_LIBRARY
+ if (tst)
+ tst->set_read_flags(thd, TX_READ_WRITE);
+#endif //EMBEDDED_LIBRARY
}
#ifdef WITH_WSREP
@@ -202,9 +263,20 @@ bool trans_begin(THD *thd, uint flags)
thd->server_status|= SERVER_STATUS_IN_TRANS_READONLY;
DBUG_PRINT("info", ("setting SERVER_STATUS_IN_TRANS"));
+#ifndef EMBEDDED_LIBRARY
+ if (tst)
+ tst->add_trx_state(thd, TX_EXPLICIT);
+#endif //EMBEDDED_LIBRARY
+
/* ha_start_consistent_snapshot() relies on OPTION_BEGIN flag set. */
if (flags & MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT)
+ {
+#ifndef EMBEDDED_LIBRARY
+ if (tst)
+ tst->add_trx_state(thd, TX_WITH_SNAPSHOT);
+#endif //EMBEDDED_LIBRARY
res= ha_start_consistent_snapshot(thd);
+ }
DBUG_RETURN(MY_TEST(res));
}
@@ -253,6 +325,8 @@ bool trans_commit(THD *thd)
thd->transaction.all.reset();
thd->lex->start_transaction_opt= 0;
+ trans_track_end_trx(thd);
+
DBUG_RETURN(MY_TEST(res));
}
@@ -305,8 +379,9 @@ bool trans_commit_implicit(THD *thd)
@@session.completion_type since it's documented
to not have any effect on implicit commit.
*/
- thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
- thd->tx_read_only= thd->variables.tx_read_only;
+ trans_reset_one_shot_chistics(thd);
+
+ trans_track_end_trx(thd);
DBUG_RETURN(res);
}
@@ -345,6 +420,8 @@ bool trans_rollback(THD *thd)
thd->transaction.all.reset();
thd->lex->start_transaction_opt= 0;
+ trans_track_end_trx(thd);
+
DBUG_RETURN(MY_TEST(res));
}
@@ -391,6 +468,8 @@ bool trans_rollback_implicit(THD *thd)
/* Rollback should clear transaction_rollback_request flag. */
DBUG_ASSERT(! thd->transaction_rollback_request);
+ trans_track_end_trx(thd);
+
DBUG_RETURN(MY_TEST(res));
}
@@ -431,8 +510,7 @@ bool trans_commit_stmt(THD *thd)
res= ha_commit_trans(thd, FALSE);
if (! thd->in_active_multi_stmt_transaction())
{
- thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
- thd->tx_read_only= thd->variables.tx_read_only;
+ trans_reset_one_shot_chistics(thd);
if (WSREP_ON)
wsrep_post_commit(thd, FALSE);
}
@@ -486,10 +564,7 @@ bool trans_rollback_stmt(THD *thd)
wsrep_register_hton(thd, FALSE);
ha_rollback_trans(thd, FALSE);
if (! thd->in_active_multi_stmt_transaction())
- {
- thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
- thd->tx_read_only= thd->variables.tx_read_only;
- }
+ trans_reset_one_shot_chistics(thd);
}
(void) RUN_HOOK(transaction, after_rollback, (thd, FALSE));
@@ -902,6 +977,8 @@ bool trans_xa_commit(THD *thd)
xid_cache_delete(thd, &thd->transaction.xid_state);
thd->transaction.xid_state.xa_state= XA_NOTR;
+ trans_track_end_trx(thd);
+
DBUG_RETURN(res);
}
@@ -957,5 +1034,7 @@ bool trans_xa_rollback(THD *thd)
xid_cache_delete(thd, &thd->transaction.xid_state);
thd->transaction.xid_state.xa_state= XA_NOTR;
+ trans_track_end_trx(thd);
+
DBUG_RETURN(res);
}
diff --git a/sql/transaction.h b/sql/transaction.h
index 54b25f1de2a..90de11aabe3 100644
--- a/sql/transaction.h
+++ b/sql/transaction.h
@@ -44,4 +44,6 @@ bool trans_xa_prepare(THD *thd);
bool trans_xa_commit(THD *thd);
bool trans_xa_rollback(THD *thd);
+void trans_reset_one_shot_chistics(THD *thd);
+
#endif /* TRANSACTION_H */
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 60a3ceafe0a..1167ea18e7e 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1615,7 +1615,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
/*
To be able to run this from boot, we allocate a temporary THD
*/
- if (!(thd= new THD))
+ if (!(thd= new THD(0)))
DBUG_RETURN(1);
thd->thread_stack= (char*) &thd;
thd->store_globals();
@@ -1795,8 +1795,6 @@ end:
delete thd;
if (org_thd)
org_thd->store_globals(); /* purecov: inspected */
- else
- my_pthread_setspecific_ptr(THR_MALLOC, 0);
default_tz= default_tz_name ? global_system_variables.time_zone
: my_tz_SYSTEM;
diff --git a/sql/udf_example.c b/sql/udf_example.c
index a48801d1c4a..c7e2f989829 100644
--- a/sql/udf_example.c
+++ b/sql/udf_example.c
@@ -1095,7 +1095,7 @@ my_bool is_const_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
strmov(message, "IS_CONST accepts only one argument");
return 1;
}
- initid->ptr= (char*)((args->args[0] != NULL) ? 1UL : 0);
+ initid->ptr= (char*)((args->args[0] != NULL) ? (size_t)1 : (size_t)0);
return 0;
}
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 1ce186b48e1..86622b41351 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -37,7 +37,9 @@
#include "sql_sort.h"
#include "queues.h" // QUEUE
#include "my_tree.h" // element_count
-#include "sql_class.h" // Unique
+#include "uniques.h" // Unique
+#include "sql_sort.h"
+#include "myisamchk.h" // BUFFPEK
int unique_write_to_file(uchar* key, element_count count, Unique *unique)
{
@@ -58,8 +60,8 @@ int unique_write_to_file_with_count(uchar* key, element_count count, Unique *uni
int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique)
{
- memcpy(unique->record_pointers, key, unique->size);
- unique->record_pointers+=unique->size;
+ memcpy(unique->sort.record_pointers, key, unique->size);
+ unique->sort.record_pointers+=unique->size;
return 0;
}
@@ -67,8 +69,8 @@ int unique_intersect_write_to_ptrs(uchar* key, element_count count, Unique *uniq
{
if (count >= unique->min_dupl_count)
{
- memcpy(unique->record_pointers, key, unique->size);
- unique->record_pointers+=unique->size;
+ memcpy(unique->sort.record_pointers, key, unique->size);
+ unique->sort.record_pointers+=unique->size;
}
else
unique->filtered_out_elems++;
@@ -77,20 +79,19 @@ int unique_intersect_write_to_ptrs(uchar* key, element_count count, Unique *uniq
Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
- uint size_arg, ulonglong max_in_memory_size_arg,
+ uint size_arg, size_t max_in_memory_size_arg,
uint min_dupl_count_arg)
:max_in_memory_size(max_in_memory_size_arg),
- record_pointers(NULL),
size(size_arg),
elements(0)
{
+ my_b_clear(&file);
min_dupl_count= min_dupl_count_arg;
full_size= size;
if (min_dupl_count_arg)
full_size+= sizeof(element_count);
with_counters= MY_TEST(min_dupl_count_arg);
- my_b_clear(&file);
- init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func,
+ init_tree(&tree, (max_in_memory_size / 16), 0, size, comp_func,
NULL, comp_func_fixed_arg, MYF(MY_THREAD_SPECIFIC));
/* If the following fail's the next add will also fail */
my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16,
@@ -208,7 +209,7 @@ static double get_merge_many_buffs_cost(uint *buffer,
uint last_n_elems, int elem_size,
uint compare_factor)
{
- register int i;
+ int i;
double total_cost= 0.0;
uint *buff_elems= buffer; /* #s of elements in each of merged sequences */
@@ -305,7 +306,7 @@ static double get_merge_many_buffs_cost(uint *buffer,
*/
double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size,
- ulonglong max_in_memory_size,
+ size_t max_in_memory_size,
uint compare_factor,
bool intersect_fl, bool *in_memory)
{
@@ -411,8 +412,10 @@ Unique::reset()
reset_dynamic(&file_ptrs);
reinit_io_cache(&file, WRITE_CACHE, 0L, 0, 1);
}
+ my_free(sort.record_pointers);
elements= 0;
tree.flag= 0;
+ sort.record_pointers= 0;
}
/*
@@ -639,7 +642,7 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
if (elements == 0) /* the whole tree is in memory */
return tree_walk(&tree, action, walk_action_arg, left_root_right);
- table->sort.found_records=elements+tree.elements_in_tree;
+ sort.return_rows= elements+tree.elements_in_tree;
/* flush current tree to the file to have some memory for merge buffer */
if (flush())
return 1;
@@ -671,9 +674,11 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
/*
DESCRIPTION
- Perform multi-pass sort merge of the elements accessed through table->sort,
- using the buffer buff as the merge buffer. The last pass is not performed
- if without_last_merge is TRUE.
+
+ Perform multi-pass sort merge of the elements using the buffer buff as
+ the merge buffer. The last pass is not performed if without_last_merge is
+ TRUE.
+
SYNOPSIS
Unique:merge()
All params are 'IN':
@@ -687,23 +692,19 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
{
- IO_CACHE *outfile= table->sort.io_cache;
+ IO_CACHE *outfile= &sort.io_cache;
BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer;
uint maxbuffer= file_ptrs.elements - 1;
my_off_t save_pos;
bool error= 1;
+ Sort_param sort_param;
- /* Open cached file if it isn't open */
- if (!outfile)
- outfile= table->sort.io_cache= (IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_THREAD_SPECIFIC|MY_ZEROFILL));
- if (!outfile ||
- (! my_b_inited(outfile) &&
- open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
- MYF(MY_WME))))
+ /* Open cached file for table records if it isn't open */
+ if (! my_b_inited(outfile) &&
+ open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
+ MYF(MY_WME)))
return 1;
- Sort_param sort_param;
bzero((char*) &sort_param,sizeof(sort_param));
sort_param.max_rows= elements;
sort_param.sort_form= table;
@@ -752,44 +753,49 @@ err:
/*
- Modify the TABLE element so that when one calls init_records()
- the rows will be read in priority order.
+ Allocate memory that can be used with init_records() so that
+ rows will be read in priority order.
*/
bool Unique::get(TABLE *table)
{
bool rc= 1;
uchar *sort_buffer= NULL;
- table->sort.found_records= elements+tree.elements_in_tree;
+ sort.return_rows= elements+tree.elements_in_tree;
+ DBUG_ENTER("Unique::get");
if (my_b_tell(&file) == 0)
{
/* Whole tree is in memory; Don't use disk if you don't need to */
- if ((record_pointers=table->sort.record_pointers= (uchar*)
+ if ((sort.record_pointers= (uchar*)
my_malloc(size * tree.elements_in_tree, MYF(MY_THREAD_SPECIFIC))))
{
+ uchar *save_record_pointers= sort.record_pointers;
tree_walk_action action= min_dupl_count ?
(tree_walk_action) unique_intersect_write_to_ptrs :
(tree_walk_action) unique_write_to_ptrs;
filtered_out_elems= 0;
(void) tree_walk(&tree, action,
this, left_root_right);
- table->sort.found_records-= filtered_out_elems;
- return 0;
+ /* Restore record_pointers that was changed in by 'action' above */
+ sort.record_pointers= save_record_pointers;
+ sort.return_rows-= filtered_out_elems;
+ DBUG_RETURN(0);
}
}
/* Not enough memory; Save the result to file && free memory used by tree */
if (flush())
- return 1;
+ DBUG_RETURN(1);
size_t buff_sz= (max_in_memory_size / full_size + 1) * full_size;
- if (!(sort_buffer= (uchar*) my_malloc(buff_sz, MYF(MY_THREAD_SPECIFIC|MY_WME))))
- return 1;
+ if (!(sort_buffer= (uchar*) my_malloc(buff_sz,
+ MYF(MY_THREAD_SPECIFIC|MY_WME))))
+ DBUG_RETURN(1);
if (merge(table, sort_buffer, FALSE))
- goto err;
+ goto err;
rc= 0;
err:
my_free(sort_buffer);
- return rc;
+ DBUG_RETURN(rc);
}
diff --git a/sql/uniques.h b/sql/uniques.h
new file mode 100644
index 00000000000..654b3692aaa
--- /dev/null
+++ b/sql/uniques.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2016 MariaDB corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef UNIQUE_INCLUDED
+#define UNIQUE_INCLUDED
+
+#include "filesort.h"
+
+/*
+ Unique -- class for unique (removing of duplicates).
+ Puts all values to the TREE. If the tree becomes too big,
+ it's dumped to the file. User can request sorted values, or
+ just iterate through them. In the last case tree merging is performed in
+ memory simultaneously with iteration, so it should be ~2-3x faster.
+ */
+
+class Unique :public Sql_alloc
+{
+ DYNAMIC_ARRAY file_ptrs;
+ ulong max_elements;
+ size_t max_in_memory_size;
+ IO_CACHE file;
+ TREE tree;
+ ulong filtered_out_elems;
+ uint size;
+ uint full_size;
+ uint min_dupl_count; /* always 0 for unions, > 0 for intersections */
+ bool with_counters;
+
+ bool merge(TABLE *table, uchar *buff, bool without_last_merge);
+ bool flush();
+
+public:
+ ulong elements;
+ SORT_INFO sort;
+ Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
+ uint size_arg, size_t max_in_memory_size_arg,
+ uint min_dupl_count_arg= 0);
+ ~Unique();
+ ulong elements_in_tree() { return tree.elements_in_tree; }
+ inline bool unique_add(void *ptr)
+ {
+ DBUG_ENTER("unique_add");
+ DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements));
+ if (!(tree.flag & TREE_ONLY_DUPS) &&
+ tree.elements_in_tree >= max_elements && flush())
+ DBUG_RETURN(1);
+ DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg));
+ }
+
+ bool is_in_memory() { return (my_b_tell(&file) == 0); }
+ void close_for_expansion() { tree.flag= TREE_ONLY_DUPS; }
+
+ bool get(TABLE *table);
+
+ /* Cost of searching for an element in the tree */
+ inline static double get_search_cost(ulonglong tree_elems, uint compare_factor)
+ {
+ return log((double) tree_elems) / (compare_factor * M_LN2);
+ }
+
+ static double get_use_cost(uint *buffer, size_t nkeys, uint key_size,
+ size_t max_in_memory_size, uint compare_factor,
+ bool intersect_fl, bool *in_memory);
+ inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size,
+ size_t max_in_memory_size)
+ {
+ size_t max_elems_in_tree=
+ max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size);
+ return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
+ }
+
+ void reset();
+ bool walk(TABLE *table, tree_walk_action action, void *walk_action_arg);
+
+ uint get_size() const { return size; }
+ size_t get_max_in_memory_size() const { return max_in_memory_size; }
+
+ friend int unique_write_to_file(uchar* key, element_count count, Unique *unique);
+ friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique);
+
+ friend int unique_write_to_file_with_count(uchar* key, element_count count,
+ Unique *unique);
+ friend int unique_intersect_write_to_ptrs(uchar* key, element_count count,
+ Unique *unique);
+};
+
+#endif /* UNIQUE_INCLUDED */
diff --git a/sql/unireg.cc b/sql/unireg.cc
index e41cca2dfcb..ff2b406350a 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -40,11 +40,15 @@
#define ALLOCA_THRESHOLD 2048
static uint pack_keys(uchar *,uint, KEY *, ulong);
-static bool pack_header(THD *, uchar *, List<Create_field> &, uint, ulong, handler *);
+static bool pack_header(THD *, uchar *, List<Create_field> &, HA_CREATE_INFO *,
+ ulong, handler *);
+static bool pack_vcols(String *, List<Create_field> &, List<Virtual_column_info> *);
static uint get_interval_id(uint *,List<Create_field> &, Create_field *);
-static bool pack_fields(uchar *, List<Create_field> &, ulong);
+static bool pack_fields(uchar **, List<Create_field> &, HA_CREATE_INFO*,
+ ulong);
static size_t packed_fields_length(List<Create_field> &);
-static bool make_empty_rec(THD *, uchar *, uint, List<Create_field> &, uint, ulong);
+static bool make_empty_rec(THD *, uchar *, uint, List<Create_field> &, uint,
+ ulong);
/*
write the length as
@@ -54,7 +58,7 @@ static bool make_empty_rec(THD *, uchar *, uint, List<Create_field> &, uint, ulo
static uchar *extra2_write_len(uchar *pos, size_t len)
{
if (len <= 255)
- *pos++= len;
+ *pos++= (uchar)len;
else
{
/*
@@ -114,6 +118,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
int error;
uchar *frm_ptr, *pos;
LEX_CUSTRING frm= {0,0};
+ StringBuffer<MAX_FIELD_WIDTH> vcols;
DBUG_ENTER("build_frm_image");
/* If fixed row records, we need one bit to check for deleted rows */
@@ -121,13 +126,23 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
create_info->null_bits++;
data_offset= (create_info->null_bits + 7) / 8;
- error= pack_header(thd, forminfo, create_fields, create_info->table_options,
- data_offset, db_file);
+ sql_mode_t save_sql_mode= thd->variables.sql_mode;
+ thd->variables.sql_mode &= ~MODE_ANSI_QUOTES;
+ error= pack_vcols(&vcols, create_fields, create_info->check_constraint_list);
+ thd->variables.sql_mode= save_sql_mode;
+
+ if (error)
+ DBUG_RETURN(frm);
+ if (vcols.length())
+ create_info->expression_length= vcols.length() + FRM_VCOL_NEW_BASE_SIZE;
+
+ error= pack_header(thd, forminfo, create_fields, create_info,
+ data_offset, db_file);
if (error)
DBUG_RETURN(frm);
- reclength=uint2korr(forminfo+266);
+ reclength= uint2korr(forminfo+266);
/* Calculate extra data segment length */
str_db_type= *hton_name(create_info->db_type);
@@ -220,8 +235,10 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
filepos= frm.length;
frm.length+= FRM_FORMINFO_SIZE; // forminfo
frm.length+= packed_fields_length(create_fields);
+ frm.length+= create_info->expression_length;
- if (frm.length > FRM_MAX_SIZE)
+ if (frm.length > FRM_MAX_SIZE ||
+ create_info->expression_length > UINT_MAX32)
{
my_error(ER_TABLE_DEFINITION_TOO_BIG, MYF(0), table);
DBUG_RETURN(frm);
@@ -285,8 +302,6 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
DBUG_PRINT("info", ("part_db_type = %d", fileinfo[61]));
}
- int2store(fileinfo+59,db_file->extra_rec_buf_length());
-
memcpy(frm_ptr, fileinfo, FRM_HEADER_SIZE);
pos+= key_buff_length;
@@ -335,9 +350,19 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
}
memcpy(frm_ptr + filepos, forminfo, 288);
- if (pack_fields(frm_ptr + filepos + 288, create_fields, data_offset))
+ pos= frm_ptr + filepos + 288;
+ if (pack_fields(&pos, create_fields, create_info, data_offset))
goto err;
+ if (vcols.length())
+ {
+ /* Store header for packed fields (extra space for future) */
+ bzero(pos, FRM_VCOL_NEW_BASE_SIZE);
+ pos+= FRM_VCOL_NEW_BASE_SIZE;
+ memcpy(pos, vcols.ptr(), vcols.length());
+ pos+= vcols.length();
+ }
+
{
/*
Restore all UCS2 intervals.
@@ -436,9 +461,9 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
int2store(pos+6, key->block_size);
pos+=8;
key_parts+=key->user_defined_key_parts;
- DBUG_PRINT("loop", ("flags: %lu key_parts: %d key_part: 0x%lx",
+ DBUG_PRINT("loop", ("flags: %lu key_parts: %d key_part: %p",
key->flags, key->user_defined_key_parts,
- (long) key->key_part));
+ key->key_part));
for (key_part=key->key_part,key_part_end=key_part+key->user_defined_key_parts ;
key_part != key_part_end ;
key_part++)
@@ -497,15 +522,88 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
} /* pack_keys */
+/**
+ Pack the expression (for GENERATED ALWAYS AS, DEFAULT, CHECK)
+
+ The data is stored as:
+ 1 byte type (enum_vcol_info_type)
+ 2 bytes field_number
+ 2 bytes length of expression
+ 1 byte length of name
+ name
+ next bytes column expression (text data)
+
+ @return 0 ok
+ @return 1 error (out of memory or wrong characters in expression)
+*/
+
+static bool pack_expression(String *buf, Virtual_column_info *vcol,
+ uint field_nr, enum_vcol_info_type type)
+{
+ if (buf->reserve(FRM_VCOL_NEW_HEADER_SIZE + vcol->name.length))
+ return 1;
+
+ buf->q_append((char) type);
+ buf->q_append2b(field_nr);
+ size_t len_off= buf->length();
+ buf->q_append2b(0); // to be added later
+ buf->q_append((char)vcol->name.length);
+ buf->q_append(vcol->name.str, vcol->name.length);
+ size_t expr_start= buf->length();
+ vcol->print(buf);
+ size_t expr_len= buf->length() - expr_start;
+ if (expr_len >= 65536)
+ {
+ my_error(ER_EXPRESSION_IS_TOO_BIG, MYF(0), vcol_type_name(type));
+ return 1;
+ }
+ int2store(buf->ptr() + len_off, expr_len);
+ return 0;
+}
+
+
+static bool pack_vcols(String *buf, List<Create_field> &create_fields,
+ List<Virtual_column_info> *check_constraint_list)
+{
+ List_iterator<Create_field> it(create_fields);
+ Create_field *field;
+
+ for (uint field_nr=0; (field= it++); field_nr++)
+ {
+ if (field->vcol_info)
+ if (pack_expression(buf, field->vcol_info, field_nr,
+ field->vcol_info->stored_in_db
+ ? VCOL_GENERATED_STORED : VCOL_GENERATED_VIRTUAL))
+ return 1;
+ if (field->has_default_expression() && !field->has_default_now_unireg_check())
+ if (pack_expression(buf, field->default_value, field_nr, VCOL_DEFAULT))
+ return 1;
+ if (field->check_constraint)
+ if (pack_expression(buf, field->check_constraint, field_nr,
+ VCOL_CHECK_FIELD))
+ return 1;
+ }
+
+ List_iterator<Virtual_column_info> cit(*check_constraint_list);
+ Virtual_column_info *check;
+ while ((check= cit++))
+ if (pack_expression(buf, check, UINT_MAX32, VCOL_CHECK_TABLE))
+ return 1;
+ return 0;
+}
+
+
/* Make formheader */
static bool pack_header(THD *thd, uchar *forminfo,
List<Create_field> &create_fields,
- uint table_options, ulong data_offset, handler *file)
+ HA_CREATE_INFO *create_info, ulong data_offset,
+ handler *file)
{
- uint length,int_count,int_length,no_empty, int_parts;
+ uint int_count,int_length, int_parts;
uint time_stamp_pos,null_fields;
- ulong reclength, totlength, n_length, com_length, vcol_info_length;
+ uint table_options= create_info->table_options;
+ size_t length, reclength, totlength, n_length, com_length;
DBUG_ENTER("pack_header");
if (create_fields.elements > MAX_FIELDS)
@@ -516,9 +614,10 @@ static bool pack_header(THD *thd, uchar *forminfo,
totlength= 0L;
reclength= data_offset;
- no_empty=int_count=int_parts=int_length=time_stamp_pos=null_fields=0;
- com_length=vcol_info_length=0;
+ int_count=int_parts=int_length=time_stamp_pos=null_fields=0;
+ com_length= 0;
n_length=2L;
+ create_info->field_check_constraints= 0;
/* Check fields */
List_iterator<Create_field> it(create_fields);
@@ -529,42 +628,10 @@ static bool pack_header(THD *thd, uchar *forminfo,
ER_TOO_LONG_FIELD_COMMENT, field->field_name))
DBUG_RETURN(1);
- if (field->vcol_info)
- {
- uint col_expr_maxlen= field->virtual_col_expr_maxlen();
- uint tmp_len= my_charpos(system_charset_info,
- field->vcol_info->expr_str.str,
- field->vcol_info->expr_str.str +
- field->vcol_info->expr_str.length,
- col_expr_maxlen);
-
- if (tmp_len < field->vcol_info->expr_str.length)
- {
- my_error(ER_WRONG_STRING_LENGTH, MYF(0),
- field->vcol_info->expr_str.str,"VIRTUAL COLUMN EXPRESSION",
- col_expr_maxlen);
- DBUG_RETURN(1);
- }
- /*
- Sum up the length of the expression string and the length of the
- mandatory header to the total length of info on the defining
- expressions saved in the frm file for virtual columns.
- */
- vcol_info_length+= field->vcol_info->expr_str.length+
- FRM_VCOL_HEADER_SIZE(field->interval);
- }
-
- totlength+= field->length;
+ totlength+= (size_t)field->length;
com_length+= field->comment.length;
- if (MTYP_TYPENR(field->unireg_check) == Field::NOEMPTY ||
- field->unireg_check & MTYP_NOEMPTY_BIT)
- {
- field->unireg_check= (Field::utype) ((uint) field->unireg_check |
- MTYP_NOEMPTY_BIT);
- no_empty++;
- }
- /*
- We mark first TIMESTAMP field with NOW() in DEFAULT or ON UPDATE
+ /*
+ We mark first TIMESTAMP field with NOW() in DEFAULT or ON UPDATE
as auto-update field.
*/
if (field->sql_type == MYSQL_TYPE_TIMESTAMP &&
@@ -625,6 +692,8 @@ static bool pack_header(THD *thd, uchar *forminfo,
}
if (f_maybe_null(field->pack_flag))
null_fields++;
+ if (field->check_constraint)
+ create_info->field_check_constraints++;
}
int_length+=int_count*2; // 255 prefix + 0 suffix
@@ -634,25 +703,23 @@ static bool pack_header(THD *thd, uchar *forminfo,
my_error(ER_TOO_BIG_ROWSIZE, MYF(0), static_cast<long>(file->max_record_length()));
DBUG_RETURN(1);
}
+
/* Hack to avoid bugs with small static rows in MySQL */
- reclength=MY_MAX(file->min_record_length(table_options),reclength);
- if ((ulong) create_fields.elements*FCOMP+FRM_FORMINFO_SIZE+
- n_length+int_length+com_length+vcol_info_length > 65535L ||
- int_count > 255)
+ reclength= MY_MAX(file->min_record_length(table_options), reclength);
+ length= n_length + create_fields.elements*FCOMP + FRM_FORMINFO_SIZE +
+ int_length + com_length + create_info->expression_length;
+ if (length > 65535L || int_count > 255)
{
- my_message(ER_TOO_MANY_FIELDS, ER_THD(thd, ER_TOO_MANY_FIELDS), MYF(0));
+ my_message(ER_TOO_MANY_FIELDS, "Table definition is too large", MYF(0));
DBUG_RETURN(1);
}
bzero((char*)forminfo,FRM_FORMINFO_SIZE);
- length=(create_fields.elements*FCOMP+FRM_FORMINFO_SIZE+n_length+int_length+
- com_length+vcol_info_length);
int2store(forminfo,length);
- forminfo[256] = 0;
int2store(forminfo+258,create_fields.elements);
- int2store(forminfo+260,0);
+ // bytes 260-261 are unused
int2store(forminfo+262,totlength);
- int2store(forminfo+264,no_empty);
+ // bytes 264-265 are unused
int2store(forminfo+266,reclength);
int2store(forminfo+268,n_length);
int2store(forminfo+270,int_count);
@@ -663,7 +730,7 @@ static bool pack_header(THD *thd, uchar *forminfo,
int2store(forminfo+280,22); /* Rows needed */
int2store(forminfo+282,null_fields);
int2store(forminfo+284,com_length);
- int2store(forminfo+286,vcol_info_length);
+ int2store(forminfo+286,create_info->expression_length);
DBUG_RETURN(0);
} /* pack_header */
@@ -716,26 +783,23 @@ static size_t packed_fields_length(List<Create_field> &create_fields)
}
length++;
}
- if (field->vcol_info)
- {
- length+= field->vcol_info->expr_str.length +
- FRM_VCOL_HEADER_SIZE(field->interval);
- }
+
length+= FCOMP;
length+= strlen(field->field_name)+1;
length+= field->comment.length;
}
- length++;
- length++;
+ length+= 2;
DBUG_RETURN(length);
}
/* Save fields, fieldnames and intervals */
-static bool pack_fields(uchar *buff, List<Create_field> &create_fields,
+static bool pack_fields(uchar **buff_arg, List<Create_field> &create_fields,
+ HA_CREATE_INFO *create_info,
ulong data_offset)
{
- uint int_count, comment_length= 0, vcol_info_length=0;
+ uchar *buff= *buff_arg;
+ uint int_count, comment_length= 0;
Create_field *field;
DBUG_ENTER("pack_fields");
@@ -745,16 +809,14 @@ static bool pack_fields(uchar *buff, List<Create_field> &create_fields,
while ((field=it++))
{
uint recpos;
- uint cur_vcol_expr_len= 0;
int2store(buff+3, field->length);
/* The +1 is here becasue the col offset in .frm file have offset 1 */
recpos= field->offset+1 + (uint) data_offset;
int3store(buff+5,recpos);
int2store(buff+8,field->pack_flag);
- DBUG_ASSERT(field->unireg_check < 256);
buff[10]= (uchar) field->unireg_check;
buff[12]= (uchar) field->interval_id;
- buff[13]= (uchar) field->sql_type;
+ buff[13]= (uchar) field->sql_type;
if (field->sql_type == MYSQL_TYPE_GEOMETRY)
{
buff[11]= 0;
@@ -772,17 +834,7 @@ static bool pack_fields(uchar *buff, List<Create_field> &create_fields,
{
buff[11]= buff[14]= 0; // Numerical
}
- if (field->vcol_info)
- {
- /*
- Use the interval_id place in the .frm file to store the length of
- the additional data saved for the virtual field
- */
- buff[12]= cur_vcol_expr_len= field->vcol_info->expr_str.length +
- FRM_VCOL_HEADER_SIZE(field->interval);
- vcol_info_length+= cur_vcol_expr_len;
- buff[13]= (uchar) MYSQL_TYPE_VIRTUAL;
- }
+
int2store(buff+15, field->comment.length);
comment_length+= field->comment.length;
set_if_bigger(int_count,field->interval_id);
@@ -853,7 +905,6 @@ static bool pack_fields(uchar *buff, List<Create_field> &create_fields,
*buff++= sep;
}
*buff++= 0;
-
}
}
}
@@ -866,35 +917,10 @@ static bool pack_fields(uchar *buff, List<Create_field> &create_fields,
buff+= field->comment.length;
}
}
- if (vcol_info_length)
- {
- it.rewind();
- while ((field=it++))
- {
- /*
- Pack each virtual field as follows:
- byte 1 = interval_id == 0 ? 1 : 2
- byte 2 = sql_type
- byte 3 = flags (as of now, 0 - no flags, 1 - field is physically stored)
- [byte 4] = possible interval_id for sql_type
- next byte ... = virtual column expression (text data)
- */
- if (field->vcol_info && field->vcol_info->expr_str.length)
- {
- *buff++= (uchar) (1 + MY_TEST(field->interval));
- *buff++= (uchar) field->sql_type;
- *buff++= (uchar) field->stored_in_db;
- if (field->interval)
- *buff++= (uchar) field->interval_id;
- memcpy(buff, field->vcol_info->expr_str.str, field->vcol_info->expr_str.length);
- buff+= field->vcol_info->expr_str.length;
- }
- }
- }
+ *buff_arg= buff;
DBUG_RETURN(0);
}
-
/* save an empty record on start of formfile */
static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
@@ -902,7 +928,6 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
uint reclength, ulong data_offset)
{
int error= 0;
- Field::utype type;
uint null_count;
uchar *null_pos;
TABLE table;
@@ -930,12 +955,10 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
thd->count_cuted_fields= CHECK_FIELD_WARN; // To find wrong default values
while ((field=it++))
{
- /*
- regfield don't have to be deleted as it's allocated with sql_alloc()
- */
+ /* regfield don't have to be deleted as it's allocated on THD::mem_root */
Field *regfield= make_field(&share, thd->mem_root,
buff+field->offset + data_offset,
- field->length,
+ (uint32)field->length,
null_pos + null_count / 8,
null_count & 7,
field->pack_flag,
@@ -943,8 +966,8 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
field->charset,
field->geom_type, field->srid,
field->unireg_check,
- field->save_interval ? field->save_interval :
- field->interval,
+ field->save_interval ? field->save_interval
+ : field->interval,
field->field_name);
if (!regfield)
{
@@ -964,11 +987,18 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
if (field->sql_type == MYSQL_TYPE_BIT && !f_bit_as_char(field->pack_flag))
null_count+= field->length & 7;
- type= (Field::utype) MTYP_TYPENR(field->unireg_check);
-
- if (field->def)
+ if (field->default_value && !field->default_value->flags &&
+ (!(field->flags & BLOB_FLAG) || field->sql_type == MYSQL_TYPE_GEOMETRY))
{
- int res= field->def->save_in_field(regfield, 1);
+ Item *expr= field->default_value->expr;
+
+ int res= !expr->fixed && // may be already fixed if ALTER TABLE
+ expr->fix_fields(thd, &expr);
+ if (!res)
+ res= expr->save_in_field(regfield, 1);
+ if (!res && (field->flags & BLOB_FLAG))
+ regfield->reset();
+
/* If not ok or warning of level 'note' */
if (res != 0 && res != 3)
{
@@ -977,6 +1007,7 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
delete regfield; //To avoid memory leak
goto err;
}
+ delete regfield; //To avoid memory leak
}
else if (regfield->real_type() == MYSQL_TYPE_ENUM &&
(field->flags & NOT_NULL_FLAG))
@@ -984,12 +1015,6 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
regfield->set_notnull();
regfield->store((longlong) 1, TRUE);
}
- else if (type == Field::YES) // Old unireg type
- regfield->store(ER_THD(thd, ER_YES),(uint) strlen(ER_THD(thd, ER_YES)),
- system_charset_info);
- else if (type == Field::NO) // Old unireg type
- regfield->store(ER_THD(thd, ER_NO), (uint) strlen(ER_THD(thd, ER_NO)),
- system_charset_info);
else
regfield->reset();
}
diff --git a/sql/unireg.h b/sql/unireg.h
index 86d88fcdc21..eb555008b35 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -43,15 +43,16 @@
#define PLUGINDIR "lib/plugin"
#endif
-#define CURRENT_THD_ERRMSGS current_thd->variables.errmsgs
-#define DEFAULT_ERRMSGS my_default_lc_messages->errmsgs->errmsgs
+#define MAX_ERROR_RANGES 4 /* 1000-2000, 2000-3000, 3000-4000, 4000-5000 */
+#define ERRORS_PER_RANGE 1000
-#define ER(X) CURRENT_THD_ERRMSGS[(X) - ER_ERROR_FIRST]
-#define ER_DEFAULT(X) DEFAULT_ERRMSGS[(X) - ER_ERROR_FIRST]
-#define ER_SAFE(X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER(X) : "Invalid error code")
-#define ER_SAFE_THD(T,X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER_THD(T,X) : "Invalid error code")
-#define ER_THD(thd,X) ((thd)->variables.errmsgs[(X) - ER_ERROR_FIRST])
-#define ER_THD_OR_DEFAULT(thd,X) ((thd) ? ER_THD(thd, X) : ER_DEFAULT(X))
+#define DEFAULT_ERRMSGS my_default_lc_messages->errmsgs->errmsgs
+#define CURRENT_THD_ERRMSGS (current_thd)->variables.errmsgs
+
+#define ER_DEFAULT(X) DEFAULT_ERRMSGS[((X)-ER_ERROR_FIRST) / ERRORS_PER_RANGE][(X)% ERRORS_PER_RANGE]
+#define ER_THD(thd,X) ((thd)->variables.errmsgs[((X)-ER_ERROR_FIRST) / ERRORS_PER_RANGE][(X) % ERRORS_PER_RANGE])
+#define ER(X) ER_THD(current_thd, (X))
+#define ER_THD_OR_DEFAULT(thd,X) ((thd) ? ER_THD(thd, (X)) : ER_DEFAULT(X))
#define ME_INFO (ME_HOLDTANG | ME_NOREFRESH)
#define ME_ERROR (ME_BELL | ME_NOREFRESH)
@@ -82,68 +83,53 @@
/* Defines for use with openfrm, openprt and openfrd */
-#define READ_ALL 1 /* openfrm: Read all parameters */
-#define CHANGE_FRM 2 /* openfrm: open .frm as O_RDWR */
-#define READ_KEYINFO 4 /* L{s nyckeldata fr}n filen */
-#define EXTRA_RECORD 8 /* Reserve space for an extra record */
-#define DONT_OPEN_TABLES 8 /* Don't open database-files (frd) */
-#define DONT_OPEN_MASTER_REG 16 /* Don't open first reg-file (prt) */
-#define EXTRA_LONG_RECORD 16 /* Plats f|r dubbel s|k-record */
-#define COMPUTE_TYPES 32 /* Kontrollera type f|r f{ltena */
-#define SEARCH_PRG 64 /* S|k efter registret i 'prg_dev' */
-#define READ_USED_NAMES 128 /* L{s anv{nda formul{rnamn */
-#define DONT_GIVE_ERROR 256 /* Don't do frm_error on openfrm */
-#define READ_SCREENS 1024 /* Read screens, info and helpfile */
-#define DELAYED_OPEN 4096 /* Open table later */
-#define OPEN_VIEW 8192 /* Allow open on view */
-#define OPEN_VIEW_NO_PARSE 16384 /* Open frm only if it's a view,
- but do not parse view itself */
+#define READ_ALL (1 << 0) /* openfrm: Read all parameters */
+#define EXTRA_RECORD (1 << 3) /* Reserve space for an extra record */
+#define DELAYED_OPEN (1 << 12) /* Open table later */
+#define OPEN_VIEW_NO_PARSE (1 << 14) /* Open frm only if it's a view,
+ but do not parse view itself */
/**
This flag is used in function get_all_tables() which fills
I_S tables with data which are retrieved from frm files and storage engine
The flag means that we need to open FRM file only to get necessary data.
*/
-#define OPEN_FRM_FILE_ONLY 32768
+#define OPEN_FRM_FILE_ONLY (1 << 15)
/**
This flag is used in function get_all_tables() which fills
I_S tables with data which are retrieved from frm files and storage engine
The flag means that we need to process tables only to get necessary data.
Views are not processed.
*/
-#define OPEN_TABLE_ONLY (OPEN_FRM_FILE_ONLY*2)
+#define OPEN_TABLE_ONLY (1 << 16)
/**
This flag is used in function get_all_tables() which fills
I_S tables with data which are retrieved from frm files and storage engine
The flag means that we need to process views only to get necessary data.
Tables are not processed.
*/
-#define OPEN_VIEW_ONLY (OPEN_TABLE_ONLY*2)
+#define OPEN_VIEW_ONLY (1 << 17)
/**
This flag is used in function get_all_tables() which fills
I_S tables with data which are retrieved from frm files and storage engine.
The flag means that we need to open a view using
open_normal_and_derived_tables() function.
*/
-#define OPEN_VIEW_FULL (OPEN_VIEW_ONLY*2)
+#define OPEN_VIEW_FULL (1 << 18)
/**
This flag is used in function get_all_tables() which fills
I_S tables with data which are retrieved from frm files and storage engine.
The flag means that I_S table uses optimization algorithm.
*/
-#define OPTIMIZE_I_S_TABLE (OPEN_VIEW_FULL*2)
+#define OPTIMIZE_I_S_TABLE (1 << 19)
/**
This flag is used to instruct tdc_open_view() to check metadata version.
*/
-#define CHECK_METADATA_VERSION (OPEN_TRIGGER_ONLY*2)
+#define CHECK_METADATA_VERSION (1 << 20)
/*
The flag means that we need to process trigger files only.
*/
-#define OPEN_TRIGGER_ONLY (OPTIMIZE_I_S_TABLE*2)
-
-#define SC_INFO_LENGTH 4 /* Form format constant */
-#define TE_INFO_LENGTH 3
-#define MTYP_NOEMPTY_BIT 128
+#define OPEN_TRIGGER_ONLY (1 << 21)
/*
Minimum length pattern before Turbo Boyer-Moore is used
@@ -210,7 +196,7 @@ static inline bool is_binary_frm_header(uchar *head)
return head[0] == 254
&& head[1] == 1
&& head[2] >= FRM_VER
- && head[2] <= FRM_VER+4;
+ && head[2] <= FRM_VER_CURRENT;
}
#endif
diff --git a/sql/winservice.c b/sql/winservice.c
index efbbb527c9b..f817ab2b142 100644
--- a/sql/winservice.c
+++ b/sql/winservice.c
@@ -108,7 +108,7 @@ BOOL exclude_service(mysqld_service_properties *props)
}
if ((props->version_major == 0) ||
(props->version_major > 5 && props->version_major < 10) ||
- (props->version_major == 5 && props->version_minor > 6))
+ (props->version_major == 5 && props->version_minor > 7))
{
return TRUE;
}
diff --git a/sql/wsrep_applier.cc b/sql/wsrep_applier.cc
index 723804c76db..be6297abd09 100644
--- a/sql/wsrep_applier.cc
+++ b/sql/wsrep_applier.cc
@@ -56,7 +56,6 @@ static Log_event* wsrep_read_log_event(
#include "transaction.h" // trans_commit(), trans_rollback()
#include "rpl_rli.h" // class Relay_log_info;
-#include "sql_base.h" // close_temporary_table()
void wsrep_set_apply_format(THD* thd, Format_description_log_event* ev)
{
@@ -253,6 +252,9 @@ wsrep_cb_status_t wsrep_apply_cb(void* const ctx,
else
thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
+ /* With galera we assume that the master has done the constraint checks */
+ thd->variables.option_bits|= OPTION_NO_CHECK_CONSTRAINT_CHECKS;
+
if (flags & WSREP_FLAG_ISOLATION)
{
thd->wsrep_apply_toi= true;
@@ -277,14 +279,11 @@ wsrep_cb_status_t wsrep_apply_cb(void* const ctx,
wsrep_dump_rbr_buf_with_header(thd, buf, buf_len);
}
- TABLE *tmp;
- while ((tmp = thd->temporary_tables))
+ if (thd->has_thd_temporary_tables())
{
- WSREP_DEBUG("Applier %lu, has temporary tables: %s.%s",
- thd->thread_id,
- (tmp->s) ? tmp->s->db.str : "void",
- (tmp->s) ? tmp->s->table_name.str : "void");
- close_temporary_table(thd, tmp, 1, 1);
+ WSREP_DEBUG("Applier %lld has temporary tables. Closing them now..",
+ thd->thread_id);
+ thd->close_temporary_tables();
}
return rcode;
diff --git a/sql/wsrep_binlog.cc b/sql/wsrep_binlog.cc
index 39c77133f02..292cbe6ee25 100644
--- a/sql/wsrep_binlog.cc
+++ b/sql/wsrep_binlog.cc
@@ -318,9 +318,9 @@ int wsrep_write_cache(wsrep_t* const wsrep,
void wsrep_dump_rbr_buf(THD *thd, const void* rbr_buf, size_t buf_len)
{
- int len= snprintf(NULL, 0, "%s/GRA_%ld_%lld.log",
- wsrep_data_home_dir, thd->thread_id,
- (long long)wsrep_thd_trx_seqno(thd));
+ int len= snprintf(NULL, 0, "%s/GRA_%lld_%lld.log",
+ wsrep_data_home_dir, (longlong) thd->thread_id,
+ (longlong) wsrep_thd_trx_seqno(thd));
if (len < 0)
{
WSREP_ERROR("snprintf error: %d, skipping dump.", len);
@@ -332,8 +332,8 @@ void wsrep_dump_rbr_buf(THD *thd, const void* rbr_buf, size_t buf_len)
*/
char *filename= (char *)malloc(len+1);
- int len1= snprintf(filename, len+1, "%s/GRA_%ld_%lld.log",
- wsrep_data_home_dir, thd->thread_id,
+ int len1= snprintf(filename, len+1, "%s/GRA_%lld_%lld.log",
+ wsrep_data_home_dir, (longlong) thd->thread_id,
(long long)wsrep_thd_trx_seqno(thd));
if (len > len1)
@@ -394,9 +394,9 @@ int wsrep_binlog_savepoint_rollback(THD *thd, void *sv)
void wsrep_dump_rbr_direct(THD* thd, IO_CACHE* cache)
{
char filename[PATH_MAX]= {0};
- int len= snprintf(filename, PATH_MAX, "%s/GRA_%ld_%lld.log",
- wsrep_data_home_dir, thd->thread_id,
- (long long)wsrep_thd_trx_seqno(thd));
+ int len= snprintf(filename, PATH_MAX, "%s/GRA_%lld_%lld.log",
+ wsrep_data_home_dir, (longlong) thd->thread_id,
+ (longlong) wsrep_thd_trx_seqno(thd));
size_t bytes_in_cache = 0;
// check path
if (len >= PATH_MAX)
@@ -470,9 +470,8 @@ void wsrep_dump_rbr_buf_with_header(THD *thd, const void *rbr_buf,
Format_description_log_event *ev=NULL;
longlong thd_trx_seqno= (long long)wsrep_thd_trx_seqno(thd);
-
- int len= snprintf(NULL, 0, "%s/GRA_%ld_%lld_v2.log",
- wsrep_data_home_dir, thd->thread_id,
+ int len= snprintf(NULL, 0, "%s/GRA_%lld_%lld_v2.log",
+ wsrep_data_home_dir, (longlong)thd->thread_id,
thd_trx_seqno);
/*
len doesn't count the \0 end-of-string. Use len+1 below
@@ -485,8 +484,8 @@ void wsrep_dump_rbr_buf_with_header(THD *thd, const void *rbr_buf,
DBUG_VOID_RETURN;
}
- int len1= snprintf(filename, len+1, "%s/GRA_%ld_%lld_v2.log",
- wsrep_data_home_dir, thd->thread_id,
+ int len1= snprintf(filename, len+1, "%s/GRA_%lld_%lld_v2.log",
+ wsrep_data_home_dir, (longlong) thd->thread_id,
thd_trx_seqno);
if (len > len1)
diff --git a/sql/wsrep_check_opts.cc b/sql/wsrep_check_opts.cc
index 28bd3a4492b..032b2d98005 100644
--- a/sql/wsrep_check_opts.cc
+++ b/sql/wsrep_check_opts.cc
@@ -18,8 +18,6 @@
#include "sys_vars_shared.h"
#include "wsrep.h"
#include "wsrep_sst.h"
-//#include <sql_class.h>
-//#include "wsrep_mysqld.h"
extern char *my_bind_addr_str;
diff --git a/sql/wsrep_dummy.cc b/sql/wsrep_dummy.cc
index 795e2d19252..aff75cf7790 100644
--- a/sql/wsrep_dummy.cc
+++ b/sql/wsrep_dummy.cc
@@ -133,6 +133,12 @@ void wsrep_thd_auto_increment_variables(THD *thd,
*increment= thd->variables.auto_increment_increment;
}
+void wsrep_set_load_multi_commit(THD *thd, bool split)
+{ }
+
+bool wsrep_is_load_multi_commit(THD *thd)
+{ return false; }
+
int wsrep_trx_is_aborting(THD *)
{ return 0; }
@@ -141,3 +147,6 @@ void wsrep_unlock_rollback()
void wsrep_set_data_home_dir(const char *)
{ }
+
+my_bool wsrep_thd_is_applier(MYSQL_THD thd)
+{ return false; }
diff --git a/sql/wsrep_hton.cc b/sql/wsrep_hton.cc
index a935f8c69b8..3603e05fd5f 100644
--- a/sql/wsrep_hton.cc
+++ b/sql/wsrep_hton.cc
@@ -45,6 +45,7 @@ void wsrep_cleanup_transaction(THD *thd)
thd->wsrep_exec_mode= LOCAL_STATE;
thd->wsrep_affected_rows= 0;
thd->wsrep_skip_wsrep_GTID= false;
+ thd->wsrep_split_flag= false;
return;
}
@@ -412,9 +413,9 @@ wsrep_run_wsrep_commit(THD *thd, bool all)
&wtime);
if (replay_round++ % 100000 == 0)
- WSREP_DEBUG("commit waiting for replaying: replayers %d, thd: (%lu) "
+ WSREP_DEBUG("commit waiting for replaying: replayers %d, thd: %lld "
"conflict: %d (round: %d)",
- wsrep_replaying, thd->thread_id,
+ wsrep_replaying, (longlong) thd->thread_id,
thd->wsrep_conflict_state, replay_round);
mysql_mutex_unlock(&LOCK_wsrep_replaying);
@@ -478,11 +479,11 @@ wsrep_run_wsrep_commit(THD *thd, bool all)
if (WSREP_UNDEFINED_TRX_ID == thd->wsrep_ws_handle.trx_id)
{
- WSREP_WARN("SQL statement was ineffective, THD: %lu, buf: %zu\n"
+ WSREP_WARN("SQL statement was ineffective thd: %lld buf: %zu\n"
"schema: %s \n"
"QUERY: %s\n"
" => Skipping replication",
- thd->thread_id, data_len,
+ (longlong) thd->thread_id, data_len,
(thd->db ? thd->db : "(null)"), thd->query());
rcode = WSREP_TRX_FAIL;
}
@@ -498,20 +499,22 @@ wsrep_run_wsrep_commit(THD *thd, bool all)
&thd->wsrep_trx_meta);
if (rcode == WSREP_TRX_MISSING) {
- WSREP_WARN("Transaction missing in provider, thd: %ld, schema: %s, SQL: %s",
- thd->thread_id, (thd->db ? thd->db : "(null)"), thd->query());
+ WSREP_WARN("Transaction missing in provider, thd: %lld schema: %s SQL: %s",
+ (longlong) thd->thread_id,
+ (thd->db ? thd->db : "(null)"), thd->query());
rcode = WSREP_TRX_FAIL;
} else if (rcode == WSREP_BF_ABORT) {
- WSREP_DEBUG("thd %lu seqno %lld BF aborted by provider, will replay",
- thd->thread_id, (long long)thd->wsrep_trx_meta.gtid.seqno);
+ WSREP_DEBUG("thd: %lld seqno: %lld BF aborted by provider, will replay",
+ (longlong) thd->thread_id,
+ (longlong) thd->wsrep_trx_meta.gtid.seqno);
mysql_mutex_lock(&thd->LOCK_thd_data);
thd->wsrep_conflict_state = MUST_REPLAY;
DBUG_ASSERT(wsrep_thd_trx_seqno(thd) > 0);
mysql_mutex_unlock(&thd->LOCK_thd_data);
mysql_mutex_lock(&LOCK_wsrep_replaying);
wsrep_replaying++;
- WSREP_DEBUG("replaying increased: %d, thd: %lu",
- wsrep_replaying, thd->thread_id);
+ WSREP_DEBUG("replaying increased: %d, thd: %lld",
+ wsrep_replaying, (longlong) thd->thread_id);
mysql_mutex_unlock(&LOCK_wsrep_replaying);
}
} else {
@@ -538,9 +541,9 @@ wsrep_run_wsrep_commit(THD *thd, bool all)
if (thd->wsrep_conflict_state != NO_CONFLICT)
{
- WSREP_WARN("thd %lu seqno %lld: conflict state %d after post commit",
- thd->thread_id,
- (long long)thd->wsrep_trx_meta.gtid.seqno,
+ WSREP_WARN("thd: %llu seqno: %lld conflict state %d after post commit",
+ (longlong) thd->thread_id,
+ (longlong) thd->wsrep_trx_meta.gtid.seqno,
thd->wsrep_conflict_state);
}
thd->wsrep_exec_mode= LOCAL_COMMIT;
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index ee8509e3fa2..08efa0086a7 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -55,52 +55,61 @@ rpl_sidno wsrep_sidno= -1;
my_bool wsrep_preordered_opt= FALSE;
/*
- * Begin configuration options and their default values
+ * Begin configuration options
*/
extern my_bool plugins_are_initialized;
extern uint kill_cached_threads;
extern mysql_cond_t COND_thread_cache;
-const char* wsrep_data_home_dir = NULL;
-const char* wsrep_dbug_option = "";
-
-long wsrep_slave_threads = 1; // # of slave action appliers wanted
-int wsrep_slave_count_change = 0; // # of appliers to stop or start
-my_bool wsrep_debug = 0; // enable debug level logging
-my_bool wsrep_convert_LOCK_to_trx = 1; // convert locking sessions to trx
-ulong wsrep_retry_autocommit = 5; // retry aborted autocommit trx
-my_bool wsrep_auto_increment_control = 1; // control auto increment variables
-my_bool wsrep_drupal_282555_workaround = 1; // retry autoinc insert after dupkey
-my_bool wsrep_incremental_data_collection = 0; // incremental data collection
-ulong wsrep_max_ws_size = 1073741824UL;//max ws (RBR buffer) size
-ulong wsrep_max_ws_rows = 65536; // max number of rows in ws
-int wsrep_to_isolation = 0; // # of active TO isolation threads
-my_bool wsrep_certify_nonPK = 1; // certify, even when no primary key
+/* System variables. */
+const char *wsrep_provider;
+const char *wsrep_provider_options;
+const char *wsrep_cluster_address;
+const char *wsrep_cluster_name;
+const char *wsrep_node_name;
+const char *wsrep_node_address;
+const char *wsrep_node_incoming_address;
+const char *wsrep_start_position;
+const char *wsrep_data_home_dir;
+const char *wsrep_dbug_option;
+const char *wsrep_notify_cmd;
+
+my_bool wsrep_debug; // Enable debug level logging
+my_bool wsrep_convert_LOCK_to_trx; // Convert locking sessions to trx
+my_bool wsrep_auto_increment_control; // Control auto increment variables
+my_bool wsrep_drupal_282555_workaround; // Retry autoinc insert after dupkey
+my_bool wsrep_certify_nonPK; // Certify, even when no primary key
ulong wsrep_certification_rules = WSREP_CERTIFICATION_RULES_STRICT;
-long wsrep_max_protocol_version = 3; // maximum protocol version to use
-ulong wsrep_forced_binlog_format = BINLOG_FORMAT_UNSPEC;
-my_bool wsrep_recovery = 0; // recovery
-my_bool wsrep_replicate_myisam = 0; // enable myisam replication
-my_bool wsrep_log_conflicts = 0;
-ulong wsrep_mysql_replication_bundle = 0;
-my_bool wsrep_desync = 0; // desynchronize the node from the
- // cluster
-my_bool wsrep_load_data_splitting = 1; // commit load data every 10K intervals
-my_bool wsrep_restart_slave = 0; // should mysql slave thread be
- // restarted, if node joins back
-my_bool wsrep_restart_slave_activated = 0; // node has dropped, and slave
- // restart will be needed
-my_bool wsrep_slave_UK_checks = 0; // slave thread does UK checks
-my_bool wsrep_slave_FK_checks = 0; // slave thread does FK checks
-bool wsrep_new_cluster = false; // Bootstrap the cluster ?
-
-// Use wsrep_gtid_domain_id for galera transactions?
-bool wsrep_gtid_mode = 0;
-// gtid_domain_id for galera transactions.
-uint32 wsrep_gtid_domain_id = 0;
-// Allow reads even if the node is not in the primary component.
-bool wsrep_dirty_reads = false;
+my_bool wsrep_recovery; // Recovery
+my_bool wsrep_replicate_myisam; // Enable MyISAM replication
+my_bool wsrep_log_conflicts;
+my_bool wsrep_load_data_splitting; // Commit load data every 10K intervals
+my_bool wsrep_slave_UK_checks; // Slave thread does UK checks
+my_bool wsrep_slave_FK_checks; // Slave thread does FK checks
+my_bool wsrep_restart_slave; // Should mysql slave thread be
+ // restarted, when node joins back?
+my_bool wsrep_desync; // De(re)synchronize the node from the
+ // cluster
+long wsrep_slave_threads; // No. of slave appliers threads
+ulong wsrep_retry_autocommit; // Retry aborted autocommit trx
+ulong wsrep_max_ws_size; // Max allowed ws (RBR buffer) size
+ulong wsrep_max_ws_rows; // Max number of rows in ws
+ulong wsrep_forced_binlog_format;
+ulong wsrep_mysql_replication_bundle;
+bool wsrep_gtid_mode; // Use wsrep_gtid_domain_id
+ // for galera transactions?
+uint32 wsrep_gtid_domain_id; // gtid_domain_id for galera
+ // transactions
+
+/* Other configuration variables and their default values. */
+my_bool wsrep_incremental_data_collection= 0; // Incremental data collection
+my_bool wsrep_restart_slave_activated= 0; // Node has dropped, and slave
+ // restart will be needed
+bool wsrep_new_cluster= false; // Bootstrap the cluster?
+int wsrep_slave_count_change= 0; // No. of appliers to stop/start
+int wsrep_to_isolation= 0; // No. of active TO isolation threads
+long wsrep_max_protocol_version= 3; // Maximum protocol version to use
/*
* End configuration options
@@ -207,7 +216,7 @@ wsrep_uuid_t local_uuid = WSREP_UUID_UNDEFINED;
wsrep_seqno_t local_seqno = WSREP_SEQNO_UNDEFINED;
long wsrep_protocol_version = 3;
-wsp::Config_state wsrep_config_state;
+wsp::Config_state *wsrep_config_state;
// Boolean denoting if server is in initial startup phase. This is needed
// to make sure that main thread waiting in wsrep_sst_wait() is signaled
@@ -286,7 +295,7 @@ wsrep_view_handler_cb (void* app_ctx,
*sst_req = NULL;
*sst_req_len = 0;
- wsrep_member_status_t memb_status= wsrep_config_state.get_status();
+ wsrep_member_status_t memb_status= wsrep_config_state->get_status();
if (memcmp(&cluster_uuid, &view->state_id.uuid, sizeof(wsrep_uuid_t)))
{
@@ -446,7 +455,7 @@ wsrep_view_handler_cb (void* app_ctx,
out:
if (view->status == WSREP_VIEW_PRIMARY) wsrep_startup= FALSE;
- wsrep_config_state.set(memb_status, view);
+ wsrep_config_state->set(memb_status, view);
return WSREP_CB_SUCCESS;
}
@@ -498,7 +507,7 @@ static void wsrep_synced_cb(void* app_ctx)
{
WSREP_INFO("Synchronized with group, ready for connections");
my_bool signal_main= wsrep_ready_set(TRUE);
- wsrep_config_state.set(WSREP_MEMBER_SYNCED);
+ wsrep_config_state->set(WSREP_MEMBER_SYNCED);
if (signal_main)
{
@@ -569,10 +578,13 @@ int wsrep_init()
int rcode= -1;
DBUG_ASSERT(wsrep_inited == 0);
- if (strcmp(wsrep_start_position, WSREP_START_POSITION_ZERO))
- wsrep_start_position_init(wsrep_start_position);
+ if (strcmp(wsrep_start_position, WSREP_START_POSITION_ZERO) &&
+ wsrep_start_position_init(wsrep_start_position))
+ {
+ return 1;
+ }
- wsrep_sst_auth_init(wsrep_sst_auth);
+ wsrep_sst_auth_init();
wsrep_ready_set(FALSE);
assert(wsrep_provider);
@@ -766,6 +778,8 @@ done:
/* Initialize wsrep thread LOCKs and CONDs */
void wsrep_thr_init()
{
+ DBUG_ENTER("wsrep_thr_init");
+ wsrep_config_state = new wsp::Config_state;
#ifdef HAVE_PSI_INTERFACE
mysql_mutex_register("sql", wsrep_mutexes, array_elements(wsrep_mutexes));
mysql_cond_register("sql", wsrep_conds, array_elements(wsrep_conds));
@@ -785,6 +799,7 @@ void wsrep_thr_init()
mysql_mutex_init(key_LOCK_wsrep_slave_threads, &LOCK_wsrep_slave_threads, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_wsrep_desync, &LOCK_wsrep_desync, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_wsrep_config_state, &LOCK_wsrep_config_state, MY_MUTEX_INIT_FAST);
+ DBUG_VOID_RETURN;
}
void wsrep_init_startup (bool first)
@@ -812,7 +827,7 @@ void wsrep_init_startup (bool first)
if (!strcmp(wsrep_provider, WSREP_NONE)) return;
/* Skip replication start if no cluster address */
- if (!wsrep_cluster_address || strlen(wsrep_cluster_address) == 0) return;
+ if (!wsrep_cluster_address || wsrep_cluster_address[0] == 0) return;
if (first) wsrep_sst_grab(); // do it so we can wait for SST below
@@ -845,6 +860,8 @@ void wsrep_deinit(bool free_options)
/* Destroy wsrep thread LOCKs and CONDs */
void wsrep_thr_deinit()
{
+ if (!wsrep_config_state)
+ return; // Never initialized
mysql_mutex_destroy(&LOCK_wsrep_ready);
mysql_cond_destroy(&COND_wsrep_ready);
mysql_mutex_destroy(&LOCK_wsrep_sst);
@@ -858,6 +875,8 @@ void wsrep_thr_deinit()
mysql_mutex_destroy(&LOCK_wsrep_slave_threads);
mysql_mutex_destroy(&LOCK_wsrep_desync);
mysql_mutex_destroy(&LOCK_wsrep_config_state);
+ delete wsrep_config_state;
+ wsrep_config_state= 0; // Safety
}
void wsrep_recover()
@@ -921,7 +940,7 @@ bool wsrep_start_replication()
return true;
}
- if (!wsrep_cluster_address || strlen(wsrep_cluster_address)== 0)
+ if (!wsrep_cluster_address || wsrep_cluster_address[0]== 0)
{
// if provider is non-trivial, but no address is specified, wait for address
wsrep_ready_set(FALSE);
@@ -1254,9 +1273,11 @@ int wsrep_to_buf_helper(
65536, MYF(MY_WME)))
return 1;
int ret(0);
+ enum enum_binlog_checksum_alg current_binlog_check_alg=
+ (enum_binlog_checksum_alg) binlog_checksum_options;
Format_description_log_event *tmp_fd= new Format_description_log_event(4);
- tmp_fd->checksum_alg= (enum_binlog_checksum_alg)binlog_checksum_options;
+ tmp_fd->checksum_alg= current_binlog_check_alg;
writer.write(tmp_fd);
delete tmp_fd;
@@ -1285,11 +1306,13 @@ int wsrep_to_buf_helper(
Query_log_event ev(thd, thd->wsrep_TOI_pre_query,
thd->wsrep_TOI_pre_query_len,
FALSE, FALSE, FALSE, 0);
+ ev.checksum_alg= current_binlog_check_alg;
if (writer.write(&ev)) ret= 1;
}
/* continue to append the actual query */
Query_log_event ev(thd, query, query_len, FALSE, FALSE, FALSE, 0);
+ ev.checksum_alg= current_binlog_check_alg;
if (!ret && writer.write(&ev)) ret= 1;
if (!ret && wsrep_write_cache_buf(&tmp_io_cache, buf, buf_len)) ret= 1;
close_cached_file(&tmp_io_cache);
@@ -1429,7 +1452,7 @@ static int wsrep_drop_table_query(THD* thd, uchar** buf, size_t* buf_len)
bool found_temp_table= false;
for (TABLE_LIST* table= first_table; table; table= table->next_global)
{
- if (find_temporary_table(thd, table->db, table->table_name))
+ if (thd->find_temporary_table(table->db, table->table_name))
{
found_temp_table= true;
break;
@@ -1444,7 +1467,7 @@ static int wsrep_drop_table_query(THD* thd, uchar** buf, size_t* buf_len)
for (TABLE_LIST* table= first_table; table; table= table->next_global)
{
- if (!find_temporary_table(thd, table->db, table->table_name))
+ if (!thd->find_temporary_table(table->db, table->table_name))
{
append_identifier(thd, &buff, table->db, strlen(table->db));
buff.append(".");
@@ -1514,7 +1537,7 @@ static bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table,
*/
for (TABLE_LIST* it= first_table->next_global; it; it= it->next_global)
{
- if (find_temporary_table(thd, it))
+ if (thd->find_temporary_table(it))
{
return false;
}
@@ -1526,14 +1549,14 @@ static bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table,
DBUG_ASSERT(!table_list);
DBUG_ASSERT(first_table);
- if (find_temporary_table(thd, first_table))
+ if (thd->find_temporary_table(first_table))
{
return false;
}
return true;
default:
- if (table && !find_temporary_table(thd, db, table))
+ if (table && !thd->find_temporary_table(db, table))
{
return true;
}
@@ -1542,7 +1565,7 @@ static bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table,
{
for (TABLE_LIST* table= first_table; table; table= table->next_global)
{
- if (!find_temporary_table(thd, table->db, table->table_name))
+ if (!thd->find_temporary_table(table->db, table->table_name))
{
return true;
}
@@ -1653,8 +1676,8 @@ static int wsrep_TOI_begin(THD *thd, char *db_, char *table_,
ret,
(thd->db ? thd->db : "(null)"),
(thd->query()) ? thd->query() : "void");
- my_error(ER_LOCK_DEADLOCK, MYF(0), "WSREP replication failed. Check "
- "your wsrep connection state and retry the query.");
+ my_message(ER_LOCK_DEADLOCK, "WSREP replication failed. Check "
+ "your wsrep connection state and retry the query.", MYF(0));
wsrep_keys_free(&key_arr);
rc= -1;
}
@@ -1791,8 +1814,8 @@ int wsrep_to_isolation_begin(THD *thd, char *db_, char *table_,
if (thd->wsrep_conflict_state == MUST_ABORT)
{
- WSREP_INFO("thread: %lu, schema: %s, query: %s has been aborted due to multi-master conflict",
- thd->thread_id,
+ WSREP_INFO("thread: %lld schema: %s query: %s has been aborted due to multi-master conflict",
+ (longlong) thd->thread_id,
(thd->db ? thd->db : "(null)"),
thd->query());
mysql_mutex_unlock(&thd->LOCK_thd_data);
@@ -1805,15 +1828,15 @@ int wsrep_to_isolation_begin(THD *thd, char *db_, char *table_,
if (thd->global_read_lock.can_acquire_protection())
{
- WSREP_DEBUG("Aborting TOI: Global Read-Lock (FTWRL) in place: %s %lu",
- thd->query(), thd->thread_id);
+ WSREP_DEBUG("Aborting TOI: Global Read-Lock (FTWRL) in place: %s %lld",
+ thd->query(), (longlong) thd->thread_id);
return -1;
}
if (wsrep_debug && thd->mdl_context.has_locks())
{
- WSREP_DEBUG("thread holds MDL locks at TI begin: %s %lu",
- thd->query(), thd->thread_id);
+ WSREP_DEBUG("thread holds MDL locks at TI begin: %s %lld",
+ thd->query(), (longlong) thd->thread_id);
}
/*
@@ -1879,13 +1902,13 @@ void wsrep_to_isolation_end(THD *thd)
WSREP_##severity( \
"%s\n" \
"schema: %.*s\n" \
- "request: (%lu \tseqno %lld \twsrep (%d, %d, %d) cmd %d %d \t%s)\n" \
- "granted: (%lu \tseqno %lld \twsrep (%d, %d, %d) cmd %d %d \t%s)", \
+ "request: (%lld \tseqno %lld \twsrep (%d, %d, %d) cmd %d %d \t%s)\n" \
+ "granted: (%lld \tseqno %lld \twsrep (%d, %d, %d) cmd %d %d \t%s)", \
msg, schema_len, schema, \
- req->thread_id, (long long)wsrep_thd_trx_seqno(req), \
+ (longlong) req->thread_id, (long long)wsrep_thd_trx_seqno(req), \
req->wsrep_exec_mode, req->wsrep_query_state, req->wsrep_conflict_state, \
req->get_command(), req->lex->sql_command, req->query(), \
- gra->thread_id, (long long)wsrep_thd_trx_seqno(gra), \
+ (longlong) gra->thread_id, (long long)wsrep_thd_trx_seqno(gra), \
gra->wsrep_exec_mode, gra->wsrep_query_state, gra->wsrep_conflict_state, \
gra->get_command(), gra->lex->sql_command, gra->query());
@@ -1899,22 +1922,41 @@ void wsrep_to_isolation_end(THD *thd)
@retval FALSE Lock request cannot be granted
*/
-bool
-wsrep_grant_mdl_exception(MDL_context *requestor_ctx,
- MDL_ticket *ticket,
- const MDL_key *key
-) {
+bool wsrep_grant_mdl_exception(MDL_context *requestor_ctx,
+ MDL_ticket *ticket,
+ const MDL_key *key)
+{
/* Fallback to the non-wsrep behaviour */
if (!WSREP_ON) return FALSE;
- THD *request_thd = requestor_ctx->get_thd();
- THD *granted_thd = ticket->get_ctx()->get_thd();
- bool ret = FALSE;
+ THD *request_thd= requestor_ctx->get_thd();
+ THD *granted_thd= ticket->get_ctx()->get_thd();
+ bool ret= false;
const char* schema= key->db_name();
int schema_len= key->db_name_length();
mysql_mutex_lock(&request_thd->LOCK_thd_data);
+
+ /*
+ We consider granting MDL exceptions only for appliers (BF THD) and ones
+ executing under TOI mode.
+
+ Rules:
+ 1. If granted/owner THD is also an applier (BF THD) or one executing
+ under TOI mode, then we grant the requested lock to the requester
+ THD.
+ @return true
+
+ 2. If granted/owner THD is executing a FLUSH command or already has an
+ explicit lock, then do not grant the requested lock to the requester
+ THD and it has to wait.
+ @return false
+
+ 3. In all other cases the granted/owner THD is aborted and the requested
+ lock is not granted to the requester THD, thus it has to wait.
+ @return false
+ */
if (request_thd->wsrep_exec_mode == TOTAL_ORDER ||
request_thd->wsrep_exec_mode == REPL_RECV)
{
@@ -1931,7 +1973,7 @@ wsrep_grant_mdl_exception(MDL_context *requestor_ctx,
request_thd, granted_thd);
ticket->wsrep_report(true);
mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
- ret = TRUE;
+ ret= true;
}
else if (granted_thd->lex->sql_command == SQLCOM_FLUSH ||
granted_thd->mdl_context.has_explicit_locks())
@@ -1939,38 +1981,39 @@ wsrep_grant_mdl_exception(MDL_context *requestor_ctx,
WSREP_DEBUG("BF thread waiting for FLUSH");
ticket->wsrep_report(wsrep_debug);
mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
- ret = FALSE;
- }
- else if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE)
- {
- WSREP_DEBUG("DROP caused BF abort, conf %d", granted_thd->wsrep_conflict_state);
- ticket->wsrep_report(wsrep_debug);
- mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
- wsrep_abort_thd((void*)request_thd, (void*)granted_thd, 1);
- ret = FALSE;
- }
- else if (granted_thd->wsrep_query_state == QUERY_COMMITTING)
- {
- WSREP_DEBUG("MDL granted, but committing thd abort scheduled");
- ticket->wsrep_report(wsrep_debug);
- mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
- wsrep_abort_thd((void*)request_thd, (void*)granted_thd, 1);
- ret = FALSE;
+ ret= false;
}
else
{
- WSREP_MDL_LOG(DEBUG, "MDL conflict-> BF abort", schema, schema_len,
- request_thd, granted_thd);
- ticket->wsrep_report(wsrep_debug);
+ /* Print some debug information. */
+ if (wsrep_debug)
+ {
+ if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE)
+ {
+ WSREP_DEBUG("DROP caused BF abort, conf %d", granted_thd->wsrep_conflict_state);
+ }
+ else if (granted_thd->wsrep_query_state == QUERY_COMMITTING)
+ {
+ WSREP_DEBUG("MDL granted, but committing thd abort scheduled");
+ }
+ else
+ {
+ WSREP_MDL_LOG(DEBUG, "MDL conflict-> BF abort", schema, schema_len,
+ request_thd, granted_thd);
+ }
+ ticket->wsrep_report(true);
+ }
+
mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
- wsrep_abort_thd((void*)request_thd, (void*)granted_thd, 1);
- ret = FALSE;
+ wsrep_abort_thd((void *) request_thd, (void *) granted_thd, 1);
+ ret= false;
}
}
else
{
mysql_mutex_unlock(&request_thd->LOCK_thd_data);
}
+
return ret;
}
@@ -1980,13 +2023,12 @@ pthread_handler_t start_wsrep_THD(void *arg)
THD *thd;
wsrep_thd_processor_fun processor= (wsrep_thd_processor_fun)arg;
- if (my_thread_init() || (!(thd= new THD(true))))
+ if (my_thread_init() || (!(thd= new THD(next_thread_id(), true))))
{
goto error;
}
mysql_mutex_lock(&LOCK_thread_count);
- thd->thread_id=thread_id++;
if (wsrep_gtid_mode)
{
@@ -1995,7 +2037,6 @@ pthread_handler_t start_wsrep_THD(void *arg)
}
thd->real_id=pthread_self(); // Keep purify happy
- thread_count++;
thread_created++;
threads.append(thd);
@@ -2087,14 +2128,14 @@ pthread_handler_t start_wsrep_THD(void *arg)
// at server shutdown
}
- my_thread_end();
if (thread_handling > SCHEDULER_ONE_THREAD_PER_CONNECTION)
{
mysql_mutex_lock(&LOCK_thread_count);
- delete thd;
- thread_count--;
+ thd->unlink();
mysql_mutex_unlock(&LOCK_thread_count);
+ delete thd;
}
+ my_thread_end();
return(NULL);
error:
@@ -2161,8 +2202,8 @@ static bool have_client_connections()
I_List_iterator<THD> it(threads);
while ((tmp=it++))
{
- DBUG_PRINT("quit",("Informing thread %ld that it's time to die",
- tmp->thread_id));
+ DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
+ (longlong) tmp->thread_id));
if (is_client_connection(tmp) && tmp->killed == KILL_CONNECTION)
{
(void)abort_replicated(tmp);
@@ -2246,8 +2287,8 @@ void wsrep_close_client_connections(my_bool wait_to_end, THD *except_caller_thd)
I_List_iterator<THD> it(threads);
while ((tmp=it++))
{
- DBUG_PRINT("quit",("Informing thread %ld that it's time to die",
- tmp->thread_id));
+ DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
+ (longlong) tmp->thread_id));
/* We skip slave threads & scheduler on this first loop through. */
if (!is_client_connection(tmp))
continue;
@@ -2268,7 +2309,7 @@ void wsrep_close_client_connections(my_bool wait_to_end, THD *except_caller_thd)
if (abort_replicated(tmp))
continue;
- WSREP_DEBUG("closing connection %ld", tmp->thread_id);
+ WSREP_DEBUG("closing connection %lld", (longlong) tmp->thread_id);
/*
instead of wsrep_close_thread() we do now soft kill by THD::awake
@@ -2299,7 +2340,7 @@ void wsrep_close_client_connections(my_bool wait_to_end, THD *except_caller_thd)
!is_replaying_connection(tmp) &&
tmp != except_caller_thd)
{
- WSREP_INFO("killing local connection: %ld",tmp->thread_id);
+ WSREP_INFO("killing local connection: %lld", (longlong) tmp->thread_id);
close_connection(tmp,0);
}
#endif
@@ -2324,7 +2365,7 @@ void wsrep_close_client_connections(my_bool wait_to_end, THD *except_caller_thd)
void wsrep_close_applier(THD *thd)
{
- WSREP_DEBUG("closing applier %ld", thd->thread_id);
+ WSREP_DEBUG("closing applier %lld", (longlong) thd->thread_id);
wsrep_close_thread(thd);
}
@@ -2337,12 +2378,12 @@ void wsrep_close_threads(THD *thd)
I_List_iterator<THD> it(threads);
while ((tmp=it++))
{
- DBUG_PRINT("quit",("Informing thread %ld that it's time to die",
- tmp->thread_id));
+ DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
+ (longlong) tmp->thread_id));
/* We skip slave threads & scheduler on this first loop through. */
if (tmp->wsrep_applier && tmp != thd)
{
- WSREP_DEBUG("closing wsrep thread %ld", tmp->thread_id);
+ WSREP_DEBUG("closing wsrep thread %lld", (longlong) tmp->thread_id);
wsrep_close_thread (tmp);
}
}
@@ -2415,7 +2456,7 @@ static int wsrep_create_sp(THD *thd, uchar** buf, size_t* buf_len)
{
String log_query;
sp_head *sp = thd->lex->sphead;
- ulong saved_mode= thd->variables.sql_mode;
+ sql_mode_t saved_mode= thd->variables.sql_mode;
String retstr(64);
retstr.set_charset(system_charset_info);
@@ -2679,26 +2720,13 @@ bool wsrep_create_like_table(THD* thd, TABLE_LIST* table,
TABLE_LIST* src_table,
HA_CREATE_INFO *create_info)
{
- TABLE *tmp_table;
- bool is_tmp_table= FALSE;
-
- for (tmp_table= thd->temporary_tables; tmp_table; tmp_table=tmp_table->next)
- {
- if (!strcmp(src_table->db, tmp_table->s->db.str) &&
- !strcmp(src_table->table_name, tmp_table->s->table_name.str))
- {
- is_tmp_table= TRUE;
- break;
- }
- }
if (create_info->tmp_table())
{
-
/* CREATE TEMPORARY TABLE LIKE must be skipped from replication */
WSREP_DEBUG("CREATE TEMPORARY TABLE LIKE... skipped replication\n %s",
thd->query());
}
- else if (!is_tmp_table)
+ else if (!(thd->find_temporary_table(src_table)))
{
/* this is straight CREATE TABLE LIKE... eith no tmp tables */
WSREP_TO_ISOLATION_BEGIN(table->db, table->table_name, NULL);
@@ -2713,7 +2741,7 @@ bool wsrep_create_like_table(THD* thd, TABLE_LIST* table,
bzero((void*) &tbl, sizeof(tbl));
tbl.db= src_table->db;
tbl.table_name= tbl.alias= src_table->table_name;
- tbl.table= tmp_table;
+ tbl.table= src_table->table;
char buf[2048];
String query(buf, sizeof(buf), system_charset_info);
query.length(0); // Have to zero it since constructor doesn't
@@ -2782,10 +2810,11 @@ static int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len)
append_definer(thd, &stmt_query, &definer_user, &definer_host);
LEX_STRING stmt_definition;
+ uint not_used;
stmt_definition.str= (char*) thd->lex->stmt_definition_begin;
stmt_definition.length= thd->lex->stmt_definition_end
- thd->lex->stmt_definition_begin;
- trim_whitespace(thd->charset(), & stmt_definition);
+ trim_whitespace(thd->charset(), &stmt_definition, &not_used);
stmt_query.append(stmt_definition.str, stmt_definition.length);
@@ -2869,10 +2898,10 @@ void wsrep_aborting_thd_enqueue(THD *thd)
bool wsrep_node_is_donor()
{
- return (WSREP_ON) ? (wsrep_config_state.get_status() == 2) : false;
+ return (WSREP_ON) ? (wsrep_config_state->get_status() == 2) : false;
}
bool wsrep_node_is_synced()
{
- return (WSREP_ON) ? (wsrep_config_state.get_status() == 4) : false;
+ return (WSREP_ON) ? (wsrep_config_state->get_status() == 4) : false;
}
diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h
index 2b55fbe42ee..0b1e75102e8 100644
--- a/sql/wsrep_mysqld.h
+++ b/sql/wsrep_mysqld.h
@@ -92,7 +92,6 @@ extern ulong wsrep_running_threads;
extern bool wsrep_new_cluster;
extern bool wsrep_gtid_mode;
extern uint32 wsrep_gtid_domain_id;
-extern bool wsrep_dirty_reads;
enum enum_wsrep_reject_types {
WSREP_REJECT_NONE, /* nothing rejected */
@@ -342,6 +341,10 @@ bool wsrep_prepare_keys_for_isolation(THD* thd,
wsrep_key_arr_t* ka);
void wsrep_keys_free(wsrep_key_arr_t* key_arr);
+#define WSREP_BINLOG_FORMAT(my_format) \
+ ((wsrep_forced_binlog_format != BINLOG_FORMAT_UNSPEC) ? \
+ wsrep_forced_binlog_format : my_format)
+
#else /* WITH_WSREP */
#define WSREP(T) (0)
@@ -371,5 +374,6 @@ void wsrep_keys_free(wsrep_key_arr_t* key_arr);
#define wsrep_thr_init() do {} while(0)
#define wsrep_thr_deinit() do {} while(0)
#define wsrep_running_threads (0)
+#define WSREP_BINLOG_FORMAT(my_format) my_format
#endif /* WITH_WSREP */
#endif /* WSREP_MYSQLD_H */
diff --git a/sql/wsrep_notify.cc b/sql/wsrep_notify.cc
index 20cc9111a72..92c685ba485 100644
--- a/sql/wsrep_notify.cc
+++ b/sql/wsrep_notify.cc
@@ -17,7 +17,6 @@
#include "wsrep_priv.h"
#include "wsrep_utils.h"
-const char* wsrep_notify_cmd="";
static const char* _status_str(wsrep_member_status_t status)
{
diff --git a/sql/wsrep_priv.h b/sql/wsrep_priv.h
index b00bfda16fa..222a49cc2ab 100644
--- a/sql/wsrep_priv.h
+++ b/sql/wsrep_priv.h
@@ -40,11 +40,12 @@ extern wsrep_uuid_t local_uuid;
extern wsrep_seqno_t local_seqno;
// a helper function
-void wsrep_sst_received (wsrep_t* const wsrep,
- const wsrep_uuid_t& uuid,
- wsrep_seqno_t const segno,
- const void * const state,
- size_t const state_len);
+bool wsrep_sst_received (wsrep_t* const wsrep,
+ const wsrep_uuid_t& uuid,
+ const wsrep_seqno_t seqno,
+ const void* const state,
+ const size_t state_len,
+ const bool implicit);
/*! SST thread signals init thread about sst completion */
void wsrep_sst_complete(const wsrep_uuid_t*, wsrep_seqno_t, bool);
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index a54b96a4aa2..6ec9a589974 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -31,9 +31,7 @@
#include <cstdio>
#include <cstdlib>
-#if MYSQL_VERSION_ID < 100200
-# include <my_service_manager.h>
-#endif
+#include <my_service_manager.h>
static char wsrep_defaults_file[FN_REFLEN * 2 + 10 + 30 +
sizeof(WSREP_SST_OPT_CONF) +
@@ -43,7 +41,7 @@ static char wsrep_defaults_file[FN_REFLEN * 2 + 10 + 30 +
const char* wsrep_sst_method = WSREP_SST_DEFAULT;
const char* wsrep_sst_receive_address = WSREP_SST_ADDRESS_AUTO;
const char* wsrep_sst_donor = "";
- char* wsrep_sst_auth = NULL;
+const char* wsrep_sst_auth = NULL;
// container for real auth string
static const char* sst_auth_real = NULL;
@@ -165,13 +163,12 @@ void wsrep_sst_auth_free()
bool wsrep_sst_auth_update (sys_var *self, THD* thd, enum_var_type type)
{
- return sst_auth_real_set (wsrep_sst_auth);
+ return sst_auth_real_set (wsrep_sst_auth);
}
-void wsrep_sst_auth_init (const char* value)
+void wsrep_sst_auth_init ()
{
- DBUG_ASSERT(wsrep_sst_auth == value);
- sst_auth_real_set (wsrep_sst_auth);
+ sst_auth_real_set(wsrep_sst_auth);
}
bool wsrep_sst_donor_check (sys_var *self, THD* thd, set_var* var)
@@ -181,7 +178,7 @@ bool wsrep_sst_donor_check (sys_var *self, THD* thd, set_var* var)
bool wsrep_sst_donor_update (sys_var *self, THD* thd, enum_var_type type)
{
- return 0;
+ return 0;
}
bool wsrep_before_SE()
@@ -275,51 +272,110 @@ void wsrep_sst_complete (const wsrep_uuid_t* sst_uuid,
mysql_mutex_unlock (&LOCK_wsrep_sst);
}
-void wsrep_sst_received (wsrep_t* const wsrep,
+/*
+ If wsrep provider is loaded, inform that the new state snapshot
+ has been received. Also update the local checkpoint.
+
+ @param wsrep [IN] wsrep handle
+ @param uuid [IN] Initial state UUID
+ @param seqno [IN] Initial state sequence number
+ @param state [IN] Always NULL, also ignored by wsrep provider (?)
+ @param state_len [IN] Always 0, also ignored by wsrep provider (?)
+ @param implicit [IN] Whether invoked implicitly due to SST
+ (true) or explicitly because if change
+ in wsrep_start_position by user (false).
+ @return false Success
+ true Error
+
+*/
+bool wsrep_sst_received (wsrep_t* const wsrep,
const wsrep_uuid_t& uuid,
- wsrep_seqno_t const seqno,
+ const wsrep_seqno_t seqno,
const void* const state,
- size_t const state_len)
+ const size_t state_len,
+ const bool implicit)
{
- wsrep_get_SE_checkpoint(local_uuid, local_seqno);
+ /*
+ To keep track of whether the local uuid:seqno should be updated. Also, note
+ that local state (uuid:seqno) is updated/checkpointed only after we get an
+ OK from wsrep provider. By doing so, the values remain consistent across
+ the server & wsrep provider.
+ */
+ bool do_update= false;
- if (memcmp(&local_uuid, &uuid, sizeof(wsrep_uuid_t)) ||
- local_seqno < seqno || seqno < 0)
+ // Get the locally stored uuid:seqno.
+ if (wsrep_get_SE_checkpoint(local_uuid, local_seqno))
+ {
+ return true;
+ }
+
+ if (memcmp(&local_uuid, &uuid, sizeof(wsrep_uuid_t)) ||
+ local_seqno < seqno || seqno < 0)
+ {
+ do_update= true;
+ }
+ else if (local_seqno > seqno)
+ {
+ WSREP_WARN("SST position can't be set in past. Requested: %lld, Current: "
+ " %lld.", (long long)seqno, (long long)local_seqno);
+ /*
+ If we are here because of SET command, simply return true (error) instead of
+ aborting.
+ */
+ if (implicit)
{
- wsrep_set_SE_checkpoint(uuid, seqno);
- local_uuid = uuid;
- local_seqno = seqno;
+ WSREP_WARN("Can't continue.");
+ unireg_abort(1);
}
- else if (local_seqno > seqno)
+ else
{
- WSREP_WARN("SST postion is in the past: %" PRId64 ", current: %" PRId64
- ". Can't continue.", seqno, local_seqno);
- unireg_abort(1);
+ return true;
}
+ }
#ifdef GTID_SUPPORT
- wsrep_init_sidno(uuid);
+ wsrep_init_sidno(uuid);
#endif /* GTID_SUPPORT */
- if (wsrep)
+ if (wsrep)
+ {
+ int const rcode(seqno < 0 ? seqno : 0);
+ wsrep_gtid_t const state_id= {uuid,
+ (rcode ? WSREP_SEQNO_UNDEFINED : seqno)};
+
+ wsrep_status_t ret= wsrep->sst_received(wsrep, &state_id, state,
+ state_len, rcode);
+
+ if (ret != WSREP_OK)
{
- int const rcode(seqno < 0 ? seqno : 0);
- wsrep_gtid_t const state_id = {
- uuid, (rcode ? WSREP_SEQNO_UNDEFINED : seqno)
- };
+ return true;
+ }
+ }
- wsrep->sst_received(wsrep, &state_id, state, state_len, rcode);
+ // Now is the good time to update the local state and checkpoint.
+ if (do_update)
+ {
+ if (wsrep_set_SE_checkpoint(uuid, seqno))
+ {
+ return true;
}
+
+ local_uuid= uuid;
+ local_seqno= seqno;
+ }
+
+ return false;
}
// Let applier threads to continue
-void wsrep_sst_continue ()
+bool wsrep_sst_continue ()
{
if (sst_needed)
{
WSREP_INFO("Signalling provider to continue.");
- wsrep_sst_received (wsrep, local_uuid, local_seqno, NULL, 0);
+ return wsrep_sst_received (wsrep, local_uuid, local_seqno, NULL, 0, true);
}
+ return false;
}
struct sst_thread_arg
@@ -1025,7 +1081,7 @@ static int run_sql_command(THD *thd, const char *query)
return -1;
}
- mysql_parse(thd, thd->query(), thd->query_length(), &ps);
+ mysql_parse(thd, thd->query(), thd->query_length(), &ps, FALSE, FALSE);
if (thd->is_error())
{
int const err= thd->get_stmt_da()->sql_errno();
@@ -1344,7 +1400,7 @@ wsrep_cb_status_t wsrep_sst_donate_cb (void* app_ctx, void* recv_ctx,
/* This will be reset when sync callback is called.
* Should we set wsrep_ready to FALSE here too? */
- wsrep_config_state.set(WSREP_MEMBER_DONOR);
+ wsrep_config_state->set(WSREP_MEMBER_DONOR);
const char* method = (char*)msg;
size_t method_len = strlen (method);
diff --git a/sql/wsrep_sst.h b/sql/wsrep_sst.h
index a35ce46cae8..cc0f1f5389d 100644
--- a/sql/wsrep_sst.h
+++ b/sql/wsrep_sst.h
@@ -57,15 +57,16 @@
extern const char* wsrep_sst_method;
extern const char* wsrep_sst_receive_address;
extern const char* wsrep_sst_donor;
-extern char* wsrep_sst_auth;
-extern my_bool wsrep_sst_donor_rejects_queries;
+extern const char* wsrep_sst_auth;
+extern my_bool wsrep_sst_donor_rejects_queries;
/*! Synchronizes applier thread start with init thread */
extern void wsrep_sst_grab();
/*! Init thread waits for SST completion */
extern bool wsrep_sst_wait();
/*! Signals wsrep that initialization is complete, writesets can be applied */
-extern void wsrep_sst_continue();
+extern bool wsrep_sst_continue();
+extern void wsrep_sst_auth_init();
extern void wsrep_sst_auth_free();
extern void wsrep_SE_init_grab(); /*! grab init critical section */
@@ -77,7 +78,7 @@ extern void wsrep_SE_initialized(); /*! mark SE initialization complete */
#define wsrep_SE_initialized() do { } while(0)
#define wsrep_SE_init_grab() do { } while(0)
#define wsrep_SE_init_done() do { } while(0)
-#define wsrep_sst_continue() do { } while(0)
+#define wsrep_sst_continue() (0)
#endif /* WITH_WSREP */
#endif /* WSREP_SST_H */
diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc
index 551e710cfeb..00afbec290e 100644
--- a/sql/wsrep_thd.cc
+++ b/sql/wsrep_thd.cc
@@ -50,8 +50,8 @@ int wsrep_show_bf_aborts (THD *thd, SHOW_VAR *var, char *buff,
/* must have (&thd->LOCK_thd_data) */
void wsrep_client_rollback(THD *thd)
{
- WSREP_DEBUG("client rollback due to BF abort for (%ld), query: %s",
- thd->thread_id, thd->query());
+ WSREP_DEBUG("client rollback due to BF abort for (%lld), query: %s",
+ (longlong) thd->thread_id, thd->query());
WSREP_ATOMIC_ADD_LONG(&wsrep_bf_aborts_counter, 1);
@@ -61,14 +61,16 @@ void wsrep_client_rollback(THD *thd)
if (thd->locked_tables_mode && thd->lock)
{
- WSREP_DEBUG("unlocking tables for BF abort (%ld)", thd->thread_id);
+ WSREP_DEBUG("unlocking tables for BF abort (%lld)",
+ (longlong) thd->thread_id);
thd->locked_tables_list.unlock_locked_tables(thd);
thd->variables.option_bits&= ~(OPTION_TABLE_LOCK);
}
if (thd->global_read_lock.is_acquired())
{
- WSREP_DEBUG("unlocking GRL for BF abort (%ld)", thd->thread_id);
+ WSREP_DEBUG("unlocking GRL for BF abort (%lld)",
+ (longlong) thd->thread_id);
thd->global_read_lock.unlock_global_read_lock(thd);
}
@@ -80,7 +82,8 @@ void wsrep_client_rollback(THD *thd)
if (thd->get_binlog_table_maps())
{
- WSREP_DEBUG("clearing binlog table map for BF abort (%ld)", thd->thread_id);
+ WSREP_DEBUG("clearing binlog table map for BF abort (%lld)",
+ (longlong) thd->thread_id);
thd->clear_binlog_table_maps();
}
mysql_mutex_lock(&thd->LOCK_thd_data);
@@ -94,7 +97,6 @@ static rpl_group_info* wsrep_relay_group_init(const char* log_fname)
{
Relay_log_info* rli= new Relay_log_info(false);
- rli->no_storage= true;
if (!rli->relay_log.description_event_for_exec)
{
rli->relay_log.description_event_for_exec=
@@ -236,8 +238,8 @@ void wsrep_replay_transaction(THD *thd)
close_thread_tables(thd);
if (thd->locked_tables_mode && thd->lock)
{
- WSREP_DEBUG("releasing table lock for replaying (%ld)",
- thd->thread_id);
+ WSREP_DEBUG("releasing table lock for replaying (%lld)",
+ (longlong) thd->thread_id);
thd->locked_tables_list.unlock_locked_tables(thd);
thd->variables.option_bits&= ~(OPTION_TABLE_LOCK);
}
@@ -276,15 +278,16 @@ void wsrep_replay_transaction(THD *thd)
case WSREP_OK:
thd->wsrep_conflict_state= NO_CONFLICT;
wsrep->post_commit(wsrep, &thd->wsrep_ws_handle);
- WSREP_DEBUG("trx_replay successful for: %ld %llu",
- thd->thread_id, (long long)thd->real_id);
+ WSREP_DEBUG("trx_replay successful for: %lld %lld",
+ (longlong) thd->thread_id, (longlong) thd->real_id);
if (thd->get_stmt_da()->is_sent())
{
WSREP_WARN("replay ok, thd has reported status");
}
else if (thd->get_stmt_da()->is_set())
{
- if (thd->get_stmt_da()->status() != Diagnostics_area::DA_OK)
+ if (thd->get_stmt_da()->status() != Diagnostics_area::DA_OK &&
+ thd->get_stmt_da()->status() != Diagnostics_area::DA_OK_BULK)
{
WSREP_WARN("replay ok, thd has error status %d",
thd->get_stmt_da()->status());
@@ -336,8 +339,8 @@ void wsrep_replay_transaction(THD *thd)
mysql_mutex_lock(&LOCK_wsrep_replaying);
wsrep_replaying--;
- WSREP_DEBUG("replaying decreased: %d, thd: %lu",
- wsrep_replaying, thd->thread_id);
+ WSREP_DEBUG("replaying decreased: %d, thd: %lld",
+ wsrep_replaying, (longlong) thd->thread_id);
mysql_cond_broadcast(&COND_wsrep_replaying);
mysql_mutex_unlock(&LOCK_wsrep_replaying);
}
@@ -403,13 +406,10 @@ static void wsrep_replication_process(THD *thd)
mysql_cond_broadcast(&COND_thread_count);
mysql_mutex_unlock(&LOCK_thread_count);
- TABLE *tmp;
- while ((tmp = thd->temporary_tables))
+ if(thd->has_thd_temporary_tables())
{
- WSREP_WARN("Applier %lu, has temporary tables at exit: %s.%s",
- thd->thread_id,
- (tmp->s) ? tmp->s->db.str : "void",
- (tmp->s) ? tmp->s->table_name.str : "void");
+ WSREP_WARN("Applier %lld has temporary tables at exit.",
+ thd->thread_id);
}
wsrep_return_from_bf_mode(thd, &shadow);
DBUG_VOID_RETURN;
@@ -516,8 +516,9 @@ static void wsrep_rollback_process(THD *thd)
mysql_mutex_lock(&aborting->LOCK_thd_data);
wsrep_client_rollback(aborting);
- WSREP_DEBUG("WSREP rollbacker aborted thd: (%lu %llu)",
- aborting->thread_id, (long long)aborting->real_id);
+ WSREP_DEBUG("WSREP rollbacker aborted thd: (%lld %lld)",
+ (longlong) aborting->thread_id,
+ (longlong) aborting->real_id);
mysql_mutex_unlock(&aborting->LOCK_thd_data);
set_current_thd(thd);
@@ -697,3 +698,23 @@ void wsrep_thd_auto_increment_variables(THD* thd,
*increment= thd->variables.auto_increment_increment;
}
}
+
+my_bool wsrep_thd_is_applier(MYSQL_THD thd)
+{
+ my_bool is_applier= false;
+
+ if (thd && thd->wsrep_applier)
+ is_applier= true;
+
+ return (is_applier);
+}
+
+void wsrep_set_load_multi_commit(THD *thd, bool split)
+{
+ thd->wsrep_split_flag= split;
+}
+
+bool wsrep_is_load_multi_commit(THD *thd)
+{
+ return thd->wsrep_split_flag;
+}
diff --git a/sql/wsrep_utils.cc b/sql/wsrep_utils.cc
index 8a72d754a43..1a358877a35 100644
--- a/sql/wsrep_utils.cc
+++ b/sql/wsrep_utils.cc
@@ -413,7 +413,7 @@ process::wait ()
return err_;
}
-thd::thd (my_bool won) : init(), ptr(new THD)
+thd::thd (my_bool won) : init(), ptr(new THD(0))
{
if (ptr)
{
diff --git a/sql/wsrep_utils.h b/sql/wsrep_utils.h
index dee7eb11504..0afca96ea41 100644
--- a/sql/wsrep_utils.h
+++ b/sql/wsrep_utils.h
@@ -238,7 +238,7 @@ private:
} /* namespace wsp */
-extern wsp::Config_state wsrep_config_state;
+extern wsp::Config_state *wsrep_config_state;
namespace wsp {
/* a class to manage env vars array */
diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc
index 3959746156a..60b4cb6c11a 100644
--- a/sql/wsrep_var.cc
+++ b/sql/wsrep_var.cc
@@ -26,14 +26,6 @@
#include <cstdio>
#include <cstdlib>
-const char* wsrep_provider = 0;
-const char* wsrep_provider_options = 0;
-const char* wsrep_cluster_address = 0;
-const char* wsrep_cluster_name = 0;
-const char* wsrep_node_name = 0;
-const char* wsrep_node_address = 0;
-const char* wsrep_node_incoming_address = 0;
-const char* wsrep_start_position = 0;
ulong wsrep_reject_queries;
static long wsrep_prev_slave_threads = wsrep_slave_threads;
@@ -48,12 +40,14 @@ int wsrep_init_vars()
wsrep_node_address = my_strdup("", MYF(MY_WME));
wsrep_node_incoming_address= my_strdup(WSREP_NODE_INCOMING_AUTO, MYF(MY_WME));
wsrep_start_position = my_strdup(WSREP_START_POSITION_ZERO, MYF(MY_WME));
-
- global_system_variables.binlog_format=BINLOG_FORMAT_ROW;
return 0;
}
-extern ulong innodb_lock_schedule_algorithm;
+/* This is intentionally declared as a weak global symbol, so that
+linking will succeed even if the server is built with a dynamically
+linked InnoDB. */
+ulong innodb_lock_schedule_algorithm __attribute__((weak));
+struct handlerton* innodb_hton_ptr __attribute__((weak));
bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
{
@@ -72,7 +66,7 @@ bool wsrep_on_check(sys_var *self, THD* thd, set_var* var)
if (check_has_super(self, thd, var))
return true;
- if (new_wsrep_on && innodb_lock_schedule_algorithm != 0) {
+ if (new_wsrep_on && innodb_hton_ptr && innodb_lock_schedule_algorithm != 0) {
my_message(ER_WRONG_ARGUMENTS, " WSREP (galera) can't be enabled "
"if innodb_lock_schedule_algorithm=VATS. Please configure"
" innodb_lock_schedule_algorithm=FCFS and restart.", MYF(0));
@@ -119,32 +113,68 @@ bool wsrep_sync_wait_update (sys_var* self, THD* thd, enum_var_type var_type)
return false;
}
-static int wsrep_start_position_verify (const char* start_str)
+
+/*
+ Verify the format of the given UUID:seqno.
+
+ @return
+ true Fail
+ false Pass
+*/
+static
+bool wsrep_start_position_verify (const char* start_str)
{
size_t start_len;
wsrep_uuid_t uuid;
ssize_t uuid_len;
+ // Check whether it has minimum acceptable length.
start_len = strlen (start_str);
if (start_len < 34)
- return 1;
+ return true;
+ /*
+ Parse the input to check whether UUID length is acceptable
+ and seqno has been provided.
+ */
uuid_len = wsrep_uuid_scan (start_str, start_len, &uuid);
if (uuid_len < 0 || (start_len - uuid_len) < 2)
- return 1;
+ return true;
- if (start_str[uuid_len] != ':') // separator should follow UUID
- return 1;
+ // Separator must follow the UUID.
+ if (start_str[uuid_len] != ':')
+ return true;
char* endptr;
wsrep_seqno_t const seqno __attribute__((unused)) // to avoid GCC warnings
(strtoll(&start_str[uuid_len + 1], &endptr, 10));
- if (*endptr == '\0') return 0; // remaining string was seqno
+ // Remaining string was seqno.
+ if (*endptr == '\0') return false;
- return 1;
+ return true;
}
+
+static
+bool wsrep_set_local_position(const char* const value, size_t length,
+ bool const sst)
+{
+ wsrep_uuid_t uuid;
+ size_t const uuid_len = wsrep_uuid_scan(value, length, &uuid);
+ wsrep_seqno_t const seqno = strtoll(value + uuid_len + 1, NULL, 10);
+
+ if (sst) {
+ return wsrep_sst_received (wsrep, uuid, seqno, NULL, 0, false);
+ } else {
+ // initialization
+ local_uuid = uuid;
+ local_seqno = seqno;
+ }
+ return false;
+}
+
+
bool wsrep_start_position_check (sys_var *self, THD* thd, set_var* var)
{
char start_pos_buf[FN_REFLEN];
@@ -157,52 +187,52 @@ bool wsrep_start_position_check (sys_var *self, THD* thd, set_var* var)
var->save_result.string_value.length);
start_pos_buf[var->save_result.string_value.length]= 0;
- if (!wsrep_start_position_verify(start_pos_buf)) return 0;
+ // Verify the format.
+ if (wsrep_start_position_verify(start_pos_buf)) return true;
+
+ /*
+ As part of further verification, we try to update the value and catch
+ errors (if any).
+ */
+ if (wsrep_set_local_position(var->save_result.string_value.str,
+ var->save_result.string_value.length,
+ true))
+ {
+ goto err;
+ }
+
+ return false;
err:
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), var->var->name.str,
var->save_result.string_value.str ?
var->save_result.string_value.str : "NULL");
- return 1;
-}
-
-static
-void wsrep_set_local_position(const char* const value, bool const sst)
-{
- size_t const value_len = strlen(value);
- wsrep_uuid_t uuid;
- size_t const uuid_len = wsrep_uuid_scan(value, value_len, &uuid);
- wsrep_seqno_t const seqno = strtoll(value + uuid_len + 1, NULL, 10);
-
- if (sst) {
- wsrep_sst_received (wsrep, uuid, seqno, NULL, 0);
- } else {
- // initialization
- local_uuid = uuid;
- local_seqno = seqno;
- }
+ return true;
}
bool wsrep_start_position_update (sys_var *self, THD* thd, enum_var_type type)
{
- WSREP_INFO ("wsrep_start_position var submitted: '%s'",
- wsrep_start_position);
- // since this value passed wsrep_start_position_check, don't check anything
- // here
- wsrep_set_local_position (wsrep_start_position, true);
- return 0;
+ // Print a confirmation that wsrep_start_position has been updated.
+ WSREP_INFO ("wsrep_start_position set to '%s'", wsrep_start_position);
+ return false;
}
-void wsrep_start_position_init (const char* val)
+bool wsrep_start_position_init (const char* val)
{
if (NULL == val || wsrep_start_position_verify (val))
{
WSREP_ERROR("Bad initial value for wsrep_start_position: %s",
(val ? val : ""));
- return;
+ return true;
+ }
+
+ if (wsrep_set_local_position (val, strlen(val), false))
+ {
+ WSREP_ERROR("Failed to set initial wsep_start_position: %s", val);
+ return true;
}
- wsrep_set_local_position (val, false);
+ return false;
}
static int get_provider_option_value(const char* opts,
@@ -326,7 +356,7 @@ bool wsrep_provider_update (sys_var *self, THD* thd, enum_var_type type)
if (wsrep_init())
{
- my_error(ER_CANT_OPEN_LIBRARY, MYF(0), tmp);
+ my_error(ER_CANT_OPEN_LIBRARY, MYF(0), tmp, my_error, "wsrep_init failed");
rcode = true;
}
free(tmp);
@@ -421,14 +451,14 @@ bool wsrep_cluster_address_check (sys_var *self, THD* thd, set_var* var)
char addr_buf[FN_REFLEN];
if ((! var->save_result.string_value.str) ||
- (var->save_result.string_value.length > (FN_REFLEN - 1))) // safety
+ (var->save_result.string_value.length >= sizeof(addr_buf))) // safety
goto err;
- memcpy(addr_buf, var->save_result.string_value.str,
- var->save_result.string_value.length);
- addr_buf[var->save_result.string_value.length]= 0;
+ strmake(addr_buf, var->save_result.string_value.str,
+ MY_MIN(sizeof(addr_buf)-1, var->save_result.string_value.length));
- if (!wsrep_cluster_address_verify(addr_buf)) return 0;
+ if (!wsrep_cluster_address_verify(addr_buf))
+ return 0;
err:
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), var->var->name.str,
@@ -486,8 +516,8 @@ void wsrep_cluster_address_init (const char* value)
(wsrep_cluster_address) ? wsrep_cluster_address : "null",
(value) ? value : "null");
- if (wsrep_cluster_address) my_free ((void*)wsrep_cluster_address);
- wsrep_cluster_address = (value) ? my_strdup(value, MYF(0)) : NULL;
+ my_free((void*) wsrep_cluster_address);
+ wsrep_cluster_address= my_strdup(value ? value : "", MYF(0));
}
/* wsrep_cluster_name cannot be NULL or an empty string. */
@@ -710,7 +740,7 @@ int wsrep_show_status (THD *thd, SHOW_VAR *var, char *buff,
v->name = thd->strdup(sv->name);
switch (sv->type) {
case WSREP_VAR_INT64:
- v->value = (char*)thd->memdup(&sv->value._int64, sizeof(longlong));
+ v->value = (char*)thd->memdup(&sv->value._integer64, sizeof(longlong));
v->type = SHOW_LONGLONG;
break;
case WSREP_VAR_STRING:
diff --git a/sql/wsrep_var.h b/sql/wsrep_var.h
index 53952173c83..7d3ff50f1d2 100644
--- a/sql/wsrep_var.h
+++ b/sql/wsrep_var.h
@@ -47,7 +47,7 @@ extern bool wsrep_on_update UPDATE_ARGS;
extern bool wsrep_sync_wait_update UPDATE_ARGS;
extern bool wsrep_start_position_check CHECK_ARGS;
extern bool wsrep_start_position_update UPDATE_ARGS;
-extern void wsrep_start_position_init INIT_ARGS;
+extern bool wsrep_start_position_init INIT_ARGS;
extern bool wsrep_provider_check CHECK_ARGS;
extern bool wsrep_provider_update UPDATE_ARGS;
@@ -80,7 +80,6 @@ extern bool wsrep_sst_receive_address_update UPDATE_ARGS;
extern bool wsrep_sst_auth_check CHECK_ARGS;
extern bool wsrep_sst_auth_update UPDATE_ARGS;
-extern void wsrep_sst_auth_init INIT_ARGS;
extern bool wsrep_sst_donor_check CHECK_ARGS;
extern bool wsrep_sst_donor_update UPDATE_ARGS;
@@ -101,7 +100,6 @@ extern bool wsrep_reject_queries_update UPDATE_ARGS;
#define wsrep_provider_init(X)
#define wsrep_init_vars() (0)
#define wsrep_start_position_init(X)
-#define wsrep_sst_auth_init(X)
#endif /* WITH_WSREP */
#endif /* WSREP_VAR_H */
diff --git a/sql/wsrep_xid.cc b/sql/wsrep_xid.cc
index 132956e88b3..72b96cadfb0 100644
--- a/sql/wsrep_xid.cc
+++ b/sql/wsrep_xid.cc
@@ -20,6 +20,8 @@
#include "sql_class.h"
#include "wsrep_mysqld.h" // for logging macros
+#include <algorithm> /* std::sort() */
+
/*
* WSREPXid
*/
@@ -89,16 +91,17 @@ static my_bool set_SE_checkpoint(THD* unused, plugin_ref plugin, void* arg)
return FALSE;
}
-void wsrep_set_SE_checkpoint(XID& xid)
+bool wsrep_set_SE_checkpoint(XID& xid)
{
- plugin_foreach(NULL, set_SE_checkpoint, MYSQL_STORAGE_ENGINE_PLUGIN, &xid);
+ return plugin_foreach(NULL, set_SE_checkpoint, MYSQL_STORAGE_ENGINE_PLUGIN,
+ &xid);
}
-void wsrep_set_SE_checkpoint(const wsrep_uuid_t& uuid, wsrep_seqno_t seqno)
+bool wsrep_set_SE_checkpoint(const wsrep_uuid_t& uuid, wsrep_seqno_t seqno)
{
XID xid;
wsrep_xid_init(&xid, uuid, seqno);
- wsrep_set_SE_checkpoint(xid);
+ return wsrep_set_SE_checkpoint(xid);
}
static my_bool get_SE_checkpoint(THD* unused, plugin_ref plugin, void* arg)
@@ -118,12 +121,13 @@ static my_bool get_SE_checkpoint(THD* unused, plugin_ref plugin, void* arg)
return FALSE;
}
-void wsrep_get_SE_checkpoint(XID& xid)
+bool wsrep_get_SE_checkpoint(XID& xid)
{
- plugin_foreach(NULL, get_SE_checkpoint, MYSQL_STORAGE_ENGINE_PLUGIN, &xid);
+ return plugin_foreach(NULL, get_SE_checkpoint, MYSQL_STORAGE_ENGINE_PLUGIN,
+ &xid);
}
-void wsrep_get_SE_checkpoint(wsrep_uuid_t& uuid, wsrep_seqno_t& seqno)
+bool wsrep_get_SE_checkpoint(wsrep_uuid_t& uuid, wsrep_seqno_t& seqno)
{
uuid= WSREP_UUID_UNDEFINED;
seqno= WSREP_SEQNO_UNDEFINED;
@@ -131,16 +135,56 @@ void wsrep_get_SE_checkpoint(wsrep_uuid_t& uuid, wsrep_seqno_t& seqno)
XID xid;
xid.null();
- wsrep_get_SE_checkpoint(xid);
+ if (wsrep_get_SE_checkpoint(xid))
+ {
+ return true;
+ }
- if (xid.is_null()) return;
+ if (xid.is_null())
+ {
+ return false;
+ }
if (!wsrep_is_wsrep_xid(&xid))
{
WSREP_WARN("Read non-wsrep XID from storage engines.");
- return;
+ return false;
}
uuid= *wsrep_xid_uuid(xid);
seqno= wsrep_xid_seqno(xid);
+
+ return false;
+}
+
+/*
+ Sort order for XIDs. Wsrep XIDs are sorted according to
+ seqno in ascending order. Non-wsrep XIDs are considered
+ equal among themselves and greater than with respect
+ to wsrep XIDs.
+ */
+struct Wsrep_xid_cmp
+{
+ bool operator()(const XID& left, const XID& right) const
+ {
+ const bool left_is_wsrep= wsrep_is_wsrep_xid(&left);
+ const bool right_is_wsrep= wsrep_is_wsrep_xid(&right);
+ if (left_is_wsrep && right_is_wsrep)
+ {
+ return (wsrep_xid_seqno(left) < wsrep_xid_seqno(right));
+ }
+ else if (left_is_wsrep)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+};
+
+void wsrep_sort_xid_array(XID *array, int len)
+{
+ std::sort(array, array + len, Wsrep_xid_cmp());
}
diff --git a/sql/wsrep_xid.h b/sql/wsrep_xid.h
index c3cad0231d7..01b18506708 100644
--- a/sql/wsrep_xid.h
+++ b/sql/wsrep_xid.h
@@ -28,9 +28,11 @@ const wsrep_uuid_t* wsrep_xid_uuid(const XID&);
wsrep_seqno_t wsrep_xid_seqno(const XID&);
//void wsrep_get_SE_checkpoint(XID&); /* uncomment if needed */
-void wsrep_get_SE_checkpoint(wsrep_uuid_t&, wsrep_seqno_t&);
+bool wsrep_get_SE_checkpoint(wsrep_uuid_t&, wsrep_seqno_t&);
//void wsrep_set_SE_checkpoint(XID&); /* uncomment if needed */
-void wsrep_set_SE_checkpoint(const wsrep_uuid_t&, wsrep_seqno_t);
+bool wsrep_set_SE_checkpoint(const wsrep_uuid_t&, wsrep_seqno_t);
+
+void wsrep_sort_xid_array(XID *array, int len);
#endif /* WITH_WSREP */
#endif /* WSREP_UTILS_H */