Diffstat (limited to 'sql')
-rw-r--r--  sql/CMakeLists.txt | 18
-rw-r--r--  sql/compat56.cc | 445
-rw-r--r--  sql/compat56.h | 46
-rw-r--r--  sql/create_options.cc | 197
-rw-r--r--  sql/create_options.h | 17
-rw-r--r--  sql/custom_conf.h | 2
-rw-r--r--  sql/datadict.cc | 118
-rw-r--r--  sql/datadict.h | 22
-rw-r--r--  sql/db.opt | 2
-rw-r--r--  sql/discover.cc | 171
-rw-r--r--  sql/discover.h | 21
-rw-r--r--  sql/event_db_repository.cc | 4
-rw-r--r--  sql/event_parse_data.cc | 4
-rw-r--r--  sql/event_scheduler.cc | 26
-rw-r--r--  sql/field.cc | 695
-rw-r--r--  sql/field.h | 650
-rw-r--r--  sql/field_conv.cc | 4
-rw-r--r--  sql/filesort.cc | 23
-rw-r--r--  sql/frm_crypt.cc | 37
-rw-r--r--  sql/frm_crypt.h | 23
-rw-r--r--  sql/ha_ndbcluster_cond.cc | 2
-rw-r--r--  sql/ha_ndbcluster_cond.h | 2
-rw-r--r--  sql/ha_ndbcluster_tables.h | 2
-rw-r--r--  sql/ha_partition.cc | 70
-rw-r--r--  sql/ha_partition.h | 22
-rw-r--r--  sql/handler.cc | 818
-rw-r--r--  sql/handler.h | 479
-rw-r--r--  sql/hostname.cc | 2
-rw-r--r--  sql/innodb_priv.h | 6
-rw-r--r--  sql/item.cc | 233
-rw-r--r--  sql/item.h | 292
-rw-r--r--  sql/item_cmpfunc.cc | 279
-rw-r--r--  sql/item_cmpfunc.h | 113
-rw-r--r--  sql/item_create.cc | 112
-rw-r--r--  sql/item_create.h | 5
-rw-r--r--  sql/item_func.cc | 876
-rw-r--r--  sql/item_func.h | 41
-rw-r--r--  sql/item_geofunc.cc | 29
-rw-r--r--  sql/item_geofunc.h | 4
-rw-r--r--  sql/item_row.cc | 4
-rw-r--r--  sql/item_strfunc.cc | 210
-rw-r--r--  sql/item_strfunc.h | 75
-rw-r--r--  sql/item_subselect.cc | 548
-rw-r--r--  sql/item_subselect.h | 96
-rw-r--r--  sql/item_sum.cc | 191
-rw-r--r--  sql/item_sum.h | 11
-rw-r--r--  sql/item_timefunc.cc | 78
-rw-r--r--  sql/item_timefunc.h | 4
-rw-r--r--  sql/item_xmlfunc.cc | 6
-rw-r--r--  sql/key.cc | 108
-rw-r--r--  sql/key.h | 4
-rw-r--r--  sql/keycaches.cc | 70
-rw-r--r--  sql/keycaches.h | 13
-rw-r--r--  sql/lex.h | 5
-rw-r--r--  sql/lex_symbol.h | 2
-rw-r--r--  sql/lock.cc | 4
-rw-r--r--  sql/log.cc | 526
-rw-r--r--  sql/log.h | 19
-rw-r--r--  sql/log_event.cc | 982
-rw-r--r--  sql/log_event.h | 336
-rw-r--r--  sql/log_event_old.cc | 2
-rw-r--r--  sql/log_event_old.h | 2
-rw-r--r--  sql/mdl.cc | 143
-rw-r--r--  sql/mdl.h | 47
-rw-r--r--  sql/mem_root_array.h | 2
-rw-r--r--  sql/multi_range_read.cc | 7
-rw-r--r--  sql/my_apc.cc | 3
-rw-r--r--  sql/my_apc.h | 8
-rw-r--r--  sql/my_decimal.h | 2
-rw-r--r--  sql/mysql_install_db.cc | 16
-rw-r--r--  sql/mysqld.cc | 544
-rw-r--r--  sql/mysqld.h | 61
-rw-r--r--  sql/net_serv.cc | 2
-rw-r--r--  sql/opt_range.cc | 877
-rw-r--r--  sql/opt_range.h | 17
-rw-r--r--  sql/opt_range_mrr.cc | 6
-rw-r--r--  sql/opt_subselect.cc | 35
-rw-r--r--  sql/opt_subselect.h | 1
-rw-r--r--  sql/opt_sum.cc | 2
-rw-r--r--  sql/opt_table_elimination.cc | 2
-rw-r--r--  sql/partition_element.h | 7
-rw-r--r--  sql/partition_info.cc | 104
-rw-r--r--  sql/partition_info.h | 16
-rw-r--r--  sql/protocol.cc | 10
-rw-r--r--  sql/repl_failsafe.cc | 9
-rw-r--r--  sql/rpl_filter.cc | 25
-rw-r--r--  sql/rpl_filter.h | 3
-rw-r--r--  sql/rpl_gtid.cc | 1431
-rw-r--r--  sql/rpl_gtid.h | 199
-rw-r--r--  sql/rpl_handler.cc | 24
-rw-r--r--  sql/rpl_injector.cc | 8
-rw-r--r--  sql/rpl_mi.cc | 185
-rw-r--r--  sql/rpl_mi.h | 48
-rw-r--r--  sql/rpl_reporting.cc | 2
-rw-r--r--  sql/rpl_rli.cc | 242
-rw-r--r--  sql/rpl_rli.h | 22
-rw-r--r--  sql/rpl_utility.cc | 59
-rw-r--r--  sql/rpl_utility.h | 10
-rw-r--r--  sql/scheduler.cc | 64
-rw-r--r--  sql/set_var.cc | 129
-rw-r--r--  sql/set_var.h | 16
-rw-r--r--  sql/share/charsets/Index.xml | 2
-rw-r--r--  sql/share/charsets/armscii8.xml | 2
-rw-r--r--  sql/share/charsets/ascii.xml | 2
-rw-r--r--  sql/share/charsets/cp1250.xml | 2
-rw-r--r--  sql/share/charsets/cp1256.xml | 2
-rw-r--r--  sql/share/charsets/cp1257.xml | 2
-rw-r--r--  sql/share/charsets/cp850.xml | 2
-rw-r--r--  sql/share/charsets/cp852.xml | 2
-rw-r--r--  sql/share/charsets/cp866.xml | 2
-rw-r--r--  sql/share/charsets/dec8.xml | 2
-rw-r--r--  sql/share/charsets/geostd8.xml | 2
-rw-r--r--  sql/share/charsets/greek.xml | 2
-rw-r--r--  sql/share/charsets/hebrew.xml | 2
-rw-r--r--  sql/share/charsets/hp8.xml | 2
-rw-r--r--  sql/share/charsets/keybcs2.xml | 2
-rw-r--r--  sql/share/charsets/koi8r.xml | 2
-rw-r--r--  sql/share/charsets/koi8u.xml | 2
-rw-r--r--  sql/share/charsets/languages.html | 2
-rw-r--r--  sql/share/charsets/latin1.xml | 2
-rw-r--r--  sql/share/charsets/latin2.xml | 2
-rw-r--r--  sql/share/charsets/latin5.xml | 2
-rw-r--r--  sql/share/charsets/latin7.xml | 2
-rw-r--r--  sql/share/charsets/macce.xml | 2
-rw-r--r--  sql/share/charsets/macroman.xml | 2
-rw-r--r--  sql/share/charsets/swe7.xml | 2
-rw-r--r--  sql/share/errmsg-utf8.txt | 558
-rw-r--r--  sql/slave.cc | 828
-rw-r--r--  sql/sp.cc | 4
-rw-r--r--  sql/sp_head.cc | 11
-rw-r--r--  sql/sp_pcontext.cc | 12
-rw-r--r--  sql/sp_rcontext.cc | 6
-rw-r--r--  sql/sp_rcontext.h | 2
-rw-r--r--  sql/spatial.cc | 136
-rw-r--r--  sql/spatial.h | 45
-rw-r--r--  sql/sql_acl.cc | 97
-rw-r--r--  sql/sql_admin.cc | 28
-rw-r--r--  sql/sql_admin.h | 4
-rw-r--r--  sql/sql_analyse.cc | 2
-rw-r--r--  sql/sql_array.h | 13
-rw-r--r--  sql/sql_audit.cc | 32
-rw-r--r--  sql/sql_audit.h | 188
-rw-r--r--  sql/sql_base.cc | 705
-rw-r--r--  sql/sql_base.h | 83
-rw-r--r--  sql/sql_binlog.cc | 2
-rw-r--r--  sql/sql_bitmap.h | 5
-rw-r--r--  sql/sql_cache.cc | 79
-rw-r--r--  sql/sql_cache.h | 12
-rw-r--r--  sql/sql_class.cc | 264
-rw-r--r--  sql/sql_class.h | 163
-rw-r--r--  sql/sql_cmd.h | 2
-rw-r--r--  sql/sql_connect.cc | 8
-rw-r--r--  sql/sql_const.h | 3
-rw-r--r--  sql/sql_cursor.cc | 2
-rw-r--r--  sql/sql_db.cc | 153
-rw-r--r--  sql/sql_db.h | 1
-rw-r--r--  sql/sql_delete.cc | 2
-rw-r--r--  sql/sql_derived.cc | 14
-rw-r--r--  sql/sql_error.cc | 6
-rw-r--r--  sql/sql_error.h | 2
-rw-r--r--  sql/sql_expression_cache.cc | 2
-rw-r--r--  sql/sql_handler.cc | 12
-rw-r--r--  sql/sql_help.cc | 2
-rw-r--r--  sql/sql_hset.h | 2
-rw-r--r--  sql/sql_insert.cc | 106
-rw-r--r--  sql/sql_join_cache.cc | 6
-rw-r--r--  sql/sql_lex.cc | 56
-rw-r--r--  sql/sql_lex.h | 24
-rw-r--r--  sql/sql_list.h | 4
-rw-r--r--  sql/sql_load.cc | 2
-rw-r--r--  sql/sql_parse.cc | 356
-rw-r--r--  sql/sql_parse.h | 22
-rw-r--r--  sql/sql_partition.cc | 112
-rw-r--r--  sql/sql_partition.h | 4
-rw-r--r--  sql/sql_partition_admin.cc | 4
-rw-r--r--  sql/sql_plugin.cc | 291
-rw-r--r--  sql/sql_plugin.h | 11
-rw-r--r--  sql/sql_plugin_compat.h | 65
-rw-r--r--  sql/sql_plugin_services.h | 15
-rw-r--r--  sql/sql_prepare.cc | 14
-rw-r--r--  sql/sql_priv.h | 11
-rw-r--r--  sql/sql_reload.cc | 3
-rw-r--r--  sql/sql_reload.h | 2
-rw-r--r--  sql/sql_rename.cc | 108
-rw-r--r--  sql/sql_rename.h | 3
-rw-r--r--  sql/sql_repl.cc | 1821
-rw-r--r--  sql/sql_repl.h | 11
-rw-r--r--  sql/sql_select.cc | 1043
-rw-r--r--  sql/sql_select.h | 54
-rw-r--r--  sql/sql_show.cc | 620
-rw-r--r--  sql/sql_show.h | 11
-rw-r--r--  sql/sql_state.c | 2
-rw-r--r--  sql/sql_statistics.cc | 606
-rw-r--r--  sql/sql_statistics.h | 194
-rw-r--r--  sql/sql_string.cc | 33
-rw-r--r--  sql/sql_string.h | 17
-rw-r--r--  sql/sql_table.cc | 1200
-rw-r--r--  sql/sql_table.h | 20
-rw-r--r--  sql/sql_test.cc | 2
-rw-r--r--  sql/sql_time.cc | 58
-rw-r--r--  sql/sql_time.h | 36
-rw-r--r--  sql/sql_trigger.cc | 7
-rw-r--r--  sql/sql_truncate.cc | 40
-rw-r--r--  sql/sql_udf.h | 4
-rw-r--r--  sql/sql_union.cc | 5
-rw-r--r--  sql/sql_update.cc | 36
-rw-r--r--  sql/sql_view.cc | 58
-rw-r--r--  sql/sql_yacc.yy | 561
-rw-r--r--  sql/strfunc.cc | 4
-rw-r--r--  sql/sys_vars.cc | 498
-rw-r--r--  sql/sys_vars.h | 151
-rw-r--r--  sql/table.cc | 1385
-rw-r--r--  sql/table.h | 157
-rw-r--r--  sql/threadpool_common.cc | 8
-rw-r--r--  sql/threadpool_unix.cc | 43
-rw-r--r--  sql/transaction.cc | 22
-rw-r--r--  sql/tztime.cc | 27
-rw-r--r--  sql/uniques.cc | 40
-rw-r--r--  sql/unireg.cc | 739
-rw-r--r--  sql/unireg.h | 60
220 files changed, 20188 insertions, 7740 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index e42ccc16bc9..b43474224ee 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -37,7 +37,7 @@ IF(SSL_DEFINES)
ENDIF()
SET (SQL_SOURCE
- ../sql-common/client.c derror.cc des_key_file.cc
+ ../sql-common/client.c compat56.cc derror.cc des_key_file.cc
discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
filesort_utils.cc
filesort.cc gstream.cc sha2.cc
@@ -90,6 +90,7 @@ SET (SQL_SOURCE
threadpool_common.cc
../sql-common/mysql_async.c
my_apc.cc my_apc.h
+ rpl_gtid.cc
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
)
@@ -190,9 +191,12 @@ INSTALL_DEBUG_TARGET(mysqld
PDB_DESTINATION ${INSTALL_SBINDIR}/debug
RENAME mysqld-debug)
+INCLUDE(${CMAKE_SOURCE_DIR}/cmake/bison.cmake)
+
# Handle out-of-source build from source package with possibly broken
# bison. Copy bison output from the source to the build directory, if not
# already there
+IF (NOT BISON_USABLE)
IF (NOT ${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_BINARY_DIR})
IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/sql_yacc.cc)
IF(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc)
@@ -203,9 +207,8 @@ IF (NOT ${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_BINARY_DIR})
ENDIF()
ENDIF()
ENDIF()
+ENDIF()
-
-INCLUDE(${CMAKE_SOURCE_DIR}/cmake/bison.cmake)
RUN_BISON(
${CMAKE_CURRENT_SOURCE_DIR}/sql_yacc.yy
${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc
@@ -273,12 +276,9 @@ ADD_CUSTOM_TARGET(distclean
IF(INSTALL_LAYOUT STREQUAL "STANDALONE")
-# We need to create empty directories (data/test) the installation.
-# This does not work with current CPack due to http://www.cmake.org/Bug/view.php?id=8767
-# Avoid completely empty directories and install dummy file instead.
-SET(DUMMY_FILE ${CMAKE_CURRENT_BINARY_DIR}/db.opt )
-FILE(WRITE ${DUMMY_FILE} "")
-INSTALL(FILES ${DUMMY_FILE} DESTINATION data/test COMPONENT DataFiles)
+# Copy db.opt into data/test/
+SET(DBOPT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/db.opt )
+INSTALL(FILES ${DBOPT_FILE} DESTINATION data/test COMPONENT DataFiles)
# Install initial database on windows
IF(NOT CMAKE_CROSSCOMPILING)
diff --git a/sql/compat56.cc b/sql/compat56.cc
new file mode 100644
index 00000000000..3bd6b21a154
--- /dev/null
+++ b/sql/compat56.cc
@@ -0,0 +1,445 @@
+/*
+ Copyright (c) 2004, 2012, Oracle and/or its affiliates.
+ Copyright (c) 2013, MariaDB Foundation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "my_global.h"
+#include "compat56.h"
+#include "myisampack.h"
+#include "my_time.h"
+
+/*** MySQL56 TIME low-level memory and disk representation routines ***/
+
+/*
+ In-memory format:
+
+ 1 bit sign (Used for sign, when on disk)
+ 1 bit unused (Reserved for wider hour range, e.g. for intervals)
+  10 bit hour (0-838)
+ 6 bit minute (0-59)
+ 6 bit second (0-59)
+ 24 bits microseconds (0-999999)
+
+ Total: 48 bits = 6 bytes
+ Suhhhhhh.hhhhmmmm.mmssssss.ffffffff.ffffffff.ffffffff
+*/
+
+
+/**
+ Convert time value to MySQL56 numeric packed representation.
+
+ @param ltime The value to convert.
+ @return Numeric packed representation.
+*/
+longlong TIME_to_longlong_time_packed(const MYSQL_TIME *ltime)
+{
+ /* If month is 0, we mix day with hours: "1 00:10:10" -> "24:00:10" */
+ long hms= (((ltime->month ? 0 : ltime->day * 24) + ltime->hour) << 12) |
+ (ltime->minute << 6) | ltime->second;
+ longlong tmp= MY_PACKED_TIME_MAKE(hms, ltime->second_part);
+ return ltime->neg ? -tmp : tmp;
+}
+
+
+
+/**
+ Convert MySQL56 time packed numeric representation to time.
+
+ @param OUT ltime The MYSQL_TIME variable to set.
+ @param tmp The packed numeric representation.
+*/
+void TIME_from_longlong_time_packed(MYSQL_TIME *ltime, longlong tmp)
+{
+ long hms;
+ if ((ltime->neg= (tmp < 0)))
+ tmp= -tmp;
+ hms= MY_PACKED_TIME_GET_INT_PART(tmp);
+ ltime->year= (uint) 0;
+ ltime->month= (uint) 0;
+ ltime->day= (uint) 0;
+ ltime->hour= (uint) (hms >> 12) % (1 << 10); /* 10 bits starting at 12th */
+ ltime->minute= (uint) (hms >> 6) % (1 << 6); /* 6 bits starting at 6th */
+ ltime->second= (uint) hms % (1 << 6); /* 6 bits starting at 0th */
+ ltime->second_part= MY_PACKED_TIME_GET_FRAC_PART(tmp);
+ ltime->time_type= MYSQL_TIMESTAMP_TIME;
+}
+
+
+/**
+ Calculate binary size of MySQL56 packed numeric time representation.
+
+ @param dec Precision.
+*/
+uint my_time_binary_length(uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ return 3 + (dec + 1) / 2;
+}
+
+
+/*
+ On disk we convert from signed representation to unsigned
+ representation using TIMEF_OFS, so all values become binary comparable.
+*/
+#define TIMEF_OFS 0x800000000000LL
+#define TIMEF_INT_OFS 0x800000LL
+
+
+/**
+ Convert MySQL56 in-memory numeric time representation to on-disk representation
+
+ @param nr Value in packed numeric time format.
+ @param OUT ptr The buffer to put value at.
+ @param dec Precision.
+*/
+void my_time_packed_to_binary(longlong nr, uchar *ptr, uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ /* Make sure the stored value was previously properly rounded or truncated */
+ DBUG_ASSERT((MY_PACKED_TIME_GET_FRAC_PART(nr) %
+ (int) log_10_int[TIME_SECOND_PART_DIGITS - dec]) == 0);
+
+ switch (dec)
+ {
+ case 0:
+ default:
+ mi_int3store(ptr, TIMEF_INT_OFS + MY_PACKED_TIME_GET_INT_PART(nr));
+ break;
+
+ case 1:
+ case 2:
+ mi_int3store(ptr, TIMEF_INT_OFS + MY_PACKED_TIME_GET_INT_PART(nr));
+ ptr[3]= (unsigned char) (char) (MY_PACKED_TIME_GET_FRAC_PART(nr) / 10000);
+ break;
+
+ case 4:
+ case 3:
+ mi_int3store(ptr, TIMEF_INT_OFS + MY_PACKED_TIME_GET_INT_PART(nr));
+ mi_int2store(ptr + 3, MY_PACKED_TIME_GET_FRAC_PART(nr) / 100);
+ break;
+
+ case 5:
+ case 6:
+ mi_int6store(ptr, nr + TIMEF_OFS);
+ break;
+ }
+}
+
+
+/**
+ Convert MySQL56 on-disk time representation to in-memory packed numeric
+ representation.
+
+ @param ptr The pointer to read the value at.
+ @param dec Precision.
+ @return Packed numeric time representation.
+*/
+longlong my_time_packed_from_binary(const uchar *ptr, uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+
+ switch (dec)
+ {
+ case 0:
+ default:
+ {
+ longlong intpart= mi_uint3korr(ptr) - TIMEF_INT_OFS;
+ return MY_PACKED_TIME_MAKE_INT(intpart);
+ }
+ case 1:
+ case 2:
+ {
+ longlong intpart= mi_uint3korr(ptr) - TIMEF_INT_OFS;
+ int frac= (uint) ptr[3];
+ if (intpart < 0 && frac)
+ {
+ /*
+ Negative values are stored with reverse fractional part order,
+ for binary sort compatibility.
+
+ Disk value intpart frac Time value Memory value
+ 800000.00 0 0 00:00:00.00 0000000000.000000
+ 7FFFFF.FF -1 255 -00:00:00.01 FFFFFFFFFF.FFD8F0
+ 7FFFFF.9D -1 99 -00:00:00.99 FFFFFFFFFF.F0E4D0
+ 7FFFFF.00 -1 0 -00:00:01.00 FFFFFFFFFF.000000
+ 7FFFFE.FF -1 255 -00:00:01.01 FFFFFFFFFE.FFD8F0
+ 7FFFFE.F6 -2 246 -00:00:01.10 FFFFFFFFFE.FE7960
+
+ Formula to convert fractional part from disk format
+ (now stored in "frac" variable) to absolute value: "0x100 - frac".
+ To reconstruct in-memory value, we shift
+        to the next integer value and then subtract the fractional part.
+ */
+ intpart++; /* Shift to the next integer value */
+ frac-= 0x100; /* -(0x100 - frac) */
+ }
+ return MY_PACKED_TIME_MAKE(intpart, frac * 10000);
+ }
+
+ case 3:
+ case 4:
+ {
+ longlong intpart= mi_uint3korr(ptr) - TIMEF_INT_OFS;
+ int frac= mi_uint2korr(ptr + 3);
+ if (intpart < 0 && frac)
+ {
+ /*
+ Fix reverse fractional part order: "0x10000 - frac".
+ See comments for FSP=1 and FSP=2 above.
+ */
+ intpart++; /* Shift to the next integer value */
+ frac-= 0x10000; /* -(0x10000-frac) */
+ }
+ return MY_PACKED_TIME_MAKE(intpart, frac * 100);
+ }
+
+ case 5:
+ case 6:
+ return ((longlong) mi_uint6korr(ptr)) - TIMEF_OFS;
+ }
+}
+
+
+/*** MySQL56 DATETIME low-level memory and disk representation routines ***/
+
+/*
+ 1 bit sign (used when on disk)
+ 17 bits year*13+month (year 0-9999, month 0-12)
+ 5 bits day (0-31)
+ 5 bits hour (0-23)
+ 6 bits minute (0-59)
+ 6 bits second (0-59)
+ 24 bits microseconds (0-999999)
+
+ Total: 64 bits = 8 bytes
+
+ SYYYYYYY.YYYYYYYY.YYdddddh.hhhhmmmm.mmssssss.ffffffff.ffffffff.ffffffff
+*/
+
+/**
+ Convert datetime to MySQL56 packed numeric datetime representation.
+ @param ltime The value to convert.
+ @return Packed numeric representation of ltime.
+*/
+longlong TIME_to_longlong_datetime_packed(const MYSQL_TIME *ltime)
+{
+ longlong ymd= ((ltime->year * 13 + ltime->month) << 5) | ltime->day;
+ longlong hms= (ltime->hour << 12) | (ltime->minute << 6) | ltime->second;
+ longlong tmp= MY_PACKED_TIME_MAKE(((ymd << 17) | hms), ltime->second_part);
+ DBUG_ASSERT(!check_datetime_range(ltime)); /* Make sure no overflow */
+ return ltime->neg ? -tmp : tmp;
+}
+
+
+/**
+ Convert MySQL56 packed numeric datetime representation to MYSQL_TIME.
+ @param OUT ltime The datetime variable to convert to.
+ @param tmp The packed numeric datetime value.
+*/
+void TIME_from_longlong_datetime_packed(MYSQL_TIME *ltime, longlong tmp)
+{
+ longlong ymd, hms;
+ longlong ymdhms, ym;
+ if ((ltime->neg= (tmp < 0)))
+ tmp= -tmp;
+
+ ltime->second_part= MY_PACKED_TIME_GET_FRAC_PART(tmp);
+ ymdhms= MY_PACKED_TIME_GET_INT_PART(tmp);
+
+ ymd= ymdhms >> 17;
+ ym= ymd >> 5;
+ hms= ymdhms % (1 << 17);
+
+ ltime->day= ymd % (1 << 5);
+ ltime->month= ym % 13;
+ ltime->year= ym / 13;
+
+ ltime->second= hms % (1 << 6);
+ ltime->minute= (hms >> 6) % (1 << 6);
+ ltime->hour= (hms >> 12);
+
+ ltime->time_type= MYSQL_TIMESTAMP_DATETIME;
+}
+
+
+/**
+ Calculate binary size of MySQL56 packed datetime representation.
+ @param dec Precision.
+*/
+uint my_datetime_binary_length(uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ return 5 + (dec + 1) / 2;
+}
+
+
+/*
+  On disk we store the value as an unsigned number with the DATETIMEF_INT_OFS
+  offset, for HA_KEYTYPE_BINARY compatibility purposes.
+*/
+#define DATETIMEF_INT_OFS 0x8000000000LL
+
+
+/**
+ Convert MySQL56 on-disk datetime representation
+ to in-memory packed numeric representation.
+
+ @param ptr The pointer to read value at.
+ @param dec Precision.
+ @return In-memory packed numeric datetime representation.
+*/
+longlong my_datetime_packed_from_binary(const uchar *ptr, uint dec)
+{
+ longlong intpart= mi_uint5korr(ptr) - DATETIMEF_INT_OFS;
+ int frac;
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ switch (dec)
+ {
+ case 0:
+ default:
+ return MY_PACKED_TIME_MAKE_INT(intpart);
+ case 1:
+ case 2:
+ frac= ((int) (signed char) ptr[5]) * 10000;
+ break;
+ case 3:
+ case 4:
+ frac= mi_sint2korr(ptr + 5) * 100;
+ break;
+ case 5:
+ case 6:
+ frac= mi_sint3korr(ptr + 5);
+ break;
+ }
+ return MY_PACKED_TIME_MAKE(intpart, frac);
+}
+
+
+/**
+ Store MySQL56 in-memory numeric packed datetime representation to disk.
+
+ @param nr In-memory numeric packed datetime representation.
+ @param OUT ptr The pointer to store at.
+ @param dec Precision, 1-6.
+*/
+void my_datetime_packed_to_binary(longlong nr, uchar *ptr, uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ /* The value being stored must have been properly rounded or truncated */
+ DBUG_ASSERT((MY_PACKED_TIME_GET_FRAC_PART(nr) %
+ (int) log_10_int[TIME_SECOND_PART_DIGITS - dec]) == 0);
+
+ mi_int5store(ptr, MY_PACKED_TIME_GET_INT_PART(nr) + DATETIMEF_INT_OFS);
+ switch (dec)
+ {
+ case 0:
+ default:
+ break;
+ case 1:
+ case 2:
+ ptr[5]= (unsigned char) (char) (MY_PACKED_TIME_GET_FRAC_PART(nr) / 10000);
+ break;
+ case 3:
+ case 4:
+ mi_int2store(ptr + 5, MY_PACKED_TIME_GET_FRAC_PART(nr) / 100);
+ break;
+ case 5:
+ case 6:
+ mi_int3store(ptr + 5, MY_PACKED_TIME_GET_FRAC_PART(nr));
+ }
+}
+
+
+/*** MySQL56 TIMESTAMP low-level memory and disk representation routines ***/
+
+/**
+ Calculate on-disk size of a timestamp value.
+
+ @param dec Precision.
+*/
+uint my_timestamp_binary_length(uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ return 4 + (dec + 1) / 2;
+}
+
+
+/**
+ Convert MySQL56 binary timestamp representation to in-memory representation.
+
+ @param OUT tm The variable to convert to.
+ @param ptr The pointer to read the value from.
+ @param dec Precision.
+*/
+void my_timestamp_from_binary(struct timeval *tm, const uchar *ptr, uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ tm->tv_sec= mi_uint4korr(ptr);
+ switch (dec)
+ {
+ case 0:
+ default:
+ tm->tv_usec= 0;
+ break;
+ case 1:
+ case 2:
+ tm->tv_usec= ((int) ptr[4]) * 10000;
+ break;
+ case 3:
+ case 4:
+ tm->tv_usec= mi_sint2korr(ptr + 4) * 100;
+ break;
+ case 5:
+ case 6:
+ tm->tv_usec= mi_sint3korr(ptr + 4);
+ }
+}
+
+
+/**
+ Convert MySQL56 in-memory timestamp representation to on-disk representation.
+
+ @param tm The value to convert.
+ @param OUT ptr The pointer to store the value to.
+ @param dec Precision.
+*/
+void my_timestamp_to_binary(const struct timeval *tm, uchar *ptr, uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ /* Stored value must have been previously properly rounded or truncated */
+ DBUG_ASSERT((tm->tv_usec %
+ (int) log_10_int[TIME_SECOND_PART_DIGITS - dec]) == 0);
+ mi_int4store(ptr, tm->tv_sec);
+ switch (dec)
+ {
+ case 0:
+ default:
+ break;
+ case 1:
+ case 2:
+ ptr[4]= (unsigned char) (char) (tm->tv_usec / 10000);
+ break;
+ case 3:
+ case 4:
+ mi_int2store(ptr + 4, tm->tv_usec / 100);
+ break;
+ /* Impossible second precision. Fall through */
+ case 5:
+ case 6:
+ mi_int3store(ptr + 4, tm->tv_usec);
+ }
+}
+
+/****************************************/
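
The packed DATETIME layout documented above (year*13+month in 17 bits, then day, hour, minute, second, and 24 bits of microseconds) is easiest to verify with a standalone round-trip. The sketch below is illustrative only: it reimplements the MY_PACKED_TIME arithmetic with a simplified struct in place of MYSQL_TIME, so it compiles on its own and is not MariaDB code.

// Standalone sketch (illustrative only, not MariaDB code): round-trips a value
// through the MySQL-5.6 packed DATETIME arithmetic shown in compat56.cc above,
// using a simplified struct in place of MYSQL_TIME.
#include <cstdint>
#include <cstdio>

struct Dt { unsigned year, month, day, hour, minute, second; unsigned long usec; bool neg; };

// Mirrors MY_PACKED_TIME_MAKE(): integer part in the high bits, 24 bits of microseconds below.
static int64_t packed_make(int64_t i, int64_t frac) { return (i << 24) + frac; }

static int64_t datetime_to_packed(const Dt &t)
{
  int64_t ymd = ((t.year * 13 + t.month) << 5) | t.day;      // year*13+month fits in 17 bits
  int64_t hms = (t.hour << 12) | (t.minute << 6) | t.second; // 5+6+6 bits
  int64_t tmp = packed_make((ymd << 17) | hms, t.usec);
  return t.neg ? -tmp : tmp;
}

static Dt datetime_from_packed(int64_t tmp)
{
  Dt t{};
  if ((t.neg = tmp < 0))
    tmp = -tmp;
  t.usec = (unsigned long)(tmp % (1LL << 24));               // fractional part
  int64_t ymdhms = tmp >> 24;
  int64_t ymd = ymdhms >> 17, ym = ymd >> 5, hms = ymdhms % (1 << 17);
  t.day = (unsigned)(ymd % (1 << 5));
  t.month = (unsigned)(ym % 13);
  t.year = (unsigned)(ym / 13);
  t.second = (unsigned)(hms % (1 << 6));
  t.minute = (unsigned)((hms >> 6) % (1 << 6));
  t.hour = (unsigned)(hms >> 12);
  return t;
}

int main()
{
  Dt in = {2013, 4, 26, 23, 59, 59, 999999, false};
  int64_t p = datetime_to_packed(in);
  Dt out = datetime_from_packed(p);
  std::printf("packed=%lld -> %04u-%02u-%02u %02u:%02u:%02u.%06lu\n", (long long) p,
              out.year, out.month, out.day, out.hour, out.minute, out.second, out.usec);
  return 0;
}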
diff --git a/sql/compat56.h b/sql/compat56.h
new file mode 100644
index 00000000000..bb5e2670f7d
--- /dev/null
+++ b/sql/compat56.h
@@ -0,0 +1,46 @@
+#ifndef COMPAT56_H_INCLUDED
+#define COMPAT56_H_INCLUDED
+/*
+ Copyright (c) 2004, 2012, Oracle and/or its affiliates.
+ Copyright (c) 2013 MariaDB Foundation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+
+/** MySQL56 routines and macros **/
+#define MY_PACKED_TIME_GET_INT_PART(x) ((x) >> 24)
+#define MY_PACKED_TIME_GET_FRAC_PART(x) ((x) % (1LL << 24))
+#define MY_PACKED_TIME_MAKE(i, f) ((((longlong) (i)) << 24) + (f))
+#define MY_PACKED_TIME_MAKE_INT(i) ((((longlong) (i)) << 24))
+
+longlong TIME_to_longlong_datetime_packed(const MYSQL_TIME *);
+longlong TIME_to_longlong_time_packed(const MYSQL_TIME *);
+
+void TIME_from_longlong_datetime_packed(MYSQL_TIME *ltime, longlong nr);
+void TIME_from_longlong_time_packed(MYSQL_TIME *ltime, longlong nr);
+
+void my_datetime_packed_to_binary(longlong nr, uchar *ptr, uint dec);
+longlong my_datetime_packed_from_binary(const uchar *ptr, uint dec);
+uint my_datetime_binary_length(uint dec);
+
+void my_time_packed_to_binary(longlong nr, uchar *ptr, uint dec);
+longlong my_time_packed_from_binary(const uchar *ptr, uint dec);
+uint my_time_binary_length(uint dec);
+
+void my_timestamp_to_binary(const struct timeval *tm, uchar *ptr, uint dec);
+void my_timestamp_from_binary(struct timeval *tm, const uchar *ptr, uint dec);
+uint my_timestamp_binary_length(uint dec);
+/** End of MySQL routines and macros **/
+
+#endif /* COMPAT56_H_INCLUDED */
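
The TIMEF_OFS and DATETIMEF_INT_OFS offsets declared for these routines exist so that the big-endian bytes written to disk compare with memcmp() in the same order as the signed packed values, which is what binary-sorted keys rely on. A minimal standalone illustration, where store6_be() stands in for mi_int6store() (not MariaDB code):

// Standalone illustration (not MariaDB code) of why the sign-flipping offset makes
// the on-disk TIME bytes memcmp-sortable. store6_be() plays the role of mi_int6store().
#include <cstdint>
#include <cstdio>
#include <cstring>

static const int64_t TIMEF_OFS = 0x800000000000LL;      // same constant as in compat56.cc

// Store the low 48 bits in big-endian order.
static void store6_be(unsigned char *p, uint64_t v)
{
  for (int i = 0; i < 6; i++)
    p[i] = (unsigned char)(v >> (8 * (5 - i)));
}

int main()
{
  // Packed TIME values for -00:00:00.01, 00:00:00 and 00:00:01.
  int64_t vals[3] = { -10000, 0, 1LL << 24 };
  unsigned char disk[3][6];
  for (int i = 0; i < 3; i++)
    store6_be(disk[i], (uint64_t)(vals[i] + TIMEF_OFS));

  // Byte-wise comparison of the stored form agrees with the numeric order.
  std::printf("cmp(-0.01s, 0s) = %d\n", std::memcmp(disk[0], disk[1], 6));  // negative
  std::printf("cmp(0s, 1s)     = %d\n", std::memcmp(disk[1], disk[2], 6));  // negative
  return 0;
}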
diff --git a/sql/create_options.cc b/sql/create_options.cc
index dd0a14cd476..d956d01aa66 100644
--- a/sql/create_options.cc
+++ b/sql/create_options.cc
@@ -21,6 +21,7 @@
#include "create_options.h"
#include <my_getopt.h>
+#include "set_var.h"
#define FRM_QUOTED_VALUE 0x8000
@@ -74,7 +75,7 @@ void engine_option_value::link(engine_option_value **start,
}
static bool report_wrong_value(THD *thd, const char *name, const char *val,
- my_bool suppress_warning)
+ bool suppress_warning)
{
if (suppress_warning)
return 0;
@@ -92,7 +93,7 @@ static bool report_wrong_value(THD *thd, const char *name, const char *val,
}
static bool report_unknown_option(THD *thd, engine_option_value *val,
- my_bool suppress_warning)
+ bool suppress_warning)
{
DBUG_ENTER("report_unknown_option");
@@ -115,8 +116,8 @@ static bool report_unknown_option(THD *thd, engine_option_value *val,
}
static bool set_one_value(ha_create_table_option *opt,
- THD *thd, LEX_STRING *value, void *base,
- my_bool suppress_warning,
+ THD *thd, const LEX_STRING *value, void *base,
+ bool suppress_warning,
MEM_ROOT *root)
{
DBUG_ENTER("set_one_value");
@@ -126,6 +127,8 @@ static bool set_one_value(ha_create_table_option *opt,
(value->str ? value->str : "<DEFAULT>")));
switch (opt->type)
{
+ case HA_OPTION_TYPE_SYSVAR:
+ DBUG_ASSERT(0); // HA_OPTION_TYPE_SYSVAR's are replaced in resolve_sysvars()
case HA_OPTION_TYPE_ULL:
{
ulonglong *val= (ulonglong*)((char*)base + opt->offset);
@@ -257,52 +260,92 @@ static const size_t ha_option_type_sizeof[]=
@retval FALSE OK
*/
-my_bool parse_option_list(THD* thd, void *option_struct_arg,
- engine_option_value *option_list,
- ha_create_table_option *rules,
- my_bool suppress_warning,
- MEM_ROOT *root)
+bool parse_option_list(THD* thd, handlerton *hton, void *option_struct_arg,
+ engine_option_value **option_list,
+ ha_create_table_option *rules,
+ bool suppress_warning, MEM_ROOT *root)
{
ha_create_table_option *opt;
size_t option_struct_size= 0;
- engine_option_value *val= option_list;
+ engine_option_value *val, *last;
void **option_struct= (void**)option_struct_arg;
DBUG_ENTER("parse_option_list");
DBUG_PRINT("enter",
- ("struct: 0x%lx list: 0x%lx rules: 0x%lx suppres %u root 0x%lx",
- (ulong) *option_struct, (ulong)option_list, (ulong)rules,
- (uint) suppress_warning, (ulong) root));
+ ("struct: %p list: %p rules: %p suppress_warning: %u root: %p",
+ *option_struct, *option_list, rules,
+ (uint) suppress_warning, root));
if (rules)
{
- LEX_STRING default_val= {NULL, 0};
for (opt= rules; opt->name; opt++)
set_if_bigger(option_struct_size, opt->offset +
ha_option_type_sizeof[opt->type]);
*option_struct= alloc_root(root, option_struct_size);
-
- /* set all values to default */
- for (opt= rules; opt->name; opt++)
- set_one_value(opt, thd, &default_val, *option_struct,
- suppress_warning, root);
}
- for (; val; val= val->next)
+ for (opt= rules; opt && opt->name; opt++)
{
- for (opt= rules; opt && opt->name; opt++)
+ bool seen=false;
+ for (val= *option_list; val; val= val->next)
{
+ last= val;
if (my_strnncoll(system_charset_info,
(uchar*)opt->name, opt->name_length,
(uchar*)val->name.str, val->name.length))
continue;
+ seen=true;
+
+ if (val->parsed && !val->value.str)
+ continue;
+
if (set_one_value(opt, thd, &val->value,
*option_struct, suppress_warning || val->parsed, root))
DBUG_RETURN(TRUE);
val->parsed= true;
break;
}
+ if (!seen)
+ {
+ LEX_STRING default_val= null_lex_str;
+
+ /*
+ If it's CREATE/ALTER TABLE parsing mode (options are created in the
+ transient thd->mem_root, not in the long living TABLE_SHARE::mem_root),
+        and a variable-backed option was not explicitly set, the current value of the variable is used.
+
+ If it's not create, but opening of the existing frm (that was,
+ probably, created with the older version of the storage engine and
+ does not have this option stored), we take the *default* value of the
+ sysvar, not the *current* value. Because we don't want to have
+ different option values for the same table if it's opened many times.
+ */
+ if (root == thd->mem_root && opt->var)
+ {
+ // take a value from the variable and add it to the list
+ sys_var *sysvar= find_hton_sysvar(hton, opt->var);
+ DBUG_ASSERT(sysvar);
+
+ char buf[256];
+ String sbuf(buf, sizeof(buf), system_charset_info), *str;
+ if ((str= sysvar->val_str(&sbuf, thd, OPT_SESSION, 0)))
+ {
+ LEX_STRING name= { const_cast<char*>(opt->name), opt->name_length };
+ default_val.str= strmake_root(root, str->ptr(), str->length());
+ default_val.length= str->length();
+ val= new (root) engine_option_value(name, default_val, true,
+ option_list, &last);
+ val->parsed= true;
+ }
+ }
+ set_one_value(opt, thd, &default_val, *option_struct,
+ suppress_warning, root);
+ }
+ }
+
+ for (val= *option_list; val; val= val->next)
+ {
if (report_unknown_option(thd, val, suppress_warning))
DBUG_RETURN(TRUE);
val->parsed= true;
@@ -313,6 +356,102 @@ my_bool parse_option_list(THD* thd, void *option_struct_arg,
/**
+ Resolves all HA_OPTION_TYPE_SYSVAR elements.
+
+ This is done when an engine is loaded.
+*/
+static bool resolve_sysvars(handlerton *hton, ha_create_table_option *rules)
+{
+ for (ha_create_table_option *opt= rules; opt && opt->name; opt++)
+ {
+ if (opt->type == HA_OPTION_TYPE_SYSVAR)
+ {
+ struct my_option optp;
+ plugin_opt_set_limits(&optp, opt->var);
+ switch(optp.var_type) {
+ case GET_ULL:
+ case GET_ULONG:
+ case GET_UINT:
+ opt->type= HA_OPTION_TYPE_ULL;
+ opt->def_value= (ulonglong)optp.def_value;
+ opt->min_value= (ulonglong)optp.min_value;
+ opt->max_value= (ulonglong)optp.max_value;
+ opt->block_size= (ulonglong)optp.block_size;
+ break;
+ case GET_STR:
+ case GET_STR_ALLOC:
+ opt->type= HA_OPTION_TYPE_STRING;
+ break;
+ case GET_BOOL:
+ opt->type= HA_OPTION_TYPE_BOOL;
+ opt->def_value= optp.def_value;
+ break;
+ case GET_ENUM:
+ {
+ opt->type= HA_OPTION_TYPE_ENUM;
+ opt->def_value= optp.def_value;
+
+ char buf[256];
+ String str(buf, sizeof(buf), system_charset_info);
+ for (const char **s= optp.typelib->type_names; *s; s++)
+ {
+ if (str.append(*s) || str.append(','))
+ return 1;
+ }
+ DBUG_ASSERT(str.length());
+ opt->values= my_strndup(str.ptr(), str.length()-1, MYF(MY_WME));
+ if (!opt->values)
+ return 1;
+ break;
+ }
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+ }
+ return 0;
+}
+
+bool resolve_sysvar_table_options(handlerton *hton)
+{
+ return resolve_sysvars(hton, hton->table_options) ||
+ resolve_sysvars(hton, hton->field_options) ||
+ resolve_sysvars(hton, hton->index_options);
+}
+
+/*
+ Restore HA_OPTION_TYPE_SYSVAR options back as they were
+ before resolve_sysvars().
+
+ This is done when the engine is unloaded, so that we could
+ call resolve_sysvars() if the engine is installed again.
+*/
+static void free_sysvars(handlerton *hton, ha_create_table_option *rules)
+{
+ for (ha_create_table_option *opt= rules; opt && opt->name; opt++)
+ {
+ if (opt->var)
+ {
+ my_free(const_cast<char*>(opt->values));
+ opt->type= HA_OPTION_TYPE_SYSVAR;
+ opt->def_value= 0;
+ opt->min_value= 0;
+ opt->max_value= 0;
+ opt->block_size= 0;
+ opt->values= 0;
+ }
+ }
+}
+
+void free_sysvar_table_options(handlerton *hton)
+{
+ free_sysvars(hton, hton->table_options);
+ free_sysvars(hton, hton->field_options);
+ free_sysvars(hton, hton->index_options);
+}
+
+
+/**
Parses all table/fields/keys options
@param thd thread handler
@@ -323,27 +462,27 @@ my_bool parse_option_list(THD* thd, void *option_struct_arg,
@retval FALSE OK
*/
-my_bool parse_engine_table_options(THD *thd, handlerton *ht,
- TABLE_SHARE *share)
+bool parse_engine_table_options(THD *thd, handlerton *ht, TABLE_SHARE *share)
{
MEM_ROOT *root= &share->mem_root;
DBUG_ENTER("parse_engine_table_options");
- if (parse_option_list(thd, &share->option_struct, share->option_list,
+ if (parse_option_list(thd, ht, &share->option_struct, & share->option_list,
ht->table_options, TRUE, root))
DBUG_RETURN(TRUE);
for (Field **field= share->field; *field; field++)
{
- if (parse_option_list(thd, &(*field)->option_struct, (*field)->option_list,
+ if (parse_option_list(thd, ht, &(*field)->option_struct,
+ & (*field)->option_list,
ht->field_options, TRUE, root))
DBUG_RETURN(TRUE);
}
for (uint index= 0; index < share->keys; index ++)
{
- if (parse_option_list(thd, &share->key_info[index].option_struct,
- share->key_info[index].option_list,
+ if (parse_option_list(thd, ht, &share->key_info[index].option_struct,
+ & share->key_info[index].option_list,
ht->index_options, TRUE, root))
DBUG_RETURN(TRUE);
}
@@ -543,8 +682,8 @@ uchar *engine_option_value::frm_read(const uchar *buff, engine_option_value **st
@retval FALSE OK
*/
-my_bool engine_table_options_frm_read(const uchar *buff, uint length,
- TABLE_SHARE *share)
+bool engine_table_options_frm_read(const uchar *buff, uint length,
+ TABLE_SHARE *share)
{
const uchar *buff_end= buff + length;
engine_option_value *UNINIT_VAR(end);
diff --git a/sql/create_options.h b/sql/create_options.h
index ae918f6cea1..ea05bf75fac 100644
--- a/sql/create_options.h
+++ b/sql/create_options.h
@@ -69,16 +69,15 @@ class engine_option_value: public Sql_alloc
typedef struct st_key KEY;
class Create_field;
-my_bool parse_engine_table_options(THD *thd, handlerton *ht,
+bool resolve_sysvar_table_options(handlerton *hton);
+void free_sysvar_table_options(handlerton *hton);
+bool parse_engine_table_options(THD *thd, handlerton *ht, TABLE_SHARE *share);
+bool parse_option_list(THD* thd, handlerton *hton, void *option_struct,
+ engine_option_value **option_list,
+ ha_create_table_option *rules,
+ bool suppress_warning, MEM_ROOT *root);
+bool engine_table_options_frm_read(const uchar *buff, uint length,
TABLE_SHARE *share);
-my_bool parse_option_list(THD* thd, void *option_struct,
- engine_option_value *option_list,
- ha_create_table_option *rules,
- my_bool suppress_warning,
- MEM_ROOT *root);
-my_bool engine_table_options_frm_read(const uchar *buff,
- uint length,
- TABLE_SHARE *share);
engine_option_value *merge_engine_table_options(engine_option_value *source,
engine_option_value *changes,
MEM_ROOT *root);
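
The sysvar-backed option machinery these declarations expose (resolve_sysvar_table_options() when a plugin is installed, and the new default handling in parse_option_list() when the user does not set an option) follows a simple pattern: the placeholder option inherits the variable's concrete type and limits, and at CREATE time an unset option is frozen to the variable's current value. The sketch below models that flow with made-up types, not the real handlerton / sys_var / ha_create_table_option API.

// Simplified, self-contained model (hypothetical types, not MariaDB's API) of how a
// sysvar-backed table option could be resolved and defaulted, mirroring the logic of
// resolve_sysvars() and the "if (!seen)" branch of parse_option_list().
#include <cstdio>
#include <map>
#include <string>

enum OptType { OPT_SYSVAR, OPT_ULL, OPT_STRING };

struct SysVar      { OptType concrete; unsigned long long def, min, max; };
struct TableOption { const char *name; OptType type; SysVar *var;
                     unsigned long long def, min, max; };

// Plugin load: replace OPT_SYSVAR placeholders with the variable's concrete type/limits.
static void resolve_sysvars(TableOption *opts, int n)
{
  for (int i = 0; i < n; i++)
    if (opts[i].type == OPT_SYSVAR) {
      opts[i].type = opts[i].var->concrete;
      opts[i].def  = opts[i].var->def;
      opts[i].min  = opts[i].var->min;
      opts[i].max  = opts[i].var->max;
    }
}

// CREATE TABLE: an option the user did not set explicitly picks up the session value
// of the backing variable, so that value gets frozen into the table definition.
static unsigned long long effective_value(const TableOption &opt,
                                          const std::map<std::string, unsigned long long> &explicit_opts,
                                          unsigned long long session_value)
{
  auto it = explicit_opts.find(opt.name);
  if (it != explicit_opts.end())
    return it->second;                        // user wrote name=value in CREATE TABLE
  return opt.var ? session_value : opt.def;   // sysvar-backed: current variable value wins
}

int main()
{
  SysVar page_size = {OPT_ULL, 16384, 1024, 65536};
  TableOption opts[] = { {"page_size", OPT_SYSVAR, &page_size, 0, 0, 0} };
  resolve_sysvars(opts, 1);

  std::map<std::string, unsigned long long> none, set_explicitly{{"page_size", 4096}};
  std::printf("implicit: %llu\n", effective_value(opts[0], none, 32768));           // 32768
  std::printf("explicit: %llu\n", effective_value(opts[0], set_explicitly, 32768)); // 4096
  return 0;
}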
diff --git a/sql/custom_conf.h b/sql/custom_conf.h
index 137b7e9eef2..afef0219857 100644
--- a/sql/custom_conf.h
+++ b/sql/custom_conf.h
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifndef __MYSQL_CUSTOM_BUILD_CONFIG__
#define __MYSQL_CUSTOM_BUILD_CONFIG__
diff --git a/sql/datadict.cc b/sql/datadict.cc
index e3f679cc7ec..4bc74af7bdb 100644
--- a/sql/datadict.cc
+++ b/sql/datadict.cc
@@ -55,11 +55,14 @@ frm_type_enum dd_frm_type(THD *thd, char *path, enum legacy_db_type *dbt)
if the following test is true (arg #3). This should not have effect
on return value from this function (default FRMTYPE_TABLE)
*/
- if (header[0] != (uchar) 254 || header[1] != 1 ||
- (header[2] != FRM_VER && header[2] != FRM_VER+1 &&
- (header[2] < FRM_VER+3 || header[2] > FRM_VER+4)))
+ if (!is_binary_frm_header(header))
DBUG_RETURN(FRMTYPE_TABLE);
+ /*
+ XXX this is a bug.
+ if header[3] is > DB_TYPE_FIRST_DYNAMIC, then the complete
+ storage engine name must be read from the frm
+ */
*dbt= (enum legacy_db_type) (uint) *(header + 3);
/* Probably a table. */
@@ -67,117 +70,44 @@ frm_type_enum dd_frm_type(THD *thd, char *path, enum legacy_db_type *dbt)
}
-/**
- Given a table name, check type of .frm and legacy table type.
-
- @param[in] thd The current session.
- @param[in] db Table schema.
- @param[in] table_name Table database.
- @param[out] table_type handlerton of the table if FRMTYPE_TABLE,
- otherwise undefined.
-
- @return FALSE if FRMTYPE_TABLE and storage engine found. TRUE otherwise.
-*/
-
-bool dd_frm_storage_engine(THD *thd, const char *db, const char *table_name,
- handlerton **table_type)
-{
- char path[FN_REFLEN + 1];
- enum legacy_db_type db_type;
- LEX_STRING db_name = {(char *) db, strlen(db)};
-
- /* There should be at least some lock on the table. */
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db,
- table_name, MDL_SHARED));
-
- if (check_db_name(&db_name))
- {
- my_error(ER_WRONG_DB_NAME, MYF(0), db_name.str);
- return TRUE;
- }
-
- if (check_table_name(table_name, strlen(table_name), FALSE))
- {
- my_error(ER_WRONG_TABLE_NAME, MYF(0), table_name);
- return TRUE;
- }
-
- (void) build_table_filename(path, sizeof(path) - 1, db,
- table_name, reg_ext, 0);
-
- dd_frm_type(thd, path, &db_type);
-
- /* Type is unknown if the object is not found or is not a table. */
- if (db_type == DB_TYPE_UNKNOWN ||
- !(*table_type= ha_resolve_by_legacy_type(thd, db_type)))
- {
- my_error(ER_NO_SUCH_TABLE, MYF(0), db, table_name);
- return TRUE;
- }
-
- return FALSE;
-}
-
-
-/**
- Given a table name, check if the storage engine for the
- table referred by this name supports an option 'flag'.
- Return an error if the table does not exist or is not a
- base table.
-
- @pre Any metadata lock on the table.
-
- @param[in] thd The current session.
- @param[in] db Table schema.
- @param[in] table_name Table database.
- @param[in] flag The option to check.
- @param[out] yes_no The result. Undefined if error.
-*/
-
-bool dd_check_storage_engine_flag(THD *thd,
- const char *db, const char *table_name,
- uint32 flag, bool *yes_no)
-{
- handlerton *table_type;
-
- if (dd_frm_storage_engine(thd, db, table_name, &table_type))
- return TRUE;
-
- *yes_no= ha_check_storage_engine_flag(table_type, flag);
-
- return FALSE;
-}
-
-
/*
Regenerate a metadata locked table.
@param thd Thread context.
@param db Name of the database to which the table belongs to.
@param name Table name.
+ @param path For temporary tables only - path to table files.
+ Otherwise NULL (the path is calculated from db and table names).
@retval FALSE Success.
@retval TRUE Error.
*/
-bool dd_recreate_table(THD *thd, const char *db, const char *table_name)
+bool dd_recreate_table(THD *thd, const char *db, const char *table_name,
+ const char *path)
{
bool error= TRUE;
HA_CREATE_INFO create_info;
- char path[FN_REFLEN + 1];
+ char path_buf[FN_REFLEN + 1];
DBUG_ENTER("dd_recreate_table");
- /* There should be a exclusive metadata lock on the table. */
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
- MDL_EXCLUSIVE));
-
memset(&create_info, 0, sizeof(create_info));
- /* Create a path to the table, but without a extension. */
- build_table_filename(path, sizeof(path) - 1, db, table_name, "", 0);
+ if (path)
+ create_info.options|= HA_LEX_CREATE_TMP_TABLE;
+ else
+ {
+ build_table_filename(path_buf, sizeof(path_buf) - 1,
+ db, table_name, "", 0);
+ path= path_buf;
+
+ /* There should be a exclusive metadata lock on the table. */
+ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
+ MDL_EXCLUSIVE));
+ }
/* Attempt to reconstruct the table. */
- error= ha_create_table(thd, path, db, table_name, &create_info, TRUE);
+ error= ha_create_table(thd, path, db, table_name, &create_info, NULL);
DBUG_RETURN(error);
}
diff --git a/sql/datadict.h b/sql/datadict.h
index f852b02f52c..dd80942daca 100644
--- a/sql/datadict.h
+++ b/sql/datadict.h
@@ -28,14 +28,22 @@ enum frm_type_enum
FRMTYPE_VIEW
};
+/*
+ Take extra care when using dd_frm_type() - it only checks the .frm file,
+ and it won't work for any engine that supports discovery.
+
+ Prefer to use ha_table_exists() instead.
+ To check whether it's an frm of a view, use dd_frm_is_view().
+*/
frm_type_enum dd_frm_type(THD *thd, char *path, enum legacy_db_type *dbt);
-bool dd_frm_storage_engine(THD *thd, const char *db, const char *table_name,
- handlerton **table_type);
-bool dd_check_storage_engine_flag(THD *thd,
- const char *db, const char *table_name,
- uint32 flag,
- bool *yes_no);
-bool dd_recreate_table(THD *thd, const char *db, const char *table_name);
+static inline bool dd_frm_is_view(THD *thd, char *path)
+{
+ enum legacy_db_type not_used;
+ return dd_frm_type(thd, path, &not_used) == FRMTYPE_VIEW;
+}
+
+bool dd_recreate_table(THD *thd, const char *db, const char *table_name,
+ const char *path = NULL);
#endif // DATADICT_INCLUDED
diff --git a/sql/db.opt b/sql/db.opt
new file mode 100644
index 00000000000..d8429c4e0de
--- /dev/null
+++ b/sql/db.opt
@@ -0,0 +1,2 @@
+default-character-set=latin1
+default-collation=latin1_swedish_ci
diff --git a/sql/discover.cc b/sql/discover.cc
index b9dba92a780..9351cf034ab 100644
--- a/sql/discover.cc
+++ b/sql/discover.cc
@@ -45,7 +45,7 @@
3 Could not allocate data for read. Could not read file
*/
-int readfrm(const char *name, uchar **frmdata, size_t *len)
+int readfrm(const char *name, const uchar **frmdata, size_t *len)
{
int error;
char index_file[FN_REFLEN];
@@ -70,13 +70,17 @@ int readfrm(const char *name, uchar **frmdata, size_t *len)
error= 2;
if (mysql_file_fstat(file, &state, MYF(0)))
goto err;
- read_len= (size_t)state.st_size;
+ read_len= (size_t)MY_MIN(FRM_MAX_SIZE, state.st_size); // safety
// Read whole frm file
error= 3;
- read_data= 0; // Nothing to free
- if (read_string(file, &read_data, read_len))
+ if (!(read_data= (uchar*)my_malloc(read_len, MYF(MY_WME))))
goto err;
+ if (mysql_file_read(file, read_data, read_len, MYF(MY_NABP)))
+ {
+ my_free(read_data);
+ goto err;
+ }
// Setup return data
*frmdata= (uchar*) read_data;
@@ -96,7 +100,7 @@ int readfrm(const char *name, uchar **frmdata, size_t *len)
Write the content of a frm data pointer
to a frm file.
- @param name path to table-file "db/name"
+ @param path path to table-file "db/name"
@param frmdata frm data
@param len length of the frmdata
@@ -106,29 +110,160 @@ int readfrm(const char *name, uchar **frmdata, size_t *len)
2 Could not write file
*/
-int writefrm(const char *name, const uchar *frmdata, size_t len)
+int writefrm(const char *path, const char *db, const char *table,
+ bool tmp_table, const uchar *frmdata, size_t len)
{
- File file;
- char index_file[FN_REFLEN];
+ char file_name[FN_REFLEN+1];
int error;
+ int create_flags= O_RDWR | O_TRUNC;
DBUG_ENTER("writefrm");
- DBUG_PRINT("enter",("name: '%s' len: %lu ",name, (ulong) len));
+ DBUG_PRINT("enter",("name: '%s' len: %lu ",path, (ulong) len));
- error= 0;
- if ((file= mysql_file_create(key_file_frm,
- fn_format(index_file, name, "", reg_ext,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- CREATE_MODE, O_RDWR | O_TRUNC,
- MYF(MY_WME))) >= 0)
+ if (tmp_table)
+ create_flags|= O_EXCL | O_NOFOLLOW;
+
+ strxnmov(file_name, sizeof(file_name)-1, path, reg_ext, NullS);
+
+ File file= mysql_file_create(key_file_frm, file_name,
+ CREATE_MODE, create_flags, MYF(0));
+
+ if ((error= file < 0))
{
- if (mysql_file_write(file, frmdata, len, MYF(MY_WME | MY_NABP)))
- error= 2;
- (void) mysql_file_close(file, MYF(0));
+ if (my_errno == ENOENT)
+ my_error(ER_BAD_DB_ERROR, MYF(0), db);
+ else
+ my_error(ER_CANT_CREATE_TABLE, MYF(0), db, table, my_errno);
+ }
+ else
+ {
+ error= mysql_file_write(file, frmdata, len, MYF(MY_WME | MY_NABP));
+
+ if (!error && !tmp_table && opt_sync_frm)
+ error= mysql_file_sync(file, MYF(MY_WME)) ||
+ my_sync_dir_by_file(file_name, MYF(MY_WME));
+
+ error|= mysql_file_close(file, MYF(MY_WME));
}
DBUG_RETURN(error);
} /* writefrm */
+static inline void advance(FILEINFO* &from, FILEINFO* &to,
+ FILEINFO* cur, bool &skip)
+{
+ if (skip) // if not copying
+ from= cur; // just advance the start pointer
+ else // if copying
+ if (to == from) // but to the same place (not shifting the data)
+ from= to= cur; // advance both pointers
+ else // otherwise
+ while (from < cur) // have to copy [from...cur) to [to...)
+ *to++ = *from++;
+ skip= false;
+}
+
+/**
+ Go through the directory listing looking for files with a specified
+ extension and add them to the result list
+
+ @details
+ This function may be called many times on the same directory listing
+ but with different extensions. To avoid discovering the same table twice,
+ whenever a table file is discovered, all files with the same name
+ (independently from the extensions) are removed from the list.
+ Example: the list contained
+ { "db.opt", "t1.MYD", "t1.MYI", "t1.frm", "t2.ARZ", "t3.ARZ", "t3.frm" }
+ on discovering all ".frm" files, tables "t1" and "t3" will be found,
+ and list will become
+ { "db.opt", "t2.ARZ" }
+ and now ".ARZ" discovery can discover the table "t2"
+ @note
+ This function assumes that the directory listing is sorted alphabetically.
+ @note Partitioning makes this more complicated. A partitioned table t1 might
+ have files, like t1.frm, t1#P#part1.ibd, t1#P#foo.ibd, etc.
+ That means we need to compare file names only up to the first '#' or '.'
+ whichever comes first.
+*/
+int extension_based_table_discovery(MY_DIR *dirp, const char *ext_meta,
+ handlerton::discovered_list *result)
+{
+ CHARSET_INFO *cs= character_set_filesystem;
+ size_t ext_meta_len= strlen(ext_meta);
+ FILEINFO *from, *to, *cur, *end;
+ bool skip= false;
+
+ from= to= cur= dirp->dir_entry;
+ end= cur + dirp->number_of_files;
+ while (cur < end)
+ {
+ char *octothorp= strrchr(cur->name + 1, '#');
+ char *ext= strchr(octothorp ? octothorp : cur->name, FN_EXTCHAR);
+
+ if (ext)
+ {
+ size_t len= (octothorp ? octothorp : ext) - cur->name;
+ if (from != cur &&
+ (my_strnncoll(cs, (uchar*)from->name, len, (uchar*)cur->name, len) ||
+ (from->name[len] != FN_EXTCHAR && from->name[len] != '#')))
+ advance(from, to, cur, skip);
+
+ if (my_strnncoll(cs, (uchar*)ext, strlen(ext),
+ (uchar*)ext_meta, ext_meta_len) == 0)
+ {
+ *ext = 0;
+ if (result->add_file(cur->name))
+ return 1;
+ *ext = FN_EXTCHAR;
+ skip= true; // table discovered, skip all files with the same name
+ }
+ }
+ else
+ {
+ advance(from, to, cur, skip);
+ from++;
+ }
+
+ cur++;
+ }
+ advance(from, to, cur, skip);
+ dirp->number_of_files= to - dirp->dir_entry;
+ return 0;
+}
+
+/**
+ Simple, not reusable file-based table discovery
+
+ @details
+ simplified version of extension_based_table_discovery(), that does not
+ modify the list of files. It cannot be called many times for the same
+ directory listing, otherwise it'll produce duplicate results.
+*/
+int ext_table_discovery_simple(MY_DIR *dirp,
+ handlerton::discovered_list *result)
+{
+ CHARSET_INFO *cs= character_set_filesystem;
+ FILEINFO *cur, *end;
+
+ cur= dirp->dir_entry;
+ end= cur + dirp->number_of_files;
+ while (cur < end)
+ {
+ char *ext= strrchr(cur->name, FN_EXTCHAR);
+
+ if (ext)
+ {
+ if (my_strnncoll(cs, (uchar*)ext, strlen(ext),
+ (uchar*)reg_ext, reg_ext_length) == 0)
+ {
+ *ext = 0;
+ if (result->add_file(cur->name))
+ return 1;
+ }
+ }
+ cur++;
+ }
+ return 0;
+}
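
The deduplication rule described in the extension_based_table_discovery() comment (group a sorted listing by base name, i.e. everything before the first '.' or the '#' partition suffix; report a table when a file with the metadata extension is present; drop the whole group so a later pass with another engine's extension does not rediscover it) can be shown with a self-contained sketch over plain strings. It is not the FILEINFO-based implementation above, just the same idea:

// Self-contained sketch (std::string based, not the FILEINFO version above) of the
// extension-based discovery pass: report every table that has a file with the given
// metadata extension and drop all files sharing that table's base name, keeping the
// rest for a later pass with another engine's extension.
#include <cstdio>
#include <string>
#include <vector>

// Base name = everything before the first '#' (partition suffix) or '.', whichever comes first.
static std::string base_name(const std::string &f)
{
  return f.substr(0, f.find_first_of("#.", 1));
}

static void discover(std::vector<std::string> &files,   // sorted; trimmed in place
                     const std::string &ext,
                     std::vector<std::string> &tables)
{
  std::vector<std::string> kept;
  size_t i = 0;
  while (i < files.size()) {
    size_t j = i;
    bool found = false;
    std::string base = base_name(files[i]);
    while (j < files.size() && base_name(files[j]) == base) {    // one group per table name
      if (files[j].size() > ext.size() &&
          files[j].compare(files[j].size() - ext.size(), ext.size(), ext) == 0)
        found = true;
      j++;
    }
    if (found)
      tables.push_back(base);                                    // table discovered
    else
      kept.insert(kept.end(), files.begin() + i, files.begin() + j);  // keep for a later pass
    i = j;
  }
  files.swap(kept);
}

int main()
{
  std::vector<std::string> files = {"db.opt", "t1.MYD", "t1.MYI", "t1.frm",
                                    "t2.ARZ", "t3#P#p1.ibd", "t3.frm", "t3.ibd"};
  std::vector<std::string> tables;
  discover(files, ".frm", tables);   // finds t1 and t3; leaves {db.opt, t2.ARZ}
  discover(files, ".ARZ", tables);   // finds t2
  for (const auto &t : tables) std::printf("discovered: %s\n", t.c_str());
  return 0;
}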
diff --git a/sql/discover.h b/sql/discover.h
index a663e44128d..e1508107235 100644
--- a/sql/discover.h
+++ b/sql/discover.h
@@ -18,7 +18,24 @@
#include "my_global.h" /* uchar */
-int readfrm(const char *name, uchar **data, size_t *length);
-int writefrm(const char* name, const uchar* data, size_t len);
+int extension_based_table_discovery(MY_DIR *dirp, const char *ext,
+ handlerton::discovered_list *tl);
+
+#ifdef MYSQL_SERVER
+int readfrm(const char *name, const uchar **data, size_t *length);
+int writefrm(const char *path, const char *db, const char *table,
+ bool tmp_table, const uchar *frmdata, size_t len);
+
+/* a helper to delete an frm file, given a path w/o .frm extension */
+inline void deletefrm(const char *path)
+{
+ char frm_name[FN_REFLEN];
+ strxmov(frm_name, path, reg_ext, NullS);
+ mysql_file_delete(key_file_frm, frm_name, MYF(0));
+}
+
+int ext_table_discovery_simple(MY_DIR *dirp,
+ handlerton::discovered_list *result);
+#endif
#endif /* DISCOVER_INCLUDED */
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 860f31e5c3a..34658ab51ac 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -55,7 +55,7 @@ const TABLE_FIELD_TYPE event_table_fields[ET_FIELD_COUNT] =
},
{
{ C_STRING_WITH_LEN("definer") },
- { C_STRING_WITH_LEN("char(77)") },
+ { C_STRING_WITH_LEN("char(") },
{ C_STRING_WITH_LEN("utf8") }
},
{
@@ -163,7 +163,7 @@ const TABLE_FIELD_TYPE event_table_fields[ET_FIELD_COUNT] =
};
static const TABLE_FIELD_DEF
- event_table_def= {ET_FIELD_COUNT, event_table_fields};
+event_table_def= {ET_FIELD_COUNT, event_table_fields, 0, (uint*) 0};
class Event_db_intact : public Table_check_intact
{
diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc
index 86c5c6ec1d0..7647419aff9 100644
--- a/sql/event_parse_data.cc
+++ b/sql/event_parse_data.cc
@@ -574,8 +574,8 @@ void Event_parse_data::check_originator_id(THD *thd)
status= Event_parse_data::SLAVESIDE_DISABLED;
status_changed= true;
}
- originator = thd->server_id;
+ originator = thd->variables.server_id;
}
else
- originator = server_id;
+ originator = global_system_variables.server_id;
}
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index da8e4388a21..f75a8abc835 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -133,11 +133,11 @@ post_init_event_thread(THD *thd)
return TRUE;
}
+ thread_safe_increment32(&thread_count, &thread_count_lock);
mysql_mutex_lock(&LOCK_thread_count);
threads.append(thd);
- thread_count++;
- inc_thread_running();
mysql_mutex_unlock(&LOCK_thread_count);
+ inc_thread_running();
return FALSE;
}
@@ -155,12 +155,8 @@ deinit_event_thread(THD *thd)
{
thd->proc_info= "Clearing";
DBUG_PRINT("exit", ("Event thread finishing"));
- mysql_mutex_lock(&LOCK_thread_count);
- thread_count--;
- dec_thread_running();
- delete thd;
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+
+ delete_running_thd(thd);
}
@@ -441,12 +437,7 @@ Event_scheduler::start()
ret= TRUE;
new_thd->proc_info= "Clearing";
- mysql_mutex_lock(&LOCK_thread_count);
- thread_count--;
- dec_thread_running();
- delete new_thd;
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+ delete_running_thd(new_thd);
}
end:
UNLOCK_DATA();
@@ -575,12 +566,7 @@ error:
if (new_thd)
{
new_thd->proc_info= "Clearing";
- mysql_mutex_lock(&LOCK_thread_count);
- thread_count--;
- dec_thread_running();
- delete new_thd;
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+ delete_running_thd(new_thd);
}
delete event_name;
DBUG_RETURN(TRUE);
diff --git a/sql/field.cc b/sql/field.cc
index d84baac41f5..9b374c2770d 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2008, 2011, Monty Program Ab
+ Copyright (c) 2008, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ const char field_separator=',';
#define LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE 128
#define DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE 128
#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
-((ulong) ((LL(1) << MY_MIN(arg, 4) * 8) - LL(1)))
+ ((ulong) ((1LL << MY_MIN(arg, 4) * 8) - 1))
#define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table || (!table->read_set || bitmap_is_set(table->read_set, field_index)))
#define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED DBUG_ASSERT(is_stat_field || !table || (!table->write_set || bitmap_is_set(table->write_set, field_index) || bitmap_is_set(table->vcol_set, field_index)))
@@ -87,6 +87,7 @@ const char field_separator=',';
#define FIELDTYPE_NUM (FIELDTYPE_TEAR_FROM + (255 - FIELDTYPE_TEAR_TO))
static inline int field_type2index (enum_field_types field_type)
{
+ field_type= real_type_to_type(field_type);
return (field_type < FIELDTYPE_TEAR_FROM ?
field_type :
((int)FIELDTYPE_TEAR_FROM) + (field_type - FIELDTYPE_TEAR_TO) - 1);
@@ -199,7 +200,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_LONG, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
@@ -230,7 +231,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_FLOAT, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_FLOAT, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_FLOAT, MYSQL_TYPE_FLOAT,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
@@ -261,7 +262,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_DOUBLE, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
@@ -292,7 +293,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_NULL, MYSQL_TYPE_TIMESTAMP,
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
- MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONGLONG,
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
MYSQL_TYPE_NEWDATE, MYSQL_TYPE_TIME,
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
@@ -947,8 +948,10 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
enum_field_types Field::field_type_merge(enum_field_types a,
enum_field_types b)
{
- DBUG_ASSERT(a < FIELDTYPE_TEAR_FROM || a > FIELDTYPE_TEAR_TO);
- DBUG_ASSERT(b < FIELDTYPE_TEAR_FROM || b > FIELDTYPE_TEAR_TO);
+ DBUG_ASSERT(real_type_to_type(a) < FIELDTYPE_TEAR_FROM ||
+ real_type_to_type(a) > FIELDTYPE_TEAR_TO);
+ DBUG_ASSERT(real_type_to_type(b) < FIELDTYPE_TEAR_FROM ||
+ real_type_to_type(b) > FIELDTYPE_TEAR_TO);
return field_types_merge_rules[field_type2index(a)]
[field_type2index(b)];
}
@@ -1042,8 +1045,8 @@ CPP_UNNAMED_NS_END
Item_result Field::result_merge_type(enum_field_types field_type)
{
- DBUG_ASSERT(field_type < FIELDTYPE_TEAR_FROM || field_type
- > FIELDTYPE_TEAR_TO);
+ DBUG_ASSERT(real_type_to_type(field_type) < FIELDTYPE_TEAR_FROM ||
+ real_type_to_type(field_type) > FIELDTYPE_TEAR_TO);
return field_types_result_type[field_type2index(field_type)];
}
@@ -1130,6 +1133,111 @@ void Field::make_sort_key(uchar *buff,uint length)
/**
+ @brief
+ Determine the relative position of the field value in a numeric interval
+
+ @details
+ The function returns a double number between 0.0 and 1.0 as the relative
+  position of the value of this field in the numeric interval [min,max].
+  If the value is not in the interval, the function returns 0.0 when the
+  value is less than min, and 1.0 when it is greater than max.
+
+ @param min value of the left end of the interval
+ @param max value of the right end of the interval
+
+ @return
+ relative position of the field value in the numeric interval [min,max]
+*/
+
+double Field::pos_in_interval_val_real(Field *min, Field *max)
+{
+ double n, d;
+ n= val_real() - min->val_real();
+ if (n < 0)
+ return 0.0;
+ d= max->val_real() - min->val_real();
+ if (d <= 0)
+ return 1.0;
+ return MY_MIN(n/d, 1.0);
+}
+
+
+static
+inline ulonglong char_prefix_to_ulonglong(uchar *src)
+{
+ uint sz= sizeof(ulonglong);
+ for (uint i= 0; i < sz/2; i++)
+ {
+ uchar tmp= src[i];
+ src[i]= src[sz-1-i];
+ src[sz-1-i]= tmp;
+ }
+ return uint8korr(src);
+}
+
+
+/**
+ @brief
+ Determine the relative position of the field value in a string interval
+
+ @details
+ The function returns a double number between 0.0 and 1.0 as the relative
+  position of the value of this field in the string interval [min,max].
+  If the value is not in the interval, the function returns 0.0 when the
+  value is less than min, and 1.0 when it is greater than max.
+
+ @note
+ To calculate the relative position of the string value v in the interval
+ [min, max] the function first converts the beginning of these three
+ strings v, min, max into the strings that are used for byte comparison.
+  For each string, at most the first sizeof(ulonglong) bytes are taken
+ from the result of conversion. Then these bytes are interpreted as the
+ big-endian representation of an ulonglong integer. The values of these
+ integer numbers obtained for the strings v, min, max are used to calculate
+  the position of v in [min,max] in the same way as it is done for numeric
+ fields (see Field::pos_in_interval_val_real).
+
+ @todo
+ Improve the procedure for the case when min and max have the same
+ beginning
+
+ @param min value of the left end of the interval
+ @param max value of the right end of the interval
+
+ @return
+ relative position of the field value in the string interval [min,max]
+*/
+
+double Field::pos_in_interval_val_str(Field *min, Field *max, uint data_offset)
+{
+ uchar mp_prefix[sizeof(ulonglong)];
+ uchar minp_prefix[sizeof(ulonglong)];
+ uchar maxp_prefix[sizeof(ulonglong)];
+ ulonglong mp, minp, maxp;
+ my_strnxfrm(charset(), mp_prefix, sizeof(mp),
+ ptr + data_offset,
+ data_length());
+ my_strnxfrm(charset(), minp_prefix, sizeof(minp),
+ min->ptr + data_offset,
+ min->data_length());
+ my_strnxfrm(charset(), maxp_prefix, sizeof(maxp),
+ max->ptr + data_offset,
+ max->data_length());
+ mp= char_prefix_to_ulonglong(mp_prefix);
+ minp= char_prefix_to_ulonglong(minp_prefix);
+ maxp= char_prefix_to_ulonglong(maxp_prefix);
+ double n, d;
+ n= mp - minp;
+ if (n < 0)
+ return 0.0;
+ d= maxp - minp;
+ if (d <= 0)
+ return 1.0;
+ return MY_MIN(n/d, 1.0);
+}
+
+
+/**
Numeric fields base class constructor.
*/
Field_num::Field_num(uchar *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
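
The pos_in_interval helpers added above reduce to clamping (v - min) / (max - min) into [0, 1], with strings first mapped to an 8-byte sort-key prefix read as a big-endian integer. A standalone approximation follows; the raw bytes stand in for the my_strnxfrm() sort key, which is only faithful for binary collations:

// Standalone sketch of the pos_in_interval idea used for selectivity estimates:
// map a value to its relative position in [min, max], clamped to [0.0, 1.0].
// For strings the server first runs the values through my_strnxfrm(); here the raw
// bytes stand in for that sort key, which is only correct for binary collations.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>

static double pos_in_interval(double v, double min, double max)
{
  double n = v - min;
  if (n < 0)
    return 0.0;
  double d = max - min;
  if (d <= 0)
    return 1.0;
  return std::min(n / d, 1.0);
}

// First 8 "sort key" bytes interpreted as a big-endian integer (shorter keys are
// zero-padded), mirroring char_prefix_to_ulonglong() in the hunk above.
static uint64_t prefix_to_u64(const std::string &s)
{
  unsigned char buf[8] = {0};
  std::memcpy(buf, s.data(), std::min<size_t>(8, s.size()));
  uint64_t v = 0;
  for (int i = 0; i < 8; i++)
    v = (v << 8) | buf[i];
  return v;
}

int main()
{
  std::printf("%.3f\n", pos_in_interval(25, 0, 100));                        // 0.250
  std::printf("%.3f\n", pos_in_interval((double) prefix_to_u64("mango"),
                                        (double) prefix_to_u64("apple"),
                                        (double) prefix_to_u64("zebra")));   // somewhere in (0,1)
  return 0;
}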
@@ -1274,6 +1382,7 @@ out_of_range:
return 1;
}
+
/**
Process decimal library return codes and issue warnings for overflow and
truncation.
@@ -1345,6 +1454,8 @@ Field::Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
comment.length=0;
field_index= 0;
is_stat_field= FALSE;
+ cond_selectivity= 1.0;
+ next_equal_field= NULL;
}
@@ -1790,7 +1901,7 @@ bool Field::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
if (!(res=val_str(&tmp)) ||
str_to_datetime_with_warn(res->charset(), res->ptr(), res->length(),
- ltime, fuzzydate) <= MYSQL_TIMESTAMP_ERROR)
+ ltime, fuzzydate))
return 1;
return 0;
}
@@ -3644,7 +3755,7 @@ int Field_long::store(longlong nr, bool unsigned_val)
res=0;
error= 1;
}
- else if ((ulonglong) nr >= (LL(1) << 32))
+ else if ((ulonglong) nr >= (1LL << 32))
{
res=(int32) (uint32) ~0L;
error= 1;
@@ -4453,13 +4564,12 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg,
const char *field_name_arg,
- TABLE_SHARE *share,
- CHARSET_INFO *cs)
- :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, cs)
+ TABLE_SHARE *share)
+ :Field_temporal(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg)
{
/* For 4.0 MYD and 4.0 InnoDB compatibility */
- flags|= UNSIGNED_FLAG | BINARY_FLAG;
+ flags|= UNSIGNED_FLAG;
if (unireg_check != NONE)
{
/*
@@ -4538,18 +4648,18 @@ int Field_timestamp::store_time_dec(MYSQL_TIME *ltime, uint dec)
int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
{
MYSQL_TIME l_time;
- int error;
- int have_smth_to_conv;
+ MYSQL_TIME_STATUS status;
+ bool have_smth_to_conv;
ErrConvString str(from, len, cs);
THD *thd= get_thd();
/* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */
- have_smth_to_conv= (str_to_datetime(cs, from, len, &l_time,
+ have_smth_to_conv= !str_to_datetime(cs, from, len, &l_time,
(thd->variables.sql_mode &
MODE_NO_ZERO_DATE) |
- MODE_NO_ZERO_IN_DATE, &error) >
- MYSQL_TIMESTAMP_ERROR);
- return store_TIME_with_warning(thd, &l_time, &str, error, have_smth_to_conv);
+ MODE_NO_ZERO_IN_DATE, &status);
+ return store_TIME_with_warning(thd, &l_time, &str,
+ status.warnings, have_smth_to_conv);
}
@@ -4578,7 +4688,7 @@ int Field_timestamp::store(longlong nr, bool unsigned_val)
longlong tmp= number_to_datetime(nr, 0, &l_time, (thd->variables.sql_mode &
MODE_NO_ZERO_DATE) |
MODE_NO_ZERO_IN_DATE, &error);
- return store_TIME_with_warning(thd, &l_time, &str, error, tmp != LL(-1));
+ return store_TIME_with_warning(thd, &l_time, &str, error, tmp != -1);
}
@@ -4604,6 +4714,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr)
{
MYSQL_TIME ltime;
uint32 temp, temp2;
+ uint dec;
char *to;
val_buffer->alloc(field_length+1);
@@ -4658,6 +4769,16 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr)
*to++= (char) ('0'+(char) (temp));
*to= 0;
val_buffer->set_charset(&my_charset_numeric);
+
+ if ((dec= decimals()))
+ {
+ ulong sec_part= (ulong) sec_part_shift(ltime.second_part, dec);
+ char *buf= const_cast<char*>(val_buffer->ptr() + MAX_DATETIME_WIDTH);
+ for (int i= dec; i > 0; i--, sec_part/= 10)
+ buf[i]= (char)(sec_part % 10) + '0';
+ buf[0]= '.';
+ buf[dec + 1]= 0;
+ }
return val_buffer;
}
@@ -4711,7 +4832,14 @@ void Field_timestamp::sort_string(uchar *to,uint length __attribute__((unused)))
void Field_timestamp::sql_type(String &res) const
{
- res.set_ascii(STRING_WITH_LEN("timestamp"));
+ if (!decimals())
+ {
+ res.set_ascii(STRING_WITH_LEN("timestamp"));
+ return;
+ }
+ CHARSET_INFO *cs=res.charset();
+ res.length(cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
+ "timestamp(%u)", decimals()));
}
@@ -4751,13 +4879,6 @@ void Field_timestamp::set_explicit_default(Item *value)
set_has_explicit_value();
}
-void Field_timestamp_hires::sql_type(String &res) const
-{
- CHARSET_INFO *cs=res.charset();
- res.length(cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
- "timestamp(%u)", dec));
-}
-
#ifdef NOT_USED
static void store_native(ulonglong num, uchar *to, uint bytes)
{
@@ -4851,7 +4972,7 @@ my_time_t Field_timestamp_hires::get_timestamp(ulong *sec_part) const
return mi_uint4korr(ptr);
}
-double Field_timestamp_hires::val_real(void)
+double Field_timestamp_with_dec::val_real(void)
{
MYSQL_TIME ltime;
if (get_date(&ltime, TIME_NO_ZERO_DATE))
@@ -4862,31 +4983,14 @@ double Field_timestamp_hires::val_real(void)
ltime.minute * 1e2 + ltime.second + ltime.second_part*1e-6;
}
-String *Field_timestamp_hires::val_str(String *val_buffer, String *val_ptr)
-{
- String *tmp= Field_timestamp::val_str(val_buffer, val_ptr);
- ulong sec_part= (ulong)read_bigendian(ptr+4, sec_part_bytes[dec]);
-
- if (tmp->ptr() == zero_timestamp)
- return tmp;
-
- char *buf= const_cast<char*>(tmp->ptr() + MAX_DATETIME_WIDTH);
- for (int i=dec; i>0; i--, sec_part/=10)
- buf[i]= (char)(sec_part % 10) + '0';
- buf[0]= '.';
- buf[dec+1]= 0;
- return tmp;
-}
-
-
-my_decimal *Field_timestamp_hires::val_decimal(my_decimal *d)
+my_decimal *Field_timestamp_with_dec::val_decimal(my_decimal *d)
{
MYSQL_TIME ltime;
get_date(&ltime, 0);
return TIME_to_my_decimal(&ltime, d);
}
-int Field_timestamp_hires::store_decimal(const my_decimal *d)
+int Field_timestamp::store_decimal(const my_decimal *d)
{
ulonglong nr;
ulong sec_part;
@@ -4909,7 +5013,7 @@ int Field_timestamp_hires::store_decimal(const my_decimal *d)
return store_TIME_with_warning(thd, &ltime, &str, error, tmp != -1);
}
-int Field_timestamp_hires::set_time()
+int Field_timestamp_with_dec::set_time()
{
THD *thd= get_thd();
set_notnull();
@@ -4917,7 +5021,7 @@ int Field_timestamp_hires::set_time()
return 0;
}
-bool Field_timestamp_hires::send_binary(Protocol *protocol)
+bool Field_timestamp_with_dec::send_binary(Protocol *protocol)
{
MYSQL_TIME ltime;
Field_timestamp::get_date(&ltime, 0);
@@ -4938,23 +5042,72 @@ int Field_timestamp_hires::cmp(const uchar *a_ptr, const uchar *b_ptr)
}
-void Field_timestamp_hires::sort_string(uchar *to,uint length)
-{
- DBUG_ASSERT(length == Field_timestamp_hires::pack_length());
- memcpy(to, ptr, length);
-}
-
uint32 Field_timestamp_hires::pack_length() const
{
return 4 + sec_part_bytes[dec];
}
-void Field_timestamp_hires::make_field(Send_field *field)
+void Field_timestamp_with_dec::make_field(Send_field *field)
{
Field::make_field(field);
field->decimals= dec;
}
+
+/*************************************************************
+** MySQL-5.6 compatible TIMESTAMP(N)
+**************************************************************/
+
+void Field_timestampf::store_TIME(my_time_t timestamp, ulong sec_part)
+{
+ struct timeval tm;
+ tm.tv_sec= timestamp;
+ tm.tv_usec= sec_part;
+ my_timeval_trunc(&tm, dec);
+ my_timestamp_to_binary(&tm, ptr, dec);
+}
+
+
+my_time_t Field_timestampf::get_timestamp(ulong *sec_part) const
+{
+ struct timeval tm;
+ my_timestamp_from_binary(&tm, ptr, dec);
+ *sec_part= tm.tv_usec;
+ return tm.tv_sec;
+}
+
+
+/*************************************************************/
+uint Field_temporal::is_equal(Create_field *new_field)
+{
+ return new_field->sql_type == real_type() &&
+ new_field->length == max_display_length();
+}
+
+
+void Field_temporal::set_warnings(Sql_condition::enum_warning_level trunc_level,
+ const ErrConv *str, int was_cut,
+ timestamp_type ts_type)
+{
+ /*
+ error code logic:
+ MYSQL_TIME_WARN_TRUNCATED means that the value was not a date/time at all;
+ it will be stored as a zero date/time.
+ MYSQL_TIME_WARN_OUT_OF_RANGE means that the value was a date/time,
+ that is, it was parsed as such, but the value was invalid.
+
+ Also, MYSQL_TIME_WARN_TRUNCATED is used when storing a DATETIME in
+ a DATE field and non-zero time part is thrown away.
+ */
+ if (was_cut & MYSQL_TIME_WARN_TRUNCATED)
+ set_datetime_warning(trunc_level, WARN_DATA_TRUNCATED,
+ str, mysql_type_to_time_type(type()), 1);
+ if (was_cut & MYSQL_TIME_WARN_OUT_OF_RANGE)
+ set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE,
+ str, mysql_type_to_time_type(type()), 1);
+}
+
+
/*
Store string into a date/time field
@@ -4965,21 +5118,21 @@ void Field_timestamp_hires::make_field(Send_field *field)
3 Datetime value that was cut (warning level NOTE)
This is used by opt_range.cc:get_mm_leaf().
*/
-int Field_temporal::store_TIME_with_warning(MYSQL_TIME *ltime,
- const ErrConv *str,
- int was_cut, int have_smth_to_conv)
+int Field_temporal_with_date::store_TIME_with_warning(MYSQL_TIME *ltime,
+ const ErrConv *str,
+ int was_cut,
+ int have_smth_to_conv)
{
Sql_condition::enum_warning_level trunc_level= Sql_condition::WARN_LEVEL_WARN;
int ret= 2;
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
- if (was_cut == 0 &&
- have_smth_to_conv == 0 &&
- mysql_type_to_time_type(type()) != MYSQL_TIMESTAMP_TIME) // special case: zero date
+ if (was_cut == 0 && have_smth_to_conv == 0) // special case: zero date
+ {
was_cut= MYSQL_TIME_WARN_OUT_OF_RANGE;
- else
- if (!have_smth_to_conv)
+ }
+ else if (!have_smth_to_conv)
{
bzero(ltime, sizeof(*ltime));
was_cut= MYSQL_TIME_WARN_TRUNCATED;
@@ -4993,57 +5146,28 @@ int Field_temporal::store_TIME_with_warning(MYSQL_TIME *ltime,
was_cut|= MYSQL_TIME_WARN_TRUNCATED;
ret= 3;
}
- else if (!(was_cut & MYSQL_TIME_WARN_TRUNCATED) &&
- mysql_type_to_time_type(type()) == MYSQL_TIMESTAMP_TIME &&
- (ltime->year || ltime->month))
- {
- ltime->year= ltime->month= ltime->day= 0;
- trunc_level= Sql_condition::WARN_LEVEL_NOTE;
- was_cut|= MYSQL_TIME_WARN_TRUNCATED;
- ret= 3;
- }
-
- /*
- error code logic:
- MYSQL_TIME_WARN_TRUNCATED means that the value was not a date/time at all.
- it will be stored as zero date/time.
- MYSQL_TIME_WARN_OUT_OF_RANGE means that the value was a date/time,
- that is, it was parsed as such, but the value was invalid.
-
- Also, MYSQL_TIME_WARN_TRUNCATED is used when storing a DATETIME in
- a DATE field and non-zero time part is thrown away.
- */
- if (was_cut & MYSQL_TIME_WARN_TRUNCATED)
- set_datetime_warning(trunc_level, WARN_DATA_TRUNCATED,
- str, mysql_type_to_time_type(type()), 1);
- if (was_cut & MYSQL_TIME_WARN_OUT_OF_RANGE)
- set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE,
- str, mysql_type_to_time_type(type()), 1);
-
+ set_warnings(trunc_level, str, was_cut, mysql_type_to_time_type(type()));
store_TIME(ltime);
return was_cut ? ret : 0;
}
-int Field_temporal::store(const char *from,uint len,CHARSET_INFO *cs)
+int Field_temporal_with_date::store(const char *from, uint len, CHARSET_INFO *cs)
{
MYSQL_TIME ltime;
- int error;
- enum enum_mysql_timestamp_type func_res;
+ MYSQL_TIME_STATUS status;
THD *thd= get_thd();
ErrConvString str(from, len, cs);
-
- func_res= str_to_datetime(cs, from, len, &ltime,
- (TIME_FUZZY_DATE |
- (thd->variables.sql_mode &
- (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))),
- &error);
- return store_TIME_with_warning(&ltime, &str, error, func_res > MYSQL_TIMESTAMP_ERROR);
+ bool func_res= !str_to_datetime(cs, from, len, &ltime,
+ (thd->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES)),
+ &status);
+ return store_TIME_with_warning(&ltime, &str, status.warnings, func_res);
}
-int Field_temporal::store(double nr)
+int Field_temporal_with_date::store(double nr)
{
int error= 0;
MYSQL_TIME ltime;
@@ -5051,16 +5175,15 @@ int Field_temporal::store(double nr)
ErrConvDouble str(nr);
longlong tmp= double_to_datetime(nr, &ltime,
- (TIME_FUZZY_DATE |
- (thd->variables.sql_mode &
+ (thd->variables.sql_mode &
(MODE_NO_ZERO_IN_DATE |
MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))), &error);
+ MODE_INVALID_DATES)), &error);
return store_TIME_with_warning(&ltime, &str, error, tmp != -1);
}
-int Field_temporal::store(longlong nr, bool unsigned_val)
+int Field_temporal_with_date::store(longlong nr, bool unsigned_val)
{
int error;
MYSQL_TIME ltime;
@@ -5068,17 +5191,16 @@ int Field_temporal::store(longlong nr, bool unsigned_val)
THD *thd= get_thd();
ErrConvInteger str(nr);
- tmp= number_to_datetime(nr, 0, &ltime, (TIME_FUZZY_DATE |
- (thd->variables.sql_mode &
+ tmp= number_to_datetime(nr, 0, &ltime, (thd->variables.sql_mode &
(MODE_NO_ZERO_IN_DATE |
MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))), &error);
+ MODE_INVALID_DATES)), &error);
return store_TIME_with_warning(&ltime, &str, error, tmp != -1);
}
-int Field_temporal::store_time_dec(MYSQL_TIME *ltime, uint dec)
+int Field_temporal_with_date::store_time_dec(MYSQL_TIME *ltime, uint dec)
{
int error = 0, have_smth_to_conv= 1;
MYSQL_TIME l_time= *ltime;
@@ -5088,17 +5210,16 @@ int Field_temporal::store_time_dec(MYSQL_TIME *ltime, uint dec)
structure always fit into DATETIME range.
*/
have_smth_to_conv= !check_date(&l_time, pack_time(&l_time) != 0,
- (TIME_FUZZY_DATE |
- (current_thd->variables.sql_mode &
+ (current_thd->variables.sql_mode &
(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))), &error);
+ MODE_INVALID_DATES)), &error);
return store_TIME_with_warning(&l_time, &str, error, have_smth_to_conv);
}
my_decimal *Field_temporal::val_decimal(my_decimal *d)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE))
+ if (get_date(&ltime, 0))
{
bzero(&ltime, sizeof(ltime));
ltime.time_type= mysql_type_to_time_type(type());
@@ -5112,6 +5233,35 @@ my_decimal *Field_temporal::val_decimal(my_decimal *d)
** In number context: HHMMSS
** Stored as a 3 byte unsigned int
****************************************************************************/
+int Field_time::store_TIME_with_warning(MYSQL_TIME *ltime,
+ const ErrConv *str,
+ int was_cut,
+ int have_smth_to_conv)
+{
+ Sql_condition::enum_warning_level trunc_level= Sql_condition::WARN_LEVEL_WARN;
+ int ret= 2;
+
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+
+ if (!have_smth_to_conv)
+ {
+ bzero(ltime, sizeof(*ltime));
+ was_cut= MYSQL_TIME_WARN_TRUNCATED;
+ ret= 1;
+ }
+ else if (!(was_cut & MYSQL_TIME_WARN_TRUNCATED) &&
+ (ltime->year || ltime->month))
+ {
+ ltime->year= ltime->month= ltime->day= 0;
+ trunc_level= Sql_condition::WARN_LEVEL_NOTE;
+ was_cut|= MYSQL_TIME_WARN_TRUNCATED;
+ ret= 3;
+ }
+ set_warnings(trunc_level, str, was_cut, MYSQL_TIMESTAMP_TIME);
+ store_TIME(ltime);
+ return was_cut ? ret : 0;
+}
+
void Field_time::store_TIME(MYSQL_TIME *ltime)
{
@@ -5125,16 +5275,17 @@ void Field_time::store_TIME(MYSQL_TIME *ltime)
int Field_time::store(const char *from,uint len,CHARSET_INFO *cs)
{
MYSQL_TIME ltime;
+ MYSQL_TIME_STATUS status;
ErrConvString str(from, len, cs);
- int was_cut;
- int have_smth_to_conv=
- str_to_time(cs, from, len, &ltime,
+ bool have_smth_to_conv=
+ !str_to_time(cs, from, len, &ltime,
get_thd()->variables.sql_mode &
(MODE_NO_ZERO_DATE | MODE_NO_ZERO_IN_DATE |
MODE_INVALID_DATES),
- &was_cut) > MYSQL_TIMESTAMP_ERROR;
+ &status);
- return store_TIME_with_warning(&ltime, &str, was_cut, have_smth_to_conv);
+ return store_TIME_with_warning(&ltime, &str,
+ status.warnings, have_smth_to_conv);
}
@@ -5197,32 +5348,16 @@ longlong Field_time::val_int(void)
my_charset_bin
*/
-String *Field_time::val_str(String *val_buffer,
- String *val_ptr __attribute__((unused)))
+String *Field_time::val_str(String *str,
+ String *unused __attribute__((unused)))
{
ASSERT_COLUMN_MARKED_FOR_READ;
MYSQL_TIME ltime;
- long tmp=(long) sint3korr(ptr);
- ltime.neg= 0;
- if (tmp < 0)
- {
- tmp= -tmp;
- ltime.neg= 1;
- }
- ltime.year= ltime.month= 0;
- ltime.day= (uint) 0;
- ltime.hour= (uint) (tmp/10000);
- ltime.minute= (uint) (tmp/100 % 100);
- ltime.second= (uint) (tmp % 100);
- ltime.second_part= 0;
-
- val_buffer->alloc(MAX_DATE_STRING_REP_LENGTH);
- uint length= (uint) my_time_to_str(&ltime,
- const_cast<char*>(val_buffer->ptr()), 0);
- val_buffer->length(length);
- val_buffer->set_charset(&my_charset_numeric);
-
- return val_buffer;
+ get_date(&ltime, TIME_TIME_ONLY);
+ str->alloc(field_length + 1);
+ str->length(my_time_to_str(&ltime, const_cast<char*>(str->ptr()), decimals()));
+ str->set_charset(&my_charset_numeric);
+ return str;
}
@@ -5235,9 +5370,10 @@ String *Field_time::val_str(String *val_buffer,
bool Field_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
- THD *thd= get_thd();
- if (!(fuzzydate & (TIME_FUZZY_DATE|TIME_TIME_ONLY)))
+ if (!(fuzzydate & TIME_TIME_ONLY) &&
+ (fuzzydate & TIME_NO_ZERO_IN_DATE))
{
+ THD *thd= get_thd();
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE,
ER(ER_WARN_DATA_OUT_OF_RANGE), field_name,
@@ -5265,8 +5401,8 @@ bool Field_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
bool Field_time::send_binary(Protocol *protocol)
{
MYSQL_TIME ltime;
- Field_time::get_date(&ltime, TIME_TIME_ONLY);
- return protocol->store_time(&ltime, 0);
+ get_date(&ltime, TIME_TIME_ONLY);
+ return protocol->store_time(&ltime, decimals());
}
@@ -5287,7 +5423,14 @@ void Field_time::sort_string(uchar *to,uint length __attribute__((unused)))
void Field_time::sql_type(String &res) const
{
- res.set_ascii(STRING_WITH_LEN("time"));
+ if (decimals() == 0)
+ {
+ res.set_ascii(STRING_WITH_LEN("time"));
+ return;
+ }
+ const CHARSET_INFO *cs= res.charset();
+ res.length(cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
+ "time(%d)", decimals()));
}
int Field_time_hires::reset()
@@ -5303,7 +5446,7 @@ void Field_time_hires::store_TIME(MYSQL_TIME *ltime)
store_bigendian(packed, ptr, Field_time_hires::pack_length());
}
-int Field_time_hires::store_decimal(const my_decimal *d)
+int Field_time::store_decimal(const my_decimal *d)
{
ulonglong nr;
ulong sec_part;
@@ -5322,35 +5465,23 @@ uint32 Field_time_hires::pack_length() const
return time_hires_bytes[dec];
}
-longlong Field_time_hires::val_int(void)
+longlong Field_time_with_dec::val_int(void)
{
ASSERT_COLUMN_MARKED_FOR_READ;
MYSQL_TIME ltime;
- Field_time_hires::get_date(&ltime, TIME_TIME_ONLY);
+ get_date(&ltime, TIME_TIME_ONLY);
longlong val= TIME_to_ulonglong_time(&ltime);
return ltime.neg ? -val : val;
}
-double Field_time_hires::val_real(void)
+double Field_time_with_dec::val_real(void)
{
ASSERT_COLUMN_MARKED_FOR_READ;
MYSQL_TIME ltime;
- Field_time_hires::get_date(&ltime, TIME_TIME_ONLY);
+ get_date(&ltime, TIME_TIME_ONLY);
return TIME_to_double(&ltime);
}
-String *Field_time_hires::val_str(String *str,
- String *unused __attribute__((unused)))
-{
- ASSERT_COLUMN_MARKED_FOR_READ;
- MYSQL_TIME ltime;
- Field_time_hires::get_date(&ltime, TIME_TIME_ONLY);
- str->alloc(field_length+1);
- str->length(my_time_to_str(&ltime, (char*) str->ptr(), dec));
- str->set_charset(&my_charset_bin);
- return str;
-}
-
bool Field_time_hires::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
uint32 len= pack_length();
@@ -5366,15 +5497,7 @@ bool Field_time_hires::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
ltime->time_type= MYSQL_TIMESTAMP_TIME;
ltime->hour+= (ltime->month*32+ltime->day)*24;
ltime->month= ltime->day= 0;
- return fuzzydate & (TIME_FUZZY_DATE | TIME_TIME_ONLY) ? 0 : 1;
-}
-
-
-bool Field_time_hires::send_binary(Protocol *protocol)
-{
- MYSQL_TIME ltime;
- Field_time_hires::get_date(&ltime, TIME_TIME_ONLY);
- return protocol->store_time(&ltime, dec);
+ return !(fuzzydate & TIME_TIME_ONLY) && (fuzzydate & TIME_NO_ZERO_IN_DATE);
}
@@ -5392,17 +5515,36 @@ void Field_time_hires::sort_string(uchar *to,uint length __attribute__((unused))
to[0]^= 128;
}
-void Field_time_hires::sql_type(String &res) const
+void Field_time_with_dec::make_field(Send_field *field)
{
- CHARSET_INFO *cs=res.charset();
- res.length(cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
- "time(%u)", dec));
+ Field::make_field(field);
+ field->decimals= dec;
}
-void Field_time_hires::make_field(Send_field *field)
+/****************************************************************************
+** time type with fsp (MySQL-5.6 version)
+** In string context: HH:MM:SS.FFFFFF
+** In number context: HHMMSS.FFFFFF
+****************************************************************************/
+
+int Field_timef::reset()
{
- Field::make_field(field);
- field->decimals= dec;
+ my_time_packed_to_binary(0, ptr, dec);
+ return 0;
+}
+
+void Field_timef::store_TIME(MYSQL_TIME *ltime)
+{
+ my_time_trunc(ltime, decimals());
+ longlong tmp= TIME_to_longlong_time_packed(ltime);
+ my_time_packed_to_binary(tmp, ptr, dec);
+}
+
+bool Field_timef::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+{
+ longlong tmp= my_time_packed_from_binary(ptr, dec);
+ TIME_from_longlong_time_packed(ltime, tmp);
+ return false;
}
/****************************************************************************
@@ -5757,11 +5899,11 @@ void Field_datetime::store_TIME(MYSQL_TIME *ltime)
bool Field_datetime::send_binary(Protocol *protocol)
{
MYSQL_TIME tm;
- Field_datetime::get_date(&tm, TIME_FUZZY_DATE);
+ Field_datetime::get_date(&tm, 0);
return protocol->store(&tm, 0);
}
-
-
+
+
double Field_datetime::val_real(void)
{
return (double) Field_datetime::val_int();
@@ -5794,8 +5936,8 @@ String *Field_datetime::val_str(String *val_buffer,
Avoid problem with slow longlong arithmetic and sprintf
*/
- part1=(long) (tmp/LL(1000000));
- part2=(long) (tmp - (ulonglong) part1*LL(1000000));
+ part1=(long) (tmp/1000000LL);
+ part2=(long) (tmp - (ulonglong) part1*1000000LL);
pos=(char*) val_buffer->ptr() + MAX_DATETIME_WIDTH;
*pos--=0;
@@ -5826,8 +5968,8 @@ bool Field_datetime::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
longlong tmp=Field_datetime::val_int();
uint32 part1,part2;
- part1=(uint32) (tmp/LL(1000000));
- part2=(uint32) (tmp - (ulonglong) part1*LL(1000000));
+ part1=(uint32) (tmp/1000000LL);
+ part2=(uint32) (tmp - (ulonglong) part1*1000000LL);
ltime->time_type= MYSQL_TIMESTAMP_DATETIME;
ltime->neg= 0;
@@ -5841,7 +5983,7 @@ bool Field_datetime::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
if (!tmp)
return fuzzydate & TIME_NO_ZERO_DATE;
if (!ltime->month || !ltime->day)
- return !(fuzzydate & TIME_FUZZY_DATE);
+ return fuzzydate & TIME_NO_ZERO_IN_DATE;
return 0;
}
@@ -5869,7 +6011,14 @@ void Field_datetime::sort_string(uchar *to,uint length __attribute__((unused)))
void Field_datetime::sql_type(String &res) const
{
- res.set_ascii(STRING_WITH_LEN("datetime"));
+ if (decimals() == 0)
+ {
+ res.set_ascii(STRING_WITH_LEN("datetime"));
+ return;
+ }
+ CHARSET_INFO *cs= res.charset();
+ res.length(cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
+ "datetime(%u)", decimals()));
}
@@ -5892,7 +6041,7 @@ void Field_datetime_hires::store_TIME(MYSQL_TIME *ltime)
store_bigendian(packed, ptr, Field_datetime_hires::pack_length());
}
-int Field_datetime_hires::store_decimal(const my_decimal *d)
+int Field_temporal_with_date::store_decimal(const my_decimal *d)
{
ulonglong nr;
ulong sec_part;
@@ -5908,47 +6057,46 @@ int Field_datetime_hires::store_decimal(const my_decimal *d)
error= 2;
}
else
- tmp= number_to_datetime(nr, sec_part, &ltime, (TIME_FUZZY_DATE |
- (thd->variables.sql_mode &
+ tmp= number_to_datetime(nr, sec_part, &ltime, (thd->variables.sql_mode &
(MODE_NO_ZERO_IN_DATE |
MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))), &error);
+ MODE_INVALID_DATES)), &error);
return store_TIME_with_warning(&ltime, &str, error, tmp != -1);
}
-bool Field_datetime_hires::send_binary(Protocol *protocol)
+bool Field_datetime_with_dec::send_binary(Protocol *protocol)
{
MYSQL_TIME ltime;
- Field_datetime_hires::get_date(&ltime, TIME_FUZZY_DATE);
+ get_date(&ltime, 0);
return protocol->store(&ltime, dec);
}
-double Field_datetime_hires::val_real(void)
+double Field_datetime_with_dec::val_real(void)
{
MYSQL_TIME ltime;
- Field_datetime_hires::get_date(&ltime, TIME_FUZZY_DATE);
+ get_date(&ltime, 0);
return TIME_to_double(&ltime);
}
-longlong Field_datetime_hires::val_int(void)
+longlong Field_datetime_with_dec::val_int(void)
{
MYSQL_TIME ltime;
- Field_datetime_hires::get_date(&ltime, TIME_FUZZY_DATE);
+ get_date(&ltime, 0);
return TIME_to_ulonglong_datetime(&ltime);
}
-String *Field_datetime_hires::val_str(String *str,
- String *unused __attribute__((unused)))
+String *Field_datetime_with_dec::val_str(String *str,
+ String *unused __attribute__((unused)))
{
MYSQL_TIME ltime;
- Field_datetime_hires::get_date(&ltime, TIME_FUZZY_DATE);
+ get_date(&ltime, 0);
str->alloc(field_length+1);
str->length(field_length);
my_datetime_to_str(&ltime, (char*) str->ptr(), dec);
- str->set_charset(&my_charset_bin);
+ str->set_charset(&my_charset_numeric);
return str;
}
@@ -5959,7 +6107,7 @@ bool Field_datetime_hires::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
if (!packed)
return fuzzydate & TIME_NO_ZERO_DATE;
if (!ltime->month || !ltime->day)
- return !(fuzzydate & TIME_FUZZY_DATE);
+ return fuzzydate & TIME_NO_ZERO_IN_DATE;
return 0;
}
@@ -5975,27 +6123,42 @@ int Field_datetime_hires::cmp(const uchar *a_ptr, const uchar *b_ptr)
return a < b ? -1 : a > b ? 1 : 0;
}
-void Field_datetime_hires::sort_string(uchar *to,
- uint length __attribute__((unused)))
+void Field_datetime_with_dec::make_field(Send_field *field)
{
- DBUG_ASSERT(length == Field_datetime_hires::pack_length());
- memcpy(to, ptr, length);
+ Field::make_field(field);
+ field->decimals= dec;
}
-void Field_datetime_hires::sql_type(String &res) const
+/****************************************************************************
+** MySQL-5.6 compatible DATETIME(N)
+**
+****************************************************************************/
+int Field_datetimef::reset()
{
- CHARSET_INFO *cs=res.charset();
- res.length(cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(),
- "datetime(%u)", dec));
+ my_datetime_packed_to_binary(0, ptr, dec);
+ return 0;
}
-void Field_datetime_hires::make_field(Send_field *field)
+void Field_datetimef::store_TIME(MYSQL_TIME *ltime)
{
- Field::make_field(field);
- field->decimals= dec;
+ my_time_trunc(ltime, decimals());
+ longlong tmp= TIME_to_longlong_datetime_packed(ltime);
+ my_datetime_packed_to_binary(tmp, ptr, dec);
}
+bool Field_datetimef::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+{
+ longlong tmp= my_datetime_packed_from_binary(ptr, dec);
+ TIME_from_longlong_datetime_packed(ltime, tmp);
+ if (!tmp)
+ return fuzzydate & TIME_NO_ZERO_DATE;
+ if (!ltime->month || !ltime->day)
+ return fuzzydate & TIME_NO_ZERO_IN_DATE;
+ return false;
+}
+
+
/****************************************************************************
** string type
** A string may be varchar or binary
@@ -6166,7 +6329,6 @@ int Field_str::store(double nr)
return store(buff, length, &my_charset_numeric);
}
-
uint Field::is_equal(Create_field *new_field)
{
return (new_field->sql_type == real_type());
@@ -7618,6 +7780,19 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs)
if (wkb_type < (uint32) Geometry::wkb_point ||
wkb_type > (uint32) Geometry::wkb_last)
goto err;
+
+ if (geom_type != Field::GEOM_GEOMETRY &&
+ geom_type != Field::GEOM_GEOMETRYCOLLECTION &&
+ (uint32) geom_type != wkb_type)
+ {
+ my_printf_error(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
+ ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), MYF(0),
+ Geometry::ci_collection[geom_type]->m_name.str,
+ Geometry::ci_collection[wkb_type]->m_name.str, field_name,
+ (ulong) table->in_use->get_stmt_da()->current_row_for_warning());
+ goto err_exit;
+ }
+
Field_blob::store_length(length);
if (table->copy_blobs || length <= MAX_FIELD_WIDTH)
{ // Must make a copy
@@ -7629,9 +7804,10 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs)
return 0;
err:
- bzero(ptr, Field_blob::pack_length());
my_message(ER_CANT_CREATE_GEOMETRY_OBJECT,
ER(ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0));
+err_exit:
+ bzero(ptr, Field_blob::pack_length());
return -1;
}
@@ -7894,7 +8070,7 @@ int Field_set::store(longlong nr, bool unsigned_val)
if (sizeof(ulonglong)*8 <= typelib->count)
max_nr= ULONGLONG_MAX;
else
- max_nr= (ULL(1) << typelib->count) - 1;
+ max_nr= (1ULL << typelib->count) - 1;
if ((ulonglong) nr > max_nr)
{
@@ -8871,6 +9047,7 @@ void Create_field::init_for_tmp_table(enum_field_types sql_type_arg,
FLAGSTR(pack_flag, FIELDFLAG_DECIMAL),
f_packtype(pack_flag)));
vcol_info= 0;
+ create_if_not_exists= FALSE;
stored_in_db= TRUE;
DBUG_VOID_RETURN;
@@ -8908,7 +9085,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
char *fld_change, List<String> *fld_interval_list,
CHARSET_INFO *fld_charset, uint fld_geom_type,
Virtual_column_info *fld_vcol_info,
- engine_option_value *create_opt)
+ engine_option_value *create_opt, bool check_exists)
{
uint sign_len, allowed_type_modifier= 0;
ulong max_field_charlength= MAX_FIELD_CHARLENGTH;
@@ -8962,6 +9139,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
comment= *fld_comment;
vcol_info= fld_vcol_info;
+ create_if_not_exists= check_exists;
stored_in_db= TRUE;
/* Initialize data for a computed field */
@@ -8993,7 +9171,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
it is NOT NULL, not an AUTO_INCREMENT field and not a TIMESTAMP.
*/
if (!fld_default_value && !(fld_type_modifier & AUTO_INCREMENT_FLAG) &&
- (fld_type_modifier & NOT_NULL_FLAG) && fld_type != MYSQL_TYPE_TIMESTAMP)
+ (fld_type_modifier & NOT_NULL_FLAG) && !is_timestamp_type(fld_type))
flags|= NO_DEFAULT_VALUE_FLAG;
if (fld_length != NULL)
@@ -9155,6 +9333,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
}
break;
case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP2:
if (length > MAX_DATETIME_PRECISION)
{
my_error(ER_TOO_BIG_PRECISION, MYF(0), length, fld_name,
@@ -9172,6 +9351,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
length= MAX_DATE_WIDTH;
break;
case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_TIME2:
if (length > MAX_DATETIME_PRECISION)
{
my_error(ER_TOO_BIG_PRECISION, MYF(0), length, fld_name,
@@ -9181,6 +9361,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
length+= MIN_TIME_WIDTH + (length ? 1 : 0);
break;
case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_DATETIME2:
if (length > MAX_DATETIME_PRECISION)
{
my_error(ER_TOO_BIG_PRECISION, MYF(0), length, fld_name,
@@ -9262,17 +9443,6 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
DBUG_RETURN(TRUE);
}
- switch (fld_type) {
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_NEWDATE:
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_TIMESTAMP:
- charset= &my_charset_numeric;
- flags|= BINARY_FLAG;
- default: break;
- }
-
DBUG_RETURN(FALSE); /* success */
}
@@ -9311,10 +9481,16 @@ uint32 calc_pack_length(enum_field_types type,uint32 length)
case MYSQL_TYPE_TIME: return length > MIN_TIME_WIDTH
? time_hires_bytes[length - 1 - MIN_TIME_WIDTH]
: 3;
+ case MYSQL_TYPE_TIME2:
+ return length > MIN_TIME_WIDTH ?
+ my_time_binary_length(length - MIN_TIME_WIDTH - 1) : 3;
case MYSQL_TYPE_TIMESTAMP:
return length > MAX_DATETIME_WIDTH
? 4 + sec_part_bytes[length - 1 - MAX_DATETIME_WIDTH]
: 4;
+ case MYSQL_TYPE_TIMESTAMP2:
+ return length > MAX_DATETIME_WIDTH ?
+ my_timestamp_binary_length(length - MAX_DATETIME_WIDTH - 1) : 4;
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_LONG : return 4;
case MYSQL_TYPE_FLOAT : return sizeof(float);
@@ -9323,6 +9499,9 @@ uint32 calc_pack_length(enum_field_types type,uint32 length)
return length > MAX_DATETIME_WIDTH
? datetime_hires_bytes[length - 1 - MAX_DATETIME_WIDTH]
: 8;
+ case MYSQL_TYPE_DATETIME2:
+ return length > MAX_DATETIME_WIDTH ?
+ my_datetime_binary_length(length - MAX_DATETIME_WIDTH - 1) : 5;
case MYSQL_TYPE_LONGLONG: return 8; /* Don't crash if no longlong */
case MYSQL_TYPE_NULL : return 0;
case MYSQL_TYPE_TINY_BLOB: return 1+portable_sizeof_char_ptr;
@@ -9387,16 +9566,6 @@ Field *make_field(TABLE_SHARE *share, uchar *ptr, uint32 field_length,
null_bit= ((uchar) 1) << null_bit;
}
- switch (field_type) {
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_NEWDATE:
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_TIMESTAMP:
- field_charset= &my_charset_numeric;
- default: break;
- }
-
DBUG_PRINT("debug", ("field_type: %d, field_length: %u, interval: %p, pack_flag: %s%s%s%s%s",
field_type, field_length, interval,
FLAGSTR(pack_flag, FIELDFLAG_BINARY),
@@ -9510,30 +9679,51 @@ Field *make_field(TABLE_SHARE *share, uchar *ptr, uint32 field_length,
uint dec= field_length > MAX_DATETIME_WIDTH ?
field_length - MAX_DATETIME_WIDTH - 1: 0;
return new_Field_timestamp(ptr, null_pos, null_bit, unireg_check,
- field_name, share, dec, field_charset);
+ field_name, share, dec);
+ }
+ case MYSQL_TYPE_TIMESTAMP2:
+ {
+ uint dec= field_length > MAX_DATETIME_WIDTH ?
+ field_length - MAX_DATETIME_WIDTH - 1: 0;
+ return new Field_timestampf(ptr, null_pos, null_bit, unireg_check,
+ field_name, share, dec);
}
case MYSQL_TYPE_YEAR:
return new Field_year(ptr,field_length,null_pos,null_bit,
unireg_check, field_name);
case MYSQL_TYPE_DATE:
return new Field_date(ptr,null_pos,null_bit,
- unireg_check, field_name, field_charset);
+ unireg_check, field_name);
case MYSQL_TYPE_NEWDATE:
return new Field_newdate(ptr,null_pos,null_bit,
- unireg_check, field_name, field_charset);
+ unireg_check, field_name);
case MYSQL_TYPE_TIME:
{
uint dec= field_length > MIN_TIME_WIDTH ?
field_length - MIN_TIME_WIDTH - 1: 0;
return new_Field_time(ptr, null_pos, null_bit, unireg_check,
- field_name, dec, field_charset);
+ field_name, dec);
+ }
+ case MYSQL_TYPE_TIME2:
+ {
+ uint dec= field_length > MIN_TIME_WIDTH ?
+ field_length - MIN_TIME_WIDTH - 1: 0;
+ return new Field_timef(ptr, null_pos, null_bit, unireg_check,
+ field_name, dec);
}
case MYSQL_TYPE_DATETIME:
{
uint dec= field_length > MAX_DATETIME_WIDTH ?
field_length - MAX_DATETIME_WIDTH - 1: 0;
return new_Field_datetime(ptr, null_pos, null_bit, unireg_check,
- field_name, dec, field_charset);
+ field_name, dec);
+ }
+ case MYSQL_TYPE_DATETIME2:
+ {
+ uint dec= field_length > MAX_DATETIME_WIDTH ?
+ field_length - MAX_DATETIME_WIDTH - 1: 0;
+ return new Field_datetimef(ptr, null_pos, null_bit, unireg_check,
+ field_name, dec);
}
case MYSQL_TYPE_NULL:
return new Field_null(ptr, field_length, unireg_check, field_name,
@@ -9568,6 +9758,7 @@ Create_field::Create_field(Field *old_field,Field *orig_field)
comment= old_field->comment;
decimals= old_field->decimals();
vcol_info= old_field->vcol_info;
+ create_if_not_exists= FALSE;
stored_in_db= old_field->stored_in_db;
option_list= old_field->option_list;
option_struct= old_field->option_struct;
diff --git a/sql/field.h b/sql/field.h
index 3b4285c9cc9..40be4f7776a 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -29,7 +29,8 @@
#include "table.h" /* TABLE */
#include "sql_string.h" /* String */
#include "my_decimal.h" /* my_decimal */
-#include "sql_error.h" /* MYSQL_ERROR */
+#include "sql_error.h" /* Sql_condition */
+#include "compat56.h"
class Send_field;
class Protocol;
@@ -89,6 +90,42 @@ inline uint get_set_pack_length(int elements)
return len > 4 ? 8 : len;
}
+
+/**
+ Recognizer for concrete data type (called real_type for some reason),
+ returning true if it is one of the TIMESTAMP types.
+*/
+inline bool is_timestamp_type(enum_field_types type)
+{
+ return type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_TIMESTAMP2;
+}
+
+
+/**
+ Convert temporal real types as returned by field->real_type()
+ to the field type as returned by field->type().
+
+ @param real_type Real type.
+ @return Field type.
+*/
+inline enum_field_types real_type_to_type(enum_field_types real_type)
+{
+ switch (real_type)
+ {
+ case MYSQL_TYPE_TIME2:
+ return MYSQL_TYPE_TIME;
+ case MYSQL_TYPE_DATETIME2:
+ return MYSQL_TYPE_DATETIME;
+ case MYSQL_TYPE_TIMESTAMP2:
+ return MYSQL_TYPE_TIMESTAMP;
+ case MYSQL_TYPE_NEWDATE:
+ return MYSQL_TYPE_DATE;
+ /* Note: NEWDECIMAL is a type, not only a real_type */
+ default: return real_type;
+ }
+}
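
For instance, the new MySQL-5.6 type codes map back to the corresponding client-visible types (a usage sketch only; it assumes field.h is included for the enum and the inline function above):

#include <cassert>

static void check_real_type_mapping()
{
  assert(real_type_to_type(MYSQL_TYPE_TIME2)      == MYSQL_TYPE_TIME);
  assert(real_type_to_type(MYSQL_TYPE_DATETIME2)  == MYSQL_TYPE_DATETIME);
  assert(real_type_to_type(MYSQL_TYPE_TIMESTAMP2) == MYSQL_TYPE_TIMESTAMP);
  assert(real_type_to_type(MYSQL_TYPE_NEWDATE)    == MYSQL_TYPE_DATE);
  assert(real_type_to_type(MYSQL_TYPE_LONG)       == MYSQL_TYPE_LONG); // others pass through unchanged
}
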
+
+
/*
Virtual_column_info is the class to contain additional
characteristics that is specific for a virtual/computed
@@ -220,7 +257,23 @@ public:
*/
bool is_created_from_null_item;
- bool is_stat_field; /* TRUE in Field objects created for column min/max values */
+ /* TRUE in Field objects created for column min/max values */
+ bool is_stat_field;
+
+ /*
+ Selectivity of the range condition over this field.
+ When calculating this selectivity a range predicate
+ is taken into account only if:
+ - it is extracted from the WHERE clause
+ - it depends only on the table the field belongs to
+ */
+ double cond_selectivity;
+
+ /*
+ The next field in the class of equal fields at the top AND level
+ of the WHERE clause
+ */
+ Field *next_equal_field;
/*
This structure is used for statistical data on the column
@@ -412,6 +465,54 @@ public:
virtual uint32 key_length() const { return pack_length(); }
virtual enum_field_types type() const =0;
virtual enum_field_types real_type() const { return type(); }
+ virtual enum_field_types binlog_type() const
+ {
+ /*
+ Binlog stores field->type() as type code by default. For example,
+ it puts MYSQL_TYPE_STRING in case of CHAR, VARCHAR, SET and ENUM,
+ with extra data type details put into metadata.
+
+ Binlog behaviour slightly differs between various MySQL and MariaDB
+ versions for the temporal data types TIME, DATETIME and TIMESTAMP.
+
+ MySQL prior to 5.6 uses MYSQL_TYPE_TIME, MYSQL_TYPE_DATETIME
+ and MYSQL_TYPE_TIMESTAMP type codes in binlog and stores no
+ additional metadata.
+
+ MariaDB-5.3 implements new versions for TIME, DATETIME, TIMESTAMP
+ with fractional second precision, but uses the old format for the
+ types TIME(0), DATETIME(0), TIMESTAMP(0), and it still stores
+ MYSQL_TYPE_TIME, MYSQL_TYPE_DATETIME and MYSQL_TYPE_TIMESTAMP in binlog,
+ with no additional metadata.
+ So row-based replication between temporal data types of
+ different precision is not possible in MariaDB.
+
+ MySQL-5.6 also implements a new version of TIME, DATETIME, TIMESTAMP
+ which support fractional second precision 0..6, and use the new
+ format even for the types TIME(0), DATETIME(0), TIMESTAMP(0).
+ For these new data types, MySQL-5.6 stores new type codes
+ MYSQL_TYPE_TIME2, MYSQL_TYPE_DATETIME2, MYSQL_TYPE_TIMESTAMP2 in binlog,
+ with fractional precision 0..6 put into metadata.
+ This makes it in theory possible to do row-based replication between
+ columns of different fractional precision (e.g. from TIME(1) on master
+ to TIME(6) on slave). However, this is not yet fully implemented.
+ MySQL-5.6 can only do row-based replication from the old types
+ TIME, DATETIME, TIMESTAMP (represented by MYSQL_TYPE_TIME,
+ MYSQL_TYPE_DATETIME and MYSQL_TYPE_TIMESTAMP type codes in binlog)
+ to the new corresponding types TIME(0), DATETIME(0), TIMESTAMP(0).
+
+ Note: MariaDB, starting from version 10.0, understands the new
+ MySQL-5.6 type codes MYSQL_TYPE_TIME2, MYSQL_TYPE_DATETIME2,
+ MYSQL_TYPE_TIMESTAMP2. When run over MySQL-5.6 tables on both
+ master and slave, MariaDB-10.0 can also do row-based replication
+ from the old types TIME, DATETIME, TIMESTAMP to the new MySQL-5.6
+ types TIME(0), DATETIME(0), TIMESTAMP(0).
+
+ Note: perhaps binlog should eventually be modified to store
+ real_type() instead of type() for all column types.
+ */
+ return type();
+ }
inline int cmp(const uchar *str) { return cmp(ptr,str); }
virtual int cmp_max(const uchar *a, const uchar *b, uint max_len)
{ return cmp(a, b); }
@@ -456,6 +557,10 @@ public:
}
return update_fl;
}
+ virtual void store_field_value(uchar *val, uint len)
+ {
+ memcpy(ptr, val, len);
+ }
virtual uint decimals() const { return 0; }
/*
Caller beware: sql_type can change str.Ptr, so check
@@ -662,6 +767,16 @@ public:
{ return binary() ? &my_charset_bin : charset(); }
virtual CHARSET_INFO *sort_charset(void) const { return charset(); }
virtual bool has_charset(void) const { return FALSE; }
+ /*
+ match_collation_to_optimize_range() is used in the range optimizer
+ (see opt_range.cc) to distinguish between the real string types:
+ CHAR, VARCHAR, TEXT
+ and the other string-like types with result_type() == STRING_RESULT:
+ DATE, TIME, DATETIME, TIMESTAMP
+ We need it to decide whether to test whether the collation of the operation
+ matches the collation of the field (needed only for real string types).
+ */
+ virtual bool match_collation_to_optimize_range() const { return false; }
virtual void set_charset(CHARSET_INFO *charset_arg) { }
virtual enum Derivation derivation(void) const
{ return DERIVATION_IMPLICIT; }
@@ -748,6 +863,12 @@ public:
virtual bool hash_join_is_possible() { return TRUE; }
virtual bool eq_cmp_as_binary() { return TRUE; }
+ /* Position of the field value within the interval of [min, max] */
+ virtual double pos_in_interval(Field *min, Field *max)
+ {
+ return (double) 0.5;
+ }
+
friend int cre_myisam(char * name, register TABLE *form, uint options,
ulonglong auto_increment_value);
friend class Copy_field;
@@ -827,7 +948,8 @@ protected:
{
return (flags & (BINCMP_FLAG | BINARY_FLAG)) != 0;
}
-
+ double pos_in_interval_val_real(Field *min, Field *max);
+ double pos_in_interval_val_str(Field *min, Field *max, uint data_offset);
};
@@ -866,6 +988,10 @@ public:
bool get_int(CHARSET_INFO *cs, const char *from, uint len,
longlong *rnd, ulonglong unsigned_max,
longlong signed_min, longlong signed_max);
+ double pos_in_interval(Field *min, Field *max)
+ {
+ return pos_in_interval_val_real(min, max);
+ }
};
@@ -878,23 +1004,11 @@ public:
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg, CHARSET_INFO *charset);
Item_result result_type () const { return STRING_RESULT; }
- /*
- match_collation_to_optimize_range() is to distinguish in
- range optimizer (see opt_range.cc) between real string types:
- CHAR, VARCHAR, TEXT
- and the other string-alike types with result_type() == STRING_RESULT:
- DATE, TIME, DATETIME, TIMESTAMP
- We need it to decide whether to test if collation of the operation
- matches collation of the field (needed only for real string types).
- QQ: shouldn't DATE/TIME types have their own XXX_RESULT types eventually?
- */
- virtual bool match_collation_to_optimize_range() const=0;
uint decimals() const { return NOT_FIXED_DEC; }
int store(double nr);
int store(longlong nr, bool unsigned_val)=0;
int store_decimal(const my_decimal *);
int store(const char *to,uint length,CHARSET_INFO *cs)=0;
- uint size_of() const { return sizeof(*this); }
uint repertoire(void) const
{
return my_charset_repertoire(field_charset);
@@ -911,6 +1025,11 @@ public:
virtual bool str_needs_quotes() { return TRUE; }
uint is_equal(Create_field *new_field);
bool eq_cmp_as_binary() { return test(flags & BINARY_FLAG); }
+ virtual uint length_size() { return 0; }
+ double pos_in_interval(Field *min, Field *max)
+ {
+ return pos_in_interval_val_str(min, max, length_size());
+ }
};
/* base class for Field_string, Field_varstring and Field_blob */
@@ -930,6 +1049,7 @@ public:
int store_decimal(const my_decimal *d);
uint32 max_data_length() const;
+ bool match_collation_to_optimize_range() const { return true; }
};
/* base class for float and double and decimal (old one) */
@@ -1339,7 +1459,6 @@ public:
unireg_check_arg, field_name_arg, cs)
{}
enum_field_types type() const { return MYSQL_TYPE_NULL;}
- bool match_collation_to_optimize_range() const { return FALSE; }
int store(const char *to, uint length, CHARSET_INFO *cs)
{ null[0]=1; return 0; }
int store(double nr) { null[0]=1; return 0; }
@@ -1361,7 +1480,67 @@ public:
};
-class Field_timestamp :public Field_str {
+class Field_temporal: public Field {
+public:
+ Field_temporal(uchar *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
+ uchar null_bit_arg, utype unireg_check_arg,
+ const char *field_name_arg)
+ :Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
+ field_name_arg)
+ { flags|= BINARY_FLAG; }
+ Item_result result_type () const { return STRING_RESULT; }
+ uint32 max_display_length() { return field_length; }
+ bool str_needs_quotes() { return TRUE; }
+ enum Derivation derivation(void) const { return DERIVATION_NUMERIC; }
+ uint repertoire(void) const { return MY_REPERTOIRE_NUMERIC; }
+ CHARSET_INFO *charset(void) const { return &my_charset_numeric; }
+ const CHARSET_INFO *sort_charset(void) const { return &my_charset_bin; }
+ bool binary() const { return true; }
+ enum Item_result cmp_type () const { return TIME_RESULT; }
+ uint is_equal(Create_field *new_field);
+ bool eq_def(Field *field)
+ {
+ return (Field::eq_def(field) && decimals() == field->decimals());
+ }
+ my_decimal *val_decimal(my_decimal*);
+ void set_warnings(Sql_condition::enum_warning_level trunc_level,
+ const ErrConv *str, int was_cut, timestamp_type ts_type);
+ double pos_in_interval(Field *min, Field *max)
+ {
+ return pos_in_interval_val_real(min, max);
+ }
+};
+
+
+/**
+ Abstract class for:
+ - DATE
+ - DATETIME
+ - DATETIME(1..6)
+ - DATETIME(0..6) - MySQL56 version
+*/
+class Field_temporal_with_date: public Field_temporal {
+protected:
+ int store_TIME_with_warning(MYSQL_TIME *ltime, const ErrConv *str,
+ int was_cut, int have_smth_to_conv);
+ virtual void store_TIME(MYSQL_TIME *ltime) = 0;
+public:
+ Field_temporal_with_date(uchar *ptr_arg, uint32 len_arg,
+ uchar *null_ptr_arg, uchar null_bit_arg,
+ utype unireg_check_arg,
+ const char *field_name_arg)
+ :Field_temporal(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg)
+ {}
+ int store(const char *to, uint length, CHARSET_INFO *charset);
+ int store(double nr);
+ int store(longlong nr, bool unsigned_val);
+ int store_time_dec(MYSQL_TIME *ltime, uint dec);
+ int store_decimal(const my_decimal *);
+};
+
+
+class Field_timestamp :public Field_temporal {
protected:
int store_TIME_with_warning(THD *, MYSQL_TIME *, const ErrConv *,
bool, bool);
@@ -1369,21 +1548,14 @@ public:
Field_timestamp(uchar *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- TABLE_SHARE *share, CHARSET_INFO *cs);
- Field_timestamp(bool maybe_null_arg, const char *field_name_arg,
- CHARSET_INFO *cs);
+ TABLE_SHARE *share);
enum_field_types type() const { return MYSQL_TYPE_TIMESTAMP;}
- bool match_collation_to_optimize_range() const { return FALSE; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
- enum Item_result cmp_type () const { return TIME_RESULT; }
- enum Derivation derivation(void) const { return DERIVATION_NUMERIC; }
- uint repertoire(void) const { return MY_REPERTOIRE_NUMERIC; }
- CHARSET_INFO *charset(void) const { return &my_charset_numeric; }
- bool binary() const { return 1; }
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
int store_time_dec(MYSQL_TIME *ltime, uint dec);
+ int store_decimal(const my_decimal *);
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -1393,7 +1565,6 @@ public:
uint32 pack_length() const { return 4; }
void sql_type(String &str) const;
bool zero_pack() const { return 0; }
- uint decimals() const { return 0; }
virtual int set_time();
virtual void set_default()
{
@@ -1434,46 +1605,109 @@ public:
{
return unpack_int32(to, from, from_end);
}
+ uint size_of() const { return sizeof(*this); }
};
-class Field_timestamp_hires :public Field_timestamp {
+/**
+ Abstract class for:
+ - TIMESTAMP(1..6)
+ - TIMESTAMP(0..6) - MySQL56 version
+*/
+class Field_timestamp_with_dec :public Field_timestamp {
+protected:
uint dec;
public:
- Field_timestamp_hires(uchar *ptr_arg,
- uchar *null_ptr_arg, uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- TABLE_SHARE *share, uint dec_arg, CHARSET_INFO *cs) :
- Field_timestamp(ptr_arg, MAX_DATETIME_WIDTH + dec_arg + 1, null_ptr_arg,
- null_bit_arg, unireg_check_arg, field_name_arg, share, cs),
+ Field_timestamp_with_dec(uchar *ptr_arg,
+ uchar *null_ptr_arg, uchar null_bit_arg,
+ enum utype unireg_check_arg,
+ const char *field_name_arg,
+ TABLE_SHARE *share, uint dec_arg) :
+ Field_timestamp(ptr_arg,
+ MAX_DATETIME_WIDTH + dec_arg + test(dec_arg), null_ptr_arg,
+ null_bit_arg, unireg_check_arg, field_name_arg, share),
dec(dec_arg)
{
- DBUG_ASSERT(dec);
DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
}
- void sql_type(String &str) const;
- my_time_t get_timestamp(ulong *sec_part) const;
- void store_TIME(my_time_t timestamp, ulong sec_part);
- int store_decimal(const my_decimal *d);
- double val_real(void);
- String *val_str(String*,String *);
- my_decimal* val_decimal(my_decimal*);
- bool send_binary(Protocol *protocol);
- int cmp(const uchar *,const uchar *);
- void sort_string(uchar *buff,uint length);
uint decimals() const { return dec; }
- int set_time();
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
- void make_field(Send_field *field);
- uint32 pack_length() const;
uchar *pack(uchar *to, const uchar *from, uint max_length)
{ return Field::pack(to, from, max_length); }
const uchar *unpack(uchar* to, const uchar *from, const uchar *from_end,
uint param_data)
{ return Field::unpack(to, from, from_end, param_data); }
+ void make_field(Send_field *field);
+ void sort_string(uchar *to, uint length)
+ {
+ DBUG_ASSERT(length == pack_length());
+ memcpy(to, ptr, length);
+ }
+ bool send_binary(Protocol *protocol);
+ double val_real(void);
+ my_decimal* val_decimal(my_decimal*);
+ int set_time();
+};
+
+
+class Field_timestamp_hires :public Field_timestamp_with_dec {
+public:
+ Field_timestamp_hires(uchar *ptr_arg,
+ uchar *null_ptr_arg, uchar null_bit_arg,
+ enum utype unireg_check_arg,
+ const char *field_name_arg,
+ TABLE_SHARE *share, uint dec_arg) :
+ Field_timestamp_with_dec(ptr_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg, share, dec_arg)
+ {
+ DBUG_ASSERT(dec);
+ }
+ my_time_t get_timestamp(ulong *sec_part) const;
+ void store_TIME(my_time_t timestamp, ulong sec_part);
+ int cmp(const uchar *,const uchar *);
+ uint32 pack_length() const;
+ uint size_of() const { return sizeof(*this); }
+};
+
+
+/**
+ TIMESTAMP(0..6) - MySQL56 version
+*/
+class Field_timestampf :public Field_timestamp_with_dec {
+ int do_save_field_metadata(uchar *metadata_ptr)
+ {
+ *metadata_ptr= decimals();
+ return 1;
+ }
+public:
+ Field_timestampf(uchar *ptr_arg,
+ uchar *null_ptr_arg, uchar null_bit_arg,
+ enum utype unireg_check_arg,
+ const char *field_name_arg,
+ TABLE_SHARE *share, uint dec_arg) :
+ Field_timestamp_with_dec(ptr_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg, share, dec_arg)
+ {}
+ enum_field_types real_type() const { return MYSQL_TYPE_TIMESTAMP2; }
+ enum_field_types binlog_type() const { return MYSQL_TYPE_TIMESTAMP2; }
+ uint32 pack_length() const
+ {
+ return my_timestamp_binary_length(dec);
+ }
+ uint row_pack_length() { return pack_length(); }
+ uint pack_length_from_metadata(uint field_metadata)
+ {
+ DBUG_ENTER("Field_timestampf::pack_length_from_metadata");
+ uint tmp= my_timestamp_binary_length(field_metadata);
+ DBUG_RETURN(tmp);
+ }
+ int cmp(const uchar *a_ptr,const uchar *b_ptr)
+ {
+ return memcmp(a_ptr, b_ptr, pack_length());
+ }
+ void store_TIME(my_time_t timestamp, ulong sec_part);
+ my_time_t get_timestamp(ulong *sec_part) const;
uint size_of() const { return sizeof(*this); }
- bool eq_def(Field *field)
- { return Field_str::eq_def(field) && dec == field->decimals(); }
};
@@ -1500,56 +1734,24 @@ public:
};
-class Field_temporal: public Field_str {
-protected:
- int store_TIME_with_warning(MYSQL_TIME *ltime, const ErrConv *str,
- int was_cut, int have_smth_to_conv);
- virtual void store_TIME(MYSQL_TIME *ltime) = 0;
-public:
- Field_temporal(uchar *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
- uchar null_bit_arg, utype unireg_check_arg,
- const char *field_name_arg, CHARSET_INFO *charset_arg)
- :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
- field_name_arg, charset_arg)
- { flags|= BINARY_FLAG; }
- enum Derivation derivation(void) const { return DERIVATION_NUMERIC; }
- uint repertoire(void) const { return MY_REPERTOIRE_NUMERIC; }
- CHARSET_INFO *charset(void) const { return &my_charset_numeric; }
- bool binary() const { return 1; }
- bool match_collation_to_optimize_range() const { return FALSE; }
- enum Item_result cmp_type () const { return TIME_RESULT; }
- int store(const char *to,uint length,CHARSET_INFO *charset);
- int store(double nr);
- int store(longlong nr, bool unsigned_val);
- int store_time_dec(MYSQL_TIME *ltime, uint dec);
- my_decimal *val_decimal(my_decimal*);
- bool eq_def(Field *field)
- {
- return (Field_str::eq_def(field) && decimals() == field->decimals());
- }
-};
-
-class Field_date :public Field_temporal {
+class Field_date :public Field_temporal_with_date {
void store_TIME(MYSQL_TIME *ltime);
public:
Field_date(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- CHARSET_INFO *cs)
- :Field_temporal(ptr_arg, MAX_DATE_WIDTH, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, cs) {}
+ enum utype unireg_check_arg, const char *field_name_arg)
+ :Field_temporal_with_date(ptr_arg, MAX_DATE_WIDTH, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg) {}
enum_field_types type() const { return MYSQL_TYPE_DATE;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
int reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=0; return 0; }
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
- uint decimals() const { return 0; }
bool send_binary(Protocol *protocol);
int cmp(const uchar *,const uchar *);
void sort_string(uchar *buff,uint length);
uint32 pack_length() const { return 4; }
void sql_type(String &str) const;
- bool zero_pack() const { return 1; }
uchar *pack(uchar* to, const uchar *from,
uint max_length __attribute__((unused)))
{
@@ -1560,23 +1762,22 @@ public:
{
return unpack_int32(to, from, from_end);
}
+ uint size_of() const { return sizeof(*this); }
};
-class Field_newdate :public Field_temporal {
+class Field_newdate :public Field_temporal_with_date {
void store_TIME(MYSQL_TIME *ltime);
public:
Field_newdate(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
- enum utype unireg_check_arg, const char *field_name_arg,
- CHARSET_INFO *cs)
- :Field_temporal(ptr_arg, MAX_DATE_WIDTH, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, cs)
+ enum utype unireg_check_arg, const char *field_name_arg)
+ :Field_temporal_with_date(ptr_arg, MAX_DATE_WIDTH, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg)
{}
enum_field_types type() const { return MYSQL_TYPE_DATE;}
enum_field_types real_type() const { return MYSQL_TYPE_NEWDATE; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_UINT24; }
int reset(void) { ptr[0]=ptr[1]=ptr[2]=0; return 0; }
- uint decimals() const { return 0; }
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -1585,19 +1786,22 @@ public:
void sort_string(uchar *buff,uint length);
uint32 pack_length() const { return 3; }
void sql_type(String &str) const;
- bool zero_pack() const { return 1; }
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ uint size_of() const { return sizeof(*this); }
};
class Field_time :public Field_temporal {
- void store_TIME(MYSQL_TIME *ltime);
+protected:
+ virtual void store_TIME(MYSQL_TIME *ltime);
+ int store_TIME_with_warning(MYSQL_TIME *ltime, const ErrConv *str,
+ int was_cut, int have_smth_to_conv);
public:
Field_time(uchar *ptr_arg, uint length_arg, uchar *null_ptr_arg,
uchar null_bit_arg, enum utype unireg_check_arg,
- const char *field_name_arg, CHARSET_INFO *cs)
+ const char *field_name_arg)
:Field_temporal(ptr_arg, length_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, cs)
+ unireg_check_arg, field_name_arg)
{}
enum_field_types type() const { return MYSQL_TYPE_TIME;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; }
@@ -1605,7 +1809,7 @@ public:
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
- uint decimals() const { return 0; }
+ int store_decimal(const my_decimal *);
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -1615,55 +1819,122 @@ public:
void sort_string(uchar *buff,uint length);
uint32 pack_length() const { return 3; }
void sql_type(String &str) const;
- bool zero_pack() const { return 1; }
+ uint size_of() const { return sizeof(*this); }
};
-class Field_time_hires :public Field_time {
+
+/**
+ Abstract class for:
+ - TIME(1..6)
+ - TIME(0..6) - MySQL56 version
+*/
+class Field_time_with_dec :public Field_time {
+protected:
uint dec;
+public:
+ Field_time_with_dec(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ uint dec_arg)
+ :Field_time(ptr_arg, MIN_TIME_WIDTH + dec_arg + test(dec_arg), null_ptr_arg,
+ null_bit_arg, unireg_check_arg, field_name_arg),
+ dec(dec_arg)
+ {
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ }
+ uint decimals() const { return dec; }
+ enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
+ longlong val_int(void);
+ double val_real(void);
+ void make_field(Send_field *);
+};
+
+
+/**
+ TIME(1..6)
+*/
+class Field_time_hires :public Field_time_with_dec {
longlong zero_point;
void store_TIME(MYSQL_TIME *ltime);
public:
Field_time_hires(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
- uint dec_arg, CHARSET_INFO *cs)
- :Field_time(ptr_arg, MIN_TIME_WIDTH + dec_arg + 1, null_ptr_arg,
- null_bit_arg, unireg_check_arg, field_name_arg, cs),
- dec(dec_arg)
+ uint dec_arg)
+ :Field_time_with_dec(ptr_arg, null_ptr_arg,
+ null_bit_arg, unireg_check_arg, field_name_arg,
+ dec_arg)
{
DBUG_ASSERT(dec);
- DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
zero_point= sec_part_shift(
((TIME_MAX_VALUE_SECONDS+1LL)*TIME_SECOND_PART_FACTOR), dec);
}
- enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
- uint decimals() const { return dec; }
- int store_decimal(const my_decimal *d);
- longlong val_int(void);
- double val_real(void);
- String *val_str(String*,String *);
int reset(void);
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool send_binary(Protocol *protocol);
int cmp(const uchar *,const uchar *);
void sort_string(uchar *buff,uint length);
uint32 pack_length() const;
- void sql_type(String &str) const;
- void make_field(Send_field *);
uint size_of() const { return sizeof(*this); }
};
-class Field_datetime :public Field_temporal {
+
+/**
+ TIME(0..6) - MySQL56 version
+*/
+class Field_timef :public Field_time_with_dec {
+ void store_TIME(MYSQL_TIME *ltime);
+ int do_save_field_metadata(uchar *metadata_ptr)
+ {
+ *metadata_ptr= decimals();
+ return 1;
+ }
+public:
+ Field_timef(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
+ enum utype unireg_check_arg, const char *field_name_arg,
+ uint dec_arg)
+ :Field_time_with_dec(ptr_arg, null_ptr_arg,
+ null_bit_arg, unireg_check_arg, field_name_arg,
+ dec_arg)
+ {
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ }
+ enum_field_types real_type() const { return MYSQL_TYPE_TIME2; }
+ enum_field_types binlog_type() const { return MYSQL_TYPE_TIME2; }
+ uint32 pack_length() const
+ {
+ return my_time_binary_length(dec);
+ }
+ uint row_pack_length() { return pack_length(); }
+ uint pack_length_from_metadata(uint field_metadata)
+ {
+ DBUG_ENTER("Field_timef::pack_length_from_metadata");
+ uint tmp= my_time_binary_length(field_metadata);
+ DBUG_RETURN(tmp);
+ }
+ void sort_string(uchar *to, uint length)
+ {
+ DBUG_ASSERT(length == Field_timef::pack_length());
+ memcpy(to, ptr, length);
+ }
+ int cmp(const uchar *a_ptr, const uchar *b_ptr)
+ {
+ return memcmp(a_ptr, b_ptr, pack_length());
+ }
+ int reset();
+ bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ uint size_of() const { return sizeof(*this); }
+};
+
+
+class Field_datetime :public Field_temporal_with_date {
void store_TIME(MYSQL_TIME *ltime);
public:
Field_datetime(uchar *ptr_arg, uint length_arg, uchar *null_ptr_arg,
uchar null_bit_arg, enum utype unireg_check_arg,
- const char *field_name_arg, CHARSET_INFO *cs)
- :Field_temporal(ptr_arg, length_arg, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg, cs)
+ const char *field_name_arg)
+ :Field_temporal_with_date(ptr_arg, length_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg)
{}
enum_field_types type() const { return MYSQL_TYPE_DATETIME;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONGLONG; }
- uint decimals() const { return 0; }
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -1672,7 +1943,6 @@ public:
void sort_string(uchar *buff,uint length);
uint32 pack_length() const { return 8; }
void sql_type(String &str) const;
- bool zero_pack() const { return 1; }
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
virtual int set_time();
virtual void set_default()
@@ -1706,85 +1976,149 @@ public:
{
return unpack_int64(to, from, from_end);
}
+ uint size_of() const { return sizeof(*this); }
};
-class Field_datetime_hires :public Field_datetime {
- void store_TIME(MYSQL_TIME *ltime);
+/**
+ Abstract class for:
+ - DATETIME(1..6)
+ - DATETIME(0..6) - MySQL56 version
+*/
+class Field_datetime_with_dec :public Field_datetime {
+protected:
uint dec;
public:
- Field_datetime_hires(uchar *ptr_arg, uchar *null_ptr_arg,
- uchar null_bit_arg, enum utype unireg_check_arg,
- const char *field_name_arg, uint dec_arg,
- CHARSET_INFO *cs)
- :Field_datetime(ptr_arg, MAX_DATETIME_WIDTH + dec_arg + 1,
+ Field_datetime_with_dec(uchar *ptr_arg, uchar *null_ptr_arg,
+ uchar null_bit_arg, enum utype unireg_check_arg,
+ const char *field_name_arg, uint dec_arg)
+ :Field_datetime(ptr_arg, MAX_DATETIME_WIDTH + dec_arg + test(dec_arg),
null_ptr_arg, null_bit_arg, unireg_check_arg,
- field_name_arg, cs), dec(dec_arg)
+ field_name_arg), dec(dec_arg)
{
- DBUG_ASSERT(dec);
DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
}
- enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
uint decimals() const { return dec; }
+ enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
void make_field(Send_field *field);
- int store_decimal(const my_decimal *d);
- double val_real(void);
- longlong val_int(void);
- String *val_str(String*,String *);
bool send_binary(Protocol *protocol);
- int cmp(const uchar *,const uchar *);
- void sort_string(uchar *buff,uint length);
- uint32 pack_length() const;
- void sql_type(String &str) const;
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
uchar *pack(uchar *to, const uchar *from, uint max_length)
{ return Field::pack(to, from, max_length); }
const uchar *unpack(uchar* to, const uchar *from, const uchar *from_end,
uint param_data)
{ return Field::unpack(to, from, from_end, param_data); }
+ void sort_string(uchar *to, uint length)
+ {
+ DBUG_ASSERT(length == pack_length());
+ memcpy(to, ptr, length);
+ }
+ double val_real(void);
+ longlong val_int(void);
+ String *val_str(String*,String *);
+};
+
+
+/**
+ DATETIME(1..6)
+*/
+class Field_datetime_hires :public Field_datetime_with_dec {
+ void store_TIME(MYSQL_TIME *ltime);
+public:
+ Field_datetime_hires(uchar *ptr_arg, uchar *null_ptr_arg,
+ uchar null_bit_arg, enum utype unireg_check_arg,
+ const char *field_name_arg, uint dec_arg)
+ :Field_datetime_with_dec(ptr_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg, dec_arg)
+ {
+ DBUG_ASSERT(dec);
+ }
+ int cmp(const uchar *,const uchar *);
+ uint32 pack_length() const;
+ bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ uint size_of() const { return sizeof(*this); }
+};
+
+
+/**
+ DATETIME(0..6) - MySQL56 version
+*/
+class Field_datetimef :public Field_datetime_with_dec {
+ void store_TIME(MYSQL_TIME *ltime);
+ int do_save_field_metadata(uchar *metadata_ptr)
+ {
+ *metadata_ptr= decimals();
+ return 1;
+ }
+public:
+ Field_datetimef(uchar *ptr_arg, uchar *null_ptr_arg,
+ uchar null_bit_arg, enum utype unireg_check_arg,
+ const char *field_name_arg, uint dec_arg)
+ :Field_datetime_with_dec(ptr_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg, dec_arg)
+ {}
+ enum_field_types real_type() const { return MYSQL_TYPE_DATETIME2; }
+ enum_field_types binlog_type() const { return MYSQL_TYPE_DATETIME2; }
+ uint32 pack_length() const
+ {
+ return my_datetime_binary_length(dec);
+ }
+ uint row_pack_length() { return pack_length(); }
+ uint pack_length_from_metadata(uint field_metadata)
+ {
+ DBUG_ENTER("Field_datetimef::pack_length_from_metadata");
+ uint tmp= my_datetime_binary_length(field_metadata);
+ DBUG_RETURN(tmp);
+ }
+ int cmp(const uchar *a_ptr, const uchar *b_ptr)
+ {
+ return memcmp(a_ptr, b_ptr, pack_length());
+ }
+ int reset();
+ bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
uint size_of() const { return sizeof(*this); }
};
+
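
Field_timef::pack_length() and Field_datetimef::pack_length() above delegate to my_time_binary_length() and my_datetime_binary_length(). Below is a minimal standalone sketch of the sizes these helpers are assumed to return for the MySQL-5.6 TIME2/DATETIME2 formats (3 or 5 fixed bytes plus ceil(dec/2) bytes of fractional seconds); the frac_bytes/time2_len/datetime2_len names are illustrative, not server code.

#include <cstdio>

// ceil(dec/2) bytes hold the fractional-seconds part, dec = 0..6
static unsigned frac_bytes(unsigned dec) { return (dec + 1) / 2; }
static unsigned time2_len(unsigned dec)     { return 3 + frac_bytes(dec); } // TIME(dec)
static unsigned datetime2_len(unsigned dec) { return 5 + frac_bytes(dec); } // DATETIME(dec)

int main()
{
  for (unsigned dec= 0; dec <= 6; dec++)
    std::printf("dec=%u  TIME2=%u bytes  DATETIME2=%u bytes\n",
                dec, time2_len(dec), datetime2_len(dec));
  return 0;
}
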
static inline Field_timestamp *
new_Field_timestamp(uchar *ptr, uchar *null_ptr, uchar null_bit,
enum Field::utype unireg_check, const char *field_name,
- TABLE_SHARE *share, uint dec, CHARSET_INFO *cs)
+ TABLE_SHARE *share, uint dec)
{
if (dec==0)
return new Field_timestamp(ptr, MAX_DATETIME_WIDTH, null_ptr, null_bit,
- unireg_check, field_name, share, cs);
+ unireg_check, field_name, share);
if (dec == NOT_FIXED_DEC)
dec= MAX_DATETIME_PRECISION;
return new Field_timestamp_hires(ptr, null_ptr, null_bit, unireg_check,
- field_name, share, dec, cs);
+ field_name, share, dec);
}
static inline Field_time *
new_Field_time(uchar *ptr, uchar *null_ptr, uchar null_bit,
enum Field::utype unireg_check, const char *field_name,
- uint dec, CHARSET_INFO *cs)
+ uint dec)
{
if (dec == 0)
return new Field_time(ptr, MIN_TIME_WIDTH, null_ptr, null_bit,
- unireg_check, field_name, cs);
+ unireg_check, field_name);
if (dec == NOT_FIXED_DEC)
dec= MAX_DATETIME_PRECISION;
return new Field_time_hires(ptr, null_ptr, null_bit,
- unireg_check, field_name, dec, cs);
+ unireg_check, field_name, dec);
}
static inline Field_datetime *
new_Field_datetime(uchar *ptr, uchar *null_ptr, uchar null_bit,
enum Field::utype unireg_check,
- const char *field_name, uint dec, CHARSET_INFO *cs)
+ const char *field_name, uint dec)
{
if (dec == 0)
return new Field_datetime(ptr, MAX_DATETIME_WIDTH, null_ptr, null_bit,
- unireg_check, field_name, cs);
+ unireg_check, field_name);
if (dec == NOT_FIXED_DEC)
dec= MAX_DATETIME_PRECISION;
return new Field_datetime_hires(ptr, null_ptr, null_bit,
- unireg_check, field_name, dec, cs);
+ unireg_check, field_name, dec);
}
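
The three new_Field_* helpers above share one dispatch rule: dec == 0 keeps the classic fixed-width field, dec == NOT_FIXED_DEC is clamped to MAX_DATETIME_PRECISION, anything else selects the *_hires variant. A small sketch of that decision with illustrative constants (the real values come from the server headers):

#include <cstdio>

static const unsigned ILL_NOT_FIXED_DEC= 31;          // illustrative stand-in
static const unsigned ILL_MAX_DATETIME_PRECISION= 6;  // illustrative stand-in

static const char *pick_time_field(unsigned dec)
{
  if (dec == 0)
    return "Field_time";                  // classic 3-byte TIME
  if (dec == ILL_NOT_FIXED_DEC)
    dec= ILL_MAX_DATETIME_PRECISION;      // "unspecified" clamps to max precision
  return "Field_time_hires";              // TIME(1..6)
}

int main()
{
  std::printf("%s %s %s\n",
              pick_time_field(0), pick_time_field(3), pick_time_field(31));
  return 0;
}
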
class Field_string :public Field_longstr {
@@ -1811,7 +2145,6 @@ public:
orig_table->s->frm_version < FRM_VER_TRUE_VARCHAR ?
MYSQL_TYPE_VAR_STRING : MYSQL_TYPE_STRING);
}
- bool match_collation_to_optimize_range() const { return TRUE; }
enum ha_base_keytype key_type() const
{ return binary() ? HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT; }
bool zero_pack() const { return 0; }
@@ -1892,7 +2225,6 @@ public:
}
enum_field_types type() const { return MYSQL_TYPE_VARCHAR; }
- bool match_collation_to_optimize_range() const { return TRUE; }
enum ha_base_keytype key_type() const;
uint row_pack_length() { return field_length; }
bool zero_pack() const { return 0; }
@@ -1939,6 +2271,7 @@ public:
uint new_null_bit);
uint is_equal(Create_field *new_field);
void hash(ulong *nr, ulong *nr2);
+ uint length_size() { return length_bytes; }
private:
int do_save_field_metadata(uchar *first_byte);
};
@@ -1988,7 +2321,6 @@ public:
packlength(packlength_arg) {}
/* Note that the default copy constructor is used, in clone() */
enum_field_types type() const { return MYSQL_TYPE_BLOB;}
- bool match_collation_to_optimize_range() const { return TRUE; }
enum ha_base_keytype key_type() const
{ return binary() ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; }
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -2134,7 +2466,7 @@ public:
{ geom_type= geom_type_arg; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; }
enum_field_types type() const { return MYSQL_TYPE_GEOMETRY; }
- bool match_collation_to_optimize_range() const { return FALSE; }
+ bool match_collation_to_optimize_range() const { return false; }
void sql_type(String &str) const;
int store(const char *to, uint length, CHARSET_INFO *charset);
int store(double nr);
@@ -2172,7 +2504,6 @@ public:
}
Field *new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type);
enum_field_types type() const { return MYSQL_TYPE_STRING; }
- bool match_collation_to_optimize_range() const { return FALSE; }
enum Item_result cmp_type () const { return INT_RESULT; }
enum ha_base_keytype key_type() const;
int store(const char *to,uint length,CHARSET_INFO *charset);
@@ -2197,6 +2528,7 @@ public:
bool has_charset(void) const { return TRUE; }
/* enum and set are sorted as integers */
CHARSET_INFO *sort_charset(void) const { return &my_charset_bin; }
+ uint decimals() const { return 0; }
virtual uchar *pack(uchar *to, const uchar *from, uint max_length);
virtual const uchar *unpack(uchar *to, const uchar *from,
@@ -2320,6 +2652,14 @@ public:
}
return update_fl;
}
+ void store_field_value(uchar *val, uint len)
+ {
+ store(*((longlong *)val), TRUE);
+ }
+ double pos_in_interval(Field *min, Field *max)
+ {
+ return pos_in_interval_val_real(min, max);
+ }
void get_image(uchar *buff, uint length, CHARSET_INFO *cs)
{ get_key_image(buff, length, itRAW); }
void set_image(const uchar *buff,uint length, CHARSET_INFO *cs)
@@ -2428,11 +2768,11 @@ public:
/** structure with parsed options (for comparing fields in ALTER TABLE) */
ha_field_option_struct *option_struct;
- uint8 row,col,sc_length,interval_id; // For rea_create_table
+ uint8 interval_id; // For rea_create_table
uint offset,pack_flag;
bool create_if_not_exists; // Used in ALTER TABLE IF NOT EXISTS
- /*
+ /*
    This is additional data provided for any computed (virtual) field.
In particular it includes a pointer to the item by which this field
can be computed from other fields.
@@ -2464,7 +2804,7 @@ public:
Item *on_update_value, LEX_STRING *comment, char *change,
List<String> *interval_list, CHARSET_INFO *cs,
uint uint_geom_type, Virtual_column_info *vcol_info,
- engine_option_value *option_list);
+ engine_option_value *option_list, bool check_exists);
bool field_flags_are_binary()
{
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index 37b0308eb1b..5e16166531d 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -419,7 +419,7 @@ static void do_field_decimal(Copy_field *copy)
static void do_field_temporal(Copy_field *copy)
{
MYSQL_TIME ltime;
- copy->from_field->get_date(&ltime, TIME_FUZZY_DATE);
+ copy->from_field->get_date(&ltime, 0);
copy->to_field->store_time_dec(&ltime, copy->from_field->decimals());
}
@@ -890,7 +890,7 @@ int field_conv(Field *to,Field *from)
if (from->cmp_type() == TIME_RESULT)
{
MYSQL_TIME ltime;
- if (from->get_date(&ltime, TIME_FUZZY_DATE))
+ if (from->get_date(&ltime, 0))
return to->reset();
else
return to->store_time_dec(&ltime, from->decimals());
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 2ac784485ce..7cb2306eb7c 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**
@@ -55,7 +55,7 @@ static ha_rows find_all_keys(Sort_param *param,SQL_SELECT *select,
IO_CACHE *tempfile,
Bounded_queue<uchar, uchar> *pq,
ha_rows *found_rows);
-static int write_keys(Sort_param *param, Filesort_info *fs_info,
+static bool write_keys(Sort_param *param, Filesort_info *fs_info,
uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos);
static void register_used_fields(Sort_param *param);
@@ -147,7 +147,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
ha_rows *found_rows)
{
int error;
- ulong memory_available= thd->variables.sortbuff_size;
+ size_t memory_available= thd->variables.sortbuff_size;
uint maxbuffer;
BUFFPEK *buffpek;
ha_rows num_rows= HA_POS_ERROR;
@@ -245,11 +245,11 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
{
DBUG_PRINT("info", ("filesort PQ is not applicable"));
- ulong min_sort_memory= MY_MAX(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
+ size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2);
while (memory_available >= min_sort_memory)
{
- ulong keys= memory_available / (param.rec_length + sizeof(char*));
+ ulonglong keys= memory_available / (param.rec_length + sizeof(char*));
param.max_keys_per_buffer= (uint) MY_MIN(num_rows, keys);
if (table_sort.get_sort_keys())
{
@@ -267,7 +267,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
table_sort.alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length);
if (table_sort.get_sort_keys())
break;
- ulong old_memory_available= memory_available;
+ size_t old_memory_available= memory_available;
memory_available= memory_available/4*3;
if (memory_available < min_sort_memory &&
old_memory_available > min_sort_memory)
@@ -449,7 +449,7 @@ void filesort_free_buffers(TABLE *table, bool full)
static uchar *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count,
uchar *buf)
{
- ulong length= sizeof(BUFFPEK)*count;
+ size_t length= sizeof(BUFFPEK)*count;
uchar *tmp= buf;
DBUG_ENTER("read_buffpek_from_file");
if (count > UINT_MAX/sizeof(BUFFPEK))
@@ -791,7 +791,7 @@ static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select,
1 Error
*/
-static int
+static bool
write_keys(Sort_param *param, Filesort_info *fs_info, uint count,
IO_CACHE *buffpek_pointers, IO_CACHE *tempfile)
{
@@ -952,8 +952,11 @@ static void make_sortkey(register Sort_param *param,
else
{
MYSQL_TIME buf;
- if (item->get_date_result(&buf, TIME_FUZZY_DATE | TIME_INVALID_DATES))
- DBUG_ASSERT(maybe_null && item->null_value);
+ if (item->get_date_result(&buf, TIME_INVALID_DATES))
+ {
+ DBUG_ASSERT(maybe_null);
+ DBUG_ASSERT(item->null_value);
+ }
else
value= pack_time(&buf);
}
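
The filesort() hunks above switch the buffer accounting to size_t and keep the retry loop that shrinks the budget by a quarter per failed allocation, with one last attempt exactly at min_sort_memory. A standalone sketch of that back-off loop; the allocator here is simulated and the constants are arbitrary:

#include <cstddef>
#include <cstdio>

// Simulated allocator: pretend anything above 1 MiB fails, to force back-off.
static bool try_alloc(size_t bytes) { return bytes <= 1024 * 1024; }

int main()
{
  size_t memory_available= 8u * 1024 * 1024;   // think thd->variables.sortbuff_size
  const size_t min_sort_memory= 64u * 1024;

  while (memory_available >= min_sort_memory)
  {
    if (try_alloc(memory_available))
    {
      std::printf("allocated %zu bytes\n", memory_available);
      return 0;
    }
    size_t old_memory_available= memory_available;
    memory_available= memory_available / 4 * 3;        // shrink by 25%
    if (memory_available < min_sort_memory &&
        old_memory_available > min_sort_memory)
      memory_available= min_sort_memory;               // final attempt at the floor
  }
  std::printf("out of sort memory\n");
  return 1;
}
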
diff --git a/sql/frm_crypt.cc b/sql/frm_crypt.cc
deleted file mode 100644
index 5612908aea5..00000000000
--- a/sql/frm_crypt.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-
-
-/*
-** change the following to the output of password('our password')
-** split into 2 parts of 8 characters each.
-** This is done to make it impossible to search after a text string in the
-** mysql binary.
-*/
-
-#include "sql_priv.h"
-#include "frm_crypt.h"
-
-#ifdef HAVE_CRYPTED_FRM
-
-/* password('test') */
-ulong password_seed[2]={0x378b243e, 0x220ca493};
-
-SQL_CRYPT *get_crypt_for_frm(void)
-{
- return new SQL_CRYPT(password_seed);
-}
-
-#endif
diff --git a/sql/frm_crypt.h b/sql/frm_crypt.h
deleted file mode 100644
index 0605644b3e0..00000000000
--- a/sql/frm_crypt.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-
-#ifndef FRM_CRYPT_INCLUDED
-#define FRM_CRYPT_INCLUDED
-
-class SQL_CRYPT;
-
-SQL_CRYPT *get_crypt_for_frm(void);
-
-#endif /* FRM_CRYPT_INCLUDED */
diff --git a/sql/ha_ndbcluster_cond.cc b/sql/ha_ndbcluster_cond.cc
index 4fb780f94d1..22a7dbe55f7 100644
--- a/sql/ha_ndbcluster_cond.cc
+++ b/sql/ha_ndbcluster_cond.cc
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
/*
diff --git a/sql/ha_ndbcluster_cond.h b/sql/ha_ndbcluster_cond.h
index 442eac2fafd..27675588ed7 100644
--- a/sql/ha_ndbcluster_cond.h
+++ b/sql/ha_ndbcluster_cond.h
@@ -14,7 +14,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/*
This file defines the data structures used by engine condition pushdown in
diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h
index ba2e8ec251b..6ed46123738 100644
--- a/sql/ha_ndbcluster_tables.h
+++ b/sql/ha_ndbcluster_tables.h
@@ -14,7 +14,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#define NDB_REP_DB "mysql"
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index c06520df5fd..a5acd5759aa 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1,6 +1,6 @@
/*
- Copyright (c) 2005, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009-2013 Monty Program Ab & SkySQL Ab
+ Copyright (c) 2005, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab & SkySQL Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -37,10 +37,6 @@
in the execution of queries. This functionality will grow with later
versions of MySQL.
- You can enable it in your build by doing the following during your build
- process:
- ./configure --with-partition
-
The partition is setup to use table locks. It implements an partition "SHARE"
that is inserted into a hash by table name. You can use this to store
information of state that any partition handler object will be able to see
@@ -59,6 +55,9 @@
#include "sql_table.h" // tablename_to_filename
#include "key.h"
#include "sql_plugin.h"
+#include "sql_show.h" // append_identifier
+#include "sql_admin.h" // SQL_ADMIN_MSG_TEXT_SIZE
+
#include "debug_sync.h"
/* First 4 bytes in the .par file is the number of 32-bit words in the file */
@@ -87,6 +86,17 @@ static handler *partition_create_handler(handlerton *hton,
static uint partition_flags();
static uint alter_table_flags(uint flags);
+/*
+  If frm_error() is called then we will use this to find out what file
+ extensions exist for the storage engine. This is also used by the default
+ rename_table and delete_table method in handler.cc.
+*/
+
+static const char *ha_partition_ext[]=
+{
+ ha_par_ext, NullS
+};
+
#ifdef HAVE_PSI_INTERFACE
PSI_mutex_key key_partition_auto_inc_mutex;
@@ -120,6 +130,8 @@ static int partition_initialize(void *p)
partition_hton->flags= HTON_NOT_USER_SELECTABLE |
HTON_HIDDEN |
HTON_TEMPORARY_NOT_SUPPORTED;
+ partition_hton->tablefile_extensions= ha_partition_ext;
+
#ifdef HAVE_PSI_INTERFACE
init_partition_psi_keys();
#endif
@@ -572,7 +584,7 @@ int ha_partition::rename_table(const char *from, const char *to)
Create the handler file (.par-file)
SYNOPSIS
- create_handler_files()
+ create_partitioning_metadata()
name Full path of table name
create_info Create info generated for CREATE TABLE
@@ -581,19 +593,18 @@ int ha_partition::rename_table(const char *from, const char *to)
0 Success
DESCRIPTION
- create_handler_files is called to create any handler specific files
+ create_partitioning_metadata is called to create any handler specific files
before opening the file with openfrm to later call ::create on the
file object.
In the partition handler this is used to store the names of partitions
and types of engines in the partitions.
*/
-int ha_partition::create_handler_files(const char *path,
+int ha_partition::create_partitioning_metadata(const char *path,
const char *old_path,
- int action_flag,
- HA_CREATE_INFO *create_info)
+ int action_flag)
{
- DBUG_ENTER("ha_partition::create_handler_files()");
+ DBUG_ENTER("ha_partition::create_partitioning_metadata()");
/*
We need to update total number of parts since we might write the handler
@@ -1114,7 +1125,8 @@ int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt)
{
DBUG_ENTER("ha_partition::repair");
- DBUG_RETURN(handle_opt_partitions(thd, check_opt, REPAIR_PARTS));
+ int res= handle_opt_partitions(thd, check_opt, REPAIR_PARTS);
+ DBUG_RETURN(res);
}
/**
@@ -1206,11 +1218,11 @@ static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
TODO: move this into the handler, or rewrite mysql_admin_table.
*/
static bool print_admin_msg(THD* thd, const char* msg_type,
- const char* db_name, const char* table_name,
+ const char* db_name, String &table_name,
const char* op_name, const char *fmt, ...)
ATTRIBUTE_FORMAT(printf, 6, 7);
static bool print_admin_msg(THD* thd, const char* msg_type,
- const char* db_name, const char* table_name,
+ const char* db_name, String &table_name,
const char* op_name, const char *fmt, ...)
{
va_list args;
@@ -1231,7 +1243,7 @@ static bool print_admin_msg(THD* thd, const char* msg_type,
return TRUE;
}
- length=(uint) (strxmov(name, db_name, ".", table_name,NullS) - name);
+ length=(uint) (strxmov(name, db_name, ".", table_name.c_ptr_safe(), NullS) - name);
/*
TODO: switch from protocol to push_warning here. The main reason we didn't
it yet is parallel repair. Due to following trace:
@@ -1310,8 +1322,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
error != HA_ADMIN_ALREADY_DONE &&
error != HA_ADMIN_TRY_ALTER)
{
- print_admin_msg(thd, "error", table_share->db.str,
- table->alias.c_ptr(),
+ print_admin_msg(thd, "error", table_share->db.str, table->alias,
opt_op_name[flag],
"Subpartition %s returned error",
sub_elem->partition_name);
@@ -1337,8 +1348,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
error != HA_ADMIN_ALREADY_DONE &&
error != HA_ADMIN_TRY_ALTER)
{
- print_admin_msg(thd, "error", table_share->db.str,
- table->alias.c_ptr(),
+ print_admin_msg(thd, "error", table_share->db.str, table->alias,
opt_op_name[flag], "Partition %s returned error",
part_elem->partition_name);
}
@@ -1975,7 +1985,6 @@ init_error:
DBUG_RETURN(result);
}
-
/*
Update create info as part of ALTER TABLE
@@ -3248,6 +3257,7 @@ err:
}
+
/**
Helper function for freeing all internal bitmaps.
*/
@@ -4296,7 +4306,7 @@ int ha_partition::delete_row(const uchar *buf)
Called from item_sum.cc by Item_func_group_concat::clear(),
Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
Called from sql_delete.cc by mysql_delete().
- Called from sql_select.cc by JOIN::reinit().
+ Called from sql_select.cc by JOIN::reset().
Called from sql_union.cc by st_select_lex_unit::exec().
*/
@@ -4403,6 +4413,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
part, sub_elem->partition_name));
if ((error= m_file[part]->ha_truncate()))
break;
+ sub_elem->part_state= PART_NORMAL;
} while (++j < num_subparts);
}
else
@@ -8040,21 +8051,6 @@ void ha_partition::notify_table_changed()
}
-/*
- If frm_error() is called then we will use this to to find out what file
- extensions exist for the storage engine. This is also used by the default
- rename_table and delete_table method in handler.cc.
-*/
-
-static const char *ha_partition_ext[]=
-{
- ha_par_ext, NullS
-};
-
-const char **ha_partition::bas_ext() const
-{ return ha_partition_ext; }
-
-
uint ha_partition::min_of_the_max_uint(
uint (handler::*operator_func)(void) const) const
{
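
The ha_partition.cc changes above stop overriding bas_ext() and instead publish the .par extension through handlerton::tablefile_extensions in partition_initialize(); by convention the first entry is the engine's metadata file and the array ends with a null terminator. A standalone mock of that convention (the struct and names are illustrative, not the server's handlerton):

#include <cstdio>

struct mock_handlerton { const char **tablefile_extensions; };  // illustrative stand-in

static const char *mock_partition_ext[]= { ".par", nullptr };   // metadata file first

int main()
{
  mock_handlerton hton;
  hton.tablefile_extensions= mock_partition_ext;
  for (const char **ext= hton.tablefile_extensions; *ext; ext++)
    std::printf("engine file extension: %s\n", *ext);
  return 0;
}
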
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index eeb2ee5dec5..fc1f1a600d0 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -3,7 +3,7 @@
/*
Copyright (c) 2005, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009-2013 Monty Program Ab & SkySQL Ab
+ Copyright (c) 2009, 2013, Monty Program Ab & SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,6 +27,7 @@ enum partition_keywords
PKW_COLUMNS
};
+
#define PARTITION_BYTES_IN_POS 2
@@ -125,8 +126,9 @@ private:
partition_index_first= 1,
partition_index_first_unordered= 2,
partition_index_last= 3,
- partition_read_range = 4,
- partition_no_index_scan= 5
+ partition_index_read_last= 4,
+ partition_read_range = 5,
+ partition_no_index_scan= 6
};
/* Data for the partition handler */
int m_mode; // Open mode
@@ -305,7 +307,7 @@ public:
chance for the handler to add any interesting comments to the table
comments not provided by the users comment.
- create_handler_files is called before opening a new handler object
+ create_partitioning_metadata is called before opening a new handler object
with openfrm to call create. It is used to create any local handler
object needed in opening the object in openfrm
-------------------------------------------------------------------------
@@ -314,9 +316,8 @@ public:
virtual int rename_table(const char *from, const char *to);
virtual int create(const char *name, TABLE *form,
HA_CREATE_INFO *create_info);
- virtual int create_handler_files(const char *name,
- const char *old_name, int action_flag,
- HA_CREATE_INFO *create_info);
+ virtual int create_partitioning_metadata(const char *name,
+ const char *old_name, int action_flag);
virtual void update_create_info(HA_CREATE_INFO *create_info);
virtual char *update_table_comment(const char *comment);
virtual int change_partitions(HA_CREATE_INFO *create_info,
@@ -494,6 +495,7 @@ public:
return TRUE;
}
+
/*
-------------------------------------------------------------------------
MODULE full table scan
@@ -723,7 +725,7 @@ public:
virtual ha_rows records();
/* Calculate hash value for PARTITION BY KEY tables. */
- uint32 calculate_key_hash_value(Field **field_array);
+ static uint32 calculate_key_hash_value(Field **field_array);
/*
-------------------------------------------------------------------------
@@ -977,10 +979,6 @@ public:
*/
virtual uint alter_table_flags(uint flags);
/*
- extensions of table handler files
- */
- virtual const char **bas_ext() const;
- /*
unireg.cc will call the following to make sure that the storage engine
can handle the data it is about to send.
diff --git a/sql/handler.cc b/sql/handler.cc
index 2dfc929864c..685bb6e6c30 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009, 2012, Monty Program Ab.
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -30,7 +30,7 @@
#include "sql_parse.h" // check_stack_overrun
#include "sql_acl.h" // SUPER_ACL
#include "sql_base.h" // free_io_cache
-#include "discover.h" // writefrm
+#include "discover.h" // extension_based_table_discovery, etc
#include "log_event.h" // *_rows_log_event
#include "create_options.h"
#include "rpl_filter.h"
@@ -40,6 +40,7 @@
#include "probes_mysql.h"
#include <mysql/psi/mysql_table.h>
#include "debug_sync.h" // DEBUG_SYNC
+#include "sql_audit.h"
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@@ -121,7 +122,7 @@ handlerton *ha_default_handlerton(THD *thd)
{
plugin_ref plugin= ha_default_plugin(thd);
DBUG_ASSERT(plugin);
- handlerton *hton= plugin_data(plugin, handlerton*);
+ handlerton *hton= plugin_hton(plugin);
DBUG_ASSERT(hton);
return hton;
}
@@ -152,7 +153,7 @@ redo:
if ((plugin= my_plugin_lock_by_name(thd, name, MYSQL_STORAGE_ENGINE_PLUGIN)))
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton && !(hton->flags & HTON_NOT_USER_SELECTABLE))
return plugin;
@@ -200,7 +201,7 @@ handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
default:
if (db_type > DB_TYPE_UNKNOWN && db_type < DB_TYPE_DEFAULT &&
(plugin= ha_lock_engine(thd, installed_htons[db_type])))
- return plugin_data(plugin, handlerton*);
+ return plugin_hton(plugin);
/* fall through */
case DB_TYPE_UNKNOWN:
return NULL;
@@ -230,13 +231,6 @@ handlerton *ha_checktype(THD *thd, enum legacy_db_type database_type,
(void) RUN_HOOK(transaction, after_rollback, (thd, FALSE));
- switch (database_type) {
- case DB_TYPE_MRG_ISAM:
- return ha_resolve_by_legacy_type(thd, DB_TYPE_MRG_MYISAM);
- default:
- break;
- }
-
return ha_default_handlerton(thd);
} /* ha_checktype */
@@ -280,7 +274,8 @@ handler *get_ha_partition(partition_info *part_info)
}
else
{
- my_error(ER_OUTOFMEMORY, MYF(0), static_cast<int>(sizeof(ha_partition)));
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
+ static_cast<int>(sizeof(ha_partition)));
}
DBUG_RETURN(((handler*) partition));
}
@@ -389,6 +384,34 @@ static int ha_finish_errors(void)
return 0;
}
+static volatile int32 need_full_discover_for_existence= 0;
+static volatile int32 engines_with_discover_table_names= 0;
+
+static int full_discover_for_existence(handlerton *, const char *, const char *)
+{ return 1; }
+
+static int ext_based_existence(handlerton *, const char *, const char *)
+{ return 1; }
+
+static int hton_ext_based_table_discovery(handlerton *hton, LEX_STRING *db,
+ MY_DIR *dir, handlerton::discovered_list *result)
+{
+ /*
+ tablefile_extensions[0] is the metadata file, see
+    the comment above the tablefile_extensions declaration
+ */
+ return extension_based_table_discovery(dir, hton->tablefile_extensions[0],
+ result);
+}
+
+static void update_discovery_counters(handlerton *hton, int val)
+{
+ if (hton->discover_table_existence == full_discover_for_existence)
+ my_atomic_add32(&need_full_discover_for_existence, val);
+
+ if (hton->discover_table_names)
+ my_atomic_add32(&engines_with_discover_table_names, val);
+}
int ha_finalize_handlerton(st_plugin_int *plugin)
{
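
update_discovery_counters() above keeps two shared counters in step with engine install/uninstall (ha_initialize_handlerton() passes +1 later in this patch, ha_finalize_handlerton() passes -1), so hot paths such as ha_table_exists() can test whether any installed engine needs full discovery without walking the plugin list. A standalone sketch of the same pattern using std::atomic; the server itself uses my_atomic_add32 on an int32:

#include <atomic>
#include <cstdio>

static std::atomic<int> need_full_discover_for_existence{0};  // illustrative counter

static void update_discovery_counters(bool needs_full_discover, int val)
{
  if (needs_full_discover)
    need_full_discover_for_existence+= val;
}

int main()
{
  update_discovery_counters(true, +1);   // engine installed
  std::printf("fast existence path usable: %s\n",
              need_full_discover_for_existence.load() ? "no" : "yes");
  update_discovery_counters(true, -1);   // engine uninstalled
  std::printf("fast existence path usable: %s\n",
              need_full_discover_for_existence.load() ? "no" : "yes");
  return 0;
}
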
@@ -426,6 +449,9 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
}
}
+ free_sysvar_table_options(hton);
+ update_discovery_counters(hton, -1);
+
/*
In case a plugin is uninstalled and re-installed later, it should
reuse an array slot. Otherwise the number of uninstall/install
@@ -449,12 +475,12 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
int ha_initialize_handlerton(st_plugin_int *plugin)
{
handlerton *hton;
+ static const char *no_exts[]= { 0 };
DBUG_ENTER("ha_initialize_handlerton");
DBUG_PRINT("plugin", ("initialize plugin: '%s'", plugin->name.str));
hton= (handlerton *)my_malloc(sizeof(handlerton),
MYF(MY_WME | MY_ZEROFILL));
-
if (hton == NULL)
{
sql_print_error("Unable to allocate memory for plugin '%s' handlerton.",
@@ -462,6 +488,9 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
goto err_no_hton_memory;
}
+ hton->tablefile_extensions= no_exts;
+ hton->discover_table_names= hton_ext_based_table_discovery;
+
hton->slot= HA_SLOT_UNDEF;
/* Historical Requirement */
plugin->data= hton; // shortcut for the future
@@ -472,6 +501,21 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
goto err;
}
+ // hton_ext_based_table_discovery() works only when discovery
+ // is supported and the engine if file-based.
+  // is supported and the engine is file-based.
+ (!hton->discover_table || !hton->tablefile_extensions[0]))
+ hton->discover_table_names= NULL;
+
+ // default discover_table_existence implementation
+ if (!hton->discover_table_existence && hton->discover_table)
+ {
+ if (hton->tablefile_extensions[0])
+ hton->discover_table_existence= ext_based_existence;
+ else
+ hton->discover_table_existence= full_discover_for_existence;
+ }
+
/*
the switch below and hton->state should be removed when
command-line options for plugins will be implemented
@@ -561,6 +605,9 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
break;
};
+ resolve_sysvar_table_options(hton);
+ update_discovery_counters(hton, 1);
+
DBUG_RETURN(0);
err_deinit:
@@ -614,7 +661,7 @@ int ha_end()
static my_bool dropdb_handlerton(THD *unused1, plugin_ref plugin,
void *path)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->drop_database)
hton->drop_database(hton, (char *)path);
return FALSE;
@@ -630,7 +677,7 @@ void ha_drop_database(char* path)
static my_bool checkpoint_state_handlerton(THD *unused1, plugin_ref plugin,
void *disable)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->checkpoint_state)
hton->checkpoint_state(hton, (int) *(bool*) disable);
return FALSE;
@@ -652,7 +699,7 @@ static my_bool commit_checkpoint_request_handlerton(THD *unused1, plugin_ref plu
void *data)
{
st_commit_checkpoint_request *st= (st_commit_checkpoint_request *)data;
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->commit_checkpoint_request)
{
void *cookie= st->cookie;
@@ -684,7 +731,7 @@ ha_commit_checkpoint_request(void *cookie, void (*pre_hook)(void *))
static my_bool closecon_handlerton(THD *thd, plugin_ref plugin,
void *unused)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
/*
there's no need to rollback here as all transactions must
be rolled back already
@@ -711,7 +758,7 @@ void ha_close_connection(THD* thd)
static my_bool kill_handlerton(THD *thd, plugin_ref plugin,
void *level)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->kill_query &&
thd_get_ha_data(thd, hton))
@@ -1565,7 +1612,7 @@ struct xahton_st {
static my_bool xacommit_handlerton(THD *unused1, plugin_ref plugin,
void *arg)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->recover)
{
hton->commit_by_xid(hton, ((struct xahton_st *)arg)->xid);
@@ -1577,7 +1624,7 @@ static my_bool xacommit_handlerton(THD *unused1, plugin_ref plugin,
static my_bool xarollback_handlerton(THD *unused1, plugin_ref plugin,
void *arg)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->recover)
{
hton->rollback_by_xid(hton, ((struct xahton_st *)arg)->xid);
@@ -1683,7 +1730,7 @@ struct xarecover_st
static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin,
void *arg)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
struct xarecover_st *info= (struct xarecover_st *) arg;
int got;
@@ -1970,7 +2017,7 @@ int ha_savepoint(THD *thd, SAVEPOINT *sv)
}
if ((err= ht->savepoint_set(ht, thd, (uchar *)(sv+1)+ht->savepoint_offset)))
{ // cannot happen
- my_error(ER_GET_ERRNO, MYF(0), err);
+ my_error(ER_GET_ERRNO, MYF(0), err, hton_name(ht)->str);
error=1;
}
status_var_increment(thd->status_var.ha_savepoint_count);
@@ -2001,7 +2048,7 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv)
if ((err= ht->savepoint_release(ht, thd,
(uchar *)(sv+1) + ht->savepoint_offset)))
{ // cannot happen
- my_error(ER_GET_ERRNO, MYF(0), err);
+ my_error(ER_GET_ERRNO, MYF(0), err, hton_name(ht)->str);
error=1;
}
}
@@ -2012,7 +2059,7 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv)
static my_bool snapshot_handlerton(THD *thd, plugin_ref plugin,
void *arg)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES &&
hton->start_consistent_snapshot)
{
@@ -2052,7 +2099,7 @@ int ha_start_consistent_snapshot(THD *thd)
static my_bool flush_handlerton(THD *thd, plugin_ref plugin,
void *arg)
{
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->flush_logs &&
hton->flush_logs(hton))
return TRUE;
@@ -2156,7 +2203,7 @@ handle_condition(THD *,
{
*cond_hdl= NULL;
/* Grab the error message */
- strmake(buff, msg, sizeof(buff)-1);
+ strmake_buf(buff, msg);
return TRUE;
}
@@ -2175,15 +2222,15 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
TABLE_SHARE dummy_share;
DBUG_ENTER("ha_delete_table");
+ /* table_type is NULL in ALTER TABLE when renaming only .frm files */
+ if (table_type == NULL || table_type == view_pseudo_hton ||
+ ! (file=get_new_handler((TABLE_SHARE*)0, thd->mem_root, table_type)))
+ DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
+
bzero((char*) &dummy_table, sizeof(dummy_table));
bzero((char*) &dummy_share, sizeof(dummy_share));
dummy_table.s= &dummy_share;
- /* DB_TYPE_UNKNOWN is used in ALTER TABLE when renaming only .frm files */
- if (table_type == NULL ||
- ! (file=get_new_handler((TABLE_SHARE*)0, thd->mem_root, table_type)))
- DBUG_RETURN(ENOENT);
-
path= get_canonical_filename(file, path, tmp_path);
if ((error= file->ha_delete_table(path)) && generate_warning)
{
@@ -2198,6 +2245,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
  /* Fill up structures that print_error may need */
dummy_share.path.str= (char*) path;
dummy_share.path.length= strlen(path);
+ dummy_share.normalized_path= dummy_share.path;
dummy_share.db.str= (char*) db;
dummy_share.db.length= strlen(db);
dummy_share.table_name.str= (char*) alias;
@@ -2221,15 +2269,6 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
}
delete file;
-#ifdef HAVE_PSI_TABLE_INTERFACE
- if (likely(error == 0))
- {
- my_bool temp_table= (my_bool)is_prefix(alias, tmp_file_prefix);
- PSI_TABLE_CALL(drop_table_share)(temp_table, db, strlen(db),
- alias, strlen(alias));
- }
-#endif
-
DBUG_RETURN(error);
}
@@ -2306,31 +2345,26 @@ THD *handler::ha_thd(void) const
void handler::unbind_psi()
{
-#ifdef HAVE_PSI_TABLE_INTERFACE
/*
Notify the instrumentation that this table is not owned
by this thread any more.
*/
- PSI_TABLE_CALL(unbind_table)(m_psi);
-#endif
+ PSI_CALL_unbind_table(m_psi);
}
void handler::rebind_psi()
{
-#ifdef HAVE_PSI_TABLE_INTERFACE
/*
Notify the instrumentation that this table is now owned
by this thread.
*/
- PSI_table_share *share_psi= ha_table_share_psi(table_share);
- m_psi= PSI_TABLE_CALL(rebind_table)(share_psi, this, m_psi);
-#endif
+ m_psi= PSI_CALL_rebind_table(ha_table_share_psi(), this, m_psi);
}
-PSI_table_share *handler::ha_table_share_psi(const TABLE_SHARE *share) const
+PSI_table_share *handler::ha_table_share_psi() const
{
- return share->m_psi;
+ return table_share->m_psi;
}
/** @brief
@@ -2374,7 +2408,6 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
{
DBUG_ASSERT(m_psi == NULL);
DBUG_ASSERT(table_share != NULL);
-#ifdef HAVE_PSI_TABLE_INTERFACE
/*
Do not call this for partitions handlers, since it may take too much
resources.
@@ -2382,10 +2415,8 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
*/
if (!(test_if_locked & HA_OPEN_NO_PSI_CALL))
{
- PSI_table_share *share_psi= ha_table_share_psi(table_share);
- m_psi= PSI_TABLE_CALL(open_table)(share_psi, this);
+ m_psi= PSI_CALL_open_table(ha_table_share_psi(), this);
}
-#endif
if (table->s->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
table->db_stat|=HA_READ_ONLY;
@@ -2416,12 +2447,9 @@ int handler::ha_close(void)
*/
if (table->in_use)
status_var_add(table->in_use->status_var.rows_tmp_read, rows_tmp_read);
-#ifdef HAVE_PSI_TABLE_INTERFACE
- PSI_TABLE_CALL(close_table)(m_psi);
+ PSI_CALL_close_table(m_psi);
m_psi= NULL; /* instrumentation handle, invalid after close_table() */
-#endif
- // TODO: set table= NULL to mark the handler as closed?
- DBUG_ASSERT(m_psi == NULL);
+
DBUG_ASSERT(m_lock_type == F_UNLCK);
DBUG_ASSERT(inited == NONE);
DBUG_RETURN(close());
@@ -3049,6 +3077,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
{
/* This should never happen, assert in debug, and fail in release build */
DBUG_ASSERT(0);
+ (void) extra(HA_EXTRA_NO_KEYREAD);
*first_value= ULONGLONG_MAX;
return;
}
@@ -3197,7 +3226,7 @@ void handler::print_error(int error, myf errflag)
DBUG_ENTER("handler::print_error");
DBUG_PRINT("enter",("error: %d",error));
- int textno=ER_GET_ERRNO;
+ int textno= -1; // impossible value
switch (error) {
case EACCES:
textno=ER_OPEN_AS_READONLY;
@@ -3439,10 +3468,11 @@ void handler::print_error(int error, myf errflag)
}
}
else
- my_error(ER_GET_ERRNO,errflag,error);
+ my_error(ER_GET_ERRNO, errflag, error, table_type());
DBUG_VOID_RETURN;
}
}
+ DBUG_ASSERT(textno > 0);
if (fatal_error)
{
/* Ensure this becomes a true error */
@@ -3456,7 +3486,17 @@ void handler::print_error(int error, myf errflag)
errflag|= ME_NOREFRESH;
}
}
- my_error(textno, errflag, table_share->table_name.str, error);
+
+  /* if we got an OS error from a file-based engine, include the path in the error */
+ if (error < HA_ERR_FIRST && bas_ext()[0])
+ {
+ char buff[FN_REFLEN];
+ strxnmov(buff, sizeof(buff),
+ table_share->normalized_path.str, bas_ext()[0], NULL);
+ my_error(textno, errflag, buff, error);
+ }
+ else
+ my_error(textno, errflag, table_share->table_name.str, error);
DBUG_VOID_RETURN;
}
@@ -3660,9 +3700,14 @@ int handler::delete_table(const char *name)
{
int saved_error= 0;
int error= 0;
- int enoent_or_zero= ENOENT; // Error if no file was deleted
+ int enoent_or_zero;
char buff[FN_REFLEN];
+ if (ht->discover_table)
+ enoent_or_zero= 0; // the table may not exist in the engine, it's ok
+ else
+ enoent_or_zero= ENOENT; // the first file of bas_ext() *must* exist
+
for (const char **ext=bas_ext(); *ext ; ext++)
{
fn_format(buff, name, "", *ext, MY_UNPACK_FILENAME|MY_APPEND_EXT);
@@ -3755,6 +3800,9 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt)
}
if ((error= check(thd, check_opt)))
return error;
+ /* Skip updating frm version if not main handler. */
+ if (table->file != this)
+ return error;
return update_frm_version(table);
}
@@ -4074,7 +4122,7 @@ handler::check_if_supported_inplace_alter(TABLE *altered_table,
void handler::notify_table_changed()
{
- ha_create_handler_files(table->s->path.str, NULL, CHF_INDEX_FLAG, NULL);
+ ha_create_partitioning_metadata(table->s->path.str, NULL, CHF_INDEX_FLAG);
}
@@ -4116,7 +4164,6 @@ int
handler::ha_delete_table(const char *name)
{
mark_trx_read_write();
-
return delete_table(name);
}
@@ -4151,20 +4198,23 @@ handler::ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info)
{
DBUG_ASSERT(m_lock_type == F_UNLCK);
mark_trx_read_write();
-
- return create(name, form, info);
+ int error= create(name, form, info);
+ if (!error &&
+ !(info->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER)))
+ mysql_audit_create_table(form);
+ return error;
}
/**
Create handler files for CREATE TABLE: public interface.
- @sa handler::create_handler_files()
+ @sa handler::create_partitioning_metadata()
*/
int
-handler::ha_create_handler_files(const char *name, const char *old_name,
- int action_flag, HA_CREATE_INFO *info)
+handler::ha_create_partitioning_metadata(const char *name, const char *old_name,
+ int action_flag)
{
/*
Normally this is done when unlocked, but in fast_alter_partition_table,
@@ -4175,7 +4225,7 @@ handler::ha_create_handler_files(const char *name, const char *old_name,
(!old_name && strcmp(name, table_share->path.str)));
mark_trx_read_write();
- return create_handler_files(name, old_name, action_flag, info);
+ return create_partitioning_metadata(name, old_name, action_flag);
}
@@ -4473,154 +4523,66 @@ end:
*/
int ha_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
- HA_CREATE_INFO *create_info,
- bool update_create_info)
+ HA_CREATE_INFO *create_info, LEX_CUSTRING *frm)
{
int error= 1;
TABLE table;
char name_buff[FN_REFLEN];
const char *name;
TABLE_SHARE share;
+ bool temp_table __attribute__((unused)) =
+ create_info->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER);
+
DBUG_ENTER("ha_create_table");
-#ifdef HAVE_PSI_TABLE_INTERFACE
- my_bool temp_table= (my_bool)is_prefix(table_name, tmp_file_prefix) ||
- (create_info->options & HA_LEX_CREATE_TMP_TABLE ? TRUE : FALSE);
-#endif
-
- init_tmp_table_share(thd, &share, db, 0, table_name, path);
- if (open_table_def(thd, &share, 0))
- goto err;
-
-#ifdef HAVE_PSI_TABLE_INTERFACE
- share.m_psi= PSI_TABLE_CALL(get_table_share)(temp_table, &share);
-#endif
- if (open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table,
- TRUE))
- goto err;
- if (update_create_info)
- update_create_info_from_table(create_info, &table);
-
- name= get_canonical_filename(table.file, share.path.str, name_buff);
+ init_tmp_table_share(thd, &share, db, 0, table_name, path);
- error= table.file->ha_create(name, &table, create_info);
- (void) closefrm(&table, 0);
- if (error)
+ if (frm)
{
- strxmov(name_buff, db, ".", table_name, NullS);
- my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name_buff, error);
-#ifdef HAVE_PSI_TABLE_INTERFACE
- PSI_TABLE_CALL(drop_table_share)(temp_table, db, strlen(db), table_name,
- strlen(table_name));
-#endif
- }
-err:
- free_table_share(&share);
- DBUG_RETURN(error != 0);
-}
-
-/**
- Try to discover table from engine.
+ bool write_frm_now= !create_info->db_type->discover_table &&
+ !create_info->tmp_table();
- @note
- If found, write the frm file to disk.
+ share.frm_image= frm;
- @retval
- -1 Table did not exists
- @retval
- 0 Table created ok
- @retval
- > 0 Error, table existed but could not be created
-*/
-int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
-{
- int error;
- uchar *frmblob;
- size_t frmlen;
- char path[FN_REFLEN + 1];
- HA_CREATE_INFO create_info;
- TABLE table;
- TABLE_SHARE share;
- DBUG_ENTER("ha_create_table_from_engine");
- DBUG_PRINT("enter", ("name '%s'.'%s'", db, name));
-
- bzero((uchar*) &create_info,sizeof(create_info));
- if ((error= ha_discover(thd, db, name, &frmblob, &frmlen)))
- {
- /* Table could not be discovered and thus not created */
- DBUG_RETURN(error);
+ // open an frm image
+ if (share.init_from_binary_frm_image(thd, write_frm_now,
+ frm->str, frm->length))
+ goto err;
}
-
- /*
- Table exists in handler and could be discovered
- frmblob and frmlen are set, write the frm to disk
- */
-
- build_table_filename(path, sizeof(path) - 1, db, name, "", 0);
- // Save the frm file
- error= writefrm(path, frmblob, frmlen);
- my_free(frmblob);
- if (error)
- DBUG_RETURN(2);
-
- init_tmp_table_share(thd, &share, db, 0, name, path);
- if (open_table_def(thd, &share, 0))
+ else
{
- DBUG_RETURN(3);
- }
+ // open an frm file
+ share.db_plugin= ha_lock_engine(thd, create_info->db_type);
-#ifdef HAVE_PSI_TABLE_INTERFACE
- /*
- Table discovery is not instrumented.
- Once discovered, the table will be opened normally,
- and instrumented normally.
- */
-#endif
-
- if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table, FALSE))
- {
- free_table_share(&share);
- DBUG_RETURN(3);
+ if (open_table_def(thd, &share))
+ goto err;
}
- update_create_info_from_table(&create_info, &table);
- create_info.table_options|= HA_OPTION_CREATE_FROM_ENGINE;
-
- get_canonical_filename(table.file, path, path);
- error=table.file->ha_create(path, &table, &create_info);
- (void) closefrm(&table, 1);
+ share.m_psi= PSI_CALL_get_table_share(temp_table, &share);
- DBUG_RETURN(error != 0);
-}
-
-
-/**
- Try to find a table in a storage engine.
+ if (open_table_from_share(thd, &share, "", 0, READ_ALL, 0, &table, true))
+ goto err;
- @param db Normalized table schema name
- @param name Normalized table name.
- @param[out] exists Only valid if the function succeeded.
+ update_create_info_from_table(create_info, &table);
- @retval TRUE An error is found
- @retval FALSE Success, check *exists
-*/
+ name= get_canonical_filename(table.file, share.path.str, name_buff);
-bool
-ha_check_if_table_exists(THD* thd, const char *db, const char *name,
- bool *exists)
-{
- uchar *frmblob= NULL;
- size_t frmlen;
- DBUG_ENTER("ha_check_if_table_exists");
+ error= table.file->ha_create(name, &table, create_info);
- *exists= ! ha_discover(thd, db, name, &frmblob, &frmlen);
- if (*exists)
- my_free(frmblob);
+ (void) closefrm(&table, 0);
- DBUG_RETURN(FALSE);
+ if (error)
+ {
+ my_error(ER_CANT_CREATE_TABLE, MYF(0), db, table_name, error);
+ PSI_CALL_drop_table_share(temp_table, share.db.str, share.db.length,
+ share.table_name.str, share.table_name.length);
+ }
+
+err:
+ free_table_share(&share);
+ DBUG_RETURN(error != 0);
}
-
void st_ha_check_opt::init()
{
flags= sql_flags= 0;
@@ -4743,149 +4705,353 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
}
-/**
- Try to discover one table from handler(s).
-
- @retval
- -1 Table did not exists
- @retval
- 0 OK. In this case *frmblob and *frmlen are set
- @retval
- >0 error. frmblob and frmlen may not be set
-*/
-struct st_discover_args
-{
- const char *db;
- const char *name;
- uchar **frmblob;
- size_t *frmlen;
-};
-
static my_bool discover_handlerton(THD *thd, plugin_ref plugin,
void *arg)
{
- st_discover_args *vargs= (st_discover_args *)arg;
- handlerton *hton= plugin_data(plugin, handlerton *);
- if (hton->state == SHOW_OPTION_YES && hton->discover &&
- (!(hton->discover(hton, thd, vargs->db, vargs->name,
- vargs->frmblob,
- vargs->frmlen))))
- return TRUE;
+ TABLE_SHARE *share= (TABLE_SHARE *)arg;
+ handlerton *hton= plugin_hton(plugin);
+ if (hton->state == SHOW_OPTION_YES && hton->discover_table)
+ {
+ share->db_plugin= plugin;
+ int error= hton->discover_table(hton, thd, share);
+ if (error != HA_ERR_NO_SUCH_TABLE)
+ {
+ if (error)
+ {
+ DBUG_ASSERT(share->error); // get_cached_table_share needs that
+ /*
+ report an error, unless it is "generic" and a more
+ specific one was already reported
+ */
+ if (error != HA_ERR_GENERIC || !thd->is_error())
+ my_error(ER_GET_ERRNO, MYF(0), error, plugin_name(plugin)->str);
+ share->db_plugin= 0;
+ }
+ else
+ share->error= OPEN_FRM_OK;
- return FALSE;
+ status_var_increment(thd->status_var.ha_discover_count);
+ return TRUE; // abort the search
+ }
+ share->db_plugin= 0;
+ }
+
+ DBUG_ASSERT(share->error == OPEN_FRM_OPEN_ERROR);
+ return FALSE; // continue with the next engine
}
-int ha_discover(THD *thd, const char *db, const char *name,
- uchar **frmblob, size_t *frmlen)
+int ha_discover_table(THD *thd, TABLE_SHARE *share)
{
- int error= -1; // Table does not exist in any handler
- DBUG_ENTER("ha_discover");
- DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
- st_discover_args args= {db, name, frmblob, frmlen};
+ DBUG_ENTER("ha_discover_table");
+ int found;
- if (is_prefix(name,tmp_file_prefix)) /* skip temporary tables */
- DBUG_RETURN(error);
+ DBUG_ASSERT(share->error == OPEN_FRM_OPEN_ERROR); // share is not OK yet
- if (plugin_foreach(thd, discover_handlerton,
- MYSQL_STORAGE_ENGINE_PLUGIN, &args))
- error= 0;
+ if (share->db_plugin)
+ found= discover_handlerton(thd, share->db_plugin, share);
+ else
+ found= plugin_foreach(thd, discover_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, share);
+
+ if (!found)
+ open_table_error(share, OPEN_FRM_OPEN_ERROR, ENOENT); // not found
- if (!error)
- status_var_increment(thd->status_var.ha_discover_count);
- DBUG_RETURN(error);
+ DBUG_RETURN(share->error != OPEN_FRM_OK);
}
+static my_bool file_ext_exists(char *path, size_t path_len, const char *ext)
+{
+ strmake(path + path_len, ext, FN_REFLEN - path_len);
+ return !access(path, F_OK);
+}
-/**
- Call this function in order to give the handler the possiblity
- to ask engine if there are any new tables that should be written to disk
- or any dropped tables that need to be removed from disk
-*/
-struct st_find_files_args
+struct st_discover_existence_args
{
- const char *db;
- const char *path;
- const char *wild;
- bool dir;
- List<LEX_STRING> *files;
+ char *path;
+ size_t path_len;
+ const char *db, *table_name;
+ handlerton *hton;
};
-static my_bool find_files_handlerton(THD *thd, plugin_ref plugin,
- void *arg)
+static my_bool discover_existence(THD *thd, plugin_ref plugin,
+ void *arg)
{
- st_find_files_args *vargs= (st_find_files_args *)arg;
- handlerton *hton= plugin_data(plugin, handlerton *);
+ st_discover_existence_args *args= (st_discover_existence_args*)arg;
+ handlerton *ht= plugin_hton(plugin);
+ if (ht->state != SHOW_OPTION_YES || !ht->discover_table_existence)
+ return FALSE;
+ args->hton= ht;
- if (hton->state == SHOW_OPTION_YES && hton->find_files)
- if (hton->find_files(hton, thd, vargs->db, vargs->path, vargs->wild,
- vargs->dir, vargs->files))
- return TRUE;
+ if (ht->discover_table_existence == ext_based_existence)
+ return file_ext_exists(args->path, args->path_len,
+ ht->tablefile_extensions[0]);
- return FALSE;
+ return ht->discover_table_existence(ht, args->db, args->table_name);
}
-int
-ha_find_files(THD *thd,const char *db,const char *path,
- const char *wild, bool dir, List<LEX_STRING> *files)
+class Table_exists_error_handler : public Internal_error_handler
{
- int error= 0;
- DBUG_ENTER("ha_find_files");
- DBUG_PRINT("enter", ("db: '%s' path: '%s' wild: '%s' dir: %d",
- db, path, wild, dir));
- st_find_files_args args= {db, path, wild, dir, files};
-
- plugin_foreach(thd, find_files_handlerton,
- MYSQL_STORAGE_ENGINE_PLUGIN, &args);
- /* The return value is not currently used */
- DBUG_RETURN(error);
+public:
+ Table_exists_error_handler()
+ : m_handled_errors(0), m_unhandled_errors(0)
+ {}
+
+ bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ Sql_condition::enum_warning_level level,
+ const char* msg,
+ Sql_condition ** cond_hdl)
+ {
+ *cond_hdl= NULL;
+ if (sql_errno == ER_NO_SUCH_TABLE ||
+ sql_errno == ER_NO_SUCH_TABLE_IN_ENGINE ||
+ sql_errno == ER_WRONG_OBJECT)
+ {
+ m_handled_errors++;
+ return TRUE;
+ }
+
+ if (level == Sql_condition::WARN_LEVEL_ERROR)
+ m_unhandled_errors++;
+ return FALSE;
+ }
+
+ bool safely_trapped_errors()
+ {
+ return ((m_handled_errors > 0) && (m_unhandled_errors == 0));
+ }
+
+private:
+ int m_handled_errors;
+ int m_unhandled_errors;
+};
+
+/**
+  Check if a given table exists, without doing a full discovery, if possible
+
+ If the 'hton' is not NULL, it's set to the handlerton of the storage engine
+ of this table, or to view_pseudo_hton if the frm belongs to a view.
+
+
+  @retval true   Table exists (even if an error occurred, e.g. a corrupted frm)
+ @retval false Table does not exist (one can do CREATE TABLE table_name)
+*/
+bool ha_table_exists(THD *thd, const char *db, const char *table_name,
+ handlerton **hton)
+{
+ DBUG_ENTER("ha_table_exists");
+
+ if (hton)
+ *hton= 0;
+
+ if (need_full_discover_for_existence)
+ {
+ TABLE_LIST table;
+ uint flags = GTS_TABLE | GTS_VIEW;
+
+ if (!hton)
+ flags|= GTS_NOLOCK;
+
+ Table_exists_error_handler no_such_table_handler;
+ thd->push_internal_handler(&no_such_table_handler);
+ TABLE_SHARE *share= get_table_share(thd, db, table_name, flags);
+ thd->pop_internal_handler();
+
+ if (hton && share)
+ {
+ *hton= share->db_type();
+ mysql_mutex_lock(&LOCK_open);
+ release_table_share(share);
+ mysql_mutex_unlock(&LOCK_open);
+ }
+
+ // the table doesn't exist if we've caught ER_NO_SUCH_TABLE and nothing else
+ DBUG_RETURN(!no_such_table_handler.safely_trapped_errors());
+ }
+
+ mysql_mutex_lock(&LOCK_open);
+ TABLE_SHARE *share= get_cached_table_share(db, table_name);
+ if (hton && share)
+ *hton= share->db_type();
+ mysql_mutex_unlock(&LOCK_open);
+
+ if (share)
+ DBUG_RETURN(TRUE);
+
+ char path[FN_REFLEN + 1];
+ size_t path_len = build_table_filename(path, sizeof(path) - 1,
+ db, table_name, "", 0);
+
+ if (file_ext_exists(path, path_len, reg_ext))
+ {
+ if (hton)
+ {
+ enum legacy_db_type db_type;
+ if (dd_frm_type(thd, path, &db_type) != FRMTYPE_VIEW)
+ *hton= ha_resolve_by_legacy_type(thd, db_type);
+ else
+ *hton= view_pseudo_hton;
+ }
+ DBUG_RETURN(TRUE);
+ }
+
+ st_discover_existence_args args= {path, path_len, db, table_name, 0};
+
+ if (plugin_foreach(thd, discover_existence, MYSQL_STORAGE_ENGINE_PLUGIN,
+ &args))
+ {
+ if (hton)
+ *hton= args.hton;
+ DBUG_RETURN(TRUE);
+ }
+
+ DBUG_RETURN(FALSE);
}
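
For illustration, a minimal caller-side sketch (hypothetical code, not part of this patch) of how ha_table_exists() is meant to be used, e.g. when validating a CREATE TABLE target; passing the hton out-parameter is optional:

    static bool can_create(THD *thd, const char *db, const char *table_name)
    {
      handlerton *hton;
      if (!ha_table_exists(thd, db, table_name, &hton))
        return true;                            // nothing there, CREATE TABLE is ok
      if (hton == view_pseudo_hton)
        my_error(ER_WRONG_OBJECT, MYF(0), db, table_name, "BASE TABLE");
      else
        my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
      return false;
    }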
/**
- Ask handler if the table exists in engine.
- @retval
- HA_ERR_NO_SUCH_TABLE Table does not exist
- @retval
- HA_ERR_TABLE_EXIST Table exists
- @retval
- \# Error code
+ Discover all table names in a given database
*/
-struct st_table_exists_in_engine_args
+extern "C" {
+
+static int cmp_file_names(const void *a, const void *b)
{
- const char *db;
- const char *name;
- int err;
-};
+ CHARSET_INFO *cs= character_set_filesystem;
+ char *aa= ((FILEINFO *)a)->name;
+ char *bb= ((FILEINFO *)b)->name;
+ return my_strnncoll(cs, (uchar*)aa, strlen(aa), (uchar*)bb, strlen(bb));
+}
-static my_bool table_exists_in_engine_handlerton(THD *thd, plugin_ref plugin,
- void *arg)
+static int cmp_table_names(LEX_STRING * const *a, LEX_STRING * const *b)
{
- st_table_exists_in_engine_args *vargs= (st_table_exists_in_engine_args *)arg;
- handlerton *hton= plugin_data(plugin, handlerton *);
+ return my_strnncoll(&my_charset_bin, (uchar*)((*a)->str), (*a)->length,
+ (uchar*)((*b)->str), (*b)->length);
+}
- int err= HA_ERR_NO_SUCH_TABLE;
+}
- if (hton->state == SHOW_OPTION_YES && hton->table_exists_in_engine)
- err = hton->table_exists_in_engine(hton, thd, vargs->db, vargs->name);
+Discovered_table_list::Discovered_table_list(THD *thd_arg,
+ Dynamic_array<LEX_STRING*> *tables_arg,
+ const LEX_STRING *wild_arg)
+{
+ thd= thd_arg;
+ tables= tables_arg;
+ if (wild_arg->str && wild_arg->str[0])
+ {
+ wild= wild_arg->str;
+ wend= wild + wild_arg->length;
+ }
+ else
+ wild= 0;
+}
- vargs->err = err;
- if (vargs->err == HA_ERR_TABLE_EXIST)
- return TRUE;
+bool Discovered_table_list::add_table(const char *tname, size_t tlen)
+{
+ if (wild && my_wildcmp(files_charset_info, tname, tname + tlen, wild, wend,
+ wild_prefix, wild_one, wild_many))
+ return 0;
- return FALSE;
+ LEX_STRING *name= thd->make_lex_string(tname, tlen);
+ if (!name || tables->append(name))
+ return 1;
+ return 0;
+}
+
+bool Discovered_table_list::add_file(const char *fname)
+{
+ char tname[SAFE_NAME_LEN + 1];
+ size_t tlen= filename_to_tablename(fname, tname, sizeof(tname));
+ return add_table(tname, tlen);
+}
+
+
+void Discovered_table_list::sort()
+{
+ tables->sort(cmp_table_names);
+}
+
+void Discovered_table_list::remove_duplicates()
+{
+ LEX_STRING **src= tables->front();
+ LEX_STRING **dst= src;
+ while (++dst <= tables->back())
+ {
+ LEX_STRING *s= *src, *d= *dst;
+ DBUG_ASSERT(strncmp(s->str, d->str, MY_MIN(s->length, d->length)) <= 0);
+ if ((s->length != d->length || strncmp(s->str, d->str, d->length)))
+ {
+ src++;
+ if (src != dst)
+ *src= *dst;
+ }
+ }
+ tables->elements(src - tables->front() + 1);
+}
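
For illustration, the same in-place compaction idea on a plain sorted array of names (made-up data, not from the patch; uses strcmp from <string.h>):

    const char *in[]= { "t1", "t1", "t2", "t3", "t3" };   // sorted input
    size_t n= 5, kept= 1;                                 // first element is always kept
    for (size_t i= 1; i < n; i++)
      if (strcmp(in[i], in[kept - 1]) != 0)               // differs from the last kept one
        in[kept++]= in[i];
    /* kept == 3, and in[0..2] is now { "t1", "t2", "t3" } */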
+
+struct st_discover_names_args
+{
+ LEX_STRING *db;
+ MY_DIR *dirp;
+ Discovered_table_list *result;
+ uint possible_duplicates;
+};
+
+static my_bool discover_names(THD *thd, plugin_ref plugin,
+ void *arg)
+{
+ st_discover_names_args *args= (st_discover_names_args *)arg;
+ handlerton *ht= plugin_hton(plugin);
+
+ if (ht->state == SHOW_OPTION_YES && ht->discover_table_names)
+ {
+ uint old_elements= args->result->tables->elements();
+ if (ht->discover_table_names(ht, args->db, args->dirp, args->result))
+ return 1;
+
+ /*
+ hton_ext_based_table_discovery never discovers a table that has
+      a corresponding .frm file, but custom engine discovery methods might
+ */
+ if (ht->discover_table_names != hton_ext_based_table_discovery)
+ args->possible_duplicates+= args->result->tables->elements() - old_elements;
+ }
+
+ return 0;
}
-int ha_table_exists_in_engine(THD* thd, const char* db, const char* name)
+int ha_discover_table_names(THD *thd, LEX_STRING *db, MY_DIR *dirp,
+ Discovered_table_list *result, bool reusable)
{
- DBUG_ENTER("ha_table_exists_in_engine");
- DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
- st_table_exists_in_engine_args args= {db, name, HA_ERR_NO_SUCH_TABLE};
- plugin_foreach(thd, table_exists_in_engine_handlerton,
- MYSQL_STORAGE_ENGINE_PLUGIN, &args);
- DBUG_PRINT("exit", ("error: %d", args.err));
- DBUG_RETURN(args.err);
+ int error;
+ DBUG_ENTER("ha_discover_table_names");
+
+ if (engines_with_discover_table_names == 0 && !reusable)
+ {
+ error= ext_table_discovery_simple(dirp, result);
+ result->sort();
+ }
+ else
+ {
+ st_discover_names_args args= {db, dirp, result, 0};
+
+ /* extension_based_table_discovery relies on dirp being sorted */
+ my_qsort(dirp->dir_entry, dirp->number_of_files,
+ sizeof(FILEINFO), cmp_file_names);
+
+ error= extension_based_table_discovery(dirp, reg_ext, result) ||
+ plugin_foreach(thd, discover_names,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &args);
+ result->sort();
+
+ if (args.possible_duplicates > 0)
+ result->remove_duplicates();
+ }
+
+ DBUG_RETURN(error);
}
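
For illustration, a hypothetical caller (not part of this patch) that collects the table names of one database roughly the way SHOW TABLES-style code would; the database name and directory path here are made up:

    Dynamic_array<LEX_STRING*> names;
    LEX_STRING wild= { (char*) "", 0 };                 // empty pattern: no filtering
    LEX_STRING db_name= { (char*) "test", 4 };
    Discovered_table_list tl(thd, &names, &wild);

    if (MY_DIR *dirp= my_dir("./test/", MYF(MY_DONT_SORT)))   // datadir/db_name/
    {
      ha_discover_table_names(thd, &db_name, dirp, &tl, false);
      my_dirend(dirp);
    }
    for (uint i= 0; i < names.elements(); i++)
      sql_print_information("discovered: %s", names.at(i)->str);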
+
#ifdef HAVE_NDB_BINLOG
/*
TODO: change this into a dynamic struct
@@ -4912,7 +5078,7 @@ struct binlog_func_st
static my_bool binlog_func_list(THD *thd, plugin_ref plugin, void *arg)
{
hton_list_st *hton_list= (hton_list_st *)arg;
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->binlog_func)
{
uint sz= hton_list->sz;
@@ -5002,7 +5168,7 @@ static my_bool binlog_log_query_handlerton(THD *thd,
plugin_ref plugin,
void *args)
{
- return binlog_log_query_handlerton2(thd, plugin_data(plugin, handlerton *), args);
+ return binlog_log_query_handlerton2(thd, plugin_hton(plugin), args);
}
void ha_binlog_log_query(THD *thd, handlerton *hton,
@@ -5240,27 +5406,21 @@ static my_bool exts_handlerton(THD *unused, plugin_ref plugin,
void *arg)
{
List<char> *found_exts= (List<char> *) arg;
- handlerton *hton= plugin_data(plugin, handlerton *);
- handler *file;
- if (hton->state == SHOW_OPTION_YES && hton->create &&
- (file= hton->create(hton, (TABLE_SHARE*) 0, current_thd->mem_root)))
- {
- List_iterator_fast<char> it(*found_exts);
- const char **ext, *old_ext;
+ handlerton *hton= plugin_hton(plugin);
+ List_iterator_fast<char> it(*found_exts);
+ const char **ext, *old_ext;
- for (ext= file->bas_ext(); *ext; ext++)
+ for (ext= hton->tablefile_extensions; *ext; ext++)
+ {
+ while ((old_ext= it++))
{
- while ((old_ext= it++))
- {
- if (!strcmp(old_ext, *ext))
- break;
- }
- if (!old_ext)
- found_exts->push_back((char *) *ext);
-
- it.rewind();
+ if (!strcmp(old_ext, *ext))
+ break;
}
- delete file;
+ if (!old_ext)
+ found_exts->push_back((char *) *ext);
+
+ it.rewind();
}
return FALSE;
}
@@ -5315,7 +5475,7 @@ static my_bool showstat_handlerton(THD *thd, plugin_ref plugin,
void *arg)
{
enum ha_stat_type stat= *(enum ha_stat_type *) arg;
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->state == SHOW_OPTION_YES && hton->show_status &&
hton->show_status(hton, thd, stat_print, stat))
return TRUE;
@@ -5361,7 +5521,7 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat)
if (!result && !thd->is_error())
my_eof(thd);
else if (!thd->is_error())
- my_error(ER_GET_ERRNO, MYF(0), errno);
+ my_error(ER_GET_ERRNO, MYF(0), errno, hton_name(db_type)->str);
return result;
}
@@ -5584,12 +5744,10 @@ int handler::ha_external_lock(THD *thd, int lock_type)
if (error == 0)
{
- /*
- The lock type is needed by MRR when creating a clone of this handler
- object and for assert checking.
- */
m_lock_type= lock_type;
cached_table_flags= table_flags();
+ if (table_share->tmp_table == NO_TMP_TABLE)
+ mysql_audit_external_lock(thd, table_share, lock_type);
}
if (MYSQL_HANDLER_RDLOCK_DONE_ENABLED() ||
@@ -5647,6 +5805,8 @@ int handler::ha_write_row(uchar *buf)
m_lock_type == F_WRLCK);
DBUG_ENTER("handler::ha_write_row");
DEBUG_SYNC_C("ha_write_row_start");
+ DBUG_EXECUTE_IF("inject_error_ha_write_row",
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR); );
MYSQL_INSERT_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write();
@@ -5679,6 +5839,7 @@ int handler::ha_update_row(const uchar *old_data, uchar *new_data)
(and the old record is in record[1]).
*/
DBUG_ASSERT(new_data == table->record[0]);
+ DBUG_ASSERT(old_data == table->record[1]);
MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write();
@@ -5700,6 +5861,13 @@ int handler::ha_delete_row(const uchar *buf)
{
int error;
Log_func *log_func= Delete_rows_log_event::binlog_row_logging_function;
+ /*
+ Normally table->record[0] is used, but sometimes table->record[1] is used.
+ */
+ DBUG_ASSERT(buf == table->record[0] ||
+ buf == table->record[1]);
+ DBUG_EXECUTE_IF("inject_error_ha_delete_row",
+ return HA_ERR_INTERNAL_ERROR; );
DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
m_lock_type == F_WRLCK);
diff --git a/sql/handler.h b/sql/handler.h
index 0a9933cd028..478317e881d 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -2,7 +2,7 @@
#define HANDLER_INCLUDED
/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2009-2011 Monty Program Ab
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -31,10 +31,12 @@
#include "thr_lock.h" /* thr_lock_type, THR_LOCK_DATA */
#include "sql_cache.h"
#include "structs.h" /* SHOW_COMP_OPTION */
+#include "sql_array.h" /* Dynamic_array<> */
#include <my_compare.h>
#include <ft_global.h>
#include <keycache.h>
+#include <mysql/psi/mysql_table.h>
#if MAX_KEY > 128
#error MAX_KEY is too large. Values up to 128 are supported.
@@ -77,9 +79,9 @@ enum enum_alter_inplace_result {
/* Bits in table_flags() to show what database can do */
-#define HA_NO_TRANSACTIONS (1 << 0) /* Doesn't support transactions */
-#define HA_PARTIAL_COLUMN_READ (1 << 1) /* read may not return all columns */
-#define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
+#define HA_NO_TRANSACTIONS (1ULL << 0) /* Doesn't support transactions */
+#define HA_PARTIAL_COLUMN_READ (1ULL << 1) /* read may not return all columns */
+#define HA_TABLE_SCAN_ON_INDEX (1ULL << 2) /* No separate data/index file */
/*
The following should be set if the following is not true when scanning
a table with rnd_next()
@@ -88,37 +90,37 @@ enum enum_alter_inplace_result {
If this flag is not set, filesort will do a position() call for each matched
row to be able to find the row later.
*/
-#define HA_REC_NOT_IN_SEQ (1 << 3)
-#define HA_CAN_GEOMETRY (1 << 4)
+#define HA_REC_NOT_IN_SEQ (1ULL << 3)
+#define HA_CAN_GEOMETRY (1ULL << 4)
/*
Reading keys in random order is as fast as reading keys in sort order
(Used in records.cc to decide if we should use a record cache and by
filesort to decide if we should sort key + data or key + pointer-to-row
*/
-#define HA_FAST_KEY_READ (1 << 5)
+#define HA_FAST_KEY_READ (1ULL << 5)
/*
Set the following flag if we on delete should force all key to be read
and on update read all keys that changes
*/
-#define HA_REQUIRES_KEY_COLUMNS_FOR_DELETE (1 << 6)
-#define HA_NULL_IN_KEY (1 << 7) /* One can have keys with NULL */
-#define HA_DUPLICATE_POS (1 << 8) /* position() gives dup row */
-#define HA_NO_BLOBS (1 << 9) /* Doesn't support blobs */
-#define HA_CAN_INDEX_BLOBS (1 << 10)
-#define HA_AUTO_PART_KEY (1 << 11) /* auto-increment in multi-part key */
-#define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. and can't create a hidden one */
-#define HA_STATS_RECORDS_IS_EXACT (1 << 13) /* stats.records is exact */
+#define HA_REQUIRES_KEY_COLUMNS_FOR_DELETE (1ULL << 6)
+#define HA_NULL_IN_KEY (1ULL << 7) /* One can have keys with NULL */
+#define HA_DUPLICATE_POS (1ULL << 8) /* ha_position() gives dup row */
+#define HA_NO_BLOBS (1ULL << 9) /* Doesn't support blobs */
+#define HA_CAN_INDEX_BLOBS (1ULL << 10)
+#define HA_AUTO_PART_KEY (1ULL << 11) /* auto-increment in multi-part key */
+#define HA_REQUIRE_PRIMARY_KEY (1ULL << 12) /* .. and can't create a hidden one */
+#define HA_STATS_RECORDS_IS_EXACT (1ULL << 13) /* stats.records is exact */
/*
INSERT_DELAYED only works with handlers that uses MySQL internal table
level locks
*/
-#define HA_CAN_INSERT_DELAYED (1 << 14)
+#define HA_CAN_INSERT_DELAYED (1ULL << 14)
/*
If we get the primary key columns for free when we do an index read
(usually, it also implies that HA_PRIMARY_KEY_REQUIRED_FOR_POSITION
flag is set).
*/
-#define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15)
+#define HA_PRIMARY_KEY_IN_READ_INDEX (1ULL << 15)
/*
If HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is set, it means that to position()
uses a primary key given by the record argument.
@@ -126,36 +128,36 @@ enum enum_alter_inplace_result {
If not set, the position is returned as the current rows position
regardless of what argument is given.
*/
-#define HA_PRIMARY_KEY_REQUIRED_FOR_POSITION (1 << 16)
-#define HA_CAN_RTREEKEYS (1 << 17)
-#define HA_NOT_DELETE_WITH_CACHE (1 << 18)
+#define HA_PRIMARY_KEY_REQUIRED_FOR_POSITION (1ULL << 16)
+#define HA_CAN_RTREEKEYS (1ULL << 17)
+#define HA_NOT_DELETE_WITH_CACHE (1ULL << 18)
/*
The following is we need to a primary key to delete (and update) a row.
If there is no primary key, all columns needs to be read on update and delete
*/
-#define HA_PRIMARY_KEY_REQUIRED_FOR_DELETE (1 << 19)
-#define HA_NO_PREFIX_CHAR_KEYS (1 << 20)
-#define HA_CAN_FULLTEXT (1 << 21)
-#define HA_CAN_SQL_HANDLER (1 << 22)
-#define HA_NO_AUTO_INCREMENT (1 << 23)
+#define HA_PRIMARY_KEY_REQUIRED_FOR_DELETE (1ULL << 19)
+#define HA_NO_PREFIX_CHAR_KEYS (1ULL << 20)
+#define HA_CAN_FULLTEXT (1ULL << 21)
+#define HA_CAN_SQL_HANDLER (1ULL << 22)
+#define HA_NO_AUTO_INCREMENT (1ULL << 23)
/* Has automatic checksums and uses the old checksum format */
-#define HA_HAS_OLD_CHECKSUM (1 << 24)
+#define HA_HAS_OLD_CHECKSUM (1ULL << 24)
/* Table data are stored in separate files (for lower_case_table_names) */
-#define HA_FILE_BASED (1 << 26)
-#define HA_NO_VARCHAR (1 << 27)
-#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */
-#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
-#define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30)
-#define HA_NO_COPY_ON_ALTER (LL(1) << 31)
-#define HA_HAS_RECORDS (LL(1) << 32) /* records() gives exact count*/
+#define HA_FILE_BASED (1ULL << 26)
+#define HA_NO_VARCHAR (1ULL << 27)
+#define HA_CAN_BIT_FIELD (1ULL << 28) /* supports bit fields */
+#define HA_NEED_READ_RANGE_BUFFER (1ULL << 29) /* for read_multi_range */
+#define HA_ANY_INDEX_MAY_BE_UNIQUE (1ULL << 30)
+#define HA_NO_COPY_ON_ALTER (1ULL << 31)
+#define HA_HAS_RECORDS (1ULL << 32) /* records() gives exact count*/
/* Has it's own method of binlog logging */
-#define HA_HAS_OWN_BINLOGGING (LL(1) << 33)
+#define HA_HAS_OWN_BINLOGGING (1ULL << 33)
/*
Engine is capable of row-format and statement-format logging,
respectively
*/
-#define HA_BINLOG_ROW_CAPABLE (LL(1) << 34)
-#define HA_BINLOG_STMT_CAPABLE (LL(1) << 35)
+#define HA_BINLOG_ROW_CAPABLE (1ULL << 34)
+#define HA_BINLOG_STMT_CAPABLE (1ULL << 35)
/*
When a multiple key conflict happens in a REPLACE command mysql
expects the conflicts to be reported in the ascending order of
@@ -178,20 +180,20 @@ enum enum_alter_inplace_result {
This flag helps the underlying SE to inform the server that the keys are not
ordered.
*/
-#define HA_DUPLICATE_KEY_NOT_IN_ORDER (LL(1) << 36)
+#define HA_DUPLICATE_KEY_NOT_IN_ORDER (1ULL << 36)
/*
Engine supports REPAIR TABLE. Used by CHECK TABLE FOR UPGRADE if an
incompatible table is detected. If this flag is set, CHECK TABLE FOR UPGRADE
will report ER_TABLE_NEEDS_UPGRADE, otherwise ER_TABLE_NEED_REBUILD.
*/
-#define HA_CAN_REPAIR (LL(1) << 37)
+#define HA_CAN_REPAIR (1ULL << 37)
/* Has automatic checksums and uses the new checksum format */
-#define HA_HAS_NEW_CHECKSUM (LL(1) << 38)
-#define HA_CAN_VIRTUAL_COLUMNS (LL(1) << 39)
-#define HA_MRR_CANT_SORT (LL(1) << 40)
-#define HA_RECORD_MUST_BE_CLEAN_ON_WRITE (LL(1) << 41)
+#define HA_HAS_NEW_CHECKSUM (1ULL << 38)
+#define HA_CAN_VIRTUAL_COLUMNS (1ULL << 39)
+#define HA_MRR_CANT_SORT (1ULL << 40)
+#define HA_RECORD_MUST_BE_CLEAN_ON_WRITE (1ULL << 41)
/*
Table condition pushdown must be performed regardless of
@@ -204,9 +206,67 @@ enum enum_alter_inplace_result {
then the "query=..." condition must be always pushed down into storage
engine.
*/
-#define HA_MUST_USE_TABLE_CONDITION_PUSHDOWN (LL(1) << 42)
+#define HA_MUST_USE_TABLE_CONDITION_PUSHDOWN (1ULL << 42)
+
+/**
+ The handler supports read before write removal optimization
+
+ Read before write removal may be used for storage engines which support
+ write without previous read of the row to be updated. Handler returning
+ this flag must implement start_read_removal() and end_read_removal().
+ The handler may return "fake" rows constructed from the key of the row
+ asked for. This is used to optimize UPDATE and DELETE by reducing the
+  number of roundtrips between handler and storage engine.
+
+ Example:
+ UPDATE a=1 WHERE pk IN (<keys>)
+
+ mysql_update()
+ {
+ if (<conditions for starting read removal>)
+ start_read_removal()
+ -> handler returns true if read removal supported for this table/query
+
+ while(read_record("pk=<key>"))
+ -> handler returns fake row with column "pk" set to <key>
+
+ ha_update_row()
+ -> handler sends write "a=1" for row with "pk=<key>"
+
+ end_read_removal()
+ -> handler returns the number of rows actually written
+ }
+
+ @note This optimization in combination with batching may be used to
+ remove even more roundtrips.
+*/
+#define HA_READ_BEFORE_WRITE_REMOVAL (1LL << 43)
+
+/*
+ Engine supports extended fulltext API
+ */
+#define HA_CAN_FULLTEXT_EXT (1LL << 44)
/*
+ Storage engine doesn't synchronize result set with expected table contents.
+ Used by replication slave to check if it is possible to retrieve rows from
+ the table when deciding whether to do a full table scan, index scan or hash
+ scan while applying a row event.
+ */
+#define HA_READ_OUT_OF_SYNC (1LL << 45)
+
+/*
+ Storage engine supports table export using the
+ FLUSH TABLE <table_list> FOR EXPORT statement.
+ */
+#define HA_CAN_EXPORT (1LL << 46)
+
+/*
+  The handler doesn't want accesses to this table to
+ be const-table optimized
+*/
+#define HA_BLOCK_CONST_TABLE (1LL << 47)
+/*
Set of all binlog flags. Currently only contain the capabilities
flags.
*/
@@ -303,7 +363,7 @@ enum enum_alter_inplace_result {
(yes, the sum is deliberately inaccurate)
TODO remove the limit, use dynarrays
*/
-#define MAX_HA 15
+#define MAX_HA 64
/*
Use this instead of 0 as the initial value for the slot number of
@@ -336,6 +396,7 @@ enum enum_alter_inplace_result {
#define HA_LEX_CREATE_TMP_TABLE 1
#define HA_LEX_CREATE_IF_NOT_EXISTS 2
#define HA_LEX_CREATE_TABLE_LIKE 4
+#define HA_CREATE_TMP_ALTER 8
#define HA_MAX_REC_LENGTH 65535
/* Table caching type */
@@ -370,26 +431,24 @@ static const uint MYSQL_START_TRANS_OPT_READ_WRITE = 4;
enum legacy_db_type
{
- DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1,
- DB_TYPE_HASH,DB_TYPE_MISAM,DB_TYPE_PISAM,
- DB_TYPE_RMS_ISAM, DB_TYPE_HEAP, DB_TYPE_ISAM,
- DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM,
- DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB,
- DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER,
- DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
- DB_TYPE_FEDERATED_DB,
- DB_TYPE_BLACKHOLE_DB,
- DB_TYPE_PARTITION_DB,
- DB_TYPE_BINLOG,
- DB_TYPE_SOLID,
- DB_TYPE_PBXT,
- DB_TYPE_TABLE_FUNCTION,
- DB_TYPE_MEMCACHE,
- DB_TYPE_FALCON,
- DB_TYPE_MARIA,
- /** Performance schema engine. */
- DB_TYPE_PERFORMANCE_SCHEMA,
- DB_TYPE_FIRST_DYNAMIC=42,
+ /* note these numerical values are fixed and can *not* be changed */
+ DB_TYPE_UNKNOWN=0,
+ DB_TYPE_HEAP=6,
+ DB_TYPE_MYISAM=9,
+ DB_TYPE_MRG_MYISAM=10,
+ DB_TYPE_INNODB=12,
+ DB_TYPE_NDBCLUSTER=14,
+ DB_TYPE_EXAMPLE_DB=15,
+ DB_TYPE_ARCHIVE_DB=16,
+ DB_TYPE_CSV_DB=17,
+ DB_TYPE_FEDERATED_DB=18,
+ DB_TYPE_BLACKHOLE_DB=19,
+ DB_TYPE_PARTITION_DB=20,
+ DB_TYPE_BINLOG=21,
+ DB_TYPE_PBXT=23,
+ DB_TYPE_PERFORMANCE_SCHEMA=28,
+ DB_TYPE_ARIA=42,
+ DB_TYPE_FIRST_DYNAMIC=43,
DB_TYPE_DEFAULT=127 // Must be last
};
/*
@@ -690,6 +749,7 @@ enum enum_schema_tables
SCH_PARAMETERS,
SCH_PARTITIONS,
SCH_PLUGINS,
+ SCH_ALL_PLUGINS,
SCH_PROCESSLIST,
SCH_PROFILES,
SCH_REFERENTIAL_CONSTRAINTS,
@@ -714,6 +774,7 @@ enum enum_schema_tables
};
struct TABLE_SHARE;
+struct HA_CREATE_INFO;
struct st_foreign_key_info;
typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
typedef bool (stat_print_fn)(THD *thd, const char *type, uint type_len,
@@ -793,22 +854,26 @@ struct ha_index_option_struct;
enum ha_option_type { HA_OPTION_TYPE_ULL, /* unsigned long long */
HA_OPTION_TYPE_STRING, /* char * */
HA_OPTION_TYPE_ENUM, /* uint */
- HA_OPTION_TYPE_BOOL}; /* bool */
+ HA_OPTION_TYPE_BOOL, /* bool */
+ HA_OPTION_TYPE_SYSVAR};/* type of the sysval */
#define HA_xOPTION_NUMBER(name, struc, field, def, min, max, blk_siz) \
{ HA_OPTION_TYPE_ULL, name, sizeof(name)-1, \
- offsetof(struc, field), def, min, max, blk_siz, 0 }
+ offsetof(struc, field), def, min, max, blk_siz, 0, 0 }
#define HA_xOPTION_STRING(name, struc, field) \
{ HA_OPTION_TYPE_STRING, name, sizeof(name)-1, \
- offsetof(struc, field), 0, 0, 0, 0, 0 }
+ offsetof(struc, field), 0, 0, 0, 0, 0, 0}
#define HA_xOPTION_ENUM(name, struc, field, values, def) \
{ HA_OPTION_TYPE_ENUM, name, sizeof(name)-1, \
offsetof(struc, field), def, 0, \
- sizeof(values)-1, 0, values }
+ sizeof(values)-1, 0, values, 0 }
#define HA_xOPTION_BOOL(name, struc, field, def) \
{ HA_OPTION_TYPE_BOOL, name, sizeof(name)-1, \
- offsetof(struc, field), def, 0, 1, 0, 0 }
-#define HA_xOPTION_END { HA_OPTION_TYPE_ULL, 0, 0, 0, 0, 0, 0, 0, 0 }
+ offsetof(struc, field), def, 0, 1, 0, 0, 0 }
+#define HA_xOPTION_SYSVAR(name, struc, field, sysvar) \
+ { HA_OPTION_TYPE_SYSVAR, name, sizeof(name)-1, \
+ offsetof(struc, field), 0, 0, 0, 0, 0, MYSQL_SYSVAR(sysvar) }
+#define HA_xOPTION_END { HA_OPTION_TYPE_ULL, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
#define HA_TOPTION_NUMBER(name, field, def, min, max, blk_siz) \
HA_xOPTION_NUMBER(name, ha_table_option_struct, field, def, min, max, blk_siz)
@@ -818,6 +883,8 @@ enum ha_option_type { HA_OPTION_TYPE_ULL, /* unsigned long long */
HA_xOPTION_ENUM(name, ha_table_option_struct, field, values, def)
#define HA_TOPTION_BOOL(name, field, def) \
HA_xOPTION_BOOL(name, ha_table_option_struct, field, def)
+#define HA_TOPTION_SYSVAR(name, field, sysvar) \
+ HA_xOPTION_SYSVAR(name, ha_table_option_struct, field, sysvar)
#define HA_TOPTION_END HA_xOPTION_END
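
A hypothetical engine-side use of the new SYSVAR-backed option (illustration only; the option name, the thdvar and the ha_table_option_struct layout are all made up, loosely following the ha_example pattern). When CREATE TABLE does not specify the option, its value is taken from the linked system variable:

    struct ha_table_option_struct { ulonglong compression_level; };  // engine-defined

    static MYSQL_THDVAR_ULONGLONG(compression_level, PLUGIN_VAR_RQCMDARG,
                                  "Default for the COMPRESSION_LEVEL table option",
                                  NULL, NULL, 6, 0, 9, 1);

    static ha_create_table_option example_table_options[]=
    {
      HA_TOPTION_SYSVAR("compression_level", compression_level, compression_level),
      HA_TOPTION_END
    };
    /* at plugin init: hton->table_options= example_table_options; */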
#define HA_FOPTION_NUMBER(name, field, def, min, max, blk_siz) \
@@ -828,6 +895,8 @@ enum ha_option_type { HA_OPTION_TYPE_ULL, /* unsigned long long */
HA_xOPTION_ENUM(name, ha_field_option_struct, field, values, def)
#define HA_FOPTION_BOOL(name, field, def) \
HA_xOPTION_BOOL(name, ha_field_option_struct, field, def)
+#define HA_FOPTION_SYSVAR(name, field, sysvar) \
+ HA_xOPTION_SYSVAR(name, ha_field_option_struct, field, sysvar)
#define HA_FOPTION_END HA_xOPTION_END
#define HA_IOPTION_NUMBER(name, field, def, min, max, blk_siz) \
@@ -838,6 +907,8 @@ enum ha_option_type { HA_OPTION_TYPE_ULL, /* unsigned long long */
HA_xOPTION_ENUM(name, ha_index_option_struct, field, values, def)
#define HA_IOPTION_BOOL(name, field, values, def) \
HA_xOPTION_BOOL(name, ha_index_option_struct, field, values, def)
+#define HA_IOPTION_SYSVAR(name, field, sysvar) \
+ HA_xOPTION_SYSVAR(name, ha_index_option_struct, field, sysvar)
#define HA_IOPTION_END HA_xOPTION_END
typedef struct st_ha_create_table_option {
@@ -848,6 +919,7 @@ typedef struct st_ha_create_table_option {
ulonglong def_value;
ulonglong min_value, max_value, block_size;
const char *values;
+ struct st_mysql_sys_var *var;
} ha_create_table_option;
enum handler_iterator_type
@@ -1156,92 +1228,136 @@ struct handlerton
enum handler_create_iterator_result
(*create_iterator)(handlerton *hton, enum handler_iterator_type type,
struct handler_iterator *fill_this_in);
- int (*discover)(handlerton *hton, THD* thd, const char *db,
- const char *name,
- uchar **frmblob,
- size_t *frmlen);
- int (*find_files)(handlerton *hton, THD *thd,
- const char *db,
- const char *path,
- const char *wild, bool dir, List<LEX_STRING> *files);
- int (*table_exists_in_engine)(handlerton *hton, THD* thd, const char *db,
- const char *name);
-
- uint32 license; /* Flag for Engine License */
- void *data; /* Location for engines to keep personal structures */
-
/*
Optional clauses in the CREATE/ALTER TABLE
*/
ha_create_table_option *table_options; // table level options
ha_create_table_option *field_options; // these are specified per field
ha_create_table_option *index_options; // these are specified per index
-};
-/**
- The handler supports read before write removal optimization
+ /**
+ The list of extensions of files created for a single table in the
+ database directory (datadir/db_name/).
- Read before write removal may be used for storage engines which support
- write without previous read of the row to be updated. Handler returning
- this flag must implement start_read_removal() and end_read_removal().
- The handler may return "fake" rows constructed from the key of the row
- asked for. This is used to optimize UPDATE and DELETE by reducing the
- numer of roundtrips between handler and storage engine.
+ Used by open_table_error(), by the default rename_table and delete_table
+ handler methods, and by the default discovery implementation.
- Example:
- UPDATE a=1 WHERE pk IN (<keys>)
+    For engines that have more than one file name extension (separate
+    metadata, index, and/or data files), the order of elements is relevant:
+    the first element of the extensions array must be the metadata file
+    extension. This is implied by open_table_error()
+    and the default discovery implementation.
+
+    The second element must be the data file extension. This is
+    assumed by the REPAIR TABLE ... USE_FRM implementation.
+ */
+ const char **tablefile_extensions; // by default - empty list
- mysql_update()
- {
- if (<conditions for starting read removal>)
- start_read_removal()
- -> handler returns true if read removal supported for this table/query
+ /*********************************************************************
+ Table discovery API.
+ It allows the server to "discover" tables that exist in the storage
+ engine, without user issuing an explicit CREATE TABLE statement.
+ **********************************************************************/
- while(read_record("pk=<key>"))
- -> handler returns fake row with column "pk" set to <key>
+ /*
+ This method is required for any engine that supports automatic table
+    discovery; there is no default implementation.
- ha_update_row()
- -> handler sends write "a=1" for row with "pk=<key>"
+ Given a TABLE_SHARE discover_table() fills it in with a correct table
+ structure using one of the TABLE_SHARE::init_from_* methods.
- end_read_removal()
- -> handler returns the number of rows actually written
- }
+ Returns HA_ERR_NO_SUCH_TABLE if the table did not exist in the engine,
+ zero if the table was discovered successfully, or any other
+ HA_ERR_* error code as appropriate if the table existed, but the
+ discovery failed.
+ */
+ int (*discover_table)(handlerton *hton, THD* thd, TABLE_SHARE *share);
- @note This optimization in combination with batching may be used to
- remove even more roundtrips.
-*/
-#define HA_READ_BEFORE_WRITE_REMOVAL (LL(1) << 38)
+ /*
+ The discover_table_names method tells the server
+ about all tables in the specified database that the engine
+ knows about. Tables (or file names of tables) are added to
+ the provided discovered_list collector object using
+ add_table() or add_file() methods.
+ */
+ class discovered_list
+ {
+ public:
+ virtual bool add_table(const char *tname, size_t tlen) = 0;
+ virtual bool add_file(const char *fname) = 0;
+ protected: virtual ~discovered_list() {}
+ };
-/*
- Engine supports extended fulltext API
- */
-#define HA_CAN_FULLTEXT_EXT (LL(1) << 39)
+ /*
+    By default (if not implemented by the engine, but discover_table() is
+ implemented) it will perform a file-based discovery:
-/*
- Storage engine doesn't synchronize result set with expected table contents.
- Used by replication slave to check if it is possible to retrieve rows from
- the table when deciding whether to do a full table scan, index scan or hash
- scan while applying a row event.
- */
-#define HA_READ_OUT_OF_SYNC (LL(1) << 40)
+   - if tablefile_extensions[0] is not null, this will discover all tables
+ with the tablefile_extensions[0] extension.
-/*
- Storage engine supports table export using the
- FLUSH TABLE <table_list> FOR EXPORT statement.
- */
-#define HA_CAN_EXPORT (LL(1) << 41)
+ Returns 0 on success and 1 on error.
+ */
+ int (*discover_table_names)(handlerton *hton, LEX_STRING *db, MY_DIR *dir,
+ discovered_list *result);
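
A toy engine-side implementation, for illustration only (the engine and its single table are hypothetical):

    static int example_discover_names(handlerton *hton, LEX_STRING *db,
                                      MY_DIR *dir,
                                      handlerton::discovered_list *result)
    {
      /* a toy engine that has exactly one table, "t1", in every database */
      return result->add_table("t1", 3);     // non-zero result means failure
    }
    /* at plugin init: hton->discover_table_names= example_discover_names; */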
-/*
- The handler don't want accesses to this table to
- be const-table optimized
-*/
-#define HA_BLOCK_CONST_TABLE (LL(1) << 42)
+ /*
+    This method allows the server to check if a table exists without
+    the overhead of a complete discovery.
+
+    By default (if not implemented by the engine, but discover_table() is
+ implemented) it will try to perform a file-based discovery:
+
+ - if tablefile_extensions[0] is not null this will look for a file name
+ with the tablefile_extensions[0] extension.
+
+ - if tablefile_extensions[0] is null, this will resort to discover_table().
+
+ Note that resorting to discover_table() is slow and the engine
+ should probably implement its own discover_table_existence() method,
+ if its tablefile_extensions[0] is null.
-inline LEX_STRING *hton_name(const handlerton *hton)
+ Returns 1 if the table exists and 0 if it does not.
+ */
+ int (*discover_table_existence)(handlerton *hton, const char *db,
+ const char *table_name);
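
A matching toy existence check (hypothetical, illustration only), for an engine like the one sketched under discover_table_names above:

    static int example_table_existence(handlerton *hton, const char *db,
                                       const char *table_name)
    {
      /* the toy engine above has only one table, "t1", in every database */
      return strcmp(table_name, "t1") == 0;  // 1 = exists, 0 = does not
    }
    /* at plugin init: hton->discover_table_existence= example_table_existence; */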
+
+ /*
+ This is the assisted table discovery method. Unlike the fully
+ automatic discovery as above, here a user is expected to issue an
+ explicit CREATE TABLE with the appropriate table attributes to
+ "assist" the discovery of a table. But this "discovering" CREATE TABLE
+ statement will not specify the table structure - the engine discovers
+ it using this method. For example, FederatedX uses it in
+
+ CREATE TABLE t1 ENGINE=FEDERATED CONNECTION="mysql://foo/bar/t1";
+
+ Given a TABLE_SHARE discover_table_structure() fills it in with a correct
+ table structure using one of the TABLE_SHARE::init_from_* methods.
+
+    Assisted discovery works independently from automatic discovery.
+    An engine may support only assisted discovery and not the
+    automatic one, or vice versa.
+ */
+ int (*discover_table_structure)(handlerton *hton, THD* thd,
+ TABLE_SHARE *share, HA_CREATE_INFO *info);
+};
+
+
+static inline LEX_STRING *hton_name(const handlerton *hton)
{
return &(hton2plugin[hton->slot]->name);
}
+static inline handlerton *plugin_hton(plugin_ref plugin)
+{
+ return plugin_data(plugin, handlerton *);
+}
+
+static inline sys_var *find_hton_sysvar(handlerton *hton, st_mysql_sys_var *var)
+{
+ return find_plugin_sysvar(hton2plugin[hton->slot], var);
+}
+
/* Possible flags of a handlerton (there can be 32 of them) */
#define HTON_NO_FLAGS 0
@@ -1457,7 +1573,7 @@ enum enum_stats_auto_recalc { HA_STATS_AUTO_RECALC_DEFAULT= 0,
HA_STATS_AUTO_RECALC_ON,
HA_STATS_AUTO_RECALC_OFF };
-typedef struct st_ha_create_information
+struct HA_CREATE_INFO
{
CHARSET_INFO *table_charset, *default_table_charset;
LEX_CUSTRING tabledef_version;
@@ -1503,8 +1619,7 @@ typedef struct st_ha_create_information
ha_index_option_struct **indexes_option_struct; ///< array of index option structures
bool tmp_table() { return options & HA_LEX_CREATE_TMP_TABLE; }
-} HA_CREATE_INFO;
-
+};
/**
@@ -2424,7 +2539,7 @@ public:
cached_table_flags= table_flags();
}
/* ha_ methods: pubilc wrappers for private virtual API */
-
+
int ha_open(TABLE *table, const char *name, int mode, uint test_if_locked);
int ha_index_init(uint idx, bool sorted)
{
@@ -2535,8 +2650,8 @@ public:
int ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info);
- int ha_create_handler_files(const char *name, const char *old_name,
- int action_flag, HA_CREATE_INFO *info);
+ int ha_create_partitioning_metadata(const char *name, const char *old_name,
+ int action_flag);
int ha_change_partitions(HA_CREATE_INFO *create_info,
const char *path,
@@ -2989,18 +3104,7 @@ public:
virtual void free_foreign_key_create_info(char* str) {}
/** The following can be called without an open handler */
const char *table_type() const { return hton_name(ht)->str; }
- /**
- If frm_error() is called then we will use this to find out what file
- extentions exist for the storage engine. This is also used by the default
- rename_table and delete_table method in handler.cc.
-
- For engines that have two file name extentions (separate meta/index file
- and data file), the order of elements is relevant. First element of engine
- file name extentions array should be meta/index file extention. Second
- element - data file extention. This order is assumed by
- prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued.
- */
- virtual const char **bas_ext() const =0;
+ const char **bas_ext() const { return ht->tablefile_extensions; }
virtual int get_default_no_partitions(HA_CREATE_INFO *create_info)
{ return 1;}
@@ -3214,7 +3318,33 @@ public:
Pops the top if condition stack, if stack is not empty.
*/
virtual void cond_pop() { return; };
+
+ /**
+ Push down an index condition to the handler.
+
+ The server will use this method to push down a condition it wants
+ the handler to evaluate when retrieving records using a specified
+ index. The pushed index condition will only refer to fields from
+    this handler that are contained in the index (but it may also refer
+ to fields in other handlers). Before the handler evaluates the
+ condition it must read the content of the index entry into the
+ record buffer.
+
+ The handler is free to decide if and how much of the condition it
+ will take responsibility for evaluating. Based on this evaluation
+ it should return the part of the condition it will not evaluate.
+ If it decides to evaluate the entire condition it should return
+ NULL. If it decides not to evaluate any part of the condition it
+ should return a pointer to the same condition as given as argument.
+
+ @param keyno the index number to evaluate the condition on
+ @param idx_cond the condition to be evaluated by the handler
+
+ @return The part of the pushed condition that the handler decides
+ not to evaluate
+ */
virtual Item *idx_cond_push(uint keyno, Item* idx_cond) { return idx_cond; }
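
A sketch of an engine that takes responsibility for the whole pushed condition (hypothetical override, not from this patch; it uses the pushed_idx_cond / pushed_idx_cond_keyno / in_range_check_pushed_down bookkeeping members that handler already provides):

    Item *ha_example::idx_cond_push(uint keyno, Item *idx_cond)
    {
      pushed_idx_cond= idx_cond;        // remembered; checked for every index entry read
      pushed_idx_cond_keyno= keyno;
      in_range_check_pushed_down= TRUE;
      return NULL;                      // nothing left for the server to re-evaluate
    }

A real engine must, of course, also evaluate the remembered condition in its index read path before returning rows.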
+
/** Reset information about pushed index conditions */
virtual void cancel_pushed_idx_cond()
{
@@ -3520,10 +3650,9 @@ protected:
/**
Acquire the instrumented table information from a table share.
- @param share a table share
@return an instrumented table share, or NULL.
*/
- PSI_table_share *ha_table_share_psi(const TABLE_SHARE *share) const;
+ PSI_table_share *ha_table_share_psi() const;
/**
Default rename_table() and delete_table() rename/delete files with a
@@ -3720,8 +3849,8 @@ public:
virtual void drop_table(const char *name);
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
- virtual int create_handler_files(const char *name, const char *old_name,
- int action_flag, HA_CREATE_INFO *info)
+ virtual int create_partitioning_metadata(const char *name, const char *old_name,
+ int action_flag)
{ return FALSE; }
virtual int change_partitions(HA_CREATE_INFO *create_info,
@@ -3805,6 +3934,8 @@ static inline bool ha_storage_engine_is_enabled(const handlerton *db_type)
(db_type->state == SHOW_OPTION_YES) : FALSE;
}
+#define view_pseudo_hton ((handlerton *)1)
+
/* basic stuff */
int ha_init_errors(void);
int ha_init(void);
@@ -3822,8 +3953,7 @@ void ha_checkpoint_state(bool disable);
void ha_commit_checkpoint_request(void *cookie, void (*pre_hook)(void *));
int ha_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
- HA_CREATE_INFO *create_info,
- bool update_create_info);
+ HA_CREATE_INFO *create_info, LEX_CUSTRING *frm);
int ha_delete_table(THD *thd, handlerton *db_type, const char *path,
const char *db, const char *alias, bool generate_warning);
@@ -3831,16 +3961,31 @@ int ha_delete_table(THD *thd, handlerton *db_type, const char *path,
bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat);
/* discovery */
-int ha_create_table_from_engine(THD* thd, const char *db, const char *name);
-bool ha_check_if_table_exists(THD* thd, const char *db, const char *name,
- bool *exists);
-int ha_discover(THD* thd, const char* dbname, const char* name,
- uchar** frmblob, size_t* frmlen);
-int ha_find_files(THD *thd,const char *db,const char *path,
- const char *wild, bool dir, List<LEX_STRING>* files);
-int ha_table_exists_in_engine(THD* thd, const char* db, const char* name);
-bool ha_check_if_supported_system_table(handlerton *hton, const char* db,
- const char* table_name);
+#ifdef MYSQL_SERVER
+class Discovered_table_list: public handlerton::discovered_list
+{
+ THD *thd;
+ const char *wild, *wend;
+public:
+ Dynamic_array<LEX_STRING*> *tables;
+
+ Discovered_table_list(THD *thd_arg, Dynamic_array<LEX_STRING*> *tables_arg,
+ const LEX_STRING *wild_arg);
+ ~Discovered_table_list() {}
+
+ bool add_table(const char *tname, size_t tlen);
+ bool add_file(const char *fname);
+
+ void sort();
+ void remove_duplicates(); // assumes that the list is sorted
+};
+
+int ha_discover_table(THD *thd, TABLE_SHARE *share);
+int ha_discover_table_names(THD *thd, LEX_STRING *db, MY_DIR *dirp,
+ Discovered_table_list *result, bool reusable);
+bool ha_table_exists(THD *thd, const char *db, const char *table_name,
+ handlerton **hton= 0);
+#endif
/* key cache */
extern "C" int ha_init_key_cache(const char *name, KEY_CACHE *key_cache, void *);
diff --git a/sql/hostname.cc b/sql/hostname.cc
index cca5b185e38..6c3c70aa7ea 100644
--- a/sql/hostname.cc
+++ b/sql/hostname.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**
diff --git a/sql/innodb_priv.h b/sql/innodb_priv.h
index d6f7c90e93e..82d74236ff9 100644
--- a/sql/innodb_priv.h
+++ b/sql/innodb_priv.h
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifndef INNODB_PRIV_INCLUDED
#define INNODB_PRIV_INCLUDED
@@ -25,12 +25,10 @@ class THD;
int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
bool schema_table_store_record(THD *thd, TABLE *table);
void localtime_to_TIME(MYSQL_TIME *to, struct tm *from);
-bool check_global_access(THD *thd, ulong want_access);
+bool check_global_access(THD *thd, ulong want_access, bool no_errors=false);
uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length,
uint *errors);
void sql_print_error(const char *format, ...);
-
-
#endif /* INNODB_PRIV_INCLUDED */
diff --git a/sql/item.cc b/sql/item.cc
index ab7806cfd0e..ac920004b80 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1,6 +1,6 @@
/*
- Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2010, 2012, Monty Program Ab
+ Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2010, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -198,7 +198,7 @@ Hybrid_type_traits_integer::fix_length_and_dec(Item *item, Item *arg) const
void item_init(void)
{
- item_user_lock_init();
+ item_func_sleep_init();
uuid_short_init();
}
@@ -297,7 +297,7 @@ String *Item::val_string_from_decimal(String *str)
String *Item::val_string_from_date(String *str)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE | sql_mode_for_dates()) ||
+ if (get_date(&ltime, sql_mode_for_dates()) ||
str->alloc(MAX_DATE_STRING_REP_LENGTH))
{
null_value= 1;
@@ -354,7 +354,7 @@ my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE | sql_mode_for_dates()))
+ if (get_date(&ltime, sql_mode_for_dates()))
{
my_decimal_set_zero(decimal_value);
null_value= 1; // set NULL, stop processing
@@ -377,6 +377,27 @@ my_decimal *Item::val_decimal_from_time(my_decimal *decimal_value)
}
+longlong Item::val_int_from_date()
+{
+ DBUG_ASSERT(fixed == 1);
+ MYSQL_TIME ltime;
+ if (get_date(&ltime, 0))
+ return 0;
+ longlong v= TIME_to_ulonglong(&ltime);
+ return ltime.neg ? -v : v;
+}
+
+
+double Item::val_real_from_date()
+{
+ DBUG_ASSERT(fixed == 1);
+ MYSQL_TIME ltime;
+ if (get_date(&ltime, 0))
+ return 0;
+ return TIME_to_double(&ltime);
+}
+
+
double Item::val_real_from_decimal()
{
/* Note that fix_fields may not be called for Item_avg_field items */
@@ -413,10 +434,9 @@ int Item::save_time_in_field(Field *field)
int Item::save_date_in_field(Field *field)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE |
- (current_thd->variables.sql_mode &
- (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))))
+ if (get_date(&ltime, (current_thd->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES))))
return set_field_to_null_with_conversions(field, 0);
field->set_notnull();
return field->store_time_dec(&ltime, decimals);
@@ -513,7 +533,7 @@ Item::Item(THD *thd, Item *item):
with_field(item->with_field),
fixed(item->fixed),
is_autogenerated_name(item->is_autogenerated_name),
- with_subselect(item->with_subselect),
+ with_subselect(item->has_subquery()),
collation(item->collation),
cmp_context(item->cmp_context)
{
@@ -651,9 +671,12 @@ Item_result Item::cmp_type() const
case MYSQL_TYPE_GEOMETRY:
return STRING_RESULT;
case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP2:
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_TIME2:
case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_DATETIME2:
case MYSQL_TYPE_NEWDATE:
return TIME_RESULT;
};
@@ -807,10 +830,15 @@ bool Item_ident::remove_dependence_processor(uchar * arg)
bool Item_ident::collect_outer_ref_processor(uchar *param)
{
Collect_deps_prm *prm= (Collect_deps_prm *)param;
- if (depended_from &&
+ if (depended_from &&
depended_from->nest_level_base == prm->nest_level_base &&
depended_from->nest_level < prm->nest_level)
- prm->parameters->add_unique(this, &cmp_items);
+ {
+ if (prm->collect)
+ prm->parameters->add_unique(this, &cmp_items);
+ else
+ prm->count++;
+ }
return FALSE;
}
@@ -1278,7 +1306,7 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
String tmp(buff,sizeof(buff), &my_charset_bin),*res;
if (!(res=val_str(&tmp)) ||
str_to_datetime_with_warn(res->charset(), res->ptr(), res->length(),
- ltime, fuzzydate) <= MYSQL_TIMESTAMP_ERROR)
+ ltime, fuzzydate))
goto err;
break;
}
@@ -1286,11 +1314,15 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
DBUG_ASSERT(0);
}
- return 0;
+ return null_value= 0;
err:
+ /*
+    If the item was not null and conversion failed, we return a zero date
+    if allowed, otherwise NULL.
+ */
bzero((char*) ltime,sizeof(*ltime));
- return 1;
+ return null_value|= !(fuzzydate & TIME_FUZZY_DATES);
}
bool Item::get_seconds(ulonglong *sec, ulong *sec_part)
@@ -3223,11 +3255,7 @@ void Item_param::set_time(MYSQL_TIME *tm, timestamp_type time_type,
value.time= *tm;
value.time.time_type= time_type;
- if (value.time.year > 9999 || value.time.month > 12 ||
- value.time.day > 31 ||
- (time_type != MYSQL_TIMESTAMP_TIME && value.time.hour > 23) ||
- value.time.minute > 59 || value.time.second > 59 ||
- value.time.second_part > TIME_MAX_SECOND_PART)
+ if (check_datetime_range(&value.time))
{
ErrConvTime str(&value.time);
make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
@@ -3682,7 +3710,9 @@ bool Item_param::convert_str_value(THD *thd)
/* Here str_value is guaranteed to be in final_character_set_of_str_value */
max_length= str_value.numchars() * str_value.charset()->mbmaxlen;
- decimals= 0;
+
+ /* For the strings converted to numeric form within some functions */
+ decimals= NOT_FIXED_DEC;
/*
str_value_ptr is returned from val_str(). It must be not alloced
to prevent it's modification by val_str() invoker.
@@ -4015,8 +4045,8 @@ double Item_copy_string::val_real()
longlong Item_copy_string::val_int()
{
int err;
- return null_value ? LL(0) : my_strntoll(str_value.charset(),str_value.ptr(),
- str_value.length(),10, (char**) 0,
+ return null_value ? 0 : my_strntoll(str_value.charset(),str_value.ptr(),
+ str_value.length(), 10, (char**) 0,
&err);
}
@@ -4186,7 +4216,7 @@ double Item_copy_decimal::val_real()
longlong Item_copy_decimal::val_int()
{
if (null_value)
- return LL(0);
+ return 0;
else
{
longlong result;
@@ -4421,7 +4451,7 @@ static Item** find_field_in_group_list(Item *find_item, ORDER *group_list)
if (db_name && lower_case_table_names)
{
/* Convert database to lower case for comparison */
- strmake(name_buff, db_name, sizeof(name_buff)-1);
+ strmake_buf(name_buff, db_name);
my_casedn_str(files_charset_info, name_buff);
db_name= name_buff;
}
@@ -5679,19 +5709,17 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length)
break;
case MYSQL_TYPE_NEWDATE:
case MYSQL_TYPE_DATE:
- field= new Field_newdate(0, null_ptr, 0, Field::NONE, name, &my_charset_bin);
+ field= new Field_newdate(0, null_ptr, 0, Field::NONE, name);
break;
case MYSQL_TYPE_TIME:
- field= new_Field_time(0, null_ptr, 0, Field::NONE, name,
- decimals, &my_charset_bin);
+ field= new_Field_time(0, null_ptr, 0, Field::NONE, name, decimals);
break;
case MYSQL_TYPE_TIMESTAMP:
field= new_Field_timestamp(0, null_ptr, 0,
- Field::NONE, name, 0, decimals, &my_charset_bin);
+ Field::NONE, name, 0, decimals);
break;
case MYSQL_TYPE_DATETIME:
- field= new_Field_datetime(0, null_ptr, 0, Field::NONE, name,
- decimals, &my_charset_bin);
+ field= new_Field_datetime(0, null_ptr, 0, Field::NONE, name, decimals);
break;
case MYSQL_TYPE_YEAR:
field= new Field_year((uchar*) 0, max_length, null_ptr, 0, Field::NONE,
@@ -6129,17 +6157,8 @@ inline uint char_val(char X)
X-'a'+10);
}
-Item_hex_string::Item_hex_string()
-{
- hex_string_init("", 0);
-}
-Item_hex_string::Item_hex_string(const char *str, uint str_length)
-{
- hex_string_init(str, str_length);
-}
-
-void Item_hex_string::hex_string_init(const char *str, uint str_length)
+void Item_hex_constant::hex_string_init(const char *str, uint str_length)
{
max_length=(str_length+1)/2;
char *ptr=(char*) sql_alloc(max_length+1);
@@ -6163,7 +6182,7 @@ void Item_hex_string::hex_string_init(const char *str, uint str_length)
unsigned_flag= 1;
}
-longlong Item_hex_string::val_int()
+longlong Item_hex_hybrid::val_int()
{
// following assert is redundant, because fixed=1 assigned in constructor
DBUG_ASSERT(fixed == 1);
@@ -6177,17 +6196,7 @@ longlong Item_hex_string::val_int()
}
-my_decimal *Item_hex_string::val_decimal(my_decimal *decimal_value)
-{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
- ulonglong value= (ulonglong)val_int();
- int2my_decimal(E_DEC_FATAL_ERROR, value, TRUE, decimal_value);
- return (decimal_value);
-}
-
-
-int Item_hex_string::save_in_field(Field *field, bool no_conversions)
+int Item_hex_hybrid::save_in_field(Field *field, bool no_conversions)
{
field->set_notnull();
if (field->result_type() == STRING_RESULT)
@@ -6220,22 +6229,27 @@ warn:
}
-void Item_hex_string::print(String *str, enum_query_type query_type)
+void Item_hex_hybrid::print(String *str, enum_query_type query_type)
{
- char *end= (char*) str_value.ptr() + str_value.length(),
- *ptr= end - MY_MIN(str_value.length(), sizeof(longlong));
+ uint32 len= MY_MIN(str_value.length(), sizeof(longlong));
+ const char *ptr= str_value.ptr() + str_value.length() - len;
str->append("0x");
- for (; ptr != end ; ptr++)
- {
- str->append(_dig_vec_lower[((uchar) *ptr) >> 4]);
- str->append(_dig_vec_lower[((uchar) *ptr) & 0x0F]);
- }
+ str->append_hex(ptr, len);
}
-bool Item_hex_string::eq(const Item *arg, bool binary_cmp) const
+void Item_hex_string::print(String *str, enum_query_type query_type)
{
- if (arg->basic_const_item() && arg->type() == type())
+ str->append("X'");
+ str->append_hex(str_value.ptr(), str_value.length());
+ str->append("'");
+}
+
+
+bool Item_hex_constant::eq(const Item *arg, bool binary_cmp) const
+{
+ if (arg->basic_const_item() && arg->type() == type() &&
+ arg->cast_to_int_type() == cast_to_int_type())
{
if (binary_cmp)
return !stringcmp(&str_value, &arg->str_value);
@@ -6245,7 +6259,7 @@ bool Item_hex_string::eq(const Item *arg, bool binary_cmp) const
}
-Item *Item_hex_string::safe_charset_converter(CHARSET_INFO *tocs)
+Item *Item_hex_constant::safe_charset_converter(CHARSET_INFO *tocs)
{
Item_string *conv;
String tmp, *str= val_str(&tmp);
@@ -6302,6 +6316,76 @@ Item_bin_string::Item_bin_string(const char *str, uint str_length)
}
+bool Item_temporal_literal::eq(const Item *item, bool binary_cmp) const
+{
+ return
+ item->basic_const_item() && type() == item->type() &&
+ field_type() == ((Item_temporal_literal *) item)->field_type() &&
+ !my_time_compare(&cached_time,
+ &((Item_temporal_literal *) item)->cached_time);
+}
+
+
+void Item_date_literal::print(String *str, enum_query_type query_type)
+{
+ str->append("DATE'");
+ char buf[MAX_DATE_STRING_REP_LENGTH];
+ my_date_to_str(&cached_time, buf);
+ str->append(buf);
+ str->append('\'');
+}
+
+
+bool Item_date_literal::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+{
+ DBUG_ASSERT(fixed);
+ *ltime= cached_time;
+ return (null_value= check_date_with_warn(ltime, fuzzy_date,
+ MYSQL_TIMESTAMP_ERROR));
+}
+
+
+void Item_datetime_literal::print(String *str, enum_query_type query_type)
+{
+ str->append("TIMESTAMP'");
+ char buf[MAX_DATE_STRING_REP_LENGTH];
+ my_datetime_to_str(&cached_time, buf, decimals);
+ str->append(buf);
+ str->append('\'');
+}
+
+
+bool Item_datetime_literal::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+{
+ DBUG_ASSERT(fixed);
+ *ltime= cached_time;
+ return (null_value= check_date_with_warn(ltime, fuzzy_date,
+ MYSQL_TIMESTAMP_ERROR));
+}
+
+
+void Item_time_literal::print(String *str, enum_query_type query_type)
+{
+ str->append("TIME'");
+ char buf[MAX_DATE_STRING_REP_LENGTH];
+ my_time_to_str(&cached_time, buf, decimals);
+ str->append(buf);
+ str->append('\'');
+}
+
+
+bool Item_time_literal::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+{
+ DBUG_ASSERT(fixed);
+ *ltime= cached_time;
+ if (fuzzy_date & TIME_TIME_ONLY)
+ return (null_value= false);
+ return (null_value= check_date_with_warn(ltime, fuzzy_date,
+ MYSQL_TIMESTAMP_ERROR));
+}
+
+
+
/**
Pack data in buffer for sending.
*/
@@ -6403,7 +6487,7 @@ bool Item::send(Protocol *protocol, String *buffer)
case MYSQL_TYPE_TIMESTAMP:
{
MYSQL_TIME tm;
- get_date(&tm, TIME_FUZZY_DATE | sql_mode_for_dates());
+ get_date(&tm, sql_mode_for_dates());
if (!null_value)
{
if (f_type == MYSQL_TYPE_DATE)
@@ -6489,6 +6573,13 @@ Item* Item::cache_const_expr_transformer(uchar *arg)
return this;
}
+/**
+ Find Item by reference in the expression
+*/
+bool Item::find_item_processor(uchar *arg)
+{
+ return (this == ((Item *) arg));
+}
bool Item_field::send(Protocol *protocol, String *buffer)
{
@@ -8101,7 +8192,7 @@ bool Item_default_value::fix_fields(THD *thd, Item **items)
}
if (!(def_field= (Field*) sql_alloc(field_arg->field->size_of())))
goto error;
- memcpy(def_field, field_arg->field, field_arg->field->size_of());
+ memcpy((void *)def_field, (void *)field_arg->field, field_arg->field->size_of());
def_field->move_field_offset((my_ptrdiff_t)
(def_field->table->s->default_values -
def_field->table->record[0]));
@@ -8237,7 +8328,7 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items)
Field *def_field= (Field*) sql_alloc(field_arg->field->size_of());
if (!def_field)
return TRUE;
- memcpy(def_field, field_arg->field, field_arg->field->size_of());
+ memcpy((void *)def_field, (void *)field_arg->field, field_arg->field->size_of());
def_field->move_field_offset((my_ptrdiff_t)
(def_field->table->insert_values -
def_field->table->record[0]));
@@ -8628,8 +8719,8 @@ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item)
}
else
{
- field->get_date(&field_time, TIME_FUZZY_DATE | TIME_INVALID_DATES);
- item->get_date(&item_time, TIME_FUZZY_DATE | TIME_INVALID_DATES);
+ field->get_date(&field_time, TIME_INVALID_DATES);
+ item->get_date(&item_time, TIME_INVALID_DATES);
}
return my_time_compare(&field_time, &item_time);
}
@@ -8651,7 +8742,7 @@ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item)
Item_cache* Item_cache::get_cache(const Item *item)
{
- return get_cache(item, item->result_type());
+ return get_cache(item, item->cmp_type());
}
@@ -8811,7 +8902,7 @@ bool Item_cache_temporal::cache_value()
value_cached= true;
MYSQL_TIME ltime;
- if (example->get_date_result(&ltime, TIME_FUZZY_DATE))
+ if (example->get_date_result(&ltime, 0))
value=0;
else
value= pack_time(&ltime);
diff --git a/sql/item.h b/sql/item.h
index 5dd49d9c6b2..fb2948a9149 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1,8 +1,8 @@
#ifndef SQL_ITEM_INCLUDED
#define SQL_ITEM_INCLUDED
-/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2009, 2013 Monty Program Ab
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013 Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -15,7 +15,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifdef USE_PRAGMA_INTERFACE
@@ -384,6 +384,12 @@ struct Name_resolution_context: Sql_alloc
{
(*error_processor)(thd, error_processor_data);
}
+ st_select_lex *outer_select()
+ {
+ return (outer_context ?
+ outer_context->select_lex :
+ NULL);
+ }
};
@@ -548,11 +554,21 @@ typedef bool (Item::*Item_analyzer) (uchar **argp);
typedef Item* (Item::*Item_transformer) (uchar *arg);
typedef void (*Cond_traverser) (const Item *item, void *arg);
+struct st_cond_statistic;
+
+struct find_selective_predicates_list_processor_data
+{
+ TABLE *table;
+ List<st_cond_statistic> list;
+};
+
class Item_equal;
class COND_EQUAL;
class st_select_lex_unit;
+class Item_func_not;
+
class Item {
Item(const Item &); /* Prevent use of these */
void operator=(Item &);
@@ -583,7 +599,8 @@ public:
SUBSELECT_ITEM, ROW_ITEM, CACHE_ITEM, TYPE_HOLDER,
PARAM_ITEM, TRIGGER_FIELD_ITEM, DECIMAL_ITEM,
XPATH_NODESET, XPATH_NODESET_CMP,
- VIEW_FIXER_ITEM, EXPR_CACHE_ITEM};
+ VIEW_FIXER_ITEM, EXPR_CACHE_ITEM,
+ DATE_ITEM};
enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE };
@@ -940,7 +957,9 @@ public:
my_decimal *val_decimal_from_date(my_decimal *decimal_value);
my_decimal *val_decimal_from_time(my_decimal *decimal_value);
longlong val_int_from_decimal();
+ longlong val_int_from_date();
double val_real_from_decimal();
+ double val_real_from_date();
int save_time_in_field(Field *field);
int save_date_in_field(Field *field);
@@ -1041,7 +1060,7 @@ public:
Item **ref, bool skip_registered);
virtual bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
bool get_time(MYSQL_TIME *ltime)
- { return get_date(ltime, TIME_TIME_ONLY | TIME_FUZZY_DATE | TIME_INVALID_DATES); }
+ { return get_date(ltime, TIME_TIME_ONLY | TIME_INVALID_DATES); }
bool get_seconds(ulonglong *sec, ulong *sec_part);
virtual bool get_date_result(MYSQL_TIME *ltime, ulonglong fuzzydate)
{ return get_date(ltime,fuzzydate); }
@@ -1097,8 +1116,8 @@ public:
*/
virtual CHARSET_INFO *charset_for_protocol(void) const
{
- return result_type() == STRING_RESULT ? collation.collation :
- &my_charset_bin;
+ return cmp_type() == STRING_RESULT ? collation.collation :
+ &my_charset_bin;
};
virtual bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
@@ -1106,6 +1125,11 @@ public:
return (this->*processor)(arg);
}
+ virtual bool walk_top_and(Item_processor processor, uchar *arg)
+ {
+ return (this->*processor)(arg);
+ }
+
virtual Item* transform(Item_transformer transformer, uchar *arg);
/*
@@ -1153,6 +1177,7 @@ public:
virtual bool collect_item_field_processor(uchar * arg) { return 0; }
virtual bool add_field_to_set_processor(uchar * arg) { return 0; }
virtual bool find_item_in_field_list_processor(uchar *arg) { return 0; }
+ virtual bool find_item_processor(uchar *arg);
virtual bool change_context_processor(uchar *context) { return 0; }
virtual bool reset_query_id_processor(uchar *query_id_arg) { return 0; }
virtual bool is_expensive_processor(uchar *arg) { return 0; }
@@ -1167,9 +1192,12 @@ public:
virtual bool eval_not_null_tables(uchar *opt_arg) { return 0; }
virtual bool is_subquery_processor (uchar *opt_arg) { return 0; }
virtual bool limit_index_condition_pushdown_processor(uchar *opt_arg)
- {
+ {
return FALSE;
}
+ virtual bool exists2in_processor(uchar *opt_arg) { return 0; }
+ virtual bool find_selective_predicates_list_processor(uchar *opt_arg)
+ { return 0; }
/* To call bool function for all arguments */
struct bool_func_call_args
@@ -1186,6 +1214,7 @@ public:
return FALSE;
}
+
/*
The next function differs from the previous one that a bitmap to be updated
is passed as uchar *arg.
@@ -1315,7 +1344,9 @@ public:
List<Item> *parameters;
/* unit from which we count nest_level */
st_select_lex_unit *nest_level_base;
+ uint count;
int nest_level;
+ bool collect;
};
/**
Collect outer references
@@ -1444,6 +1475,12 @@ public:
Return TRUE if the item points to a column of an outer-joined table.
*/
virtual bool is_outer_field() const { DBUG_ASSERT(fixed); return FALSE; }
+
+ /**
+    Checks if this item or any of its descendants contains a subquery.
+ */
+ virtual bool has_subquery() const { return with_subselect; }
+
Item* set_expr_cache(THD *thd);
virtual Item_equal *get_item_equal() { return NULL; }
@@ -1478,6 +1515,15 @@ public:
virtual void get_cache_parameters(List<Item> &parameters) { };
virtual void mark_as_condition_AND_part(TABLE_LIST *embedding) {};
+
+  /* how many positions should be reserved for the Exists2In transformation */
+ virtual uint exists2in_reserved_items() { return 0; };
+
+ /**
+ Inform the item that it is located under a NOT, which is a top-level item.
+ */
+ virtual void under_not(Item_func_not * upper
+ __attribute__((unused))) {};
};
@@ -2724,41 +2770,221 @@ public:
};
-class Item_hex_string: public Item_basic_constant
+/**
+ Item_hex_constant -- a common class for hex literals: X'HHHH' and 0xHHHH
+*/
+class Item_hex_constant: public Item_basic_constant
{
+private:
+ void hex_string_init(const char *str, uint str_length);
public:
- Item_hex_string();
- Item_hex_string(const char *str,uint str_length);
+ Item_hex_constant()
+ {
+ hex_string_init("", 0);
+ }
+ Item_hex_constant(const char *str, uint str_length)
+ {
+ hex_string_init(str, str_length);
+ }
enum Type type() const { return VARBIN_ITEM; }
+ enum Item_result result_type () const { return STRING_RESULT; }
+ enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
+ virtual Item *safe_charset_converter(CHARSET_INFO *tocs);
+ bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
+ bool basic_const_item() const { return 1; }
+ bool eq(const Item *item, bool binary_cmp) const;
+ String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; }
+};
+
+
+/**
+ Item_hex_hybrid -- is a class implementing 0xHHHH literals, e.g.:
+ SELECT 0x3132;
+ They can behave as numbers and as strings depending on context.
+*/
+class Item_hex_hybrid: public Item_hex_constant
+{
+public:
+ Item_hex_hybrid(): Item_hex_constant() {}
+ Item_hex_hybrid(const char *str, uint str_length):
+ Item_hex_constant(str, str_length) {}
double val_real()
{
DBUG_ASSERT(fixed == 1);
- return (double) (ulonglong) Item_hex_string::val_int();
+ return (double) (ulonglong) Item_hex_hybrid::val_int();
}
longlong val_int();
- bool basic_const_item() const { return 1; }
- String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; }
- my_decimal *val_decimal(my_decimal *);
+ my_decimal *val_decimal(my_decimal *decimal_value)
+ {
+    // The following assert is redundant because fixed=1 is assigned in the constructor
+ DBUG_ASSERT(fixed == 1);
+ ulonglong value= (ulonglong) Item_hex_hybrid::val_int();
+ int2my_decimal(E_DEC_FATAL_ERROR, value, TRUE, decimal_value);
+ return decimal_value;
+ }
int save_in_field(Field *field, bool no_conversions);
- enum Item_result result_type () const { return STRING_RESULT; }
enum Item_result cast_to_int_type() const { return INT_RESULT; }
- enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
- virtual void print(String *str, enum_query_type query_type);
+ void print(String *str, enum_query_type query_type);
+};
+
+
+/**
+ Item_hex_string -- is a class implementing X'HHHH' literals, e.g.:
+ SELECT X'3132';
+ Unlike Item_hex_hybrid, X'HHHH' literals behave as strings in all contexts.
+  X'HHHH' literals are also used in replication of string constants for
+  "dangerous" charsets (sjis, cp932, big5, gbk), which can have backslash
+  (0x5C) as the second byte of a multi-byte character, so using '\' escaping
+  for these charsets is not desirable.
+*/
+class Item_hex_string: public Item_hex_constant
+{
+public:
+ Item_hex_string(): Item_hex_constant() {}
+ Item_hex_string(const char *str, uint str_length):
+ Item_hex_constant(str, str_length) {}
+ longlong val_int()
+ {
+ DBUG_ASSERT(fixed == 1);
+ return longlong_from_string_with_check(str_value.charset(),
+ str_value.ptr(),
+ str_value.ptr()+
+ str_value.length());
+ }
+ double val_real()
+ {
+ DBUG_ASSERT(fixed == 1);
+ return double_from_string_with_check(str_value.charset(),
+ str_value.ptr(),
+ str_value.ptr() +
+ str_value.length());
+ }
+ my_decimal *val_decimal(my_decimal *decimal_value)
+ {
+ return val_decimal_from_string(decimal_value);
+ }
+ int save_in_field(Field *field, bool no_conversions)
+ {
+ field->set_notnull();
+ return field->store(str_value.ptr(), str_value.length(),
+ collation.collation);
+ }
+ enum Item_result cast_to_int_type() const { return STRING_RESULT; }
+ void print(String *str, enum_query_type query_type);
+};
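+
+/*
+  Rough illustration (not part of the original comments), assuming the usual
+  MariaDB literal semantics, of how the two classes above differ:
+    SELECT 0x3132;        -- Item_hex_hybrid, string context: '12'
+    SELECT 0x3132 + 0;    -- Item_hex_hybrid, numeric context: 12594
+    SELECT X'3132';       -- Item_hex_string: always the string '12'
+    SELECT X'3132' + 0;   -- string '12' converted to a number: 12
+*/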
+
+
+class Item_bin_string: public Item_hex_hybrid
+{
+public:
+ Item_bin_string(const char *str,uint str_length);
+};
+
+
+class Item_temporal_literal :public Item_basic_constant
+{
+ //sql_mode= current_thd->variables.sql_mode &
+ // (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
+protected:
+ MYSQL_TIME cached_time;
+public:
+ /**
+ Constructor for Item_date_literal.
+ @param ltime DATE value.
+ */
+ Item_temporal_literal(MYSQL_TIME *ltime) :Item_basic_constant()
+ {
+ collation.set(&my_charset_numeric, DERIVATION_NUMERIC, MY_REPERTOIRE_ASCII);
+ decimals= 0;
+ cached_time= *ltime;
+ }
+ Item_temporal_literal(MYSQL_TIME *ltime, uint dec_arg) :Item_basic_constant()
+ {
+ collation.set(&my_charset_numeric, DERIVATION_NUMERIC, MY_REPERTOIRE_ASCII);
+ decimals= dec_arg;
+ cached_time= *ltime;
+ }
+ bool basic_const_item() const { return true; }
+ bool const_item() const { return true; }
+ enum Type type() const { return DATE_ITEM; }
bool eq(const Item *item, bool binary_cmp) const;
- virtual Item *safe_charset_converter(CHARSET_INFO *tocs);
+ enum Item_result result_type () const { return STRING_RESULT; }
+ Item_result cmp_type() const { return TIME_RESULT; }
+
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
bool check_vcol_func_processor(uchar *arg) { return FALSE;}
-private:
- void hex_string_init(const char *str, uint str_length);
+
+ String *val_str(String *str)
+ { return val_string_from_date(str); }
+ longlong val_int()
+ { return val_int_from_date(); }
+ double val_real()
+ { return val_real_from_date(); }
+ my_decimal *val_decimal(my_decimal *decimal_value)
+ { return val_decimal_from_date(decimal_value); }
+ Field *tmp_table_field(TABLE *table)
+ { return tmp_table_field_from_field_type(table, 0); }
+ int save_in_field(Field *field, bool no_conversions)
+ { return save_date_in_field(field); }
};
-class Item_bin_string: public Item_hex_string
+/**
+ DATE'2010-01-01'
+*/
+class Item_date_literal: public Item_temporal_literal
{
public:
- Item_bin_string(const char *str,uint str_length);
+ Item_date_literal(MYSQL_TIME *ltime)
+ :Item_temporal_literal(ltime)
+ {
+ max_length= MAX_DATE_WIDTH;
+ fixed= 1;
+ }
+ enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
+ void print(String *str, enum_query_type query_type);
+ bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+};
+
+
+/**
+ TIME'10:10:10'
+*/
+class Item_time_literal: public Item_temporal_literal
+{
+public:
+ Item_time_literal(MYSQL_TIME *ltime, uint dec_arg)
+ :Item_temporal_literal(ltime, dec_arg)
+ {
+ max_length= MIN_TIME_WIDTH + (decimals ? decimals + 1 : 0);
+ fixed= 1;
+ }
+ enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
+ void print(String *str, enum_query_type query_type);
+ bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+};
+
+
+/**
+ TIMESTAMP'2001-01-01 10:20:30'
+*/
+class Item_datetime_literal: public Item_temporal_literal
+{
+public:
+ Item_datetime_literal(MYSQL_TIME *ltime, uint dec_arg)
+ :Item_temporal_literal(ltime, dec_arg)
+ {
+ max_length= MAX_DATETIME_WIDTH + (decimals ? decimals + 1 : 0);
+ fixed= 1;
+ }
+ enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
+ void print(String *str, enum_query_type query_type);
+ bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
};
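+
+/*
+  Usage note (illustration only; t1 and d are placeholder names): a temporal
+  literal such as
+    SELECT * FROM t1 WHERE d = DATE'2010-01-01';
+  is converted to a MYSQL_TIME once at parse time and kept in cached_time,
+  so no per-row string-to-date conversion is needed during evaluation.
+*/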
+
+
class Item_result_field :public Item /* Item with result field */
{
public:
@@ -2957,6 +3183,14 @@ public:
DBUG_ASSERT(ref);
return (*ref)->is_outer_field();
}
+
+ /**
+ Checks if the item tree that ref points to contains a subquery.
+ */
+ virtual bool has_subquery() const
+ {
+ return (*ref)->has_subquery();
+ }
};
@@ -2983,6 +3217,13 @@ public:
alias_name_used_arg)
{}
+ bool fix_fields(THD *thd, Item **it)
+ {
+ if ((!(*ref)->fixed && (*ref)->fix_fields(thd, ref)) ||
+ (*ref)->check_cols(1))
+ return TRUE;
+ return Item_ref::fix_fields(thd, it);
+ }
void save_val(Field *to);
double val_real();
longlong val_int();
@@ -3202,7 +3443,7 @@ public:
bool subst_argument_checker(uchar **arg);
Item *equal_fields_propagator(uchar *arg);
Item *replace_equal_field(uchar *arg);
- table_map used_tables() const;
+ table_map used_tables() const;
table_map not_null_tables() const;
void update_used_tables();
bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
@@ -3569,7 +3810,7 @@ public:
}
virtual longlong val_int()
{
- return null_value ? LL(0) : cached_value;
+ return null_value ? 0 : cached_value;
}
virtual void copy();
};
@@ -4030,6 +4271,7 @@ public:
bool cache_value();
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
int save_in_field(Field *field, bool no_conversions);
+ Item_result cmp_type() const { return TIME_RESULT; }
void store_packed(longlong val_arg, Item *example);
/*
Having a clone_item method tells optimizer that this object
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index ec785eaed49..33b94ece45d 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**
@@ -241,15 +241,15 @@ static uint collect_cmp_types(Item **items, uint nitems, bool skip_nulls= FALSE)
items[i]->cmp_type() == ROW_RESULT) &&
cmp_row_type(items[0], items[i]))
return 0;
- found_types|= 1<< (uint)item_cmp_type(left_result,
- items[i]->cmp_type());
+ found_types|= 1U << (uint)item_cmp_type(left_result,
+ items[i]->cmp_type());
}
/*
Even if all right-hand items are NULLs and we are skipping them all, we need
at least one type bit in the found_type bitmask.
*/
if (skip_nulls && !found_types)
- found_types= 1 << (uint)left_result;
+ found_types= 1U << (uint)left_result;
return found_types;
}
@@ -721,31 +721,31 @@ bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type,
const char *warn_name, MYSQL_TIME *l_time)
{
bool value;
- int error;
- enum_mysql_timestamp_type timestamp_type;
- int flags= TIME_FUZZY_DATE | MODE_INVALID_DATES;
+ MYSQL_TIME_STATUS status;
+ int flags= TIME_FUZZY_DATES | MODE_INVALID_DATES;
ErrConvString err(str);
- if (warn_type == MYSQL_TIMESTAMP_TIME)
- flags|= TIME_TIME_ONLY;
-
- timestamp_type=
- str_to_datetime(str->charset(), str->ptr(), str->length(),
- l_time, flags, &error);
+ DBUG_ASSERT(warn_type != MYSQL_TIMESTAMP_TIME);
- if (timestamp_type > MYSQL_TIMESTAMP_ERROR)
+ if (!str_to_datetime(str->charset(), str->ptr(), str->length(),
+ l_time, flags, &status))
+ {
+ DBUG_ASSERT(l_time->time_type == MYSQL_TIMESTAMP_DATETIME ||
+ l_time->time_type == MYSQL_TIMESTAMP_DATE);
/*
Do not return yet, we may still want to throw a "trailing garbage"
warning.
*/
value= FALSE;
+ }
else
{
+ DBUG_ASSERT(l_time->time_type != MYSQL_TIMESTAMP_TIME);
+    DBUG_ASSERT(status.warnings != 0); // Must be set by str_to_datetime()
value= TRUE;
- error= 1; /* force warning */
}
- if (error > 0)
+ if (status.warnings > 0)
make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
&err, warn_type, warn_name);
@@ -896,7 +896,7 @@ get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
else
{
MYSQL_TIME ltime;
- uint fuzzydate= TIME_FUZZY_DATE | TIME_INVALID_DATES;
+ uint fuzzydate= TIME_FUZZY_DATES | TIME_INVALID_DATES;
if (f_type == MYSQL_TYPE_TIME)
fuzzydate|= TIME_TIME_ONLY;
if (item->get_date(&ltime, fuzzydate))
@@ -906,7 +906,8 @@ get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
}
if ((*is_null= item->null_value))
return ~(ulonglong) 0;
- if (cache_arg && item->const_item() && item->type() != Item::CACHE_ITEM)
+ if (cache_arg && item->const_item() &&
+ !(item->type() == Item::CACHE_ITEM && item->cmp_type() == TIME_RESULT))
{
Query_arena backup;
Query_arena *save_arena= thd->switch_to_arena_for_cached_items(&backup);
@@ -1434,7 +1435,7 @@ bool Item_in_optimizer::eval_not_null_tables(uchar *opt_arg)
return FALSE;
}
-bool Item_in_optimizer::fix_left(THD *thd, Item **ref)
+bool Item_in_optimizer::fix_left(THD *thd)
{
if ((!args[0]->fixed && args[0]->fix_fields(thd, args)) ||
(!cache && !(cache= Item_cache::get_cache(args[0]))))
@@ -1482,6 +1483,13 @@ bool Item_in_optimizer::fix_left(THD *thd, Item **ref)
cache->store(args[0]);
cache->cache_value();
}
+ if (args[1]->fixed)
+ {
+    /* fix_left() may be re-called just to update the left expression;
+       re-apply args[1] info so it is not overridden */
+ used_tables_cache|= args[1]->used_tables();
+ with_sum_func= with_sum_func || args[1]->with_sum_func;
+ const_item_cache= const_item_cache && args[1]->const_item();
+ }
return 0;
}
@@ -1489,15 +1497,17 @@ bool Item_in_optimizer::fix_left(THD *thd, Item **ref)
bool Item_in_optimizer::fix_fields(THD *thd, Item **ref)
{
DBUG_ASSERT(fixed == 0);
- if (fix_left(thd, ref))
+ if (fix_left(thd))
return TRUE;
if (args[0]->maybe_null)
maybe_null=1;
if (!args[1]->fixed && args[1]->fix_fields(thd, args+1))
return TRUE;
+
Item_in_subselect * sub= (Item_in_subselect *)args[1];
- if (args[0]->cols() != sub->engine->cols())
+ if (!invisible_mode() &&
+ args[0]->cols() != sub->engine->cols())
{
my_error(ER_OPERAND_COLUMNS, MYF(0), args[0]->cols());
return TRUE;
@@ -1513,6 +1523,30 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref)
return FALSE;
}
+/**
+ Check if Item_in_optimizer should work as a pass-through item for its
+ arguments.
+
+ @note
+ Item_in_optimizer should work as pass-through for
+ - subqueries that were processed by ALL/ANY->MIN/MAX rewrite
+  - subqueries that were originally EXISTS subqueries (and were converted by
+    the EXISTS->IN rewrite)
+
+  When Item_in_optimizer is not working as a pass-through, it
+ - caches its "left argument", args[0].
+  - makes adjustments to the subquery item's return value for proper NULL
+    value handling
+*/
+
+bool Item_in_optimizer::invisible_mode()
+{
+ /* MAX/MIN transformed or EXISTS->IN prepared => do nothing */
+ return (args[1]->type() != Item::SUBSELECT_ITEM ||
+ ((Item_subselect *)args[1])->substype() ==
+ Item_subselect::EXISTS_SUBS);
+}
+
/**
Add an expression cache for this subquery if it is needed
@@ -1536,8 +1570,9 @@ Item *Item_in_optimizer::expr_cache_insert_transformer(uchar *thd_arg)
{
THD *thd= (THD*) thd_arg;
DBUG_ENTER("Item_in_optimizer::expr_cache_insert_transformer");
- if (args[1]->type() != Item::SUBSELECT_ITEM)
- DBUG_RETURN(this); // MAX/MIN transformed => do nothing
+
+ if (invisible_mode())
+ DBUG_RETURN(this);
if (expr_cache)
DBUG_RETURN(expr_cache);
@@ -1560,13 +1595,16 @@ Item *Item_in_optimizer::expr_cache_insert_transformer(uchar *thd_arg)
void Item_in_optimizer::get_cache_parameters(List<Item> &parameters)
{
/* Add left expression to the list of the parameters of the subquery */
- if (args[0]->cols() == 1)
- parameters.add_unique(args[0], &cmp_items);
- else
+ if (!invisible_mode())
{
- for (uint i= 0; i < args[0]->cols(); i++)
+ if (args[0]->cols() == 1)
+ parameters.add_unique(args[0], &cmp_items);
+ else
{
- parameters.add_unique(args[0]->element_index(i), &cmp_items);
+ for (uint i= 0; i < args[0]->cols(); i++)
+ {
+ parameters.add_unique(args[0]->element_index(i), &cmp_items);
+ }
}
}
args[1]->get_cache_parameters(parameters);
@@ -1649,17 +1687,19 @@ longlong Item_in_optimizer::val_int()
DBUG_ASSERT(fixed == 1);
cache->store(args[0]);
cache->cache_value();
+ DBUG_ENTER(" Item_in_optimizer::val_int");
- if (args[1]->type() != Item::SUBSELECT_ITEM)
+ if (invisible_mode())
{
- /* MAX/MIN transformed => pass through */
longlong res= args[1]->val_int();
null_value= args[1]->null_value;
- return (res);
+ DBUG_PRINT("info", ("pass trough"));
+ DBUG_RETURN(res);
}
if (cache->null_value)
{
+ DBUG_PRINT("info", ("Left NULL..."));
/*
We're evaluating
"<outer_value_list> [NOT] IN (SELECT <inner_value_list>...)"
@@ -1731,11 +1771,11 @@ longlong Item_in_optimizer::val_int()
for (uint i= 0; i < ncols; i++)
item_subs->set_cond_guard_var(i, TRUE);
}
- return 0;
+ DBUG_RETURN(0);
}
tmp= args[1]->val_bool_result();
null_value= args[1]->null_value;
- return tmp;
+ DBUG_RETURN(tmp);
}
@@ -1786,7 +1826,8 @@ bool Item_in_optimizer::is_null()
@retval NULL if an error occurred
*/
-Item *Item_in_optimizer::transform(Item_transformer transformer, uchar *argument)
+Item *Item_in_optimizer::transform(Item_transformer transformer,
+ uchar *argument)
{
Item *new_item;
@@ -1806,7 +1847,7 @@ Item *Item_in_optimizer::transform(Item_transformer transformer, uchar *argument
if ((*args) != new_item)
current_thd->change_item_tree(args, new_item);
- if (args[1]->type() != Item::SUBSELECT_ITEM)
+ if (invisible_mode())
{
/* MAX/MIN transformed => pass through */
new_item= args[1]->transform(transformer, argument);
@@ -2824,12 +2865,12 @@ Item *Item_func_case::find_item(String *str)
cmp_type= item_cmp_type(left_result_type, args[i]->cmp_type());
DBUG_ASSERT(cmp_type != ROW_RESULT);
DBUG_ASSERT(cmp_items[(uint)cmp_type]);
- if (!(value_added_map & (1<<(uint)cmp_type)))
+ if (!(value_added_map & (1U << (uint)cmp_type)))
{
cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]);
if ((null_value=args[first_expr_num]->null_value))
return else_expr_num != -1 ? args[else_expr_num] : 0;
- value_added_map|= 1<<(uint)cmp_type;
+ value_added_map|= 1U << (uint)cmp_type;
}
if (!cmp_items[(uint)cmp_type]->cmp(args[i]) && !args[i]->null_value)
return args[i + 1];
@@ -3036,10 +3077,10 @@ void Item_func_case::fix_length_and_dec()
return;
Item *date_arg= 0;
- if (found_types & (1 << TIME_RESULT))
+ if (found_types & (1U << TIME_RESULT))
date_arg= find_date_time_item(args, arg_count, 0);
- if (found_types & (1 << STRING_RESULT))
+ if (found_types & (1U << STRING_RESULT))
{
/*
If we'll do string comparison, we also need to aggregate
@@ -3080,7 +3121,7 @@ void Item_func_case::fix_length_and_dec()
for (i= 0; i <= (uint)TIME_RESULT; i++)
{
- if (found_types & (1 << i) && !cmp_items[i])
+ if (found_types & (1U << i) && !cmp_items[i])
{
DBUG_ASSERT((Item_result)i != ROW_RESULT);
@@ -3936,7 +3977,7 @@ void Item_func_in::fix_length_and_dec()
}
for (i= 0; i <= (uint)TIME_RESULT; i++)
{
- if (found_types & 1 << i)
+ if (found_types & (1U << i))
{
(type_cnt)++;
cmp_type= (Item_result) i;
@@ -4063,14 +4104,14 @@ void Item_func_in::fix_length_and_dec()
}
else
{
- if (found_types & (1 << TIME_RESULT))
+ if (found_types & (1U << TIME_RESULT))
date_arg= find_date_time_item(args, arg_count, 0);
- if (found_types & (1 << STRING_RESULT) &&
+ if (found_types & (1U << STRING_RESULT) &&
agg_arg_charsets_for_comparison(cmp_collation, args, arg_count))
return;
for (i= 0; i <= (uint) TIME_RESULT; i++)
{
- if (found_types & (1 << i) && !cmp_items[i])
+ if (found_types & (1U << i) && !cmp_items[i])
{
if (!cmp_items[i] && !(cmp_items[i]=
cmp_item::get_comparator((Item_result)i, date_arg,
@@ -4156,12 +4197,12 @@ longlong Item_func_in::val_int()
Item_result cmp_type= item_cmp_type(left_result_type, args[i]->cmp_type());
in_item= cmp_items[(uint)cmp_type];
DBUG_ASSERT(in_item);
- if (!(value_added_map & (1 << (uint)cmp_type)))
+ if (!(value_added_map & (1U << (uint)cmp_type)))
{
in_item->store_value(args[0]);
if ((null_value= args[0]->null_value))
return 0;
- value_added_map|= 1 << (uint)cmp_type;
+ value_added_map|= 1U << (uint)cmp_type;
}
if (!in_item->cmp(args[i]) && !args[i]->null_value)
return (longlong) (!negated);
@@ -4311,7 +4352,7 @@ Item_cond::fix_fields(THD *thd, Item **ref)
with_sum_func= with_sum_func || item->with_sum_func;
with_field= with_field || item->with_field;
- with_subselect|= item->with_subselect;
+ with_subselect|= item->has_subquery();
if (item->maybe_null)
maybe_null=1;
}
@@ -4387,6 +4428,16 @@ bool Item_cond::walk(Item_processor processor, bool walk_subquery, uchar *arg)
return Item_func::walk(processor, walk_subquery, arg);
}
+bool Item_cond_and::walk_top_and(Item_processor processor, uchar *arg)
+{
+ List_iterator_fast<Item> li(list);
+ Item *item;
+ while ((item= li++))
+ if (item->walk_top_and(processor, arg))
+ return 1;
+ return Item_cond::walk_top_and(processor, arg);
+}
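+
+/*
+  Example (not from the original source): for a condition
+    a AND (b OR c) AND d
+  walk_top_and() applies the processor to a, to the whole (b OR c) item,
+  to d, and finally to the AND item itself; unlike the regular walk(), it
+  does not descend into b or c.
+*/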
+
/**
Transform an Item_cond object with a transformer callback function.
@@ -4899,6 +4950,7 @@ bool Item_func_like::fix_fields(THD *thd, Item **ref)
turboBM_compute_bad_character_shifts();
DBUG_PRINT("info",("done"));
}
+ use_sampling= ((*first == wild_many || *first == wild_one) && len > 2);
}
}
return FALSE;
@@ -4910,6 +4962,28 @@ void Item_func_like::cleanup()
Item_bool_func2::cleanup();
}
+
+bool Item_func_like::find_selective_predicates_list_processor(uchar *arg)
+{
+ find_selective_predicates_list_processor_data *data=
+ (find_selective_predicates_list_processor_data *) arg;
+ if (use_sampling && used_tables() == data->table->map)
+ {
+ COND_STATISTIC *stat= (COND_STATISTIC *)sql_alloc(sizeof(COND_STATISTIC));
+ if (!stat)
+ return TRUE;
+ stat->cond= this;
+ Item *arg0= args[0]->real_item();
+ if (args[1]->const_item() && arg0->type() == FIELD_ITEM)
+ stat->field_arg= ((Item_field *)arg0)->field;
+ else
+ stat->field_arg= NULL;
+ data->list.push_back(stat);
+ }
+ return FALSE;
+}
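+
+/*
+  Informal example (not from the original source; t1.name is a placeholder
+  column), assuming 'first' and 'len' in fix_fields() above refer to the
+  LIKE pattern: a predicate such as
+    t1.name LIKE '%foo%'
+  starts with a wildcard and is longer than two characters, so use_sampling
+  is set and the predicate is collected into data->list here, whereas
+    t1.name LIKE 'foo%'
+  is not collected, since its first pattern character is not a wildcard.
+*/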
+
+
/**
@brief Compile regular expression.
@@ -4978,7 +5052,7 @@ Item_func_regex::fix_fields(THD *thd, Item **ref)
return TRUE; /* purecov: inspected */
with_sum_func=args[0]->with_sum_func || args[1]->with_sum_func;
with_field= args[0]->with_field || args[1]->with_field;
- with_subselect|= args[0]->with_subselect | args[1]->with_subselect;
+ with_subselect= args[0]->has_subquery() || args[1]->has_subquery();
max_length= 1;
decimals= 0;
@@ -5350,6 +5424,7 @@ Item *Item_func_not::neg_transformer(THD *thd) /* NOT(x) -> x */
bool Item_func_not::fix_fields(THD *thd, Item **ref)
{
+ args[0]->under_not(this);
if (args[0]->type() == FIELD_ITEM)
{
/* replace "NOT <field>" with "<filed> == 0" */
@@ -5526,13 +5601,15 @@ Item *Item_bool_rowready_func2::negated_item()
*/
Item_equal::Item_equal(Item *f1, Item *f2, bool with_const_item)
- : Item_bool_func(), eval_item(0), cond_false(0), context_field(NULL)
+ : Item_bool_func(), eval_item(0), cond_false(0), context_field(NULL),
+ link_equal_fields(FALSE)
{
const_item_cache= 0;
with_const= with_const_item;
equal_items.push_back(f1);
equal_items.push_back(f2);
compare_as_dates= with_const_item && f2->cmp_type() == TIME_RESULT;
+ upper_levels= NULL;
}
@@ -5549,7 +5626,8 @@ Item_equal::Item_equal(Item *f1, Item *f2, bool with_const_item)
*/
Item_equal::Item_equal(Item_equal *item_equal)
- : Item_bool_func(), eval_item(0), cond_false(0), context_field(NULL)
+ : Item_bool_func(), eval_item(0), cond_false(0), context_field(NULL),
+ link_equal_fields(FALSE)
{
const_item_cache= 0;
List_iterator_fast<Item> li(item_equal->equal_items);
@@ -5561,10 +5639,11 @@ Item_equal::Item_equal(Item_equal *item_equal)
with_const= item_equal->with_const;
compare_as_dates= item_equal->compare_as_dates;
cond_false= item_equal->cond_false;
+ upper_levels= item_equal->upper_levels;
}
-/*
+/**
@brief
Add a constant item to the Item_equal object
@@ -5618,6 +5697,7 @@ void Item_equal::add_const(Item *c, Item *f)
const_item_cache= 1;
}
+
/**
@brief
Check whether a field is referred to in the multiple equality
@@ -5686,6 +5766,87 @@ void Item_equal::merge(Item_equal *item)
/**
@brief
+ Merge members of another Item_equal object into this one
+
+ @param item multiple equality whose members are to be merged
+
+ @details
+  If the Item_equal 'item' happens to share some elements with the list
+  of equal items belonging to 'this' object, then the function merges
+  the equal items from 'item' into this list.
+  If both lists contain constants and they are different, then
+  the value of the cond_false flag is set to TRUE.
+
+ @retval
+ 1 the lists of equal items in 'item' and 'this' contain common elements
+ @retval
+ 0 otherwise
+
+ @notes
+ The method 'merge' just joins the list of equal items belonging to 'item'
+ to the list of equal items belonging to this object assuming that the lists
+ are disjoint. It would be more correct to call the method 'join'.
+ The method 'merge_with_check' really merges two lists of equal items if they
+ have common members.
+*/
+
+bool Item_equal::merge_with_check(Item_equal *item)
+{
+ bool intersected= FALSE;
+ Item_equal_fields_iterator_slow fi(*this);
+ while (fi++)
+ {
+ if (item->contains(fi.get_curr_field()))
+ {
+ fi.remove();
+ intersected= TRUE;
+ }
+ }
+ if (intersected)
+ item->merge(this);
+ return intersected;
+}
+
+
+/**
+ @brief
+ Merge this object into a list of Item_equal objects
+
+ @param list the list of Item_equal objects to merge into
+
+ @details
+ If the list of equal items from 'this' object contains common members
+ with the lists of equal items belonging to Item_equal objects from 'list'
+ then all involved Item_equal objects e1,...,ek are merged into one
+ Item equal that replaces e1,...,ek in the 'list'. Otherwise this
+ Item_equal is joined to the 'list'.
+*/
+
+void Item_equal::merge_into_list(List<Item_equal> *list)
+{
+ Item_equal *item;
+ List_iterator<Item_equal> it(*list);
+ Item_equal *merge_into= NULL;
+ while((item= it++))
+ {
+ if (!merge_into)
+ {
+ if (merge_with_check(item))
+ merge_into= item;
+ }
+ else
+ {
+ if (item->merge_with_check(merge_into))
+ it.remove();
+ }
+ }
+ if (!merge_into)
+ list->push_back(this);
+}
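+
+/*
+  Worked example (illustration only): let list = { {a=b}, {c=d} } and let
+  this object be {b=c}. The first call to merge_with_check() finds the
+  common member b, so {b=c} is merged into {a=b}, giving {a=b=c}; the second
+  iteration then merges {c=d} into that object as well and removes it from
+  the list, leaving list = { {a=b=c=d} }.
+*/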
+
+
+/**
+ @brief
Order equal items of the multiple equality according to a sorting criteria
@param compare function to compare items from the equal_items list
@@ -5792,6 +5953,9 @@ bool Item_equal::fix_fields(THD *thd, Item **ref)
DBUG_ASSERT(fixed == 0);
Item_equal_fields_iterator it(*this);
Item *item;
+ Field *first_equal_field= NULL;
+ Field *last_equal_field= NULL;
+ Field *prev_equal_field= NULL;
not_null_tables_cache= used_tables_cache= 0;
const_item_cache= 0;
while ((item= it++))
@@ -5805,7 +5969,18 @@ bool Item_equal::fix_fields(THD *thd, Item **ref)
maybe_null= 1;
if (!item->get_item_equal())
item->set_item_equal(this);
+ if (link_equal_fields && item->real_item()->type() == FIELD_ITEM)
+ {
+ last_equal_field= ((Item_field *) (item->real_item()))->field;
+ if (!prev_equal_field)
+ first_equal_field= last_equal_field;
+ else
+ prev_equal_field->next_equal_field= last_equal_field;
+ prev_equal_field= last_equal_field;
+ }
}
+ if (prev_equal_field && last_equal_field != first_equal_field)
+ last_equal_field->next_equal_field= first_equal_field;
fix_length_and_dec();
fixed= 1;
return FALSE;
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index afb7bf005bb..bf65d6e7c07 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -14,7 +14,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* compare and test functions */
@@ -246,12 +246,12 @@ protected:
*/
int result_for_null_param;
public:
- Item_in_optimizer(Item *a, Item_in_subselect *b):
- Item_bool_func(a, reinterpret_cast<Item *>(b)), cache(0), expr_cache(0),
+ Item_in_optimizer(Item *a, Item *b):
+ Item_bool_func(a, b), cache(0), expr_cache(0),
save_cache(0), result_for_null_param(UNKNOWN)
{ with_subselect= true; }
bool fix_fields(THD *, Item **);
- bool fix_left(THD *thd, Item **ref);
+ bool fix_left(THD *thd);
table_map not_null_tables() const { return 0; }
bool is_null();
longlong val_int();
@@ -269,6 +269,8 @@ public:
bool is_top_level_item();
bool eval_not_null_tables(uchar *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref);
+ bool invisible_mode();
+ void reset_cache() { cache= NULL; }
};
class Comp_creator
@@ -436,8 +438,11 @@ public:
class Item_func_not :public Item_bool_func
{
+ bool abort_on_null;
public:
- Item_func_not(Item *a) :Item_bool_func(a) {}
+ Item_func_not(Item *a) :Item_bool_func(a), abort_on_null(FALSE) {}
+ virtual void top_level_item() { abort_on_null= 1; }
+ bool is_top_level_item() { return abort_on_null; }
longlong val_int();
enum Functype functype() const { return NOT_FUNC; }
const char *func_name() const { return "not"; }
@@ -495,16 +500,13 @@ class Item_func_not_all :public Item_func_not
Item_sum_hybrid *test_sum_item;
Item_maxmin_subselect *test_sub_item;
- bool abort_on_null;
public:
bool show;
Item_func_not_all(Item *a)
- :Item_func_not(a), test_sum_item(0), test_sub_item(0), abort_on_null(0),
+ :Item_func_not(a), test_sum_item(0), test_sub_item(0),
show(0)
{}
- virtual void top_level_item() { abort_on_null= 1; }
- bool is_top_level_item() { return abort_on_null; }
table_map not_null_tables() const { return 0; }
longlong val_int();
enum Functype functype() const { return NOT_ALL_FUNC; }
@@ -550,6 +552,7 @@ public:
- Otherwise, UINT_MAX
*/
uint in_equality_no;
+ virtual uint exists2in_reserved_items() { return 1; };
};
class Item_func_equal :public Item_bool_rowready_func2
@@ -1484,8 +1487,9 @@ class Item_func_like :public Item_bool_func2
enum { alphabet_size = 256 };
Item *escape_item;
-
+
bool escape_used_in_parsing;
+ bool use_sampling;
public:
int escape;
@@ -1493,7 +1497,7 @@ public:
Item_func_like(Item *a,Item *b, Item *escape_arg, bool escape_used)
:Item_bool_func2(a,b), canDoTurboBM(FALSE), pattern(0), pattern_len(0),
bmGs(0), bmBc(0), escape_item(escape_arg),
- escape_used_in_parsing(escape_used) {}
+ escape_used_in_parsing(escape_used), use_sampling(0) {}
longlong val_int();
enum Functype functype() const { return LIKE_FUNC; }
optimize_type select_optimize() const;
@@ -1501,6 +1505,8 @@ public:
const char *func_name() const { return "like"; }
bool fix_fields(THD *thd, Item **ref);
void cleanup();
+
+ bool find_selective_predicates_list_processor(uchar *arg);
};
@@ -1575,6 +1581,11 @@ public:
DBUG_ASSERT(nlist->elements);
list.prepand(nlist);
}
+ void add_at_end(List<Item> *nlist)
+ {
+ DBUG_ASSERT(nlist->elements);
+ list.concat(nlist);
+ }
bool fix_fields(THD *, Item **ref);
void fix_after_pullout(st_select_lex *new_parent, Item **ref);
@@ -1600,6 +1611,7 @@ public:
bool eval_not_null_tables(uchar *opt_arg);
};
+template <template<class> class LI, class T> class Item_equal_iterator;
/*
The class Item_equal is used to represent conjunctions of equality
@@ -1727,7 +1739,13 @@ class Item_equal: public Item_bool_func
used in the original equality.
*/
Item_field *context_field;
+
+ bool link_equal_fields;
+
public:
+
+  COND_EQUAL *upper_levels; /* multiple equalities of upper AND-levels */
+
inline Item_equal()
: Item_bool_func(), with_const(FALSE), eval_item(0), cond_false(0),
context_field(NULL)
@@ -1744,6 +1762,8 @@ public:
/** Get number of field items / references to field items in this object */
uint n_field_items() { return equal_items.elements-test(with_const); }
void merge(Item_equal *item);
+ bool merge_with_check(Item_equal *equal_item);
+ void merge_into_list(List<Item_equal> *list);
void update_const();
enum Functype functype() const { return MULT_EQUAL_FUNC; }
longlong val_int();
@@ -1759,7 +1779,10 @@ public:
CHARSET_INFO *compare_collation();
void set_context_field(Item_field *ctx_field) { context_field= ctx_field; }
+ void set_link_equal_fields(bool flag) { link_equal_fields= flag; }
friend class Item_equal_fields_iterator;
+ friend class Item_equal_iterator<List_iterator_fast,Item>;
+ friend class Item_equal_iterator<List_iterator,Item>;
friend Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
Item_equal *item_equal);
friend bool setup_sj_materialization_part1(struct st_join_table *tab);
@@ -1778,43 +1801,55 @@ public:
{
upper_levels= 0;
}
+ void copy(COND_EQUAL &cond_equal)
+ {
+ max_members= cond_equal.max_members;
+ upper_levels= cond_equal.upper_levels;
+ if (cond_equal.current_level.is_empty())
+ current_level.empty();
+ else
+ current_level= cond_equal.current_level;
+ }
};
/*
- The class Item_equal_fields_iterator is used to iterate over references
- to table/view columns from a list of equal items.
+ The template Item_equal_iterator is used to define classes
+ Item_equal_fields_iterator and Item_equal_fields_iterator_slow.
+  These are helper classes for the class Item_equal.
+  Both classes are used to iterate over references to table/view columns
+  from the list of equal items that are included in an Item_equal object.
+  The second class additionally supports removal of the current member
+  from the list while iterating.
*/
-class Item_equal_fields_iterator : public List_iterator_fast<Item>
+template <template<class> class LI, typename T> class Item_equal_iterator
+ : public LI<T>
{
+protected:
Item_equal *item_equal;
Item *curr_item;
public:
- Item_equal_fields_iterator(Item_equal &item_eq)
- :List_iterator_fast<Item> (item_eq.equal_items)
+ Item_equal_iterator<LI,T>(Item_equal &item_eq)
+ :LI<T> (item_eq.equal_items)
{
curr_item= NULL;
item_equal= &item_eq;
if (item_eq.with_const)
{
- List_iterator_fast<Item> *list_it= this;
+ LI<T> *list_it= this;
curr_item= (*list_it)++;
}
}
Item* operator++(int)
{
- List_iterator_fast<Item> *list_it= this;
+ LI<T> *list_it= this;
curr_item= (*list_it)++;
return curr_item;
}
- Item ** ref()
- {
- return List_iterator_fast<Item>::ref();
- }
void rewind(void)
{
- List_iterator_fast<Item> *list_it= this;
+ LI<T> *list_it= this;
list_it->rewind();
if (item_equal->with_const)
curr_item= (*list_it)++;
@@ -1826,6 +1861,36 @@ public:
}
};
+typedef Item_equal_iterator<List_iterator_fast,Item > Item_equal_iterator_fast;
+
+class Item_equal_fields_iterator
+ :public Item_equal_iterator_fast
+{
+public:
+ Item_equal_fields_iterator(Item_equal &item_eq)
+ :Item_equal_iterator_fast(item_eq)
+ { }
+ Item ** ref()
+ {
+ return List_iterator_fast<Item>::ref();
+ }
+};
+
+typedef Item_equal_iterator<List_iterator,Item > Item_equal_iterator_iterator_slow;
+
+class Item_equal_fields_iterator_slow
+ :public Item_equal_iterator_iterator_slow
+{
+public:
+ Item_equal_fields_iterator_slow(Item_equal &item_eq)
+ :Item_equal_iterator_iterator_slow(item_eq)
+ { }
+ void remove()
+ {
+ List_iterator<Item>::remove();
+ }
+};
+
class Item_cond_and :public Item_cond
{
@@ -1851,6 +1916,8 @@ public:
}
Item *neg_transformer(THD *thd);
void mark_as_condition_AND_part(TABLE_LIST *embedding);
+ virtual uint exists2in_reserved_items() { return list.elements; };
+ bool walk_top_and(Item_processor processor, uchar *arg);
};
inline bool is_cond_and(Item *item)
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 0a28c6414ec..962ea73f320 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -32,6 +32,7 @@
#include "set_var.h"
#include "sp_head.h"
#include "sp.h"
+#include "sql_time.h"
/*
=============================================================================
@@ -447,6 +448,19 @@ protected:
};
+class Create_func_binlog_gtid_pos : public Create_func_arg2
+{
+public:
+ virtual Item *create_2_arg(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_binlog_gtid_pos s_singleton;
+
+protected:
+ Create_func_binlog_gtid_pos() {}
+ virtual ~Create_func_binlog_gtid_pos() {}
+};
+
+
class Create_func_bit_count : public Create_func_arg1
{
public:
@@ -601,6 +615,19 @@ protected:
};
+class Create_func_decode_histogram : public Create_func_arg2
+{
+public:
+ Item *create_2_arg(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_decode_histogram s_singleton;
+
+protected:
+ Create_func_decode_histogram() {}
+ virtual ~Create_func_decode_histogram() {}
+};
+
+
class Create_func_concat_ws : public Create_native_func
{
public:
@@ -3087,6 +3114,16 @@ Create_func_bin::create_1_arg(THD *thd, Item *arg1)
}
+Create_func_binlog_gtid_pos Create_func_binlog_gtid_pos::s_singleton;
+
+Item*
+Create_func_binlog_gtid_pos::create_2_arg(THD *thd, Item *arg1, Item *arg2)
+{
+ thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION);
+ return new (thd->mem_root) Item_func_binlog_gtid_pos(arg1, arg2);
+}
+
+
Create_func_bit_count Create_func_bit_count::s_singleton;
Item*
@@ -3195,6 +3232,13 @@ Create_func_concat::create_native(THD *thd, LEX_STRING name,
return new (thd->mem_root) Item_func_concat(*item_list);
}
+Create_func_decode_histogram Create_func_decode_histogram::s_singleton;
+
+Item *
+Create_func_decode_histogram::create_2_arg(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_decode_histogram(arg1, arg2);
+}
Create_func_concat_ws Create_func_concat_ws::s_singleton;
@@ -4440,8 +4484,7 @@ Create_func_make_set::create_native(THD *thd, LEX_STRING name,
return NULL;
}
- Item *param_1= item_list->pop();
- return new (thd->mem_root) Item_func_make_set(param_1, *item_list);
+ return new (thd->mem_root) Item_func_make_set(*item_list);
}
@@ -5298,6 +5341,7 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("ATAN2") }, BUILDER(Create_func_atan)},
{ { C_STRING_WITH_LEN("BENCHMARK") }, BUILDER(Create_func_benchmark)},
{ { C_STRING_WITH_LEN("BIN") }, BUILDER(Create_func_bin)},
+ { { C_STRING_WITH_LEN("BINLOG_GTID_POS") }, BUILDER(Create_func_binlog_gtid_pos)},
{ { C_STRING_WITH_LEN("BIT_COUNT") }, BUILDER(Create_func_bit_count)},
{ { C_STRING_WITH_LEN("BIT_LENGTH") }, BUILDER(Create_func_bit_length)},
{ { C_STRING_WITH_LEN("BUFFER") }, GEOM_BUILDER(Create_func_buffer)},
@@ -5329,6 +5373,7 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("DAYOFYEAR") }, BUILDER(Create_func_dayofyear)},
{ { C_STRING_WITH_LEN("DECODE") }, BUILDER(Create_func_decode)},
{ { C_STRING_WITH_LEN("DEGREES") }, BUILDER(Create_func_degrees)},
+ { { C_STRING_WITH_LEN("DECODE_HISTOGRAM") }, BUILDER(Create_func_decode_histogram)},
{ { C_STRING_WITH_LEN("DES_DECRYPT") }, BUILDER(Create_func_des_decrypt)},
{ { C_STRING_WITH_LEN("DES_ENCRYPT") }, BUILDER(Create_func_des_encrypt)},
{ { C_STRING_WITH_LEN("DIMENSION") }, GEOM_BUILDER(Create_func_dimension)},
@@ -5751,6 +5796,69 @@ create_func_cast(THD *thd, Item *a, Cast_target cast_type,
}
+/**
+ Builder for datetime literals:
+ TIME'00:00:00', DATE'2001-01-01', TIMESTAMP'2001-01-01 00:00:00'.
+ @param thd The current thread
+ @param str Character literal
+  @param length     Length of str
+  @param cs         Character set of str
+  @param type       Type of literal (TIME, DATE or DATETIME)
+ @param send_error Whether to generate an error on failure
+*/
+
+Item *create_temporal_literal(THD *thd,
+ const char *str, uint length,
+ CHARSET_INFO *cs,
+ enum_field_types type,
+ bool send_error)
+{
+ MYSQL_TIME_STATUS status;
+ MYSQL_TIME ltime;
+ Item *item= NULL;
+ ulonglong flags= thd->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE |
+ MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES);
+
+ switch(type)
+ {
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_NEWDATE:
+ if (!str_to_datetime(cs, str, length, &ltime, flags, &status) &&
+ ltime.time_type == MYSQL_TIMESTAMP_DATE && !status.warnings)
+ item= new (thd->mem_root) Item_date_literal(&ltime);
+ break;
+ case MYSQL_TYPE_DATETIME:
+ if (!str_to_datetime(cs, str, length, &ltime, flags, &status) &&
+ ltime.time_type == MYSQL_TIMESTAMP_DATETIME && !status.warnings)
+ item= new (thd->mem_root) Item_datetime_literal(&ltime,
+ status.precision);
+ break;
+ case MYSQL_TYPE_TIME:
+ if (!str_to_time(cs, str, length, &ltime, 0, &status) &&
+ ltime.time_type == MYSQL_TIMESTAMP_TIME && !status.warnings)
+ item= new (thd->mem_root) Item_time_literal(&ltime,
+ status.precision);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ if (item)
+ return item;
+
+ if (send_error)
+ {
+ const char *typestr=
+ (type == MYSQL_TYPE_DATE) ? "DATE" :
+ (type == MYSQL_TYPE_TIME) ? "TIME" : "DATETIME";
+ ErrConvString err(str, length, thd->variables.character_set_client);
+ my_error(ER_WRONG_VALUE, MYF(0), typestr, err.ptr());
+ }
+ return NULL;
+}
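+
+/*
+  Hypothetical call sketch (not part of the original source); the charset
+  argument is assumed to be the client character set:
+    Item *item= create_temporal_literal(thd, "2001-01-01", 10,
+                                        thd->variables.character_set_client,
+                                        MYSQL_TYPE_DATE, true);
+  On success this yields an Item_date_literal; on malformed input, with
+  send_error == true, ER_WRONG_VALUE is raised and NULL is returned.
+*/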
+
+
static List<Item> *create_func_dyncol_prepare(THD *thd,
DYNCALL_CREATE_DEF **dfs,
List<DYNCALL_CREATE_DEF> &list)
diff --git a/sql/item_create.h b/sql/item_create.h
index 5ecb45e9eae..5f1a8c6006d 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -168,6 +168,11 @@ create_func_cast(THD *thd, Item *a, Cast_target cast_type,
const char *len, const char *dec,
CHARSET_INFO *cs);
+Item *create_temporal_literal(THD *thd,
+ const char *str, uint length,
+ CHARSET_INFO *cs,
+ enum_field_types type,
+ bool send_error);
int item_create_init();
void item_create_cleanup();
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 27a27727be7..69b53871f9f 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
@@ -25,7 +25,7 @@
#pragma implementation // gcc: Class implementation
#endif
-#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
+#include "sql_plugin.h"
#include "sql_priv.h"
/*
It is necessary to include set_var.h instead of item.h because there
@@ -41,6 +41,7 @@
#include "sql_acl.h" // EXECUTE_ACL
#include "mysqld.h" // LOCK_short_uuid_generator
#include "rpl_mi.h"
+#include "sql_time.h"
#include <m_ctype.h>
#include <hash.h>
#include <time.h>
@@ -52,8 +53,6 @@
#include "sp.h"
#include "set_var.h"
#include "debug_sync.h"
-#include <mysql/plugin.h>
-#include <mysql/service_thd_wait.h>
#ifdef NO_EMBEDDED_ACCESS_CHECKS
#define sp_restore_security_context(A,B) while (0) {}
@@ -225,7 +224,7 @@ Item_func::fix_fields(THD *thd, Item **ref)
with_field= with_field || item->with_field;
used_tables_cache|= item->used_tables();
const_item_cache&= item->const_item();
- with_subselect|= item->with_subselect;
+ with_subselect|= item->has_subquery();
}
}
fix_length_and_dec();
@@ -769,13 +768,14 @@ void Item_num_op::find_num_type(void)
{
hybrid_type= DECIMAL_RESULT;
result_precision();
+ fix_decimals();
}
else
{
DBUG_ASSERT(r0 == INT_RESULT && r1 == INT_RESULT);
- decimals= 0;
hybrid_type=INT_RESULT;
result_precision();
+ decimals= 0;
}
DBUG_PRINT("info", ("Type: %s",
(hybrid_type == REAL_RESULT ? "REAL_RESULT" :
@@ -1027,13 +1027,11 @@ longlong Item_func_signed::val_int_from_str(int *error)
value= cs->cset->strtoll10(cs, start, &end, error);
if (*error > 0 || end != start+ length)
{
- char err_buff[128];
- String err_tmp(err_buff,(uint32) sizeof(err_buff), system_charset_info);
- err_tmp.copy(start, length, system_charset_info);
+ ErrConvString err(res);
push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
- err_tmp.c_ptr());
+ err.ptr());
}
return value;
}
@@ -1708,6 +1706,7 @@ void Item_func_div::fix_length_and_dec()
break;
case DECIMAL_RESULT:
result_precision();
+ fix_decimals();
break;
case STRING_RESULT:
case ROW_RESULT:
@@ -1907,6 +1906,16 @@ longlong Item_func_neg::int_op()
if (args[0]->unsigned_flag &&
(ulonglong) value > (ulonglong) LONGLONG_MAX + 1)
return raise_integer_overflow();
+
+ if (value == LONGLONG_MIN)
+ {
+ if (args[0]->unsigned_flag != unsigned_flag)
+ /* negation of LONGLONG_MIN is LONGLONG_MIN. */
+ return LONGLONG_MIN;
+ else
+ return raise_integer_overflow();
+ }
+
return check_integer_overflow(-value, !args[0]->unsigned_flag && value < 0);
}
@@ -2203,7 +2212,7 @@ longlong Item_func_shift_left::val_int()
return 0;
}
null_value=0;
- return (shift < sizeof(longlong)*8 ? (longlong) res : LL(0));
+ return (shift < sizeof(longlong)*8 ? (longlong) res : 0);
}
longlong Item_func_shift_right::val_int()
@@ -2218,7 +2227,7 @@ longlong Item_func_shift_right::val_int()
return 0;
}
null_value=0;
- return (shift < sizeof(longlong)*8 ? (longlong) res : LL(0));
+ return (shift < sizeof(longlong)*8 ? (longlong) res : 0);
}
@@ -2767,6 +2776,12 @@ bool Item_func_min_max::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
min_max= res;
}
unpack_time(min_max, ltime);
+
+ if (!(fuzzy_date & TIME_TIME_ONLY) &&
+ ((null_value= check_date_with_warn(ltime, fuzzy_date,
+ MYSQL_TIMESTAMP_ERROR))))
+ return true;
+
if (compare_as_dates->field_type() == MYSQL_TYPE_DATE)
{
ltime->time_type= MYSQL_TIMESTAMP_DATE;
@@ -2830,7 +2845,7 @@ double Item_func_min_max::val_real()
if (compare_as_dates)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE))
+ if (get_date(&ltime, 0))
return 0;
return TIME_to_double(&ltime);
@@ -2859,7 +2874,7 @@ longlong Item_func_min_max::val_int()
if (compare_as_dates)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE))
+ if (get_date(&ltime, 0))
return 0;
return TIME_to_ulonglong(&ltime);
@@ -2889,7 +2904,7 @@ my_decimal *Item_func_min_max::val_decimal(my_decimal *dec)
if (compare_as_dates)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE))
+ if (get_date(&ltime, 0))
return 0;
return date2my_decimal(&ltime, dec);
@@ -3151,7 +3166,7 @@ void Item_func_find_in_set::fix_length_and_dec()
find->length(), 0);
enum_bit=0;
if (enum_value)
- enum_bit=LL(1) << (enum_value-1);
+ enum_bit=1LL << (enum_value-1);
}
}
}
@@ -3232,7 +3247,7 @@ longlong Item_func_find_in_set::val_int()
wc == (my_wc_t) separator)
return (longlong) ++position;
else
- return LL(0);
+ return 0;
}
}
return 0;
@@ -3738,120 +3753,6 @@ udf_handler::~udf_handler()
bool udf_handler::get_arguments() { return 0; }
#endif /* HAVE_DLOPEN */
-/*
-** User level locks
-*/
-
-mysql_mutex_t LOCK_user_locks;
-static HASH hash_user_locks;
-
-class User_level_lock
-{
- uchar *key;
- size_t key_length;
-
-public:
- int count;
- bool locked;
- mysql_cond_t cond;
- my_thread_id thread_id;
- void set_thread(THD *thd) { thread_id= thd->thread_id; }
-
- User_level_lock(const uchar *key_arg,uint length, ulong id)
- :key_length(length),count(1),locked(1), thread_id(id)
- {
- key= (uchar*) my_memdup(key_arg,length,MYF(0));
- mysql_cond_init(key_user_level_lock_cond, &cond, NULL);
- if (key)
- {
- if (my_hash_insert(&hash_user_locks,(uchar*) this))
- {
- my_free(key);
- key=0;
- }
- }
- }
- ~User_level_lock()
- {
- if (key)
- {
- my_hash_delete(&hash_user_locks,(uchar*) this);
- my_free(key);
- }
- mysql_cond_destroy(&cond);
- }
- inline bool initialized() { return key != 0; }
- friend void item_user_lock_release(User_level_lock *ull);
- friend uchar *ull_get_key(const User_level_lock *ull, size_t *length,
- my_bool not_used);
-};
-
-uchar *ull_get_key(const User_level_lock *ull, size_t *length,
- my_bool not_used __attribute__((unused)))
-{
- *length= ull->key_length;
- return ull->key;
-}
-
-#ifdef HAVE_PSI_INTERFACE
-static PSI_mutex_key key_LOCK_user_locks;
-
-static PSI_mutex_info all_user_mutexes[]=
-{
- { &key_LOCK_user_locks, "LOCK_user_locks", PSI_FLAG_GLOBAL}
-};
-
-static void init_user_lock_psi_keys(void)
-{
- const char* category= "sql";
- int count;
-
- if (PSI_server == NULL)
- return;
-
- count= array_elements(all_user_mutexes);
- PSI_server->register_mutex(category, all_user_mutexes, count);
-}
-#endif
-
-static bool item_user_lock_inited= 0;
-
-void item_user_lock_init(void)
-{
-#ifdef HAVE_PSI_INTERFACE
- init_user_lock_psi_keys();
-#endif
-
- mysql_mutex_init(key_LOCK_user_locks, &LOCK_user_locks, MY_MUTEX_INIT_SLOW);
- my_hash_init(&hash_user_locks,system_charset_info,
- 16,0,0,(my_hash_get_key) ull_get_key,NULL,0);
- item_user_lock_inited= 1;
-}
-
-void item_user_lock_free(void)
-{
- if (item_user_lock_inited)
- {
- item_user_lock_inited= 0;
- my_hash_free(&hash_user_locks);
- mysql_mutex_destroy(&LOCK_user_locks);
- }
-}
-
-void item_user_lock_release(User_level_lock *ull)
-{
- ull->locked=0;
- ull->thread_id= 0;
- if (--ull->count)
- mysql_cond_signal(&ull->cond);
- else
- delete ull;
-}
-
-/**
- Wait until we are at or past the given position in the master binlog
- on the slave.
-*/
longlong Item_master_pos_wait::val_int()
{
@@ -3953,7 +3854,7 @@ class Interruptible_wait
/** Time to wait before polling the connection status. */
-const ulonglong Interruptible_wait::m_interrupt_interval= 5 * ULL(1000000000);
+const ulonglong Interruptible_wait::m_interrupt_interval= 5 * 1000000000ULL;
/**
@@ -3998,7 +3899,136 @@ int Interruptible_wait::wait(mysql_cond_t *cond, mysql_mutex_t *mutex)
/**
- Get a user level lock. If the thread has an old lock this is first released.
+ For locks with EXPLICIT duration, MDL returns a new ticket
+  every time a lock is granted. This makes it possible to implement
+  recursive locks without extra allocation or additional data structures,
+  such as the one below. However, if there are too many tickets in the same
+  MDL_context, MDL_context::find_ticket() becomes slow, since it uses a
+  linear search.
+  This is why a separate structure is allocated for a user-level lock,
+  and before requesting a new lock from MDL, GET_LOCK() checks
+  thd->ull_hash to see whether such a lock has already been granted,
+  and if so, simply increments a reference counter.
+*/
+
+class User_level_lock
+{
+public:
+ MDL_ticket *lock;
+ int refs;
+};
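+
+/*
+  Illustrative session (not from the original source) showing the
+  reference-counted recursion described above; 'mylock' is an arbitrary
+  lock name:
+    SELECT GET_LOCK('mylock', 10);    -- MDL ticket acquired, refs = 1
+    SELECT GET_LOCK('mylock', 10);    -- found in thd->ull_hash, refs = 2
+    SELECT RELEASE_LOCK('mylock');    -- refs = 1, ticket kept
+    SELECT RELEASE_LOCK('mylock');    -- refs = 0, MDL lock released
+*/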
+
+
+/** Extract a hash key from User_level_lock. */
+
+uchar *ull_get_key(const uchar *ptr, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ User_level_lock *ull = (User_level_lock*) ptr;
+ MDL_key *key = ull->lock->get_key();
+ *length= key->length();
+ return (uchar*) key->ptr();
+}
+
+
+/**
+ Release all user level locks for this THD.
+*/
+
+void mysql_ull_cleanup(THD *thd)
+{
+ User_level_lock *ull;
+ DBUG_ENTER("mysql_ull_cleanup");
+
+ for (uint i= 0; i < thd->ull_hash.records; i++)
+ {
+ ull = (User_level_lock*) my_hash_element(&thd->ull_hash, i);
+ thd->mdl_context.release_lock(ull->lock);
+ my_free(ull);
+ }
+
+ my_hash_free(&thd->ull_hash);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Set explicit duration for metadata locks corresponding to
+ user level locks to protect them from being released at the end
+ of transaction.
+*/
+
+void mysql_ull_set_explicit_lock_duration(THD *thd)
+{
+ User_level_lock *ull;
+ DBUG_ENTER("mysql_ull_set_explicit_lock_duration");
+
+ for (uint i= 0; i < thd->ull_hash.records; i++)
+ {
+ ull= (User_level_lock*) my_hash_element(&thd->ull_hash, i);
+ thd->mdl_context.set_lock_duration(ull->lock, MDL_EXPLICIT);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ When MDL detects a lock wait timeout, it pushes
+ an error into the statement diagnostics area.
+ For GET_LOCK(), lock wait timeout is not an error,
+ but a special return value (0). NULL is returned in
+ case of error.
+ Capture and suppress lock wait timeout.
+*/
+
+class Lock_wait_timeout_handler: public Internal_error_handler
+{
+public:
+ Lock_wait_timeout_handler() :m_lock_wait_timeout(false) {}
+
+ bool m_lock_wait_timeout;
+
+ bool handle_condition(THD * /* thd */, uint sql_errno,
+ const char * /* sqlstate */,
+ Sql_condition::enum_warning_level /* level */,
+ const char *message,
+ Sql_condition ** /* cond_hdl */);
+};
+
+bool
+Lock_wait_timeout_handler::
+handle_condition(THD * /* thd */, uint sql_errno,
+ const char * /* sqlstate */,
+ Sql_condition::enum_warning_level /* level */,
+ const char *message,
+ Sql_condition ** /* cond_hdl */)
+{
+ if (sql_errno == ER_LOCK_WAIT_TIMEOUT)
+ {
+ m_lock_wait_timeout= true;
+ return true; /* condition handled */
+ }
+ return false;
+}
+
+
+static int ull_name_ok(String *name)
+{
+ if (!name || !name->length())
+ return 0;
+
+ if (name->length() > NAME_LEN)
+ {
+ my_error(ER_TOO_LONG_IDENT, MYF(0), name->c_ptr_safe());
+ return 0;
+ }
+ return 1;
+}
+
+
+/**
+ Get a user level lock.
@retval
1 : Got lock
@@ -4011,14 +4041,13 @@ int Interruptible_wait::wait(mysql_cond_t *cond, mysql_mutex_t *mutex)
longlong Item_func_get_lock::val_int()
{
DBUG_ASSERT(fixed == 1);
- String *res=args[0]->val_str(&value);
+ String *res= args[0]->val_str(&value);
ulonglong timeout= args[1]->val_int();
- THD *thd=current_thd;
+ THD *thd= current_thd;
User_level_lock *ull;
- int error;
- Interruptible_wait timed_cond(thd);
DBUG_ENTER("Item_func_get_lock::val_int");
+ null_value= 1;
/*
In slave thread no need to get locks, everything is serialized. Anyway
there is no way to make GET_LOCK() work on slave like it did on master
@@ -4027,103 +4056,70 @@ longlong Item_func_get_lock::val_int()
it's not guaranteed to be same as on master.
*/
if (thd->slave_thread)
+ {
+ null_value= 0;
DBUG_RETURN(1);
+ }
- mysql_mutex_lock(&LOCK_user_locks);
-
- if (!res || !res->length())
- {
- mysql_mutex_unlock(&LOCK_user_locks);
- null_value=1;
+ if (!ull_name_ok(res))
DBUG_RETURN(0);
- }
+
DBUG_PRINT("info", ("lock %.*s, thd=%ld", res->length(), res->ptr(),
(long) thd->real_id));
- null_value=0;
-
- if (thd->ull)
+ /* HASH entries are of type User_level_lock. */
+ if (! my_hash_inited(&thd->ull_hash) &&
+ my_hash_init(&thd->ull_hash, &my_charset_bin,
+ 16 /* small hash */, 0, 0, ull_get_key, NULL, 0))
{
- item_user_lock_release(thd->ull);
- thd->ull=0;
+ DBUG_RETURN(0);
}
- if (!(ull= ((User_level_lock *) my_hash_search(&hash_user_locks,
- (uchar*) res->ptr(),
- (size_t) res->length()))))
- {
- ull= new User_level_lock((uchar*) res->ptr(), (size_t) res->length(),
- thd->thread_id);
- if (!ull || !ull->initialized())
- {
- delete ull;
- mysql_mutex_unlock(&LOCK_user_locks);
- null_value=1; // Probably out of memory
- DBUG_RETURN(0);
- }
- ull->set_thread(thd);
- thd->ull=ull;
- mysql_mutex_unlock(&LOCK_user_locks);
- DBUG_PRINT("info", ("made new lock"));
- DBUG_RETURN(1); // Got new lock
- }
- ull->count++;
- DBUG_PRINT("info", ("ull->count=%d", ull->count));
+ MDL_request ull_request;
+ ull_request.init(MDL_key::USER_LOCK, res->c_ptr_safe(), "",
+ MDL_SHARED_NO_WRITE, MDL_EXPLICIT);
+ MDL_key *ull_key = &ull_request.key;
- /*
- Structure is now initialized. Try to get the lock.
- Set up control struct to allow others to abort locks.
- */
- THD_STAGE_INFO(thd, stage_user_lock);
- thd->mysys_var->current_mutex= &LOCK_user_locks;
- thd->mysys_var->current_cond= &ull->cond;
- timed_cond.set_timeout(timeout * ULL(1000000000));
-
- error= 0;
- thd_wait_begin(thd, THD_WAIT_USER_LOCK);
- while (ull->locked && !thd->killed)
+ if ((ull= (User_level_lock*)
+ my_hash_search(&thd->ull_hash, ull_key->ptr(), ull_key->length())))
{
- DBUG_PRINT("info", ("waiting on lock"));
- error= timed_cond.wait(&ull->cond, &LOCK_user_locks);
- if (error == ETIMEDOUT || error == ETIME)
- {
- DBUG_PRINT("info", ("lock wait timeout"));
- break;
- }
- error= 0;
+ /* Recursive lock */
+ ull->refs++;
+ null_value= 0;
+ DBUG_RETURN(1);
}
- thd_wait_end(thd);
- if (ull->locked)
+ Lock_wait_timeout_handler lock_wait_timeout_handler;
+ thd->push_internal_handler(&lock_wait_timeout_handler);
+ bool error= thd->mdl_context.acquire_lock(&ull_request, timeout);
+ (void) thd->pop_internal_handler();
+ if (error)
{
- if (!--ull->count)
- {
- DBUG_ASSERT(0);
- delete ull; // Should never happen
- }
- if (!error) // Killed (thd->killed != 0)
- {
- error=1;
- null_value=1; // Return NULL
- }
+ if (lock_wait_timeout_handler.m_lock_wait_timeout)
+ null_value= 0;
+ DBUG_RETURN(0);
}
- else // We got the lock
+
+ ull= (User_level_lock*) my_malloc(sizeof(User_level_lock),
+ MYF(MY_WME|MY_THREAD_SPECIFIC));
+ if (ull == NULL)
{
- ull->locked=1;
- ull->set_thread(thd);
- ull->thread_id= thd->thread_id;
- thd->ull=ull;
- error=0;
- DBUG_PRINT("info", ("got the lock"));
+ thd->mdl_context.release_lock(ull_request.ticket);
+ DBUG_RETURN(0);
}
- mysql_mutex_unlock(&LOCK_user_locks);
- mysql_mutex_lock(&thd->mysys_var->mutex);
- thd->mysys_var->current_mutex= 0;
- thd->mysys_var->current_cond= 0;
- mysql_mutex_unlock(&thd->mysys_var->mutex);
+ ull->lock= ull_request.ticket;
+ ull->refs= 1;
+
+ if (my_hash_insert(&thd->ull_hash, (uchar*) ull))
+ {
+ thd->mdl_context.release_lock(ull->lock);
+ my_free(ull);
+ DBUG_RETURN(0);
+ }
+ null_value= 0;
- DBUG_RETURN(!error ? 1 : 0);
+ DBUG_RETURN(1);
}
@@ -4138,43 +4134,86 @@ longlong Item_func_get_lock::val_int()
longlong Item_func_release_lock::val_int()
{
DBUG_ASSERT(fixed == 1);
- String *res=args[0]->val_str(&value);
- User_level_lock *ull;
- longlong result;
- THD *thd=current_thd;
+ String *res= args[0]->val_str(&value);
+ THD *thd= current_thd;
DBUG_ENTER("Item_func_release_lock::val_int");
- if (!res || !res->length())
- {
- null_value=1;
+ null_value= 1;
+
+ if (!ull_name_ok(res))
DBUG_RETURN(0);
- }
+
DBUG_PRINT("info", ("lock %.*s", res->length(), res->ptr()));
- null_value=0;
- result=0;
- mysql_mutex_lock(&LOCK_user_locks);
- if (!(ull= ((User_level_lock*) my_hash_search(&hash_user_locks,
- (const uchar*) res->ptr(),
- (size_t) res->length()))))
+ MDL_key ull_key;
+ ull_key.mdl_key_init(MDL_key::USER_LOCK, res->c_ptr_safe(), "");
+
+ User_level_lock *ull;
+
+ if (!(ull=
+ (User_level_lock*) my_hash_search(&thd->ull_hash,
+ ull_key.ptr(), ull_key.length())))
{
- null_value=1;
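+ /* The lock is not ours: return NULL if nobody holds it, 0 if another thread does. */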
+ null_value= thd->mdl_context.get_lock_owner(&ull_key) == 0;
+ DBUG_RETURN(0);
}
- else
+ null_value= 0;
+ if (--ull->refs == 0)
{
- DBUG_PRINT("info", ("ull->locked=%d ull->thread=%lu thd=%lu",
- (int) ull->locked,
- (long)ull->thread_id,
- (long)thd->thread_id));
- if (ull->locked && current_thd->thread_id == ull->thread_id)
- {
- DBUG_PRINT("info", ("release lock"));
- result=1; // Release is ok
- item_user_lock_release(ull);
- thd->ull=0;
- }
+ my_hash_delete(&thd->ull_hash, (uchar*) ull);
+ thd->mdl_context.release_lock(ull->lock);
+ my_free(ull);
}
- mysql_mutex_unlock(&LOCK_user_locks);
- DBUG_RETURN(result);
+ DBUG_RETURN(1);
+}
+
+
+/**
+ Check a user level lock.
+
+ Sets null_value=TRUE on error.
+
+ @retval
+ 1 Available
+ @retval
+ 0 Already taken, or error
+*/
+
+longlong Item_func_is_free_lock::val_int()
+{
+ DBUG_ASSERT(fixed == 1);
+ String *res= args[0]->val_str(&value);
+ THD *thd= current_thd;
+ null_value= 1;
+
+ if (!ull_name_ok(res))
+ return 0;
+
+ MDL_key ull_key;
+ ull_key.mdl_key_init(MDL_key::USER_LOCK, res->c_ptr_safe(), "");
+
+ null_value= 0;
+ return thd->mdl_context.get_lock_owner(&ull_key) == 0;
+}
+
+
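+/**
+ Check if a user level lock is in use and, if so, by whom.
+
+ Sets null_value=TRUE if the lock is free or the name is invalid.
+
+ @retval
+ connection id of the lock owner, or 0 (with null_value set) if the lock is free
+*/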
+longlong Item_func_is_used_lock::val_int()
+{
+ DBUG_ASSERT(fixed == 1);
+ String *res= args[0]->val_str(&value);
+ THD *thd= current_thd;
+ null_value= 1;
+
+ if (!ull_name_ok(res))
+ return 0;
+
+ MDL_key ull_key;
+ ull_key.mdl_key_init(MDL_key::USER_LOCK, res->c_ptr_safe(), "");
+ ulong thread_id = thd->mdl_context.get_lock_owner(&ull_key);
+ if (thread_id == 0)
+ return 0;
+
+ null_value= 0;
+ return thread_id;
}
@@ -4275,6 +4314,54 @@ void Item_func_benchmark::print(String *str, enum_query_type query_type)
}
+mysql_mutex_t LOCK_item_func_sleep;
+
+#ifdef HAVE_PSI_INTERFACE
+static PSI_mutex_key key_LOCK_item_func_sleep;
+
+static PSI_mutex_info item_func_sleep_mutexes[]=
+{
+ { &key_LOCK_item_func_sleep, "LOCK_user_locks", PSI_FLAG_GLOBAL}
+};
+
+
+static void init_item_func_sleep_psi_keys(void)
+{
+ const char* category= "sql";
+ int count;
+
+ if (PSI_server == NULL)
+ return;
+
+ count= array_elements(item_func_sleep_mutexes);
+ PSI_server->register_mutex(category, item_func_sleep_mutexes, count);
+}
+#endif
+
+static bool item_func_sleep_inited= 0;
+
+
+void item_func_sleep_init(void)
+{
+#ifdef HAVE_PSI_INTERFACE
+ init_item_func_sleep_psi_keys();
+#endif
+
+ mysql_mutex_init(key_LOCK_item_func_sleep, &LOCK_item_func_sleep, MY_MUTEX_INIT_SLOW);
+ item_func_sleep_inited= 1;
+}
+
+
+void item_func_sleep_free(void)
+{
+ if (item_func_sleep_inited)
+ {
+ item_func_sleep_inited= 0;
+ mysql_mutex_destroy(&LOCK_item_func_sleep);
+ }
+}
+
+
/** This function is just used to create tests with time gaps. */
longlong Item_func_sleep::val_int()
@@ -4303,23 +4390,23 @@ longlong Item_func_sleep::val_int()
timed_cond.set_timeout((ulonglong) (timeout * 1000000000.0));
mysql_cond_init(key_item_func_sleep_cond, &cond, NULL);
- mysql_mutex_lock(&LOCK_user_locks);
+ mysql_mutex_lock(&LOCK_item_func_sleep);
THD_STAGE_INFO(thd, stage_user_sleep);
- thd->mysys_var->current_mutex= &LOCK_user_locks;
+ thd->mysys_var->current_mutex= &LOCK_item_func_sleep;
thd->mysys_var->current_cond= &cond;
error= 0;
thd_wait_begin(thd, THD_WAIT_SLEEP);
while (!thd->killed)
{
- error= timed_cond.wait(&cond, &LOCK_user_locks);
+ error= timed_cond.wait(&cond, &LOCK_item_func_sleep);
if (error == ETIMEDOUT || error == ETIME)
break;
error= 0;
}
thd_wait_end(thd);
- mysql_mutex_unlock(&LOCK_user_locks);
+ mysql_mutex_unlock(&LOCK_item_func_sleep);
mysql_mutex_lock(&thd->mysys_var->mutex);
thd->mysys_var->current_mutex= 0;
thd->mysys_var->current_cond= 0;
@@ -4655,7 +4742,7 @@ double user_var_entry::val_real(bool *null_value)
longlong user_var_entry::val_int(bool *null_value) const
{
if ((*null_value= (value == 0)))
- return LL(0);
+ return 0;
switch (type) {
case REAL_RESULT:
@@ -4679,7 +4766,7 @@ longlong user_var_entry::val_int(bool *null_value) const
DBUG_ASSERT(0); // Impossible
break;
}
- return LL(0); // Impossible
+ return 0; // Impossible
}
@@ -4817,7 +4904,7 @@ void Item_func_set_user_var::save_item_result(Item *item)
{
DBUG_ENTER("Item_func_set_user_var::save_item_result");
- switch (cached_result_type) {
+ switch (args[0]->result_type()) {
case REAL_RESULT:
save_result.vreal= item->val_result();
break;
@@ -5172,7 +5259,7 @@ longlong Item_func_get_user_var::val_int()
{
DBUG_ASSERT(fixed == 1);
if (!var_entry)
- return LL(0); // No such variable
+ return 0; // No such variable
return (var_entry->val_int(&null_value));
}
@@ -5384,7 +5471,7 @@ enum Item_result Item_func_get_user_var::result_type() const
void Item_func_get_user_var::print(String *str, enum_query_type query_type)
{
str->append(STRING_WITH_LEN("(@"));
- str->append(name.str,name.length);
+ append_identifier(current_thd, str, name.str, name.length);
str->append(')');
}
@@ -5654,28 +5741,18 @@ enum_field_types Item_func_get_system_var::field_type() const
}
-/*
- Uses var, var_type, component, cache_present, used_query_id, thd,
- cached_llval, null_value, cached_null_value
-*/
-#define get_sys_var_safe(type) \
-do { \
- type value; \
- mysql_mutex_lock(&LOCK_global_system_variables); \
- value= *(type*) var->value_ptr(thd, var_type, &component); \
- mysql_mutex_unlock(&LOCK_global_system_variables); \
- cache_present |= GET_SYS_VAR_CACHE_LONG; \
- used_query_id= thd->query_id; \
- cached_llval= null_value ? 0 : (longlong) value; \
- cached_null_value= null_value; \
- return cached_llval; \
-} while (0)
-
-
longlong Item_func_get_system_var::val_int()
{
THD *thd= current_thd;
+ DBUG_EXECUTE_IF("simulate_non_gtid_aware_master",
+ {
+ if (0 == strcmp("gtid_domain_id", var->name.str))
+ {
+ my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name.str);
+ return 0;
+ }
+ });
if (cache_present && thd->query_id == used_query_id)
{
if (cache_present & GET_SYS_VAR_CACHE_LONG)
@@ -5705,51 +5782,11 @@ longlong Item_func_get_system_var::val_int()
}
}
- switch (var->show_type())
- {
- case SHOW_SINT: get_sys_var_safe (int);
- case SHOW_SLONG: get_sys_var_safe (long);
- case SHOW_SLONGLONG:get_sys_var_safe (longlong);
- case SHOW_UINT: get_sys_var_safe (uint);
- case SHOW_ULONG: get_sys_var_safe (ulong);
- case SHOW_ULONGLONG:get_sys_var_safe (ulonglong);
- case SHOW_HA_ROWS: get_sys_var_safe (ha_rows);
- case SHOW_BOOL: get_sys_var_safe (bool);
- case SHOW_MY_BOOL: get_sys_var_safe (my_bool);
- case SHOW_DOUBLE:
- {
- double dval= val_real();
-
- used_query_id= thd->query_id;
- cached_llval= (longlong) dval;
- cache_present|= GET_SYS_VAR_CACHE_LONG;
- return cached_llval;
- }
- case SHOW_CHAR:
- case SHOW_CHAR_PTR:
- case SHOW_LEX_STRING:
- {
- String *str_val= val_str(NULL);
-
- if (str_val && str_val->length())
- cached_llval= longlong_from_string_with_check (system_charset_info,
- str_val->c_ptr(),
- str_val->c_ptr() +
- str_val->length());
- else
- {
- null_value= TRUE;
- cached_llval= 0;
- }
-
- cache_present|= GET_SYS_VAR_CACHE_LONG;
- return cached_llval;
- }
-
- default:
- my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name.str);
- return 0; // keep the compiler happy
- }
+ cached_llval= var->val_int(&null_value, thd, var_type, &component);
+ cache_present |= GET_SYS_VAR_CACHE_LONG;
+ used_query_id= thd->query_id;
+ cached_null_value= null_value;
+ return cached_llval;
}
@@ -5782,61 +5819,10 @@ String* Item_func_get_system_var::val_str(String* str)
}
}
- str= &cached_strval;
- switch (var->show_type())
- {
- case SHOW_CHAR:
- case SHOW_CHAR_PTR:
- case SHOW_LEX_STRING:
- {
- mysql_mutex_lock(&LOCK_global_system_variables);
- char *cptr= var->show_type() == SHOW_CHAR ?
- (char*) var->value_ptr(thd, var_type, &component) :
- *(char**) var->value_ptr(thd, var_type, &component);
- if (cptr)
- {
- size_t len= var->show_type() == SHOW_LEX_STRING ?
- ((LEX_STRING*)(var->value_ptr(thd, var_type, &component)))->length :
- strlen(cptr);
- if (str->copy(cptr, len, collation.collation))
- {
- null_value= TRUE;
- str= NULL;
- }
- }
- else
- {
- null_value= TRUE;
- str= NULL;
- }
- mysql_mutex_unlock(&LOCK_global_system_variables);
- break;
- }
-
- case SHOW_SINT:
- case SHOW_SLONG:
- case SHOW_SLONGLONG:
- case SHOW_UINT:
- case SHOW_ULONG:
- case SHOW_ULONGLONG:
- case SHOW_HA_ROWS:
- case SHOW_BOOL:
- case SHOW_MY_BOOL:
- str->set (val_int(), collation.collation);
- break;
- case SHOW_DOUBLE:
- str->set_real (val_real(), decimals, collation.collation);
- break;
-
- default:
- my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name.str);
- str= NULL;
- break;
- }
-
+ str= var->val_str(&cached_strval, thd, var_type, &component);
cache_present|= GET_SYS_VAR_CACHE_STRING;
used_query_id= thd->query_id;
- cached_null_value= null_value;
+ cached_null_value= null_value= !str;
return str;
}
@@ -5874,58 +5860,11 @@ double Item_func_get_system_var::val_real()
}
}
- switch (var->show_type())
- {
- case SHOW_DOUBLE:
- mysql_mutex_lock(&LOCK_global_system_variables);
- cached_dval= *(double*) var->value_ptr(thd, var_type, &component);
- mysql_mutex_unlock(&LOCK_global_system_variables);
- used_query_id= thd->query_id;
- cached_null_value= null_value;
- if (null_value)
- cached_dval= 0;
- cache_present|= GET_SYS_VAR_CACHE_DOUBLE;
- return cached_dval;
- case SHOW_CHAR:
- case SHOW_LEX_STRING:
- case SHOW_CHAR_PTR:
- {
- mysql_mutex_lock(&LOCK_global_system_variables);
- char *cptr= var->show_type() == SHOW_CHAR ?
- (char*) var->value_ptr(thd, var_type, &component) :
- *(char**) var->value_ptr(thd, var_type, &component);
- if (cptr)
- cached_dval= double_from_string_with_check (system_charset_info,
- cptr, cptr + strlen (cptr));
- else
- {
- null_value= TRUE;
- cached_dval= 0;
- }
- mysql_mutex_unlock(&LOCK_global_system_variables);
- used_query_id= thd->query_id;
- cached_null_value= null_value;
- cache_present|= GET_SYS_VAR_CACHE_DOUBLE;
- return cached_dval;
- }
- case SHOW_SINT:
- case SHOW_SLONG:
- case SHOW_SLONGLONG:
- case SHOW_UINT:
- case SHOW_ULONG:
- case SHOW_ULONGLONG:
- case SHOW_HA_ROWS:
- case SHOW_BOOL:
- case SHOW_MY_BOOL:
- cached_dval= (double) val_int();
- cache_present|= GET_SYS_VAR_CACHE_DOUBLE;
- used_query_id= thd->query_id;
- cached_null_value= null_value;
- return cached_dval;
- default:
- my_error(ER_VAR_CANT_BE_READ, MYF(0), var->name.str);
- return 0;
- }
+ cached_dval= var->val_real(&null_value, thd, var_type, &component);
+ cache_present |= GET_SYS_VAR_CACHE_DOUBLE;
+ used_query_id= thd->query_id;
+ cached_null_value= null_value;
+ return cached_dval;
}
@@ -6011,15 +5950,12 @@ void Item_func_match::init_search(bool no_order)
{
DBUG_ENTER("Item_func_match::init_search");
+ if (!table->file->get_table()) // the handler isn't opened yet
+ DBUG_VOID_RETURN;
+
/* Check if init_search() has been called before */
if (ft_handler)
{
- /*
- We should reset ft_handler as it is cleaned up
- on destruction of FT_SELECT object
- (necessary in case of re-execution of subquery).
- TODO: FT_SELECT should not clean up ft_handler.
- */
if (join_key)
table->file->ft_handler= ft_handler;
DBUG_VOID_RETURN;
@@ -6028,10 +5964,10 @@ void Item_func_match::init_search(bool no_order)
if (key == NO_SUCH_KEY)
{
List<Item> fields;
- fields.push_back(new Item_string(" ",1, cmp_collation.collation));
- for (uint i=1; i < arg_count; i++)
+ fields.push_back(new Item_string(" ", 1, cmp_collation.collation));
+ for (uint i= 1; i < arg_count; i++)
fields.push_back(args[i]);
- concat_ws=new Item_func_concat_ws(fields);
+ concat_ws= new Item_func_concat_ws(fields);
/*
Above function used only to get value and do not need fix_fields for it:
Item_string - basic constant
@@ -6043,10 +5979,10 @@ void Item_func_match::init_search(bool no_order)
if (master)
{
- join_key=master->join_key=join_key|master->join_key;
+ join_key= master->join_key= join_key | master->join_key;
master->init_search(no_order);
- ft_handler=master->ft_handler;
- join_key=master->join_key;
+ ft_handler= master->ft_handler;
+ join_key= master->join_key;
DBUG_VOID_RETURN;
}
@@ -6056,7 +5992,7 @@ void Item_func_match::init_search(bool no_order)
if (!(ft_tmp=key_item()->val_str(&value)))
{
ft_tmp= &value;
- value.set("",0,cmp_collation.collation);
+ value.set("", 0, cmp_collation.collation);
}
if (ft_tmp->charset() != cmp_collation.collation)
@@ -6069,7 +6005,11 @@ void Item_func_match::init_search(bool no_order)
if (join_key && !no_order)
flags|=FT_SORTED;
- ft_handler=table->file->ft_init_ext(flags, key, ft_tmp);
+
+ if (key != NO_SUCH_KEY)
+ THD_STAGE_INFO(table->in_use, stage_fulltext_initialization);
+
+ ft_handler= table->file->ft_init_ext(flags, key, ft_tmp);
if (join_key)
table->file->ft_handler=ft_handler;
@@ -6129,7 +6069,7 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref)
table=((Item_field *)item)->field->table;
if (!(table->file->ha_table_flags() & HA_CAN_FULLTEXT))
{
- my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0));
+ my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0), table->file->table_type());
return 1;
}
table->fulltext_searched=1;
@@ -6143,6 +6083,13 @@ bool Item_func_match::fix_index()
uint ft_to_key[MAX_KEY], ft_cnt[MAX_KEY], fts=0, keynr;
uint max_cnt=0, mkeys=0, i;
+ /*
+ Skip the index setup if the item has not been fixed with fix_fields().
+ */
+ if (!fixed)
+ return false;
+
if (key == NO_SUCH_KEY)
return 0;
@@ -6350,61 +6297,6 @@ Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name,
}
-/**
- Check a user level lock.
-
- Sets null_value=TRUE on error.
-
- @retval
- 1 Available
- @retval
- 0 Already taken, or error
-*/
-
-longlong Item_func_is_free_lock::val_int()
-{
- DBUG_ASSERT(fixed == 1);
- String *res=args[0]->val_str(&value);
- User_level_lock *ull;
-
- null_value=0;
- if (!res || !res->length())
- {
- null_value=1;
- return 0;
- }
-
- mysql_mutex_lock(&LOCK_user_locks);
- ull= (User_level_lock *) my_hash_search(&hash_user_locks, (uchar*) res->ptr(),
- (size_t) res->length());
- mysql_mutex_unlock(&LOCK_user_locks);
- if (!ull || !ull->locked)
- return 1;
- return 0;
-}
-
-longlong Item_func_is_used_lock::val_int()
-{
- DBUG_ASSERT(fixed == 1);
- String *res=args[0]->val_str(&value);
- User_level_lock *ull;
-
- null_value=1;
- if (!res || !res->length())
- return 0;
-
- mysql_mutex_lock(&LOCK_user_locks);
- ull= (User_level_lock *) my_hash_search(&hash_user_locks, (uchar*) res->ptr(),
- (size_t) res->length());
- mysql_mutex_unlock(&LOCK_user_locks);
- if (!ull || !ull->locked)
- return 0;
-
- null_value=0;
- return ull->thread_id;
-}
-
-
longlong Item_func_row_count::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -6855,7 +6747,7 @@ ulonglong uuid_value;
void uuid_short_init()
{
- uuid_value= ((((ulonglong) server_id) << 56) +
+ uuid_value= ((((ulonglong) global_system_variables.server_id) << 56) +
(((ulonglong) server_start_time) << 24));
}
diff --git a/sql/item_func.h b/sql/item_func.h
index 28cda8c03d0..71225c71639 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -14,7 +14,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* Function items used by mysql */
@@ -431,6 +431,13 @@ public:
void fix_num_length_and_dec();
virtual void find_num_type()= 0; /* To be called from fix_length_and_dec */
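+ /* If the scale is still unspecified, cap it at max_length - 1. */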
+ inline void fix_decimals()
+ {
+ DBUG_ASSERT(result_type() == DECIMAL_RESULT);
+ if (decimals == NOT_FIXED_DEC)
+ set_if_smaller(decimals, max_length - 1);
+ }
+
double val_real();
longlong val_int();
my_decimal *val_decimal(my_decimal *);
@@ -545,14 +552,17 @@ public:
class Item_func_signed :public Item_int_func
{
public:
- Item_func_signed(Item *a) :Item_int_func(a) {}
+ Item_func_signed(Item *a) :Item_int_func(a)
+ {
+ unsigned_flag= 0;
+ }
const char *func_name() const { return "cast_as_signed"; }
longlong val_int();
longlong val_int_from_str(int *error);
void fix_length_and_dec()
{
- fix_char_length(MY_MIN(args[0]->max_char_length(), MY_INT64_NUM_DECIMAL_DIGITS));
- unsigned_flag=0;
+ fix_char_length(MY_MIN(args[0]->max_char_length(),
+ MY_INT64_NUM_DECIMAL_DIGITS));
}
virtual void print(String *str, enum_query_type query_type);
uint decimal_precision() const { return args[0]->decimal_precision(); }
@@ -562,14 +572,11 @@ public:
class Item_func_unsigned :public Item_func_signed
{
public:
- Item_func_unsigned(Item *a) :Item_func_signed(a) {}
- const char *func_name() const { return "cast_as_unsigned"; }
- void fix_length_and_dec()
+ Item_func_unsigned(Item *a) :Item_func_signed(a)
{
- fix_char_length(MY_MIN(args[0]->max_char_length(),
- DECIMAL_MAX_PRECISION + 2));
- unsigned_flag=1;
+ unsigned_flag= 1;
}
+ const char *func_name() const { return "cast_as_unsigned"; }
longlong val_int();
virtual void print(String *str, enum_query_type query_type);
};
@@ -1253,6 +1260,9 @@ public:
};
+void item_func_sleep_init(void);
+void item_func_sleep_free(void);
+
class Item_func_sleep :public Item_int_func
{
public:
@@ -1502,14 +1512,8 @@ public:
#endif /* HAVE_DLOPEN */
-/*
-** User level locks
-*/
-
-class User_level_lock;
-void item_user_lock_init(void);
-void item_user_lock_release(User_level_lock *ull);
-void item_user_lock_free(void);
+void mysql_ull_cleanup(THD *thd);
+void mysql_ull_set_explicit_lock_duration(THD *thd);
class Item_func_get_lock :public Item_int_func
{
@@ -1803,7 +1807,6 @@ public:
bool is_expensive_processor(uchar *arg) { return TRUE; }
enum Functype functype() const { return FT_FUNC; }
const char *func_name() const { return "match"; }
- void update_used_tables() {}
table_map not_null_tables() const { return 0; }
bool fix_fields(THD *thd, Item **ref);
bool eq(const Item *, bool binary_cmp) const;
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index a988426d72c..b36375a6e40 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -561,8 +561,8 @@ longlong Item_func_spatial_mbr_rel::val_int()
args[1]->null_value ||
!(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) ||
!(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) ||
- g1->get_mbr(&mbr1, &dummy) ||
- g2->get_mbr(&mbr2, &dummy))))
+ g1->get_mbr(&mbr1, &dummy) || !mbr1.valid() ||
+ g2->get_mbr(&mbr2, &dummy) || !mbr2.valid())))
return 0;
switch (spatial_rel) {
@@ -687,12 +687,11 @@ longlong Item_func_spatial_rel::val_int()
if ((null_value=
(args[0]->null_value || args[1]->null_value ||
!(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) ||
- !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())))))
+ !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) ||
+ g1->get_mbr(&mbr1, &c_end) || !mbr1.valid() ||
+ g2->get_mbr(&mbr2, &c_end) || !mbr2.valid())))
goto exit;
- g1->get_mbr(&mbr1, &c_end);
- g2->get_mbr(&mbr2, &c_end);
-
umbr= mbr1;
umbr.add_mbr(&mbr2);
collector.set_extent(umbr.xmin, umbr.xmax, umbr.ymin, umbr.ymax);
@@ -826,14 +825,14 @@ String *Item_func_spatial_operation::val_str(String *str_value)
if ((null_value=
(args[0]->null_value || args[1]->null_value ||
!(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) ||
- !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())))))
+ !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) ||
+ g1->get_mbr(&mbr1, &c_end) || !mbr1.valid() ||
+ g2->get_mbr(&mbr2, &c_end) || !mbr2.valid())))
{
str_value= 0;
goto exit;
}
- g1->get_mbr(&mbr1, &c_end);
- g2->get_mbr(&mbr2, &c_end);
mbr1.add_mbr(&mbr2);
collector.set_extent(mbr1.xmin, mbr1.xmax, mbr1.ymin, mbr1.ymax);
@@ -1358,11 +1357,11 @@ longlong Item_func_issimple::val_int()
DBUG_ENTER("Item_func_issimple::val_int");
DBUG_ASSERT(fixed == 1);
- if ((null_value= args[0]->null_value) ||
- !(g= Geometry::construct(&buffer, swkb->ptr(), swkb->length())))
+ if ((null_value= (args[0]->null_value ||
+ !(g= Geometry::construct(&buffer, swkb->ptr(), swkb->length())) ||
+ g->get_mbr(&mbr, &c_end))))
DBUG_RETURN(0);
- g->get_mbr(&mbr, &c_end);
collector.set_extent(mbr.xmin, mbr.xmax, mbr.ymin, mbr.ymax);
if (g->get_class_info()->m_type_id == Geometry::wkb_point)
@@ -1598,11 +1597,11 @@ double Item_func_distance::val_real()
if ((null_value= (args[0]->null_value || args[1]->null_value ||
!(g1= Geometry::construct(&buffer1, res1->ptr(), res1->length())) ||
- !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())))))
+ !(g2= Geometry::construct(&buffer2, res2->ptr(), res2->length())) ||
+ g1->get_mbr(&mbr1, &c_end) ||
+ g2->get_mbr(&mbr2, &c_end))))
goto mem_error;
- g1->get_mbr(&mbr1, &c_end);
- g2->get_mbr(&mbr2, &c_end);
mbr1.add_mbr(&mbr2);
collector.set_extent(mbr1.xmin, mbr1.xmax, mbr1.ymin, mbr1.ymax);
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index ee61f921adb..4d5911324ac 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -14,8 +14,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software Foundation,
- 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* This file defines all spatial functions */
diff --git a/sql/item_row.cc b/sql/item_row.cc
index ee7bd837553..03b460e3ada 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -47,13 +47,13 @@ Item_row::Item_row(List<Item> &arg):
items= (Item**) sql_alloc(sizeof(Item*)*arg_count);
else
items= 0;
- List_iterator<Item> li(arg);
+ List_iterator_fast<Item> li(arg);
uint i= 0;
Item *item;
while ((item= li++))
{
items[i]= item;
- i++;
+ i++;
}
}
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 6ea58dfd2c9..854a99bea02 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -59,6 +59,10 @@ C_MODE_START
#include "../mysys/my_static.h" // For soundex_map
C_MODE_END
#include "sql_show.h" // append_identifier
+#include <sql_repl.h>
+#include "sql_statistics.h"
+
+size_t username_char_length= 16;
/**
@todo Remove this. It is not safe to use a shared String object.
@@ -447,6 +451,82 @@ void Item_func_aes_decrypt::fix_length_and_dec()
set_persist_maybe_null(1);
}
+///////////////////////////////////////////////////////////////////////////////
+
+
+const char *histogram_types[] =
+ {"SINGLE_PREC_HB", "DOUBLE_PREC_HB", 0};
+static TYPELIB histogram_types_typelib=
+ { array_elements(histogram_types),
+ "histogram_types",
+ histogram_types, NULL};
+const char *representation_by_type[]= {"%.3f", "%.5f"};
+
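+/*
+ DECODE_HISTOGRAM(hist, type): decode a stored column histogram into a
+ human-readable, comma-separated list of bucket fractions. Each printed
+ value is the width of one bucket (the delta from the previous endpoint),
+ and the final value is the remainder up to 1.0.
+*/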
+String *Item_func_decode_histogram::val_str(String *str)
+{
+ DBUG_ASSERT(fixed == 1);
+ char buff[STRING_BUFFER_USUAL_SIZE];
+ String *res, tmp(buff, sizeof(buff), &my_charset_bin);
+ int type;
+
+ tmp.length(0);
+ if (!(res= args[1]->val_str(&tmp)) ||
+ (type= find_type(res->c_ptr_safe(),
+ &hystorgam_types_typelib, MYF(0))) <= 0)
+ {
+ null_value= 1;
+ return 0;
+ }
+ type--;
+
+ tmp.length(0);
+ if (!(res= args[0]->val_str(&tmp)))
+ {
+ null_value= 1;
+ return 0;
+ }
+ if (type == DOUBLE_PREC_HB && res->length() % 2 != 0)
+ res->length(res->length() - 1); // one byte is unused
+
+ double prev= 0.0;
+ uint i;
+ str->length(0);
+ char numbuf[32];
+ const uchar *p= (uchar*)res->c_ptr();
+ for (i= 0; i < res->length(); i++)
+ {
+ double val;
+ switch (type)
+ {
+ case SINGLE_PREC_HB:
+ val= p[i] / ((double)((1 << 8) - 1));
+ break;
+ case DOUBLE_PREC_HB:
+ val= ((uint16 *)(p + i))[0] / ((double)((1 << 16) - 1));
+ i++;
+ break;
+ default:
+ val= 0;
+ DBUG_ASSERT(0);
+ }
+ /* show delta with previous value */
+ int size= my_snprintf(numbuf, sizeof(numbuf),
+ representation_by_type[type], val - prev);
+ str->append(numbuf, size);
+ str->append(",");
+ prev= val;
+ }
+ /* show delta with max */
+ int size= my_snprintf(numbuf, sizeof(numbuf),
+ representation_by_type[type], 1.0 - prev);
+ str->append(numbuf, size);
+
+ null_value=0;
+ return str;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
/**
Concatenate args with the following premises:
@@ -2542,38 +2622,16 @@ String *Item_func_elt::val_str(String *str)
}
-void Item_func_make_set::split_sum_func(THD *thd, Item **ref_pointer_array,
- List<Item> &fields)
-{
- item->split_sum_func2(thd, ref_pointer_array, fields, &item, TRUE);
- Item_str_func::split_sum_func(thd, ref_pointer_array, fields);
-}
-
-
void Item_func_make_set::fix_length_and_dec()
{
- uint32 char_length= arg_count - 1; /* Separators */
+ uint32 char_length= arg_count - 2; /* Separators */
- if (agg_arg_charsets_for_string_result(collation, args, arg_count))
+ if (agg_arg_charsets_for_string_result(collation, args + 1, arg_count - 1))
return;
- for (uint i=0 ; i < arg_count ; i++)
+ for (uint i=1 ; i < arg_count ; i++)
char_length+= args[i]->max_char_length();
fix_char_length(char_length);
- used_tables_cache|= item->used_tables();
- not_null_tables_cache&= item->not_null_tables();
- const_item_cache&= item->const_item();
- with_sum_func= with_sum_func || item->with_sum_func;
- with_field= with_field || item->with_field;
-}
-
-
-void Item_func_make_set::update_used_tables()
-{
- Item_func::update_used_tables();
- item->update_used_tables();
- used_tables_cache|=item->used_tables();
- const_item_cache&=item->const_item();
}
@@ -2582,15 +2640,15 @@ String *Item_func_make_set::val_str(String *str)
DBUG_ASSERT(fixed == 1);
ulonglong bits;
bool first_found=0;
- Item **ptr=args;
+ Item **ptr=args+1;
String *result=&my_empty_string;
- bits=item->val_int();
- if ((null_value=item->null_value))
+ bits=args[0]->val_int();
+ if ((null_value=args[0]->null_value))
return NULL;
- if (arg_count < 64)
- bits &= ((ulonglong) 1 << arg_count)-1;
+ if (arg_count < 65)
+ bits &= ((ulonglong) 1 << (arg_count-1))-1;
for (; bits; bits >>= 1, ptr++)
{
@@ -2630,39 +2688,6 @@ String *Item_func_make_set::val_str(String *str)
}
-Item *Item_func_make_set::transform(Item_transformer transformer, uchar *arg)
-{
- DBUG_ASSERT(!current_thd->stmt_arena->is_stmt_prepare());
-
- Item *new_item= item->transform(transformer, arg);
- if (!new_item)
- return 0;
-
- /*
- THD::change_item_tree() should be called only if the tree was
- really transformed, i.e. when a new item has been created.
- Otherwise we'll be allocating a lot of unnecessary memory for
- change records at each execution.
- */
- if (item != new_item)
- current_thd->change_item_tree(&item, new_item);
- return Item_str_func::transform(transformer, arg);
-}
-
-
-void Item_func_make_set::print(String *str, enum_query_type query_type)
-{
- str->append(STRING_WITH_LEN("make_set("));
- item->print(str, query_type);
- if (arg_count)
- {
- str->append(',');
- print_args(str, 0, query_type);
- }
- str->append(')');
-}
-
-
String *Item_func_char::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
@@ -2804,6 +2829,46 @@ err:
}
+void Item_func_binlog_gtid_pos::fix_length_and_dec()
+{
+ collation.set(system_charset_info);
+ max_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
+}
+
+
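+/*
+ BINLOG_GTID_POS(filename, offset): return, as a string, the GTID position
+ that corresponds to the given binary log file name and offset.
+ Returns NULL on invalid arguments; when the server is built without
+ replication support an empty string is returned.
+*/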
+String *Item_func_binlog_gtid_pos::val_str(String *str)
+{
+ DBUG_ASSERT(fixed == 1);
+#ifndef HAVE_REPLICATION
+ null_value= 0;
+ str->copy("", 0, system_charset_info);
+ return str;
+#else
+ String name_str, *name;
+ longlong pos;
+
+ name= args[0]->val_str(&name_str);
+ pos= args[1]->val_int();
+
+ if (args[0]->null_value || args[1]->null_value)
+ goto err;
+
+ if (pos < 0 || pos > UINT_MAX32)
+ goto err;
+
+ if (gtid_state_from_binlog_pos(name->c_ptr_safe(), (uint32)pos, str))
+ goto err;
+ null_value= 0;
+ return str;
+
+err:
+ null_value= 1;
+ return NULL;
+#endif /* !HAVE_REPLICATION */
+}
+
+
void Item_func_rpad::fix_length_and_dec()
{
// Handle character set for args[0] and args[2].
@@ -3069,7 +3134,7 @@ String *Item_func_conv_charset::val_str(String *str)
return null_value ? 0 : &str_value;
String *arg= args[0]->val_str(str);
uint dummy_errors;
- if (!arg)
+ if (args[0]->null_value)
{
null_value=1;
return 0;
@@ -3496,7 +3561,7 @@ String* Item_func_inet_ntoa::val_str(String* str)
Also return null if n > 255.255.255.255
*/
- if ((null_value= (args[0]->null_value || n > (ulonglong) LL(4294967295))))
+ if ((null_value= (args[0]->null_value || n > 0xffffffff)))
return 0; // Null value
str->set_charset(collation.collation);
@@ -3989,7 +4054,9 @@ bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
type= DYN_COL_NULL;
break;
case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP2:
case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_DATETIME2:
type= DYN_COL_DATETIME;
break;
case MYSQL_TYPE_DATE:
@@ -3997,6 +4064,7 @@ bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
type= DYN_COL_DATE;
break;
case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_TIME2:
type= DYN_COL_TIME;
break;
case MYSQL_TYPE_VARCHAR:
@@ -4115,8 +4183,7 @@ bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
break;
case DYN_COL_DATETIME:
case DYN_COL_DATE:
- args[valpos]->get_date(&vals[i].x.time_value,
- TIME_FUZZY_DATE | sql_mode_for_dates());
+ args[valpos]->get_date(&vals[i].x.time_value, sql_mode_for_dates());
break;
case DYN_COL_TIME:
args[valpos]->get_time(&vals[i].x.time_value);
@@ -4738,7 +4805,7 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
if (str_to_datetime_with_warn(&my_charset_numeric,
val.x.string.value.str,
val.x.string.value.length,
- ltime, fuzzy_date) <= MYSQL_TIMESTAMP_ERROR)
+ ltime, fuzzy_date))
goto null;
return 0;
case DYN_COL_DATETIME:
@@ -4755,11 +4822,16 @@ null:
void Item_dyncol_get::print(String *str, enum_query_type query_type)
{
+ /* see create_func_dyncol_get */
+ DBUG_ASSERT(str->length() >= 5);
+ DBUG_ASSERT(strncmp(str->ptr() + str->length() - 5, "cast(", 5) == 0);
+
+ str->length(str->length() - 5); // removing "cast("
str->append(STRING_WITH_LEN("column_get("));
args[0]->print(str, query_type);
str->append(',');
args[1]->print(str, query_type);
- str->append(')');
+ /* let the parent cast item add " as <type>)" */
}
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index ac5b9976745..9b380108542 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* This file defines all string functions */
@@ -27,6 +27,8 @@
#pragma interface /* gcc class implementation */
#endif
+extern size_t username_char_length;
+
class MY_LOCALE;
class Item_str_func :public Item_func
@@ -145,6 +147,22 @@ public:
const char *func_name() const { return "concat"; }
};
+class Item_func_decode_histogram :public Item_str_func
+{
+ String tmp_value;
+public:
+ Item_func_decode_histogram(Item *a, Item *b)
+ :Item_str_func(a, b) {}
+ String *val_str(String *);
+ void fix_length_and_dec()
+ {
+ collation.set(system_charset_info);
+ max_length= MAX_BLOB_WIDTH;
+ set_persist_maybe_null(1);
+ }
+ const char *func_name() const { return "decode_histogram"; }
+};
+
class Item_func_concat_ws :public Item_str_func
{
String tmp_value;
@@ -501,8 +519,8 @@ public:
bool fix_fields(THD *thd, Item **ref);
void fix_length_and_dec()
{
- max_length= (USERNAME_LENGTH +
- (HOSTNAME_LENGTH + 1) * SYSTEM_CHARSET_MBMAXLEN);
+ max_length= (username_char_length +
+ HOSTNAME_LENGTH + 1) * SYSTEM_CHARSET_MBMAXLEN;
}
const char *func_name() const { return "user"; }
const char *fully_qualified_func_name() const { return "user()"; }
@@ -551,31 +569,13 @@ public:
class Item_func_make_set :public Item_str_func
{
- Item *item;
String tmp_str;
public:
- Item_func_make_set(Item *a,List<Item> &list) :Item_str_func(list),item(a) {}
+ Item_func_make_set(List<Item> &list) :Item_str_func(list) {}
String *val_str(String *str);
- bool fix_fields(THD *thd, Item **ref)
- {
- DBUG_ASSERT(fixed == 0);
- return ((!item->fixed && item->fix_fields(thd, &item)) ||
- item->check_cols(1) ||
- Item_func::fix_fields(thd, ref));
- }
- void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields);
void fix_length_and_dec();
- void update_used_tables();
const char *func_name() const { return "make_set"; }
-
- bool walk(Item_processor processor, bool walk_subquery, uchar *arg)
- {
- return item->walk(processor, walk_subquery, arg) ||
- Item_str_func::walk(processor, walk_subquery, arg);
- }
- Item *transform(Item_transformer transformer, uchar *arg);
- virtual void print(String *str, enum_query_type query_type);
};
@@ -623,6 +623,17 @@ public:
};
+class Item_func_binlog_gtid_pos :public Item_str_func
+{
+ String tmp_value;
+public:
+ Item_func_binlog_gtid_pos(Item *arg1,Item *arg2) :Item_str_func(arg1,arg2) {}
+ String *val_str(String *);
+ void fix_length_and_dec();
+ const char *func_name() const { return "binlog_gtid_pos"; }
+};
+
+
class Item_func_rpad :public Item_str_func
{
String tmp_value, rpad_str;
@@ -866,25 +877,37 @@ public:
{
if (args[0]->result_type() == STRING_RESULT)
return Item_str_func::val_int();
- return args[0]->val_int();
+ longlong res= args[0]->val_int();
+ if ((null_value= args[0]->null_value))
+ return 0;
+ return res;
}
double val_real()
{
if (args[0]->result_type() == STRING_RESULT)
return Item_str_func::val_real();
- return args[0]->val_real();
+ double res= args[0]->val_real();
+ if ((null_value= args[0]->null_value))
+ return 0;
+ return res;
}
my_decimal *val_decimal(my_decimal *d)
{
if (args[0]->result_type() == STRING_RESULT)
return Item_str_func::val_decimal(d);
- return args[0]->val_decimal(d);
+ my_decimal *res= args[0]->val_decimal(d);
+ if ((null_value= args[0]->null_value))
+ return NULL;
+ return res;
}
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
if (args[0]->result_type() == STRING_RESULT)
return Item_str_func::get_date(ltime, fuzzydate);
- return args[0]->get_date(ltime, fuzzydate);
+ bool res= args[0]->get_date(ltime, fuzzydate);
+ if ((null_value= args[0]->null_value))
+ return 1;
+ return res;
}
void fix_length_and_dec();
const char *func_name() const { return "convert"; }
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index fef5d09f8fc..87fa8147411 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -43,6 +43,9 @@
double get_post_group_estimate(JOIN* join, double join_op_rows);
+const char *exists_outer_expr_name= "<exists outer expr>";
+
+int check_and_do_in_subquery_rewrites(JOIN *join);
Item_subselect::Item_subselect():
Item_result_field(), value_assigned(0), own_engine(0), thd(0), old_engine(0),
@@ -83,15 +86,24 @@ void Item_subselect::init(st_select_lex *select_lex,
if (unit->item)
{
- /*
- Item can be changed in JOIN::prepare while engine in JOIN::optimize
- => we do not copy old_engine here
- */
engine= unit->item->engine;
- own_engine= FALSE;
parsing_place= unit->item->parsing_place;
- thd->change_item_tree((Item**)&unit->item, this);
- engine->change_result(this, result, TRUE);
+ if (unit->item->substype() == EXISTS_SUBS &&
+ ((Item_exists_subselect *)unit->item)->exists_transformed)
+ {
+ /* it is permanent transformation of EXISTS to IN */
+ unit->item= this;
+ engine->change_result(this, result, FALSE);
+ }
+ else
+ {
+ /*
+ Item can be changed in JOIN::prepare while engine in JOIN::optimize
+ => we do not copy old_engine here
+ */
+ thd->change_item_tree((Item**)&unit->item, this);
+ engine->change_result(this, result, TRUE);
+ }
}
else
{
@@ -462,7 +474,7 @@ public:
void Item_subselect::recalc_used_tables(st_select_lex *new_parent,
bool after_pullout)
{
- List_iterator<Ref_to_outside> it(upper_refs);
+ List_iterator_fast<Ref_to_outside> it(upper_refs);
Ref_to_outside *upper;
used_tables_cache= 0;
@@ -547,8 +559,19 @@ bool Item_subselect::is_expensive()
if (!cur_join)
continue;
- /* If a subquery is not optimized we cannot estimate its cost. */
- if (!cur_join->join_tab)
+ /*
+ Subqueries whose result is known after optimization are not expensive.
+ Such subqueries have all tables optimized away, thus have no join plan.
+ */
+ if (cur_join->optimized &&
+ (cur_join->zero_result_cause || !cur_join->tables_list))
+ return false;
+
+ /*
+ If a subquery is not optimized we cannot estimate its cost. A subquery is
+ considered optimized if it has a join plan.
+ */
+ if (!(cur_join->optimized && cur_join->join_tab))
return true;
if (sl->first_inner_unit())
@@ -661,9 +684,12 @@ bool Item_subselect::exec()
void Item_subselect::get_cache_parameters(List<Item> &parameters)
{
- Collect_deps_prm prm= {&parameters,
- unit->first_select()->nest_level_base,
- unit->first_select()->nest_level};
+ Collect_deps_prm prm= {&parameters, // parameters
+ unit->first_select()->nest_level_base, // nest_level_base
+ 0, // count
+ unit->first_select()->nest_level, // nest_level
+ TRUE // collect
+ };
walk(&Item::collect_outer_ref_processor, TRUE, (uchar*)&prm);
}
@@ -1087,6 +1113,11 @@ enum Item_result Item_singlerow_subselect::result_type() const
return engine->type();
}
+enum Item_result Item_singlerow_subselect::cmp_type() const
+{
+ return engine->cmptype();
+}
+
/*
Don't rely on the result type to calculate field type.
Ask the engine instead.
@@ -1298,10 +1329,12 @@ bool Item_singlerow_subselect::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
Item_exists_subselect::Item_exists_subselect(st_select_lex *select_lex):
- Item_subselect()
+ Item_subselect(), upper_not(NULL), abort_on_null(0),
+ emb_on_expr_nest(NULL), optimizer(0), exists_transformed(0)
{
DBUG_ENTER("Item_exists_subselect::Item_exists_subselect");
bool val_bool();
+
init(select_lex, new select_exists_subselect(this));
max_columns= UINT_MAX;
null_value= FALSE; //can't be NULL
@@ -1335,21 +1368,19 @@ bool Item_in_subselect::test_limit(st_select_lex_unit *unit_arg)
Item_in_subselect::Item_in_subselect(Item * left_exp,
st_select_lex *select_lex):
- Item_exists_subselect(),
- left_expr_cache(0), first_execution(TRUE), in_strategy(SUBS_NOT_TRANSFORMED),
- optimizer(0), pushed_cond_guards(NULL), emb_on_expr_nest(NULL),
- is_jtbm_merged(FALSE), is_jtbm_const_tab(FALSE),
- is_flattenable_semijoin(FALSE),
- is_registered_semijoin(FALSE),
+ Item_exists_subselect(), left_expr_cache(0), first_execution(TRUE),
+ in_strategy(SUBS_NOT_TRANSFORMED),
+ pushed_cond_guards(NULL), is_jtbm_merged(FALSE), is_jtbm_const_tab(FALSE),
+ is_flattenable_semijoin(FALSE), is_registered_semijoin(FALSE),
upper_item(0)
{
DBUG_ENTER("Item_in_subselect::Item_in_subselect");
+ DBUG_PRINT("info", ("in_strategy: %u", (uint)in_strategy));
left_expr= left_exp;
func= &eq_creator;
init(select_lex, new select_exists_subselect(this));
max_columns= UINT_MAX;
maybe_null= 1;
- abort_on_null= 0;
reset();
//if test_limit will fail then error will be reported to client
test_limit(select_lex->master_unit());
@@ -1745,8 +1776,7 @@ Item_in_subselect::single_value_transformer(JOIN *join)
SELECT_LEX *current= thd->lex->current_select;
thd->lex->current_select= current->return_after_parsing();
- //optimizer never use Item **ref => we can pass 0 as parameter
- if (!optimizer || optimizer->fix_left(thd, 0))
+ if (!optimizer || optimizer->fix_left(thd))
{
thd->lex->current_select= current;
DBUG_RETURN(true);
@@ -1852,7 +1882,8 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join)
print_where(item, "rewrite with MIN/MAX", QT_ORDINARY););
save_allow_sum_func= thd->lex->allow_sum_func;
- thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level;
+ thd->lex->allow_sum_func|=
+ (nesting_map)1 << thd->lex->current_select->nest_level;
/*
Item_sum_(max|min) can't substitute other item => we can use 0 as
reference, also Item_sum_(max|min) can't be fixed after creation, so
@@ -2125,8 +2156,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
SELECT_LEX *current= thd->lex->current_select;
thd->lex->current_select= current->return_after_parsing();
- //optimizer never use Item **ref => we can pass 0 as parameter
- if (!optimizer || optimizer->fix_left(thd, 0))
+ if (!optimizer || optimizer->fix_left(thd))
{
thd->lex->current_select= current;
DBUG_RETURN(true);
@@ -2370,6 +2400,12 @@ Item_in_subselect::select_transformer(JOIN *join)
return select_in_like_transformer(join);
}
+bool
+Item_exists_subselect::select_transformer(JOIN *join)
+{
+ return select_prepare_to_be_in();
+}
+
/**
Create the predicates needed to transform an IN/ALL/ANY subselect into a
@@ -2505,6 +2541,437 @@ bool Item_in_subselect::inject_in_to_exists_cond(JOIN *join_arg)
}
+/*
+ If this select can potentially be converted by the EXISTS-to-IN conversion,
+ wrap it in an Item_in_optimizer object. The final decision on whether to do
+ the conversion is made at a later phase.
+*/
+
+bool Item_exists_subselect::select_prepare_to_be_in()
+{
+ bool trans_res= FALSE;
+ DBUG_ENTER("Item_exists_subselect::select_prepare_to_be_in");
+ if (!optimizer &&
+ thd->lex->sql_command == SQLCOM_SELECT &&
+ !unit->first_select()->is_part_of_union() &&
+ optimizer_flag(thd, OPTIMIZER_SWITCH_EXISTS_TO_IN) &&
+ (is_top_level_item() ||
+ (upper_not && upper_not->is_top_level_item())))
+ {
+ Query_arena *arena, backup;
+ bool result;
+ arena= thd->activate_stmt_arena_if_needed(&backup);
+ result= (!(optimizer= new Item_in_optimizer(new Item_int(1), this)));
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ if (result)
+ trans_res= TRUE;
+ else
+ substitution= optimizer;
+ }
+ DBUG_RETURN(trans_res);
+}
+
+/**
+ Check if 'func' is an equality of the form "inner_table.column = outer_expr"
+
+ @param func Expression to check
+ @param local_field OUT Return "inner_table.column" here
+ @param outer_exp OUT Return the outer expression here
+
+ @return true if 'func' is such an equality, false otherwise.
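+
+ @note
+ For example, given "t2.a = t1.b" where t1 is an outer table, *local_field
+ is set to t2.a and *outer_exp to t1.b (table and column names here are
+ illustrative only).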
+*/
+
+static bool check_equality_for_exist2in(Item_func *func,
+ Item_ident **local_field,
+ Item **outer_exp)
+{
+ Item **args;
+ if (func->functype() != Item_func::EQ_FUNC)
+ return FALSE;
+ DBUG_ASSERT(func->arg_count == 2);
+ args= func->arguments();
+ if (args[0]->real_type() == Item::FIELD_ITEM &&
+ args[0]->all_used_tables() != OUTER_REF_TABLE_BIT &&
+ args[1]->all_used_tables() == OUTER_REF_TABLE_BIT)
+ {
+ /* It is an Item_field or an Item_direct_view_ref */
+ DBUG_ASSERT(args[0]->type() == Item::FIELD_ITEM ||
+ args[0]->type() == Item::REF_ITEM);
+ *local_field= (Item_ident *)args[0];
+ *outer_exp= args[1];
+ return TRUE;
+ }
+ else if (args[1]->real_type() == Item::FIELD_ITEM &&
+ args[1]->all_used_tables() != OUTER_REF_TABLE_BIT &&
+ args[0]->all_used_tables() == OUTER_REF_TABLE_BIT)
+ {
+ /* It is an Item_field or an Item_direct_view_ref */
+ DBUG_ASSERT(args[1]->type() == Item::FIELD_ITEM ||
+ args[1]->type() == Item::REF_ITEM);
+ *local_field= (Item_ident *)args[1];
+ *outer_exp= args[0];
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
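+/*
+ One "inner_table.column = outer_expr" equality found in the subquery's
+ WHERE condition, together with a pointer to its location in that condition.
+*/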
+typedef struct st_eq_field_outer
+{
+ Item_func **eq_ref;
+ Item_ident *local_field;
+ Item *outer_exp;
+} EQ_FIELD_OUTER;
+
+
+/**
+ Check if 'conds' is a set of AND-ed outer_expr=inner_table.col equalities
+
+ @detail
+ Check if 'conds' has the form
+
+ outer1=inner_tbl1.col1 AND ... AND outer2=inner_tbl1.col2 AND remainder_cond
+
+ @param conds Condition to be checked
+ @param result Array to collect EQ_FIELD_OUTER elements describing
+ inner-vs-outer equalities the function has found.
+ @return
+ false - some inner-vs-outer equalities were found
+ true - otherwise.
+*/
+
+static bool find_inner_outer_equalities(Item **conds,
+ Dynamic_array<EQ_FIELD_OUTER> &result)
+{
+ bool found= FALSE;
+ EQ_FIELD_OUTER element;
+ if (is_cond_and(*conds))
+ {
+ List_iterator<Item> li(*((Item_cond*)*conds)->argument_list());
+ Item *item;
+ while ((item= li++))
+ {
+ if (item->type() == Item::FUNC_ITEM &&
+ check_equality_for_exist2in((Item_func *)item,
+ &element.local_field,
+ &element.outer_exp))
+ {
+ found= TRUE;
+ element.eq_ref= (Item_func **)li.ref();
+ if (result.append(element))
+ goto alloc_err;
+ }
+ }
+ }
+ else if ((*conds)->type() == Item::FUNC_ITEM &&
+ check_equality_for_exist2in((Item_func *)*conds,
+ &element.local_field,
+ &element.outer_exp))
+ {
+ found= TRUE;
+ element.eq_ref= (Item_func **)conds;
+ if (result.append(element))
+ goto alloc_err;
+ }
+
+ return !found;
+alloc_err:
+ return TRUE;
+}
+
+/**
+ Convert an EXISTS subquery into an IN subquery where it is possible and
+ worthwhile.
+
+ @param opt_arg Pointer to the THD
+
+ @return TRUE in case of error and FALSE otherwise.
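+
+ @note
+ For example, "EXISTS (SELECT 1 FROM t2 WHERE t2.a = t1.b)", with t1 an
+ outer table, is rewritten into "t1.b IN (SELECT t2.a FROM t2)" (table and
+ column names here are illustrative only).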
+*/
+
+bool Item_exists_subselect::exists2in_processor(uchar *opt_arg)
+{
+ THD *thd= (THD *)opt_arg;
+ SELECT_LEX *first_select=unit->first_select(), *save_select;
+ JOIN *join= first_select->join;
+ Item_func *eq= NULL, **eq_ref= NULL;
+ Item_ident *local_field= NULL;
+ Item *outer_exp= NULL;
+ Item *left_exp= NULL;
+ Item_in_subselect *in_subs;
+ Query_arena *arena= NULL, backup;
+ int res= FALSE;
+ List<Item> outer;
+ Dynamic_array<EQ_FIELD_OUTER> eqs(5, 5);
+ bool will_be_correlated;
+ DBUG_ENTER("Item_exists_subselect::exists2in_processor");
+
+ if (!optimizer ||
+ !optimizer_flag(thd, OPTIMIZER_SWITCH_EXISTS_TO_IN) ||
+ (!is_top_level_item() && (!upper_not ||
+ !upper_not->is_top_level_item())) ||
+ first_select->is_part_of_union() ||
+ first_select->group_list.elements ||
+ first_select->order_list.elements ||
+ join->having ||
+ first_select->with_sum_func ||
+ !first_select->leaf_tables.elements ||
+ !join->conds)
+ DBUG_RETURN(FALSE);
+
+ DBUG_ASSERT(first_select->order_list.elements == 0 &&
+ first_select->group_list.elements == 0 &&
+ first_select->having == NULL);
+
+ if (find_inner_outer_equalities(&join->conds, eqs))
+ DBUG_RETURN(FALSE);
+
+ DBUG_ASSERT(eqs.elements() != 0);
+
+ save_select= thd->lex->current_select;
+ thd->lex->current_select= first_select;
+
+ /* Check that the subquery has only dependencies we are going to pull out */
+ {
+ List<Item> unused;
+ Collect_deps_prm prm= {&unused, // parameters
+ unit->first_select()->nest_level_base, // nest_level_base
+ 0, // count
+ unit->first_select()->nest_level, // nest_level
+ FALSE // collect
+ };
+ walk(&Item::collect_outer_ref_processor, TRUE, (uchar*)&prm);
+ DBUG_ASSERT(prm.count > 0);
+ DBUG_ASSERT(prm.count >= (uint)eqs.elements());
+ will_be_correlated= prm.count > (uint)eqs.elements();
+ if (upper_not && will_be_correlated)
+ goto out;
+ }
+
+ if ((uint)eqs.elements() > (first_select->item_list.elements +
+ first_select->select_n_reserved))
+ goto out;
+ /* It is a simple query */
+ DBUG_ASSERT(first_select->join->all_fields.elements ==
+ first_select->item_list.elements);
+
+ arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ while (first_select->item_list.elements > (uint)eqs.elements())
+ {
+ first_select->item_list.pop();
+ first_select->join->all_fields.elements--;
+ }
+ {
+ List_iterator<Item> it(first_select->item_list);
+
+ for (uint i= 0; i < (uint)eqs.elements(); i++)
+ {
+ Item *item= it++;
+ eq_ref= eqs.at(i).eq_ref;
+ eq= *eq_ref;
+ local_field= eqs.at(i).local_field;
+ outer_exp= eqs.at(i).outer_exp;
+ /* Add the field to the SELECT_LIST */
+ if (item)
+ it.replace(local_field);
+ else
+ {
+ first_select->item_list.push_back(local_field);
+ first_select->join->all_fields.elements++;
+ }
+ first_select->ref_pointer_array[i]= (Item *)local_field;
+
+ /* Remove the equality from the condition: replace it with TRUE, or with
+ IS NOT NULL on the inner field when NULL filtering must be preserved. */
+ if (!upper_not || !local_field->maybe_null)
+ {
+ eq->arguments()[0]= new Item_int(1);
+ eq->arguments()[1]= new Item_int(1);
+ }
+ else
+ {
+ *eq_ref= new Item_func_isnotnull(
+ new Item_field(thd,
+ ((Item_field*)(local_field->real_item()))->context,
+ ((Item_field*)(local_field->real_item()))->field));
+ if ((*eq_ref)->fix_fields(thd, (Item **)eq_ref))
+ {
+ res= TRUE;
+ goto out;
+ }
+ }
+ outer_exp->fix_after_pullout(unit->outer_select(), &outer_exp);
+ outer_exp->update_used_tables();
+ outer.push_back(outer_exp);
+ }
+ }
+
+ join->conds->update_used_tables();
+
+ /* Build the IN subquery and use outer_exp as its left-hand side */
+ if (eqs.elements() == 1)
+ left_exp= outer_exp;
+ else
+ {
+ if (!(left_exp= new Item_row(outer)))
+ {
+ res= TRUE;
+ goto out;
+ }
+ }
+
+ /* Make the EXISTS->IN transformation permanent (see Item_subselect::init()) */
+ set_exists_transformed();
+
+ first_select->select_limit= NULL;
+ if (!(in_subs= new Item_in_subselect(left_exp, first_select)))
+ {
+ res= TRUE;
+ goto out;
+ }
+ in_subs->set_exists_transformed();
+ optimizer->arguments()[0]= left_exp;
+ optimizer->arguments()[1]= in_subs;
+ in_subs->optimizer= optimizer;
+ DBUG_ASSERT(is_top_level_item() ||
+ (upper_not && upper_not->is_top_level_item()));
+ in_subs->top_level_item();
+ {
+ SELECT_LEX *current= thd->lex->current_select;
+ optimizer->reset_cache(); // renew cache, and we will not keep it
+ thd->lex->current_select= unit->outer_select();
+ DBUG_ASSERT(optimizer);
+ if (optimizer->fix_left(thd))
+ {
+ res= TRUE;
+ /*
+ We should not restore thd->lex->current_select because it will be
+ reset on exit from this procedure
+ */
+ goto out;
+ }
+ /*
+ Since Item_ref_in_optimizer does not substitute itself during fix_fields(),
+ we can use the same item for all selects.
+ */
+ in_subs->expr= new Item_direct_ref(&first_select->context,
+ (Item**)optimizer->get_cache(),
+ (char *)"<no matter>",
+ (char *)in_left_expr_name);
+ if (in_subs->fix_fields(thd, optimizer->arguments() + 1))
+ {
+ res= TRUE;
+ /*
+ We should not restore thd->lex->current_select because it will be
+ reset on exit from this procedure
+ */
+ goto out;
+ }
+ {
+ /* Move dependence list */
+ List_iterator_fast<Ref_to_outside> it(upper_refs);
+ Ref_to_outside *upper;
+ while ((upper= it++))
+ {
+ uint i;
+ for (i= 0; i < (uint)eqs.elements(); i++)
+ if (eqs.at(i).outer_exp->
+ walk(&Item::find_item_processor, TRUE, (uchar*)upper->item))
+ break;
+ if (i == (uint)eqs.elements() &&
+ (in_subs->upper_refs.push_back(upper, thd->stmt_arena->mem_root)))
+ goto out;
+ }
+ }
+ in_subs->update_used_tables();
+ /*
+ The engine of the subquery is already fixed, so the fix_fields() call
+ above is not complete and has to be finished up here.
+ */
+ in_subs->upper_refs= upper_refs;
+ upper_refs.empty();
+ thd->lex->current_select= current;
+ }
+
+ DBUG_ASSERT(unit->item == in_subs);
+ DBUG_ASSERT(join == first_select->join);
+ /*
+ Fix dependency info
+ */
+ in_subs->is_correlated= will_be_correlated;
+ if (!will_be_correlated)
+ {
+ first_select->uncacheable&= ~UNCACHEABLE_DEPENDENT_GENERATED;
+ unit->uncacheable&= ~UNCACHEABLE_DEPENDENT_GENERATED;
+ }
+ /*
+ set possible optimization strategies
+ */
+ in_subs->emb_on_expr_nest= emb_on_expr_nest;
+ res= check_and_do_in_subquery_rewrites(join);
+ first_select->join->prepare_stage2();
+
+ first_select->fix_prepare_information(thd, &join->conds, &join->having);
+
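+ /*
+ If the predicate is under NOT and an outer expression can be NULL,
+ "NOT (expr IN (...))" would yield NULL where "NOT EXISTS (...)" yields
+ TRUE. Guard the wrapper with IS NOT NULL checks on the nullable outer
+ expressions so the rewritten condition keeps the original semantics.
+ */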
+ if (upper_not)
+ {
+ Item *exp;
+ if (eqs.elements() == 1)
+ {
+ exp= (optimizer->arguments()[0]->maybe_null ?
+ (Item*)
+ new Item_cond_and(
+ new Item_func_isnotnull(
+ new Item_direct_ref(&unit->outer_select()->context,
+ optimizer->arguments(),
+ (char *)"<no matter>",
+ (char *)exists_outer_expr_name)),
+ optimizer) :
+ (Item *)optimizer);
+ }
+ else
+ {
+ List<Item> *and_list= new List<Item>;
+ if (!and_list)
+ {
+ res= TRUE;
+ goto out;
+ }
+ for (size_t i= 0; i < eqs.elements(); i++)
+ {
+ if (optimizer->arguments()[0]->maybe_null)
+ {
+ and_list->
+ push_front(
+ new Item_func_isnotnull(
+ new Item_direct_ref(&unit->outer_select()->context,
+ optimizer->arguments()[0]->addr(i),
+ (char *)"<no matter>",
+ (char *)exists_outer_expr_name)));
+ }
+ }
+ if (and_list->elements > 0)
+ {
+ and_list->push_front(optimizer);
+ exp= new Item_cond_and(*and_list);
+ }
+ else
+ exp= optimizer;
+ }
+ upper_not->arguments()[0]= exp;
+ if (!exp->fixed && exp->fix_fields(thd, upper_not->arguments()))
+ {
+ res= TRUE;
+ goto out;
+ }
+ }
+
+out:
+ thd->lex->current_select= save_select;
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ DBUG_RETURN(res);
+}
+
+
/**
Prepare IN/ALL/ANY/SOME subquery transformation and call the appropriate
transformation function.
@@ -2621,14 +3088,23 @@ void Item_in_subselect::print(String *str, enum_query_type query_type)
Item_subselect::print(str, query_type);
}
+bool Item_exists_subselect::fix_fields(THD *thd_arg, Item **ref)
+{
+ DBUG_ENTER("Item_exists_subselect::fix_fields");
+ if (exists_transformed)
+ DBUG_RETURN( !( (*ref)= new Item_int(1)));
+ DBUG_RETURN(Item_subselect::fix_fields(thd_arg, ref));
+}
+
bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
{
uint outer_cols_num;
List<Item> *inner_cols;
+ DBUG_ENTER("Item_in_subselect::fix_fields");
if (test_strategy(SUBS_SEMI_JOIN))
- return !( (*ref)= new Item_int(1));
+ DBUG_RETURN( !( (*ref)= new Item_int(1)) );
/*
Check if the outer and inner IN operands match in those cases when we
@@ -2660,7 +3136,7 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
if (outer_cols_num != inner_cols->elements)
{
my_error(ER_OPERAND_COLUMNS, MYF(0), outer_cols_num);
- return TRUE;
+ DBUG_RETURN(TRUE);
}
if (outer_cols_num > 1)
{
@@ -2670,7 +3146,7 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
{
inner_col= inner_col_it++;
if (inner_col->check_cols(left_expr->element_index(i)->cols()))
- return TRUE;
+ DBUG_RETURN(TRUE);
}
}
}
@@ -2678,12 +3154,12 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
if (thd_arg->lex->is_view_context_analysis() &&
left_expr && !left_expr->fixed &&
left_expr->fix_fields(thd_arg, &left_expr))
- return TRUE;
+ DBUG_RETURN(TRUE);
else
if (Item_subselect::fix_fields(thd_arg, ref))
- return TRUE;
+ DBUG_RETURN(TRUE);
fixed= TRUE;
- return FALSE;
+ DBUG_RETURN(FALSE);
}
@@ -3044,12 +3520,13 @@ void subselect_engine::set_row(List<Item> &item_list, Item_cache **row)
{
Item *sel_item;
List_iterator_fast<Item> li(item_list);
- res_type= STRING_RESULT;
+ cmp_type= res_type= STRING_RESULT;
res_field_type= MYSQL_TYPE_VAR_STRING;
for (uint i= 0; (sel_item= li++); i++)
{
item->max_length= sel_item->max_length;
res_type= sel_item->result_type();
+ cmp_type= sel_item->cmp_type();
res_field_type= sel_item->field_type();
item->decimals= sel_item->decimals;
item->unsigned_flag= sel_item->unsigned_flag;
@@ -3060,7 +3537,7 @@ void subselect_engine::set_row(List<Item> &item_list, Item_cache **row)
//psergey-backport-timours: row[i]->store(sel_item);
}
if (item_list.elements > 1)
- res_type= ROW_RESULT;
+ cmp_type= res_type= ROW_RESULT;
}
void subselect_single_select_engine::fix_length_and_dec(Item_cache **row)
@@ -3811,6 +4288,7 @@ subselect_single_select_engine::change_result(Item_subselect *si,
select_result_interceptor *res,
bool temp)
{
+ DBUG_ENTER("subselect_single_select_engine::change_result");
item= si;
if (temp)
{
@@ -3831,7 +4309,7 @@ subselect_single_select_engine::change_result(Item_subselect *si,
that would not require a lot of extra code that would be harder to manage
than the current code.
*/
- return select_lex->join->change_result(res);
+ DBUG_RETURN(select_lex->join->change_result(res));
}
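
The exists-to-IN hunks earlier in this file wrap the Item_in_optimizer in IS NOT NULL guards only for nullable outer operands: a single guard in the one-column case, a whole AND list in the multi-column case. A minimal standalone C++ sketch of that guard-building rule, using plain strings in place of Item trees (OuterCol, build_upper_not_argument and the "<in_optimizer>" placeholder are illustrative names, not server API):

// Illustrative sketch only: mirrors the NULL-guard rule of the EXISTS->IN
// rewrite with plain strings instead of Item objects.
#include <iostream>
#include <string>
#include <vector>

struct OuterCol { std::string name; bool maybe_null; };

// Build the expression that replaces the argument of the upper NOT:
// the IN-optimizer term, prefixed by one "x IS NOT NULL" guard per
// nullable outer column (non-nullable columns need no guard).
static std::string build_upper_not_argument(const std::vector<OuterCol> &outer,
                                            const std::string &optimizer)
{
  std::string conj= optimizer;
  for (const OuterCol &col : outer)
    if (col.maybe_null)
      conj= col.name + " IS NOT NULL AND " + conj;  // push_front of a guard
  return conj;                      // no nullable columns: optimizer alone
}

int main()
{
  std::vector<OuterCol> outer= { {"t1.a", true}, {"t1.b", false} };
  std::cout << "NOT (" << build_upper_not_argument(outer, "<in_optimizer>")
            << ")" << std::endl;
  // Prints: NOT (t1.a IS NOT NULL AND <in_optimizer>)
}
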
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 1da129380e7..e806f45041a 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -244,6 +244,7 @@ public:
virtual bool expr_cache_is_needed(THD *);
virtual void get_cache_parameters(List<Item> &parameters);
virtual bool is_subquery_processor (uchar *opt_arg) { return 1; }
+ bool exists2in_processor(uchar *opt_arg) { return 0; }
bool limit_index_condition_pushdown_processor(uchar *opt_arg)
{
return TRUE;
@@ -286,6 +287,7 @@ public:
bool val_bool();
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
enum Item_result result_type() const;
+ enum Item_result cmp_type() const;
enum_field_types field_type() const;
void fix_length_and_dec();
@@ -338,13 +340,35 @@ public:
class Item_exists_subselect :public Item_subselect
{
protected:
+ Item_func_not *upper_not;
bool value; /* value of this item (boolean: exists/not-exists) */
+ bool abort_on_null;
void init_length_and_dec();
+ bool select_prepare_to_be_in();
public:
+ /*
+ Used by subquery optimizations to keep track of in which clause this
+ subquery predicate is located:
+ NO_JOIN_NEST - the predicate is an AND-part of the WHERE
+ join nest pointer - the predicate is an AND-part of ON expression
+ of a join nest
+ NULL - for all other locations
+ */
+ TABLE_LIST *emb_on_expr_nest;
+ /**
+ Reference on the Item_in_optimizer wrapper of this subquery
+ */
+ Item_in_optimizer *optimizer;
+ /* TRUE if this predicate was converted from EXISTS to IN */
+ bool exists_transformed;
+
Item_exists_subselect(st_select_lex *select_lex);
- Item_exists_subselect(): Item_subselect() {}
+ Item_exists_subselect()
+ :Item_subselect(), upper_not(NULL),abort_on_null(0),
+ emb_on_expr_nest(NULL), optimizer(0), exists_transformed(0)
+ {}
subs_type substype() { return EXISTS_SUBS; }
void reset()
@@ -360,11 +384,24 @@ public:
String *val_str(String*);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
+ bool fix_fields(THD *thd, Item **ref);
void fix_length_and_dec();
virtual void print(String *str, enum_query_type query_type);
+ bool select_transformer(JOIN *join);
+ void top_level_item() { abort_on_null=1; }
+ inline bool is_top_level_item() { return abort_on_null; }
+ bool exists2in_processor(uchar *opt_arg);
Item* expr_cache_insert_transformer(uchar *thd_arg);
+ void mark_as_condition_AND_part(TABLE_LIST *embedding)
+ {
+ emb_on_expr_nest= embedding;
+ }
+ virtual void under_not(Item_func_not *upper) { upper_not= upper; };
+
+ void set_exists_transformed() { exists_transformed= TRUE; }
+
friend class select_exists_subselect;
friend class subselect_uniquesubquery_engine;
friend class subselect_indexsubquery_engine;
@@ -424,11 +461,8 @@ protected:
*/
Item *expr;
bool was_null;
- bool abort_on_null;
/* A bitmap of possible execution strategies for an IN predicate. */
uchar in_strategy;
-public:
- Item_in_optimizer *optimizer;
protected:
/* Used to trigger on/off conditions that were pushed down to subselect */
bool *pushed_cond_guards;
@@ -451,15 +485,6 @@ public:
/* Priority of this predicate in the convert-to-semi-join-nest process. */
int sj_convert_priority;
/*
- Used by subquery optimizations to keep track about in which clause this
- subquery predicate is located:
- NO_JOIN_NEST - the predicate is an AND-part of the WHERE
- join nest pointer - the predicate is an AND-part of ON expression
- of a join nest
- NULL - for all other locations
- */
- TABLE_LIST *emb_on_expr_nest;
- /*
Types of left_expr and subquery's select list allow to perform subquery
materialization. Currently, we set this to FALSE when it as well could
be TRUE. This is to be properly addressed with fix for BUG#36752.
@@ -527,7 +552,9 @@ public:
*/
Item *original_item()
{
- return is_flattenable_semijoin ? (Item*)this : (Item*)optimizer;
+ return (is_flattenable_semijoin && !exists_transformed ?
+ (Item*)this :
+ (Item*)optimizer);
}
bool *get_cond_guard(int i)
@@ -546,11 +573,9 @@ public:
Item_in_subselect(Item * left_expr, st_select_lex *select_lex);
Item_in_subselect()
:Item_exists_subselect(), left_expr_cache(0), first_execution(TRUE),
- abort_on_null(0), in_strategy(SUBS_NOT_TRANSFORMED), optimizer(0),
- pushed_cond_guards(NULL), func(NULL), emb_on_expr_nest(NULL),
- is_jtbm_merged(FALSE), is_jtbm_const_tab(FALSE),
- upper_item(0)
- {}
+ in_strategy(SUBS_NOT_TRANSFORMED),
+ pushed_cond_guards(NULL), func(NULL), is_jtbm_merged(FALSE),
+ is_jtbm_const_tab(FALSE), upper_item(0) {}
void cleanup();
subs_type substype() { return IN_SUBS; }
void reset()
@@ -571,8 +596,6 @@ public:
my_decimal *val_decimal(my_decimal *);
void update_null_value () { (void) val_bool(); }
bool val_bool();
- void top_level_item() { abort_on_null=1; }
- inline bool is_top_level_item() { return abort_on_null; }
bool test_limit(st_select_lex_unit *unit);
virtual void print(String *str, enum_query_type query_type);
bool fix_fields(THD *thd, Item **ref);
@@ -589,19 +612,14 @@ public:
void set_first_execution() { if (first_execution) first_execution= FALSE; }
bool expr_cache_is_needed(THD *thd);
inline bool left_expr_has_null();
-
+
int optimize(double *out_rows, double *cost);
- /*
+ /*
Return the identifier that we could use to identify the subquery for the
user.
*/
int get_identifier();
- void mark_as_condition_AND_part(TABLE_LIST *embedding)
- {
- emb_on_expr_nest= embedding;
- }
-
bool test_strategy(uchar strategy)
{ return test(in_strategy & strategy); }
@@ -630,6 +648,9 @@ public:
void add_strategy (uchar strategy)
{
+ DBUG_ENTER("Item_in_subselect::add_strategy");
+ DBUG_PRINT("enter", ("current: %u add: %u",
+ (uint) in_strategy, (uint) strategy));
DBUG_ASSERT(strategy != SUBS_NOT_TRANSFORMED);
DBUG_ASSERT(!(strategy & SUBS_STRATEGY_CHOSEN));
/*
@@ -639,16 +660,25 @@ public:
DBUG_ASSERT(!(in_strategy & SUBS_STRATEGY_CHOSEN));
*/
in_strategy|= strategy;
+ DBUG_VOID_RETURN;
}
void reset_strategy(uchar strategy)
{
+ DBUG_ENTER("Item_in_subselect::reset_strategy");
+ DBUG_PRINT("enter", ("current: %u new: %u",
+ (uint) in_strategy, (uint) strategy));
DBUG_ASSERT(strategy != SUBS_NOT_TRANSFORMED);
in_strategy= strategy;
+ DBUG_VOID_RETURN;
}
void set_strategy(uchar strategy)
{
+ DBUG_ENTER("Item_in_subselect::set_strategy");
+ DBUG_PRINT("enter", ("current: %u set: %u",
+ (uint) in_strategy,
+ (uint) (SUBS_STRATEGY_CHOSEN | strategy)));
/* Check that only one strategy is set for execution. */
DBUG_ASSERT(strategy == SUBS_SEMI_JOIN ||
strategy == SUBS_IN_TO_EXISTS ||
@@ -658,7 +688,12 @@ public:
strategy == SUBS_MAXMIN_INJECTED ||
strategy == SUBS_MAXMIN_ENGINE);
in_strategy= (SUBS_STRATEGY_CHOSEN | strategy);
+ DBUG_VOID_RETURN;
}
+ bool exists2in_processor(uchar *opt_arg __attribute__((unused)))
+ {
+ return 0;
+ };
friend class Item_ref_null_helper;
friend class Item_is_not_null_test;
@@ -666,6 +701,7 @@ public:
friend class subselect_indexsubquery_engine;
friend class subselect_hash_sj_engine;
friend class subselect_partial_match_engine;
+ friend class Item_exists_subselect;
};
@@ -698,6 +734,7 @@ protected:
THD *thd; /* pointer to current THD */
Item_subselect *item; /* item, that use this engine */
enum Item_result res_type; /* type of results */
+ enum Item_result cmp_type; /* how to compare the results */
enum_field_types res_field_type; /* column type of the results */
bool maybe_null; /* may be null (first item in select) */
public:
@@ -712,7 +749,7 @@ public:
{
result= res;
item= si;
- res_type= STRING_RESULT;
+ cmp_type= res_type= STRING_RESULT;
res_field_type= MYSQL_TYPE_VAR_STRING;
maybe_null= 0;
set_thd(thd_arg);
@@ -752,6 +789,7 @@ public:
virtual uint cols()= 0; /* return number of columns in select */
virtual uint8 uncacheable()= 0; /* query is uncacheable */
enum Item_result type() { return res_type; }
+ enum Item_result cmptype() { return cmp_type; }
enum_field_types field_type() { return res_field_type; }
virtual void exclude()= 0;
virtual bool may_be_null() { return maybe_null; };
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 13f97d56759..b3be7339849 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
Copyright (c) 2008, 2013 Monty Program Ab
This program is free software; you can redistribute it and/or modify
@@ -65,7 +65,15 @@ ulonglong Item_sum::ram_limitation(THD *thd)
bool Item_sum::init_sum_func_check(THD *thd)
{
- if (!thd->lex->allow_sum_func)
+ SELECT_LEX *curr_sel= thd->lex->current_select;
+ if (!curr_sel->name_visibility_map)
+ {
+ for (SELECT_LEX *sl= curr_sel; sl; sl= sl->context.outer_select())
+ {
+ curr_sel->name_visibility_map|= (1 << sl-> nest_level);
+ }
+ }
+ if (!(thd->lex->allow_sum_func & curr_sel->name_visibility_map))
{
my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE),
MYF(0));
@@ -136,8 +144,11 @@ bool Item_sum::init_sum_func_check(THD *thd)
bool Item_sum::check_sum_func(THD *thd, Item **ref)
{
+ SELECT_LEX *curr_sel= thd->lex->current_select;
+ nesting_map allow_sum_func= (thd->lex->allow_sum_func &
+ curr_sel->name_visibility_map);
bool invalid= FALSE;
- nesting_map allow_sum_func= thd->lex->allow_sum_func;
+ DBUG_ASSERT(curr_sel->name_visibility_map); // should be set already
/*
The value of max_arg_level is updated if an argument of the set function
contains a column reference resolved against a subquery whose level is
@@ -152,9 +163,10 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref)
If it is there under a construct where it is not allowed
we report an error.
*/
- invalid= !(allow_sum_func & (1 << max_arg_level));
+ invalid= !(allow_sum_func & ((nesting_map)1 << max_arg_level));
}
- else if (max_arg_level >= 0 || !(allow_sum_func & (1 << nest_level)))
+ else if (max_arg_level >= 0 ||
+ !(allow_sum_func & ((nesting_map)1 << nest_level)))
{
/*
The set function can be aggregated only in outer subqueries.
@@ -163,14 +175,15 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref)
*/
if (register_sum_func(thd, ref))
return TRUE;
- invalid= aggr_level < 0 && !(allow_sum_func & (1 << nest_level));
+ invalid= aggr_level < 0 &&
+ !(allow_sum_func & ((nesting_map)1 << nest_level));
if (!invalid && thd->variables.sql_mode & MODE_ANSI)
invalid= aggr_level < 0 && max_arg_level < nest_level;
}
if (!invalid && aggr_level < 0)
{
aggr_level= nest_level;
- aggr_sel= thd->lex->current_select;
+ aggr_sel= curr_sel;
}
/*
By this moment we either found a subquery where the set function is
@@ -307,18 +320,19 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref)
{
SELECT_LEX *sl;
nesting_map allow_sum_func= thd->lex->allow_sum_func;
- for (sl= thd->lex->current_select->master_unit()->outer_select() ;
+ for (sl= thd->lex->current_select->context.outer_select() ;
sl && sl->nest_level > max_arg_level;
- sl= sl->master_unit()->outer_select() )
+ sl= sl->context.outer_select())
{
- if (aggr_level < 0 && (allow_sum_func & (1 << sl->nest_level)))
+ if (aggr_level < 0 &&
+ (allow_sum_func & ((nesting_map)1 << sl->nest_level)))
{
/* Found the most nested subquery where the function can be aggregated */
aggr_level= sl->nest_level;
aggr_sel= sl;
}
}
- if (sl && (allow_sum_func & (1 << sl->nest_level)))
+ if (sl && (allow_sum_func & ((nesting_map)1 << sl->nest_level)))
{
/*
We reached the subquery of level max_arg_level and checked
@@ -375,7 +389,12 @@ bool Item_sum::collect_outer_ref_processor(uchar *param)
if ((ds= depended_from()) &&
ds->nest_level_base == prm->nest_level_base &&
ds->nest_level < prm->nest_level)
- prm->parameters->add_unique(this, &cmp_items);
+ {
+ if (prm->collect)
+ prm->parameters->add_unique(this, &cmp_items);
+ else
+ prm->count++;
+ }
return FALSE;
}
@@ -559,7 +578,7 @@ void Item_sum::update_used_tables ()
used_tables_cache&= PSEUDO_TABLE_BITS;
// the aggregate function is aggregated into its local context
- used_tables_cache |= (1 << aggr_sel->join->table_count) - 1;
+ used_tables_cache|= ((table_map)1 << aggr_sel->join->tables) - 1;
} because if we do it, table elimination will assume that
- constructs like "COUNT(*)" use columns from all tables
@@ -727,7 +746,15 @@ int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
}
-int item_sum_distinct_walk(void *element, element_count num_of_dups,
+static int item_sum_distinct_walk_for_count(void *element,
+ element_count num_of_dups,
+ void *item)
+{
+ return ((Aggregator_distinct*) (item))->unique_walk_function_for_count(element);
+}
+
+
+static int item_sum_distinct_walk(void *element, element_count num_of_dups,
void *item)
{
return ((Aggregator_distinct*) (item))->unique_walk_function(element);
@@ -1097,7 +1124,12 @@ void Aggregator_distinct::endup()
{
/* go over the tree of distinct keys and calculate the aggregate value */
use_distinct_values= TRUE;
- tree->walk(table, item_sum_distinct_walk, (void*) this);
+ tree_walk_action func;
+ if (item_sum->sum_func() == Item_sum::COUNT_DISTINCT_FUNC)
+ func= item_sum_distinct_walk_for_count;
+ else
+ func= item_sum_distinct_walk;
+ tree->walk(table, func, (void*) this);
use_distinct_values= FALSE;
}
/* prevent consecutive recalculations */
@@ -1275,16 +1307,16 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
switch (args[0]->field_type()) {
case MYSQL_TYPE_DATE:
field= new Field_newdate(0, maybe_null ? (uchar*)"" : 0, 0, Field::NONE,
- name, collation.collation);
+ name);
break;
case MYSQL_TYPE_TIME:
field= new_Field_time(0, maybe_null ? (uchar*)"" : 0, 0, Field::NONE,
- name, decimals, collation.collation);
+ name, decimals);
break;
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
field= new_Field_datetime(0, maybe_null ? (uchar*)"" : 0, 0, Field::NONE,
- name, decimals, collation.collation);
+ name, decimals);
break;
default:
return Item_sum::create_tmp_field(group, table, convert_blob_length);
@@ -1474,6 +1506,22 @@ bool Aggregator_distinct::unique_walk_function(void *element)
}
+/*
+ A variant of unique_walk_function() that is to be used with Item_sum_count.
+
+ COUNT is a special aggregate function: it doesn't need the values, it only
+ needs to count them. COUNT needs to know the values are not NULLs, but NULL
+ values are not put into the Unique, so we don't need to check for NULLs here.
+*/
+
+bool Aggregator_distinct::unique_walk_function_for_count(void *element)
+{
+ Item_sum_count *sum= (Item_sum_count *)item_sum;
+ sum->count++;
+ return 0;
+}
+
+
Aggregator_distinct::~Aggregator_distinct()
{
if (tree)
@@ -1589,9 +1637,10 @@ void Item_sum_avg::fix_length_and_dec()
f_scale= args[0]->decimals;
dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale);
}
- else {
+ else
+ {
decimals= MY_MIN(args[0]->decimals + prec_increment, NOT_FIXED_DEC);
- max_length= args[0]->max_length + prec_increment;
+ max_length= MY_MIN(args[0]->max_length + prec_increment, float_length(decimals));
}
}
@@ -2920,9 +2969,9 @@ int group_concat_key_cmp_with_distinct(void* arg, const void* key1,
for (uint i= 0; i < item_func->arg_count_field; i++)
{
Item *item= item_func->args[i];
- /*
- If field_item is a const item then either get_tmp_table_field returns 0
- or it is an item over a const table.
+ /*
+ If item is a const item then either get_tmp_table_field returns 0
+ or it is an item over a const table.
*/
if (item->const_item())
continue;
@@ -2932,10 +2981,14 @@ int group_concat_key_cmp_with_distinct(void* arg, const void* key1,
the temporary table, not the original field
*/
Field *field= item->get_tmp_table_field();
- int res;
+
+ if (!field)
+ continue;
+
uint offset= (field->offset(field->table->record[0]) -
field->table->s->null_bytes);
- if((res= field->cmp((uchar*)key1 + offset, (uchar*)key2 + offset)))
+ int res= field->cmp((uchar*)key1 + offset, (uchar*)key2 + offset);
+ if (res)
return res;
}
return 0;
@@ -2965,27 +3018,29 @@ int group_concat_key_cmp_with_order(void* arg, const void* key1,
if (item->const_item())
continue;
/*
+ If item is a const item then either get_tmp_table_field returns 0
+ or it is an item over a const table.
+ */
+ if (item->const_item())
+ continue;
+ /*
We have to use get_tmp_table_field() instead of
real_item()->get_tmp_table_field() because we want the field in
the temporary table, not the original field
Note that for the case of ROLLUP, field may point to another table
- tham grp_item->table. This is howver ok as the table definitions are
+ than grp_item->table. This is however ok as the table definitions are
the same.
*/
Field *field= item->get_tmp_table_field();
- /*
- If item is a const item then either get_tmp_table_field returns 0
- or it is an item over a const table.
- */
- if (field)
- {
- int res;
- uint offset= (field->offset(field->table->record[0]) -
- field->table->s->null_bytes);
- if ((res= field->cmp((uchar*)key1 + offset, (uchar*)key2 + offset)))
- return (*order_item)->asc ? res : -res;
- }
+ if (!field)
+ continue;
+
+ uint offset= (field->offset(field->table->record[0]) -
+ field->table->s->null_bytes);
+ int res= field->cmp((uchar*)key1 + offset, (uchar*)key2 + offset);
+ if (res)
+ return (*order_item)->asc ? res : -res;
}
/*
We can't return 0 because in that case the tree class would remove this
@@ -3025,23 +3080,28 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
for (; arg < arg_end; arg++)
{
String *res;
- if (! (*arg)->const_item())
+ /*
+ We have to use get_tmp_table_field() instead of
+ real_item()->get_tmp_table_field() because we want the field in
+ the temporary table, not the original field
+ We also can't use table->field array to access the fields
+ because it contains both order and arg list fields.
+ */
+ if ((*arg)->const_item())
+ res= (*arg)->val_str(&tmp);
+ else
{
- /*
- We have to use get_tmp_table_field() instead of
- real_item()->get_tmp_table_field() because we want the field in
- the temporary table, not the original field
- We also can't use table->field array to access the fields
- because it contains both order and arg list fields.
- */
Field *field= (*arg)->get_tmp_table_field();
- uint offset= (field->offset(field->table->record[0]) -
- table->s->null_bytes);
- DBUG_ASSERT(offset < table->s->reclength);
- res= field->val_str(&tmp, key + offset);
+ if (field)
+ {
+ uint offset= (field->offset(field->table->record[0]) -
+ table->s->null_bytes);
+ DBUG_ASSERT(offset < table->s->reclength);
+ res= field->val_str(&tmp, key + offset);
+ }
+ else
+ res= (*arg)->val_str(&tmp);
}
- else
- res= (*arg)->val_str(&tmp);
if (res)
result->append(*res);
}
@@ -3089,11 +3149,12 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
Item_func_group_concat::
Item_func_group_concat(Name_resolution_context *context_arg,
bool distinct_arg, List<Item> *select_list,
- SQL_I_List<ORDER> *order_list, String *separator_arg)
+ const SQL_I_List<ORDER> &order_list,
+ String *separator_arg)
:tmp_table_param(0), separator(separator_arg), tree(0),
unique_filter(NULL), table(0),
order(0), context(context_arg),
- arg_count_order(order_list ? order_list->elements : 0),
+ arg_count_order(order_list.elements),
arg_count_field(select_list->elements),
row_count(0),
distinct(distinct_arg),
@@ -3133,7 +3194,7 @@ Item_func_group_concat(Name_resolution_context *context_arg,
if (arg_count_order)
{
ORDER **order_ptr= order;
- for (ORDER *order_item= order_list->first;
+ for (ORDER *order_item= order_list.first;
order_item != NULL;
order_item= order_item->next)
{
@@ -3181,8 +3242,14 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
order= (ORDER **)(tmp + arg_count_order);
for (uint i= 0; i < arg_count_order; i++, tmp++)
{
- memcpy(tmp, item->order[i], sizeof(ORDER));
- tmp->next= i == arg_count_order-1 ? 0 : tmp+1;
+ /*
+ The compiler-generated copy constructor is used to copy
+ all the members of the ORDER struct.
+ It's also necessary to update ORDER::next pointer
+ so that it points to new ORDER element.
+ */
+ new (tmp) st_order(*(item->order[i]));
+ tmp->next= (i + 1 == arg_count_order) ? NULL : (tmp + 1);
order[i]= tmp;
}
}
@@ -3272,12 +3339,12 @@ bool Item_func_group_concat::add()
for (uint i= 0; i < arg_count_field; i++)
{
Item *show_item= args[i];
- if (!show_item->const_item())
- {
- Field *f= show_item->get_tmp_table_field();
- if (f->is_null_in_record((const uchar*) table->record[0]))
+ if (show_item->const_item())
+ continue;
+
+ Field *field= show_item->get_tmp_table_field();
+ if (field && field->is_null_in_record((const uchar*) table->record[0]))
return 0; // Skip row if it contains null
- }
}
null_value= FALSE;
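
Several hunks in this file widen the allow_sum_func bit operations from "1 << nest_level" to "((nesting_map)1 << nest_level)". The literal 1 is a plain int, so without the cast the shift is done in 32-bit arithmetic even when the bitmap type is wider, and deep nesting levels would overflow or hit undefined behaviour. A standalone illustration, assuming only a 64-bit map type (map_t is a stand-in, not the server's nesting_map typedef):

// Sketch: widen before shifting, not after.
#include <cstdint>
#include <cstdio>

typedef uint64_t map_t;        // stand-in for a wide bitmap type

int main()
{
  int level= 40;                          // a nesting level past 31
  map_t ok_small= 1 << 10;                // fine while the level fits in int
  map_t ok_large= (map_t) 1 << level;     // shift performed in 64-bit width
  // "1 << level" for level >= 32 would shift a 32-bit int out of range
  // (undefined behaviour), which is exactly what the cast prevents.
  std::printf("%llx %llx\n",
              (unsigned long long) ok_small, (unsigned long long) ok_large);
  return 0;
}
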
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 40a28d8beae..e82e0ead1c2 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -1,7 +1,7 @@
#ifndef ITEM_SUM_INCLUDED
#define ITEM_SUM_INCLUDED
-/* Copyright (c) 2000, 2011 Oracle and/or its affiliates.
- Copyright (c) 2008-2011 Monty Program Ab
+/* Copyright (c) 2000, 2013 Oracle and/or its affiliates.
+ Copyright (c) 2008, 2013 Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -642,6 +642,7 @@ public:
virtual bool arg_is_null();
bool unique_walk_function(void *element);
+ bool unique_walk_function_for_count(void *element);
static int composite_key_cmp(void* arg, uchar* key1, uchar* key2);
};
@@ -1119,7 +1120,7 @@ public:
class Item_sum_or :public Item_sum_bit
{
public:
- Item_sum_or(Item *item_par) :Item_sum_bit(item_par,LL(0)) {}
+ Item_sum_or(Item *item_par) :Item_sum_bit(item_par, 0) {}
Item_sum_or(THD *thd, Item_sum_or *item) :Item_sum_bit(thd, item) {}
bool add();
const char *func_name() const { return "bit_or("; }
@@ -1140,7 +1141,7 @@ class Item_sum_and :public Item_sum_bit
class Item_sum_xor :public Item_sum_bit
{
public:
- Item_sum_xor(Item *item_par) :Item_sum_bit(item_par,LL(0)) {}
+ Item_sum_xor(Item *item_par) :Item_sum_bit(item_par, 0) {}
Item_sum_xor(THD *thd, Item_sum_xor *item) :Item_sum_bit(thd, item) {}
bool add();
const char *func_name() const { return "bit_xor("; }
@@ -1427,7 +1428,7 @@ class Item_func_group_concat : public Item_sum
public:
Item_func_group_concat(Name_resolution_context *context_arg,
bool is_distinct, List<Item> *is_select,
- SQL_I_List<ORDER> *is_order, String *is_separator);
+ const SQL_I_List<ORDER> &is_order, String *is_separator);
Item_func_group_concat(THD *thd, Item_func_group_concat *item);
~Item_func_group_concat();
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index e9a6727162a..a4b5a18de35 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -415,10 +415,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
l_time->minute > 59 || l_time->second > 59)
goto err;
- if (((fuzzy_date & TIME_NO_ZERO_IN_DATE) &&
- (l_time->year == 0 || l_time->month == 0 || l_time->day == 0)) ||
- ((fuzzy_date & TIME_NO_ZERO_DATE) &&
- (l_time->year == 0 && l_time->month == 0 && l_time->day == 0)))
+ int was_cut;
+ if (check_date(l_time, fuzzy_date | TIME_INVALID_DATES, &was_cut))
goto err;
if (val != val_end)
@@ -711,8 +709,8 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs,
{
longlong value;
const char *start= str;
- for (value=0; str != end && my_isdigit(cs,*str) ; str++)
- value= value*LL(10) + (longlong) (*str - '0');
+ for (value=0; str != end && my_isdigit(cs, *str) ; str++)
+ value= value*10 + *str - '0';
msec_length= 6 - (str - start);
values[i]= value;
while (str != end && !my_isdigit(cs,*str))
@@ -782,7 +780,7 @@ longlong Item_func_to_seconds::val_int_endpoint(bool left_endp,
longlong seconds;
longlong days;
int dummy; /* unused */
- if (get_arg0_date(&ltime, TIME_FUZZY_DATE))
+ if (get_arg0_date(&ltime, TIME_FUZZY_DATES))
{
/* got NULL, leave the incl_endp intact */
return LONGLONG_MIN;
@@ -860,7 +858,7 @@ longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp)
MYSQL_TIME ltime;
longlong res;
int dummy; /* unused */
- if (get_arg0_date(&ltime, TIME_FUZZY_DATE))
+ if (get_arg0_date(&ltime, 0))
{
/* got NULL, leave the incl_endp intact */
return LONGLONG_MIN;
@@ -868,7 +866,6 @@ longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp)
res=(longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
/* Set to NULL if invalid date, but keep the value */
null_value= check_date(&ltime,
- (ltime.year || ltime.month || ltime.day),
(TIME_NO_ZERO_IN_DATE | TIME_NO_ZERO_DATE),
&dummy);
if (null_value)
@@ -925,16 +922,14 @@ longlong Item_func_dayofmonth::val_int()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- (void) get_arg0_date(&ltime, TIME_FUZZY_DATE);
- return (longlong) ltime.day;
+ return get_arg0_date(&ltime, 0) ? 0 : (longlong) ltime.day;
}
longlong Item_func_month::val_int()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- (void) get_arg0_date(&ltime, TIME_FUZZY_DATE);
- return (longlong) ltime.month;
+ return get_arg0_date(&ltime, 0) ? 0 : (longlong) ltime.month;
}
@@ -958,7 +953,7 @@ String* Item_func_monthname::val_str(String* str)
uint err;
MYSQL_TIME ltime;
- if ((null_value= (get_arg0_date(&ltime, TIME_FUZZY_DATE) || !ltime.month)))
+ if ((null_value= (get_arg0_date(&ltime, 0) || !ltime.month)))
return (String *) 0;
month_name= locale->month_names->type_names[ltime.month - 1];
@@ -976,7 +971,7 @@ longlong Item_func_quarter::val_int()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, TIME_FUZZY_DATE))
+ if (get_arg0_date(&ltime, 0))
return 0;
return (longlong) ((ltime.month+2)/3);
}
@@ -985,16 +980,14 @@ longlong Item_func_hour::val_int()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- (void) get_arg0_time(&ltime);
- return ltime.hour;
+ return get_arg0_time(&ltime) ? 0 : ltime.hour;
}
longlong Item_func_minute::val_int()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- (void) get_arg0_time(&ltime);
- return ltime.minute;
+ return get_arg0_time(&ltime) ? 0 : ltime.minute;
}
/**
@@ -1004,8 +997,7 @@ longlong Item_func_second::val_int()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- (void) get_arg0_time(&ltime);
- return ltime.second;
+ return get_arg0_time(&ltime) ? 0 : ltime.second;
}
@@ -1122,8 +1114,7 @@ longlong Item_func_year::val_int()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- (void) get_arg0_date(&ltime, TIME_FUZZY_DATE);
- return (longlong) ltime.year;
+ return get_arg0_date(&ltime, 0) ? 0 : (longlong) ltime.year;
}
@@ -1155,7 +1146,7 @@ longlong Item_func_year::val_int_endpoint(bool left_endp, bool *incl_endp)
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, TIME_FUZZY_DATE))
+ if (get_arg0_date(&ltime, 0))
{
/* got NULL, leave the incl_endp intact */
return LONGLONG_MIN;
@@ -1198,7 +1189,7 @@ bool Item_func_unix_timestamp::get_timestamp_value(my_time_t *seconds,
}
MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, 0))
+ if (get_arg0_date(&ltime, TIME_NO_ZERO_IN_DATE))
return 1;
uint error_code;
@@ -1476,7 +1467,7 @@ longlong Item_temporal_func::val_int()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE | sql_mode))
+ if (get_date(&ltime, sql_mode))
return 0;
longlong v= TIME_to_ulonglong(&ltime);
return ltime.neg ? -v : v;
@@ -1487,7 +1478,7 @@ double Item_temporal_func::val_real()
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_FUZZY_DATE | sql_mode))
+ if (get_date(&ltime, sql_mode))
return 0;
return TIME_to_double(&ltime);
}
@@ -1582,7 +1573,7 @@ static void set_sec_part(ulong sec_part, MYSQL_TIME *ltime, Item *item)
{
ltime->second_part= sec_part;
if (item->decimals < TIME_SECOND_PART_DIGITS)
- ltime->second_part= sec_part_truncate(ltime->second_part, item->decimals);
+ my_time_trunc(ltime, item->decimals);
}
}
@@ -1867,7 +1858,7 @@ String *Item_func_date_format::val_str(String *str)
int is_time_flag = is_time_format ? TIME_TIME_ONLY : 0;
DBUG_ASSERT(fixed == 1);
- if (get_arg0_date(&l_time, TIME_FUZZY_DATE | is_time_flag))
+ if (get_arg0_date(&l_time, is_time_flag))
return 0;
if (!(format = args[1]->val_str(str)) || !format->length())
@@ -2041,10 +2032,15 @@ bool Item_date_add_interval::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
{
INTERVAL interval;
- if (args[0]->get_date(ltime, TIME_NO_ZERO_DATE | TIME_FUZZY_DATE | TIME_NO_ZERO_IN_DATE) ||
+ if (args[0]->get_date(ltime, 0) ||
get_interval_value(args[1], int_type, &interval))
return (null_value=1);
+ if (ltime->time_type != MYSQL_TIMESTAMP_TIME &&
+ check_date_with_warn(ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE,
+ MYSQL_TIMESTAMP_ERROR))
+ return (null_value=1);
+
if (date_sub_interval)
interval.neg = !interval.neg;
@@ -2137,7 +2133,7 @@ longlong Item_extract::val_int()
long neg;
int is_time_flag = date_value ? 0 : TIME_TIME_ONLY;
- if (get_arg0_date(&ltime, TIME_FUZZY_DATE | is_time_flag))
+ if (get_arg0_date(&ltime, is_time_flag))
return 0;
neg= ltime.neg ? -1 : 1;
@@ -2419,7 +2415,7 @@ bool Item_time_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
if (get_arg0_time(ltime))
return 1;
if (decimals < TIME_SECOND_PART_DIGITS)
- ltime->second_part= sec_part_truncate(ltime->second_part, decimals);
+ my_time_trunc(ltime, decimals);
/*
MYSQL_TIMESTAMP_TIME value can have non-zero day part,
which we should not lose.
@@ -2439,17 +2435,8 @@ bool Item_date_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0;
ltime->time_type= MYSQL_TIMESTAMP_DATE;
-
- int unused;
- if (check_date(ltime, ltime->year || ltime->month || ltime->day,
- fuzzy_date, &unused))
- {
- ErrConvTime str(ltime);
- make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &str, MYSQL_TIMESTAMP_DATE, 0);
- return (null_value= 1);
- }
- return (null_value= 0);
+ return (null_value= check_date_with_warn(ltime, fuzzy_date,
+ MYSQL_TIMESTAMP_DATE));
}
@@ -2460,8 +2447,7 @@ bool Item_datetime_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
return 1;
if (decimals < TIME_SECOND_PART_DIGITS)
- ltime->second_part= sec_part_truncate(ltime->second_part, decimals);
-
+ my_time_trunc(ltime, decimals);
/*
ltime is valid MYSQL_TYPE_TIME (according to fuzzy_date).
@@ -2573,7 +2559,7 @@ bool Item_func_add_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
if (is_date) // TIMESTAMP function
{
- if (get_arg0_date(&l_time1, TIME_FUZZY_DATE) ||
+ if (get_arg0_date(&l_time1, 0) ||
args[1]->get_time(&l_time2) ||
l_time1.time_type == MYSQL_TIMESTAMP_TIME ||
l_time2.time_type != MYSQL_TIMESTAMP_TIME)
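
The fractional-second hunks above replace ad-hoc second_part truncation with my_time_trunc() on the whole MYSQL_TIME value. The arithmetic behind truncating a microsecond fraction to N decimal digits is simply dropping the remainder modulo 10^(6-N); a standalone sketch (truncate_usec is an illustrative helper, not the server's function):

// Sketch: truncate a microsecond fraction to `dec` decimal digits (0..6).
#include <cassert>
#include <cstdio>

static unsigned long truncate_usec(unsigned long usec, unsigned dec)
{
  static const unsigned long pow10[7]=
    { 1, 10, 100, 1000, 10000, 100000, 1000000 };
  assert(dec <= 6);
  unsigned long step= pow10[6 - dec];   // e.g. dec=2 -> step=10000
  return usec - usec % step;            // keep only the leading `dec` digits
}

int main()
{
  std::printf("%lu\n", truncate_usec(123456, 2));  // prints 120000
  std::printf("%lu\n", truncate_usec(123456, 6));  // prints 123456
  return 0;
}
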
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index f25f4544e47..11e84cfc1cd 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -14,7 +14,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* Function items used by mysql */
@@ -489,8 +489,8 @@ public:
Item_temporal_func(Item *a, Item *b) :Item_func(a,b) {}
Item_temporal_func(Item *a, Item *b, Item *c) :Item_func(a,b,c) {}
enum Item_result result_type () const { return STRING_RESULT; }
- CHARSET_INFO *charset_for_protocol(void) const { return &my_charset_bin; }
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
+ Item_result cmp_type() const { return TIME_RESULT; }
String *val_str(String *str);
longlong val_int();
double val_real();
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index c1b09d16430..1aab6b45c74 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -2704,8 +2704,12 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
node.parent= data->parent; // Set parent for the new node to old parent
data->parent= numnodes; // Remember current node as new parent
+ DBUG_ASSERT(data->level <= MAX_LEVEL);
data->pos[data->level]= numnodes;
- node.level= data->level++;
+ if (data->level < MAX_LEVEL)
+ node.level= data->level++;
+ else
+ return MY_XML_ERROR;
node.type= st->current_node_type; // TAG or ATTR
node.beg= attr;
node.end= attr + len;
diff --git a/sql/key.cc b/sql/key.cc
index c1def96797c..97388f43ebc 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -335,6 +335,70 @@ bool key_cmp_if_same(TABLE *table,const uchar *key,uint idx,uint key_length)
return 0;
}
+
+/**
+ Unpack a field and append it.
+
+ @param[inout] to String to append the field contents to.
+ @param field Field to unpack.
+ @param rec Record which contains the field data.
+ @param max_length Maximum length of field to unpack
+ or 0 for unlimited.
+ @param prefix_key The field is used as a prefix key.
+*/
+
+void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
+ bool prefix_key)
+{
+ String tmp;
+ DBUG_ENTER("field_unpack");
+ if (!max_length)
+ max_length= field->pack_length();
+ if (field)
+ {
+ if (field->is_null())
+ {
+ to->append(STRING_WITH_LEN("NULL"));
+ DBUG_VOID_RETURN;
+ }
+ CHARSET_INFO *cs= field->charset();
+ field->val_str(&tmp);
+ /*
+ For BINARY(N) strip trailing zeroes to make
+ the error message nice-looking
+ */
+ if (field->binary() && field->type() == MYSQL_TYPE_STRING && tmp.length())
+ {
+ const char *tmp_end= tmp.ptr() + tmp.length();
+ while (tmp_end > tmp.ptr() && !*--tmp_end) ;
+ tmp.length(tmp_end - tmp.ptr() + 1);
+ }
+ if (cs->mbmaxlen > 1 && prefix_key)
+ {
+ /*
+ Prefix key, multi-byte charset.
+ For the columns of type CHAR(N), the above val_str()
+ call will return exactly "key_part->length" bytes,
+ which can break a multi-byte character in the middle.
+ Align, returning not more than "char_length" characters.
+ */
+ uint charpos, char_length= max_length / cs->mbmaxlen;
+ if ((charpos= my_charpos(cs, tmp.ptr(),
+ tmp.ptr() + tmp.length(),
+ char_length)) < tmp.length())
+ tmp.length(charpos);
+ }
+ if (max_length < field->pack_length())
+ tmp.length(MY_MIN(tmp.length(),max_length));
+ ErrConvString err(&tmp);
+ to->append(err.ptr());
+ }
+ else
+ to->append(STRING_WITH_LEN("???"));
+ DBUG_VOID_RETURN;
+}
+
+
/*
unpack key-fields from record to some buffer.
@@ -352,8 +416,6 @@ bool key_cmp_if_same(TABLE *table,const uchar *key,uint idx,uint key_length)
void key_unpack(String *to,TABLE *table, KEY *key)
{
KEY_PART_INFO *key_part,*key_part_end;
- Field *field;
- String tmp;
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
DBUG_ENTER("key_unpack");
@@ -373,43 +435,9 @@ void key_unpack(String *to,TABLE *table, KEY *key)
continue;
}
}
- if ((field=key_part->field))
- {
- CHARSET_INFO *cs= field->charset();
- field->val_str(&tmp);
- /*
- For BINARY(N) strip trailing zeroes to make
- the error message nice-looking
- */
- if (field->binary() && field->type() == MYSQL_TYPE_STRING && tmp.length())
- {
- const char *tmp_end= tmp.ptr() + tmp.length();
- while (tmp_end > tmp.ptr() && !*--tmp_end) ;
- tmp.length(tmp_end - tmp.ptr() + 1);
- }
- if (cs->mbmaxlen > 1 && (key_part->key_part_flag & HA_PART_KEY_SEG))
- {
- /*
- Prefix key, multi-byte charset.
- For the columns of type CHAR(N), the above val_str()
- call will return exactly "key_part->length" bytes,
- which can break a multi-byte characters in the middle.
- Align, returning not more than "char_length" characters.
- */
- uint charpos, char_length= key_part->length / cs->mbmaxlen;
- if ((charpos= my_charpos(cs, tmp.ptr(),
- tmp.ptr() + tmp.length(),
- char_length)) < tmp.length())
- tmp.length(charpos);
- }
- if (key_part->length < field->pack_length())
- tmp.length(MY_MIN(tmp.length(),key_part->length));
- ErrConvString err(&tmp);
- to->append(err.ptr());
- }
- else
- to->append(STRING_WITH_LEN("???"));
- }
+ field_unpack(to, key_part->field, table->record[0], key_part->length,
+ test(key_part->key_part_flag & HA_PART_KEY_SEG));
+ }
dbug_tmp_restore_column_map(table->read_set, old_map);
DBUG_VOID_RETURN;
}
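
field_unpack() above aligns a prefix-key truncation to character boundaries with my_charpos() so that a multi-byte character is never cut in half. The same idea in standalone form, assuming well-formed UTF-8 input (utf8_prefix is an illustrative helper, not a server function):

// Sketch: keep at most `max_chars` characters of a UTF-8 string without
// cutting a multi-byte sequence in half (the role my_charpos() plays above).
#include <cstdio>
#include <string>

static std::string utf8_prefix(const std::string &s, size_t max_chars)
{
  size_t i= 0, chars= 0;
  while (i < s.size() && chars < max_chars)
  {
    unsigned char c= (unsigned char) s[i];      // assumed to be a lead byte
    size_t len= (c < 0x80) ? 1 :                // ASCII
                (c < 0xE0) ? 2 :                // 2-byte sequence
                (c < 0xF0) ? 3 : 4;             // 3- or 4-byte sequence
    i+= len;
    chars++;
  }
  return s.substr(0, i);
}

int main()
{
  std::printf("%s\n", utf8_prefix("héllo", 2).c_str());  // prints "hé"
  return 0;
}
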
diff --git a/sql/key.h b/sql/key.h
index 42e29a0937d..de2b00a4773 100644
--- a/sql/key.h
+++ b/sql/key.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -33,6 +33,8 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
uint key_length);
bool key_cmp_if_same(TABLE *form,const uchar *key,uint index,uint key_length);
void key_unpack(String *to, TABLE *form, KEY *key);
+void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
+ bool prefix_key);
bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields);
int key_cmp(KEY_PART_INFO *key_part, const uchar *key, uint key_length);
ulong key_hashnr(KEY *key_info, uint used_key_parts, const uchar *key);
diff --git a/sql/keycaches.cc b/sql/keycaches.cc
index 84ed67d00f0..120aa7e1029 100644
--- a/sql/keycaches.cc
+++ b/sql/keycaches.cc
@@ -20,6 +20,7 @@
****************************************************************************/
NAMED_ILIST key_caches;
+NAMED_ILIST rpl_filters;
/**
ilink (intrusive list element) with a name
@@ -66,6 +67,23 @@ uchar* find_named(I_List<NAMED_ILINK> *list, const char *name, uint length,
}
+bool NAMED_ILIST::delete_element(const char *name, uint length, void (*free_element)(const char *name, uchar*))
+{
+ I_List_iterator<NAMED_ILINK> it(*this);
+ NAMED_ILINK *element;
+ DBUG_ENTER("NAMED_ILIST::delete_element");
+ while ((element= it++))
+ {
+ if (element->cmp(name, length))
+ {
+ (*free_element)(element->name, element->data);
+ delete element;
+ DBUG_RETURN(0);
+ }
+ }
+ DBUG_RETURN(1);
+}
+
void NAMED_ILIST::delete_elements(void (*free_element)(const char *name, uchar*))
{
NAMED_ILINK *element;
@@ -159,3 +177,55 @@ bool process_key_caches(process_key_cache_t func, void *param)
return res != 0;
}
+/* Rpl_filter functions */
+
+LEX_STRING default_rpl_filter_base= {C_STRING_WITH_LEN("")};
+
+Rpl_filter *get_rpl_filter(LEX_STRING *filter_name)
+{
+ if (!filter_name->length)
+ filter_name= &default_rpl_filter_base;
+ return ((Rpl_filter*) find_named(&rpl_filters,
+ filter_name->str, filter_name->length, 0));
+}
+
+Rpl_filter *create_rpl_filter(const char *name, uint length)
+{
+ Rpl_filter *filter;
+ DBUG_ENTER("create_rpl_filter");
+ DBUG_PRINT("enter",("name: %.*s", length, name));
+
+ filter= new Rpl_filter;
+ if (filter)
+ {
+ if (!new NAMED_ILINK(&rpl_filters, name, length, (uchar*) filter))
+ {
+ delete filter;
+ filter= 0;
+ }
+ }
+ DBUG_RETURN(filter);
+}
+
+
+Rpl_filter *get_or_create_rpl_filter(const char *name, uint length)
+{
+ LEX_STRING rpl_filter_name;
+ Rpl_filter *filter;
+
+ rpl_filter_name.str= (char *) name;
+ rpl_filter_name.length= length;
+ if (!(filter= get_rpl_filter(&rpl_filter_name)))
+ filter= create_rpl_filter(name, length);
+ return filter;
+}
+
+void free_rpl_filter(const char *name, Rpl_filter *filter)
+{
+ delete filter;
+}
+
+void free_all_rpl_filters()
+{
+ rpl_filters.delete_elements((void (*)(const char*, uchar*)) free_rpl_filter);
+}
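
The new rpl_filter functions above implement a get-or-create registry of named filters on top of NAMED_ILIST, with the empty name standing for the default filter. The same pattern in a minimal standalone form, using std::map instead of the server's intrusive list (Filter, registry and the function names are illustrative only):

// Sketch: get-or-create lookup in a registry of named objects.
#include <cstdio>
#include <map>
#include <string>

struct Filter { std::string name; };

static std::map<std::string, Filter*> registry;

static Filter *get_or_create_filter(const std::string &name)
{
  // As in get_rpl_filter(), an empty name stands for the default filter.
  auto it= registry.find(name);
  if (it != registry.end())
    return it->second;                 // reuse the existing filter
  Filter *f= new Filter{name};
  registry[name]= f;
  return f;
}

static void free_all_filters()
{
  for (auto &e : registry)
    delete e.second;
  registry.clear();
}

int main()
{
  Filter *a= get_or_create_filter("channel1");
  Filter *b= get_or_create_filter("channel1");
  std::printf("%d\n", a == b);   // prints 1: the second call reuses the filter
  free_all_filters();
  return 0;
}
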
diff --git a/sql/keycaches.h b/sql/keycaches.h
index 04d3f6145e7..32537339e2e 100644
--- a/sql/keycaches.h
+++ b/sql/keycaches.h
@@ -18,6 +18,7 @@
#include "sql_list.h"
#include <keycache.h>
+#include <rpl_filter.h>
extern "C"
{
@@ -30,8 +31,10 @@ class NAMED_ILIST: public I_List<NAMED_ILINK>
{
public:
void delete_elements(void (*free_element)(const char*, uchar*));
+ bool delete_element(const char *name, uint length, void (*free_element)(const char*, uchar*));
};
+/* For key cache */
extern LEX_STRING default_key_cache_base;
extern KEY_CACHE zero_key_cache;
extern NAMED_ILIST key_caches;
@@ -42,4 +45,14 @@ KEY_CACHE *get_or_create_key_cache(const char *name, uint length);
void free_key_cache(const char *name, KEY_CACHE *key_cache);
bool process_key_caches(process_key_cache_t func, void *param);
+/* For Rpl_filter */
+extern LEX_STRING default_rpl_filter_base;
+extern NAMED_ILIST rpl_filters;
+
+Rpl_filter *create_rpl_filter(const char *name, uint length);
+Rpl_filter *get_rpl_filter(LEX_STRING *filter_name);
+Rpl_filter *get_or_create_rpl_filter(const char *name, uint length);
+void free_rpl_filter(const char *name, Rpl_filter *filter);
+void free_all_rpl_filters(void);
+
#endif /* KEYCACHES_INCLUDED */
diff --git a/sql/lex.h b/sql/lex.h
index 06d6012b469..c5229beb653 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -77,6 +77,7 @@ static SYMBOL symbols[] = {
{ "AUTHORS", SYM(AUTHORS_SYM)},
{ "AUTO_INCREMENT", SYM(AUTO_INC)},
{ "AUTOEXTEND_SIZE", SYM(AUTOEXTEND_SIZE_SYM)},
+ { "AUTO", SYM(AUTO_SYM)},
{ "AVG", SYM(AVG_SYM)},
{ "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)},
{ "BACKUP", SYM(BACKUP_SYM)},
@@ -152,6 +153,7 @@ static SYMBOL symbols[] = {
{ "CUBE", SYM(CUBE_SYM)},
{ "CURRENT", SYM(CURRENT_SYM)},
{ "CURRENT_DATE", SYM(CURDATE)},
+ { "CURRENT_POS", SYM(CURRENT_POS_SYM)},
{ "CURRENT_TIME", SYM(CURTIME)},
{ "CURRENT_TIMESTAMP", SYM(NOW_SYM)},
{ "CURRENT_USER", SYM(CURRENT_USER)},
@@ -333,6 +335,7 @@ static SYMBOL symbols[] = {
{ "LOW_PRIORITY", SYM(LOW_PRIORITY)},
{ "MASTER", SYM(MASTER_SYM)},
{ "MASTER_CONNECT_RETRY", SYM(MASTER_CONNECT_RETRY_SYM)},
+ { "MASTER_GTID_POS", SYM(MASTER_GTID_POS_SYM)},
{ "MASTER_HOST", SYM(MASTER_HOST_SYM)},
{ "MASTER_LOG_FILE", SYM(MASTER_LOG_FILE_SYM)},
{ "MASTER_LOG_POS", SYM(MASTER_LOG_POS_SYM)},
@@ -349,6 +352,7 @@ static SYMBOL symbols[] = {
{ "MASTER_SSL_KEY", SYM(MASTER_SSL_KEY_SYM)},
{ "MASTER_SSL_VERIFY_SERVER_CERT", SYM(MASTER_SSL_VERIFY_SERVER_CERT_SYM)},
{ "MASTER_USER", SYM(MASTER_USER_SYM)},
+ { "MASTER_USE_GTID", SYM(MASTER_USE_GTID_SYM)},
{ "MASTER_HEARTBEAT_PERIOD", SYM(MASTER_HEARTBEAT_PERIOD_SYM)},
{ "MATCH", SYM(MATCH)},
{ "MAX_CONNECTIONS_PER_HOUR", SYM(MAX_CONNECTIONS_PER_HOUR)},
@@ -523,6 +527,7 @@ static SYMBOL symbols[] = {
{ "SIMPLE", SYM(SIMPLE_SYM)},
{ "SLAVE", SYM(SLAVE)},
{ "SLAVES", SYM(SLAVES)},
+ { "SLAVE_POS", SYM(SLAVE_POS_SYM)},
{ "SLOW", SYM(SLOW)},
{ "SNAPSHOT", SYM(SNAPSHOT_SYM)},
{ "SMALLINT", SYM(SMALLINT)},
diff --git a/sql/lex_symbol.h b/sql/lex_symbol.h
index 000c0709071..5f3c70a50a4 100644
--- a/sql/lex_symbol.h
+++ b/sql/lex_symbol.h
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* This struct includes all reserved words and functions */
diff --git a/sql/lock.cc b/sql/lock.cc
index de56ff09abc..c3f6da02ca1 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -93,7 +93,7 @@ extern HASH open_cache;
static int lock_external(THD *thd, TABLE **table,uint count);
static int unlock_external(THD *thd, TABLE **table,uint count);
-static void print_lock_error(int error, TABLE *table);
+static void print_lock_error(int error, TABLE *);
/* Map the return value of thr_lock to an error from errmsg.txt */
static int thr_lock_errno_to_mysql[]=
@@ -893,6 +893,7 @@ bool lock_object_name(THD *thd, MDL_key::enum_mdl_namespace mdl_type,
return FALSE;
}
+
static void print_lock_error(int error, TABLE *table)
{
int textno;
@@ -912,7 +913,6 @@ static void print_lock_error(int error, TABLE *table)
my_error(ER_ILLEGAL_HA, MYF(0), table->file->table_type(),
table->s->db.str, table->s->table_name.str);
DBUG_VOID_RETURN;
- break;
default:
textno=ER_CANT_LOCK;
break;
diff --git a/sql/log.cc b/sql/log.cc
index 78fbd2cf0ab..1295dc087fd 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**
@@ -119,6 +119,7 @@ static MYSQL_BIN_LOG::xid_count_per_binlog *
static bool start_binlog_background_thread();
+static rpl_binlog_state rpl_global_gtid_binlog_state;
/**
purge logs, master and slave sides both, related error code
@@ -185,7 +186,7 @@ Silence_log_table_errors::handle_condition(THD *,
Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
- strmake(m_message, msg, sizeof(m_message)-1);
+ strmake_buf(m_message, msg);
return TRUE;
}
@@ -686,7 +687,8 @@ bool Log_to_csv_event_handler::
/* do a write */
if (table->field[1]->store(user_host, user_host_len, client_cs) ||
table->field[2]->store((longlong) thread_id, TRUE) ||
- table->field[3]->store((longlong) server_id, TRUE) ||
+ table->field[3]->store((longlong) global_system_variables.server_id,
+ TRUE) ||
table->field[4]->store(command_type, command_type_len, client_cs))
goto err;
@@ -883,7 +885,7 @@ bool Log_to_csv_event_handler::
table->field[8]->set_notnull();
}
- if (table->field[9]->store((longlong) server_id, TRUE))
+ if (table->field[9]->store((longlong)global_system_variables.server_id, TRUE))
goto err;
table->field[9]->set_notnull();
@@ -2293,7 +2295,7 @@ static int find_uniq_filename(char *name)
DBUG_RETURN(1);
}
file_info= dir_info->dir_entry;
- for (i= dir_info->number_off_files ; i-- ; file_info++)
+ for (i= dir_info->number_of_files ; i-- ; file_info++)
{
if (memcmp(file_info->name, start, length) == 0 &&
test_if_number(file_info->name+length, &number,0))
@@ -2304,7 +2306,7 @@ static int find_uniq_filename(char *name)
my_dirend(dir_info);
/* check if reached the maximum possible extension number */
- if ((max_found == MAX_LOG_UNIQUE_FN_EXT))
+ if (max_found == MAX_LOG_UNIQUE_FN_EXT)
{
sql_print_error("Log filename extension number exhausted: %06lu. \
Please fix this by archiving old logs and \
@@ -2941,7 +2943,7 @@ MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period)
bytes_written(0), file_id(1), open_count(1),
group_commit_queue(0), group_commit_queue_busy(FALSE),
num_commits(0), num_group_commits(0),
- sync_period_ptr(sync_period), sync_counter(0),
+ sync_period_ptr(sync_period), sync_counter(0), state_read(false),
is_relay_log(0), signal_cnt(0),
checksum_alg_reset(BINLOG_CHECKSUM_ALG_UNDEF),
relay_log_checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF),
@@ -2968,6 +2970,19 @@ void MYSQL_BIN_LOG::cleanup()
{
xid_count_per_binlog *b;
+ /* Wait for the binlog background thread to stop. */
+ if (!is_relay_log && binlog_background_thread_started)
+ {
+ mysql_mutex_lock(&LOCK_binlog_background_thread);
+ binlog_background_thread_stop= true;
+ mysql_cond_signal(&COND_binlog_background_thread);
+ while (binlog_background_thread_stop)
+ mysql_cond_wait(&COND_binlog_background_thread_end,
+ &LOCK_binlog_background_thread);
+ mysql_mutex_unlock(&LOCK_binlog_background_thread);
+ binlog_background_thread_started= false;
+ }
+
inited= 0;
close(LOG_CLOSE_INDEX|LOG_CLOSE_STOP_EVENT);
delete description_event_for_queue;
@@ -2984,19 +2999,6 @@ void MYSQL_BIN_LOG::cleanup()
my_free(b);
}
- /* Wait for the binlog background thread to stop. */
- if (!is_relay_log && binlog_background_thread_started)
- {
- mysql_mutex_lock(&LOCK_binlog_background_thread);
- binlog_background_thread_stop= true;
- mysql_cond_signal(&COND_binlog_background_thread);
- while (binlog_background_thread_stop)
- mysql_cond_wait(&COND_binlog_background_thread_end,
- &LOCK_binlog_background_thread);
- mysql_mutex_unlock(&LOCK_binlog_background_thread);
- binlog_background_thread_started= false;
- }
-
mysql_mutex_destroy(&LOCK_log);
mysql_mutex_destroy(&LOCK_index);
mysql_mutex_destroy(&LOCK_xid_list);
@@ -3007,6 +3009,14 @@ void MYSQL_BIN_LOG::cleanup()
mysql_cond_destroy(&COND_binlog_background_thread);
mysql_cond_destroy(&COND_binlog_background_thread_end);
}
+
+ /*
+ Free data for global binlog state.
+ We can't do that automatically as we need to do this before
+ safemalloc is shut down.
+ */
+ if (!is_relay_log)
+ rpl_global_gtid_binlog_state.free();
DBUG_VOID_RETURN;
}
@@ -3135,6 +3145,9 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
DBUG_ENTER("MYSQL_BIN_LOG::open");
DBUG_PRINT("enter",("log_type: %d",(int) log_type_arg));
+ if (!is_relay_log && read_state_from_file())
+ DBUG_RETURN(1);
+
if (!is_relay_log && !binlog_background_thread_started &&
start_binlog_background_thread())
DBUG_RETURN(1);
@@ -3248,6 +3261,47 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
if (!is_relay_log)
{
char buf[FN_REFLEN];
+
+ /*
+ Output a Gtid_list_log_event at the start of the binlog file.
+
+ This is used to quickly determine which GTIDs are found in binlog
+ files earlier than this one, and which are found in this (or later)
+ binlogs.
+
+ The list gives a mapping from (domain_id, server_id) -> seq_no (so
+ this means that there is at most one entry for every unique pair
+ (domain_id, server_id) in the list). It indicates that this seq_no is
+ the last one found in an earlier binlog file for this (domain_id,
+ server_id) combination - so any higher seq_no should be searched for
+ in this binlog file, or a later one.
+
+ This makes it possible to locate the binlog file containing a given GTID by
+ scanning backwards, reading just the Gtid_list_log_event at the
+ start of each file, and scanning only the relevant binlog file when
+ found, not all binlog files.
+
+ The existence of a given entry (domain_id, server_id, seq_no)
+ guarantees only that this seq_no will not be found in this or any
+ later binlog file. It does not guarantee that it can be found in an
+ earlier binlog file; for example, the file may have been purged.
+
+ If there is no entry for a given (domain_id, server_id) pair, then
+ it means that no such GTID exists in any earlier binlog. It is
+ permissible to remove such a pair from future Gtid_list_log_events
+ if all previous binlog files containing such GTIDs have been purged
+ (though such optimization is not performed at the time of this
+ writing). So if there is no entry for a given GTID, it means that such a
+ GTID should be searched for in this or a later binlog file, same as if
+ there had been an entry (domain_id, server_id, 0).
+ */
+
+ Gtid_list_log_event gl_ev(&rpl_global_gtid_binlog_state, 0);
+ if (gl_ev.write(&log_file))
+ goto err;
+
+ /* Output a binlog checkpoint event at the start of the binlog file. */
+
/*
Construct an entry in the binlog_xid_count_list for the new binlog
file (we will not link it into the list until we know the new file
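
The Gtid_list comment in the hunk above describes a per-file map from (domain_id, server_id) to the highest seq_no contained in earlier binlog files, and how scanning the files backwards locates where to start searching for a wanted GTID. A standalone sketch of that backward scan over such maps (DomServ, GtidList and find_start_file are illustrative names; files are ordered oldest to newest):

// Sketch: locate the binlog file to scan for a GTID using only the
// Gtid_list of each file (last seq_no per (domain_id, server_id) that
// appears in *earlier* files).
#include <cstdint>
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

typedef std::pair<uint32_t, uint32_t> DomServ;   // (domain_id, server_id)
typedef std::map<DomServ, uint64_t>   GtidList;

// Return the index of the first file (scanning newest to oldest) whose list
// shows the wanted seq_no cannot be in any earlier file, or -1 if even the
// oldest file's list already covers it (i.e. it lies in a purged file).
static int find_start_file(const std::vector<GtidList> &files,
                           DomServ key, uint64_t wanted)
{
  for (int i= (int) files.size() - 1; i >= 0; i--)
  {
    std::map<DomServ, uint64_t>::const_iterator it= files[i].find(key);
    uint64_t last_in_earlier= (it == files[i].end()) ? 0 : it->second;
    if (wanted > last_in_earlier)
      return i;            // not in any earlier file: start scanning here
  }
  return -1;
}

int main()
{
  DomServ key(0, 1);
  std::vector<GtidList> files(3);   // three binlogs, oldest first
  files[1][key]= 100;               // files before #1 hold up to seq_no 100
  files[2][key]= 250;               // files before #2 hold up to seq_no 250
  std::printf("start scanning at binlog #%d\n",
              find_start_file(files, key, 180));   // prints 1
  return 0;
}
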
@@ -3334,8 +3388,7 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
mysql_file_sync(log_file.file, MYF(MY_WME|MY_SYNC_FILESIZE)))
goto err;
mysql_mutex_lock(&LOCK_commit_ordered);
- strmake(last_commit_pos_file, log_file_name,
- sizeof(last_commit_pos_file)-1);
+ strmake_buf(last_commit_pos_file, log_file_name);
last_commit_pos_offset= my_b_tell(&log_file);
mysql_mutex_unlock(&LOCK_commit_ordered);
@@ -3423,7 +3476,7 @@ int MYSQL_BIN_LOG::get_current_log(LOG_INFO* linfo)
int MYSQL_BIN_LOG::raw_get_current_log(LOG_INFO* linfo)
{
- strmake(linfo->log_file_name, log_file_name, sizeof(linfo->log_file_name)-1);
+ strmake_buf(linfo->log_file_name, log_file_name);
linfo->pos = my_b_tell(&log_file);
return 0;
}
@@ -3674,7 +3727,8 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
const char* save_name;
DBUG_ENTER("reset_logs");
- ha_reset_logs(thd);
+ if (thd)
+ ha_reset_logs(thd);
/*
We need to get both locks to be sure that no one is trying to
write to the index log file.
@@ -3718,17 +3772,10 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
/* Now wait for all checkpoint requests and pending unlog() to complete. */
mysql_mutex_lock(&LOCK_xid_list);
- xid_count_per_binlog *b;
for (;;)
{
- I_List_iterator<xid_count_per_binlog> it(binlog_xid_count_list);
- while ((b= it++))
- {
- if (b->xid_count > 0)
- break;
- }
- if (!b)
- break; /* No more pending XIDs */
+ if (is_xidlist_idle_nolock())
+ break;
/*
Wait until signalled that one more binlog dropped to zero, then check
again.
@@ -3777,7 +3824,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
for (;;)
{
- if ((error= my_delete_allow_opened(linfo.log_file_name, MYF(0))) != 0)
+ if ((error= my_delete(linfo.log_file_name, MYF(0))) != 0)
{
if (my_errno == ENOENT)
{
@@ -3806,9 +3853,14 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
break;
}
+ if (!is_relay_log)
+ {
+ rpl_global_gtid_binlog_state.reset();
+ }
+
/* Start logging with a new file */
close(LOG_CLOSE_INDEX | LOG_CLOSE_TO_BE_OPENED);
- if ((error= my_delete_allow_opened(index_file_name, MYF(0)))) // Reset (open will update)
+ if ((error= my_delete(index_file_name, MYF(0)))) // Reset (open will update)
{
if (my_errno == ENOENT)
{
@@ -3945,8 +3997,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
Reset rli's coordinates to the current log.
*/
rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
- strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->event_relay_log_name)-1);
+ strmake_buf(rli->event_relay_log_name,rli->linfo.log_file_name);
/*
If we removed the rli->group_relay_log_name file,
@@ -3956,8 +4007,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
if (included)
{
rli->group_relay_log_pos = BIN_LOG_HEADER_SIZE;
- strmake(rli->group_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->group_relay_log_name)-1);
+ strmake_buf(rli->group_relay_log_name,rli->linfo.log_file_name);
rli->notify_group_relay_log_name_update();
}
@@ -4465,9 +4515,7 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
else
{
if (stat_area.st_mtime < purge_time)
- strmake(to_log,
- log_info.log_file_name,
- sizeof(log_info.log_file_name) - 1);
+ strmake_buf(to_log, log_info.log_file_name);
else
break;
}
@@ -4506,6 +4554,32 @@ MYSQL_BIN_LOG::can_purge_log(const char *log_file_name)
#endif /* HAVE_REPLICATION */
+bool
+MYSQL_BIN_LOG::is_xidlist_idle()
+{
+ bool res;
+ mysql_mutex_lock(&LOCK_xid_list);
+ res= is_xidlist_idle_nolock();
+ mysql_mutex_unlock(&LOCK_xid_list);
+ return res;
+}
+
+
+bool
+MYSQL_BIN_LOG::is_xidlist_idle_nolock()
+{
+ xid_count_per_binlog *b;
+
+ I_List_iterator<xid_count_per_binlog> it(binlog_xid_count_list);
+ while ((b= it++))
+ {
+ if (b->xid_count > 0)
+ return false;
+ }
+ return true;
+}
+
+
/**
Create a new log file name.
@@ -5083,8 +5157,7 @@ binlog_start_consistent_snapshot(handlerton *hton, THD *thd)
binlog_cache_mngr *const cache_mngr= thd->binlog_setup_trx_data();
/* Server layer calls us with LOCK_commit_ordered locked, so this is safe. */
- strmake(cache_mngr->last_commit_pos_file, mysql_bin_log.last_commit_pos_file,
- sizeof(cache_mngr->last_commit_pos_file)-1);
+ strmake_buf(cache_mngr->last_commit_pos_file, mysql_bin_log.last_commit_pos_file);
cache_mngr->last_commit_pos_offset= mysql_bin_log.last_commit_pos_offset;
trans_register_ha(thd, TRUE, hton);
@@ -5294,6 +5367,237 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
DBUG_RETURN(error);
}
+
+/* Generate a new global transaction ID, and write it to the binlog */
+bool
+MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone,
+ bool is_transactional)
+{
+ rpl_gtid gtid;
+ uint32 domain_id= thd->variables.gtid_domain_id;
+ uint32 server_id= thd->variables.server_id;
+ uint64 seq_no= thd->variables.gtid_seq_no;
+ int err;
+
+ /*
+ Reset the session variable gtid_seq_no, to reduce the risk of accidentally
+ producing a duplicate GTID.
+ */
+ thd->variables.gtid_seq_no= 0;
+ if (seq_no != 0)
+ {
+ /* Use the specified sequence number. */
+ gtid.domain_id= domain_id;
+ gtid.server_id= server_id;
+ gtid.seq_no= seq_no;
+ mysql_mutex_lock(&LOCK_rpl_gtid_state);
+ err= rpl_global_gtid_binlog_state.update(&gtid, opt_gtid_strict_mode);
+ mysql_mutex_unlock(&LOCK_rpl_gtid_state);
+ if (err && thd->get_stmt_da()->sql_errno()==ER_GTID_STRICT_OUT_OF_ORDER)
+ errno= ER_GTID_STRICT_OUT_OF_ORDER;
+ }
+ else
+ {
+ /* Allocate the next sequence number for the GTID. */
+ mysql_mutex_lock(&LOCK_rpl_gtid_state);
+ err= rpl_global_gtid_binlog_state.update_with_next_gtid(domain_id,
+ server_id, &gtid);
+ mysql_mutex_unlock(&LOCK_rpl_gtid_state);
+ seq_no= gtid.seq_no;
+ }
+ if (err)
+ return true;
+
+ Gtid_log_event gtid_event(thd, seq_no, domain_id, standalone,
+ LOG_EVENT_SUPPRESS_USE_F, is_transactional);
+
+ /* Write the event to the binary log. */
+ if (gtid_event.write(&mysql_bin_log.log_file))
+ return true;
+ status_var_add(thd->status_var.binlog_bytes_written, gtid_event.data_written);
+
+ return false;
+}
+
+
+int
+MYSQL_BIN_LOG::write_state_to_file()
+{
+ File file_no;
+ IO_CACHE cache;
+ char buf[FN_REFLEN];
+ int err;
+ bool opened= false;
+ bool inited= false;
+
+ fn_format(buf, opt_bin_logname, mysql_data_home, ".state",
+ MY_UNPACK_FILENAME);
+ if ((file_no= mysql_file_open(key_file_binlog_state, buf,
+ O_RDWR|O_CREAT|O_TRUNC|O_BINARY,
+ MYF(MY_WME))) < 0)
+ {
+ err= 1;
+ goto err;
+ }
+ opened= true;
+ if ((err= init_io_cache(&cache, file_no, IO_SIZE, WRITE_CACHE, 0, 0,
+ MYF(MY_WME|MY_WAIT_IF_FULL))))
+ goto err;
+ inited= true;
+ if ((err= rpl_global_gtid_binlog_state.write_to_iocache(&cache)))
+ goto err;
+ inited= false;
+ if ((err= end_io_cache(&cache)))
+ goto err;
+ if ((err= mysql_file_sync(file_no, MYF(MY_WME|MY_SYNC_FILESIZE))))
+ goto err;
+ goto end;
+
+err:
+ sql_print_error("Error writing binlog state to file '%s'.\n", buf);
+ if (inited)
+ end_io_cache(&cache);
+end:
+ if (opened)
+ mysql_file_close(file_no, MYF(0));
+
+ return err;
+}
+
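+/*
+  A sketch of the resulting file name (assuming fn_format() keeps the binlog
+  base name and appends the ".state" extension): with --log-bin=master-bin
+  the state written above goes to master-bin.state in the data directory,
+  next to the binlog files and the binlog index file.
+*/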
+
+int
+MYSQL_BIN_LOG::read_state_from_file()
+{
+ File file_no;
+ IO_CACHE cache;
+ char buf[FN_REFLEN];
+ int err;
+ bool opened= false;
+ bool inited= false;
+
+ if (state_read)
+ return 0;
+ state_read= true;
+
+ fn_format(buf, opt_bin_logname, mysql_data_home, ".state",
+ MY_UNPACK_FILENAME);
+ if ((file_no= mysql_file_open(key_file_binlog_state, buf,
+ O_RDONLY|O_BINARY, MYF(0))) < 0)
+ {
+ if (my_errno != ENOENT)
+ {
+ err= 1;
+ goto err;
+ }
+ else
+ {
+ /*
+ If the state file does not exist, this is the first server startup
+ with GTID enabled. So initialize to empty state.
+ */
+ rpl_global_gtid_binlog_state.reset();
+ err= 0;
+ goto end;
+ }
+ }
+ opened= true;
+ if ((err= init_io_cache(&cache, file_no, IO_SIZE, READ_CACHE, 0, 0,
+ MYF(MY_WME|MY_WAIT_IF_FULL))))
+ goto err;
+ inited= true;
+ if ((err= rpl_global_gtid_binlog_state.read_from_iocache(&cache)))
+ goto err;
+ goto end;
+
+err:
+ sql_print_error("Error reading binlog GTID state from file '%s'.\n", buf);
+end:
+ if (inited)
+ end_io_cache(&cache);
+ if (opened)
+ mysql_file_close(file_no, MYF(0));
+
+ return err;
+}
+
+
+int
+MYSQL_BIN_LOG::get_most_recent_gtid_list(rpl_gtid **list, uint32 *size)
+{
+ return rpl_global_gtid_binlog_state.get_most_recent_gtid_list(list, size);
+}
+
+
+bool
+MYSQL_BIN_LOG::append_state_pos(String *str)
+{
+ bool err;
+
+ mysql_mutex_lock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ err= rpl_global_gtid_binlog_state.append_pos(str);
+ mysql_mutex_unlock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ return err;
+}
+
+
+bool
+MYSQL_BIN_LOG::find_in_binlog_state(uint32 domain_id, uint32 server_id,
+ rpl_gtid *out_gtid)
+{
+ rpl_gtid *gtid;
+ mysql_mutex_lock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ if ((gtid= rpl_global_gtid_binlog_state.find(domain_id, server_id)))
+ *out_gtid= *gtid;
+ mysql_mutex_unlock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ return gtid != NULL;
+}
+
+
+bool
+MYSQL_BIN_LOG::lookup_domain_in_binlog_state(uint32 domain_id,
+ rpl_gtid *out_gtid)
+{
+ rpl_gtid *found_gtid;
+ bool res= false;
+
+ mysql_mutex_lock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ if ((found_gtid= rpl_global_gtid_binlog_state.find_most_recent(domain_id)))
+ {
+ *out_gtid= *found_gtid;
+ res= true;
+ }
+ mysql_mutex_unlock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+
+ return res;
+}
+
+
+int
+MYSQL_BIN_LOG::bump_seq_no_counter_if_needed(uint32 domain_id, uint64 seq_no)
+{
+ int err;
+
+ mysql_mutex_lock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ err= rpl_global_gtid_binlog_state.bump_seq_no_if_needed(domain_id, seq_no);
+ mysql_mutex_unlock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ return err;
+}
+
+
+bool
+MYSQL_BIN_LOG::check_strict_gtid_sequence(uint32 domain_id, uint32 server_id,
+ uint64 seq_no)
+{
+ bool err;
+
+ mysql_mutex_lock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ err= rpl_global_gtid_binlog_state.check_strict_sequence(domain_id, server_id,
+ seq_no);
+ mysql_mutex_unlock(&rpl_global_gtid_binlog_state.LOCK_binlog_state);
+ return err;
+}
+
+
/**
Write an event to the binary log. If with_annotate != NULL and
*with_annotate = TRUE write also Annotate_rows before the event
@@ -5363,6 +5667,8 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info, my_bool *with_annotate)
my_org_b_tell= my_b_tell(file);
mysql_mutex_lock(&LOCK_log);
prev_binlog_id= current_binlog_id;
+ if (write_gtid_event(thd, true, using_trans))
+ goto err;
}
else
{
@@ -6235,19 +6541,6 @@ MYSQL_BIN_LOG::write_transaction_to_binlog(THD *thd,
break;
}
- /*
- Log "BEGIN" at the beginning of every transaction. Here, a transaction is
- either a BEGIN..COMMIT block or a single statement in autocommit mode.
-
- Create the necessary events here, where we have the correct THD (and
- thread context).
-
- Due to group commit the actual writing to binlog may happen in a different
- thread.
- */
- Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), using_trx_cache, TRUE,
- TRUE, 0);
- entry.begin_event= &qinfo;
entry.end_event= end_ev;
if (cache_mngr->stmt_cache.has_incident() ||
cache_mngr->trx_cache.has_incident())
@@ -6444,10 +6737,10 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
*/
DBUG_ASSERT(!cache_mngr->stmt_cache.empty() || !cache_mngr->trx_cache.empty());
- current->error= write_transaction_or_stmt(current);
+ if ((current->error= write_transaction_or_stmt(current)))
+ current->commit_errno= errno;
- strmake(cache_mngr->last_commit_pos_file, log_file_name,
- sizeof(cache_mngr->last_commit_pos_file)-1);
+ strmake_buf(cache_mngr->last_commit_pos_file, log_file_name);
commit_offset= my_b_write_tell(&log_file);
cache_mngr->last_commit_pos_offset= commit_offset;
if (cache_mngr->using_xa && cache_mngr->xa_xid)
@@ -6623,10 +6916,8 @@ MYSQL_BIN_LOG::write_transaction_or_stmt(group_commit_entry *entry)
{
binlog_cache_mngr *mngr= entry->cache_mngr;
- if (entry->begin_event->write(&log_file))
+ if (write_gtid_event(entry->thd, false, entry->using_trx_cache))
return ER_ERROR_ON_WRITE;
- status_var_add(entry->thd->status_var.binlog_bytes_written,
- entry->begin_event->data_written);
if (entry->using_stmt_cache && !mngr->stmt_cache.empty() &&
write_cache(entry->thd, mngr->get_binlog_cache_log(FALSE)))
@@ -6766,6 +7057,8 @@ int MYSQL_BIN_LOG::wait_for_update_bin_log(THD* thd,
void MYSQL_BIN_LOG::close(uint exiting)
{ // One can't set log_type here!
+ bool failed_to_save_state= false;
+
DBUG_ENTER("MYSQL_BIN_LOG::close");
DBUG_PRINT("enter",("exiting: %d", (int) exiting));
if (log_state == LOG_OPENED)
@@ -6783,6 +7076,27 @@ void MYSQL_BIN_LOG::close(uint exiting)
s.write(&log_file);
bytes_written+= s.data_written;
signal_update();
+
+ /*
+ When we shut down the server, write out the binlog state to a separate
+ file so we do not have to scan an entire binlog file to recover it at
+ the next server start.
+
+ Note that this must be written and synced to disk before marking the
+ last binlog file as "not crashed".
+ */
+ if (!is_relay_log && write_state_to_file())
+ {
+ sql_print_error("Failed to save binlog GTID state during shutdown. "
+ "Binlog will be marked as crashed, so that crash "
+ "recovery can recover the state at next server "
+ "startup.");
+ /*
+ Leave binlog file marked as crashed, so we can recover state by
+ scanning it now that we failed to write out the state properly.
+ */
+ failed_to_save_state= true;
+ }
}
#endif /* HAVE_REPLICATION */
@@ -6791,7 +7105,8 @@ void MYSQL_BIN_LOG::close(uint exiting)
&& !(exiting & LOG_CLOSE_DELAYED_CLOSE))
{
my_off_t org_position= mysql_file_tell(log_file.file, MYF(0));
- clear_inuse_flag_when_closing(log_file.file);
+ if (!failed_to_save_state)
+ clear_inuse_flag_when_closing(log_file.file);
/*
Restore position so that anything we have in the IO_cache is written
to the correct position.
@@ -7944,7 +8259,7 @@ int TC_LOG_BINLOG::open(const char *opt_name)
do
{
- strmake(log_name, log_info.log_file_name, sizeof(log_name)-1);
+ strmake_buf(log_name, log_info.log_file_name);
} while (!(error= find_next_log(&log_info, 1)));
if (error != LOG_INFO_EOF)
@@ -7967,9 +8282,10 @@ int TC_LOG_BINLOG::open(const char *opt_name)
sql_print_information("Recovering after a crash using %s", opt_name);
error= recover(&log_info, log_name, &log,
(Format_description_log_event *)ev);
+ state_read= true;
}
else
- error=0;
+ error= read_state_from_file();
delete ev;
end_io_cache(&log);
@@ -8219,6 +8535,29 @@ binlog_background_thread(void *arg __attribute__((unused)))
mysql_mutex_unlock(&LOCK_thread_count);
thd->store_globals();
+ /*
+ Load the slave replication GTID state from the mysql.gtid_slave_pos
+ table.
+
+ This is mostly so that we can start our seq_no counter from the highest
+ seq_no seen by a slave. This lets us tell whether a transaction logged by
+ ourselves as master is newer or older than a replicated transaction.
+ */
+#ifdef HAVE_REPLICATION
+ if (rpl_load_gtid_slave_state(thd))
+ sql_print_warning("Failed to load slave replication state from table "
+ "%s.%s: %u: %s", "mysql",
+ rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
+#endif
+
+ mysql_mutex_lock(&mysql_bin_log.LOCK_binlog_background_thread);
+ binlog_background_thread_started= true;
+ mysql_cond_signal(&mysql_bin_log.COND_binlog_background_thread_end);
+ mysql_mutex_unlock(&mysql_bin_log.LOCK_binlog_background_thread);
+
for (;;)
{
/*
@@ -8231,6 +8570,13 @@ binlog_background_thread(void *arg __attribute__((unused)))
{
stop= binlog_background_thread_stop;
queue= binlog_background_thread_queue;
+ if (stop && !mysql_bin_log.is_xidlist_idle())
+ {
+ /*
+ Delay stop until all pending binlog checkpoints have been processed.
+ */
+ stop= false;
+ }
if (stop || queue)
break;
mysql_cond_wait(&mysql_bin_log.COND_binlog_background_thread,
@@ -8241,9 +8587,18 @@ binlog_background_thread(void *arg __attribute__((unused)))
mysql_mutex_unlock(&mysql_bin_log.LOCK_binlog_background_thread);
/* Process any incoming commit_checkpoint_notify() calls. */
+ DBUG_EXECUTE_IF("inject_binlog_background_thread_before_mark_xid_done",
+ DBUG_ASSERT(!debug_sync_set_action(
+ thd,
+ STRING_WITH_LEN("binlog_background_thread_before_mark_xid_done "
+ "SIGNAL injected_binlog_background_thread "
+ "WAIT_FOR something_that_will_never_happen "
+ "TIMEOUT 2")));
+ );
while (queue)
{
THD_STAGE_INFO(thd, stage_binlog_processing_checkpoint_notify);
+ DEBUG_SYNC(current_thd, "binlog_background_thread_before_mark_xid_done");
/* Grab next pointer first, as mark_xid_done() may free the element. */
next= queue->next_in_queue;
mysql_bin_log.mark_xid_done(queue->binlog_id, true);
@@ -8301,7 +8656,16 @@ start_binlog_background_thread()
binlog_background_thread, NULL))
return 1;
- binlog_background_thread_started= true;
+ /*
+ Wait for the thread to have started (so we know that the slave replication
+ state is loaded and we have correct global_gtid_counter).
+ */
+ mysql_mutex_lock(&mysql_bin_log.LOCK_binlog_background_thread);
+ while (!binlog_background_thread_started)
+ mysql_cond_wait(&mysql_bin_log.COND_binlog_background_thread_end,
+ &mysql_bin_log.LOCK_binlog_background_thread);
+ mysql_mutex_unlock(&mysql_bin_log.LOCK_binlog_background_thread);
+
return 0;
}
@@ -8380,6 +8744,32 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name,
}
break;
}
+ case GTID_LIST_EVENT:
+ if (first_round)
+ {
+ Gtid_list_log_event *glev= (Gtid_list_log_event *)ev;
+
+ /* Initialise the binlog state from the Gtid_list event. */
+ if (rpl_global_gtid_binlog_state.load(glev->list, glev->count))
+ goto err2;
+ }
+ break;
+
+ case GTID_EVENT:
+ if (first_round)
+ {
+ Gtid_log_event *gev= (Gtid_log_event *)ev;
+ rpl_gtid gtid;
+
+ /* Update the binlog state with any GTID logged after Gtid_list. */
+ gtid.domain_id= gev->domain_id;
+ gtid.server_id= gev->server_id;
+ gtid.seq_no= gev->seq_no;
+ if (rpl_global_gtid_binlog_state.update(&gtid, false))
+ goto err2;
+ }
+ break;
+
default:
/* Nothing. */
break;
@@ -8596,7 +8986,7 @@ static void
set_binlog_snapshot_file(const char *src)
{
int dir_len = dirname_length(src);
- strmake(binlog_snapshot_file, src + dir_len, sizeof(binlog_snapshot_file)-1);
+ strmake_buf(binlog_snapshot_file, src + dir_len);
}
/*
diff --git a/sql/log.h b/sql/log.h
index 80fe34b5ff2..018ac64eff7 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -396,6 +396,7 @@ private:
( ((ulong)(c)>>1) == BINLOG_COOKIE_DUMMY_ID )
class binlog_cache_mngr;
+struct rpl_gtid;
class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
{
private:
@@ -420,11 +421,10 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
bool using_stmt_cache;
bool using_trx_cache;
/*
- Extra events (BEGIN, COMMIT/ROLLBACK/XID, and possibly INCIDENT) to be
+ Extra events (COMMIT/ROLLBACK/XID, and possibly INCIDENT) to be
written during group commit. The incident_event is only valid if
trx_data->has_incident() is true.
*/
- Log_event *begin_event;
Log_event *end_event;
Log_event *incident_event;
/* Set during group commit to record any per-thread error. */
@@ -507,6 +507,8 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
*/
uint *sync_period_ptr;
uint sync_counter;
+ /* Protect against reading the binlog state file twice. */
+ bool state_read;
inline uint get_sync_period()
{
@@ -526,6 +528,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
int write_transaction_or_stmt(group_commit_entry *entry);
bool write_transaction_to_binlog_events(group_commit_entry *entry);
void trx_group_commit_leader(group_commit_entry *leader);
+ bool is_xidlist_idle_nolock();
public:
/*
@@ -771,6 +774,18 @@ public:
inline IO_CACHE *get_index_file() { return &index_file;}
inline uint32 get_open_count() { return open_count; }
void set_status_variables(THD *thd);
+ bool is_xidlist_idle();
+ bool write_gtid_event(THD *thd, bool standalone, bool is_transactional);
+ int read_state_from_file();
+ int write_state_to_file();
+ int get_most_recent_gtid_list(rpl_gtid **list, uint32 *size);
+ bool append_state_pos(String *str);
+ bool find_in_binlog_state(uint32 domain_id, uint32 server_id,
+ rpl_gtid *out_gtid);
+ bool lookup_domain_in_binlog_state(uint32 domain_id, rpl_gtid *out_gtid);
+ int bump_seq_no_counter_if_needed(uint32 domain_id, uint64 seq_no);
+ bool check_strict_gtid_sequence(uint32 domain_id, uint32 server_id,
+ uint64 seq_no);
};
class Log_event_handler
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 3bac1f5e0c7..01a5dd7f4e3 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2000, 2012, Oracle and/or its affiliates.
+ Copyright (c) 2000, 2013, Oracle and/or its affiliates.
Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
@@ -40,6 +40,8 @@
#include "transaction.h"
#include <my_dir.h>
#include "sql_show.h" // append_identifier
+#include <strfunc.h>
+#include "compat56.h"
#endif /* MYSQL_CLIENT */
@@ -513,11 +515,58 @@ pretty_print_str(String *packet, const char *str, int len)
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
/**
- Creates a temporary name for load data infile:.
+ Create the prefix used for the temporary LOAD DATA INFILE file names
+ for this master connection.
+
+ @param name Buffer in which to store the prefix
+ @param connection_name Connection name
+
+ @return pointer to end of name
+
+ @description
+ We assume that FN_REFLEN is big enough to hold
+ MAX_CONNECTION_NAME * MAX_FILENAME_MBWIDTH characters + 2 numbers +
+ a short extension.
+
+ The resulting file name has the following parts, each separated with a '-'
+ - PREFIX_SQL_LOAD (SQL_LOAD-)
+ - If a connection name is given (multi-master setup):
+ - Add an extra '-' to mark that this is a multi-master file
+ - connection name in lower case, converted to safe file characters.
+ (see create_logfile_name_with_suffix()).
+ - server_id
+ - A last '-' (after server_id).
+*/
+
+static char *load_data_tmp_prefix(char *name,
+ LEX_STRING *connection_name)
+{
+ name= strmov(name, PREFIX_SQL_LOAD);
+ if (connection_name->length)
+ {
+ uint buf_length;
+ uint errors;
+ /* Add marker that this is a multi-master-file */
+ *name++='-';
+ /* Convert connection_name to a safe filename */
+ buf_length= strconvert(system_charset_info, connection_name->str, FN_REFLEN,
+ &my_charset_filename, name, FN_REFLEN, &errors);
+ name+= buf_length;
+ *name++= '-';
+ }
+ name= int10_to_str(global_system_variables.server_id, name, 10);
+ *name++ = '-';
+ *name= '\0'; // Zero-terminate so the result can be used for prefix tests
+ return name;
+}
+
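+/*
+  Illustrative file names (assuming PREFIX_SQL_LOAD is "SQL_LOAD-" and using
+  made-up ids): for the default connection with server_id=1 the prefix is
+  "SQL_LOAD-1-", while a named connection "master2" on the same server gives
+  "SQL_LOAD--master2-1-"; slave_load_file_stem() below then appends
+  <event_server_id>-<file_id><ext>, e.g. "SQL_LOAD--master2-1-3-5.data".
+*/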
+
+/**
+ Creates a temporary name for LOAD DATA INFILE
@param buf Store new filename here
@param file_id File_id (part of file name)
- @param event_server_id Event_id (part of file name)
+ @param event_server_id Event_id (part of file name)
@param ext Extension for file name
@return
@@ -525,16 +574,14 @@ pretty_print_str(String *packet, const char *str, int len)
*/
static char *slave_load_file_stem(char *buf, uint file_id,
- int event_server_id, const char *ext)
+ int event_server_id, const char *ext,
+ LEX_STRING *connection_name)
{
char *res;
- fn_format(buf,PREFIX_SQL_LOAD,slave_load_tmpdir, "", MY_UNPACK_FILENAME);
+ res= buf+ unpack_dirname(buf, slave_load_tmpdir);
to_unix_path(buf);
-
- buf = strend(buf);
- buf = int10_to_str(::server_id, buf, 10);
- *buf++ = '-';
- buf = int10_to_str(event_server_id, buf, 10);
+ buf= load_data_tmp_prefix(res, connection_name);
+ buf= int10_to_str(event_server_id, buf, 10);
*buf++ = '-';
res= int10_to_str(file_id, buf, 10);
strmov(res, ext); // Add extension last
@@ -549,14 +596,17 @@ static char *slave_load_file_stem(char *buf, uint file_id,
Delete all temporary files used for SQL_LOAD.
*/
-static void cleanup_load_tmpdir()
+static void cleanup_load_tmpdir(LEX_STRING *connection_name)
{
MY_DIR *dirp;
FILEINFO *file;
uint i;
- char fname[FN_REFLEN], prefbuf[31], *p;
+ char dir[FN_REFLEN], fname[FN_REFLEN];
+ char prefbuf[31 + MAX_CONNECTION_NAME* MAX_FILENAME_MBWIDTH + 1];
+ DBUG_ENTER("cleanup_load_tmpdir");
- if (!(dirp=my_dir(slave_load_tmpdir,MYF(0))))
+ unpack_dirname(dir, slave_load_tmpdir);
+ if (!(dirp=my_dir(dir, MYF(MY_WME))))
return;
/*
@@ -567,12 +617,11 @@ static void cleanup_load_tmpdir()
we cannot meet Start_log event in the middle of events from one
LOAD DATA.
*/
- p= strmake(prefbuf, STRING_WITH_LEN(PREFIX_SQL_LOAD));
- p= int10_to_str(::server_id, p, 10);
- *(p++)= '-';
- *p= 0;
- for (i=0 ; i < (uint)dirp->number_off_files; i++)
+ load_data_tmp_prefix(prefbuf, connection_name);
+ DBUG_PRINT("enter", ("dir: '%s' prefix: '%s'", dir, prefbuf));
+
+ for (i=0 ; i < (uint)dirp->number_of_files; i++)
{
file=dirp->dir_entry+i;
if (is_prefix(file->name, prefbuf))
@@ -583,6 +632,7 @@ static void cleanup_load_tmpdir()
}
my_dirend(dirp);
+ DBUG_VOID_RETURN;
}
#endif
@@ -617,16 +667,18 @@ static inline int read_str(const char **buf, const char *buf_end,
/**
- Transforms a string into "" or its expression in 0x... form.
+ Transforms a string into "" or its expression in X'HHHH' form.
*/
char *str_to_hex(char *to, const char *from, uint len)
{
if (len)
{
- *to++= '0';
- *to++= 'x';
+ *to++= 'X';
+ *to++= '\'';
to= octet2hex(to, from, len);
+ *to++= '\'';
+ *to= '\0';
}
else
to= strmov(to, "\"\"");
@@ -647,7 +699,7 @@ append_query_string(THD *thd, CHARSET_INFO *csinfo,
{
char *beg, *ptr;
uint32 const orig_len= to->length();
- if (to->reserve(orig_len + from->length()*2+3))
+ if (to->reserve(orig_len + from->length() * 2 + 4))
return 1;
beg= (char*) to->ptr() + to->length();
@@ -744,6 +796,8 @@ const char* Log_event::get_type_str(Log_event_type type)
case INCIDENT_EVENT: return "Incident";
case ANNOTATE_ROWS_EVENT: return "Annotate_rows";
case BINLOG_CHECKPOINT_EVENT: return "Binlog_checkpoint";
+ case GTID_EVENT: return "Gtid";
+ case GTID_LIST_EVENT: return "Gtid_list";
default: return "Unknown"; /* impossible */
}
}
@@ -764,7 +818,7 @@ Log_event::Log_event(THD* thd_arg, uint16 flags_arg, bool using_trans)
crc(0), thd(thd_arg),
checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF)
{
- server_id= thd->server_id;
+ server_id= thd->variables.server_id;
when= thd->start_time;
when_sec_part=thd->start_time_sec_part;
@@ -789,7 +843,7 @@ Log_event::Log_event()
cache_type(Log_event::EVENT_INVALID_CACHE), crc(0),
thd(0), checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF)
{
- server_id= ::server_id;
+ server_id= global_system_variables.server_id;
/*
We can't call my_time() here as this would cause a call before
my_init() is called
@@ -904,9 +958,11 @@ int Log_event::do_update_pos(Relay_log_info *rli)
if (debug_not_change_ts_if_art_event == 1
&& is_artificial_event())
debug_not_change_ts_if_art_event= 0; );
- rli->stmt_done(log_pos, is_artificial_event() &&
- IF_DBUG(debug_not_change_ts_if_art_event > 0, 1) ?
- 0 : when);
+ rli->stmt_done(log_pos,
+ (is_artificial_event() &&
+ IF_DBUG(debug_not_change_ts_if_art_event > 0, 1) ?
+ 0 : when),
+ thd);
DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp",
if (debug_not_change_ts_if_art_event == 0)
debug_not_change_ts_if_art_event= 2; );
@@ -921,10 +977,11 @@ Log_event::do_shall_skip(Relay_log_info *rli)
DBUG_PRINT("info", ("ev->server_id: %lu, ::server_id: %lu,"
" rli->replicate_same_server_id: %d,"
" rli->slave_skip_counter: %lu",
- (ulong) server_id, (ulong) ::server_id,
+ (ulong) server_id, (ulong) global_system_variables.server_id,
rli->replicate_same_server_id,
rli->slave_skip_counter));
- if ((server_id == ::server_id && !rli->replicate_same_server_id) ||
+ if ((server_id == global_system_variables.server_id &&
+ !rli->replicate_same_server_id) ||
(rli->slave_skip_counter == 1 && rli->is_in_group()) ||
(flags & LOG_EVENT_SKIP_REPLICATION_F &&
opt_replicate_events_marked_for_skip != RPL_SKIP_REPLICATE))
@@ -1210,7 +1267,9 @@ bool Log_event::write_header(IO_CACHE* file, ulong event_data_length)
int Log_event::read_log_event(IO_CACHE* file, String* packet,
mysql_mutex_t* log_lock,
- uint8 checksum_alg_arg)
+ uint8 checksum_alg_arg,
+ const char *log_file_name_arg,
+ bool* is_binlog_active)
{
ulong data_len;
int result=0;
@@ -1220,6 +1279,10 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet,
if (log_lock)
mysql_mutex_lock(log_lock);
+
+ if (log_file_name_arg)
+ *is_binlog_active= mysql_bin_log.is_active(log_file_name_arg);
+
if (my_b_read(file, (uchar*) buf, sizeof(buf)))
{
/*
@@ -1365,7 +1428,7 @@ failed my_b_read"));
Log_event *res= 0;
#ifndef max_allowed_packet
THD *thd=current_thd;
- uint max_allowed_packet= thd ? slave_max_allowed_packet:~(ulong)0;
+ uint max_allowed_packet= thd ? slave_max_allowed_packet:~(uint)0;
#endif
if (data_len > max_allowed_packet)
@@ -1555,6 +1618,12 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
case BINLOG_CHECKPOINT_EVENT:
ev = new Binlog_checkpoint_log_event(buf, event_len, description_event);
break;
+ case GTID_EVENT:
+ ev = new Gtid_log_event(buf, event_len, description_event);
+ break;
+ case GTID_LIST_EVENT:
+ ev = new Gtid_list_log_event(buf, event_len, description_event);
+ break;
#ifdef HAVE_REPLICATION
case SLAVE_EVENT: /* can never happen (unused event) */
ev = new Slave_log_event(buf, event_len, description_event);
@@ -1803,6 +1872,7 @@ void Log_event::print_header(IO_CACHE* file,
/**
Prints a quoted string to io cache.
Control characters are displayed as hex sequence, e.g. \x00
+ Single-quote and backslash characters are escaped with a \
@param[in] file IO cache
@param[in] prt Pointer to string
@@ -2056,6 +2126,17 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
return 4;
}
+ case MYSQL_TYPE_TIMESTAMP2:
+ {
+ char buf[MAX_DATE_STRING_REP_LENGTH];
+ struct timeval tm;
+ my_timestamp_from_binary(&tm, ptr, meta);
+ int buflen= my_timeval_to_str(&tm, buf, meta);
+ my_b_write(file, buf, buflen);
+ my_snprintf(typestr, typestr_length, "TIMESTAMP(%d)", meta);
+ return my_timestamp_binary_length(meta);
+ }
+
case MYSQL_TYPE_DATETIME:
{
ulong d, t;
@@ -2070,15 +2151,41 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
return 8;
}
+ case MYSQL_TYPE_DATETIME2:
+ {
+ char buf[MAX_DATE_STRING_REP_LENGTH];
+ MYSQL_TIME ltime;
+ longlong packed= my_datetime_packed_from_binary(ptr, meta);
+ TIME_from_longlong_datetime_packed(&ltime, packed);
+ int buflen= my_datetime_to_str(&ltime, buf, meta);
+ my_b_write_quoted(file, (uchar *) buf, buflen);
+ my_snprintf(typestr, typestr_length, "DATETIME(%d)", meta);
+ return my_datetime_binary_length(meta);
+ }
+
case MYSQL_TYPE_TIME:
{
- uint32 i32= uint3korr(ptr);
- my_b_printf(file, "'%02d:%02d:%02d'",
- i32 / 10000, (i32 % 10000) / 100, i32 % 100);
+ int32 tmp= sint3korr(ptr);
+ int32 i32= tmp >= 0 ? tmp : - tmp;
+ const char *sign= tmp < 0 ? "-" : "";
+ my_b_printf(file, "'%s%02d:%02d:%02d'",
+ sign, i32 / 10000, (i32 % 10000) / 100, i32 % 100);
strmake(typestr, "TIME", typestr_length);
return 3;
}
-
+
+ case MYSQL_TYPE_TIME2:
+ {
+ char buf[MAX_DATE_STRING_REP_LENGTH];
+ MYSQL_TIME ltime;
+ longlong packed= my_time_packed_from_binary(ptr, meta);
+ TIME_from_longlong_time_packed(&ltime, packed);
+ int buflen= my_time_to_str(&ltime, buf, meta);
+ my_b_write_quoted(file, (uchar *) buf, buflen);
+ my_snprintf(typestr, typestr_length, "TIME(%d)", meta);
+ return my_time_binary_length(meta);
+ }
+
case MYSQL_TYPE_NEWDATE:
{
uint32 tmp= uint3korr(ptr);
@@ -3444,6 +3551,53 @@ Query_log_event::dummy_event(String *packet, ulong ev_offset,
return 0;
}
+/*
+ Replace an event (GTID event) with a BEGIN query event, to be compatible
+ with an old slave.
+*/
+int
+Query_log_event::begin_event(String *packet, ulong ev_offset,
+ uint8 checksum_alg)
+{
+ uchar *p= (uchar *)packet->ptr() + ev_offset;
+ uchar *q= p + LOG_EVENT_HEADER_LEN;
+ size_t data_len= packet->length() - ev_offset;
+ uint16 flags;
+
+ if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32)
+ data_len-= BINLOG_CHECKSUM_LEN;
+ else
+ DBUG_ASSERT(checksum_alg == BINLOG_CHECKSUM_ALG_UNDEF ||
+ checksum_alg == BINLOG_CHECKSUM_ALG_OFF);
+
+ /* Currently we only need to replace GTID event. */
+ DBUG_ASSERT(data_len == LOG_EVENT_HEADER_LEN + GTID_HEADER_LEN);
+ if (data_len != LOG_EVENT_HEADER_LEN + GTID_HEADER_LEN)
+ return 1;
+
+ flags= uint2korr(p + FLAGS_OFFSET);
+ flags&= ~LOG_EVENT_THREAD_SPECIFIC_F;
+ flags|= LOG_EVENT_SUPPRESS_USE_F;
+ int2store(p + FLAGS_OFFSET, flags);
+
+ p[EVENT_TYPE_OFFSET]= QUERY_EVENT;
+ int4store(q + Q_THREAD_ID_OFFSET, 0);
+ int4store(q + Q_EXEC_TIME_OFFSET, 0);
+ q[Q_DB_LEN_OFFSET]= 0;
+ int2store(q + Q_ERR_CODE_OFFSET, 0);
+ int2store(q + Q_STATUS_VARS_LEN_OFFSET, 0);
+ q[Q_DATA_OFFSET]= 0; /* Zero terminator for empty db */
+ q+= Q_DATA_OFFSET + 1;
+ memcpy(q, "BEGIN", 5);
+
+ if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32)
+ {
+ ha_checksum crc= my_checksum(0L, p, data_len);
+ int4store(p + data_len, crc);
+ }
+ return 0;
+}
+
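+/*
+  Note that the replacement is done in place: the BEGIN Query event must have
+  exactly the same length as the GTID event it overwrites (asserted above),
+  so the positions of all following events in the stream stay unchanged.
+*/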
#ifdef MYSQL_CLIENT
/**
@@ -3703,6 +3857,9 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli,
LEX_STRING new_db;
int expected_error,actual_error= 0;
HA_CREATE_INFO db_options;
+ uint64 sub_id= 0;
+ rpl_gtid gtid;
+ Rpl_filter *rpl_filter= rli->mi->rpl_filter;
DBUG_ENTER("Query_log_event::do_apply_event");
/*
@@ -3890,6 +4047,31 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli,
else
thd->variables.collation_database= thd->db_charset;
+ /*
+ Record any GTID in the same transaction, so slave state is
+ transactionally consistent.
+ */
+ if (strcmp("COMMIT", query) == 0 && (sub_id= rli->gtid_sub_id))
+ {
+ /* Clear the GTID from the RLI so we don't accidentally reuse it. */
+ const_cast<Relay_log_info*>(rli)->gtid_sub_id= 0;
+
+ gtid= rli->current_gtid;
+ if (rpl_global_gtid_slave_state.record_gtid(thd, &gtid, sub_id, true, false))
+ {
+ rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE,
+ "Error during COMMIT: failed to update GTID state in "
+ "%s.%s: %d: %s",
+ "mysql", rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
+ trans_rollback(thd);
+ sub_id= 0;
+ thd->is_slave_error= 1;
+ goto end;
+ }
+ }
+
thd->table_map_for_update= (table_map)table_map_for_update;
thd->set_invoker(&user, &host);
/*
@@ -4076,6 +4258,9 @@ Default database: '%s'. Query: '%s'",
}
end:
+ if (sub_id && !thd->is_slave_error)
+ rpl_global_gtid_slave_state.update_state_hash(sub_id, &gtid);
+
/*
Probably we have set thd->query, thd->db, thd->catalog to point to places
in the data_buf of this event. Now the event is going to be deleted
@@ -4153,6 +4338,28 @@ Query_log_event::do_shall_skip(Relay_log_info *rli)
DBUG_RETURN(Log_event::do_shall_skip(rli));
}
+
+bool
+Query_log_event::peek_is_commit_rollback(const char *event_start,
+ size_t event_len, uint8 checksum_alg)
+{
+ if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32)
+ {
+ if (event_len > BINLOG_CHECKSUM_LEN)
+ event_len-= BINLOG_CHECKSUM_LEN;
+ else
+ event_len= 0;
+ }
+ else
+ DBUG_ASSERT(checksum_alg == BINLOG_CHECKSUM_ALG_UNDEF ||
+ checksum_alg == BINLOG_CHECKSUM_ALG_OFF);
+
+ if (event_len < LOG_EVENT_HEADER_LEN + QUERY_HEADER_LEN || event_len < 9)
+ return false;
+ return !memcmp(event_start + (event_len-7), "\0COMMIT", 7) ||
+ !memcmp(event_start + (event_len-9), "\0ROLLBACK", 9);
+}
+
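+/*
+  A note on the memcmp checks above: in a Query_log_event the query text is
+  the last field of the event, immediately preceded by the zero-terminated
+  database name, so after stripping any checksum a bare COMMIT or ROLLBACK
+  statement makes the event end in "\0COMMIT" or "\0ROLLBACK".
+*/
+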
#endif
@@ -4315,7 +4522,11 @@ int Start_log_event_v3::do_apply_event(Relay_log_info const *rli)
if (created)
{
error= close_temporary_tables(thd);
- cleanup_load_tmpdir();
+ /*
+ rli->mi is only NULL if we get here via a BINLOG statement
+ */
+ if (rli->mi)
+ cleanup_load_tmpdir(&rli->mi->cmp_connection_name);
}
else
{
@@ -4467,6 +4678,8 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
post_header_len[ANNOTATE_ROWS_EVENT-1]= ANNOTATE_ROWS_HEADER_LEN;
post_header_len[BINLOG_CHECKPOINT_EVENT-1]=
BINLOG_CHECKPOINT_HEADER_LEN;
+ post_header_len[GTID_EVENT-1]= GTID_HEADER_LEN;
+ post_header_len[GTID_LIST_EVENT-1]= GTID_LIST_HEADER_LEN;
// Sanity-check that all post header lengths are initialized.
int i;
@@ -4576,109 +4789,6 @@ Format_description_log_event(const char* buf,
checksum_alg= (uint8) BINLOG_CHECKSUM_ALG_UNDEF;
}
- /*
- In some previous versions, the events were given other event type
- id numbers than in the present version. When replicating from such
- a version, we therefore set up an array that maps those id numbers
- to the id numbers of the present server.
-
- If post_header_len is null, it means malloc failed, and is_valid
- will fail, so there is no need to do anything.
-
- The trees in which events have wrong id's are:
-
- mysql-5.1-wl1012.old mysql-5.1-wl2325-5.0-drop6p13-alpha
- mysql-5.1-wl2325-5.0-drop6 mysql-5.1-wl2325-5.0
- mysql-5.1-wl2325-no-dd
-
- (this was found by grepping for two lines in sequence where the
- first matches "FORMAT_DESCRIPTION_EVENT," and the second matches
- "TABLE_MAP_EVENT," in log_event.h in all trees)
-
- In these trees, the following server_versions existed since
- TABLE_MAP_EVENT was introduced:
-
- 5.1.1-a_drop5p3 5.1.1-a_drop5p4 5.1.1-alpha
- 5.1.2-a_drop5p10 5.1.2-a_drop5p11 5.1.2-a_drop5p12
- 5.1.2-a_drop5p13 5.1.2-a_drop5p14 5.1.2-a_drop5p15
- 5.1.2-a_drop5p16 5.1.2-a_drop5p16b 5.1.2-a_drop5p16c
- 5.1.2-a_drop5p17 5.1.2-a_drop5p4 5.1.2-a_drop5p5
- 5.1.2-a_drop5p6 5.1.2-a_drop5p7 5.1.2-a_drop5p8
- 5.1.2-a_drop5p9 5.1.3-a_drop5p17 5.1.3-a_drop5p17b
- 5.1.3-a_drop5p17c 5.1.4-a_drop5p18 5.1.4-a_drop5p19
- 5.1.4-a_drop5p20 5.1.4-a_drop6p0 5.1.4-a_drop6p1
- 5.1.4-a_drop6p2 5.1.5-a_drop5p20 5.2.0-a_drop6p3
- 5.2.0-a_drop6p4 5.2.0-a_drop6p5 5.2.0-a_drop6p6
- 5.2.1-a_drop6p10 5.2.1-a_drop6p11 5.2.1-a_drop6p12
- 5.2.1-a_drop6p6 5.2.1-a_drop6p7 5.2.1-a_drop6p8
- 5.2.2-a_drop6p13 5.2.2-a_drop6p13-alpha 5.2.2-a_drop6p13b
- 5.2.2-a_drop6p13c
-
- (this was found by grepping for "mysql," in all historical
- versions of configure.in in the trees listed above).
-
- There are 5.1.1-alpha versions that use the new event id's, so we
- do not test that version string. So replication from 5.1.1-alpha
- with the other event id's to a new version does not work.
- Moreover, we can safely ignore the part after drop[56]. This
- allows us to simplify the big list above to the following regexes:
-
- 5\.1\.[1-5]-a_drop5.*
- 5\.1\.4-a_drop6.*
- 5\.2\.[0-2]-a_drop6.*
-
- This is what we test for in the 'if' below.
- */
- if (post_header_len &&
- server_version[0] == '5' && server_version[1] == '.' &&
- server_version[3] == '.' &&
- strncmp(server_version + 5, "-a_drop", 7) == 0 &&
- ((server_version[2] == '1' &&
- server_version[4] >= '1' && server_version[4] <= '5' &&
- server_version[12] == '5') ||
- (server_version[2] == '1' &&
- server_version[4] == '4' &&
- server_version[12] == '6') ||
- (server_version[2] == '2' &&
- server_version[4] >= '0' && server_version[4] <= '2' &&
- server_version[12] == '6')))
- {
- if (number_of_event_types != 22)
- {
- DBUG_PRINT("info", (" number_of_event_types=%d",
- number_of_event_types));
- /* this makes is_valid() return false. */
- my_free(post_header_len);
- post_header_len= NULL;
- DBUG_VOID_RETURN;
- }
- static const uint8 perm[23]=
- {
- UNKNOWN_EVENT, START_EVENT_V3, QUERY_EVENT, STOP_EVENT, ROTATE_EVENT,
- INTVAR_EVENT, LOAD_EVENT, SLAVE_EVENT, CREATE_FILE_EVENT,
- APPEND_BLOCK_EVENT, EXEC_LOAD_EVENT, DELETE_FILE_EVENT,
- NEW_LOAD_EVENT,
- RAND_EVENT, USER_VAR_EVENT,
- FORMAT_DESCRIPTION_EVENT,
- TABLE_MAP_EVENT,
- PRE_GA_WRITE_ROWS_EVENT,
- PRE_GA_UPDATE_ROWS_EVENT,
- PRE_GA_DELETE_ROWS_EVENT,
- XID_EVENT,
- BEGIN_LOAD_QUERY_EVENT,
- EXECUTE_LOAD_QUERY_EVENT,
- };
- event_type_permutation= perm;
- /*
- Since we use (permuted) event id's to index the post_header_len
- array, we need to permute the post_header_len array too.
- */
- uint8 post_header_len_temp[23];
- for (int i= 1; i < 23; i++)
- post_header_len_temp[perm[i] - 1]= post_header_len[i - 1];
- for (int i= 0; i < 22; i++)
- post_header_len[i] = post_header_len_temp[i];
- }
DBUG_VOID_RETURN;
}
@@ -4774,7 +4884,7 @@ int Format_description_log_event::do_apply_event(Relay_log_info const *rli)
perform, we don't call Start_log_event_v3::do_apply_event()
(this was just to update the log's description event).
*/
- if (server_id != (uint32) ::server_id)
+ if (server_id != (uint32) global_system_variables.server_id)
{
/*
If the event was not requested by the slave i.e. the master sent
@@ -4800,7 +4910,7 @@ int Format_description_log_event::do_apply_event(Relay_log_info const *rli)
int Format_description_log_event::do_update_pos(Relay_log_info *rli)
{
- if (server_id == (uint32) ::server_id)
+ if (server_id == (uint32) global_system_variables.server_id)
{
/*
We only increase the relay log position if we are skipping
@@ -4841,10 +4951,21 @@ do_server_version_split(char* version,
for (uint i= 0; i<=2; i++)
{
number= strtoul(p, &r, 10);
- split_versions->ver[i]= (uchar) number;
- DBUG_ASSERT(number < 256); // fit in uchar
+ /*
+ The version is invalid if any version number is greater than 255 or if the
+ first number is not followed by '.'.
+ */
+ if (number < 256 && (*r == '.' || i != 0))
+ split_versions->ver[i]= (uchar) number;
+ else
+ {
+ split_versions->ver[0]= 0;
+ split_versions->ver[1]= 0;
+ split_versions->ver[2]= 0;
+ break;
+ }
+
p= r;
- DBUG_ASSERT(!((i == 0) && (*r != '.'))); // should be true in practice
if (*r == '.')
p++; // skip the dot
}
@@ -4862,7 +4983,6 @@ do_server_version_split(char* version,
into 'server_version_split':
X.Y.Zabc (X,Y,Z numbers, a not a digit) -> {X,Y,Z}
X.Yabc -> {X,Y,0}
- Xabc -> {X,0,0}
'server_version_split' is then used for lookups to find if the server which
created this event has some known bug.
*/
@@ -5440,6 +5560,7 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli,
bool use_rli_only_for_errors)
{
LEX_STRING new_db;
+ Rpl_filter *rpl_filter= rli->mi->rpl_filter;
DBUG_ENTER("Load_log_event::do_apply_event");
new_db.length= db_len;
@@ -5846,7 +5967,7 @@ int Rotate_log_event::do_update_pos(Relay_log_info *rli)
#endif
DBUG_PRINT("info", ("server_id=%lu; ::server_id=%lu",
- (ulong) this->server_id, (ulong) ::server_id));
+ (ulong) this->server_id, (ulong) global_system_variables.server_id));
DBUG_PRINT("info", ("new_log_ident: %s", this->new_log_ident));
DBUG_PRINT("info", ("pos: %s", llstr(this->pos, buf)));
@@ -5866,7 +5987,8 @@ int Rotate_log_event::do_update_pos(Relay_log_info *rli)
5.0.0, there also are some rotates from the slave itself, in the
relay log, which shall not change the group positions.
*/
- if ((server_id != ::server_id || rli->replicate_same_server_id) &&
+ if ((server_id != global_system_variables.server_id ||
+ rli->replicate_same_server_id) &&
!is_relay_log_event() &&
!rli->is_in_group())
{
@@ -5883,6 +6005,7 @@ int Rotate_log_event::do_update_pos(Relay_log_info *rli)
rli->group_master_log_name,
(ulong) rli->group_master_log_pos));
mysql_mutex_unlock(&rli->data_lock);
+ rpl_global_gtid_slave_state.record_and_update_gtid(thd, rli);
flush_relay_log_info(rli);
/*
@@ -6007,6 +6130,473 @@ bool Binlog_checkpoint_log_event::write(IO_CACHE *file)
/**************************************************************************
+ Global transaction ID stuff
+**************************************************************************/
+
+Gtid_log_event::Gtid_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event)
+ : Log_event(buf, description_event), seq_no(0)
+{
+ uint8 header_size= description_event->common_header_len;
+ uint8 post_header_len= description_event->post_header_len[GTID_EVENT-1];
+ if (event_len < header_size + post_header_len ||
+ post_header_len < GTID_HEADER_LEN)
+ return;
+
+ buf+= header_size;
+ seq_no= uint8korr(buf);
+ buf+= 8;
+ domain_id= uint4korr(buf);
+ buf+= 4;
+ flags2= *buf;
+}
+
+
+#ifdef MYSQL_SERVER
+
+Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
+ uint32 domain_id_arg, bool standalone,
+ uint16 flags_arg, bool is_transactional)
+ : Log_event(thd_arg, flags_arg, is_transactional),
+ seq_no(seq_no_arg), domain_id(domain_id_arg),
+ flags2(standalone ? FL_STANDALONE : 0)
+{
+ cache_type= Log_event::EVENT_NO_CACHE;
+}
+
+
+/*
+ Used to record the GTID while sending the binlog to a slave, without having
+ to fully construct a Gtid_log_event object needlessly.
+*/
+bool
+Gtid_log_event::peek(const char *event_start, size_t event_len,
+ uint8 checksum_alg,
+ uint32 *domain_id, uint32 *server_id, uint64 *seq_no,
+ uchar *flags2)
+{
+ const char *p;
+
+ if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32)
+ {
+ if (event_len > BINLOG_CHECKSUM_LEN)
+ event_len-= BINLOG_CHECKSUM_LEN;
+ else
+ event_len= 0;
+ }
+ else
+ DBUG_ASSERT(checksum_alg == BINLOG_CHECKSUM_ALG_UNDEF ||
+ checksum_alg == BINLOG_CHECKSUM_ALG_OFF);
+
+ if (event_len < LOG_EVENT_HEADER_LEN + GTID_HEADER_LEN)
+ return true;
+ *server_id= uint4korr(event_start + SERVER_ID_OFFSET);
+ p= event_start + LOG_EVENT_HEADER_LEN;
+ *seq_no= uint8korr(p);
+ p+= 8;
+ *domain_id= uint4korr(p);
+ p+= 4;
+ *flags2= (uchar)*p;
+ return false;
+}
+
+
+bool
+Gtid_log_event::write(IO_CACHE *file)
+{
+ uchar buf[GTID_HEADER_LEN];
+ int8store(buf, seq_no);
+ int4store(buf+8, domain_id);
+ buf[12]= flags2;
+ bzero(buf+13, GTID_HEADER_LEN-13);
+ return write_header(file, GTID_HEADER_LEN) ||
+ wrapper_my_b_safe_write(file, buf, GTID_HEADER_LEN) ||
+ write_footer(file);
+}
+
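+/*
+  For reference, the GTID_HEADER_LEN (19) bytes written above are laid out as
+  follows (derived from this write() and the reader constructor; offsets are
+  relative to the end of the common event header, and server_id travels in
+  the common header itself):
+    0..7   seq_no     (8 bytes, little-endian)
+    8..11  domain_id  (4 bytes, little-endian)
+    12     flags2     (FL_STANDALONE etc.)
+    13..18 reserved, written as zero
+*/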
+
+/*
+ Replace a GTID event with either a BEGIN event, a dummy event, or nothing,
+ as appropriate to work with an old slave that does not understand global
+ transaction IDs.
+
+ The need_dummy_event argument is an IN/OUT argument. It is passed as TRUE
+ if the slave's capability is lower than MARIA_SLAVE_CAPABILITY_TOLERATE_HOLES.
+ On return it is TRUE if a BEGIN (or dummy) event is to be sent to the
+ slave, FALSE if the event should be skipped completely.
+*/
+int
+Gtid_log_event::make_compatible_event(String *packet, bool *need_dummy_event,
+ ulong ev_offset, uint8 checksum_alg)
+{
+ uchar flags2;
+ if (packet->length() - ev_offset < LOG_EVENT_HEADER_LEN + GTID_HEADER_LEN)
+ return 1;
+ flags2= (*packet)[ev_offset + LOG_EVENT_HEADER_LEN + 12];
+ if (flags2 & FL_STANDALONE)
+ {
+ if (*need_dummy_event)
+ return Query_log_event::dummy_event(packet, ev_offset, checksum_alg);
+ else
+ return 0;
+ }
+
+ *need_dummy_event= true;
+ return Query_log_event::begin_event(packet, ev_offset, checksum_alg);
+}
+
+
+#ifdef HAVE_REPLICATION
+void
+Gtid_log_event::pack_info(THD *thd, Protocol *protocol)
+{
+ char buf[6+5+10+1+10+1+20+1];
+ char *p;
+ p = strmov(buf, (flags2 & FL_STANDALONE ? "GTID " : "BEGIN GTID "));
+ p= longlong10_to_str(domain_id, p, 10);
+ *p++= '-';
+ p= longlong10_to_str(server_id, p, 10);
+ *p++= '-';
+ p= longlong10_to_str(seq_no, p, 10);
+
+ protocol->store(buf, p-buf, &my_charset_bin);
+}
+
+static char gtid_begin_string[] = "BEGIN";
+
+int
+Gtid_log_event::do_apply_event(Relay_log_info const *rli)
+{
+ thd->variables.server_id= this->server_id;
+ thd->variables.gtid_domain_id= this->domain_id;
+ thd->variables.gtid_seq_no= this->seq_no;
+
+ if (opt_gtid_strict_mode && opt_bin_log && opt_log_slave_updates)
+ {
+ /* Need to reset prior "ok" status to give an error. */
+ thd->clear_error();
+ thd->get_stmt_da()->reset_diagnostics_area();
+ if (mysql_bin_log.check_strict_gtid_sequence(this->domain_id,
+ this->server_id, this->seq_no))
+ return 1;
+ }
+ if (flags2 & FL_STANDALONE)
+ return 0;
+
+ /* Execute this like a BEGIN query event. */
+ thd->set_query_and_id(gtid_begin_string, sizeof(gtid_begin_string)-1,
+ &my_charset_bin, next_query_id());
+ Parser_state parser_state;
+ if (!parser_state.init(thd, thd->query(), thd->query_length()))
+ {
+ mysql_parse(thd, thd->query(), thd->query_length(), &parser_state);
+ /* Finalize server status flags after executing a statement. */
+ thd->update_server_status();
+ log_slow_statement(thd);
+ if (unlikely(thd->is_fatal_error))
+ thd->is_slave_error= 1;
+ else if (likely(!thd->is_slave_error))
+ general_log_write(thd, COM_QUERY, thd->query(), thd->query_length());
+ }
+
+ thd->reset_query();
+ free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
+ return thd->is_slave_error;
+}
+
+
+int
+Gtid_log_event::do_update_pos(Relay_log_info *rli)
+{
+ rli->inc_event_relay_log_pos();
+ return 0;
+}
+
+
+Log_event::enum_skip_reason
+Gtid_log_event::do_shall_skip(Relay_log_info *rli)
+{
+ /*
+ An event skipped due to @@skip_replication must not be counted towards the
+ number of events to be skipped due to @@sql_slave_skip_counter.
+ */
+ if (flags & LOG_EVENT_SKIP_REPLICATION_F &&
+ opt_replicate_events_marked_for_skip != RPL_SKIP_REPLICATE)
+ return Log_event::EVENT_SKIP_IGNORE;
+
+ if (rli->slave_skip_counter > 0)
+ {
+ if (!(flags2 & FL_STANDALONE))
+ thd->variables.option_bits|= OPTION_BEGIN;
+ return Log_event::continue_group(rli);
+ }
+ return Log_event::do_shall_skip(rli);
+}
+
+
+#endif /* HAVE_REPLICATION */
+
+#else /* !MYSQL_SERVER */
+
+void
+Gtid_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
+{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+ char buf[21];
+
+ if (!print_event_info->short_form)
+ {
+ print_header(&cache, print_event_info, FALSE);
+ longlong10_to_str(seq_no, buf, 10);
+ my_b_printf(&cache, "\tGTID %u-%u-%s\n", domain_id, server_id, buf);
+
+ if (!print_event_info->domain_id_printed ||
+ print_event_info->domain_id != domain_id)
+ {
+ my_b_printf(&cache, "/*!100001 SET @@session.gtid_domain_id=%u*/%s\n",
+ domain_id, print_event_info->delimiter);
+ print_event_info->domain_id= domain_id;
+ print_event_info->domain_id_printed= true;
+ }
+
+ if (!print_event_info->server_id_printed ||
+ print_event_info->server_id != server_id)
+ {
+ my_b_printf(&cache, "/*!100001 SET @@session.server_id=%u*/%s\n",
+ server_id, print_event_info->delimiter);
+ print_event_info->server_id= server_id;
+ print_event_info->server_id_printed= true;
+ }
+
+ my_b_printf(&cache, "/*!100001 SET @@session.gtid_seq_no=%s*/%s\n",
+ buf, print_event_info->delimiter);
+ }
+ if (!(flags2 & FL_STANDALONE))
+ my_b_printf(&cache, "BEGIN\n%s\n", print_event_info->delimiter);
+}
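+
+// A sketch of the mysqlbinlog output produced above for GTID 0-1-100 with
+// the default ';' delimiter (values illustrative): the "GTID 0-1-100" text
+// is appended to the usual header comment line, followed by
+//
+//   /*!100001 SET @@session.gtid_domain_id=0*/;
+//   /*!100001 SET @@session.server_id=1*/;
+//   /*!100001 SET @@session.gtid_seq_no=100*/;
+//   BEGIN
+//   ;
+//
+// where each SET line is only emitted when its value differs from the one
+// last printed, and BEGIN plus the delimiter appear only for non-standalone
+// GTIDs.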
+
+#endif /* MYSQL_SERVER */
+
+
+/* GTID list. */
+
+Gtid_list_log_event::Gtid_list_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event)
+ : Log_event(buf, description_event), count(0), list(0)
+{
+ uint32 i;
+ uint32 val;
+ uint8 header_size= description_event->common_header_len;
+ uint8 post_header_len= description_event->post_header_len[GTID_LIST_EVENT-1];
+ if (event_len < header_size + post_header_len ||
+ post_header_len < GTID_LIST_HEADER_LEN)
+ return;
+
+ buf+= header_size;
+ val= uint4korr(buf);
+ count= val & ((1<<28)-1);
+ gl_flags= val & ((uint32)0xf << 28);
+ buf+= 4;
+ if (event_len - (header_size + post_header_len) < count*element_size ||
+ (!(list= (rpl_gtid *)my_malloc(count*sizeof(*list) + (count == 0),
+ MYF(MY_WME)))))
+ return;
+
+ for (i= 0; i < count; ++i)
+ {
+ list[i].domain_id= uint4korr(buf);
+ buf+= 4;
+ list[i].server_id= uint4korr(buf);
+ buf+= 4;
+ list[i].seq_no= uint8korr(buf);
+ buf+= 8;
+ }
+}
+
+
+#ifdef MYSQL_SERVER
+
+Gtid_list_log_event::Gtid_list_log_event(rpl_binlog_state *gtid_set,
+ uint32 gl_flags_)
+ : count(gtid_set->count()), gl_flags(gl_flags_), list(0)
+{
+ cache_type= EVENT_NO_CACHE;
+ /* Failure to allocate memory will be caught by is_valid() returning false. */
+ if (count < (1<<28) &&
+ (list = (rpl_gtid *)my_malloc(count * sizeof(*list) + (count == 0),
+ MYF(MY_WME))))
+ gtid_set->get_gtid_list(list, count);
+}
+
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+bool
+Gtid_list_log_event::to_packet(String *packet)
+{
+ uint32 i;
+ uchar *p;
+ uint32 needed_length;
+
+ DBUG_ASSERT(count < 1<<28);
+
+ needed_length= packet->length() + get_data_size();
+ if (packet->reserve(needed_length))
+ return true;
+ p= (uchar *)packet->ptr() + packet->length();
+ packet->length(needed_length);
+ int4store(p, (count & ((1<<28)-1)) | gl_flags);
+ p += 4;
+ /* Initialise the padding for empty Gtid_list. */
+ if (count == 0)
+ int2store(p, 0);
+ for (i= 0; i < count; ++i)
+ {
+ int4store(p, list[i].domain_id);
+ int4store(p+4, list[i].server_id);
+ int8store(p+8, list[i].seq_no);
+ p += 16;
+ }
+
+ return false;
+}
+
+
+bool
+Gtid_list_log_event::write(IO_CACHE *file)
+{
+ char buf[128];
+ String packet(buf, sizeof(buf), system_charset_info);
+
+ packet.length(0);
+ if (to_packet(&packet))
+ return true;
+ return
+ write_header(file, get_data_size()) ||
+ wrapper_my_b_safe_write(file, (uchar *)packet.ptr(), packet.length()) ||
+ write_footer(file);
+}
+
+
+int
+Gtid_list_log_event::do_apply_event(Relay_log_info const *rli)
+{
+ int ret= Log_event::do_apply_event(rli);
+ if (rli->until_condition == Relay_log_info::UNTIL_GTID &&
+ (gl_flags & FLAG_UNTIL_REACHED))
+ {
+ char str_buf[128];
+ String str(str_buf, sizeof(str_buf), system_charset_info);
+ const_cast<Relay_log_info*>(rli)->until_gtid_pos.to_string(&str);
+ sql_print_information("Slave SQL thread stops because it reached its"
+ " UNTIL master_gtid_pos %s", str.c_ptr_safe());
+ const_cast<Relay_log_info*>(rli)->abort_slave= true;
+ }
+ return ret;
+}
+
+
+void
+Gtid_list_log_event::pack_info(THD *thd, Protocol *protocol)
+{
+ char buf_mem[1024];
+ String buf(buf_mem, sizeof(buf_mem), system_charset_info);
+ uint32 i;
+ bool first;
+
+ buf.length(0);
+ buf.append(STRING_WITH_LEN("["));
+ first= true;
+ for (i= 0; i < count; ++i)
+ rpl_slave_state_tostring_helper(&buf, &list[i], &first);
+ buf.append(STRING_WITH_LEN("]"));
+
+ protocol->store(&buf);
+}
+#endif /* HAVE_REPLICATION */
+
+#else /* !MYSQL_SERVER */
+
+void
+Gtid_list_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
+{
+ if (!print_event_info->short_form)
+ {
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+ char buf[21];
+ uint32 i;
+
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tGtid list [");
+ for (i= 0; i < count; ++i)
+ {
+ longlong10_to_str(list[i].seq_no, buf, 10);
+ my_b_printf(&cache, "%u-%u-%s", list[i].domain_id,
+ list[i].server_id, buf);
+ if (i < count-1)
+ my_b_printf(&cache, ",\n# ");
+ }
+ my_b_printf(&cache, "]\n");
+ }
+}
+
+#endif /* MYSQL_SERVER */
+
+
+/*
+ Used to read the GTID list while sending the binlog to a slave, without
+ having to fully construct the event object.
+*/
+bool
+Gtid_list_log_event::peek(const char *event_start, uint32 event_len,
+ uint8 checksum_alg,
+ rpl_gtid **out_gtid_list, uint32 *out_list_len)
+{
+ const char *p;
+ uint32 count_field, count;
+ rpl_gtid *gtid_list;
+
+ if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32)
+ {
+ if (event_len > BINLOG_CHECKSUM_LEN)
+ event_len-= BINLOG_CHECKSUM_LEN;
+ else
+ event_len= 0;
+ }
+ else
+ DBUG_ASSERT(checksum_alg == BINLOG_CHECKSUM_ALG_UNDEF ||
+ checksum_alg == BINLOG_CHECKSUM_ALG_OFF);
+
+ if (event_len < LOG_EVENT_HEADER_LEN + GTID_LIST_HEADER_LEN)
+ return true;
+ p= event_start + LOG_EVENT_HEADER_LEN;
+ count_field= uint4korr(p);
+ p+= 4;
+ count= count_field & ((1<<28)-1);
+ if (event_len < LOG_EVENT_HEADER_LEN + GTID_LIST_HEADER_LEN +
+ 16 * count)
+ return true;
+ if (!(gtid_list= (rpl_gtid *)my_malloc(sizeof(rpl_gtid)*count + (count == 0),
+ MYF(MY_WME))))
+ return true;
+ *out_gtid_list= gtid_list;
+ *out_list_len= count;
+ while (count--)
+ {
+ gtid_list->domain_id= uint4korr(p);
+ p+= 4;
+ gtid_list->server_id= uint4korr(p);
+ p+= 4;
+ gtid_list->seq_no= uint8korr(p);
+ p+= 8;
+ ++gtid_list;
+ }
+
+ return false;
+}
+
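+/*
+  Note for callers (based on the allocation above): on success the returned
+  *out_gtid_list is allocated with my_malloc() and ownership passes to the
+  caller, which must release it with my_free(); the extra "+ (count == 0)"
+  byte only ensures a non-zero allocation size for an empty list.
+*/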
+
+/**************************************************************************
Intvar_log_event methods
**************************************************************************/
@@ -6359,12 +6949,50 @@ void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
int Xid_log_event::do_apply_event(Relay_log_info const *rli)
{
bool res;
+ int err;
+ rpl_gtid gtid;
+ uint64 sub_id;
+
+ /*
+ Record any GTID in the same transaction, so slave state is transactionally
+ consistent.
+ */
+ if ((sub_id= rli->gtid_sub_id))
+ {
+ /* Clear the GTID from the RLI so we don't accidentally reuse it. */
+ const_cast<Relay_log_info*>(rli)->gtid_sub_id= 0;
+
+ gtid= rli->current_gtid;
+ err= rpl_global_gtid_slave_state.record_gtid(thd, &gtid, sub_id, true, false);
+ if (err)
+ {
+ rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE,
+ "Error during XID COMMIT: failed to update GTID state in "
+ "%s.%s: %d: %s",
+ "mysql", rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
+ trans_rollback(thd);
+ thd->is_slave_error= 1;
+ return err;
+ }
+
+ DBUG_EXECUTE_IF("gtid_fail_after_record_gtid",
+ { my_error(ER_ERROR_DURING_COMMIT, MYF(0), HA_ERR_WRONG_COMMAND);
+ thd->is_slave_error= 1;
+ return 1;
+ });
+ }
+
/* For a slave Xid_log_event is COMMIT */
general_log_print(thd, COM_QUERY,
"COMMIT /* implicit, from Xid_log_event */");
res= trans_commit(thd); /* Automatically rolls back on error. */
thd->mdl_context.release_transactional_locks();
+ if (sub_id)
+ rpl_global_gtid_slave_state.update_state_hash(sub_id, &gtid);
+
/*
Increment the global status commit count variable
*/
@@ -6483,7 +7111,7 @@ void User_var_log_event::pack_info(THD *thd, Protocol* protocol)
buf.append(" "))
return;
old_len= buf.length();
- if (buf.reserve(old_len + val_len*2 + 2 + sizeof(" COLLATE ") +
+ if (buf.reserve(old_len + val_len * 2 + 3 + sizeof(" COLLATE ") +
MY_CS_NAME_SIZE))
return;
beg= const_cast<char *>(buf.ptr()) + old_len;
@@ -6511,7 +7139,7 @@ User_var_log_event(const char* buf, uint event_len,
const Format_description_log_event* description_event)
:Log_event(buf, description_event)
#ifndef MYSQL_CLIENT
- , deferred(false)
+ , deferred(false), query_id(0)
#endif
{
bool error= false;
@@ -6526,10 +7154,9 @@ User_var_log_event(const char* buf, uint event_len,
/*
We don't know yet is_null value, so we must assume that name_len
may have the bigger value possible, is_null= True and there is no
- payload for val.
+ payload for val, or even that name_len is 0.
*/
- if (0 == name_len ||
- !valid_buffer_range<uint>(name_len, buf_start, name,
+ if (!valid_buffer_range<uint>(name_len, buf_start, name,
event_len - UV_VAL_IS_NULL))
{
error= true;
@@ -6752,7 +7379,8 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
char *hex_str;
CHARSET_INFO *cs;
- hex_str= (char *)my_malloc(2*val_len+1+2,MYF(MY_WME)); // 2 hex digits / byte
+ // 2 hex digits / byte
+ hex_str= (char *) my_malloc(2 * val_len + 1 + 3, MYF(MY_WME));
if (!hex_str)
return;
str_to_hex(hex_str, val, val_len);
@@ -6795,12 +7423,18 @@ int User_var_log_event::do_apply_event(Relay_log_info const *rli)
Item *it= 0;
CHARSET_INFO *charset;
DBUG_ENTER("User_var_log_event::do_apply_event");
+ query_id_t sav_query_id= 0; /* saved original query id, used when applying a deferred event */
if (rli->deferred_events_collecting)
{
- set_deferred();
+ set_deferred(current_thd->query_id);
DBUG_RETURN(rli->deferred_events->add(this));
}
+ else if (is_deferred())
+ {
+ sav_query_id= current_thd->query_id;
+ current_thd->query_id= query_id; /* recreating original time context */
+ }
if (!(charset= get_charset(charset_number, MYF(MY_WME))))
DBUG_RETURN(1);
@@ -6874,6 +7508,8 @@ int User_var_log_event::do_apply_event(Relay_log_info const *rli)
(flags & User_var_log_event::UNSIGNED_F));
if (!is_deferred())
free_root(thd->mem_root, 0);
+ else
+ current_thd->query_id= sav_query_id; /* restore current query's context */
DBUG_RETURN(0);
}
@@ -7111,6 +7747,7 @@ int Stop_log_event::do_update_pos(Relay_log_info *rli)
rli->inc_event_relay_log_pos();
else
{
+ rpl_global_gtid_slave_state.record_and_update_gtid(thd, rli);
rli->inc_group_relay_log_pos(0);
flush_relay_log_info(rli);
}
@@ -7338,7 +7975,8 @@ int Create_file_log_event::do_apply_event(Relay_log_info const *rli)
THD_STAGE_INFO(thd, stage_making_temp_file_create_before_load_data);
bzero((char*)&file, sizeof(file));
- ext= slave_load_file_stem(fname_buf, file_id, server_id, ".info");
+ ext= slave_load_file_stem(fname_buf, file_id, server_id, ".info",
+ &rli->mi->connection_name);
/* old copy may exist already */
mysql_file_delete(key_file_log_event_info, fname_buf, MYF(0));
if ((fd= mysql_file_create(key_file_log_event_info,
@@ -7514,7 +8152,8 @@ int Append_block_log_event::do_apply_event(Relay_log_info const *rli)
DBUG_ENTER("Append_block_log_event::do_apply_event");
THD_STAGE_INFO(thd, stage_making_temp_file_append_before_load_data);
- slave_load_file_stem(fname, file_id, server_id, ".data");
+ slave_load_file_stem(fname, file_id, server_id, ".data",
+ &rli->mi->cmp_connection_name);
if (get_create_or_append())
{
/*
@@ -7549,7 +8188,7 @@ int Append_block_log_event::do_apply_event(Relay_log_info const *rli)
DBUG_EXECUTE_IF("remove_slave_load_file_before_write",
{
- my_delete_allow_opened(fname, MYF(0));
+ my_delete(fname, MYF(0));
});
if (mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP)))
@@ -7656,7 +8295,8 @@ void Delete_file_log_event::pack_info(THD *thd, Protocol *protocol)
int Delete_file_log_event::do_apply_event(Relay_log_info const *rli)
{
char fname[FN_REFLEN+10];
- char *ext= slave_load_file_stem(fname, file_id, server_id, ".data");
+ char *ext= slave_load_file_stem(fname, file_id, server_id, ".data",
+ &rli->mi->cmp_connection_name);
mysql_file_delete(key_file_log_event_data, fname, MYF(MY_WME));
strmov(ext, ".info");
mysql_file_delete(key_file_log_event_info, fname, MYF(MY_WME));
@@ -7760,7 +8400,8 @@ int Execute_load_log_event::do_apply_event(Relay_log_info const *rli)
IO_CACHE file;
Load_log_event *lev= 0;
- ext= slave_load_file_stem(fname, file_id, server_id, ".info");
+ ext= slave_load_file_stem(fname, file_id, server_id, ".info",
+ &rli->mi->cmp_connection_name);
if ((fd= mysql_file_open(key_file_log_event_info,
fname, O_RDONLY | O_BINARY | O_NOFOLLOW,
MYF(MY_WME))) < 0 ||
@@ -8047,7 +8688,8 @@ Execute_load_query_log_event::do_apply_event(Relay_log_info const *rli)
memcpy(p, query, fn_pos_start);
p+= fn_pos_start;
fname= (p= strmake(p, STRING_WITH_LEN(" INFILE \'")));
- p= slave_load_file_stem(p, file_id, server_id, ".data");
+ p= slave_load_file_stem(p, file_id, server_id, ".data",
+ &rli->mi->cmp_connection_name);
fname_end= p= strend(p); // Safer than p=p+5
*(p++)='\'';
switch (dup_handling) {
@@ -8908,7 +9550,7 @@ Rows_log_event::do_update_pos(Relay_log_info *rli)
Step the group log position if we are not in a transaction,
otherwise increase the event log position.
*/
- rli->stmt_done(log_pos, when);
+ rli->stmt_done(log_pos, when, thd);
/*
Clear any errors in thd->net.last_err*. It is not known if this is
needed or not. It is believed that any errors that may exist in
@@ -9281,7 +9923,7 @@ Table_map_log_event::Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
{
m_coltype= reinterpret_cast<uchar*>(m_memory);
for (unsigned int i= 0 ; i < m_table->s->fields ; ++i)
- m_coltype[i]= m_table->field[i]->type();
+ m_coltype[i]= m_table->field[i]->binlog_type();
}
/*
@@ -9620,8 +10262,8 @@ check_table_map(Relay_log_info const *rli, RPL_TABLE_LIST *table_list)
enum_tbl_map_status res= OK_TO_PROCESS;
if (rli->sql_thd->slave_thread /* filtering is for slave only */ &&
- (!rpl_filter->db_ok(table_list->db) ||
- (rpl_filter->is_on() && !rpl_filter->tables_ok("", table_list))))
+ (!rli->mi->rpl_filter->db_ok(table_list->db) ||
+ (rli->mi->rpl_filter->is_on() && !rli->mi->rpl_filter->tables_ok("", table_list))))
res= FILTERED_OUT;
else
{
@@ -9655,6 +10297,7 @@ int Table_map_log_event::do_apply_event(Relay_log_info const *rli)
char *db_mem, *tname_mem;
size_t dummy_len;
void *memory;
+ Rpl_filter *filter;
DBUG_ENTER("Table_map_log_event::do_apply_event(Relay_log_info*)");
DBUG_ASSERT(rli->sql_thd == thd);
@@ -9668,7 +10311,9 @@ int Table_map_log_event::do_apply_event(Relay_log_info const *rli)
NullS)))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- strmov(db_mem, rpl_filter->get_rewrite_db(m_dbnam, &dummy_len));
+ /* call from mysql_client_binlog_statement() will not set rli->mi */
+ filter= rli->sql_thd->slave_thread ? rli->mi->rpl_filter : global_rpl_filter;
+ strmov(db_mem, filter->get_rewrite_db(m_dbnam, &dummy_len));
strmov(tname_mem, m_tblnam);
table_list->init_one_table(db_mem, strlen(db_mem),
@@ -9677,6 +10322,7 @@ int Table_map_log_event::do_apply_event(Relay_log_info const *rli)
table_list->table_id= DBUG_EVALUATE_IF("inject_tblmap_same_id_maps_diff_table", 0, m_table_id);
table_list->updating= 1;
+ table_list->required_type= FRMTYPE_TABLE;
DBUG_PRINT("debug", ("table: %s is mapped to %u", table_list->table_name, table_list->table_id));
enum_tbl_map_status tblmap_status= check_table_map(rli, table_list);
if (tblmap_status == OK_TO_PROCESS)
@@ -10275,6 +10921,8 @@ Write_rows_log_event::do_exec_row(const Relay_log_info *const rli)
#ifdef MYSQL_CLIENT
void Write_rows_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info)
{
+ DBUG_EXECUTE_IF("simulate_cache_read_error",
+ {DBUG_SET("+d,simulate_my_b_fill_error");});
Rows_log_event::print_helper(file, print_event_info, "Write_rows");
}
#endif
@@ -11252,7 +11900,9 @@ st_print_event_info::st_print_event_info()
auto_increment_increment(0),auto_increment_offset(0), charset_inited(0),
lc_time_names_number(~0),
charset_database_number(ILLEGAL_CHARSET_INFO_NUMBER),
- thread_id(0), thread_id_printed(false), skip_replication(0),
+ thread_id(0), thread_id_printed(false), server_id(0),
+ server_id_printed(false), domain_id(0), domain_id_printed(false),
+ skip_replication(0),
base64_output_mode(BASE64_OUTPUT_UNSPEC), printed_fd_event(FALSE)
{
/*
diff --git a/sql/log_event.h b/sql/log_event.h
index ff13cab9cd5..b73c0e71f77 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -49,6 +50,8 @@
#include "sql_class.h" /* THD */
#endif
+#include "rpl_gtid.h"
+
/* Forward declarations */
class String;
@@ -260,6 +263,8 @@ struct sql_ex_info
#define HEARTBEAT_HEADER_LEN 0
#define ANNOTATE_ROWS_HEADER_LEN 0
#define BINLOG_CHECKPOINT_HEADER_LEN 4
+#define GTID_HEADER_LEN 19
+#define GTID_LIST_HEADER_LEN 4
/*
Max number of possible extra bytes in a replication event compared to a
@@ -551,7 +556,7 @@ struct sql_ex_info
/* Shouldn't be defined before */
#define EXPECTED_OPTIONS \
- ((ULL(1) << 14) | (ULL(1) << 26) | (ULL(1) << 27) | (ULL(1) << 19))
+ ((1ULL << 14) | (1ULL << 26) | (1ULL << 27) | (1ULL << 19))
#if OPTIONS_WRITTEN_TO_BIN_LOG != EXPECTED_OPTIONS
#error OPTIONS_WRITTEN_TO_BIN_LOG must NOT change their values!
@@ -599,16 +604,13 @@ enum enum_binlog_checksum_alg {
because they mis-compute the offsets into the master's binlog).
*/
#define MARIA_SLAVE_CAPABILITY_TOLERATE_HOLES 2
-/* MariaDB > 5.5, which knows about binlog_checkpoint_log_event. */
+/* MariaDB >= 10.0, which knows about binlog_checkpoint_log_event. */
#define MARIA_SLAVE_CAPABILITY_BINLOG_CHECKPOINT 3
-/*
- MariaDB server which understands MySQL 5.6 ignorable events. This server
- can tolerate receiving any event with the LOG_EVENT_IGNORABLE_F flag set.
-*/
-#define MARIA_SLAVE_CAPABILITY_IGNORABLE 4
+/* MariaDB >= 10.0.1, which knows about global transaction id events. */
+#define MARIA_SLAVE_CAPABILITY_GTID 4
/* Our capability. */
-#define MARIA_SLAVE_CAPABILITY_MINE MARIA_SLAVE_CAPABILITY_BINLOG_CHECKPOINT
+#define MARIA_SLAVE_CAPABILITY_MINE MARIA_SLAVE_CAPABILITY_GTID
/**
@@ -694,6 +696,18 @@ enum Log_event_type
that are prepared in storage engines but not yet committed.
*/
BINLOG_CHECKPOINT_EVENT= 161,
+ /*
+ Gtid event. For global transaction ID, used to start a new event group,
+ instead of the old BEGIN query event, and also to mark stand-alone
+ events.
+ */
+ GTID_EVENT= 162,
+ /*
+ Gtid list event. Logged at the start of every binlog, to record the
+ current replication state. This consists of the last GTID seen for
+ each replication domain.
+ */
+ GTID_LIST_EVENT= 163,
/* Add new MariaDB events here - right above this comment! */
@@ -766,6 +780,11 @@ typedef struct st_print_event_info
uint charset_database_number;
uint thread_id;
bool thread_id_printed;
+ uint32 server_id;
+ bool server_id_printed;
+ uint32 domain_id;
+ bool domain_id_printed;
+
/*
Track when @@skip_replication changes so we need to output a SET
statement for it.
@@ -1102,8 +1121,35 @@ public:
const Format_description_log_event
*description_event,
my_bool crc_check);
+
+ /**
+     Reads an event from a binlog or relay log. Used by the dump thread,
+     this method reads the event into a raw buffer without parsing it.
+
+     @note If the mutex is 0, the read will proceed without locking.
+
+     @note If a log file name is given, the method will check whether the
+     given binlog is still active.
+
+ @param[in] file log file to be read
+ @param[out] packet packet to hold the event
+ @param[in] lock the lock to be used upon read
+ @param[in] log_file_name_arg the log's file name
+ @param[out] is_binlog_active is the current log still active
+
+ @retval 0 success
+ @retval LOG_READ_EOF end of file, nothing was read
+ @retval LOG_READ_BOGUS malformed event
+     @retval LOG_READ_IO I/O error while reading
+ @retval LOG_READ_MEM packet memory allocation failed
+ @retval LOG_READ_TRUNC only a partial event could be read
+ @retval LOG_READ_TOO_LARGE event too large
+ */
static int read_log_event(IO_CACHE* file, String* packet,
- mysql_mutex_t* log_lock, uint8 checksum_alg_arg);
+ mysql_mutex_t* log_lock,
+ uint8 checksum_alg_arg,
+ const char *log_file_name_arg = NULL,
+ bool* is_binlog_active = NULL);
/*
init_show_field_list() prepares the column names and types for the
output of SHOW BINLOG EVENTS; it is used only by SHOW BINLOG
@@ -1127,7 +1173,7 @@ public:
return thd ? thd->db : 0;
}
#else
- Log_event() : temp_buf(0) {}
+ Log_event() : temp_buf(0), flags(0) {}
/* avoid having to link mysqlbinlog against libpthread */
static Log_event* read_log_event(IO_CACHE* file,
const Format_description_log_event
@@ -1301,6 +1347,35 @@ public:
return do_shall_skip(rli);
}
+
+ /*
+ Check if an event is non-final part of a stand-alone event group,
+ such as Intvar_log_event (such events should be processed as part
+ of the following event group, not individually).
+ */
+ static bool is_part_of_group(enum Log_event_type ev_type)
+ {
+ switch (ev_type)
+ {
+ case GTID_EVENT:
+ case INTVAR_EVENT:
+ case RAND_EVENT:
+ case USER_VAR_EVENT:
+ case TABLE_MAP_EVENT:
+ case ANNOTATE_ROWS_EVENT:
+ return true;
+ case DELETE_ROWS_EVENT:
+ case UPDATE_ROWS_EVENT:
+ case WRITE_ROWS_EVENT:
+ /*
+ ToDo: also check for non-final Rows_log_event (though such events
+ are usually in a BEGIN-COMMIT group).
+ */
+ default:
+ return false;
+ }
+ }
+
protected:
/**
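
The is_part_of_group() helper added above classifies GTID, Intvar, Rand, User_var, Table_map and Annotate_rows events as non-final parts of an event group, so a reader of the binlog or relay log keeps them attached to the group they introduce instead of handling them individually. A minimal, self-contained sketch of that grouping loop; the Ev_type enum and the event stream below are simplified stand-ins for the real Log_event_type values, purely for illustration:

#include <cstdio>
#include <vector>

/* Simplified stand-in for Log_event_type; values are illustrative only. */
enum Ev_type { GTID, INTVAR, RAND, USER_VAR, TABLE_MAP, ANNOTATE_ROWS, QUERY };

/* Mirrors the logic of Log_event::is_part_of_group(): true for events that
   must be processed together with the event group that follows them. */
static bool is_part_of_group(Ev_type t)
{
  switch (t)
  {
  case GTID: case INTVAR: case RAND: case USER_VAR:
  case TABLE_MAP: case ANNOTATE_ROWS:
    return true;
  default:
    return false;
  }
}

int main()
{
  Ev_type stream[]= { GTID, INTVAR, QUERY, GTID, USER_VAR, QUERY };
  std::vector<Ev_type> group;
  for (Ev_type ev : stream)
  {
    group.push_back(ev);
    if (!is_part_of_group(ev))           /* a final event closes the group */
    {
      printf("group of %zu event(s) complete\n", group.size());
      group.clear();
    }
  }
  return 0;
}
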
@@ -1874,6 +1949,7 @@ public:
}
Log_event_type get_type_code() { return QUERY_EVENT; }
static int dummy_event(String *packet, ulong ev_offset, uint8 checksum_alg);
+ static int begin_event(String *packet, ulong ev_offset, uint8 checksum_alg);
#ifdef MYSQL_SERVER
bool write(IO_CACHE* file);
virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; }
@@ -1896,6 +1972,8 @@ public: /* !!! Public in this patch to allow old usage */
int do_apply_event(Relay_log_info const *rli,
const char *query_arg,
uint32 q_len_arg);
+ static bool peek_is_commit_rollback(const char *event_start,
+ size_t event_len, uint8 checksum_alg);
#endif /* HAVE_REPLICATION */
/*
If true, the event always be applied by slave SQL thread or be printed by
@@ -2409,7 +2487,7 @@ protected:
Events from ourself should be skipped, but they should not
decrease the slave skip counter.
*/
- if (this->server_id == ::server_id)
+ if (this->server_id == global_system_variables.server_id)
return Log_event::EVENT_SKIP_IGNORE;
else
return Log_event::EVENT_SKIP_NOT;
@@ -2464,12 +2542,26 @@ public:
#ifdef MYSQL_SERVER
bool write(IO_CACHE* file);
#endif
- bool is_valid() const
+ bool header_is_valid() const
{
return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN :
LOG_EVENT_MINIMAL_HEADER_LEN)) &&
(post_header_len != NULL));
}
+
+ bool version_is_valid() const
+ {
+ /* It is invalid only when all version numbers are 0 */
+ return !(server_version_split.ver[0] == 0 &&
+ server_version_split.ver[1] == 0 &&
+ server_version_split.ver[2] == 0);
+ }
+
+ bool is_valid() const
+ {
+ return header_is_valid() && version_is_valid();
+ }
+
int get_data_size()
{
/*
@@ -2722,6 +2814,7 @@ public:
uchar flags;
#ifdef MYSQL_SERVER
bool deferred;
+ query_id_t query_id;
User_var_log_event(THD* thd_arg, char *name_arg, uint name_len_arg,
char *val_arg, ulong val_len_arg, Item_result type_arg,
uint charset_number_arg, uchar flags_arg,
@@ -2752,7 +2845,11 @@ public:
and which case the applier adjusts execution path.
*/
bool is_deferred() { return deferred; }
- void set_deferred() { deferred= true; }
+ /*
+    In case of deferred applying, the variable instance is flagged
+    and the parse-time query id is stored to be used at apply time.
+ */
+ void set_deferred(query_id_t qid) { deferred= true; query_id= qid; }
#endif
bool is_valid() const { return name != 0; }
@@ -2800,7 +2897,7 @@ private:
Events from ourself should be skipped, but they should not
decrease the slave skip counter.
*/
- if (this->server_id == ::server_id)
+ if (this->server_id == global_system_variables.server_id)
return Log_event::EVENT_SKIP_IGNORE;
else
return Log_event::EVENT_SKIP_NOT;
@@ -2927,6 +3024,215 @@ public:
#endif
};
+
+/**
+ @class Gtid_log_event
+
+ This event is logged as part of every event group to give the global
+ transaction id (GTID) of that group.
+
+ It replaces the BEGIN query event used in earlier versions to begin most
+ event groups, but is also used for events that used to be stand-alone.
+
+ @section Gtid_log_event_binary_format Binary Format
+
+ The binary format for Gtid_log_event has 6 extra reserved bytes to make the
+  length a total of 19 bytes (+ 19 bytes of header in common with all events).
+  This is just the minimal size for a BEGIN query event, which makes it easy
+  to replace this event with such a BEGIN event to remain compatible with old
+  slave servers.
+
+ <table>
+ <caption>Post-Header</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Format</th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>seq_no</td>
+ <td>8 byte unsigned integer</td>
+    <td>Increasing id within one server_id. Starts at 1; holes in the sequence
+    may occur</td>
+ </tr>
+
+ <tr>
+ <td>domain_id</td>
+ <td>4 byte unsigned integer</td>
+    <td>Replication domain id, identifying independent replication streams</td>
+ </tr>
+
+ <tr>
+ <td>flags</td>
+ <td>1 byte bitfield</td>
+ <td>Bit 0 set indicates stand-alone event (no terminating COMMIT)</td>
+ </tr>
+
+ <tr>
+ <td>Reserved</td>
+ <td>6 bytes</td>
+    <td>Reserved bytes, set to 0. May be used for future expansion.</td>
+ </tr>
+ </table>
+
+  The body of Gtid_log_event is empty. The total event size is 19 bytes +
+  the normal 19-byte common header.
+*/
+
+class Gtid_log_event: public Log_event
+{
+public:
+ uint64 seq_no;
+ uint32 domain_id;
+ uchar flags2;
+
+ /* Flags2. */
+
+ /* FL_STANDALONE is set when there is no terminating COMMIT event. */
+ static const uchar FL_STANDALONE= 1;
+
+#ifdef MYSQL_SERVER
+ Gtid_log_event(THD *thd_arg, uint64 seq_no, uint32 domain_id, bool standalone,
+ uint16 flags, bool is_transactional);
+#ifdef HAVE_REPLICATION
+ void pack_info(THD *thd, Protocol *protocol);
+ virtual int do_apply_event(Relay_log_info const *rli);
+ virtual int do_update_pos(Relay_log_info *rli);
+ virtual enum_skip_reason do_shall_skip(Relay_log_info *rli);
+#endif
+#else
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+ Gtid_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+ ~Gtid_log_event() { }
+ Log_event_type get_type_code() { return GTID_EVENT; }
+ int get_data_size() { return GTID_HEADER_LEN; }
+ bool is_valid() const { return seq_no != 0; }
+#ifdef MYSQL_SERVER
+ bool write(IO_CACHE *file);
+ static int make_compatible_event(String *packet, bool *need_dummy_event,
+ ulong ev_offset, uint8 checksum_alg);
+ static bool peek(const char *event_start, size_t event_len,
+ uint8 checksum_alg,
+ uint32 *domain_id, uint32 *server_id, uint64 *seq_no,
+ uchar *flags2);
+#endif
+};
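
The post-header layout documented above (8-byte seq_no, 4-byte domain_id, 1-byte flags, 6 reserved bytes) can be decoded with plain little-endian reads. A self-contained sketch under that assumption; the helpers read_le32()/read_le64() and the struct gtid_post_header are invented for the example and are not server functions:

#include <cstdint>
#include <cstdio>

/* Little-endian helpers, similar in spirit to uint4korr()/uint8korr(). */
static uint32_t read_le32(const unsigned char *p)
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

static uint64_t read_le64(const unsigned char *p)
{
  return (uint64_t) read_le32(p) | ((uint64_t) read_le32(p + 4) << 32);
}

struct gtid_post_header
{
  uint64_t seq_no;       /* 8 bytes: sequence number, starts at 1 */
  uint32_t domain_id;    /* 4 bytes: replication domain id */
  unsigned char flags2;  /* 1 byte:  bit 0 = FL_STANDALONE */
};

/* buf points just past the 19-byte common header; the 6 reserved bytes
   that follow flags2 are written as 0 and simply skipped here. */
static gtid_post_header decode_gtid_post_header(const unsigned char *buf)
{
  gtid_post_header h;
  h.seq_no=    read_le64(buf);
  h.domain_id= read_le32(buf + 8);
  h.flags2=    buf[12];
  return h;
}

int main()
{
  /* seq_no=5, domain_id=3, flags2=FL_STANDALONE, 6 reserved zero bytes */
  const unsigned char buf[19]=
    { 5,0,0,0,0,0,0,0,  3,0,0,0,  1,  0,0,0,0,0,0 };
  gtid_post_header h= decode_gtid_post_header(buf);
  printf("domain_id=%u seq_no=%llu standalone=%d\n",
         h.domain_id, (unsigned long long) h.seq_no, h.flags2 & 1);
  return 0;
}
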
+
+
+/**
+ @class Gtid_list_log_event
+
+ This event is logged at the start of every binlog file to record the
+ current replication state: the last global transaction id (GTID) applied
+ on the server within each replication domain.
+
+ It consists of a list of GTIDs, one for each replication domain ever seen
+ on the server.
+
+ @section Gtid_list_log_event_binary_format Binary Format
+
+ <table>
+ <caption>Post-Header</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Format</th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>count</td>
+ <td>4 byte unsigned integer</td>
+ <td>The lower 28 bits are the number of GTIDs. The upper 4 bits are
+ flags bits.</td>
+ </tr>
+ </table>
+
+ <table>
+ <caption>Body</caption>
+
+ <tr>
+ <th>Name</th>
+ <th>Format</th>
+ <th>Description</th>
+ </tr>
+
+ <tr>
+ <td>domain_id</td>
+ <td>4 byte unsigned integer</td>
+ <td>Replication domain id of one GTID</td>
+ </tr>
+
+ <tr>
+ <td>server_id</td>
+ <td>4 byte unsigned integer</td>
+ <td>Server id of one GTID</td>
+ </tr>
+
+ <tr>
+ <td>seq_no</td>
+ <td>8 byte unsigned integer</td>
+ <td>sequence number of one GTID</td>
+ </tr>
+ </table>
+
+ The three elements in the body repeat COUNT times to form the GTID list.
+
+ At the time of writing, only one flag bit is in use.
+
+ Bit 28 of `count' is used for flag FLAG_UNTIL_REACHED, which is sent in a
+ Gtid_list event from the master to the slave to indicate that the START
+ SLAVE UNTIL master_gtid_pos=xxx condition has been reached. (This flag is
+  only sent in "fake" events generated on the fly; it is not written into
+  the binlog).
+*/
+
+class Gtid_list_log_event: public Log_event
+{
+public:
+ uint32 count;
+ uint32 gl_flags;
+ struct rpl_gtid *list;
+
+ static const uint element_size= 4+4+8;
+ static const uint32 FLAG_UNTIL_REACHED= (1<<28);
+
+#ifdef MYSQL_SERVER
+ Gtid_list_log_event(rpl_binlog_state *gtid_set, uint32 gl_flags);
+#ifdef HAVE_REPLICATION
+ void pack_info(THD *thd, Protocol *protocol);
+#endif
+#else
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+ Gtid_list_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+ ~Gtid_list_log_event() { my_free(list); }
+ Log_event_type get_type_code() { return GTID_LIST_EVENT; }
+ int get_data_size() {
+ /*
+ Replacing with dummy event, needed for older slaves, requires a minimum
+ of 6 bytes in the body.
+ */
+ return (count==0 ?
+ GTID_LIST_HEADER_LEN+2 : GTID_LIST_HEADER_LEN+count*element_size);
+ }
+ bool is_valid() const { return list != NULL; }
+#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
+ bool to_packet(String *packet);
+ bool write(IO_CACHE *file);
+ virtual int do_apply_event(Relay_log_info const *rli);
+#endif
+ static bool peek(const char *event_start, uint32 event_len,
+ uint8 checksum_alg,
+ rpl_gtid **out_gtid_list, uint32 *out_list_len);
+};
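
A self-contained sketch of the count/flags packing described above and of the size formula used by get_data_size(): the low 28 bits of the 4-byte post-header word are the number of GTIDs, bit 28 is FLAG_UNTIL_REACHED, and each list element is 4+4+8 = 16 bytes. Only the constants mirror the declarations above; the decoding code itself is illustrative, not the server's:

#include <cstdint>
#include <cstdio>

static const uint32_t GTID_LIST_HEADER_LEN= 4;
static const uint32_t ELEMENT_SIZE= 4 + 4 + 8;    /* domain_id, server_id, seq_no */
static const uint32_t FLAG_UNTIL_REACHED= 1U << 28;

int main()
{
  uint32_t word= 2 | FLAG_UNTIL_REACHED;          /* example: 2 GTIDs, flag set */

  uint32_t count= word & ((1U << 28) - 1);        /* low 28 bits */
  bool until_reached= (word & FLAG_UNTIL_REACHED) != 0;

  /* Same formula as Gtid_list_log_event::get_data_size(): an empty list still
     needs 2 extra body bytes so it can be replaced by a dummy event. */
  uint32_t data_size= count == 0 ? GTID_LIST_HEADER_LEN + 2
                                 : GTID_LIST_HEADER_LEN + count * ELEMENT_SIZE;

  printf("count=%u until_reached=%d data_size=%u\n",
         count, (int) until_reached, data_size);  /* count=2 ... data_size=36 */
  return 0;
}
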
+
+
/* the classes below are for the new LOAD DATA INFILE logging */
/**
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index a4ab111f613..6623d7655d7 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -1847,7 +1847,7 @@ Old_rows_log_event::do_update_pos(Relay_log_info *rli)
Step the group log position if we are not in a transaction,
otherwise increase the event log position.
*/
- rli->stmt_done(log_pos, when);
+ rli->stmt_done(log_pos, when, thd);
/*
Clear any errors in thd->net.last_err*. It is not known if this is
needed or not. It is believed that any errors that may exist in
diff --git a/sql/log_event_old.h b/sql/log_event_old.h
index 3e1efd8e2c0..0034bb9d142 100644
--- a/sql/log_event_old.h
+++ b/sql/log_event_old.h
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifndef LOG_EVENT_OLD_H
#define LOG_EVENT_OLD_H
diff --git a/sql/mdl.cc b/sql/mdl.cc
index dd7d3f0fbf4..c3a78f4c40b 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -85,7 +85,8 @@ PSI_stage_info MDL_key::m_namespace_to_wait_state_name[NAMESPACE_END]=
{0, "Waiting for stored procedure metadata lock", 0},
{0, "Waiting for trigger metadata lock", 0},
{0, "Waiting for event metadata lock", 0},
- {0, "Waiting for commit lock", 0}
+ {0, "Waiting for commit lock", 0},
+ {0, "User lock", 0} /* Be compatible with old status. */
};
#ifdef HAVE_PSI_INTERFACE
@@ -125,6 +126,8 @@ public:
~MDL_map_partition();
inline MDL_lock *find_or_insert(const MDL_key *mdl_key,
my_hash_value_type hash_value);
+ unsigned long get_lock_owner(const MDL_key *key,
+ my_hash_value_type hash_value);
inline void remove(MDL_lock *lock);
my_hash_value_type get_key_hash(const MDL_key *mdl_key) const
{
@@ -175,6 +178,7 @@ public:
void init();
void destroy();
MDL_lock *find_or_insert(const MDL_key *key);
+ unsigned long get_lock_owner(const MDL_key *key);
void remove(MDL_lock *lock);
private:
/** Array of partitions where the locks are actually stored. */
@@ -429,6 +433,8 @@ public:
inline static MDL_lock *create(const MDL_key *key,
MDL_map_partition *map_part);
+ inline unsigned long get_lock_owner() const;
+
void reschedule_waiters();
void remove_ticket(Ticket_list MDL_lock::*queue, MDL_ticket *ticket);
@@ -955,6 +961,57 @@ bool MDL_map_partition::move_from_hash_to_lock_mutex(MDL_lock *lock)
/**
+ * Return thread id of the owner of the lock, if it is owned.
+ */
+
+unsigned long
+MDL_map::get_lock_owner(const MDL_key *mdl_key)
+{
+ MDL_lock *lock;
+ unsigned long res= 0;
+
+ if (mdl_key->mdl_namespace() == MDL_key::GLOBAL ||
+ mdl_key->mdl_namespace() == MDL_key::COMMIT)
+ {
+ lock= (mdl_key->mdl_namespace() == MDL_key::GLOBAL) ? m_global_lock :
+ m_commit_lock;
+ mysql_prlock_rdlock(&lock->m_rwlock);
+ res= lock->get_lock_owner();
+ mysql_prlock_unlock(&lock->m_rwlock);
+ }
+ else
+ {
+ my_hash_value_type hash_value= m_partitions.at(0)->get_key_hash(mdl_key);
+ uint part_id= hash_value % mdl_locks_hash_partitions;
+ MDL_map_partition *part= m_partitions.at(part_id);
+ res= part->get_lock_owner(mdl_key, hash_value);
+ }
+ return res;
+}
+
+
+
+unsigned long
+MDL_map_partition::get_lock_owner(const MDL_key *mdl_key,
+ my_hash_value_type hash_value)
+{
+ MDL_lock *lock;
+ unsigned long res= 0;
+
+ mysql_mutex_lock(&m_mutex);
+ lock= (MDL_lock*) my_hash_search_using_hash_value(&m_locks,
+ hash_value,
+ mdl_key->ptr(),
+ mdl_key->length());
+ if (lock)
+ res= lock->get_lock_owner();
+ mysql_mutex_unlock(&m_mutex);
+
+ return res;
+}
+
+
+/**
Destroy MDL_lock object or delegate this responsibility to
whatever thread that holds the last outstanding reference to
it.
@@ -1746,6 +1803,23 @@ MDL_lock::can_grant_lock(enum_mdl_type type_arg,
}
+/**
+ Return thread id of the thread to which the first ticket was
+ granted.
+*/
+
+inline unsigned long
+MDL_lock::get_lock_owner() const
+{
+ Ticket_iterator it(m_granted);
+ MDL_ticket *ticket;
+
+ if ((ticket= it++))
+ return ticket->get_ctx()->get_thread_id();
+ return 0;
+}
+
+
/** Remove a ticket from waiting or pending queue and wakeup up waiters. */
void MDL_lock::remove_ticket(Ticket_list MDL_lock::*list, MDL_ticket *ticket)
@@ -2223,31 +2297,37 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
find_deadlock();
- if (lock->needs_notification(ticket))
+ struct timespec abs_shortwait;
+ set_timespec(abs_shortwait, 1);
+ wait_status= MDL_wait::EMPTY;
+
+ while (cmp_timespec(abs_shortwait, abs_timeout) <= 0)
{
- struct timespec abs_shortwait;
- set_timespec(abs_shortwait, 1);
- wait_status= MDL_wait::EMPTY;
+ /* abs_timeout is far away. Wait a short while and notify locks. */
+ wait_status= m_wait.timed_wait(m_owner, &abs_shortwait, FALSE,
+ mdl_request->key.get_wait_state_name());
- while (cmp_timespec(abs_shortwait, abs_timeout) <= 0)
+ if (wait_status != MDL_wait::EMPTY)
+ break;
+ /* Check if the client is gone while we were waiting. */
+ if (! thd_is_connected(m_owner->get_thd()))
{
- /* abs_timeout is far away. Wait a short while and notify locks. */
- wait_status= m_wait.timed_wait(m_owner, &abs_shortwait, FALSE,
- mdl_request->key.get_wait_state_name());
-
- if (wait_status != MDL_wait::EMPTY)
- break;
+ /*
+        The client is disconnected. Don't wait forever: treat it the
+        same as a wait timeout, as this ensures all error handling is
+        correct.
+      */
+ wait_status= MDL_wait::TIMEOUT;
+ break;
+ }
- mysql_prlock_wrlock(&lock->m_rwlock);
+ mysql_prlock_wrlock(&lock->m_rwlock);
+ if (lock->needs_notification(ticket))
lock->notify_conflicting_locks(this);
- mysql_prlock_unlock(&lock->m_rwlock);
- set_timespec(abs_shortwait, 1);
- }
- if (wait_status == MDL_wait::EMPTY)
- wait_status= m_wait.timed_wait(m_owner, &abs_timeout, TRUE,
- mdl_request->key.get_wait_state_name());
+ mysql_prlock_unlock(&lock->m_rwlock);
+ set_timespec(abs_shortwait, 1);
}
- else
+ if (wait_status == MDL_wait::EMPTY)
wait_status= m_wait.timed_wait(m_owner, &abs_timeout, TRUE,
mdl_request->key.get_wait_state_name());
@@ -2745,7 +2825,7 @@ void MDL_context::release_lock(MDL_ticket *ticket)
the corresponding lists, i.e. stored in reverse temporal order.
This allows to employ this function to:
- back off in case of a lock conflict.
- - release all locks in the end of a statment or transaction
+ - release all locks in the end of a statement or transaction
- rollback to a savepoint.
*/
@@ -2864,6 +2944,22 @@ MDL_context::is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace,
/**
+ Return thread id of the owner of the lock or 0 if
+ there is no owner.
+ @note: Lock type is not considered at all, the function
+ simply checks that there is some lock for the given key.
+
+ @return thread id of the owner of the lock or 0
+*/
+
+unsigned long
+MDL_context::get_lock_owner(MDL_key *key)
+{
+ return mdl_locks.get_lock_owner(key);
+}
+
+
+/**
Check if we have any pending locks which conflict with existing shared lock.
@pre The ticket must match an acquired lock.
@@ -2876,6 +2972,11 @@ bool MDL_ticket::has_pending_conflicting_lock() const
return m_lock->has_pending_conflicting_lock(m_type);
}
+/** Return a key identifying this lock. */
+MDL_key *MDL_ticket::get_key() const
+{
+ return &m_lock->key;
+}
/**
Releases metadata locks that were acquired after a specific savepoint.
diff --git a/sql/mdl.h b/sql/mdl.h
index ddbd55ac467..e79df9b6cd7 100644
--- a/sql/mdl.h
+++ b/sql/mdl.h
@@ -305,6 +305,7 @@ public:
TRIGGER,
EVENT,
COMMIT,
+ USER_LOCK, /* user level locks. */
/* This should be the last ! */
NAMESPACE_END };
@@ -340,7 +341,8 @@ public:
are not longer than NAME_LEN. Still we play safe and try to avoid
buffer overruns.
*/
- DBUG_ASSERT(strlen(db) <= NAME_LEN && strlen(name) <= NAME_LEN);
+ DBUG_ASSERT(strlen(db) <= NAME_LEN);
+ DBUG_ASSERT(strlen(name) <= NAME_LEN);
m_db_name_length= static_cast<uint16>(strmake(m_ptr + 1, db, NAME_LEN) -
m_ptr - 1);
m_length= static_cast<uint16>(strmake(m_ptr + m_db_name_length + 2, name,
@@ -589,6 +591,7 @@ public:
}
enum_mdl_type get_type() const { return m_type; }
MDL_lock *get_lock() const { return m_lock; }
+ MDL_key *get_key() const;
void downgrade_lock(enum_mdl_type type);
bool has_stronger_or_equal_type(enum_mdl_type type) const;
@@ -753,6 +756,7 @@ public:
bool is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace,
const char *db, const char *name,
enum_mdl_type mdl_type);
+ unsigned long get_lock_owner(MDL_key *mdl_key);
bool has_lock(const MDL_savepoint &mdl_savepoint, MDL_ticket *mdl_ticket);
@@ -821,9 +825,9 @@ private:
Lists of MDL tickets:
---------------------
The entire set of locks acquired by a connection can be separated
- in three subsets according to their: locks released at the end of
- statement, at the end of transaction and locks are released
- explicitly.
+    in three subsets according to their duration: locks released at
+    the end of statement, locks released at the end of transaction,
+    and locks released explicitly.
Statement and transactional locks are locks with automatic scope.
They are accumulated in the course of a transaction, and released
@@ -832,11 +836,12 @@ private:
locks). They must not be (and never are) released manually,
i.e. with release_lock() call.
- Locks with explicit duration are taken for locks that span
+ Tickets with explicit duration are taken for locks that span
multiple transactions or savepoints.
These are: HANDLER SQL locks (HANDLER SQL is
transaction-agnostic), LOCK TABLES locks (you can COMMIT/etc
- under LOCK TABLES, and the locked tables stay locked), and
+ under LOCK TABLES, and the locked tables stay locked), user level
+ locks (GET_LOCK()/RELEASE_LOCK() functions) and
locks implementing "global read lock".
Statement/transactional locks are always prepended to the
@@ -845,20 +850,19 @@ private:
a savepoint, we start popping and releasing tickets from the
front until we reach the last ticket acquired after the savepoint.
- Locks with explicit duration stored are not stored in any
+ Locks with explicit duration are not stored in any
particular order, and among each other can be split into
- three sets:
+ four sets:
- [LOCK TABLES locks] [HANDLER locks] [GLOBAL READ LOCK locks]
+ [LOCK TABLES locks] [USER locks] [HANDLER locks] [GLOBAL READ LOCK locks]
The following is known about these sets:
- * GLOBAL READ LOCK locks are always stored after LOCK TABLES
- locks and after HANDLER locks. This is because one can't say
- SET GLOBAL read_only=1 or FLUSH TABLES WITH READ LOCK
- if one has locked tables. One can, however, LOCK TABLES
- after having entered the read only mode. Note, that
- subsequent LOCK TABLES statement will unlock the previous
+ * GLOBAL READ LOCK locks are always stored last.
+ This is because one can't say SET GLOBAL read_only=1 or
+ FLUSH TABLES WITH READ LOCK if one has locked tables. One can,
+ however, LOCK TABLES after having entered the read only mode.
+ Note, that subsequent LOCK TABLES statement will unlock the previous
set of tables, but not the GRL!
There are no HANDLER locks after GRL locks because
SET GLOBAL read_only performs a FLUSH TABLES WITH
@@ -910,6 +914,8 @@ private:
public:
void find_deadlock();
+ ulong get_thread_id() const { return thd_get_thread_id(get_thd()); }
+
bool visit_subgraph(MDL_wait_for_graph_visitor *dvisitor);
/** Inform the deadlock detector there is an edge in the wait-for graph. */
@@ -944,6 +950,17 @@ private:
void mdl_init();
void mdl_destroy();
+extern "C" unsigned long thd_get_thread_id(const MYSQL_THD thd);
+
+/**
+ Check if a connection in question is no longer connected.
+
+ @details
+ Replication apply thread is always connected. Otherwise,
+ does a poll on the associated socket to check if the client
+ is gone.
+*/
+extern "C" int thd_is_connected(MYSQL_THD thd);
#ifndef DBUG_OFF
extern mysql_mutex_t LOCK_open;
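
A rough, self-contained sketch of the polling idea mentioned in the thd_is_connected() comment, for POSIX/Linux sockets: poll() with a zero timeout and then a MSG_PEEK read, so nothing blocks and no pending data is consumed. The name peer_is_connected() is made up for the example; this is only the concept, not the server's vio or thd_is_connected() implementation, and it is a fragment to be called with a connected socket fd rather than a complete program:

#include <poll.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Returns 1 if the peer on fd still looks connected, 0 if it hung up. */
static int peer_is_connected(int fd)
{
  struct pollfd pfd;
  pfd.fd= fd;
  pfd.events= POLLIN | POLLPRI;
  pfd.revents= 0;

  if (poll(&pfd, 1, 0) <= 0)                 /* nothing pending: assume alive */
    return 1;
  if (pfd.revents & (POLLERR | POLLHUP | POLLNVAL))
    return 0;

  char byte;
  ssize_t n= recv(fd, &byte, 1, MSG_PEEK | MSG_DONTWAIT);
  return n != 0;                             /* 0 means orderly shutdown by peer */
}
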
diff --git a/sql/mem_root_array.h b/sql/mem_root_array.h
index 5ce4dcb584d..9dc9638c13f 100644
--- a/sql/mem_root_array.h
+++ b/sql/mem_root_array.h
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifndef MEM_ROOT_ARRAY_INCLUDED
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 2f41f2ebaf2..e42ea9ec452 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -1114,6 +1114,7 @@ void DsMrr_impl::close_second_handler()
{
if (secondary_file)
{
+ secondary_file->extra(HA_EXTRA_NO_KEYREAD);
secondary_file->ha_index_or_rnd_end();
secondary_file->ha_external_lock(current_thd, F_UNLCK);
secondary_file->ha_close();
@@ -1199,9 +1200,9 @@ bool DsMrr_impl::setup_buffer_sharing(uint key_size_in_keybuf,
statistics?
*/
uint parts= my_count_bits(key_tuple_map);
- ulong rpc;
+ ha_rows rpc;
ulonglong rowids_size= rowid_buf_elem_size;
- if ((rpc= key_info->actual_rec_per_key(parts - 1)))
+ if ((rpc= (ha_rows) key_info->actual_rec_per_key(parts - 1)))
rowids_size= rowid_buf_elem_size * rpc;
double fraction_for_rowids=
@@ -1648,7 +1649,7 @@ int DsMrr_impl::dsmrr_explain_info(uint mrr_mode, char *str, size_t size)
uint used_str_len= strlen(used_str);
uint copy_len= MY_MIN(used_str_len, size);
- memcpy(str, used_str, size);
+ memcpy(str, used_str, copy_len);
return copy_len;
}
return 0;
diff --git a/sql/my_apc.cc b/sql/my_apc.cc
index 5d1adb6bca7..755b3890433 100644
--- a/sql/my_apc.cc
+++ b/sql/my_apc.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2011 - 2012, Monty Program Ab
+ Copyright (c) 2011, 2013 Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,7 +17,6 @@
#ifndef MY_APC_STANDALONE
-#include "sql_priv.h"
#include "sql_class.h"
#endif
diff --git a/sql/my_apc.h b/sql/my_apc.h
index 7f19809c082..c84074b2da5 100644
--- a/sql/my_apc.h
+++ b/sql/my_apc.h
@@ -1,7 +1,7 @@
-#ifndef INCLUDES_MY_APC_H
-#define INCLUDES_MY_APC_H
+#ifndef SQL_MY_APC_INCLUDED
+#define SQL_MY_APC_INCLUDED
/*
- Copyright (c) 2011 - 2012, Monty Program Ab
+ Copyright (c) 2011, 2013 Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -134,5 +134,5 @@ private:
void init_show_explain_psi_keys(void);
#endif
-#endif //INCLUDES_MY_APC_H
+#endif //SQL_MY_APC_INCLUDED
diff --git a/sql/my_decimal.h b/sql/my_decimal.h
index 3b104bbdee6..e561d180d12 100644
--- a/sql/my_decimal.h
+++ b/sql/my_decimal.h
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**
@file
diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc
index 6a1ca2e09d3..6f28760c055 100644
--- a/sql/mysql_install_db.cc
+++ b/sql/mysql_install_db.cc
@@ -54,6 +54,7 @@ static char *opt_os_password;
static my_bool opt_default_user;
static my_bool opt_allow_remote_root_access;
static my_bool opt_skip_networking;
+static my_bool opt_verbose_bootstrap;
static my_bool verbose_errors;
@@ -83,6 +84,8 @@ static struct my_option my_long_options[]=
0, 0},
{"silent", 's', "Print less information", &opt_silent,
&opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"verbose-bootstrap", 'o', "Include mysqld bootstrap output",&opt_verbose_bootstrap,
+ &opt_verbose_bootstrap, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -244,11 +247,12 @@ static char *init_bootstrap_command_line(char *cmdline, size_t size)
get_basedir(basedir, sizeof(basedir), mysqld_path);
my_snprintf(cmdline, size-1,
- "\"\"%s\" --no-defaults --bootstrap"
- " \"--language=%s\\share\\english\""
+ "\"\"%s\" --no-defaults %s --bootstrap"
+ " \"--lc-messages-dir=%s/share\""
" --basedir=. --datadir=. --default-storage-engine=myisam"
" --max_allowed_packet=9M "
- " --net-buffer-length=16k\"", mysqld_path, basedir);
+ " --net-buffer-length=16k\"", mysqld_path,
+ opt_verbose_bootstrap?"--console":"", basedir );
return cmdline;
}
@@ -377,7 +381,7 @@ static int register_service()
static void clean_directory(const char *dir)
{
char dir2[MAX_PATH+2];
- *(strmake(dir2, dir, MAX_PATH+1)+1)= 0;
+ *(strmake_buf(dir2, dir)+1)= 0;
SHFILEOPSTRUCT fileop;
fileop.hwnd= NULL; /* no status display */
@@ -552,7 +556,9 @@ static int create_db_instance()
/* Do mysqld --bootstrap. */
init_bootstrap_command_line(cmdline, sizeof(cmdline));
- /* verbose("Executing %s", cmdline); */
+
+  if (opt_verbose_bootstrap)
+ printf("Executing %s\n", cmdline);
in= popen(cmdline, "wt");
if (!in)
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index d0b54100c43..2575ebed209 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -12,9 +12,9 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
-#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
+#include "sql_plugin.h"
#include "sql_priv.h"
#include "unireg.h"
#include <signal.h>
@@ -469,7 +469,7 @@ ulong delay_key_write_options;
uint protocol_version;
uint lower_case_table_names;
ulong tc_heuristic_recover= 0;
-uint volatile thread_count;
+int32 thread_count;
int32 thread_running;
ulong thread_created;
ulong back_log, connect_timeout, concurrency, server_id;
@@ -488,11 +488,13 @@ ulong slave_max_allowed_packet= 0;
ulonglong binlog_stmt_cache_size=0;
ulonglong max_binlog_stmt_cache_size=0;
ulonglong query_cache_size=0;
+ulong query_cache_limit=0;
ulong refresh_version; /* Increments on each reload */
ulong executed_events=0;
query_id_t global_query_id;
my_atomic_rwlock_t global_query_id_lock;
my_atomic_rwlock_t thread_running_lock;
+my_atomic_rwlock_t thread_count_lock;
my_atomic_rwlock_t statistics_lock;
ulong aborted_threads, aborted_connects;
ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size;
@@ -637,7 +639,8 @@ MYSQL_FILE *bootstrap_file;
int bootstrap_error;
I_List<THD> threads;
-Rpl_filter* rpl_filter;
+Rpl_filter* cur_rpl_filter;
+Rpl_filter* global_rpl_filter;
Rpl_filter* binlog_filter;
THD *first_global_thread()
@@ -680,7 +683,7 @@ SHOW_COMP_OPTION have_openssl;
pthread_key(MEM_ROOT**,THR_MALLOC);
pthread_key(THD*, THR_THD);
-mysql_mutex_t LOCK_thread_count;
+mysql_mutex_t LOCK_thread_count, LOCK_thread_cache;
mysql_mutex_t
LOCK_status, LOCK_error_log, LOCK_short_uuid_generator,
LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
@@ -692,6 +695,8 @@ mysql_mutex_t
mysql_mutex_t LOCK_stats, LOCK_global_user_client_stats,
LOCK_global_table_stats, LOCK_global_index_stats;
+mysql_mutex_t LOCK_rpl_gtid_state;
+
/**
The below lock protects access to two global server variables:
max_prepared_stmt_count and prepared_stmt_count. These variables
@@ -751,8 +756,7 @@ static char **remaining_argv;
int orig_argc;
char **orig_argv;
-#ifndef EMBEDDED_LIBRARY
-static struct my_option pfs_early_options[]=
+static struct my_option pfs_early_options[] __attribute__((unused)) =
{
{"performance_schema_instrument", OPT_PFS_INSTRUMENT,
"Default startup value for a performance schema instrument.",
@@ -819,7 +823,6 @@ static struct my_option pfs_early_options[]=
&pfs_param.m_consumer_statement_digest_enabled, 0,
GET_BOOL, OPT_ARG, TRUE, 0, 0, 0, 0, 0}
};
-#endif
#ifdef HAVE_PSI_INTERFACE
#ifdef HAVE_MMAP
@@ -848,16 +851,21 @@ PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_relay_log_info_sleep_lock,
key_relay_log_info_log_space_lock, key_relay_log_info_run_lock,
key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data,
- key_LOCK_error_messages, key_LOG_INFO_lock, key_LOCK_thread_count,
+ key_LOCK_error_messages, key_LOG_INFO_lock,
+ key_LOCK_thread_count, key_LOCK_thread_cache,
key_PARTITION_LOCK_auto_inc;
PSI_mutex_key key_RELAYLOG_LOCK_index;
+PSI_mutex_key key_LOCK_slave_state, key_LOCK_binlog_state;
PSI_mutex_key key_LOCK_stats,
key_LOCK_global_user_client_stats, key_LOCK_global_table_stats,
key_LOCK_global_index_stats,
key_LOCK_wakeup_ready;
+PSI_mutex_key key_LOCK_rpl_gtid_state;
+
PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered;
+PSI_mutex_key key_TABLE_SHARE_LOCK_share;
static PSI_mutex_info all_server_mutexes[]=
{
@@ -900,6 +908,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_global_table_stats, "LOCK_global_table_stats", PSI_FLAG_GLOBAL},
{ &key_LOCK_global_index_stats, "LOCK_global_index_stats", PSI_FLAG_GLOBAL},
{ &key_LOCK_wakeup_ready, "THD::LOCK_wakeup_ready", 0},
+ { &key_LOCK_rpl_gtid_state, "LOCK_rpl_gtid_state", PSI_FLAG_GLOBAL},
{ &key_LOCK_thd_data, "THD::LOCK_thd_data", 0},
{ &key_LOCK_user_conn, "LOCK_user_conn", PSI_FLAG_GLOBAL},
{ &key_LOCK_uuid_short_generator, "LOCK_uuid_short_generator", PSI_FLAG_GLOBAL},
@@ -914,12 +923,16 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_relay_log_info_sleep_lock, "Relay_log_info::sleep_lock", 0},
{ &key_structure_guard_mutex, "Query_cache::structure_guard_mutex", 0},
{ &key_TABLE_SHARE_LOCK_ha_data, "TABLE_SHARE::LOCK_ha_data", 0},
+ { &key_TABLE_SHARE_LOCK_share, "TABLE_SHARE::LOCK_share", 0},
{ &key_LOCK_error_messages, "LOCK_error_messages", PSI_FLAG_GLOBAL},
{ &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL},
{ &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL},
{ &key_LOG_INFO_lock, "LOG_INFO::lock", 0},
{ &key_LOCK_thread_count, "LOCK_thread_count", PSI_FLAG_GLOBAL},
- { &key_PARTITION_LOCK_auto_inc, "HA_DATA_PARTITION::LOCK_auto_inc", 0}
+ { &key_LOCK_thread_cache, "LOCK_thread_cache", PSI_FLAG_GLOBAL},
+ { &key_PARTITION_LOCK_auto_inc, "HA_DATA_PARTITION::LOCK_auto_inc", 0},
+ { &key_LOCK_slave_state, "LOCK_slave_state", 0},
+ { &key_LOCK_binlog_state, "LOCK_binlog_state", 0}
};
PSI_rwlock_key key_rwlock_LOCK_grant, key_rwlock_LOCK_logger,
@@ -1005,7 +1018,8 @@ static PSI_cond_info all_server_conds[]=
PSI_thread_key key_thread_bootstrap, key_thread_delayed_insert,
key_thread_handle_manager, key_thread_main,
- key_thread_one_connection, key_thread_signal_hand;
+ key_thread_one_connection, key_thread_signal_hand,
+ key_thread_slave_init;
static PSI_thread_info all_server_threads[]=
{
@@ -1030,7 +1044,8 @@ static PSI_thread_info all_server_threads[]=
{ &key_thread_handle_manager, "manager", PSI_FLAG_GLOBAL},
{ &key_thread_main, "main", PSI_FLAG_GLOBAL},
{ &key_thread_one_connection, "one_connection", 0},
- { &key_thread_signal_hand, "signal_handler", PSI_FLAG_GLOBAL}
+ { &key_thread_signal_hand, "signal_handler", PSI_FLAG_GLOBAL},
+ { &key_thread_slave_init, "slave_init", PSI_FLAG_GLOBAL}
};
#ifdef HAVE_MMAP
@@ -1046,6 +1061,7 @@ PSI_file_key key_file_binlog, key_file_binlog_index, key_file_casetest,
key_file_trg, key_file_trn, key_file_init;
PSI_file_key key_file_query_log, key_file_slow_log;
PSI_file_key key_file_relaylog, key_file_relaylog_index;
+PSI_file_key key_file_binlog_state;
#endif /* HAVE_PSI_INTERFACE */
@@ -1384,6 +1400,9 @@ struct st_VioSSLFd *ssl_acceptor_fd;
*/
uint connection_count= 0, extra_connection_count= 0;
+my_bool opt_gtid_strict_mode= FALSE;
+
+
/* Function declarations */
pthread_handler_t signal_hand(void *arg);
@@ -1413,6 +1432,7 @@ static void clean_up(bool print_message);
static int test_if_case_insensitive(const char *dir_name);
#ifndef EMBEDDED_LIBRARY
+static bool pid_file_created= false;
static void usage(void);
static void start_signal_handler(void);
static void close_server_sock();
@@ -1421,6 +1441,7 @@ static void wait_for_signal_thread_to_end(void);
static void create_pid_file();
static void mysqld_exit(int exit_code) __attribute__((noreturn));
#endif
+static void delete_pid_file(myf flags);
static void end_ssl();
@@ -1577,8 +1598,21 @@ static void close_connections(void)
Events::deinit();
end_slave();
- /* Give threads time to die. */
- for (int i= 0; thread_count && i < 100; i++)
+ /*
+ Give threads time to die.
+
+ In 5.5, this was waiting 100 rounds @ 20 milliseconds/round, so as little
+ as 2 seconds, depending on thread scheduling.
+
+    From 10.0, we increase this to 1000 rounds / 20 seconds. The rationale is
+    that on a server with heavy I/O load, it is quite possible for e.g. an
+    fsync() of the binlog or whatever to cause something like LOCK_log to be
+    held for more than 2 seconds. We do not want to force kill threads in
+    such cases, if it can be avoided. Note that normally, the wait will be
+    much smaller than even 2 seconds; this is only a safety fallback against
+    stuck threads so server shutdown is not held up forever.
+ */
+ for (int i= 0; *(volatile int32*) &thread_count && i < 1000; i++)
my_sleep(20000);
/*
@@ -1869,13 +1903,14 @@ static void mysqld_exit(int exit_code)
but if a kill -15 signal was sent, the signal thread did
spawn the kill_server_thread thread, which is running concurrently.
*/
+ rpl_deinit_gtid_slave_state();
wait_for_signal_thread_to_end();
mysql_audit_finalize();
clean_up_mutexes();
clean_up_error_log_mutex();
my_end((opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0));
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
- shutdown_performance_schema();
+ shutdown_performance_schema(); // we do it as late as possible
#endif
DBUG_LEAVE;
exit(exit_code); /* purecov: inspected */
@@ -1912,17 +1947,16 @@ void clean_up(bool print_message)
my_tz_free();
my_dboptions_cache_free();
ignore_db_dirs_free();
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
servers_free(1);
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
acl_free(1);
grant_free();
#endif
query_cache_destroy();
hostname_cache_free();
- item_user_lock_free();
+ item_func_sleep_free();
lex_free(); /* Free some memory */
item_create_cleanup();
- free_charsets();
if (!opt_noacl)
{
#ifdef HAVE_DLOPEN
@@ -1960,7 +1994,7 @@ void clean_up(bool print_message)
#endif
my_uuid_end();
delete binlog_filter;
- delete rpl_filter;
+ delete global_rpl_filter;
end_ssl();
vio_end();
my_regex_end();
@@ -1969,10 +2003,8 @@ void clean_up(bool print_message)
debug_sync_end();
#endif /* defined(ENABLED_DEBUG_SYNC) */
-#if !defined(EMBEDDED_LIBRARY)
- if (!opt_bootstrap)
- mysql_file_delete(key_file_pid, pidfile_name, MYF(0)); // This may not always exist
-#endif
+ delete_pid_file(MYF(0));
+
if (print_message && my_default_lc_messages && server_start_time)
sql_print_information(ER_DEFAULT(ER_SHUTDOWN_COMPLETE),my_progname);
cleanup_errmsgs();
@@ -1986,7 +2018,9 @@ void clean_up(bool print_message)
sys_var_end();
my_atomic_rwlock_destroy(&global_query_id_lock);
my_atomic_rwlock_destroy(&thread_running_lock);
+ my_atomic_rwlock_destroy(&thread_count_lock);
my_atomic_rwlock_destroy(&statistics_lock);
+ free_charsets();
mysql_mutex_lock(&LOCK_thread_count);
DBUG_PRINT("quit", ("got thread count lock"));
ready_to_exit=1;
@@ -2030,6 +2064,7 @@ static void clean_up_mutexes()
DBUG_ENTER("clean_up_mutexes");
mysql_rwlock_destroy(&LOCK_grant);
mysql_mutex_destroy(&LOCK_thread_count);
+ mysql_mutex_destroy(&LOCK_thread_cache);
mysql_mutex_destroy(&LOCK_status);
mysql_mutex_destroy(&LOCK_delayed_insert);
mysql_mutex_destroy(&LOCK_delayed_status);
@@ -2041,6 +2076,7 @@ static void clean_up_mutexes()
mysql_mutex_destroy(&LOCK_global_user_client_stats);
mysql_mutex_destroy(&LOCK_global_table_stats);
mysql_mutex_destroy(&LOCK_global_index_stats);
+ mysql_mutex_destroy(&LOCK_rpl_gtid_state);
#ifdef HAVE_OPENSSL
mysql_mutex_destroy(&LOCK_des_key_file);
#ifndef HAVE_YASSL
@@ -2280,8 +2316,27 @@ static MYSQL_SOCKET activate_tcp_port(uint port)
{
ip_sock= mysql_socket_socket(key_socket_tcpip, a->ai_family,
a->ai_socktype, a->ai_protocol);
- if (mysql_socket_getfd(ip_sock) != INVALID_SOCKET)
+
+ char ip_addr[INET6_ADDRSTRLEN];
+ if (vio_get_normalized_ip_string(a->ai_addr, a->ai_addrlen,
+ ip_addr, sizeof (ip_addr)))
+ {
+ ip_addr[0]= 0;
+ }
+
+ if (mysql_socket_getfd(ip_sock) == INVALID_SOCKET)
+ {
+ sql_print_error("Failed to create a socket for %s '%s': errno: %d.",
+ (a->ai_family == AF_INET) ? "IPv4" : "IPv6",
+ (const char *) ip_addr,
+ (int) socket_errno);
+ }
+ else
+ {
+ sql_print_information("Server socket created on IP: '%s'.",
+ (const char *) ip_addr);
break;
+ }
}
if (mysql_socket_getfd(ip_sock) == INVALID_SOCKET)
@@ -2518,7 +2573,7 @@ void close_connection(THD *thd, uint sql_errno)
{
sleep(0); /* Workaround to avoid tailcall optimisation */
}
- MYSQL_AUDIT_NOTIFY_CONNECTION_DISCONNECT(thd, sql_errno);
+ mysql_audit_notify_connection_disconnect(thd, sql_errno);
DBUG_VOID_RETURN;
}
#endif /* EMBEDDED_LIBRARY */
@@ -2565,6 +2620,28 @@ void dec_connection_count(THD *thd)
/*
+ Delete THD and decrement thread counters, including thread_running
+*/
+
+void delete_running_thd(THD *thd)
+{
+ mysql_mutex_lock(&LOCK_thread_count);
+ thd->unlink();
+ mysql_mutex_unlock(&LOCK_thread_count);
+
+ delete thd;
+ dec_thread_running();
+ thread_safe_decrement32(&thread_count, &thread_count_lock);
+ if (!thread_count)
+ {
+ mysql_mutex_lock(&LOCK_thread_count);
+ mysql_cond_broadcast(&COND_thread_count);
+ mysql_mutex_unlock(&LOCK_thread_count);
+ }
+}
+
+
+/*
Unlink thd from global list of available connections and free thd
SYNOPSIS
@@ -2588,7 +2665,6 @@ void unlink_thd(THD *thd)
mysql_mutex_unlock(&LOCK_status);
mysql_mutex_lock(&LOCK_thread_count);
- thread_count--;
thd->unlink();
/*
Used by binlog_reset_master. It would be cleaner to use
@@ -2596,13 +2672,11 @@ void unlink_thd(THD *thd)
sync feature has been shut down at this point.
*/
DBUG_EXECUTE_IF("sleep_after_lock_thread_count_before_delete_thd", sleep(5););
- /*
- We must delete thd inside the lock to ensure that we don't start cleanup
- before THD is deleted
- */
- delete thd;
mysql_mutex_unlock(&LOCK_thread_count);
+ delete thd;
+ thread_safe_decrement32(&thread_count, &thread_count_lock);
+
DBUG_VOID_RETURN;
}
@@ -2614,7 +2688,7 @@ void unlink_thd(THD *thd)
cache_thread()
NOTES
- LOCK_thread_count has to be locked
+ LOCK_thread_cache is used to protect the cache variables
RETURN
0 Thread was not put in cache
@@ -2625,7 +2699,9 @@ void unlink_thd(THD *thd)
static bool cache_thread()
{
- mysql_mutex_assert_owner(&LOCK_thread_count);
+ DBUG_ENTER("cache_thread");
+
+ mysql_mutex_lock(&LOCK_thread_cache);
if (cached_thread_count < thread_cache_size &&
! abort_loop && !kill_cached_threads)
{
@@ -2642,7 +2718,7 @@ static bool cache_thread()
#endif
while (!abort_loop && ! wake_thread && ! kill_cached_threads)
- mysql_cond_wait(&COND_thread_cache, &LOCK_thread_count);
+ mysql_cond_wait(&COND_thread_cache, &LOCK_thread_cache);
cached_thread_count--;
if (kill_cached_threads)
mysql_cond_signal(&COND_flush_thread_cache);
@@ -2651,6 +2727,8 @@ static bool cache_thread()
THD *thd;
wake_thread--;
thd= thread_cache.get();
+ mysql_mutex_unlock(&LOCK_thread_cache);
+
thd->thread_stack= (char*) &thd; // For store_globals
(void) thd->store_globals();
@@ -2672,11 +2750,16 @@ static bool cache_thread()
thd->mysys_var->abort= 0;
thd->thr_create_utime= microsecond_interval_timer();
thd->start_utime= thd->thr_create_utime;
+
+ /* Link thd into list of all active threads (THD's) */
+ mysql_mutex_lock(&LOCK_thread_count);
threads.append(thd);
- return(1);
+ mysql_mutex_unlock(&LOCK_thread_count);
+ DBUG_RETURN(1);
}
}
- return(0);
+ mysql_mutex_unlock(&LOCK_thread_cache);
+ DBUG_RETURN(0);
}
@@ -2705,19 +2788,22 @@ bool one_thread_per_connection_end(THD *thd, bool put_in_cache)
unlink_thd(thd);
/* Mark that current_thd is not valid anymore */
set_current_thd(0);
- if (put_in_cache)
+ if (put_in_cache && cache_thread())
+ DBUG_RETURN(0); // Thread is reused
+
+ /*
+    It's safe to check thread_count outside of the mutex, as we are
+    only interested in whether it was counted down to 0 by the
+    above unlink_thd() call. We should only signal COND_thread_count if
+    thread_count is likely to be 0 (false positives are ok).
+ */
+ if (!thread_count)
{
mysql_mutex_lock(&LOCK_thread_count);
- put_in_cache= cache_thread();
+ DBUG_PRINT("signal", ("Broadcasting COND_thread_count"));
+ mysql_cond_broadcast(&COND_thread_count);
mysql_mutex_unlock(&LOCK_thread_count);
- if (put_in_cache)
- DBUG_RETURN(0); // Thread is reused
}
-
- /* It's safe to broadcast outside a lock (COND... is not deleted here) */
- DBUG_PRINT("signal", ("Broadcasting COND_thread_count"));
- mysql_cond_broadcast(&COND_thread_count);
-
DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
@@ -2728,15 +2814,17 @@ bool one_thread_per_connection_end(THD *thd, bool put_in_cache)
void flush_thread_cache()
{
- mysql_mutex_lock(&LOCK_thread_count);
+ DBUG_ENTER("flush_thread_cache");
+ mysql_mutex_lock(&LOCK_thread_cache);
kill_cached_threads++;
while (cached_thread_count)
{
mysql_cond_broadcast(&COND_thread_cache);
- mysql_cond_wait(&COND_flush_thread_cache, &LOCK_thread_count);
+ mysql_cond_wait(&COND_flush_thread_cache, &LOCK_thread_cache);
}
kill_cached_threads--;
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_mutex_unlock(&LOCK_thread_cache);
+ DBUG_VOID_RETURN;
}
@@ -3326,14 +3414,7 @@ pthread_handler_t handle_shutdown(void *arg)
}
#endif
-const char *load_default_groups[]= {
-#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
-"mysql_cluster",
-#endif
-"mysqld", "server", MYSQL_BASE_VERSION,
-"mariadb", MARIADB_BASE_VERSION,
-"client-server",
-0, 0};
+#include <mysqld_default_groups.h>
#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
static const int load_default_groups_sz=
@@ -3342,14 +3423,25 @@ sizeof(load_default_groups)/sizeof(load_default_groups[0]);
#ifndef EMBEDDED_LIBRARY
-static
-int
-check_enough_stack_size()
+/**
+ This function is used to check for stack overrun for pathological
+ cases of regular expressions and 'like' expressions.
+ The call to current_thd is quite expensive, so we try to avoid it
+ for the normal cases.
+ The size of each stack frame for the wildcmp() routines is ~128 bytes,
+ so checking *every* recursive call is not necessary.
+ */
+extern "C" int
+check_enough_stack_size(int recurse_level)
{
uchar stack_top;
+ if (recurse_level % 16 != 0)
+ return 0;
- return check_stack_overrun(current_thd, STACK_MIN_SIZE,
- &stack_top);
+ THD *my_thd= current_thd;
+ if (my_thd != NULL)
+ return check_stack_overrun(my_thd, STACK_MIN_SIZE * 2, &stack_top);
+ return 0;
}
#endif
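
The reasoning in the check_enough_stack_size() comment above (each wildcmp()-style frame is roughly 128 bytes, so probing the stack on every recursion level is wasted work) can be seen in a toy recursive matcher that only checks every 16th level. This is a self-contained sketch under that assumption: the fixed depth limit and the wild_match() helper are invented for the illustration, while the real function consults check_stack_overrun() on the current THD:

#include <cstdio>

/* Stand-in for check_enough_stack_size(): non-zero means "give up".
   Only probes every 16th level; the server version inspects the real
   thread stack instead of using a fixed limit. */
static int check_enough_stack_size(int recurse_level)
{
  if (recurse_level % 16 != 0)
    return 0;
  return recurse_level > 10000;              /* illustrative depth limit */
}

/* A wildcmp()-style recursive matcher: '%' matches any (possibly empty)
   suffix, every other character must match literally. */
static int wild_match(const char *s, const char *pat, int level)
{
  if (check_enough_stack_size(level))
    return 0;                                /* give up rather than overrun */
  if (*pat == '\0')
    return *s == '\0';
  if (*pat == '%')
    return wild_match(s, pat + 1, level + 1) ||
           (*s != '\0' && wild_match(s + 1, pat, level + 1));
  return *s == *pat && wild_match(s + 1, pat + 1, level + 1);
}

int main()
{
  printf("%d\n", wild_match("mariadb", "mar%db", 0));  /* prints 1 */
  return 0;
}
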
@@ -3516,6 +3608,7 @@ SHOW_VAR com_status_vars[]= {
{"show_user_statistics", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_USER_STATS]), SHOW_LONG_STATUS},
{"show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS},
{"show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS},
+ {"shutdown", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHUTDOWN]), SHOW_LONG_STATUS},
{"start_all_slaves", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_ALL_START]), SHOW_LONG_STATUS},
{"start_slave", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS},
{"stmt_close", (char*) offsetof(STATUS_VAR, com_stmt_close), SHOW_LONG_STATUS},
@@ -3689,9 +3782,9 @@ static int init_common_variables()
max_system_variables.pseudo_thread_id= (ulong)~0;
server_start_time= flush_status_time= my_time(0);
- rpl_filter= new Rpl_filter;
+ global_rpl_filter= new Rpl_filter;
binlog_filter= new Rpl_filter;
- if (!rpl_filter || !binlog_filter)
+ if (!global_rpl_filter || !binlog_filter)
{
sql_perror("Could not allocate replication and binlog filters");
return 1;
@@ -3718,7 +3811,7 @@ static int init_common_variables()
WideCharToMultiByte(CP_UTF8,0, wtz_name, -1, system_time_zone,
sizeof(system_time_zone) - 1, NULL, NULL);
#else
- strmake(system_time_zone, tz_name, sizeof(system_time_zone)-1);
+ strmake_buf(system_time_zone, tz_name);
#endif /* _WIN32 */
#endif /* HAVE_TZNAME */
@@ -3984,6 +4077,7 @@ static int init_common_variables()
item_init();
#ifndef EMBEDDED_LIBRARY
my_regex_init(&my_charset_latin1, check_enough_stack_size);
+ my_string_stack_guard= check_enough_stack_size;
#else
my_regex_init(&my_charset_latin1, NULL);
#endif
@@ -4151,6 +4245,7 @@ static int init_thread_environment()
{
DBUG_ENTER("init_thread_environment");
mysql_mutex_init(key_LOCK_thread_count, &LOCK_thread_count, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_thread_cache, &LOCK_thread_cache, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_status, &LOCK_status, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_delayed_insert,
&LOCK_delayed_insert, MY_MUTEX_INIT_FAST);
@@ -4163,6 +4258,7 @@ static int init_thread_environment()
mysql_mutex_init(key_LOCK_active_mi, &LOCK_active_mi, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_global_system_variables,
&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
+ mysql_mutex_record_order(&LOCK_active_mi, &LOCK_global_system_variables);
mysql_rwlock_init(key_rwlock_LOCK_system_variables_hash,
&LOCK_system_variables_hash);
mysql_mutex_init(key_LOCK_prepared_stmt_count,
@@ -4180,6 +4276,8 @@ static int init_thread_environment()
&LOCK_global_table_stats, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_global_index_stats,
&LOCK_global_index_stats, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_rpl_gtid_state,
+ &LOCK_rpl_gtid_state, MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_LOCK_prepare_ordered, &LOCK_prepare_ordered,
MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered,
@@ -4223,6 +4321,10 @@ static int init_thread_environment()
PTHREAD_CREATE_DETACHED);
pthread_attr_setscope(&connection_attrib, PTHREAD_SCOPE_SYSTEM);
+#ifdef HAVE_REPLICATION
+ rpl_init_gtid_slave_state();
+#endif
+
DBUG_RETURN(0);
}
@@ -4381,6 +4483,7 @@ static int init_server_components()
query_cache_set_min_res_unit(query_cache_min_res_unit);
query_cache_init();
query_cache_resize(query_cache_size);
+ query_cache_result_size_limit(query_cache_limit);
my_rnd_init(&sql_rand,(ulong) server_start_time,(ulong) server_start_time/2);
setup_fpu();
init_thr_lock();
@@ -4492,11 +4595,13 @@ will be ignored as the --log-bin option is not defined.");
}
#endif
+ DBUG_ASSERT(!opt_bin_log || opt_bin_logname);
+
if (opt_bin_log)
{
/* Reports an error and aborts, if the --log-bin's path
is a directory.*/
- if (opt_bin_logname &&
+ if (opt_bin_logname[0] &&
opt_bin_logname[strlen(opt_bin_logname) - 1] == FN_LIBCHAR)
{
sql_print_error("Path '%s' is a directory name, please specify \
@@ -4518,7 +4623,7 @@ a file name for --log-bin-index option", opt_binlog_index_name);
char buf[FN_REFLEN];
const char *ln;
ln= mysql_bin_log.generate_name(opt_bin_logname, "-bin", 1, buf);
- if (!opt_bin_logname && !opt_binlog_index_name)
+ if (!opt_bin_logname[0] && !opt_binlog_index_name)
{
/*
User didn't give us info to name the binlog index file.
@@ -4656,7 +4761,7 @@ a file name for --log-bin-index option", opt_binlog_index_name);
plugin_ref plugin;
handlerton *hton;
if ((plugin= ha_resolve_by_name(0, &name)))
- hton= plugin_data(plugin, handlerton*);
+ hton= plugin_hton(plugin);
else
{
sql_print_error("Unknown/unsupported storage engine: %s",
@@ -4756,6 +4861,8 @@ a file name for --log-bin-index option", opt_binlog_index_name);
init_update_queries();
init_global_user_stats();
init_global_client_stats();
+ if (!opt_bootstrap)
+ servers_init(0);
DBUG_RETURN(0);
}
@@ -5116,9 +5223,9 @@ int mysqld_main(int argc, char **argv)
set_user(mysqld_user, user_info);
}
- if (opt_bin_log && !server_id)
+ if (opt_bin_log && !global_system_variables.server_id)
{
- server_id= 1;
+ global_system_variables.server_id= ::server_id= 1;
#ifdef EXTRA_DEBUG
sql_print_warning("You have enabled the binary log, but you haven't set "
"server-id to a non-zero value: we force server id to 1; "
@@ -5171,9 +5278,7 @@ int mysqld_main(int argc, char **argv)
(void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL);
-
- if (!opt_bootstrap)
- mysql_file_delete(key_file_pid, pidfile_name, MYF(MY_WME)); // Not needed anymore
+ delete_pid_file(MYF(MY_WME));
if (mysql_socket_getfd(unix_sock) != INVALID_SOCKET)
unlink(mysqld_unix_port);
@@ -5183,9 +5288,6 @@ int mysqld_main(int argc, char **argv)
if (!opt_noacl)
(void) grant_init();
- if (!opt_bootstrap)
- servers_init(0);
-
if (!opt_noacl)
{
#ifdef HAVE_DLOPEN
@@ -5243,6 +5345,9 @@ int mysqld_main(int argc, char **argv)
create_shutdown_thread();
start_handle_manager();
+ /* Copy default global rpl_filter to global_rpl_filter */
+ copy_filter_setting(global_rpl_filter, get_or_create_rpl_filter("", 0));
+
/*
init_slave() must be called after the thread keys are created.
Some parts of the code (e.g. SHOW STATUS LIKE 'slave_running' and other
@@ -5559,7 +5664,7 @@ static void bootstrap(MYSQL_FILE *file)
thd->max_client_packet_length= thd->net.max_packet;
thd->security_ctx->master_access= ~(ulong)0;
thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
- thread_count++;
+ thread_count++; // Safe as only one thread running
in_bootstrap= TRUE;
bootstrap_file=file;
@@ -5640,55 +5745,70 @@ void handle_connection_in_main_thread(THD *thd)
void create_thread_to_handle_connection(THD *thd)
{
+ DBUG_ENTER("create_thread_to_handle_connection");
+ mysql_mutex_assert_owner(&LOCK_thread_count);
+
+ /* Check if we can get thread from the cache */
if (cached_thread_count > wake_thread)
{
- /* Get thread from cache */
- thread_cache.push_back(thd);
- wake_thread++;
- mysql_cond_signal(&COND_thread_cache);
- }
- else
- {
- char error_message_buff[MYSQL_ERRMSG_SIZE];
- /* Create new thread to handle connection */
- int error;
- thread_created++;
- threads.append(thd);
- DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id));
- thd->prior_thr_create_utime= microsecond_interval_timer();
- if ((error= mysql_thread_create(key_thread_one_connection,
- &thd->real_id, &connection_attrib,
- handle_one_connection,
- (void*) thd)))
+ mysql_mutex_lock(&LOCK_thread_cache);
+ /* Recheck condition when we have the lock */
+ if (cached_thread_count > wake_thread)
{
- /* purecov: begin inspected */
- DBUG_PRINT("error",
- ("Can't create thread to handle request (error %d)",
- error));
- thread_count--;
- thd->killed= KILL_CONNECTION; // Safety
mysql_mutex_unlock(&LOCK_thread_count);
+ /* Get thread from cache */
+ thread_cache.push_back(thd);
+ wake_thread++;
+ mysql_cond_signal(&COND_thread_cache);
+ mysql_mutex_unlock(&LOCK_thread_cache);
+ DBUG_PRINT("info",("Thread created"));
+ DBUG_VOID_RETURN;
+ }
+ mysql_mutex_unlock(&LOCK_thread_cache);
+ }
- mysql_mutex_lock(&LOCK_connection_count);
- (*thd->scheduler->connection_count)--;
- mysql_mutex_unlock(&LOCK_connection_count);
+ char error_message_buff[MYSQL_ERRMSG_SIZE];
+ /* Create new thread to handle connection */
+ int error;
+ thread_created++;
+ threads.append(thd);
+ DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id));
+ thd->prior_thr_create_utime= microsecond_interval_timer();
+ if ((error= mysql_thread_create(key_thread_one_connection,
+ &thd->real_id, &connection_attrib,
+ handle_one_connection,
+ (void*) thd)))
+ {
+ /* purecov: begin inspected */
+ DBUG_PRINT("error",
+ ("Can't create thread to handle request (error %d)",
+ error));
+ thd->killed= KILL_CONNECTION; // Safety
+ mysql_mutex_unlock(&LOCK_thread_count);
- statistic_increment(aborted_connects,&LOCK_status);
- statistic_increment(connection_errors_internal, &LOCK_status);
- /* Can't use my_error() since store_globals has not been called. */
- my_snprintf(error_message_buff, sizeof(error_message_buff),
- ER_THD(thd, ER_CANT_CREATE_THREAD), error);
- net_send_error(thd, ER_CANT_CREATE_THREAD, error_message_buff, NULL);
- close_connection(thd, ER_OUT_OF_RESOURCES);
- mysql_mutex_lock(&LOCK_thread_count);
- delete thd;
- mysql_mutex_unlock(&LOCK_thread_count);
- return;
- /* purecov: end */
- }
+ mysql_mutex_lock(&LOCK_connection_count);
+ (*thd->scheduler->connection_count)--;
+ mysql_mutex_unlock(&LOCK_connection_count);
+
+ statistic_increment(aborted_connects,&LOCK_status);
+ statistic_increment(connection_errors_internal, &LOCK_status);
+ /* Can't use my_error() since store_globals has not been called. */
+ my_snprintf(error_message_buff, sizeof(error_message_buff),
+ ER_THD(thd, ER_CANT_CREATE_THREAD), error);
+ net_send_error(thd, ER_CANT_CREATE_THREAD, error_message_buff, NULL);
+ close_connection(thd, ER_OUT_OF_RESOURCES);
+
+ mysql_mutex_lock(&LOCK_thread_count);
+ thd->unlink();
+ mysql_mutex_unlock(&LOCK_thread_count);
+ delete thd;
+ thread_safe_decrement32(&thread_count, &thread_count_lock);
+ return;
+ /* purecov: end */
}
mysql_mutex_unlock(&LOCK_thread_count);
DBUG_PRINT("info",("Thread created"));
+ DBUG_VOID_RETURN;
}
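The reworked create_thread_to_handle_connection() above first tries to hand the THD to a cached thread, rechecking the cache condition after taking LOCK_thread_cache, and only then falls back to spawning a new thread; on spawn failure it unlinks the THD and decrements the atomic thread_count. Below is a minimal standalone sketch of that dispatcher-side pattern, not MariaDB code: std::mutex/std::thread stand in for the server primitives, the worker-side parking loop is omitted, and all names are hypothetical.

// Hypothetical sketch of "reuse a cached worker if one is parked, else spawn",
// with the condition rechecked under the cache lock; not MariaDB code.
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <system_error>
#include <thread>

struct Conn { int id; };

static std::mutex cache_lock;
static std::condition_variable cache_cond;
static std::deque<Conn*> thread_cache;   // connections waiting for a parked worker
static int cached_workers = 0;           // workers currently parked on cache_cond
static int wake_requests  = 0;           // hand-offs already signalled

static void handle_connection(Conn *c) { std::printf("served %d\n", c->id); delete c; }

static bool dispatch_connection(Conn *c)
{
  {
    std::unique_lock<std::mutex> lk(cache_lock);
    // Recheck under the lock: a parked worker may have left in the meantime.
    if (cached_workers > wake_requests)
    {
      thread_cache.push_back(c);
      wake_requests++;
      cache_cond.notify_one();           // wake exactly one parked worker
      return true;
    }
  }
  // No cached worker available: fall back to creating a new thread.
  try { std::thread(handle_connection, c).detach(); }
  catch (const std::system_error&) { delete c; return false; }  // creation failed
  return true;
}

int main()
{
  dispatch_connection(new Conn{1});
  std::this_thread::sleep_for(std::chrono::milliseconds(100));  // let the worker run
}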
@@ -5736,10 +5856,10 @@ static void create_new_thread(THD *thd)
mysql_mutex_unlock(&LOCK_connection_count);
- /* Start a new thread to handle connection. */
+ thread_safe_increment32(&thread_count, &thread_count_lock);
+ /* Start a new thread to handle connection. */
mysql_mutex_lock(&LOCK_thread_count);
-
/*
The initialization of thread_id is done in create_embedded_thd() for
the embedded library.
@@ -5747,8 +5867,6 @@ static void create_new_thread(THD *thd)
*/
thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
- thread_count++;
-
MYSQL_CALLBACK(thd->scheduler, add_connection, (thd));
DBUG_VOID_RETURN;
@@ -5982,21 +6100,6 @@ void handle_connections_sockets()
}
#endif /* HAVE_LIBWRAP */
- {
- size_socket dummyLen;
- struct sockaddr_storage dummy;
- dummyLen = sizeof(dummy);
- if (getsockname(mysql_socket_getfd(new_sock),
- (struct sockaddr *)&dummy,
- (SOCKET_SIZE_TYPE *)&dummyLen) < 0 )
- {
- sql_perror("Error on new connection socket");
- (void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
- (void) mysql_socket_close(new_sock);
- continue;
- }
- }
-
/*
** Don't allow too many connections
*/
@@ -6692,28 +6795,28 @@ struct my_option my_long_options[]=
"while having selected a different or no database. If you need cross "
"database updates to work, make sure you have 3.23.28 or later, and use "
"replicate-wild-do-table=db_name.%.",
- 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ 0, 0, 0, GET_STR | GET_ASK_ADDR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-do-table", OPT_REPLICATE_DO_TABLE,
"Tells the slave thread to restrict replication to the specified table. "
"To specify more than one table, use the directive multiple times, once "
"for each table. This will work for cross-database updates, in contrast "
- "to replicate-do-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ "to replicate-do-db.", 0, 0, 0, GET_STR | GET_ASK_ADDR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-ignore-db", OPT_REPLICATE_IGNORE_DB,
"Tells the slave thread to not replicate to the specified database. To "
"specify more than one database to ignore, use the directive multiple "
"times, once for each database. This option will not work if you use "
"cross database updates. If you need cross database updates to work, "
"make sure you have 3.23.28 or later, and use replicate-wild-ignore-"
- "table=db_name.%. ", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ "table=db_name.%. ", 0, 0, 0, GET_STR | GET_ASK_ADDR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-ignore-table", OPT_REPLICATE_IGNORE_TABLE,
"Tells the slave thread to not replicate to the specified table. To specify "
"more than one table to ignore, use the directive multiple times, once for "
"each table. This will work for cross-database updates, in contrast to "
- "replicate-ignore-db.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ "replicate-ignore-db.", 0, 0, 0, GET_STR | GET_ASK_ADDR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB,
"Updates to a database with a different name than the original. Example: "
"replicate-rewrite-db=master_db_name->slave_db_name.",
- 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ 0, 0, 0, GET_STR | GET_ASK_ADDR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_REPLICATION
{"replicate-same-server-id", 0,
"In replication, if set to 1, do not skip events having our server id. "
@@ -6729,7 +6832,7 @@ struct my_option my_long_options[]=
"database updates. Example: replicate-wild-do-table=foo%.bar% will "
"replicate only updates to tables in all databases that start with foo "
"and whose table names start with bar.",
- 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ 0, 0, 0, GET_STR | GET_ASK_ADDR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE,
"Tells the slave thread to not replicate to the tables that match the "
"given wildcard pattern. To specify more than one table to ignore, use "
@@ -6737,7 +6840,7 @@ struct my_option my_long_options[]=
"cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% "
"will not do updates to tables in databases that start with foo and whose "
"table names start with bar.",
- 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ 0, 0, 0, GET_STR | GET_ASK_ADDR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"safe-mode", OPT_SAFE, "Skip some optimize stages (for testing). Deprecated.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"safe-user-create", 0,
@@ -6891,19 +6994,25 @@ static int show_rpl_status(THD *thd, SHOW_VAR *var, char *buff)
static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
{
Master_info *mi;
+ bool tmp;
+ LINT_INIT(tmp);
+
var->type= SHOW_MY_BOOL;
var->value= buff;
+ mysql_mutex_unlock(&LOCK_status);
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
Sql_condition::WARN_LEVEL_NOTE);
if (mi)
- *((my_bool *)buff)= (my_bool) (mi->slave_running ==
- MYSQL_SLAVE_RUN_CONNECT &&
- mi->rli.slave_running);
+ tmp= (my_bool) (mi->slave_running == MYSQL_SLAVE_RUN_CONNECT &&
+ mi->rli.slave_running);
+ mysql_mutex_unlock(&LOCK_active_mi);
+ mysql_mutex_lock(&LOCK_status);
+ if (mi)
+ *((my_bool *)buff)= tmp;
else
var->type= SHOW_UNDEF;
- mysql_mutex_unlock(&LOCK_active_mi);
return 0;
}
@@ -6911,17 +7020,24 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff)
{
Master_info *mi;
+ longlong tmp;
+ LINT_INIT(tmp);
+
var->type= SHOW_LONGLONG;
var->value= buff;
+ mysql_mutex_unlock(&LOCK_status);
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
Sql_condition::WARN_LEVEL_NOTE);
if (mi)
- *((longlong *)buff)= mi->received_heartbeats;
+ tmp= mi->received_heartbeats;
+ mysql_mutex_unlock(&LOCK_active_mi);
+ mysql_mutex_lock(&LOCK_status);
+ if (mi)
+ *((longlong *)buff)= tmp;
else
var->type= SHOW_UNDEF;
- mysql_mutex_unlock(&LOCK_active_mi);
return 0;
}
@@ -6929,17 +7045,24 @@ static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff)
static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff)
{
Master_info *mi;
+ float tmp;
+ LINT_INIT(tmp);
+
var->type= SHOW_CHAR;
var->value= buff;
+ mysql_mutex_unlock(&LOCK_status);
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
Sql_condition::WARN_LEVEL_NOTE);
if (mi)
- sprintf(buff, "%.3f", mi->heartbeat_period);
+ tmp= mi->heartbeat_period;
+ mysql_mutex_unlock(&LOCK_active_mi);
+ mysql_mutex_lock(&LOCK_status);
+ if (mi)
+ sprintf(buff, "%.3f", tmp);
else
var->type= SHOW_UNDEF;
- mysql_mutex_unlock(&LOCK_active_mi);
return 0;
}
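The SHOW handlers above now release LOCK_status before taking LOCK_active_mi, copy the value they need into a local variable, and reacquire LOCK_status before writing into the status buffer, so the two mutexes are always acquired in the same order. A standalone sketch of that "release A, read under B, retake A" shape, with std::mutex standing in for mysql_mutex_t and all names hypothetical:

// Hypothetical sketch of keeping a fixed mutex order by releasing the
// status lock around the source-side lock; not MariaDB code.
#include <mutex>

static std::mutex status_lock;   // held by the caller, as LOCK_status is
static std::mutex source_lock;   // protects the value being reported
static long long heartbeats_received = 42;

static long long show_heartbeats_locked()
{
  long long tmp;
  status_lock.unlock();               // never take source_lock under status_lock
  {
    std::lock_guard<std::mutex> g(source_lock);
    tmp = heartbeats_received;        // copy while the source is stable
  }
  status_lock.lock();                 // caller still expects status_lock held
  return tmp;                         // may be slightly stale; status data is inexact
}

int main()
{
  std::lock_guard<std::mutex> g(status_lock);   // the SHOW code path holds this
  return (int) show_heartbeats_locked();
}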
@@ -7425,8 +7548,8 @@ SHOW_VAR status_vars[]= {
{"Feature_locale", (char*) offsetof(STATUS_VAR, feature_locale), SHOW_LONG_STATUS},
{"Feature_subquery", (char*) offsetof(STATUS_VAR, feature_subquery), SHOW_LONG_STATUS},
{"Feature_timezone", (char*) offsetof(STATUS_VAR, feature_timezone), SHOW_LONG_STATUS},
- {"Feature_trigger", (char*) offsetof(STATUS_VAR, feature_trigger), SHOW_LONG_STATUS},
- {"Feature_xml", (char*) offsetof(STATUS_VAR, feature_xml), SHOW_LONG_STATUS},
+ {"Feature_trigger", (char*) offsetof(STATUS_VAR, feature_trigger), SHOW_LONG_STATUS},
+ {"Feature_xml", (char*) offsetof(STATUS_VAR, feature_xml), SHOW_LONG_STATUS},
{"Flush_commands", (char*) &refresh_version, SHOW_LONG_NOFLUSH},
{"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), SHOW_LONG_STATUS},
{"Handler_delete", (char*) offsetof(STATUS_VAR, ha_delete_count), SHOW_LONG_STATUS},
@@ -7435,8 +7558,8 @@ SHOW_VAR status_vars[]= {
{"Handler_icp_attempts", (char*) offsetof(STATUS_VAR, ha_icp_attempts), SHOW_LONG_STATUS},
{"Handler_icp_match", (char*) offsetof(STATUS_VAR, ha_icp_match), SHOW_LONG_STATUS},
{"Handler_mrr_init", (char*) offsetof(STATUS_VAR, ha_mrr_init_count), SHOW_LONG_STATUS},
- {"Handler_mrr_key_refills", (char*) offsetof(STATUS_VAR, ha_mrr_key_refills_count), SHOW_LONG_STATUS},
- {"Handler_mrr_rowid_refills", (char*) offsetof(STATUS_VAR, ha_mrr_rowid_refills_count), SHOW_LONG_STATUS},
+ {"Handler_mrr_key_refills", (char*) offsetof(STATUS_VAR, ha_mrr_key_refills_count), SHOW_LONG_STATUS},
+ {"Handler_mrr_rowid_refills",(char*) offsetof(STATUS_VAR, ha_mrr_rowid_refills_count), SHOW_LONG_STATUS},
{"Handler_prepare", (char*) offsetof(STATUS_VAR, ha_prepare_count), SHOW_LONG_STATUS},
{"Handler_read_first", (char*) offsetof(STATUS_VAR, ha_read_first_count), SHOW_LONG_STATUS},
{"Handler_read_key", (char*) offsetof(STATUS_VAR, ha_read_key_count), SHOW_LONG_STATUS},
@@ -7463,9 +7586,10 @@ SHOW_VAR status_vars[]= {
{"Open_table_definitions", (char*) &show_table_definitions, SHOW_SIMPLE_FUNC},
{"Open_tables", (char*) &show_open_tables, SHOW_SIMPLE_FUNC},
{"Opened_files", (char*) &my_file_total_opened, SHOW_LONG_NOFLUSH},
+ {"Opened_plugin_libraries", (char*) &dlopen_count, SHOW_LONG},
{"Opened_table_definitions", (char*) offsetof(STATUS_VAR, opened_shares), SHOW_LONG_STATUS},
{"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
- {"Opened_views", (char*) offsetof(STATUS_VAR, opened_views), SHOW_LONG_STATUS},
+ {"Opened_views", (char*) offsetof(STATUS_VAR, opened_views), SHOW_LONG_STATUS},
{"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_SIMPLE_FUNC},
{"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONGLONG_STATUS},
{"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONGLONG_STATUS},
@@ -7759,12 +7883,13 @@ static int mysql_init_variables(void)
log_error_file_ptr= log_error_file;
protocol_version= PROTOCOL_VERSION;
what_to_log= ~ (1L << (uint) COM_TIME);
- refresh_version= 1L; /* Increments on each reload */
+ refresh_version= 2L; /* Increments on each reload. 0 and 1 are reserved */
denied_connections= 0;
executed_events= 0;
global_query_id= thread_id= 1L;
my_atomic_rwlock_init(&global_query_id_lock);
my_atomic_rwlock_init(&thread_running_lock);
+ my_atomic_rwlock_init(&thread_count_lock);
my_atomic_rwlock_init(&statistics_lock);
strmov(server_version, MYSQL_SERVER_VERSION);
threads.empty();
@@ -7782,8 +7907,8 @@ static int mysql_init_variables(void)
/* Set directory paths */
mysql_real_data_home_len=
- strmake(mysql_real_data_home, get_relative_path(MYSQL_DATADIR),
- sizeof(mysql_real_data_home)-1) - mysql_real_data_home;
+ strmake_buf(mysql_real_data_home,
+ get_relative_path(MYSQL_DATADIR)) - mysql_real_data_home;
/* Replication parameters */
master_info_file= (char*) "master.info",
relay_log_info_file= (char*) "relay-log.info";
@@ -7892,7 +8017,7 @@ static int mysql_init_variables(void)
const char *tmpenv;
if (!(tmpenv = getenv("MY_BASEDIR_VERSION")))
tmpenv = DEFAULT_MYSQL_HOME;
- (void) strmake(mysql_home, tmpenv, sizeof(mysql_home)-1);
+ strmake_buf(mysql_home, tmpenv);
#endif
return 0;
}
@@ -7931,7 +8056,7 @@ mysqld_get_one_option(int optid,
global_system_variables.tx_isolation= ISO_SERIALIZABLE;
break;
case 'b':
- strmake(mysql_home,argument,sizeof(mysql_home)-1);
+ strmake_buf(mysql_home, argument);
break;
case 'C':
if (default_collation_name == compiled_default_collation_name)
@@ -7942,7 +8067,7 @@ mysqld_get_one_option(int optid,
opt_log=1;
break;
case 'h':
- strmake(mysql_real_data_home,argument, sizeof(mysql_real_data_home)-1);
+ strmake_buf(mysql_real_data_home, argument);
/* Correct pointer set by my_getopt (for embedded library) */
mysql_real_data_home_ptr= mysql_real_data_home;
break;
@@ -7953,7 +8078,7 @@ mysqld_get_one_option(int optid,
sql_print_warning("Ignoring user change to '%s' because the user was set to '%s' earlier on the command line\n", argument, mysqld_user);
break;
case 'L':
- strmake(lc_messages_dir, argument, sizeof(lc_messages_dir)-1);
+ strmake_buf(lc_messages_dir, argument);
break;
case OPT_BINLOG_FORMAT:
binlog_format_used= true;
@@ -8015,12 +8140,12 @@ mysqld_get_one_option(int optid,
#ifdef HAVE_REPLICATION
case (int)OPT_REPLICATE_IGNORE_DB:
{
- rpl_filter->add_ignore_db(argument);
+ cur_rpl_filter->add_ignore_db(argument);
break;
}
case (int)OPT_REPLICATE_DO_DB:
{
- rpl_filter->add_do_db(argument);
+ cur_rpl_filter->add_do_db(argument);
break;
}
case (int)OPT_REPLICATE_REWRITE_DB:
@@ -8051,7 +8176,7 @@ mysqld_get_one_option(int optid,
return 1;
}
- rpl_filter->add_db_rewrite(key, val);
+ cur_rpl_filter->add_db_rewrite(key, val);
break;
}
@@ -8067,7 +8192,7 @@ mysqld_get_one_option(int optid,
}
case (int)OPT_REPLICATE_DO_TABLE:
{
- if (rpl_filter->add_do_table(argument))
+ if (cur_rpl_filter->add_do_table(argument))
{
sql_print_error("Could not add do table rule '%s'!\n", argument);
return 1;
@@ -8076,7 +8201,7 @@ mysqld_get_one_option(int optid,
}
case (int)OPT_REPLICATE_WILD_DO_TABLE:
{
- if (rpl_filter->add_wild_do_table(argument))
+ if (cur_rpl_filter->add_wild_do_table(argument))
{
sql_print_error("Could not add do table rule '%s'!\n", argument);
return 1;
@@ -8085,7 +8210,7 @@ mysqld_get_one_option(int optid,
}
case (int)OPT_REPLICATE_WILD_IGNORE_TABLE:
{
- if (rpl_filter->add_wild_ignore_table(argument))
+ if (cur_rpl_filter->add_wild_ignore_table(argument))
{
sql_print_error("Could not add ignore table rule '%s'!\n", argument);
return 1;
@@ -8094,7 +8219,7 @@ mysqld_get_one_option(int optid,
}
case (int)OPT_REPLICATE_IGNORE_TABLE:
{
- if (rpl_filter->add_ignore_table(argument))
+ if (cur_rpl_filter->add_ignore_table(argument))
{
sql_print_error("Could not add ignore table rule '%s'!\n", argument);
return 1;
@@ -8133,28 +8258,6 @@ mysqld_get_one_option(int optid,
case (int) OPT_WANT_CORE:
test_flags |= TEST_CORE_ON_SIGNAL;
break;
- case (int) OPT_BIND_ADDRESS:
- {
- struct addrinfo *res_lst, hints;
-
- bzero(&hints, sizeof(struct addrinfo));
- hints.ai_socktype= SOCK_STREAM;
- hints.ai_protocol= IPPROTO_TCP;
-
- if (getaddrinfo(argument, NULL, &hints, &res_lst) != 0)
- {
- sql_print_error("Can't start server: cannot resolve hostname!");
- return 1;
- }
-
- if (res_lst->ai_next)
- {
- sql_print_error("Can't start server: bind-address refers to multiple interfaces!");
- return 1;
- }
- freeaddrinfo(res_lst);
- }
- break;
case OPT_CONSOLE:
if (opt_console)
opt_error_log= 0; // Force logs to stdout
@@ -8164,6 +8267,7 @@ mysqld_get_one_option(int optid,
break;
case OPT_SERVER_ID:
server_id_supplied = 1;
+ ::server_id= global_system_variables.server_id;
break;
case OPT_ONE_THREAD:
thread_handling= SCHEDULER_NO_THREADS;
@@ -8308,7 +8412,7 @@ mysqld_get_one_option(int optid,
C_MODE_START
static void*
-mysql_getopt_value(const char *keyname, uint key_length,
+mysql_getopt_value(const char *name, uint length,
const struct my_option *option, int *error)
{
if (error)
@@ -8321,7 +8425,7 @@ mysql_getopt_value(const char *keyname, uint key_length,
case OPT_KEY_CACHE_PARTITIONS:
{
KEY_CACHE *key_cache;
- if (!(key_cache= get_or_create_key_cache(keyname, key_length)))
+ if (!(key_cache= get_or_create_key_cache(name, length)))
{
if (error)
*error= EXIT_OUT_OF_MEMORY;
@@ -8340,6 +8444,22 @@ mysql_getopt_value(const char *keyname, uint key_length,
return (uchar**) &key_cache->param_partitions;
}
}
+ case OPT_REPLICATE_DO_DB:
+ case OPT_REPLICATE_DO_TABLE:
+ case OPT_REPLICATE_IGNORE_DB:
+ case OPT_REPLICATE_IGNORE_TABLE:
+ case OPT_REPLICATE_WILD_DO_TABLE:
+ case OPT_REPLICATE_WILD_IGNORE_TABLE:
+ case OPT_REPLICATE_REWRITE_DB:
+ {
+ /* Store current filter for mysqld_get_one_option() */
+ if (!(cur_rpl_filter= get_or_create_rpl_filter(name, length)))
+ {
+ if (error)
+ *error= EXIT_OUT_OF_MEMORY;
+ }
+ return 0;
+ }
}
return option->value;
}
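Because the replicate-* options are now flagged GET_ASK_ADDR, my_getopt first calls the value callback above with the option's name prefix (the connection name), which selects or creates the matching filter and remembers it in cur_rpl_filter; mysqld_get_one_option() then applies the argument to that filter. A standalone sketch of this two-step "resolve the named instance, then mutate it" shape, with hypothetical names and no my_getopt API:

// Hypothetical sketch of an option whose value callback resolves a named
// instance that the option handler then mutates; not the my_getopt API.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Filter { std::vector<std::string> ignore_db; };

static std::map<std::string, Filter> filters;   // keyed by connection name
static Filter *cur_filter = nullptr;

// Step 1: called with the instance name parsed from "--name.option=value".
static void resolve_instance(const std::string &name)
{
  cur_filter = &filters[name];                  // get-or-create
}

// Step 2: called with the option's argument, applied to the resolved instance.
static void handle_ignore_db(const char *argument)
{
  cur_filter->ignore_db.push_back(argument);
}

int main()
{
  resolve_instance("");          // default connection: --replicate-ignore-db=test
  handle_ignore_db("test");
  resolve_instance("master2");   // named connection: --master2.replicate-ignore-db=foo
  handle_ignore_db("foo");
  std::printf("%zu filters\n", filters.size());
}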
@@ -8467,7 +8587,7 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
global_system_variables.sql_mode=
expand_sql_mode(global_system_variables.sql_mode);
-#if defined(HAVE_BROKEN_REALPATH)
+#if !defined(HAVE_REALPATH) || defined(HAVE_BROKEN_REALPATH)
my_use_symdir=0;
my_disable_symlinks=1;
have_symlink=SHOW_OPTION_NO;
@@ -8751,7 +8871,7 @@ static int fix_paths(void)
char *sharedir=get_relative_path(SHAREDIR);
if (test_if_hard_path(sharedir))
- strmake(buff,sharedir,sizeof(buff)-1); /* purecov: tested */
+ strmake_buf(buff, sharedir); /* purecov: tested */
else
strxnmov(buff,sizeof(buff)-1,mysql_home,sharedir,NullS);
convert_dirname(buff,buff,NullS);
@@ -8759,7 +8879,7 @@ static int fix_paths(void)
/* If --character-sets-dir isn't given, use shared library dir */
if (charsets_dir)
- strmake(mysql_charsets_dir, charsets_dir, sizeof(mysql_charsets_dir)-1);
+ strmake_buf(mysql_charsets_dir, charsets_dir);
else
strxnmov(mysql_charsets_dir, sizeof(mysql_charsets_dir)-1, buff,
CHARSET_DIR, NullS);
@@ -8854,13 +8974,14 @@ static void create_pid_file()
if ((file= mysql_file_create(key_file_pid, pidfile_name, 0664,
O_WRONLY | O_TRUNC, MYF(MY_WME))) >= 0)
{
- char buff[21], *end;
+ char buff[MAX_BIGINT_WIDTH + 1], *end;
end= int10_to_str((long) getpid(), buff, 10);
*end++= '\n';
if (!mysql_file_write(file, (uchar*) buff, (uint) (end-buff),
MYF(MY_WME | MY_NABP)))
{
mysql_file_close(file, MYF(0));
+ pid_file_created= true;
return;
}
mysql_file_close(file, MYF(0));
@@ -8870,6 +8991,26 @@ static void create_pid_file()
}
#endif /* EMBEDDED_LIBRARY */
+
+/**
+ Remove the process' pid file.
+
+ @param flags file operation flags
+*/
+
+static void delete_pid_file(myf flags)
+{
+#ifndef EMBEDDED_LIBRARY
+ if (pid_file_created)
+ {
+ mysql_file_delete(key_file_pid, pidfile_name, flags);
+ pid_file_created= false;
+ }
+#endif /* EMBEDDED_LIBRARY */
+ return;
+}
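delete_pid_file() only removes the file when pid_file_created is set, and create_pid_file() sets that flag only after a successful write, so an aborted or bootstrap startup cannot remove a pid file it never wrote. A small sketch of that create/remove pairing, using plain stdio instead of the mysys wrappers (names hypothetical):

// Hypothetical sketch of guarding pid-file removal with a "we created it"
// flag; plain stdio stands in for the server's file wrappers.
#include <cstdio>

static bool pid_file_created = false;
static const char *pidfile_name = "/tmp/example.pid";

static void create_pid_file()
{
  if (FILE *f = std::fopen(pidfile_name, "w"))
  {
    std::fprintf(f, "%d\n", 12345 /* getpid() in the real code */);
    std::fclose(f);
    pid_file_created = true;     // set only after a successful write
  }
}

static void delete_pid_file()
{
  if (pid_file_created)          // never remove a file this process did not create
  {
    std::remove(pidfile_name);
    pid_file_created = false;
  }
}

int main()
{
  create_pid_file();
  delete_pid_file();
}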
+
+
/** Clear most status variables. */
void refresh_status(THD *thd)
{
@@ -8893,13 +9034,9 @@ void refresh_status(THD *thd)
/*
Set max_used_connections to the number of currently open
- connections. Lock LOCK_thread_count out of LOCK_status to avoid
- deadlocks. Status reset becomes not atomic, but status data is
- not exact anyway.
+ connections. This is not perfect, but status data is not exact anyway.
*/
- mysql_mutex_lock(&LOCK_thread_count);
max_used_connections= thread_count-delayed_insert_threads;
- mysql_mutex_unlock(&LOCK_thread_count);
}
#ifdef HAVE_PSI_INTERFACE
@@ -8935,7 +9072,8 @@ static PSI_file_info all_server_files[]=
{ &key_file_tclog, "tclog", 0},
{ &key_file_trg, "trigger_name", 0},
{ &key_file_trn, "trigger", 0},
- { &key_file_init, "init", 0}
+ { &key_file_init, "init", 0},
+ { &key_file_binlog_state, "binlog_state", 0}
};
#endif /* HAVE_PSI_INTERFACE */
diff --git a/sql/mysqld.h b/sql/mysqld.h
index b754cb0627b..2cf63d093ad 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -58,6 +58,7 @@ void kill_mysql(void);
void close_connection(THD *thd, uint sql_errno= 0);
void handle_connection_in_main_thread(THD *thd);
void create_thread_to_handle_connection(THD *thd);
+void delete_running_thd(THD *thd);
void unlink_thd(THD *thd);
bool one_thread_per_connection_end(THD *thd, bool put_in_cache);
void flush_thread_cache();
@@ -91,7 +92,6 @@ extern bool opt_ignore_builtin_innodb;
extern my_bool opt_character_set_client_handshake;
extern bool volatile abort_loop;
extern bool in_bootstrap;
-extern uint volatile thread_count;
extern uint connection_count;
extern my_bool opt_safe_user_create;
extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap;
@@ -157,6 +157,7 @@ extern ulong delayed_insert_threads, delayed_insert_writes;
extern ulong delayed_rows_in_use,delayed_insert_errors;
extern ulong slave_open_temp_tables;
extern ulonglong query_cache_size;
+extern ulong query_cache_limit;
extern ulong query_cache_min_res_unit;
extern ulong slow_launch_threads, slow_launch_time;
extern ulong table_cache_size, table_def_size;
@@ -256,11 +257,14 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data,
key_LOCK_error_messages, key_LOCK_thread_count, key_PARTITION_LOCK_auto_inc;
extern PSI_mutex_key key_RELAYLOG_LOCK_index;
+extern PSI_mutex_key key_LOCK_slave_state, key_LOCK_binlog_state;
-extern PSI_mutex_key key_LOCK_stats,
+extern PSI_mutex_key key_TABLE_SHARE_LOCK_share, key_LOCK_stats,
key_LOCK_global_user_client_stats, key_LOCK_global_table_stats,
key_LOCK_global_index_stats, key_LOCK_wakeup_ready;
+extern PSI_mutex_key key_LOCK_rpl_gtid_state;
+
extern PSI_rwlock_key key_rwlock_LOCK_grant, key_rwlock_LOCK_logger,
key_rwlock_LOCK_sys_init_connect, key_rwlock_LOCK_sys_init_slave,
key_rwlock_LOCK_system_variables_hash, key_rwlock_query_cache_query_lock;
@@ -289,7 +293,7 @@ extern PSI_cond_key key_TC_LOG_MMAP_COND_queue_busy;
extern PSI_thread_key key_thread_bootstrap, key_thread_delayed_insert,
key_thread_handle_manager, key_thread_kill_server, key_thread_main,
- key_thread_one_connection, key_thread_signal_hand;
+ key_thread_one_connection, key_thread_signal_hand, key_thread_slave_init;
extern PSI_file_key key_file_binlog, key_file_binlog_index, key_file_casetest,
key_file_dbopt, key_file_des_key_file, key_file_ERRMSG, key_select_to_file,
@@ -302,6 +306,7 @@ extern PSI_file_key key_file_query_log, key_file_slow_log;
extern PSI_file_key key_file_relaylog, key_file_relaylog_index;
extern PSI_socket_key key_socket_tcpip, key_socket_unix,
key_socket_client_connection;
+extern PSI_file_key key_file_binlog_state;
void init_server_psi_keys();
#endif /* HAVE_PSI_INTERFACE */
@@ -476,12 +481,13 @@ extern MYSQL_PLUGIN_IMPORT key_map key_map_full; /* Should be threaded
Server mutex locks and condition variables.
*/
extern mysql_mutex_t
- LOCK_user_locks, LOCK_status,
+ LOCK_item_func_sleep, LOCK_status,
LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_slave_list, LOCK_active_mi, LOCK_manager,
LOCK_global_system_variables, LOCK_user_conn,
LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count;
+extern mysql_mutex_t LOCK_rpl_gtid_state;
extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_thread_count;
#ifdef HAVE_OPENSSL
extern mysql_mutex_t LOCK_des_key_file;
@@ -493,7 +499,8 @@ extern mysql_rwlock_t LOCK_system_variables_hash;
extern mysql_cond_t COND_thread_count;
extern mysql_cond_t COND_manager;
extern int32 thread_running;
-extern my_atomic_rwlock_t thread_running_lock;
+extern int32 thread_count;
+extern my_atomic_rwlock_t thread_running_lock, thread_count_lock;
extern char *opt_ssl_ca, *opt_ssl_capath, *opt_ssl_cert, *opt_ssl_cipher,
*opt_ssl_key, *opt_ssl_crl, *opt_ssl_crlpath;
@@ -595,7 +602,7 @@ inline query_id_t next_query_id()
my_atomic_rwlock_wrlock(&global_query_id_lock);
id= my_atomic_add64(&global_query_id, 1);
my_atomic_rwlock_wrunlock(&global_query_id_lock);
- return (id+1);
+ return (id);
}
inline query_id_t get_query_id()
@@ -625,42 +632,30 @@ inline void table_case_convert(char * name, uint length)
name, length, name, length);
}
-inline ulong sql_rnd_with_mutex()
+inline void thread_safe_increment32(int32 *value, my_atomic_rwlock_t *lock)
{
- mysql_mutex_lock(&LOCK_thread_count);
- ulong tmp=(ulong) (my_rnd(&sql_rand) * 0xffffffff); /* make all bits random */
- mysql_mutex_unlock(&LOCK_thread_count);
- return tmp;
+ my_atomic_rwlock_wrlock(lock);
+ (void) my_atomic_add32(value, 1);
+ my_atomic_rwlock_wrunlock(lock);
}
-inline int32
-inc_thread_running()
+inline void thread_safe_decrement32(int32 *value, my_atomic_rwlock_t *lock)
{
- int32 num_thread_running;
- my_atomic_rwlock_wrlock(&thread_running_lock);
- num_thread_running= my_atomic_add32(&thread_running, 1);
- my_atomic_rwlock_wrunlock(&thread_running_lock);
- return (num_thread_running+1);
+ my_atomic_rwlock_wrlock(lock);
+ (void) my_atomic_add32(value, -1);
+ my_atomic_rwlock_wrunlock(lock);
}
-inline int32
-dec_thread_running()
+inline void
+inc_thread_running()
{
- int32 num_thread_running;
- my_atomic_rwlock_wrlock(&thread_running_lock);
- num_thread_running= my_atomic_add32(&thread_running, -1);
- my_atomic_rwlock_wrunlock(&thread_running_lock);
- return (num_thread_running-1);
+ thread_safe_increment32(&thread_running, &thread_running_lock);
}
-inline int32
-get_thread_running()
+inline void
+dec_thread_running()
{
- int32 num_thread_running;
- my_atomic_rwlock_wrlock(&thread_running_lock);
- num_thread_running= my_atomic_load32(&thread_running);
- my_atomic_rwlock_wrunlock(&thread_running_lock);
- return num_thread_running;
+ thread_safe_decrement32(&thread_running, &thread_running_lock);
}
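The mysqld.h hunk above replaces the per-counter mutex helpers with generic thread_safe_increment32()/thread_safe_decrement32() wrappers, and thread_count becomes an int32 updated through them. A sketch of the same shape using std::atomic in place of my_atomic + my_atomic_rwlock_t (hypothetical, not the server's definitions):

// Hypothetical sketch of the generic counter helpers with std::atomic standing
// in for my_atomic + my_atomic_rwlock_t; not the server's definitions.
#include <atomic>
#include <cstdint>

static std::atomic<int32_t> thread_count{0};
static std::atomic<int32_t> thread_running{0};

static inline void thread_safe_increment32(std::atomic<int32_t> *value)
{ value->fetch_add(1, std::memory_order_relaxed); }

static inline void thread_safe_decrement32(std::atomic<int32_t> *value)
{ value->fetch_add(-1, std::memory_order_relaxed); }

static inline void inc_thread_running() { thread_safe_increment32(&thread_running); }
static inline void dec_thread_running() { thread_safe_decrement32(&thread_running); }

int main()
{
  thread_safe_increment32(&thread_count);
  inc_thread_running();
  dec_thread_running();
  return thread_count.load();   // 1
}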
void set_server_version(void);
@@ -692,6 +687,8 @@ inline int set_current_thd(THD *thd)
extern handlerton *maria_hton;
extern uint extra_connection_count;
+extern uint64 global_gtid_counter;
+extern my_bool opt_gtid_strict_mode;
extern my_bool opt_userstat_running, debug_assert_if_crashed_table;
extern uint mysqld_extra_port;
extern ulong opt_progress_report_time;
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index a9e3af13403..fcb08bfbfc9 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**
@file
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 1de603193c4..d9838543b58 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2008-2011 Monty Program Ab
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2008, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -117,6 +117,7 @@
#include "records.h" // init_read_record, end_read_record
#include <m_ctype.h>
#include "sql_select.h"
+#include "sql_statistics.h"
#include "filesort.h" // filesort_free_buffers
#ifndef EXTRA_DEBUG
@@ -132,7 +133,12 @@
static int sel_cmp(Field *f,uchar *a,uchar *b,uint8 a_flag,uint8 b_flag);
-static uchar is_null_string[2]= {1,0};
+/*
+ this should be long enough so that any memcmp with a string that
+ starts from '\0' won't cross is_null_string boundaries, even
+  if the memcmp is optimized to compare 4, 8 or 16 bytes at once
+*/
+static uchar is_null_string[20]= {1,0};
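Only the first two bytes of is_null_string are meaningful; the rest is padding so that a memcmp implementation which loads 4, 8 or 16 bytes per step never reads past the object. A tiny hypothetical illustration of such a padded sentinel (not the optimizer's key-compare code):

// Hypothetical illustration of a padded sentinel: only the first bytes carry
// meaning, the tail keeps a wide (4/8/16-byte) compare inside the object.
#include <cstdio>
#include <cstring>

static unsigned char null_sentinel[20] = {1, 0};   // bytes [0..1] are the real value

static bool key_is_null(const unsigned char *key_image, std::size_t cmp_len)
{
  // cmp_len is small here, but an optimized memcmp may still touch a wider
  // block; the padding keeps such a read inside null_sentinel.
  return std::memcmp(key_image, null_sentinel, cmp_len) == 0;
}

int main()
{
  unsigned char key_image[20] = {1, 0};
  std::printf("%d\n", key_is_null(key_image, 2));   // 1
}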
class RANGE_OPT_PARAM;
/*
@@ -838,8 +844,17 @@ public:
/* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */
uint alloced_sel_args;
+
bool force_default_mrr;
KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */
+
+ bool statement_should_be_aborted() const
+ {
+ return
+ thd->is_fatal_error ||
+ thd->is_error() ||
+ alloced_sel_args > SEL_ARG::MAX_SEL_ARGS;
+ }
};
class PARAM : public RANGE_OPT_PARAM
@@ -2000,7 +2015,7 @@ int QUICK_ROR_INTERSECT_SELECT::init()
1 error
*/
-int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
+int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler, MEM_ROOT *alloc)
{
handler *save_file= file, *org_file;
my_bool org_key_read;
@@ -2028,7 +2043,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
DBUG_RETURN(0);
}
- if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root)))
+ if (!(file= head->file->clone(head->s->normalized_path.str, alloc)))
{
/*
Manually set the error flag. Note: there seems to be quite a few
@@ -2068,30 +2083,16 @@ end:
org_key_read= head->key_read;
head->file= file;
head->key_read= 0;
+ head->mark_columns_used_by_index_no_reset(index, head->read_set);
+
if (!head->no_keyread)
{
doing_key_read= 1;
- head->mark_columns_used_by_index_no_reset(index, head->read_set);
head->enable_keyread();
}
head->prepare_for_position();
- if (head->no_keyread)
- {
- /*
- We can get here when doing multi-table delete and having index_merge
- condition on a table that we're deleting from. It probably doesn't make
- sense to use index_merge, but de-facto it is used.
-
- When it is used, we need to index columns to be read (before maria-5.3,
- read_multi_range_first() would set it).
- We shouldn't call mark_columns_used_by_index(), because it calls
- enable_keyread(), which is not allowed.
- */
- head->mark_columns_used_by_index_no_reset(index, head->read_set);
- }
-
head->file= org_file;
head->key_read= org_key_read;
@@ -2129,7 +2130,8 @@ failure:
0 OK
other error code
*/
-int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler)
+int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler,
+ MEM_ROOT *alloc)
{
List_iterator_fast<QUICK_SELECT_WITH_RECORD> quick_it(quick_selects);
QUICK_SELECT_WITH_RECORD *cur;
@@ -2146,7 +2148,7 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler)
There is no use of this->file. Use it for the first of merged range
selects.
*/
- int error= quick->init_ror_merged_scan(TRUE);
+ int error= quick->init_ror_merged_scan(TRUE, alloc);
if (error)
DBUG_RETURN(error);
quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS);
@@ -2158,7 +2160,7 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler)
const MY_BITMAP * const save_read_set= quick->head->read_set;
const MY_BITMAP * const save_write_set= quick->head->write_set;
#endif
- if (quick->init_ror_merged_scan(FALSE))
+ if (quick->init_ror_merged_scan(FALSE, alloc))
DBUG_RETURN(1);
quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS);
@@ -2192,7 +2194,7 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler)
int QUICK_ROR_INTERSECT_SELECT::reset()
{
DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::reset");
- if (!scans_inited && init_ror_merged_scan(TRUE))
+ if (!scans_inited && init_ror_merged_scan(TRUE, &alloc))
DBUG_RETURN(1);
scans_inited= TRUE;
List_iterator_fast<QUICK_SELECT_WITH_RECORD> it(quick_selects);
@@ -2329,7 +2331,7 @@ int QUICK_ROR_UNION_SELECT::reset()
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
while ((quick= it++))
{
- if (quick->init_ror_merged_scan(FALSE))
+ if (quick->init_ror_merged_scan(FALSE, &alloc))
DBUG_RETURN(1);
}
scans_inited= TRUE;
@@ -3212,8 +3214,434 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
}
/****************************************************************************
+ * Condition selectivity module
+ ****************************************************************************/
+
+
+/*
+ Build descriptors of pseudo-indexes over columns to perform range analysis
+
+ SYNOPSIS
+ create_key_parts_for_pseudo_indexes()
+ param IN/OUT data structure for the descriptors to be built
+ used_fields bitmap of columns for which the descriptors are to be built
+
+ DESCRIPTION
+ For each column marked in the bitmap used_fields the function builds
+ a descriptor of a single-component pseudo-index over this column that
+    can be used for the range analysis of the predicates over this column.
+ The descriptors are created in the memory of param->mem_root.
+
+ RETURN
+ FALSE in the case of success
+ TRUE otherwise
+*/
+
+static
+bool create_key_parts_for_pseudo_indexes(RANGE_OPT_PARAM *param,
+ MY_BITMAP *used_fields)
+{
+ Field **field_ptr;
+ TABLE *table= param->table;
+ uint parts= 0;
+
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ if (bitmap_is_set(used_fields, (*field_ptr)->field_index))
+ parts++;
+ }
+
+ KEY_PART *key_part;
+ uint keys= 0;
+
+ if (!(key_part= (KEY_PART *) alloc_root(param->mem_root,
+ sizeof(KEY_PART) * parts)))
+ return TRUE;
+
+ param->key_parts= key_part;
+
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ if (bitmap_is_set(used_fields, (*field_ptr)->field_index))
+ {
+ Field *field= *field_ptr;
+ uint16 store_length;
+ key_part->key= keys;
+ key_part->part= 0;
+ key_part->length= (uint16) field->key_length();
+ store_length= key_part->length;
+ if (field->real_maybe_null())
+ store_length+= HA_KEY_NULL_LENGTH;
+ if (field->real_type() == MYSQL_TYPE_VARCHAR)
+ store_length+= HA_KEY_BLOB_LENGTH;
+ key_part->store_length= store_length;
+ key_part->field= field;
+ key_part->image_type= Field::itRAW;
+ key_part->flag= 0;
+ param->key[keys]= key_part;
+ keys++;
+ key_part++;
+ }
+ }
+ param->keys= keys;
+ param->key_parts_end= key_part;
+
+ return FALSE;
+}
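create_key_parts_for_pseudo_indexes() above allocates one single-part descriptor per column set in used_fields, adding room for the NULL flag and the VARCHAR length prefix to store_length. A simplified standalone sketch of that bookkeeping with made-up types (not the server's KEY_PART):

// Hypothetical, simplified sketch of building one-part pseudo-index
// descriptors for the columns marked in a bitmap; not the server's KEY_PART.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Column        { uint16_t key_length; bool nullable; bool is_varchar; };
struct PseudoKeyPart { uint16_t key_no, length, store_length; const Column *col; };

static std::vector<PseudoKeyPart>
build_pseudo_key_parts(const std::vector<Column> &cols, const std::vector<bool> &used)
{
  const uint16_t NULL_BYTE = 1, VARCHAR_LEN_BYTES = 2;
  std::vector<PseudoKeyPart> parts;
  uint16_t key_no = 0;
  for (std::size_t i = 0; i < cols.size(); i++)
  {
    if (!used[i])
      continue;                                          // column not used in the condition
    uint16_t store = cols[i].key_length;
    if (cols[i].nullable)   store += NULL_BYTE;          // room for the NULL flag
    if (cols[i].is_varchar) store += VARCHAR_LEN_BYTES;  // room for the length prefix
    parts.push_back({key_no++, cols[i].key_length, store, &cols[i]});
  }
  return parts;
}

int main()
{
  std::vector<Column> cols = { {4, false, false}, {32, true, true} };
  std::vector<bool>   used = { true, true };
  return (int) build_pseudo_key_parts(cols, used).size();   // 2
}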
+
+
+/*
+ Estimate the number of rows in all ranges built for a column
+ by the range optimizer
+
+ SYNOPSIS
+ records_in_column_ranges()
+ param the data structure to access descriptors of pseudo indexes
+ built over columns used in the condition of the processed query
+ idx the index of the descriptor of interest in param
+ tree the tree representing ranges built for the interesting column
+
+ DESCRIPTION
+    for each such range r it calls the function get_column_range_cardinality()
+ for each of them r it calls the function get_column_range_cardinality()
+ that estimates the number of expected rows in r. It is assumed that param
+ is the data structure containing the descriptors of pseudo-indexes that
+ has been built to perform range analysis of the range conditions imposed
+ on the columns used in the processed query, while idx is the index of the
+ descriptor created in 'param' exactly for the column for which 'tree'
+ has been built by the range optimizer.
+
+ RETURN
+ the number of rows in the retrieved ranges
+*/
+
+static
+double records_in_column_ranges(PARAM *param, uint idx,
+ SEL_ARG *tree)
+{
+ SEL_ARG_RANGE_SEQ seq;
+ KEY_MULTI_RANGE range;
+ range_seq_t seq_it;
+ double rows;
+ Field *field;
+ uint flags= 0;
+ double total_rows= 0;
+ RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init,
+ sel_arg_range_seq_next, 0, 0};
+
+  /* Handle cases when we don't have a valid non-empty list of ranges */
+ if (!tree)
+ return HA_POS_ERROR;
+ if (tree->type == SEL_ARG::IMPOSSIBLE)
+ return (0L);
+
+ field= tree->field;
+
+ seq.keyno= idx;
+ seq.real_keyno= MAX_KEY;
+ seq.param= param;
+ seq.start= tree;
+
+ seq_it= seq_if.init((void *) &seq, 0, flags);
+
+ while (!seq_if.next(seq_it, &range))
+ {
+ key_range *min_endp, *max_endp;
+ min_endp= range.start_key.length? &range.start_key : NULL;
+ max_endp= range.end_key.length? &range.end_key : NULL;
+ rows= get_column_range_cardinality(field, min_endp, max_endp,
+ range.range_flag);
+ if (HA_POS_ERROR == rows)
+ {
+ total_rows= HA_POS_ERROR;
+ break;
+ }
+ total_rows += rows;
+ }
+ return total_rows;
+}
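records_in_column_ranges() above iterates the ranges built for one column, asks get_column_range_cardinality() for each, and sums the estimates, returning HA_POS_ERROR as soon as a single range cannot be estimated. A compact sketch of that accumulation with an error sentinel (names hypothetical):

// Hypothetical sketch of summing per-range row estimates with an error
// sentinel; estimate_range() stands in for get_column_range_cardinality().
#include <limits>
#include <vector>

struct Range { int lo, hi; };
static const double ROWS_UNKNOWN = std::numeric_limits<double>::max();

static double estimate_range(const Range &r)            // placeholder estimator
{ return r.hi >= r.lo ? double(r.hi - r.lo + 1) : ROWS_UNKNOWN; }

static double records_in_ranges(const std::vector<Range> &ranges)
{
  double total = 0;
  for (const Range &r : ranges)
  {
    double rows = estimate_range(r);
    if (rows == ROWS_UNKNOWN)
      return ROWS_UNKNOWN;          // one unestimable range spoils the whole total
    total += rows;
  }
  return total;
}

int main()
{
  return (int) records_in_ranges({ {1, 10}, {20, 25} });   // 10 + 6 = 16
}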
+
+
+/*
+ Calculate the selectivity of the condition imposed on the rows of a table
+
+ SYNOPSIS
+ calculate_cond_selectivity_for_table()
+ thd the context handle
+ table the table of interest
+ cond conditions imposed on the rows of the table
+
+ DESCRIPTION
+ This function calculates the selectivity of range conditions cond imposed
+ on the rows of 'table' in the processed query.
+ The calculated selectivity is assigned to the field table->cond_selectivity.
+
+ NOTE
+ Currently the selectivities of range conditions over different columns are
+ considered independent.
+
+ RETURN
+ FALSE on success
+ TRUE otherwise
+*/
+
+bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item *cond)
+{
+ uint keynr;
+ uint max_quick_key_parts= 0;
+ MY_BITMAP *used_fields= &table->cond_set;
+ double table_records= table->stat_records();
+ DBUG_ENTER("calculate_cond_selectivity_for_table");
+
+ table->cond_selectivity= 1.0;
+
+ if (table_records == 0)
+ DBUG_RETURN(FALSE);
+
+ if (thd->variables.optimizer_use_condition_selectivity > 2 &&
+ !bitmap_is_clear_all(used_fields))
+ {
+ /*
+ Calculate the selectivity of the range conditions not supported
+ by any index
+ */
+
+ PARAM param;
+ MEM_ROOT alloc;
+ SEL_TREE *tree;
+ SEL_ARG **key, **end;
+ double rows;
+ uint idx= 0;
+
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
+ param.thd= thd;
+ param.mem_root= &alloc;
+ param.old_root= thd->mem_root;
+ param.table= table;
+ param.is_ror_scan= FALSE;
+
+ if (create_key_parts_for_pseudo_indexes(&param, used_fields))
+ goto free_alloc;
+
+ param.prev_tables= param.read_tables= 0;
+ param.current_table= table->map;
+ param.using_real_indexes= FALSE;
+ param.real_keynr[0]= 0;
+ param.alloced_sel_args= 0;
+
+ thd->no_errors=1;
+
+ tree= get_mm_tree(&param, cond);
+
+ if (!tree)
+ goto free_alloc;
+
+ table->reginfo.impossible_range= 0;
+ if (tree->type == SEL_TREE::IMPOSSIBLE)
+ {
+ rows= 0;
+ table->reginfo.impossible_range= 1;
+ goto free_alloc;
+ }
+ else if (tree->type == SEL_TREE::MAYBE)
+ {
+ rows= table_records;
+ goto free_alloc;
+ }
+
+ for (key= tree->keys, end= key + param.keys; key != end; key++, idx++)
+ {
+ if (*key)
+ {
+ if ((*key)->type == SEL_ARG::IMPOSSIBLE)
+ {
+ rows= 0;
+ table->reginfo.impossible_range= 1;
+ goto free_alloc;
+ }
+ else
+ {
+ rows= records_in_column_ranges(&param, idx, *key);
+ if (rows != HA_POS_ERROR)
+ (*key)->field->cond_selectivity= rows/table_records;
+ }
+ }
+ }
+
+ for (Field **field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ Field *table_field= *field_ptr;
+ if (bitmap_is_set(table->read_set, table_field->field_index) &&
+ table_field->cond_selectivity < 1.0)
+ table->cond_selectivity*= table_field->cond_selectivity;
+ }
+
+ free_alloc:
+ thd->mem_root= param.old_root;
+ free_root(&alloc, MYF(0));
+
+ }
+
+ /* Calculate the selectivity of the range conditions supported by indexes */
+
+ bitmap_clear_all(used_fields);
+
+ for (keynr= 0; keynr < table->s->keys; keynr++)
+ {
+ if (table->quick_keys.is_set(keynr))
+ set_if_bigger(max_quick_key_parts, table->quick_key_parts[keynr]);
+ }
+
+ for (uint quick_key_parts= max_quick_key_parts;
+ quick_key_parts; quick_key_parts--)
+ {
+ for (keynr= 0; keynr < table->s->keys; keynr++)
+ {
+ if (table->quick_keys.is_set(keynr) &&
+ table->quick_key_parts[keynr] == quick_key_parts)
+ {
+ uint i;
+ uint used_key_parts= table->quick_key_parts[keynr];
+ double quick_cond_selectivity= table->quick_rows[keynr] /
+ table_records;
+ KEY *key_info= table->key_info + keynr;
+ KEY_PART_INFO* key_part= key_info->key_part;
+ for (i= 0; i < used_key_parts; i++, key_part++)
+ {
+ if (bitmap_is_set(used_fields, key_part->fieldnr-1))
+ break;
+ bitmap_set_bit(used_fields, key_part->fieldnr-1);
+ }
+ if (i)
+ {
+ table->cond_selectivity*= quick_cond_selectivity;
+ if (i != used_key_parts)
+ {
+ double f1= key_info->actual_rec_per_key(i-1);
+ double f2= key_info->actual_rec_per_key(i);
+ table->cond_selectivity*= f1 / f2;
+ }
+ }
+ }
+ }
+ }
+
+ /* Calculate selectivity of probably highly selective predicates */
+ ulong check_rows=
+ MY_MIN(thd->variables.optimizer_selectivity_sampling_limit,
+ (ulong) (table_records * SELECTIVITY_SAMPLING_SHARE));
+ if (cond && check_rows > SELECTIVITY_SAMPLING_THRESHOLD &&
+ thd->variables.optimizer_use_condition_selectivity > 4)
+ {
+ find_selective_predicates_list_processor_data *dt=
+ (find_selective_predicates_list_processor_data *)
+ alloc_root(thd->mem_root,
+ sizeof(find_selective_predicates_list_processor_data));
+ if (!dt)
+ DBUG_RETURN(TRUE);
+ dt->list.empty();
+ dt->table= table;
+ if (cond->walk(&Item::find_selective_predicates_list_processor, 0,
+ (uchar*) dt))
+ DBUG_RETURN(TRUE);
+ if (dt->list.elements > 0)
+ {
+ check_rows= check_selectivity(thd, check_rows, table, &dt->list);
+ if (check_rows > SELECTIVITY_SAMPLING_THRESHOLD)
+ {
+ COND_STATISTIC *stat;
+ List_iterator_fast<COND_STATISTIC> it(dt->list);
+ double examined_rows= check_rows;
+ while ((stat= it++))
+ {
+ if (!stat->positive)
+ {
+ DBUG_PRINT("info", ("To avoid 0 assigned 1 to the counter"));
+ stat->positive= 1; // avoid 0
+ }
+ DBUG_PRINT("info", ("The predicate selectivity : %g",
+ (double)stat->positive / examined_rows));
+ double selectivity= ((double)stat->positive) / examined_rows;
+ table->cond_selectivity*= selectivity;
+ /*
+            If a field is involved then we register its selectivity in case
+            there is an equality with the field.
+ For example in case
+ t1.a LIKE "%bla%" and t1.a = t2.b
+ the selectivity we have found could be used also for t2.
+ */
+ if (stat->field_arg)
+ {
+ stat->field_arg->cond_selectivity*= selectivity;
+
+ if (stat->field_arg->next_equal_field)
+ {
+ for (Field *next_field= stat->field_arg->next_equal_field;
+ next_field != stat->field_arg;
+ next_field= next_field->next_equal_field)
+ {
+ next_field->cond_selectivity*= selectivity;
+ next_field->table->cond_selectivity*= selectivity;
+ }
+ }
+ }
+ }
+
+ }
+      /* This list and its elements are on mem_root and must not be freed */
+ table->cond_selectivity_sampling_explain= &dt->list;
+ }
+ }
+
+ DBUG_RETURN(FALSE);
+}
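calculate_cond_selectivity_for_table() above folds the per-column estimates into table->cond_selectivity by multiplication, i.e. it treats range conditions on different columns as independent, and the per-index and sampled factors are combined the same way. A minimal sketch of that independence assumption (hypothetical names):

// Hypothetical sketch: combining per-column selectivities under the
// independence assumption used above.
#include <cstdio>
#include <vector>

static double combine_selectivities(const std::vector<double> &per_column)
{
  double sel = 1.0;
  for (double s : per_column)
    if (s < 1.0)            // only columns with a real restriction contribute
      sel *= s;
  return sel;
}

int main()
{
  // e.g. 10% of rows match the range on col1 and 50% match the range on col2
  std::printf("%.3f\n", combine_selectivities({0.1, 0.5}));   // 0.050
}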
+
+/****************************************************************************
+ * Condition selectivity code ends
+ ****************************************************************************/
+
+/****************************************************************************
* Partition pruning module
****************************************************************************/
+
+/*
+ Store field key image to table record
+
+ SYNOPSIS
+ store_key_image_to_rec()
+ field Field which key image should be stored
+ ptr Field value in key format
+ len Length of the value, in bytes
+
+ DESCRIPTION
+ Copy the field value from its key image to the table record. The source
+ is the value in key image format, occupying len bytes in buffer pointed
+ by ptr. The destination is table record, in "field value in table record"
+ format.
+*/
+
+void store_key_image_to_rec(Field *field, uchar *ptr, uint len)
+{
+ /* Do the same as print_key() does */
+ my_bitmap_map *old_map;
+
+ if (field->real_maybe_null())
+ {
+ if (*ptr)
+ {
+ field->set_null();
+ return;
+ }
+ field->set_notnull();
+ ptr++;
+ }
+ old_map= dbug_tmp_use_all_columns(field->table,
+ field->table->write_set);
+ field->set_key_image(ptr, len);
+ dbug_tmp_restore_column_map(field->table->write_set, old_map);
+}
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
@@ -3570,44 +3998,6 @@ end:
/*
- Store field key image to table record
-
- SYNOPSIS
- store_key_image_to_rec()
- field Field which key image should be stored
- ptr Field value in key format
- len Length of the value, in bytes
-
- DESCRIPTION
- Copy the field value from its key image to the table record. The source
- is the value in key image format, occupying len bytes in buffer pointed
- by ptr. The destination is table record, in "field value in table record"
- format.
-*/
-
-void store_key_image_to_rec(Field *field, uchar *ptr, uint len)
-{
- /* Do the same as print_key() does */
- my_bitmap_map *old_map;
-
- if (field->real_maybe_null())
- {
- if (*ptr)
- {
- field->set_null();
- return;
- }
- field->set_notnull();
- ptr++;
- }
- old_map= dbug_tmp_use_all_columns(field->table,
- field->table->write_set);
- field->set_key_image(ptr, len);
- dbug_tmp_restore_column_map(field->table->write_set, old_map);
-}
-
-
-/*
For SEL_ARG* array, store sel_arg->min values into table record buffer
SYNOPSIS
@@ -4954,6 +5344,8 @@ TABLE_READ_PLAN *merge_same_index_scans(PARAM *param, SEL_IMERGE *imerge,
bzero((*changed_tree)->keys,
sizeof((*changed_tree)->keys[0])*param->keys);
(*changed_tree)->keys_map.clear_all();
+ key->incr_refs();
+ (*tree)->keys[key_idx]->incr_refs();
if (((*changed_tree)->keys[key_idx]=
key_or(param, key, (*tree)->keys[key_idx])))
(*changed_tree)->keys_map.set_bit(key_idx);
@@ -5547,8 +5939,8 @@ ha_rows records_in_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
ha_rows ext_records= ext_index_scan->records;
if (i < used_key_parts)
{
- ulong f1= key_info->actual_rec_per_key(i-1);
- ulong f2= key_info->actual_rec_per_key(i);
+ double f1= key_info->actual_rec_per_key(i-1);
+ double f2= key_info->actual_rec_per_key(i);
ext_records= (ha_rows) ((double) ext_records / f2 * f1);
}
if (ext_records < table_cardinality)
@@ -7159,6 +7551,34 @@ static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func,
{
new_interval->min_value= last_val->max_value;
new_interval->min_flag= NEAR_MIN;
+
+ /*
+ If the interval is over a partial keypart, the
+ interval must be "c_{i-1} <= X < c_i" instead of
+ "c_{i-1} < X < c_i". Reason:
+
+ Consider a table with a column "my_col VARCHAR(3)",
+ and an index with definition
+ "INDEX my_idx my_col(1)". If the table contains rows
+ with my_col values "f" and "foo", the index will not
+ distinguish the two rows.
+
+ Note that tree_or() below will effectively merge
+ this range with the range created for c_{i-1} and
+ we'll eventually end up with only one range:
+ "NULL < X".
+
+ Partitioning indexes are never partial.
+ */
+ if (param->using_real_indexes)
+ {
+ const KEY key=
+ param->table->key_info[param->real_keynr[idx]];
+ const KEY_PART_INFO *kpi= key.key_part + new_interval->part;
+
+ if (kpi->key_part_flag & HA_PART_KEY_SEG)
+ new_interval->min_flag= 0;
+ }
}
}
/*
@@ -7317,6 +7737,14 @@ static SEL_TREE *get_full_func_mm_tree(RANGE_OPT_PARAM *param,
param->current_table);
DBUG_ENTER("get_full_func_mm_tree");
+#ifdef HAVE_SPATIAL
+ if (field_item->field->type() == MYSQL_TYPE_GEOMETRY)
+ {
+ /* We have to be able to store all sorts of spatial features here */
+ ((Field_geom*) field_item->field)->geom_type= Field::GEOM_GEOMETRY;
+ }
+#endif /*HAVE_SPATIAL*/
+
for (uint i= 0; i < cond_func->arg_count; i++)
{
Item *arg= cond_func->arguments()[i]->real_item();
@@ -7363,41 +7791,44 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond)
if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
{
- tree=0;
+ tree= NULL;
Item *item;
while ((item=li++))
{
- SEL_TREE *new_tree=get_mm_tree(param,item);
- if (param->thd->is_fatal_error ||
- param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS)
- DBUG_RETURN(0); // out of memory
- tree=tree_and(param,tree,new_tree);
- if (tree && tree->type == SEL_TREE::IMPOSSIBLE)
- break;
+ SEL_TREE *new_tree= get_mm_tree(param,item);
+ if (param->statement_should_be_aborted())
+ DBUG_RETURN(NULL);
+ tree= tree_and(param,tree,new_tree);
+ if (tree && tree->type == SEL_TREE::IMPOSSIBLE)
+ break;
}
}
else
- { // COND OR
- tree=get_mm_tree(param,li++);
+ { // COND OR
+ tree= get_mm_tree(param,li++);
+ if (param->statement_should_be_aborted())
+ DBUG_RETURN(NULL);
if (tree)
{
- Item *item;
- while ((item=li++))
- {
- SEL_TREE *new_tree=get_mm_tree(param,item);
- if (!new_tree)
- DBUG_RETURN(0); // out of memory
- tree=tree_or(param,tree,new_tree);
- if (!tree || tree->type == SEL_TREE::ALWAYS)
- break;
- }
+ Item *item;
+ while ((item=li++))
+ {
+ SEL_TREE *new_tree=get_mm_tree(param,item);
+ if (new_tree == NULL || param->statement_should_be_aborted())
+ DBUG_RETURN(NULL);
+ tree= tree_or(param,tree,new_tree);
+ if (tree == NULL || tree->type == SEL_TREE::ALWAYS)
+ break;
+ }
}
}
DBUG_RETURN(tree);
}
/* Here when simple cond */
- if (cond->const_item() && !cond->is_expensive())
+ if (cond->const_item())
{
+ if (cond->is_expensive())
+ DBUG_RETURN(0);
/*
During the cond->val_int() evaluation we can come across a subselect
item which may allocate memory on the thd->mem_root and assumes
@@ -7632,16 +8063,17 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field,
*/
if (field->result_type() == STRING_RESULT &&
- ((Field_str*) field)->match_collation_to_optimize_range() &&
+ field->match_collation_to_optimize_range() &&
value->result_type() == STRING_RESULT &&
key_part->image_type == Field::itRAW &&
- ((Field_str*)field)->charset() != conf_func->compare_collation() &&
+ field->charset() != conf_func->compare_collation() &&
!(conf_func->compare_collation()->state & MY_CS_BINSORT &&
(type == Item_func::EQUAL_FUNC || type == Item_func::EQ_FUNC)))
goto end;
if (key_part->image_type == Field::itMBR)
{
+ // @todo: use is_spatial_operator() instead?
switch (type) {
case Item_func::SP_EQUALS_FUNC:
case Item_func::SP_DISJOINT_FUNC:
@@ -10611,12 +11043,13 @@ int read_keys_and_merge_scans(THD *thd,
Unique *unique= *unique_ptr;
handler *file= head->file;
bool with_cpk_filter= pk_quick_select != NULL;
-
+ bool enabled_keyread= 0;
DBUG_ENTER("read_keys_and_merge");
/* We're going to just read rowids. */
if (!head->key_read)
{
+ enabled_keyread= 1;
head->enable_keyread();
}
head->prepare_for_position();
@@ -10710,13 +11143,15 @@ int read_keys_and_merge_scans(THD *thd,
/*
index merge currently doesn't support "using index" at all
*/
- head->disable_keyread();
+ if (enabled_keyread)
+ head->disable_keyread();
if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE))
result= 1;
DBUG_RETURN(result);
err:
- head->disable_keyread();
+ if (enabled_keyread)
+ head->disable_keyread();
DBUG_RETURN(1);
}
@@ -10874,9 +11309,13 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
do
{
+ DBUG_EXECUTE_IF("innodb_quick_report_deadlock",
+ DBUG_SET("+d,innodb_report_deadlock"););
if ((error= quick->get_next()))
{
- quick_with_last_rowid->file->unlock_row();
+ /* On certain errors like deadlock, trx might be rolled back.*/
+ if (!current_thd->transaction_rollback_request)
+ quick_with_last_rowid->file->unlock_row();
DBUG_RETURN(error);
}
quick->file->position(quick->record);
@@ -10902,7 +11341,9 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
quick->file->unlock_row(); /* row not in range; unlock */
if ((error= quick->get_next()))
{
- quick_with_last_rowid->file->unlock_row();
+ /* On certain errors like deadlock, trx might be rolled back.*/
+ if (!current_thd->transaction_rollback_request)
+ quick_with_last_rowid->file->unlock_row();
DBUG_RETURN(error);
}
}
@@ -11714,7 +12155,8 @@ static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
KEY_PART_INFO **first_non_infix_part);
static bool
check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
- Field::imagetype image_type);
+ Field::imagetype image_type,
+ bool *has_min_max_fld, bool *has_other_fld);
static void
cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
@@ -11786,13 +12228,15 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
NGA1.If in the index I there is a gap between the last GROUP attribute G_k,
and the MIN/MAX attribute C, then NGA must consist of exactly the
index attributes that constitute the gap. As a result there is a
- permutation of NGA that coincides with the gap in the index
- <B_1, ..., B_m>.
+ permutation of NGA, BA=<B_1,...,B_m>, that coincides with the gap
+ in the index.
NGA2.If BA <> {}, then the WHERE clause must contain a conjunction EQ of
equality conditions for all NG_i of the form (NG_i = const) or
(const = NG_i), such that each NG_i is referenced in exactly one
conjunct. Informally, the predicates provide constants to fill the
gap in the index.
+ NGA3.If BA <> {}, there can only be one range. TODO: This is a code
+ limitation and is not strictly needed. See BUG#15947433
WA1. There are no other attributes in the WHERE clause except the ones
referenced in predicates RNG, PA, PC, EQ defined above. Therefore
WA is subset of (GA union NGA union C) for GA,NGA,C that pass the
@@ -12054,6 +12498,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
else
goto next_index;
}
+ /*
+ This function is called on the precondition that the index is covering.
+ Therefore if the GROUP BY list contains more elements than the index,
+ these are duplicates. The GROUP BY list cannot be a prefix of the index.
+ */
+ if (cur_part == end_part && tmp_group)
+ goto next_index;
}
/*
Check (GA2) if this is a DISTINCT query.
@@ -12107,7 +12558,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
cur_parts have bits set for only used keyparts.
*/
ulonglong all_parts, cur_parts;
- all_parts= (1<<max_key_part) - 1;
+ all_parts= (1ULL << max_key_part) - 1;
cur_parts= used_key_parts_map.to_ulonglong() >> 1;
if (all_parts != cur_parts)
goto next_index;
@@ -12264,10 +12715,12 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
DBUG_RETURN(NULL);
/* Check (SA3) for the where clause. */
+ bool has_min_max_fld= false, has_other_fld= false;
if (join->conds && min_max_arg_item &&
!check_group_min_max_predicates(join->conds, min_max_arg_item,
(index_info->flags & HA_SPATIAL) ?
- Field::itMBR : Field::itRAW))
+ Field::itMBR : Field::itRAW,
+ &has_min_max_fld, &has_other_fld))
DBUG_RETURN(NULL);
/*
@@ -12315,16 +12768,24 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
SYNOPSIS
check_group_min_max_predicates()
- cond tree (or subtree) describing all or part of the WHERE
- clause being analyzed
- min_max_arg_item the field referenced by the MIN/MAX function(s)
- min_max_arg_part the keypart of the MIN/MAX argument if any
+ cond [in] the expression tree being analyzed
+ min_max_arg [in] the field referenced by the MIN/MAX function(s)
+ image_type [in]
+ has_min_max_arg [out] true if the subtree being analyzed references min_max_arg
+ has_other_arg [out] true if the subtree being analyzed references a column
+                          other than min_max_arg
DESCRIPTION
The function walks recursively over the cond tree representing a WHERE
clause, and checks condition (SA3) - if a field is referenced by a MIN/MAX
aggregate function, it is referenced only by one of the following
- predicates: {=, !=, <, <=, >, >=, between, is null, is not null}.
+ predicates $FUNC$:
+ {=, !=, <, <=, >, >=, between, is [not] null, multiple equal}.
+ In addition the function checks that the WHERE condition is equivalent to
+ "cond1 AND cond2" where :
+ cond1 - does not use min_max_column at all.
+ cond2 - is an AND/OR tree with leaves in form
+ "$FUNC$(min_max_column[, const])".
RETURN
TRUE if cond passes the test
@@ -12333,7 +12794,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
static bool
check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
- Field::imagetype image_type)
+ Field::imagetype image_type,
+ bool *has_min_max_arg, bool *has_other_arg)
{
DBUG_ENTER("check_group_min_max_predicates");
DBUG_ASSERT(cond && min_max_arg_item);
@@ -12345,12 +12807,24 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
DBUG_PRINT("info", ("Analyzing: %s", ((Item_func*) cond)->func_name()));
List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list());
Item *and_or_arg;
+ Item_func::Functype func_type= ((Item_cond*) cond)->functype();
+ bool has_min_max= false, has_other= false;
while ((and_or_arg= li++))
{
- if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item,
- image_type))
+ /*
+ The WHERE clause doesn't pass the condition if:
+ (1) any subtree doesn't pass the condition or
+ (2) the subtree passes the test, but it is an OR and it references both
+ the min/max argument and other columns.
+ */
+ if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item, //1
+ image_type,
+ &has_min_max, &has_other) ||
+ (func_type == Item_func::COND_OR_FUNC && has_min_max && has_other))//2
DBUG_RETURN(FALSE);
}
+ *has_min_max_arg= has_min_max || *has_min_max_arg;
+ *has_other_arg= has_other || *has_other_arg;
DBUG_RETURN(TRUE);
}
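As a standalone sketch of the flag propagation this hunk introduces (the node type is illustrative, not the server's Item classes): the walk accumulates whether a subtree references the MIN/MAX column or some other column, and rejects any OR node that references both.

  #include <vector>

  enum NodeKind { NODE_AND, NODE_OR, LEAF_MIN_MAX_COL, LEAF_OTHER_COL };

  struct Node
  {
    NodeKind kind;
    std::vector<Node> children;        /* empty for leaf nodes */
  };

  /*
    Returns false if the tree must be rejected, i.e. some OR node references
    both the MIN/MAX column and another column. On success the two flags
    report what the subtree references, so the caller can apply the same
    test one level up.
  */
  static bool check_tree(const Node &n, bool *has_min_max, bool *has_other)
  {
    if (n.kind == LEAF_MIN_MAX_COL) { *has_min_max= true; return true; }
    if (n.kind == LEAF_OTHER_COL)   { *has_other= true;   return true; }

    bool sub_min_max= false, sub_other= false;
    for (const Node &child : n.children)
    {
      if (!check_tree(child, &sub_min_max, &sub_other) ||        /* (1) */
          (n.kind == NODE_OR && sub_min_max && sub_other))       /* (2) */
        return false;
    }
    *has_min_max= *has_min_max || sub_min_max;
    *has_other= *has_other || sub_other;
    return true;
  }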
@@ -12384,6 +12858,10 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
if (cond_type == Item::FIELD_ITEM)
{
DBUG_PRINT("info", ("Analyzing: %s", cond->full_name()));
+ if (min_max_arg_item->eq((Item_field*)cond, 1))
+ *has_min_max_arg= true;
+ else
+ *has_other_arg= true;
DBUG_RETURN(TRUE);
}
@@ -12392,9 +12870,33 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
/* Test if cond references only group-by or non-group fields. */
Item_func *pred= (Item_func*) cond;
+ Item_func::Functype pred_type= pred->functype();
+ DBUG_PRINT("info", ("Analyzing: %s", pred->func_name()));
+ if (pred_type == Item_func::MULT_EQUAL_FUNC)
+ {
+      /*
+        Check that the multiple equality references either only the min/max
+        argument (apart from constants) or does not reference it at all:
+        mixing the min/max argument with other fields is not allowed.
+      */
+ Item_equal_fields_iterator eq_it(*((Item_equal*)pred));
+ Item *eq_item;
+ bool has_min_max= false, has_other= false;
+ while ((eq_item= eq_it++))
+ {
+ if (min_max_arg_item->eq(eq_item->real_item(), 1))
+ has_min_max= true;
+ else
+ has_other= true;
+ }
+ *has_min_max_arg= has_min_max || *has_min_max_arg;
+ *has_other_arg= has_other || *has_other_arg;
+ DBUG_RETURN(!(has_min_max && has_other));
+ }
+
Item **arguments= pred->arguments();
Item *cur_arg;
- DBUG_PRINT("info", ("Analyzing: %s", pred->func_name()));
+ bool has_min_max= false, has_other= false;
for (uint arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++)
{
cur_arg= arguments[arg_idx]->real_item();
@@ -12403,11 +12905,11 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
{
if (min_max_arg_item->eq(cur_arg, 1))
{
- /*
- If pred references the MIN/MAX argument, check whether pred is a range
- condition that compares the MIN/MAX argument with a constant.
- */
- Item_func::Functype pred_type= pred->functype();
+ has_min_max= true;
+ /*
+ If pred references the MIN/MAX argument, check whether pred is a range
+ condition that compares the MIN/MAX argument with a constant.
+ */
if (pred_type != Item_func::EQUAL_FUNC &&
pred_type != Item_func::LT_FUNC &&
pred_type != Item_func::LE_FUNC &&
@@ -12436,7 +12938,7 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
*/
((args[1]->result_type() == STRING_RESULT &&
image_type == Field::itRAW &&
- ((Field_str*) min_max_arg_item->field)->charset() !=
+ min_max_arg_item->field->charset() !=
pred->compare_collation())
||
/*
@@ -12447,14 +12949,16 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
min_max_arg_item->field->cmp_type() != args[1]->result_type())))
DBUG_RETURN(FALSE);
}
+ else
+ has_other= true;
}
else if (cur_arg->type() == Item::FUNC_ITEM)
{
- if (!check_group_min_max_predicates(cur_arg, min_max_arg_item,
- image_type))
+ if (!check_group_min_max_predicates(cur_arg, min_max_arg_item, image_type,
+ &has_min_max, &has_other))
DBUG_RETURN(FALSE);
}
- else if (cur_arg->const_item())
+ else if (cur_arg->const_item() && !cur_arg->is_expensive())
{
/*
For predicates of the form "const OP expr" we also have to check 'expr'
@@ -12464,13 +12968,85 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
}
else
DBUG_RETURN(FALSE);
+    if (has_min_max && has_other)
+ DBUG_RETURN(FALSE);
}
+ *has_min_max_arg= has_min_max || *has_min_max_arg;
+ *has_other_arg= has_other || *has_other_arg;
DBUG_RETURN(TRUE);
}
/*
+  Get the SEL_ARG tree, if any, for the keypart covering the non-grouping
+  attribute (NGA) field 'nga_field'.
+
+  This function enforces the NGA3 test: if 'keypart_tree' contains a
+  condition for 'nga_field', there can only be one range. If multiple
+  ranges are found, this function returns true and 'cur_range' should
+  not be used.
+
+  Note that the NGA1 and NGA2 requirements, e.g. whether or not the range
+  predicate for 'nga_field' is an equality, are not tested by this
+  function.
+
+  @param[in]   nga_field   The NGA field we want the SEL_ARG tree for
+  @param[in]   keypart_tree Root node of the SEL_ARG* tree for the index
+  @param[out]  cur_range   The SEL_ARG tree, if any, for the keypart
+               covering field 'nga_field'
+  @retval true  'keypart_tree' contained a predicate for 'nga_field' but
+                 multiple ranges exist. 'cur_range' should not be used.
+  @retval false otherwise
+*/
+
+static bool
+get_sel_arg_for_keypart(Field *nga_field,
+ SEL_ARG *keypart_tree,
+ SEL_ARG **cur_range)
+{
+  if (keypart_tree == NULL)
+    return false;
+  if (keypart_tree->field->eq(nga_field))
+ {
+ /*
+ Enforce NGA3: If a condition for nga_field has been found, only
+ a single range is allowed.
+ */
+ if (keypart_tree->prev || keypart_tree->next)
+ return true; // There are multiple ranges
+
+ *cur_range= keypart_tree;
+ return false;
+ }
+
+ SEL_ARG *found_tree= NULL;
+ SEL_ARG *first_kp= keypart_tree->first();
+
+ for (SEL_ARG *cur_kp= first_kp; cur_kp && !found_tree;
+ cur_kp= cur_kp->next)
+ {
+ if (cur_kp->next_key_part)
+ {
+ if (get_sel_arg_for_keypart(nga_field,
+ cur_kp->next_key_part,
+ &found_tree))
+ return true;
+
+ }
+ /*
+      Enforce NGA3: If a condition for nga_field has been found, only
+ a single range is allowed.
+ */
+ if (found_tree && first_kp->next)
+ return true; // There are multiple ranges
+ }
+ *cur_range= found_tree;
+ return false;
+}
+
+
+/*
Extract a sequence of constants from a conjunction of equality predicates.
SYNOPSIS
@@ -12484,12 +13060,13 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
key_infix [out] Infix of constants to be used for index lookup
    key_infix_len      [out] Length of the infix
first_non_infix_part [out] The first keypart after the infix (if any)
-
+
DESCRIPTION
- Test conditions (NGA1, NGA2) from get_best_group_min_max(). Namely,
- for each keypart field NGF_i not in GROUP-BY, check that there is a
- constant equality predicate among conds with the form (NGF_i = const_ci) or
- (const_ci = NGF_i).
+ Test conditions (NGA1, NGA2, NGA3) from get_best_group_min_max(). Namely,
+ for each keypart field NG_i not in GROUP-BY, check that there is exactly one
+ constant equality predicate among conds with the form (NG_i = const_ci) or
+    (const_ci = NG_i). In addition, there can only be one range when there is
+ such a gap.
Thus all the NGF_i attributes must fill the 'gap' between the last group-by
attribute and the MIN/MAX attribute in the index (if present). If these
conditions hold, copy each constant from its corresponding predicate into
@@ -12518,17 +13095,14 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
uchar *key_ptr= key_infix;
for (cur_part= first_non_group_part; cur_part != end_part; cur_part++)
{
+ cur_range= NULL;
/*
Find the range tree for the current keypart. We assume that
- index_range_tree points to the leftmost keypart in the index.
+ index_range_tree points to the first keypart in the index.
*/
- for (cur_range= index_range_tree;
- cur_range && cur_range->type == SEL_ARG::KEY_RANGE;
- cur_range= cur_range->next_key_part)
- {
- if (cur_range->field->eq(cur_part->field))
- break;
- }
+    if (get_sel_arg_for_keypart(cur_part->field, index_range_tree, &cur_range))
+ return false;
+
if (!cur_range || cur_range->type != SEL_ARG::KEY_RANGE)
{
if (min_max_arg_part)
@@ -12540,9 +13114,6 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
}
}
- /* Check that the current range tree is a single point interval. */
- if (cur_range->prev || cur_range->next)
- return FALSE; /* This is not the only range predicate for the field. */
if ((cur_range->min_flag & NO_MIN_RANGE) ||
(cur_range->max_flag & NO_MAX_RANGE) ||
(cur_range->min_flag & NEAR_MIN) || (cur_range->max_flag & NEAR_MAX))
@@ -12718,11 +13289,11 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
double *read_cost, ha_rows *records)
{
ha_rows table_records;
- uint num_groups;
- uint num_blocks;
- uint keys_per_block;
- uint keys_per_group;
- uint keys_per_subgroup; /* Average number of keys in sub-groups */
+ ha_rows num_groups;
+ ha_rows num_blocks;
+ uint keys_per_block;
+ ha_rows keys_per_group;
+ ha_rows keys_per_subgroup; /* Average number of keys in sub-groups */
/* formed by a key infix. */
double p_overlap; /* Probability that a sub-group overlaps two blocks. */
double quick_prefix_selectivity;
@@ -12731,24 +13302,24 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
DBUG_ENTER("cost_group_min_max");
table_records= table->stat_records();
- keys_per_block= (table->file->stats.block_size / 2 /
- (index_info->key_length + table->file->ref_length)
- + 1);
- num_blocks= (uint)(table_records / keys_per_block) + 1;
+ keys_per_block= (uint) (table->file->stats.block_size / 2 /
+ (index_info->key_length + table->file->ref_length)
+ + 1);
+ num_blocks= (ha_rows)(table_records / keys_per_block) + 1;
/* Compute the number of keys in a group. */
- keys_per_group= index_info->actual_rec_per_key(group_key_parts - 1);
+ keys_per_group= (ha_rows) index_info->actual_rec_per_key(group_key_parts - 1);
if (keys_per_group == 0) /* If there is no statistics try to guess */
/* each group contains 10% of all records */
- keys_per_group= (uint)(table_records / 10) + 1;
- num_groups= (uint)(table_records / keys_per_group) + 1;
+ keys_per_group= (table_records / 10) + 1;
+ num_groups= (table_records / keys_per_group) + 1;
/* Apply the selectivity of the quick select for group prefixes. */
if (range_tree && (quick_prefix_records != HA_POS_ERROR))
{
quick_prefix_selectivity= (double) quick_prefix_records /
(double) table_records;
- num_groups= (uint) rint(num_groups * quick_prefix_selectivity);
+ num_groups= (ha_rows) rint(num_groups * quick_prefix_selectivity);
set_if_bigger(num_groups, 1);
}
@@ -12757,7 +13328,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
Compute the probability that two ends of a subgroup are inside
different blocks.
*/
- keys_per_subgroup= index_info->actual_rec_per_key(used_key_parts - 1);
+ keys_per_subgroup= (ha_rows) index_info->actual_rec_per_key(used_key_parts - 1);
if (keys_per_subgroup >= keys_per_block) /* If a subgroup is bigger than */
p_overlap= 1.0; /* a block, it will overlap at least two blocks. */
else
@@ -12785,9 +13356,9 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
*records= num_groups;
DBUG_PRINT("info",
- ("table rows: %lu keys/block: %u keys/group: %u result rows: %lu blocks: %u",
- (ulong)table_records, keys_per_block, keys_per_group,
- (ulong) *records, num_blocks));
+ ("table rows: %lu keys/block: %u keys/group: %lu result rows: %lu blocks: %lu",
+ (ulong)table_records, keys_per_block, (ulong) keys_per_group,
+ (ulong) *records, (ulong) num_blocks));
DBUG_VOID_RETURN;
}
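The widening above from uint to ha_rows (the server's 64-bit ulonglong row-count type) keeps the group and block estimates from being silently truncated on tables beyond 2^32 rows. A standalone illustration of the truncation the old declarations allowed:

  #include <cstdint>
  #include <cstdio>

  typedef uint64_t ha_rows;             /* stands in for the server typedef */

  int main()
  {
    ha_rows table_records= 5000000000ULL;   /* 5 billion rows */
    ha_rows keys_per_group= 1;              /* worst case: one key per group */

    unsigned old_num_groups= (unsigned)(table_records / keys_per_group) + 1;
    ha_rows  new_num_groups= (table_records / keys_per_group) + 1;

    printf("uint:    %u\n", old_num_groups);   /* truncated to 705032705 */
    printf("ha_rows: %llu\n", (unsigned long long) new_num_groups);
    return 0;
  }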
@@ -13066,7 +13637,11 @@ QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT()
DBUG_ASSERT(file == head->file);
if (doing_key_read)
head->disable_keyread();
- file->ha_index_end();
+ /*
+ There may be a code path when the same table was first accessed by index,
+ then the index is closed, and the table is scanned (order by + loose scan).
+ */
+ file->ha_index_or_rnd_end();
}
if (min_max_arg_part)
delete_dynamic(&min_max_ranges);
diff --git a/sql/opt_range.h b/sql/opt_range.h
index fd9d0b3923f..3dbdce00e9d 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -323,7 +323,7 @@ public:
0 Ok
other Error
*/
- virtual int init_ror_merged_scan(bool reuse_handler)
+ virtual int init_ror_merged_scan(bool reuse_handler, MEM_ROOT *alloc)
{ DBUG_ASSERT(0); return 1; }
/*
@@ -473,7 +473,7 @@ public:
uchar *cur_prefix);
bool reverse_sorted() { return 0; }
bool unique_key_range();
- int init_ror_merged_scan(bool reuse_handler);
+ int init_ror_merged_scan(bool reuse_handler, MEM_ROOT *alloc);
void save_last_pos()
{ file->position(record); }
int get_type() { return QS_TYPE_RANGE; }
@@ -722,7 +722,7 @@ public:
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
- int init_ror_merged_scan(bool reuse_handler);
+ int init_ror_merged_scan(bool reuse_handler, MEM_ROOT *alloc);
bool push_quick_back(MEM_ROOT *alloc, QUICK_RANGE_SELECT *quick_sel_range);
class QUICK_SELECT_WITH_RECORD : public Sql_alloc
@@ -1042,11 +1042,20 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables,
table_map read_tables, COND *conds,
bool allow_null_cond, int *error);
+bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item *cond);
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
-void store_key_image_to_rec(Field *field, uchar *ptr, uint len);
#endif
+void store_key_image_to_rec(Field *field, uchar *ptr, uint len);
extern String null_string;
+/* check this number of rows (default value) */
+#define SELECTIVITY_SAMPLING_LIMIT 100
+/* but no more than this part of the table (10%) */
+#define SELECTIVITY_SAMPLING_SHARE 0.10
+/* do not check if we are going to check less than this number of records */
+#define SELECTIVITY_SAMPLING_THRESHOLD 10
+
#endif
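A hypothetical helper, not part of this patch, showing how the three SELECTIVITY_SAMPLING_* limits above would typically combine into a sample size (check up to 100 rows, capped at 10% of the table, and skip sampling entirely below the threshold):

  #include <algorithm>
  #include <cstdint>

  typedef uint64_t ha_rows;

  /* Hypothetical: illustrates the intent of the constants only. */
  static ha_rows selectivity_sample_size(ha_rows table_records)
  {
    ha_rows by_share= (ha_rows)(table_records * 0.10); /* ..._SAMPLING_SHARE */
    ha_rows sample= std::min<ha_rows>(100, by_share);  /* ..._SAMPLING_LIMIT */
    if (sample < 10)                                   /* ..._SAMPLING_THRESHOLD */
      return 0;                                        /* too small: skip */
    return sample;
  }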
diff --git a/sql/opt_range_mrr.cc b/sql/opt_range_mrr.cc
index 72c1fa0a8c9..bff96c7d4cb 100644
--- a/sql/opt_range_mrr.cc
+++ b/sql/opt_range_mrr.cc
@@ -268,8 +268,10 @@ walk_up_n_right:
range->end_key.keypart_map= make_prev_keypart_map(cur->max_key_parts);
if (!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag &&
- (uint)key_tree->part+1 == seq->param->table->key_info[seq->real_keyno].user_defined_key_parts &&
- (seq->param->table->key_info[seq->real_keyno].flags & HA_NOSAME) &&
+ (seq->real_keyno == MAX_KEY ||
+ ((uint)key_tree->part+1 ==
+ seq->param->table->key_info[seq->real_keyno].user_defined_key_parts &&
+ (seq->param->table->key_info[seq->real_keyno].flags & HA_NOSAME))) &&
range->start_key.length == range->end_key.length &&
!memcmp(seq->param->min_key,seq->param->max_key,range->start_key.length))
range->range_flag= UNIQUE_RANGE | (cur->min_key_flag & NULL_RANGE);
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index aa31c44a385..7d6d58a3414 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -666,6 +666,9 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
8. No execution method was already chosen (by a prepared statement)
9. Parent select is not a table-less select
10. Neither parent nor child select have STRAIGHT_JOIN option.
+   11. It is the first optimisation (the subquery could be moved out of the
+        ON clause during the first optimisation and then be considered for
+        SJ on the second, when it is too late)
*/
if (optimizer_flag(thd, OPTIMIZER_SWITCH_SEMIJOIN) &&
in_subs && // 1
@@ -679,7 +682,8 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
select_lex->outer_select()->leaf_tables.elements && // 9
!((join->select_options | // 10
select_lex->outer_select()->join->select_options) // 10
- & SELECT_STRAIGHT_JOIN)) // 10
+ & SELECT_STRAIGHT_JOIN) && // 10
+ select_lex->first_cond_optimization) // 11
{
DBUG_PRINT("info", ("Subquery is semi-join conversion candidate"));
@@ -1509,6 +1513,9 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred)
*/
parent_lex->leaf_tables.concat(&subq_lex->leaf_tables);
+ if (subq_lex->options & OPTION_SCHEMA_TABLE)
+ parent_lex->options |= OPTION_SCHEMA_TABLE;
+
/*
Same as above for next_local chain
(a theory: a next_local chain always starts with ::leaf_tables
@@ -1726,6 +1733,9 @@ static bool convert_subq_to_jtbm(JOIN *parent_join,
*/
parent_lex->leaf_tables.push_back(jtbm);
+ if (subq_pred->unit->first_select()->options & OPTION_SCHEMA_TABLE)
+ parent_lex->options |= OPTION_SCHEMA_TABLE;
+
/*
Same as above for TABLE_LIST::next_local chain
(a theory: a next_local chain always starts with ::leaf_tables
@@ -2545,6 +2555,10 @@ void advance_sj_state(JOIN *join, table_map remaining_tables, uint idx,
/* Mark strategy as used */
(*strategy)->mark_used();
pos->sj_strategy= sj_strategy;
+ if (sj_strategy == SJ_OPT_MATERIALIZE)
+ join->sjm_lookup_tables |= handled_fanout;
+ else
+ join->sjm_lookup_tables &= ~handled_fanout;
*current_read_time= read_time;
*current_record_count= rec_count;
join->cur_dups_producing_tables &= ~handled_fanout;
@@ -3069,6 +3083,13 @@ void restore_prev_sj_state(const table_map remaining_tables,
const JOIN_TAB *tab, uint idx)
{
TABLE_LIST *emb_sj_nest;
+
+ if (tab->emb_sj_nest)
+ {
+ table_map subq_tables= tab->emb_sj_nest->sj_inner_tables;
+ tab->join->sjm_lookup_tables &= ~subq_tables;
+ }
+
if ((emb_sj_nest= tab->emb_sj_nest))
{
/* If we're removing the last SJ-inner table, remove the sj-nest */
@@ -3246,6 +3267,7 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
uint tablenr;
table_map remaining_tables= 0;
table_map handled_tabs= 0;
+ join->sjm_lookup_tables= 0;
for (tablenr= table_count - 1 ; tablenr != join->const_tables - 1; tablenr--)
{
POSITION *pos= join->best_positions + tablenr;
@@ -3271,6 +3293,7 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
first= tablenr - sjm->tables + 1;
join->best_positions[first].n_sj_tables= sjm->tables;
join->best_positions[first].sj_strategy= SJ_OPT_MATERIALIZE;
+ join->sjm_lookup_tables|= s->table->map;
}
else if (pos->sj_strategy == SJ_OPT_MATERIALIZE_SCAN)
{
@@ -3885,7 +3908,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
&tmpname, (uint) strlen(path)+1,
&group_buff, (!using_unique_constraint ?
uniq_tuple_length_arg : 0),
- &bitmaps, bitmap_buffer_size(1)*3,
+ &bitmaps, bitmap_buffer_size(1)*5,
NullS))
{
if (temp_pool_slot != MY_BIT_NONE)
@@ -4096,7 +4119,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
recinfo++;
if (share->db_type() == TMP_ENGINE_HTON)
{
- if (create_internal_tmp_table(table, keyinfo, start_recinfo, &recinfo, 0, 0))
+ if (create_internal_tmp_table(table, keyinfo, start_recinfo, &recinfo, 0))
goto err;
}
if (open_tmp_table(table))
@@ -4216,9 +4239,13 @@ int SJ_TMP_TABLE::sj_weedout_check_row(THD *thd)
/* create_internal_tmp_table_from_heap will generate error if needed */
if (!tmp_table->file->is_fatal_error(error, HA_CHECK_DUP))
DBUG_RETURN(1); /* Duplicate */
+
+ bool is_duplicate;
if (create_internal_tmp_table_from_heap(thd, tmp_table, start_recinfo,
- &recinfo, error, 1))
+ &recinfo, error, 1, &is_duplicate))
DBUG_RETURN(-1);
+ if (is_duplicate)
+ DBUG_RETURN(1);
}
DBUG_RETURN(0);
}
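The sjm_lookup_tables bookkeeping added in this file uses the server's table_map bitmask, one bit per table of the join. A minimal standalone sketch of the add/remove idiom used when the MATERIALIZE strategy is picked and when the state is rolled back:

  #include <cstdint>
  #include <cstdio>

  typedef uint64_t table_map;           /* one bit per table in the join */

  int main()
  {
    table_map sjm_lookup_tables= 0;
    table_map handled_fanout= (1ULL << 2) | (1ULL << 3);  /* two inner tables */

    sjm_lookup_tables|= handled_fanout;   /* strategy == SJ_OPT_MATERIALIZE */
    printf("after add:    0x%llx\n", (unsigned long long) sjm_lookup_tables);

    sjm_lookup_tables&= ~handled_fanout;  /* other strategy, or state restore */
    printf("after remove: 0x%llx\n", (unsigned long long) sjm_lookup_tables);
    return 0;
  }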
diff --git a/sql/opt_subselect.h b/sql/opt_subselect.h
index 007ce2e6d15..01da437504b 100644
--- a/sql/opt_subselect.h
+++ b/sql/opt_subselect.h
@@ -283,6 +283,7 @@ public:
{
pos->records_read= best_loose_scan_records;
pos->key= best_loose_scan_start_key;
+ pos->cond_selectivity= 1.0;
pos->loosescan_picker.loosescan_key= best_loose_scan_key;
pos->loosescan_picker.loosescan_parts= best_max_loose_keypart + 1;
pos->use_join_buffer= FALSE;
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 5d78b689e58..b8d39057ba8 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -84,7 +84,7 @@ static ulonglong get_exact_record_count(List<TABLE_LIST> &tables)
while ((tl= ti++))
{
ha_rows tmp= tl->table->file->records();
- if ((tmp == HA_POS_ERROR))
+ if (tmp == HA_POS_ERROR)
return ULONGLONG_MAX;
count*= tmp;
}
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc
index afb07b9a6cb..7454e756416 100644
--- a/sql/opt_table_elimination.cc
+++ b/sql/opt_table_elimination.cc
@@ -1482,7 +1482,7 @@ void check_equality(Dep_analysis_context *ctx, Dep_module_expr **eq_mod,
collation of the operation differ from the field collation.
*/
if (field->cmp_type() == STRING_RESULT &&
- ((Field_str*)field)->charset() != cond->compare_collation())
+ field->charset() != cond->compare_collation())
return;
}
}
diff --git a/sql/partition_element.h b/sql/partition_element.h
index 87f3d00e68c..4f03d91035a 100644
--- a/sql/partition_element.h
+++ b/sql/partition_element.h
@@ -1,7 +1,7 @@
#ifndef PARTITION_ELEMENT_INCLUDED
#define PARTITION_ELEMENT_INCLUDED
-/* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -107,9 +107,8 @@ public:
enum partition_state part_state;
uint16 nodegroup_id;
bool has_null_value;
- /* signed_flag and max_value only relevant for subpartitions */
- bool signed_flag;
- bool max_value;
+ bool signed_flag; // Range value signed
+ bool max_value; // MAXVALUE range
partition_element()
: part_max_rows(0), part_min_rows(0), range_value(0),
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 1e351b97d8e..6556d50b218 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -1,5 +1,4 @@
-/*
- Copyright (c) 2006, 2010, Oracle and/or its affiliates.
+/* Copyright (c) 2006, 2013, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -985,6 +984,48 @@ error:
/*
+ A support function to check if a partition element's name is unique
+
+ SYNOPSIS
+ has_unique_name()
+ partition_element element to check
+
+ RETURN VALUES
+ TRUE Has unique name
+ FALSE Doesn't
+*/
+
+bool partition_info::has_unique_name(partition_element *element)
+{
+ DBUG_ENTER("partition_info::has_unique_name");
+
+ const char *name_to_check= element->partition_name;
+ List_iterator<partition_element> parts_it(partitions);
+
+ partition_element *el;
+ while ((el= (parts_it++)))
+ {
+ if (!(my_strcasecmp(system_charset_info, el->partition_name,
+ name_to_check)) && el != element)
+ DBUG_RETURN(FALSE);
+
+ if (!el->subpartitions.is_empty())
+ {
+ partition_element *sub_el;
+ List_iterator<partition_element> subparts_it(el->subpartitions);
+ while ((sub_el= (subparts_it++)))
+ {
+ if (!(my_strcasecmp(system_charset_info, sub_el->partition_name,
+ name_to_check)) && sub_el != element)
+ DBUG_RETURN(FALSE);
+ }
+ }
+ }
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
Check that the partition/subpartition is setup to use the correct
storage engine
SYNOPSIS
@@ -2062,29 +2103,21 @@ bool check_partition_dirs(partition_info *part_info)
partition_element *subpart_elem;
while ((subpart_elem= sub_it++))
{
- if (test_if_data_home_dir(subpart_elem->data_file_name))
- goto dd_err;
- if (test_if_data_home_dir(subpart_elem->index_file_name))
- goto id_err;
+ if (error_if_data_home_dir(subpart_elem->data_file_name,
+ "DATA DIRECTORY") ||
+ error_if_data_home_dir(subpart_elem->index_file_name,
+ "INDEX DIRECTORY"))
+ return 1;
}
}
else
{
- if (test_if_data_home_dir(part_elem->data_file_name))
- goto dd_err;
- if (test_if_data_home_dir(part_elem->index_file_name))
- goto id_err;
+ if (error_if_data_home_dir(part_elem->data_file_name, "DATA DIRECTORY") ||
+ error_if_data_home_dir(part_elem->index_file_name, "INDEX DIRECTORY"))
+ return 1;
}
}
return 0;
-
-dd_err:
- my_error(ER_WRONG_ARGUMENTS,MYF(0),"DATA DIRECTORY");
- return 1;
-
-id_err:
- my_error(ER_WRONG_ARGUMENTS,MYF(0),"INDEX DIRECTORY");
- return 1;
}
@@ -2726,9 +2759,36 @@ int partition_info::fix_parser_data(THD *thd)
if (!(part_type == RANGE_PARTITION ||
part_type == LIST_PARTITION))
{
- /* Nothing to do for HASH/KEY partitioning */
+ if (part_type == HASH_PARTITION && list_of_part_fields)
+ {
+ /* KEY partitioning, check ALGORITHM = N. Should not pass the parser! */
+ if (key_algorithm > KEY_ALGORITHM_55)
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ DBUG_RETURN(true);
+ }
+ /* If not set, use DEFAULT = 2 for CREATE and ALTER! */
+ if ((thd_sql_command(thd) == SQLCOM_CREATE_TABLE ||
+ thd_sql_command(thd) == SQLCOM_ALTER_TABLE) &&
+ key_algorithm == KEY_ALGORITHM_NONE)
+ key_algorithm= KEY_ALGORITHM_55;
+ }
DBUG_RETURN(FALSE);
}
+ if (is_sub_partitioned() && list_of_subpart_fields)
+ {
+ /* KEY subpartitioning, check ALGORITHM = N. Should not pass the parser! */
+ if (key_algorithm > KEY_ALGORITHM_55)
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ DBUG_RETURN(true);
+ }
+ /* If not set, use DEFAULT = 2 for CREATE and ALTER! */
+ if ((thd_sql_command(thd) == SQLCOM_CREATE_TABLE ||
+ thd_sql_command(thd) == SQLCOM_ALTER_TABLE) &&
+ key_algorithm == KEY_ALGORITHM_NONE)
+ key_algorithm= KEY_ALGORITHM_55;
+ }
do
{
part_elem= it++;
@@ -2777,6 +2837,7 @@ int partition_info::fix_parser_data(THD *thd)
DBUG_RETURN(FALSE);
}
+
void partition_info::print_debug(const char *str, uint *value)
{
DBUG_ENTER("print_debug");
@@ -2831,4 +2892,9 @@ void partition_info::print_debug(const char *str, uint *value)
{
}
+bool check_partition_dirs(partition_info *part_info)
+{
+ return 0;
+}
+
#endif /* WITH_PARTITION_STORAGE_ENGINE */
diff --git a/sql/partition_info.h b/sql/partition_info.h
index e3e88a67a27..01f6b53a148 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -1,7 +1,7 @@
#ifndef PARTITION_INFO_INCLUDED
#define PARTITION_INFO_INCLUDED
-/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -210,6 +210,19 @@ public:
but mainly of use to handlers supporting partitioning.
*/
uint16 linear_hash_mask;
+ /*
+ PARTITION BY KEY ALGORITHM=N
+ Which algorithm to use for hashing the fields.
+ N = 1 - Use 5.1 hashing (numeric fields are hashed as binary)
+ N = 2 - Use 5.5 hashing (numeric fields are hashed like latin1 bytes)
+ */
+ enum enum_key_algorithm
+ {
+ KEY_ALGORITHM_NONE= 0,
+ KEY_ALGORITHM_51= 1,
+ KEY_ALGORITHM_55= 2
+ };
+ enum_key_algorithm key_algorithm;
bool use_default_partitions;
bool use_default_num_partitions;
@@ -260,6 +273,7 @@ public:
count_curr_subparts(0), part_error_code(0),
num_list_values(0), num_part_fields(0), num_subpart_fields(0),
num_full_part_fields(0), has_null_part_id(0), linear_hash_mask(0),
+ key_algorithm(KEY_ALGORITHM_NONE),
use_default_partitions(TRUE), use_default_num_partitions(TRUE),
use_default_subpartitions(TRUE), use_default_num_subpartitions(TRUE),
default_partitions_setup(FALSE), defined_max_value(FALSE),
diff --git a/sql/protocol.cc b/sql/protocol.cc
index ee90cc46941..effeee9b4aa 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -1427,7 +1427,7 @@ bool Protocol_binary::store(MYSQL_TIME *tm, int decimals)
DBUG_ASSERT(decimals == AUTO_SEC_PART_DIGITS ||
(decimals >= 0 && decimals <= TIME_SECOND_PART_DIGITS));
if (decimals != AUTO_SEC_PART_DIGITS)
- tm->second_part= sec_part_truncate(tm->second_part, decimals);
+ my_time_trunc(tm, decimals);
int4store(pos+7, tm->second_part);
if (tm->second_part)
length=11;
@@ -1469,7 +1469,7 @@ bool Protocol_binary::store_time(MYSQL_TIME *tm, int decimals)
DBUG_ASSERT(decimals == AUTO_SEC_PART_DIGITS ||
(decimals >= 0 && decimals <= TIME_SECOND_PART_DIGITS));
if (decimals != AUTO_SEC_PART_DIGITS)
- tm->second_part= sec_part_truncate(tm->second_part, decimals);
+ my_time_trunc(tm, decimals);
int4store(pos+8, tm->second_part);
if (tm->second_part)
length=12;
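Both hunks above hand the whole MYSQL_TIME value to my_time_trunc() instead of truncating second_part in place; the effect on the fractional seconds is the same truncation to the requested number of digits. A standalone sketch of that arithmetic (illustrative only, not the library routine):

  #include <cstdint>
  #include <cstdio>

  /* Truncate a microsecond value to 'decimals' fractional digits (0..6). */
  static uint32_t trunc_sec_part(uint32_t microseconds, unsigned decimals)
  {
    static const uint32_t scale[7]= {1000000, 100000, 10000, 1000, 100, 10, 1};
    return microseconds / scale[decimals] * scale[decimals];
  }

  int main()
  {
    printf("%u\n", trunc_sec_part(123456, 2));   /* prints 120000 */
    return 0;
  }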
@@ -1546,14 +1546,14 @@ bool Protocol_binary::send_out_parameters(List<Item_param> *sp_params)
/* Restore THD::server_status. */
thd->server_status&= ~SERVER_PS_OUT_PARAMS;
+ /* Send EOF-packet. */
+ net_send_eof(thd, thd->server_status, 0);
+
/*
Reset SERVER_MORE_RESULTS_EXISTS bit, because this is the last packet
for sure.
*/
thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
- /* Send EOF-packet. */
- net_send_eof(thd, thd->server_status, 0);
-
return FALSE;
}
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 86f19f1b28e..334de1337d6 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -87,14 +87,15 @@ void change_rpl_status(ulong from_status, ulong to_status)
void unregister_slave(THD* thd, bool only_mine, bool need_mutex)
{
- if (thd->server_id)
+ uint32 thd_server_id= thd->variables.server_id;
+ if (thd_server_id)
{
if (need_mutex)
mysql_mutex_lock(&LOCK_slave_list);
SLAVE_INFO* old_si;
if ((old_si = (SLAVE_INFO*)my_hash_search(&slave_list,
- (uchar*)&thd->server_id, 4)) &&
+ (uchar*)&thd_server_id, 4)) &&
(!only_mine || old_si->thd == thd))
my_hash_delete(&slave_list, (uchar*)old_si);
@@ -125,7 +126,7 @@ int register_slave(THD* thd, uchar* packet, uint packet_length)
if (!(si = (SLAVE_INFO*)my_malloc(sizeof(SLAVE_INFO), MYF(MY_WME))))
goto err2;
- thd->server_id= si->server_id= uint4korr(p);
+ thd->variables.server_id= si->server_id= uint4korr(p);
p+= 4;
get_object(p,si->host, "Failed to register slave: too long 'report-host'");
get_object(p,si->user, "Failed to register slave: too long 'report-user'");
@@ -143,7 +144,7 @@ int register_slave(THD* thd, uchar* packet, uint packet_length)
// si->rpl_recovery_rank= uint4korr(p);
p += 4;
if (!(si->master_id= uint4korr(p)))
- si->master_id= server_id;
+ si->master_id= global_system_variables.server_id;
si->thd= thd;
mysql_mutex_lock(&LOCK_slave_list);
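register_slave() above pulls server_id and master_id out of the registration packet with uint4korr(), the server's 4-byte little-endian read. A standalone equivalent:

  #include <cstdint>
  #include <cstdio>

  /* Read a 4-byte little-endian value, as uint4korr() does. */
  static uint32_t read_le32(const unsigned char *p)
  {
    return (uint32_t) p[0] |
           ((uint32_t) p[1] << 8) |
           ((uint32_t) p[2] << 16) |
           ((uint32_t) p[3] << 24);
  }

  int main()
  {
    const unsigned char packet[4]= { 0x39, 0x30, 0x00, 0x00 };  /* 12345 */
    printf("server_id=%u\n", read_le32(packet));
    return 0;
  }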
diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc
index f2bd036896d..72e7770b6ee 100644
--- a/sql/rpl_filter.cc
+++ b/sql/rpl_filter.cc
@@ -156,14 +156,15 @@ Rpl_filter::db_ok(const char* db)
DBUG_RETURN(1); // Ok to replicate if the user puts no constraints
/*
- If the user has specified restrictions on which databases to replicate
- and db was not selected, do not replicate.
+ Previous behaviour "if the user has specified restrictions on which
+ databases to replicate and db was not selected, do not replicate" has
+ been replaced with "do replicate".
+    Since the filtering criteria are not equal to "NULL", the statement
+    should be logged into the binlog.
*/
if (!db)
- {
- DBUG_PRINT("exit", ("Don't replicate"));
- DBUG_RETURN(0);
- }
+ DBUG_RETURN(1);
+
if (!do_db.is_empty()) // if the do's are not empty
{
I_List_iterator<i_string> it(do_db);
@@ -734,6 +735,18 @@ Rpl_filter::get_rewrite_db(const char* db, size_t *new_len)
}
+void
+Rpl_filter::copy_rewrite_db(Rpl_filter *from)
+{
+ I_List_iterator<i_string_pair> it(from->rewrite_db);
+ i_string_pair* tmp;
+ DBUG_ASSERT(rewrite_db.is_empty());
+
+ /* TODO: Add memory checking here and in all add_xxxx functions ! */
+ while ((tmp=it++))
+ add_db_rewrite(tmp->key, tmp->val);
+}
+
I_List<i_string>*
Rpl_filter::get_do_db()
{
diff --git a/sql/rpl_filter.h b/sql/rpl_filter.h
index 2eb0340b714..65d11cfb6e6 100644
--- a/sql/rpl_filter.h
+++ b/sql/rpl_filter.h
@@ -88,6 +88,7 @@ public:
bool rewrite_db_is_empty();
const char* get_rewrite_db(const char* db, size_t *new_len);
+ void copy_rewrite_db(Rpl_filter *from);
I_List<i_string>* get_do_db();
I_List<i_string>* get_ignore_db();
@@ -139,7 +140,7 @@ private:
I_List<i_string_pair> rewrite_db;
};
-extern Rpl_filter *rpl_filter;
+extern Rpl_filter *global_rpl_filter;
extern Rpl_filter *binlog_filter;
#endif // RPL_FILTER_H
diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc
new file mode 100644
index 00000000000..4783fb763c8
--- /dev/null
+++ b/sql/rpl_gtid.cc
@@ -0,0 +1,1431 @@
+/* Copyright (c) 2013, Kristian Nielsen and MariaDB Services Ab.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+
+/* Definitions for MariaDB global transaction ID (GTID). */
+
+
+#include "sql_priv.h"
+#include "my_sys.h"
+#include "unireg.h"
+#include "my_global.h"
+#include "sql_base.h"
+#include "sql_parse.h"
+#include "key.h"
+#include "rpl_gtid.h"
+#include "rpl_rli.h"
+
+
+const LEX_STRING rpl_gtid_slave_state_table_name=
+ { C_STRING_WITH_LEN("gtid_slave_pos") };
+
+
+void
+rpl_slave_state::update_state_hash(uint64 sub_id, rpl_gtid *gtid)
+{
+ int err;
+ /*
+ Add the gtid to the HASH in the replication slave state.
+
+ We must do this only _after_ commit, so that for parallel replication,
+ there will not be an attempt to delete the corresponding table row before
+ it is even committed.
+ */
+ lock();
+ err= update(gtid->domain_id, gtid->server_id, sub_id, gtid->seq_no);
+ unlock();
+ if (err)
+ {
+ sql_print_warning("Slave: Out of memory during slave state maintenance. "
+ "Some no longer necessary rows in table "
+ "mysql.%s may be left undeleted.",
+ rpl_gtid_slave_state_table_name.str);
+ /*
+ Such failure is not fatal. We will fail to delete the row for this
+ GTID, but it will do no harm and will be removed automatically on next
+ server restart.
+ */
+ }
+}
+
+
+int
+rpl_slave_state::record_and_update_gtid(THD *thd, Relay_log_info *rli)
+{
+ uint64 sub_id;
+
+ /*
+ Update the GTID position, if we have it and did not already update
+ it in a GTID transaction.
+ */
+ if ((sub_id= rli->gtid_sub_id))
+ {
+ rli->gtid_sub_id= 0;
+ if (record_gtid(thd, &rli->current_gtid, sub_id, false, false))
+ return 1;
+ update_state_hash(sub_id, &rli->current_gtid);
+ }
+ return 0;
+}
+
+
+rpl_slave_state::rpl_slave_state()
+ : inited(false), loaded(false)
+{
+ my_hash_init(&hash, &my_charset_bin, 32, offsetof(element, domain_id),
+ sizeof(uint32), NULL, my_free, HASH_UNIQUE);
+}
+
+
+rpl_slave_state::~rpl_slave_state()
+{
+}
+
+
+void
+rpl_slave_state::init()
+{
+ DBUG_ASSERT(!inited);
+ mysql_mutex_init(key_LOCK_slave_state, &LOCK_slave_state, MY_MUTEX_INIT_SLOW);
+ inited= true;
+}
+
+
+void
+rpl_slave_state::truncate_hash()
+{
+ uint32 i;
+
+ for (i= 0; i < hash.records; ++i)
+ {
+ element *e= (element *)my_hash_element(&hash, i);
+ list_element *l= e->list;
+ list_element *next;
+ while (l)
+ {
+ next= l->next;
+ my_free(l);
+ l= next;
+ }
+ /* The element itself is freed by the hash element free function. */
+ }
+ my_hash_reset(&hash);
+}
+
+void
+rpl_slave_state::deinit()
+{
+ if (!inited)
+ return;
+ truncate_hash();
+ my_hash_free(&hash);
+ mysql_mutex_destroy(&LOCK_slave_state);
+}
+
+
+int
+rpl_slave_state::update(uint32 domain_id, uint32 server_id, uint64 sub_id,
+ uint64 seq_no)
+{
+ element *elem= NULL;
+ list_element *list_elem= NULL;
+
+ if (!(elem= get_element(domain_id)))
+ return 1;
+
+ if (!(list_elem= (list_element *)my_malloc(sizeof(*list_elem), MYF(MY_WME))))
+ return 1;
+ list_elem->server_id= server_id;
+ list_elem->sub_id= sub_id;
+ list_elem->seq_no= seq_no;
+
+ elem->add(list_elem);
+ return 0;
+}
+
+
+struct rpl_slave_state::element *
+rpl_slave_state::get_element(uint32 domain_id)
+{
+ struct element *elem;
+
+ elem= (element *)my_hash_search(&hash, (const uchar *)&domain_id, 0);
+ if (elem)
+ return elem;
+
+ if (!(elem= (element *)my_malloc(sizeof(*elem), MYF(MY_WME))))
+ return NULL;
+ elem->list= NULL;
+ elem->last_sub_id= 0;
+ elem->domain_id= domain_id;
+ if (my_hash_insert(&hash, (uchar *)elem))
+ {
+ my_free(elem);
+ return NULL;
+ }
+ return elem;
+}
+
+
+int
+rpl_slave_state::put_back_list(uint32 domain_id, list_element *list)
+{
+ element *e;
+ if (!(e= (element *)my_hash_search(&hash, (const uchar *)&domain_id, 0)))
+ return 1;
+ while (list)
+ {
+ list_element *next= list->next;
+ e->add(list);
+ list= next;
+ }
+ return 0;
+}
+
+
+int
+rpl_slave_state::truncate_state_table(THD *thd)
+{
+ TABLE_LIST tlist;
+ int err= 0;
+ TABLE *table;
+
+ tlist.init_one_table(STRING_WITH_LEN("mysql"),
+ rpl_gtid_slave_state_table_name.str,
+ rpl_gtid_slave_state_table_name.length,
+ NULL, TL_WRITE);
+ if (!(err= open_and_lock_tables(thd, &tlist, FALSE, 0)))
+ {
+ table= tlist.table;
+ table->no_replicate= 1;
+ err= table->file->ha_truncate();
+
+ if (err)
+ {
+ ha_rollback_trans(thd, FALSE);
+ close_thread_tables(thd);
+ ha_rollback_trans(thd, TRUE);
+ }
+ else
+ {
+ ha_commit_trans(thd, FALSE);
+ close_thread_tables(thd);
+ ha_commit_trans(thd, TRUE);
+ }
+ thd->mdl_context.release_transactional_locks();
+ }
+
+ return err;
+}
+
+
+static const TABLE_FIELD_TYPE mysql_rpl_slave_state_coltypes[4]= {
+ { { C_STRING_WITH_LEN("domain_id") },
+ { C_STRING_WITH_LEN("int(10) unsigned") },
+ {NULL, 0} },
+ { { C_STRING_WITH_LEN("sub_id") },
+ { C_STRING_WITH_LEN("bigint(20) unsigned") },
+ {NULL, 0} },
+ { { C_STRING_WITH_LEN("server_id") },
+ { C_STRING_WITH_LEN("int(10) unsigned") },
+ {NULL, 0} },
+ { { C_STRING_WITH_LEN("seq_no") },
+ { C_STRING_WITH_LEN("bigint(20) unsigned") },
+ {NULL, 0} },
+};
+
+static const uint mysql_rpl_slave_state_pk_parts[]= {0, 1};
+
+static const TABLE_FIELD_DEF mysql_gtid_slave_pos_tabledef= {
+ array_elements(mysql_rpl_slave_state_coltypes),
+ mysql_rpl_slave_state_coltypes,
+ array_elements(mysql_rpl_slave_state_pk_parts),
+ mysql_rpl_slave_state_pk_parts
+};
+
+class Gtid_db_intact : public Table_check_intact
+{
+protected:
+ void report_error(uint, const char *fmt, ...)
+ {
+ va_list args;
+ va_start(args, fmt);
+ error_log_print(ERROR_LEVEL, fmt, args);
+ va_end(args);
+ }
+};
+
+static Gtid_db_intact gtid_table_intact;
+
+/*
+ Check that the mysql.gtid_slave_pos table has the correct definition.
+*/
+int
+gtid_check_rpl_slave_state_table(TABLE *table)
+{
+ int err;
+
+ if ((err= gtid_table_intact.check(table, &mysql_gtid_slave_pos_tabledef)))
+ my_error(ER_GTID_OPEN_TABLE_FAILED, MYF(0), "mysql",
+ rpl_gtid_slave_state_table_name.str);
+ return err;
+}
+
+
+/*
+ Write a gtid to the replication slave state table.
+
+ Do it as part of the transaction, to get slave crash safety, or as a separate
+ transaction if !in_transaction (eg. MyISAM or DDL).
+
+ gtid The global transaction id for this event group.
+  sub_id  Value allocated within the domain when the event group was
+ read (sub_id must be consistent with commit order in master binlog).
+
+  Note that the caller must later ensure that the new gtid and sub_id are inserted
+ into the appropriate HASH element with rpl_slave_state.add(), so that it can
+ be deleted later. But this must only be done after COMMIT if in transaction.
+*/
+int
+rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
+ bool in_transaction, bool in_statement)
+{
+ TABLE_LIST tlist;
+ int err= 0;
+ bool table_opened= false;
+ TABLE *table;
+ list_element *elist= 0, *next;
+ element *elem;
+ ulonglong thd_saved_option= thd->variables.option_bits;
+ Query_tables_list lex_backup;
+
+ if (unlikely(!loaded))
+ {
+ /*
+ Probably the mysql.gtid_slave_pos table is missing (eg. upgrade) or
+ corrupt.
+
+ We already complained loudly about this, but we can try to continue
+ until the DBA fixes it.
+ */
+ return 0;
+ }
+
+ if (!in_statement)
+ mysql_reset_thd_for_next_command(thd, 0);
+
+ DBUG_EXECUTE_IF("gtid_inject_record_gtid",
+ {
+ my_error(ER_CANNOT_UPDATE_GTID_STATE, MYF(0));
+ return 1;
+ } );
+
+ thd->lex->reset_n_backup_query_tables_list(&lex_backup);
+ tlist.init_one_table(STRING_WITH_LEN("mysql"),
+ rpl_gtid_slave_state_table_name.str,
+ rpl_gtid_slave_state_table_name.length,
+ NULL, TL_WRITE);
+ if ((err= open_and_lock_tables(thd, &tlist, FALSE, 0)))
+ goto end;
+ table_opened= true;
+ table= tlist.table;
+
+ if ((err= gtid_check_rpl_slave_state_table(table)))
+ goto end;
+
+ table->no_replicate= 1;
+ if (!in_transaction)
+ thd->variables.option_bits&=
+ ~(ulonglong)(OPTION_NOT_AUTOCOMMIT|OPTION_BEGIN);
+
+ bitmap_set_all(table->write_set);
+
+ table->field[0]->store((ulonglong)gtid->domain_id, true);
+ table->field[1]->store(sub_id, true);
+ table->field[2]->store((ulonglong)gtid->server_id, true);
+ table->field[3]->store(gtid->seq_no, true);
+ DBUG_EXECUTE_IF("inject_crash_before_write_rpl_slave_state", DBUG_SUICIDE(););
+ if ((err= table->file->ha_write_row(table->record[0])))
+ {
+ table->file->print_error(err, MYF(0));
+ goto end;
+ }
+
+  if (opt_bin_log &&
+ (err= mysql_bin_log.bump_seq_no_counter_if_needed(gtid->domain_id,
+ gtid->seq_no)))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto end;
+ }
+
+ lock();
+ if ((elem= get_element(gtid->domain_id)) == NULL)
+ {
+ unlock();
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ err= 1;
+ goto end;
+ }
+ if ((elist= elem->grab_list()) != NULL)
+ {
+ /* Delete any old stuff, but keep around the most recent one. */
+ list_element *cur= elist;
+ uint64 best_sub_id= cur->sub_id;
+ list_element **best_ptr_ptr= &elist;
+ while ((next= cur->next))
+ {
+ if (next->sub_id > best_sub_id)
+ {
+ best_sub_id= next->sub_id;
+ best_ptr_ptr= &cur->next;
+ }
+ cur= next;
+ }
+ /*
+ Delete the highest sub_id element from the old list, and put it back as
+ the single-element new list.
+ */
+ cur= *best_ptr_ptr;
+ *best_ptr_ptr= cur->next;
+ cur->next= NULL;
+ elem->list= cur;
+ }
+ unlock();
+
+ if (!elist)
+ goto end;
+
+ /* Now delete any already committed rows. */
+ bitmap_set_bit(table->read_set, table->field[0]->field_index);
+ bitmap_set_bit(table->read_set, table->field[1]->field_index);
+
+ if ((err= table->file->ha_index_init(0, 0)))
+ {
+ table->file->print_error(err, MYF(0));
+ goto end;
+ }
+ while (elist)
+ {
+ uchar key_buffer[4+8];
+
+ DBUG_EXECUTE_IF("gtid_slave_pos_simulate_failed_delete",
+ { err= ENOENT;
+ table->file->print_error(err, MYF(0));
+ /* `break' does not work inside DBUG_EXECUTE_IF */
+ goto dbug_break; });
+
+ next= elist->next;
+
+ table->field[1]->store(elist->sub_id, true);
+ /* domain_id is already set in table->record[0] from write_row() above. */
+ key_copy(key_buffer, table->record[0], &table->key_info[0], 0, false);
+ if (table->file->ha_index_read_map(table->record[1], key_buffer,
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT))
+ /* We cannot find the row, assume it is already deleted. */
+ ;
+ else if ((err= table->file->ha_delete_row(table->record[1])))
+ table->file->print_error(err, MYF(0));
+ /*
+ In case of error, we still discard the element from the list. We do
+ not want to endlessly error on the same element in case of table
+ corruption or such.
+ */
+ my_free(elist);
+ elist= next;
+ if (err)
+ break;
+ }
+IF_DBUG(dbug_break:, )
+ table->file->ha_index_end();
+
+end:
+
+ if (table_opened)
+ {
+ if (err)
+ {
+ /*
+ If error, we need to put any remaining elist back into the HASH so we
+ can do another delete attempt later.
+ */
+ if (elist)
+ {
+ lock();
+ put_back_list(gtid->domain_id, elist);
+ unlock();
+ }
+
+ ha_rollback_trans(thd, FALSE);
+ close_thread_tables(thd);
+ }
+ else
+ {
+ ha_commit_trans(thd, FALSE);
+ close_thread_tables(thd);
+ }
+ if (in_transaction)
+ thd->mdl_context.release_statement_locks();
+ else
+ thd->mdl_context.release_transactional_locks();
+ }
+ thd->lex->restore_backup_query_tables_list(&lex_backup);
+ thd->variables.option_bits= thd_saved_option;
+ return err;
+}
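The pruning inside record_gtid() keeps only the list entry with the highest sub_id for the domain and deletes the table rows for the rest. A standalone sketch of that unlink-the-maximum step on a singly linked list (types are illustrative):

  #include <cstddef>
  #include <cstdint>

  struct list_element { list_element *next; uint64_t sub_id; };

  /*
    Unlink the entry with the highest sub_id and return it; everything else
    stays on the list (and would then have its table row deleted).
  */
  static list_element *take_most_recent(list_element **head)
  {
    if (*head == NULL)
      return NULL;
    list_element **best_ptr_ptr= head;
    uint64_t best_sub_id= (*head)->sub_id;
    for (list_element *cur= *head; cur->next; cur= cur->next)
    {
      if (cur->next->sub_id > best_sub_id)
      {
        best_sub_id= cur->next->sub_id;
        best_ptr_ptr= &cur->next;
      }
    }
    list_element *best= *best_ptr_ptr;
    *best_ptr_ptr= best->next;          /* unlink */
    best->next= NULL;
    return best;
  }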
+
+
+uint64
+rpl_slave_state::next_sub_id(uint32 domain_id)
+{
+ uint64 sub_id= 0;
+ element *elem;
+
+ lock();
+ elem= get_element(domain_id);
+ if (elem)
+ sub_id= ++elem->last_sub_id;
+ unlock();
+
+ return sub_id;
+}
+
+
+bool
+rpl_slave_state_tostring_helper(String *dest, const rpl_gtid *gtid, bool *first)
+{
+ if (*first)
+ *first= false;
+ else
+ if (dest->append(",",1))
+ return true;
+ return
+ dest->append_ulonglong(gtid->domain_id) ||
+ dest->append("-",1) ||
+ dest->append_ulonglong(gtid->server_id) ||
+ dest->append("-",1) ||
+ dest->append_ulonglong(gtid->seq_no);
+}
+
+
+int
+rpl_slave_state::iterate(int (*cb)(rpl_gtid *, void *), void *data,
+ rpl_gtid *extra_gtids, uint32 num_extra)
+{
+ uint32 i;
+ HASH gtid_hash;
+ uchar *rec;
+ rpl_gtid *gtid;
+ int res= 1;
+
+ my_hash_init(&gtid_hash, &my_charset_bin, 32, offsetof(rpl_gtid, domain_id),
+ sizeof(uint32), NULL, NULL, HASH_UNIQUE);
+ for (i= 0; i < num_extra; ++i)
+ if (extra_gtids[i].server_id == global_system_variables.server_id &&
+ my_hash_insert(&gtid_hash, (uchar *)(&extra_gtids[i])))
+ goto err;
+
+ lock();
+
+ for (i= 0; i < hash.records; ++i)
+ {
+ uint64 best_sub_id;
+ rpl_gtid best_gtid;
+ element *e= (element *)my_hash_element(&hash, i);
+ list_element *l= e->list;
+
+ if (!l)
+ continue; /* Nothing here */
+
+ best_gtid.domain_id= e->domain_id;
+ best_gtid.server_id= l->server_id;
+ best_gtid.seq_no= l->seq_no;
+ best_sub_id= l->sub_id;
+ while ((l= l->next))
+ {
+ if (l->sub_id > best_sub_id)
+ {
+ best_sub_id= l->sub_id;
+ best_gtid.server_id= l->server_id;
+ best_gtid.seq_no= l->seq_no;
+ }
+ }
+
+ /* Check if we have something newer in the extra list. */
+ rec= my_hash_search(&gtid_hash, (const uchar *)&best_gtid.domain_id, 0);
+ if (rec)
+ {
+ gtid= (rpl_gtid *)rec;
+ if (gtid->seq_no > best_gtid.seq_no)
+ memcpy(&best_gtid, gtid, sizeof(best_gtid));
+ if (my_hash_delete(&gtid_hash, rec))
+ {
+ unlock();
+ goto err;
+ }
+ }
+
+ if ((res= (*cb)(&best_gtid, data)))
+ {
+ unlock();
+ goto err;
+ }
+ }
+
+ unlock();
+
+ /* Also add any remaining extra domain_ids. */
+ for (i= 0; i < gtid_hash.records; ++i)
+ {
+ gtid= (rpl_gtid *)my_hash_element(&gtid_hash, i);
+ if ((res= (*cb)(gtid, data)))
+ goto err;
+ }
+
+ res= 0;
+
+err:
+ my_hash_free(&gtid_hash);
+
+ return res;
+}
+
+
+struct rpl_slave_state_tostring_data {
+ String *dest;
+ bool first;
+};
+static int
+rpl_slave_state_tostring_cb(rpl_gtid *gtid, void *data)
+{
+ rpl_slave_state_tostring_data *p= (rpl_slave_state_tostring_data *)data;
+ return rpl_slave_state_tostring_helper(p->dest, gtid, &p->first);
+}
+
+
+/*
+ Prepare the current slave state as a string, suitable for sending to the
+ master to request to receive binlog events starting from that GTID state.
+
+ The state consists of the most recently applied GTID for each domain_id,
+ ie. the one with the highest sub_id within each domain_id.
+
+  Optionally, extra_gtids is a list of GTIDs from the binlog. This is used when
+  a server was previously a master and now needs to connect to a new master as
+  a slave. For each domain_id, if the GTID in the binlog was logged with our
+  own server_id _and_ has a higher seq_no than what is in the slave state,
+  then this should be used as the position to start replicating at. This
+  allows promoting a slave to new master, and connecting the old master as a
+  slave with MASTER_GTID_POS=AUTO.
+*/
+int
+rpl_slave_state::tostring(String *dest, rpl_gtid *extra_gtids, uint32 num_extra)
+{
+ struct rpl_slave_state_tostring_data data;
+ data.first= true;
+ data.dest= dest;
+
+ return iterate(rpl_slave_state_tostring_cb, &data, extra_gtids, num_extra);
+}
+
+
+/*
+ Lookup a domain_id in the current replication slave state.
+
+ Returns false if the domain_id has no entries in the slave state.
+ Otherwise returns true, and fills in out_gtid with the corresponding
+ GTID.
+*/
+bool
+rpl_slave_state::domain_to_gtid(uint32 domain_id, rpl_gtid *out_gtid)
+{
+ element *elem;
+ list_element *list;
+ uint64 best_sub_id;
+
+ lock();
+ elem= (element *)my_hash_search(&hash, (const uchar *)&domain_id, 0);
+ if (!elem || !(list= elem->list))
+ {
+ unlock();
+ return false;
+ }
+
+ out_gtid->domain_id= domain_id;
+ out_gtid->server_id= list->server_id;
+ out_gtid->seq_no= list->seq_no;
+ best_sub_id= list->sub_id;
+
+ while ((list= list->next))
+ {
+ if (best_sub_id > list->sub_id)
+ continue;
+ best_sub_id= list->sub_id;
+ out_gtid->server_id= list->server_id;
+ out_gtid->seq_no= list->seq_no;
+ }
+
+ unlock();
+ return true;
+}
+
+
+/*
+ Parse a GTID at the start of a string, and update the pointer to point
+ at the first character after the parsed GTID.
+
+ Returns 0 on ok, non-zero on parse error.
+*/
+static int
+gtid_parser_helper(char **ptr, char *end, rpl_gtid *out_gtid)
+{
+ char *q;
+ char *p= *ptr;
+ uint64 v1, v2, v3;
+ int err= 0;
+
+ q= end;
+ v1= (uint64)my_strtoll10(p, &q, &err);
+ if (err != 0 || v1 > (uint32)0xffffffff || q == end || *q != '-')
+ return 1;
+ p= q+1;
+ q= end;
+ v2= (uint64)my_strtoll10(p, &q, &err);
+ if (err != 0 || v2 > (uint32)0xffffffff || q == end || *q != '-')
+ return 1;
+ p= q+1;
+ q= end;
+ v3= (uint64)my_strtoll10(p, &q, &err);
+ if (err != 0)
+ return 1;
+
+ out_gtid->domain_id= v1;
+ out_gtid->server_id= v2;
+ out_gtid->seq_no= v3;
+ *ptr= q;
+ return 0;
+}
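gtid_parser_helper() above accepts the text form domain_id-server_id-seq_no, e.g. "0-1-100". A self-contained equivalent, with strtoull standing in for the server's my_strtoll10():

  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>

  struct gtid { uint32_t domain_id, server_id; uint64_t seq_no; };

  static bool parse_gtid(const char *s, gtid *out)
  {
    char *end;
    unsigned long long v1= strtoull(s, &end, 10);
    if (end == s || *end != '-' || v1 > 0xffffffffULL) return false;
    s= end + 1;
    unsigned long long v2= strtoull(s, &end, 10);
    if (end == s || *end != '-' || v2 > 0xffffffffULL) return false;
    s= end + 1;
    unsigned long long v3= strtoull(s, &end, 10);
    if (end == s) return false;
    out->domain_id= (uint32_t) v1;
    out->server_id= (uint32_t) v2;
    out->seq_no= v3;
    return true;
  }

  int main()
  {
    gtid g;
    if (parse_gtid("0-1-100", &g))
      printf("%u-%u-%llu\n", g.domain_id, g.server_id,
             (unsigned long long) g.seq_no);
    return 0;
  }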
+
+
+/*
+ Update the slave replication state with the GTID position obtained from
+ master when connecting with old-style (filename,offset) position.
+
+ If RESET is true then all existing entries are removed. Otherwise only
+ domain_ids mentioned in the STATE_FROM_MASTER are changed.
+
+ Returns 0 if ok, non-zero if error.
+*/
+int
+rpl_slave_state::load(THD *thd, char *state_from_master, size_t len,
+ bool reset, bool in_statement)
+{
+ char *end= state_from_master + len;
+
+ if (reset)
+ {
+ if (truncate_state_table(thd))
+ return 1;
+ truncate_hash();
+ }
+ if (state_from_master == end)
+ return 0;
+ for (;;)
+ {
+ rpl_gtid gtid;
+ uint64 sub_id;
+
+ if (gtid_parser_helper(&state_from_master, end, &gtid) ||
+ !(sub_id= next_sub_id(gtid.domain_id)) ||
+ record_gtid(thd, &gtid, sub_id, false, in_statement) ||
+ update(gtid.domain_id, gtid.server_id, sub_id, gtid.seq_no))
+ return 1;
+ if (state_from_master == end)
+ break;
+ if (*state_from_master != ',')
+ return 1;
+ ++state_from_master;
+ }
+ return 0;
+}
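load() walks a comma-separated list of such GTIDs, parsing each element and recording it. A standalone sketch of the list walk, with sscanf standing in for the parser above:

  #include <cstdio>

  int main()
  {
    const char *state= "0-1-100,1-2-5";   /* example state string */
    unsigned domain, server;
    unsigned long long seq;
    int consumed;

    while (sscanf(state, "%u-%u-%llu%n", &domain, &server, &seq, &consumed) == 3)
    {
      printf("domain %u: server %u, seq_no %llu\n", domain, server, seq);
      state+= consumed;
      if (*state != ',')
        break;
      ++state;
    }
    return 0;
  }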
+
+
+bool
+rpl_slave_state::is_empty()
+{
+ uint32 i;
+ bool result= true;
+
+ lock();
+ for (i= 0; i < hash.records; ++i)
+ {
+ element *e= (element *)my_hash_element(&hash, i);
+ if (e->list)
+ {
+ result= false;
+ break;
+ }
+ }
+ unlock();
+
+ return result;
+}
+
+
+rpl_binlog_state::rpl_binlog_state()
+{
+ my_hash_init(&hash, &my_charset_bin, 32, offsetof(element, domain_id),
+ sizeof(uint32), NULL, my_free, HASH_UNIQUE);
+ mysql_mutex_init(key_LOCK_binlog_state, &LOCK_binlog_state,
+ MY_MUTEX_INIT_SLOW);
+ initialized= 1;
+}
+
+
+void
+rpl_binlog_state::reset()
+{
+ uint32 i;
+
+ for (i= 0; i < hash.records; ++i)
+ my_hash_free(&((element *)my_hash_element(&hash, i))->hash);
+ my_hash_reset(&hash);
+}
+
+void rpl_binlog_state::free()
+{
+ if (initialized)
+ {
+ initialized= 0;
+ reset();
+ my_hash_free(&hash);
+ mysql_mutex_destroy(&LOCK_binlog_state);
+ }
+}
+
+
+bool
+rpl_binlog_state::load(struct rpl_gtid *list, uint32 count)
+{
+ uint32 i;
+
+ reset();
+ for (i= 0; i < count; ++i)
+ {
+ if (update(&(list[i]), false))
+ return true;
+ }
+ return false;
+}
+
+
+rpl_binlog_state::~rpl_binlog_state()
+{
+ free();
+}
+
+
+/*
+ Update replication state with a new GTID.
+
+ If the (domain_id, server_id) pair already exists, then the new GTID replaces
+ the old one for that domain id. Else a new entry is inserted.
+
+ Returns 0 for ok, 1 for error.
+*/
+int
+rpl_binlog_state::update(const struct rpl_gtid *gtid, bool strict)
+{
+ element *elem;
+
+ if ((elem= (element *)my_hash_search(&hash,
+ (const uchar *)(&gtid->domain_id), 0)))
+ {
+ if (strict && elem->last_gtid && elem->last_gtid->seq_no >= gtid->seq_no)
+ {
+ my_error(ER_GTID_STRICT_OUT_OF_ORDER, MYF(0), gtid->domain_id,
+ gtid->server_id, gtid->seq_no, elem->last_gtid->domain_id,
+ elem->last_gtid->server_id, elem->last_gtid->seq_no);
+ return 1;
+ }
+ if (elem->seq_no_counter < gtid->seq_no)
+ elem->seq_no_counter= gtid->seq_no;
+ if (!elem->update_element(gtid))
+ return 0;
+ }
+ else if (!alloc_element(gtid))
+ return 0;
+
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ return 1;
+}
+
+
+/*
+ Fill in a new GTID, allocating next sequence number, and update state
+ accordingly.
+*/
+int
+rpl_binlog_state::update_with_next_gtid(uint32 domain_id, uint32 server_id,
+ rpl_gtid *gtid)
+{
+ element *elem;
+
+ gtid->domain_id= domain_id;
+ gtid->server_id= server_id;
+
+ if ((elem= (element *)my_hash_search(&hash, (const uchar *)(&domain_id), 0)))
+ {
+ gtid->seq_no= ++elem->seq_no_counter;
+ if (!elem->update_element(gtid))
+ return 0;
+ }
+ else
+ {
+ gtid->seq_no= 1;
+ if (!alloc_element(gtid))
+ return 0;
+ }
+
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ return 1;
+}
+
+
+/* Helper functions for update. */
+int
+rpl_binlog_state::element::update_element(const rpl_gtid *gtid)
+{
+ rpl_gtid *lookup_gtid;
+
+ /*
+    By far the most common case is that successive events within the same
+ replication domain have the same server id (it changes only when
+ switching to a new master). So save a hash lookup in this case.
+ */
+ if (likely(last_gtid && last_gtid->server_id == gtid->server_id))
+ {
+ last_gtid->seq_no= gtid->seq_no;
+ return 0;
+ }
+
+ lookup_gtid= (rpl_gtid *)
+ my_hash_search(&hash, (const uchar *)&gtid->server_id, 0);
+ if (lookup_gtid)
+ {
+ lookup_gtid->seq_no= gtid->seq_no;
+ last_gtid= lookup_gtid;
+ return 0;
+ }
+
+ /* Allocate a new GTID and insert it. */
+ lookup_gtid= (rpl_gtid *)my_malloc(sizeof(*lookup_gtid), MYF(MY_WME));
+ if (!lookup_gtid)
+ return 1;
+ memcpy(lookup_gtid, gtid, sizeof(*lookup_gtid));
+ if (my_hash_insert(&hash, (const uchar *)lookup_gtid))
+ {
+ my_free(lookup_gtid);
+ return 1;
+ }
+ last_gtid= lookup_gtid;
+ return 0;
+}
+
+
+int
+rpl_binlog_state::alloc_element(const rpl_gtid *gtid)
+{
+ element *elem;
+ rpl_gtid *lookup_gtid;
+
+ /* First time we see this domain_id; allocate a new element. */
+ elem= (element *)my_malloc(sizeof(*elem), MYF(MY_WME));
+ lookup_gtid= (rpl_gtid *)my_malloc(sizeof(*lookup_gtid), MYF(MY_WME));
+ if (elem && lookup_gtid)
+ {
+ elem->domain_id= gtid->domain_id;
+ my_hash_init(&elem->hash, &my_charset_bin, 32,
+ offsetof(rpl_gtid, server_id), sizeof(uint32), NULL, my_free,
+ HASH_UNIQUE);
+ elem->last_gtid= lookup_gtid;
+ elem->seq_no_counter= gtid->seq_no;
+ memcpy(lookup_gtid, gtid, sizeof(*lookup_gtid));
+ if (0 == my_hash_insert(&elem->hash, (const uchar *)lookup_gtid))
+ {
+ lookup_gtid= NULL; /* Do not free. */
+ if (0 == my_hash_insert(&hash, (const uchar *)elem))
+ return 0;
+ }
+ my_hash_free(&elem->hash);
+ }
+
+ /* An error. */
+ if (elem)
+ my_free(elem);
+ if (lookup_gtid)
+ my_free(lookup_gtid);
+ return 1;
+}
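rpl_binlog_state keeps one element per domain_id, each holding a per-server_id hash of GTIDs plus the most recent GTID and a seq_no counter. An illustrative analogue using the standard library (the server uses its own HASH type and manual allocation instead):

  #include <cstdint>
  #include <unordered_map>

  struct gtid { uint32_t domain_id, server_id; uint64_t seq_no; };

  struct domain_state
  {
    std::unordered_map<uint32_t, gtid> per_server;  /* server_id -> last GTID */
    gtid     last_gtid;                             /* most recent in domain  */
    bool     has_last;
    uint64_t seq_no_counter;                        /* highest seq_no so far  */
    domain_state() : last_gtid(), has_last(false), seq_no_counter(0) {}
  };

  struct binlog_state
  {
    std::unordered_map<uint32_t, domain_state> domains;  /* domain_id -> state */

    void update(const gtid &g)
    {
      domain_state &d= domains[g.domain_id];  /* creates the entry if missing */
      d.per_server[g.server_id]= g;
      d.last_gtid= g;
      d.has_last= true;
      if (d.seq_no_counter < g.seq_no)
        d.seq_no_counter= g.seq_no;
    }
  };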
+
+
+/*
+ Check that a new GTID can be logged without creating an out-of-order
+ sequence number with existing GTIDs.
+*/
+bool
+rpl_binlog_state::check_strict_sequence(uint32 domain_id, uint32 server_id,
+ uint64 seq_no)
+{
+ element *elem;
+
+ if ((elem= (element *)my_hash_search(&hash,
+ (const uchar *)(&domain_id), 0)) &&
+ elem->last_gtid && elem->last_gtid->seq_no >= seq_no)
+ {
+ my_error(ER_GTID_STRICT_OUT_OF_ORDER, MYF(0), domain_id, server_id, seq_no,
+ elem->last_gtid->domain_id, elem->last_gtid->server_id,
+ elem->last_gtid->seq_no);
+ return 1;
+ }
+ return 0;
+}
+
+
+/*
+ When we see a new GTID that will not be binlogged (eg. slave thread
+ with --log-slave-updates=0), then we need to remember to allocate any
+ GTID seq_no of our own within that domain starting from there.
+
+ Returns 0 if ok, non-zero if out-of-memory.
+*/
+int
+rpl_binlog_state::bump_seq_no_if_needed(uint32 domain_id, uint64 seq_no)
+{
+ element *elem;
+
+ if ((elem= (element *)my_hash_search(&hash, (const uchar *)(&domain_id), 0)))
+ {
+ if (elem->seq_no_counter < seq_no)
+ elem->seq_no_counter= seq_no;
+ return 0;
+ }
+
+ /* We need to allocate a new, empty element to remember the next seq_no. */
+ if (!(elem= (element *)my_malloc(sizeof(*elem), MYF(MY_WME))))
+ return 1;
+
+ elem->domain_id= domain_id;
+ my_hash_init(&elem->hash, &my_charset_bin, 32,
+ offsetof(rpl_gtid, server_id), sizeof(uint32), NULL, my_free,
+ HASH_UNIQUE);
+ elem->last_gtid= NULL;
+ elem->seq_no_counter= seq_no;
+ if (0 == my_hash_insert(&hash, (const uchar *)elem))
+ return 0;
+
+ my_hash_free(&elem->hash);
+ my_free(elem);
+ return 1;
+}
+
+
+/*
+ Write binlog state to text file, so we can read it in again without having
+  to scan the last binlog file (normal shutdown/startup, not crash recovery).
+
+ The most recent GTID within each domain_id is written after any other GTID
+ within this domain.
+*/
+int
+rpl_binlog_state::write_to_iocache(IO_CACHE *dest)
+{
+ ulong i, j;
+ char buf[21];
+
+ for (i= 0; i < hash.records; ++i)
+ {
+ size_t res;
+ element *e= (element *)my_hash_element(&hash, i);
+ if (!e->last_gtid)
+ {
+ DBUG_ASSERT(e->hash.records == 0);
+ continue;
+ }
+ for (j= 0; j <= e->hash.records; ++j)
+ {
+ const rpl_gtid *gtid;
+ if (j < e->hash.records)
+ {
+ gtid= (const rpl_gtid *)my_hash_element(&e->hash, j);
+ if (gtid == e->last_gtid)
+ continue;
+ }
+ else
+ gtid= e->last_gtid;
+
+ longlong10_to_str(gtid->seq_no, buf, 10);
+ res= my_b_printf(dest, "%u-%u-%s\n", gtid->domain_id, gtid->server_id, buf);
+ if (res == (size_t) -1)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+
+int
+rpl_binlog_state::read_from_iocache(IO_CACHE *src)
+{
+ /* 10-digit - 10-digit - 20-digit \n \0 */
+ char buf[10+1+10+1+20+1+1];
+ char *p, *end;
+ rpl_gtid gtid;
+
+ reset();
+ for (;;)
+ {
+ size_t res= my_b_gets(src, buf, sizeof(buf));
+ if (!res)
+ break;
+ p= buf;
+ end= buf + res;
+ if (gtid_parser_helper(&p, end, &gtid))
+ return 1;
+ if (update(&gtid, false))
+ return 1;
+ }
+ return 0;
+}
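
The pair of functions above defines the on-disk text format of the binlog state: one GTID per line as domain_id-server_id-seq_no, and within each domain the most recent GTID is written last, so that replaying the lines through update() leaves last_gtid pointing at the right entry. The following is only an illustrative sketch of that ordering contract in plain C++; the types and names are invented for the example and this is not the server code.

// Illustrative only: per domain, print every stored GTID except the most
// recent one first, then the most recent one last -- the same ordering
// contract that write_to_iocache() documents above.
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct gtid { uint32_t domain_id, server_id; uint64_t seq_no; };

struct domain_state
{
  std::vector<gtid> per_server;  // one entry per server_id seen
  size_t last_idx;               // index of the most recently logged GTID
};

static void write_state(std::ostream &out,
                        const std::map<uint32_t, domain_state> &state)
{
  for (const auto &it : state)
  {
    const domain_state &d= it.second;
    for (size_t i= 0; i < d.per_server.size(); ++i)
      if (i != d.last_idx)
        out << d.per_server[i].domain_id << '-'
            << d.per_server[i].server_id << '-'
            << d.per_server[i].seq_no << '\n';
    const gtid &last= d.per_server[d.last_idx];   // most recent goes last
    out << last.domain_id << '-' << last.server_id << '-'
        << last.seq_no << '\n';
  }
}

int main()
{
  std::map<uint32_t, domain_state> state;
  state[0]= { { {0, 1, 99}, {0, 2, 112} }, 1 };   // 0-2-112 is most recent
  state[1]= { { {1, 4, 1022} }, 0 };
  write_state(std::cout, state);   // 0-1-99, then 0-2-112, then 1-4-1022
  return 0;
}
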
+
+
+rpl_gtid *
+rpl_binlog_state::find(uint32 domain_id, uint32 server_id)
+{
+ element *elem;
+ if (!(elem= (element *)my_hash_search(&hash, (const uchar *)&domain_id, 0)))
+ return NULL;
+ return (rpl_gtid *)my_hash_search(&elem->hash, (const uchar *)&server_id, 0);
+}
+
+rpl_gtid *
+rpl_binlog_state::find_most_recent(uint32 domain_id)
+{
+ element *elem;
+
+ elem= (element *)my_hash_search(&hash, (const uchar *)&domain_id, 0);
+ if (elem && elem->last_gtid)
+ return elem->last_gtid;
+ return NULL;
+}
+
+
+uint32
+rpl_binlog_state::count()
+{
+ uint32 c= 0;
+ uint32 i;
+
+ for (i= 0; i < hash.records; ++i)
+ c+= ((element *)my_hash_element(&hash, i))->hash.records;
+
+ return c;
+}
+
+
+int
+rpl_binlog_state::get_gtid_list(rpl_gtid *gtid_list, uint32 list_size)
+{
+ uint32 i, j, pos;
+
+ pos= 0;
+ for (i= 0; i < hash.records; ++i)
+ {
+ element *e= (element *)my_hash_element(&hash, i);
+ if (!e->last_gtid)
+ {
+ DBUG_ASSERT(e->hash.records==0);
+ continue;
+ }
+ for (j= 0; j <= e->hash.records; ++j)
+ {
+ const rpl_gtid *gtid;
+ if (j < e->hash.records)
+ {
+ gtid= (rpl_gtid *)my_hash_element(&e->hash, j);
+ if (gtid == e->last_gtid)
+ continue;
+ }
+ else
+ gtid= e->last_gtid;
+
+ if (pos >= list_size)
+ return 1;
+ memcpy(&gtid_list[pos++], gtid, sizeof(*gtid));
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ Get a list of the most recently binlogged GTID, for each domain_id.
+
+ This can be used when switching from being a master to being a slave,
+ to know where to start replicating from the new master.
+
+ The returned list must be de-allocated with my_free().
+
+ Returns 0 for ok, non-zero for out-of-memory.
+*/
+int
+rpl_binlog_state::get_most_recent_gtid_list(rpl_gtid **list, uint32 *size)
+{
+ uint32 i;
+ uint32 alloc_size, out_size;
+
+ alloc_size= hash.records;
+ if (!(*list= (rpl_gtid *)my_malloc(alloc_size * sizeof(rpl_gtid),
+ MYF(MY_WME))))
+ return 1;
+ out_size= 0;
+ for (i= 0; i < alloc_size; ++i)
+ {
+ element *e= (element *)my_hash_element(&hash, i);
+ if (!e->last_gtid)
+ continue;
+ memcpy(&((*list)[out_size++]), e->last_gtid, sizeof(rpl_gtid));
+ }
+
+ *size= out_size;
+ return 0;
+}
+
+
+bool
+rpl_binlog_state::append_pos(String *str)
+{
+ uint32 i;
+ bool first= true;
+
+ for (i= 0; i < hash.records; ++i)
+ {
+ element *e= (element *)my_hash_element(&hash, i);
+ if (e->last_gtid &&
+ rpl_slave_state_tostring_helper(str, e->last_gtid, &first))
+ return true;
+ }
+
+ return false;
+}
+
+
+slave_connection_state::slave_connection_state()
+{
+ my_hash_init(&hash, &my_charset_bin, 32,
+ offsetof(rpl_gtid, domain_id), sizeof(uint32), NULL, my_free,
+ HASH_UNIQUE);
+}
+
+
+slave_connection_state::~slave_connection_state()
+{
+ my_hash_free(&hash);
+}
+
+
+/*
+ Create a hash from the slave GTID state that is sent to master when slave
+ connects to start replication.
+
+ The state is sent as <GTID>,<GTID>,...,<GTID>, for example:
+
+ 0-2-112,1-4-1022
+
+ The state gives for each domain_id the GTID to start replication from for
+ the corresponding replication stream. So domain_id must be unique.
+
+ Returns 0 if ok, non-zero if error due to malformed input.
+
+  Note that the input string is built by the slave server, so it will not be
+  malformed unless there is a bug, corruption, or a malicious server. Thus we
+  only need a basic sanity check, not a fancy user-friendly error message.
+*/
+
+int
+slave_connection_state::load(char *slave_request, size_t len)
+{
+ char *p, *end;
+ uchar *rec;
+ rpl_gtid *gtid;
+ const rpl_gtid *gtid2;
+
+ reset();
+ p= slave_request;
+ end= slave_request + len;
+ if (p == end)
+ return 0;
+ for (;;)
+ {
+ if (!(rec= (uchar *)my_malloc(sizeof(*gtid), MYF(MY_WME))))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(*gtid));
+ return 1;
+ }
+ gtid= (rpl_gtid *)rec;
+ if (gtid_parser_helper(&p, end, gtid))
+ {
+ my_free(rec);
+ my_error(ER_INCORRECT_GTID_STATE, MYF(0));
+ return 1;
+ }
+ if ((gtid2= (const rpl_gtid *)
+ my_hash_search(&hash, (const uchar *)(&gtid->domain_id), 0)))
+ {
+ my_error(ER_DUPLICATE_GTID_DOMAIN, MYF(0), gtid->domain_id,
+ gtid->server_id, (ulonglong)gtid->seq_no, gtid2->domain_id,
+ gtid2->server_id, (ulonglong)gtid2->seq_no, gtid->domain_id);
+ my_free(rec);
+ return 1;
+ }
+ if (my_hash_insert(&hash, rec))
+ {
+ my_free(rec);
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ return 1;
+ }
+ if (p == end)
+ break; /* Finished. */
+ if (*p != ',')
+ {
+ my_error(ER_INCORRECT_GTID_STATE, MYF(0));
+ return 1;
+ }
+ ++p;
+ }
+
+ return 0;
+}
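
The comment above slave_connection_state::load() documents the wire format of the slave's requested GTID state: a comma-separated list such as 0-2-112,1-4-1022 with at most one GTID per domain_id. As an illustration only, a minimal stand-alone parser for that grammar could look like the sketch below; this is not the server's gtid_parser_helper(), and all names here are invented.

// Illustrative parser for the slave GTID state string format shown above:
//   <domain>-<server>-<seqno>[,<domain>-<server>-<seqno>]...
// Duplicate domain_ids are rejected, mirroring ER_DUPLICATE_GTID_DOMAIN.
#include <cstdint>
#include <map>
#include <sstream>
#include <string>

struct gtid { uint32_t domain_id, server_id; uint64_t seq_no; };

static bool parse_gtid_list(const std::string &s,
                            std::map<uint32_t, gtid> &out)
{
  std::istringstream in(s);
  std::string tok;
  while (std::getline(in, tok, ','))
  {
    gtid g;
    char d1, d2;
    std::istringstream ts(tok);
    if (!(ts >> g.domain_id >> d1 >> g.server_id >> d2 >> g.seq_no) ||
        d1 != '-' || d2 != '-')
      return false;                       // malformed GTID
    if (!out.emplace(g.domain_id, g).second)
      return false;                       // duplicate domain_id
  }
  return true;
}

int main()
{
  std::map<uint32_t, gtid> state;
  // Start domain 0 from 0-2-112 and domain 1 from 1-4-1022.
  return parse_gtid_list("0-2-112,1-4-1022", state) && state.size() == 2
         ? 0 : 1;
}
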
+
+
+int
+slave_connection_state::load(const rpl_gtid *gtid_list, uint32 count)
+{
+ uint32 i;
+
+ reset();
+ for (i= 0; i < count; ++i)
+ if (update(&gtid_list[i]))
+ return 1;
+ return 0;
+}
+
+
+static int
+slave_connection_state_load_cb(rpl_gtid *gtid, void *data)
+{
+ slave_connection_state *state= (slave_connection_state *)data;
+ return state->update(gtid);
+}
+
+
+/*
+ Same as rpl_slave_state::tostring(), but populates a slave_connection_state
+ instead.
+*/
+int
+slave_connection_state::load(rpl_slave_state *state,
+ rpl_gtid *extra_gtids, uint32 num_extra)
+{
+ reset();
+ return state->iterate(slave_connection_state_load_cb, this,
+ extra_gtids, num_extra);
+}
+
+
+rpl_gtid *
+slave_connection_state::find(uint32 domain_id)
+{
+ return (rpl_gtid *) my_hash_search(&hash, (const uchar *)(&domain_id), 0);
+}
+
+
+int
+slave_connection_state::update(const rpl_gtid *in_gtid)
+{
+ rpl_gtid *new_gtid;
+ uchar *rec= my_hash_search(&hash, (const uchar *)(&in_gtid->domain_id), 0);
+ if (rec)
+ {
+ memcpy(rec, in_gtid, sizeof(*in_gtid));
+ return 0;
+ }
+
+ if (!(new_gtid= (rpl_gtid *)my_malloc(sizeof(*new_gtid), MYF(MY_WME))))
+ return 1;
+ memcpy(new_gtid, in_gtid, sizeof(*new_gtid));
+ if (my_hash_insert(&hash, (uchar *)new_gtid))
+ {
+ my_free(new_gtid);
+ return 1;
+ }
+
+ return 0;
+}
+
+
+void
+slave_connection_state::remove(const rpl_gtid *in_gtid)
+{
+ uchar *rec= my_hash_search(&hash, (const uchar *)(&in_gtid->domain_id), 0);
+#ifndef DBUG_OFF
+ bool err;
+ rpl_gtid *slave_gtid= (rpl_gtid *)rec;
+ DBUG_ASSERT(rec /* We should never try to remove not present domain_id. */);
+ DBUG_ASSERT(slave_gtid->server_id == in_gtid->server_id);
+ DBUG_ASSERT(slave_gtid->seq_no == in_gtid->seq_no);
+#endif
+
+ IF_DBUG(err=, )
+ my_hash_delete(&hash, rec);
+ DBUG_ASSERT(!err);
+}
+
+
+int
+slave_connection_state::to_string(String *out_str)
+{
+ out_str->length(0);
+ return append_to_string(out_str);
+}
+
+
+int
+slave_connection_state::append_to_string(String *out_str)
+{
+ uint32 i;
+ bool first;
+
+ first= true;
+ for (i= 0; i < hash.records; ++i)
+ {
+ const rpl_gtid *gtid= (const rpl_gtid *)my_hash_element(&hash, i);
+ if (rpl_slave_state_tostring_helper(out_str, gtid, &first))
+ return 1;
+ }
+ return 0;
+}
diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h
new file mode 100644
index 00000000000..1a94ee76eca
--- /dev/null
+++ b/sql/rpl_gtid.h
@@ -0,0 +1,199 @@
+/* Copyright (c) 2013, Kristian Nielsen and MariaDB Services Ab.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef RPL_GTID_H
+#define RPL_GTID_H
+
+/* Definitions for MariaDB global transaction ID (GTID). */
+
+
+extern const LEX_STRING rpl_gtid_slave_state_table_name;
+
+class String;
+
+struct rpl_gtid
+{
+ uint32 domain_id;
+ uint32 server_id;
+ uint64 seq_no;
+};
+
+
+enum enum_gtid_skip_type {
+ GTID_SKIP_NOT, GTID_SKIP_STANDALONE, GTID_SKIP_TRANSACTION
+};
+
+
+/*
+ Replication slave state.
+
+ For every independent replication stream (identified by domain_id), this
+ remembers the last gtid applied on the slave within this domain.
+
+ Since events are always committed in-order within a single domain, this is
+ sufficient to maintain the state of the replication slave.
+*/
+struct rpl_slave_state
+{
+ /* Elements in the list of GTIDs kept for each domain_id. */
+ struct list_element
+ {
+ struct list_element *next;
+ uint64 sub_id;
+ uint64 seq_no;
+ uint32 server_id;
+ };
+
+ /* Elements in the HASH that hold the state for one domain_id. */
+ struct element
+ {
+ struct list_element *list;
+ uint64 last_sub_id;
+ uint32 domain_id;
+
+ list_element *grab_list() { list_element *l= list; list= NULL; return l; }
+ void add(list_element *l)
+ {
+ l->next= list;
+ list= l;
+ if (last_sub_id < l->sub_id)
+ last_sub_id= l->sub_id;
+ }
+ };
+
+ /* Mapping from domain_id to its element. */
+ HASH hash;
+ /* Mutex protecting access to the state. */
+ mysql_mutex_t LOCK_slave_state;
+
+ bool inited;
+ bool loaded;
+
+ rpl_slave_state();
+ ~rpl_slave_state();
+
+ void init();
+ void deinit();
+ void truncate_hash();
+ ulong count() const { return hash.records; }
+ int update(uint32 domain_id, uint32 server_id, uint64 sub_id, uint64 seq_no);
+ int truncate_state_table(THD *thd);
+ int record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
+ bool in_transaction, bool in_statement);
+ uint64 next_sub_id(uint32 domain_id);
+ int iterate(int (*cb)(rpl_gtid *, void *), void *data,
+ rpl_gtid *extra_gtids, uint32 num_extra);
+ int tostring(String *dest, rpl_gtid *extra_gtids, uint32 num_extra);
+ bool domain_to_gtid(uint32 domain_id, rpl_gtid *out_gtid);
+ int load(THD *thd, char *state_from_master, size_t len, bool reset,
+ bool in_statement);
+ bool is_empty();
+
+ void lock() { DBUG_ASSERT(inited); mysql_mutex_lock(&LOCK_slave_state); }
+ void unlock() { DBUG_ASSERT(inited); mysql_mutex_unlock(&LOCK_slave_state); }
+
+ element *get_element(uint32 domain_id);
+ int put_back_list(uint32 domain_id, list_element *list);
+
+ void update_state_hash(uint64 sub_id, rpl_gtid *gtid);
+ int record_and_update_gtid(THD *thd, Relay_log_info *rli);
+};
+
+
+/*
+ Binlog state.
+ This keeps the last GTID written to the binlog for every distinct
+ (domain_id, server_id) pair.
+ This will be logged at the start of the next binlog file as a
+ Gtid_list_log_event; this way, it is easy to find the binlog file
+  containing a given GTID, by simply scanning backwards from the newest
+ one until a lower seq_no is found in the Gtid_list_log_event at the
+ start of a binlog for the given domain_id and server_id.
+
+ We also remember the last logged GTID for every domain_id. This is used
+  to know where to start when a master is changed to a slave. As a side
+  effect, it also lets us skip a hash lookup in the very common case of
+  logging a new GTID with the same server_id as the last GTID.
+*/
+struct rpl_binlog_state
+{
+ struct element {
+ uint32 domain_id;
+ HASH hash; /* Containing all server_id for one domain_id */
+ /* The most recent entry in the hash. */
+ rpl_gtid *last_gtid;
+ /* Counter to allocate next seq_no for this domain. */
+ uint64 seq_no_counter;
+
+ int update_element(const rpl_gtid *gtid);
+ };
+ /* Mapping from domain_id to collection of elements. */
+ HASH hash;
+ /* Mutex protecting access to the state. */
+ mysql_mutex_t LOCK_binlog_state;
+ my_bool initialized;
+
+ rpl_binlog_state();
+ ~rpl_binlog_state();
+
+ void reset();
+ void free();
+ bool load(struct rpl_gtid *list, uint32 count);
+ int update(const struct rpl_gtid *gtid, bool strict);
+ int update_with_next_gtid(uint32 domain_id, uint32 server_id,
+ rpl_gtid *gtid);
+ int alloc_element(const rpl_gtid *gtid);
+ bool check_strict_sequence(uint32 domain_id, uint32 server_id, uint64 seq_no);
+ int bump_seq_no_if_needed(uint32 domain_id, uint64 seq_no);
+ int write_to_iocache(IO_CACHE *dest);
+ int read_from_iocache(IO_CACHE *src);
+ uint32 count();
+ int get_gtid_list(rpl_gtid *gtid_list, uint32 list_size);
+ int get_most_recent_gtid_list(rpl_gtid **list, uint32 *size);
+ bool append_pos(String *str);
+ rpl_gtid *find(uint32 domain_id, uint32 server_id);
+ rpl_gtid *find_most_recent(uint32 domain_id);
+};
+
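
The comment above rpl_binlog_state explains how the Gtid_list_log_event written at the start of each binlog file is meant to be used: scan the binlog files from newest to oldest and stop at the first file whose starting Gtid_list records a lower seq_no for the wanted domain_id/server_id. Below is a sketch of that search over invented data structures, purely illustrative and not the server's binlog index code.

// Illustrative sketch: given, for each binlog file (newest first), the
// (domain_id, server_id) -> seq_no map from its starting Gtid_list event,
// return the index of the file that should contain the wanted GTID.
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

using gtid_list= std::map<std::pair<uint32_t, uint32_t>, uint64_t>;

static int find_binlog_file(const std::vector<gtid_list> &files_newest_first,
                            uint32_t domain_id, uint32_t server_id,
                            uint64_t seq_no)
{
  for (size_t i= 0; i < files_newest_first.size(); ++i)
  {
    const gtid_list &at_start= files_newest_first[i];
    auto it= at_start.find({domain_id, server_id});
    // A missing entry or a lower seq_no at the start of this file means the
    // wanted GTID was written inside this file (newer files were already
    // ruled out, since their starting seq_no was >= the wanted one).
    if (it == at_start.end() || it->second < seq_no)
      return (int)i;
  }
  return -1;   // older than all known binlog files
}

int main()
{
  // Newest file starts after 0-1-200; the older one after 0-1-100.
  std::vector<gtid_list> files= { {{{0, 1}, 200}}, {{{0, 1}, 100}} };
  return find_binlog_file(files, 0, 1, 150) == 1 ? 0 : 1;
}
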
+
+/*
+ Represent the GTID state that a slave connection to a master requests
+ the master to start sending binlog events from.
+*/
+struct slave_connection_state
+{
+ /* Mapping from domain_id to the GTID requested for that domain. */
+ HASH hash;
+
+ slave_connection_state();
+ ~slave_connection_state();
+
+ void reset() { my_hash_reset(&hash); }
+ int load(char *slave_request, size_t len);
+ int load(const rpl_gtid *gtid_list, uint32 count);
+ int load(rpl_slave_state *state, rpl_gtid *extra_gtids, uint32 num_extra);
+ rpl_gtid *find(uint32 domain_id);
+ int update(const rpl_gtid *in_gtid);
+ void remove(const rpl_gtid *gtid);
+ ulong count() const { return hash.records; }
+ int to_string(String *out_str);
+ int append_to_string(String *out_str);
+};
+
+extern bool rpl_slave_state_tostring_helper(String *dest, const rpl_gtid *gtid,
+ bool *first);
+extern int gtid_check_rpl_slave_state_table(TABLE *table);
+
+#endif /* RPL_GTID_H */
diff --git a/sql/rpl_handler.cc b/sql/rpl_handler.cc
index 258dae0edb2..2777dabf451 100644
--- a/sql/rpl_handler.cc
+++ b/sql/rpl_handler.cc
@@ -176,7 +176,7 @@ void delegates_destroy()
plugins add to thd->lex will be automatically unlocked.
*/
#define FOREACH_OBSERVER(r, f, thd, args) \
- param.server_id= thd->server_id; \
+ param.server_id= thd->variables.server_id; \
/*
Use a struct to make sure that they are allocated adjacent, check
delete_dynamic().
@@ -348,7 +348,7 @@ int Binlog_transmit_delegate::reserve_header(THD *thd, ushort flags,
ulong hlen;
Binlog_transmit_param param;
param.flags= flags;
- param.server_id= thd->server_id;
+ param.server_id= thd->variables.server_id;
int ret= 0;
read_lock();
@@ -555,4 +555,24 @@ int unregister_binlog_relay_io_observer(Binlog_relay_IO_observer *observer, void
{
return binlog_relay_io_delegate->remove_observer(observer, (st_plugin_int *)p);
}
+#else
+int register_binlog_transmit_observer(Binlog_transmit_observer *observer, void *p)
+{
+ return 0;
+}
+
+int unregister_binlog_transmit_observer(Binlog_transmit_observer *observer, void *p)
+{
+ return 0;
+}
+
+int register_binlog_relay_io_observer(Binlog_relay_IO_observer *observer, void *p)
+{
+ return 0;
+}
+
+int unregister_binlog_relay_io_observer(Binlog_relay_IO_observer *observer, void *p)
+{
+ return 0;
+}
#endif /* HAVE_REPLICATION */
diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc
index ec1a96e8a2b..a4b04d2e047 100644
--- a/sql/rpl_injector.cc
+++ b/sql/rpl_injector.cc
@@ -108,7 +108,7 @@ int injector::transaction::use_table(server_id_type sid, table tbl)
if ((error= check_state(TABLE_STATE)))
DBUG_RETURN(error);
- server_id_type save_id= m_thd->server_id;
+ server_id_type save_id= m_thd->variables.server_id;
m_thd->set_server_id(sid);
error= m_thd->binlog_write_table_map(tbl.get_table(),
tbl.is_transactional());
@@ -127,7 +127,7 @@ int injector::transaction::write_row (server_id_type sid, table tbl,
if (error)
DBUG_RETURN(error);
- server_id_type save_id= m_thd->server_id;
+ server_id_type save_id= m_thd->variables.server_id;
m_thd->set_server_id(sid);
error= m_thd->binlog_write_row(tbl.get_table(), tbl.is_transactional(),
cols, colcnt, record);
@@ -146,7 +146,7 @@ int injector::transaction::delete_row(server_id_type sid, table tbl,
if (error)
DBUG_RETURN(error);
- server_id_type save_id= m_thd->server_id;
+ server_id_type save_id= m_thd->variables.server_id;
m_thd->set_server_id(sid);
error= m_thd->binlog_delete_row(tbl.get_table(), tbl.is_transactional(),
cols, colcnt, record);
@@ -165,7 +165,7 @@ int injector::transaction::update_row(server_id_type sid, table tbl,
if (error)
DBUG_RETURN(error);
- server_id_type save_id= m_thd->server_id;
+ server_id_type save_id= m_thd->variables.server_id;
m_thd->set_server_id(sid);
error= m_thd->binlog_update_row(tbl.get_table(), tbl.is_transactional(),
cols, colcnt, before, after);
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index 7b363fbaf7d..fced238e334 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -37,7 +37,9 @@ Master_info::Master_info(LEX_STRING *connection_name_arg,
checksum_alg_before_fd(BINLOG_CHECKSUM_ALG_UNDEF),
connect_retry(DEFAULT_CONNECT_RETRY), inited(0), abort_slave(0),
slave_running(0), slave_run_id(0), sync_counter(0),
- heartbeat_period(0), received_heartbeats(0), master_id(0)
+ heartbeat_period(0), received_heartbeats(0), master_id(0),
+ using_gtid(USE_GTID_NO), events_queued_since_last_gtid(0),
+ gtid_reconnect_event_skip_count(0), gtid_event_seen(false)
{
host[0] = 0; user[0] = 0; password[0] = 0;
ssl_ca[0]= 0; ssl_capath[0]= 0; ssl_cert[0]= 0;
@@ -61,8 +63,17 @@ Master_info::Master_info(LEX_STRING *connection_name_arg,
connection_name.length+1);
my_casedn_str(system_charset_info, cmp_connection_name.str);
}
-
- my_init_dynamic_array(&ignore_server_ids, sizeof(::server_id), 16, 16, MYF(0));
+  /*
+    When the server restarts, any Rpl_filter settings that are not in my.cnf
+    are lost. So if a filter setting should survive a restart, it must be
+    added to my.cnf.
+  */
+ rpl_filter= get_or_create_rpl_filter(connection_name.str,
+ connection_name.length);
+ copy_filter_setting(rpl_filter, global_rpl_filter);
+
+ my_init_dynamic_array(&ignore_server_ids,
+ sizeof(global_system_variables.server_id), 16, 16,
+ MYF(0));
bzero((char*) &file, sizeof(file));
mysql_mutex_init(key_master_info_run_lock, &run_lock, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_master_info_data_lock, &data_lock, MY_MUTEX_INIT_FAST);
@@ -77,6 +88,8 @@ Master_info::Master_info(LEX_STRING *connection_name_arg,
Master_info::~Master_info()
{
+ rpl_filters.delete_element(connection_name.str, connection_name.length,
+ (void (*)(const char*, uchar*)) free_rpl_filter);
my_free(connection_name.str);
delete_dynamic(&ignore_server_ids);
mysql_mutex_destroy(&run_lock);
@@ -136,12 +149,34 @@ void Master_info::clear_in_memory_info(bool all)
}
}
+
+const char *
+Master_info::using_gtid_astext(enum enum_using_gtid arg)
+{
+ switch (arg)
+ {
+ case USE_GTID_NO:
+ return "No";
+ case USE_GTID_SLAVE_POS:
+ return "Slave_Pos";
+ default:
+ DBUG_ASSERT(arg == USE_GTID_CURRENT_POS);
+ return "Current_Pos";
+ }
+}
+
+
void init_master_log_pos(Master_info* mi)
{
DBUG_ENTER("init_master_log_pos");
mi->master_log_name[0] = 0;
mi->master_log_pos = BIN_LOG_HEADER_SIZE; // skip magic number
+ mi->using_gtid= Master_info::USE_GTID_NO;
+ mi->gtid_current_pos.reset();
+ mi->events_queued_since_last_gtid= 0;
+ mi->gtid_reconnect_event_skip_count= 0;
+ mi->gtid_event_seen= false;
/* Intentionally init ssl_verify_server_cert to 0, no option available */
mi->ssl_verify_server_cert= 0;
@@ -187,8 +222,13 @@ enum {
/* line for ssl_crl */
LINE_FOR_SSL_CRLPATH= 22,
- /* Number of lines currently used when saving master info file */
- LINES_IN_MASTER_INFO= LINE_FOR_SSL_CRLPATH
+ /* MySQL 5.6 fixed-position lines. */
+ LINE_FOR_FIRST_MYSQL_5_6=23,
+ LINE_FOR_LAST_MYSQL_5_6=23,
+ /* Reserved lines for MySQL future versions. */
+ LINE_FOR_LAST_MYSQL_FUTURE=33,
+ /* Number of (fixed-position) lines used when saving master info file */
+ LINES_IN_MASTER_INFO= LINE_FOR_LAST_MYSQL_FUTURE
};
int init_master_info(Master_info* mi, const char* master_info_fname,
@@ -316,7 +356,7 @@ file '%s')", fname);
int ssl= 0, ssl_verify_server_cert= 0;
float master_heartbeat_period= 0.0;
char *first_non_digit;
- char dummy_buf[HOSTNAME_LENGTH+1];
+ char buf[HOSTNAME_LENGTH+1];
/*
Starting from 4.1.x master.info has new format. Now its
@@ -410,7 +450,7 @@ file '%s')", fname);
(this is just a reservation to avoid future upgrade problems)
*/
if (lines >= LINE_FOR_MASTER_BIND &&
- init_strvar_from_file(dummy_buf, sizeof(dummy_buf), &mi->file, ""))
+ init_strvar_from_file(buf, sizeof(buf), &mi->file, ""))
goto errwithmsg;
/*
Starting from 6.0 list of server_id of ignorable servers might be
@@ -425,12 +465,12 @@ file '%s')", fname);
/* reserved */
if (lines >= LINE_FOR_MASTER_UUID &&
- init_strvar_from_file(dummy_buf, sizeof(dummy_buf), &mi->file, ""))
+ init_strvar_from_file(buf, sizeof(buf), &mi->file, ""))
goto errwithmsg;
/* Starting from 5.5 the master_retry_count may be in the repository. */
if (lines >= LINE_FOR_MASTER_RETRY_COUNT &&
- init_strvar_from_file(dummy_buf, sizeof(dummy_buf), &mi->file, ""))
+ init_strvar_from_file(buf, sizeof(buf), &mi->file, ""))
goto errwithmsg;
if (lines >= LINE_FOR_SSL_CRLPATH &&
@@ -439,6 +479,42 @@ file '%s')", fname);
init_strvar_from_file(mi->ssl_crlpath, sizeof(mi->ssl_crlpath),
&mi->file, "")))
goto errwithmsg;
+
+ /*
+ Starting with MariaDB 10.0, we use a key=value syntax, which is nicer
+    in several ways. But we leave a bunch of empty lines to accommodate
+    any future old-style additions in MySQL (this makes it easier for
+    users moving from MariaDB to MySQL, as MySQL will not try to
+    interpret a MariaDB key=value line).
+ */
+ if (lines >= LINE_FOR_LAST_MYSQL_FUTURE)
+ {
+ uint i;
+ /* Skip lines used by / reserved for MySQL >= 5.6. */
+ for (i= LINE_FOR_FIRST_MYSQL_5_6; i <= LINE_FOR_LAST_MYSQL_FUTURE; ++i)
+ {
+ if (init_strvar_from_file(buf, sizeof(buf), &mi->file, ""))
+ goto errwithmsg;
+ }
+
+ /*
+ Parse any extra key=value lines.
+ Ignore unknown lines, to facilitate downgrades.
+ */
+ while (!init_strvar_from_file(buf, sizeof(buf), &mi->file, 0))
+ {
+ if (0 == strncmp(buf, STRING_WITH_LEN("using_gtid=")))
+ {
+ int val= atoi(buf + sizeof("using_gtid"));
+ if (val == Master_info::USE_GTID_CURRENT_POS)
+ mi->using_gtid= Master_info::USE_GTID_CURRENT_POS;
+ else if (val == Master_info::USE_GTID_SLAVE_POS)
+ mi->using_gtid= Master_info::USE_GTID_SLAVE_POS;
+ else
+ mi->using_gtid= Master_info::USE_GTID_NO;
+ }
+ }
+ }
}
#ifndef HAVE_OPENSSL
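
After this change, master.info consists of the historical fixed-position lines, a block of empty lines reserved for MySQL 5.6 and later, and then free-form key=value lines such as using_gtid=1 (0=No, 1=Current_Pos, 2=Slave_Pos, per the enum in rpl_mi.h); unknown keys are skipped so older servers can still read the file. The stand-alone sketch below shows one way to consume such trailing key=value lines; the parsing code is invented for illustration and is not the server's implementation.

// Illustrative sketch: read trailing "key=value" lines from a master.info
// style stream, ignoring unknown keys. The using_gtid values follow the
// patch above (0=No, 1=Current_Pos, 2=Slave_Pos).
#include <iostream>
#include <sstream>
#include <string>

int main()
{
  std::istringstream tail(
      "using_gtid=2\n"
      "some_future_key=whatever\n");   // unknown keys are skipped

  int using_gtid= 0;                   // default: USE_GTID_NO
  std::string line;
  while (std::getline(tail, line))
  {
    std::string::size_type eq= line.find('=');
    if (eq == std::string::npos)
      continue;                        // not key=value, ignore
    std::string key= line.substr(0, eq);
    std::string val= line.substr(eq + 1);
    if (key == "using_gtid")
    {
      int v= std::stoi(val);           // assume well-formed numeric value
      if (v == 1 || v == 2)
        using_gtid= v;                 // anything else falls back to 0
    }
    // Unknown keys: deliberately ignored, to allow downgrades.
  }
  std::cout << "using_gtid=" << using_gtid << "\n";   // prints using_gtid=2
  return 0;
}
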
@@ -544,7 +620,7 @@ int flush_master_info(Master_info* mi,
char* ignore_server_ids_buf;
{
ignore_server_ids_buf=
- (char *) my_malloc((sizeof(::server_id) * 3 + 1) *
+ (char *) my_malloc((sizeof(global_system_variables.server_id) * 3 + 1) *
(1 + mi->ignore_server_ids.elements), MYF(MY_WME));
if (!ignore_server_ids_buf)
DBUG_RETURN(1);
@@ -578,7 +654,9 @@ int flush_master_info(Master_info* mi,
sprintf(heartbeat_buf, "%.3f", mi->heartbeat_period);
my_b_seek(file, 0L);
my_b_printf(file,
- "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n",
+ "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n"
+ "\n\n\n\n\n\n\n\n\n\n\n"
+ "using_gtid=%d\n",
LINES_IN_MASTER_INFO,
mi->master_log_name, llstr(mi->master_log_pos, lbuf),
mi->host, mi->user,
@@ -587,7 +665,7 @@ int flush_master_info(Master_info* mi,
mi->ssl_cipher, mi->ssl_key, mi->ssl_verify_server_cert,
heartbeat_buf, "", ignore_server_ids_buf,
"", 0,
- mi->ssl_crl, mi->ssl_crlpath);
+ mi->ssl_crl, mi->ssl_crlpath, mi->using_gtid);
my_free(ignore_server_ids_buf);
err= flush_io_cache(file);
if (sync_masterinfo_period && !err &&
@@ -675,11 +753,12 @@ bool check_master_connection_name(LEX_STRING *name)
file names without a prefix.
*/
-void create_logfile_name_with_suffix(char *res_file_name, uint length,
- const char *info_file, bool append,
- LEX_STRING *suffix)
+void create_logfile_name_with_suffix(char *res_file_name, size_t length,
+ const char *info_file, bool append,
+ LEX_STRING *suffix)
{
- char buff[MAX_CONNECTION_NAME+1], res[MAX_CONNECTION_NAME+1], *p;
+ char buff[MAX_CONNECTION_NAME+1],
+ res[MAX_CONNECTION_NAME * MAX_FILENAME_MBWIDTH+1], *p;
p= strmake(res_file_name, info_file, length);
/* If not empty suffix and there is place left for some part of the suffix */
@@ -692,8 +771,6 @@ void create_logfile_name_with_suffix(char *res_file_name, uint length,
/* Create null terminated string */
from_length= strmake(buff, suffix->str, suffix->length) - buff;
- /* Convert to lower case */
- my_casedn_str(system_charset_info, buff);
/* Convert to characters usable in a file name */
res_length= strconvert(system_charset_info, buff, from_length,
&my_charset_filename, res, sizeof(res), &errors);
@@ -709,6 +786,65 @@ void create_logfile_name_with_suffix(char *res_file_name, uint length,
}
}
+void copy_filter_setting(Rpl_filter* dst_filter, Rpl_filter* src_filter)
+{
+ char buf[256];
+ String tmp(buf, sizeof(buf), &my_charset_bin);
+
+ dst_filter->get_do_db(&tmp);
+ if (tmp.is_empty())
+ {
+ src_filter->get_do_db(&tmp);
+ if (!tmp.is_empty())
+ dst_filter->set_do_db(tmp.ptr());
+ }
+
+ dst_filter->get_do_table(&tmp);
+ if (tmp.is_empty())
+ {
+ src_filter->get_do_table(&tmp);
+ if (!tmp.is_empty())
+ dst_filter->set_do_table(tmp.ptr());
+ }
+
+ dst_filter->get_ignore_db(&tmp);
+ if (tmp.is_empty())
+ {
+ src_filter->get_ignore_db(&tmp);
+ if (!tmp.is_empty())
+ dst_filter->set_ignore_db(tmp.ptr());
+ }
+
+ dst_filter->get_ignore_table(&tmp);
+ if (tmp.is_empty())
+ {
+ src_filter->get_ignore_table(&tmp);
+ if (!tmp.is_empty())
+ dst_filter->set_ignore_table(tmp.ptr());
+ }
+
+ dst_filter->get_wild_do_table(&tmp);
+ if (tmp.is_empty())
+ {
+ src_filter->get_wild_do_table(&tmp);
+ if (!tmp.is_empty())
+ dst_filter->set_wild_do_table(tmp.ptr());
+ }
+
+ dst_filter->get_wild_ignore_table(&tmp);
+ if (tmp.is_empty())
+ {
+ src_filter->get_wild_ignore_table(&tmp);
+ if (!tmp.is_empty())
+ dst_filter->set_wild_ignore_table(tmp.ptr());
+ }
+
+ if (dst_filter->rewrite_db_is_empty())
+ {
+ if (!src_filter->rewrite_db_is_empty())
+ dst_filter->copy_rewrite_db(src_filter);
+ }
+}
Master_info_index::Master_info_index()
{
@@ -750,7 +886,7 @@ bool Master_info_index::init_all_master_info()
{
int thread_mask;
int err_num= 0, succ_num= 0; // The number of success read Master_info
- char sign[MAX_CONNECTION_NAME];
+ char sign[MAX_CONNECTION_NAME+1];
File index_file_nr;
DBUG_ENTER("init_all_master_info");
@@ -802,11 +938,14 @@ bool Master_info_index::init_all_master_info()
lock_slave_threads(mi);
init_thread_mask(&thread_mask,mi,0 /*not inverse*/);
- create_logfile_name_with_suffix(buf_master_info_file, sizeof(buf_master_info_file),
- master_info_file, 0, &connection_name);
+ create_logfile_name_with_suffix(buf_master_info_file,
+ sizeof(buf_master_info_file),
+ master_info_file, 0,
+ &mi->cmp_connection_name);
create_logfile_name_with_suffix(buf_relay_log_info_file,
- sizeof(buf_relay_log_info_file),
- relay_log_info_file, 0, &connection_name);
+ sizeof(buf_relay_log_info_file),
+ relay_log_info_file, 0,
+ &mi->cmp_connection_name);
if (global_system_variables.log_warnings > 1)
sql_print_information("Reading Master_info: '%s' Relay_info:'%s'",
buf_master_info_file, buf_relay_log_info_file);
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index 09c7fce5b14..991f6673c3a 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -21,6 +21,8 @@
#include "rpl_rli.h"
#include "rpl_reporting.h"
#include "my_sys.h"
+#include "rpl_filter.h"
+#include "keycaches.h"
typedef struct st_mysql MYSQL;
@@ -59,6 +61,10 @@ typedef struct st_mysql MYSQL;
class Master_info : public Slave_reporting_capability
{
public:
+ enum enum_using_gtid {
+ USE_GTID_NO= 0, USE_GTID_CURRENT_POS= 1, USE_GTID_SLAVE_POS= 2
+ };
+
Master_info(LEX_STRING *connection_name, bool is_slave_recovery);
~Master_info();
bool shall_ignore_server_id(ulong s_id);
@@ -68,11 +74,12 @@ class Master_info : public Slave_reporting_capability
/* If malloc() in initialization failed */
return connection_name.str == 0;
}
+ static const char *using_gtid_astext(enum enum_using_gtid arg);
/* the variables below are needed because we can change masters on the fly */
char master_log_name[FN_REFLEN+6]; /* Room for multi-*/
char host[HOSTNAME_LENGTH*SYSTEM_CHARSET_MBMAXLEN+1];
- char user[USERNAME_LENGTH*+1];
+ char user[USERNAME_LENGTH+1];
char password[MAX_PASSWORD_LENGTH*SYSTEM_CHARSET_MBMAXLEN+1];
LEX_STRING connection_name; /* User supplied connection name */
LEX_STRING cmp_connection_name; /* Connection name in lower case */
@@ -93,6 +100,7 @@ class Master_info : public Slave_reporting_capability
uint32 file_id; /* for 3.23 load data infile */
Relay_log_info rli;
uint port;
+  Rpl_filter* rpl_filter; /* Each replication connection has its own filter rules */
/*
to hold checksum alg in use until IO thread has received FD.
Initialized to novalue, then set to the queried from master
@@ -127,6 +135,41 @@ class Master_info : public Slave_reporting_capability
ulonglong received_heartbeats; // counter of received heartbeat events
DYNAMIC_ARRAY ignore_server_ids;
ulong master_id;
+ /*
+    Which kind of GTID position (if any) is used when connecting to the master.
+
+    Note that the numeric values of these must not be changed, as they are
+    stored in master.info.
+ */
+ enum enum_using_gtid using_gtid;
+
+ /*
+ This GTID position records how far we have fetched into the relay logs.
+ This is used to continue fetching when the IO thread reconnects to the
+ master.
+
+ (Full slave stop/start does not use it, as it resets the relay logs).
+ */
+ slave_connection_state gtid_current_pos;
+ /*
+ If events_queued_since_last_gtid is non-zero, it is the number of events
+    queued so far in the relay log for the current GTID-prefixed event group.
+    It is zero when no partial event group is currently queued.
+ */
+ uint64 events_queued_since_last_gtid;
+ /*
+ The GTID of the partially-queued event group, when
+ events_queued_since_last_gtid is non-zero.
+ */
+ rpl_gtid last_queued_gtid;
+ /*
+    When the slave IO thread needs to reconnect, gtid_reconnect_event_skip_count
+    is the number of events to skip from the first GTID-prefixed event group,
+ to avoid duplicating events in the relay log.
+ */
+ uint64 gtid_reconnect_event_skip_count;
+  /* gtid_event_seen is false until we receive the first GTID event from the master. */
+ bool gtid_event_seen;
};
int init_master_info(Master_info* mi, const char* master_info_fname,
const char* slave_info_fname,
@@ -137,6 +180,7 @@ int flush_master_info(Master_info* mi,
bool flush_relay_log_cache,
bool need_lock_relay_log);
int change_master_server_id_cmp(ulong *id1, ulong *id2);
+void copy_filter_setting(Rpl_filter* dst_filter, Rpl_filter* src_filter);
/*
Multi-master setups are handled through this struct.
@@ -171,7 +215,7 @@ public:
};
bool check_master_connection_name(LEX_STRING *name);
-void create_logfile_name_with_suffix(char *res_file_name, uint length,
+void create_logfile_name_with_suffix(char *res_file_name, size_t length,
const char *info_file,
bool append,
LEX_STRING *suffix);
diff --git a/sql/rpl_reporting.cc b/sql/rpl_reporting.cc
index f442f3a37c0..96fe6242ac3 100644
--- a/sql/rpl_reporting.cc
+++ b/sql/rpl_reporting.cc
@@ -67,7 +67,7 @@ Slave_reporting_capability::report(loglevel level, int err_code,
va_end(args);
/* If the msg string ends with '.', do not add a ',' it would be ugly */
- report_function("Slave %s: %s%s Error_code: %d",
+ report_function("Slave %s: %s%s Internal MariaDB error code: %d",
m_thread_name, pbuff,
(pbuff[0] && *(strend(pbuff)-1) == '.') ? "" : ",",
err_code);
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index a11bdde9d19..3a6bb4c33dc 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -32,6 +32,13 @@
static int count_relay_log_space(Relay_log_info* rli);
+/**
+ Current replication state (hash of last GTID executed, per replication
+ domain).
+*/
+rpl_slave_state rpl_global_gtid_slave_state;
+
+
// Defined in slave.cc
int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
@@ -42,7 +49,8 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery)
no_storage(FALSE), replicate_same_server_id(::replicate_same_server_id),
info_fd(-1), cur_log_fd(-1), relay_log(&sync_relaylog_period),
sync_counter(0), is_relay_log_recovery(is_slave_recovery),
- save_temporary_tables(0), cur_log_old_open_count(0), group_relay_log_pos(0),
+ save_temporary_tables(0), mi(0),
+ cur_log_old_open_count(0), group_relay_log_pos(0),
event_relay_log_pos(0),
#if HAVE_valgrind
is_fake(FALSE),
@@ -52,7 +60,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery)
abort_pos_wait(0), slave_run_id(0), sql_thd(0),
inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE),
until_log_pos(0), retried_trans(0), executed_entries(0),
- tables_to_lock(0), tables_to_lock_count(0),
+ gtid_sub_id(0), tables_to_lock(0), tables_to_lock_count(0),
last_event_start_time(0), deferred_events(NULL),m_flags(0),
row_stmt_start_timestamp(0), long_find_row_note_printed(false),
m_annotate_event(0)
@@ -206,17 +214,18 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
char buf_relay_logname[FN_REFLEN], buf_relaylog_index_name_buff[FN_REFLEN];
char *buf_relaylog_index_name= opt_relaylog_index_name;
- create_logfile_name_with_suffix(buf_relay_logname, sizeof(buf_relay_logname),
- ln, 1, &mi->connection_name);
+ create_logfile_name_with_suffix(buf_relay_logname,
+ sizeof(buf_relay_logname),
+ ln, 1, &mi->cmp_connection_name);
ln= buf_relay_logname;
if (opt_relaylog_index_name)
{
buf_relaylog_index_name= buf_relaylog_index_name_buff;
create_logfile_name_with_suffix(buf_relaylog_index_name_buff,
- sizeof(buf_relaylog_index_name_buff),
- opt_relaylog_index_name, 0,
- &mi->connection_name);
+ sizeof(buf_relaylog_index_name_buff),
+ opt_relaylog_index_name, 0,
+ &mi->cmp_connection_name);
}
/*
@@ -318,8 +327,7 @@ Failed to open the existing relay log info file '%s' (errno %d)",
msg="Error reading slave log configuration";
goto err;
}
- strmake(rli->event_relay_log_name,rli->group_relay_log_name,
- sizeof(rli->event_relay_log_name)-1);
+ strmake_buf(rli->event_relay_log_name,rli->group_relay_log_name);
rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos;
rli->group_master_log_pos= master_log_pos;
@@ -537,10 +545,8 @@ int init_relay_log_pos(Relay_log_info* rli,const char* log,
*errmsg="Could not find target log during relay log initialization";
goto err;
}
- strmake(rli->group_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->group_relay_log_name)-1);
- strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->event_relay_log_name)-1);
+ strmake_buf(rli->group_relay_log_name,rli->linfo.log_file_name);
+ strmake_buf(rli->event_relay_log_name,rli->linfo.log_file_name);
if (rli->relay_log.is_active(rli->linfo.log_file_name))
{
/*
@@ -880,8 +886,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
mysql_mutex_lock(&data_lock);
inc_event_relay_log_pos();
group_relay_log_pos= event_relay_log_pos;
- strmake(group_relay_log_name,event_relay_log_name,
- sizeof(group_relay_log_name)-1);
+ strmake_buf(group_relay_log_name,event_relay_log_name);
notify_group_relay_log_name_update();
@@ -1016,10 +1021,8 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset,
if (!just_reset)
{
/* Save name of used relay log file */
- strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->group_relay_log_name)-1);
- strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->event_relay_log_name)-1);
+ strmake_buf(rli->group_relay_log_name, rli->relay_log.get_log_fname());
+ strmake_buf(rli->event_relay_log_name, rli->relay_log.get_log_fname());
rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
rli->log_space_total= 0;
@@ -1088,11 +1091,13 @@ bool Relay_log_info::is_until_satisfied(THD *thd, Log_event *ev)
ulonglong log_pos;
DBUG_ENTER("Relay_log_info::is_until_satisfied");
- DBUG_ASSERT(until_condition != UNTIL_NONE);
+ DBUG_ASSERT(until_condition == UNTIL_MASTER_POS ||
+ until_condition == UNTIL_RELAY_POS);
if (until_condition == UNTIL_MASTER_POS)
{
- if (ev && ev->server_id == (uint32) ::server_id && !replicate_same_server_id)
+ if (ev && ev->server_id == (uint32) global_system_variables.server_id &&
+ !replicate_same_server_id)
DBUG_RETURN(FALSE);
log_name= group_master_log_name;
log_pos= (!ev)? group_master_log_pos :
@@ -1190,7 +1195,7 @@ bool Relay_log_info::cached_charset_compare(char *charset) const
void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
- time_t event_creation_time)
+ time_t event_creation_time, THD *thd)
{
#ifndef DBUG_OFF
extern uint debug_not_change_ts_if_art_event;
@@ -1225,7 +1230,23 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
else
{
inc_group_relay_log_pos(event_master_log_pos);
+ if (rpl_global_gtid_slave_state.record_and_update_gtid(thd, this))
+ {
+ report(WARNING_LEVEL, ER_CANNOT_UPDATE_GTID_STATE,
+ "Failed to update GTID state in %s.%s, slave state may become "
+ "inconsistent: %d: %s",
+ "mysql", rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message());
+ /*
+ At this point we are not in a transaction (for example after DDL),
+ so we can not roll back. Anyway, normally updates to the slave
+ state table should not fail, and if they do, at least we made the
+ DBA aware of the problem in the error log.
+ */
+ }
+ DBUG_EXECUTE_IF("inject_crash_before_flush_rli", DBUG_SUICIDE(););
flush_relay_log_info(this);
+ DBUG_EXECUTE_IF("inject_crash_after_flush_rli", DBUG_SUICIDE(););
/*
Note that Rotate_log_event::do_apply_event() does not call this
function, so there is no chance that a fake rotate event resets
@@ -1357,4 +1378,181 @@ void Relay_log_info::slave_close_thread_tables(THD *thd)
clear_tables_to_lock();
DBUG_VOID_RETURN;
}
+
+
+int
+rpl_load_gtid_slave_state(THD *thd)
+{
+ TABLE_LIST tlist;
+ TABLE *table;
+ bool table_opened= false;
+ bool table_scanned= false;
+ bool array_inited= false;
+ struct local_element { uint64 sub_id; rpl_gtid gtid; };
+ struct local_element tmp_entry, *entry;
+ HASH hash;
+ DYNAMIC_ARRAY array;
+ int err= 0;
+ uint32 i;
+ DBUG_ENTER("rpl_load_gtid_slave_state");
+
+ rpl_global_gtid_slave_state.lock();
+ bool loaded= rpl_global_gtid_slave_state.loaded;
+ rpl_global_gtid_slave_state.unlock();
+ if (loaded)
+ DBUG_RETURN(0);
+
+ my_hash_init(&hash, &my_charset_bin, 32,
+ offsetof(local_element, gtid) + offsetof(rpl_gtid, domain_id),
+ sizeof(uint32), NULL, my_free, HASH_UNIQUE);
+ if ((err= my_init_dynamic_array(&array, sizeof(local_element), 0, 0, MYF(0))))
+ goto end;
+ array_inited= true;
+
+ mysql_reset_thd_for_next_command(thd, 0);
+
+ tlist.init_one_table(STRING_WITH_LEN("mysql"),
+ rpl_gtid_slave_state_table_name.str,
+ rpl_gtid_slave_state_table_name.length,
+ NULL, TL_READ);
+ if ((err= open_and_lock_tables(thd, &tlist, FALSE, 0)))
+ goto end;
+ table_opened= true;
+ table= tlist.table;
+
+ if ((err= gtid_check_rpl_slave_state_table(table)))
+ goto end;
+
+ bitmap_set_all(table->read_set);
+ if ((err= table->file->ha_rnd_init_with_error(1)))
+ {
+ table->file->print_error(err, MYF(0));
+ goto end;
+ }
+ table_scanned= true;
+ for (;;)
+ {
+ uint32 domain_id, server_id;
+ uint64 sub_id, seq_no;
+ uchar *rec;
+
+ if ((err= table->file->ha_rnd_next(table->record[0])))
+ {
+ if (err == HA_ERR_RECORD_DELETED)
+ continue;
+ else if (err == HA_ERR_END_OF_FILE)
+ break;
+ else
+ {
+ table->file->print_error(err, MYF(0));
+ goto end;
+ }
+ }
+ domain_id= (ulonglong)table->field[0]->val_int();
+ sub_id= (ulonglong)table->field[1]->val_int();
+ server_id= (ulonglong)table->field[2]->val_int();
+ seq_no= (ulonglong)table->field[3]->val_int();
+ DBUG_PRINT("info", ("Read slave state row: %u-%u-%lu sub_id=%lu\n",
+ (unsigned)domain_id, (unsigned)server_id,
+ (ulong)seq_no, (ulong)sub_id));
+
+ tmp_entry.sub_id= sub_id;
+ tmp_entry.gtid.domain_id= domain_id;
+ tmp_entry.gtid.server_id= server_id;
+ tmp_entry.gtid.seq_no= seq_no;
+ if ((err= insert_dynamic(&array, (uchar *)&tmp_entry)))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto end;
+ }
+
+ if ((rec= my_hash_search(&hash, (const uchar *)&domain_id, 0)))
+ {
+ entry= (struct local_element *)rec;
+ if (entry->sub_id >= sub_id)
+ continue;
+ entry->sub_id= sub_id;
+ DBUG_ASSERT(entry->gtid.domain_id == domain_id);
+ entry->gtid.server_id= server_id;
+ entry->gtid.seq_no= seq_no;
+ }
+ else
+ {
+ if (!(entry= (struct local_element *)my_malloc(sizeof(*entry),
+ MYF(MY_WME))))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), (int)sizeof(*entry));
+ err= 1;
+ goto end;
+ }
+ entry->sub_id= sub_id;
+ entry->gtid.domain_id= domain_id;
+ entry->gtid.server_id= server_id;
+ entry->gtid.seq_no= seq_no;
+ if ((err= my_hash_insert(&hash, (uchar *)entry)))
+ {
+ my_free(entry);
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto end;
+ }
+ }
+ }
+
+ rpl_global_gtid_slave_state.lock();
+ if (rpl_global_gtid_slave_state.loaded)
+ {
+ rpl_global_gtid_slave_state.unlock();
+ goto end;
+ }
+
+ for (i= 0; i < array.elements; ++i)
+ {
+ get_dynamic(&array, (uchar *)&tmp_entry, i);
+ if ((err= rpl_global_gtid_slave_state.update(tmp_entry.gtid.domain_id,
+ tmp_entry.gtid.server_id,
+ tmp_entry.sub_id,
+ tmp_entry.gtid.seq_no)))
+ {
+ rpl_global_gtid_slave_state.unlock();
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto end;
+ }
+ }
+
+ for (i= 0; i < hash.records; ++i)
+ {
+ entry= (struct local_element *)my_hash_element(&hash, i);
+ if (opt_bin_log &&
+ mysql_bin_log.bump_seq_no_counter_if_needed(entry->gtid.domain_id,
+ entry->gtid.seq_no))
+ {
+ rpl_global_gtid_slave_state.unlock();
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto end;
+ }
+ }
+
+ rpl_global_gtid_slave_state.loaded= true;
+ rpl_global_gtid_slave_state.unlock();
+
+ err= 0; /* Clear HA_ERR_END_OF_FILE */
+
+end:
+ if (table_scanned)
+ {
+ table->file->ha_index_or_rnd_end();
+ ha_commit_trans(thd, FALSE);
+ ha_commit_trans(thd, TRUE);
+ }
+ if (table_opened)
+ {
+ close_thread_tables(thd);
+ thd->mdl_context.release_transactional_locks();
+ }
+ if (array_inited)
+ delete_dynamic(&array);
+ my_hash_free(&hash);
+ DBUG_RETURN(err);
+}
+
#endif
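
rpl_load_gtid_slave_state() above reads every row of mysql.gtid_slave_state and keeps, per domain_id, only the row with the highest sub_id as the current replication position; the remaining rows are older history. The reduction is simply a per-domain maximum, as in this stand-alone sketch (row values and types invented for illustration):

// Illustrative only: reduce (domain_id, sub_id, server_id, seq_no) rows to
// one "current" GTID per domain by keeping the row with the largest sub_id,
// mirroring the scan loop in rpl_load_gtid_slave_state() above.
#include <cstdint>
#include <map>
#include <vector>

struct row { uint32_t domain_id; uint64_t sub_id; uint32_t server_id; uint64_t seq_no; };

int main()
{
  std::vector<row> rows= {
    {0, 10, 1, 99}, {0, 12, 2, 112},   // domain 0: sub_id 12 wins
    {1, 7, 4, 1022},                   // domain 1: only one row
  };

  std::map<uint32_t, row> current;     // domain_id -> newest row
  for (const row &r : rows)
  {
    auto it= current.find(r.domain_id);
    if (it == current.end() || it->second.sub_id < r.sub_id)
      current[r.domain_id]= r;
  }

  // domain 0 -> 0-2-112, domain 1 -> 1-4-1022
  return current.size() == 2 && current[0].seq_no == 112 ? 0 : 1;
}
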
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index 6144d37026b..6dd757343fd 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -263,7 +263,9 @@ public:
thread is running).
*/
- enum {UNTIL_NONE= 0, UNTIL_MASTER_POS, UNTIL_RELAY_POS} until_condition;
+ enum {
+ UNTIL_NONE= 0, UNTIL_MASTER_POS, UNTIL_RELAY_POS, UNTIL_GTID
+ } until_condition;
char until_log_name[FN_REFLEN];
ulonglong until_log_pos;
/* extension extracted from log_name and converted to int */
@@ -277,6 +279,8 @@ public:
UNTIL_LOG_NAMES_CMP_UNKNOWN= -2, UNTIL_LOG_NAMES_CMP_LESS= -1,
UNTIL_LOG_NAMES_CMP_EQUAL= 0, UNTIL_LOG_NAMES_CMP_GREATER= 1
} until_log_names_cmp_result;
+ /* Condition for UNTIL master_gtid_pos. */
+ slave_connection_state until_gtid_pos;
char cached_charset[6];
/*
@@ -307,6 +311,14 @@ public:
char slave_patternload_file[FN_REFLEN];
size_t slave_patternload_file_size;
+ /*
+ Current GTID being processed.
+ The sub_id gives the binlog order within one domain_id. A zero sub_id
+ means that there is no active GTID.
+ */
+ uint64 gtid_sub_id;
+ rpl_gtid current_gtid;
+
Relay_log_info(bool is_slave_recovery);
~Relay_log_info();
@@ -346,6 +358,8 @@ public:
bool is_until_satisfied(THD *thd, Log_event *ev);
inline ulonglong until_pos()
{
+ DBUG_ASSERT(until_condition == UNTIL_MASTER_POS ||
+ until_condition == UNTIL_RELAY_POS);
return ((until_condition == UNTIL_MASTER_POS) ? group_master_log_pos :
group_relay_log_pos);
}
@@ -445,7 +459,7 @@ public:
the <code>Seconds_behind_master</code> field.
*/
void stmt_done(my_off_t event_log_pos,
- time_t event_creation_time);
+ time_t event_creation_time, THD *thd);
/**
@@ -584,4 +598,8 @@ private:
int init_relay_log_info(Relay_log_info* rli, const char* info_fname);
+extern struct rpl_slave_state rpl_global_gtid_slave_state;
+
+int rpl_load_gtid_slave_state(THD *thd);
+
#endif /* RPL_RLI_H */
diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc
index 33e04e488cb..db47c3c164a 100644
--- a/sql/rpl_utility.cc
+++ b/sql/rpl_utility.cc
@@ -1,5 +1,6 @@
/*
Copyright (c) 2006, 2010, Oracle and/or its affiliates.
+ Copyright (c) 2011, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -108,12 +109,15 @@ max_display_length_for_field(enum_field_types sql_type, unsigned int metadata)
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_TIME2:
return 3;
case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP2:
return 4;
case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_DATETIME2:
return 8;
case MYSQL_TYPE_BIT:
@@ -261,12 +265,21 @@ uint32 table_def::calc_field_size(uint col, uchar *master_data) const
case MYSQL_TYPE_TIME:
length= 3;
break;
+ case MYSQL_TYPE_TIME2:
+ length= my_time_binary_length(m_field_metadata[col]);
+ break;
case MYSQL_TYPE_TIMESTAMP:
length= 4;
break;
+ case MYSQL_TYPE_TIMESTAMP2:
+ length= my_timestamp_binary_length(m_field_metadata[col]);
+ break;
case MYSQL_TYPE_DATETIME:
length= 8;
break;
+ case MYSQL_TYPE_DATETIME2:
+ length= my_datetime_binary_length(m_field_metadata[col]);
+ break;
case MYSQL_TYPE_BIT:
{
/*
@@ -375,6 +388,7 @@ void show_sql_type(enum_field_types type, uint16 metadata, String *str, CHARSET_
break;
case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP2:
str->set_ascii(STRING_WITH_LEN("timestamp"));
break;
@@ -392,10 +406,12 @@ void show_sql_type(enum_field_types type, uint16 metadata, String *str, CHARSET_
break;
case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_TIME2:
str->set_ascii(STRING_WITH_LEN("time"));
break;
case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_DATETIME2:
str->set_ascii(STRING_WITH_LEN("datetime"));
break;
@@ -521,9 +537,9 @@ bool is_conversion_ok(int order, Relay_log_info *rli)
bool allow_non_lossy, allow_lossy;
allow_non_lossy = slave_type_conversions_options &
- (ULL(1) << SLAVE_TYPE_CONVERSIONS_ALL_NON_LOSSY);
+ (1ULL << SLAVE_TYPE_CONVERSIONS_ALL_NON_LOSSY);
allow_lossy= slave_type_conversions_options &
- (ULL(1) << SLAVE_TYPE_CONVERSIONS_ALL_LOSSY);
+ (1ULL << SLAVE_TYPE_CONVERSIONS_ALL_LOSSY);
DBUG_PRINT("enter", ("order: %d, flags:%s%s", order,
allow_non_lossy ? " ALL_NON_LOSSY" : "",
@@ -614,6 +630,23 @@ can_convert_field_to(Field *field,
else
DBUG_RETURN(false);
}
+ else if (metadata == 0 &&
+ ((field->real_type() == MYSQL_TYPE_TIMESTAMP2 &&
+ source_type == MYSQL_TYPE_TIMESTAMP) ||
+ (field->real_type() == MYSQL_TYPE_TIME2 &&
+ source_type == MYSQL_TYPE_TIME) ||
+ (field->real_type() == MYSQL_TYPE_DATETIME2 &&
+ source_type == MYSQL_TYPE_DATETIME)))
+ {
+ /*
+ TS-TODO: conversion from FSP1>FSP2.
+ Can do non-lossy conversion
+ from old TIME, TIMESTAMP, DATETIME
+ to MySQL56 TIME(0), TIMESTAMP(0), DATETIME(0).
+ */
+ *order_var= -1;
+ DBUG_RETURN(true);
+ }
else if (!slave_type_conversions_options)
DBUG_RETURN(false);
@@ -738,6 +771,9 @@ can_convert_field_to(Field *field,
case MYSQL_TYPE_NULL:
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_SET:
+ case MYSQL_TYPE_TIMESTAMP2:
+ case MYSQL_TYPE_DATETIME2:
+ case MYSQL_TYPE_TIME2:
DBUG_RETURN(false);
}
DBUG_RETURN(false); // To keep GCC happy
@@ -878,8 +914,13 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *
DBUG_ENTER("table_def::create_conversion_table");
List<Create_field> field_list;
-
- for (uint col= 0 ; col < size() ; ++col)
+ /*
+    The column count may differ between master and slave, so we create only
+ MY_MIN(columns@master, columns@slave) columns in the
+ conversion table.
+ */
+ uint const cols_to_create= MY_MIN(target_table->s->fields, size());
+ for (uint col= 0 ; col < cols_to_create; ++col)
{
Create_field *field_def=
(Create_field*) alloc_root(thd->mem_root, sizeof(Create_field));
@@ -933,7 +974,7 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *
DBUG_PRINT("debug", ("sql_type: %d, target_field: '%s', max_length: %d, decimals: %d,"
" maybe_null: %d, unsigned_flag: %d, pack_length: %u",
- type(col), target_table->field[col]->field_name,
+ binlog_type(col), target_table->field[col]->field_name,
max_length, decimals, TRUE, FALSE, pack_length));
field_def->init_for_tmp_table(type(col),
max_length,
@@ -987,7 +1028,7 @@ table_def::table_def(unsigned char *types, ulong size,
int index= 0;
for (unsigned int i= 0; i < m_size; i++)
{
- switch (m_type[i]) {
+ switch (binlog_type(i)) {
case MYSQL_TYPE_TINY_BLOB:
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_MEDIUM_BLOB:
@@ -1036,6 +1077,11 @@ table_def::table_def(unsigned char *types, ulong size,
m_field_metadata[i]= x;
break;
}
+ case MYSQL_TYPE_TIME2:
+ case MYSQL_TYPE_DATETIME2:
+ case MYSQL_TYPE_TIMESTAMP2:
+ m_field_metadata[i]= field_metadata[index++];
+ break;
default:
m_field_metadata[i]= 0;
break;
@@ -1167,6 +1213,7 @@ void Deferred_log_events::rewind()
Log_event *ev= *(Log_event **) dynamic_array_ptr(&array, i);
delete ev;
}
+ last_added= NULL;
if (array.elements > array.max_element)
freeze_size(&array);
reset_dynamic(&array);
diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h
index 19a5f621f69..9ac17f68a1f 100644
--- a/sql/rpl_utility.h
+++ b/sql/rpl_utility.h
@@ -65,6 +65,14 @@ public:
ulong size() const { return m_size; }
+ /**
+ Returns internal binlog type code for one field,
+ without translation to real types.
+ */
+ enum_field_types binlog_type(ulong index) const
+ {
+ return static_cast<enum_field_types>(m_type[index]);
+ }
/*
Return a representation of the type data for one field.
@@ -82,7 +90,7 @@ public:
either MYSQL_TYPE_STRING, MYSQL_TYPE_ENUM, or MYSQL_TYPE_SET, so
we might need to modify the type to get the real type.
*/
- enum_field_types source_type= static_cast<enum_field_types>(m_type[index]);
+ enum_field_types source_type= binlog_type(index);
uint16 source_metadata= m_field_metadata[index];
switch (source_type)
{
diff --git a/sql/scheduler.cc b/sql/scheduler.cc
index 54653557b16..71789b0303b 100644
--- a/sql/scheduler.cc
+++ b/sql/scheduler.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
- Copyright (c) 2012, Monty Program Ab
+ Copyright (c) 2012, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,6 +27,7 @@
#include "mysqld.h"
#include "sql_class.h"
#include "sql_callback.h"
+#include <violite.h>
/*
End connection, in case when we are using 'no-threads'
@@ -61,6 +62,15 @@ static void scheduler_wait_sync_begin(void) {
static void scheduler_wait_sync_end(void) {
thd_wait_end(NULL);
}
+
+static void scheduler_wait_net_begin(void) {
+ thd_wait_begin(NULL, THD_WAIT_NET);
+}
+
+static void scheduler_wait_net_end(void) {
+ thd_wait_end(NULL);
+}
+
};
/**@}*/
@@ -76,6 +86,9 @@ void scheduler_init() {
scheduler_wait_lock_end);
thr_set_sync_wait_callback(scheduler_wait_sync_begin,
scheduler_wait_sync_end);
+
+ vio_set_wait_callback(scheduler_wait_net_begin,
+ scheduler_wait_net_end);
}
@@ -139,52 +152,3 @@ void one_thread_scheduler(scheduler_functions *func)
func->end_thread= no_threads_end;
}
-
-
-/*
- no pluggable schedulers in mariadb.
- when we'll want it, we'll do it properly
-*/
-#if 0
-
-static scheduler_functions *saved_thread_scheduler;
-static uint saved_thread_handling;
-
-extern "C"
-int my_thread_scheduler_set(scheduler_functions *scheduler)
-{
- DBUG_ASSERT(scheduler != 0);
-
- if (scheduler == NULL)
- return 1;
-
- saved_thread_scheduler= thread_scheduler;
- saved_thread_handling= thread_handling;
- thread_scheduler= scheduler;
- // Scheduler loaded dynamically
- thread_handling= SCHEDULER_TYPES_COUNT;
- return 0;
-}
-
-
-extern "C"
-int my_thread_scheduler_reset()
-{
- DBUG_ASSERT(saved_thread_scheduler != NULL);
-
- if (saved_thread_scheduler == NULL)
- return 1;
-
- thread_scheduler= saved_thread_scheduler;
- thread_handling= saved_thread_handling;
- saved_thread_scheduler= 0;
- return 0;
-}
-#else
-extern "C" int my_thread_scheduler_set(scheduler_functions *scheduler)
-{ return 1; }
-
-extern "C" int my_thread_scheduler_reset()
-{ return 1; }
-#endif
-
diff --git a/sql/set_var.cc b/sql/set_var.cc
index c808e2dc11c..db74d8f0d9d 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2002, 2011, Oracle and/or its affiliates.
- Copyright (c) 2008, 2012, Monty Program Ab
+/* Copyright (c) 2002, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2008, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,7 +16,7 @@
/* variable declarations are in sys_vars.cc now !!! */
-#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
+#include "sql_plugin.h"
#include "sql_class.h" // set_var.h: session_var_ptr
#include "set_var.h"
#include "sql_priv.h"
@@ -251,19 +251,124 @@ uchar *sys_var::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
return session_value_ptr(thd, base);
}
-bool sys_var::set_default(THD *thd, enum_var_type type)
+bool sys_var::set_default(THD *thd, set_var* var)
{
- LEX_STRING empty={0,0};
- set_var var(type, this, &empty, 0);
-
- if (type == OPT_GLOBAL || scope() == GLOBAL)
- global_save_default(thd, &var);
+ if (var->type == OPT_GLOBAL || scope() == GLOBAL)
+ global_save_default(thd, var);
else
- session_save_default(thd, &var);
+ session_save_default(thd, var);
+
+ return check(thd, var) || update(thd, var);
+}
+
+
+#define do_num_val(T,CMD) \
+do { \
+ mysql_mutex_lock(&LOCK_global_system_variables); \
+ T val= *(T*) value_ptr(thd, type, base); \
+ mysql_mutex_unlock(&LOCK_global_system_variables); \
+ CMD; \
+} while (0)
+
+#define case_for_integers(CMD) \
+ case SHOW_SINT: do_num_val (int,CMD); \
+ case SHOW_SLONG: do_num_val (long,CMD); \
+ case SHOW_SLONGLONG:do_num_val (longlong,CMD); \
+ case SHOW_UINT: do_num_val (uint,CMD); \
+ case SHOW_ULONG: do_num_val (ulong,CMD); \
+ case SHOW_ULONGLONG:do_num_val (ulonglong,CMD); \
+ case SHOW_HA_ROWS: do_num_val (ha_rows,CMD); \
+ case SHOW_BOOL: do_num_val (bool,CMD); \
+ case SHOW_MY_BOOL: do_num_val (my_bool,CMD)
+
+#define case_for_double(CMD) \
+ case SHOW_DOUBLE: do_num_val (double,CMD)
+
+#define case_get_string_as_lex_string \
+ case SHOW_CHAR: \
+ mysql_mutex_lock(&LOCK_global_system_variables); \
+ sval.str= (char*) value_ptr(thd, type, base); \
+ sval.length= sval.str ? strlen(sval.str) : 0; \
+ break; \
+ case SHOW_CHAR_PTR: \
+ mysql_mutex_lock(&LOCK_global_system_variables); \
+ sval.str= *(char**) value_ptr(thd, type, base); \
+ sval.length= sval.str ? strlen(sval.str) : 0; \
+ break; \
+ case SHOW_LEX_STRING: \
+ mysql_mutex_lock(&LOCK_global_system_variables); \
+ sval= *(LEX_STRING *) value_ptr(thd, type, base); \
+ break
+
+longlong sys_var::val_int(bool *is_null,
+ THD *thd, enum_var_type type, LEX_STRING *base)
+{
+ LEX_STRING sval;
+ *is_null= false;
+ switch (show_type())
+ {
+ case_get_string_as_lex_string;
+ case_for_integers(return val);
+ case_for_double(return (longlong) val);
+ default:
+ my_error(ER_VAR_CANT_BE_READ, MYF(0), name.str);
+ return 0;
+ }
- return check(thd, &var) || update(thd, &var);
+ longlong ret= 0;
+ if (!(*is_null= !sval.str))
+ ret= longlong_from_string_with_check(system_charset_info,
+ sval.str, sval.str + sval.length);
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ return ret;
}
+
+String *sys_var::val_str(String *str,
+ THD *thd, enum_var_type type, LEX_STRING *base)
+{
+ LEX_STRING sval;
+ switch (show_type())
+ {
+ case_get_string_as_lex_string;
+ case_for_integers(return str->set((ulonglong)val, system_charset_info) ? 0 : str);
+ case_for_double(return str->set_real(val, 6, system_charset_info) ? 0 : str);
+ default:
+ my_error(ER_VAR_CANT_BE_READ, MYF(0), name.str);
+ return 0;
+ }
+
+ if (!sval.str || str->copy(sval.str, sval.length, system_charset_info))
+ str= NULL;
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ return str;
+}
+
+
+double sys_var::val_real(bool *is_null,
+ THD *thd, enum_var_type type, LEX_STRING *base)
+{
+ LEX_STRING sval;
+ *is_null= false;
+ switch (show_type())
+ {
+ case_get_string_as_lex_string;
+ case_for_integers(return val);
+ case_for_double(return val);
+ default:
+ my_error(ER_VAR_CANT_BE_READ, MYF(0), name.str);
+ return 0;
+ }
+
+ double ret= 0;
+ if (!(*is_null= !sval.str))
+ ret= double_from_string_with_check(system_charset_info,
+ sval.str, sval.str + sval.length);
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ return ret;
+}
+
+
void sys_var::do_deprecated_warning(THD *thd)
{
if (deprecation_substitute != NULL)
@@ -664,7 +769,7 @@ int set_var::light_check(THD *thd)
*/
int set_var::update(THD *thd)
{
- return value ? var->update(thd, this) : var->set_default(thd, type);
+ return value ? var->update(thd, this) : var->set_default(thd, this);
}
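
The val_int/val_str/val_real helpers added above depend on the CMD argument of each case_* macro being a statement that leaves the function (a return), so the case labels never fall through, and LOCK_global_system_variables is released before the fetched value escapes. A minimal, self-contained toy of the same do_num_val pattern — not server code; std::mutex and a static ulong merely stand in for LOCK_global_system_variables and the variable's storage:

    // Toy re-creation of the do_num_val / case_for_integers pattern above.
    #include <cstdio>
    #include <mutex>

    static std::mutex global_lock;            // stand-in for LOCK_global_system_variables
    static unsigned long stored_value= 42;    // stand-in for the variable's storage

    static void *value_ptr() { return &stored_value; }

    enum show_type { SHOW_ULONG, SHOW_BOOL };

    #define do_num_val(T, CMD)            \
    do {                                  \
      global_lock.lock();                 \
      T val= *(T *) value_ptr();          \
      global_lock.unlock();               \
      CMD;                                \
    } while (0)

    static long long val_int(show_type t)
    {
      switch (t)
      {
      case SHOW_ULONG: do_num_val(unsigned long, return (long long) val);
      case SHOW_BOOL:  do_num_val(bool, return val);
      }
      return 0;                           /* unreachable for the types above */
    }

    int main()
    {
      std::printf("%lld\n", val_int(SHOW_ULONG));   /* prints 42 */
      return 0;
    }

Because CMD is a return, each case exits val_int directly after the unlock, which is why the real switch above needs no break statements.
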
diff --git a/sql/set_var.h b/sql/set_var.h
index f912c9fffad..f248dc2894f 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -1,6 +1,6 @@
#ifndef SET_VAR_INCLUDED
#define SET_VAR_INCLUDED
-/* Copyright (c) 2002, 2011, Oracle and/or its affiliates.
+/* Copyright (c) 2002, 2013, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,7 +13,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**
@file
@@ -105,9 +105,19 @@ public:
bool check(THD *thd, set_var *var);
uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
- bool set_default(THD *thd, enum_var_type type);
+
+ /**
+ Update the system variable with the default value from either
+ session or global scope. The default value is stored in the
+ 'var' argument. Return false when successful.
+ */
+ bool set_default(THD *thd, set_var *var);
bool update(THD *thd, set_var *var);
+ longlong val_int(bool *is_null, THD *thd, enum_var_type type, LEX_STRING *base);
+ String *val_str(String *str, THD *thd, enum_var_type type, LEX_STRING *base);
+ double val_real(bool *is_null, THD *thd, enum_var_type type, LEX_STRING *base);
+
SHOW_TYPE show_type() { return show_val_type; }
int scope() const { return flags & SCOPE_MASK; }
CHARSET_INFO *charset(THD *thd);
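
With val_int/val_str/val_real now declared on sys_var, server-side code holding a THD can read a variable's current value generically instead of switching on its SHOW type itself. A hypothetical caller sketch, not part of the patch: it assumes the usual server headers, and find_sys_var, OPT_GLOBAL and sql_print_information are existing server facilities; error handling and the LEX_STRING component argument are simplified:

    /* Hypothetical helper: print a system variable as an integer via the
       sys_var::val_int accessor declared above. */
    static void print_sysvar_as_int(THD *thd, const char *name)
    {
      sys_var *var= find_sys_var(thd, name, strlen(name));
      if (!var)
        return;                            /* unknown variable */
      bool is_null;
      LEX_STRING base= { NULL, 0 };        /* no component (e.g. no key cache name) */
      longlong v= var->val_int(&is_null, thd, OPT_GLOBAL, &base);
      if (!is_null)
        sql_print_information("%s = %lld", name, (long long) v);
    }
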
diff --git a/sql/share/charsets/Index.xml b/sql/share/charsets/Index.xml
index f32d8bf6127..e82ffc85ea6 100644
--- a/sql/share/charsets/Index.xml
+++ b/sql/share/charsets/Index.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<description>
diff --git a/sql/share/charsets/armscii8.xml b/sql/share/charsets/armscii8.xml
index 714e57bb12e..52382c83af0 100644
--- a/sql/share/charsets/armscii8.xml
+++ b/sql/share/charsets/armscii8.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="armscii8">
diff --git a/sql/share/charsets/ascii.xml b/sql/share/charsets/ascii.xml
index f4fb79ac632..bec34ad525e 100644
--- a/sql/share/charsets/ascii.xml
+++ b/sql/share/charsets/ascii.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="ascii">
diff --git a/sql/share/charsets/cp1250.xml b/sql/share/charsets/cp1250.xml
index bd0d7d3f3c0..58e55de9bdc 100644
--- a/sql/share/charsets/cp1250.xml
+++ b/sql/share/charsets/cp1250.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="cp1250">
diff --git a/sql/share/charsets/cp1256.xml b/sql/share/charsets/cp1256.xml
index 64cb253145c..806fef961f7 100644
--- a/sql/share/charsets/cp1256.xml
+++ b/sql/share/charsets/cp1256.xml
@@ -18,7 +18,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="cp1256">
diff --git a/sql/share/charsets/cp1257.xml b/sql/share/charsets/cp1257.xml
index 0c2688c264e..8ae73fdf25a 100644
--- a/sql/share/charsets/cp1257.xml
+++ b/sql/share/charsets/cp1257.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="cp1257">
diff --git a/sql/share/charsets/cp850.xml b/sql/share/charsets/cp850.xml
index 4076a5f6a56..198b336daef 100644
--- a/sql/share/charsets/cp850.xml
+++ b/sql/share/charsets/cp850.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="cp850">
diff --git a/sql/share/charsets/cp852.xml b/sql/share/charsets/cp852.xml
index 25b622d2a4b..7608296d5b7 100644
--- a/sql/share/charsets/cp852.xml
+++ b/sql/share/charsets/cp852.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="cp852">
diff --git a/sql/share/charsets/cp866.xml b/sql/share/charsets/cp866.xml
index fa2e1865de6..d35f3d68b05 100644
--- a/sql/share/charsets/cp866.xml
+++ b/sql/share/charsets/cp866.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="cp866">
diff --git a/sql/share/charsets/dec8.xml b/sql/share/charsets/dec8.xml
index 2cd52de464a..66bb421b674 100644
--- a/sql/share/charsets/dec8.xml
+++ b/sql/share/charsets/dec8.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="dec8">
diff --git a/sql/share/charsets/geostd8.xml b/sql/share/charsets/geostd8.xml
index 5e3816975d6..a789d07e6d8 100644
--- a/sql/share/charsets/geostd8.xml
+++ b/sql/share/charsets/geostd8.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="geostd8">
diff --git a/sql/share/charsets/greek.xml b/sql/share/charsets/greek.xml
index 000019a8ce0..5b66a7ab442 100644
--- a/sql/share/charsets/greek.xml
+++ b/sql/share/charsets/greek.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="greek">
diff --git a/sql/share/charsets/hebrew.xml b/sql/share/charsets/hebrew.xml
index 20d68487301..e7f896a3e12 100644
--- a/sql/share/charsets/hebrew.xml
+++ b/sql/share/charsets/hebrew.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="hebrew">
diff --git a/sql/share/charsets/hp8.xml b/sql/share/charsets/hp8.xml
index 3ab383ef386..83a076237f7 100644
--- a/sql/share/charsets/hp8.xml
+++ b/sql/share/charsets/hp8.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="hp8">
diff --git a/sql/share/charsets/keybcs2.xml b/sql/share/charsets/keybcs2.xml
index 7335a0f428d..a9f305deab8 100644
--- a/sql/share/charsets/keybcs2.xml
+++ b/sql/share/charsets/keybcs2.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="keybcs2">
diff --git a/sql/share/charsets/koi8r.xml b/sql/share/charsets/koi8r.xml
index 2d8473f6440..21ebf78b79e 100644
--- a/sql/share/charsets/koi8r.xml
+++ b/sql/share/charsets/koi8r.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="koi8r">
diff --git a/sql/share/charsets/koi8u.xml b/sql/share/charsets/koi8u.xml
index 16177627ffe..65145c97593 100644
--- a/sql/share/charsets/koi8u.xml
+++ b/sql/share/charsets/koi8u.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="koi8u">
diff --git a/sql/share/charsets/languages.html b/sql/share/charsets/languages.html
index 76af973113e..2b1c44421bf 100644
--- a/sql/share/charsets/languages.html
+++ b/sql/share/charsets/languages.html
@@ -13,7 +13,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#<pre>
(
diff --git a/sql/share/charsets/latin1.xml b/sql/share/charsets/latin1.xml
index 88ceff440d5..8963c3481d3 100644
--- a/sql/share/charsets/latin1.xml
+++ b/sql/share/charsets/latin1.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="latin1">
diff --git a/sql/share/charsets/latin2.xml b/sql/share/charsets/latin2.xml
index 6b887b927a4..183da7b6cd3 100644
--- a/sql/share/charsets/latin2.xml
+++ b/sql/share/charsets/latin2.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="latin2">
diff --git a/sql/share/charsets/latin5.xml b/sql/share/charsets/latin5.xml
index 9c23200a46d..489299564f1 100644
--- a/sql/share/charsets/latin5.xml
+++ b/sql/share/charsets/latin5.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="latin5">
diff --git a/sql/share/charsets/latin7.xml b/sql/share/charsets/latin7.xml
index 02d3ff8b17e..fb384b3a5ff 100644
--- a/sql/share/charsets/latin7.xml
+++ b/sql/share/charsets/latin7.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="latin7">
diff --git a/sql/share/charsets/macce.xml b/sql/share/charsets/macce.xml
index 21e303609cf..d7242f26297 100644
--- a/sql/share/charsets/macce.xml
+++ b/sql/share/charsets/macce.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="macce">
diff --git a/sql/share/charsets/macroman.xml b/sql/share/charsets/macroman.xml
index 2b43fe73b07..a2485cf9379 100644
--- a/sql/share/charsets/macroman.xml
+++ b/sql/share/charsets/macroman.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="macroman">
diff --git a/sql/share/charsets/swe7.xml b/sql/share/charsets/swe7.xml
index 17fa6b7d9bc..f12a2238718 100644
--- a/sql/share/charsets/swe7.xml
+++ b/sql/share/charsets/swe7.xml
@@ -16,7 +16,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
</copyright>
<charset name="swe7">
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index ae070df6fdf..35f2cfb330c 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -75,29 +75,30 @@ ER_CANT_CREATE_FILE
swe "Kan inte skapa filen '%-.200s' (Felkod: %M)"
ukr "Не можу створити файл '%-.200s' (помилка: %M)"
ER_CANT_CREATE_TABLE
- cze "Nemohu vytvořit tabulku '%-.200s' (chybový kód: %M)"
- dan "Kan ikke oprette tabellen '%-.200s' (Fejlkode: %M)"
- nla "Kan tabel '%-.200s' niet aanmaken (Errcode: %M)"
- eng "Can't create table '%-.200s' (errno: %M)"
- est "Ei suuda luua tabelit '%-.200s' (veakood: %M)"
- fre "Ne peut créer la table '%-.200s' (Errcode: %M)"
- ger "Kann Tabelle '%-.200s' nicht erzeugen (Fehler: %M)"
- greek "Αδύνατη η δημιουργία του πίνακα '%-.200s' (κωδικός λάθους: %M)"
- hun "A '%-.200s' tabla nem hozhato letre (hibakod: %M)"
- ita "Impossibile creare la tabella '%-.200s' (errno: %M)"
- jpn "表 '%-.200s' を作成できません。(エラー番号: %M)"
- kor "테이블 '%-.200s'를 만들지 못했습니다. (에러번호: %M)"
- nor "Kan ikke opprette tabellen '%-.200s' (Feilkode: %M)"
- norwegian-ny "Kan ikkje opprette tabellen '%-.200s' (Feilkode: %M)"
- pol "Nie można stworzyć tabeli '%-.200s' (Kod błędu: %M)"
- por "Não pode criar a tabela '%-.200s' (erro no. %M)"
- rum "Nu pot sa creez tabla '%-.200s' (Eroare: %M)"
- rus "Невозможно создать таблицу '%-.200s' (ошибка: %M)"
- serbian "Ne mogu da kreiram tabelu '%-.200s' (errno: %M)"
- slo "Nemôžem vytvoriť tabuľku '%-.200s' (chybový kód: %M)"
- spa "No puedo crear tabla '%-.200s' (Error: %M)"
- swe "Kan inte skapa tabellen '%-.200s' (Felkod: %M)"
- ukr "Не можу створити таблицю '%-.200s' (помилка: %M)"
+ cze "Nemohu vytvořit tabulku %`s.%`s (chybový kód: %M)"
+ dan "Kan ikke oprette tabellen %`s.%`s (Fejlkode: %M)"
+ nla "Kan tabel %`s.%`s niet aanmaken (Errcode: %M)"
+ eng "Can't create table %`s.%`s (errno: %M)"
+ jps "%`s.%`s テーブルが作れません.(errno: %M)",
+ est "Ei suuda luua tabelit %`s.%`s (veakood: %M)"
+ fre "Ne peut créer la table %`s.%`s (Errcode: %M)"
+ ger "Kann Tabelle %`s.%`s nicht erzeugen (Fehler: %M)"
+ greek "Αδύνατη η δημιουργία του πίνακα %`s.%`s (κωδικός λάθους: %M)"
+ hun "A %`s.%`s tabla nem hozhato letre (hibakod: %M)"
+ ita "Impossibile creare la tabella %`s.%`s (errno: %M)"
+ jpn "%`s.%`s テーブルが作れません.(errno: %M)"
+ kor "테이블 %`s.%`s를 만들지 못했습니다. (에러번호: %M)"
+ nor "Kan ikke opprette tabellen %`s.%`s (Feilkode: %M)"
+ norwegian-ny "Kan ikkje opprette tabellen %`s.%`s (Feilkode: %M)"
+ pol "Nie można stworzyć tabeli %`s.%`s (Kod błędu: %M)"
+ por "Não pode criar a tabela %`s.%`s (erro no. %M)"
+ rum "Nu pot sa creez tabla %`s.%`s (Eroare: %M)"
+ rus "Невозможно создать таблицу %`s.%`s (ошибка: %M)"
+ serbian "Ne mogu da kreiram tabelu %`s.%`s (errno: %M)"
+ slo "Nemôžem vytvoriť tabuľku %`s.%`s (chybový kód: %M)"
+ spa "No puedo crear tabla %`s.%`s (Error: %M)"
+ swe "Kan inte skapa tabellen %`s.%`s (Felkod: %M)"
+ ukr "Не можу створити таблицю %`s.%`s (помилка: %M)"
ER_CANT_CREATE_DB
cze "Nemohu vytvořit databázi '%-.192s' (chybový kód: %M)"
dan "Kan ikke oprette databasen '%-.192s' (Fejlkode: %M)"
@@ -675,29 +676,21 @@ ER_FORM_NOT_FOUND
swe "Formulär '%-.192s' finns inte i '%-.192s'"
ukr "Вигляд '%-.192s' не існує для '%-.192s'"
ER_GET_ERRNO
- cze "Obsluha tabulky vrátila chybu %M"
- dan "Modtog fejl %M fra tabel håndteringen"
- nla "Fout %M van tabel handler"
- eng "Got error %M from storage engine"
- est "Tabeli handler tagastas vea %M"
- fre "Reçu l'erreur %M du handler de la table"
- ger "Fehler %M (Speicher-Engine)"
- greek "Ελήφθη μήνυμα λάθους %M από τον χειριστή πίνακα (table handler)"
- hun "%M hibajelzes a tablakezelotol"
- ita "Rilevato l'errore %M dal gestore delle tabelle"
- jpn "ストレージエンジンがエラー %M を返しました。"
- kor "테이블 handler에서 %M 에러가 발생 하였습니다."
- nor "Mottok feil %M fra tabell håndterer"
- norwegian-ny "Mottok feil %M fra tabell handterar"
- pol "Otrzymano bł?d %M z obsługi tabeli"
- por "Obteve erro %M no manipulador de tabelas"
- rum "Eroarea %M obtinuta din handlerul tabelei"
- rus "Получена ошибка %M от обработчика таблиц"
- serbian "Handler tabela je vratio grešku %M"
- slo "Obsluha tabuľky vrátila chybu %M"
- spa "Error %M desde el manejador de la tabla"
- swe "Fick felkod %M från databashanteraren"
- ukr "Отримано помилку %M від дескриптора таблиці"
+ nla "Fout %M van tabel handler %s"
+ eng "Got error %M from storage engine %s"
+ fre "Reçu l'erreur %M du handler de la table %s"
+ ger "Fehler %M von Speicher-Engine %s"
+ greek "Ελήφθη μήνυμα λάθους %M από τον χειριστή πίνακα (table handler) %s"
+ ita "Rilevato l'errore %M dal gestore delle tabelle %s"
+ nor "Mottok feil %M fra tabell håndterer %s"
+ norwegian-ny "Mottok feil %M fra tabell handterar %s"
+ pol "Otrzymano bł?d %M z obsługi tabeli %s"
+ por "Obteve erro %M no manipulador de tabelas %s"
+ rum "Eroarea %M obtinuta din handlerul tabelei %s"
+ rus "Получена ошибка %M от обработчика таблиц %s"
+ spa "Error %M desde el manejador de la tabla %s"
+ swe "Fick felkod %M från databashanteraren %s"
+ ukr "Отримано помилку %M від дескриптора таблиці %s"
ER_ILLEGAL_HA
eng "Storage engine %s of the table %`s.%`s doesn't have this option"
ger "Diese Option gibt es nicht in Speicher-Engine %s für %`s.%`s"
@@ -991,51 +984,53 @@ ER_HANDSHAKE_ERROR 08S01
swe "Fel vid initiering av kommunikationen med klienten"
ukr "Невірна установка зв'язку"
ER_DBACCESS_DENIED_ERROR 42000
- cze "Přístup pro uživatele '%-.48s'@'%-.64s' k databázi '%-.192s' není povolen"
- dan "Adgang nægtet bruger: '%-.48s'@'%-.64s' til databasen '%-.192s'"
- nla "Toegang geweigerd voor gebruiker: '%-.48s'@'%-.64s' naar database '%-.192s'"
- eng "Access denied for user '%-.48s'@'%-.64s' to database '%-.192s'"
- est "Ligipääs keelatud kasutajale '%-.48s'@'%-.64s' andmebaasile '%-.192s'"
- fre "Accès refusé pour l'utilisateur: '%-.48s'@'@%-.64s'. Base '%-.192s'"
- ger "Benutzer '%-.48s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.192s'"
- greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%-.48s'@'%-.64s' στη βάση δεδομένων '%-.192s'"
- hun "A(z) '%-.48s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.192s' adabazishoz."
- ita "Accesso non consentito per l'utente: '%-.48s'@'%-.64s' al database '%-.192s'"
- jpn "ユーザー '%-.48s'@'%-.64s' によるデータベース '%-.192s' へのアクセスは拒否されました。"
- kor "'%-.48s'@'%-.64s' 사용자는 '%-.192s' 데이타베이스에 접근이 거부 되었습니다."
- nor "Tilgang nektet for bruker: '%-.48s'@'%-.64s' til databasen '%-.192s' nektet"
- norwegian-ny "Tilgang ikkje tillate for brukar: '%-.48s'@'%-.64s' til databasen '%-.192s' nekta"
- por "Acesso negado para o usuário '%-.48s'@'%-.64s' ao banco de dados '%-.192s'"
- rum "Acces interzis pentru utilizatorul: '%-.48s'@'%-.64s' la baza de date '%-.192s'"
- rus "Для пользователя '%-.48s'@'%-.64s' доступ к базе данных '%-.192s' закрыт"
- serbian "Pristup je zabranjen korisniku '%-.48s'@'%-.64s' za bazu '%-.192s'"
- slo "Zakázaný prístup pre užívateľa: '%-.48s'@'%-.64s' k databázi '%-.192s'"
- spa "Acceso negado para usuario: '%-.48s'@'%-.64s' para la base de datos '%-.192s'"
- swe "Användare '%-.48s'@'%-.64s' är ej berättigad att använda databasen %-.192s"
- ukr "Доступ заборонено для користувача: '%-.48s'@'%-.64s' до бази данних '%-.192s'"
+ cze "Přístup pro uživatele '%s'@'%s' k databázi '%-.192s' není povolen"
+ dan "Adgang nægtet bruger: '%s'@'%s' til databasen '%-.192s'"
+ nla "Toegang geweigerd voor gebruiker: '%s'@'%s' naar database '%-.192s'"
+ eng "Access denied for user '%s'@'%s' to database '%-.192s'"
+ jps "ユーザー '%s'@'%s' の '%-.192s' データベースへのアクセスを拒否します",
+ est "Ligipääs keelatud kasutajale '%s'@'%s' andmebaasile '%-.192s'"
+ fre "Accès refusé pour l'utilisateur: '%s'@'%s'. Base '%-.192s'"
+ ger "Benutzer '%s'@'%s' hat keine Zugriffsberechtigung für Datenbank '%-.192s'"
+ greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s' στη βάση δεδομένων '%-.192s'"
+ hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres az '%-.192s' adabazishoz."
+ ita "Accesso non consentito per l'utente: '%s'@'%s' al database '%-.192s'"
+ jpn "ユーザー '%s'@'%s' の '%-.192s' データベースへのアクセスを拒否します"
+ kor "'%s'@'%s' 사용자는 '%-.192s' 데이타베이스에 접근이 거부 되었습니다."
+ nor "Tilgang nektet for bruker: '%s'@'%s' til databasen '%-.192s' nektet"
+ norwegian-ny "Tilgang ikkje tillate for brukar: '%s'@'%s' til databasen '%-.192s' nekta"
+ por "Acesso negado para o usuário '%s'@'%s' ao banco de dados '%-.192s'"
+ rum "Acces interzis pentru utilizatorul: '%s'@'%s' la baza de date '%-.192s'"
+ rus "Для пользователя '%s'@'%s' доступ к базе данных '%-.192s' закрыт"
+ serbian "Pristup je zabranjen korisniku '%s'@'%s' za bazu '%-.192s'"
+ slo "Zakázaný prístup pre užívateľa: '%s'@'%s' k databázi '%-.192s'"
+ spa "Acceso negado para usuario: '%s'@'%s' para la base de datos '%-.192s'"
+ swe "Användare '%s'@'%s' är ej berättigad att använda databasen %-.192s"
+ ukr "Доступ заборонено для користувача: '%s'@'%s' до бази данних '%-.192s'"
ER_ACCESS_DENIED_ERROR 28000
- cze "Přístup pro uživatele '%-.48s'@'%-.64s' (s heslem %s)"
- dan "Adgang nægtet bruger: '%-.48s'@'%-.64s' (Bruger adgangskode: %s)"
- nla "Toegang geweigerd voor gebruiker: '%-.48s'@'%-.64s' (Wachtwoord gebruikt: %s)"
- eng "Access denied for user '%-.48s'@'%-.64s' (using password: %s)"
- est "Ligipääs keelatud kasutajale '%-.48s'@'%-.64s' (kasutab parooli: %s)"
- fre "Accès refusé pour l'utilisateur: '%-.48s'@'@%-.64s' (mot de passe: %s)"
- ger "Benutzer '%-.48s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %s)"
- greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%-.48s'@'%-.64s' (χρήση password: %s)"
- hun "A(z) '%-.48s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)"
- ita "Accesso non consentito per l'utente: '%-.48s'@'%-.64s' (Password: %s)"
- jpn "ユーザー '%-.48s'@'%-.64s' のアクセスは拒否されました。(using password: %s)"
- kor "'%-.48s'@'%-.64s' 사용자는 접근이 거부 되었습니다. (using password: %s)"
- nor "Tilgang nektet for bruker: '%-.48s'@'%-.64s' (Bruker passord: %s)"
- norwegian-ny "Tilgang ikke tillate for brukar: '%-.48s'@'%-.64s' (Brukar passord: %s)"
- por "Acesso negado para o usuário '%-.48s'@'%-.64s' (senha usada: %s)"
- rum "Acces interzis pentru utilizatorul: '%-.48s'@'%-.64s' (Folosind parola: %s)"
- rus "Доступ закрыт для пользователя '%-.48s'@'%-.64s' (был использован пароль: %s)"
- serbian "Pristup je zabranjen korisniku '%-.48s'@'%-.64s' (koristi lozinku: '%s')"
- slo "Zakázaný prístup pre užívateľa: '%-.48s'@'%-.64s' (použitie hesla: %s)"
- spa "Acceso negado para usuario: '%-.48s'@'%-.64s' (Usando clave: %s)"
- swe "Användare '%-.48s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)"
- ukr "Доступ заборонено для користувача: '%-.48s'@'%-.64s' (Використано пароль: %s)"
+ cze "Přístup pro uživatele '%s'@'%s' (s heslem %s)"
+ dan "Adgang nægtet bruger: '%s'@'%s' (Bruger adgangskode: %s)"
+ nla "Toegang geweigerd voor gebruiker: '%s'@'%s' (Wachtwoord gebruikt: %s)"
+ eng "Access denied for user '%s'@'%s' (using password: %s)"
+ jps "ユーザー '%s'@'%s' を拒否します.uUsing password: %s)",
+ est "Ligipääs keelatud kasutajale '%s'@'%s' (kasutab parooli: %s)"
+ fre "Accès refusé pour l'utilisateur: '%s'@'%s' (mot de passe: %s)"
+ ger "Benutzer '%s'@'%s' hat keine Zugriffsberechtigung (verwendetes Passwort: %s)"
+ greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s' (χρήση password: %s)"
+ hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)"
+ ita "Accesso non consentito per l'utente: '%s'@'%s' (Password: %s)"
+ jpn "ユーザー '%s'@'%s' を拒否します.uUsing password: %s)"
+ kor "'%s'@'%s' 사용자는 접근이 거부 되었습니다. (using password: %s)"
+ nor "Tilgang nektet for bruker: '%s'@'%s' (Bruker passord: %s)"
+ norwegian-ny "Tilgang ikke tillate for brukar: '%s'@'%s' (Brukar passord: %s)"
+ por "Acesso negado para o usuário '%s'@'%s' (senha usada: %s)"
+ rum "Acces interzis pentru utilizatorul: '%s'@'%s' (Folosind parola: %s)"
+ rus "Доступ закрыт для пользователя '%s'@'%s' (был использован пароль: %s)"
+ serbian "Pristup je zabranjen korisniku '%s'@'%s' (koristi lozinku: '%s')"
+ slo "Zakázaný prístup pre užívateľa: '%s'@'%s' (použitie hesla: %s)"
+ spa "Acceso negado para usuario: '%s'@'%s' (Usando clave: %s)"
+ swe "Användare '%s'@'%s' är ej berättigad att logga in (Använder lösen: %s)"
+ ukr "Доступ заборонено для користувача: '%s'@'%s' (Використано пароль: %s)"
ER_NO_DB_ERROR 3D000
cze "Nebyla vybrána žádná databáze"
dan "Ingen database valgt"
@@ -1685,29 +1680,10 @@ ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009
swe "Nyckelkolumn '%-.192s' finns inte"
ukr "Ключовий стовбець '%-.192s' не існує у таблиці"
ER_BLOB_USED_AS_KEY 42000 S1009
- cze "Blob sloupec '%-.192s' nemůže být použit jako klíč"
- dan "BLOB feltet '%-.192s' kan ikke bruges ved specifikation af indeks"
- nla "BLOB kolom '%-.192s' kan niet gebruikt worden bij zoeksleutel specificatie"
- eng "BLOB column '%-.192s' can't be used in key specification with the used table type"
- est "BLOB-tüüpi tulpa '%-.192s' ei saa kasutada võtmena"
- fre "Champ BLOB '%-.192s' ne peut être utilisé dans une clé"
- ger "BLOB-Feld '%-.192s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden"
- greek "Πεδίο τύπου Blob '%-.192s' δεν μπορεί να χρησιμοποιηθεί στον ορισμό ενός κλειδιού (key specification)"
- hun "Blob objektum '%-.192s' nem hasznalhato kulcskent"
- ita "La colonna BLOB '%-.192s' non puo` essere usata nella specifica della chiave"
- jpn "指定されたストレージエンジンでは、BLOB列 '%-.192s' は索引キーにできません。"
- kor "BLOB 칼럼 '%-.192s'는 키 정의에서 사용될 수 없습니다."
- nor "Blob felt '%-.192s' kan ikke brukes ved spesifikasjon av nøkler"
- norwegian-ny "Blob kolonne '%-.192s' kan ikkje brukast ved spesifikasjon av nyklar"
- pol "Kolumna typu Blob '%-.192s' nie może być użyta w specyfikacji klucza"
- por "Coluna BLOB '%-.192s' não pode ser utilizada na especificação de chave para o tipo de tabela usado"
- rum "Coloana de tip BLOB '%-.192s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit"
- rus "Столбец типа BLOB '%-.192s' не может быть использован как значение ключа в таблице такого типа"
- serbian "BLOB kolona '%-.192s' ne može biti upotrebljena za navođenje ključa sa tipom tabele koji se trenutno koristi"
- slo "Blob pole '%-.192s' nemôže byť použité ako kľúč"
- spa "La columna Blob '%-.192s' no puede ser usada en una declaracion de clave"
- swe "En BLOB '%-.192s' kan inte vara nyckel med den använda tabelltypen"
- ukr "BLOB стовбець '%-.192s' не може бути використаний у визначенні ключа в цьому типі таблиці"
+ eng "BLOB column %`s can't be used in key specification in the %s table"
+ ger "BLOB-Feld %`s kann beim %s Tabellen nicht als Schlüssel verwendet werden"
+ rus "Столбец типа BLOB %`s не может быть использован как значение ключа в %s таблице"
+ ukr "BLOB стовбець %`s не може бути використаний у визначенні ключа в %s таблиці"
ER_TOO_BIG_FIELDLENGTH 42000 S1009
cze "Příliš velká délka sloupce '%-.192s' (nejvíce %lu). Použijte BLOB"
dan "For stor feltlængde for kolonne '%-.192s' (maks = %lu). Brug BLOB i stedet"
@@ -3201,43 +3177,45 @@ ER_NONEXISTING_GRANT 42000
swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s'"
ukr "Повноважень не визначено для користувача '%-.48s' з хосту '%-.64s'"
ER_TABLEACCESS_DENIED_ERROR 42000
- cze "%-.128s příkaz nepřístupný pro uživatele: '%-.48s'@'%-.64s' pro tabulku '%-.192s'"
- dan "%-.128s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for tabellen '%-.192s'"
- nla "%-.128s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor tabel '%-.192s'"
- eng "%-.128s command denied to user '%-.48s'@'%-.64s' for table '%-.192s'"
- est "%-.128s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tabelis '%-.192s'"
- fre "La commande '%-.128s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la table '%-.192s'"
- ger "%-.128s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' auf Tabelle '%-.192s'"
- hun "%-.128s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' tablaban"
- ita "Comando %-.128s negato per l'utente: '%-.48s'@'%-.64s' sulla tabella '%-.192s'"
- jpn "コマンド %-.128s は ユーザー '%-.48s'@'%-.64s' の表 '%-.192s' の使用に関して許可されていません。"
- kor "'%-.128s' 명령은 다음 사용자에게 거부되었습니다. : '%-.48s'@'%-.64s' for 테이블 '%-.192s'"
- por "Comando '%-.128s' negado para o usuário '%-.48s'@'%-.64s' na tabela '%-.192s'"
- rum "Comanda %-.128s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru tabela '%-.192s'"
- rus "Команда %-.128s запрещена пользователю '%-.48s'@'%-.64s' для таблицы '%-.192s'"
- serbian "%-.128s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za tabelu '%-.192s'"
- spa "%-.128s comando negado para usuario: '%-.48s'@'%-.64s' para tabla '%-.192s'"
- swe "%-.128s ej tillåtet för '%-.48s'@'%-.64s' för tabell '%-.192s'"
- ukr "%-.128s команда заборонена користувачу: '%-.48s'@'%-.64s' у таблиці '%-.192s'"
+ cze "%-.32s příkaz nepřístupný pro uživatele: '%s'@'%s' pro tabulku '%-.192s'"
+ dan "%-.32s-kommandoen er ikke tilladt for brugeren '%s'@'%s' for tabellen '%-.192s'"
+ nla "%-.32s commando geweigerd voor gebruiker: '%s'@'%s' voor tabel '%-.192s'"
+ eng "%-.32s command denied to user '%s'@'%s' for table '%-.192s'"
+ jps "コマンド %-.32s は ユーザー '%s'@'%s' ,テーブル '%-.192s' に対して許可されていません",
+ est "%-.32s käsk ei ole lubatud kasutajale '%s'@'%s' tabelis '%-.192s'"
+ fre "La commande '%-.32s' est interdite à l'utilisateur: '%s'@'%s' sur la table '%-.192s'"
+ ger "%-.32s Befehl nicht erlaubt für Benutzer '%s'@'%s' auf Tabelle '%-.192s'"
+ hun "%-.32s parancs a '%s'@'%s' felhasznalo szamara nem engedelyezett a '%-.192s' tablaban"
+ ita "Comando %-.32s negato per l'utente: '%s'@'%s' sulla tabella '%-.192s'"
+ jpn "コマンド %-.32s は ユーザー '%s'@'%s' ,テーブル '%-.192s' に対して許可されていません"
+ kor "'%-.32s' 명령은 다음 사용자에게 거부되었습니다. : '%s'@'%s' for 테이블 '%-.192s'"
+ por "Comando '%-.32s' negado para o usuário '%s'@'%s' na tabela '%-.192s'"
+ rum "Comanda %-.32s interzisa utilizatorului: '%s'@'%s' pentru tabela '%-.192s'"
+ rus "Команда %-.32s запрещена пользователю '%s'@'%s' для таблицы '%-.192s'"
+ serbian "%-.32s komanda zabranjena za korisnika '%s'@'%s' za tabelu '%-.192s'"
+ spa "%-.32s comando negado para usuario: '%s'@'%s' para tabla '%-.192s'"
+ swe "%-.32s ej tillåtet för '%s'@'%s' för tabell '%-.192s'"
+ ukr "%-.32s команда заборонена користувачу: '%s'@'%s' у таблиці '%-.192s'"
ER_COLUMNACCESS_DENIED_ERROR 42000
- cze "%-.128s příkaz nepřístupný pro uživatele: '%-.48s'@'%-.64s' pro sloupec '%-.192s' v tabulce '%-.192s'"
- dan "%-.128s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for kolonne '%-.192s' in tabellen '%-.192s'"
- nla "%-.128s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor kolom '%-.192s' in tabel '%-.192s'"
- eng "%-.128s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'"
- est "%-.128s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tulbale '%-.192s' tabelis '%-.192s'"
- fre "La commande '%-.128s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la colonne '%-.192s' de la table '%-.192s'"
- ger "%-.128s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' und Feld '%-.192s' in Tabelle '%-.192s'"
- hun "%-.128s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' mezo eseten a '%-.192s' tablaban"
- ita "Comando %-.128s negato per l'utente: '%-.48s'@'%-.64s' sulla colonna '%-.192s' della tabella '%-.192s'"
- jpn "コマンド %-.128s は ユーザー '%-.48s'@'%-.64s'\n の列 '%-.192s'(表 '%-.192s') の利用に関して許可されていません。"
- kor "'%-.128s' 명령은 다음 사용자에게 거부되었습니다. : '%-.48s'@'%-.64s' for 칼럼 '%-.192s' in 테이블 '%-.192s'"
- por "Comando '%-.128s' negado para o usuário '%-.48s'@'%-.64s' na coluna '%-.192s', na tabela '%-.192s'"
- rum "Comanda %-.128s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru coloana '%-.192s' in tabela '%-.192s'"
- rus "Команда %-.128s запрещена пользователю '%-.48s'@'%-.64s' для столбца '%-.192s' в таблице '%-.192s'"
- serbian "%-.128s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za kolonu '%-.192s' iz tabele '%-.192s'"
- spa "%-.128s comando negado para usuario: '%-.48s'@'%-.64s' para columna '%-.192s' en la tabla '%-.192s'"
- swe "%-.128s ej tillåtet för '%-.48s'@'%-.64s' för kolumn '%-.192s' i tabell '%-.192s'"
- ukr "%-.128s команда заборонена користувачу: '%-.48s'@'%-.64s' для стовбця '%-.192s' у таблиці '%-.192s'"
+ cze "%-.32s příkaz nepřístupný pro uživatele: '%s'@'%s' pro sloupec '%-.192s' v tabulce '%-.192s'"
+ dan "%-.32s-kommandoen er ikke tilladt for brugeren '%s'@'%s' for kolonne '%-.192s' in tabellen '%-.192s'"
+ nla "%-.32s commando geweigerd voor gebruiker: '%s'@'%s' voor kolom '%-.192s' in tabel '%-.192s'"
+ eng "%-.32s command denied to user '%s'@'%s' for column '%-.192s' in table '%-.192s'"
+ jps "コマンド %-.32s は ユーザー '%s'@'%s'¥n カラム '%-.192s' テーブル '%-.192s' に対して許可されていません",
+ est "%-.32s käsk ei ole lubatud kasutajale '%s'@'%s' tulbale '%-.192s' tabelis '%-.192s'"
+ fre "La commande '%-.32s' est interdite à l'utilisateur: '%s'@'%s' sur la colonne '%-.192s' de la table '%-.192s'"
+ ger "%-.32s Befehl nicht erlaubt für Benutzer '%s'@'%s' und Feld '%-.192s' in Tabelle '%-.192s'"
+ hun "%-.32s parancs a '%s'@'%s' felhasznalo szamara nem engedelyezett a '%-.192s' mezo eseten a '%-.192s' tablaban"
+ ita "Comando %-.32s negato per l'utente: '%s'@'%s' sulla colonna '%-.192s' della tabella '%-.192s'"
+ jpn "コマンド %-.32s は ユーザー '%s'@'%s'\n カラム '%-.192s' テーブル '%-.192s' に対して許可されていません"
+ kor "'%-.32s' 명령은 다음 사용자에게 거부되었습니다. : '%s'@'%s' for 칼럼 '%-.192s' in 테이블 '%-.192s'"
+ por "Comando '%-.32s' negado para o usuário '%s'@'%s' na coluna '%-.192s', na tabela '%-.192s'"
+ rum "Comanda %-.32s interzisa utilizatorului: '%s'@'%s' pentru coloana '%-.192s' in tabela '%-.192s'"
+ rus "Команда %-.32s запрещена пользователю '%s'@'%s' для столбца '%-.192s' в таблице '%-.192s'"
+ serbian "%-.32s komanda zabranjena za korisnika '%s'@'%s' za kolonu '%-.192s' iz tabele '%-.192s'"
+ spa "%-.32s comando negado para usuario: '%s'@'%s' para columna '%-.192s' en la tabla '%-.192s'"
+ swe "%-.32s ej tillåtet för '%s'@'%s' för kolumn '%-.192s' i tabell '%-.192s'"
+ ukr "%-.32s команда заборонена користувачу: '%s'@'%s' для стовбця '%-.192s' у таблиці '%-.192s'"
ER_ILLEGAL_GRANT_FOR_TABLE 42000
cze "Neplatný příkaz GRANT/REVOKE. Prosím, přečtěte si v manuálu, jaká privilegia je možné použít."
dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres."
@@ -3617,41 +3595,39 @@ ER_TOO_LONG_STRING 42000
swe "Resultatsträngen är längre än max_allowed_packet"
ukr "Строка результату довша ніж max_allowed_packet"
ER_TABLE_CANT_HANDLE_BLOB 42000
- cze "Typ použité tabulky nepodporuje BLOB/TEXT sloupce"
- dan "Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner"
- nla "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen"
- eng "The used table type doesn't support BLOB/TEXT columns"
- est "Valitud tabelitüüp ei toeta BLOB/TEXT tüüpi välju"
- fre "Ce type de table ne supporte pas les colonnes BLOB/TEXT"
- ger "Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Felder"
- hun "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket"
- ita "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT"
- jpn "指定されたストレージエンジンでは、BLOB/TEXT型の列を使用できません。"
- por "Tipo de tabela usado não permite colunas BLOB/TEXT"
- rum "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT"
- rus "Используемая таблица не поддерживает типы BLOB/TEXT"
- serbian "Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'"
- spa "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT"
- swe "Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner"
- ukr "Використаний тип таблиці не підтримує BLOB/TEXT стовбці"
+ cze "Typ použité tabulky (%s) nepodporuje BLOB/TEXT sloupce"
+ dan "Denne tabeltype (%s) understøtter ikke brug af BLOB og TEXT kolonner"
+ nla "Het gebruikte tabel type (%s) ondersteunt geen BLOB/TEXT kolommen"
+ eng "Storage engine %s doesn't support BLOB/TEXT columns"
+ est "Valitud tabelitüüp (%s) ei toeta BLOB/TEXT tüüpi välju"
+ fre "Ce type de table (%s) ne supporte pas les colonnes BLOB/TEXT"
+ ger "Der verwendete Tabellentyp (%s) unterstützt keine BLOB- und TEXT-Felder"
+ hun "A hasznalt tabla tipus (%s) nem tamogatja a BLOB/TEXT mezoket"
+ ita "Il tipo di tabella usata (%s) non supporta colonne di tipo BLOB/TEXT"
+ por "Tipo de tabela usado (%s) não permite colunas BLOB/TEXT"
+ rum "Tipul de tabela folosit (%s) nu suporta coloane de tip BLOB/TEXT"
+ rus "%s таблицы не поддерживают типы BLOB/TEXT"
+ serbian "Iskorišteni tip tabele (%s) ne podržava kolone tipa 'BLOB' odnosno 'TEXT'"
+ spa "El tipo de tabla usada (%s) no permite soporte para columnas BLOB/TEXT"
+ swe "Den använda tabelltypen (%s) kan inte hantera BLOB/TEXT-kolumner"
+ ukr "%s таблиці не підтримують BLOB/TEXT стовбці"
ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000
- cze "Typ použité tabulky nepodporuje AUTO_INCREMENT sloupce"
- dan "Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner"
- nla "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen"
- eng "The used table type doesn't support AUTO_INCREMENT columns"
- est "Valitud tabelitüüp ei toeta AUTO_INCREMENT tüüpi välju"
- fre "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT"
- ger "Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Felder"
- hun "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket"
- jpn "指定されたストレージエンジンでは、AUTO_INCREMENT列を使用できません。"
- ita "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT"
- por "Tipo de tabela usado não permite colunas AUTO_INCREMENT"
- rum "Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT"
- rus "Используемая таблица не поддерживает автоинкрементные столбцы"
- serbian "Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'"
- spa "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT"
- swe "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner"
- ukr "Використаний тип таблиці не підтримує AUTO_INCREMENT стовбці"
+ cze "Typ použité tabulky (%s) nepodporuje AUTO_INCREMENT sloupce"
+ dan "Denne tabeltype understøtter (%s) ikke brug af AUTO_INCREMENT kolonner"
+ nla "Het gebruikte tabel type (%s) ondersteunt geen AUTO_INCREMENT kolommen"
+ eng "Storage engine %s doesn't support AUTO_INCREMENT columns"
+ est "Valitud tabelitüüp (%s) ei toeta AUTO_INCREMENT tüüpi välju"
+ fre "Ce type de table (%s) ne supporte pas les colonnes AUTO_INCREMENT"
+ ger "Der verwendete Tabellentyp (%s) unterstützt keine AUTO_INCREMENT-Felder"
+ hun "A hasznalt tabla tipus (%s) nem tamogatja az AUTO_INCREMENT tipusu mezoket"
+ ita "Il tipo di tabella usata (%s) non supporta colonne di tipo AUTO_INCREMENT"
+ por "Tipo de tabela usado (%s) não permite colunas AUTO_INCREMENT"
+ rum "Tipul de tabela folosit (%s) nu suporta coloane de tip AUTO_INCREMENT"
+ rus "%s таблицы не поддерживают автоинкрементные столбцы"
+ serbian "Iskorišteni tip tabele (%s) ne podržava kolone tipa 'AUTO_INCREMENT'"
+ spa "El tipo de tabla usada (%s) no permite soporte para columnas AUTO_INCREMENT"
+ swe "Den använda tabelltypen (%s) kan inte hantera AUTO_INCREMENT-kolumner"
+ ukr "%s таблиці не підтримують AUTO_INCREMENT стовбці"
ER_DELAYED_INSERT_TABLE_LOCKED
cze "INSERT DELAYED není možno s tabulkou '%-.192s' použít, protože je zamčená pomocí LOCK TABLES"
dan "INSERT DELAYED kan ikke bruges med tabellen '%-.192s', fordi tabellen er låst med LOCK TABLES"
@@ -3695,29 +3671,10 @@ ER_WRONG_COLUMN_NAME 42000
swe "Felaktigt kolumnnamn '%-.100s'"
ukr "Невірне ім'я стовбця '%-.100s'"
ER_WRONG_KEY_COLUMN 42000
- cze "Handler použité tabulky neumí indexovat sloupce '%-.192s'"
- dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.192s'"
- nla "De gebruikte tabel 'handler' kan kolom '%-.192s' niet indexeren"
- eng "The used storage engine can't index column '%-.192s'"
- est "Tabelihandler ei oska indekseerida tulpa '%-.192s'"
- fre "Le handler de la table ne peut indexé la colonne '%-.192s'"
- ger "Die verwendete Speicher-Engine kann die Spalte '%-.192s' nicht indizieren"
- greek "The used table handler can't index column '%-.192s'"
- hun "A hasznalt tablakezelo nem tudja a '%-.192s' mezot indexelni"
- ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.192s'"
- jpn "使用のストレージエンジンは列 '%-.192s' の索引を作成できません。"
- kor "The used table handler can't index column '%-.192s'"
- nor "The used table handler can't index column '%-.192s'"
- norwegian-ny "The used table handler can't index column '%-.192s'"
- pol "The used table handler can't index column '%-.192s'"
- por "O manipulador de tabela usado não pode indexar a coluna '%-.192s'"
- rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.192s'"
- rus "Использованный обработчик таблицы не может проиндексировать столбец '%-.192s'"
- serbian "Handler tabele ne može da indeksira kolonu '%-.192s'"
- slo "The used table handler can't index column '%-.192s'"
- spa "El manipulador de tabla usado no puede indexar columna '%-.192s'"
- swe "Den använda tabelltypen kan inte indexera kolumn '%-.192s'"
- ukr "Використаний вказівник таблиці не може індексувати стовбець '%-.192s'"
+ eng "The storage engine %s can't index column %`s"
+ ger "Die Speicher-Engine %s kann die Spalte %`s nicht indizieren"
+ rus "Обработчик таблиц %s не может проиндексировать столбец %`s"
+ ukr "Вказівник таблиц %s не може індексувати стовбець %`s"
ER_WRONG_MRG_TABLE
cze "Všechny tabulky v MERGE tabulce nejsou definovány stejně"
dan "Tabellerne i MERGE er ikke defineret ens"
@@ -4031,22 +3988,8 @@ ER_NEW_ABORTING_CONNECTION 08S01
spa "Abortada conexión %ld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)"
swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)"
ukr "Перервано з'єднання %ld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)"
-ER_DUMP_NOT_IMPLEMENTED
- cze "Handler tabulky nepodporuje binární dump"
- dan "Denne tabeltype unserstøtter ikke binært tabeldump"
- nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump"
- eng "The storage engine for the table does not support binary table dump"
- fre "Ce type de table ne supporte pas les copies binaires"
- ger "Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump"
- ita "Il gestore per la tabella non supporta il dump binario"
- jpn "この表のストレージエンジンはバイナリ形式の表ダンプを利用できません。"
- por "O manipulador de tabela não suporta 'dump' binário de tabela"
- rum "The handler for the table does not support binary table dump"
- rus "Обработчик этой таблицы не поддерживает двоичного сохранения образа таблицы (dump)"
- serbian "Handler tabele ne podržava binarni dump tabele"
- spa "El manipulador de tabla no soporta dump para tabla binaria"
- swe "Tabellhanteraren klarar inte en binär kopiering av tabellen"
- ukr "Цей тип таблиці не підтримує бінарну передачу таблиці"
+ER_unused_2
+ eng "You should never see it"
ER_FLUSH_MASTER_BINLOG_CLOSED
eng "Binlog closed, cannot RESET MASTER"
ger "Binlog geschlossen. Kann RESET MASTER nicht ausführen"
@@ -4407,19 +4350,18 @@ ER_WRONG_ARGUMENTS
swe "Felaktiga argument till %s"
ukr "Хибний аргумент для %s"
ER_NO_PERMISSION_TO_CREATE_USER 42000
- nla "'%-.48s'@'%-.64s' mag geen nieuwe gebruikers creeren"
- eng "'%-.48s'@'%-.64s' is not allowed to create new users"
- est "Kasutajal '%-.48s'@'%-.64s' ei ole lubatud luua uusi kasutajaid"
- fre "'%-.48s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs"
- ger "'%-.48s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufügen"
- ita "A '%-.48s'@'%-.64s' non e' permesso creare nuovi utenti"
- jpn "'%-.48s'@'%-.64s' は新しいユーザーを作成できません。"
- por "Não é permitido a '%-.48s'@'%-.64s' criar novos usuários"
- rus "'%-.48s'@'%-.64s' не разрешается создавать новых пользователей"
- serbian "Korisniku '%-.48s'@'%-.64s' nije dozvoljeno da kreira nove korisnike"
- spa "'%-.48s`@`%-.64s` no es permitido para crear nuevos usuarios"
- swe "'%-.48s'@'%-.64s' har inte rättighet att skapa nya användare"
- ukr "Користувачу '%-.48s'@'%-.64s' не дозволено створювати нових користувачів"
+ nla "'%s'@'%s' mag geen nieuwe gebruikers creeren"
+ eng "'%s'@'%s' is not allowed to create new users"
+ est "Kasutajal '%s'@'%s' ei ole lubatud luua uusi kasutajaid"
+ fre "'%s'@'%s' n'est pas autorisé à créer de nouveaux utilisateurs"
+ ger "'%s'@'%s' ist nicht berechtigt, neue Benutzer hinzuzufügen"
+ ita "A '%s'@'%s' non e' permesso creare nuovi utenti"
+ por "Não é permitido a '%s'@'%s' criar novos usuários"
+ rus "'%s'@'%s' не разрешается создавать новых пользователей"
+ serbian "Korisniku '%s'@'%s' nije dozvoljeno da kreira nove korisnike"
+ spa "'%s'@'%s' no es permitido para crear nuevos usuarios"
+ swe "'%s'@'%s' har inte rättighet att skapa nya användare"
+ ukr "Користувачу '%s'@'%s' не дозволено створювати нових користувачів"
ER_UNION_TABLES_IN_DIFFERENT_DIR
nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren"
eng "Incorrect table definition; all MERGE tables must be in the same database"
@@ -4447,19 +4389,18 @@ ER_LOCK_DEADLOCK 40001
spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición"
swe "Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen"
ER_TABLE_CANT_HANDLE_FT
- nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen"
- eng "The used table type doesn't support FULLTEXT indexes"
- est "Antud tabelitüüp ei toeta FULLTEXT indekseid"
- fre "Le type de table utilisé ne supporte pas les index FULLTEXT"
- ger "Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes"
- ita "La tabella usata non supporta gli indici FULLTEXT"
- jpn "使用の表は全文索引を利用できません。"
- por "O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)"
- rus "Используемый тип таблиц не поддерживает полнотекстовых индексов"
- serbian "Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse"
- spa "El tipo de tabla usada no soporta índices FULLTEXT"
- swe "Tabelltypen har inte hantering av FULLTEXT-index"
- ukr "Використаний тип таблиці не підтримує FULLTEXT індексів"
+ nla "Het gebruikte tabel type (%s) ondersteund geen FULLTEXT indexen"
+ eng "The storage engine %s doesn't support FULLTEXT indexes"
+ est "Antud tabelitüüp (%s) ei toeta FULLTEXT indekseid"
+ fre "Le type de table utilisé (%s) ne supporte pas les index FULLTEXT"
+ ger "Der verwendete Tabellentyp (%s) unterstützt keine FULLTEXT-Indizes"
+ ita "La tabella usata (%s) non supporta gli indici FULLTEXT"
+ por "O tipo de tabela utilizado (%s) não suporta índices de texto completo (fulltext indexes)"
+ rus "Используемый тип таблиц (%s) не поддерживает полнотекстовых индексов"
+ serbian "Upotrebljeni tip tabele (%s) ne podržava 'FULLTEXT' indekse"
+ spa "El tipo de tabla usada (%s) no soporta índices FULLTEXT"
+ swe "Tabelltypen (%s) har inte hantering av FULLTEXT-index"
+ ukr "Використаний тип таблиці (%s) не підтримує FULLTEXT індексів"
ER_CANNOT_ADD_FOREIGN
nla "Kan foreign key beperking niet toevoegen"
eng "Cannot add foreign key constraint"
@@ -5024,14 +4965,13 @@ ER_WRONG_NAME_FOR_CATALOG 42000
spa "Nombre de catalog incorrecto '%-.100s'"
swe "Felaktigt katalog namn '%-.100s'"
ER_WARN_QC_RESIZE
- eng "Query cache failed to set size %lu; new query cache size is %lu"
- ger "Änderung der Query-Cache-Größe auf %lu fehlgeschlagen; neue Query-Cache-Größe ist %lu"
- jpn "クエリキャッシュのサイズを %lu にできませんでした。サイズは %lu になりました。"
- por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu"
- rus "Кеш запросов не может установить размер %lu, новый размер кеша зпросов - %lu"
- spa "Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu"
- swe "Storleken av 'Query cache' kunde inte sättas till %lu, ny storlek är %lu"
- ukr "Кеш запитів неспроможен встановити розмір %lu, новий розмір кеша запитів - %lu"
+ eng "Query cache failed to set size %llu; new query cache size is %lu"
+ ger "Änderung der Query-Cache-Größe auf %llu fehlgeschlagen; neue Query-Cache-Größe ist %lu"
+ por "Falha em Query cache para configurar tamanho %llu, novo tamanho de query cache é %lu"
+ rus "Кеш запросов не может установить размер %llu, новый размер кеша зпросов - %lu"
+ spa "Query cache fallada para configurar tamaño %llu, nuevo tamaño de query cache es %lu"
+ swe "Storleken av "Query cache" kunde inte sättas till %llu, ny storlek är %lu"
+ ukr "Кеш запитів неспроможен встановити розмір %llu, новий розмір кеша запитів - %lu"
ER_BAD_FT_COLUMN
eng "Column '%-.192s' cannot be part of FULLTEXT index"
ger "Feld '%-.192s' kann nicht Teil eines FULLTEXT-Index sein"
@@ -5380,8 +5320,8 @@ ER_VIEW_CHECK_FAILED
rus "проверка CHECK OPTION для VIEW '%-.192s.%-.192s' провалилась"
ukr "Перевірка CHECK OPTION для VIEW '%-.192s.%-.192s' не пройшла"
ER_PROCACCESS_DENIED_ERROR 42000
- eng "%-.128s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'"
- ger "Befehl %-.128s nicht zulässig für Benutzer '%-.48s'@'%-.64s' in Routine '%-.192s'"
+ eng "%-.32s command denied to user '%s'@'%s' for routine '%-.192s'"
+ ger "Befehl %-.32s nicht zulässig für Benutzer '%s'@'%s' in Routine '%-.192s'"
ER_RELAY_LOG_FAIL
eng "Failed purging old relay logs: %s"
ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s"
@@ -5673,8 +5613,8 @@ ER_NON_GROUPING_FIELD_USED 42000
eng "Non-grouping field '%-.192s' is used in %-.64s clause"
ger "In der %-.192s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet"
ER_TABLE_CANT_HANDLE_SPKEYS
- eng "The used table type doesn't support SPATIAL indexes"
- ger "Der verwendete Tabellentyp unterstützt keine SPATIAL-Indizes"
+ eng "The storage engine %s doesn't support SPATIAL indexes"
+ ger "Der verwendete Tabellentyp (%s) unterstützt keine SPATIAL-Indizes"
ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
eng "Triggers can not be created on system tables"
ger "Trigger können nicht auf Systemtabellen erzeugt werden"
@@ -6067,8 +6007,8 @@ ER_ONLY_INTEGERS_ALLOWED
eng "Only integers allowed as number here"
ger "An dieser Stelle sind nur Ganzzahlen zulässig"
ER_UNSUPORTED_LOG_ENGINE
- eng "This storage engine cannot be used for log tables"
- ger "Diese Speicher-Engine kann für Logtabellen nicht verwendet werden"
+ eng "Storage engine %s cannot be used for log tables"
+ ger "Speicher-Engine %s kann für Logtabellen nicht verwendet werden"
ER_BAD_LOG_STATEMENT
eng "You cannot '%s' a log table if logging is enabled"
ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist"
@@ -6209,7 +6149,7 @@ WARN_NO_MASTER_INFO
WARN_OPTION_IGNORED
eng "<%-.64s> option ignored"
ger "Option <%-.64s> ignoriert"
-WARN_PLUGIN_DELETE_BUILTIN
+ER_PLUGIN_DELETE_BUILTIN
eng "Built-in plugins cannot be deleted"
ger "Eingebaute Plugins können nicht gelöscht werden"
WARN_PLUGIN_BUSY
@@ -6236,13 +6176,13 @@ ER_EXCEPTIONS_WRITE_ERROR
eng "Write to exceptions table failed. Message: %-.128s""
ger "Schreiben in Ausnahme-Tabelle fehlgeschlagen. Meldung: %-.128s""
ER_TOO_LONG_TABLE_COMMENT
- eng "Comment for table '%-.64s' is too long (max = %lu)"
- por "Comentário para a tabela '%-.64s' é longo demais (max = %lu)"
- ger "Kommentar für Tabelle '%-.64s' ist zu lang (max = %lu)"
+ eng "Comment for table '%-.64s' is too long (max = %u)"
+ por "Comentário para a tabela '%-.64s' é longo demais (max = %u)"
+ ger "Kommentar für Tabelle '%-.64s' ist zu lang (max = %u)"
ER_TOO_LONG_FIELD_COMMENT
- eng "Comment for field '%-.64s' is too long (max = %lu)"
- por "Comentário para o campo '%-.64s' é longo demais (max = %lu)"
- ger "Kommentar für Feld '%-.64s' ist zu lang (max = %lu)"
+ eng "Comment for field '%-.64s' is too long (max = %u)"
+ por "Comentário para o campo '%-.64s' é longo demais (max = %u)"
+ ger "Kommentar für Feld '%-.64s' ist zu lang (max = %u)"
ER_FUNC_INEXISTENT_NAME_COLLISION 42000
eng "FUNCTION %s does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual"
ger "FUNCTION %s existiert nicht. Erläuterungen im Abschnitt 'Function Name Parsing and Resolution' im Referenzhandbuch"
@@ -6441,28 +6381,27 @@ ER_VALUES_IS_NOT_INT_TYPE_ERROR
swe "Värden i VALUES för partition '%-.64s' måste ha typen INT"
ER_ACCESS_DENIED_NO_PASSWORD_ERROR 28000
- cze "Přístup pro uživatele '%-.48s'@'%-.64s'"
- dan "Adgang nægtet bruger: '%-.48s'@'%-.64s'"
- nla "Toegang geweigerd voor gebruiker: '%-.48s'@'%-.64s'"
- eng "Access denied for user '%-.48s'@'%-.64s'"
- est "Ligipääs keelatud kasutajale '%-.48s'@'%-.64s'"
- fre "Accès refusé pour l'utilisateur: '%-.48s'@'@%-.64s'"
- ger "Benutzer '%-.48s'@'%-.64s' hat keine Zugriffsberechtigung"
- greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%-.48s'@'%-.64s'"
- hun "A(z) '%-.48s'@'%-.64s' felhasznalo szamara tiltott eleres."
- ita "Accesso non consentito per l'utente: '%-.48s'@'%-.64s'"
- jpn "ユーザー '%-.48s'@'%-.64s' のアクセスは拒否されました。"
- kor "'%-.48s'@'%-.64s' 사용자는 접근이 거부 되었습니다."
- nor "Tilgang nektet for bruker: '%-.48s'@'%-.64s'"
- norwegian-ny "Tilgang ikke tillate for brukar: '%-.48s'@'%-.64s'"
- por "Acesso negado para o usuário '%-.48s'@'%-.64s'"
- rum "Acces interzis pentru utilizatorul: '%-.48s'@'%-.64s'"
- rus "Доступ закрыт для пользователя '%-.48s'@'%-.64s'"
- serbian "Pristup je zabranjen korisniku '%-.48s'@'%-.64s'"
- slo "Zakázaný prístup pre užívateľa: '%-.48s'@'%-.64s'"
- spa "Acceso negado para usuario: '%-.48s'@'%-.64s'"
- swe "Användare '%-.48s'@'%-.64s' är ej berättigad att logga in"
- ukr "Доступ заборонено для користувача: '%-.48s'@'%-.64s'"
+ cze "Přístup pro uživatele '%s'@'%s'"
+ dan "Adgang nægtet bruger: '%s'@'%s'"
+ nla "Toegang geweigerd voor gebruiker: '%s'@'%s'"
+ eng "Access denied for user '%s'@'%s'"
+ est "Ligipääs keelatud kasutajale '%s'@'%s'"
+ fre "Accès refusé pour l'utilisateur: '%s'@'%s'"
+ ger "Benutzer '%s'@'%s' hat keine Zugriffsberechtigung"
+ greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s'"
+ hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres."
+ ita "Accesso non consentito per l'utente: '%s'@'%s'"
+ kor "'%s'@'%s' 사용자는 접근이 거부 되었습니다."
+ nor "Tilgang nektet for bruker: '%s'@'%s'"
+ norwegian-ny "Tilgang ikke tillate for brukar: '%s'@'%s'"
+ por "Acesso negado para o usuário '%s'@'%s'"
+ rum "Acces interzis pentru utilizatorul: '%s'@'%s'"
+ rus "Доступ закрыт для пользователя '%s'@'%s'"
+ serbian "Pristup je zabranjen korisniku '%s'@'%s'"
+ slo "Zakázaný prístup pre užívateľa: '%s'@'%s'"
+ spa "Acceso negado para usuario: '%s'@'%s'"
+ swe "Användare '%s'@'%s' är ej berättigad att logga in"
+ ukr "Доступ заборонено для користувача: '%s'@'%s'"
ER_SET_PASSWORD_AUTH_PLUGIN
eng "SET PASSWORD has no significance for users authenticating via plugins"
@@ -6547,8 +6486,8 @@ ER_BINLOG_UNSAFE_INSERT_TWO_KEYS
ER_TABLE_IN_FK_CHECK
eng "Table is being used in foreign key check."
-ER_UNSUPPORTED_ENGINE
- eng "Storage engine '%s' does not support system tables. [%s.%s]"
+ER_unused_1
+ eng "You should never see it"
ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST
eng "INSERT into autoincrement field which is not the first part in the composed primary key is unsafe."
@@ -7078,3 +7017,36 @@ ER_SLAVE_STARTED
eng "SLAVE '%.*s' started"
ER_SLAVE_STOPPED
eng "SLAVE '%.*s' stopped"
+ER_SQL_DISCOVER_ERROR
+ eng "Engine %s failed to discover table %`-.192s.%`-.192s with '%s'"
+ER_FAILED_GTID_STATE_INIT
+ eng "Failed initializing replication GTID state"
+ER_INCORRECT_GTID_STATE
+ eng "Could not parse GTID list for GTID_POS"
+ER_CANNOT_UPDATE_GTID_STATE
+ eng "Could not update replication slave gtid state"
+ER_DUPLICATE_GTID_DOMAIN
+ eng "GTID %u-%u-%llu and %u-%u-%llu conflict (duplicate domain id %u)"
+ER_GTID_OPEN_TABLE_FAILED
+ eng "Failed to open %s.%s"
+ ger "Öffnen von %s.%s fehlgeschlagen"
+ER_GTID_POSITION_NOT_FOUND_IN_BINLOG
+ eng "Connecting slave requested to start from GTID %u-%u-%llu, which is not in the master's binlog"
+ER_CANNOT_LOAD_SLAVE_GTID_STATE
+ eng "Failed to load replication slave GTID position from table %s.%s"
+ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG
+ eng "Specified GTID %u-%u-%llu conflicts with the binary log which contains a more recent GTID %u-%u-%llu. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos."
+ER_MASTER_GTID_POS_MISSING_DOMAIN
+ eng "Specified value for @@gtid_slave_pos contains no value for replication domain %u. This conflicts with the binary log which contains GTID %u-%u-%llu. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos."
+ER_UNTIL_REQUIRES_USING_GTID
+ eng "START SLAVE UNTIL master_gtid_pos requires that slave is using GTID"
+ER_GTID_STRICT_OUT_OF_ORDER
+ eng "An attempt was made to binlog GTID %u-%u-%llu which would create an out-of-order sequence number with existing GTID %u-%u-%llu, and gtid strict mode is enabled."
+ER_GTID_START_FROM_BINLOG_HOLE
+ eng "The binlog on the master is missing the GTID %u-%u-%llu requested by the slave (even though both a prior and a subsequent sequence number does exist), and GTID strict mode is enabled"
+ER_SLAVE_UNEXPECTED_MASTER_SWITCH
+ eng "Unexpected GTID received from master after reconnect. This normally indicates that the master server was replaced without restarting the slave threads. %s"
+ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO
+ eng "Cannot modify @@session.gtid_domain_id or @@session.gtid_seq_no inside a transaction"
+ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO
+ eng "Cannot modify @@session.gtid_domain_id or @@session.gtid_seq_no inside a stored function or trigger"
diff --git a/sql/slave.cc b/sql/slave.cc
index 38f41685e6c..d46be570b5e 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -114,7 +114,7 @@ static const char *reconnect_messages[SLAVE_RECON_ACT_MAX][SLAVE_RECON_MSG_MAX]=
registration on master",
"Reconnecting after a failed registration on master",
"failed registering on master, reconnecting to try again, \
-log '%s' at position %s",
+log '%s' at position %s%s",
"COM_REGISTER_SLAVE",
"Slave I/O thread killed during or after reconnect"
},
@@ -122,7 +122,7 @@ log '%s' at position %s",
"Waiting to reconnect after a failed binlog dump request",
"Slave I/O thread killed while retrying master dump",
"Reconnecting after a failed binlog dump request",
- "failed dump request, reconnecting to try again, log '%s' at position %s",
+ "failed dump request, reconnecting to try again, log '%s' at position %s%s",
"COM_BINLOG_DUMP",
"Slave I/O thread killed during or after reconnect"
},
@@ -131,7 +131,7 @@ log '%s' at position %s",
"Slave I/O thread killed while waiting to reconnect after a failed read",
"Reconnecting after a failed master event read",
"Slave I/O thread: Failed reading log event, reconnecting to retry, \
-log '%s' at position %s",
+log '%s' at position %s%s",
"",
"Slave I/O thread killed during or after a reconnect done to recover from \
failed read"
@@ -162,8 +162,41 @@ static int terminate_slave_thread(THD *thd,
volatile uint *slave_running,
bool skip_lock);
static bool check_io_slave_killed(THD *thd, Master_info *mi, const char *info);
-static bool send_show_master_info_header(THD *thd, bool full);
-static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full);
+static bool send_show_master_info_header(THD *thd, bool full,
+ size_t gtid_pos_length);
+static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full,
+ String *gtid_pos);
+/*
+ Function to set the slave's max_allowed_packet based on the value
+ of slave_max_allowed_packet.
+
+  @param[in] thd    Thread handler for the slave
+  @param[in] mysql  MySQL connection handle
+*/
+
+static void set_slave_max_allowed_packet(THD *thd, MYSQL *mysql)
+{
+ DBUG_ENTER("set_slave_max_allowed_packet");
+ // thd and mysql must be valid
+ DBUG_ASSERT(thd && mysql);
+
+ thd->variables.max_allowed_packet= slave_max_allowed_packet;
+ thd->net.max_packet_size= slave_max_allowed_packet;
+ /*
+    Adding MAX_LOG_EVENT_HEADER to the max_packet_size of the I/O
+    thread and to mysql->options.max_allowed_packet, since a
+    replication event can become this much larger than the
+    corresponding packet (query) sent from client to master.
+ */
+ thd->net.max_packet_size+= MAX_LOG_EVENT_HEADER;
+ /*
+    Skip setting mysql->net.max_packet_size to slave_max_allowed_packet
+    here, since that is done during mysql_real_connect().
+ */
+ mysql->options.max_allowed_packet=
+ slave_max_allowed_packet+MAX_LOG_EVENT_HEADER;
+ DBUG_VOID_RETURN;
+}
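set_slave_max_allowed_packet() gives the network buffers extra head-room because a relayed event can exceed the original client packet by up to one event header. A standalone sketch of that arithmetic, with plain structs standing in for THD/MYSQL and an assumed header constant (the real MAX_LOG_EVENT_HEADER value differs):

#include <cstdio>

// Illustrative stand-ins; the real THD/MYSQL structures are far richer.
struct fake_net     { unsigned long max_packet_size; };
struct fake_session { unsigned long max_allowed_packet; fake_net net; };
struct fake_conn    { unsigned long max_allowed_packet; };

// Assumed value for the sketch only.
static const unsigned long LOG_EVENT_HEADER_ROOM= 1024;

// Mirror of the logic above: the network limits get extra head-room because
// a replicated event can exceed the original client packet by one header.
static void apply_slave_packet_limits(fake_session *thd, fake_conn *conn,
                                      unsigned long slave_max_allowed_packet)
{
  thd->max_allowed_packet= slave_max_allowed_packet;
  thd->net.max_packet_size= slave_max_allowed_packet + LOG_EVENT_HEADER_ROOM;
  conn->max_allowed_packet= slave_max_allowed_packet + LOG_EVENT_HEADER_ROOM;
}

int main()
{
  fake_session thd{};
  fake_conn conn{};
  apply_slave_packet_limits(&thd, &conn, 16 * 1024 * 1024);
  printf("session=%lu net=%lu conn=%lu\n",
         thd.max_allowed_packet, thd.net.max_packet_size,
         conn.max_allowed_packet);
  return 0;
}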
/*
Find out which replications threads are running
@@ -251,6 +284,67 @@ static void init_slave_psi_keys(void)
}
#endif /* HAVE_PSI_INTERFACE */
+
+static bool slave_init_thread_running;
+
+
+pthread_handler_t
+handle_slave_init(void *arg __attribute__((unused)))
+{
+ THD *thd;
+
+ my_thread_init();
+ thd= new THD;
+ thd->thread_stack= (char*) &thd; /* Set approximate stack start */
+ mysql_mutex_lock(&LOCK_thread_count);
+ thd->thread_id= thread_id++;
+ mysql_mutex_unlock(&LOCK_thread_count);
+ thd->store_globals();
+
+ thd_proc_info(thd, "Loading slave GTID position from table");
+ if (rpl_load_gtid_slave_state(thd))
+ sql_print_warning("Failed to load slave replication state from table "
+ "%s.%s: %u: %s", "mysql",
+ rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
+
+ mysql_mutex_lock(&LOCK_thread_count);
+ delete thd;
+ mysql_mutex_unlock(&LOCK_thread_count);
+ my_thread_end();
+
+ mysql_mutex_lock(&LOCK_thread_count);
+ slave_init_thread_running= false;
+ mysql_cond_signal(&COND_thread_count);
+ mysql_mutex_unlock(&LOCK_thread_count);
+
+ return 0;
+}
+
+
+static int
+run_slave_init_thread()
+{
+ pthread_t th;
+
+ slave_init_thread_running= true;
+ if (mysql_thread_create(key_thread_slave_init, &th, NULL,
+ handle_slave_init, NULL))
+ {
+ sql_print_error("Failed to create thread while initialising slave");
+ return 1;
+ }
+
+ mysql_mutex_lock(&LOCK_thread_count);
+ while (slave_init_thread_running)
+ mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
+ mysql_mutex_unlock(&LOCK_thread_count);
+
+ return 0;
+}
+
+
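handle_slave_init()/run_slave_init_thread() follow a "spawn a one-shot worker and block until it signals completion" pattern. A minimal sketch of the same shape using standard C++ primitives as illustrative substitutes for mysql_thread_create()/COND_thread_count (not the server's API):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex              init_mutex;
static std::condition_variable init_cond;
static bool                    init_running= false;

// One-shot worker: do the startup work, then flip the flag and signal.
static void init_worker()
{
  // ... load state here (e.g. a saved replication position) ...
  std::lock_guard<std::mutex> lk(init_mutex);
  init_running= false;
  init_cond.notify_one();
}

// Caller: start the worker and wait until it reports completion.
static int run_init_worker()
{
  init_running= true;
  std::thread worker(init_worker);
  {
    std::unique_lock<std::mutex> lk(init_mutex);
    init_cond.wait(lk, []{ return !init_running; });
  }
  worker.join();
  printf("init worker finished\n");
  return 0;
}

int main() { return run_init_worker(); }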
/* Initialize slave structures */
int init_slave()
@@ -262,6 +356,9 @@ int init_slave()
init_slave_psi_keys();
#endif
+ if (run_slave_init_thread())
+ return 1;
+
/*
This is called when mysqld starts. Before client connections are
accepted. However bootstrap may conflict with us if it does START SLAVE.
@@ -379,22 +476,20 @@ int init_recovery(Master_info* mi, const char** errmsg)
{
mi->master_log_pos= MY_MAX(BIN_LOG_HEADER_SIZE,
rli->group_master_log_pos);
- strmake(mi->master_log_name, rli->group_master_log_name,
- sizeof(mi->master_log_name)-1);
+ strmake_buf(mi->master_log_name, rli->group_master_log_name);
sql_print_warning("Recovery from master pos %ld and file %s.",
(ulong) mi->master_log_pos, mi->master_log_name);
- strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->group_relay_log_name)-1);
- strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(mi->rli.event_relay_log_name)-1);
+ strmake_buf(rli->group_relay_log_name, rli->relay_log.get_log_fname());
+ strmake_buf(rli->event_relay_log_name, rli->relay_log.get_log_fname());
rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
}
DBUG_RETURN(0);
}
+
/**
Convert slave skip errors bitmap into a printable string.
@@ -718,7 +813,7 @@ int start_slave_thread(
if (start_lock)
mysql_mutex_lock(start_lock);
- if (!server_id)
+ if (!global_system_variables.server_id)
{
if (start_cond)
mysql_cond_broadcast(start_cond);
@@ -796,6 +891,7 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
mysql_mutex_t *lock_io=0, *lock_sql=0, *lock_cond_io=0, *lock_cond_sql=0;
mysql_cond_t* cond_io=0, *cond_sql=0;
int error=0;
+ const char *errmsg;
DBUG_ENTER("start_slave_threads");
if (need_slave_mutex)
@@ -811,7 +907,41 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
lock_cond_sql = &mi->rli.run_lock;
}
- if (thread_mask & SLAVE_IO)
+ /*
+ If we are using GTID and both SQL and IO threads are stopped, then get
+ rid of all relay logs.
+
+ Relay logs are not very useful when using GTID, except as a buffer
+    between the fetch in the IO thread and the apply in the SQL thread.
+    However, while either thread is running, the logs are in use and cannot be
+ removed.
+ */
+ if (mi->using_gtid != Master_info::USE_GTID_NO &&
+ !mi->slave_running && !mi->rli.slave_running)
+ {
+ /*
+ purge_relay_logs() clears the mi->rli.group_master_log_pos.
+ So save and restore them, like we do in CHANGE MASTER.
+      (We are not going to use them for GTID, but it might be worth
+      keeping them in case the connection with GTID fails and the user wants
+      to go back and continue with the previous old-style replication coordinates).
+ */
+ mi->master_log_pos = MY_MAX(BIN_LOG_HEADER_SIZE,
+ mi->rli.group_master_log_pos);
+ strmake(mi->master_log_name, mi->rli.group_master_log_name,
+ sizeof(mi->master_log_name)-1);
+ purge_relay_logs(&mi->rli, NULL, 0, &errmsg);
+ mi->rli.group_master_log_pos= mi->master_log_pos;
+ strmake(mi->rli.group_master_log_name, mi->master_log_name,
+ sizeof(mi->rli.group_master_log_name)-1);
+
+ error= rpl_load_gtid_state(&mi->gtid_current_pos, mi->using_gtid ==
+ Master_info::USE_GTID_CURRENT_POS);
+ mi->events_queued_since_last_gtid= 0;
+ mi->gtid_reconnect_event_skip_count= 0;
+ }
+
+ if (!error && (thread_mask & SLAVE_IO))
error= start_slave_thread(
#ifdef HAVE_PSI_INTERFACE
key_thread_slave_io,
@@ -864,6 +994,7 @@ void end_slave()
master_info_index= 0;
active_mi= 0;
mysql_mutex_unlock(&LOCK_active_mi);
+ free_all_rpl_filters();
DBUG_VOID_RETURN;
}
@@ -1351,6 +1482,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
unavailable (very old master not supporting UNIX_TIMESTAMP()?).
*/
+#ifdef ENABLED_DEBUG_SYNC
DBUG_EXECUTE_IF("dbug.before_get_UNIX_TIMESTAMP",
{
const char act[]=
@@ -1360,6 +1492,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
DBUG_ASSERT(!debug_sync_set_action(current_thd,
STRING_WITH_LEN(act)));
};);
+#endif
master_res= NULL;
if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT UNIX_TIMESTAMP()")) &&
@@ -1401,6 +1534,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
Note: we could have put a @@SERVER_ID in the previous SELECT
UNIX_TIMESTAMP() instead, but this would not have worked on 3.23 masters.
*/
+#ifdef ENABLED_DEBUG_SYNC
DBUG_EXECUTE_IF("dbug.before_get_SERVER_ID",
{
const char act[]=
@@ -1410,6 +1544,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
DBUG_ASSERT(!debug_sync_set_action(current_thd,
STRING_WITH_LEN(act)));
};);
+#endif
master_res= NULL;
master_row= NULL;
if (!mysql_real_query(mysql,
@@ -1417,7 +1552,8 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
(master_res= mysql_store_result(mysql)) &&
(master_row= mysql_fetch_row(master_res)))
{
- if ((::server_id == (mi->master_id= strtoul(master_row[1], 0, 10))) &&
+ if ((global_system_variables.server_id ==
+ (mi->master_id= strtoul(master_row[1], 0, 10))) &&
!mi->rli.replicate_same_server_id)
{
errmsg= "The slave I/O thread stops because master and slave have equal \
@@ -1797,6 +1933,194 @@ past_checksum:
after_set_capability:
#endif
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ /* Request dump to start from slave replication GTID state. */
+ int rc;
+ char str_buf[256];
+ String query_str(str_buf, sizeof(str_buf), system_charset_info);
+ query_str.length(0);
+
+ /*
+ Read the master @@GLOBAL.gtid_domain_id variable.
+      This is mostly to check that the master is GTID-aware, but we could later
+ perhaps use it to check that different multi-source masters are correctly
+ configured with distinct domain_id.
+ */
+ if (mysql_real_query(mysql,
+ STRING_WITH_LEN("SELECT @@GLOBAL.gtid_domain_id")) ||
+ !(master_res= mysql_store_result(mysql)) ||
+ !(master_row= mysql_fetch_row(master_res)))
+ {
+ err_code= mysql_errno(mysql);
+ errmsg= "The slave I/O thread stops because master does not support "
+ "MariaDB global transaction id. A fatal error is encountered when "
+ "it tries to SELECT @@GLOBAL.gtid_domain_id.";
+ sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql));
+ goto err;
+ }
+ mysql_free_result(master_res);
+ master_res= NULL;
+
+ query_str.append(STRING_WITH_LEN("SET @slave_connect_state='"),
+ system_charset_info);
+ if (mi->gtid_current_pos.append_to_string(&query_str))
+ {
+ err_code= ER_OUTOFMEMORY;
+ errmsg= "The slave I/O thread stops because a fatal out-of-memory "
+ "error is encountered when it tries to compute @slave_connect_state.";
+ sprintf(err_buff, "%s Error: Out of memory", errmsg);
+ goto err;
+ }
+ query_str.append(STRING_WITH_LEN("'"), system_charset_info);
+
+ rc= mysql_real_query(mysql, query_str.ptr(), query_str.length());
+ if (rc)
+ {
+ err_code= mysql_errno(mysql);
+ if (is_network_error(err_code))
+ {
+ mi->report(ERROR_LEVEL, err_code,
+ "Setting @slave_connect_state failed with error: %s",
+ mysql_error(mysql));
+ goto network_err;
+ }
+ else
+ {
+ /* Fatal error */
+ errmsg= "The slave I/O thread stops because a fatal error is "
+ "encountered when it tries to set @slave_connect_state.";
+ sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql));
+ goto err;
+ }
+ }
+
+ query_str.length(0);
+ if (query_str.append(STRING_WITH_LEN("SET @slave_gtid_strict_mode="),
+ system_charset_info) ||
+ query_str.append_ulonglong(opt_gtid_strict_mode != false))
+ {
+ err_code= ER_OUTOFMEMORY;
+ errmsg= "The slave I/O thread stops because a fatal out-of-memory "
+ "error is encountered when it tries to set @slave_gtid_strict_mode.";
+ sprintf(err_buff, "%s Error: Out of memory", errmsg);
+ goto err;
+ }
+
+ rc= mysql_real_query(mysql, query_str.ptr(), query_str.length());
+ if (rc)
+ {
+ err_code= mysql_errno(mysql);
+ if (is_network_error(err_code))
+ {
+ mi->report(ERROR_LEVEL, err_code,
+ "Setting @slave_gtid_strict_mode failed with error: %s",
+ mysql_error(mysql));
+ goto network_err;
+ }
+ else
+ {
+ /* Fatal error */
+ errmsg= "The slave I/O thread stops because a fatal error is "
+ "encountered when it tries to set @slave_gtid_strict_mode.";
+ sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql));
+ goto err;
+ }
+ }
+
+ if (mi->rli.until_condition == Relay_log_info::UNTIL_GTID)
+ {
+ query_str.length(0);
+ query_str.append(STRING_WITH_LEN("SET @slave_until_gtid='"),
+ system_charset_info);
+ if (mi->rli.until_gtid_pos.append_to_string(&query_str))
+ {
+ err_code= ER_OUTOFMEMORY;
+ errmsg= "The slave I/O thread stops because a fatal out-of-memory "
+ "error is encountered when it tries to compute @slave_until_gtid.";
+ sprintf(err_buff, "%s Error: Out of memory", errmsg);
+ goto err;
+ }
+ query_str.append(STRING_WITH_LEN("'"), system_charset_info);
+
+ rc= mysql_real_query(mysql, query_str.ptr(), query_str.length());
+ if (rc)
+ {
+ err_code= mysql_errno(mysql);
+ if (is_network_error(err_code))
+ {
+ mi->report(ERROR_LEVEL, err_code,
+ "Setting @slave_until_gtid failed with error: %s",
+ mysql_error(mysql));
+ goto network_err;
+ }
+ else
+ {
+ /* Fatal error */
+ errmsg= "The slave I/O thread stops because a fatal error is "
+ "encountered when it tries to set @slave_until_gtid.";
+ sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql));
+ goto err;
+ }
+ }
+ }
+ }
+ else
+ {
+ /*
+ If we are not using GTID to connect this time, then instead request
+ the corresponding GTID position from the master, so that the user
+ can reconnect the next time using MASTER_GTID_POS=AUTO.
+ */
+ char quote_buf[2*sizeof(mi->master_log_name)+1];
+ char str_buf[28+2*sizeof(mi->master_log_name)+10];
+ String query(str_buf, sizeof(str_buf), system_charset_info);
+ query.length(0);
+
+ query.append("SELECT binlog_gtid_pos('");
+ escape_quotes_for_mysql(&my_charset_bin, quote_buf, sizeof(quote_buf),
+ mi->master_log_name, strlen(mi->master_log_name));
+ query.append(quote_buf);
+ query.append("',");
+ query.append_ulonglong(mi->master_log_pos);
+ query.append(")");
+
+ if (!mysql_real_query(mysql, query.c_ptr_safe(), query.length()) &&
+ (master_res= mysql_store_result(mysql)) &&
+ (master_row= mysql_fetch_row(master_res)) &&
+ (master_row[0] != NULL))
+ {
+ rpl_global_gtid_slave_state.load(mi->io_thd, master_row[0],
+ strlen(master_row[0]), false, false);
+ }
+ else if (check_io_slave_killed(mi->io_thd, mi, NULL))
+ goto slave_killed_err;
+ else if (is_network_error(mysql_errno(mysql)))
+ {
+ mi->report(WARNING_LEVEL, mysql_errno(mysql),
+ "Get master GTID position failed with error: %s", mysql_error(mysql));
+ goto network_err;
+ }
+ else
+ {
+ /*
+ ToDo: If the master does not have the binlog_gtid_pos() function, it
+ just means that it is an old master with no GTID support, so we should
+ do nothing.
+
+ However, if binlog_gtid_pos() exists, but fails or returns NULL, then
+ it means that the requested position is not valid. We could use this
+ to catch attempts to replicate from within the middle of an event,
+ avoiding strange failures or possible corruption.
+ */
+ }
+ if (master_res)
+ {
+ mysql_free_result(master_res);
+ master_res= NULL;
+ }
+ }
+
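The block above builds the binlog_gtid_pos() query by escaping the log file name before splicing it into a quoted literal, sizing quote_buf at twice the input plus one. A minimal sketch of that idea using standard SQL quote doubling (the real escape_quotes_for_mysql() is charset-aware and may escape differently):

#include <cstdio>
#include <string>

// Wrap a value in single quotes, doubling any embedded quote so the literal
// cannot terminate early. Worst case the output is twice the input plus the
// two surrounding quotes, which is why the caller above reserves 2*len+1.
static std::string quote_sql_literal(const std::string &in)
{
  std::string out;
  out.reserve(in.size() * 2 + 2);
  out+= '\'';
  for (char c : in)
  {
    if (c == '\'')
      out+= '\'';          // double the quote: standard SQL escaping
    out+= c;
  }
  out+= '\'';
  return out;
}

int main()
{
  std::string logname= "master-bin.000007";
  std::string query= "SELECT binlog_gtid_pos(" + quote_sql_literal(logname) +
                     ", 4)";
  printf("%s\n", query.c_str());
  return 0;
}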
err:
if (errmsg)
{
@@ -1990,7 +2314,7 @@ int register_slave_on_master(MYSQL* mysql, Master_info *mi,
DBUG_RETURN(0);
}
- int4store(pos, server_id); pos+= 4;
+ int4store(pos, global_system_variables.server_id); pos+= 4;
pos= net_store_data(pos, (uchar*) report_host, report_host_len);
pos= net_store_data(pos, (uchar*) report_user, report_user_len);
pos= net_store_data(pos, (uchar*) report_password, report_password_len);
@@ -2039,16 +2363,20 @@ int register_slave_on_master(MYSQL* mysql, Master_info *mi,
bool show_master_info(THD *thd, Master_info *mi, bool full)
{
DBUG_ENTER("show_master_info");
+ String gtid_pos;
- if (send_show_master_info_header(thd, full))
+ if (full && rpl_global_gtid_slave_state.tostring(&gtid_pos, NULL, 0))
DBUG_RETURN(TRUE);
- if (send_show_master_info_data(thd, mi, full))
+ if (send_show_master_info_header(thd, full, gtid_pos.length()))
+ DBUG_RETURN(TRUE);
+ if (send_show_master_info_data(thd, mi, full, &gtid_pos))
DBUG_RETURN(TRUE);
my_eof(thd);
DBUG_RETURN(FALSE);
}
-static bool send_show_master_info_header(THD *thd, bool full)
+static bool send_show_master_info_header(THD *thd, bool full,
+ size_t gtid_pos_length)
{
List<Item> field_list;
Protocol *protocol= thd->protocol;
@@ -2131,6 +2459,8 @@ static bool send_show_master_info_header(THD *thd, bool full)
sizeof(mi->ssl_crl)));
field_list.push_back(new Item_empty_string("Master_SSL_Crlpath",
sizeof(mi->ssl_crlpath)));
+ field_list.push_back(new Item_empty_string("Using_Gtid",
+ sizeof("Current_Pos")-1));
if (full)
{
field_list.push_back(new Item_return_int("Retried_transactions",
@@ -2143,6 +2473,8 @@ static bool send_show_master_info_header(THD *thd, bool full)
10, MYSQL_TYPE_LONG));
field_list.push_back(new Item_float("Slave_heartbeat_period",
0.0, 3, 10));
+ field_list.push_back(new Item_empty_string("Gtid_Slave_Pos",
+ gtid_pos_length));
}
if (protocol->send_result_set_metadata(&field_list,
@@ -2152,7 +2484,8 @@ static bool send_show_master_info_header(THD *thd, bool full)
}
-static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full)
+static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full,
+ String *gtid_pos)
{
DBUG_ENTER("send_show_master_info_data");
@@ -2161,6 +2494,7 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full)
DBUG_PRINT("info",("host is set: '%s'", mi->host));
String *packet= &thd->packet;
Protocol *protocol= thd->protocol;
+ Rpl_filter *rpl_filter= mi->rpl_filter;
char buf[256];
String tmp(buf, sizeof(buf), &my_charset_bin);
@@ -2220,7 +2554,8 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full)
protocol->store(
mi->rli.until_condition==Relay_log_info::UNTIL_NONE ? "None":
( mi->rli.until_condition==Relay_log_info::UNTIL_MASTER_POS? "Master":
- "Relay"), &my_charset_bin);
+ ( mi->rli.until_condition==Relay_log_info::UNTIL_RELAY_POS? "Relay":
+ "Gtid")), &my_charset_bin);
protocol->store(mi->rli.until_log_name, &my_charset_bin);
protocol->store((ulonglong) mi->rli.until_log_pos);
@@ -2311,6 +2646,7 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full)
protocol->store(mi->ssl_ca, &my_charset_bin);
// Master_Ssl_Crlpath
protocol->store(mi->ssl_capath, &my_charset_bin);
+ protocol->store(mi->using_gtid_astext(mi->using_gtid), &my_charset_bin);
if (full)
{
protocol->store((uint32) mi->rli.retried_trans);
@@ -2318,6 +2654,7 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full)
protocol->store((uint32) mi->rli.executed_entries);
protocol->store((uint32) mi->received_heartbeats);
protocol->store((double) mi->heartbeat_period, 3, &tmp);
+ protocol->store(gtid_pos->ptr(), gtid_pos->length(), &my_charset_bin);
}
mysql_mutex_unlock(&mi->rli.err_lock);
@@ -2360,11 +2697,19 @@ static int cmp_mi_by_name(const Master_info **arg1,
bool show_all_master_info(THD* thd)
{
uint i, elements;
+ String gtid_pos;
Master_info **tmp;
DBUG_ENTER("show_master_info");
mysql_mutex_assert_owner(&LOCK_active_mi);
- if (send_show_master_info_header(thd, 1))
+ gtid_pos.length(0);
+ if (rpl_append_gtid_state(&gtid_pos, true))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ if (send_show_master_info_header(thd, 1, gtid_pos.length()))
DBUG_RETURN(TRUE);
if (!(elements= master_info_index->master_info_hash.records))
@@ -2386,7 +2731,7 @@ bool show_all_master_info(THD* thd)
for (i= 0; i < elements; i++)
{
- if (send_show_master_info_data(thd, tmp[i], 1))
+ if (send_show_master_info_data(thd, tmp[i], 1, &gtid_pos))
DBUG_RETURN(TRUE);
}
@@ -2465,12 +2810,6 @@ static int init_slave_thread(THD* thd, Master_info *mi,
thd->system_thread = (thd_type == SLAVE_THD_SQL) ?
SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO;
thd->security_ctx->skip_grants();
-/*
- Adding MAX_LOG_EVENT_HEADER_LEN to the max_allowed_packet on all
- slave threads, since a replication event can become this much larger
- than the corresponding packet (query) sent from client to master.
-*/
- thd->variables.max_allowed_packet= slave_max_allowed_packet;
thd->slave_thread= 1;
thd->connection_name= mi->connection_name;
thd->enable_slow_log= opt_log_slow_slave_statements;
@@ -2551,7 +2890,7 @@ static int request_dump(THD *thd, MYSQL* mysql, Master_info* mi,
// TODO if big log files: Change next to int8store()
int4store(buf, (ulong) mi->master_log_pos);
int2store(buf + 4, binlog_flags);
- int4store(buf + 6, server_id);
+ int4store(buf + 6, global_system_variables.server_id);
len = (uint) strlen(logname);
memcpy(buf + 10, logname,len);
if (simple_command(mysql, COM_BINLOG_DUMP, buf, len + 10, 1))
@@ -2760,7 +3099,8 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli)
has a Rotate etc).
*/
- thd->server_id = ev->server_id; // use the original server id for logging
+ /* Use the original server id for logging. */
+ thd->variables.server_id = ev->server_id;
thd->set_time(); // time the query
thd->lex->current_select= 0;
if (!ev->when)
@@ -2906,7 +3246,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
This tests if the position of the beginning of the current event
hits the UNTIL barrier.
*/
- if (rli->until_condition != Relay_log_info::UNTIL_NONE &&
+ if ((rli->until_condition == Relay_log_info::UNTIL_MASTER_POS ||
+ rli->until_condition == Relay_log_info::UNTIL_RELAY_POS) &&
rli->is_until_satisfied(thd, ev))
{
char buf[22];
@@ -3131,8 +3472,22 @@ static int try_to_reconnect(THD *thd, MYSQL *mysql, Master_info *mi,
if (!suppress_warnings)
{
char buf[256], llbuff[22];
+ String tmp;
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ tmp.append(STRING_WITH_LEN("; GTID position '"));
+ mi->gtid_current_pos.append_to_string(&tmp);
+ if (mi->events_queued_since_last_gtid == 0)
+ tmp.append(STRING_WITH_LEN("'"));
+ else
+ {
+ tmp.append(STRING_WITH_LEN("', GTID event skip "));
+ tmp.append_ulonglong((ulonglong)mi->events_queued_since_last_gtid);
+ }
+ }
my_snprintf(buf, sizeof(buf), messages[SLAVE_RECON_MSG_FAILED],
- IO_RPL_LOG_NAME, llstr(mi->master_log_pos, llbuff));
+ IO_RPL_LOG_NAME, llstr(mi->master_log_pos, llbuff),
+ tmp.c_ptr_safe());
/*
      Raise a warning during registering on master/requesting dump.
Log a message reading event.
@@ -3224,6 +3579,23 @@ pthread_handler_t handle_slave_io(void *arg)
/* This must be called before run any binlog_relay_io hooks */
my_pthread_setspecific_ptr(RPL_MASTER_INFO, mi);
+ /* Load the set of seen GTIDs, if we did not already. */
+ if (rpl_load_gtid_slave_state(thd))
+ {
+ mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
+ "Unable to load replication GTID slave state from mysql.%s: %s",
+ rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->message());
+ /*
+ If we are using old-style replication, we can continue, even though we
+ then will not be able to record the GTIDs we receive. But if using GTID,
+ we must give up.
+ */
+ if (mi->using_gtid != Master_info::USE_GTID_NO || opt_gtid_strict_mode)
+ goto err;
+ }
+
+
if (RUN_HOOK(binlog_relay_io, thread_start, (thd, mi)))
{
mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
@@ -3242,18 +3614,20 @@ pthread_handler_t handle_slave_io(void *arg)
// we can get killed during safe_connect
if (!safe_connect(thd, mysql, mi))
{
- sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',"
- "replication started in log '%s' at position %s",
- mi->user, mi->host, mi->port,
- IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos,llbuff));
- /*
- Adding MAX_LOG_EVENT_HEADER_LEN to the max_packet_size on the I/O
- thread, since a replication event can become this much larger than
- the corresponding packet (query) sent from client to master.
- */
- thd->net.max_packet_size= slave_max_allowed_packet;
- mysql->net.max_packet_size= thd->net.max_packet_size+= MAX_LOG_EVENT_HEADER;
+ if (mi->using_gtid == Master_info::USE_GTID_NO)
+ sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',"
+ "replication started in log '%s' at position %s",
+ mi->user, mi->host, mi->port,
+ IO_RPL_LOG_NAME,
+ llstr(mi->master_log_pos,llbuff));
+ else
+ {
+ String tmp;
+ mi->gtid_current_pos.to_string(&tmp);
+ sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',"
+ "replication starts at GTID position '%s'",
+ mi->user, mi->host, mi->port, tmp.c_ptr_safe());
+ }
}
else
{
@@ -3263,6 +3637,26 @@ pthread_handler_t handle_slave_io(void *arg)
connected:
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ /*
+ When the IO thread (re)connects to the master using GTID, it will
+ connect at the start of an event group. But the IO thread may have
+ previously logged part of the following event group to the relay
+ log.
+
+      When the IO and SQL threads are started together, we erase any previous
+ relay logs, but this is not possible/desirable while the SQL thread is
+ running. To avoid duplicating partial event groups in the relay logs in
+ this case, we remember the count of events in any partially logged event
+ group before the reconnect, and then here at connect we set up a counter
+ to skip the already-logged part of the group.
+ */
+ mi->gtid_reconnect_event_skip_count= mi->events_queued_since_last_gtid;
+ mi->gtid_event_seen= false;
+ }
+
+#ifdef ENABLED_DEBUG_SYNC
DBUG_EXECUTE_IF("dbug.before_get_running_status_yes",
{
const char act[]=
@@ -3272,6 +3666,7 @@ connected:
DBUG_ASSERT(!debug_sync_set_action(thd,
STRING_WITH_LEN(act)));
};);
+#endif
// TODO: the assignment below should be under mutex (5.0)
mi->slave_running= MYSQL_SLAVE_RUN_CONNECT;
@@ -3486,8 +3881,19 @@ log space");
// error = 0;
err:
// print the current replication position
- sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s",
- IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
+ if (mi->using_gtid == Master_info::USE_GTID_NO)
+ sql_print_information("Slave I/O thread exiting, read up to log '%s', "
+ "position %s",
+ IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
+ else
+ {
+ String tmp;
+ mi->gtid_current_pos.to_string(&tmp);
+ sql_print_information("Slave I/O thread exiting, read up to log '%s', "
+ "position %s; GTID position %s",
+ IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff),
+ tmp.c_ptr_safe());
+ }
RUN_HOOK(binlog_relay_io, thread_stop, (thd, mi));
thd->reset_query();
thd->reset_db(NULL, 0);
@@ -3542,17 +3948,33 @@ err_during_init:
/*
Check the temporary directory used by commands like
LOAD DATA INFILE.
+
+ As the directory never changes during a mysqld run, we only
+  test this once and cache the result. This also resolves a race condition
+  when the check can be run by multiple threads at the same time.
*/
+
+static bool check_temp_dir_run= 0;
+static int check_temp_dir_result= 0;
+
static
int check_temp_dir(char* tmp_file)
{
- int fd;
+ File fd;
+ int result= 1; // Assume failure
MY_DIR *dirp;
char tmp_dir[FN_REFLEN];
size_t tmp_dir_size;
-
DBUG_ENTER("check_temp_dir");
+ mysql_mutex_lock(&LOCK_thread_count);
+ if (check_temp_dir_run)
+ {
+ result= check_temp_dir_result;
+ goto end;
+ }
+ check_temp_dir_run= 1;
+
/*
Get the directory from the temporary file.
*/
@@ -3562,27 +3984,33 @@ int check_temp_dir(char* tmp_file)
Check if the directory exists.
*/
if (!(dirp=my_dir(tmp_dir,MYF(MY_WME))))
- DBUG_RETURN(1);
+ goto end;
my_dirend(dirp);
/*
- Check permissions to create a file.
+ Check permissions to create a file. We use O_TRUNC to ensure that
+    things work even if we happen to have an old file lying around.
*/
if ((fd= mysql_file_create(key_file_misc,
tmp_file, CREATE_MODE,
- O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
+ O_WRONLY | O_BINARY | O_TRUNC | O_NOFOLLOW,
MYF(MY_WME))) < 0)
- DBUG_RETURN(1);
+ goto end;
+ result= 0; // Directory name ok
/*
Clean up.
*/
mysql_file_close(fd, MYF(0));
mysql_file_delete(key_file_misc, tmp_file, MYF(0));
- DBUG_RETURN(0);
+end:
+ check_temp_dir_result= result;
+ mysql_mutex_unlock(&LOCK_thread_count);
+ DBUG_RETURN(result);
}
+
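The rewritten check_temp_dir() caches its verdict under LOCK_thread_count so the probe runs only once even when several slave threads start at the same time. A small sketch of that run-once caching shape, using a standard mutex and a placeholder probe (not the server's primitives):

#include <cstdio>
#include <mutex>

static std::mutex check_mutex;
static bool       check_done=   false;
static int        check_result= 0;

// Placeholder for the real probe (directory exists, file creatable, ...).
static int probe_temp_dir_once()
{
  return 0;   // 0 = OK, 1 = failure, as in check_temp_dir() above
}

// All callers get the cached result; only the first one pays for the probe.
static int check_temp_dir_cached()
{
  std::lock_guard<std::mutex> lk(check_mutex);
  if (!check_done)
  {
    check_result= probe_temp_dir_once();
    check_done= true;
  }
  return check_result;
}

int main()
{
  printf("first call:  %d\n", check_temp_dir_cached());
  printf("second call: %d\n", check_temp_dir_cached());
  return 0;
}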
/**
Slave SQL thread entry point.
@@ -3613,6 +4041,7 @@ pthread_handler_t handle_slave_sql(void *arg)
thd = new THD; // note that contructor of THD uses DBUG_ !
thd->thread_stack = (char*)&thd; // remember where our stack is
+ thd->rpl_filter = mi->rpl_filter;
DBUG_ASSERT(rli->inited);
DBUG_ASSERT(rli->mi == mi);
@@ -3643,7 +4072,7 @@ pthread_handler_t handle_slave_sql(void *arg)
}
thd->init_for_queries();
thd->rli_slave= rli;
- if ((rli->deferred_events_collecting= rpl_filter->is_on()))
+ if ((rli->deferred_events_collecting= mi->rpl_filter->is_on()))
{
rli->deferred_events= new Deferred_log_events(rli);
}
@@ -3731,10 +4160,20 @@ pthread_handler_t handle_slave_sql(void *arg)
rli->group_master_log_name,
llstr(rli->group_master_log_pos,llbuff)));
if (global_system_variables.log_warnings)
+ {
+ String tmp;
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ tmp.append(STRING_WITH_LEN("; GTID position '"));
+ rpl_append_gtid_state(&tmp,
+ mi->using_gtid==Master_info::USE_GTID_CURRENT_POS);
+ tmp.append(STRING_WITH_LEN("'"));
+ }
sql_print_information("Slave SQL thread initialized, starting replication in \
-log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
+log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name,
- llstr(rli->group_relay_log_pos,llbuff1));
+ llstr(rli->group_relay_log_pos,llbuff1), tmp.c_ptr_safe());
+ }
if (check_temp_dir(rli->slave_patternload_file))
{
@@ -3744,6 +4183,22 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
goto err;
}
+ /* Load the set of seen GTIDs, if we did not already. */
+ if (rpl_load_gtid_slave_state(thd))
+ {
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
+ "Unable to load replication GTID slave state from mysql.%s: %s",
+ rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->message());
+ /*
+ If we are using old-style replication, we can continue, even though we
+ then will not be able to record the GTIDs we receive. But if using GTID,
+ we must give up.
+ */
+ if (mi->using_gtid != Master_info::USE_GTID_NO || opt_gtid_strict_mode)
+ goto err;
+ }
+
/* execute init_slave variable */
if (opt_init_slave.length)
{
@@ -3763,13 +4218,14 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
mysql_mutex_lock(&rli->data_lock);
if (rli->slave_skip_counter)
{
- strmake(saved_log_name, rli->group_relay_log_name, FN_REFLEN - 1);
- strmake(saved_master_log_name, rli->group_master_log_name, FN_REFLEN - 1);
+ strmake_buf(saved_log_name, rli->group_relay_log_name);
+ strmake_buf(saved_master_log_name, rli->group_master_log_name);
saved_log_pos= rli->group_relay_log_pos;
saved_master_log_pos= rli->group_master_log_pos;
saved_skip= rli->slave_skip_counter;
}
- if (rli->until_condition != Relay_log_info::UNTIL_NONE &&
+ if ((rli->until_condition == Relay_log_info::UNTIL_MASTER_POS ||
+ rli->until_condition == Relay_log_info::UNTIL_RELAY_POS) &&
rli->is_until_satisfied(thd, NULL))
{
char buf[22];
@@ -3859,16 +4315,35 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
sql_print_warning("Slave: %s Error_code: %d", err->get_message_text(), err->get_sql_errno());
}
if (udf_error)
+ {
+ String tmp;
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ tmp.append(STRING_WITH_LEN("; GTID position '"));
+ rpl_append_gtid_state(&tmp, false);
+ tmp.append(STRING_WITH_LEN("'"));
+ }
sql_print_error("Error loading user-defined library, slave SQL "
"thread aborted. Install the missing library, and restart the "
"slave SQL thread with \"SLAVE START\". We stopped at log '%s' "
- "position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos,
- llbuff));
+ "position %s%s", RPL_LOG_NAME, llstr(rli->group_master_log_pos,
+ llbuff), tmp.c_ptr_safe());
+ }
else
+ {
+ String tmp;
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ tmp.append(STRING_WITH_LEN("; GTID position '"));
+ rpl_append_gtid_state(&tmp, false);
+ tmp.append(STRING_WITH_LEN("'"));
+ }
sql_print_error("\
Error running query, slave SQL thread aborted. Fix the problem, and restart \
the slave SQL thread with \"SLAVE START\". We stopped at log \
-'%s' position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, llbuff));
+'%s' position %s%s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, llbuff),
+ tmp.c_ptr_safe());
+ }
}
goto err;
}
@@ -3876,9 +4351,20 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
}
/* Thread stopped. Print the current replication position to the log */
- sql_print_information("Slave SQL thread exiting, replication stopped in log "
- "'%s' at position %s",
- RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff));
+ {
+ String tmp;
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ tmp.append(STRING_WITH_LEN("; GTID position '"));
+ rpl_append_gtid_state(&tmp, false);
+ tmp.append(STRING_WITH_LEN("'"));
+ }
+ sql_print_information("Slave SQL thread exiting, replication stopped in "
+ "log '%s' at position %s%s",
+ RPL_LOG_NAME,
+ llstr(rli->group_master_log_pos,llbuff),
+ tmp.c_ptr_safe());
+ }
err:
@@ -3963,14 +4449,14 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev)
if (unlikely(!cev->is_valid()))
DBUG_RETURN(1);
- if (!rpl_filter->db_ok(cev->db))
+ if (!mi->rpl_filter->db_ok(cev->db))
{
skip_load_data_infile(net);
DBUG_RETURN(0);
}
DBUG_ASSERT(cev->inited_from_old);
thd->file_id = cev->file_id = mi->file_id++;
- thd->server_id = cev->server_id;
+ thd->variables.server_id = cev->server_id;
cev_not_written = 1;
if (unlikely(net_request_file(net,cev->fname)))
@@ -4353,6 +4839,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
mysql_mutex_t *log_lock= rli->relay_log.get_log_lock();
ulong s_id;
bool unlock_data_lock= TRUE;
+ bool gtid_skip_enqueue= false;
+
/*
FD_q must have been prepared for the first R_a event
inside get_master_version_and_clock()
@@ -4540,6 +5028,19 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
mi->rli.relay_log.relay_log_checksum_alg= tmp->checksum_alg;
/*
+ Do not queue any format description event that we receive after a
+ reconnect where we are skipping over a partial event group received
+ before the reconnect.
+
+ (If we queued such an event, and it was the first format_description
+ event after master restart, the slave SQL thread would think that
+ the partial event group before it in the relay log was from a
+ previous master crash and should be rolled back).
+ */
+ if (unlikely(mi->gtid_reconnect_event_skip_count && !mi->gtid_event_seen))
+ gtid_skip_enqueue= true;
+
+ /*
Though this does some conversion to the slave's format, this will
preserve the master's binlog format version, and number of event types.
*/
@@ -4582,16 +5083,18 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
    Heartbeat is sent only after an event corresponding to the coordinates
the heartbeat carries.
- Slave can not have a difference in coordinates except in the only
+ Slave can not have a higher coordinate except in the only
special case when mi->master_log_name, master_log_pos have never
been updated by Rotate event i.e when slave does not have any history
with the master (and thereafter mi->master_log_pos is NULL).
+ Slave can have lower coordinates, if some event from master was omitted.
+
TODO: handling `when' for SHOW SLAVE STATUS' snds behind
*/
if ((memcmp(mi->master_log_name, hb.get_log_ident(), hb.get_ident_len())
&& mi->master_log_name != NULL)
- || mi->master_log_pos != hb.log_pos)
+ || mi->master_log_pos > hb.log_pos)
{
/* missed events of heartbeat from the past */
error= ER_SLAVE_HEARTBEAT_FAILURE;
@@ -4607,7 +5110,140 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
}
break;
+ case GTID_LIST_EVENT:
+ {
+ const char *errmsg;
+ Gtid_list_log_event *glev;
+ Log_event *tmp;
+ uint32 flags;
+
+ if (!(tmp= Log_event::read_log_event(buf, event_len, &errmsg,
+ mi->rli.relay_log.description_event_for_queue,
+ opt_slave_sql_verify_checksum)))
+ {
+ error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
+ goto err;
+ }
+ glev= static_cast<Gtid_list_log_event *>(tmp);
+ event_pos= glev->log_pos;
+ flags= glev->gl_flags;
+ delete glev;
+
+ /*
+ We use fake Gtid_list events to update the old-style position (among
+ other things).
+
+      Early code created fake Gtid_list events with zero log_pos; those should
+      not modify the old-style position.
+ */
+ if (event_pos == 0 || event_pos <= mi->master_log_pos)
+ inc_pos= 0;
+ else
+ inc_pos= event_pos - mi->master_log_pos;
+
+ if (mi->rli.until_condition == Relay_log_info::UNTIL_GTID &&
+ flags & Gtid_list_log_event::FLAG_UNTIL_REACHED)
+ {
+ char str_buf[128];
+ String str(str_buf, sizeof(str_buf), system_charset_info);
+ mi->rli.until_gtid_pos.to_string(&str);
+ sql_print_information("Slave IO thread stops because it reached its"
+ " UNTIL master_gtid_pos %s", str.c_ptr_safe());
+ mi->abort_slave= true;
+ }
+ }
+ break;
+
+ case GTID_EVENT:
+ {
+ uchar dummy_flag;
+
+ if (mi->using_gtid == Master_info::USE_GTID_NO)
+ goto default_action;
+ if (unlikely(!mi->gtid_event_seen))
+ {
+ mi->gtid_event_seen= true;
+ if (mi->gtid_reconnect_event_skip_count)
+ {
+ rpl_gtid gtid;
+
+ /*
+ If we are reconnecting, and we need to skip a partial event group
+ already queued to the relay log before the reconnect, then we check
+ that we actually get the same event group (same GTID) as before, so
+ we do not end up with half of one group and half another.
+
+ The only way we should be able to receive a different GTID than what
+ we expect is if the binlog on the master (or more likely the whole
+        master server) was replaced with a different one, on the same IP
+ address, _and_ the new master happens to have domains in a different
+ order so we get the GTID from a different domain first. Still, it is
+ best to protect against this case.
+ */
+ if (Gtid_log_event::peek(buf, event_len, checksum_alg,
+ &gtid.domain_id, &gtid.server_id,
+ &gtid.seq_no, &dummy_flag))
+ {
+ error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
+ goto err;
+ }
+ if (gtid.domain_id != mi->last_queued_gtid.domain_id ||
+ gtid.server_id != mi->last_queued_gtid.server_id ||
+ gtid.seq_no != mi->last_queued_gtid.seq_no)
+ {
+ bool first;
+ error= ER_SLAVE_UNEXPECTED_MASTER_SWITCH;
+ error_msg.append(STRING_WITH_LEN("Expected: "));
+ first= true;
+ rpl_slave_state_tostring_helper(&error_msg, &mi->last_queued_gtid,
+ &first);
+ error_msg.append(STRING_WITH_LEN(", received: "));
+ first= true;
+ rpl_slave_state_tostring_helper(&error_msg, &gtid, &first);
+ goto err;
+ }
+ }
+ }
+
+ if (unlikely(mi->gtid_reconnect_event_skip_count))
+ {
+ goto default_action;
+ }
+
+ /*
+ We have successfully queued to relay log everything before this GTID, so
+ in case of reconnect we can start from after any previous GTID.
+ */
+ if (mi->events_queued_since_last_gtid)
+ {
+ mi->gtid_current_pos.update(&mi->last_queued_gtid);
+ mi->events_queued_since_last_gtid= 0;
+ }
+ if (Gtid_log_event::peek(buf, event_len, checksum_alg,
+ &mi->last_queued_gtid.domain_id,
+ &mi->last_queued_gtid.server_id,
+ &mi->last_queued_gtid.seq_no, &dummy_flag))
+ {
+ error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
+ goto err;
+ }
+ ++mi->events_queued_since_last_gtid;
+ }
+ break;
+
default:
+ default_action:
+ if (mi->using_gtid != Master_info::USE_GTID_NO && mi->gtid_event_seen)
+ {
+ if (unlikely(mi->gtid_reconnect_event_skip_count))
+ {
+ --mi->gtid_reconnect_event_skip_count;
+ gtid_skip_enqueue= true;
+ }
+ else if (mi->events_queued_since_last_gtid)
+ ++mi->events_queued_since_last_gtid;
+ }
+
inc_pos= event_len;
break;
}
@@ -4643,7 +5279,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
mysql_mutex_lock(log_lock);
s_id= uint4korr(buf + SERVER_ID_OFFSET);
- if ((s_id == ::server_id && !mi->rli.replicate_same_server_id) ||
+ if ((s_id == global_system_variables.server_id &&
+ !mi->rli.replicate_same_server_id) ||
/*
the following conjunction deals with IGNORE_SERVER_IDS, if set
If the master is on the ignore list, execution of
@@ -4674,7 +5311,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
IGNORE_SERVER_IDS it increments mi->master_log_pos
as well as rli->group_relay_log_pos.
*/
- if (!(s_id == ::server_id && !mi->rli.replicate_same_server_id) ||
+ if (!(s_id == global_system_variables.server_id &&
+ !mi->rli.replicate_same_server_id) ||
(buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT &&
buf[EVENT_TYPE_OFFSET] != ROTATE_EVENT &&
buf[EVENT_TYPE_OFFSET] != STOP_EVENT))
@@ -4690,8 +5328,16 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
}
else
{
- /* write the event to the relay log */
- if (likely(!(rli->relay_log.appendv(buf,event_len,0))))
+ /*
+ Write the event to the relay log, unless we reconnected in the middle
+ of an event group and now need to skip the initial part of the group that
+ we already wrote before reconnecting.
+ */
+ if (unlikely(gtid_skip_enqueue))
+ {
+ mi->master_log_pos+= inc_pos;
+ }
+ else if (likely(!(rli->relay_log.appendv(buf,event_len,0))))
{
mi->master_log_pos+= inc_pos;
DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
@@ -4815,8 +5461,9 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi,
int last_errno= -2; // impossible error
ulong err_count=0;
char llbuff[22];
+ my_bool my_true= 1;
DBUG_ENTER("connect_to_master");
-
+ set_slave_max_allowed_packet(thd, mysql);
#ifndef DBUG_OFF
mi->events_till_disconnect = disconnect_slave_event_count;
#endif
@@ -4826,6 +5473,8 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi,
mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout);
mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout);
+ mysql_options(mysql, MYSQL_OPT_USE_THREAD_SPECIFIC_MEMORY,
+ (char*) &my_true);
#ifdef HAVE_OPENSSL
if (mi->ssl)
@@ -4945,14 +5594,15 @@ MYSQL *rpl_connect_master(MYSQL *mysql)
{
THD *thd= current_thd;
Master_info *mi= my_pthread_getspecific_ptr(Master_info*, RPL_MASTER_INFO);
+ bool allocated= false;
+ my_bool my_true= 1;
+
if (!mi)
{
sql_print_error("'rpl_connect_master' must be called in slave I/O thread context.");
return NULL;
}
- bool allocated= false;
-
if (!mysql)
{
if(!(mysql= mysql_init(NULL)))
@@ -4972,6 +5622,8 @@ MYSQL *rpl_connect_master(MYSQL *mysql)
*/
mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout);
mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout);
+ mysql_options(mysql, MYSQL_OPT_USE_THREAD_SPECIFIC_MEMORY,
+ (char*) &my_true);
#ifdef HAVE_OPENSSL
if (mi->ssl)
@@ -5212,6 +5864,27 @@ static Log_event* next_event(Relay_log_info* rli)
inc_event_relay_log_pos()
*/
rli->future_event_relay_log_pos= my_b_tell(cur_log);
+ /*
+ For GTID, allocate a new sub_id for the given domain_id.
+        The sub_id values must be allocated in increasing order, following binlog order.
+ */
+ if (ev->get_type_code() == GTID_EVENT)
+ {
+ Gtid_log_event *gev= static_cast<Gtid_log_event *>(ev);
+ uint64 sub_id= rpl_global_gtid_slave_state.next_sub_id(gev->domain_id);
+ if (!sub_id)
+ {
+ errmsg = "slave SQL thread aborted because of out-of-memory error";
+ if (hot_log)
+ mysql_mutex_unlock(log_lock);
+ goto err;
+ }
+ rli->gtid_sub_id= sub_id;
+ rli->current_gtid.server_id= gev->server_id;
+ rli->current_gtid.domain_id= gev->domain_id;
+ rli->current_gtid.seq_no= gev->seq_no;
+ }
+
if (hot_log)
mysql_mutex_unlock(log_lock);
DBUG_RETURN(ev);
@@ -5402,8 +6075,7 @@ static Log_event* next_event(Relay_log_info* rli)
goto err;
}
rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE;
- strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->event_relay_log_name)-1);
+ strmake_buf(rli->event_relay_log_name,rli->linfo.log_file_name);
flush_relay_log_info(rli);
}
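Several hunks above replace strmake(buf, src, sizeof(buf)-1) with strmake_buf(buf, src); presumably the helper folds the size argument in, so the buffer and its size can no longer get out of sync (as in the init_recovery() call that passed sizeof(mi->rli.event_relay_log_name) while filling rli->event_relay_log_name). A sketch of such a wrapper, assuming the usual "copy at most n characters and always NUL-terminate" contract for strmake():

#include <cstdio>
#include <cstring>

// Assumed contract of strmake(): copy at most `len` characters and always
// NUL-terminate, so the destination must hold len+1 bytes.
static char *strmake(char *dst, const char *src, size_t len)
{
  size_t n= 0;
  while (n < len && src[n] != '\0')
    n++;
  memcpy(dst, src, n);
  dst[n]= '\0';
  return dst + n;
}

// Sketch of a strmake_buf-style wrapper: the destination must be a real
// array, so its capacity is deduced at compile time and the repeated,
// error-prone "sizeof(buf)-1" disappears from every call site.
template <size_t N>
static char *strmake_buf_sketch(char (&dst)[N], const char *src)
{
  return strmake(dst, src, N - 1);
}

int main()
{
  char name[16];
  strmake_buf_sketch(name, "relay-bin.000001");   // safely truncated to fit
  printf("%s\n", name);
  return 0;
}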
diff --git a/sql/sp.cc b/sql/sp.cc
index 32998e42df9..c1c162267a8 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -114,7 +114,7 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] =
},
{
{ C_STRING_WITH_LEN("definer") },
- { C_STRING_WITH_LEN("char(77)") },
+ { C_STRING_WITH_LEN("char(") },
{ C_STRING_WITH_LEN("utf8") }
},
{
@@ -168,7 +168,7 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] =
};
static const TABLE_FIELD_DEF
- proc_table_def= {MYSQL_PROC_FIELD_COUNT, proc_table_fields};
+proc_table_def= {MYSQL_PROC_FIELD_COUNT, proc_table_fields, 0, (uint*) 0 };
/*************************************************************************/
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 045051c0b14..cb689735925 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -41,6 +41,7 @@
#include "sql_parse.h" // cleanup_items
#include "sql_base.h" // close_thread_tables
#include "transaction.h" // trans_commit_stmt
+#include "sql_audit.h"
/*
Sufficient max length of printed destinations and frame offsets (all uints).
@@ -1320,6 +1321,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
Will write this SP statement into binlog separately.
TODO: consider changing the condition to "not inside event union".
*/
+ MEM_ROOT *user_var_events_alloc_saved= thd->user_var_events_alloc;
if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
thd->user_var_events_alloc= thd->mem_root;
@@ -1335,7 +1337,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
{
reset_dynamic(&thd->user_var_events);
- thd->user_var_events_alloc= NULL;//DEBUG
+ thd->user_var_events_alloc= user_var_events_alloc_saved;
}
/* we should cleanup free_list and memroot, used by instruction */
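The first sp_head.cc hunk replaces the hard-coded thd->user_var_events_alloc= NULL with a save and restore of the previous allocator pointer. A small sketch of the same discipline expressed as an RAII guard (the types are illustrative stand-ins, not the server's):

#include <cassert>
#include <cstdio>

struct mem_root { const char *name; };   // illustrative stand-in for MEM_ROOT

struct session { mem_root *user_var_events_alloc; };

// Guard that records the current allocator pointer and restores it on exit,
// so nested or early-returning code cannot leave a stale override behind.
class scoped_alloc_override {
  session  *m_thd;
  mem_root *m_saved;
public:
  scoped_alloc_override(session *thd, mem_root *tmp)
    : m_thd(thd), m_saved(thd->user_var_events_alloc)
  { thd->user_var_events_alloc= tmp; }
  ~scoped_alloc_override() { m_thd->user_var_events_alloc= m_saved; }
};

int main()
{
  mem_root outer{"outer"}, inner{"inner"};
  session thd{&outer};
  {
    scoped_alloc_override guard(&thd, &inner);
    printf("inside:  %s\n", thd.user_var_events_alloc->name);  // "inner"
  }
  printf("outside: %s\n", thd.user_var_events_alloc->name);    // "outer"
  assert(thd.user_var_events_alloc == &outer);
  return 0;
}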
@@ -2359,7 +2361,7 @@ sp_head::fill_field_definition(THD *thd, LEX *lex,
lex->charset ? lex->charset :
thd->variables.collation_database,
lex->uint_geom_type,
- lex->vcol_info, NULL))
+ lex->vcol_info, NULL, FALSE))
return TRUE;
if (field_def->interval_list.elements)
@@ -3055,6 +3057,11 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
query_cache_end_of_result(thd);
+ mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS,
+ thd->get_stmt_da()->is_error() ?
+ thd->get_stmt_da()->sql_errno() : 0,
+ command_name[COM_QUERY].str);
+
if (!res && unlikely(thd->enable_slow_log))
log_slow_statement(thd);
}
diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc
index fc2f3e8bb2f..7c44e675811 100644
--- a/sql/sp_pcontext.cc
+++ b/sql/sp_pcontext.cc
@@ -82,7 +82,7 @@ sp_pcontext::sp_pcontext(sp_pcontext *prev, sp_pcontext::enum_scope scope)
sp_pcontext::~sp_pcontext()
{
- for (int i= 0; i < m_children.elements(); ++i)
+ for (size_t i= 0; i < m_children.elements(); ++i)
delete m_children.at(i);
}
@@ -289,7 +289,7 @@ sp_handler *sp_pcontext::add_handler(THD *thd,
bool sp_pcontext::check_duplicate_handler(
const sp_condition_value *cond_value) const
{
- for (int i= 0; i < m_handlers.elements(); ++i)
+ for (size_t i= 0; i < m_handlers.elements(); ++i)
{
sp_handler *h= m_handlers.at(i);
@@ -315,7 +315,7 @@ sp_pcontext::find_handler(const char *sql_state,
sp_handler *found_handler= NULL;
sp_condition_value *found_cv= NULL;
- for (int i= 0; i < m_handlers.elements(); ++i)
+ for (size_t i= 0; i < m_handlers.elements(); ++i)
{
sp_handler *h= m_handlers.at(i);
@@ -419,7 +419,7 @@ sp_pcontext::find_handler(const char *sql_state,
bool sp_pcontext::add_cursor(LEX_STRING name)
{
- if (m_cursors.elements() == (int) m_max_cursor_index)
+ if (m_cursors.elements() == m_max_cursor_index)
++m_max_cursor_index;
return m_cursors.append(name);
@@ -456,7 +456,7 @@ void sp_pcontext::retrieve_field_definitions(
{
/* Put local/context fields in the result list. */
- for (int i= 0; i < m_vars.elements(); ++i)
+ for (size_t i= 0; i < m_vars.elements(); ++i)
{
sp_variable *var_def= m_vars.at(i);
@@ -465,7 +465,7 @@ void sp_pcontext::retrieve_field_definitions(
/* Put the fields of the enclosed contexts in the result list. */
- for (int i= 0; i < m_children.elements(); ++i)
+ for (size_t i= 0; i < m_children.elements(); ++i)
m_children.at(i)->retrieve_field_definitions(field_def_lst);
}
diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc
index 6b1b0730548..42476f7a596 100644
--- a/sql/sp_rcontext.cc
+++ b/sql/sp_rcontext.cc
@@ -196,11 +196,11 @@ bool sp_rcontext::push_handler(sp_handler *handler, uint first_ip)
}
-void sp_rcontext::pop_handlers(int count)
+void sp_rcontext::pop_handlers(size_t count)
{
DBUG_ASSERT(m_handlers.elements() >= count);
- for (int i= 0; i < count; ++i)
+ for (size_t i= 0; i < count; ++i)
m_handlers.pop();
}
@@ -288,7 +288,7 @@ bool sp_rcontext::handle_sql_condition(THD *thd,
DBUG_ASSERT(found_condition);
sp_handler_entry *handler_entry= NULL;
- for (int i= 0; i < m_handlers.elements(); ++i)
+ for (size_t i= 0; i < m_handlers.elements(); ++i)
{
sp_handler_entry *h= m_handlers.at(i);
diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h
index 1634367bfa9..ce692024d0d 100644
--- a/sql/sp_rcontext.h
+++ b/sql/sp_rcontext.h
@@ -230,7 +230,7 @@ public:
/// call stack.
///
/// @param count Number of handler entries to pop & delete.
- void pop_handlers(int count);
+ void pop_handlers(size_t count);
const Sql_condition_info *raised_condition() const
{
diff --git a/sql/spatial.cc b/sql/spatial.cc
index de0b563eaf4..a01d2c59a49 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -1,6 +1,6 @@
/*
- Copyright (c) 2002, 2012, Oracle and/or its affiliates.
- Copyright (c) 2011, 2012, Monty Program Ab
+ Copyright (c) 2002, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2011, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -312,6 +312,9 @@ bool Geometry::envelope(String *result) const
const char *end;
if (get_mbr(&mbr, &end))
+ return 1;
+
+ if (!mbr.valid())
{
/* Empty geometry */
if (result->reserve(1 + 4*2))
@@ -444,18 +447,19 @@ const char *Geometry::append_points(String *txt, uint32 n_points,
const char *Geometry::get_mbr_for_points(MBR *mbr, const char *data,
uint offset) const
{
- uint32 points;
+ uint32 n_points;
/* read number of points */
if (no_data(data, 4))
return 0;
- points= uint4korr(data);
+ n_points= uint4korr(data);
data+= 4;
- if (no_data(data, (POINT_DATA_SIZE + offset) * points))
+ if (n_points > max_n_points ||
+ no_data(data, (POINT_DATA_SIZE + offset) * n_points))
return 0;
/* Calculate MBR for points */
- while (points--)
+ while (n_points--)
{
data+= offset;
mbr->add_xy(data, data + SIZEOF_STORED_DOUBLE);
@@ -559,9 +563,12 @@ const Geometry::Class_info *Gis_point::get_class_info() const
uint32 Gis_line_string::get_data_size() const
{
- if (no_data(m_data, 4))
+ uint32 n_points, size;
+ if (no_data(m_data, 4) ||
+ (n_points= uint4korr(m_data)) > max_n_points ||
+ no_data(m_data, (size= 4 + n_points * POINT_DATA_SIZE)))
return GET_SIZE_ERROR;
- return 4 + uint4korr(m_data) * POINT_DATA_SIZE;
+ return size;
}
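The spatial.cc hunks all add the same defensive pattern: the point count read from untrusted WKB data is checked against an upper bound and against the remaining buffer before it is used to size a read. A self-contained sketch of that bounds check (the cap, field layout and endianness handling are assumptions for illustration; the server's max_n_points and POINT_DATA_SIZE differ):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumed layout for the sketch: a 4-byte little-endian point count
// followed by n_points * POINT_BYTES of coordinate data.
static const uint32_t POINT_BYTES= 16;        // two stored doubles
static const uint32_t MAX_POINTS=  100000;    // illustrative cap

// Returns the total size of the line-string payload, or 0 on malformed input.
static uint32_t linestring_data_size(const uint8_t *data, size_t available)
{
  if (available < 4)
    return 0;
  uint32_t n_points;
  memcpy(&n_points, data, 4);                 // assumes a little-endian host
  if (n_points > MAX_POINTS)                  // reject absurd counts early,
    return 0;                                 // before they can overflow below
  uint64_t size= 4 + (uint64_t) n_points * POINT_BYTES;
  if (size > available)                       // not enough bytes in the buffer
    return 0;
  return (uint32_t) size;
}

int main()
{
  uint8_t buf[4 + 2 * POINT_BYTES]= {0};
  uint32_t two= 2;
  memcpy(buf, &two, 4);
  printf("well-formed: %u bytes\n", linestring_data_size(buf, sizeof(buf)));
  uint32_t huge= 0xffffffff;
  memcpy(buf, &huge, 4);
  printf("bogus count: %u bytes\n", linestring_data_size(buf, sizeof(buf)));
  return 0;
}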
@@ -631,7 +638,7 @@ bool Gis_line_string::get_data_as_wkt(String *txt, const char **end) const
n_points= uint4korr(data);
data += 4;
- if (n_points < 1 ||
+ if (n_points < 1 || n_points > max_n_points ||
no_data(data, POINT_DATA_SIZE * n_points) ||
txt->reserve(((MAX_DIGITS_IN_DOUBLE + 1)*2 + 1) * n_points))
return 1;
@@ -669,7 +676,8 @@ int Gis_line_string::geom_length(double *len, const char **end) const
return 1;
n_points= uint4korr(data);
data+= 4;
- if (n_points < 1 || no_data(data, POINT_DATA_SIZE * n_points))
+ if (n_points < 1 || n_points > max_n_points ||
+ no_data(data, POINT_DATA_SIZE * n_points))
return 1;
get_point(&prev_x, &prev_y, data);
@@ -717,7 +725,7 @@ int Gis_line_string::is_closed(int *closed) const
return 0;
}
data+= 4;
- if (n_points == 0 ||
+ if (n_points == 0 || n_points > max_n_points ||
no_data(data, POINT_DATA_SIZE * n_points))
return 1;
@@ -753,6 +761,9 @@ int Gis_line_string::end_point(String *result) const
if (no_data(m_data, 4))
return 1;
n_points= uint4korr(m_data);
+ if (n_points == 0 || n_points > max_n_points ||
+ no_data(m_data, POINT_DATA_SIZE * n_points))
+ return 1;
return create_point(result, m_data + 4 + (n_points - 1) * POINT_DATA_SIZE);
}
@@ -762,11 +773,14 @@ int Gis_line_string::point_n(uint32 num, String *result) const
uint32 n_points;
if (no_data(m_data, 4))
return 1;
+ num--;
n_points= uint4korr(m_data);
- if ((uint32) (num - 1) >= n_points) // means (num > n_points || num < 1)
+ if (num >= n_points ||
+ num > max_n_points || // means (num > n_points || num < 1)
+ no_data(m_data, num * POINT_DATA_SIZE))
return 1;
- return create_point(result, m_data + 4 + (num - 1) * POINT_DATA_SIZE);
+ return create_point(result, m_data + 4 + num*POINT_DATA_SIZE);
}
@@ -782,7 +796,8 @@ int Gis_line_string::store_shapes(Gcalc_shape_transporter *trn) const
return 1;
n_points= uint4korr(data);
data+= 4;
- if (n_points < 1 || no_data(data, POINT_DATA_SIZE * n_points))
+ if (n_points < 1 || n_points > max_n_points ||
+ no_data(data, POINT_DATA_SIZE * n_points))
return 1;
trn->start_line();
@@ -803,7 +818,6 @@ int Gis_line_string::store_shapes(Gcalc_shape_transporter *trn) const
return trn->complete_line();
}
-
const Geometry::Class_info *Gis_line_string::get_class_info() const
{
return &linestring_class;
@@ -815,6 +829,7 @@ const Geometry::Class_info *Gis_line_string::get_class_info() const
uint32 Gis_polygon::get_data_size() const
{
uint32 n_linear_rings;
+ uint32 n_points;
const char *data= m_data;
if (no_data(data, 4))
@@ -824,10 +839,13 @@ uint32 Gis_polygon::get_data_size() const
while (n_linear_rings--)
{
- if (no_data(data, 4))
+ if (no_data(data, 4) ||
+ (n_points= uint4korr(data)) > max_n_points)
return GET_SIZE_ERROR;
- data+= 4 + uint4korr(data)*POINT_DATA_SIZE;
+ data+= 4 + n_points*POINT_DATA_SIZE;
}
+ if (no_data(data, 0))
+ return GET_SIZE_ERROR;
return (uint32) (data - m_data);
}
@@ -920,7 +938,9 @@ uint Gis_polygon::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo,
if (len < 4)
return 0;
- n_linear_rings= wkb_get_uint(wkb, bo);
+ if (!(n_linear_rings= wkb_get_uint(wkb, bo)))
+ return 0;
+
if (res->reserve(4, 512))
return 0;
wkb+= 4;
@@ -966,7 +986,7 @@ bool Gis_polygon::get_data_as_wkt(String *txt, const char **end) const
return 1;
n_points= uint4korr(data);
data+= 4;
- if (no_data(data, POINT_DATA_SIZE * n_points) ||
+ if (n_points > max_n_points || no_data(data, POINT_DATA_SIZE * n_points) ||
txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points))
return 1;
txt->qs_append('(');
@@ -1020,7 +1040,8 @@ int Gis_polygon::area(double *ar, const char **end_of_data) const
if (no_data(data, 4))
return 1;
n_points= uint4korr(data);
- if (no_data(data, POINT_DATA_SIZE * n_points))
+ if (n_points == 0 || n_points > max_n_points ||
+ no_data(data, POINT_DATA_SIZE * n_points))
return 1;
get_point(&prev_x, &prev_y, data+4);
data+= (4+POINT_DATA_SIZE);
@@ -1056,7 +1077,8 @@ int Gis_polygon::exterior_ring(String *result) const
n_points= uint4korr(data);
data+= 4;
length= n_points * POINT_DATA_SIZE;
- if (no_data(data, length) || result->reserve(1 + 4 + 4 + length))
+ if (n_points > max_n_points ||
+ no_data(data, length) || result->reserve(1 + 4 + 4 + length))
return 1;
result->q_append((char) wkb_ndr);
@@ -1102,7 +1124,8 @@ int Gis_polygon::interior_ring_n(uint32 num, String *result) const
n_points= uint4korr(data);
points_size= n_points * POINT_DATA_SIZE;
data+= 4;
- if (no_data(data, points_size) || result->reserve(1 + 4 + 4 + points_size))
+ if (n_points > max_n_points ||
+ no_data(data, points_size) || result->reserve(1 + 4 + 4 + points_size))
return 1;
result->q_append((char) wkb_ndr);
@@ -1122,13 +1145,11 @@ int Gis_polygon::centroid_xy(double *x, double *y) const
const char *data= m_data;
bool first_loop= 1;
- if (no_data(data, 4))
+ if (no_data(data, 4) ||
+ (n_linear_rings= uint4korr(data)) == 0)
return 1;
- n_linear_rings= uint4korr(data);
data+= 4;
- DBUG_ASSERT(n_linear_rings > 0);
-
while (n_linear_rings--)
{
uint32 n_points, org_n_points;
@@ -1141,7 +1162,8 @@ int Gis_polygon::centroid_xy(double *x, double *y) const
return 1;
org_n_points= n_points= uint4korr(data);
data+= 4;
- if (no_data(data, POINT_DATA_SIZE * n_points))
+ if (n_points == 0 || n_points > max_n_points ||
+ no_data(data, POINT_DATA_SIZE * n_points))
return 1;
get_point(&prev_x, &prev_y, data);
data+= POINT_DATA_SIZE;
@@ -1215,7 +1237,8 @@ int Gis_polygon::store_shapes(Gcalc_shape_transporter *trn) const
return 1;
n_points= uint4korr(data);
data+= 4;
- if (!n_points || no_data(data, POINT_DATA_SIZE * n_points))
+ if (!n_points || n_points > max_n_points ||
+ no_data(data, POINT_DATA_SIZE * n_points))
return 1;
trn->start_ring();
@@ -1268,9 +1291,14 @@ const Geometry::Class_info *Gis_polygon::get_class_info() const
uint32 Gis_multi_point::get_data_size() const
{
- if (no_data(m_data, 4))
- return GET_SIZE_ERROR;
- return 4 + uint4korr(m_data)*(POINT_DATA_SIZE + WKB_HEADER_SIZE);
+ uint32 n_points;
+ uint32 size;
+
+ if (no_data(m_data, 4) ||
+ (n_points= uint4korr(m_data)) > max_n_points ||
+ no_data(m_data, (size= 4 + n_points*(POINT_DATA_SIZE + WKB_HEADER_SIZE))))
+ return GET_SIZE_ERROR;
+ return size;
}
@@ -1364,8 +1392,8 @@ bool Gis_multi_point::get_data_as_wkt(String *txt, const char **end) const
return 1;
n_points= uint4korr(m_data);
- if (no_data(m_data+4,
- n_points * (POINT_DATA_SIZE + WKB_HEADER_SIZE)) ||
+ if (n_points > max_n_points ||
+ no_data(m_data+4, n_points * (POINT_DATA_SIZE + WKB_HEADER_SIZE)) ||
txt->reserve(((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points))
return 1;
*end= append_points(txt, n_points, m_data+4, WKB_HEADER_SIZE);
@@ -1446,6 +1474,7 @@ const Geometry::Class_info *Gis_multi_point::get_class_info() const
uint32 Gis_multi_line_string::get_data_size() const
{
uint32 n_line_strings;
+ uint32 n_points;
const char *data= m_data;
if (no_data(data, 4))
@@ -1455,11 +1484,13 @@ uint32 Gis_multi_line_string::get_data_size() const
while (n_line_strings--)
{
- if (no_data(data, WKB_HEADER_SIZE + 4))
+ if (no_data(data, WKB_HEADER_SIZE + 4) ||
+ (n_points= uint4korr(data + WKB_HEADER_SIZE)) > max_n_points)
return GET_SIZE_ERROR;
- data+= (WKB_HEADER_SIZE + 4 + uint4korr(data + WKB_HEADER_SIZE) *
- POINT_DATA_SIZE);
+ data+= (WKB_HEADER_SIZE + 4 + n_points*POINT_DATA_SIZE);
}
+ if (no_data(data, 0))
+ return GET_SIZE_ERROR;
return (uint32) (data - m_data);
}
@@ -1583,7 +1614,7 @@ bool Gis_multi_line_string::get_data_as_wkt(String *txt,
return 1;
n_points= uint4korr(data + WKB_HEADER_SIZE);
data+= WKB_HEADER_SIZE + 4;
- if (no_data(data, n_points * POINT_DATA_SIZE) ||
+ if (n_points > max_n_points || no_data(data, n_points * POINT_DATA_SIZE) ||
txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points))
return 1;
txt->qs_append('(');
@@ -1644,7 +1675,7 @@ int Gis_multi_line_string::geometry_n(uint32 num, String *result) const
return 1;
n_points= uint4korr(data + WKB_HEADER_SIZE);
length= WKB_HEADER_SIZE + 4+ POINT_DATA_SIZE * n_points;
- if (no_data(data, length))
+ if (n_points > max_n_points || no_data(data, length))
return 1;
if (!--num)
break;
@@ -1755,6 +1786,7 @@ const Geometry::Class_info *Gis_multi_line_string::get_class_info() const
uint32 Gis_multi_polygon::get_data_size() const
{
uint32 n_polygons;
+ uint32 n_points;
const char *data= m_data;
if (no_data(data, 4))
@@ -1773,11 +1805,14 @@ uint32 Gis_multi_polygon::get_data_size() const
while (n_linear_rings--)
{
- if (no_data(data, 4))
+ if (no_data(data, 4) ||
+ (n_points= uint4korr(data)) > max_n_points)
return GET_SIZE_ERROR;
- data+= 4 + uint4korr(data) * POINT_DATA_SIZE;
+ data+= 4 + n_points * POINT_DATA_SIZE;
}
}
+ if (no_data(data, 0))
+ return GET_SIZE_ERROR;
return (uint32) (data - m_data);
}
@@ -1905,7 +1940,8 @@ bool Gis_multi_polygon::get_data_as_wkt(String *txt, const char **end) const
return 1;
uint32 n_points= uint4korr(data);
data+= 4;
- if (no_data(data, POINT_DATA_SIZE * n_points) ||
+ if (n_points > max_n_points ||
+ no_data(data, POINT_DATA_SIZE * n_points) ||
txt->reserve(2 + ((MAX_DIGITS_IN_DOUBLE + 1) * 2 + 1) * n_points,
512))
return 1;
@@ -1988,6 +2024,8 @@ int Gis_multi_polygon::geometry_n(uint32 num, String *result) const
if (no_data(data, 4))
return 1;
n_points= uint4korr(data);
+ if (n_points > max_n_points)
+ return 1;
data+= 4 + POINT_DATA_SIZE * n_points;
}
} while (--num);
@@ -2317,7 +2355,7 @@ bool Gis_geometry_collection::get_mbr(MBR *mbr, const char **end) const
n_objects= uint4korr(data);
data+= 4;
if (n_objects == 0)
- return 1;
+ goto exit;
while (n_objects--)
{
@@ -2334,6 +2372,7 @@ bool Gis_geometry_collection::get_mbr(MBR *mbr, const char **end) const
if (geom->get_mbr(mbr, &data))
return 1;
}
+exit:
*end= data;
return 0;
}
@@ -2351,10 +2390,11 @@ int Gis_geometry_collection::area(double *ar, const char **end) const
return 1;
n_objects= uint4korr(data);
data+= 4;
- if (n_objects == 0)
- return 1;
result= 0.0;
+ if (n_objects == 0)
+ goto exit;
+
while (n_objects--)
{
uint32 wkb_type;
@@ -2371,6 +2411,7 @@ int Gis_geometry_collection::area(double *ar, const char **end) const
return 1;
result+= *ar;
}
+exit:
*end= data;
*ar= result;
return 0;
@@ -2389,10 +2430,11 @@ int Gis_geometry_collection::geom_length(double *len, const char **end) const
return 1;
n_objects= uint4korr(data);
data+= 4;
+ result= 0.0;
+
if (n_objects == 0)
- return 1;
+ goto exit;
- result= 0.0;
while (n_objects--)
{
uint32 wkb_type;
@@ -2409,6 +2451,8 @@ int Gis_geometry_collection::geom_length(double *len, const char **end) const
return 1;
result+= *len;
}
+
+exit:
*end= data;
*len= result;
return 0;
diff --git a/sql/spatial.h b/sql/spatial.h
index 1db9b5767e5..b0e4b83bf6a 100644
--- a/sql/spatial.h
+++ b/sql/spatial.h
@@ -1,5 +1,6 @@
/*
- Copyright (c) 2002, 2010, Oracle and/or its affiliates.
+ Copyright (c) 2002, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -28,7 +29,7 @@ class Gis_read_stream;
const uint SRID_SIZE= 4;
const uint SIZEOF_STORED_DOUBLE= 8;
-const uint POINT_DATA_SIZE= SIZEOF_STORED_DOUBLE*2;
+const uint POINT_DATA_SIZE= (SIZEOF_STORED_DOUBLE * 2);
const uint WKB_HEADER_SIZE= 1+4;
const uint32 GET_SIZE_ERROR= ((uint32) -1);
@@ -200,6 +201,9 @@ struct MBR
return (d == intersection.dimension());
}
+
+ int valid() const
+ { return xmin <= xmax && ymin <= ymax; }
};
@@ -210,6 +214,11 @@ struct Geometry_buffer;
class Geometry
{
public:
+ // Maximum number of points in a feature that can fit into String
+ static const uint32 max_n_points=
+ (uint32) (INT_MAX32 - WKB_HEADER_SIZE - 4 /* n_points */) /
+ POINT_DATA_SIZE;
+
Geometry() {} /* Remove gcc warning */
virtual ~Geometry() {} /* Remove gcc warning */
static void *operator new(size_t size, void *buffer)
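
For concreteness: with INT_MAX32 = 2147483647, WKB_HEADER_SIZE = 5 and POINT_DATA_SIZE = 16, the constant introduced above works out to (2147483647 - 5 - 4) / 16 = 134217727, so any WKB point count beyond roughly 134 million is rejected before it can feed a size computation. A compile-time check of that arithmetic, under those assumed values and not part of the patch:

    // Worked example only; the literals restate the assumed constant values.
    static_assert((2147483647u - 5u - 4u) / 16u == 134217727u,
                  "max_n_points for 16-byte points and a 2 GB String cap");
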
@@ -326,10 +335,36 @@ protected:
const char *get_mbr_for_points(MBR *mbr, const char *data, uint offset)
const;
- inline bool no_data(const char *cur_data, uint32 data_amount) const
+ /**
+ Check if there is enough data remaining, as requested
+
+ @arg cur_data pointer to the position in the binary form
+ @arg data_amount number of bytes expected
+ @return true if not enough data
+ */
+ inline bool no_data(const char *cur_data, size_t data_amount) const
{
return (cur_data + data_amount > m_data_end);
}
+
+ /**
+ Check if there are enough points remaining, as requested
+
+ Need to perform the calculation in logical units, since multiplication
+ can overflow the size data type.
+
+ @arg data pointer to the beginning of the points array
+ @arg expected_points number of points expected
+ @arg extra_point_space extra space for each point element in the array
+ @return true if there are not enough points
+ */
+ inline bool not_enough_points(const char *data, uint32 expected_points,
+ uint32 extra_point_space = 0) const
+ {
+ return (m_data_end < data ||
+ (expected_points > ((m_data_end - data) /
+ (POINT_DATA_SIZE + extra_point_space))));
+ }
const char *m_data;
const char *m_data_end;
};
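
The doxygen comment above states the design choice: not_enough_points() compares point counts rather than byte counts, because expected_points * (POINT_DATA_SIZE + extra_point_space) could overflow while (m_data_end - data) / point_size cannot. A self-contained sketch of the same check, with POINT_DATA_SIZE assumed to be 16 and the member pointers passed in explicitly:

    #include <cstddef>
    #include <cstdint>

    // Division-based bounds check: dividing the remaining bytes by the
    // per-point size avoids multiplying an untrusted point count.
    static bool not_enough_points_sketch(const char *data, const char *data_end,
                                         uint32_t expected_points,
                                         uint32_t extra_point_space= 0)
    {
      const size_t point_size= 16 + extra_point_space;  // POINT_DATA_SIZE assumed
      return data_end < data ||
             expected_points > (size_t) (data_end - data) / point_size;
    }
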
@@ -391,10 +426,6 @@ public:
class Gis_line_string: public Geometry
{
- // Maximum number of points in LineString that can fit into String
- static const uint32 max_n_points=
- (uint32) (UINT_MAX32 - WKB_HEADER_SIZE - 4 /* n_points */) /
- POINT_DATA_SIZE;
public:
Gis_line_string() {} /* Remove gcc warning */
virtual ~Gis_line_string() {} /* Remove gcc warning */
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 601fb5bf786..80e7d405a04 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/*
@@ -52,6 +52,8 @@
#include "sql_db.h"
#include "sql_array.h"
+#include "sql_plugin_compat.h"
+
bool mysql_user_table_is_in_short_password_format= false;
static const
@@ -68,7 +70,7 @@ TABLE_FIELD_TYPE mysql_db_table_fields[MYSQL_DB_FIELD_COUNT] = {
},
{
{ C_STRING_WITH_LEN("User") },
- { C_STRING_WITH_LEN("char(16)") },
+ { C_STRING_WITH_LEN("char(") },
{NULL, 0}
},
{
@@ -169,7 +171,7 @@ TABLE_FIELD_TYPE mysql_db_table_fields[MYSQL_DB_FIELD_COUNT] = {
};
const TABLE_FIELD_DEF
- mysql_db_table_def= {MYSQL_DB_FIELD_COUNT, mysql_db_table_fields};
+mysql_db_table_def= {MYSQL_DB_FIELD_COUNT, mysql_db_table_fields, 0, (uint*) 0 };
static LEX_STRING native_password_plugin_name= {
C_STRING_WITH_LEN("mysql_native_password")
@@ -827,6 +829,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
goto end;
table->use_all_columns();
(void) my_init_dynamic_array(&acl_users,sizeof(ACL_USER), 50, 100, MYF(0));
+ username_char_length= MY_MIN(table->field[1]->char_length(),
+ USERNAME_CHAR_LENGTH);
password_length= table->field[2]->field_length /
table->field[2]->charset()->mbmaxlen;
if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
@@ -1434,12 +1438,12 @@ bool acl_getroot(Security_context *sctx, char *user, char *host,
sctx->master_access= acl_user->access;
if (acl_user->user)
- strmake(sctx->priv_user, user, USERNAME_LENGTH);
+ strmake_buf(sctx->priv_user, user);
else
*sctx->priv_user= 0;
if (acl_user->host.hostname)
- strmake(sctx->priv_host, acl_user->host.hostname, MAX_HOSTNAME - 1);
+ strmake_buf(sctx->priv_host, acl_user->host.hostname);
else
*sctx->priv_host= 0;
}
@@ -1911,6 +1915,7 @@ bool change_password(THD *thd, const char *host, const char *user,
{
TABLE_LIST tables;
TABLE *table;
+ Rpl_filter *rpl_filter= thd->rpl_filter;
/* Buffer should be extended when password length is extended. */
char buff[512];
ulong query_length;
@@ -3599,6 +3604,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
TABLE_LIST tables[3];
bool create_new_users=0;
char *db_name, *table_name;
+ Rpl_filter *rpl_filter= thd->rpl_filter;
DBUG_ENTER("mysql_table_grant");
if (!initialized)
@@ -3875,6 +3881,7 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
TABLE_LIST tables[2];
bool create_new_users=0, result=0;
char *db_name, *table_name;
+ Rpl_filter *rpl_filter= thd->rpl_filter;
DBUG_ENTER("mysql_routine_grant");
if (!initialized)
@@ -4014,6 +4021,7 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
char tmp_db[SAFE_NAME_LEN+1];
bool create_new_users=0;
TABLE_LIST tables[2];
+ Rpl_filter *rpl_filter= thd->rpl_filter;
DBUG_ENTER("mysql_grant");
if (!initialized)
@@ -4539,12 +4547,16 @@ end:
@see check_access
@see check_table_access
- @note This functions assumes that either number of tables to be inspected
+ @note
+ This function assumes that either the number of tables to be inspected
by it is limited explicitly (i.e. it is not UINT_MAX) or the table list
used and thd->lex->query_tables_own_last value correspond to each
other (the latter should be either 0 or point to next_global member
of one of elements of this table list).
+ We delay locking of LOCK_grant until we really need it as we assume that
+ most privileges will be resolved with user or db level accesses.
+
@return Access status
@retval FALSE Access granted; But column privileges might need to be
checked.
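
Per the note just added, LOCK_grant is now taken lazily: tables whose privileges are already satisfied by the user- or db-level grants cached in the security context never touch the lock, and the rdlock is acquired only when the first table-level lookup is unavoidable. A rough sketch of that shape; resolved_without_table_grants() and lookup_table_grant() are hypothetical stand-ins for the logic in check_grant(), not server functions:

    static bool check_grant_sketch(THD *thd, TABLE_LIST *tables, uint number,
                                   ulong want_access)
    {
      bool locked= false, failed= false;
      for (TABLE_LIST *tl= tables; number-- && !failed; tl= tl->next_global)
      {
        if (resolved_without_table_grants(thd, tl, want_access))
          continue;                          // common case: no LOCK_grant taken
        if (!locked)
        {
          locked= true;
          mysql_rwlock_rdlock(&LOCK_grant);  // lock only once it is needed
        }
        failed= lookup_table_grant(thd, tl, want_access);
      }
      if (locked)
        mysql_rwlock_unlock(&LOCK_grant);
      return failed;
    }
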
@@ -4561,6 +4573,7 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
Security_context *sctx= thd->security_ctx;
uint i;
ulong orig_want_access= want_access;
+ my_bool locked= 0;
DBUG_ENTER("check_grant");
DBUG_ASSERT(number > 0);
@@ -4584,11 +4597,9 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
*/
tl->grant.orig_want_privilege= (want_access & ~SHOW_VIEW_ACL);
}
+ number= i;
- mysql_rwlock_rdlock(&LOCK_grant);
- for (tl= tables;
- tl && number-- && tl != first_not_own_table;
- tl= tl->next_global)
+ for (tl= tables; number-- ; tl= tl->next_global)
{
sctx = test(tl->security_ctx) ? tl->security_ctx : thd->security_ctx;
@@ -4655,6 +4666,12 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
continue;
}
+ if (!locked)
+ {
+ locked= 1;
+ mysql_rwlock_rdlock(&LOCK_grant);
+ }
+
GRANT_TABLE *grant_table= table_hash_search(sctx->host, sctx->ip,
tl->get_db_name(),
sctx->priv_user,
@@ -4688,11 +4705,13 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
goto err; // impossible
}
}
- mysql_rwlock_unlock(&LOCK_grant);
+ if (locked)
+ mysql_rwlock_unlock(&LOCK_grant);
DBUG_RETURN(FALSE);
err:
- mysql_rwlock_unlock(&LOCK_grant);
+ if (locked)
+ mysql_rwlock_unlock(&LOCK_grant);
if (!no_errors) // Not a silent skip of table
{
char command[128];
@@ -5770,6 +5789,7 @@ void get_mqh(const char *user, const char *host, USER_CONN *uc)
#define GRANT_TABLES 6
int open_grant_tables(THD *thd, TABLE_LIST *tables)
{
+ Rpl_filter *rpl_filter= thd->rpl_filter;
DBUG_ENTER("open_grant_tables");
if (!initialized)
@@ -7046,10 +7066,8 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
tables->db= (char*)sp_db;
tables->table_name= tables->alias= (char*)sp_name;
- thd->make_lex_string(&combo->user,
- combo->user.str, strlen(combo->user.str), 0);
- thd->make_lex_string(&combo->host,
- combo->host.str, strlen(combo->host.str), 0);
+ thd->make_lex_string(&combo->user, combo->user.str, strlen(combo->user.str));
+ thd->make_lex_string(&combo->host, combo->host.str, strlen(combo->host.str));
combo->password= empty_lex_str;
combo->plugin= empty_lex_str;
@@ -7971,7 +7989,7 @@ static bool send_server_handshake_packet(MPVIO_EXT *mpvio,
data_len= SCRAMBLE_LENGTH;
}
- end= strnmov(end, server_version, SERVER_VERSION_LENGTH) + 1;
+ end= strxnmov(end, SERVER_VERSION_LENGTH, RPL_VERSION_HACK, server_version, NullS) + 1;
int4store((uchar*) end, mpvio->thd->thread_id);
end+= 4;
@@ -8161,6 +8179,12 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio)
cs->coll->hash_sort(cs, (uchar*) sctx->user, strlen(sctx->user), &nr1, &nr2);
mysql_mutex_lock(&acl_cache->lock);
+ if (!acl_users.elements)
+ {
+ mysql_mutex_unlock(&acl_cache->lock);
+ login_failed_error(mpvio->thd);
+ DBUG_RETURN(1);
+ }
uint i= nr1 % acl_users.elements;
ACL_USER *acl_user_tmp= dynamic_element(&acl_users, i, ACL_USER*);
mpvio->acl_user= acl_user_tmp->copy(mpvio->thd->mem_root);
@@ -8186,10 +8210,9 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio)
mpvio->auth_info.user_name= sctx->user;
mpvio->auth_info.user_name_length= strlen(sctx->user);
mpvio->auth_info.auth_string= mpvio->acl_user->auth_string.str;
- mpvio->auth_info.auth_string_length=
- (unsigned long) mpvio->acl_user->auth_string.length;
- strmake(mpvio->auth_info.authenticated_as, mpvio->acl_user->user ?
- mpvio->acl_user->user : "", USERNAME_LENGTH);
+ mpvio->auth_info.auth_string_length= (unsigned long) mpvio->acl_user->auth_string.length;
+ strmake_buf(mpvio->auth_info.authenticated_as, mpvio->acl_user->user ?
+ mpvio->acl_user->user : "");
DBUG_PRINT("info", ("exit: user=%s, auth_string=%s, authenticated as=%s"
"plugin=%s",
@@ -8273,9 +8296,9 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length)
/* Clear variables that are allocated */
thd->user_connect= 0;
- strmake(sctx->priv_user, sctx->user, USERNAME_LENGTH);
+ strmake_buf(sctx->priv_user, sctx->user);
- if (thd->make_lex_string(&mpvio->db, db_buff, db_len, 0) == 0)
+ if (thd->make_lex_string(&mpvio->db, db_buff, db_len) == 0)
DBUG_RETURN(1); /* The error is set by make_lex_string(). */
/*
@@ -8488,20 +8511,21 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
/*
Clip username to allowed length in characters (not bytes). This is
- mostly for backward compatibility.
+ mostly for backward compatibility (to truncate long usernames, as
+ old 5.1 did)
*/
{
CHARSET_INFO *cs= system_charset_info;
int err;
user_len= (uint) cs->cset->well_formed_len(cs, user, user + user_len,
- USERNAME_CHAR_LENGTH, &err);
+ username_char_length, &err);
user[user_len]= '\0';
}
Security_context *sctx= thd->security_ctx;
- if (thd->make_lex_string(&mpvio->db, db, db_len, 0) == 0)
+ if (thd->make_lex_string(&mpvio->db, db, db_len) == 0)
return packet_error; /* The error is set by make_lex_string(). */
my_free(sctx->user);
if (!(sctx->user= my_strndup(user, user_len, MYF(MY_WME))))
@@ -8889,7 +8913,20 @@ static int do_auth_once(THD *thd, const LEX_STRING *auth_plugin_name,
if (plugin)
{
st_mysql_auth *auth= (st_mysql_auth *) plugin_decl(plugin)->info;
- res= auth->authenticate_user(mpvio, &mpvio->auth_info);
+ switch (auth->interface_version) {
+ case 0x0200:
+ res= auth->authenticate_user(mpvio, &mpvio->auth_info);
+ break;
+ case 0x0100:
+ {
+ MYSQL_SERVER_AUTH_INFO_0x0100 compat;
+ compat.downgrade(&mpvio->auth_info);
+ res= auth->authenticate_user(mpvio, (MYSQL_SERVER_AUTH_INFO *)&compat);
+ compat.upgrade(&mpvio->auth_info);
+ }
+ break;
+ default: DBUG_ASSERT(0);
+ }
if (unlock_plugin)
plugin_unlock(thd, plugin);
@@ -8943,8 +8980,6 @@ bool acl_authenticate(THD *thd, uint connect_errors,
: COM_CONNECT;
DBUG_ENTER("acl_authenticate");
- compile_time_assert(MYSQL_USERNAME_LENGTH == USERNAME_LENGTH);
-
bzero(&mpvio, sizeof(mpvio));
mpvio.read_packet= server_mpvio_read_packet;
mpvio.write_packet= server_mpvio_write_packet;
@@ -9105,12 +9140,12 @@ bool acl_authenticate(THD *thd, uint connect_errors,
sctx->master_access= acl_user->access;
if (acl_user->user)
- strmake(sctx->priv_user, acl_user->user, USERNAME_LENGTH - 1);
+ strmake_buf(sctx->priv_user, acl_user->user);
else
*sctx->priv_user= 0;
if (acl_user->host.hostname)
- strmake(sctx->priv_host, acl_user->host.hostname, MAX_HOSTNAME - 1);
+ strmake_buf(sctx->priv_host, acl_user->host.hostname);
else
*sctx->priv_host= 0;
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index ca5ef9a2fc8..8f3ea0fedb1 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -1,4 +1,5 @@
-/* Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2010, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2012, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -99,6 +100,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
const char **ext;
MY_STAT stat_info;
Open_table_context ot_ctx(thd, (MYSQL_OPEN_IGNORE_FLUSH |
+ MYSQL_OPEN_FOR_REPAIR |
MYSQL_OPEN_HAS_MDL_LOCK |
MYSQL_LOCK_IGNORE_TIMEOUT));
DBUG_ENTER("prepare_for_repair");
@@ -108,8 +110,6 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
if (!(table= table_list->table))
{
- const char *key;
- uint key_length;
/*
If the table didn't exist, we have a shared metadata lock
on it that is left from mysql_admin_table()'s attempt to
@@ -122,9 +122,6 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
Attempt to do full-blown table open in mysql_admin_table() has failed.
Let us try to open at least a .FRM for this table.
*/
- my_hash_value_type hash_value;
-
- key_length= get_table_def_key(table_list, &key);
table_list->mdl_request.init(MDL_key::TABLE,
table_list->db, table_list->table_name,
@@ -135,11 +132,8 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(0);
has_mdl_lock= TRUE;
- hash_value= my_calc_hash(&table_def_cache, (uchar*) key, key_length);
- mysql_mutex_lock(&LOCK_open);
- share= get_table_share(thd, table_list, key, key_length, 0,
- &error, hash_value);
- mysql_mutex_unlock(&LOCK_open);
+ share= get_table_share(thd, table_list->db, table_list->table_name,
+ GTS_TABLE);
if (share == NULL)
DBUG_RETURN(0); // Can't open frm file
@@ -209,7 +203,8 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
*/
pos_in_locked_tables= table->pos_in_locked_tables;
if (wait_while_table_is_used(thd, table,
- HA_EXTRA_PREPARE_FOR_FORCED_CLOSE))
+ HA_EXTRA_PREPARE_FOR_FORCED_CLOSE,
+ TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE))
goto end;
/* Close table but don't remove from locked list */
close_all_tables_for_name(thd, table_list->table->s,
@@ -344,7 +339,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
item->maybe_null = 1;
field_list.push_back(item = new Item_empty_string("Msg_type", 10));
item->maybe_null = 1;
- field_list.push_back(item = new Item_empty_string("Msg_text", 255));
+ field_list.push_back(item = new Item_empty_string("Msg_text",
+ SQL_ADMIN_MSG_TEXT_SIZE));
item->maybe_null = 1;
if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
@@ -613,8 +609,10 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
*/
if (lock_type == TL_WRITE && !table->table->s->tmp_table)
{
+ table->table->s->protect_against_usage();
if (wait_while_table_is_used(thd, table->table,
- HA_EXTRA_PREPARE_FOR_RENAME))
+ HA_EXTRA_PREPARE_FOR_RENAME,
+ TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE))
goto err;
DEBUG_SYNC(thd, "after_admin_flush");
/* Flush entries in the query cache involving this table. */
@@ -882,6 +880,8 @@ send_result_message:
if (!result_code) // recreation went ok
{
+ /* Clear the ticket released above. */
+ table->mdl_request.ticket= NULL;
DEBUG_SYNC(thd, "ha_admin_open_ltable");
table->mdl_request.set_type(MDL_SHARED_WRITE);
if (!open_temporary_tables(thd, table) &&
diff --git a/sql/sql_admin.h b/sql/sql_admin.h
index 1f884866ddc..fa89fc9063f 100644
--- a/sql/sql_admin.h
+++ b/sql/sql_admin.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,6 +16,8 @@
#ifndef SQL_TABLE_MAINTENANCE_H
#define SQL_TABLE_MAINTENANCE_H
+/* Must be able to hold ALTER TABLE t PARTITION BY ... KEY ALGORITHM = 1 ... */
+#define SQL_ADMIN_MSG_TEXT_SIZE 128 * 1024
bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* table_list,
LEX_STRING *key_cache_name);
diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc
index 6a590c91e5e..be35340df27 100644
--- a/sql/sql_analyse.cc
+++ b/sql/sql_analyse.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* Analyse database */
diff --git a/sql/sql_array.h b/sql/sql_array.h
index 71377b91ef9..697819787f2 100644
--- a/sql/sql_array.h
+++ b/sql/sql_array.h
@@ -115,12 +115,12 @@ public:
@note Though formally this could be declared "const" it would be
misleading as it returns a non-const pointer to the array's data.
*/
- Elem& at(int idx)
+ Elem& at(size_t idx)
{
return *(((Elem*)array.buffer) + idx);
}
/// Const variant of at(), which cannot change data
- const Elem& at(int idx) const
+ const Elem& at(size_t idx) const
{
return *(((Elem*)array.buffer) + idx);
}
@@ -162,6 +162,11 @@ public:
return insert_dynamic(&array, &el);
}
+ bool append_val(Elem el)
+ {
+ return (insert_dynamic(&array, (uchar*)&el));
+ }
+
/// Pops the last element. Does nothing if array is empty.
Elem& pop()
{
@@ -173,12 +178,12 @@ public:
delete_dynamic_element(&array, idx);
}
- int elements() const
+ size_t elements() const
{
return array.elements;
}
- void elements(uint num_elements)
+ void elements(size_t num_elements)
{
DBUG_ASSERT(num_elements <= array.max_element);
array.elements= num_elements;
diff --git a/sql/sql_audit.cc b/sql/sql_audit.cc
index 793eead9869..07a5243e836 100644
--- a/sql/sql_audit.cc
+++ b/sql/sql_audit.cc
@@ -31,8 +31,7 @@ unsigned long mysql_global_audit_mask[MYSQL_AUDIT_CLASS_MASK_SIZE];
static mysql_mutex_t LOCK_audit_mask;
-static void event_class_dispatch(THD *thd, unsigned int event_class,
- const void *event);
+static void event_class_dispatch(THD *, unsigned int, const void *);
static inline
@@ -111,9 +110,36 @@ static void connection_class_handler(THD *thd, uint event_subclass, va_list ap)
}
+static void table_class_handler(THD *thd, uint event_subclass, va_list ap)
+{
+ mysql_event_table event;
+ event.event_subclass= event_subclass;
+ event.read_only= va_arg(ap, int);
+ event.thread_id= va_arg(ap, unsigned long);
+ event.user= va_arg(ap, const char *);
+ event.priv_user= va_arg(ap, const char *);
+ event.priv_host= va_arg(ap, const char *);
+ event.external_user= va_arg(ap, const char *);
+ event.proxy_user= va_arg(ap, const char *);
+ event.host= va_arg(ap, const char *);
+ event.ip= va_arg(ap, const char *);
+ event.database= va_arg(ap, const char *);
+ event.database_length= va_arg(ap, unsigned int);
+ event.table= va_arg(ap, const char *);
+ event.table_length= va_arg(ap, unsigned int);
+ event.new_database= va_arg(ap, const char *);
+ event.new_database_length= va_arg(ap, unsigned int);
+ event.new_table= va_arg(ap, const char *);
+ event.new_table_length= va_arg(ap, unsigned int);
+ event_class_dispatch(thd, MYSQL_AUDIT_TABLE_CLASS, &event);
+}
+
+
static audit_handler_t audit_handlers[] =
{
- general_class_handler, connection_class_handler
+ general_class_handler, connection_class_handler,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0, /* placeholders */
+ table_class_handler
};
static const uint audit_handlers_count=
diff --git a/sql/sql_audit.h b/sql/sql_audit.h
index 00de3d52261..1c7d6a1c224 100644
--- a/sql/sql_audit.h
+++ b/sql/sql_audit.h
@@ -43,17 +43,29 @@ static inline bool mysql_audit_general_enabled()
return mysql_global_audit_mask[0] & MYSQL_AUDIT_GENERAL_CLASSMASK;
}
+static inline bool mysql_audit_connection_enabled()
+{
+ return mysql_global_audit_mask[0] & MYSQL_AUDIT_CONNECTION_CLASSMASK;
+}
+
+static inline bool mysql_audit_table_enabled()
+{
+ return mysql_global_audit_mask[0] & MYSQL_AUDIT_TABLE_CLASSMASK;
+}
+
#else
static inline void mysql_audit_notify(THD *thd, uint event_class,
uint event_subtype, ...) { }
#define mysql_audit_general_enabled() 0
+#define mysql_audit_connection_enabled() 0
+#define mysql_audit_table_enabled() 0
#endif
extern void mysql_audit_release(THD *thd);
#define MAX_USER_HOST_SIZE 512
static inline uint make_user_name(THD *thd, char *buf)
{
- Security_context *sctx= thd->security_ctx;
+ const Security_context *sctx= thd->security_ctx;
return strxnmov(buf, MAX_USER_HOST_SIZE,
sctx->priv_user[0] ? sctx->priv_user : "", "[",
sctx->user ? sctx->user : "", "] @ ",
@@ -137,41 +149,143 @@ void mysql_audit_general(THD *thd, uint event_subtype,
}
}
-#define MYSQL_AUDIT_NOTIFY_CONNECTION_CONNECT(thd) mysql_audit_notify(\
- (thd), MYSQL_AUDIT_CONNECTION_CLASS, MYSQL_AUDIT_CONNECTION_CONNECT,\
- (thd)->get_stmt_da()->is_error() ? (thd)->get_stmt_da()->sql_errno() : 0,\
- (thd)->thread_id, (thd)->security_ctx->user,\
- (thd)->security_ctx->user ? strlen((thd)->security_ctx->user) : 0,\
- (thd)->security_ctx->priv_user, strlen((thd)->security_ctx->priv_user),\
- (thd)->security_ctx->external_user,\
- (thd)->security_ctx->external_user ?\
- strlen((thd)->security_ctx->external_user) : 0,\
- (thd)->security_ctx->proxy_user, strlen((thd)->security_ctx->proxy_user),\
- (thd)->security_ctx->host,\
- (thd)->security_ctx->host ? strlen((thd)->security_ctx->host) : 0,\
- (thd)->security_ctx->ip,\
- (thd)->security_ctx->ip ? strlen((thd)->security_ctx->ip) : 0,\
- (thd)->db, (thd)->db ? strlen((thd)->db) : 0)
-
-#define MYSQL_AUDIT_NOTIFY_CONNECTION_DISCONNECT(thd, errcode)\
- mysql_audit_notify(\
- (thd), MYSQL_AUDIT_CONNECTION_CLASS, MYSQL_AUDIT_CONNECTION_DISCONNECT,\
- (errcode), (thd)->thread_id, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
-
-#define MYSQL_AUDIT_NOTIFY_CONNECTION_CHANGE_USER(thd) mysql_audit_notify(\
- (thd), MYSQL_AUDIT_CONNECTION_CLASS, MYSQL_AUDIT_CONNECTION_CHANGE_USER,\
- (thd)->get_stmt_da()->is_error() ? (thd)->get_stmt_da()->sql_errno() : 0,\
- (thd)->thread_id, (thd)->security_ctx->user,\
- (thd)->security_ctx->user ? strlen((thd)->security_ctx->user) : 0,\
- (thd)->security_ctx->priv_user, strlen((thd)->security_ctx->priv_user),\
- (thd)->security_ctx->external_user,\
- (thd)->security_ctx->external_user ?\
- strlen((thd)->security_ctx->external_user) : 0,\
- (thd)->security_ctx->proxy_user, strlen((thd)->security_ctx->proxy_user),\
- (thd)->security_ctx->host,\
- (thd)->security_ctx->host ? strlen((thd)->security_ctx->host) : 0,\
- (thd)->security_ctx->ip,\
- (thd)->security_ctx->ip ? strlen((thd)->security_ctx->ip) : 0,\
- (thd)->db, (thd)->db ? strlen((thd)->db) : 0)
+static inline
+void mysql_audit_notify_connection_connect(THD *thd)
+{
+ if (mysql_audit_connection_enabled())
+ {
+ const Security_context *sctx= thd->security_ctx;
+ Diagnostics_area *da= thd->get_stmt_da();
+ mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS,
+ MYSQL_AUDIT_CONNECTION_CONNECT,
+ da->is_error() ? da->sql_errno() : 0,
+ thd->thread_id,
+ sctx->user, sctx->user ? strlen(sctx->user) : 0,
+ sctx->priv_user, strlen(sctx->priv_user),
+ sctx->external_user,
+ sctx->external_user ? strlen(sctx->external_user) : 0,
+ sctx->proxy_user, strlen(sctx->proxy_user),
+ sctx->host, sctx->host ? strlen(sctx->host) : 0,
+ sctx->ip, sctx->ip ? strlen(sctx->ip) : 0,
+ thd->db, thd->db ? strlen(thd->db) : 0);
+ }
+}
+
+static inline
+void mysql_audit_notify_connection_disconnect(THD *thd, int errcode)
+{
+ if (mysql_audit_connection_enabled())
+ {
+ mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS,
+ MYSQL_AUDIT_CONNECTION_DISCONNECT,
+ errcode, thd->thread_id,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ }
+}
+
+static inline
+void mysql_audit_notify_connection_change_user(THD *thd)
+{
+ if (mysql_audit_connection_enabled())
+ {
+ const Security_context *sctx= thd->security_ctx;
+ Diagnostics_area *da= thd->get_stmt_da();
+ mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS,
+ MYSQL_AUDIT_CONNECTION_CHANGE_USER,
+ da->is_error() ? da->sql_errno() : 0,
+ thd->thread_id,
+ sctx->user, sctx->user ? strlen(sctx->user) : 0,
+ sctx->priv_user, strlen(sctx->priv_user),
+ sctx->external_user,
+ sctx->external_user ? strlen(sctx->external_user) : 0,
+ sctx->proxy_user, strlen(sctx->proxy_user),
+ sctx->host, sctx->host ? strlen(sctx->host) : 0,
+ sctx->ip, sctx->ip ? strlen(sctx->ip) : 0,
+ thd->db, thd->db ? strlen(thd->db) : 0);
+ }
+}
+
+static inline
+void mysql_audit_external_lock(THD *thd, TABLE_SHARE *share, int lock)
+{
+ if (lock != F_UNLCK && mysql_audit_table_enabled())
+ {
+ const Security_context *sctx= thd->security_ctx;
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_LOCK,
+ (int)(lock == F_RDLCK), (ulong)thd->thread_id,
+ sctx->user, sctx->priv_user, sctx->priv_host,
+ sctx->external_user, sctx->proxy_user, sctx->host,
+ sctx->ip, share->db.str, (uint)share->db.length,
+ share->table_name.str, (uint)share->table_name.length,
+ 0,0,0,0);
+ }
+}
+
+static inline
+void mysql_audit_create_table(TABLE *table)
+{
+ if (mysql_audit_table_enabled())
+ {
+ THD *thd= table->in_use;
+ const TABLE_SHARE *share= table->s;
+ const Security_context *sctx= thd->security_ctx;
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_CREATE,
+ 0, (ulong)thd->thread_id,
+ sctx->user, sctx->priv_user, sctx->priv_host,
+ sctx->external_user, sctx->proxy_user, sctx->host,
+ sctx->ip, share->db.str, (uint)share->db.length,
+ share->table_name.str, (uint)share->table_name.length,
+ 0,0,0,0);
+ }
+}
+
+static inline
+void mysql_audit_drop_table(THD *thd, TABLE_LIST *table)
+{
+ if (mysql_audit_table_enabled())
+ {
+ const Security_context *sctx= thd->security_ctx;
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_DROP,
+ 0, (ulong)thd->thread_id,
+ sctx->user, sctx->priv_user, sctx->priv_host,
+ sctx->external_user, sctx->proxy_user, sctx->host,
+ sctx->ip, table->db, (uint)table->db_length,
+ table->table_name, (uint)table->table_name_length,
+ 0,0,0,0);
+ }
+}
+
+static inline
+void mysql_audit_rename_table(THD *thd, const char *old_db, const char *old_tb,
+ const char *new_db, const char *new_tb)
+{
+ if (mysql_audit_table_enabled())
+ {
+ const Security_context *sctx= thd->security_ctx;
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_RENAME,
+ 0, (ulong)thd->thread_id,
+ sctx->user, sctx->priv_user, sctx->priv_host,
+ sctx->external_user, sctx->proxy_user, sctx->host,
+ sctx->ip,
+ old_db, (uint)strlen(old_db), old_tb, (uint)strlen(old_tb),
+ new_db, (uint)strlen(new_db), new_tb, (uint)strlen(new_tb));
+ }
+}
+
+static inline
+void mysql_audit_alter_table(THD *thd, TABLE_LIST *table)
+{
+ if (mysql_audit_table_enabled())
+ {
+ const Security_context *sctx= thd->security_ctx;
+ mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, MYSQL_AUDIT_TABLE_ALTER,
+ 0, (ulong)thd->thread_id,
+ sctx->user, sctx->priv_user, sctx->priv_host,
+ sctx->external_user, sctx->proxy_user, sctx->host,
+ sctx->ip, table->db, (uint)table->db_length,
+ table->table_name, (uint)table->table_name_length,
+ 0,0,0,0);
+ }
+}
#endif /* SQL_AUDIT_INCLUDED */
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 1adaa20cd94..5baf05c7f38 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* Basic functions needed by many modules */
@@ -55,7 +55,7 @@
#include <hash.h>
#include "rpl_filter.h"
#include "sql_table.h" // build_table_filename
-#include "datadict.h" // dd_frm_type()
+#include "datadict.h" // dd_frm_is_view()
#include "sql_hset.h" // Hash_set
#ifdef __WIN__
#include <io.h>
@@ -166,35 +166,22 @@ Repair_mrg_table_error_handler::handle_condition(THD *,
/**
LOCK_open protects the following variables/objects:
- 1) The table_def_hash
- This is the hash table mapping table name to a table
- share object. The hash table can only be manipulated
- while holding LOCK_open.
- 2) last_table_id
- Generation of a new unique table_map_id for a table
- share is done through incrementing last_table_id, a
- global variable used for this purpose.
- 3) LOCK_open protects the initialisation of the table share
- object and all its members and also protects reading the
- .frm file from where the table share is initialised.
- 4) In particular the share->ref_count is updated each time
- a new table object is created that refers to a table share.
- This update is protected by LOCK_open.
- 5) oldest_unused_share, end_of_unused_share and share->next
- and share->prev are variables to handle the lists of table
- share objects, these can only be read and manipulated while
- holding the LOCK_open mutex.
- 6) table_def_shutdown_in_progress can be updated only while
- holding LOCK_open and ALL table cache mutexes.
- 7) refresh_version
- This variable can only be updated while holding LOCK_open AND
- all table cache mutexes.
- 8) share->version
- This variable is initialised while holding LOCK_open. It can only
- be updated while holding LOCK_open AND all table cache mutexes.
- So if a table share is found through a reference its version won't
- change if any of those mutexes are held.
- 9) share->m_flush_tickets
+ end_of_unused_share
+ last_table_id
+ oldest_unused_share
+ refresh_version
+ table_cache_count
+ table_def_cache
+ table_def_shutdown_in_progress
+ unused_tables
+ TABLE::next
+ TABLE::prev
+ TABLE_SHARE::free_tables
+ TABLE_SHARE::m_flush_tickets
+ TABLE_SHARE::next
+ TABLE_SHARE::prev
+ TABLE_SHARE::ref_count
+ TABLE_SHARE::used_tables
*/
mysql_mutex_t LOCK_open;
@@ -332,14 +319,13 @@ static void check_unused(THD *thd)
size MAX_DBKEY_LENGTH).
@param db_name Database name.
@param table_name Table name.
- @param tmp_table Set if table is a tmp table.
@note
The table cache_key is created from:
db_name + \0
table_name + \0
- if the table is a tmp table, we add the following to make each tmp table
+ additionally we add the following to make each tmp table
unique on the slave:
4 bytes for master thread id
@@ -348,26 +334,13 @@ static void check_unused(THD *thd)
@return Length of key.
*/
-static uint create_table_def_key(THD *thd, char *key,
- const char *db_name, const char *table_name,
- bool tmp_table)
+uint create_tmp_table_def_key(THD *thd, char *key,
+ const char *db, const char *table_name)
{
- /*
- In theory caller should ensure that both db and table_name are
- not longer than NAME_LEN bytes. In practice we play safe to avoid
- buffer overruns.
- */
- DBUG_ASSERT(strlen(db_name) <= NAME_LEN && strlen(table_name) <= NAME_LEN);
- uint key_length= static_cast<uint>(strmake(strmake(key, db_name, NAME_LEN) +
- 1, table_name, NAME_LEN) - key +
- 1);
-
- if (tmp_table)
- {
- int4store(key + key_length, thd->server_id);
- int4store(key + key_length + 4, thd->variables.pseudo_thread_id);
- key_length+= TMP_TABLE_KEY_EXTRA;
- }
+ uint key_length= create_table_def_key(key, db, table_name);
+ int4store(key + key_length, thd->variables.server_id);
+ int4store(key + key_length + 4, thd->variables.pseudo_thread_id);
+ key_length+= TMP_TABLE_KEY_EXTRA;
return key_length;
}
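
The comment block above documents the key layout this helper now produces: the base key ("db\0table_name\0" from create_table_def_key()) followed by 4 bytes of server id and 4 bytes of pseudo_thread_id, which keeps temporary tables from different originating threads distinct on a slave. A standalone sketch of that layout; int4store() is approximated with memcpy (host byte order), so this is illustrative only:

    #include <cstdint>
    #include <cstring>

    // Builds "db\0table\0" + server_id + pseudo_thread_id, mirroring
    // create_tmp_table_def_key(); the trailing 8 bytes are TMP_TABLE_KEY_EXTRA.
    static size_t tmp_table_key_sketch(char *key, const char *db,
                                       const char *table_name,
                                       uint32_t server_id,
                                       uint32_t pseudo_thread_id)
    {
      size_t len= strlen(strcpy(key, db)) + 1;           // db + '\0'
      len+= strlen(strcpy(key + len, table_name)) + 1;   // table_name + '\0'
      memcpy(key + len, &server_id, 4);                  // 4 bytes server id
      memcpy(key + len + 4, &pseudo_thread_id, 4);       // 4 bytes thread id
      return len + 8;
    }
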
@@ -445,6 +418,14 @@ bool table_def_init(void)
init_tdc_psi_keys();
#endif
mysql_mutex_init(key_LOCK_open, &LOCK_open, MY_MUTEX_INIT_FAST);
+ mysql_mutex_record_order(&LOCK_active_mi, &LOCK_open);
+ /*
+ When we delete from the table_def_cache(), the free function
+ table_def_free_entry() is invoked from my_hash_delete(), which calls
+ free_table_share(), which may unload plugins, which can remove status
+ variables and hence takes LOCK_status. Record this locking order here.
+ */
+ mysql_mutex_record_order(&LOCK_open, &LOCK_status);
oldest_unused_share= &end_of_unused_share;
end_of_unused_share.prev= &oldest_unused_share;
@@ -626,105 +607,108 @@ static void table_def_unuse_table(TABLE *table)
table_list Table that should be opened
key Table cache key
key_length Length of key
- db_flags Flags to open_table_def():
- OPEN_VIEW
- error out: Error code from open_table_def()
+ flags operation: what to open, a table or a view
+ hash_value = my_calc_hash(&table_def_cache, key, key_length)
IMPLEMENTATION
Get a table definition from the table definition cache.
If it doesn't exist, create a new one from the table definition file.
- NOTES
- We must have wrlock on LOCK_open when we come here
- (To be changed later)
-
RETURN
0 Error
# Share for table
*/
-TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list,
- const char *key, uint key_length,
- uint db_flags, int *error,
+TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name,
+ const char *key, uint key_length, uint flags,
my_hash_value_type hash_value)
{
TABLE_SHARE *share;
DBUG_ENTER("get_table_share");
- *error= 0;
-
- /*
- To be able perform any operation on table we should own
- some kind of metadata lock on it.
- */
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE,
- table_list->db,
- table_list->table_name,
- MDL_SHARED));
+ mysql_mutex_lock(&LOCK_open);
/* Read table definition from cache */
- if ((share= (TABLE_SHARE*) my_hash_search_using_hash_value(&table_def_cache,
- hash_value, (uchar*) key, key_length)))
- goto found;
+ share= (TABLE_SHARE*) my_hash_search_using_hash_value(&table_def_cache,
+ hash_value, (uchar*) key, key_length);
- if (!(share= alloc_table_share(table_list, key, key_length)))
+ if (!share)
{
- DBUG_RETURN(0);
- }
+ if (!(share= alloc_table_share(db, table_name, key, key_length)))
+ goto err;
- /*
- We assign a new table id under the protection of LOCK_open.
- We do this instead of creating a new mutex
- and using it for the sole purpose of serializing accesses to a
- static variable, we assign the table id here. We assign it to the
- share before inserting it into the table_def_cache to be really
- sure that it cannot be read from the cache without having a table
- id assigned.
-
- CAVEAT. This means that the table cannot be used for
- binlogging/replication purposes, unless get_table_share() has been
- called directly or indirectly.
- */
- assign_new_table_id(share);
-
- if (my_hash_insert(&table_def_cache, (uchar*) share))
- {
- free_table_share(share);
- DBUG_RETURN(0); // return error
- }
- if (open_table_def(thd, share, db_flags))
- {
- *error= share->error;
- (void) my_hash_delete(&table_def_cache, (uchar*) share);
- DBUG_RETURN(0);
+ /*
+ We assign a new table id under the protection of LOCK_open.
+ Rather than creating a new mutex and using it for the sole
+ purpose of serializing accesses to a static variable,
+ we assign the table id here. We assign it to the
+ share before inserting it into the table_def_cache to be really
+ sure that it cannot be read from the cache without having a table
+ id assigned.
+
+ CAVEAT. This means that the table cannot be used for
+ binlogging/replication purposes, unless get_table_share() has been
+ called directly or indirectly.
+ */
+ assign_new_table_id(share);
+
+ if (my_hash_insert(&table_def_cache, (uchar*) share))
+ {
+ free_table_share(share);
+ goto err;
+ }
+ share->ref_count++; // Mark in use
+ share->error= OPEN_FRM_OPEN_ERROR;
+ mysql_mutex_lock(&share->LOCK_share);
+ mysql_mutex_unlock(&LOCK_open);
+
+ /* note that get_table_share() *always* uses discovery */
+ open_table_def(thd, share, flags | GTS_USE_DISCOVERY);
+
+ mysql_mutex_unlock(&share->LOCK_share);
+ mysql_mutex_lock(&LOCK_open);
+
+ if (share->error)
+ {
+ share->ref_count--;
+ (void) my_hash_delete(&table_def_cache, (uchar*) share);
+ goto err;
+ }
+
+ share->m_psi= PSI_CALL_get_table_share(false, share);
+
+ DBUG_PRINT("exit", ("share: 0x%lx ref_count: %u",
+ (ulong) share, share->ref_count));
+
+ goto end;
}
- share->ref_count++; // Mark in use
-#ifdef HAVE_PSI_TABLE_INTERFACE
- share->m_psi= PSI_TABLE_CALL(get_table_share)(false, share);
-#else
- share->m_psi= NULL;
-#endif
+ /* cannot force discovery of a cached share */
+ DBUG_ASSERT(!(flags & GTS_FORCE_DISCOVERY));
- DBUG_PRINT("exit", ("share: 0x%lx ref_count: %u",
- (ulong) share, share->ref_count));
- DBUG_RETURN(share);
+ /* make sure that open_table_def() for this share is not running */
+ mysql_mutex_lock(&share->LOCK_share);
+ mysql_mutex_unlock(&share->LOCK_share);
-found:
/*
We found an existing table definition. Return it if we didn't get
an error when reading the table definition from file.
*/
if (share->error)
{
- /* Table definition contained an error */
- open_table_error(share, share->error, share->open_errno, share->errarg);
- DBUG_RETURN(0);
+ open_table_error(share, share->error, share->open_errno);
+ goto err;
}
- if (share->is_view && !(db_flags & OPEN_VIEW))
+
+ if (share->is_view && !(flags & GTS_VIEW))
{
- open_table_error(share, 1, ENOENT, 0);
- DBUG_RETURN(0);
+ open_table_error(share, OPEN_FRM_NOT_A_TABLE, ENOENT);
+ goto err;
+ }
+ if (!share->is_view && !(flags & GTS_TABLE))
+ {
+ open_table_error(share, OPEN_FRM_NOT_A_VIEW, ENOENT);
+ goto err;
}
++share->ref_count;
@@ -749,98 +733,28 @@ found:
DBUG_PRINT("exit", ("share: 0x%lx ref_count: %u",
(ulong) share, share->ref_count));
- DBUG_RETURN(share);
-}
-
-
-/**
- Get a table share. If it didn't exist, try creating it from engine
+ goto end;
- For arguments and return values, see get_table_share()
-*/
-
-static TABLE_SHARE *
-get_table_share_with_discover(THD *thd, TABLE_LIST *table_list,
- const char *key, uint key_length,
- uint db_flags, int *error,
- my_hash_value_type hash_value)
-{
- TABLE_SHARE *share;
- bool exists;
- DBUG_ENTER("get_table_share_with_discover");
-
- share= get_table_share(thd, table_list, key, key_length, db_flags, error,
- hash_value);
- /*
- If share is not NULL, we found an existing share.
-
- If share is NULL, and there is no error, we're inside
- pre-locking, which silences 'ER_NO_SUCH_TABLE' errors
- with the intention to silently drop non-existing tables
- from the pre-locking list. In this case we still need to try
- auto-discover before returning a NULL share.
-
- Or, we're inside SHOW CREATE VIEW, which
- also installs a silencer for ER_NO_SUCH_TABLE error.
-
- If share is NULL and the error is ER_NO_SUCH_TABLE, this is
- the same as above, only that the error was not silenced by
- pre-locking or SHOW CREATE VIEW.
-
- In both these cases it won't harm to try to discover the
- table.
-
- Finally, if share is still NULL, it's a real error and we need
- to abort.
-
- @todo Rework alternative ways to deal with ER_NO_SUCH TABLE.
- */
- if (share ||
- (thd->is_error() && thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE &&
- thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE_IN_ENGINE))
- DBUG_RETURN(share);
-
- *error= 0;
+err:
+ mysql_mutex_unlock(&LOCK_open);
+ DBUG_RETURN(0);
- /* Table didn't exist. Check if some engine can provide it */
- if (ha_check_if_table_exists(thd, table_list->db, table_list->table_name,
- &exists))
- {
- thd->clear_error();
- /* Conventionally, the storage engine API does not report errors. */
- my_error(ER_OUT_OF_RESOURCES, MYF(0));
- }
- else if (! exists)
+end:
+ if (flags & GTS_NOLOCK)
{
+ release_table_share(share);
/*
- No such table in any engine.
- Hide "Table doesn't exist" errors if the table belongs to a view.
- The check for thd->is_error() is necessary to not push an
- unwanted error in case the error was already silenced.
- @todo Rework the alternative ways to deal with ER_NO_SUCH TABLE.
+ if GTS_NOLOCK is requested, the returned share pointer cannot be used:
+ the share it points to may go away at any moment.
+ But perhaps the caller is only interested in knowing whether a share or
+ table existed?
+ Let's return an invalid pointer here to catch dereferencing attempts.
*/
- if (thd->is_error())
- {
- if (table_list->parent_l)
- {
- thd->clear_error();
- my_error(ER_WRONG_MRG_TABLE, MYF(0));
- }
- else if (table_list->belong_to_view)
- {
- TABLE_LIST *view= table_list->belong_to_view;
- thd->clear_error();
- my_error(ER_VIEW_INVALID, MYF(0),
- view->view_db.str, view->view_name.str);
- }
- }
- }
- else
- {
- thd->clear_error();
- *error= 7; /* Run auto-discover. */
+ share= (TABLE_SHARE*) 1;
}
- DBUG_RETURN(NULL);
+
+ mysql_mutex_unlock(&LOCK_open);
+ DBUG_RETURN(share);
}
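
With this rewrite get_table_share() takes GTS_* flags instead of the old db_flags/error pair, handles LOCK_open internally, always goes through discovery, and (unless GTS_NOLOCK is passed) returns a share with its ref_count already incremented, so callers must balance it with release_table_share() under LOCK_open. A hedged caller-side sketch using names from this diff, with the view/MDL/backoff handling of real callers omitted:

    // Sketch only: checks whether db.table_name resolves to a base table.
    static bool share_is_base_table(THD *thd, const char *db,
                                    const char *table_name,
                                    const char *key, uint key_length,
                                    my_hash_value_type hash_value)
    {
      TABLE_SHARE *share= get_table_share(thd, db, table_name, key, key_length,
                                          GTS_TABLE | GTS_VIEW, hash_value);
      if (!share)
        return false;                 // not found, or an error was reported
      bool is_table= !share->is_view;
      mysql_mutex_lock(&LOCK_open);
      release_table_share(share);     // drop the reference taken for us
      mysql_mutex_unlock(&LOCK_open);
      return is_table;
    }
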
@@ -911,9 +825,10 @@ TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name)
uint key_length;
mysql_mutex_assert_owner(&LOCK_open);
- key_length= create_table_def_key((THD*) 0, key, db, table_name, 0);
- return (TABLE_SHARE*) my_hash_search(&table_def_cache,
- (uchar*) key, key_length);
+ key_length= create_table_def_key(key, db, table_name);
+ TABLE_SHARE* share= (TABLE_SHARE*)my_hash_search(&table_def_cache,
+ (uchar*) key, key_length);
+ return !share || share->error ? 0 : share;
}
@@ -1147,7 +1062,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables,
if (share)
{
kill_delayed_threads_for_table(share);
- /* tdc_remove_table() also sets TABLE_SHARE::version to 0. */
+ /* tdc_remove_table() calls share->remove_from_cache_at_close() */
tdc_remove_table(thd, TDC_RT_REMOVE_UNUSED, table->db,
table->table_name, TRUE);
found=1;
@@ -2183,7 +2098,7 @@ void update_non_unique_table_error(TABLE_LIST *update,
TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name)
{
char key[MAX_DBKEY_LENGTH];
- uint key_length= create_table_def_key(thd, key, db, table_name, 1);
+ uint key_length= create_tmp_table_def_key(thd, key, db, table_name);
return find_temporary_table(thd, key, key_length);
}
@@ -2204,7 +2119,7 @@ TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl)
key_length= get_table_def_key(tl, &key);
- int4store(key_suffix, thd->server_id);
+ int4store(key_suffix, thd->variables.server_id);
int4store(key_suffix + 4, thd->variables.pseudo_thread_id);
for (table= thd->temporary_tables; table; table= table->next)
@@ -2393,7 +2308,7 @@ bool rename_temporary_table(THD* thd, TABLE *table, const char *db,
if (!(key=(char*) alloc_root(&share->mem_root, MAX_DBKEY_LENGTH)))
DBUG_RETURN(1); /* purecov: inspected */
- key_length= create_table_def_key(thd, key, db, table_name, 1);
+ key_length= create_tmp_table_def_key(thd, key, db, table_name);
share->set_table_cache_key(key, key_length);
DBUG_RETURN(0);
}
@@ -2418,7 +2333,8 @@ bool rename_temporary_table(THD* thd, TABLE *table, const char *db,
*/
bool wait_while_table_is_used(THD *thd, TABLE *table,
- enum ha_extra_function function)
+ enum ha_extra_function function,
+ enum_tdc_remove_table_type remove_type)
{
DBUG_ENTER("wait_while_table_is_used");
DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu",
@@ -2430,7 +2346,7 @@ bool wait_while_table_is_used(THD *thd, TABLE *table,
thd->variables.lock_wait_timeout))
DBUG_RETURN(TRUE);
- tdc_remove_table(thd, TDC_RT_REMOVE_NOT_OWN,
+ tdc_remove_table(thd, remove_type,
table->s->db.str, table->s->table_name.str,
FALSE);
/* extra() call must come only after all instances above are closed */
@@ -2483,71 +2399,6 @@ void drop_open_table(THD *thd, TABLE *table, const char *db_name,
/**
- Check that table exists in table definition cache, on disk
- or in some storage engine.
-
- @param thd Thread context
- @param table Table list element
- @param fast_check Check only if share or .frm file exists
- @param[out] exists Out parameter which is set to TRUE if table
- exists and to FALSE otherwise.
-
- @note This function acquires LOCK_open internally.
-
- @note If there is no .FRM file for the table but it exists in one
- of engines (e.g. it was created on another node of NDB cluster)
- this function will fetch and create proper .FRM file for it.
-
- @retval TRUE Some error occurred
- @retval FALSE No error. 'exists' out parameter set accordingly.
-*/
-
-bool check_if_table_exists(THD *thd, TABLE_LIST *table, bool fast_check,
- bool *exists)
-{
- char path[FN_REFLEN + 1];
- TABLE_SHARE *share;
- DBUG_ENTER("check_if_table_exists");
-
- *exists= TRUE;
-
- DBUG_ASSERT(fast_check ||
- thd->mdl_context.
- is_lock_owner(MDL_key::TABLE, table->db,
- table->table_name, MDL_SHARED));
-
- mysql_mutex_lock(&LOCK_open);
- share= get_cached_table_share(table->db, table->table_name);
- mysql_mutex_unlock(&LOCK_open);
-
- if (share)
- goto end;
-
- build_table_filename(path, sizeof(path) - 1, table->db, table->table_name,
- reg_ext, 0);
-
- if (!access(path, F_OK))
- goto end;
-
- if (fast_check)
- {
- *exists= FALSE;
- goto end;
- }
-
- /* .FRM file doesn't exist. Check if some engine can provide it. */
- if (ha_check_if_table_exists(thd, table->db, table->table_name, exists))
- {
- my_printf_error(ER_OUT_OF_RESOURCES, "Failed to open '%-.64s', error while "
- "unpacking from engine", MYF(0), table->table_name);
- DBUG_RETURN(TRUE);
- }
-end:
- DBUG_RETURN(FALSE);
-}
-
-
-/**
An error handler which converts, if possible, ER_LOCK_DEADLOCK error
that can occur when we are trying to acquire a metadata lock to
a request for back-off and re-start of open_tables() process.
@@ -2815,10 +2666,9 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
char *alias= table_list->alias;
uint flags= ot_ctx->get_flags();
MDL_ticket *mdl_ticket;
- int error;
TABLE_SHARE *share;
my_hash_value_type hash_value;
- bool recycled_free_table;
+ uint gts_flags;
DBUG_ENTER("open_table");
/*
@@ -2930,7 +2780,6 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
MDL_SHARED))
{
char path[FN_REFLEN + 1];
- enum legacy_db_type not_used;
build_table_filename(path, sizeof(path) - 1,
table_list->db, table_list->table_name, reg_ext, 0);
/*
@@ -2940,7 +2789,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
during prelocking process (in this case in theory we still
should hold shared metadata lock on it).
*/
- if (dd_frm_type(thd, path, &not_used) == FRMTYPE_VIEW)
+ if (dd_frm_is_view(thd, path))
{
if (!tdc_open_view(thd, table_list, alias, key, key_length,
mem_root, CHECK_METADATA_VERSION))
@@ -2977,12 +2826,12 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
global read lock until end of this statement in order to have
this statement blocked by active FLUSH TABLES WITH READ LOCK.
- We don't block acquire this protection under LOCK TABLES as
+ We don't need to acquire this protection under LOCK TABLES as
such protection already acquired at LOCK TABLES time and
not released until UNLOCK TABLES.
We don't block statements which modify only temporary tables
- as these tables are not preserved by backup by any form of
+ as these tables are not preserved by any form of
backup which uses FLUSH TABLES WITH READ LOCK.
TODO: The fact that we sometimes acquire protection against
@@ -3047,12 +2896,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
if (table_list->open_strategy == TABLE_LIST::OPEN_IF_EXISTS)
{
- bool exists;
-
- if (check_if_table_exists(thd, table_list, 0, &exists))
- DBUG_RETURN(TRUE);
-
- if (!exists)
+ if (!ha_table_exists(thd, table_list->db, table_list->table_name))
DBUG_RETURN(FALSE);
/* Table exists. Let us try to open it. */
@@ -3060,26 +2904,40 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
else if (table_list->open_strategy == TABLE_LIST::OPEN_STUB)
DBUG_RETURN(FALSE);
+ if (table_list->i_s_requested_object & OPEN_TABLE_ONLY)
+ gts_flags= GTS_TABLE;
+ else if (table_list->i_s_requested_object & OPEN_VIEW_ONLY)
+ gts_flags= GTS_VIEW;
+ else
+ gts_flags= GTS_TABLE | GTS_VIEW;
+
retry_share:
- mysql_mutex_lock(&LOCK_open);
+ share= get_table_share(thd, table_list->db, table_list->table_name,
+ key, key_length, gts_flags, hash_value);
- if (!(share= get_table_share_with_discover(thd, table_list, key,
- key_length, OPEN_VIEW,
- &error,
- hash_value)))
+ if (!share)
{
- mysql_mutex_unlock(&LOCK_open);
/*
- If thd->is_error() is not set, we either need discover
- (error == 7), or the error was silenced by the prelocking
- handler (error == 0), in which case we should skip this
- table.
+ Hide "Table doesn't exist" errors if the table belongs to a view.
+ The check for thd->is_error() is necessary to not push an
+ unwanted error in case the error was already silenced.
+ @todo Rework the alternative ways to deal with ER_NO_SUCH_TABLE.
*/
- if (error == 7 && !thd->is_error())
+ if (thd->is_error())
{
- (void) ot_ctx->request_backoff_action(Open_table_context::OT_DISCOVER,
- table_list);
+ if (table_list->parent_l)
+ {
+ thd->clear_error();
+ my_error(ER_WRONG_MRG_TABLE, MYF(0));
+ }
+ else if (table_list->belong_to_view)
+ {
+ TABLE_LIST *view= table_list->belong_to_view;
+ thd->clear_error();
+ my_error(ER_VIEW_INVALID, MYF(0),
+ view->view_db.str, view->view_name.str);
+ }
}
DBUG_RETURN(TRUE);
}
@@ -3098,7 +2956,7 @@ retry_share:
if (table_list->parent_l)
{
my_error(ER_WRONG_MRG_TABLE, MYF(0));
- goto err_unlock;
+ goto err_lock;
}
/*
@@ -3106,13 +2964,7 @@ retry_share:
that it was a view when the statement was prepared.
*/
if (check_and_update_table_version(thd, table_list, share))
- goto err_unlock;
- if (table_list->i_s_requested_object & OPEN_TABLE_ONLY)
- {
- my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db,
- table_list->table_name);
- goto err_unlock;
- }
+ goto err_lock;
/* Open view */
if (open_new_frm(thd, share, alias,
@@ -3121,7 +2973,9 @@ retry_share:
READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
thd->open_options,
0, table_list, mem_root))
- goto err_unlock;
+ goto err_lock;
+
+ mysql_mutex_lock(&LOCK_open);
/* TODO: Don't free this */
release_table_share(share);
@@ -3132,21 +2986,9 @@ retry_share:
DBUG_RETURN(FALSE);
}
- /*
- Note that situation when we are trying to open a table for what
- was a view during previous execution of PS will be handled in by
- the caller. Here we should simply open our table even if
- TABLE_LIST::view is true.
- */
-
- if (table_list->i_s_requested_object & OPEN_VIEW_ONLY)
- {
- my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db,
- table_list->table_name);
- goto err_unlock;
- }
-
- if (!(flags & MYSQL_OPEN_IGNORE_FLUSH))
+ mysql_mutex_lock(&LOCK_open);
+ if (!(flags & MYSQL_OPEN_IGNORE_FLUSH) ||
+ (share->protected_against_usage() && !(flags & MYSQL_OPEN_FOR_REPAIR)))
{
if (share->has_old_version())
{
@@ -3198,19 +3040,24 @@ retry_share:
{
table= share->free_tables.front();
table_def_use_table(thd, table);
- recycled_free_table= true;
- /* We need to release share as we have EXTRA reference to it in our hands. */
+
+ /* Release the share as we hold an extra reference to it */
release_table_share(share);
+ mysql_mutex_unlock(&LOCK_open);
+
+ DBUG_ASSERT(table->file != NULL);
+ table->file->rebind_psi();
}
else
{
- /* We have too many TABLE instances around let us try to get rid of them. */
+ enum open_frm_error error;
+
+ /* If we have too many TABLE instances around, try to get rid of them */
while (table_cache_count > table_cache_size && unused_tables)
free_cache_entry(unused_tables);
mysql_mutex_unlock(&LOCK_open);
- recycled_free_table= false;
/* make a new table */
if (!(table=(TABLE*) my_malloc(sizeof(*table),MYF(MY_WME))))
goto err_lock;
@@ -3228,7 +3075,7 @@ retry_share:
{
my_free(table);
- if (error == 7)
+ if (error == OPEN_FRM_DISCOVER)
(void) ot_ctx->request_backoff_action(Open_table_context::OT_DISCOVER,
table_list);
else if (share->crashed)
@@ -3242,17 +3089,11 @@ retry_share:
my_free(table);
goto err_lock;
}
- mysql_mutex_lock(&LOCK_open);
+
/* Add table to the share's used tables list. */
+ mysql_mutex_lock(&LOCK_open);
table_def_add_used_table(thd, table);
- }
- mysql_mutex_unlock(&LOCK_open);
-
- /* Call rebind_psi outside of the LOCK_open critical section. */
- if (recycled_free_table)
- {
- DBUG_ASSERT(table->file != NULL);
- table->file->rebind_psi();
+ mysql_mutex_unlock(&LOCK_open);
}
table->mdl_ticket= mdl_ticket;
@@ -3292,7 +3133,6 @@ retry_share:
err_lock:
mysql_mutex_lock(&LOCK_open);
-err_unlock:
release_table_share(share);
mysql_mutex_unlock(&LOCK_open);
@@ -3313,7 +3153,7 @@ err_unlock:
TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name)
{
char key[MAX_DBKEY_LENGTH];
- uint key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
+ uint key_length= create_table_def_key(key, db, table_name);
for (TABLE *table= list; table ; table=table->next)
{
@@ -3921,21 +3761,16 @@ bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias,
MEM_ROOT *mem_root, uint flags)
{
TABLE not_used;
- int error;
- my_hash_value_type hash_value;
TABLE_SHARE *share;
+ bool err= TRUE;
- hash_value= my_calc_hash(&table_def_cache, (uchar*) cache_key,
- cache_key_length);
- mysql_mutex_lock(&LOCK_open);
+ if (!(share= get_table_share(thd, table_list->db, table_list->table_name,
+ cache_key, cache_key_length, GTS_VIEW)))
+ return TRUE;
- if (!(share= get_table_share(thd, table_list, cache_key,
- cache_key_length,
- OPEN_VIEW, &error,
- hash_value)))
- goto err;
+ DBUG_ASSERT(share->is_view);
- if ((flags & CHECK_METADATA_VERSION))
+ if (flags & CHECK_METADATA_VERSION)
{
/*
Check TABLE_SHARE-version of view only if we have been instructed to do
@@ -3947,30 +3782,21 @@ bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias,
table-definition-cache interface.
*/
if (check_and_update_table_version(thd, table_list, share))
- {
- release_table_share(share);
- goto err;
- }
+ goto ret;
}
- if (share->is_view &&
- !open_new_frm(thd, share, alias,
- (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
- HA_GET_INDEX | HA_TRY_READ_ONLY),
- READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD |
- flags, thd->open_options, &not_used, table_list,
- mem_root))
- {
- release_table_share(share);
- mysql_mutex_unlock(&LOCK_open);
- return FALSE;
- }
+ err= open_new_frm(thd, share, alias,
+ (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
+ HA_GET_INDEX | HA_TRY_READ_ONLY),
+ READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD | flags,
+ thd->open_options, &not_used, table_list, mem_root);
- my_error(ER_WRONG_OBJECT, MYF(0), share->db.str, share->table_name.str, "VIEW");
+ret:
+ mysql_mutex_lock(&LOCK_open);
release_table_share(share);
-err:
mysql_mutex_unlock(&LOCK_open);
- return TRUE;
+
+ return err;
}
@@ -4024,40 +3850,20 @@ static bool open_table_entry_fini(THD *thd, TABLE_SHARE *share, TABLE *entry)
static bool auto_repair_table(THD *thd, TABLE_LIST *table_list)
{
- const char *cache_key;
- uint cache_key_length;
TABLE_SHARE *share;
TABLE *entry;
- int not_used;
bool result= TRUE;
- my_hash_value_type hash_value;
-
- cache_key_length= get_table_def_key(table_list, &cache_key);
thd->clear_error();
- hash_value= my_calc_hash(&table_def_cache, (uchar*) cache_key,
- cache_key_length);
- mysql_mutex_lock(&LOCK_open);
+ if (!(entry= (TABLE*)my_malloc(sizeof(TABLE), MYF(MY_WME))))
+ return result;
- if (!(share= get_table_share(thd, table_list, cache_key,
- cache_key_length,
- OPEN_VIEW, &not_used,
- hash_value)))
- goto end_unlock;
+ if (!(share= get_table_share(thd, table_list->db, table_list->table_name,
+ GTS_TABLE)))
+ goto end_free;
- if (share->is_view)
- {
- release_table_share(share);
- goto end_unlock;
- }
-
- if (!(entry= (TABLE*)my_malloc(sizeof(TABLE), MYF(MY_WME))))
- {
- release_table_share(share);
- goto end_unlock;
- }
- mysql_mutex_unlock(&LOCK_open);
+ DBUG_ASSERT(! share->is_view);
if (open_table_from_share(thd, share, table_list->alias,
(uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
@@ -4082,7 +3888,6 @@ static bool auto_repair_table(THD *thd, TABLE_LIST *table_list)
closefrm(entry, 0);
result= FALSE;
}
- my_free(entry);
mysql_mutex_lock(&LOCK_open);
release_table_share(share);
@@ -4090,8 +3895,9 @@ static bool auto_repair_table(THD *thd, TABLE_LIST *table_list)
tdc_remove_table(thd, TDC_RT_REMOVE_ALL,
table_list->db, table_list->table_name,
TRUE);
-end_unlock:
mysql_mutex_unlock(&LOCK_open);
+end_free:
+ my_free(entry);
return result;
}
@@ -4231,11 +4037,16 @@ recover_from_failed_open(THD *thd)
tdc_remove_table(thd, TDC_RT_REMOVE_ALL, m_failed_table->db,
m_failed_table->table_name, FALSE);
- ha_create_table_from_engine(thd, m_failed_table->db,
- m_failed_table->table_name);
thd->get_stmt_da()->clear_warning_info(thd->query_id);
thd->clear_error(); // Clear error message
+
+ if ((result=
+ !get_table_share(thd, m_failed_table->db,
+ m_failed_table->table_name,
+ GTS_TABLE | GTS_FORCE_DISCOVERY | GTS_NOLOCK)))
+ break;
+
thd->mdl_context.release_transactional_locks();
break;
}
@@ -4928,7 +4739,7 @@ lock_table_names(THD *thd,
if (mdl_requests.is_empty())
DBUG_RETURN(FALSE);
- /* Check if CREATE TABLE IF NOT EXISTS was used */
+ /* Check if CREATE TABLE was used */
create_table= (tables_start && tables_start->open_strategy ==
TABLE_LIST::OPEN_IF_EXISTS);
@@ -4967,12 +4778,9 @@ lock_table_names(THD *thd,
for (;;)
{
- bool exists= TRUE;
- bool res;
-
if (create_table)
thd->push_internal_handler(&error_handler); // Avoid warnings & errors
- res= thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout);
+ bool res= thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout);
if (create_table)
thd->pop_internal_handler();
if (!res)
@@ -4982,13 +4790,10 @@ lock_table_names(THD *thd,
DBUG_RETURN(TRUE); // Return original error
/*
- We come here in the case of lock timeout when executing
- CREATE TABLE IF NOT EXISTS.
- Verify that table really exists (it should as we got a lock conflict)
+      We come here in the case of a lock timeout when executing CREATE TABLE.
+      Verify that the table does exist (it usually does, as we got a lock conflict).
*/
- if (check_if_table_exists(thd, tables_start, 1, &exists))
- DBUG_RETURN(TRUE); // Should never happen
- if (exists)
+ if (ha_table_exists(thd, tables_start->db, tables_start->table_name))
{
if (thd->lex->create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS)
{
@@ -5000,17 +4805,16 @@ lock_table_names(THD *thd,
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), tables_start->table_name);
DBUG_RETURN(TRUE);
}
- /* purecov: begin inspected */
/*
- We got error from acquire_locks but table didn't exists.
- In theory this should never happen, except maybe in
- CREATE or DROP DATABASE scenario.
+      We got an error from acquire_locks, but the table didn't exist.
+      This could happen if another connection runs a statement
+      involving this non-existent table, and that statement took the MDL,
+      but didn't error out with ER_NO_SUCH_TABLE yet (yes, a race condition).
We play safe and restart the original acquire_locks with the
- original timeout
+ original timeout.
*/
create_table= 0;
lock_wait_timeout= org_lock_wait_timeout;
- /* purecov: end */
}
}
@@ -6244,6 +6048,8 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables,
  the opened TABLE instance will be added to the THD::temporary_tables list.
@param thd Thread context.
+ @param hton Storage engine of the table, if known,
+ or NULL otherwise.
@param path Path (without .frm)
@param db Database name.
@param table_name Table name.
@@ -6262,7 +6068,8 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables,
@retval NULL on error.
*/
-TABLE *open_table_uncached(THD *thd, const char *path, const char *db,
+TABLE *open_table_uncached(THD *thd, handlerton *hton,
+ const char *path, const char *db,
const char *table_name,
bool add_to_temporary_tables_list,
bool open_in_engine)
@@ -6271,18 +6078,16 @@ TABLE *open_table_uncached(THD *thd, const char *path, const char *db,
TABLE_SHARE *share;
char cache_key[MAX_DBKEY_LENGTH], *saved_cache_key, *tmp_path;
uint key_length;
- TABLE_LIST table_list;
DBUG_ENTER("open_table_uncached");
DBUG_PRINT("enter",
("table: '%s'.'%s' path: '%s' server_id: %u "
"pseudo_thread_id: %lu",
db, table_name, path,
- (uint) thd->server_id, (ulong) thd->variables.pseudo_thread_id));
+ (uint) thd->variables.server_id,
+ (ulong) thd->variables.pseudo_thread_id));
- table_list.db= (char*) db;
- table_list.table_name= (char*) table_name;
/* Create the cache_key for temporary tables */
- key_length= create_table_def_key(thd, cache_key, db, table_name, 1);
+ key_length= create_tmp_table_def_key(thd, cache_key, db, table_name);
if (!(tmp_table= (TABLE*) my_malloc(sizeof(*tmp_table) + sizeof(*share) +
strlen(path)+1 + key_length,
@@ -6303,8 +6108,9 @@ TABLE *open_table_uncached(THD *thd, const char *path, const char *db,
init_tmp_table_share(thd, share, saved_cache_key, key_length,
strend(saved_cache_key)+1, tmp_path);
+ share->db_plugin= ha_lock_engine(thd, hton);
- if (open_table_def(thd, share, 0))
+ if (open_table_def(thd, share, GTS_TABLE | GTS_USE_DISCOVERY))
{
/* No need to lock share->mutex as this is not needed for tmp tables */
free_table_share(share);
@@ -6312,11 +6118,7 @@ TABLE *open_table_uncached(THD *thd, const char *path, const char *db,
DBUG_RETURN(0);
}
-#ifdef HAVE_PSI_TABLE_INTERFACE
- share->m_psi= PSI_TABLE_CALL(get_table_share)(true, share);
-#else
- share->m_psi= NULL;
-#endif
+ share->m_psi= PSI_CALL_get_table_share(true, share);
if (open_table_from_share(thd, share, table_name,
open_in_engine ?
@@ -6544,7 +6346,7 @@ bool open_temporary_table(THD *thd, TABLE_LIST *tl)
DBUG_PRINT("error",
("query_id: %lu server_id: %u pseudo_thread_id: %lu",
- (ulong) table->query_id, (uint) thd->server_id,
+ (ulong) table->query_id, (uint) thd->variables.server_id,
(ulong) thd->variables.pseudo_thread_id));
my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr());
DBUG_RETURN(TRUE);
@@ -7292,7 +7094,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
We can't do this in Item_field as this would change the
'name' of the item which may be used in the select list
*/
- strmake(name_buff, db, sizeof(name_buff)-1);
+ strmake_buf(name_buff, db);
my_casedn_str(files_charset_info, name_buff);
db= name_buff;
}
@@ -8463,7 +8265,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
thd->mark_used_columns= mark_used_columns;
DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns));
if (allow_sum_func)
- thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level;
+ thd->lex->allow_sum_func|=
+ (nesting_map)1 << thd->lex->current_select->nest_level;
thd->where= THD::DEFAULT_WHERE;
save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup;
thd->lex->current_select->is_item_list_lookup= 0;
@@ -8864,7 +8667,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
We can't do this in Item_field as this would change the
'name' of the item which may be used in the select list
*/
- strmake(name_buff, db_name, sizeof(name_buff)-1);
+ strmake_buf(name_buff, db_name);
my_casedn_str(files_charset_info, name_buff);
db_name= name_buff;
}
@@ -9573,15 +9376,10 @@ my_bool mysql_rm_tmp_tables(void)
/* Remove all SQLxxx tables from directory */
- for (idx=0 ; idx < (uint) dirp->number_off_files ; idx++)
+ for (idx=0 ; idx < (uint) dirp->number_of_files ; idx++)
{
file=dirp->dir_entry+idx;
- /* skiping . and .. */
- if (file->name[0] == '.' && (!file->name[1] ||
- (file->name[1] == '.' && !file->name[2])))
- continue;
-
if (!memcmp(file->name, tmp_file_prefix,
tmp_file_prefix_length))
{
@@ -9597,7 +9395,7 @@ my_bool mysql_rm_tmp_tables(void)
memcpy(filePathCopy, filePath, filePath_len - ext_len);
filePathCopy[filePath_len - ext_len]= 0;
init_tmp_table_share(thd, &share, "", 0, "", filePathCopy);
- if (!open_table_def(thd, &share, 0) &&
+ if (!open_table_def(thd, &share) &&
((handler_file= get_new_handler(&share, thd->mem_root,
share.db_type()))))
{
@@ -9761,6 +9559,7 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
TABLE *table;
TABLE_SHARE *share;
DBUG_ENTER("tdc_remove_table");
+ DBUG_PRINT("enter",("name: %s remove_type: %d", table_name, remove_type));
if (! has_lock)
mysql_mutex_lock(&LOCK_open);
@@ -9773,7 +9572,7 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
MDL_EXCLUSIVE));
- key_length= create_table_def_key(thd, key, db, table_name, false);
+ key_length= create_table_def_key(key, db, table_name);
if ((share= (TABLE_SHARE*) my_hash_search(&table_def_cache,(uchar*) key,
key_length)))
@@ -9787,7 +9586,7 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
DBUG_ASSERT(share->used_tables.is_empty());
}
else if (remove_type == TDC_RT_REMOVE_NOT_OWN ||
- remove_type == TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE)
+ remove_type == TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)
{
TABLE_SHARE::TABLE_list::Iterator it2(share->used_tables);
while ((table= it2++))
@@ -9800,8 +9599,8 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
}
#endif
/*
- Set share's version to zero in order to ensure that it gets
- automatically deleted once it is no longer referenced.
+      Mark the share to ensure that it gets automatically deleted once
+      it is no longer referenced.
Note that code in TABLE_SHARE::wait_for_old_version() assumes
that marking share as old and removal of its unused tables
@@ -9810,8 +9609,13 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
TDC does not contain old shares which don't have any tables
used.
*/
- if (remove_type != TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE)
- share->version= 0;
+ if (remove_type == TDC_RT_REMOVE_NOT_OWN)
+ share->remove_from_cache_at_close();
+ else
+ {
+        /* Ensure that no one can open the table while it's in use */
+ share->protect_against_usage();
+ }
while ((table= it++))
free_cache_entry(table);
@@ -9858,7 +9662,6 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order)
List_iterator<Item_func_match> li(*(select_lex->ftfunc_list));
Item_func_match *ifm;
DBUG_PRINT("info",("Performing FULLTEXT search"));
- THD_STAGE_INFO(thd, stage_fulltext_initialization);
while ((ifm=li++))
ifm->init_search(no_order);
diff --git a/sql/sql_base.h b/sql/sql_base.h
index ef54c2b813f..a4f35b59ba9 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -61,7 +61,8 @@ enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND,
enum enum_tdc_remove_table_type {TDC_RT_REMOVE_ALL, TDC_RT_REMOVE_NOT_OWN,
TDC_RT_REMOVE_UNUSED,
- TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE};
+ TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE};
+#define TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE
/* bits for last argument to remove_table_from_cache() */
#define RTFC_NO_FLAG 0x0000
@@ -69,6 +70,8 @@ enum enum_tdc_remove_table_type {TDC_RT_REMOVE_ALL, TDC_RT_REMOVE_NOT_OWN,
#define RTFC_WAIT_OTHER_THREAD_FLAG 0x0002
#define RTFC_CHECK_KILLED_FLAG 0x0004
+extern HASH table_def_cache;
+
bool check_dup(const char *db, const char *name, TABLE_LIST *tables);
extern mysql_mutex_t LOCK_open;
bool table_cache_init(void);
@@ -79,14 +82,60 @@ void table_def_start_shutdown(void);
void assign_new_table_id(TABLE_SHARE *share);
uint cached_table_definitions(void);
uint cached_open_tables(void);
+
+/**
+ Create a table cache key for non-temporary table.
+
+ @param key Buffer for key (must be at least MAX_DBKEY_LENGTH bytes).
+ @param db Database name.
+ @param table_name Table name.
+
+ @return Length of key.
+
+ @sa create_table_def_key(thd, char *, table_list, bool)
+*/
+
+inline uint
+create_table_def_key(char *key, const char *db, const char *table_name)
+{
+ /*
+    In theory the caller should ensure that both db and table_name are
+    no longer than NAME_LEN bytes. In practice we play it safe to avoid
+ buffer overruns.
+ */
+ return (uint)(strmake(strmake(key, db, NAME_LEN) + 1, table_name,
+ NAME_LEN) - key + 1);
+}
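
(Editorial note, not part of the patch: the helper above simply packs the two names into one NUL-separated buffer. A minimal sketch of the resulting layout, assuming both names fit within NAME_LEN:)

    char key[MAX_DBKEY_LENGTH];
    uint len= create_table_def_key(key, "test", "t1");
    /* key now holds "test\0t1\0" and len == 8 (4 + 1 + 2 + 1 bytes) */
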
+
+uint create_tmp_table_def_key(THD *thd, char *key, const char *db,
+ const char *table_name);
uint get_table_def_key(const TABLE_LIST *table_list, const char **key);
-TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list,
- const char *key, uint key_length,
- uint db_flags, int *error,
+TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name,
+ const char *key, uint key_length, uint flags,
my_hash_value_type hash_value);
void release_table_share(TABLE_SHARE *share);
TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name);
+// convenience helper: call get_table_share() without precomputed hash_value
+static inline TABLE_SHARE *get_table_share(THD *thd, const char *db,
+ const char *table_name,
+ const char *key, uint key_length,
+ uint flags)
+{
+ return get_table_share(thd, db, table_name, key, key_length, flags,
+ my_calc_hash(&table_def_cache, (uchar*) key, key_length));
+}
+
+// convenience helper: call get_table_share() without precomputed cache key
+static inline TABLE_SHARE *get_table_share(THD *thd, const char *db,
+ const char *table_name, uint flags)
+{
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+ key_length= create_table_def_key(key, db, table_name);
+ return get_table_share(thd, db, table_name, key, key_length, flags);
+}
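
(Editorial aside: with the new overloads a caller that only has the database and table names no longer needs to build the cache key or hash itself, as auto_repair_table() above now does. A minimal usage sketch, with the error handling assumed rather than taken from the source:)

    TABLE_SHARE *share= get_table_share(thd, "test", "t1", GTS_TABLE);
    if (share)
    {
      /* ... use the share ... */
      mysql_mutex_lock(&LOCK_open);
      release_table_share(share);
      mysql_mutex_unlock(&LOCK_open);
    }
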
+
TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update,
uint lock_flags);
@@ -127,11 +176,12 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update,
*/
#define MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK 0x1000
#define MYSQL_LOCK_NOT_TEMPORARY 0x2000
+#define MYSQL_OPEN_FOR_REPAIR 0x4000
/**
Only check THD::killed if waits happen (e.g. wait on MDL, wait on
table flush, wait on thr_lock.c locks) while opening and locking table.
*/
-#define MYSQL_OPEN_IGNORE_KILLED 0x4000
+#define MYSQL_OPEN_IGNORE_KILLED 0x8000
/** Please refer to the internals manual. */
#define MYSQL_OPEN_REOPEN (MYSQL_OPEN_IGNORE_FLUSH |\
@@ -151,8 +201,8 @@ bool open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias,
bool get_key_map_from_key_list(key_map *map, TABLE *table,
List<String> *index_list);
-TABLE *open_table_uncached(THD *thd, const char *path, const char *db,
- const char *table_name,
+TABLE *open_table_uncached(THD *thd, handlerton *hton, const char *path,
+ const char *db, const char *table_name,
bool add_to_temporary_tables_list,
bool open_in_engine);
TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name);
@@ -236,7 +286,9 @@ bool setup_tables_and_check_access(THD *thd,
ulong want_access,
bool full_table_list);
bool wait_while_table_is_used(THD *thd, TABLE *table,
- enum ha_extra_function function);
+ enum ha_extra_function function,
+ enum_tdc_remove_table_type remove_type=
+ TDC_RT_REMOVE_NOT_OWN);
void drop_open_table(THD *thd, TABLE *table, const char *db_name,
const char *table_name);
@@ -308,13 +360,23 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias,
const char *cache_key, uint cache_key_length,
MEM_ROOT *mem_root, uint flags);
+
+static inline bool tdc_open_view(THD *thd, TABLE_LIST *table_list,
+ const char *alias, MEM_ROOT *mem_root,
+ uint flags)
+{
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+ key_length= create_table_def_key(key, table_list->db, table_list->table_name);
+ return tdc_open_view(thd, table_list, alias, key, key_length, mem_root, flags);
+}
+
void tdc_flush_unused_tables();
TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db,
const char *table_name,
bool no_error);
void mark_tmp_table_for_reuse(TABLE *table);
-bool check_if_table_exists(THD *thd, TABLE_LIST *table, bool fast_check,
- bool *exists);
+
int update_virtual_fields(THD *thd, TABLE *table,
enum enum_vcol_update_mode vcol_update_mode= VCOL_UPDATE_FOR_READ);
int dynamic_column_error_message(enum_dyncol_func_result rc);
@@ -331,7 +393,6 @@ extern TABLE *unused_tables;
extern Item **not_found_item;
extern Field *not_found_field;
extern Field *view_ref_found;
-extern HASH table_def_cache;
/**
clean/setup table fields and map.
diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc
index a4664da2bf6..3bb5deab406 100644
--- a/sql/sql_binlog.cc
+++ b/sql/sql_binlog.cc
@@ -107,7 +107,7 @@ void mysql_client_binlog_statement(THD* thd)
rli->relay_log.description_event_for_exec &&
buf))
{
- my_error(ER_OUTOFMEMORY, MYF(0), 1); /* needed 1 bytes */
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); /* needed 1 bytes */
goto end;
}
diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h
index db4c7110ac7..5e86a889053 100644
--- a/sql/sql_bitmap.h
+++ b/sql/sql_bitmap.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2003, 2010, Oracle and/or its affiliates
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -60,7 +61,7 @@ public:
intersect(map2buff);
if (map.n_bits > sizeof(ulonglong) * 8)
bitmap_set_above(&map, sizeof(ulonglong),
- test(map2buff & (LL(1) << (sizeof(ulonglong) * 8 - 1))));
+ test(map2buff & (1LL << (sizeof(ulonglong) * 8 - 1))));
}
void subtract(Bitmap& map2) { bitmap_subtract(&map, &map2.map); }
void merge(Bitmap& map2) { bitmap_union(&map, &map2.map); }
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 73bdf9fa984..007a1b3b585 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -474,6 +474,8 @@ static void make_base_query(String *new_query,
/* The following is guaranteed by the query_cache interface */
DBUG_ASSERT(query[query_length] == 0);
DBUG_ASSERT(!is_white_space(query[0]));
+ /* We do not support UCS2, UTF16, UTF32 as a client character set */
+ DBUG_ASSERT(current_thd->variables.character_set_client->mbminlen == 1);
new_query->length(0); // Don't copy anything from old buffer
if (new_query->realloc(query_length + additional_length))
@@ -836,18 +838,18 @@ void Query_cache_block::destroy()
DBUG_VOID_RETURN;
}
-inline uint Query_cache_block::headers_len()
+uint Query_cache_block::headers_len()
{
return (ALIGN_SIZE(sizeof(Query_cache_block_table)*n_tables) +
ALIGN_SIZE(sizeof(Query_cache_block)));
}
-inline uchar* Query_cache_block::data(void)
+uchar* Query_cache_block::data(void)
{
return (uchar*)( ((uchar*)this) + headers_len() );
}
-inline Query_cache_query * Query_cache_block::query()
+Query_cache_query * Query_cache_block::query()
{
#ifndef DBUG_OFF
if (type != QUERY)
@@ -856,7 +858,7 @@ inline Query_cache_query * Query_cache_block::query()
return (Query_cache_query *) data();
}
-inline Query_cache_table * Query_cache_block::table()
+Query_cache_table * Query_cache_block::table()
{
#ifndef DBUG_OFF
if (type != TABLE)
@@ -865,7 +867,7 @@ inline Query_cache_table * Query_cache_block::table()
return (Query_cache_table *) data();
}
-inline Query_cache_result * Query_cache_block::result()
+Query_cache_result * Query_cache_block::result()
{
#ifndef DBUG_OFF
if (type != RESULT && type != RES_CONT && type != RES_BEG &&
@@ -875,7 +877,7 @@ inline Query_cache_result * Query_cache_block::result()
return (Query_cache_result *) data();
}
-inline Query_cache_block_table * Query_cache_block::table(TABLE_COUNTER_TYPE n)
+Query_cache_block_table * Query_cache_block::table(TABLE_COUNTER_TYPE n)
{
return ((Query_cache_block_table *)
(((uchar*)this)+ALIGN_SIZE(sizeof(Query_cache_block)) +
@@ -2491,7 +2493,28 @@ void Query_cache::init()
m_cache_status= Query_cache::OK;
m_requests_in_progress= 0;
initialized = 1;
- query_state_map= default_charset_info->state_map;
+ /*
+ Using state_map from latin1 should be fine in all cases:
+ 1. We do not support UCS2, UTF16, UTF32 as a client character set.
+ 2. The other character sets are compatible on the lower ASCII-range
+ 0x00-0x20, and have the following characters marked as spaces:
+
+ 0x09 TAB
+ 0x0A LINE FEED
+ 0x0B VERTICAL TAB
+ 0x0C FORM FEED
+      0x0D CARRIAGE RETURN
+ 0x20 SPACE
+
+ Additionally, only some of the ASCII-compatible character sets
+ (including latin1) can have 0xA0 mapped to "NON-BREAK SPACE"
+ and thus marked as space.
+ That should not be a problem for those charsets that map 0xA0
+    to something else: the parser will just return a syntax error
+    if this character appears directly in the query
+ (i.e. not inside a string literal or comment).
+ */
+ query_state_map= my_charset_latin1.state_map;
/*
If we explicitly turn off query cache from the command line query
    cache will be disabled for the remainder of the server's lifetime
@@ -3184,8 +3207,8 @@ void Query_cache::invalidate_table(THD *thd, TABLE_LIST *table_list)
char key[MAX_DBKEY_LENGTH];
uint key_length;
- key_length=(uint) (strmov(strmov(key,table_list->db)+1,
- table_list->table_name) -key)+ 1;
+ key_length= create_table_def_key(key, table_list->db,
+ table_list->table_name);
// We don't store temporary tables => no key_length+=4 ...
invalidate_table(thd, (uchar *)key, key_length);
@@ -3303,8 +3326,8 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used,
DBUG_PRINT("qcache", ("view: %s db: %s",
tables_used->view_name.str,
tables_used->view_db.str));
- key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1,
- tables_used->view_name.str) - key) + 1;
+ key_length= create_table_def_key(key, tables_used->view_db.str,
+ tables_used->view_name.str);
/*
      There are no callback functions for VIEWs
*/
@@ -4026,6 +4049,18 @@ Query_cache::process_and_count_tables(THD *thd, TABLE_LIST *tables_used,
/*
+In the non-embedded server the query cache intercepts results in net_real_write,
+but if we have no net.vio then net_real_write
+will not be called, so the query cache can't get the results of the query.
+*/
+#ifdef EMBEDDED_LIBRARY
+#define qc_is_able_to_intercept_result(T) 1
+#else
+#define qc_is_able_to_intercept_result(T) ((T)->net.vio)
+#endif
+
+
+/*
If query is cacheable return number tables in query
(query without tables are not cached)
*/
@@ -4040,7 +4075,8 @@ Query_cache::is_cacheable(THD *thd, LEX *lex,
if (thd->lex->safe_to_cache_query &&
(thd->variables.query_cache_type == 1 ||
(thd->variables.query_cache_type == 2 && (lex->select_lex.options &
- OPTION_TO_QUERY_CACHE))))
+ OPTION_TO_QUERY_CACHE))) &&
+ qc_is_able_to_intercept_result(thd))
{
DBUG_PRINT("qcache", ("options: %lx %lx type: %u",
(long) OPTION_TO_QUERY_CACHE,
@@ -4062,11 +4098,12 @@ Query_cache::is_cacheable(THD *thd, LEX *lex,
}
DBUG_PRINT("qcache",
- ("not interesting query: %d or not cacheable, options %lx %lx type: %u",
+ ("not interesting query: %d or not cacheable, options %lx %lx type: %u net->vio present: %u",
(int) lex->sql_command,
(long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
- (int) thd->variables.query_cache_type));
+ (int) thd->variables.query_cache_type,
+ (uint) test(qc_is_able_to_intercept_result(thd))));
DBUG_RETURN(0);
}
@@ -4351,14 +4388,13 @@ my_bool Query_cache::move_by_type(uchar **border,
case Query_cache_block::RESULT:
{
DBUG_PRINT("qcache", ("block 0x%lx RES* (%d)", (ulong) block,
- (int) block->type));
+ (int) block->type));
if (*border == 0)
break;
- Query_cache_block *query_block = block->result()->parent(),
- *next = block->next,
- *prev = block->prev;
- Query_cache_block::block_type type = block->type;
+ Query_cache_block *query_block= block->result()->parent();
BLOCK_LOCK_WR(query_block);
+ Query_cache_block *next= block->next, *prev= block->prev;
+ Query_cache_block::block_type type= block->type;
ulong len = block->length, used = block->used;
Query_cache_block *pprev = block->pprev,
*pnext = block->pnext,
@@ -4520,8 +4556,9 @@ uint Query_cache::filename_2_table_key (char *key, const char *path,
*db_length= (filename - dbname) - 1;
DBUG_PRINT("qcache", ("table '%-.*s.%s'", *db_length, dbname, filename));
- DBUG_RETURN((uint) (strmov(strmake(key, dbname, *db_length) + 1,
- filename) -key) + 1);
+ DBUG_RETURN((uint) (strmake(strmake(key, dbname,
+ MY_MIN(*db_length, NAME_LEN)) + 1,
+ filename, NAME_LEN) - key) + 1);
}
/****************************************************************************
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index 2d6392911f1..15848dabd33 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -141,12 +141,12 @@ struct Query_cache_block
inline bool is_free(void) { return type == FREE; }
void init(ulong length);
void destroy();
- inline uint headers_len();
- inline uchar* data(void);
- inline Query_cache_query *query();
- inline Query_cache_table *table();
- inline Query_cache_result *result();
- inline Query_cache_block_table *table(TABLE_COUNTER_TYPE n);
+ uint headers_len();
+ uchar* data(void);
+ Query_cache_query *query();
+ Query_cache_table *table();
+ Query_cache_result *result();
+ Query_cache_block_table *table(TABLE_COUNTER_TYPE n);
};
struct Query_cache_query
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index bd8f72d408d..c9f07c4d036 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1,6 +1,6 @@
/*
- Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2008, 2012, Monty Program Ab
+ Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2008, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -35,6 +35,7 @@
#include "sql_cache.h" // query_cache_abort
#include "sql_base.h" // close_thread_tables
#include "sql_time.h" // date_time_format_copy
+#include "tztime.h" // MYSQL_TIME <-> my_time_t
#include "sql_acl.h" // NO_ACCESS,
// acl_getroot_no_password
#include "sql_base.h" // close_temporary_tables
@@ -115,7 +116,8 @@ Key::Key(const Key &rhs, MEM_ROOT *mem_root)
columns(rhs.columns, mem_root),
name(rhs.name),
option_list(rhs.option_list),
- generated(rhs.generated)
+ generated(rhs.generated),
+ create_if_not_exists(rhs.create_if_not_exists)
{
list_copy_and_replace_each_value(columns, mem_root);
}
@@ -354,17 +356,6 @@ void thd_set_thread_stack(THD *thd, char *stack_start)
}
/**
- Lock connection data for the set of connections this connection
- belongs to
-
- @param thd THD object
-*/
-void thd_lock_thread_count(THD *)
-{
- mysql_mutex_lock(&LOCK_thread_count);
-}
-
-/**
Close the socket used by this connection
@param thd THD object
@@ -899,6 +890,7 @@ THD::THD()
col_access=0;
is_slave_error= thread_specific_used= FALSE;
my_hash_clear(&handler_tables_hash);
+ my_hash_clear(&ull_hash);
tmp_table=0;
cuted_fields= 0L;
m_sent_row_count= 0L;
@@ -939,7 +931,6 @@ THD::THD()
net.vio=0;
net.buff= 0;
client_capabilities= 0; // minimalistic client
- ull=0;
system_thread= NON_SYSTEM_THREAD;
cleanup_done= abort_on_warning= 0;
peer_port= 0; // For SHOW PROCESSLIST
@@ -964,7 +955,7 @@ THD::THD()
/* Variables with default values */
proc_info="login";
where= THD::DEFAULT_WHERE;
- server_id = ::server_id;
+ variables.server_id = global_system_variables.server_id;
slave_net = 0;
m_command=COM_CONNECT;
*scramble= '\0';
@@ -997,7 +988,14 @@ THD::THD()
protocol_binary.init(this);
tablespace_op=FALSE;
- tmp= sql_rnd_with_mutex();
+
+ /*
+ Initialize the random generator. We call my_rnd() without a lock as
+ it's not really critical if two threads modifies the structure at the
+    it's not really critical if two threads modify the structure at the
+    same time. We ensure that we have a unique number for each thread
+    by adding the address of the stack.
+ tmp= (ulong) (my_rnd(&sql_rand) * 0xffffffff);
my_rnd_init(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id);
substitute_null_with_insert_id = FALSE;
thr_lock_info_init(&lock_info); /* safety: will be reset after start */
@@ -1204,11 +1202,16 @@ Sql_condition* THD::raise_condition(uint sql_errno,
if (handle_condition(sql_errno, sqlstate, level, msg, &cond))
DBUG_RETURN(cond);
- /* When simulating OOM, skip writing to error log to avoid mtr errors. */
- cond= DBUG_EVALUATE_IF(
- "simulate_out_of_memory",
- NULL,
- da->push_warning(this, sql_errno, sqlstate, level, msg));
+ /*
+ Avoid pushing a condition for fatal out of memory errors as this will
+ require memory allocation and therefore might fail. Non fatal out of
+ memory errors can occur if raised by SIGNAL/RESIGNAL statement.
+ */
+ if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY ||
+ sql_errno == ER_OUTOFMEMORY)))
+ {
+ cond= da->push_warning(this, sql_errno, sqlstate, level, msg);
+ }
if (level == Sql_condition::WARN_LEVEL_ERROR)
@@ -1256,8 +1259,8 @@ LEX_STRING *thd_make_lex_string(THD *thd, LEX_STRING *lex_str,
const char *str, unsigned int size,
int allocate_lex_string)
{
- return thd->make_lex_string(lex_str, str, size,
- (bool) allocate_lex_string);
+ return allocate_lex_string ? thd->make_lex_string(str, size)
+ : thd->make_lex_string(lex_str, str, size);
}
extern "C"
@@ -1272,6 +1275,26 @@ void thd_get_xid(const MYSQL_THD thd, MYSQL_XID *xid)
*xid = *(MYSQL_XID *) &thd->transaction.xid_state.xid;
}
+
+extern "C"
+my_time_t thd_TIME_to_gmt_sec(MYSQL_THD thd, const MYSQL_TIME *ltime,
+ unsigned int *errcode)
+{
+ Time_zone *tz= thd ? thd->variables.time_zone :
+ global_system_variables.time_zone;
+ return tz->TIME_to_gmt_sec(ltime, errcode);
+}
+
+
+extern "C"
+void thd_gmt_sec_to_TIME(MYSQL_THD thd, MYSQL_TIME *ltime, my_time_t t)
+{
+ Time_zone *tz= thd ? thd->variables.time_zone :
+ global_system_variables.time_zone;
+ tz->gmt_sec_to_TIME(ltime, t);
+}
+
+
#ifdef _WIN32
extern "C" THD *_current_thd_noinline(void)
{
@@ -1449,15 +1472,17 @@ void THD::cleanup(void)
#error xid_state in the cache should be replaced by the allocated value
}
#endif
- {
- transaction.xid_state.xa_state= XA_NOTR;
- trans_rollback(this);
- xid_cache_delete(&transaction.xid_state);
- }
- locked_tables_list.unlock_locked_tables(this);
mysql_ha_cleanup(this);
+ close_temporary_tables(this);
+
+ transaction.xid_state.xa_state= XA_NOTR;
+ trans_rollback(this);
+ xid_cache_delete(&transaction.xid_state);
+
+ locked_tables_list.unlock_locked_tables(this);
+
DBUG_ASSERT(open_tables == NULL);
/*
If the thread was in the middle of an ongoing transaction (rolled
@@ -1471,8 +1496,6 @@ void THD::cleanup(void)
if (global_read_lock.is_acquired())
global_read_lock.unlock_global_read_lock(this);
- /* All metadata locks must have been released by now. */
- DBUG_ASSERT(!mdl_context.has_locks());
if (user_connect)
{
decrease_user_connections(user_connect);
@@ -1487,17 +1510,12 @@ void THD::cleanup(void)
delete_dynamic(&user_var_events);
my_hash_free(&user_vars);
- close_temporary_tables(this);
sp_cache_clear(&sp_proc_cache);
sp_cache_clear(&sp_func_cache);
- if (ull)
- {
- mysql_mutex_lock(&LOCK_user_locks);
- item_user_lock_release(ull);
- mysql_mutex_unlock(&LOCK_user_locks);
- ull= NULL;
- }
+ mysql_ull_cleanup(this);
+ /* All metadata locks must have been released by now. */
+ DBUG_ASSERT(!mdl_context.has_locks());
apc_target.destroy();
cleanup_done=1;
@@ -1519,7 +1537,6 @@ THD::~THD()
/* Ensure that no one is using THD */
mysql_mutex_lock(&LOCK_thd_data);
- mysys_var=0; // Safety (shouldn't be needed)
mysql_mutex_unlock(&LOCK_thd_data);
/* Close connection */
@@ -1755,8 +1772,8 @@ void THD::awake(killed_state state_to_set)
mysql_mutex_unlock(mysys_var->current_mutex);
break;
}
+ my_sleep(1000000L / WAIT_FOR_KILL_TRY_TIMES);
}
- my_sleep(1000000L / WAIT_FOR_KILL_TRY_TIMES);
}
mysql_mutex_unlock(&mysys_var->mutex);
}
@@ -1973,6 +1990,19 @@ void THD::cleanup_after_query()
stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
auto_inc_intervals_in_cur_stmt_for_binlog.empty();
rand_used= 0;
+#ifndef EMBEDDED_LIBRARY
+ /*
+ Clean possible unused INSERT_ID events by current statement.
+    Clean up INSERT_ID events possibly left unused by the current statement.
+    is_update_query() is needed to ignore SET statements:
+      statements that don't update anything directly and don't
+      use stored functions. This is mostly necessary to ignore
+      statements in the binlog between SET INSERT_ID and the DML statement
+      that is intended to consume its event (there can be other
+ */
+ if ((rli_slave || rli_fake) && is_update_query(lex->sql_command))
+ auto_inc_intervals_forced.empty();
+#endif
}
/*
Forget the binlog stmt filter for the next query.
@@ -2008,30 +2038,6 @@ void THD::cleanup_after_query()
}
-/**
- Create a LEX_STRING in this connection.
-
- @param lex_str pointer to LEX_STRING object to be initialized
- @param str initializer to be copied into lex_str
- @param length length of str, in bytes
- @param allocate_lex_string if TRUE, allocate new LEX_STRING object,
- instead of using lex_str value
- @return NULL on failure, or pointer to the LEX_STRING object
-*/
-LEX_STRING *THD::make_lex_string(LEX_STRING *lex_str,
- const char* str, uint length,
- bool allocate_lex_string)
-{
- if (allocate_lex_string)
- if (!(lex_str= (LEX_STRING *)alloc_root(mem_root, sizeof(LEX_STRING))))
- return 0;
- if (!(lex_str->str= strmake_root(mem_root, str, length)))
- return 0;
- lex_str->length= length;
- return lex_str;
-}
-
-
/*
Convert a string to another character set
@@ -2202,7 +2208,7 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length)
key_length + 1);
if (!new_table)
{
- my_error(EE_OUTOFMEMORY, MYF(ME_BELL),
+ my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_FATALERROR),
ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1);
killed= KILL_CONNECTION;
return 0;
@@ -2668,7 +2674,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
bool string_results= FALSE, non_string_results= FALSE;
unit= u;
if ((uint) strlen(exchange->file_name) + NAME_LEN >= FN_REFLEN)
- strmake(path,exchange->file_name,FN_REFLEN-1);
+ strmake_buf(path,exchange->file_name);
write_cs= exchange->cs ? exchange->cs : &my_charset_bin;
@@ -2806,7 +2812,7 @@ int select_export::send_data(List<Item> &items)
set_if_smaller(estimated_bytes, UINT_MAX32);
if (cvt_str.realloc((uint32) estimated_bytes))
{
- my_error(ER_OUTOFMEMORY, MYF(0), (uint32) estimated_bytes);
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), (uint32) estimated_bytes);
goto err;
}
@@ -3242,42 +3248,13 @@ int select_exists_subselect::send_data(List<Item> &items)
int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
unit= u;
- List_iterator_fast<my_var> var_li(var_list);
- List_iterator_fast<Item> it(list);
- Item *item;
- my_var *mv;
- Item_func_set_user_var **suv;
if (var_list.elements != list.elements)
{
my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT,
ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0));
return 1;
- }
-
- /*
- Iterate over the destination variables and mark them as being
- updated in this query.
- We need to do this at JOIN::prepare time to ensure proper
- const detection of Item_func_get_user_var that is determined
- by the presence of Item_func_set_user_vars
- */
-
- suv= set_var_items= (Item_func_set_user_var **)
- sql_alloc(sizeof(Item_func_set_user_var *) * list.elements);
-
- while ((mv= var_li++) && (item= it++))
- {
- if (!mv->local)
- {
- *suv= new Item_func_set_user_var(mv->s, item);
- (*suv)->fix_fields(thd, 0);
- }
- else
- *suv= NULL;
- suv++;
- }
-
+ }
return 0;
}
@@ -3607,7 +3584,6 @@ int select_dumpvar::send_data(List<Item> &items)
List_iterator<Item> it(items);
Item *item;
my_var *mv;
- Item_func_set_user_var **suv;
DBUG_ENTER("select_dumpvar::send_data");
if (unit->offset_limit_cnt)
@@ -3620,19 +3596,20 @@ int select_dumpvar::send_data(List<Item> &items)
my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
DBUG_RETURN(1);
}
- for (suv= set_var_items; ((mv= var_li++) && (item= it++)); suv++)
+ while ((mv= var_li++) && (item= it++))
{
if (mv->local)
{
- DBUG_ASSERT(!*suv);
if (thd->spcont->set_variable(thd, mv->offset, &item))
DBUG_RETURN(1);
}
else
{
- DBUG_ASSERT(*suv);
- (*suv)->save_item_result(item);
- if ((*suv)->update())
+ Item_func_set_user_var *suv= new Item_func_set_user_var(mv->s, item);
+ suv->save_item_result(item);
+ if (suv->fix_fields(thd, 0))
+ DBUG_RETURN (1);
+ if (suv->update())
DBUG_RETURN (1);
}
}
@@ -4164,6 +4141,15 @@ extern "C" unsigned long thd_get_thread_id(const MYSQL_THD thd)
}
+/**
+ Check if THD socket is still connected.
+ */
+extern "C" int thd_is_connected(MYSQL_THD thd)
+{
+ return thd->is_connected();
+}
+
+
#ifdef INNODB_COMPATIBILITY_HOOKS
extern "C" const struct charset_info_st *thd_charset(MYSQL_THD thd)
{
@@ -4623,17 +4609,8 @@ void THD::set_query_and_id(char *query_arg, uint32 query_length_arg,
{
mysql_mutex_lock(&LOCK_thd_data);
set_query_inner(query_arg, query_length_arg, cs);
- query_id= new_query_id;
mysql_mutex_unlock(&LOCK_thd_data);
-}
-
-/** Assign a new value to thd->query_id. */
-
-void THD::set_query_id(query_id_t new_query_id)
-{
- mysql_mutex_lock(&LOCK_thd_data);
query_id= new_query_id;
- mysql_mutex_unlock(&LOCK_thd_data);
}
/** Assign a new value to thd->mysys_var. */
@@ -4667,6 +4644,8 @@ void THD::leave_locked_tables_mode()
/* Also ensure that we don't release metadata locks for open HANDLERs. */
if (handler_tables_hash.records)
mysql_ha_set_explicit_lock_duration(this);
+ if (ull_hash.records)
+ mysql_ull_set_explicit_lock_duration(this);
}
locked_tables_mode= LTM_NONE;
}
@@ -4803,9 +4782,14 @@ bool xid_cache_insert(XID *xid, enum xa_states xa_state)
bool xid_cache_insert(XID_STATE *xid_state)
{
mysql_mutex_lock(&LOCK_xid_cache);
- DBUG_ASSERT(my_hash_search(&xid_cache, xid_state->xid.key(),
- xid_state->xid.key_length())==0);
- my_bool res=my_hash_insert(&xid_cache, (uchar*)xid_state);
+ if (my_hash_search(&xid_cache, xid_state->xid.key(),
+ xid_state->xid.key_length()))
+ {
+ mysql_mutex_unlock(&LOCK_xid_cache);
+ my_error(ER_XAER_DUPID, MYF(0));
+ return true;
+ }
+ bool res= my_hash_insert(&xid_cache, (uchar*)xid_state);
mysql_mutex_unlock(&LOCK_xid_cache);
return res;
}
@@ -5252,6 +5236,46 @@ int THD::decide_logging_format(TABLE_LIST *tables)
DBUG_PRINT("info", ("decision: logging in %s format",
is_current_stmt_binlog_format_row() ?
"ROW" : "STATEMENT"));
+
+ if (variables.binlog_format == BINLOG_FORMAT_ROW &&
+ (lex->sql_command == SQLCOM_UPDATE ||
+ lex->sql_command == SQLCOM_UPDATE_MULTI ||
+ lex->sql_command == SQLCOM_DELETE ||
+ lex->sql_command == SQLCOM_DELETE_MULTI))
+ {
+ String table_names;
+ /*
+ Generate a warning for UPDATE/DELETE statements that modify a
+ BLACKHOLE table, as row events are not logged in row format.
+ */
+ for (TABLE_LIST *table= tables; table; table= table->next_global)
+ {
+ if (table->placeholder())
+ continue;
+ if (table->table->file->ht->db_type == DB_TYPE_BLACKHOLE_DB &&
+ table->lock_type >= TL_WRITE_ALLOW_WRITE)
+ {
+ table_names.append(table->table_name);
+ table_names.append(",");
+ }
+ }
+ if (!table_names.is_empty())
+ {
+ bool is_update= (lex->sql_command == SQLCOM_UPDATE ||
+ lex->sql_command == SQLCOM_UPDATE_MULTI);
+ /*
+ Replace the last ',' with '.' for table_names
+ */
+ table_names.replace(table_names.length()-1, 1, ".", 1);
+ push_warning_printf(this, Sql_condition::WARN_LEVEL_WARN,
+ ER_UNKNOWN_ERROR,
+ "Row events are not logged for %s statements "
+ "that modify BLACKHOLE tables in row format. "
+ "Table(s): '%-.192s'",
+ is_update ? "UPDATE" : "DELETE",
+ table_names.c_ptr());
+ }
+ }
}
#ifndef DBUG_OFF
else
@@ -5510,7 +5534,7 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
size_t const len= pack_row(table, cols, row_data, record);
Rows_log_event* const ev=
- binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
+ binlog_prepare_pending_rows_event(table, variables.server_id, cols, colcnt,
len, is_trans,
static_cast<Write_rows_log_event*>(0));
@@ -5554,7 +5578,7 @@ int THD::binlog_update_row(TABLE* table, bool is_trans,
#endif
Rows_log_event* const ev=
- binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
+ binlog_prepare_pending_rows_event(table, variables.server_id, cols, colcnt,
before_size + after_size, is_trans,
static_cast<Update_rows_log_event*>(0));
@@ -5585,7 +5609,7 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
size_t const len= pack_row(table, cols, row_data, record);
Rows_log_event* const ev=
- binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
+ binlog_prepare_pending_rows_event(table, variables.server_id, cols, colcnt,
len, is_trans,
static_cast<Delete_rows_log_event*>(0));
diff --git a/sql/sql_class.h b/sql/sql_class.h
index e38196c2aac..889028ce8e5 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009-2013, Monty Program Ab & SkySQL Ab
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,7 +13,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifndef SQL_CLASS_INCLUDED
#define SQL_CLASS_INCLUDED
@@ -59,6 +59,7 @@ void set_thd_stage_info(void *thd,
class Reprepare_observer;
class Relay_log_info;
+class Rpl_filter;
class Query_log_event;
class Load_log_event;
@@ -69,7 +70,6 @@ class Lex_input_stream;
class Parser_state;
class Rows_log_event;
class Sroutine_hash_entry;
-class User_level_lock;
class user_var_entry;
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
@@ -86,38 +86,38 @@ enum enum_mark_columns
enum enum_filetype { FILETYPE_CSV, FILETYPE_XML };
/* Bits for different SQL modes modes (including ANSI mode) */
-#define MODE_REAL_AS_FLOAT 1
-#define MODE_PIPES_AS_CONCAT 2
-#define MODE_ANSI_QUOTES 4
-#define MODE_IGNORE_SPACE 8
-#define MODE_IGNORE_BAD_TABLE_OPTIONS 16
-#define MODE_ONLY_FULL_GROUP_BY 32
-#define MODE_NO_UNSIGNED_SUBTRACTION 64
-#define MODE_NO_DIR_IN_CREATE 128
-#define MODE_POSTGRESQL 256
-#define MODE_ORACLE 512
-#define MODE_MSSQL 1024
-#define MODE_DB2 2048
-#define MODE_MAXDB 4096
-#define MODE_NO_KEY_OPTIONS 8192
-#define MODE_NO_TABLE_OPTIONS 16384
-#define MODE_NO_FIELD_OPTIONS 32768
-#define MODE_MYSQL323 65536L
-#define MODE_MYSQL40 (MODE_MYSQL323*2)
-#define MODE_ANSI (MODE_MYSQL40*2)
-#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2)
-#define MODE_NO_BACKSLASH_ESCAPES (MODE_NO_AUTO_VALUE_ON_ZERO*2)
-#define MODE_STRICT_TRANS_TABLES (MODE_NO_BACKSLASH_ESCAPES*2)
-#define MODE_STRICT_ALL_TABLES (MODE_STRICT_TRANS_TABLES*2)
-#define MODE_NO_ZERO_IN_DATE (MODE_STRICT_ALL_TABLES*2)
-#define MODE_NO_ZERO_DATE (MODE_NO_ZERO_IN_DATE*2)
-#define MODE_INVALID_DATES (MODE_NO_ZERO_DATE*2)
-#define MODE_ERROR_FOR_DIVISION_BY_ZERO (MODE_INVALID_DATES*2)
-#define MODE_TRADITIONAL (MODE_ERROR_FOR_DIVISION_BY_ZERO*2)
-#define MODE_NO_AUTO_CREATE_USER (MODE_TRADITIONAL*2)
-#define MODE_HIGH_NOT_PRECEDENCE (MODE_NO_AUTO_CREATE_USER*2)
-#define MODE_NO_ENGINE_SUBSTITUTION (MODE_HIGH_NOT_PRECEDENCE*2)
-#define MODE_PAD_CHAR_TO_FULL_LENGTH (ULL(1) << 31)
+#define MODE_REAL_AS_FLOAT (1ULL << 0)
+#define MODE_PIPES_AS_CONCAT (1ULL << 1)
+#define MODE_ANSI_QUOTES (1ULL << 2)
+#define MODE_IGNORE_SPACE (1ULL << 3)
+#define MODE_IGNORE_BAD_TABLE_OPTIONS (1ULL << 4)
+#define MODE_ONLY_FULL_GROUP_BY (1ULL << 5)
+#define MODE_NO_UNSIGNED_SUBTRACTION (1ULL << 6)
+#define MODE_NO_DIR_IN_CREATE (1ULL << 7)
+#define MODE_POSTGRESQL (1ULL << 8)
+#define MODE_ORACLE (1ULL << 9)
+#define MODE_MSSQL (1ULL << 10)
+#define MODE_DB2 (1ULL << 11)
+#define MODE_MAXDB (1ULL << 12)
+#define MODE_NO_KEY_OPTIONS (1ULL << 13)
+#define MODE_NO_TABLE_OPTIONS (1ULL << 14)
+#define MODE_NO_FIELD_OPTIONS (1ULL << 15)
+#define MODE_MYSQL323 (1ULL << 16)
+#define MODE_MYSQL40 (1ULL << 17)
+#define MODE_ANSI (1ULL << 18)
+#define MODE_NO_AUTO_VALUE_ON_ZERO (1ULL << 19)
+#define MODE_NO_BACKSLASH_ESCAPES (1ULL << 20)
+#define MODE_STRICT_TRANS_TABLES (1ULL << 21)
+#define MODE_STRICT_ALL_TABLES (1ULL << 22)
+#define MODE_NO_ZERO_IN_DATE (1ULL << 23)
+#define MODE_NO_ZERO_DATE (1ULL << 24)
+#define MODE_INVALID_DATES (1ULL << 25)
+#define MODE_ERROR_FOR_DIVISION_BY_ZERO (1ULL << 26)
+#define MODE_TRADITIONAL (1ULL << 27)
+#define MODE_NO_AUTO_CREATE_USER (1ULL << 28)
+#define MODE_HIGH_NOT_PRECEDENCE (1ULL << 29)
+#define MODE_NO_ENGINE_SUBSTITUTION (1ULL << 30)
+#define MODE_PAD_CHAR_TO_FULL_LENGTH (1ULL << 31)
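
(Editorial aside: spelling every flag as an explicit 1ULL shift keeps the whole MODE_* set, including bit 31, in unsigned 64-bit arithmetic. A minimal sketch of testing a flag, assuming the usual thd->variables.sql_mode member:)

    if (thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
    {
      /* backslash is an ordinary character inside string literals */
    }
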
extern char internal_table_name[2];
extern char empty_c_string[1];
@@ -236,8 +236,8 @@ public:
const char *name;
enum drop_type type;
bool drop_if_exists;
- Alter_drop(enum drop_type par_type,const char *par_name)
- :name(par_name), type(par_type)
+ Alter_drop(enum drop_type par_type,const char *par_name, bool par_exists)
+ :name(par_name), type(par_type), drop_if_exists(par_exists)
{
DBUG_ASSERT(par_name != NULL);
}
@@ -274,20 +274,23 @@ public:
LEX_STRING name;
engine_option_value *option_list;
bool generated;
+ bool create_if_not_exists;
Key(enum Keytype type_par, const LEX_STRING &name_arg,
KEY_CREATE_INFO *key_info_arg,
bool generated_arg, List<Key_part_spec> &cols,
- engine_option_value *create_opt)
+ engine_option_value *create_opt, bool if_not_exists_opt)
:type(type_par), key_create_info(*key_info_arg), columns(cols),
- name(name_arg), option_list(create_opt), generated(generated_arg)
+ name(name_arg), option_list(create_opt), generated(generated_arg),
+ create_if_not_exists(if_not_exists_opt)
{}
Key(enum Keytype type_par, const char *name_arg, size_t name_len_arg,
KEY_CREATE_INFO *key_info_arg, bool generated_arg,
List<Key_part_spec> &cols,
- engine_option_value *create_opt)
+ engine_option_value *create_opt, bool if_not_exists_opt)
:type(type_par), key_create_info(*key_info_arg), columns(cols),
- option_list(create_opt), generated(generated_arg)
+ option_list(create_opt), generated(generated_arg),
+ create_if_not_exists(if_not_exists_opt)
{
name.str= (char *)name_arg;
name.length= name_len_arg;
@@ -319,8 +322,10 @@ public:
Foreign_key(const LEX_STRING &name_arg, List<Key_part_spec> &cols,
const LEX_STRING &ref_db_arg, const LEX_STRING &ref_table_arg,
List<Key_part_spec> &ref_cols,
- uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg)
- :Key(FOREIGN_KEY, name_arg, &default_key_create_info, 0, cols, NULL),
+ uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg,
+ bool if_not_exists_opt)
+ :Key(FOREIGN_KEY, name_arg, &default_key_create_info, 0, cols, NULL,
+ if_not_exists_opt),
ref_db(ref_db_arg), ref_table(ref_table_arg), ref_columns(ref_cols),
delete_opt(delete_opt_arg), update_opt(update_opt_arg),
match_opt(match_opt_arg)
@@ -521,7 +526,11 @@ typedef struct system_variables
ulong net_write_timeout;
ulong optimizer_prune_level;
ulong optimizer_search_depth;
+ ulong optimizer_selectivity_sampling_limit;
+ ulong optimizer_use_condition_selectivity;
ulong use_stat_tables;
+ ulong histogram_size;
+ ulong histogram_type;
ulong preload_buff_size;
ulong profiling_history_size;
ulong read_buff_size;
@@ -551,12 +560,19 @@ typedef struct system_variables
ulong tx_isolation;
ulong updatable_views_with_limit;
int max_user_connections;
+ ulong server_id;
/**
In slave thread we need to know in behalf of which
thread the query is being run to replicate temp tables properly
*/
my_thread_id pseudo_thread_id;
/**
+ When replicating an event group with GTID, keep these values around so
+ slave binlog can receive the same GTID as the original.
+ */
+ uint32 gtid_domain_id;
+ uint64 gtid_seq_no;
+ /**
Place holders to store Multi-source variables in sys_var.cc during
update and show of variables.
*/
@@ -607,6 +623,9 @@ typedef struct system_variables
ulong wt_timeout_long, wt_deadlock_search_depth_long;
double long_query_time_double;
+
+ my_bool pseudo_slave_mode;
+
} SV;
/**
@@ -1598,6 +1617,9 @@ public:
/* Slave applier execution context */
Relay_log_info* rli_slave;
+  /* Used by the slave SQL thread */
+ Rpl_filter* rpl_filter;
+
void reset_for_next_command(bool calculate_userstat);
/*
Constant for THD::where initialization in the beginning of every query.
@@ -1649,6 +1671,7 @@ public:
Protects THD data accessed from other threads:
- thd->query and thd->query_length (used by SHOW ENGINE
INNODB STATUS and SHOW PROCESSLIST
+ - thd->db and thd->db_length (used in SHOW PROCESSLIST)
- thd->mysys_var (used by KILL statement and shutdown).
Is locked when THD is deleted.
*/
@@ -1722,11 +1745,11 @@ public:
HASH handler_tables_hash;
/*
- One thread can hold up to one named user-level lock. This variable
- points to a lock object if the lock is present. See item_func.cc and
+ A thread can hold named user-level locks. This variable
+ contains granted tickets if a lock is present. See item_func.cc and
chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK.
*/
- User_level_lock *ull;
+ HASH ull_hash;
#ifndef DBUG_OFF
uint dbug_sentry; // watch out for memory corruption
#endif
@@ -1739,7 +1762,6 @@ private:
enum enum_server_command m_command;
public:
- uint32 server_id;
uint32 file_id; // for LOAD DATA INFILE
/* remote (peer) port */
uint16 peer_port;
@@ -1816,7 +1838,7 @@ public:
MY_BITMAP const* cols, size_t colcnt,
const uchar *old_data, const uchar *new_data);
- void set_server_id(uint32 sid) { server_id = sid; }
+ void set_server_id(uint32 sid) { variables.server_id = sid; }
/*
Member functions to handle pending event for row-level logging.
@@ -2803,9 +2825,21 @@ public:
return alloc_root(&transaction.mem_root,size);
}
- LEX_STRING *make_lex_string(LEX_STRING *lex_str,
- const char* str, uint length,
- bool allocate_lex_string);
+ LEX_STRING *make_lex_string(LEX_STRING *lex_str, const char* str, uint length)
+ {
+ if (!(lex_str->str= strmake_root(mem_root, str, length)))
+ return 0;
+ lex_str->length= length;
+ return lex_str;
+ }
+
+ LEX_STRING *make_lex_string(const char* str, uint length)
+ {
+ LEX_STRING *lex_str;
+ if (!(lex_str= (LEX_STRING *)alloc_root(mem_root, sizeof(LEX_STRING))))
+ return 0;
+ return make_lex_string(lex_str, str, length);
+ }
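
(Editorial aside: a minimal sketch of the two make_lex_string() forms that replace the old three-argument version; the first fills a caller-provided LEX_STRING, the second allocates one on the THD mem_root. The literal is just an example.)

    LEX_STRING local;
    thd->make_lex_string(&local, "abc", 3);              /* fill caller's struct */
    LEX_STRING *alloced= thd->make_lex_string("abc", 3); /* allocated on thd->mem_root */
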
bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
const char *from, uint from_length,
@@ -3109,6 +3143,7 @@ public:
bool set_db(const char *new_db, size_t new_db_len)
{
bool result;
+ mysql_mutex_lock(&LOCK_thd_data);
/* Do not reallocate memory if current chunk is big enough. */
if (db && new_db && db_length >= new_db_len)
memcpy(db, new_db, new_db_len+1);
@@ -3122,6 +3157,7 @@ public:
}
db_length= db ? new_db_len : 0;
result= new_db && !db;
+ mysql_mutex_unlock(&LOCK_thd_data);
#ifdef HAVE_PSI_THREAD_INTERFACE
if (result)
PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len);
@@ -3142,11 +3178,16 @@ public:
*/
void reset_db(char *new_db, size_t new_db_len)
{
- db= new_db;
- db_length= new_db_len;
+ if (new_db != db || new_db_len != db_length)
+ {
+ mysql_mutex_lock(&LOCK_thd_data);
+ db= new_db;
+ db_length= new_db_len;
+ mysql_mutex_unlock(&LOCK_thd_data);
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len);
+ PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len);
#endif
+ }
}
/*
Copy the current database to the argument. Use the current arena to
@@ -3287,7 +3328,10 @@ public:
{ set_query(CSET_STRING()); }
void set_query_and_id(char *query_arg, uint32 query_length_arg,
CHARSET_INFO *cs, query_id_t new_query_id);
- void set_query_id(query_id_t new_query_id);
+ void set_query_id(query_id_t new_query_id)
+ {
+ query_id= new_query_id;
+ }
void set_open_tables(TABLE *open_tables_arg)
{
mysql_mutex_lock(&LOCK_thd_data);
@@ -3574,6 +3618,7 @@ public:
#else
void begin_dataset() {}
#endif
+ virtual void update_used_tables() {}
};
@@ -4204,6 +4249,7 @@ class Unique :public Sql_alloc
uint size;
uint full_size;
uint min_dupl_count; /* always 0 for unions, > 0 for intersections */
+ bool with_counters;
bool merge(TABLE *table, uchar *buff, bool without_last_merge);
@@ -4347,6 +4393,7 @@ public:
return updated;
}
virtual void abort_result_set();
+ void update_used_tables();
};
class my_var : public Sql_alloc {
@@ -4370,7 +4417,6 @@ public:
class select_dumpvar :public select_result_interceptor {
ha_rows row_count;
- Item_func_set_user_var **set_var_items;
public:
List<my_var> var_list;
select_dumpvar() { var_list.empty(); row_count= 0;}
@@ -4532,6 +4578,11 @@ inline bool add_order_to_list(THD *thd, Item *item, bool asc)
return thd->lex->current_select->add_order_to_list(thd, item, asc);
}
+inline bool add_gorder_to_list(THD *thd, Item *item, bool asc)
+{
+ return thd->lex->current_select->add_gorder_to_list(thd, item, asc);
+}
+
inline bool add_group_to_list(THD *thd, Item *item, bool asc)
{
return thd->lex->current_select->add_group_to_list(thd, item, asc);
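
The two make_lex_string() overloads added to THD above replace the old three-argument out-of-line version: the first fills a caller-provided LEX_STRING from the THD mem_root, the second also allocates the LEX_STRING itself. A minimal sketch of how a caller would use them (the helper function and its arguments are hypothetical, not part of the patch):

    // Hypothetical helper showing both overloads; 'thd' is the current THD.
    static bool copy_names(THD *thd, LEX_STRING *db_name, LEX_STRING **tbl_name)
    {
      if (!thd->make_lex_string(db_name, "test", 4))
        return true;                               // out of memory
      if (!(*tbl_name= thd->make_lex_string("t1", 2)))
        return true;                               // out of memory
      return false;
    }
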
diff --git a/sql/sql_cmd.h b/sql/sql_cmd.h
index f21fc399b94..de7ef5fc832 100644
--- a/sql/sql_cmd.h
+++ b/sql/sql_cmd.h
@@ -92,7 +92,7 @@ enum enum_sql_command {
SQLCOM_SHOW_USER_STATS, SQLCOM_SHOW_TABLE_STATS, SQLCOM_SHOW_INDEX_STATS,
SQLCOM_SHOW_CLIENT_STATS,
SQLCOM_SLAVE_ALL_START, SQLCOM_SLAVE_ALL_STOP,
- SQLCOM_SHOW_EXPLAIN,
+ SQLCOM_SHOW_EXPLAIN, SQLCOM_SHUTDOWN,
/*
When a command is added here, be sure it's also added in mysqld.cc
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 2a5fcf59efc..f14c43d4c54 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2007, 2012, Oracle and/or its affiliates.
- Copyright (c) 2008, 2012, Monty Program Ab
+ Copyright (c) 2008, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -237,7 +237,7 @@ void time_out_user_resource_limits(THD *thd, USER_CONN *uc)
DBUG_ENTER("time_out_user_resource_limits");
/* If more than a hour since last check, reset resource checking */
- if (check_time - uc->reset_utime >= LL(3600000000))
+ if (check_time - uc->reset_utime >= 3600000000ULL)
{
uc->questions=0;
uc->updates=0;
@@ -446,7 +446,7 @@ void init_user_stats(USER_STATS *user_stats,
memcpy(user_stats->user, user, user_length);
user_stats->user[user_length]= 0;
user_stats->user_name_length= user_length;
- strmake(user_stats->priv_user, priv_user, sizeof(user_stats->priv_user)-1);
+ strmake_buf(user_stats->priv_user, priv_user);
user_stats->total_connections= total_connections;
user_stats->concurrent_connections= concurrent_connections;
@@ -1299,7 +1299,7 @@ bool thd_prepare_connection(THD *thd)
bool rc;
lex_start(thd);
rc= login_connection(thd);
- MYSQL_AUDIT_NOTIFY_CONNECTION_CONNECT(thd);
+ mysql_audit_notify_connection_connect(thd);
if (rc)
return rc;
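
Several hunks in this patch replace strmake(dst, src, sizeof(dst)-1) with strmake_buf(dst, src). Assuming strmake_buf is the usual convenience macro over strmake() that derives the buffer size from the destination array, the substitution is mechanical; a sketch of the idea (the macro definition below is an assumption, not taken from this diff):

    // Assumed shape of the helper: only valid when 'dst' is a real char
    // array, so sizeof(dst) yields the buffer size rather than a pointer size.
    #define strmake_buf(dst, src) strmake((dst), (src), sizeof(dst) - 1)

    char priv_user[USERNAME_LENGTH + 1];
    strmake_buf(priv_user, user);   // was: strmake(priv_user, user, sizeof(priv_user)-1)
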
diff --git a/sql/sql_const.h b/sql/sql_const.h
index ec91fd02289..4ad39bad14a 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -41,6 +41,7 @@
#define MAX_CONNECTION_NAME NAME_LEN
#define MAX_MBWIDTH 3 /* Max multibyte sequence */
+#define MAX_FILENAME_MBWIDTH 5
#define MAX_FIELD_CHARLENGTH 255
#define MAX_FIELD_VARCHARLENGTH 65535
#define MAX_FIELD_BLOBLENGTH UINT_MAX32 /* cf field_blob::get_length() */
@@ -92,7 +93,7 @@
#define FIELD_NR_MASK 16383 /* To get fieldnumber */
#define FERR -1 /* Error from my_functions */
#define CREATE_MODE 0 /* Default mode on new files */
-#define NAMES_SEP_CHAR '\377' /* Char to sep. names */
+#define NAMES_SEP_CHAR 255 /* Char to sep. names */
#define READ_RECORD_BUFFER (uint) (IO_SIZE*8) /* Pointer_buffer_size */
#define DISK_BUFFER_SIZE (uint) (IO_SIZE*16) /* Size of diskbuffer */
diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc
index 6a664e4d20b..c1820c0187e 100644
--- a/sql/sql_cursor.cc
+++ b/sql/sql_cursor.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation /* gcc class implementation */
#endif
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 38df88076e6..9e30ed4513e 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -13,7 +13,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* create and drop of databases */
@@ -49,15 +49,12 @@
#define MAX_DROP_TABLE_Q_LEN 1024
-const char *del_exts[]= {".frm", ".BAK", ".TMD",".opt", NullS};
+const char *del_exts[]= {".BAK", ".TMD",".opt", NullS};
static TYPELIB deletable_extentions=
{array_elements(del_exts)-1,"del_exts", del_exts, NULL};
-static bool find_db_tables_and_rm_known_files(THD *thd, MY_DIR *dirp,
- const char *db,
- const char *path,
- TABLE_LIST **tables,
- bool *found_other_files);
+static bool find_db_tables_and_rm_known_files(THD *, MY_DIR *, char *,
+ const char *, TABLE_LIST **);
long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path);
static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error);
@@ -760,7 +757,6 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
char path[FN_REFLEN + 16];
MY_DIR *dirp;
uint length;
- bool found_other_files= false;
TABLE_LIST *tables= NULL;
TABLE_LIST *table;
Drop_table_error_handler err_handler;
@@ -792,8 +788,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
}
}
- if (find_db_tables_and_rm_known_files(thd, dirp, db, path, &tables,
- &found_other_files))
+ if (find_db_tables_and_rm_known_files(thd, dirp, db, path, &tables))
goto exit;
/*
@@ -835,11 +830,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
mysql_ha_rm_tables(thd, tables);
for (table= tables; table; table= table->next_local)
- {
- tdc_remove_table(thd, TDC_RT_REMOVE_ALL, table->db, table->table_name,
- false);
deleted_tables++;
- }
thd->push_internal_handler(&err_handler);
if (!thd->killed &&
@@ -878,10 +869,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
If the directory is a symbolic link, remove the link first, then
remove the directory the symbolic link pointed at
*/
- if (found_other_files)
- my_error(ER_DB_DROP_RMDIR, MYF(0), path, EEXIST);
- else
- error= rm_dir_w_symlink(path, true);
+ error= rm_dir_w_symlink(path, true);
}
thd->pop_internal_handler();
@@ -937,16 +925,10 @@ update_binlog:
for (tbl= tables; tbl; tbl= tbl->next_local)
{
uint tbl_name_len;
- bool exists;
char quoted_name[FN_REFLEN+3];
// Only write drop table to the binlog for tables that no longer exist.
- if (check_if_table_exists(thd, tbl, 0, &exists))
- {
- error= true;
- goto exit;
- }
- if (exists)
+ if (ha_table_exists(thd, tbl->db, tbl->table_name))
continue;
my_snprintf(quoted_name, sizeof(quoted_name), "%`s", tbl->table_name);
@@ -998,31 +980,66 @@ exit:
static bool find_db_tables_and_rm_known_files(THD *thd, MY_DIR *dirp,
- const char *db,
+ char *dbname,
const char *path,
- TABLE_LIST **tables,
- bool *found_other_files)
+ TABLE_LIST **tables)
{
char filePath[FN_REFLEN];
+ LEX_STRING db= { dbname, strlen(dbname) };
TABLE_LIST *tot_list=0, **tot_list_next_local, **tot_list_next_global;
DBUG_ENTER("find_db_tables_and_rm_known_files");
DBUG_PRINT("enter",("path: %s", path));
+ /* first, get the list of tables */
+ Dynamic_array<LEX_STRING*> files(dirp->number_of_files);
+ Discovered_table_list tl(thd, &files, &null_lex_str);
+ if (ha_discover_table_names(thd, &db, dirp, &tl, true))
+ DBUG_RETURN(1);
+
+ /* Now put the tables in the list */
tot_list_next_local= tot_list_next_global= &tot_list;
+ for (size_t idx=0; idx < files.elements(); idx++)
+ {
+ LEX_STRING *table= files.at(idx);
+
+ /* Drop the table nicely */
+ TABLE_LIST *table_list=(TABLE_LIST*)thd->calloc(sizeof(*table_list));
+
+ if (!table_list)
+ DBUG_RETURN(true);
+ table_list->db= db.str;
+ table_list->db_length= db.length;
+ table_list->table_name= table->str;
+ table_list->table_name_length= table->length;
+ table_list->open_type= OT_BASE_ONLY;
+
+ /* To be able to correctly look up the table in the table cache. */
+ if (lower_case_table_names)
+ table_list->table_name_length= my_casedn_str(files_charset_info,
+ table_list->table_name);
+
+ table_list->alias= table_list->table_name; // If lower_case_table_names=2
+ table_list->mdl_request.init(MDL_key::TABLE, table_list->db,
+ table_list->table_name, MDL_EXCLUSIVE,
+ MDL_TRANSACTION);
+ /* Link into list */
+ (*tot_list_next_local)= table_list;
+ (*tot_list_next_global)= table_list;
+ tot_list_next_local= &table_list->next_local;
+ tot_list_next_global= &table_list->next_global;
+ }
+ *tables= tot_list;
+
+ /* finally, delete all non-table files */
for (uint idx=0 ;
- idx < (uint) dirp->number_off_files && !thd->killed ;
+ idx < (uint) dirp->number_of_files && !thd->killed ;
idx++)
{
FILEINFO *file=dirp->dir_entry+idx;
char *extension;
DBUG_PRINT("info",("Examining: %s", file->name));
- /* skiping . and .. */
- if (file->name[0] == '.' && (!file->name[1] ||
- (file->name[1] == '.' && !file->name[2])))
- continue;
-
if (file->name[0] == 'a' && file->name[1] == 'r' &&
file->name[2] == 'c' && file->name[3] == '\0')
{
@@ -1039,59 +1056,12 @@ static bool find_db_tables_and_rm_known_files(THD *thd, MY_DIR *dirp,
DBUG_PRINT("my",("Archive subdir found: %s", newpath));
if ((mysql_rm_arc_files(thd, new_dirp, newpath)) < 0)
DBUG_RETURN(true);
- continue;
}
- *found_other_files= true;
continue;
}
if (!(extension= strrchr(file->name, '.')))
extension= strend(file->name);
- if (find_type(extension, &deletable_extentions, FIND_TYPE_NO_PREFIX) <= 0)
- {
- if (find_type(extension, ha_known_exts(), FIND_TYPE_NO_PREFIX) <= 0)
- *found_other_files= true;
- continue;
- }
- /* just for safety we use files_charset_info */
- if (db && !my_strcasecmp(files_charset_info,
- extension, reg_ext))
- {
- /* Drop the table nicely */
- *extension= 0; // Remove extension
- TABLE_LIST *table_list=(TABLE_LIST*)
- thd->calloc(sizeof(*table_list) +
- strlen(db) + 1 +
- MYSQL50_TABLE_NAME_PREFIX_LENGTH +
- strlen(file->name) + 1);
-
- if (!table_list)
- DBUG_RETURN(true);
- table_list->db= (char*) (table_list+1);
- table_list->db_length= strmov(table_list->db, db) - table_list->db;
- table_list->table_name= table_list->db + table_list->db_length + 1;
- table_list->table_name_length= filename_to_tablename(file->name,
- table_list->table_name,
- MYSQL50_TABLE_NAME_PREFIX_LENGTH +
- strlen(file->name) + 1);
- table_list->open_type= OT_BASE_ONLY;
-
- /* To be able to correctly look up the table in the table cache. */
- if (lower_case_table_names)
- table_list->table_name_length= my_casedn_str(files_charset_info,
- table_list->table_name);
-
- table_list->alias= table_list->table_name; // If lower_case_table_names=2
- table_list->internal_tmp_table= is_prefix(file->name, tmp_file_prefix);
- table_list->mdl_request.init(MDL_key::TABLE, table_list->db,
- table_list->table_name, MDL_EXCLUSIVE,
- MDL_TRANSACTION);
- /* Link into list */
- (*tot_list_next_local)= table_list;
- (*tot_list_next_global)= table_list;
- tot_list_next_local= &table_list->next_local;
- tot_list_next_global= &table_list->next_global;
- }
- else
+ if (find_type(extension, &deletable_extentions, FIND_TYPE_NO_PREFIX) > 0)
{
strxmov(filePath, path, "/", file->name, NullS);
/*
@@ -1106,7 +1076,7 @@ static bool find_db_tables_and_rm_known_files(THD *thd, MY_DIR *dirp,
}
}
}
- *tables= tot_list;
+
DBUG_RETURN(false);
}
@@ -1190,18 +1160,13 @@ long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path)
DBUG_PRINT("enter", ("path: %s", org_path));
for (uint idx=0 ;
- idx < (uint) dirp->number_off_files && !thd->killed ;
+ idx < (uint) dirp->number_of_files && !thd->killed ;
idx++)
{
FILEINFO *file=dirp->dir_entry+idx;
char *extension, *revision;
DBUG_PRINT("info",("Examining: %s", file->name));
- /* skiping . and .. */
- if (file->name[0] == '.' && (!file->name[1] ||
- (file->name[1] == '.' && !file->name[2])))
- continue;
-
extension= fn_ext(file->name);
if (extension[0] != '.' ||
extension[1] != 'f' || extension[2] != 'r' ||
@@ -1688,7 +1653,7 @@ bool mysql_upgrade_db(THD *thd, LEX_STRING *old_db)
/* Step2: Move tables to the new database */
if ((dirp = my_dir(path,MYF(MY_DONT_SORT))))
{
- uint nfiles= (uint) dirp->number_off_files;
+ uint nfiles= (uint) dirp->number_of_files;
for (uint idx=0 ; idx < nfiles && !thd->killed ; idx++)
{
FILEINFO *file= dirp->dir_entry + idx;
@@ -1779,17 +1744,15 @@ bool mysql_upgrade_db(THD *thd, LEX_STRING *old_db)
if ((dirp = my_dir(path,MYF(MY_DONT_SORT))))
{
- uint nfiles= (uint) dirp->number_off_files;
+ uint nfiles= (uint) dirp->number_of_files;
for (uint idx=0 ; idx < nfiles ; idx++)
{
FILEINFO *file= dirp->dir_entry + idx;
char oldname[FN_REFLEN + 1], newname[FN_REFLEN + 1];
DBUG_PRINT("info",("Examining: %s", file->name));
- /* skiping . and .. and MY_DB_OPT_FILE */
- if ((file->name[0] == '.' &&
- (!file->name[1] || (file->name[1] == '.' && !file->name[2]))) ||
- !my_strcasecmp(files_charset_info, file->name, MY_DB_OPT_FILE))
+ /* skipping MY_DB_OPT_FILE */
+ if (!my_strcasecmp(files_charset_info, file->name, MY_DB_OPT_FILE))
continue;
/* pass empty file name, and file->name as extension to avoid encoding */
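
In the rewritten find_db_tables_and_rm_known_files() above, DROP DATABASE no longer scans for .frm files; it asks the storage engines for the table list via discovery and only afterwards removes the leftover non-table files. A reduced sketch of the enumeration pattern, using the Dynamic_array/Discovered_table_list interfaces exactly as they appear in the hunk (error handling trimmed):

    // Sketch of the discovery-based enumeration; 'db' is the database
    // being dropped and 'dirp' its directory listing.
    Dynamic_array<LEX_STRING*> files(dirp->number_of_files);
    Discovered_table_list tl(thd, &files, &null_lex_str);
    if (ha_discover_table_names(thd, &db, dirp, &tl, true))
      return true;                          // discovery failed
    for (size_t i= 0; i < files.elements(); i++)
    {
      LEX_STRING *table= files.at(i);
      // build a TABLE_LIST entry for 'table', as in the hunk above
    }
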
diff --git a/sql/sql_db.h b/sql/sql_db.h
index 1f447c11a52..62d379c515d 100644
--- a/sql/sql_db.h
+++ b/sql/sql_db.h
@@ -19,7 +19,6 @@
#include "hash.h" /* HASH */
class THD;
-typedef struct st_ha_create_information HA_CREATE_INFO;
int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent);
bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create);
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index d0a83eac189..300f12c5971 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -682,7 +682,7 @@ multi_delete::initialize_tables(JOIN *join)
tab;
tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS))
{
- if (tab->table->map & tables_to_delete_from)
+ if (!tab->bush_children && tab->table->map & tables_to_delete_from)
{
/* We are going to delete from this table */
TABLE *tbl=walk->table=tab->table;
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 5997aa03cb0..d9dd538f96d 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -87,7 +87,16 @@ mysql_handle_derived(LEX *lex, uint phases)
sl && !res;
sl= sl->next_select_in_list())
{
- for (TABLE_LIST *cursor= sl->get_table_list();
+ TABLE_LIST *cursor= sl->get_table_list();
+ /*
+ DT_MERGE_FOR_INSERT is not needed for views/derived tables inside
+ subqueries. Views and derived tables of subqueries should be
+ processed normally.
+ */
+ if (phases == DT_MERGE_FOR_INSERT &&
+ cursor && cursor->top_table()->select_lex != &lex->select_lex)
+ continue;
+ for (;
cursor && !res;
cursor= cursor->next_local)
{
@@ -812,8 +821,7 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived)
result->tmp_table_param.start_recinfo,
&result->tmp_table_param.recinfo,
(unit->first_select()->options |
- thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS),
- thd->variables.big_tables))
+ thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS)))
return(TRUE);
}
if (open_tmp_table(table))
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index 40173351d54..f382f18a983 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**********************************************************************
This file contains the implementation of error and warnings related
@@ -386,7 +386,7 @@ Diagnostics_area::set_ok_status(ulonglong affected_rows,
m_affected_rows= affected_rows;
m_last_insert_id= last_insert_id;
if (message)
- strmake(m_message, message, sizeof(m_message) - 1);
+ strmake_buf(m_message, message);
else
m_message[0]= '\0';
m_status= DA_OK;
@@ -489,7 +489,7 @@ Diagnostics_area::set_error_status(uint sql_errno,
m_sql_errno= sql_errno;
memcpy(m_sqlstate, sqlstate, SQLSTATE_LENGTH);
m_sqlstate[SQLSTATE_LENGTH]= '\0';
- strmake(m_message, message, sizeof(m_message)-1);
+ strmake_buf(m_message, message);
get_warning_info()->set_error_condition(error_condition);
diff --git a/sql/sql_error.h b/sql/sql_error.h
index 70805d93450..0a75d7a392d 100644
--- a/sql/sql_error.h
+++ b/sql/sql_error.h
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifndef SQL_ERROR_H
#define SQL_ERROR_H
diff --git a/sql/sql_expression_cache.cc b/sql/sql_expression_cache.cc
index 1193c7c27f4..1e64bc10a7c 100644
--- a/sql/sql_expression_cache.cc
+++ b/sql/sql_expression_cache.cc
@@ -288,7 +288,7 @@ my_bool Expression_cache_tmptable::put_value(Item *value)
if (create_internal_tmp_table_from_heap(table_thd, cache_table,
cache_table_param.start_recinfo,
&cache_table_param.recinfo,
- error, 1))
+ error, 1, NULL))
goto err;
}
}
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index d2819360417..4187327d622 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2001, 2011, Oracle and/or its affiliates.
- Copyright (c) 2011 Monty Program Ab
+/* Copyright (c) 2001, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2011, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* HANDLER ... commands - direct access to ISAM */
@@ -1012,11 +1012,13 @@ static SQL_HANDLER *mysql_ha_find_match(THD *thd, TABLE_LIST *tables)
for (tables= first; tables; tables= tables->next_local)
{
+ if (tables->is_anonymous_derived_table())
+ continue;
if ((! *tables->db ||
! my_strcasecmp(&my_charset_latin1, hash_tables->db.str,
- tables->db)) &&
+ tables->get_db_name())) &&
! my_strcasecmp(&my_charset_latin1, hash_tables->table_name.str,
- tables->table_name))
+ tables->get_table_name()))
{
/* Link into hash_tables list */
hash_tables->next= head;
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 458904ebe1d..d3c36e2c5d7 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#include "sql_priv.h"
#include "unireg.h"
diff --git a/sql/sql_hset.h b/sql/sql_hset.h
index 2ea70b91da8..f3a1467737f 100644
--- a/sql/sql_hset.h
+++ b/sql/sql_hset.h
@@ -13,7 +13,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#include "my_global.h"
#include "hash.h"
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 37b355f1d66..b5178f865d1 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2000, 2011, Oracle and/or its affiliates.
+ Copyright (c) 2000, 2013, Oracle and/or its affiliates.
Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
@@ -95,15 +95,13 @@ static bool check_view_insertability(THD *thd, TABLE_LIST *view);
/*
Check that insert/update fields are from the same single table of a view.
- SYNOPSIS
- check_view_single_update()
- fields The insert/update fields to be checked.
- values Values to use for update
- view The view for insert.
- map [in/out] The insert table map.
+ @param fields The insert/update fields to be checked.
+ @param values The insert/update values to be checked, NULL if
+ checking is not wanted.
+ @param view The view for insert.
+ @param map [in/out] The insert table map.
- DESCRIPTION
- This function is called in 2 cases:
+ This function is called in 2 cases:
1. to check insert fields. In this case *map will be set to 0.
Insert fields are checked to be all from the same single underlying
table of the given view. Otherwise the error is thrown. Found table
@@ -113,9 +111,7 @@ static bool check_view_insertability(THD *thd, TABLE_LIST *view);
the function to check insert fields. Update fields are checked to be
from the same table as the insert fields.
- RETURN
- 0 OK
- 1 Error
+ @returns false if success.
*/
bool check_view_single_update(List<Item> &fields, List<Item> *values,
@@ -180,21 +176,16 @@ error:
/*
Check if insert fields are correct.
- SYNOPSIS
- check_insert_fields()
- thd The current thread.
- table The table for insert.
- fields The insert fields.
- values The insert values.
- check_unique If duplicate values should be rejected.
- fields_and_values_from_different_maps
- Set to 1 if fields and values are using
- different table maps, like on select ... insert
- map Store here table map for used fields
-
- RETURN
- 0 OK
- -1 Error
+ @param thd The current thread.
+ @param table_list The table we are inserting into (may be a view)
+ @param fields The insert fields.
+ @param values The insert values.
+ @param check_unique If duplicate values should be rejected.
+ @param fields_and_values_from_different_maps If 'values' are allowed to
+ refer to other tables than those of 'fields'
+ @param map See check_view_single_update
+
+ @returns 0 if success, -1 if error
*/
static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
@@ -312,28 +303,29 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
}
-/*
- Check update fields for the timestamp and auto_increment fields.
+/**
+ Check if update fields are correct.
- SYNOPSIS
- check_update_fields()
- thd The current thread.
- insert_table_list The insert table list.
- table The table for update.
- update_fields The update fields.
+ @param thd The current thread.
+ @param insert_table_list The table we are inserting into (may be a view)
+ @param update_fields The update fields.
+ @param update_values The update values.
+ @param fields_and_values_from_different_maps If 'update_values' are allowed to
+ refer to other tables than those of 'update_fields'
+ @param map See check_view_single_update
- NOTE
- If the update fields include an autoinc field, set the
- table->next_number_field_updated flag.
+ @note
+ If the update fields include an autoinc field, set the
+ table->next_number_field_updated flag.
- RETURN
- 0 OK
- -1 Error
+ @returns 0 if success, -1 if error
*/
static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
List<Item> &update_fields,
- List<Item> &update_values, table_map *map)
+ List<Item> &update_values,
+ bool fields_and_values_from_different_maps,
+ table_map *map)
{
TABLE *table= insert_table_list->table;
my_bool autoinc_mark;
@@ -358,7 +350,9 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
if (insert_table_list->is_view() &&
insert_table_list->is_merged_derived() &&
- check_view_single_update(update_fields, &update_values,
+ check_view_single_update(update_fields,
+ fields_and_values_from_different_maps ?
+ (List<Item>*) 0 : &update_values,
insert_table_list, map, false))
return -1;
@@ -1451,7 +1445,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
{
select_lex->no_wrap_view_item= TRUE;
res= check_update_fields(thd, context->table_list, update_fields,
- update_values, &map);
+ update_values, false, &map);
select_lex->no_wrap_view_item= FALSE;
}
@@ -1995,8 +1989,7 @@ public:
DBUG_ENTER("Delayed_insert constructor");
thd.security_ctx->user=(char*) delayed_user;
thd.security_ctx->host=(char*) my_localhost;
- strmake(thd.security_ctx->priv_user, thd.security_ctx->user,
- USERNAME_LENGTH);
+ strmake_buf(thd.security_ctx->priv_user, thd.security_ctx->user);
thd.current_tablenr=0;
thd.set_command(COM_DELAYED_INSERT);
thd.lex->current_select= 0; // for my_message_sql
@@ -2041,9 +2034,9 @@ public:
thd.unlink(); // Must be unlinked under lock
my_free(thd.query());
thd.security_ctx->user= thd.security_ctx->host=0;
- thread_count--;
delayed_insert_threads--;
mysql_mutex_unlock(&LOCK_thread_count);
+ thread_safe_decrement32(&thread_count, &thread_count_lock);
mysql_cond_broadcast(&COND_thread_count); /* Tell main we are ready */
}
@@ -2178,9 +2171,9 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
{
if (!(di= new Delayed_insert()))
goto end_create;
- mysql_mutex_lock(&LOCK_thread_count);
- thread_count++;
- mysql_mutex_unlock(&LOCK_thread_count);
+
+ thread_safe_increment32(&thread_count, &thread_count_lock);
+
/*
Annotating delayed inserts is not supported.
*/
@@ -3383,9 +3376,16 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
context->resolve_in_table_list_only(table_list);
lex->select_lex.no_wrap_view_item= TRUE;
- res= res || check_update_fields(thd, context->table_list,
- *info.update_fields, *info.update_values,
- &map);
+ res= res ||
+ check_update_fields(thd, context->table_list,
+ *info.update_fields, *info.update_values,
+ /*
+ In INSERT SELECT ON DUPLICATE KEY UPDATE col=x
+ 'x' can legally refer to a non-inserted table.
+ 'x' is not even resolved yet.
+ */
+ true,
+ &map);
lex->select_lex.no_wrap_view_item= FALSE;
/*
When we are not using GROUP BY and there are no ungrouped aggregate functions
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index ad327c378b8..0acccfcee48 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -775,7 +775,7 @@ ulong JOIN_CACHE::get_min_join_buffer_size()
tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS))
{
len+= tab->get_max_used_fieldlength();
- len_last=+ tab->get_used_fieldlength();
+ len_last+= tab->get_used_fieldlength();
}
size_t len_addon= get_record_max_affix_length() +
get_max_key_addon_space_per_record();
@@ -3812,8 +3812,8 @@ uint JOIN_TAB_SCAN_MRR::aux_buffer_incr(ulong recno)
uint incr= 0;
TABLE_REF *ref= &join_tab->ref;
TABLE *tab= join_tab->table;
- uint rec_per_key=
- tab->key_info[ref->key].actual_rec_per_key(ref->key_parts-1);
+ ha_rows rec_per_key=
+ (ha_rows) tab->key_info[ref->key].actual_rec_per_key(ref->key_parts-1);
set_if_bigger(rec_per_key, 1);
if (recno == 1)
incr= ref->key_length + tab->file->ref_length;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 67fa3301b6c..1bf0d49214e 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
@@ -508,6 +508,7 @@ void lex_start(THD *thd)
lex->expr_allows_subselect= TRUE;
lex->use_only_table_context= FALSE;
lex->parse_vcol_expr= FALSE;
+ lex->check_exists= FALSE;
lex->verbose= 0;
lex->contains_plaintext_password= false;
@@ -1424,7 +1425,7 @@ int lex_one_token(void *arg, void *yythd)
yylval->lex_str=get_token(lip,
2, // skip x'
length-3); // don't count x' and last '
- return (HEX_NUM);
+ return HEX_STRING;
case MY_LEX_BIN_NUMBER: // Found b'bin-string'
lip->yySkip(); // Accept opening '
@@ -1840,8 +1841,11 @@ void st_select_lex::init_query()
cond_count= between_count= with_wild= 0;
max_equal_elems= 0;
ref_pointer_array= 0;
+ ref_pointer_array_size= 0;
select_n_where_fields= 0;
+ select_n_reserved= 0;
select_n_having_items= 0;
+ n_sum_items= 0;
n_child_sum_items= 0;
subquery_in_having= explicit_limit= 0;
is_item_list_lookup= 0;
@@ -1895,6 +1899,7 @@ void st_select_lex::init_select()
merged_into= 0;
m_non_agg_field_used= false;
m_agg_func_used= false;
+ name_visibility_map= 0;
}
/*
@@ -2202,6 +2207,11 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc)
}
+bool st_select_lex::add_gorder_to_list(THD *thd, Item *item, bool asc)
+{
+ return add_to_list(thd, gorder_list, item, asc);
+}
+
bool st_select_lex::add_item_to_list(THD *thd, Item *item)
{
DBUG_ENTER("st_select_lex::add_item_to_list");
@@ -2272,11 +2282,6 @@ ulong st_select_lex::get_table_join_options()
bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
{
- DBUG_ENTER("st_select_lex::setup_ref_array");
-
- if (ref_pointer_array)
- DBUG_RETURN(0);
-
// find_order_in_list() may need some extra space, so multiply by two.
order_group_num*= 2;
@@ -2284,13 +2289,31 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num)
We have to create array in prepared statement memory if it is a
prepared statement
*/
- ref_pointer_array=
- (Item **)thd->stmt_arena->alloc(sizeof(Item*) * (n_child_sum_items +
- item_list.elements +
- select_n_having_items +
- select_n_where_fields +
- order_group_num)*5);
- DBUG_RETURN(ref_pointer_array == 0);
+ Query_arena *arena= thd->stmt_arena;
+ const uint n_elems= (n_sum_items +
+ n_child_sum_items +
+ item_list.elements +
+ select_n_reserved +
+ select_n_having_items +
+ select_n_where_fields +
+ order_group_num) * 5;
+ if (ref_pointer_array != NULL)
+ {
+ /*
+ We need to take 'n_sum_items' into account when allocating the array,
+ and this may actually increase during the optimization phase due to
+ MIN/MAX rewrite in Item_in_subselect::single_value_transformer.
+ In the usual case we can reuse the array from the prepare phase.
+ If we need a bigger array, we must allocate a new one.
+ */
+ if (ref_pointer_array_size >= n_elems)
+ return false;
+ }
+ ref_pointer_array= static_cast<Item**>(arena->alloc(sizeof(Item*) * n_elems));
+ if (ref_pointer_array != NULL)
+ ref_pointer_array_size= n_elems;
+
+ return ref_pointer_array == NULL;
}
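
setup_ref_array() now remembers the allocated element count in ref_pointer_array_size and only reallocates from the statement arena when the required size grows, which can happen after the MIN/MAX rewrite mentioned in the comment above. The pattern, reduced to its essentials (the helper name and its free-function form are illustrative; only the types come from the hunk):

    // Reduced sketch of the reuse-or-grow pattern used above.
    static bool grow_ref_array(Query_arena *arena, Item **&array,
                               size_t &size, size_t n_elems)
    {
      if (array != NULL && size >= n_elems)
        return false;                       // previous allocation still fits
      array= static_cast<Item**>(arena->alloc(sizeof(Item*) * n_elems));
      if (array != NULL)
        size= n_elems;
      return array == NULL;                 // true on out-of-memory
    }
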
@@ -2842,7 +2865,7 @@ void st_select_lex_unit::set_limit(st_select_lex *sl)
val= fix_fields_successful ? item->val_uint() : 0;
}
else
- val= ULL(0);
+ val= 0;
offset_limit_cnt= (ha_rows)val;
#ifndef BIG_TABLES
@@ -3836,7 +3859,8 @@ void SELECT_LEX::update_used_tables()
{
for (ORDER *order= order_list.first; order; order= order->next)
(*order->item)->update_used_tables();
- }
+ }
+ join->result->update_used_tables();
}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 77ef72126a1..59f7c122646 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2010, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -209,6 +210,8 @@ struct LEX_MASTER_INFO
char *ssl_crl, *ssl_crlpath;
char *relay_log_name;
LEX_STRING connection_name;
+ /* Value in START SLAVE UNTIL master_gtid_pos=xxx */
+ LEX_STRING gtid_pos_str;
ulonglong pos;
ulong relay_log_pos;
ulong server_id;
@@ -220,6 +223,9 @@ struct LEX_MASTER_INFO
*/
enum {LEX_MI_UNCHANGED, LEX_MI_DISABLE, LEX_MI_ENABLE}
ssl, ssl_verify_server_cert, heartbeat_opt, repl_ignore_server_ids_opt;
+ enum {
+ LEX_GTID_UNCHANGED, LEX_GTID_NO, LEX_GTID_CURRENT_POS, LEX_GTID_SLAVE_POS
+ } use_gtid_opt;
void init()
{
@@ -236,6 +242,9 @@ struct LEX_MASTER_INFO
heartbeat_period= 0;
ssl= ssl_verify_server_cert= heartbeat_opt=
repl_ignore_server_ids_opt= LEX_MI_UNCHANGED;
+ gtid_pos_str.length= 0;
+ gtid_pos_str.str= NULL;
+ use_gtid_opt= LEX_GTID_UNCHANGED;
}
};
@@ -716,10 +725,11 @@ public:
const char *type; /* type of select for EXPLAIN */
SQL_I_List<ORDER> order_list; /* ORDER clause */
- SQL_I_List<ORDER> *gorder_list;
+ SQL_I_List<ORDER> gorder_list;
Item *select_limit, *offset_limit; /* LIMIT clause parameters */
// Arrays of pointers to top elements of all_fields list
Item **ref_pointer_array;
+ size_t ref_pointer_array_size; // Number of elements in array.
/*
number of items in select_list and HAVING clause used to get number
@@ -735,6 +745,8 @@ public:
and all inner subselects.
*/
uint select_n_where_fields;
+ /* reserved for the EXISTS-to-IN subquery transformation */
+ uint select_n_reserved;
enum_parsing_place parsing_place; /* where we are parsing expression */
bool with_sum_func; /* sum function indicator */
@@ -816,6 +828,9 @@ public:
*/
List<String> *prev_join_using;
+ /* map of nesting SELECT visibility (for aggregate functions check) */
+ nesting_map name_visibility_map;
+
void init_query();
void init_select();
st_select_lex_unit* master_unit();
@@ -849,6 +864,7 @@ public:
bool add_group_to_list(THD *thd, Item *item, bool asc);
bool add_ftfunc_to_list(Item_func_match *func);
bool add_order_to_list(THD *thd, Item *item, bool asc);
+ bool add_gorder_to_list(THD *thd, Item *item, bool asc);
TABLE_LIST* add_table_to_list(THD *thd, Table_ident *table,
LEX_STRING *alias,
ulong table_options,
@@ -2285,7 +2301,7 @@ struct LEX: public Query_tables_list
LEX_STRING relay_log_connection_name;
USER_RESOURCES mqh;
LEX_RESET_SLAVE reset_slave_info;
- ulong type;
+ ulonglong type;
/* The following is used by KILL */
killed_state kill_signal;
killed_type kill_type;
@@ -2356,7 +2372,7 @@ struct LEX: public Query_tables_list
uint16 create_view_algorithm;
uint8 create_view_check;
uint8 context_analysis_only;
- bool drop_if_exists, drop_temporary, local_file, one_shot_set;
+ bool drop_temporary, local_file, one_shot_set;
bool check_exists;
bool autocommit;
bool verbose, no_write_to_binlog;
diff --git a/sql/sql_list.h b/sql/sql_list.h
index b4e0ab84aab..aef2f8d5f25 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -287,13 +287,15 @@ public:
if (node == &end_of_list)
return;
}
- *prev= *last;
+ *prev= &end_of_list;
last= prev;
}
inline void prepand(base_list *list)
{
if (!list->is_empty())
{
+ if (is_empty())
+ last= list->last;
*list->last= first;
first= list->first;
elements+= list->elements;
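
Both sql_list.h changes concern the base_list invariant that `last` always points at the link slot of the final element and that `*last` is `&end_of_list`: disjoin() now terminates the truncated list explicitly with `&end_of_list`, and prepand() adopts `list->last` when the receiving list was empty, since otherwise `last` would still point at the empty list's own `first` slot. A toy model of that invariant (simplified types, not the real base_list):

    // Toy model of the base_list invariant (not the real class).
    struct node { node *next; };
    static node end_marker;                  // stands in for end_of_list

    struct toy_list
    {
      node  *first;
      node **last;                           // always the link slot of the tail
      toy_list() : first(&end_marker), last(&first) {}

      bool is_empty() const { return first == &end_marker; }

      void prepend(toy_list *other)          // models base_list::prepand()
      {
        if (other->is_empty())
          return;
        if (is_empty())
          last= other->last;                 // the fix: adopt other's tail slot
        *other->last= first;                 // splice: other's tail -> our head
        first= other->first;
      }
    };
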
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 3e80dee40df..0d0efb0c21f 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -1426,7 +1426,7 @@ inline int READ_INFO::terminator(char *ptr,uint length)
uint i;
for (i=1 ; i < length ; i++)
{
- if ((chr=GET) != *++ptr)
+ if ((chr=GET) != *(uchar*)++ptr)
{
break;
}
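
The cast to uchar* in READ_INFO::terminator() matters because plain `char` may be signed: a byte such as 0xFF sign-extends to -1 when promoted to int and can then never compare equal to the non-negative value returned by GET. The NAMES_SEP_CHAR change in sql_const.h earlier in this patch avoids the same trap. A standalone illustration of the promotion (generic C++, unrelated to the class above):

    #include <cstdio>

    int main()
    {
      char          c= '\377';    // 0xFF; typically -1 where char is signed
      unsigned char u= '\377';    // always 255
      int chr= 255;               // what a byte-wise reader would return
      std::printf("signed-char compare:   %d\n", chr == c);  // 0 on most platforms
      std::printf("unsigned-char compare: %d\n", chr == u);  // 1
      return 0;
    }
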
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 6788c6e2c38..7d057f4e91a 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2008, 2012, Monty Program Ab
+ Copyright (c) 2008, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -170,8 +170,8 @@ const char *xa_state_names[]={
*/
inline bool all_tables_not_ok(THD *thd, TABLE_LIST *tables)
{
- return rpl_filter->is_on() && tables && !thd->spcont &&
- !rpl_filter->tables_ok(thd->db, tables);
+ return thd->rpl_filter->is_on() && tables && !thd->spcont &&
+ !thd->rpl_filter->tables_ok(thd->db, tables);
}
#endif
@@ -804,9 +804,10 @@ end:
delete thd;
#ifndef EMBEDDED_LIBRARY
- mysql_mutex_lock(&LOCK_thread_count);
- thread_count--;
+ thread_safe_decrement32(&thread_count, &thread_count_lock);
in_bootstrap= FALSE;
+
+ mysql_mutex_lock(&LOCK_thread_count);
mysql_cond_broadcast(&COND_thread_count);
mysql_mutex_unlock(&LOCK_thread_count);
my_thread_end();
@@ -915,7 +916,7 @@ bool do_command(THD *thd)
packet_length= my_net_read(net);
thd->m_server_idle= FALSE;
- if ((packet_length == packet_error))
+ if (packet_length == packet_error)
{
DBUG_PRINT("info",("Got error %d reading command from socket %s",
net->error,
@@ -1109,10 +1110,20 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->enable_slow_log= TRUE;
thd->query_plan_flags= QPLAN_INIT;
thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */
+
+ DEBUG_SYNC(thd,"dispatch_command_before_set_time");
+
thd->set_time();
- thd->set_query_id(get_query_id());
if (!(server_command_flags[command] & CF_SKIP_QUERY_ID))
- next_query_id();
+ thd->set_query_id(next_query_id());
+ else
+ {
+ /*
+ ping, get statistics or similar stateless command.
+ No reason to increase query id here.
+ */
+ thd->set_query_id(get_query_id());
+ }
inc_thread_running();
if (!(server_command_flags[command] & CF_SKIP_QUESTIONS))
@@ -1183,7 +1194,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
else
auth_rc= acl_authenticate(thd, 0, packet_length);
- MYSQL_AUDIT_NOTIFY_CONNECTION_CHANGE_USER(thd);
+ mysql_audit_notify_connection_change_user(thd);
if (auth_rc)
{
/* Free user if allocated by acl_authenticate */
@@ -1281,6 +1292,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->update_server_status();
thd->protocol->end_statement();
query_cache_end_of_result(thd);
+
+ mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS,
+ thd->get_stmt_da()->is_error()
+ ? thd->get_stmt_da()->sql_errno()
+ : 0,
+ command_name[command].str);
+
ulong length= (ulong)(packet_end - beginning_of_next_stmt);
log_slow_statement(thd);
@@ -1458,10 +1476,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* TODO: The following has to be changed to an 8 byte integer */
pos = uint4korr(packet);
flags = uint2korr(packet + 4);
- thd->server_id=0; /* avoid suicide */
+ thd->variables.server_id=0; /* avoid suicide */
if ((slave_server_id= uint4korr(packet+6))) // mysqlbinlog.server_id==0
kill_zombie_dump_threads(slave_server_id);
- thd->server_id = slave_server_id;
+ thd->variables.server_id = slave_server_id;
general_log_print(thd, command, "Log: '%s' Pos: %ld", packet+10,
(long) pos);
@@ -1484,7 +1502,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
lex_start(thd);
status_var_increment(thd->status_var.com_stat[SQLCOM_FLUSH]);
- ulong options= (ulong) (uchar) packet[0];
+ ulonglong options= (ulonglong) (uchar) packet[0];
if (trans_commit_implicit(thd))
break;
thd->mdl_context.release_transactional_locks();
@@ -1572,7 +1590,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (!(uptime= (ulong) (thd->start_time - server_start_time)))
queries_per_second1000= 0;
else
- queries_per_second1000= thd->query_id * LL(1000) / uptime;
+ queries_per_second1000= thd->query_id * 1000 / uptime;
length= my_snprintf(buff, buff_len - 1,
"Uptime: %lu Threads: %d Questions: %lu "
@@ -1685,6 +1703,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
MYSQL_END_STATEMENT(thd->m_statement_psi, thd->get_stmt_da());
thd->m_statement_psi= NULL;
+ thd->set_time();
dec_thread_running();
thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
@@ -1723,7 +1742,8 @@ void log_slow_statement(THD *thd)
/* Follow the slow log filter configuration. */
if (!thd->enable_slow_log ||
- !(thd->variables.log_slow_filter & thd->query_plan_flags))
+ (thd->variables.log_slow_filter
+ && !(thd->variables.log_slow_filter & thd->query_plan_flags)))
DBUG_VOID_RETURN;
if (((thd->server_status & SERVER_QUERY_WAS_SLOW) ||
@@ -1855,7 +1875,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
break;
case SCH_USER_STATS:
case SCH_CLIENT_STATS:
- if (check_global_access(thd, SUPER_ACL | PROCESS_ACL))
+ if (check_global_access(thd, SUPER_ACL | PROCESS_ACL, true))
DBUG_RETURN(1);
case SCH_TABLE_STATS:
case SCH_INDEX_STATS:
@@ -2030,7 +2050,7 @@ bool sp_process_definer(THD *thd)
if ((strcmp(lex->definer->user.str, thd->security_ctx->priv_user) ||
my_strcasecmp(system_charset_info, lex->definer->host.str,
thd->security_ctx->priv_host)) &&
- check_global_access(thd, SUPER_ACL))
+ check_global_access(thd, SUPER_ACL, true))
{
my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER");
DBUG_RETURN(TRUE);
@@ -2151,6 +2171,8 @@ mysql_execute_command(THD *thd)
#ifdef HAVE_REPLICATION
/* have table map for update for multi-update statement (BUG#37051) */
bool have_table_map_for_update= FALSE;
+ /* Cache the per-connection replication filter */
+ Rpl_filter *rpl_filter= thd->rpl_filter;
#endif
DBUG_ENTER("mysql_execute_command");
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -2281,7 +2303,7 @@ mysql_execute_command(THD *thd)
if (!(lex->sql_command == SQLCOM_UPDATE_MULTI) &&
!(lex->sql_command == SQLCOM_SET_OPTION) &&
!(lex->sql_command == SQLCOM_DROP_TABLE &&
- lex->drop_temporary && lex->drop_if_exists) &&
+ lex->drop_temporary && lex->check_exists) &&
all_tables_not_ok(thd, all_tables))
{
/* we warn the slave SQL thread */
@@ -2668,6 +2690,11 @@ case SQLCOM_PREPARE:
else
delete mi;
}
+ else
+ {
+ mi->rpl_filter= get_or_create_rpl_filter(lex_mi->connection_name.str,
+ lex_mi->connection_name.length);
+ }
mysql_mutex_unlock(&LOCK_active_mi);
break;
@@ -2996,13 +3023,34 @@ end_with_restore_list:
{
LEX_MASTER_INFO* lex_mi= &thd->lex->mi;
Master_info *mi;
+ int load_error;
+
+ load_error= rpl_load_gtid_slave_state(thd);
+
mysql_mutex_lock(&LOCK_active_mi);
if ((mi= (master_info_index->
get_master_info(&lex_mi->connection_name,
Sql_condition::WARN_LEVEL_ERROR))))
+ {
+ if (load_error)
+ {
+ /*
+ We cannot start a slave using GTID if we cannot load the GTID position
+ from the mysql.gtid_slave_pos table. But we can allow non-GTID
+ replication (useful, e.g., during an upgrade).
+ */
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ mysql_mutex_unlock(&LOCK_active_mi);
+ break;
+ }
+ else
+ thd->clear_error();
+ }
if (!start_slave(thd, mi, 1 /* net report*/))
my_ok(thd);
+ }
mysql_mutex_unlock(&LOCK_active_mi);
break;
}
@@ -3341,6 +3389,7 @@ end_with_restore_list:
thd->first_successful_insert_id_in_cur_stmt=
thd->first_successful_insert_id_in_prev_stmt;
+#ifdef ENABLED_DEBUG_SYNC
DBUG_EXECUTE_IF("after_mysql_insert",
{
const char act1[]=
@@ -3356,6 +3405,7 @@ end_with_restore_list:
STRING_WITH_LEN(act2)));
};);
DEBUG_SYNC(thd, "after_mysql_insert");
+#endif
break;
}
case SQLCOM_REPLACE_SELECT:
@@ -3529,7 +3579,7 @@ end_with_restore_list:
thd->variables.option_bits|= OPTION_KEEP_LOG;
}
/* DDL and binlog write order are protected by metadata locks. */
- res= mysql_rm_table(thd, first_table, lex->drop_if_exists,
+ res= mysql_rm_table(thd, first_table, lex->check_exists,
lex->drop_temporary);
}
break;
@@ -3756,7 +3806,7 @@ end_with_restore_list:
#endif
if (check_access(thd, DROP_ACL, lex->name.str, NULL, NULL, 1, 0))
break;
- res= mysql_rm_db(thd, lex->name.str, lex->drop_if_exists, 0);
+ res= mysql_rm_db(thd, lex->name.str, lex->check_exists, 0);
break;
}
case SQLCOM_ALTER_DB_UPGRADE:
@@ -3884,7 +3934,7 @@ end_with_restore_list:
case SQLCOM_DROP_EVENT:
if (!(res= Events::drop_event(thd,
lex->spname->m_db, lex->spname->m_name,
- lex->drop_if_exists)))
+ lex->check_exists)))
my_ok(thd);
break;
#else
@@ -4144,6 +4194,17 @@ end_with_restore_list:
lex->kill_signal);
break;
}
+ case SQLCOM_SHUTDOWN:
+#ifndef EMBEDDED_LIBRARY
+ if (check_global_access(thd,SHUTDOWN_ACL))
+ goto error;
+ kill_mysql();
+ my_ok(thd);
+#else
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0), "embedded server");
+#endif
+ break;
+
#ifndef NO_EMBEDDED_ACCESS_CHECKS
case SQLCOM_SHOW_GRANTS:
{
@@ -4576,7 +4637,7 @@ create_sp_error:
if (lex->spname->m_db.str == NULL)
{
- if (lex->drop_if_exists)
+ if (lex->check_exists)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST),
@@ -4645,7 +4706,7 @@ create_sp_error:
my_ok(thd);
break;
case SP_KEY_NOT_FOUND:
- if (lex->drop_if_exists)
+ if (lex->check_exists)
{
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
@@ -4860,7 +4921,7 @@ create_sp_error:
if ((err_code= drop_server(thd, &lex->server_options)))
{
- if (! lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_DOESNT_EXIST)
+ if (! lex->check_exists && err_code == ER_FOREIGN_SERVER_DOESNT_EXIST)
{
DBUG_PRINT("info", ("problem dropping server %s",
lex->server_options.server_name));
@@ -5132,100 +5193,6 @@ static bool execute_rename_table(THD *thd, TABLE_LIST *first_table,
}
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
-/**
- Check grants for commands which work only with one table.
-
- @param thd Thread handler
- @param privilege requested privilege
- @param all_tables global table list of query
- @param no_errors FALSE/TRUE - report/don't report error to
- the client (using my_error() call).
-
- @retval
- 0 OK
- @retval
- 1 access denied, error is sent to client
-*/
-
-bool check_single_table_access(THD *thd, ulong privilege,
- TABLE_LIST *all_tables, bool no_errors)
-{
- Security_context * backup_ctx= thd->security_ctx;
-
- /* we need to switch to the saved context (if any) */
- if (all_tables->security_ctx)
- thd->security_ctx= all_tables->security_ctx;
-
- const char *db_name;
- if ((all_tables->view || all_tables->field_translation) &&
- !all_tables->schema_table)
- db_name= all_tables->view_db.str;
- else
- db_name= all_tables->db;
-
- if (check_access(thd, privilege, db_name,
- &all_tables->grant.privilege,
- &all_tables->grant.m_internal,
- 0, no_errors))
- goto deny;
-
- /* Show only 1 table for check_grant */
- if (!(all_tables->belong_to_view &&
- (thd->lex->sql_command == SQLCOM_SHOW_FIELDS)) &&
- check_grant(thd, privilege, all_tables, FALSE, 1, no_errors))
- goto deny;
-
- thd->security_ctx= backup_ctx;
- return 0;
-
-deny:
- thd->security_ctx= backup_ctx;
- return 1;
-}
-
-/**
- Check grants for commands which work only with one table and all other
- tables belonging to subselects or implicitly opened tables.
-
- @param thd Thread handler
- @param privilege requested privilege
- @param all_tables global table list of query
-
- @retval
- 0 OK
- @retval
- 1 access denied, error is sent to client
-*/
-
-bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
-{
- if (check_single_table_access (thd,privilege,all_tables, FALSE))
- return 1;
-
- /* Check rights on tables of subselects and implictly opened tables */
- TABLE_LIST *subselects_tables, *view= all_tables->view ? all_tables : 0;
- if ((subselects_tables= all_tables->next_global))
- {
- /*
- Access rights asked for the first table of a view should be the same
- as for the view
- */
- if (view && subselects_tables->belong_to_view == view)
- {
- if (check_single_table_access (thd, privilege, subselects_tables, FALSE))
- return 1;
- subselects_tables= subselects_tables->next_global;
- }
- if (subselects_tables &&
- (check_table_access(thd, SELECT_ACL, subselects_tables, FALSE,
- UINT_MAX, FALSE)))
- return 1;
- }
- return 0;
-}
-
-
/**
@brief Compare requested privileges with the privileges acquired from the
User- and Db-tables.
@@ -5258,6 +5225,11 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
GRANT_INTERNAL_INFO *grant_internal_info,
bool dont_check_global_grants, bool no_errors)
{
+#ifdef NO_EMBEDDED_ACCESS_CHECKS
+ if (save_priv)
+ *save_priv= GLOBAL_ACLS;
+ return false;
+#else
Security_context *sctx= thd->security_ctx;
ulong db_access;
@@ -5296,6 +5268,10 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
if ((db != NULL) && (db != any_db))
{
+ /*
+ Check if this is a reserved database, like information_schema or
+ performance_schema
+ */
const ACL_internal_schema_access *access;
access= get_cached_schema_access(grant_internal_info, db);
if (access)
@@ -5436,6 +5412,101 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
"unknown")));
}
DBUG_RETURN(TRUE);
+#endif // NO_EMBEDDED_ACCESS_CHECKS
+}
+
+
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+/**
+ Check grants for commands which work only with one table.
+
+ @param thd Thread handler
+ @param privilege requested privilege
+ @param all_tables global table list of query
+ @param no_errors FALSE/TRUE - report/don't report error to
+ the client (using my_error() call).
+
+ @retval
+ 0 OK
+ @retval
+ 1 access denied, error is sent to client
+*/
+
+bool check_single_table_access(THD *thd, ulong privilege,
+ TABLE_LIST *all_tables, bool no_errors)
+{
+ Security_context * backup_ctx= thd->security_ctx;
+
+ /* we need to switch to the saved context (if any) */
+ if (all_tables->security_ctx)
+ thd->security_ctx= all_tables->security_ctx;
+
+ const char *db_name;
+ if ((all_tables->view || all_tables->field_translation) &&
+ !all_tables->schema_table)
+ db_name= all_tables->view_db.str;
+ else
+ db_name= all_tables->db;
+
+ if (check_access(thd, privilege, db_name,
+ &all_tables->grant.privilege,
+ &all_tables->grant.m_internal,
+ 0, no_errors))
+ goto deny;
+
+ /* Show only 1 table for check_grant */
+ if (!(all_tables->belong_to_view &&
+ (thd->lex->sql_command == SQLCOM_SHOW_FIELDS)) &&
+ check_grant(thd, privilege, all_tables, FALSE, 1, no_errors))
+ goto deny;
+
+ thd->security_ctx= backup_ctx;
+ return 0;
+
+deny:
+ thd->security_ctx= backup_ctx;
+ return 1;
+}
+
+/**
+ Check grants for commands which work only with one table and all other
+ tables belonging to subselects or implicitly opened tables.
+
+ @param thd Thread handler
+ @param privilege requested privilege
+ @param all_tables global table list of query
+
+ @retval
+ 0 OK
+ @retval
+ 1 access denied, error is sent to client
+*/
+
+bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
+{
+ if (check_single_table_access (thd,privilege,all_tables, FALSE))
+ return 1;
+
+ /* Check rights on tables of subselects and implicitly opened tables */
+ TABLE_LIST *subselects_tables, *view= all_tables->view ? all_tables : 0;
+ if ((subselects_tables= all_tables->next_global))
+ {
+ /*
+ Access rights asked for the first table of a view should be the same
+ as for the view
+ */
+ if (view && subselects_tables->belong_to_view == view)
+ {
+ if (check_single_table_access (thd, privilege, subselects_tables, FALSE))
+ return 1;
+ subselects_tables= subselects_tables->next_global;
+ }
+ if (subselects_tables &&
+ (check_table_access(thd, SELECT_ACL, subselects_tables, FALSE,
+ UINT_MAX, FALSE)))
+ return 1;
+ }
+ return 0;
}
@@ -5747,14 +5818,17 @@ bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table)
1 Access denied. In this case an error is sent to the client
*/
-bool check_global_access(THD *thd, ulong want_access)
+bool check_global_access(THD *thd, ulong want_access, bool no_errors)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
char command[128];
if ((thd->security_ctx->master_access & want_access))
return 0;
- get_privilege_desc(command, sizeof(command), want_access);
- my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command);
+ if (!no_errors)
+ {
+ get_privilege_desc(command, sizeof(command), want_access);
+ my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command);
+ }
status_var_increment(thd->status_var.access_denied_errors);
return 1;
#else
@@ -6293,7 +6367,7 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
lex->col_list.push_back(new Key_part_spec(*field_name, 0));
key= new Key(Key::PRIMARY, null_lex_str,
&default_key_create_info,
- 0, lex->col_list, NULL);
+ 0, lex->col_list, NULL, lex->check_exists);
lex->alter_info.key_list.push_back(key);
lex->col_list.empty();
}
@@ -6303,7 +6377,7 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
lex->col_list.push_back(new Key_part_spec(*field_name, 0));
key= new Key(Key::UNIQUE, null_lex_str,
&default_key_create_info, 0,
- lex->col_list, NULL);
+ lex->col_list, NULL, lex->check_exists);
lex->alter_info.key_list.push_back(key);
lex->col_list.empty();
}
@@ -6355,7 +6429,7 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
new_field->init(thd, field_name->str, type, length, decimals, type_modifier,
default_value, on_update_value, comment, change,
interval_list, cs, uint_geom_type, vcol_info,
- create_options))
+ create_options, lex->check_exists))
DBUG_RETURN(1);
lex->alter_info.create_list.push_back(new_field);
@@ -6718,6 +6792,8 @@ TABLE_LIST *st_select_lex::nest_last_join(THD *thd)
for (uint i=0; i < 2; i++)
{
TABLE_LIST *table= join_list->pop();
+ if (!table)
+ DBUG_RETURN(NULL);
table->join_list= embedded_list;
table->embedding= ptr;
embedded_list->push_back(table);
@@ -8014,6 +8090,7 @@ bool check_string_char_length(LEX_STRING *str, const char *err_msg,
return TRUE;
}
+C_MODE_START
/*
Check if path does not contain mysql data home directory
@@ -8026,7 +8103,6 @@ bool check_string_char_length(LEX_STRING *str, const char *err_msg,
0 ok
1 error ; Given path contains data directory
*/
-C_MODE_START
int test_if_data_home_dir(const char *dir)
{
@@ -8037,6 +8113,22 @@ int test_if_data_home_dir(const char *dir)
if (!dir)
DBUG_RETURN(0);
+ /*
+ data_file_name and index_file_name include the table name without
+ extension. Mostly this does not refer to an existing file. When
+ comparing data_file_name or index_file_name against the data
+ directory, we try to resolve all symbolic links. On some systems,
+ we use realpath(3) for the resolution. This returns ENOENT if the
+ resolved path does not refer to an existing file. my_realpath()
+ then copies the requested path verbatim, without symlink
+ resolution. Thereafter the comparison can fail even if the
+ requested path is within the data directory, e.g. if symlinks to
+ another file system are used.
+ resolved path, we strip the table name and compare the directory
+ path only. If the directory doesn't exist either, table creation
+ will fail anyway.
+ */
+
(void) fn_format(path, dir, "", "",
(MY_RETURN_REAL_PATH|MY_RESOLVE_SYMLINKS));
dir_len= strlen(path);
@@ -8070,6 +8162,22 @@ int test_if_data_home_dir(const char *dir)
C_MODE_END
+int error_if_data_home_dir(const char *path, const char *what)
+{
+ size_t dirlen;
+ char dirpath[FN_REFLEN];
+ if (path)
+ {
+ dirname_part(dirpath, path, &dirlen);
+ if (test_if_data_home_dir(dirpath))
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0), what);
+ return 1;
+ }
+ }
+ return 0;
+}
+
/**
Check that host name string is valid.
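
check_global_access() gains a no_errors argument (defaulted to false in sql_parse.h below), so callers such as prepare_schema_table() and sp_process_definer() can probe for a privilege without sending an error to the client, and the new error_if_data_home_dir() wraps test_if_data_home_dir() with the directory-name extraction plus ER_WRONG_ARGUMENTS reporting. A hypothetical caller combining the two (illustrative only; the function and its parameters are not part of the patch):

    // Hypothetical caller: silently check for SUPER, then validate a
    // user-supplied DATA DIRECTORY value against the server datadir.
    static bool validate_create_options(THD *thd, const char *data_file_path)
    {
      if (check_global_access(thd, SUPER_ACL, true))   // no error sent to client
        return true;                                   // caller reports its own error
      if (error_if_data_home_dir(data_file_path, "DATA DIRECTORY"))
        return true;                                   // ER_WRONG_ARGUMENTS already raised
      return false;
    }
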
diff --git a/sql/sql_parse.h b/sql/sql_parse.h
index 12d42b57618..84256aa2256 100644
--- a/sql/sql_parse.h
+++ b/sql/sql_parse.h
@@ -34,6 +34,7 @@ enum enum_mysql_completiontype {
};
extern "C" int test_if_data_home_dir(const char *dir);
+int error_if_data_home_dir(const char *path, const char *what);
bool multi_update_precheck(THD *thd, TABLE_LIST *tables);
bool multi_delete_precheck(THD *thd, TABLE_LIST *tables);
@@ -146,6 +147,15 @@ inline bool check_identifier_name(LEX_STRING *str)
return check_identifier_name(str, NAME_CHAR_LEN, 0, "");
}
+
+/*
+ check_access() is needed for the connect engine.
+ It cannot be inlined - it must be exported.
+*/
+bool check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
+ GRANT_INTERNAL_INFO *grant_internal_info,
+ bool dont_check_global_grants, bool no_errors);
+
#ifndef NO_EMBEDDED_ACCESS_CHECKS
bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables);
bool check_single_table_access(THD *thd, ulong privilege,
@@ -154,9 +164,6 @@ bool check_routine_access(THD *thd,ulong want_access,char *db,char *name,
bool is_proc, bool no_errors);
bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table);
bool check_some_routine_access(THD *thd, const char *db, const char *name, bool is_proc);
-bool check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
- GRANT_INTERNAL_INFO *grant_internal_info,
- bool dont_check_global_grants, bool no_errors);
bool check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables,
bool any_combination_of_privileges_will_do,
uint number,
@@ -178,13 +185,6 @@ inline bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table)
inline bool check_some_routine_access(THD *thd, const char *db,
const char *name, bool is_proc)
{ return false; }
-inline bool check_access(THD *, ulong, const char *, ulong *save_priv,
- GRANT_INTERNAL_INFO *, bool, bool)
-{
- if (save_priv)
- *save_priv= GLOBAL_ACLS;
- return false;
-}
inline bool
check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables,
bool any_combination_of_privileges_will_do,
@@ -195,7 +195,7 @@ check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables,
/* These were under the INNODB_COMPATIBILITY_HOOKS */
-bool check_global_access(THD *thd, ulong want_access);
+bool check_global_access(THD *thd, ulong want_access, bool no_errors= false);
inline bool is_supported_parser_charset(CHARSET_INFO *cs)
{
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index aff92331bd9..9e4c48b47ff 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
- Copyright (c) 2009-2013, Monty Program Ab & SkySQL Ab
+/* Copyright (c) 2005, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/*
This file is a container for general functionality related
@@ -71,6 +71,8 @@
#include "sql_alter.h" // Alter_table_ctx
#include <algorithm>
+using std::max;
+using std::min;
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@@ -1518,7 +1520,7 @@ bool field_is_partition_charset(Field *field)
!(field->type() == MYSQL_TYPE_VARCHAR))
return FALSE;
{
- const CHARSET_INFO *cs= field->charset();
+ CHARSET_INFO *cs= field->charset();
if (!(field->type() == MYSQL_TYPE_STRING) ||
!(cs->state & MY_CS_BINSORT))
return TRUE;
@@ -1561,7 +1563,7 @@ bool check_part_func_fields(Field **ptr, bool ok_with_charsets)
*/
if (field_is_partition_charset(field))
{
- const CHARSET_INFO *cs= field->charset();
+ CHARSET_INFO *cs= field->charset();
if (!ok_with_charsets ||
cs->mbmaxlen > 1 ||
cs->strxfrm_multiply > 1)
@@ -2121,6 +2123,8 @@ static int check_part_field(enum_field_types sql_type,
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_TIME:
case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_TIME2:
+ case MYSQL_TYPE_DATETIME2:
*result_type= STRING_RESULT;
*need_cs_check= TRUE;
return FALSE;
@@ -2133,6 +2137,7 @@ static int check_part_field(enum_field_types sql_type,
case MYSQL_TYPE_NEWDECIMAL:
case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP2:
case MYSQL_TYPE_NULL:
case MYSQL_TYPE_FLOAT:
case MYSQL_TYPE_DOUBLE:
@@ -2682,35 +2687,6 @@ static inline int part_val_int(Item *item_expr, longlong *result)
*/
/*
- Calculate hash value for KEY partitioning using an array of fields.
-
- SYNOPSIS
- calculate_key_value()
- field_array An array of the fields in KEY partitioning
-
- RETURN VALUE
- hash_value calculated
-
- DESCRIPTION
- Uses the hash function on the character set of the field. Integer and
- floating point fields use the binary character set by default.
-*/
-
-static uint32 calculate_key_value(Field **field_array)
-{
- ulong nr1= 1;
- ulong nr2= 4;
-
- do
- {
- Field *field= *field_array;
- field->hash(&nr1, &nr2);
- } while (*(++field_array));
- return (uint32) nr1;
-}
-
-
-/*
A simple support function to calculate part_id given local part and
sub part.
@@ -2815,7 +2791,7 @@ static uint32 get_part_id_key(handler *file,
longlong *func_value)
{
DBUG_ENTER("get_part_id_key");
- *func_value= calculate_key_value(field_array);
+ *func_value= ha_partition::calculate_key_hash_value(field_array);
DBUG_RETURN((uint32) (*func_value % num_parts));
}
@@ -2842,7 +2818,7 @@ static uint32 get_part_id_linear_key(partition_info *part_info,
{
DBUG_ENTER("get_part_id_linear_key");
- *func_value= calculate_key_value(field_array);
+ *func_value= ha_partition::calculate_key_hash_value(field_array);
DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
part_info->linear_hash_mask,
num_parts));
@@ -2878,7 +2854,7 @@ static void copy_to_part_field_buffers(Field **ptr,
restore_ptr++;
if (!field->maybe_null() || !field->is_null())
{
- const CHARSET_INFO *cs= field->charset();
+ CHARSET_INFO *cs= field->charset();
uint max_len= field->pack_length();
uint data_len= field->data_length();
uchar *field_buf= *field_bufs;
@@ -4933,8 +4909,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
}
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
- if (alt_part_info->set_up_defaults_for_partitioning(table->file,
- ULL(0),
+ if (alt_part_info->set_up_defaults_for_partitioning(table->file, 0,
tab_part_info->num_parts))
{
goto err;
@@ -5055,7 +5030,7 @@ that are reorganised.
*/
start_part= 0;
end_part= new_total_partitions - (upper_2n + 1);
- end_part= MY_MAX(lower_2n - 1, end_part);
+ end_part= max(lower_2n - 1, end_part);
}
else if (new_total_partitions <= upper_2n)
{
@@ -5351,9 +5326,7 @@ state of p1.
DBUG_ASSERT(!alt_part_info->use_default_partitions);
/* We specified partitions explicitly so don't use defaults anymore. */
tab_part_info->use_default_partitions= FALSE;
- if (alt_part_info->set_up_defaults_for_partitioning(table->file,
- ULL(0),
- 0))
+ if (alt_part_info->set_up_defaults_for_partitioning(table->file, 0, 0))
{
goto err;
}
@@ -5486,7 +5459,7 @@ the generated partition syntax in a correct manner.
tab_part_info->use_default_num_subpartitions= FALSE;
}
if (tab_part_info->check_partition_info(thd, (handlerton**)NULL,
- table->file, ULL(0), TRUE))
+ table->file, 0, TRUE))
{
goto err;
}
@@ -5637,7 +5610,7 @@ the generated partition syntax in a correct manner.
Need to cater for engine types that can handle partition without
using the partition handler.
*/
- if (thd->work_part_info != tab_part_info)
+ if (part_info != tab_part_info)
{
DBUG_PRINT("info", ("partition changed"));
*partition_changed= TRUE;
@@ -7225,7 +7198,8 @@ void set_key_field_ptr(KEY *key_info, const uchar *new_buf,
void mem_alloc_error(size_t size)
{
- my_error(ER_OUTOFMEMORY, MYF(0), static_cast<int>(size));
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
+ static_cast<int>(size));
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -7311,12 +7285,12 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
definition)
IMPLEMENTATION
- There are two available interval analyzer functions:
- (1) get_part_iter_for_interval_via_mapping
+ There are three available interval analyzer functions:
+ (1) get_part_iter_for_interval_via_mapping
(2) get_part_iter_for_interval_cols_via_map
(3) get_part_iter_for_interval_via_walking
- They both have limited applicability:
+ They all have limited applicability:
(1) is applicable for "PARTITION BY <RANGE|LIST>(func(t.field))", where
func is a monotonic function.
@@ -7689,6 +7663,9 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
get_endpoint_func UNINIT_VAR(get_endpoint);
bool can_match_multiple_values; /* is not '=' */
uint field_len= field->pack_length_in_rec();
+ MYSQL_TIME start_date;
+ bool check_zero_dates= false;
+ bool zero_in_start_date= true;
DBUG_ENTER("get_part_iter_for_interval_via_mapping");
DBUG_ASSERT(!is_subpart);
(void) store_length_array;
@@ -7745,6 +7722,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
{
/* col is NOT NULL, but F(col) can return NULL, add NULL partition */
part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
+ check_zero_dates= true;
}
}
@@ -7788,6 +7766,19 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
DBUG_RETURN(1);
}
part_iter->part_nums.cur= part_iter->part_nums.start;
+ if (check_zero_dates && !part_info->part_expr->null_value)
+ {
+ if (!(flags & NO_MAX_RANGE) &&
+ (field->type() == MYSQL_TYPE_DATE ||
+ field->type() == MYSQL_TYPE_DATETIME))
+ {
+ /* Monotonic, but returns NULL for dates with zeros in month/day. */
+ zero_in_start_date= field->get_date(&start_date, 0);
+ DBUG_PRINT("info", ("zero start %u %04d-%02d-%02d",
+ zero_in_start_date, start_date.year,
+ start_date.month, start_date.day));
+ }
+ }
if (part_iter->part_nums.start == max_endpoint_val)
DBUG_RETURN(0); /* No partitions */
}
@@ -7801,6 +7792,29 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
store_key_image_to_rec(field, max_value, field_len);
bool include_endp= !test(flags & NEAR_MAX);
part_iter->part_nums.end= get_endpoint(part_info, 0, include_endp);
+ if (check_zero_dates &&
+ !zero_in_start_date &&
+ !part_info->part_expr->null_value)
+ {
+ MYSQL_TIME end_date;
+ bool zero_in_end_date= field->get_date(&end_date, 0);
+ /*
+ This is an optimization for TO_DAYS()/TO_SECONDS() to avoid scanning
+ the NULL partition for ranges that cannot include a date with 0 as
+ month/day.
+ */
+ DBUG_PRINT("info", ("zero end %u %04d-%02d-%02d",
+ zero_in_end_date,
+ end_date.year, end_date.month, end_date.day));
+ DBUG_ASSERT(!memcmp(((Item_func*) part_info->part_expr)->func_name(),
+ "to_days", 7) ||
+ !memcmp(((Item_func*) part_info->part_expr)->func_name(),
+ "to_seconds", 10));
+ if (!zero_in_end_date &&
+ start_date.month == end_date.month &&
+ start_date.year == end_date.year)
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= false;
+ }
if (part_iter->part_nums.start >= part_iter->part_nums.end &&
!part_iter->ret_null_part)
DBUG_RETURN(0); /* No partitions */
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
index 537c4cb9f17..7f39ddd7a3f 100644
--- a/sql/sql_partition.h
+++ b/sql/sql_partition.h
@@ -1,8 +1,7 @@
#ifndef SQL_PARTITION_INCLUDED
#define SQL_PARTITION_INCLUDED
-/*
- Copyright (c) 2006, 2010, Oracle and/or its affiliates.
+/* Copyright (c) 2006, 2013, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -33,7 +32,6 @@ class partition_info;
struct TABLE;
struct TABLE_LIST;
typedef struct st_bitmap MY_BITMAP;
-typedef struct st_ha_create_information HA_CREATE_INFO;
typedef struct st_key KEY;
typedef struct st_key_range key_range;
diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc
index d9128fab8ab..1a82413bb07 100644
--- a/sql/sql_partition_admin.cc
+++ b/sql/sql_partition_admin.cc
@@ -775,7 +775,9 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd)
TODO: Add support for TRUNCATE PARTITION for NDB and other
engines supporting native partitioning.
*/
- if (first_table->table->s->db_type() != partition_hton)
+
+ if (!first_table->table || first_table->view ||
+ first_table->table->s->db_type() != partition_hton)
{
my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
DBUG_RETURN(TRUE);
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index fbd0f0426c9..e89054ac849 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -15,9 +15,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+#include "sql_plugin.h"
#include "sql_priv.h" // SHOW_MY_BOOL
#include "unireg.h"
-#include "my_global.h" // REQUIRED by m_string.h
#include "sql_class.h" // set_var.h: THD
#include "sys_vars_shared.h"
#include "sql_locale.h"
@@ -35,6 +35,8 @@
#include <mysql/plugin_auth.h>
#include "lock.h" // MYSQL_LOCK_IGNORE_TIMEOUT
#include <mysql/plugin_auth.h>
+#include "sql_plugin_compat.h"
+
#define REPORT_TO_LOG 1
#define REPORT_TO_USER 2
@@ -135,7 +137,7 @@ static int min_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]=
MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION,
MYSQL_AUDIT_INTERFACE_VERSION,
MYSQL_REPLICATION_INTERFACE_VERSION,
- MYSQL_AUTHENTICATION_INTERFACE_VERSION
+ MIN_AUTHENTICATION_INTERFACE_VERSION
};
static int cur_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]=
{
@@ -195,6 +197,8 @@ static bool reap_needed= false;
static int plugin_array_version=0;
static bool initialized= 0;
+ulong dlopen_count;
+
/*
write-lock on LOCK_system_variables_hash is required before modifying
@@ -306,10 +310,6 @@ static void unlock_variables(THD *thd, struct system_variables *vars);
static void cleanup_variables(THD *thd, struct system_variables *vars);
static void plugin_vars_free_values(sys_var *vars);
static void restore_pluginvar_names(sys_var *first);
-static void plugin_opt_set_limits(struct my_option *,
- const struct st_mysql_sys_var *);
-#define my_intern_plugin_lock(A,B) intern_plugin_lock(A,B)
-#define my_intern_plugin_lock_ci(A,B) intern_plugin_lock(A,B)
static plugin_ref intern_plugin_lock(LEX *lex, plugin_ref plugin);
static void intern_plugin_unlock(LEX *lex, plugin_ref plugin);
static void reap_plugins(void);
@@ -507,7 +507,6 @@ static my_bool read_mysql_plugin_info(struct st_plugin_dl *plugin_dl,
/* Determine interface version */
if (!sym)
{
- free_plugin_mem(plugin_dl);
report_error(report, ER_CANT_FIND_DL_ENTRY, plugin_interface_version_sym);
DBUG_RETURN(TRUE);
}
@@ -517,7 +516,6 @@ static my_bool read_mysql_plugin_info(struct st_plugin_dl *plugin_dl,
if (plugin_dl->mysqlversion < min_plugin_interface_version ||
(plugin_dl->mysqlversion >> 8) > (MYSQL_PLUGIN_INTERFACE_VERSION >> 8))
{
- free_plugin_mem(plugin_dl);
report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, 0,
"plugin interface version mismatch");
DBUG_RETURN(TRUE);
@@ -525,7 +523,6 @@ static my_bool read_mysql_plugin_info(struct st_plugin_dl *plugin_dl,
/* Find plugin declarations */
if (!(sym= dlsym(plugin_dl->handle, plugin_declarations_sym)))
{
- free_plugin_mem(plugin_dl);
report_error(report, ER_CANT_FIND_DL_ENTRY, plugin_declarations_sym);
DBUG_RETURN(TRUE);
}
@@ -556,7 +553,6 @@ static my_bool read_mysql_plugin_info(struct st_plugin_dl *plugin_dl,
MYF(MY_ZEROFILL|MY_WME));
if (!cur)
{
- free_plugin_mem(plugin_dl);
report_error(report, ER_OUTOFMEMORY,
static_cast<int>(plugin_dl->dl.length));
DBUG_RETURN(TRUE);
@@ -631,7 +627,6 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl,
Actually this branch impossible because in case of absence of maria
version we try mysql version.
*/
- free_plugin_mem(plugin_dl);
report_error(report, ER_CANT_FIND_DL_ENTRY,
maria_plugin_interface_version_sym);
DBUG_RETURN(TRUE);
@@ -642,7 +637,6 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl,
if (plugin_dl->mariaversion < min_maria_plugin_interface_version ||
(plugin_dl->mariaversion >> 8) > (MARIA_PLUGIN_INTERFACE_VERSION >> 8))
{
- free_plugin_mem(plugin_dl);
report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, 0,
"plugin interface version mismatch");
DBUG_RETURN(TRUE);
@@ -650,7 +644,6 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl,
/* Find plugin declarations */
if (!(sym= dlsym(plugin_dl->handle, maria_plugin_declarations_sym)))
{
- free_plugin_mem(plugin_dl);
report_error(report, ER_CANT_FIND_DL_ENTRY, maria_plugin_declarations_sym);
DBUG_RETURN(TRUE);
}
@@ -664,7 +657,6 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl,
sizeof_st_plugin= *(int *)sym;
else
{
- free_plugin_mem(plugin_dl);
report_error(report, ER_CANT_FIND_DL_ENTRY, maria_sizeof_st_plugin_sym);
DBUG_RETURN(TRUE);
}
@@ -682,7 +674,6 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl,
MYF(MY_ZEROFILL|MY_WME));
if (!cur)
{
- free_plugin_mem(plugin_dl);
report_error(report, ER_OUTOFMEMORY,
static_cast<int>(plugin_dl->dl.length));
DBUG_RETURN(TRUE);
@@ -712,11 +703,12 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
#ifdef HAVE_DLOPEN
char dlpath[FN_REFLEN];
uint plugin_dir_len, dummy_errors, dlpathlen, i;
- struct st_plugin_dl *tmp, plugin_dl;
+ struct st_plugin_dl *tmp= 0, plugin_dl;
void *sym;
DBUG_ENTER("plugin_dl_add");
DBUG_PRINT("enter", ("dl->str: '%s', dl->length: %d",
dl->str, (int) dl->length));
+ mysql_mutex_assert_owner(&LOCK_plugin);
plugin_dir_len= strlen(opt_plugin_dir);
/*
Ensure that the dll doesn't have a path.
@@ -754,8 +746,9 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
if (*errmsg == ' ') errmsg++;
}
report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, errno, errmsg);
- DBUG_RETURN(0);
+ goto ret;
}
+ dlopen_count++;
/* Checks which plugin interface present and reads info */
if (!(sym= dlsym(plugin_dl.handle, maria_plugin_interface_version_sym)))
@@ -765,12 +758,12 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
plugin_interface_version_sym),
dlpath,
report))
- DBUG_RETURN(0);
+ goto ret;
}
else
{
if (read_maria_plugin_info(&plugin_dl, sym, dlpath, report))
- DBUG_RETURN(0);
+ goto ret;
}
/* link the services in */
@@ -787,7 +780,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
"service '%s' interface version mismatch",
list_of_services[i].name);
report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, 0, buf);
- DBUG_RETURN(0);
+ goto ret;
}
*(void**)sym= list_of_services[i].service;
}
@@ -797,10 +790,9 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
plugin_dl.dl.length= dl->length * files_charset_info->mbmaxlen + 1;
if (! (plugin_dl.dl.str= (char*) my_malloc(plugin_dl.dl.length, MYF(0))))
{
- free_plugin_mem(&plugin_dl);
report_error(report, ER_OUTOFMEMORY,
static_cast<int>(plugin_dl.dl.length));
- DBUG_RETURN(0);
+ goto ret;
}
plugin_dl.dl.length= copy_and_convert(plugin_dl.dl.str, plugin_dl.dl.length,
files_charset_info, dl->str, dl->length, system_charset_info,
@@ -809,12 +801,17 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
/* Add this dll to array */
if (! (tmp= plugin_dl_insert_or_reuse(&plugin_dl)))
{
- free_plugin_mem(&plugin_dl);
report_error(report, ER_OUTOFMEMORY,
static_cast<int>(sizeof(struct st_plugin_dl)));
- DBUG_RETURN(0);
+ goto ret;
}
+
+ret:
+ if (!tmp)
+ free_plugin_mem(&plugin_dl);
+
DBUG_RETURN(tmp);
+
#else
DBUG_ENTER("plugin_dl_add");
report_error(report, ER_FEATURE_DISABLED, "plugin", "HAVE_DLOPEN");
@@ -823,34 +820,23 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
}
-static void plugin_dl_del(const LEX_STRING *dl)
+static void plugin_dl_del(struct st_plugin_dl *plugin_dl)
{
-#ifdef HAVE_DLOPEN
- uint i;
DBUG_ENTER("plugin_dl_del");
+ if (!plugin_dl)
+ DBUG_VOID_RETURN;
+
mysql_mutex_assert_owner(&LOCK_plugin);
- for (i= 0; i < plugin_dl_array.elements; i++)
+ /* Do not remove this element, unless no other plugin uses this dll. */
+ if (! --plugin_dl->ref_count)
{
- struct st_plugin_dl *tmp= *dynamic_element(&plugin_dl_array, i,
- struct st_plugin_dl **);
- if (tmp->ref_count &&
- ! my_strnncoll(files_charset_info,
- (const uchar *)dl->str, dl->length,
- (const uchar *)tmp->dl.str, tmp->dl.length))
- {
- /* Do not remove this element, unless no other plugin uses this dll. */
- if (! --tmp->ref_count)
- {
- free_plugin_mem(tmp);
- bzero(tmp, sizeof(struct st_plugin_dl));
- }
- break;
- }
+ free_plugin_mem(plugin_dl);
+ bzero(plugin_dl, sizeof(struct st_plugin_dl));
}
+
DBUG_VOID_RETURN;
-#endif
}
@@ -921,7 +907,8 @@ static plugin_ref intern_plugin_lock(LEX *lex, plugin_ref rc)
mysql_mutex_assert_owner(&LOCK_plugin);
- if (pi->state & (PLUGIN_IS_READY | PLUGIN_IS_UNINITIALIZED))
+ if (pi->state & (PLUGIN_IS_READY | PLUGIN_IS_UNINITIALIZED |
+ PLUGIN_IS_DELETED))
{
plugin_ref plugin;
#ifdef DBUG_OFF
@@ -986,7 +973,7 @@ plugin_ref plugin_lock(THD *thd, plugin_ref ptr)
#endif
mysql_mutex_lock(&LOCK_plugin);
plugin_ref_to_int(ptr)->locks_total++;
- rc= my_intern_plugin_lock_ci(lex, ptr);
+ rc= intern_plugin_lock(lex, ptr);
mysql_mutex_unlock(&LOCK_plugin);
DBUG_RETURN(rc);
}
@@ -1000,7 +987,7 @@ plugin_ref plugin_lock_by_name(THD *thd, const LEX_STRING *name, int type)
DBUG_ENTER("plugin_lock_by_name");
mysql_mutex_lock(&LOCK_plugin);
if ((plugin= plugin_find_internal(name, type)))
- rc= my_intern_plugin_lock_ci(lex, plugin_int_to_ref(plugin));
+ rc= intern_plugin_lock(lex, plugin_int_to_ref(plugin));
mysql_mutex_unlock(&LOCK_plugin);
DBUG_RETURN(rc);
}
@@ -1136,7 +1123,7 @@ err:
if (errs == 0 && oks == 0 && !dupes) // no plugin was found
report_error(report, ER_CANT_FIND_DL_ENTRY, name->str);
- plugin_dl_del(dl);
+ plugin_dl_del(tmp.plugin_dl);
DBUG_RETURN(errs > 0 || oks + dupes == 0);
}
@@ -1152,22 +1139,21 @@ static void plugin_deinitialize(struct st_plugin_int *plugin, bool ref_check)
if (plugin->plugin->status_vars)
{
-#ifdef FIX_LATER
- /**
- @todo
- unfortunately, status variables were introduced without a
- pluginname_ namespace, that is pluginname_ was not added automatically
- to status variable names. It should be fixed together with the next
- incompatible API change.
+ /*
+ Historical NDB behavior caused MySQL plugins to specify
+ status variable names in full, with the plugin name prefix.
+ This was never fixed in MySQL.
+ MariaDB fixes that, but supports the MySQL style too.
*/
- SHOW_VAR array[2]= {
+ SHOW_VAR *show_vars= plugin->plugin->status_vars;
+ SHOW_VAR tmp_array[2]= {
{plugin->plugin->name, (char*)plugin->plugin->status_vars, SHOW_ARRAY},
{0, 0, SHOW_UNDEF}
};
- remove_status_vars(array);
-#else
- remove_status_vars(plugin->plugin->status_vars);
-#endif /* FIX_LATER */
+ if (strncasecmp(show_vars->name, plugin->name.str, plugin->name.length))
+ show_vars= tmp_array;
+
+ remove_status_vars(show_vars);
}
if (plugin_type_deinitialize[plugin->plugin->type])
@@ -1213,8 +1199,7 @@ static void plugin_del(struct st_plugin_int *plugin)
restore_pluginvar_names(plugin->system_vars);
plugin_vars_free_values(plugin->system_vars);
my_hash_delete(&plugin_hash[plugin->plugin->type], (uchar*)plugin);
- if (plugin->plugin_dl)
- plugin_dl_del(&plugin->plugin_dl->dl);
+ plugin_dl_del(plugin->plugin_dl);
plugin->state= PLUGIN_IS_FREED;
plugin_array_version++;
free_root(&plugin->mem_root, MYF(0));
@@ -1378,24 +1363,22 @@ static int plugin_initialize(struct st_plugin_int *plugin)
if (plugin->plugin->status_vars)
{
-#ifdef FIX_LATER
/*
- We have a problem right now where we can not prepend without
- breaking backwards compatibility. We will fix this shortly so
- that engines have "use names" and we wil use those for
- CREATE TABLE, and use the plugin name then for adding automatic
- variable names.
+ Historical NDB behavior caused MySQL plugins to specify
+ status variable names in full, with the plugin name prefix.
+ This was never fixed in MySQL.
+ MariaDB fixes that, but supports the MySQL style too.
*/
- SHOW_VAR array[2]= {
+ SHOW_VAR *show_vars= plugin->plugin->status_vars;
+ SHOW_VAR tmp_array[2]= {
{plugin->plugin->name, (char*)plugin->plugin->status_vars, SHOW_ARRAY},
{0, 0, SHOW_UNDEF}
};
- if (add_status_vars(array)) // add_status_vars makes a copy
- goto err;
-#else
- if (add_status_vars(plugin->plugin->status_vars))
+ if (strncasecmp(show_vars->name, plugin->name.str, plugin->name.length))
+ show_vars= tmp_array;
+
+ if (add_status_vars(show_vars))
goto err;
-#endif /* FIX_LATER */
}
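For illustration only (the plugin name "myplug" and the counter variable are
made up), the two declaration styles that the status-variable handling above
and in plugin_deinitialize() accepts:

    static ulong myplug_cache_hits;           /* hypothetical counter */

    /* MySQL style: the plugin prefixes its own status variables, so
       show_vars is used as-is.                                        */
    static SHOW_VAR myplug_status_mysql_style[]= {
      {"myplug_cache_hits", (char*) &myplug_cache_hits, SHOW_LONG},
      {0, 0, SHOW_UNDEF}
    };

    /* MariaDB style: unprefixed names; the code wraps them in a
       SHOW_ARRAY entry named after the plugin, which supplies the
       prefix.                                                          */
    static SHOW_VAR myplug_status_mariadb_style[]= {
      {"cache_hits", (char*) &myplug_cache_hits, SHOW_LONG},
      {0, 0, SHOW_UNDEF}
    };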
/*
@@ -1507,6 +1490,8 @@ int plugin_init(int *argc, char **argv, int flags)
if (initialized)
DBUG_RETURN(0);
+ dlopen_count= 0;
+
#ifdef HAVE_PSI_INTERFACE
init_plugin_psi_keys();
#endif
@@ -1535,8 +1520,8 @@ int plugin_init(int *argc, char **argv, int flags)
}
/* prepare debug_sync service */
- DBUG_ASSERT(strcmp(list_of_services[5].name, "debug_sync_service") == 0);
- list_of_services[5].service= *(void**)&debug_sync_C_callback_ptr;
+ DBUG_ASSERT(strcmp(list_of_services[4].name, "debug_sync_service") == 0);
+ list_of_services[4].service= *(void**)&debug_sync_C_callback_ptr;
mysql_mutex_lock(&LOCK_plugin);
@@ -1616,7 +1601,7 @@ int plugin_init(int *argc, char **argv, int flags)
{
DBUG_ASSERT(!global_system_variables.table_plugin);
global_system_variables.table_plugin=
- my_intern_plugin_lock(NULL, plugin_int_to_ref(plugin_ptr));
+ intern_plugin_lock(NULL, plugin_int_to_ref(plugin_ptr));
DBUG_ASSERT(plugin_ptr->ref_count == 1);
}
}
@@ -1729,9 +1714,6 @@ static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
int error;
THD *new_thd= new THD;
bool result;
-#ifdef EMBEDDED_LIBRARY
- No_such_table_error_handler error_handler;
-#endif /* EMBEDDED_LIBRARY */
DBUG_ENTER("plugin_load");
new_thd->thread_stack= (char*) &tables;
@@ -1740,22 +1722,13 @@ static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
new_thd->db_length= 5;
bzero((char*) &new_thd->net, sizeof(new_thd->net));
tables.init_one_table("mysql", 5, "plugin", 6, "plugin", TL_READ);
-
-#ifdef EMBEDDED_LIBRARY
- /*
- When building an embedded library, if the mysql.plugin table
- does not exist, we silently ignore the missing table
- */
- new_thd->push_internal_handler(&error_handler);
-#endif /* EMBEDDED_LIBRARY */
+ tables.open_strategy= TABLE_LIST::IF_EMBEDDED(OPEN_IF_EXISTS, OPEN_NORMAL);
result= open_and_lock_tables(new_thd, &tables, FALSE, MYSQL_LOCK_IGNORE_TIMEOUT);
-#ifdef EMBEDDED_LIBRARY
- new_thd->pop_internal_handler();
- if (error_handler.safely_trapped_errors())
+ table= tables.table;
+ if (IF_EMBEDDED(!table, false))
goto end;
-#endif /* EMBEDDED_LIBRARY */
if (result)
{
@@ -1767,7 +1740,7 @@ static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
sql_print_warning("Could not open mysql.plugin table. Some options may be missing from the help text");
goto end;
}
- table= tables.table;
+
if (init_read_record(&read_record_info, new_thd, table, NULL, 1, 0, FALSE))
{
sql_print_error("Could not initialize init_read_record; Plugins not "
@@ -1799,7 +1772,7 @@ static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
mysql_mutex_unlock(&LOCK_plugin);
}
if (error > 0)
- sql_print_error(ER(ER_GET_ERRNO), my_errno);
+ sql_print_error(ER(ER_GET_ERRNO), my_errno, table->file->table_type());
end_read_record(&read_record_info);
table->m_needs_reopen= TRUE; // Force close to free memory
close_mysql_tables(new_thd);
@@ -2040,9 +2013,13 @@ static bool finalize_install(THD *thd, TABLE *table, const LEX_STRING *name)
ER_CANT_INITIALIZE_UDF, ER(ER_CANT_INITIALIZE_UDF),
name->str, "Plugin is disabled");
}
+ else if (tmp->state != PLUGIN_IS_UNINITIALIZED)
+ {
+ /* already installed */
+ return 0;
+ }
else
{
- DBUG_ASSERT(tmp->state == PLUGIN_IS_UNINITIALIZED);
if (plugin_initialize(tmp))
{
report_error(REPORT_TO_USER, ER_CANT_INITIALIZE_UDF, name->str,
@@ -2182,9 +2159,7 @@ static bool do_uninstall(THD *thd, TABLE *table, const LEX_STRING *name)
}
if (!plugin->plugin_dl)
{
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- WARN_PLUGIN_DELETE_BUILTIN, ER(WARN_PLUGIN_DELETE_BUILTIN));
- my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PLUGIN", name->str);
+ my_error(ER_PLUGIN_DELETE_BUILTIN, MYF(0));
return 1;
}
if (plugin->load_option == PLUGIN_FORCE_PLUS_PERMANENT)
@@ -2370,14 +2345,88 @@ err:
}
+static bool plugin_dl_foreach_internal(THD *thd, st_plugin_dl *plugin_dl,
+ st_maria_plugin *plug,
+ plugin_foreach_func *func, void *arg)
+{
+ for (; plug->name; plug++)
+ {
+ st_plugin_int tmp, *plugin;
+
+ tmp.name.str= const_cast<char*>(plug->name);
+ tmp.name.length= strlen(plug->name);
+ tmp.plugin= plug;
+ tmp.plugin_dl= plugin_dl;
+
+ mysql_mutex_lock(&LOCK_plugin);
+ if ((plugin= plugin_find_internal(&tmp.name, MYSQL_ANY_PLUGIN)) &&
+ plugin->plugin == plug)
+ {
+ tmp.state= plugin->state;
+ tmp.load_option= plugin->load_option;
+ }
+ else
+ {
+ tmp.state= PLUGIN_IS_FREED;
+ tmp.load_option= PLUGIN_OFF;
+ }
+ mysql_mutex_unlock(&LOCK_plugin);
+
+ plugin= &tmp;
+ if (func(thd, plugin_int_to_ref(plugin), arg))
+ return 1;
+ }
+ return 0;
+}
+
+bool plugin_dl_foreach(THD *thd, const LEX_STRING *dl,
+ plugin_foreach_func *func, void *arg)
+{
+ bool err= 0;
+
+ if (dl)
+ {
+ mysql_mutex_lock(&LOCK_plugin);
+ st_plugin_dl *plugin_dl= plugin_dl_add(dl, REPORT_TO_USER);
+ mysql_mutex_unlock(&LOCK_plugin);
+
+ if (!plugin_dl)
+ return 1;
+
+ err= plugin_dl_foreach_internal(thd, plugin_dl, plugin_dl->plugins,
+ func, arg);
+
+ mysql_mutex_lock(&LOCK_plugin);
+ plugin_dl_del(plugin_dl);
+ mysql_mutex_unlock(&LOCK_plugin);
+ }
+ else
+ {
+ struct st_maria_plugin **builtins;
+ for (builtins= mysql_mandatory_plugins; !err && *builtins; builtins++)
+ err= plugin_dl_foreach_internal(thd, 0, *builtins, func, arg);
+ for (builtins= mysql_optional_plugins; !err && *builtins; builtins++)
+ err= plugin_dl_foreach_internal(thd, 0, *builtins, func, arg);
+ }
+ return err;
+}
+
+
/****************************************************************************
Internal type declarations for variables support
****************************************************************************/
#undef MYSQL_SYSVAR_NAME
#define MYSQL_SYSVAR_NAME(name) name
-#define PLUGIN_VAR_TYPEMASK 0x007f
-#define PLUGIN_VAR_BOOKMARK_KEY (PLUGIN_VAR_TYPEMASK | PLUGIN_VAR_MEMALLOC)
+#define PLUGIN_VAR_TYPEMASK 0x7f
+#define BOOKMARK_MEMALLOC 0x80
+
+static inline char plugin_var_bookmark_key(uint flags)
+{
+ return (flags & PLUGIN_VAR_TYPEMASK) |
+ (flags & PLUGIN_VAR_MEMALLOC ? BOOKMARK_MEMALLOC : 0);
+}
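A small worked illustration of why the bookmark key byte changed (flag values
assumed from the public plugin API: PLUGIN_VAR_STR 0x0005, PLUGIN_VAR_THDLOCAL
0x0100, PLUGIN_VAR_MEMALLOC 0x8000):

    /* Illustration only: a thread-local, heap-allocated string sysvar. */
    uint flags= PLUGIN_VAR_STR | PLUGIN_VAR_THDLOCAL | PLUGIN_VAR_MEMALLOC; /* 0x8105 */

    /* Old key byte: (char) (flags & (0x007f | PLUGIN_VAR_MEMALLOC)) == 0x05;
       the MEMALLOC bit (0x8000) was lost when truncated to a single char.   */
    /* New key byte: plugin_var_bookmark_key(flags) == 0x85; MEMALLOC is
       folded into the spare 0x80 bit, so it survives in the bookmark key.   */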
#define EXTRA_OPTIONS 3 /* options for: 'foo', 'plugin-foo' and NULL */
@@ -2680,7 +2729,7 @@ sys_var *find_sys_var(THD *thd, const char *str, uint length)
{
mysql_rwlock_unlock(&LOCK_system_variables_hash);
LEX *lex= thd ? thd->lex : 0;
- if (!(plugin= my_intern_plugin_lock(lex, plugin_int_to_ref(pi->plugin))))
+ if (!(plugin= intern_plugin_lock(lex, plugin_int_to_ref(pi->plugin))))
var= NULL; /* failed to lock it, it must be uninstalling */
else
if (!(plugin_state(plugin) & PLUGIN_IS_READY))
@@ -2731,7 +2780,7 @@ static st_bookmark *find_bookmark(const char *plugin, const char *name,
else
memcpy(varname + 1, name, namelen + 1);
- varname[0]= flags & PLUGIN_VAR_BOOKMARK_KEY;
+ varname[0]= plugin_var_bookmark_key(flags);
result= (st_bookmark*) my_hash_search(&bookmark_hash,
(const uchar*) varname, length - 1);
@@ -2789,7 +2838,7 @@ static st_bookmark *register_var(const char *plugin, const char *name,
{
result= (st_bookmark*) alloc_root(&plugin_mem_root,
sizeof(struct st_bookmark) + length-1);
- varname[0]= flags & PLUGIN_VAR_BOOKMARK_KEY;
+ varname[0]= plugin_var_bookmark_key(flags);
memcpy(result->key, varname, length);
result->name_len= length - 2;
result->offset= -1;
@@ -2911,7 +2960,7 @@ static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock)
if (!(var= intern_find_sys_var(v->key + 1, v->name_len)) ||
!(pi= var->cast_pluginvar()) ||
- v->key[0] != (pi->plugin_var->flags & PLUGIN_VAR_BOOKMARK_KEY))
+ v->key[0] != plugin_var_bookmark_key(pi->plugin_var->flags))
continue;
/* Here we do anything special that may be required of the data types */
@@ -3004,7 +3053,7 @@ void plugin_thdvar_init(THD *thd)
mysql_mutex_lock(&LOCK_plugin);
thd->variables.table_plugin=
- my_intern_plugin_lock(NULL, global_system_variables.table_plugin);
+ intern_plugin_lock(NULL, global_system_variables.table_plugin);
intern_plugin_unlock(NULL, old_table_plugin);
mysql_mutex_unlock(&LOCK_plugin);
DBUG_VOID_RETURN;
@@ -3043,7 +3092,7 @@ static void cleanup_variables(THD *thd, struct system_variables *vars)
DBUG_ASSERT((uint)v->offset <= vars->dynamic_variables_head);
if ((v->key[0] & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR &&
- v->key[0] & PLUGIN_VAR_MEMALLOC)
+ v->key[0] & BOOKMARK_MEMALLOC)
{
char **ptr= (char**)(vars->dynamic_variables_ptr + v->offset);
my_free(*ptr);
@@ -3315,8 +3364,8 @@ bool sys_var_pluginvar::global_update(THD *thd, set_var *var)
options->block_size= (long) (opt)->blk_sz
-static void plugin_opt_set_limits(struct my_option *options,
- const struct st_mysql_sys_var *opt)
+void plugin_opt_set_limits(struct my_option *options,
+ const struct st_mysql_sys_var *opt)
{
options->sub_size= 0;
@@ -3416,17 +3465,6 @@ static void plugin_opt_set_limits(struct my_option *options,
options->arg_type= OPT_ARG;
}
-extern "C" my_bool get_one_plugin_option(int optid, const struct my_option *,
- char *);
-
-my_bool get_one_plugin_option(int optid __attribute__((unused)),
- const struct my_option *opt,
- char *argument)
-{
- return 0;
-}
-
-
/**
Creates a set of my_option objects associated with a specified plugin-
handle.
@@ -3906,3 +3944,18 @@ void add_plugin_options(DYNAMIC_ARRAY *options, MEM_ROOT *mem_root)
}
}
+
+/**
+ Returns a sys_var corresponding to a particular MYSQL_SYSVAR(...)
+*/
+sys_var *find_plugin_sysvar(st_plugin_int *plugin, st_mysql_sys_var *plugin_var)
+{
+ for (sys_var *var= plugin->system_vars; var; var= var->next)
+ {
+ sys_var_pluginvar *pvar=var->cast_pluginvar();
+ if (pvar->plugin_var == plugin_var)
+ return var;
+ }
+ return 0;
+}
+
diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h
index dc713826fe2..5327b27e97c 100644
--- a/sql/sql_plugin.h
+++ b/sql/sql_plugin.h
@@ -17,7 +17,6 @@
#ifndef _sql_plugin_h
#define _sql_plugin_h
-#include <my_global.h>
/*
the following #define adds server-only members to enum_mysql_show_type,
@@ -27,7 +26,7 @@
SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, \
SHOW_HAVE, SHOW_MY_BOOL, SHOW_HA_ROWS, SHOW_SYS, \
SHOW_LONG_NOFLUSH, SHOW_LONGLONG_STATUS, SHOW_LEX_STRING
-#include <mysql/plugin.h>
+#include <my_global.h>
#undef SHOW_always_last
#include "m_string.h" /* LEX_STRING */
@@ -39,6 +38,8 @@ enum enum_plugin_load_option { PLUGIN_OFF, PLUGIN_ON, PLUGIN_FORCE,
PLUGIN_FORCE_PLUS_PERMANENT };
extern const char *global_plugin_typelib_names[];
+extern ulong dlopen_count;
+
#include <my_sys.h>
#include "sql_list.h"
@@ -151,9 +152,7 @@ extern void plugin_shutdown(void);
void add_plugin_options(DYNAMIC_ARRAY *options, MEM_ROOT *mem_root);
extern bool plugin_is_ready(const LEX_STRING *name, int type);
#define my_plugin_lock_by_name(A,B,C) plugin_lock_by_name(A,B,C)
-#define my_plugin_lock_by_name_ci(A,B,C) plugin_lock_by_name(A,B,C)
#define my_plugin_lock(A,B) plugin_lock(A,B)
-#define my_plugin_lock_ci(A,B) plugin_lock(A,B)
extern plugin_ref plugin_lock(THD *thd, plugin_ref ptr);
extern plugin_ref plugin_lock_by_name(THD *thd, const LEX_STRING *name,
int type);
@@ -166,6 +165,8 @@ extern bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name,
extern bool plugin_register_builtin(struct st_mysql_plugin *plugin);
extern void plugin_thdvar_init(THD *thd);
extern void plugin_thdvar_cleanup(THD *thd);
+sys_var *find_plugin_sysvar(st_plugin_int *plugin, st_mysql_sys_var *var);
+void plugin_opt_set_limits(struct my_option *, const struct st_mysql_sys_var *);
extern SHOW_COMP_OPTION plugin_status(const char *name, size_t len, int type);
extern bool check_valid_path(const char *path, size_t length);
@@ -175,4 +176,6 @@ typedef my_bool (plugin_foreach_func)(THD *thd,
#define plugin_foreach(A,B,C,D) plugin_foreach_with_mask(A,B,C,PLUGIN_IS_READY,D)
extern bool plugin_foreach_with_mask(THD *thd, plugin_foreach_func *func,
int type, uint state_mask, void *arg);
+extern bool plugin_dl_foreach(THD *thd, const LEX_STRING *dl,
+ plugin_foreach_func *func, void *arg);
#endif
diff --git a/sql/sql_plugin_compat.h b/sql/sql_plugin_compat.h
new file mode 100644
index 00000000000..5c7bb620575
--- /dev/null
+++ b/sql/sql_plugin_compat.h
@@ -0,0 +1,65 @@
+/* Copyright (C) 2013 Sergei Golubchik and Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/* old plugin api structures, used for backward compatibility */
+
+#define upgrade_var(X) latest->X= X
+#define upgrade_str(X) strmake_buf(latest->X, X)
+#define downgrade_var(X) X= latest->X
+#define downgrade_str(X) strmake_buf(X, latest->X)
+
+/**************************************************************/
+/* Authentication API, version 0x0100 *************************/
+#define MIN_AUTHENTICATION_INTERFACE_VERSION 0x0100
+
+struct MYSQL_SERVER_AUTH_INFO_0x0100 {
+ char *user_name;
+ unsigned int user_name_length;
+ const char *auth_string;
+ unsigned long auth_string_length;
+ char authenticated_as[49];
+ char external_user[512];
+ int password_used;
+ const char *host_or_ip;
+ unsigned int host_or_ip_length;
+
+ void upgrade(MYSQL_SERVER_AUTH_INFO *latest)
+ {
+ upgrade_var(user_name);
+ upgrade_var(user_name_length);
+ upgrade_var(auth_string);
+ upgrade_var(auth_string_length);
+ upgrade_str(authenticated_as);
+ upgrade_str(external_user);
+ upgrade_var(password_used);
+ upgrade_var(host_or_ip);
+ upgrade_var(host_or_ip_length);
+ }
+ void downgrade(MYSQL_SERVER_AUTH_INFO *latest)
+ {
+ downgrade_var(user_name);
+ downgrade_var(user_name_length);
+ downgrade_var(auth_string);
+ downgrade_var(auth_string_length);
+ downgrade_str(authenticated_as);
+ downgrade_str(external_user);
+ downgrade_var(password_used);
+ downgrade_var(host_or_ip);
+ downgrade_var(host_or_ip_length);
+ }
+};
+
+/**************************************************************/
+
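A brief sketch of how the upgrade()/downgrade() pair is intended to bridge
plugins written against the 0x0100 auth interface; the wrapper and the
old-style callback type below are hypothetical, and the real call site is not
part of this hunk:

    /* Hypothetical old-interface authenticate callback type. */
    typedef int (*old_auth_fn)(MYSQL_PLUGIN_VIO *, MYSQL_SERVER_AUTH_INFO_0x0100 *);

    static int run_old_auth(old_auth_fn authenticate, MYSQL_PLUGIN_VIO *vio,
                            MYSQL_SERVER_AUTH_INFO *info)
    {
      MYSQL_SERVER_AUTH_INFO_0x0100 compat;
      compat.downgrade(info);     /* copy current layout into the 0x0100 layout */
      int res= authenticate(vio, &compat);
      compat.upgrade(info);       /* copy results back, e.g. authenticated_as   */
      return res;
    }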
diff --git a/sql/sql_plugin_services.h b/sql/sql_plugin_services.h
index b9d54d25c8a..6b70048345a 100644
--- a/sql/sql_plugin_services.h
+++ b/sql/sql_plugin_services.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2009, 2010, Oracle and/or its affiliates.
+ Copyright (c) 2012, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -41,11 +42,6 @@ static struct thd_wait_service_st thd_wait_handler= {
thd_wait_end
};
-static struct my_thread_scheduler_service my_thread_scheduler_handler= {
- my_thread_scheduler_set,
- my_thread_scheduler_reset,
-};
-
static struct progress_report_service_st progress_report_handler= {
thd_progress_init,
thd_progress_report,
@@ -58,6 +54,11 @@ static struct kill_statement_service_st thd_kill_statement_handler= {
thd_kill_level
};
+static struct thd_timezone_service_st thd_timezone_handler= {
+ thd_TIME_to_gmt_sec,
+ thd_gmt_sec_to_TIME
+};
+
static struct my_sha1_service_st my_sha1_handler = {
my_sha1,
my_sha1_multi
@@ -68,10 +69,10 @@ static struct st_service_ref list_of_services[]=
{ "my_snprintf_service", VERSION_my_snprintf, &my_snprintf_handler },
{ "thd_alloc_service", VERSION_thd_alloc, &thd_alloc_handler },
{ "thd_wait_service", VERSION_thd_wait, &thd_wait_handler },
- { "my_thread_scheduler_service", VERSION_my_thread_scheduler, &my_thread_scheduler_handler },
{ "progress_report_service", VERSION_progress_report, &progress_report_handler },
{ "debug_sync_service", VERSION_debug_sync, 0 }, // updated in plugin_init()
{ "thd_kill_statement_service", VERSION_kill_statement, &thd_kill_statement_handler },
+ { "thd_timezone_service", VERSION_thd_timezone, &thd_timezone_handler },
{ "my_sha1_service", VERSION_my_sha1, &my_sha1_handler}
};
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index d6fcfab3a31..120cfc3e350 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -880,7 +880,7 @@ static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array,
if (param->state == Item_param::NO_VALUE)
DBUG_RETURN(1);
- if (param->limit_clause_param && param->item_type != Item::INT_ITEM)
+ if (param->limit_clause_param)
{
param->set_int(param->val_int(), MY_INT64_NUM_DECIMAL_DIGITS);
param->item_type= Item::INT_ITEM;
@@ -1515,7 +1515,8 @@ static int mysql_test_select(Prepared_statement *stmt,
if (!lex->result && !(lex->result= new (stmt->mem_root) select_send))
{
- my_error(ER_OUTOFMEMORY, MYF(0), static_cast<int>(sizeof(select_send)));
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
+ static_cast<int>(sizeof(select_send)));
goto error;
}
@@ -1896,7 +1897,7 @@ static bool mysql_test_multidelete(Prepared_statement *stmt,
stmt->thd->lex->current_select= &stmt->thd->lex->select_lex;
if (add_item_to_list(stmt->thd, new Item_null()))
{
- my_error(ER_OUTOFMEMORY, MYF(0), 0);
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 0);
goto error;
}
@@ -2208,6 +2209,7 @@ static bool check_prepared_statement(Prepared_statement *stmt)
case SQLCOM_GRANT:
case SQLCOM_REVOKE:
case SQLCOM_KILL:
+ case SQLCOM_SHUTDOWN:
break;
case SQLCOM_PREPARE:
@@ -3895,7 +3897,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
alloc_query(thd, (char*) expanded_query->ptr(),
expanded_query->length()))
{
- my_error(ER_OUTOFMEMORY, 0, expanded_query->length());
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), expanded_query->length());
goto error;
}
/*
@@ -4433,7 +4435,7 @@ bool Protocol_local::store(const char *str, size_t length,
bool Protocol_local::store(MYSQL_TIME *time, int decimals)
{
if (decimals != AUTO_SEC_PART_DIGITS)
- time->second_part= sec_part_truncate(time->second_part, decimals);
+ my_time_trunc(time, decimals);
return store_column(time, sizeof(MYSQL_TIME));
}
@@ -4451,7 +4453,7 @@ bool Protocol_local::store_date(MYSQL_TIME *time)
bool Protocol_local::store_time(MYSQL_TIME *time, int decimals)
{
if (decimals != AUTO_SEC_PART_DIGITS)
- time->second_part= sec_part_truncate(time->second_part, decimals);
+ my_time_trunc(time, decimals);
return store_column(time, sizeof(MYSQL_TIME));
}
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index a44f46a1a02..383888bac30 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2010-2011 Monty Program Ab
+ Copyright (c) 2010, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -143,15 +143,15 @@
however, needs to roll back the effects of the
succeeded statement to keep replication consistent.
*/
-#define OPTION_MASTER_SQL_ERROR (1ULL << 35)
+#define OPTION_MASTER_SQL_ERROR (1ULL << 35)
/*
Don't report errors for individual rows,
but just report the error on commit (or read, of course).
Note! Reserved for use in MySQL Cluster
*/
-#define OPTION_ALLOW_BATCH (ULL(1) << 36) // THD, intern (slave)
-#define OPTION_SKIP_REPLICATION (ULL(1) << 37) // THD, user
+#define OPTION_ALLOW_BATCH (1ULL << 36) // THD, intern (slave)
+#define OPTION_SKIP_REPLICATION (1ULL << 37) // THD, user
/*
Check how many bytes are available on buffer.
@@ -227,7 +227,8 @@ template <class T> bool valid_buffer_range(T jump,
#define OPTIMIZER_SWITCH_OPTIMIZE_JOIN_BUFFER_SIZE (1ULL << 25)
#define OPTIMIZER_SWITCH_TABLE_ELIMINATION (1ULL << 26)
#define OPTIMIZER_SWITCH_EXTENDED_KEYS (1ULL << 27)
-#define OPTIMIZER_SWITCH_LAST (1ULL << 27)
+#define OPTIMIZER_SWITCH_EXISTS_TO_IN (1ULL << 28)
+#define OPTIMIZER_SWITCH_USE_CONDITION_SELECTIVITY (1ULL << 29)
#define OPTIMIZER_SWITCH_DEFAULT (OPTIMIZER_SWITCH_INDEX_MERGE | \
OPTIMIZER_SWITCH_INDEX_MERGE_UNION | \
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 052e303798c..f3eab6b84cf 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -53,7 +53,7 @@ static void disable_checkpoints(THD *thd);
@retval !=0 Error; thd->killed is set or thd->is_error() is true
*/
-bool reload_acl_and_cache(THD *thd, unsigned long options,
+bool reload_acl_and_cache(THD *thd, unsigned long long options,
TABLE_LIST *tables, int *write_to_binlog)
{
bool result=0;
@@ -205,6 +205,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
DBUG_ASSERT(!thd || thd->locked_tables_mode ||
!thd->mdl_context.has_locks() ||
thd->handler_tables_hash.records ||
+ thd->ull_hash.records ||
thd->global_read_lock.is_acquired());
/*
diff --git a/sql/sql_reload.h b/sql/sql_reload.h
index ebb3d78c003..33ca022dc14 100644
--- a/sql/sql_reload.h
+++ b/sql/sql_reload.h
@@ -18,7 +18,7 @@
class THD;
struct TABLE_LIST;
-bool reload_acl_and_cache(THD *thd, unsigned long options,
+bool reload_acl_and_cache(THD *thd, unsigned long long options,
TABLE_LIST *tables, int *write_to_binlog);
bool flush_tables_with_read_lock(THD *thd, TABLE_LIST *all_tables);
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index dd729125223..78acb4a519f 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -29,10 +29,12 @@
#include "sql_base.h" // tdc_remove_table, lock_table_names,
#include "sql_handler.h" // mysql_ha_rm_tables
#include "sql_statistics.h"
-#include "datadict.h"
static TABLE_LIST *rename_tables(THD *thd, TABLE_LIST *table_list,
bool skip_error);
+static bool do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db,
+ char *new_table_name, char *new_table_alias,
+ bool skip_error);
static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list);
@@ -145,10 +147,6 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
0))
goto err;
- for (ren_table= table_list; ren_table; ren_table= ren_table->next_local)
- tdc_remove_table(thd, TDC_RT_REMOVE_ALL, ren_table->db,
- ren_table->table_name, FALSE);
-
error=0;
/*
An exclusive lock on table names is satisfactory to ensure
@@ -236,16 +234,14 @@ static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list)
true rename failed
*/
-bool
+static bool
do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
char *new_table_alias, bool skip_error)
{
int rc= 1;
- char new_name[FN_REFLEN + 1], old_name[FN_REFLEN + 1];
+ handlerton *hton;
+ bool new_exists, old_exists;
const char *new_alias, *old_alias;
- frm_type_enum frm_type;
- enum legacy_db_type table_type;
-
DBUG_ENTER("do_rename");
if (lower_case_table_names == 2)
@@ -260,53 +256,52 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
}
DBUG_ASSERT(new_alias);
- build_table_filename(new_name, sizeof(new_name) - 1,
- new_db, new_alias, reg_ext, 0);
- build_table_filename(old_name, sizeof(old_name) - 1,
- ren_table->db, old_alias, reg_ext, 0);
- if (check_table_file_presence(old_name,
- new_name, new_db, new_alias, new_alias, TRUE))
+ new_exists= ha_table_exists(thd, new_db, new_alias);
+
+ if (new_exists)
{
- DBUG_RETURN(1); // This can't be skipped
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias);
+ DBUG_RETURN(1); // This can't be skipped
}
- frm_type= dd_frm_type(thd, old_name, &table_type);
- switch (frm_type)
+ old_exists= ha_table_exists(thd, ren_table->db, old_alias, &hton);
+
+ if (old_exists)
{
- case FRMTYPE_TABLE:
+ DBUG_ASSERT(!thd->locked_tables_mode);
+ tdc_remove_table(thd, TDC_RT_REMOVE_ALL,
+ ren_table->db, ren_table->table_name, false);
+
+ if (hton != view_pseudo_hton)
+ {
+ if (!(rc= mysql_rename_table(hton, ren_table->db, old_alias,
+ new_db, new_alias, 0)))
{
- if (!(rc= mysql_rename_table(ha_resolve_by_legacy_type(thd,
- table_type),
- ren_table->db, old_alias,
- new_db, new_alias, 0)))
+ LEX_STRING db_name= { ren_table->db, ren_table->db_length };
+ LEX_STRING table_name= { ren_table->table_name,
+ ren_table->table_name_length };
+ LEX_STRING new_table= { (char *) new_alias, strlen(new_alias) };
+ (void) rename_table_in_stat_tables(thd, &db_name, &table_name,
+ &db_name, &new_table);
+ if ((rc= Table_triggers_list::change_table_name(thd, ren_table->db,
+ old_alias,
+ ren_table->table_name,
+ new_db,
+ new_alias)))
{
- LEX_STRING db_name= { ren_table->db, ren_table->db_length };
- LEX_STRING table_name= { ren_table->table_name,
- ren_table->table_name_length };
- LEX_STRING new_table= { (char *) new_alias, strlen(new_alias) };
- (void) rename_table_in_stat_tables(thd, &db_name, &table_name,
- &db_name, &new_table);
- if ((rc= Table_triggers_list::change_table_name(thd, ren_table->db,
- old_alias,
- ren_table->table_name,
- new_db,
- new_alias)))
- {
- /*
- We've succeeded in renaming table's .frm and in updating
- corresponding handler data, but have failed to update table's
- triggers appropriately. So let us revert operations on .frm
- and handler's data and report about failure to rename table.
- */
- (void) mysql_rename_table(ha_resolve_by_legacy_type(thd,
- table_type),
- new_db, new_alias,
- ren_table->db, old_alias, 0);
- }
+ /*
+ We've succeeded in renaming table's .frm and in updating
+ corresponding handler data, but have failed to update table's
+ triggers appropriately. So let us revert operations on .frm
+ and handler's data and report about failure to rename table.
+ */
+ (void) mysql_rename_table(hton, new_db, new_alias,
+ ren_table->db, old_alias, 0);
}
}
- break;
- case FRMTYPE_VIEW:
+ }
+ else
+ {
/*
change of schema is not allowed
except for the ALTER ...UPGRADE DATA DIRECTORY NAME command
@@ -314,22 +309,19 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
*/
if (thd->lex->sql_command != SQLCOM_ALTER_DB_UPGRADE &&
strcmp(ren_table->db, new_db))
- my_error(ER_FORBID_SCHEMA_CHANGE, MYF(0), ren_table->db,
- new_db);
+ my_error(ER_FORBID_SCHEMA_CHANGE, MYF(0), ren_table->db, new_db);
else
rc= mysql_rename_view(thd, new_db, new_alias, ren_table);
- break;
- default:
- DBUG_ASSERT(0); // should never happen
- case FRMTYPE_ERROR:
- my_error(ER_FILE_NOT_FOUND, MYF(0), old_name, my_errno);
- break;
+ }
+ }
+ else
+ {
+ my_error(ER_NO_SUCH_TABLE, MYF(0), ren_table->db, old_alias);
}
if (rc && !skip_error)
DBUG_RETURN(1);
DBUG_RETURN(0);
-
}
/*
Rename all tables in list; Return pointer to wrong entry if something goes
diff --git a/sql/sql_rename.h b/sql/sql_rename.h
index 039a3b8b4a1..aaf09a8d030 100644
--- a/sql/sql_rename.h
+++ b/sql/sql_rename.h
@@ -20,8 +20,5 @@ class THD;
struct TABLE_LIST;
bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent);
-bool do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db,
- char *new_table_name, char *new_table_alias,
- bool skip_error);
#endif /* SQL_RENAME_INCLUDED */
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 4067e765316..5a93f3b819a 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2008, 2012, Monty Program Ab
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2008, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,10 +16,12 @@
#include "sql_priv.h"
#include "unireg.h"
+#include "sql_base.h"
#include "sql_parse.h" // check_access
#ifdef HAVE_REPLICATION
#include "rpl_mi.h"
+#include "rpl_rli.h"
#include "sql_repl.h"
#include "sql_acl.h" // SUPER_ACL
#include "log_event.h"
@@ -28,6 +30,14 @@
#include "rpl_handler.h"
#include "debug_sync.h"
+
+enum enum_gtid_until_state {
+ GTID_UNTIL_NOT_DONE,
+ GTID_UNTIL_STOP_AFTER_STANDALONE,
+ GTID_UNTIL_STOP_AFTER_TRANSACTION
+};
+
+
int max_binlog_dump_events = 0; // unlimited
my_bool opt_sporadic_binlog_dump_fail = 0;
#ifndef DBUG_OFF
@@ -36,6 +46,74 @@ static int binlog_dump_count = 0;
extern TYPELIB binlog_checksum_typelib;
+
+static int
+fake_event_header(String* packet, Log_event_type event_type, ulong extra_len,
+ my_bool *do_checksum, ha_checksum *crc, const char** errmsg,
+ uint8 checksum_alg_arg, uint32 end_pos)
+{
+ char header[LOG_EVENT_HEADER_LEN];
+ ulong event_len;
+
+ *do_checksum= checksum_alg_arg != BINLOG_CHECKSUM_ALG_OFF &&
+ checksum_alg_arg != BINLOG_CHECKSUM_ALG_UNDEF;
+
+ /*
+ 'when' (the timestamp) is set to 0 so that the slave can distinguish between
+ real and fake Rotate events (if necessary)
+ */
+ memset(header, 0, 4);
+ header[EVENT_TYPE_OFFSET] = (uchar)event_type;
+ event_len= LOG_EVENT_HEADER_LEN + extra_len +
+ (*do_checksum ? BINLOG_CHECKSUM_LEN : 0);
+ int4store(header + SERVER_ID_OFFSET, global_system_variables.server_id);
+ int4store(header + EVENT_LEN_OFFSET, event_len);
+ int2store(header + FLAGS_OFFSET, LOG_EVENT_ARTIFICIAL_F);
+ // TODO: check what problems this may cause and fix them
+ int4store(header + LOG_POS_OFFSET, end_pos);
+ if (packet->append(header, sizeof(header)))
+ {
+ *errmsg= "Failed due to out-of-memory writing event";
+ return -1;
+ }
+ if (*do_checksum)
+ {
+ *crc= my_checksum(0L, NULL, 0);
+ *crc= my_checksum(*crc, (uchar*)header, sizeof(header));
+ }
+ return 0;
+}
+
+
+static int
+fake_event_footer(String *packet, my_bool do_checksum, ha_checksum crc, const char **errmsg)
+{
+ if (do_checksum)
+ {
+ char b[BINLOG_CHECKSUM_LEN];
+ int4store(b, crc);
+ if (packet->append(b, sizeof(b)))
+ {
+ *errmsg= "Failed due to out-of-memory writing event checksum";
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+static int
+fake_event_write(NET *net, String *packet, const char **errmsg)
+{
+ if (my_net_write(net, (uchar*) packet->ptr(), packet->length()))
+ {
+ *errmsg = "failed on my_net_write()";
+ return -1;
+ }
+ return 0;
+}
+
+
/*
fake_rotate_event() builds a fake (=which does not exist physically in any
binlog) Rotate event, which contains the name of the binlog we are going to
@@ -59,59 +137,71 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
uint8 checksum_alg_arg)
{
DBUG_ENTER("fake_rotate_event");
- char header[LOG_EVENT_HEADER_LEN], buf[ROTATE_HEADER_LEN+100];
-
- /*
- this Rotate is to be sent with checksum if and only if
- slave's get_master_version_and_clock time handshake value
- of master's @@global.binlog_checksum was TRUE
- */
-
- my_bool do_checksum= checksum_alg_arg != BINLOG_CHECKSUM_ALG_OFF &&
- checksum_alg_arg != BINLOG_CHECKSUM_ALG_UNDEF;
-
- /*
- 'when' (the timestamp) is set to 0 so that slave could distinguish between
- real and fake Rotate events (if necessary)
- */
- memset(header, 0, 4);
- header[EVENT_TYPE_OFFSET] = ROTATE_EVENT;
-
+ char buf[ROTATE_HEADER_LEN+100];
+ my_bool do_checksum;
+ int err;
char* p = log_file_name+dirname_length(log_file_name);
uint ident_len = (uint) strlen(p);
- ulong event_len = ident_len + LOG_EVENT_HEADER_LEN + ROTATE_HEADER_LEN +
- (do_checksum ? BINLOG_CHECKSUM_LEN : 0);
- int4store(header + SERVER_ID_OFFSET, server_id);
- int4store(header + EVENT_LEN_OFFSET, event_len);
- int2store(header + FLAGS_OFFSET, LOG_EVENT_ARTIFICIAL_F);
+ ha_checksum crc;
- // TODO: check what problems this may cause and fix them
- int4store(header + LOG_POS_OFFSET, 0);
+ if ((err= fake_event_header(packet, ROTATE_EVENT,
+ ident_len + ROTATE_HEADER_LEN, &do_checksum, &crc,
+ errmsg, checksum_alg_arg, 0)))
+ DBUG_RETURN(err);
- packet->append(header, sizeof(header));
int8store(buf+R_POS_OFFSET,position);
packet->append(buf, ROTATE_HEADER_LEN);
packet->append(p, ident_len);
if (do_checksum)
{
- char b[BINLOG_CHECKSUM_LEN];
- ha_checksum crc= my_checksum(0L, NULL, 0);
- crc= my_checksum(crc, (uchar*)header, sizeof(header));
crc= my_checksum(crc, (uchar*)buf, ROTATE_HEADER_LEN);
crc= my_checksum(crc, (uchar*)p, ident_len);
- int4store(b, crc);
- packet->append(b, sizeof(b));
}
- if (my_net_write(net, (uchar*) packet->ptr(), packet->length()))
+ if ((err= fake_event_footer(packet, do_checksum, crc, errmsg)) ||
+ (err= fake_event_write(net, packet, errmsg)))
+ DBUG_RETURN(err);
+
+ DBUG_RETURN(0);
+}
+
+
+static int fake_gtid_list_event(NET* net, String* packet,
+ Gtid_list_log_event *glev, const char** errmsg,
+ uint8 checksum_alg_arg, uint32 current_pos)
+{
+ my_bool do_checksum;
+ int err;
+ ha_checksum crc;
+ char buf[128];
+ String str(buf, sizeof(buf), system_charset_info);
+
+ str.length(0);
+ if (glev->to_packet(&str))
{
- *errmsg = "failed on my_net_write()";
- DBUG_RETURN(-1);
+ *errmsg= "Failed due to out-of-memory writing Gtid_list event";
+ return -1;
}
- DBUG_RETURN(0);
+ if ((err= fake_event_header(packet, GTID_LIST_EVENT,
+ str.length(), &do_checksum, &crc,
+ errmsg, checksum_alg_arg, current_pos)))
+ return err;
+
+ packet->append(str);
+ if (do_checksum)
+ {
+ crc= my_checksum(crc, (uchar*)str.ptr(), str.length());
+ }
+
+ if ((err= fake_event_footer(packet, do_checksum, crc, errmsg)) ||
+ (err= fake_event_write(net, packet, errmsg)))
+ return err;
+
+ return 0;
}
+
/*
Reset thread transmit packet buffer for event sending
@@ -505,6 +595,60 @@ get_mariadb_slave_capability(THD *thd)
/*
+ Get the value of the @slave_connect_state user variable into the supplied
+ String (this is the GTID connect state requested by the connecting slave).
+
+ Returns false if error (i.e. the slave did not set the variable and does not
+ want to use GTID to set start position), true if success.
+*/
+static bool
+get_slave_connect_state(THD *thd, String *out_str)
+{
+ bool null_value;
+
+ const LEX_STRING name= { C_STRING_WITH_LEN("slave_connect_state") };
+ user_var_entry *entry=
+ (user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name.str,
+ name.length);
+ return entry && entry->val_str(&null_value, out_str, 0) && !null_value;
+}
+
+
+static bool
+get_slave_gtid_strict_mode(THD *thd)
+{
+ bool null_value;
+
+ const LEX_STRING name= { C_STRING_WITH_LEN("slave_gtid_strict_mode") };
+ user_var_entry *entry=
+ (user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name.str,
+ name.length);
+ return entry && entry->val_int(&null_value) && !null_value;
+}
+
+
+/*
+ Get the value of the @slave_until_gtid user variable into the supplied
+ String (this is the GTID position specified for START SLAVE UNTIL
+ master_gtid_pos='xxx').
+
+  Returns false on error (i.e. the slave did not set the variable and is not
+  doing START SLAVE UNTIL master_gtid_pos='xxx'), true on success.
+*/
+static bool
+get_slave_until_gtid(THD *thd, String *out_str)
+{
+ bool null_value;
+
+ const LEX_STRING name= { C_STRING_WITH_LEN("slave_until_gtid") };
+ user_var_entry *entry=
+ (user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name.str,
+ name.length);
+ return entry && entry->val_str(&null_value, out_str, 0) && !null_value;
+}
+
+
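The connect state and UNTIL position read above are plain text set by the slave before it issues the dump request, in MariaDB's usual GTID list form "domain-server-seqno" separated by commas (for example "0-1-100,1-2-577"); the server later parses them with slave_connection_state::load(). A self-contained sketch of that mapping, one entry per replication domain, with illustrative types:

  #include <cstdint>
  #include <map>
  #include <sstream>
  #include <string>

  struct gtid { uint32_t domain_id, server_id; uint64_t seq_no; };

  // Parse "0-1-100,1-2-577" into one entry per domain_id.
  // Returns true on parse error, mirroring the server's load() convention.
  static bool parse_gtid_state(const std::string &s,
                               std::map<uint32_t, gtid> *out)
  {
    std::istringstream in(s);
    std::string item;
    while (std::getline(in, item, ','))
    {
      gtid g;
      char dash1, dash2;
      std::istringstream one(item);
      if (!(one >> g.domain_id >> dash1 >> g.server_id >> dash2 >> g.seq_no) ||
          dash1 != '-' || dash2 != '-')
        return true;                          // malformed GTID in the list
      (*out)[g.domain_id]= g;                 // one entry per domain
    }
    return false;
  }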
+/*
Function prepares and sends repliation heartbeat event.
@param net net object of THD
@@ -539,7 +683,7 @@ static int send_heartbeat_event(NET* net, String* packet,
uint ident_len = strlen(p);
ulong event_len = ident_len + LOG_EVENT_HEADER_LEN +
(do_checksum ? BINLOG_CHECKSUM_LEN : 0);
- int4store(header + SERVER_ID_OFFSET, server_id);
+ int4store(header + SERVER_ID_OFFSET, global_system_variables.server_id);
int4store(header + EVENT_LEN_OFFSET, event_len);
int2store(header + FLAGS_OFFSET, 0);
@@ -567,6 +711,760 @@ static int send_heartbeat_event(NET* net, String* packet,
}
+struct binlog_file_entry
+{
+ binlog_file_entry *next;
+ char *name;
+};
+
+static binlog_file_entry *
+get_binlog_list(MEM_ROOT *memroot)
+{
+ IO_CACHE *index_file;
+ char fname[FN_REFLEN];
+ size_t length;
+ binlog_file_entry *current_list= NULL, *e;
+ DBUG_ENTER("get_binlog_list");
+
+ if (!mysql_bin_log.is_open())
+ {
+ my_error(ER_NO_BINARY_LOGGING, MYF(0));
+ DBUG_RETURN(NULL);
+ }
+
+ mysql_bin_log.lock_index();
+ index_file=mysql_bin_log.get_index_file();
+ reinit_io_cache(index_file, READ_CACHE, (my_off_t) 0, 0, 0);
+
+ /* The file ends with EOF or empty line */
+ while ((length=my_b_gets(index_file, fname, sizeof(fname))) > 1)
+ {
+ --length; /* Remove the newline */
+ if (!(e= (binlog_file_entry *)alloc_root(memroot, sizeof(*e))) ||
+ !(e->name= strmake_root(memroot, fname, length)))
+ {
+ mysql_bin_log.unlock_index();
+ my_error(ER_OUTOFMEMORY, MYF(0), length + 1 + sizeof(*e));
+ DBUG_RETURN(NULL);
+ }
+ e->next= current_list;
+ current_list= e;
+ }
+ mysql_bin_log.unlock_index();
+
+ DBUG_RETURN(current_list);
+}
+
+/*
+ Find the Gtid_list_log_event at the start of a binlog.
+
+ NULL for ok, non-NULL error message for error.
+
+  If ok, then the event is returned in *out_gtid_list. This can be NULL if we
+  reach back to binlogs written by an old server version without GTID support.
+  If so, it means we have reached the point to start from, as no GTID events
+  can exist in earlier binlogs.
+*/
+static const char *
+get_gtid_list_event(IO_CACHE *cache, Gtid_list_log_event **out_gtid_list)
+{
+ Format_description_log_event init_fdle(BINLOG_VERSION);
+ Format_description_log_event *fdle;
+ Log_event *ev;
+ const char *errormsg = NULL;
+
+ *out_gtid_list= NULL;
+
+ if (!(ev= Log_event::read_log_event(cache, 0, &init_fdle,
+ opt_master_verify_checksum)) ||
+ ev->get_type_code() != FORMAT_DESCRIPTION_EVENT)
+ {
+ if (ev)
+ delete ev;
+ return "Could not read format description log event while looking for "
+ "GTID position in binlog";
+ }
+
+ fdle= static_cast<Format_description_log_event *>(ev);
+
+ for (;;)
+ {
+ Log_event_type typ;
+
+ ev= Log_event::read_log_event(cache, 0, fdle, opt_master_verify_checksum);
+ if (!ev)
+ {
+ errormsg= "Could not read GTID list event while looking for GTID "
+ "position in binlog";
+ break;
+ }
+ typ= ev->get_type_code();
+ if (typ == GTID_LIST_EVENT)
+ break; /* Done, found it */
+ delete ev;
+ if (typ == ROTATE_EVENT || typ == STOP_EVENT ||
+ typ == FORMAT_DESCRIPTION_EVENT)
+ continue; /* Continue looking */
+
+ /* We did not find any Gtid_list_log_event, must be old binlog. */
+ ev= NULL;
+ break;
+ }
+
+ delete fdle;
+ *out_gtid_list= static_cast<Gtid_list_log_event *>(ev);
+ return errormsg;
+}
+
+
+/*
+ Check if every GTID requested by the slave is contained in this (or a later)
+ binlog file. Return true if so, false if not.
+
+ We do the check with a single scan of the list of GTIDs, avoiding the need
+ to build an in-memory hash or stuff like that.
+
+  We need to check that the slave did not request GTID D-S-N1 when the
+  Gtid_list_log_event for this binlog file has D-S-N2 with N2 >= N1,
+  because that would mean the requested GTID is in an earlier binlog.
+  However, if the Gtid_list_log_event indicates that D-S-N1 is the very last
+  GTID for domain D in prior binlog files, then it is ok to start from the
+  very start of this binlog file. This special case is important, as it
+  allows purging old logs even if some domain has been unused for a long time.
+
+  In addition, we need to check that we do not have a GTID D-S-N3 in the
+  Gtid_list_log_event where D is not present in the requested slave state at
+  all, since if D is not in the requested slave state, the slave needs to
+  start at the very first GTID in domain D.
+*/
+static bool
+contains_all_slave_gtid(slave_connection_state *st, Gtid_list_log_event *glev)
+{
+ uint32 i;
+
+ for (i= 0; i < glev->count; ++i)
+ {
+ uint32 gl_domain_id= glev->list[i].domain_id;
+ const rpl_gtid *gtid= st->find(gl_domain_id);
+ if (!gtid)
+ {
+ /*
+ The slave needs to start from the very beginning of this domain, which
+ is in an earlier binlog file. So we need to search back further.
+ */
+ return false;
+ }
+ if (gtid->server_id == glev->list[i].server_id &&
+ gtid->seq_no <= glev->list[i].seq_no)
+ {
+ /*
+ The slave needs to start after gtid, but it is contained in an earlier
+ binlog file. So we need to search back further, unless it was the very
+ last gtid logged for the domain in earlier binlog files.
+ */
+ if (gtid->seq_no < glev->list[i].seq_no)
+ return false;
+
+ /*
+ The slave requested D-S-N1, which happens to be the last GTID logged
+ in prior binlog files with same domain id D and server id S.
+
+ The Gtid_list is kept sorted on domain_id, with the last GTID in each
+ domain_id group being the last one logged. So if this is the last GTID
+ within the domain_id group, then it is ok to start from the very
+ beginning of this group, per the special case explained in comment at
+ the start of this function. If not, then we need to search back further.
+ */
+ if (i+1 < glev->count && gl_domain_id == glev->list[i+1].domain_id)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
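The rule above can be restated compactly. The following standalone sketch mirrors contains_all_slave_gtid() using plain standard-library types; the gtid struct and the map of requested positions are illustrative stand-ins for rpl_gtid and slave_connection_state, and the list is assumed sorted by domain_id with the last-logged GTID of each domain last in its group, as described above:

  #include <cstddef>
  #include <cstdint>
  #include <map>
  #include <vector>

  struct gtid { uint32_t domain_id, server_id; uint64_t seq_no; };

  // 'list' is the Gtid_list at the start of a binlog file; 'requested' maps
  // domain_id to the GTID the slave wants to resume after.
  static bool contains_all_requested(const std::vector<gtid> &list,
                                     const std::map<uint32_t, gtid> &requested)
  {
    for (std::size_t i= 0; i < list.size(); i++)
    {
      std::map<uint32_t, gtid>::const_iterator it=
        requested.find(list[i].domain_id);
      if (it == requested.end())
        return false;              // slave starts at the very beginning of this domain
      const gtid &want= it->second;
      if (want.server_id == list[i].server_id && want.seq_no <= list[i].seq_no)
      {
        if (want.seq_no < list[i].seq_no)
          return false;            // requested GTID lies in an earlier binlog
        // Equal seq_no: only ok if this is the last entry of the domain group.
        if (i + 1 < list.size() && list[i].domain_id == list[i + 1].domain_id)
          return false;
      }
    }
    return true;
  }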
+/*
+ Check the start GTID state requested by the slave against our binlog state.
+
+ Give an error if the slave requests something that we do not have in our
+ binlog.
+*/
+
+static int
+check_slave_start_position(THD *thd, slave_connection_state *st,
+ const char **errormsg, rpl_gtid *error_gtid,
+ slave_connection_state *until_gtid_state,
+ HASH *fake_gtid_hash)
+{
+ uint32 i;
+ int err;
+ rpl_gtid **delete_list= NULL;
+ uint32 delete_idx= 0;
+ bool slave_state_loaded= false;
+
+ for (i= 0; i < st->hash.records; ++i)
+ {
+ rpl_gtid *slave_gtid= (rpl_gtid *)my_hash_element(&st->hash, i);
+ rpl_gtid master_gtid;
+ rpl_gtid master_replication_gtid;
+ rpl_gtid start_gtid;
+
+ if (mysql_bin_log.find_in_binlog_state(slave_gtid->domain_id,
+ slave_gtid->server_id,
+ &master_gtid) &&
+ master_gtid.seq_no >= slave_gtid->seq_no)
+ continue;
+
+ if (!slave_state_loaded)
+ {
+ if (rpl_load_gtid_slave_state(thd))
+ {
+ *errormsg= "Failed to load replication slave GTID state";
+ err= ER_CANNOT_LOAD_SLAVE_GTID_STATE;
+ goto end;
+ }
+ slave_state_loaded= true;
+ }
+
+ if (!rpl_global_gtid_slave_state.domain_to_gtid(slave_gtid->domain_id,
+ &master_replication_gtid) ||
+ slave_gtid->server_id != master_replication_gtid.server_id ||
+ slave_gtid->seq_no != master_replication_gtid.seq_no)
+ {
+ rpl_gtid domain_gtid;
+ rpl_gtid *until_gtid;
+
+ if (!mysql_bin_log.lookup_domain_in_binlog_state(slave_gtid->domain_id,
+ &domain_gtid))
+ {
+ /*
+ We do not have anything in this domain, neither in the binlog nor
+ in the slave state. So we are probably one master in a multi-master
+ setup, and this domain is served by a different master.
+ */
+ continue;
+ }
+
+ if (until_gtid_state &&
+ ( !(until_gtid= until_gtid_state->find(slave_gtid->domain_id)) ||
+ (mysql_bin_log.find_in_binlog_state(until_gtid->domain_id,
+ until_gtid->server_id,
+ &master_gtid) &&
+ master_gtid.seq_no >= until_gtid->seq_no)))
+ {
+ /*
+ The slave requested to start from a position that is not (yet) in
+ our binlog, but it also specified an UNTIL condition that _is_ in
+ our binlog (or a missing UNTIL, which means stop at the very
+ beginning). So the stop position is before the start position, and
+ we just delete the entry from the UNTIL hash to mark that this
+ domain has already reached the UNTIL condition.
+ */
+        if (until_gtid)
+ until_gtid_state->remove(until_gtid);
+ continue;
+ }
+
+ *errormsg= "Requested slave GTID state not found in binlog";
+ *error_gtid= *slave_gtid;
+ err= ER_GTID_POSITION_NOT_FOUND_IN_BINLOG;
+ goto end;
+ }
+
+ /*
+      Ok, so the connecting slave asked to start at a GTID that we do not have
+      in our binlog, but it was in fact the last GTID we applied earlier, when
+      we were acting as a replication slave.
+
+      This means that we were running as a replication slave without
+      --log-slave-updates, but have since switched to being a master. It is
+      worth handling this special case, as it allows users to run a simple
+      master -> slave setup without --log-slave-updates and then exchange the
+      slave and master roles, as long as they make sure the slave is caught up
+      before switching.
+ */
+
+ /*
+ First check if we logged something ourselves as a master after being a
+ slave. This will be seen as a GTID with our own server_id and bigger
+ seq_no than what is in the slave state.
+
+ If we did not log anything ourselves, then start the connecting slave
+ replicating from the current binlog end position, which in this case
+ corresponds to our replication slave state and hence what the connecting
+ slave is requesting.
+ */
+ if (mysql_bin_log.find_in_binlog_state(slave_gtid->domain_id,
+ global_system_variables.server_id,
+ &start_gtid) &&
+ start_gtid.seq_no > slave_gtid->seq_no)
+ {
+ rpl_gtid *fake_gtid;
+ /*
+ Start replication within this domain at the first GTID that we logged
+ ourselves after becoming a master.
+
+ Remember that this starting point is in fact a "fake" GTID which may
+        not exist in the binlog, so that we do not complain about it in
+ --gtid-strict-mode.
+ */
+ slave_gtid->server_id= global_system_variables.server_id;
+ if (!(fake_gtid= (rpl_gtid *)my_malloc(sizeof(*fake_gtid), MYF(0))))
+ {
+ *errormsg= "Out of memory while checking slave start position";
+ err= ER_OUT_OF_RESOURCES;
+ goto end;
+ }
+ *fake_gtid= *slave_gtid;
+ if (my_hash_insert(fake_gtid_hash, (uchar *)fake_gtid))
+ {
+ my_free(fake_gtid);
+ *errormsg= "Out of memory while checking slave start position";
+ err= ER_OUT_OF_RESOURCES;
+ goto end;
+ }
+ }
+ else if (mysql_bin_log.lookup_domain_in_binlog_state(slave_gtid->domain_id,
+ &start_gtid))
+ {
+ slave_gtid->server_id= start_gtid.server_id;
+ slave_gtid->seq_no= start_gtid.seq_no;
+ }
+ else
+ {
+ /*
+ We do not have _anything_ in our own binlog for this domain. Just
+ delete the entry in the slave connection state, then it will pick up
+ anything new that arrives.
+
+ We just queue up the deletion and do it later, after the loop, so that
+ we do not mess up the iteration over the hash.
+ */
+ if (!delete_list)
+ {
+ if (!(delete_list= (rpl_gtid **)
+ my_malloc(sizeof(*delete_list) * st->hash.records, MYF(MY_WME))))
+ {
+ *errormsg= "Out of memory while checking slave start position";
+ err= ER_OUT_OF_RESOURCES;
+ goto end;
+ }
+ }
+ delete_list[delete_idx++]= slave_gtid;
+ }
+ }
+
+ /* Do any delayed deletes from the hash. */
+ if (delete_list)
+ {
+ for (i= 0; i < delete_idx; ++i)
+ st->remove(delete_list[i]);
+ }
+ err= 0;
+
+end:
+ if (delete_list)
+ my_free(delete_list);
+ return err;
+}
+
+/*
+ Find the name of the binlog file to start reading for a slave that connects
+ using GTID state.
+
+ Returns the file name in out_name, which must be of size at least FN_REFLEN.
+
+ Returns NULL on ok, error message on error.
+
+ In case of non-error return, the returned binlog file is guaranteed to
+ contain the first event to be transmitted to the slave for every domain
+ present in our binlogs. It is still necessary to skip all GTIDs up to
+  and including the GTID requested by the slave within each domain.
+
+ However, as a special case, if the event to be sent to the slave is the very
+ first event (within that domain) in the returned binlog, then nothing should
+  be skipped, so that domain is deleted from the passed-in slave connection
+ state.
+
+ This is necessary in case the slave requests a GTID within a replication
+ domain that has long been inactive. The binlog file containing that GTID may
+ have been long since purged. However, as long as no GTIDs after that have
+ been purged, we have the GTID requested by slave in the Gtid_list_log_event
+ of the latest binlog. So we can start from there, as long as we delete the
+ corresponding entry in the slave state so we do not wrongly skip any events
+ that might turn up if that domain becomes active again, vainly looking for
+ the requested GTID that was already purged.
+*/
+static const char *
+gtid_find_binlog_file(slave_connection_state *state, char *out_name,
+ slave_connection_state *until_gtid_state)
+{
+ MEM_ROOT memroot;
+ binlog_file_entry *list;
+ Gtid_list_log_event *glev= NULL;
+ const char *errormsg= NULL;
+ char buf[FN_REFLEN];
+
+ init_alloc_root(&memroot, 10*(FN_REFLEN+sizeof(binlog_file_entry)), 0,
+ MYF(MY_THREAD_SPECIFIC));
+ if (!(list= get_binlog_list(&memroot)))
+ {
+ errormsg= "Out of memory while looking for GTID position in binlog";
+ goto end;
+ }
+
+ while (list)
+ {
+ File file;
+ IO_CACHE cache;
+
+ if (!list->next)
+ {
+ /*
+ It should be safe to read the currently used binlog, as we will only
+ read the header part that is already written.
+
+      But if that does not work on Windows, then we will need to cache the
+ event somewhere in memory I suppose - that could work too.
+ */
+ }
+ /*
+ Read the Gtid_list_log_event at the start of the binlog file to
+ get the binlog state.
+ */
+ if (normalize_binlog_name(buf, list->name, false))
+ {
+ errormsg= "Failed to determine binlog file name while looking for "
+ "GTID position in binlog";
+ goto end;
+ }
+ bzero((char*) &cache, sizeof(cache));
+ if ((file= open_binlog(&cache, buf, &errormsg)) == (File)-1)
+ goto end;
+ errormsg= get_gtid_list_event(&cache, &glev);
+ end_io_cache(&cache);
+ mysql_file_close(file, MYF(MY_WME));
+ if (errormsg)
+ goto end;
+
+ if (!glev || contains_all_slave_gtid(state, glev))
+ {
+ strmake(out_name, buf, FN_REFLEN);
+
+ if (glev)
+ {
+ uint32 i;
+
+ /*
+          As a special case, we allow starting from binlog file N if the
+ requested GTID is the last event (in the corresponding domain) in
+ binlog file (N-1), but then we need to remove that GTID from the slave
+ state, rather than skipping events waiting for it to turn up.
+
+ If slave is doing START SLAVE UNTIL, check for any UNTIL conditions
+ that are already included in a previous binlog file. Delete any such
+ from the UNTIL hash, to mark that such domains have already reached
+ their UNTIL condition.
+ */
+ for (i= 0; i < glev->count; ++i)
+ {
+ const rpl_gtid *gtid= state->find(glev->list[i].domain_id);
+ if (!gtid)
+ {
+ /*
+            contains_all_slave_gtid() returns false if there is any domain in
+            the Gtid_list_event which is not in the requested slave position.
+
+            We may delete a domain from the slave state inside this loop, but
+            we only do this when it is the very last GTID logged for that
+            domain in earlier binlogs, so we cannot encounter it again in any
+            further GTIDs in the Gtid_list.
+ */
+ DBUG_ASSERT(0);
+        }
+        else if (gtid->server_id == glev->list[i].server_id &&
+                 gtid->seq_no == glev->list[i].seq_no)
+ {
+ /*
+ The slave requested to start from the very beginning of this
+ domain in this binlog file. So delete the entry from the state,
+ we do not need to skip anything.
+ */
+ state->remove(gtid);
+ }
+
+ if (until_gtid_state &&
+ (gtid= until_gtid_state->find(glev->list[i].domain_id)) &&
+ gtid->server_id == glev->list[i].server_id &&
+ gtid->seq_no <= glev->list[i].seq_no)
+ {
+ /*
+ We've already reached the stop position in UNTIL for this domain,
+ since it is before the start position.
+ */
+ until_gtid_state->remove(gtid);
+ }
+ }
+ }
+
+ goto end;
+ }
+ delete glev;
+ glev= NULL;
+ list= list->next;
+ }
+
+ /* We reached the end without finding anything. */
+ errormsg= "Could not find GTID state requested by slave in any binlog "
+ "files. Probably the slave state is too old and required binlog files "
+ "have been purged.";
+
+end:
+ if (glev)
+ delete glev;
+
+ free_root(&memroot, MYF(0));
+ return errormsg;
+}
+
+
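Putting get_binlog_list() and the per-file check together, the search above is a single walk over the binlogs, newest first (get_binlog_list() builds its list in that order by prepending entries as it reads the index file). A sketch with illustrative in-memory types, reusing the contains_all_requested() helper sketched after contains_all_slave_gtid() above:

  #include <cstdint>
  #include <map>
  #include <string>
  #include <vector>

  struct gtid { uint32_t domain_id, server_id; uint64_t seq_no; };
  struct binlog_desc { std::string name; std::vector<gtid> gtid_list; };

  // Same check as sketched earlier; declared here for self-containment.
  bool contains_all_requested(const std::vector<gtid> &list,
                              const std::map<uint32_t, gtid> &requested);

  // Return the first (newest) file whose initial Gtid_list shows that every
  // requested GTID is in that file or a later one.  An empty Gtid_list means
  // a pre-GTID binlog, which is also a valid starting point.
  static std::string find_start_binlog(const std::vector<binlog_desc> &binlogs,
                                       const std::map<uint32_t, gtid> &requested)
  {
    for (std::vector<binlog_desc>::const_iterator b= binlogs.begin();
         b != binlogs.end(); ++b)
      if (b->gtid_list.empty() ||
          contains_all_requested(b->gtid_list, requested))
        return b->name;
    return std::string();       // nothing found: the requested state was purged
  }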
+/*
+ Given an old-style binlog position with file name and file offset, find the
+ corresponding gtid position. If the offset is not at an event boundary, give
+ an error.
+
+ Return NULL on ok, error message string on error.
+
+ ToDo: Improve the performance of this by using binlog index files.
+*/
+static const char *
+gtid_state_from_pos(const char *name, uint32 offset,
+ slave_connection_state *gtid_state)
+{
+ IO_CACHE cache;
+ File file;
+ const char *errormsg= NULL;
+ bool found_gtid_list_event= false;
+ bool found_format_description_event= false;
+ bool valid_pos= false;
+ uint8 current_checksum_alg= BINLOG_CHECKSUM_ALG_UNDEF;
+ int err;
+ String packet;
+
+ if (gtid_state->load((const rpl_gtid *)NULL, 0))
+ {
+ errormsg= "Internal error (out of memory?) initializing slave state "
+ "while scanning binlog to find start position";
+ return errormsg;
+ }
+
+ if ((file= open_binlog(&cache, name, &errormsg)) == (File)-1)
+ return errormsg;
+
+ /*
+ First we need to find the initial GTID_LIST_EVENT. We need this even
+ if the offset is at the very start of the binlog file.
+
+ But if we do not find any GTID_LIST_EVENT, then this is an old binlog
+ with no GTID information, so we return empty GTID state.
+ */
+ for (;;)
+ {
+ Log_event_type typ;
+ uint32 cur_pos;
+
+ cur_pos= (uint32)my_b_tell(&cache);
+ if (cur_pos == offset)
+ valid_pos= true;
+ if (found_format_description_event && found_gtid_list_event &&
+ cur_pos >= offset)
+ break;
+
+ packet.length(0);
+ err= Log_event::read_log_event(&cache, &packet, NULL,
+ current_checksum_alg);
+ if (err)
+ {
+ errormsg= "Could not read binlog while searching for slave start "
+ "position on master";
+ goto end;
+ }
+ /*
+ The cast to uchar is needed to avoid a signed char being converted to a
+ negative number.
+ */
+ typ= (Log_event_type)(uchar)packet[EVENT_TYPE_OFFSET];
+ if (typ == FORMAT_DESCRIPTION_EVENT)
+ {
+ if (found_format_description_event)
+ {
+ errormsg= "Duplicate format description log event found while "
+ "searching for old-style position in binlog";
+ goto end;
+ }
+
+ current_checksum_alg= get_checksum_alg(packet.ptr(), packet.length());
+ found_format_description_event= true;
+ }
+ else if (typ != FORMAT_DESCRIPTION_EVENT && !found_format_description_event)
+ {
+ errormsg= "Did not find format description log event while searching "
+ "for old-style position in binlog";
+ goto end;
+ }
+ else if (typ == ROTATE_EVENT || typ == STOP_EVENT ||
+ typ == BINLOG_CHECKPOINT_EVENT)
+ continue; /* Continue looking */
+ else if (typ == GTID_LIST_EVENT)
+ {
+ rpl_gtid *gtid_list;
+ bool status;
+ uint32 list_len;
+
+ if (found_gtid_list_event)
+ {
+ errormsg= "Found duplicate Gtid_list_log_event while scanning binlog "
+ "to find slave start position";
+ goto end;
+ }
+ status= Gtid_list_log_event::peek(packet.ptr(), packet.length(),
+ current_checksum_alg,
+ &gtid_list, &list_len);
+ if (status)
+ {
+ errormsg= "Error reading Gtid_list_log_event while searching "
+ "for old-style position in binlog";
+ goto end;
+ }
+ err= gtid_state->load(gtid_list, list_len);
+ my_free(gtid_list);
+ if (err)
+ {
+      errormsg= "Internal error (out of memory?) initializing slave state "
+ "while scanning binlog to find start position";
+ goto end;
+ }
+ found_gtid_list_event= true;
+ }
+ else if (!found_gtid_list_event)
+ {
+ /* We did not find any Gtid_list_log_event, must be old binlog. */
+ goto end;
+ }
+ else if (typ == GTID_EVENT)
+ {
+ rpl_gtid gtid;
+ uchar flags2;
+ if (Gtid_log_event::peek(packet.ptr(), packet.length(),
+ current_checksum_alg, &gtid.domain_id,
+ &gtid.server_id, &gtid.seq_no, &flags2))
+ {
+ errormsg= "Corrupt gtid_log_event found while scanning binlog to find "
+ "initial slave position";
+ goto end;
+ }
+ if (gtid_state->update(&gtid))
+ {
+ errormsg= "Internal error (out of memory?) updating slave state while "
+ "scanning binlog to find start position";
+ goto end;
+ }
+ }
+ }
+
+ if (!valid_pos)
+ {
+ errormsg= "Slave requested incorrect position in master binlog. "
+ "Requested position %u in file '%s', but this position does not "
+ "correspond to the location of any binlog event.";
+ }
+
+end:
+ end_io_cache(&cache);
+ mysql_file_close(file, MYF(MY_WME));
+
+ return errormsg;
+}
+
+
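Stripped of the I/O and event parsing, the scan above amounts to: seed the state from the file's initial Gtid_list_log_event, fold in every GTID event that starts before the requested offset, and accept the offset only if it lands on an event boundary. A simplified sketch over an already-parsed event list; the types are illustrative:

  #include <cstdint>
  #include <map>
  #include <vector>

  struct gtid  { uint32_t domain_id, server_id; uint64_t seq_no; };
  struct event { uint32_t start_pos; bool is_gtid; gtid g; };

  // 'state' should already hold the contents of the file's initial
  // Gtid_list_log_event.  Returns false if 'offset' is not an event boundary.
  static bool state_at_offset(const std::vector<event> &events,
                              uint32_t end_pos, uint32_t offset,
                              std::map<uint32_t, gtid> *state)
  {
    bool valid_pos= (offset == end_pos);   // end of file is a valid boundary
    for (std::vector<event>::const_iterator ev= events.begin();
         ev != events.end(); ++ev)
    {
      if (ev->start_pos == offset)
        valid_pos= true;
      if (ev->start_pos >= offset)
        break;                             // events at or past 'offset' are irrelevant
      if (ev->is_gtid)
        (*state)[ev->g.domain_id]= ev->g;  // last GTID seen per domain wins
    }
    return valid_pos;
  }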
+int
+gtid_state_from_binlog_pos(const char *in_name, uint32 pos, String *out_str)
+{
+ slave_connection_state gtid_state;
+ const char *lookup_name;
+ char name_buf[FN_REFLEN];
+ LOG_INFO linfo;
+
+ if (!mysql_bin_log.is_open())
+ {
+ my_error(ER_NO_BINARY_LOGGING, MYF(0));
+ return 1;
+ }
+
+ if (in_name && in_name[0])
+ {
+ mysql_bin_log.make_log_name(name_buf, in_name);
+ lookup_name= name_buf;
+ }
+ else
+ lookup_name= NULL;
+ linfo.index_file_offset= 0;
+ if (mysql_bin_log.find_log_pos(&linfo, lookup_name, 1))
+ return 1;
+
+ if (pos < 4)
+ pos= 4;
+
+ if (gtid_state_from_pos(linfo.log_file_name, pos, &gtid_state) ||
+ gtid_state.to_string(out_str))
+ return 1;
+ return 0;
+}
+
+
+static bool
+is_until_reached(THD *thd, NET *net, String *packet, ulong *ev_offset,
+ enum_gtid_until_state gtid_until_group,
+ Log_event_type event_type, uint8 current_checksum_alg,
+ ushort flags, const char **errmsg,
+ rpl_binlog_state *until_binlog_state, uint32 current_pos)
+{
+ switch (gtid_until_group)
+ {
+ case GTID_UNTIL_NOT_DONE:
+ return false;
+ case GTID_UNTIL_STOP_AFTER_STANDALONE:
+ if (Log_event::is_part_of_group(event_type))
+ return false;
+ break;
+ case GTID_UNTIL_STOP_AFTER_TRANSACTION:
+ if (event_type != XID_EVENT &&
+ (event_type != QUERY_EVENT ||
+ !Query_log_event::peek_is_commit_rollback(packet->ptr()+*ev_offset,
+ packet->length()-*ev_offset,
+ current_checksum_alg)))
+ return false;
+ break;
+ }
+
+ /*
+    The last event group has been sent, and the START SLAVE UNTIL condition
+    has now been reached.
+
+    Send a final fake Gtid_list_log_event with a flag set to mark that we
+    stop due to the UNTIL condition.
+ */
+ if (reset_transmit_packet(thd, flags, ev_offset, errmsg))
+ return true;
+ Gtid_list_log_event glev(until_binlog_state,
+ Gtid_list_log_event::FLAG_UNTIL_REACHED);
+ if (fake_gtid_list_event(net, packet, &glev, errmsg, current_checksum_alg,
+ current_pos))
+ return true;
+ *errmsg= NULL;
+ return true;
+}
+
+
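The UNTIL bookkeeping shared by is_until_reached() and send_event_to_slave() boils down to per-domain removal from the UNTIL state. A reduced sketch; it collapses the standalone/transaction distinction into one stop-after-group state and leaves out the skip-group handling done in the real code:

  #include <cstdint>
  #include <map>

  struct gtid { uint32_t domain_id, server_id; uint64_t seq_no; };

  enum until_state { UNTIL_NOT_DONE, UNTIL_STOP_AFTER_GROUP };

  // Called for each GTID event while START SLAVE UNTIL master_gtid_pos is
  // active; 'until' maps domain_id to the stop GTID for that domain.
  static void note_gtid_for_until(const gtid &ev,
                                  std::map<uint32_t, gtid> *until,
                                  until_state *state)
  {
    std::map<uint32_t, gtid>::iterator it= until->find(ev.domain_id);
    if (it == until->end())
      return;                              // domain already reached its stop GTID
    if (ev.server_id == it->second.server_id &&
        ev.seq_no >= it->second.seq_no)
    {
      until->erase(it);
      if (until->empty())
        *state= UNTIL_STOP_AFTER_GROUP;    // finish this group, then stop the dump
    }
  }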
/*
Helper function for mysql_binlog_send() to write an event down the slave
connection.
@@ -577,9 +1475,195 @@ static const char *
send_event_to_slave(THD *thd, NET *net, String* const packet, ushort flags,
Log_event_type event_type, char *log_file_name,
IO_CACHE *log, int mariadb_slave_capability,
- ulong ev_offset, uint8 current_checksum_alg)
+ ulong ev_offset, uint8 current_checksum_alg,
+ bool using_gtid_state, slave_connection_state *gtid_state,
+ enum_gtid_skip_type *gtid_skip_group,
+ slave_connection_state *until_gtid_state,
+ enum_gtid_until_state *gtid_until_group,
+ rpl_binlog_state *until_binlog_state,
+ bool slave_gtid_strict_mode, rpl_gtid *error_gtid,
+ bool *send_fake_gtid_list, HASH *fake_gtid_hash)
{
my_off_t pos;
+ size_t len= packet->length();
+
+ if (event_type == GTID_LIST_EVENT && using_gtid_state && until_gtid_state)
+ {
+ rpl_gtid *gtid_list;
+ uint32 list_len;
+ bool err;
+
+ if (ev_offset > len ||
+ Gtid_list_log_event::peek(packet->ptr()+ev_offset, len - ev_offset,
+ current_checksum_alg,
+ &gtid_list, &list_len))
+ {
+ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ return "Failed to read Gtid_list_log_event: corrupt binlog";
+ }
+ err= until_binlog_state->load(gtid_list, list_len);
+ my_free(gtid_list);
+ if (err)
+ {
+ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ return "Failed in internal GTID book-keeping: Out of memory";
+ }
+ }
+
+ /* Skip GTID event groups until we reach slave position within a domain_id. */
+ if (event_type == GTID_EVENT && using_gtid_state)
+ {
+ uchar flags2;
+ rpl_gtid *gtid;
+
+ if (gtid_state->count() > 0 || until_gtid_state)
+ {
+ rpl_gtid event_gtid;
+
+ if (ev_offset > len ||
+ Gtid_log_event::peek(packet->ptr()+ev_offset, len - ev_offset,
+ current_checksum_alg,
+ &event_gtid.domain_id, &event_gtid.server_id,
+ &event_gtid.seq_no, &flags2))
+ {
+ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ return "Failed to read Gtid_log_event: corrupt binlog";
+ }
+
+ DBUG_EXECUTE_IF("gtid_force_reconnect_at_10_1_100",
+ {
+ rpl_gtid *dbug_gtid;
+ if ((dbug_gtid= until_binlog_state->find(10,1)) &&
+ dbug_gtid->seq_no == 100)
+ {
+ DBUG_SET("-d,gtid_force_reconnect_at_10_1_100");
+ DBUG_SET_INITIAL("-d,gtid_force_reconnect_at_10_1_100");
+ my_errno= ER_UNKNOWN_ERROR;
+ return "DBUG-injected forced reconnect";
+ }
+ });
+
+ if (until_binlog_state->update(&event_gtid, false))
+ {
+ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ return "Failed in internal GTID book-keeping: Out of memory";
+ }
+
+ if (gtid_state->count() > 0)
+ {
+ gtid= gtid_state->find(event_gtid.domain_id);
+ if (gtid != NULL)
+ {
+ /* Skip this event group if we have not yet reached slave start pos. */
+ if (event_gtid.server_id != gtid->server_id ||
+ event_gtid.seq_no <= gtid->seq_no)
+ *gtid_skip_group = (flags2 & Gtid_log_event::FL_STANDALONE ?
+ GTID_SKIP_STANDALONE : GTID_SKIP_TRANSACTION);
+ if (event_gtid.server_id == gtid->server_id &&
+ event_gtid.seq_no >= gtid->seq_no)
+ {
+ if (slave_gtid_strict_mode && event_gtid.seq_no > gtid->seq_no &&
+ !my_hash_search(fake_gtid_hash,
+ (const uchar *)&event_gtid.domain_id, 0))
+ {
+ /*
+ In strict mode, it is an error if the slave requests to start
+ in a "hole" in the master's binlog: a GTID that does not
+ exist, even though both the prior and subsequent seq_no exists
+ for same domain_id and server_id.
+ */
+ my_errno= ER_GTID_START_FROM_BINLOG_HOLE;
+ *error_gtid= *gtid;
+ return "The binlog on the master is missing the GTID requested "
+ "by the slave (even though both a prior and a subsequent "
+                "sequence number do exist), and GTID strict mode is enabled.";
+ }
+
+ /*
+ Send a fake Gtid_list event to the slave.
+ This allows the slave to update its current binlog position
+ so MASTER_POS_WAIT() and MASTER_GTID_WAIT() can work.
+ The fake event will be sent at the end of this event group.
+ */
+ *send_fake_gtid_list= true;
+
+ /*
+ Delete this entry if we have reached slave start position (so we
+ will not skip subsequent events and won't have to look them up
+ and check).
+ */
+ gtid_state->remove(gtid);
+ }
+ }
+ }
+
+ if (until_gtid_state)
+ {
+ gtid= until_gtid_state->find(event_gtid.domain_id);
+ if (gtid == NULL)
+ {
+ /*
+ This domain already reached the START SLAVE UNTIL stop condition,
+ so skip this event group.
+ */
+ *gtid_skip_group = (flags2 & Gtid_log_event::FL_STANDALONE ?
+ GTID_SKIP_STANDALONE : GTID_SKIP_TRANSACTION);
+ }
+ else if (event_gtid.server_id == gtid->server_id &&
+ event_gtid.seq_no >= gtid->seq_no)
+ {
+ /*
+ We have reached the stop condition.
+ Delete this domain_id from the hash, so we will skip all further
+ events in this domain and eventually stop when all domains are
+ done.
+ */
+ uint64 until_seq_no= gtid->seq_no;
+ until_gtid_state->remove(gtid);
+ if (until_gtid_state->count() == 0)
+ *gtid_until_group= (flags2 & Gtid_log_event::FL_STANDALONE ?
+ GTID_UNTIL_STOP_AFTER_STANDALONE :
+ GTID_UNTIL_STOP_AFTER_TRANSACTION);
+ if (event_gtid.seq_no > until_seq_no)
+ {
+ /*
+ The GTID in START SLAVE UNTIL condition is missing in our binlog.
+ This should normally not happen (user error), but since we can be
+ sure that we are now beyond the position that the UNTIL condition
+ should be in, we can just stop now. And we also need to skip this
+ event group (as it is beyond the UNTIL condition).
+ */
+ *gtid_skip_group = (flags2 & Gtid_log_event::FL_STANDALONE ?
+ GTID_SKIP_STANDALONE : GTID_SKIP_TRANSACTION);
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ Skip event group if we have not yet reached the correct slave GTID position.
+
+ Note that slave that understands GTID can also tolerate holes, so there is
+ no need to supply dummy event.
+ */
+ switch (*gtid_skip_group)
+ {
+ case GTID_SKIP_STANDALONE:
+ if (!Log_event::is_part_of_group(event_type))
+ *gtid_skip_group= GTID_SKIP_NOT;
+ return NULL;
+ case GTID_SKIP_TRANSACTION:
+ if (event_type == XID_EVENT ||
+ (event_type == QUERY_EVENT &&
+ Query_log_event::peek_is_commit_rollback(packet->ptr() + ev_offset,
+ len - ev_offset,
+ current_checksum_alg)))
+ *gtid_skip_group= GTID_SKIP_NOT;
+ return NULL;
+ case GTID_SKIP_NOT:
+ break;
+ }
/* Do not send annotate_rows events unless slave requested it. */
if (event_type == ANNOTATE_ROWS_EVENT && !(flags & BINLOG_SEND_ANNOTATE_ROWS_EVENT))
@@ -611,15 +1695,45 @@ send_event_to_slave(THD *thd, NET *net, String* const packet, ushort flags,
a no-operation on the slave.
*/
if (Query_log_event::dummy_event(packet, ev_offset, current_checksum_alg))
+ {
+ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
return "Failed to replace row annotate event with dummy: too small event.";
+ }
}
}
/*
- Do not send binlog checkpoint events to a slave that does not understand it.
+ Replace GTID events with old-style BEGIN events for slaves that do not
+ understand global transaction IDs. For stand-alone events, where there is
+ no terminating COMMIT query event, omit the GTID event or replace it with
+ a dummy event, as appropriate.
*/
- if (unlikely(event_type == BINLOG_CHECKPOINT_EVENT) &&
- mariadb_slave_capability < MARIA_SLAVE_CAPABILITY_BINLOG_CHECKPOINT)
+ if (event_type == GTID_EVENT &&
+ mariadb_slave_capability < MARIA_SLAVE_CAPABILITY_GTID)
+ {
+ bool need_dummy=
+ mariadb_slave_capability < MARIA_SLAVE_CAPABILITY_TOLERATE_HOLES;
+ bool err= Gtid_log_event::make_compatible_event(packet, &need_dummy,
+ ev_offset,
+ current_checksum_alg);
+ if (err)
+ {
+ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ return "Failed to replace GTID event with backwards-compatible event: "
+             "corrupt event.";
+ }
+ if (!need_dummy)
+ return NULL;
+ }
+
+ /*
+ Do not send binlog checkpoint or gtid list events to a slave that does not
+    understand them.
+ */
+ if ((unlikely(event_type == BINLOG_CHECKPOINT_EVENT) &&
+ mariadb_slave_capability < MARIA_SLAVE_CAPABILITY_BINLOG_CHECKPOINT) ||
+ (unlikely(event_type == GTID_LIST_EVENT) &&
+ mariadb_slave_capability < MARIA_SLAVE_CAPABILITY_GTID))
{
if (mariadb_slave_capability >= MARIA_SLAVE_CAPABILITY_TOLERATE_HOLES)
{
@@ -634,8 +1748,11 @@ send_event_to_slave(THD *thd, NET *net, String* const packet, ushort flags,
binlog positions.
*/
if (Query_log_event::dummy_event(packet, ev_offset, current_checksum_alg))
- return "Failed to replace binlog checkpoint event with dummy: "
- "too small event.";
+ {
+ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ return "Failed to replace binlog checkpoint or gtid list event with "
+ "dummy: too small event.";
+ }
}
}
@@ -659,24 +1776,37 @@ send_event_to_slave(THD *thd, NET *net, String* const packet, ushort flags,
pos= my_b_tell(log);
if (RUN_HOOK(binlog_transmit, before_send_event,
(thd, flags, packet, log_file_name, pos)))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
return "run 'before_send_event' hook failed";
+ }
- if (my_net_write(net, (uchar*) packet->ptr(), packet->length()))
+ if (my_net_write(net, (uchar*) packet->ptr(), len))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
return "Failed on my_net_write()";
+ }
DBUG_PRINT("info", ("log event code %d", (*packet)[LOG_EVENT_OFFSET+1] ));
if (event_type == LOAD_EVENT)
{
if (send_file(thd))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
return "failed in send_file()";
+ }
}
if (RUN_HOOK(binlog_transmit, after_send_event, (thd, flags, packet)))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
return "Failed to run hook 'after_send_event'";
+ }
return NULL; /* Success */
}
+
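The per-domain skipping inside send_event_to_slave() can be read as a small state machine: a GTID event whose group still lies at or before the slave's requested position arms a skip state, which then swallows events until the group ends (the standalone statement itself, or the XID or COMMIT/ROLLBACK that closes a transaction). A reduced sketch with illustrative event flags:

  #include <cstdint>

  enum skip_state { SKIP_NOT, SKIP_STANDALONE, SKIP_TRANSACTION };

  // Illustrative flags; in the server these come from the event type, the
  // GTID event's FL_STANDALONE flag, Log_event::is_part_of_group() and
  // Query_log_event::peek_is_commit_rollback().
  struct event_info
  {
    bool is_gtid;            // starts a new event group
    bool is_standalone;      // FL_STANDALONE set on the GTID event
    bool is_part_of_group;   // Intvar/Rand/User_var/Table_map/rows/etc.
    bool ends_transaction;   // XID, or COMMIT/ROLLBACK query
  };

  // 'before_start_pos' is the caller's verdict for the GTID event's domain:
  // true while the group still lies at or before the slave's requested GTID.
  // Returns true if the event should be sent, false if it is swallowed.
  static bool should_send(const event_info &ev, bool before_start_pos,
                          skip_state *state)
  {
    if (ev.is_gtid && before_start_pos)
    {
      *state= ev.is_standalone ? SKIP_STANDALONE : SKIP_TRANSACTION;
      return false;                        // the GTID event itself is skipped too
    }
    switch (*state)
    {
    case SKIP_STANDALONE:
      if (!ev.is_part_of_group)
        *state= SKIP_NOT;                  // the standalone statement ends the skip
      return false;
    case SKIP_TRANSACTION:
      if (ev.ends_transaction)
        *state= SKIP_NOT;                  // skip up to and including the commit
      return false;
    case SKIP_NOT:
      break;
    }
    return true;
  }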
void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
ushort flags)
{
@@ -696,16 +1826,36 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
mysql_mutex_t *log_lock;
mysql_cond_t *log_cond;
int mariadb_slave_capability;
+ char str_buf[128];
+ String connect_gtid_state(str_buf, sizeof(str_buf), system_charset_info);
+ bool using_gtid_state;
+ char str_buf2[128];
+ String slave_until_gtid_str(str_buf2, sizeof(str_buf2), system_charset_info);
+ slave_connection_state gtid_state, until_gtid_state_obj;
+ slave_connection_state *until_gtid_state= NULL;
+ rpl_gtid error_gtid;
+ enum_gtid_skip_type gtid_skip_group= GTID_SKIP_NOT;
+ enum_gtid_until_state gtid_until_group= GTID_UNTIL_NOT_DONE;
+ rpl_binlog_state until_binlog_state;
+ bool slave_gtid_strict_mode= false;
+ bool send_fake_gtid_list= false;
+ HASH fake_gtid_hash;
uint8 current_checksum_alg= BINLOG_CHECKSUM_ALG_UNDEF;
int old_max_allowed_packet= thd->variables.max_allowed_packet;
+
#ifndef DBUG_OFF
int left_events = max_binlog_dump_events;
+ uint dbug_reconnect_counter= 0;
#endif
DBUG_ENTER("mysql_binlog_send");
DBUG_PRINT("enter",("log_ident: '%s' pos: %ld", log_ident, (long) pos));
bzero((char*) &log,sizeof(log));
+ bzero(&error_gtid, sizeof(error_gtid));
+ my_hash_init(&fake_gtid_hash, &my_charset_bin, 32,
+ offsetof(rpl_gtid, domain_id), sizeof(uint32), NULL, my_free,
+ HASH_UNIQUE);
/*
heartbeat_period from @master_heartbeat_period user variable
*/
@@ -716,15 +1866,45 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
*p_start_coord= &start_coord;
LOG_POS_COORD coord_buf= { log_file_name, BIN_LOG_HEADER_SIZE },
*p_coord= &coord_buf;
- if (heartbeat_period != LL(0))
+ if (heartbeat_period != 0)
{
heartbeat_ts= &heartbeat_buf;
set_timespec_nsec(*heartbeat_ts, 0);
}
mariadb_slave_capability= get_mariadb_slave_capability(thd);
+
+ connect_gtid_state.length(0);
+ using_gtid_state= get_slave_connect_state(thd, &connect_gtid_state);
+ DBUG_EXECUTE_IF("simulate_non_gtid_aware_master", using_gtid_state= false;);
+ if (using_gtid_state)
+ {
+ slave_gtid_strict_mode= get_slave_gtid_strict_mode(thd);
+    if (get_slave_until_gtid(thd, &slave_until_gtid_str))
+ until_gtid_state= &until_gtid_state_obj;
+ }
+
+ DBUG_EXECUTE_IF("binlog_force_reconnect_after_22_events",
+ {
+ DBUG_SET("-d,binlog_force_reconnect_after_22_events");
+ DBUG_SET_INITIAL("-d,binlog_force_reconnect_after_22_events");
+ dbug_reconnect_counter= 22;
+ });
+
+ /*
+ We want to corrupt the first event, in Log_event::read_log_event().
+    But we do not want the corruption to happen early, e.g. when the client
+    does BINLOG_GTID_POS(). So the test case sets a DBUG trigger which causes
+    us to set the real DBUG injection here.
+ */
+ DBUG_EXECUTE_IF("corrupt_read_log_event2_set",
+ {
+ DBUG_SET("-d,corrupt_read_log_event2_set");
+ DBUG_SET("+d,corrupt_read_log_event2");
+ });
+
if (global_system_variables.log_warnings > 1)
sql_print_information("Start binlog_dump to slave_server(%d), pos(%s, %lu)",
- thd->server_id, log_ident, (ulong)pos);
+ (int)thd->variables.server_id, log_ident, (ulong)pos);
if (RUN_HOOK(binlog_transmit, transmit_start, (thd, flags, log_ident, pos)))
{
errmsg= "Failed to run hook 'transmit_start'";
@@ -755,10 +1935,47 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
}
name=search_file_name;
- if (log_ident[0])
- mysql_bin_log.make_log_name(search_file_name, log_ident);
+ if (using_gtid_state)
+ {
+ if (gtid_state.load(connect_gtid_state.c_ptr_quick(),
+ connect_gtid_state.length()))
+ {
+ errmsg= "Out of memory or malformed slave request when obtaining start "
+ "position from GTID state";
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
+ if (until_gtid_state &&
+ until_gtid_state->load(slave_until_gtid_str.c_ptr_quick(),
+ slave_until_gtid_str.length()))
+ {
+ errmsg= "Out of memory or malformed slave request when obtaining UNTIL "
+ "position sent from slave";
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
+ if ((error= check_slave_start_position(thd, &gtid_state, &errmsg,
+ &error_gtid, until_gtid_state,
+ &fake_gtid_hash)))
+ {
+ my_errno= error;
+ goto err;
+ }
+ if ((errmsg= gtid_find_binlog_file(&gtid_state, search_file_name,
+ until_gtid_state)))
+ {
+ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ goto err;
+ }
+ pos= 4;
+ }
else
- name=0; // Find first log
+ {
+ if (log_ident[0])
+ mysql_bin_log.make_log_name(search_file_name, log_ident);
+ else
+ name=0; // Find first log
+ }
linfo.index_file_offset = 0;
@@ -935,20 +2152,34 @@ impossible position";
/* The Format_description_log_event event will be found naturally. */
}
+ /*
+ Handle the case of START SLAVE UNTIL with an UNTIL condition already
+ fulfilled at the start position.
+
+ We will send one event, the format_description, and then stop.
+ */
+ if (until_gtid_state && until_gtid_state->count() == 0)
+ gtid_until_group= GTID_UNTIL_STOP_AFTER_STANDALONE;
+
/* seek to the requested position, to start the requested dump */
my_b_seek(&log, pos); // Seek will done on next read
while (!net->error && net->vio != 0 && !thd->killed)
{
Log_event_type event_type= UNKNOWN_EVENT;
+ killed_state killed;
/* reset the transmit packet for the event read from binary log
file */
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
- while (!(error = Log_event::read_log_event(&log, packet, log_lock,
- current_checksum_alg)))
+ bool is_active_binlog= false;
+ while (!(killed= thd->killed) &&
+ !(error = Log_event::read_log_event(&log, packet, log_lock,
+ current_checksum_alg,
+ log_file_name,
+ &is_active_binlog)))
{
#ifndef DBUG_OFF
if (max_binlog_dump_events && !left_events--)
@@ -966,6 +2197,7 @@ impossible position";
event_type=
(Log_event_type)((uchar)(*packet)[LOG_EVENT_OFFSET+ev_offset]);
+#ifdef ENABLED_DEBUG_SYNC
DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid",
{
if (event_type == XID_EVENT)
@@ -984,6 +2216,7 @@ impossible position";
STRING_WITH_LEN(act2)));
}
});
+#endif
if (event_type == FORMAT_DESCRIPTION_EVENT)
{
current_checksum_alg= get_checksum_alg(packet->ptr() + ev_offset,
@@ -1007,15 +2240,57 @@ impossible position";
(*packet)[FLAGS_OFFSET+ev_offset] &= ~LOG_EVENT_BINLOG_IN_USE_F;
}
+#ifndef DBUG_OFF
+ if (dbug_reconnect_counter > 0)
+ {
+ --dbug_reconnect_counter;
+ if (dbug_reconnect_counter == 0)
+ {
+ errmsg= "DBUG-injected forced reconnect";
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
+ }
+#endif
+
if ((tmp_msg= send_event_to_slave(thd, net, packet, flags, event_type,
log_file_name, &log,
mariadb_slave_capability, ev_offset,
- current_checksum_alg)))
+ current_checksum_alg, using_gtid_state,
+ &gtid_state, &gtid_skip_group,
+ until_gtid_state, &gtid_until_group,
+ &until_binlog_state,
+ slave_gtid_strict_mode, &error_gtid,
+ &send_fake_gtid_list, &fake_gtid_hash)))
{
errmsg= tmp_msg;
- my_errno= ER_UNKNOWN_ERROR;
goto err;
}
+ if (unlikely(send_fake_gtid_list) && gtid_skip_group == GTID_SKIP_NOT)
+ {
+ Gtid_list_log_event glev(&until_binlog_state, 0);
+
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg) ||
+ fake_gtid_list_event(net, packet, &glev, &errmsg,
+ current_checksum_alg, my_b_tell(&log)))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
+ send_fake_gtid_list= false;
+ }
+ if (until_gtid_state &&
+ is_until_reached(thd, net, packet, &ev_offset, gtid_until_group,
+ event_type, current_checksum_alg, flags, &errmsg,
+ &until_binlog_state, my_b_tell(&log)))
+ {
+ if (errmsg)
+ {
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
+ goto end;
+ }
DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid",
{
@@ -1029,6 +2304,15 @@ impossible position";
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
}
+ if (killed)
+ goto end;
+
+ DBUG_EXECUTE_IF("wait_after_binlog_EOF",
+ {
+ const char act[]= "now wait_for signal.rotate_finished";
+ DBUG_ASSERT(!debug_sync_set_action(current_thd,
+ STRING_WITH_LEN(act)));
+ };);
/*
TODO: now that we are logging the offset, check to make sure
@@ -1040,8 +2324,11 @@ impossible position";
if (test_for_non_eof_log_read_errors(error, &errmsg))
goto err;
- if (!(flags & BINLOG_DUMP_NON_BLOCK) &&
- mysql_bin_log.is_active(log_file_name))
+ /*
+ We should only move to the next binlog when the last read event
+      came from an already deactivated binlog.
+ */
+ if (!(flags & BINLOG_DUMP_NON_BLOCK) && is_active_binlog)
{
/*
Block until there is more data in the log
@@ -1095,7 +2382,8 @@ impossible position";
mysql_mutex_unlock(log_lock);
read_packet = 1;
p_coord->pos= uint4korr(packet->ptr() + ev_offset + LOG_POS_OFFSET);
- event_type= (Log_event_type)((*packet)[LOG_EVENT_OFFSET+ev_offset]);
+ event_type=
+ (Log_event_type)((uchar)(*packet)[LOG_EVENT_OFFSET+ev_offset]);
break;
case LOG_READ_EOF:
@@ -1103,7 +2391,8 @@ impossible position";
int ret;
ulong signal_cnt;
DBUG_PRINT("wait",("waiting for data in binary log"));
- if (thd->server_id==0) // for mysqlbinlog (mysqlbinlog.server_id==0)
+ /* For mysqlbinlog (mysqlbinlog.server_id==0). */
+ if (thd->variables.server_id==0)
{
mysql_mutex_unlock(log_lock);
goto end;
@@ -1124,6 +2413,8 @@ impossible position";
thd->ENTER_COND(log_cond, log_lock,
&stage_master_has_sent_all_binlog_to_slave,
&old_stage);
+ if (thd->killed)
+ break;
ret= mysql_bin_log.wait_for_update_bin_log(thd, heartbeat_ts);
DBUG_ASSERT(ret == 0 || (heartbeat_period != 0));
if (ret == ETIMEDOUT || ret == ETIME)
@@ -1155,7 +2446,7 @@ impossible position";
{
DBUG_PRINT("wait",("binary log received update or a broadcast signal caught"));
}
- } while (signal_cnt == mysql_bin_log.signal_cnt && !thd->killed);
+ } while (signal_cnt == mysql_bin_log.signal_cnt);
thd->EXIT_COND(&old_stage);
}
break;
@@ -1166,16 +2457,48 @@ impossible position";
goto err;
}
- if (read_packet &&
- (tmp_msg= send_event_to_slave(thd, net, packet, flags, event_type,
- log_file_name, &log,
- mariadb_slave_capability, ev_offset,
- current_checksum_alg)))
+ if (read_packet)
{
- errmsg= tmp_msg;
- my_errno= ER_UNKNOWN_ERROR;
- goto err;
- }
+ if ((tmp_msg= send_event_to_slave(thd, net, packet, flags, event_type,
+ log_file_name, &log,
+ mariadb_slave_capability, ev_offset,
+ current_checksum_alg,
+ using_gtid_state, &gtid_state,
+ &gtid_skip_group, until_gtid_state,
+ &gtid_until_group, &until_binlog_state,
+ slave_gtid_strict_mode, &error_gtid,
+ &send_fake_gtid_list,
+ &fake_gtid_hash)))
+ {
+ errmsg= tmp_msg;
+ goto err;
+ }
+ if (unlikely(send_fake_gtid_list) && gtid_skip_group == GTID_SKIP_NOT)
+ {
+ Gtid_list_log_event glev(&until_binlog_state, 0);
+
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg) ||
+ fake_gtid_list_event(net, packet, &glev, &errmsg,
+ current_checksum_alg, my_b_tell(&log)))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
+ send_fake_gtid_list= false;
+ }
+ if (until_gtid_state &&
+ is_until_reached(thd, net, packet, &ev_offset, gtid_until_group,
+ event_type, current_checksum_alg, flags, &errmsg,
+ &until_binlog_state, my_b_tell(&log)))
+ {
+ if (errmsg)
+ {
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
+ goto end;
+ }
+ }
log.error=0;
}
@@ -1235,6 +2558,7 @@ impossible position";
end:
end_io_cache(&log);
mysql_file_close(file, MYF(MY_WME));
+ my_hash_free(&fake_gtid_hash);
RUN_HOOK(binlog_transmit, transmit_stop, (thd, flags));
my_eof(thd);
@@ -1262,6 +2586,33 @@ err:
my_basename(p_coord->file_name), p_coord->pos,
my_basename(log_file_name), my_b_tell(&log));
}
+ else if (my_errno == ER_GTID_POSITION_NOT_FOUND_IN_BINLOG)
+ {
+ my_snprintf(error_text, sizeof(error_text),
+ "Error: connecting slave requested to start from GTID "
+ "%u-%u-%llu, which is not in the master's binlog",
+ error_gtid.domain_id, error_gtid.server_id, error_gtid.seq_no);
+ /* Use this error code so slave will know not to try reconnect. */
+ my_errno = ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ }
+ else if (my_errno == ER_GTID_START_FROM_BINLOG_HOLE)
+ {
+ my_snprintf(error_text, sizeof(error_text),
+ "The binlog on the master is missing the GTID %u-%u-%llu "
+ "requested by the slave (even though both a prior and a "
+                "subsequent sequence number do exist), and GTID strict mode "
+ "is enabled",
+ error_gtid.domain_id, error_gtid.server_id, error_gtid.seq_no);
+ /* Use this error code so slave will know not to try reconnect. */
+ my_errno = ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ }
+ else if (my_errno == ER_CANNOT_LOAD_SLAVE_GTID_STATE)
+ {
+ my_snprintf(error_text, sizeof(error_text),
+ "Failed to load replication slave GTID state from table %s.%s",
+ "mysql", rpl_gtid_slave_state_table_name.str);
+ my_errno = ER_MASTER_FATAL_ERROR_READING_BINLOG;
+ }
else
strcpy(error_text, errmsg);
end_io_cache(&log);
@@ -1278,6 +2629,7 @@ err:
mysql_mutex_unlock(&LOCK_thread_count);
if (file >= 0)
mysql_file_close(file, MYF(MY_WME));
+ my_hash_free(&fake_gtid_hash);
thd->variables.max_allowed_packet= old_max_allowed_packet;
my_message(my_errno, error_text, MYF(0));
@@ -1313,15 +2665,36 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
create_logfile_name_with_suffix(master_info_file_tmp,
sizeof(master_info_file_tmp),
- master_info_file, 0, &mi->connection_name);
+ master_info_file, 0,
+ &mi->cmp_connection_name);
create_logfile_name_with_suffix(relay_log_info_file_tmp,
sizeof(relay_log_info_file_tmp),
relay_log_info_file, 0,
- &mi->connection_name);
+ &mi->cmp_connection_name);
lock_slave_threads(mi); // this allows us to cleanly read slave_running
// Get a mask of _stopped_ threads
init_thread_mask(&thread_mask,mi,1 /* inverse */);
+
+ if (thd->lex->mi.gtid_pos_str.str)
+ {
+ if (thread_mask != (SLAVE_IO|SLAVE_SQL))
+ {
+ slave_errno= ER_SLAVE_WAS_RUNNING;
+ goto err;
+ }
+ if (thd->lex->slave_thd_opt)
+ {
+ slave_errno= ER_BAD_SLAVE_UNTIL_COND;
+ goto err;
+ }
+ if (mi->using_gtid == Master_info::USE_GTID_NO)
+ {
+ slave_errno= ER_UNTIL_REQUIRES_USING_GTID;
+ goto err;
+ }
+ }
+
/*
Below we will start all stopped threads. But if the user wants to
start only one thread, do as if the other thread was running (as we
@@ -1348,26 +2721,40 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
if (thd->lex->mi.pos)
{
+ if (thd->lex->mi.relay_log_pos)
+ slave_errno=ER_BAD_SLAVE_UNTIL_COND;
mi->rli.until_condition= Relay_log_info::UNTIL_MASTER_POS;
mi->rli.until_log_pos= thd->lex->mi.pos;
/*
We don't check thd->lex->mi.log_file_name for NULL here
since it is checked in sql_yacc.yy
*/
- strmake(mi->rli.until_log_name, thd->lex->mi.log_file_name,
- sizeof(mi->rli.until_log_name)-1);
+ strmake_buf(mi->rli.until_log_name, thd->lex->mi.log_file_name);
}
else if (thd->lex->mi.relay_log_pos)
{
+ if (thd->lex->mi.pos)
+ slave_errno=ER_BAD_SLAVE_UNTIL_COND;
mi->rli.until_condition= Relay_log_info::UNTIL_RELAY_POS;
mi->rli.until_log_pos= thd->lex->mi.relay_log_pos;
- strmake(mi->rli.until_log_name, thd->lex->mi.relay_log_name,
- sizeof(mi->rli.until_log_name)-1);
+ strmake_buf(mi->rli.until_log_name, thd->lex->mi.relay_log_name);
+ }
+ else if (thd->lex->mi.gtid_pos_str.str)
+ {
+ if (mi->rli.until_gtid_pos.load(thd->lex->mi.gtid_pos_str.str,
+ thd->lex->mi.gtid_pos_str.length))
+ {
+ slave_errno= ER_INCORRECT_GTID_STATE;
+ mysql_mutex_unlock(&mi->rli.data_lock);
+ goto err;
+ }
+ mi->rli.until_condition= Relay_log_info::UNTIL_GTID;
}
else
mi->rli.clear_until_condition();
- if (mi->rli.until_condition != Relay_log_info::UNTIL_NONE)
+ if (mi->rli.until_condition == Relay_log_info::UNTIL_MASTER_POS ||
+ mi->rli.until_condition == Relay_log_info::UNTIL_RELAY_POS)
{
/* Preparing members for effective until condition checking */
const char *p= fn_ext(mi->rli.until_log_name);
@@ -1390,7 +2777,10 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
/* mark the cached result of the UNTIL comparison as "undefined" */
mi->rli.until_log_names_cmp_result=
Relay_log_info::UNTIL_LOG_NAMES_CMP_UNKNOWN;
+ }
+ if (mi->rli.until_condition != Relay_log_info::UNTIL_NONE)
+ {
/* Issuing warning then started without --skip-slave-start */
if (!opt_skip_slave_start)
push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
@@ -1422,6 +2812,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
ER(ER_SLAVE_WAS_RUNNING));
}
+err:
unlock_slave_threads(mi);
if (slave_errno)
@@ -1558,11 +2949,13 @@ int reset_slave(THD *thd, Master_info* mi)
// and delete these two files
create_logfile_name_with_suffix(master_info_file_tmp,
- sizeof(master_info_file_tmp),
- master_info_file, 0, &mi->connection_name);
+ sizeof(master_info_file_tmp),
+ master_info_file, 0,
+ &mi->cmp_connection_name);
create_logfile_name_with_suffix(relay_log_info_file_tmp,
- sizeof(relay_log_info_file_tmp),
- relay_log_info_file, 0, &mi->connection_name);
+ sizeof(relay_log_info_file_tmp),
+ relay_log_info_file, 0,
+ &mi->cmp_connection_name);
fn_format(fname, master_info_file_tmp, mysql_data_home, "", 4+32);
if (mysql_file_stat(key_file_master_info, fname, &stat_area, MYF(0)) &&
@@ -1622,7 +3015,7 @@ void kill_zombie_dump_threads(uint32 slave_server_id)
while ((tmp=it++))
{
if (tmp->get_command() == COM_BINLOG_DUMP &&
- tmp->server_id == slave_server_id)
+ tmp->variables.server_id == slave_server_id)
{
mysql_mutex_lock(&tmp->LOCK_thd_data); // Lock from delete
break;
@@ -1651,14 +3044,15 @@ void kill_zombie_dump_threads(uint32 slave_server_id)
*/
static bool get_string_parameter(char *to, const char *from, size_t length,
- const char *name)
+ const char *name, CHARSET_INFO *cs)
{
if (from) // Empty paramaters allowed
{
- size_t from_length;
- if ((from_length= strlen(from)) > length)
+ size_t from_length= strlen(from);
+ uint from_numchars= cs->cset->numchars(cs, from, from + from_length);
+ if (from_numchars > length / cs->mbmaxlen)
{
- my_error(ER_WRONG_STRING_LENGTH, MYF(0), from, name, (int) length);
+ my_error(ER_WRONG_STRING_LENGTH, MYF(0), from, name, length / cs->mbmaxlen);
return 1;
}
memcpy(to, from, from_length+1);
@@ -1692,6 +3086,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
char saved_host[HOSTNAME_LENGTH + 1];
uint saved_port;
char saved_log_name[FN_REFLEN];
+ Master_info::enum_using_gtid saved_using_gtid;
char master_info_file_tmp[FN_REFLEN];
char relay_log_info_file_tmp[FN_REFLEN];
my_off_t saved_log_pos;
@@ -1729,11 +3124,13 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
THD_STAGE_INFO(thd, stage_changing_master);
create_logfile_name_with_suffix(master_info_file_tmp,
- sizeof(master_info_file_tmp),
- master_info_file, 0, &mi->connection_name);
+ sizeof(master_info_file_tmp),
+ master_info_file, 0,
+ &mi->cmp_connection_name);
create_logfile_name_with_suffix(relay_log_info_file_tmp,
- sizeof(relay_log_info_file_tmp),
- relay_log_info_file, 0, &mi->connection_name);
+ sizeof(relay_log_info_file_tmp),
+ relay_log_info_file, 0,
+ &mi->cmp_connection_name);
/* if new Master_info doesn't exists, add it */
if (!master_info_index->get_master_info(&mi->connection_name,
@@ -1775,10 +3172,11 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
/*
Before processing the command, save the previous state.
*/
- strmake(saved_host, mi->host, HOSTNAME_LENGTH);
+ strmake_buf(saved_host, mi->host);
saved_port= mi->port;
- strmake(saved_log_name, mi->master_log_name, FN_REFLEN - 1);
+ strmake_buf(saved_log_name, mi->master_log_name);
saved_log_pos= mi->master_log_pos;
+ saved_using_gtid= mi->using_gtid;
/*
If the user specified host or port without binlog or position,
@@ -1792,8 +3190,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
}
if (lex_mi->log_file_name)
- strmake(mi->master_log_name, lex_mi->log_file_name,
- sizeof(mi->master_log_name)-1);
+ strmake_buf(mi->master_log_name, lex_mi->log_file_name);
if (lex_mi->pos)
{
mi->master_log_pos= lex_mi->pos;
@@ -1801,11 +3198,12 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
if (get_string_parameter(mi->host, lex_mi->host, sizeof(mi->host)-1,
- "MASTER_HOST") ||
+ "MASTER_HOST", system_charset_info) ||
get_string_parameter(mi->user, lex_mi->user, sizeof(mi->user)-1,
- "MASTER_USER") ||
+ "MASTER_USER", system_charset_info) ||
get_string_parameter(mi->password, lex_mi->password,
- sizeof(mi->password)-1, "MASTER_PASSWORD"))
+ sizeof(mi->password)-1, "MASTER_PASSWORD",
+ &my_charset_bin))
{
ret= TRUE;
goto err;
@@ -1820,7 +3218,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
else
mi->heartbeat_period= (float) MY_MIN(SLAVE_MAX_HEARTBEAT_PERIOD,
(slave_net_timeout/2.0));
- mi->received_heartbeats= LL(0); // counter lives until master is CHANGEd
+ mi->received_heartbeats= 0; // counter lives until master is CHANGEd
/*
reset the last time server_id list if the current CHANGE MASTER
is mentioning IGNORE_SERVER_IDS= (...)
@@ -1831,7 +3229,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
{
ulong s_id;
get_dynamic(&lex_mi->repl_ignore_server_ids, (uchar*) &s_id, i);
- if (s_id == ::server_id && replicate_same_server_id)
+ if (s_id == global_system_variables.server_id && replicate_same_server_id)
{
my_error(ER_SLAVE_IGNORE_SERVER_IDS, MYF(0), static_cast<int>(s_id));
ret= TRUE;
@@ -1857,19 +3255,19 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
(lex_mi->ssl_verify_server_cert == LEX_MASTER_INFO::LEX_MI_ENABLE);
if (lex_mi->ssl_ca)
- strmake(mi->ssl_ca, lex_mi->ssl_ca, sizeof(mi->ssl_ca)-1);
+ strmake_buf(mi->ssl_ca, lex_mi->ssl_ca);
if (lex_mi->ssl_capath)
- strmake(mi->ssl_capath, lex_mi->ssl_capath, sizeof(mi->ssl_capath)-1);
+ strmake_buf(mi->ssl_capath, lex_mi->ssl_capath);
if (lex_mi->ssl_cert)
- strmake(mi->ssl_cert, lex_mi->ssl_cert, sizeof(mi->ssl_cert)-1);
+ strmake_buf(mi->ssl_cert, lex_mi->ssl_cert);
if (lex_mi->ssl_cipher)
- strmake(mi->ssl_cipher, lex_mi->ssl_cipher, sizeof(mi->ssl_cipher)-1);
+ strmake_buf(mi->ssl_cipher, lex_mi->ssl_cipher);
if (lex_mi->ssl_key)
- strmake(mi->ssl_key, lex_mi->ssl_key, sizeof(mi->ssl_key)-1);
+ strmake_buf(mi->ssl_key, lex_mi->ssl_key);
if (lex_mi->ssl_crl)
- strmake(mi->ssl_crl, lex_mi->ssl_crl, sizeof(mi->ssl_crl)-1);
+ strmake_buf(mi->ssl_crl, lex_mi->ssl_crl);
if (lex_mi->ssl_crlpath)
- strmake(mi->ssl_crlpath, lex_mi->ssl_crlpath, sizeof(mi->ssl_crlpath)-1);
+ strmake_buf(mi->ssl_crlpath, lex_mi->ssl_crlpath);
#ifndef HAVE_OPENSSL
if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath ||
@@ -1884,10 +3282,8 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
need_relay_log_purge= 0;
char relay_log_name[FN_REFLEN];
mi->rli.relay_log.make_log_name(relay_log_name, lex_mi->relay_log_name);
- strmake(mi->rli.group_relay_log_name, relay_log_name,
- sizeof(mi->rli.group_relay_log_name)-1);
- strmake(mi->rli.event_relay_log_name, relay_log_name,
- sizeof(mi->rli.event_relay_log_name)-1);
+ strmake_buf(mi->rli.group_relay_log_name, relay_log_name);
+ strmake_buf(mi->rli.event_relay_log_name, relay_log_name);
}
if (lex_mi->relay_log_pos)
@@ -1896,6 +3292,15 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
mi->rli.group_relay_log_pos= mi->rli.event_relay_log_pos= lex_mi->relay_log_pos;
}
+ if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_SLAVE_POS)
+ mi->using_gtid= Master_info::USE_GTID_SLAVE_POS;
+ else if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_CURRENT_POS)
+ mi->using_gtid= Master_info::USE_GTID_CURRENT_POS;
+ else if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_NO ||
+ lex_mi->log_file_name || lex_mi->pos ||
+ lex_mi->relay_log_name || lex_mi->relay_log_pos)
+ mi->using_gtid= Master_info::USE_GTID_NO;
+
/*
If user did specify neither host nor port nor any log name nor any log
pos, i.e. he specified only user/password/master_connect_retry, he probably
@@ -1923,9 +3328,9 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
*/
mi->master_log_pos = MY_MAX(BIN_LOG_HEADER_SIZE,
mi->rli.group_master_log_pos);
- strmake(mi->master_log_name, mi->rli.group_master_log_name,
- sizeof(mi->master_log_name)-1);
+ strmake_buf(mi->master_log_name, mi->rli.group_master_log_name);
}
+
/*
Relay log's IO_CACHE may not be inited, if rli->inited==0 (server was never
a slave before).
@@ -1977,8 +3382,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
*/
mi->rli.group_master_log_pos= mi->master_log_pos;
DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
- strmake(mi->rli.group_master_log_name,mi->master_log_name,
- sizeof(mi->rli.group_master_log_name)-1);
+ strmake_buf(mi->rli.group_master_log_name,mi->master_log_name);
if (!mi->rli.group_master_log_name[0]) // uninitialized case
mi->rli.group_master_log_pos=0;
@@ -1996,6 +3400,11 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
"master_log_pos='%ld'.", saved_host, saved_port, saved_log_name,
(ulong) saved_log_pos, mi->host, mi->port, mi->master_log_name,
(ulong) mi->master_log_pos);
+ if (saved_using_gtid != Master_info::USE_GTID_NO ||
+ mi->using_gtid != Master_info::USE_GTID_NO)
+ sql_print_information("Previous Using_Gtid=%s. New Using_Gtid=%s",
+ mi->using_gtid_astext(saved_using_gtid),
+ mi->using_gtid_astext(mi->using_gtid));
/*
If we don't write new coordinates to disk now, then old will remain in
@@ -2359,6 +3768,8 @@ bool show_binlogs(THD* thd)
if (protocol->write())
goto err;
}
+ if (index_file->error == -1)
+ goto err;
mysql_bin_log.unlock_index();
my_eof(thd);
DBUG_RETURN(FALSE);
@@ -2420,4 +3831,190 @@ int log_loaded_block(IO_CACHE* file)
DBUG_RETURN(0);
}
+
+/**
+ Initialise the slave replication state from the mysql.gtid_slave_pos table.
+
+ This is called each time an SQL thread starts, but the data is only actually
+ loaded on the first call.
+
+ The slave state is the last GTID applied on the slave within each
+ replication domain.
+
+ To avoid row lock contention, there are multiple rows for each domain_id.
+ The one containing the current slave state is the one with the maximal
+ sub_id value, within each domain_id.
+
+ CREATE TABLE mysql.gtid_slave_pos (
+ domain_id INT UNSIGNED NOT NULL,
+ sub_id BIGINT UNSIGNED NOT NULL,
+ server_id INT UNSIGNED NOT NULL,
+ seq_no BIGINT UNSIGNED NOT NULL,
+ PRIMARY KEY (domain_id, sub_id))
+*/
+
+void
+rpl_init_gtid_slave_state()
+{
+ rpl_global_gtid_slave_state.init();
+}
+
+
+void
+rpl_deinit_gtid_slave_state()
+{
+ rpl_global_gtid_slave_state.deinit();
+}
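
Illustrative sketch (not part of the patch): the "highest sub_id wins" rule described
in the comment above, applied to rows read from mysql.gtid_slave_pos. The row struct
and containers below are assumptions made only for this example.

    #include <cstdint>
    #include <map>
    #include <vector>

    struct gtid_slave_pos_row            // mirrors the table columns shown above
    {
      uint32_t domain_id;
      uint64_t sub_id;
      uint32_t server_id;
      uint64_t seq_no;
    };

    // Keep, per domain_id, only the row with the largest sub_id: that row holds
    // the current slave position for its replication domain.
    std::map<uint32_t, gtid_slave_pos_row>
    current_slave_state(const std::vector<gtid_slave_pos_row> &rows)
    {
      std::map<uint32_t, gtid_slave_pos_row> state;
      for (const gtid_slave_pos_row &row : rows)
      {
        auto it= state.find(row.domain_id);
        if (it == state.end() || it->second.sub_id < row.sub_id)
          state[row.domain_id]= row;
      }
      return state;
    }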
+
+
+/*
+ Format the current GTID state as a string, for returning the value of
+ @@global.gtid_slave_pos.
+
+ If the flag use_binlog is true, then the contents of the binary log (if
+ enabled) are merged into the current GTID state (@@global.gtid_current_pos).
+*/
+int
+rpl_append_gtid_state(String *dest, bool use_binlog)
+{
+ int err;
+ rpl_gtid *gtid_list= NULL;
+ uint32 num_gtids= 0;
+
+ if (use_binlog && opt_bin_log &&
+ (err= mysql_bin_log.get_most_recent_gtid_list(&gtid_list, &num_gtids)))
+ return err;
+
+ err= rpl_global_gtid_slave_state.tostring(dest, gtid_list, num_gtids);
+ my_free(gtid_list);
+
+ return err;
+}
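
A hypothetical caller sketch (not in this patch) showing how the use_binlog flag maps
to the two documented variables; the helper name is invented for the example.

    /* Produce both GTID position strings from the same entry point. */
    static int show_gtid_positions(String *slave_pos, String *current_pos)
    {
      int err;
      if ((err= rpl_append_gtid_state(slave_pos, false)))  /* @@global.gtid_slave_pos */
        return err;
      return rpl_append_gtid_state(current_pos, true);     /* @@global.gtid_current_pos */
    }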
+
+
+/*
+ Load the current GTID position into a slave_connection_state, for use when
+ connecting to a master server with GTID.
+
+ If the flag use_binlog is true, then the contents of the binary log (if
+ enabled) are merged into the current GTID state (master_use_gtid=current_pos).
+*/
+int
+rpl_load_gtid_state(slave_connection_state *state, bool use_binlog)
+{
+ int err;
+ rpl_gtid *gtid_list= NULL;
+ uint32 num_gtids= 0;
+
+ if (use_binlog && opt_bin_log &&
+ (err= mysql_bin_log.get_most_recent_gtid_list(&gtid_list, &num_gtids)))
+ return err;
+
+ err= state->load(&rpl_global_gtid_slave_state, gtid_list, num_gtids);
+ my_free(gtid_list);
+
+ return err;
+}
+
+
+bool
+rpl_gtid_pos_check(THD *thd, char *str, size_t len)
+{
+ slave_connection_state tmp_slave_state;
+ bool gave_conflict_warning= false, gave_missing_warning= false;
+
+ /* Check that we can parse the supplied string. */
+ if (tmp_slave_state.load(str, len))
+ return true;
+
+ /*
+ Check our own binlog for any of our own transactions that are newer
+ than the GTID state the user is requesting. Any such transactions would
+ result in an out-of-order binlog, which could break anyone replicating
+ with us as master.
+
+ So give an error if this is found, requesting the user to do a
+ RESET MASTER (to clean up the binlog) if they really want this.
+ */
+ if (mysql_bin_log.is_open())
+ {
+ rpl_gtid *binlog_gtid_list= NULL;
+ uint32 num_binlog_gtids= 0;
+ uint32 i;
+
+ if (mysql_bin_log.get_most_recent_gtid_list(&binlog_gtid_list,
+ &num_binlog_gtids))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(MY_WME));
+ return true;
+ }
+ for (i= 0; i < num_binlog_gtids; ++i)
+ {
+ rpl_gtid *binlog_gtid= &binlog_gtid_list[i];
+ rpl_gtid *slave_gtid;
+ if (binlog_gtid->server_id != global_system_variables.server_id)
+ continue;
+ if (!(slave_gtid= tmp_slave_state.find(binlog_gtid->domain_id)))
+ {
+ if (opt_gtid_strict_mode)
+ {
+ my_error(ER_MASTER_GTID_POS_MISSING_DOMAIN, MYF(0),
+ binlog_gtid->domain_id, binlog_gtid->domain_id,
+ binlog_gtid->server_id, binlog_gtid->seq_no);
+ break;
+ }
+ else if (!gave_missing_warning)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_MASTER_GTID_POS_MISSING_DOMAIN,
+ ER(ER_MASTER_GTID_POS_MISSING_DOMAIN),
+ binlog_gtid->domain_id, binlog_gtid->domain_id,
+ binlog_gtid->server_id, binlog_gtid->seq_no);
+ gave_missing_warning= true;
+ }
+ }
+ else if (slave_gtid->seq_no < binlog_gtid->seq_no)
+ {
+ if (opt_gtid_strict_mode)
+ {
+ my_error(ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG, MYF(0),
+ slave_gtid->domain_id, slave_gtid->server_id,
+ slave_gtid->seq_no, binlog_gtid->domain_id,
+ binlog_gtid->server_id, binlog_gtid->seq_no);
+ break;
+ }
+ else if (!gave_conflict_warning)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG,
+ ER(ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG),
+ slave_gtid->domain_id, slave_gtid->server_id,
+ slave_gtid->seq_no, binlog_gtid->domain_id,
+ binlog_gtid->server_id, binlog_gtid->seq_no);
+ gave_conflict_warning= true;
+ }
+ }
+ }
+ my_free(binlog_gtid_list);
+ if (i != num_binlog_gtids)
+ return true;
+ }
+
+ return false;
+}
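
Worked example of the check above, with GTIDs written as domain-server_id-seq_no: if this
server's own binlog contains 0-1-110 (server_id 1 being our own id) and the requested
position only contains 0-1-100, the requested state is behind our binlog, so with
gtid_strict_mode the function raises ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG and otherwise
pushes the corresponding warning once; if the requested position has no entry at all for
domain 0, it is ER_MASTER_GTID_POS_MISSING_DOMAIN that is raised or warned instead.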
+
+
+bool
+rpl_gtid_pos_update(THD *thd, char *str, size_t len)
+{
+ if (rpl_global_gtid_slave_state.load(thd, str, len, true, true))
+ {
+ my_error(ER_FAILED_GTID_STATE_INIT, MYF(0));
+ return true;
+ }
+ else
+ return false;
+}
+
+
#endif /* HAVE_REPLICATION */
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 9ca7e6b00b1..917da9b598e 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -32,6 +32,8 @@ typedef struct st_slave_info
THD* thd;
} SLAVE_INFO;
+struct slave_connection_state;
+
extern my_bool opt_show_slave_auth_info;
extern char *master_host, *master_info_file;
extern bool server_id_supplied;
@@ -65,6 +67,15 @@ int log_loaded_block(IO_CACHE* file);
int init_replication_sys_vars();
void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, ushort flags);
+extern PSI_mutex_key key_LOCK_slave_state, key_LOCK_binlog_state;
+void rpl_init_gtid_slave_state();
+void rpl_deinit_gtid_slave_state();
+int gtid_state_from_binlog_pos(const char *name, uint32 pos, String *out_str);
+int rpl_append_gtid_state(String *dest, bool use_binlog);
+int rpl_load_gtid_state(slave_connection_state *state, bool use_binlog);
+bool rpl_gtid_pos_check(THD *thd, char *str, size_t len);
+bool rpl_gtid_pos_update(THD *thd, char *str, size_t len);
+
#endif /* HAVE_REPLICATION */
#endif /* SQL_REPL_INCLUDED */
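
A minimal sketch (not part of the patch) of how the two new hooks could be wired together
by a caller that sets the GTID slave position: validate the string first, then load it.
The wrapper name is invented for the example.

    static bool set_gtid_slave_pos(THD *thd, char *str, size_t len)
    {
      if (rpl_gtid_pos_check(thd, str, len))     /* parse + binlog consistency check */
        return true;                             /* error or warning already raised */
      return rpl_gtid_pos_update(thd, str, len); /* load into the global slave state */
    }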
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index cf01cd330f9..f5baaad5655 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2012 Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2013 Oracle and/or its affiliates.
Copyright (c) 2009, 2013 Monty Program Ab.
This program is free software; you can redistribute it and/or modify
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/**
@file
@@ -87,12 +87,14 @@ void best_access_path(JOIN *join, JOIN_TAB *s,
POSITION *pos, POSITION *loose_scan_pos);
static void optimize_straight_join(JOIN *join, table_map join_tables);
static bool greedy_search(JOIN *join, table_map remaining_tables,
- uint depth, uint prune_level);
+ uint depth, uint prune_level,
+ uint use_cond_selectivity);
static bool best_extension_by_limited_search(JOIN *join,
table_map remaining_tables,
uint idx, double record_count,
double read_time, uint depth,
- uint prune_level);
+ uint prune_level,
+ uint use_cond_selectivity);
static uint determine_search_depth(JOIN* join);
C_MODE_START
static int join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2);
@@ -128,10 +130,12 @@ static int return_zero_rows(JOIN *join, select_result *res,
List<Item> &fields, bool send_row,
ulonglong select_options, const char *info,
Item *having, List<Item> &all_fields);
-static COND *build_equal_items(THD *thd, COND *cond,
+static COND *build_equal_items(JOIN *join, COND *cond,
COND_EQUAL *inherited,
List<TABLE_LIST> *join_list,
- COND_EQUAL **cond_equal_ref);
+ bool ignore_on_conds,
+ COND_EQUAL **cond_equal_ref,
+ bool link_equal_fields= FALSE);
static COND* substitute_for_best_equal_field(JOIN_TAB *context_tab,
COND *cond,
COND_EQUAL *cond_equal,
@@ -146,8 +150,10 @@ static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list,
static COND *optimize_cond(JOIN *join, COND *conds,
List<TABLE_LIST> *join_list,
+ bool ignore_on_conds,
Item::cond_result *cond_value,
- COND_EQUAL **cond_equal);
+ COND_EQUAL **cond_equal,
+ int flags= 0);
bool const_expression_in_where(COND *conds,Item *item, Item **comp_item);
static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table,
Procedure *proc);
@@ -272,6 +278,8 @@ enum enum_exec_or_opt {WALK_OPTIMIZATION_TABS , WALK_EXECUTION_TABS};
JOIN_TAB *first_breadth_first_tab(JOIN *join, enum enum_exec_or_opt tabs_kind);
JOIN_TAB *next_breadth_first_tab(JOIN *join, enum enum_exec_or_opt tabs_kind,
JOIN_TAB *tab);
+static double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
+ table_map rem_tables);
#ifndef DBUG_OFF
@@ -597,28 +605,37 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array,
List<Item> &all_fields,
COND **conds,
ORDER *order,
- ORDER *group, bool *hidden_group_fields)
+ ORDER *group,
+ bool *hidden_group_fields,
+ uint *reserved)
{
int res;
- nesting_map save_allow_sum_func=thd->lex->allow_sum_func ;
+ st_select_lex *const select= thd->lex->current_select;
+ nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
/*
Need to save the value, so we can turn off only any new non_agg_field_used
additions coming from the WHERE
*/
- const bool saved_non_agg_field_used=
- thd->lex->current_select->non_agg_field_used();
+ const bool saved_non_agg_field_used= select->non_agg_field_used();
DBUG_ENTER("setup_without_group");
- thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level);
+ thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level);
res= setup_conds(thd, tables, leaves, conds);
+ if (thd->lex->current_select->first_cond_optimization)
+ {
+ if (!res && *conds)
+ (*reserved)= (*conds)->exists2in_reserved_items();
+ else
+ (*reserved)= 0;
+ }
/* it's not wrong to have non-aggregated columns in a WHERE */
- thd->lex->current_select->set_non_agg_field_used(saved_non_agg_field_used);
+ select->set_non_agg_field_used(saved_non_agg_field_used);
- thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level;
+ thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level;
res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields,
order);
- thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level);
+ thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level);
res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields,
group, hidden_group_fields);
thd->lex->allow_sum_func= save_allow_sum_func;
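
The (nesting_map) casts in this hunk, like the (key_part_map) and (uint8) casts later in
the file, all fix the same pattern: shifting a plain int literal. A minimal sketch of why
the cast matters, assuming nesting_map is a 64-bit unsigned type:

    #include <stdint.h>

    typedef uint64_t nesting_map_t;   /* stand-in for nesting_map (assumption) */

    /*
      '1 << nest_level' shifts a 32-bit int and overflows once nest_level reaches
      the width of int; casting first makes the shift 64 bits wide.
    */
    static nesting_map_t select_nest_bit(unsigned nest_level)
    {
      return (nesting_map_t) 1 << nest_level;   /* valid for nest_level 0..63 */
    }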
@@ -758,7 +775,7 @@ JOIN::prepare(Item ***rref_pointer_array,
setup_without_group(thd, (*rref_pointer_array), tables_list,
select_lex->leaf_tables, fields_list,
all_fields, &conds, order, group_list,
- &hidden_group_fields))
+ &hidden_group_fields, &select_lex->select_n_reserved))
DBUG_RETURN(-1); /* purecov: inspected */
ref_pointer_array= *rref_pointer_array;
@@ -767,7 +784,7 @@ JOIN::prepare(Item ***rref_pointer_array,
{
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
thd->where="having clause";
- thd->lex->allow_sum_func|= 1 << select_lex_arg->nest_level;
+ thd->lex->allow_sum_func|= (nesting_map)1 << select_lex_arg->nest_level;
select_lex->having_fix_field= 1;
/*
Wrap alone field in HAVING clause in case it will be outer field of subquery
@@ -1038,6 +1055,23 @@ JOIN::optimize_inner()
table_count= select_lex->leaf_tables.elements;
select_lex->update_used_tables();
}
+ /*
+ In fact we transform underlying subqueries here, after their 'prepare' phase
+ and before their 'optimize' phase, from the upper query's 'optimize', to
+ allow the semijoin conversion to happen (which is done in the same way).
+ */
+ if (select_lex->first_cond_optimization &&
+ conds && conds->walk(&Item::exists2in_processor, 0, (uchar *)thd))
+ DBUG_RETURN(1);
+ /*
+TODO: make the view decide if it is possible to write to WHERE directly, or make semi-joins able to process the ON condition if it is possible
+ for (TABLE_LIST *tbl= tables_list; tbl; tbl= tbl->next_local)
+ {
+ if (tbl->on_expr &&
+ tbl->on_expr->walk(&Item::exists2in_processor, 0, (uchar *)thd))
+ DBUG_RETURN(1);
+ }
+ */
if (transform_max_min_subquery())
DBUG_RETURN(1); /* purecov: inspected */
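
Roughly, the exists2in rewrite referred to in the comment above turns a predicate such as
WHERE EXISTS (SELECT 1 FROM t2 WHERE t2.a = t1.a) into t1.a IN (SELECT t2.a FROM t2), so
that the resulting IN subquery can then be picked up by the existing semi-join conversion
machinery.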
@@ -1124,7 +1158,8 @@ JOIN::optimize_inner()
if (setup_jtbm_semi_joins(this, join_list, &conds))
DBUG_RETURN(1);
- conds= optimize_cond(this, conds, join_list, &cond_value, &cond_equal);
+ conds= optimize_cond(this, conds, join_list, FALSE,
+ &cond_value, &cond_equal, OPT_LINK_EQUAL_FIELDS);
if (thd->is_error())
{
@@ -1134,7 +1169,9 @@ JOIN::optimize_inner()
}
{
- having= optimize_cond(this, having, join_list, &having_value, &having_equal);
+ having= optimize_cond(this, having, join_list, TRUE,
+ &having_value, &having_equal);
+
if (thd->is_error())
{
error= 1;
@@ -2141,8 +2178,7 @@ JOIN::reinit()
DBUG_ENTER("JOIN::reinit");
unit->offset_limit_cnt= (ha_rows)(select_lex->offset_limit ?
- select_lex->offset_limit->val_uint() :
- ULL(0));
+ select_lex->offset_limit->val_uint() : 0);
first_record= 0;
cleaned= false;
@@ -2282,6 +2318,7 @@ void JOIN::exec_inner()
{
List<Item> *columns_list= &fields_list;
int tmp_error;
+
DBUG_ENTER("JOIN::exec");
const bool has_group_by= this->group;
@@ -2459,7 +2496,12 @@ void JOIN::exec_inner()
List<Item> *curr_all_fields= &all_fields;
List<Item> *curr_fields_list= &fields_list;
TABLE *curr_tmp_table= 0;
- bool tmp_having_used_tables_updated= FALSE;
+ /*
+ curr_join->join_free() will call JOIN::cleanup(full=TRUE). It will not
+ be safe to call update_used_tables() after that.
+ */
+ if (curr_join->tmp_having)
+ curr_join->tmp_having->update_used_tables();
/*
Initialize examined rows here because the values from all join parts
@@ -2715,16 +2757,6 @@ void JOIN::exec_inner()
curr_join->select_distinct=0; /* Each row is unique */
- /*
- curr_join->join_free() will call JOIN::cleanup(full=TRUE). It will not
- be safe to call update_used_tables() after that.
- */
- if (curr_join->tmp_having)
- {
- curr_join->tmp_having->update_used_tables();
- tmp_having_used_tables_updated= TRUE;
- }
-
curr_join->join_free(); /* Free quick selects */
if (curr_join->select_distinct && ! curr_join->group_list)
@@ -2805,9 +2837,6 @@ void JOIN::exec_inner()
if (curr_join->tmp_having && ! curr_join->group_list &&
! curr_join->sort_and_group)
{
- // Some tables may have been const
- if (!tmp_having_used_tables_updated)
- curr_join->tmp_having->update_used_tables();
JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables];
table_map used_tables= (curr_join->const_table_map |
curr_table->table->map);
@@ -3321,6 +3350,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
table->pos_in_table_list= tables;
error= tables->fetch_number_of_rows();
set_statistics_for_table(join->thd, table);
+ bitmap_clear_all(&table->cond_set);
#ifdef WITH_PARTITION_STORAGE_ENGINE
const bool all_partitions_pruned_away= table->all_partitions_pruned_away;
@@ -3655,9 +3685,16 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
!table->fulltext_searched &&
(!embedding || (embedding->sj_on_expr && !embedding->embedding)))
{
+ key_map base_part, base_const_ref, base_eq_part;
+ base_part.set_prefix(keyinfo->user_defined_key_parts);
+ base_const_ref= const_ref;
+ base_const_ref.intersect(base_part);
+ base_eq_part= eq_part;
+ base_eq_part.intersect(base_part);
if (table->actual_key_flags(keyinfo) & HA_NOSAME)
{
- if (const_ref == eq_part &&
+
+ if (base_const_ref == base_eq_part &&
!has_expensive_keyparts &&
!((outer_join & table->map) &&
(*s->on_expr_ref)->is_expensive()))
@@ -3683,7 +3720,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
else
found_ref|= refs; // Table is const if all refs are const
}
- else if (const_ref == eq_part)
+ else if (base_const_ref == base_eq_part)
s->const_keys.set_bit(key);
}
}
@@ -3752,6 +3789,8 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
all select distinct fields participate in one index.
*/
add_group_and_distinct_keys(join, s);
+
+ s->table->cond_selectivity= 1.0;
/*
Perform range analysis if there are keys it could use (1).
@@ -3760,7 +3799,8 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
Don't do range analysis for materialized subqueries (4).
Don't do range analysis for materialized derived tables (5)
*/
- if (!s->const_keys.is_clear_all() && // (1)
+ if ((!s->const_keys.is_clear_all() ||
+ !bitmap_is_clear_all(&s->table->cond_set)) && // (1)
(!s->table->pos_in_table_list->embedding || // (2)
(s->table->pos_in_table_list->embedding && // (3)
s->table->pos_in_table_list->embedding->sj_on_expr)) && // (3)
@@ -3768,20 +3808,37 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
!(s->table->pos_in_table_list->derived && // (5)
s->table->pos_in_table_list->is_materialized_derived())) // (5)
{
- ha_rows records;
- SQL_SELECT *select;
- select= make_select(s->table, found_const_table_map,
- found_const_table_map,
- *s->on_expr_ref ? *s->on_expr_ref : conds,
- 1, &error);
- if (!select)
- goto error;
- records= get_quick_record_count(join->thd, select, s->table,
- &s->const_keys, join->row_limit);
- s->quick=select->quick;
- s->needed_reg=select->needed_reg;
- select->quick=0;
- if (records == 0 && s->table->reginfo.impossible_range)
+ bool impossible_range= FALSE;
+ ha_rows records= HA_POS_ERROR;
+ SQL_SELECT *select= 0;
+ if (!s->const_keys.is_clear_all())
+ {
+ select= make_select(s->table, found_const_table_map,
+ found_const_table_map,
+ *s->on_expr_ref ? *s->on_expr_ref : conds,
+ 1, &error);
+ if (!select)
+ goto error;
+ records= get_quick_record_count(join->thd, select, s->table,
+ &s->const_keys, join->row_limit);
+ s->quick=select->quick;
+ s->needed_reg=select->needed_reg;
+ select->quick=0;
+ impossible_range= records == 0 && s->table->reginfo.impossible_range;
+ }
+ if (!impossible_range)
+ {
+ if (join->thd->variables.optimizer_use_condition_selectivity > 1)
+ calculate_cond_selectivity_for_table(join->thd, s->table,
+ *s->on_expr_ref ?
+ *s->on_expr_ref : conds);
+ if (s->table->reginfo.impossible_range)
+ {
+ impossible_range= TRUE;
+ records= 0;
+ }
+ }
+ if (impossible_range)
{
/*
Impossible WHERE or ON expression
@@ -3806,14 +3863,17 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->found_records=records;
s->read_time= s->quick ? s->quick->read_time : 0.0;
}
- delete select;
+ if (select)
+ delete select;
}
+
}
if (pull_out_semijoin_tables(join))
DBUG_RETURN(TRUE);
join->join_tab=stat;
+ join->top_join_tab_count= table_count;
join->map2table=stat_ref;
join->table= table_vector;
join->const_tables=const_count;
@@ -3861,6 +3921,8 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
if (join->choose_subquery_plan(all_table_map & ~join->const_table_map))
goto error;
+ DEBUG_SYNC(join->thd, "inside_make_join_statistics");
+
/* Generate an execution plan from the found optimal join order. */
DBUG_RETURN(join->thd->check_killed() || get_best_combination(join));
@@ -4154,18 +4216,19 @@ add_key_field(JOIN *join,
!(field->table->pos_in_table_list->is_materialized_derived() &&
field->table->created)) ||
(field->table->pos_in_table_list->is_materialized_derived() &&
- !field->table->created)))
+ !field->table->created && !(field->flags & BLOB_FLAG))))
{
optimize= KEY_OPTIMIZE_EQ;
}
else if (!(field->flags & PART_KEY_FLAG))
{
// Don't remove column IS NULL on a LEFT JOIN table
- if (!eq_func || (*value)->type() != Item::NULL_ITEM ||
- !field->table->maybe_null || field->null_ptr)
- return; // Not a key. Skip it
- optimize= KEY_OPTIMIZE_EXISTS;
- DBUG_ASSERT(num_values == 1);
+ if (eq_func && (*value)->type() == Item::NULL_ITEM &&
+ field->table->maybe_null && !field->null_ptr)
+ {
+ optimize= KEY_OPTIMIZE_EXISTS;
+ DBUG_ASSERT(num_values == 1);
+ }
}
if (optimize != KEY_OPTIMIZE_EXISTS)
{
@@ -4214,7 +4277,11 @@ add_key_field(JOIN *join,
break;
}
if (is_const)
+ {
stat[0].const_keys.merge(possible_keys);
+ if (possible_keys.is_clear_all())
+ bitmap_set_bit(&field->table->cond_set, field->field_index);
+ }
else if (!eq_func)
{
/*
@@ -4241,7 +4308,7 @@ add_key_field(JOIN *join,
{
if ((*value)->cmp_type() != STRING_RESULT)
return;
- if (((Field_str*)field)->charset() != cond->compare_collation())
+ if (field->charset() != cond->compare_collation())
return;
}
}
@@ -5131,6 +5198,7 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
}
+
/**
Check for the presence of AGGFN(DISTINCT a) queries that may be subject
to loose index scan.
@@ -5284,6 +5352,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
join->positions[idx].table= table;
join->positions[idx].key=key;
join->positions[idx].records_read=1.0; /* This is a const table */
+ join->positions[idx].cond_selectivity= 1.0;
join->positions[idx].ref_depend_map= 0;
// join->positions[idx].loosescan_key= MAX_KEY; /* Not a LooseScan */
@@ -5457,6 +5526,8 @@ best_access_path(JOIN *join,
2. we won't get two ref-or-null's
*/
if (!(remaining_tables & keyuse->used_tables) &&
+ s->access_from_tables_is_allowed(keyuse->used_tables,
+ join->sjm_lookup_tables) &&
!(ref_or_null_part && (keyuse->optimize &
KEY_OPTIMIZE_REF_OR_NULL)))
{
@@ -5577,7 +5648,8 @@ best_access_path(JOIN *join,
in ReuseRangeEstimateForRef-3.
*/
if (table->quick_keys.is_set(key) &&
- (const_part & ((1 << table->quick_key_parts[key])-1)) ==
+ (const_part &
+ (((key_part_map)1 << table->quick_key_parts[key])-1)) ==
(((key_part_map)1 << table->quick_key_parts[key])-1) &&
table->quick_n_ranges[key] == 1 &&
records > (double) table->quick_rows[key])
@@ -5741,7 +5813,8 @@ best_access_path(JOIN *join,
*/
if (table->quick_keys.is_set(key) &&
table->quick_key_parts[key] <= max_key_part &&
- const_part & (1 << table->quick_key_parts[key]) &&
+ const_part &
+ ((key_part_map)1 << table->quick_key_parts[key]) &&
table->quick_n_ranges[key] == 1 + test(ref_or_null_part &
const_part) &&
records > (double) table->quick_rows[key])
@@ -6001,6 +6074,7 @@ static void choose_initial_table_order(JOIN *join)
TABLE_LIST *emb_subq;
JOIN_TAB **tab= join->best_ref + join->const_tables;
JOIN_TAB **tabs_end= tab + join->table_count - join->const_tables;
+ DBUG_ENTER("choose_initial_table_order");
/* Find where the top-level JOIN_TABs end and subquery JOIN_TABs start */
for (; tab != tabs_end; tab++)
{
@@ -6010,7 +6084,7 @@ static void choose_initial_table_order(JOIN *join)
uint n_subquery_tabs= tabs_end - tab;
if (!n_subquery_tabs)
- return;
+ DBUG_VOID_RETURN;
/* Copy the subquery JOIN_TABs to a separate array */
JOIN_TAB *subquery_tabs[MAX_TABLES];
@@ -6065,6 +6139,7 @@ static void choose_initial_table_order(JOIN *join)
subq_tab += n_subquery_tables - 1;
}
}
+ DBUG_VOID_RETURN;
}
@@ -6096,6 +6171,8 @@ choose_plan(JOIN *join, table_map join_tables)
{
uint search_depth= join->thd->variables.optimizer_search_depth;
uint prune_level= join->thd->variables.optimizer_prune_level;
+ uint use_cond_selectivity=
+ join->thd->variables.optimizer_use_condition_selectivity;
bool straight_join= test(join->select_options & SELECT_STRAIGHT_JOIN);
DBUG_ENTER("choose_plan");
@@ -6160,7 +6237,8 @@ choose_plan(JOIN *join, table_map join_tables)
if (search_depth == 0)
/* Automatically determine a reasonable value for 'search_depth' */
search_depth= determine_search_depth(join);
- if (greedy_search(join, join_tables, search_depth, prune_level))
+ if (greedy_search(join, join_tables, search_depth, prune_level,
+ use_cond_selectivity))
DBUG_RETURN(TRUE);
}
}
@@ -6434,6 +6512,8 @@ optimize_straight_join(JOIN *join, table_map join_tables)
bool disable_jbuf= join->thd->variables.join_cache_level == 0;
double record_count= 1.0;
double read_time= 0.0;
+ uint use_cond_selectivity=
+ join->thd->variables.optimizer_use_condition_selectivity;
POSITION loose_scan_pos;
for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
@@ -6450,6 +6530,11 @@ optimize_straight_join(JOIN *join, table_map join_tables)
&loose_scan_pos);
join_tables&= ~(s->table->map);
+ double pushdown_cond_selectivity= 1.0;
+ if (use_cond_selectivity > 1)
+ pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
+ join_tables);
+ join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
++idx;
}
@@ -6537,6 +6622,8 @@ optimize_straight_join(JOIN *join, table_map join_tables)
@param search_depth controls the exhaustiveness of the search
@param prune_level the pruning heuristics that should be applied during
search
+ @param use_cond_selectivity specifies how the selectivity of the conditions
+ pushed to a table should be taken into account
@retval
FALSE ok
@@ -6548,7 +6635,8 @@ static bool
greedy_search(JOIN *join,
table_map remaining_tables,
uint search_depth,
- uint prune_level)
+ uint prune_level,
+ uint use_cond_selectivity)
{
double record_count= 1.0;
double read_time= 0.0;
@@ -6573,7 +6661,8 @@ greedy_search(JOIN *join,
/* Find the extension of the current QEP with the lowest cost */
join->best_read= DBL_MAX;
if (best_extension_by_limited_search(join, remaining_tables, idx, record_count,
- read_time, search_depth, prune_level))
+ read_time, search_depth, prune_level,
+ use_cond_selectivity))
DBUG_RETURN(TRUE);
/*
'best_read < DBL_MAX' means that optimizer managed to find
@@ -6813,6 +6902,240 @@ double JOIN::get_examined_rows()
/**
+ @brief
+ Get the selectivity of equalities between columns when joining a table
+
+ @param join The optimized join
+ @param idx The number of tables in the evaluated partial join
+ @param s The table to be joined for evaluation
+ @param rem_tables The bitmap of tables to be joined later
+ @param keyparts The number of key parts used when joining s
+ @param ref_keyuse_steps Array of references to keyuses employed to join s
+*/
+
+static
+double table_multi_eq_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
+ table_map rem_tables, uint keyparts,
+ uint16 *ref_keyuse_steps)
+{
+ double sel= 1.0;
+ COND_EQUAL *cond_equal= join->cond_equal;
+
+ if (!cond_equal || !cond_equal->current_level.elements)
+ return sel;
+
+ if (!s->keyuse)
+ return sel;
+
+ Item_equal *item_equal;
+ List_iterator_fast<Item_equal> it(cond_equal->current_level);
+ TABLE *table= s->table;
+ table_map table_bit= table->map;
+ POSITION *pos= &join->positions[idx];
+
+ while ((item_equal= it++))
+ {
+ /*
+ Check whether we need to take into account the selectivity of
+ multiple equality item_equal. If this is the case, multiply
+ the current value of sel by this selectivity
+ */
+ table_map used_tables= item_equal->used_tables();
+ if (!(used_tables & table_bit))
+ continue;
+ if (item_equal->get_const())
+ continue;
+
+ Field *fld;
+ bool adjust_sel= FALSE;
+ Item_equal_fields_iterator fi(*item_equal);
+ while ((fi++) && !adjust_sel)
+ {
+ Field *fld= fi.get_curr_field();
+ if (fld->table->map != table_bit)
+ continue;
+ if (pos->key == 0)
+ adjust_sel= TRUE;
+ else
+ {
+ uint i;
+ KEYUSE *keyuse= pos->key;
+ uint key= keyuse->key;
+
+ for (i= 0; i < keyparts; i++)
+ {
+ uint fldno;
+ if (is_hash_join_key_no(key))
+ fldno= keyuse->keypart;
+ else
+ fldno= table->key_info[key].key_part[keyparts-1].fieldnr - 1;
+ if (fld->field_index == fldno)
+ break;
+ }
+ if (i == keyparts)
+ {
+ /*
+ Field fld is included in multiple equality item_equal
+ and is not a part of the ref key.
+ The selectivity of the multiple equality must be taken
+ into account unless one of the ref arguments is
+ equal to fld.
+ */
+ adjust_sel= TRUE;
+ for (uint j= 0; j < keyparts && adjust_sel; j++)
+ {
+ if (j > 0)
+ keyuse+= ref_keyuse_steps[j-1];
+ Item *ref_item= keyuse->val;
+ if (ref_item->real_item()->type() == Item::FIELD_ITEM)
+ {
+ Item_field *field_item= (Item_field *) (ref_item->real_item());
+ if (item_equal->contains(field_item->field))
+ adjust_sel= FALSE;
+ }
+ }
+ }
+ }
+ }
+ if (adjust_sel)
+ {
+ /*
+ If ref == 0 and there are no fields in the multiple equality
+ item_equal that belong to the tables joined prior to s
+ then the selectivity of the multiple equality will be set to 1.0.
+ */
+ double eq_fld_sel= 1.0;
+ fi.rewind();
+ while ((fi++))
+ {
+ double curr_eq_fld_sel;
+ fld= fi.get_curr_field();
+ if (!(fld->table->map & ~(table_bit | rem_tables)))
+ continue;
+ curr_eq_fld_sel= get_column_avg_frequency(fld) /
+ fld->table->stat_records();
+ if (curr_eq_fld_sel < 1.0)
+ set_if_bigger(eq_fld_sel, curr_eq_fld_sel);
+ }
+ sel*= eq_fld_sel;
+ }
+ }
+ return sel;
+}
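
For instance (restating the conditions checked above), when table s is joined by ref over
an index on s.b while the multiple equality (t1.a, t2.a, s.a) also constrains s.a, the ref
access does not account for s.a = t1.a, so its selectivity has to be factored into the
returned value; if instead s.a is one of the ref key parts, or the ref values themselves
come from the same multiple equality, no adjustment is made.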
+
+
+/**
+ @brief
+ Get the selectivity of conditions when joining a table
+
+ @param join The optimized join
+ @param s The table to be joined for evaluation
+ @param rem_tables The bitmap of tables to be joined later
+
+ @retval
+ selectivity of the conditions imposed on the rows of s
+*/
+
+static
+double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
+ table_map rem_tables)
+{
+ uint16 ref_keyuse_steps[MAX_REF_PARTS - 1];
+ Field *field;
+ TABLE *table= s->table;
+ MY_BITMAP *read_set= table->read_set;
+ double sel= s->table->cond_selectivity;
+ double table_records= table->stat_records();
+ POSITION *pos= &join->positions[idx];
+ uint keyparts= 0;
+ uint found_part_ref_or_null= 0;
+
+ /* Discount the selectivity of the access method used to join table s */
+ if (s->quick && s->quick->index != MAX_KEY)
+ {
+ if (pos->key == 0 && table_records > 0)
+ {
+ sel/= table->quick_rows[s->quick->index]/table_records;
+ }
+ }
+ else if (pos->key != 0)
+ {
+ /* A ref access or hash join is used to join the table */
+ KEYUSE *keyuse= pos->key;
+ KEYUSE *prev_ref_keyuse= keyuse;
+ uint key= keyuse->key;
+ do
+ {
+ if (!(keyuse->used_tables & (rem_tables | table->map)))
+ {
+ if (are_tables_local(s, keyuse->val->used_tables()))
+ {
+ if (is_hash_join_key_no(key))
+ {
+ if (keyparts == keyuse->keypart)
+ keyparts++;
+ }
+ else
+ {
+ if (keyparts == keyuse->keypart &&
+ !(~(keyuse->val->used_tables()) & pos->ref_depend_map) &&
+ !(found_part_ref_or_null & keyuse->optimize))
+ {
+ keyparts++;
+ found_part_ref_or_null|= keyuse->optimize & ~KEY_OPTIMIZE_EQ;
+ }
+ }
+ if (keyparts > keyuse->keypart)
+ {
+ uint fldno;
+ if (is_hash_join_key_no(key))
+ fldno= keyuse->keypart;
+ else
+ fldno= table->key_info[key].key_part[keyparts-1].fieldnr - 1;
+ if (keyuse->val->const_item())
+ sel*= table->field[fldno]->cond_selectivity;
+ if (keyparts > 1)
+ {
+ ref_keyuse_steps[keyparts-2]= keyuse - prev_ref_keyuse;
+ prev_ref_keyuse= keyuse;
+ }
+ }
+ }
+ }
+ keyuse++;
+ } while (keyuse->table == table && keyuse->key == key);
+ }
+
+ /*
+ If the field f from the table is equal to a field from one of the
+ earlier joined tables, then the selectivity of the range conditions
+ over the field f must be discounted.
+ */
+ for (Field **f_ptr=table->field ; (field= *f_ptr) ; f_ptr++)
+ {
+ if (!bitmap_is_set(read_set, field->field_index) ||
+ !field->next_equal_field)
+ continue;
+ for (Field *next_field= field->next_equal_field;
+ next_field != field;
+ next_field= next_field->next_equal_field)
+ {
+ if (!(next_field->table->map & rem_tables) && next_field->table != table)
+ {
+ sel/= field->cond_selectivity;
+ break;
+ }
+ }
+ }
+
+ sel*= table_multi_eq_cond_selectivity(join, idx, s, rem_tables,
+ keyparts, ref_keyuse_steps);
+
+ return sel;
+}
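
A rough numeric illustration (numbers invented for the example): suppose s has
stat_records() = 1000, the combined selectivity of all conditions pushed to s is
cond_selectivity = 0.05, and the chosen access method is a range scan expected to read
quick_rows = 100 rows (selectivity 0.1). The function then returns 0.05 / 0.1 = 0.5, and
best_extension_by_limited_search below multiplies the partial join cardinality by that
0.5: half of the rows actually fetched by the range scan are expected to survive the
remaining pushed-down conditions.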
+
+
+/**
Find a good, possibly optimal, query execution plan (QEP) by a possibly
exhaustive search.
@@ -6922,6 +7245,8 @@ double JOIN::get_examined_rows()
@param prune_level pruning heuristics that should be applied during
optimization
(values: 0 = EXHAUSTIVE, 1 = PRUNE_BY_TIME_OR_ROWS)
+ @param use_cond_selectivity specifies how the selectivity of the conditions
+ pushed to a table should be taken into account
@retval
FALSE ok
@@ -6936,7 +7261,8 @@ best_extension_by_limited_search(JOIN *join,
double record_count,
double read_time,
uint search_depth,
- uint prune_level)
+ uint prune_level,
+ uint use_cond_selectivity)
{
DBUG_ENTER("best_extension_by_limited_search");
@@ -7039,16 +7365,25 @@ best_extension_by_limited_search(JOIN *join,
}
}
+ double pushdown_cond_selectivity= 1.0;
+ if (use_cond_selectivity > 1)
+ pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
+ remaining_tables &
+ ~real_table_bit);
+ join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
+ double partial_join_cardinality= current_record_count *
+ pushdown_cond_selectivity;
if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
{ /* Recursively expand the current partial plan */
swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
if (best_extension_by_limited_search(join,
remaining_tables & ~real_table_bit,
idx + 1,
- current_record_count,
+ partial_join_cardinality,
current_read_time,
search_depth - 1,
- prune_level))
+ prune_level,
+ use_cond_selectivity))
DBUG_RETURN(TRUE);
swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
}
@@ -7066,7 +7401,7 @@ best_extension_by_limited_search(JOIN *join,
{
memcpy((uchar*) join->best_positions, (uchar*) join->positions,
sizeof(POSITION) * (idx + 1));
- join->record_count= current_record_count;
+ join->record_count= partial_join_cardinality;
join->best_read= current_read_time - 0.001;
}
DBUG_EXECUTE("opt", print_plan(join, idx+1,
@@ -7710,8 +8045,8 @@ get_best_combination(JOIN *join)
sub-order
*/
SJ_MATERIALIZATION_INFO *sjm= cur_pos->table->emb_sj_nest->sj_mat_info;
- j->records_read= sjm->is_sj_scan? sjm->rows : 1;
- j->records= (ha_rows) j->records_read;
+ j->records= j->records_read= (ha_rows)(sjm->is_sj_scan? sjm->rows : 1);
+ j->cond_selectivity= 1.0;
JOIN_TAB *jt;
JOIN_TAB_RANGE *jt_range;
if (!(jt= (JOIN_TAB*)join->thd->alloc(sizeof(JOIN_TAB)*sjm->tables)) ||
@@ -7774,7 +8109,8 @@ get_best_combination(JOIN *join)
Save records_read in JOIN_TAB so that select_describe()/etc don't have
to access join->best_positions[].
*/
- j->records_read= join->best_positions[tablenr].records_read;
+ j->records_read= (ha_rows)join->best_positions[tablenr].records_read;
+ j->cond_selectivity= join->best_positions[tablenr].cond_selectivity;
join->map2table[j->table->tablenr]= j;
/* If we've reached the end of sjm nest, switch back to main sequence */
@@ -7994,7 +8330,9 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
*/
do
{
- if (!(~used_tables & keyuse->used_tables))
+ if (!(~used_tables & keyuse->used_tables) &&
+ j->access_from_tables_is_allowed(keyuse->used_tables,
+ join->sjm_lookup_tables))
{
if (are_tables_local(j, keyuse->val->used_tables()))
{
@@ -8063,7 +8401,9 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
uint i;
for (i=0 ; i < keyparts ; keyuse++,i++)
{
- while (((~used_tables) & keyuse->used_tables) ||
+ while (((~used_tables) & keyuse->used_tables) ||
+ !j->access_from_tables_is_allowed(keyuse->used_tables,
+ join->sjm_lookup_tables) ||
keyuse->keypart == NO_KEYPART ||
(keyuse->keypart !=
(is_hash_join_key_no(key) ?
@@ -8075,7 +8415,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
j->ref.items[i]=keyuse->val; // Save for cond removal
j->ref.cond_guards[i]= keyuse->cond_guard;
if (keyuse->null_rejecting)
- j->ref.null_rejecting |= 1 << i;
+ j->ref.null_rejecting|= (key_part_map)1 << i;
keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables;
/*
Todo: we should remove this check for thd->lex->describe on the next
@@ -8122,20 +8462,17 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
ulong key_flags= j->table->actual_key_flags(keyinfo);
if (j->type == JT_CONST)
j->table->const_table= 1;
- else if (((key_flags & (HA_NOSAME | HA_NULL_PART_KEY))!= HA_NOSAME) ||
- keyparts != j->table->actual_n_key_parts(keyinfo) ||
- null_ref_key)
- {
- if (test(key_flags & HA_EXT_NOSAME) && keyparts == keyinfo->ext_key_parts &&
- !null_ref_key)
- j->type= JT_EQ_REF;
- else
- {
- /* Must read with repeat */
- j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF;
- j->ref.null_ref_key= null_ref_key;
- j->ref.null_ref_part= null_ref_part;
- }
+ else if (!((keyparts == keyinfo->user_defined_key_parts &&
+ ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)) ||
+ (keyparts > keyinfo->user_defined_key_parts && // true only for extended keys
+ test(key_flags & HA_EXT_NOSAME) &&
+ keyparts == keyinfo->ext_key_parts)) ||
+ null_ref_key)
+ {
+ /* Must read with repeat */
+ j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF;
+ j->ref.null_ref_key= null_ref_key;
+ j->ref.null_ref_part= null_ref_part;
}
else if (keyuse_uses_no_tables)
{
@@ -8270,6 +8607,7 @@ JOIN::make_simple_join(JOIN *parent, TABLE *temp_table)
join_tab->ref.key = -1;
join_tab->read_first_record= join_init_read_record;
join_tab->join= this;
+ join_tab->ref.key_parts= 0;
bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record));
temp_table->status=0;
temp_table->null_row=0;
@@ -8286,9 +8624,9 @@ inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2)
Item *res;
if ((res= new Item_cond_and(*e1, e2)))
{
- *e1= res;
res->fix_fields(thd, 0);
res->update_used_tables();
+ *e1= res;
}
}
else
@@ -8360,7 +8698,7 @@ static void add_not_null_conds(JOIN *join)
{
for (uint keypart= 0; keypart < tab->ref.key_parts; keypart++)
{
- if (tab->ref.null_rejecting & (1 << keypart))
+ if (tab->ref.null_rejecting & ((key_part_map)1 << keypart))
{
Item *item= tab->ref.items[keypart];
Item *notnull;
@@ -9595,7 +9933,7 @@ end_sj_materialize(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
if (table->file->is_fatal_error(error, HA_CHECK_DUP) &&
create_internal_tmp_table_from_heap(thd, table,
sjm->sjm_table_param.start_recinfo,
- &sjm->sjm_table_param.recinfo, error, 1))
+ &sjm->sjm_table_param.recinfo, error, 1, NULL))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
}
@@ -10165,10 +10503,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
join_read_system :join_read_const;
if (table->covering_keys.is_set(tab->ref.key) &&
!table->no_keyread)
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ table->enable_keyread();
else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
@@ -10177,10 +10512,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
/* fall through */
if (table->covering_keys.is_set(tab->ref.key) &&
!table->no_keyread)
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ table->enable_keyread();
else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
@@ -10544,11 +10876,14 @@ bool JOIN_TAB::preread_init()
dbug_serve_apcs(join->thd, 1);
);
+ /* init ftfuncs for a just-initialized derived table */
+ if (table->fulltext_searched)
+ init_ftfuncs(join->thd, join->select_lex, test(join->order));
+
return FALSE;
}
-
/**
Build a TABLE_REF structure for index lookup in the temporary table
@@ -10782,11 +11117,27 @@ void JOIN::cleanup(bool full)
else
clean_pre_sort_join_tab();
}
+ /*
+ Call cleanup() on join tabs used by the join optimization
+ (join->join_tab may now be pointing to result of make_simple_join
+ reading from the temporary table)
- for (tab= first_linear_tab(this, WITH_CONST_TABLES); tab;
- tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
+ We also need to check table_count to handle various degenerate joins
+ w/o tables: they don't have some members initialized and
+ WALK_OPTIMIZATION_TABS may not work correctly for them.
+ */
+ enum enum_exec_or_opt tabs_kind;
+ if (first_breadth_first_tab(this, WALK_OPTIMIZATION_TABS))
+ tabs_kind= WALK_OPTIMIZATION_TABS;
+ else
+ tabs_kind= WALK_EXECUTION_TABS;
+ if (table_count)
{
- tab->cleanup();
+ for (tab= first_breadth_first_tab(this, tabs_kind); tab;
+ tab= next_breadth_first_tab(this, tabs_kind, tab))
+ {
+ tab->cleanup();
+ }
}
cleaned= true;
}
@@ -10797,8 +11148,10 @@ void JOIN::cleanup(bool full)
{
if (tab->table)
{
- DBUG_PRINT("info", ("close index: %s.%s", tab->table->s->db.str,
- tab->table->s->table_name.str));
+ DBUG_PRINT("info", ("close index: %s.%s alias: %s",
+ tab->table->s->db.str,
+ tab->table->s->table_name.str,
+ tab->table->alias.c_ptr()));
tab->table->file->ha_index_or_rnd_end();
}
}
@@ -11074,7 +11427,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
*simple_order=0; // Must do a temp table to sort
else if (!(order_tables & not_const_tables))
{
- if (order->item[0]->with_subselect)
+ if (order->item[0]->has_subquery())
{
/*
Delay the evaluation of constant ORDER and/or GROUP expressions that
@@ -11518,7 +11871,7 @@ static bool check_simple_equality(Item *left_item, Item *right_item,
if (field_item->cmp_type() == STRING_RESULT)
{
- CHARSET_INFO *cs= ((Field_str*) field_item->field)->charset();
+ CHARSET_INFO *cs= field_item->field->charset();
if (!item)
{
Item_func_eq *eq_item;
@@ -11748,7 +12101,8 @@ static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal,
*/
static COND *build_equal_items_for_cond(THD *thd, COND *cond,
- COND_EQUAL *inherited)
+ COND_EQUAL *inherited,
+ bool link_item_fields)
{
Item_equal *item_equal;
COND_EQUAL cond_equal;
@@ -11795,13 +12149,16 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
List_iterator_fast<Item_equal> it(cond_equal.current_level);
while ((item_equal= it++))
{
+ item_equal->set_link_equal_fields(link_item_fields);
item_equal->fix_fields(thd, NULL);
item_equal->update_used_tables();
set_if_bigger(thd->lex->current_select->max_equal_elems,
item_equal->n_field_items());
}
- ((Item_cond_and*)cond)->cond_equal= cond_equal;
+ ((Item_cond_and*)cond)->cond_equal.copy(cond_equal);
+ cond_equal.current_level=
+ ((Item_cond_and*)cond)->cond_equal.current_level;
inherited= &(((Item_cond_and*)cond)->cond_equal);
}
/*
@@ -11812,7 +12169,8 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
while ((item= li++))
{
Item *new_item;
- if ((new_item= build_equal_items_for_cond(thd, item, inherited)) != item)
+ if ((new_item= build_equal_items_for_cond(thd, item, inherited, FALSE))
+ != item)
{
/* This replacement happens only for standalone equalities */
/*
@@ -11856,6 +12214,7 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
item_equal->update_used_tables();
set_if_bigger(thd->lex->current_select->max_equal_elems,
item_equal->n_field_items());
+ item_equal->upper_levels= inherited;
return item_equal;
}
@@ -11878,7 +12237,8 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
set_if_bigger(thd->lex->current_select->max_equal_elems,
item_equal->n_field_items());
}
- and_cond->cond_equal= cond_equal;
+ and_cond->cond_equal.copy(cond_equal);
+ cond_equal.current_level= and_cond->cond_equal.current_level;
args->concat((List<Item> *)&cond_equal.current_level);
return and_cond;
@@ -11953,29 +12313,36 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
@endcode
Thus, applying equalities from the where condition we basically
can get more freedom in performing join operations.
- Althogh we don't use this property now, it probably makes sense to use
+ Although we don't use this property now, it probably makes sense to use
it in the future.
- @param thd Thread handler
+ @param thd Thread handler
@param cond condition to build the multiple equalities for
@param inherited path to all inherited multiple equality items
@param join_list list of join tables to which the condition
refers to
+ @param ignore_on_conds TRUE <-> do not build multiple equalities
+ for ON expressions
@param[out] cond_equal_ref pointer to the structure to place built
equalities in
+ @param link_equal_fields TRUE <-> equal fields are to be linked
@return
pointer to the transformed condition containing multiple equalities
*/
-static COND *build_equal_items(THD *thd, COND *cond, COND_EQUAL *inherited,
+static COND *build_equal_items(JOIN *join, COND *cond,
+ COND_EQUAL *inherited,
List<TABLE_LIST> *join_list,
- COND_EQUAL **cond_equal_ref)
+ bool ignore_on_conds,
+ COND_EQUAL **cond_equal_ref,
+ bool link_equal_fields)
{
+ THD *thd= join->thd;
COND_EQUAL *cond_equal= 0;
if (cond)
{
- cond= build_equal_items_for_cond(thd, cond, inherited);
+ cond= build_equal_items_for_cond(thd, cond, inherited, link_equal_fields);
cond->update_used_tables();
if (cond->type() == Item::COND_ITEM &&
((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
@@ -11995,7 +12362,7 @@ static COND *build_equal_items(THD *thd, COND *cond, COND_EQUAL *inherited,
}
*cond_equal_ref= cond_equal;
- if (join_list)
+ if (join_list && !ignore_on_conds)
{
TABLE_LIST *table;
List_iterator<TABLE_LIST> li(*join_list);
@@ -12010,8 +12377,8 @@ static COND *build_equal_items(THD *thd, COND *cond, COND_EQUAL *inherited,
We can modify table->on_expr because its old value will
be restored before re-execution of PS/SP.
*/
- table->on_expr= build_equal_items(thd, table->on_expr, inherited,
- nested_join_list,
+ table->on_expr= build_equal_items(join, table->on_expr, inherited,
+ nested_join_list, ignore_on_conds,
&table->cond_equal);
}
}
@@ -12208,11 +12575,16 @@ Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
Item *item_const= item_equal->get_const();
Item_equal_fields_iterator it(*item_equal);
Item *head;
- DBUG_ASSERT(!cond || cond->type() == Item::COND_ITEM);
-
TABLE_LIST *current_sjm= NULL;
Item *current_sjm_head= NULL;
+ DBUG_ASSERT(!cond ||
+ cond->type() == Item::INT_ITEM ||
+ (cond->type() == Item::FUNC_ITEM &&
+ ((Item_func *) cond)->functype() == Item_func::EQ_FUNC) ||
+ (cond->type() == Item::COND_ITEM &&
+ ((Item_func *) cond)->functype() == Item_func::COND_AND_FUNC));
+
/*
Pick the "head" item: the constant one or the first in the join order
(if the first in the join order happens to be inside an SJM nest, that's
@@ -12287,8 +12659,8 @@ Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
if (produce_equality)
{
- if (eq_item)
- eq_list.push_back(eq_item);
+ if (eq_item && eq_list.push_back(eq_item))
+ return 0;
/*
If we're inside an SJM-nest (current_sjm!=NULL), and the multi-equality
@@ -12312,31 +12684,61 @@ Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
current_sjm= field_sjm;
}
- if (!cond)
+ /*
+ We have produced zero, one, or more pair-wise equalities eq_i. We want to
+ return an expression in form:
+
+ cond AND eq_1 AND eq_2 AND eq_3 AND ...
+
+ 'cond' is a parameter for this function, which may be NULL, an Item_int(1),
+ an Item_func_eq, or an Item_cond_and.
+
+ We want to return a well-formed condition: no nested Item_cond_and objects,
+ or Item_cond_and with a single child:
+ - if 'cond' is an Item_cond_and, we add eq_i as its tail
+ - if 'cond' is Item_int(1), we return eq_i
+ - otherwise, we create our own Item_cond_and and put 'cond' at the front of
+ it.
+ - if we have only one condition to return, we don't create an Item_cond_and
+ */
+
+ if (eq_item && eq_list.push_back(eq_item))
+ return 0;
+ COND *res= 0;
+ switch (eq_list.elements)
+ {
+ case 0:
+ res= cond ? cond : new Item_int((longlong) 1, 1);
+ break;
+ case 1:
+ if (!cond || cond->type() == Item::INT_ITEM)
+ res= eq_item;
+ break;
+ default:
+ break;
+ }
+ if (!res)
{
- if (eq_list.is_empty())
+ if (cond)
{
- if (eq_item)
- return eq_item;
- return new Item_int((longlong) 1, 1);
+ if (cond->type() == Item::COND_ITEM)
+ {
+ res= cond;
+ ((Item_cond *) res)->add_at_end(&eq_list);
+ }
+ else if (eq_list.push_front(cond))
+ return 0;
}
- /* eq_item is always set if list is not empty */
- DBUG_ASSERT(eq_item);
- eq_list.push_back(eq_item);
- if (!(cond= new Item_cond_and(eq_list)))
- return 0; // Error
- }
- else
+ }
+ if (!res)
+ res= new Item_cond_and(eq_list);
+ if (res)
{
- if (eq_item)
- eq_list.push_back(eq_item);
- if (!eq_list.is_empty())
- ((Item_cond *) cond)->add_at_head(&eq_list);
+ res->quick_fix_field();
+ res->update_used_tables();
}
- cond->quick_fix_field();
- cond->update_used_tables();
-
- return cond;
+
+ return res;
}
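
For example, if the multiple equality is (t1.a, t2.a, 5), the loop above produces the
pair-wise equalities t1.a = 5 and t2.a = 5; with no incoming 'cond' they are returned
wrapped in a single Item_cond_and, while a lone produced equality combined with a cond of
Item_int(1) is returned as the bare equality, exactly as the case analysis in the comment
describes.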
@@ -12439,31 +12841,68 @@ static COND* substitute_for_best_equal_field(JOIN_TAB *context_tab,
if (and_level)
{
+ COND *eq_cond= 0;
List_iterator_fast<Item_equal> it(cond_equal->current_level);
+ bool false_eq_cond= FALSE;
while ((item_equal= it++))
{
- cond= eliminate_item_equal(cond, cond_equal->upper_levels, item_equal);
- // This occurs when eliminate_item_equal() founds that cond is
- // always false and substitutes it with Item_int 0.
- // Due to this, value of item_equal will be 0, so just return it.
- if (!cond)
- return org_cond; // Error
- if (cond->type() != Item::COND_ITEM)
+ eq_cond= eliminate_item_equal(eq_cond, cond_equal->upper_levels,
+ item_equal);
+ if (!eq_cond)
+ {
+ eq_cond= 0;
break;
+ }
+ else if (eq_cond->type() == Item::INT_ITEM && !eq_cond->val_bool())
+ {
+ /*
+ This occurs when eliminate_item_equal() finds that cond is
+ always false and substitutes it with Item_int 0.
+ Due to this, the value of item_equal will be 0, so just return it.
+ */
+ cond= eq_cond;
+ false_eq_cond= TRUE;
+ break;
+ }
}
- }
- if (cond->type() == Item::COND_ITEM &&
- !((Item_cond*)cond)->argument_list()->elements)
- cond= new Item_int((int32)cond->val_bool());
-
+ if (eq_cond && !false_eq_cond)
+ {
+ /* Insert the generated equalities before all other conditions */
+ if (eq_cond->type() == Item::COND_ITEM)
+ ((Item_cond *) cond)->add_at_head(
+ ((Item_cond *) eq_cond)->argument_list());
+ else
+ {
+ if (cond_list->is_empty())
+ cond= eq_cond;
+ else
+ {
+ /* Do not add an equality condition if it's always true */
+ if (eq_cond->type() != Item::INT_ITEM &&
+ cond_list->push_front(eq_cond))
+ eq_cond= 0;
+ }
+ }
+ }
+ if (!eq_cond)
+ {
+ /*
+ We are out of memory doing the transformation.
+ This is a fatal error now. However we bail out by returning the
+ original condition that we had before we started the transformation.
+ */
+ cond_list->concat((List<Item> *) &cond_equal->current_level);
+ }
+ }
}
else if (cond->type() == Item::FUNC_ITEM &&
((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
{
item_equal= (Item_equal *) cond;
item_equal->sort(&compare_fields_by_table_order, table_join_idx);
+ cond_equal= item_equal->upper_levels;
if (cond_equal && cond_equal->current_level.head() == item_equal)
- cond_equal= 0;
+ cond_equal= cond_equal->upper_levels;
cond= eliminate_item_equal(0, cond_equal, item_equal);
return cond ? cond : org_cond;
}
@@ -12914,9 +13353,6 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
table->prep_on_expr= table->on_expr= 0;
}
}
-
- if (!top)
- continue;
/*
Only inner tables of non-convertible outer joins
@@ -13311,13 +13747,13 @@ static void restore_prev_nj_state(JOIN_TAB *last)
bool was_fully_covered= nest->is_fully_covered();
+ join->cur_embedding_map|= nest->nj_map;
+
if (--nest->counter == 0)
join->cur_embedding_map&= ~nest->nj_map;
if (!was_fully_covered)
break;
-
- join->cur_embedding_map|= nest->nj_map;
}
}
}
@@ -13422,8 +13858,10 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
static COND *
-optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
- Item::cond_result *cond_value, COND_EQUAL **cond_equal)
+optimize_cond(JOIN *join, COND *conds,
+ List<TABLE_LIST> *join_list, bool ignore_on_conds,
+ Item::cond_result *cond_value, COND_EQUAL **cond_equal,
+ int flags)
{
THD *thd= join->thd;
DBUG_ENTER("optimize_cond");
@@ -13431,7 +13869,9 @@ optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
if (!conds)
{
*cond_value= Item::COND_TRUE;
- build_equal_items(join->thd, NULL, NULL, join_list, cond_equal);
+ if (!ignore_on_conds)
+ build_equal_items(join, NULL, NULL, join_list, ignore_on_conds,
+ cond_equal);
}
else
{
@@ -13444,8 +13884,10 @@ optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
multiple equality contains a constant.
*/
DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
- conds= build_equal_items(join->thd, conds, NULL, join_list, cond_equal);
- DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
+ conds= build_equal_items(join, conds, NULL, join_list,
+ ignore_on_conds, cond_equal,
+ test(flags & OPT_LINK_EQUAL_FIELDS));
+ DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
/* change field = field to field = const for each found field = const */
propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds);
@@ -13500,7 +13942,87 @@ internal_remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
li.remove();
else if (item != new_item)
{
- (void) li.replace(new_item);
+ if (and_level)
+ {
+ /*
+ Take a special care of multiple equality predicates
+ that may be part of 'cond' and 'new_item'.
+ Those multiple equalities that have common members
+ must be merged.
+ */
+ Item_cond_and *cond_and= (Item_cond_and *) cond;
+ List<Item_equal> *cond_equal_items=
+ &cond_and->cond_equal.current_level;
+ List<Item> *cond_and_list= cond_and->argument_list();
+
+ if (new_item->type() == Item::COND_ITEM &&
+ ((Item_cond*) new_item)->functype() == Item_func::COND_AND_FUNC)
+ {
+ Item_cond_and *new_item_and= (Item_cond_and *) new_item;
+ List<Item_equal> *new_item_equal_items=
+ &new_item_and->cond_equal.current_level;
+ List<Item> *new_item_and_list= new_item_and->argument_list();
+ cond_and_list->disjoin((List<Item>*) cond_equal_items);
+ new_item_and_list->disjoin((List<Item>*) new_item_equal_items);
+ Item_equal *equal_item;
+ List_iterator<Item_equal> it(*new_item_equal_items);
+ while ((equal_item= it++))
+ {
+ equal_item->merge_into_list(cond_equal_items);
+ }
+ if (new_item_and_list->is_empty())
+ li.remove();
+ else
+ {
+ Item *list_item;
+ Item *new_list_item;
+ uint cnt= new_item_and_list->elements;
+ List_iterator<Item> it(*new_item_and_list);
+ while ((list_item= it++))
+ {
+ uchar* is_subst_valid= (uchar *) Item::ANY_SUBST;
+ new_list_item=
+ list_item->compile(&Item::subst_argument_checker,
+ &is_subst_valid,
+ &Item::equal_fields_propagator,
+ (uchar *) &cond_and->cond_equal);
+ if (new_list_item != list_item)
+ it.replace(new_list_item);
+ new_list_item->update_used_tables();
+ }
+ li.replace(*new_item_and_list);
+ for (cnt--; cnt; cnt--)
+ item= li++;
+ }
+ cond_and_list->concat((List<Item>*) cond_equal_items);
+ }
+ else if (new_item->type() == Item::FUNC_ITEM &&
+ ((Item_cond*) new_item)->functype() ==
+ Item_func::MULT_EQUAL_FUNC)
+ {
+ cond_and_list->disjoin((List<Item>*) cond_equal_items);
+ ((Item_equal *) new_item)->merge_into_list(cond_equal_items);
+ li.remove();
+ cond_and_list->concat((List<Item>*) cond_equal_items);
+ }
+ else
+ li.replace(new_item);
+ }
+ else
+ {
+ if (new_item->type() == Item::COND_ITEM &&
+ ((Item_cond*) new_item)->functype() ==
+ ((Item_cond*) cond)->functype())
+ {
+ List<Item> *arg_list= ((Item_cond*) new_item)->argument_list();
+ uint cnt= arg_list->elements;
+ li.replace(*arg_list);
+ for ( cnt--; cnt; cnt--)
+ item= li++;
+ }
+ else
+ li.replace(new_item);
+ }
should_fix_fields=1;
}
if (*cond_value == Item::COND_UNDEF)
@@ -13884,6 +14406,10 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field,
((Field_double *) new_field)->not_fixed= TRUE;
new_field->vcol_info= 0;
new_field->stored_in_db= TRUE;
+ new_field->cond_selectivity= 1.0;
+ new_field->next_equal_field= NULL;
+ new_field->option_list= NULL;
+ new_field->option_struct= NULL;
}
return new_field;
}
@@ -13976,7 +14502,7 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
if (new_field)
new_field->init(table);
- if (copy_func && item->is_result_field())
+ if (copy_func && item->real_item()->is_result_field())
*((*copy_func)++) = item; // Save for copy_funcs
if (modify_item)
item->set_result_field(new_field);
@@ -14079,6 +14605,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
case Item::FIELD_ITEM:
case Item::DEFAULT_VALUE_ITEM:
+ case Item::INSERT_VALUE_ITEM:
{
Item_field *field= (Item_field*) item;
bool orig_modify= modify_item;
@@ -14183,6 +14710,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
case Item::REAL_ITEM:
case Item::DECIMAL_ITEM:
case Item::STRING_ITEM:
+ case Item::DATE_ITEM:
case Item::REF_ITEM:
case Item::NULL_ITEM:
case Item::VARBIN_ITEM:
@@ -14227,6 +14755,9 @@ void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
bitmap_init(&table->eq_join_set,
(my_bitmap_map*) (bitmaps+ 3*bitmap_buffer_size(field_count)),
field_count, FALSE);
+ bitmap_init(&table->cond_set,
+ (my_bitmap_map*) (bitmaps+ 4*bitmap_buffer_size(field_count)),
+ field_count, FALSE);
/* write_set and all_set are copies of read_set */
table->def_write_set= table->def_read_set;
table->s->all_set= table->def_read_set;
@@ -14390,7 +14921,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
&tmpname, (uint) strlen(path)+1,
&group_buff, (group && ! using_unique_constraint ?
param->group_length : 0),
- &bitmaps, bitmap_buffer_size(field_count)*4,
+ &bitmaps, bitmap_buffer_size(field_count)*5,
NullS))
{
if (temp_pool_slot != MY_BIT_NONE)
@@ -14762,11 +15293,11 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
}
else
{
- recinfo->null_bit= 1 << (null_count & 7);
+ recinfo->null_bit= (uint8)1 << (null_count & 7);
recinfo->null_pos= null_count/8;
}
field->move_field(pos,null_flags+null_count/8,
- 1 << (null_count & 7));
+ (uint8)1 << (null_count & 7));
null_count++;
}
else
@@ -15086,8 +15617,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (share->db_type() == TMP_ENGINE_HTON)
{
if (create_internal_tmp_table(table, param->keyinfo, param->start_recinfo,
- &param->recinfo, select_options,
- thd->variables.big_tables))
+ &param->recinfo, select_options))
goto err;
}
if (open_tmp_table(table))
@@ -15151,7 +15681,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list)
&share, sizeof(*share),
&field, (field_count + 1) * sizeof(Field*),
&blob_field, (field_count+1) *sizeof(uint),
- &bitmaps, bitmap_buffer_size(field_count)*4,
+ &bitmaps, bitmap_buffer_size(field_count)*5,
NullS))
return 0;
@@ -15220,7 +15750,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list)
{
cur_field->move_field(field_pos, (uchar*) null_pos, null_bit);
null_bit<<= 1;
- if (null_bit == (1 << 8))
+ if (null_bit == (uint)1 << 8)
{
++null_pos;
null_bit= 1;
@@ -15304,7 +15834,7 @@ bool open_tmp_table(TABLE *table)
bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
- ulonglong options, my_bool big_tables)
+ ulonglong options)
{
int error;
MARIA_KEYDEF keydef;
@@ -15398,7 +15928,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
bzero((char*) &create_info,sizeof(create_info));
- if (big_tables && !(options & SELECT_SMALL_RESULT))
+ /* Use long data format, to ensure we never get a 'table is full' error */
+ if (!(options & SELECT_SMALL_RESULT))
create_info.data_file_length= ~(ulonglong) 0;
/*
@@ -15474,7 +16005,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
- ulonglong options, my_bool big_tables)
+ ulonglong options)
{
int error;
MI_KEYDEF keydef;
@@ -15561,7 +16092,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
MI_CREATE_INFO create_info;
bzero((char*) &create_info,sizeof(create_info));
- if (big_tables && !(options & SELECT_SMALL_RESULT))
+ if (!(options & SELECT_SMALL_RESULT))
create_info.data_file_length= ~(ulonglong) 0;
if ((error=mi_create(share->table_name.str, share->keys, &keydef,
@@ -15584,7 +16115,6 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
DBUG_RETURN(1);
}
-
#endif /* USE_ARIA_FOR_TMP_TABLES */
@@ -15599,7 +16129,8 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
int error,
- bool ignore_last_dupp_key_error)
+ bool ignore_last_dupp_key_error,
+ bool *is_duplicate)
{
TABLE new_table;
TABLE_SHARE share;
@@ -15638,8 +16169,7 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
if (create_internal_tmp_table(&new_table, table->key_info, start_recinfo,
recinfo,
thd->lex->select_lex.options |
- thd->variables.option_bits,
- thd->variables.big_tables))
+ thd->variables.option_bits))
goto err2;
if (open_tmp_table(&new_table))
goto err1;
@@ -15683,6 +16213,13 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
!ignore_last_dupp_key_error)
goto err;
+ if (is_duplicate)
+ *is_duplicate= TRUE;
+ }
+ else
+ {
+ if (is_duplicate)
+ *is_duplicate= FALSE;
}
/* remove heap table and change to use myisam table */
@@ -16267,7 +16804,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (rc != NESTED_LOOP_NO_MORE_ROWS)
{
error= (*join_tab->read_first_record)(join_tab);
- if (join_tab->keep_current_rowid)
+ if (!error && join_tab->keep_current_rowid)
join_tab->table->file->position(join_tab->table->record[0]);
rc= evaluate_join_record(join, join_tab, error);
}
@@ -16348,7 +16885,9 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
DBUG_ENTER("evaluate_join_record");
DBUG_PRINT("enter",
("evaluate_join_record join: %p join_tab: %p"
- " cond: %p error: %d", join, join_tab, select_cond, error));
+ " cond: %p error: %d alias %s",
+ join, join_tab, select_cond, error,
+ join_tab->table->alias.ptr()));
if (error > 0 || (join->thd->is_error())) // Fatal error
DBUG_RETURN(NESTED_LOOP_ERROR);
if (error < 0)
@@ -16461,6 +17000,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
if (join_tab->check_weed_out_table && found)
{
int res= join_tab->check_weed_out_table->sj_weedout_check_row(join->thd);
+ DBUG_PRINT("info", ("weedout_check: %d", res));
if (res == -1)
DBUG_RETURN(NESTED_LOOP_ERROR);
else if (res == 1)
@@ -16481,8 +17021,8 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
(See above join->return_tab= tab).
*/
join->examined_rows++;
- DBUG_PRINT("counts", ("join->examined_rows++: %lu",
- (ulong) join->examined_rows));
+ DBUG_PRINT("counts", ("join->examined_rows++: %lu found: %d",
+ (ulong) join->examined_rows, (int) found));
if (found)
{
@@ -17176,6 +17716,8 @@ join_read_first(JOIN_TAB *tab)
{
int error= 0;
TABLE *table=tab->table;
+ DBUG_ENTER("join_read_first");
+
if (table->covering_keys.is_set(tab->index) && !table->no_keyread &&
!table->key_read)
table->enable_keyread();
@@ -17192,9 +17734,9 @@ join_read_first(JOIN_TAB *tab)
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
report_error(table, error);
- return -1;
+ DBUG_RETURN(-1);
}
- return 0;
+ DBUG_RETURN(0);
}
@@ -17214,6 +17756,8 @@ join_read_last(JOIN_TAB *tab)
{
TABLE *table=tab->table;
int error= 0;
+ DBUG_ENTER("join_read_first");
+
if (table->covering_keys.is_set(tab->index) && !table->no_keyread &&
!table->key_read)
table->enable_keyread();
@@ -17227,9 +17771,9 @@ join_read_last(JOIN_TAB *tab)
if (!error)
error= table->file->prepare_index_scan();
if (error || (error= tab->table->file->ha_index_last(tab->table->record[0])))
- return report_error(table, error);
+ DBUG_RETURN(report_error(table, error));
- return 0;
+ DBUG_RETURN(0);
}
@@ -17343,7 +17887,13 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (!end_of_records)
{
if (join->table_count &&
- join->join_tab->is_using_loose_index_scan())
+ (join->join_tab->is_using_loose_index_scan() ||
+ /*
+ When ORDER BY uses a loose index scan as its input, the quick select may
+ be attached to pre_sort_join_tab.
+ */
+ (join->pre_sort_join_tab &&
+ join->pre_sort_join_tab->is_using_loose_index_scan())))
{
/* Copy non-aggregated fields when loose index scan is used. */
copy_fields(&join->tmp_table_param);
@@ -17584,11 +18134,14 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
goto end;
+ bool is_duplicate;
if (create_internal_tmp_table_from_heap(join->thd, table,
join->tmp_table_param.start_recinfo,
&join->tmp_table_param.recinfo,
- error,1))
+ error, 1, &is_duplicate))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
+ if (is_duplicate)
+ goto end;
table->s->uniques=0; // To ensure rows are the same
}
if (++join->send_records >= join->tmp_table_param.end_write_records &&
@@ -17673,7 +18226,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (create_internal_tmp_table_from_heap(join->thd, table,
join->tmp_table_param.start_recinfo,
&join->tmp_table_param.recinfo,
- error, 0))
+ error, 0, NULL))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
/* Change method to update rows */
if ((error= table->file->ha_index_init(0, 0)))
@@ -17778,7 +18331,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
create_internal_tmp_table_from_heap(join->thd, table,
join->tmp_table_param.start_recinfo,
&join->tmp_table_param.recinfo,
- error, 0))
+ error, 0, NULL))
DBUG_RETURN(NESTED_LOOP_ERROR);
}
if (join->rollup.state != ROLLUP::STATE_NONE)
@@ -18839,7 +19392,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
!(table->file->index_flags(best_key, 0, 1) & HA_CLUSTERED_INDEX)))
goto use_filesort;
- if (table->quick_keys.is_set(best_key) && best_key != ref_key)
+ if (select &&
+ table->quick_keys.is_set(best_key) && best_key != ref_key)
{
key_map map;
map.clear_all(); // Force the creation of quick select
@@ -19183,6 +19737,9 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
MY_THREAD_SPECIFIC));
table->status=0; // May be wrong if quick_select
+ if (!tab->preread_init_done && tab->preread_init())
+ goto err;
+
// If table has a range, move it to select
if (select && !select->quick && tab->ref.key >= 0)
{
@@ -19219,8 +19776,6 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX))
goto err;
- if (!tab->preread_init_done && tab->preread_init())
- goto err;
if (table->s->tmp_table)
table->file->info(HA_STATUS_VARIABLE); // Get record count
filesort_retval= filesort(thd, table, join->sortorder, length,
@@ -19245,7 +19800,6 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
*(join->pre_sort_join_tab)= *tab;
-
tab->select=NULL;
tab->set_select_cond(NULL, __LINE__);
tab->type=JT_ALL; // Read with normal read_record
@@ -19418,7 +19972,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (copy_blobs(first_field))
{
- my_message(ER_OUTOFMEMORY, ER(ER_OUTOFMEMORY), MYF(0));
+ my_message(ER_OUTOFMEMORY, ER(ER_OUTOFMEMORY), MYF(ME_FATALERROR));
error=0;
goto err;
}
@@ -19451,7 +20005,8 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
if (!found)
break; // End of file
/* Restart search on saved row */
- error=file->restart_rnd_next(record);
+ if ((error= file->restart_rnd_next(record)))
+ goto err;
}
file->extra(HA_EXTRA_NO_CACHE);
@@ -21532,7 +22087,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
if (create_internal_tmp_table_from_heap(thd, table_arg,
tmp_table_param.start_recinfo,
&tmp_table_param.recinfo,
- write_error, 0))
+ write_error, 0, NULL))
return 1;
}
}
@@ -22007,7 +22562,13 @@ int JOIN::print_explain(select_result_sink *result, uint8 explain_flags,
{
float f= 0.0;
if (examined_rows)
- f= (100.0 * (float)tab->records_read) / examined_rows;
+ {
+ double pushdown_cond_selectivity= tab->cond_selectivity;
+ if (pushdown_cond_selectivity == 1.0)
+ f= (float) (100.0 * tab->records_read / examined_rows);
+ else
+ f= (float) (100.0 * pushdown_cond_selectivity);
+ }
set_if_smaller(f, 100.0);
item_list.push_back(new Item_float(f, 2));
}
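As a worked example of the new 'filtered' computation above (all numbers invented): with no pushed-down condition the selectivity stays 1.0 and the column is derived from records_read, otherwise the pushdown selectivity is reported directly.

    double records_read= 200.0, examined_rows= 1000.0;
    double pushdown_cond_selectivity= 1.0;
    float f= (float) (100.0 * records_read / examined_rows);     /* 20.00 */

    pushdown_cond_selectivity= 0.35;    /* condition keeps ~35% of the rows */
    f= (float) (100.0 * pushdown_cond_selectivity);              /* 35.00 */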
@@ -23443,6 +24004,78 @@ uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
return MAX_KEY;
}
+/*
+ Count how many times the conditions are true for the first rows of the table
+
+ @param thd thread handle
+ @param rows_to_read how many rows to check
+ @param table table which should be checked
+ @param conds list of conditions and counters for them
+
+ @return number of rows actually checked or 0 in case of error or empty table
+*/
+
+ulong check_selectivity(THD *thd,
+ ulong rows_to_read,
+ TABLE *table,
+ List<COND_STATISTIC> *conds)
+{
+ ulong count= 0;
+ COND_STATISTIC *cond;
+ List_iterator_fast<COND_STATISTIC> it(*conds);
+ handler *file= table->file;
+ uchar *record= table->record[0];
+ int error= 0;
+ DBUG_ENTER("check_selectivity");
+
+ DBUG_ASSERT(rows_to_read > 0);
+ while ((cond= it++))
+ {
+ DBUG_ASSERT(cond->cond);
+ DBUG_ASSERT(cond->cond->used_tables() == table->map);
+ cond->positive= 0;
+ }
+ it.rewind();
+
+ if (file->ha_rnd_init_with_error(1))
+ DBUG_RETURN(0);
+ do
+ {
+ error= file->ha_rnd_next(record);
+
+ if (thd->killed)
+ {
+ thd->send_kill_message();
+ count= 0;
+ goto err;
+ }
+ if (error)
+ {
+ if (error == HA_ERR_RECORD_DELETED)
+ continue;
+ if (error == HA_ERR_END_OF_FILE)
+ break;
+ goto err;
+ }
+
+ count++;
+ while ((cond= it++))
+ {
+ if (cond->cond->val_bool())
+ cond->positive++;
+ }
+ it.rewind();
+
+ } while (count < rows_to_read);
+
+ file->ha_rnd_end();
+ DBUG_RETURN(count);
+
+err:
+ DBUG_PRINT("error", ("error %d", error));
+ file->ha_rnd_end();
+ DBUG_RETURN(0);
+}
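A minimal sketch (not part of the patch) of how a caller could drive check_selectivity(), assuming a THD *thd, a TABLE *table and an Item *single_table_cond that references only that table are already in scope; the 100-row sample size and the NULL field_arg are illustrative assumptions:

    COND_STATISTIC stat;                  /* struct declared in sql_select.h       */
    stat.cond= single_table_cond;         /* must use only 'table' (asserted)      */
    stat.field_arg= NULL;                 /* illustrative; not read by the sampler */
    stat.positive= 0;                     /* reset again inside check_selectivity  */

    List<COND_STATISTIC> conds;
    conds.push_back(&stat);

    ulong examined= check_selectivity(thd, 100, table, &conds);
    if (examined)
    {
      /* e.g. 37 matching rows out of 100 examined => selectivity 0.37 */
      double selectivity= (double) stat.positive / examined;
    }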
/**
@} (end of group Query_Optimizer)
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 54aca3c4829..9ff08a3bc03 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1,8 +1,8 @@
#ifndef SQL_SELECT_INCLUDED
#define SQL_SELECT_INCLUDED
-/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2008-2011 Monty Program Ab
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2008, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -283,6 +283,9 @@ typedef struct st_join_table {
/* Copy of POSITION::records_read, set by get_best_combination() */
double records_read;
+ /* The selectivity of the conditions that can be pushed to the table */
+ double cond_selectivity;
+
/* Startup cost for execution */
double startup_cost;
@@ -514,6 +517,16 @@ typedef struct st_join_table {
bool preread_init();
bool is_sjm_nest() { return test(bush_children); }
+
+ bool access_from_tables_is_allowed(table_map used_tables,
+ table_map sjm_lookup_tables)
+ {
+ table_map used_sjm_lookup_tables= used_tables & sjm_lookup_tables;
+ return !used_sjm_lookup_tables ||
+ (emb_sj_nest &&
+ !(used_sjm_lookup_tables & ~emb_sj_nest->sj_inner_tables));
+ }
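A small illustration of the bitmap test above, with invented table bits: t1=0x01, t2=0x02, t3=0x04, where t2 and t3 form the materialized semijoin nest, so sjm_lookup_tables == 0x06.

    /* Outside the nest (emb_sj_nest == NULL):                              */
    /*   used_tables == 0x01 (t1)    -> allowed: no sjm-lookup bits used    */
    /*   used_tables == 0x03 (t1,t2) -> rejected: refers into the nest      */
    /* Inside the nest (emb_sj_nest->sj_inner_tables == 0x06):              */
    /*   used_tables == 0x06 (t2,t3) -> allowed: all sjm bits stay in nest  */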
+
} JOIN_TAB;
@@ -753,6 +766,9 @@ typedef struct st_position :public Sql_alloc
*/
double records_read;
+ /* The selectivity of the pushed down conditions */
+ double cond_selectivity;
+
/*
Cost accessing the table in course of the entire complete join execution,
i.e. cost of one access method use (e.g. 'range' or 'ref' scan ) times
@@ -964,6 +980,11 @@ public:
bool hash_join;
bool do_send_rows;
table_map const_table_map;
+ /**
+ Bitmap of semijoin tables that the current partial plan decided
+ to materialize and access by lookups
+ */
+ table_map sjm_lookup_tables;
/*
Constant tables for which we have found a row (as opposed to those for
which we didn't).
@@ -1295,8 +1316,15 @@ public:
outer_ref_cond= pseudo_bits_cond= NULL;
in_to_exists_where= NULL;
in_to_exists_having= NULL;
-
pre_sort_join_tab= NULL;
+ emb_sjm_nest= NULL;
+ sjm_lookup_tables= 0;
+ /*
+ The following is needed because JOIN::cleanup(true) may be called for
+ joins for which JOIN::optimize was aborted with an error before a proper
+ query plan was produced
+ */
+ table_access_tabs= NULL;
}
int prepare(Item ***rref_pointer_array, TABLE_LIST *tables, uint wind_num,
@@ -1795,6 +1823,8 @@ void eliminate_tables(JOIN *join);
/* Index Condition Pushdown entry point function */
void push_index_cond(JOIN_TAB *tab, uint keyno);
+#define OPT_LINK_EQUAL_FIELDS 1
+
/****************************************************************************
Temporary table support for SQL Runtime
***************************************************************************/
@@ -1812,14 +1842,28 @@ void free_tmp_table(THD *thd, TABLE *entry);
bool create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
- int error, bool ignore_last_dupp_key_error);
+ int error, bool ignore_last_dupp_key_error,
+ bool *is_duplicate);
bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
- ulonglong options, my_bool big_tables);
+ ulonglong options);
bool open_tmp_table(TABLE *table);
void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps);
double prev_record_reads(POSITION *positions, uint idx, table_map found_ref);
void fix_list_after_tbl_changes(SELECT_LEX *new_parent, List<TABLE_LIST> *tlist);
+struct st_cond_statistic
+{
+ Item *cond;
+ Field *field_arg;
+ ulong positive;
+};
+typedef struct st_cond_statistic COND_STATISTIC;
+
+ulong check_selectivity(THD *thd,
+ ulong rows_to_read,
+ TABLE *table,
+ List<COND_STATISTIC> *conds);
+
#endif /* SQL_SELECT_INCLUDED */
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index d315a2fc14a..587d4b6ebdb 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009, 2012, Monty Program Ab
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,7 +17,7 @@
/* Function with list databases, tables or fields */
-#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
+#include "sql_plugin.h"
#include "sql_priv.h"
#include "unireg.h"
#include "sql_acl.h" // fill_schema_*_privileges
@@ -57,11 +57,8 @@
#include <my_dir.h>
#include "lock.h" // MYSQL_OPEN_IGNORE_FLUSH
#include "debug_sync.h"
-#include "datadict.h" // dd_frm_type()
#include "keycaches.h"
-#define STR_OR_NIL(S) ((S) ? (S) : "<nil>")
-
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
#endif
@@ -122,6 +119,14 @@ append_algorithm(TABLE_LIST *table, String *buff);
static COND * make_cond_for_info_schema(COND *cond, TABLE_LIST *table);
+typedef struct st_lookup_field_values
+{
+ LEX_STRING db_value, table_value;
+ bool wild_db_value, wild_table_value;
+} LOOKUP_FIELD_VALUES;
+
+bool get_lookup_field_values(THD *, COND *, TABLE_LIST *, LOOKUP_FIELD_VALUES *);
+
/***************************************************************************
** List all table types supported
***************************************************************************/
@@ -160,7 +165,6 @@ static my_bool show_plugins(THD *thd, plugin_ref plugin,
cs);
switch (plugin_state(plugin)) {
- /* case PLUGIN_IS_FREED: does not happen */
case PLUGIN_IS_DELETED:
table->field[2]->store(STRING_WITH_LEN("DELETED"), cs);
break;
@@ -173,6 +177,9 @@ static my_bool show_plugins(THD *thd, plugin_ref plugin,
case PLUGIN_IS_DISABLED:
table->field[2]->store(STRING_WITH_LEN("DISABLED"), cs);
break;
+ case PLUGIN_IS_FREED: // filtered in fill_plugins, used in fill_all_plugins
+ table->field[2]->store(STRING_WITH_LEN("NOT INSTALLED"), cs);
+ break;
default:
DBUG_ASSERT(0);
}
@@ -270,6 +277,65 @@ int fill_plugins(THD *thd, TABLE_LIST *tables, COND *cond)
}
+int fill_all_plugins(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ DBUG_ENTER("fill_all_plugins");
+ TABLE *table= tables->table;
+ LOOKUP_FIELD_VALUES lookup;
+
+ if (get_lookup_field_values(thd, cond, tables, &lookup))
+ DBUG_RETURN(0);
+
+ if (lookup.db_value.str && !lookup.db_value.str[0])
+ DBUG_RETURN(0); // empty string never matches a valid SONAME
+
+ MY_DIR *dirp= my_dir(opt_plugin_dir, MY_THREAD_SPECIFIC);
+ if (!dirp)
+ {
+ my_error(ER_CANT_READ_DIR, MYF(0), opt_plugin_dir, my_errno);
+ DBUG_RETURN(1);
+ }
+
+ if (!lookup.db_value.str)
+ plugin_dl_foreach(thd, 0, show_plugins, table);
+
+ const char *wstr= lookup.db_value.str, *wend= wstr + lookup.db_value.length;
+ for (uint i=0; i < (uint) dirp->number_of_files; i++)
+ {
+ FILEINFO *file= dirp->dir_entry+i;
+ LEX_STRING dl= { file->name, strlen(file->name) };
+ const char *dlend= dl.str + dl.length;
+ const size_t so_ext_len= sizeof(SO_EXT) - 1;
+
+ if (strcasecmp(dlend - so_ext_len, SO_EXT))
+ continue;
+
+ if (lookup.db_value.str)
+ {
+ if (lookup.wild_db_value)
+ {
+ if (my_wildcmp(files_charset_info, dl.str, dlend, wstr, wend,
+ wild_prefix, wild_one, wild_many))
+ continue;
+ }
+ else
+ {
+ if (my_strnncoll(files_charset_info,
+ (uchar*)dl.str, dl.length,
+ (uchar*)lookup.db_value.str, lookup.db_value.length))
+ continue;
+ }
+ }
+
+ plugin_dl_foreach(thd, &dl, show_plugins, table);
+ thd->clear_error();
+ }
+
+ my_dirend(dirp);
+ DBUG_RETURN(0);
+}
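A short illustration (file names invented, SO_EXT assumed to be ".so") of how the filter above selects plugin libraries before calling plugin_dl_foreach():

    /* "ha_example.so",     no lookup value        -> listed                      */
    /* "ha_example.so",     pattern "ha_%" (wild)  -> listed via my_wildcmp()      */
    /* "ha_example.so",     exact "ha_archive.so"  -> skipped via my_strnncoll()   */
    /* "daemon_example.ini", any lookup            -> skipped: extension != ".so"  */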
+
+
/***************************************************************************
** List all Authors.
** If you can update it, you get to be in it :)
@@ -689,6 +755,11 @@ db_name_is_in_ignore_db_dirs_list(const char *directory)
return my_hash_search(&ignore_db_dirs_hash, (uchar *) buff, buff_len)!=NULL;
}
+enum find_files_result {
+ FIND_FILES_OK,
+ FIND_FILES_OOM,
+ FIND_FILES_DIR
+};
/*
find_files() - find files in a given directory.
@@ -697,11 +768,10 @@ db_name_is_in_ignore_db_dirs_list(const char *directory)
find_files()
thd thread handler
files put found files in this list
- db database name to set in TABLE_LIST structure
+ db database name to search tables in
+ or NULL to search for databases
path path to database
wild filter for found files
- dir read databases in path if TRUE, read .frm files in
- database otherwise
RETURN
FIND_FILES_OK success
@@ -710,65 +780,40 @@ db_name_is_in_ignore_db_dirs_list(const char *directory)
*/
-find_files_result
-find_files(THD *thd, List<LEX_STRING> *files, const char *db,
- const char *path, const char *wild, bool dir)
+static find_files_result
+find_files(THD *thd, Dynamic_array<LEX_STRING*> *files, LEX_STRING *db,
+ const char *path, const LEX_STRING *wild)
{
- uint i;
- char *ext;
MY_DIR *dirp;
- FILEINFO *file;
- LEX_STRING *file_name= 0;
- uint file_name_len;
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
- uint col_access=thd->col_access;
-#endif
- uint wild_length= 0;
- TABLE_LIST table_list;
+ Discovered_table_list tl(thd, files, wild);
DBUG_ENTER("find_files");
- if (wild)
- {
- if (!wild[0])
- wild= 0;
- else
- wild_length= strlen(wild);
- }
-
- bzero((char*) &table_list,sizeof(table_list));
-
- if (!(dirp = my_dir(path,MYF((dir ? MY_WANT_STAT : 0) |
- MY_THREAD_SPECIFIC))))
+ if (!(dirp = my_dir(path, MY_THREAD_SPECIFIC | (db ? 0 : MY_WANT_STAT))))
{
if (my_errno == ENOENT)
- my_error(ER_BAD_DB_ERROR, MYF(ME_BELL+ME_WAITTANG), db);
+ my_error(ER_BAD_DB_ERROR, MYF(ME_BELL | ME_WAITTANG), db->str);
else
- my_error(ER_CANT_READ_DIR, MYF(ME_BELL+ME_WAITTANG), path, my_errno);
+ my_error(ER_CANT_READ_DIR, MYF(ME_BELL | ME_WAITTANG), path, my_errno);
DBUG_RETURN(FIND_FILES_DIR);
}
- for (i=0 ; i < (uint) dirp->number_off_files ; i++)
+ if (!db) /* Return databases */
{
- char uname[SAFE_NAME_LEN + 1]; /* Unencoded name */
- file=dirp->dir_entry+i;
- if (dir)
- { /* Return databases */
- if ((file->name[0] == '.' &&
- ((file->name[1] == '.' && file->name[2] == '\0') ||
- file->name[1] == '\0')))
- continue; /* . or .. */
+ for (uint i=0; i < (uint) dirp->number_of_files; i++)
+ {
+ FILEINFO *file= dirp->dir_entry+i;
#ifdef USE_SYMDIR
char *ext;
char buff[FN_REFLEN];
if (my_use_symdir && !strcmp(ext=fn_ext(file->name), ".sym"))
{
- /* Only show the sym file if it points to a directory */
- char *end;
+ /* Only show the sym file if it points to a directory */
+ char *end;
*ext=0; /* Remove extension */
- unpack_dirname(buff, file->name);
- end= strend(buff);
- if (end != buff && end[-1] == FN_LIBCHAR)
- end[-1]= 0; // Remove end FN_LIBCHAR
+ unpack_dirname(buff, file->name);
+ end= strend(buff);
+ if (end != buff && end[-1] == FN_LIBCHAR)
+ end[-1]= 0; // Remove end FN_LIBCHAR
if (!mysql_file_stat(key_file_misc, buff, file->mystat, MYF(0)))
continue;
}
@@ -779,70 +824,25 @@ find_files(THD *thd, List<LEX_STRING> *files, const char *db,
if (is_in_ignore_db_dirs_list(file->name))
continue;
- file_name_len= filename_to_tablename(file->name, uname, sizeof(uname));
- if (wild)
- {
- if (lower_case_table_names)
- {
- if (my_wildcmp(files_charset_info,
- uname, uname + file_name_len,
- wild, wild + wild_length,
- wild_prefix, wild_one, wild_many))
- continue;
- }
- else if (wild_compare(uname, wild, 0))
- continue;
- }
- }
- else
- {
- // Return only .frm files which aren't temp files.
- if (my_strcasecmp(system_charset_info, ext=fn_rext(file->name),reg_ext) ||
- is_prefix(file->name, tmp_file_prefix))
- continue;
- *ext=0;
- file_name_len= filename_to_tablename(file->name, uname, sizeof(uname));
- if (wild)
- {
- if (lower_case_table_names)
- {
- if (my_wildcmp(files_charset_info,
- uname, uname + file_name_len,
- wild, wild + wild_length,
- wild_prefix, wild_one,wild_many))
- continue;
- }
- else if (wild_compare(uname, wild, 0))
- continue;
- }
- }
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
- /* Don't show tables where we don't have any privileges */
- if (db && !(col_access & TABLE_ACLS))
- {
- table_list.db= (char*) db;
- table_list.db_length= strlen(db);
- table_list.table_name= uname;
- table_list.table_name_length= file_name_len;
- table_list.grant.privilege=col_access;
- if (check_grant(thd, TABLE_ACLS, &table_list, TRUE, 1, TRUE))
- continue;
- }
-#endif
- if (!(file_name=
- thd->make_lex_string(file_name, uname, file_name_len, TRUE)) ||
- files->push_back(file_name))
- {
- my_dirend(dirp);
- DBUG_RETURN(FIND_FILES_OOM);
+ if (tl.add_file(file->name))
+ goto err;
}
+ tl.sort();
+ }
+ else
+ {
+ if (ha_discover_table_names(thd, db, dirp, &tl, false))
+ goto err;
}
- DBUG_PRINT("info",("found: %d files", files->elements));
- my_dirend(dirp);
- (void) ha_find_files(thd, db, path, wild, dir, files);
+ DBUG_PRINT("info",("found: %zu files", files->elements()));
+ my_dirend(dirp);
DBUG_RETURN(FIND_FILES_OK);
+
+err:
+ my_dirend(dirp);
+ DBUG_RETURN(FIND_FILES_OOM);
}
@@ -1561,6 +1561,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
MODE_MYSQL323 |
MODE_MYSQL40)) != 0;
my_bitmap_map *old_map;
+ int error= 0;
DBUG_ENTER("store_create_info");
DBUG_PRINT("enter",("table: %s", table->s->table_name.str));
@@ -1940,28 +1941,34 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
{
- /*
- Partition syntax for CREATE TABLE is at the end of the syntax.
- */
- uint part_syntax_len;
- char *part_syntax;
if (table->part_info &&
- (!table->part_info->is_auto_partitioned) &&
- ((part_syntax= generate_partition_syntax(table->part_info,
+ !((table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) &&
+ table->part_info->is_auto_partitioned))
+ {
+ /*
+ Partition syntax for CREATE TABLE is at the end of the syntax.
+ */
+ uint part_syntax_len;
+ char *part_syntax;
+ String comment_start;
+ table->part_info->set_show_version_string(&comment_start);
+ if ((part_syntax= generate_partition_syntax(table->part_info,
&part_syntax_len,
FALSE,
show_table_options,
- NULL, NULL))))
- {
- table->part_info->set_show_version_string(packet);
- packet->append(part_syntax, part_syntax_len);
- packet->append(STRING_WITH_LEN(" */"));
- my_free(part_syntax);
+ NULL, NULL)))
+ {
+ packet->append(comment_start);
+ if (packet->append(part_syntax, part_syntax_len) ||
+ packet->append(STRING_WITH_LEN(" */")))
+ error= 1;
+ my_free(part_syntax);
+ }
}
}
#endif
tmp_restore_column_map(table->read_set, old_map);
- DBUG_RETURN(0);
+ DBUG_RETURN(error);
}
@@ -2201,7 +2208,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
DBUG_ENTER("mysqld_list_processes");
field_list.push_back(new Item_int("Id", 0, MY_INT32_NUM_DECIMAL_DIGITS));
- field_list.push_back(new Item_empty_string("User",16));
+ field_list.push_back(new Item_empty_string("User", USERNAME_CHAR_LENGTH));
field_list.push_back(new Item_empty_string("Host",LIST_PROCESS_HOST_LEN));
field_list.push_back(field=new Item_empty_string("db",NAME_CHAR_LEN));
field->maybe_null=1;
@@ -2250,10 +2257,10 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
thd_info->host= thd->strdup(tmp_sctx->host_or_ip[0] ?
tmp_sctx->host_or_ip :
tmp_sctx->host ? tmp_sctx->host : "");
- if ((thd_info->db=tmp->db)) // Safe test
- thd_info->db=thd->strdup(thd_info->db);
thd_info->command=(int) tmp->get_command();
mysql_mutex_lock(&tmp->LOCK_thd_data);
+ if ((thd_info->db= tmp->db)) // Safe test
+ thd_info->db= thd->strdup(thd_info->db);
if ((mysys_var= tmp->mysys_var))
mysql_mutex_lock(&mysys_var->mutex);
thd_info->proc_info= (char*) (tmp->killed >= KILL_QUERY ?
@@ -2508,6 +2515,8 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
my_hrtime_t unow= my_hrtime();
DBUG_ENTER("fill_schema_processlist");
+ DEBUG_SYNC(thd,"fill_schema_processlist_after_unow");
+
user= thd->security_ctx->master_access & PROCESS_ACL ?
NullS : thd->security_ctx->priv_user;
@@ -2566,9 +2575,8 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
table->field[4]->store(command_name[tmp->get_command()].str,
command_name[tmp->get_command()].length, cs);
/* MYSQL_TIME */
- const ulonglong utime= (tmp->start_time ?
- (unow.val - tmp->start_time * HRTIME_RESOLUTION -
- tmp->start_time_sec_part) : 0);
+ ulonglong start_utime= tmp->start_time * HRTIME_RESOLUTION + tmp->start_time_sec_part;
+ ulonglong utime= start_utime < unow.val ? unow.val - start_utime : 0;
table->field[5]->store(utime / HRTIME_RESOLUTION, TRUE);
/* STATE */
if ((val= thread_state_info(tmp)))
@@ -2642,7 +2650,7 @@ static bool status_vars_inited= 0;
C_MODE_START
static int show_var_cmp(const void *var1, const void *var2)
{
- return strcmp(((SHOW_VAR*)var1)->name, ((SHOW_VAR*)var2)->name);
+ return strcasecmp(((SHOW_VAR*)var1)->name, ((SHOW_VAR*)var2)->name);
}
C_MODE_END
@@ -2769,12 +2777,11 @@ void remove_status_vars(SHOW_VAR *list)
{
mysql_mutex_lock(&LOCK_status);
SHOW_VAR *all= dynamic_element(&all_status_vars, 0, SHOW_VAR *);
- int a= 0, b= all_status_vars.elements, c= (a+b)/2;
for (; list->name; list++)
{
- int res= 0;
- for (a= 0, b= all_status_vars.elements; b-a > 1; c= (a+b)/2)
+ int res= 0, a= 0, b= all_status_vars.elements, c= (a+b)/2;
+ for (; b-a > 0; c= (a+b)/2)
{
res= show_var_cmp(list, all+c);
if (res < 0)
@@ -2847,6 +2854,17 @@ static bool show_status_array(THD *thd, const char *wild,
name_buffer[sizeof(name_buffer)-1]=0; /* Safety */
if (ucase_names)
my_caseup_str(system_charset_info, name_buffer);
+ else
+ {
+ my_casedn_str(system_charset_info, name_buffer);
+ DBUG_ASSERT(name_buffer[0] >= 'a');
+ DBUG_ASSERT(name_buffer[0] <= 'z');
+
+ /* traditionally status variables have their first letter uppercased */
+ if (status_var)
+ name_buffer[0]-= 'a' - 'A';
+ }
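For example, a status variable such as the Com_select counter is first folded to lower case and then gets its leading letter restored, while plain system variables stay all-lowercase:

    char name_buffer[]= "COM_SELECT";
    my_casedn_str(system_charset_info, name_buffer);    /* "com_select" */
    name_buffer[0]-= 'a' - 'A';                          /* "Com_select" */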
+
restore_record(table, s->default_values);
table->field[0]->store(name_buffer, strlen(name_buffer),
@@ -2963,6 +2981,14 @@ static bool show_status_array(THD *thd, const char *wild,
{
if (!(pos= *(char**) value))
pos= "";
+
+ DBUG_EXECUTE_IF("alter_server_version_str",
+ if (!my_strcasecmp(system_charset_info,
+ variables->name,
+ "version")) {
+ pos= "some-other-version";
+ });
+
end= strend(pos);
break;
}
@@ -3168,8 +3194,8 @@ int fill_schema_user_stats(THD* thd, TABLE_LIST* tables, COND* cond)
int result;
DBUG_ENTER("fill_schema_user_stats");
- if (check_global_access(thd, SUPER_ACL | PROCESS_ACL))
- DBUG_RETURN(1);
+ if (check_global_access(thd, SUPER_ACL | PROCESS_ACL, true))
+ DBUG_RETURN(0);
/*
Iterates through all the global stats and sends them to the client.
@@ -3203,8 +3229,8 @@ int fill_schema_client_stats(THD* thd, TABLE_LIST* tables, COND* cond)
int result;
DBUG_ENTER("fill_schema_client_stats");
- if (check_global_access(thd, SUPER_ACL | PROCESS_ACL))
- DBUG_RETURN(1);
+ if (check_global_access(thd, SUPER_ACL | PROCESS_ACL, true))
+ DBUG_RETURN(0);
/*
Iterates through all the global stats and sends them to the client.
@@ -3345,13 +3371,6 @@ void calc_sum_of_all_status(STATUS_VAR *to)
/* This is only used internally, but we need it here as a forward reference */
extern ST_SCHEMA_TABLE schema_tables[];
-typedef struct st_lookup_field_values
-{
- LEX_STRING db_value, table_value;
- bool wild_db_value, wild_table_value;
-} LOOKUP_FIELD_VALUES;
-
-
/*
Store record to I_S table, convert HEAP table
to MyISAM if necessary
@@ -3373,7 +3392,7 @@ bool schema_table_store_record(THD *thd, TABLE *table)
{
TMP_TABLE_PARAM *param= table->pos_in_table_list->schema_table_param;
if (create_internal_tmp_table_from_heap(thd, table, param->start_recinfo,
- &param->recinfo, error, 0))
+ &param->recinfo, error, 0, NULL))
return 1;
}
@@ -3459,8 +3478,8 @@ bool get_lookup_value(THD *thd, Item_func *item_func,
(uchar *) item_field->field_name,
strlen(item_field->field_name), 0))
{
- thd->make_lex_string(&lookup_field_vals->db_value, tmp_str->ptr(),
- tmp_str->length(), FALSE);
+ thd->make_lex_string(&lookup_field_vals->db_value,
+ tmp_str->ptr(), tmp_str->length());
}
/* Lookup value is table name */
else if (!cs->coll->strnncollsp(cs, (uchar *) field_name2,
@@ -3468,8 +3487,8 @@ bool get_lookup_value(THD *thd, Item_func *item_func,
(uchar *) item_field->field_name,
strlen(item_field->field_name), 0))
{
- thd->make_lex_string(&lookup_field_vals->table_value, tmp_str->ptr(),
- tmp_str->length(), FALSE);
+ thd->make_lex_string(&lookup_field_vals->table_value,
+ tmp_str->ptr(), tmp_str->length());
}
}
return 0;
@@ -3646,7 +3665,7 @@ bool get_lookup_field_values(THD *thd, COND *cond, TABLE_LIST *tables,
LOOKUP_FIELD_VALUES *lookup_field_values)
{
LEX *lex= thd->lex;
- const char *wild= lex->wild ? lex->wild->ptr() : NullS;
+ String *wild= lex->wild;
bool rc= 0;
bzero((char*) lookup_field_values, sizeof(LOOKUP_FIELD_VALUES));
@@ -3654,8 +3673,8 @@ bool get_lookup_field_values(THD *thd, COND *cond, TABLE_LIST *tables,
case SQLCOM_SHOW_DATABASES:
if (wild)
{
- thd->make_lex_string(&lookup_field_values->db_value,
- wild, strlen(wild), 0);
+ thd->make_lex_string(&lookup_field_values->db_value,
+ wild->ptr(), wild->length());
lookup_field_values->wild_db_value= 1;
}
break;
@@ -3664,14 +3683,25 @@ bool get_lookup_field_values(THD *thd, COND *cond, TABLE_LIST *tables,
case SQLCOM_SHOW_TRIGGERS:
case SQLCOM_SHOW_EVENTS:
thd->make_lex_string(&lookup_field_values->db_value,
- lex->select_lex.db, strlen(lex->select_lex.db), 0);
+ lex->select_lex.db, strlen(lex->select_lex.db));
if (wild)
{
thd->make_lex_string(&lookup_field_values->table_value,
- wild, strlen(wild), 0);
+ wild->ptr(), wild->length());
lookup_field_values->wild_table_value= 1;
}
break;
+ case SQLCOM_SHOW_PLUGINS:
+ if (lex->ident.str)
+ thd->make_lex_string(&lookup_field_values->db_value,
+ lex->ident.str, lex->ident.length);
+ else if (lex->wild)
+ {
+ thd->make_lex_string(&lookup_field_values->db_value,
+ lex->wild->ptr(), lex->wild->length());
+ lookup_field_values->wild_db_value= 1;
+ }
+ break;
default:
/*
The "default" is for queries over I_S.
@@ -3714,23 +3744,15 @@ enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table)
wild wild string
idx_field_vals idx_field_vals->db_name contains db name or
wild string
- with_i_schema returns 1 if we added 'IS' name to list
- otherwise returns 0
RETURN
zero success
non-zero error
*/
-int make_db_list(THD *thd, List<LEX_STRING> *files,
- LOOKUP_FIELD_VALUES *lookup_field_vals,
- bool *with_i_schema)
+int make_db_list(THD *thd, Dynamic_array<LEX_STRING*> *files,
+ LOOKUP_FIELD_VALUES *lookup_field_vals)
{
- LEX_STRING *i_s_name_copy= 0;
- i_s_name_copy= thd->make_lex_string(i_s_name_copy,
- INFORMATION_SCHEMA_NAME.str,
- INFORMATION_SCHEMA_NAME.length, TRUE);
- *with_i_schema= 0;
if (lookup_field_vals->wild_db_value)
{
/*
@@ -3743,12 +3765,11 @@ int make_db_list(THD *thd, List<LEX_STRING> *files,
INFORMATION_SCHEMA_NAME.str,
lookup_field_vals->db_value.str))
{
- *with_i_schema= 1;
- if (files->push_back(i_s_name_copy))
+ if (files->append_val(&INFORMATION_SCHEMA_NAME))
return 1;
}
- return (find_files(thd, files, NullS, mysql_data_home,
- lookup_field_vals->db_value.str, 1) != FIND_FILES_OK);
+ return find_files(thd, files, 0, mysql_data_home,
+ &lookup_field_vals->db_value);
}
@@ -3764,12 +3785,11 @@ int make_db_list(THD *thd, List<LEX_STRING> *files,
if (is_infoschema_db(lookup_field_vals->db_value.str,
lookup_field_vals->db_value.length))
{
- *with_i_schema= 1;
- if (files->push_back(i_s_name_copy))
+ if (files->append_val(&INFORMATION_SCHEMA_NAME))
return 1;
return 0;
}
- if (files->push_back(&lookup_field_vals->db_value))
+ if (files->append_val(&lookup_field_vals->db_value))
return 1;
return 0;
}
@@ -3778,17 +3798,15 @@ int make_db_list(THD *thd, List<LEX_STRING> *files,
Create list of existing databases. It is used in case
of select from information schema table
*/
- if (files->push_back(i_s_name_copy))
+ if (files->append_val(&INFORMATION_SCHEMA_NAME))
return 1;
- *with_i_schema= 1;
- return (find_files(thd, files, NullS,
- mysql_data_home, NullS, 1) != FIND_FILES_OK);
+ return find_files(thd, files, 0, mysql_data_home, &null_lex_str);
}
struct st_add_schema_table
{
- List<LEX_STRING> *files;
+ Dynamic_array<LEX_STRING*> *files;
const char *wild;
};
@@ -3798,7 +3816,7 @@ static my_bool add_schema_table(THD *thd, plugin_ref plugin,
{
LEX_STRING *file_name= 0;
st_add_schema_table *data= (st_add_schema_table *)p_data;
- List<LEX_STRING> *file_list= data->files;
+ Dynamic_array<LEX_STRING*> *file_list= data->files;
const char *wild= data->wild;
ST_SCHEMA_TABLE *schema_table= plugin_data(plugin, ST_SCHEMA_TABLE *);
DBUG_ENTER("add_schema_table");
@@ -3818,16 +3836,16 @@ static my_bool add_schema_table(THD *thd, plugin_ref plugin,
DBUG_RETURN(0);
}
- if ((file_name= thd->make_lex_string(file_name, schema_table->table_name,
- strlen(schema_table->table_name),
- TRUE)) &&
- !file_list->push_back(file_name))
+ if ((file_name= thd->make_lex_string(schema_table->table_name,
+ strlen(schema_table->table_name))) &&
+ !file_list->append(file_name))
DBUG_RETURN(0);
DBUG_RETURN(1);
}
-int schema_tables_add(THD *thd, List<LEX_STRING> *files, const char *wild)
+int schema_tables_add(THD *thd, Dynamic_array<LEX_STRING*> *files,
+ const char *wild)
{
LEX_STRING *file_name= 0;
ST_SCHEMA_TABLE *tmp_schema_table= schema_tables;
@@ -3851,9 +3869,9 @@ int schema_tables_add(THD *thd, List<LEX_STRING> *files, const char *wild)
continue;
}
if ((file_name=
- thd->make_lex_string(file_name, tmp_schema_table->table_name,
- strlen(tmp_schema_table->table_name), TRUE)) &&
- !files->push_back(file_name))
+ thd->make_lex_string(tmp_schema_table->table_name,
+ strlen(tmp_schema_table->table_name))) &&
+ !files->append(file_name))
continue;
DBUG_RETURN(1);
}
@@ -3878,7 +3896,6 @@ int schema_tables_add(THD *thd, List<LEX_STRING> *files, const char *wild)
@param[in] table_names List of table names in database
@param[in] lex pointer to LEX struct
@param[in] lookup_field_vals pointer to LOOKUP_FIELD_VALUE struct
- @param[in] with_i_schema TRUE means that we add I_S tables to list
@param[in] db_name database name
@return Operation status
@@ -3888,40 +3905,32 @@ int schema_tables_add(THD *thd, List<LEX_STRING> *files, const char *wild)
*/
static int
-make_table_name_list(THD *thd, List<LEX_STRING> *table_names, LEX *lex,
- LOOKUP_FIELD_VALUES *lookup_field_vals,
- bool with_i_schema, LEX_STRING *db_name)
+make_table_name_list(THD *thd, Dynamic_array<LEX_STRING*> *table_names,
+ LEX *lex, LOOKUP_FIELD_VALUES *lookup_field_vals,
+ LEX_STRING *db_name)
{
char path[FN_REFLEN + 1];
build_table_filename(path, sizeof(path) - 1, db_name->str, "", "", 0);
if (!lookup_field_vals->wild_table_value &&
lookup_field_vals->table_value.str)
{
- if (with_i_schema)
+ if (db_name == &INFORMATION_SCHEMA_NAME)
{
LEX_STRING *name;
ST_SCHEMA_TABLE *schema_table=
find_schema_table(thd, lookup_field_vals->table_value.str);
if (schema_table && !schema_table->hidden)
{
- if (!(name=
- thd->make_lex_string(NULL, schema_table->table_name,
- strlen(schema_table->table_name), TRUE)) ||
- table_names->push_back(name))
+ if (!(name= thd->make_lex_string(schema_table->table_name,
+ strlen(schema_table->table_name))) ||
+ table_names->append(name))
return 1;
}
}
else
{
- if (table_names->push_back(&lookup_field_vals->table_value))
+ if (table_names->append_val(&lookup_field_vals->table_value))
return 1;
- /*
- Check that table is relevant in current transaction.
- (used for ndb engine, see ndbcluster_find_files(), ha_ndbcluster.cc)
- */
- (void) ha_find_files(thd, db_name->str, path,
- lookup_field_vals->table_value.str, 0,
- table_names);
}
return 0;
}
@@ -3930,12 +3939,12 @@ make_table_name_list(THD *thd, List<LEX_STRING> *table_names, LEX *lex,
This call will add all matching the wildcards (if specified) IS tables
to the list
*/
- if (with_i_schema)
+ if (db_name == &INFORMATION_SCHEMA_NAME)
return (schema_tables_add(thd, table_names,
lookup_field_vals->table_value.str));
- find_files_result res= find_files(thd, table_names, db_name->str, path,
- lookup_field_vals->table_value.str, 0);
+ find_files_result res= find_files(thd, table_names, db_name, path,
+ &lookup_field_vals->table_value);
if (res != FIND_FILES_OK)
{
/*
@@ -4031,10 +4040,10 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys,
These copies are used for make_table_list() while unaltered values
are passed to process_table() functions.
*/
- if (!thd->make_lex_string(&db_name, orig_db_name->str,
- orig_db_name->length, FALSE) ||
- !thd->make_lex_string(&table_name, orig_table_name->str,
- orig_table_name->length, FALSE))
+ if (!thd->make_lex_string(&db_name,
+ orig_db_name->str, orig_db_name->length) ||
+ !thd->make_lex_string(&table_name,
+ orig_table_name->str, orig_table_name->length))
goto end;
/*
@@ -4102,12 +4111,14 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys,
of backward compatibility.
*/
if (!is_show_fields_or_keys && result && thd->is_error() &&
- thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE)
+ (thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE ||
+ thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT))
{
/*
Hide error for a non-existing table.
For example, this error can occur when we use a where condition
- with a db name and table, but the table does not exist.
+ with a db name and table, but the table does not exist or
+ there is a view with the same name.
*/
result= false;
thd->clear_error();
@@ -4159,7 +4170,6 @@ end:
@param[in] table TABLE struct for I_S table
@param[in] db_name database name
@param[in] table_name table name
- @param[in] with_i_schema I_S table if TRUE
@return Operation status
@retval 0 success
@@ -4167,37 +4177,28 @@ end:
*/
static int fill_schema_table_names(THD *thd, TABLE_LIST *tables,
- LEX_STRING *db_name, LEX_STRING *table_name,
- bool with_i_schema)
+ LEX_STRING *db_name, LEX_STRING *table_name)
{
TABLE *table= tables->table;
- if (with_i_schema)
+ if (db_name == &INFORMATION_SCHEMA_NAME)
{
table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"),
system_charset_info);
}
else if (tables->table_open_method != SKIP_OPEN_TABLE)
{
- enum legacy_db_type not_used;
- char path[FN_REFLEN + 1];
- (void) build_table_filename(path, sizeof(path) - 1, db_name->str,
- table_name->str, reg_ext, 0);
- switch (dd_frm_type(thd, path, &not_used)) {
- case FRMTYPE_ERROR:
- table->field[3]->store(STRING_WITH_LEN("ERROR"),
- system_charset_info);
- break;
- case FRMTYPE_TABLE:
- table->field[3]->store(STRING_WITH_LEN("BASE TABLE"),
- system_charset_info);
- break;
- case FRMTYPE_VIEW:
- table->field[3]->store(STRING_WITH_LEN("VIEW"),
- system_charset_info);
- break;
- default:
- DBUG_ASSERT(0);
+ CHARSET_INFO *cs= system_charset_info;
+ handlerton *hton;
+ if (ha_table_exists(thd, db_name->str, table_name->str, &hton))
+ {
+ if (hton == view_pseudo_hton)
+ table->field[3]->store(STRING_WITH_LEN("VIEW"), cs);
+ else
+ table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), cs);
}
+ else
+ table->field[3]->store(STRING_WITH_LEN("ERROR"), cs);
+
if (thd->is_error() && thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE)
{
thd->clear_error();
@@ -4352,10 +4353,6 @@ static int fill_schema_table_from_frm(THD *thd, TABLE_LIST *tables,
TABLE tbl;
TABLE_LIST table_list;
uint res= 0;
- int not_used;
- my_hash_value_type hash_value;
- const char *key;
- uint key_length;
char db_name_buff[NAME_LEN + 1], table_name_buff[NAME_LEN + 1];
bzero((char*) &table_list, sizeof(TABLE_LIST));
@@ -4427,15 +4424,12 @@ static int fill_schema_table_from_frm(THD *thd, TABLE_LIST *tables,
goto end;
}
- key_length= get_table_def_key(&table_list, &key);
- hash_value= my_calc_hash(&table_def_cache, (uchar*) key, key_length);
- mysql_mutex_lock(&LOCK_open);
- share= get_table_share(thd, &table_list, key,
- key_length, OPEN_VIEW, &not_used, hash_value);
+ share= get_table_share(thd, table_list.db, table_list.table_name,
+ GTS_TABLE | GTS_VIEW);
if (!share)
{
res= 0;
- goto end_unlock;
+ goto end;
}
if (share->is_view)
@@ -4455,10 +4449,7 @@ static int fill_schema_table_from_frm(THD *thd, TABLE_LIST *tables,
res= 1;
goto end_share;
}
- }
- if (share->is_view)
- {
if (open_new_frm(thd, share, table_name->str,
(uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
HA_GET_INDEX | HA_TRY_READ_ONLY),
@@ -4484,10 +4475,10 @@ static int fill_schema_table_from_frm(THD *thd, TABLE_LIST *tables,
free_root(&tbl.mem_root, MYF(0));
}
+
end_share:
+ mysql_mutex_lock(&LOCK_open);
release_table_share(share);
-
-end_unlock:
mysql_mutex_unlock(&LOCK_open);
end:
@@ -4577,14 +4568,12 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
{
LEX *lex= thd->lex;
TABLE *table= tables->table;
+ TABLE_LIST table_acl_check;
SELECT_LEX *lsel= tables->schema_select_lex;
ST_SCHEMA_TABLE *schema_table= tables->schema_table;
LOOKUP_FIELD_VALUES lookup_field_vals;
- LEX_STRING *db_name, *table_name;
- bool with_i_schema;
enum enum_schema_tables schema_table_idx;
- List<LEX_STRING> db_names;
- List_iterator_fast<LEX_STRING> it(db_names);
+ Dynamic_array<LEX_STRING*> db_names;
COND *partial_cond= 0;
int error= 1;
Open_tables_backup open_tables_state_backup;
@@ -4647,9 +4636,9 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
goto err;
}
- DBUG_PRINT("INDEX VALUES",("db_name='%s', table_name='%s'",
- STR_OR_NIL(lookup_field_vals.db_value.str),
- STR_OR_NIL(lookup_field_vals.table_value.str)));
+ DBUG_PRINT("info",("db_name='%s', table_name='%s'",
+ lookup_field_vals.db_value.str,
+ lookup_field_vals.table_value.str));
if (!lookup_field_vals.wild_db_value && !lookup_field_vals.wild_table_value)
{
@@ -4686,11 +4675,13 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
goto err;
}
- if (make_db_list(thd, &db_names, &lookup_field_vals, &with_i_schema))
+ bzero((char*) &table_acl_check, sizeof(table_acl_check));
+
+ if (make_db_list(thd, &db_names, &lookup_field_vals))
goto err;
- it.rewind(); /* To get access to new elements in basis list */
- while ((db_name= it++))
+ for (size_t i=0; i < db_names.elements(); i++)
{
+ LEX_STRING *db_name= db_names.at(i);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (!(check_access(thd, SELECT_ACL, db_name->str,
&thd->col_access, NULL, 0, 1) ||
@@ -4699,18 +4690,30 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name->str, 0))
#endif
{
- List<LEX_STRING> table_names;
+ Dynamic_array<LEX_STRING*> table_names;
int res= make_table_name_list(thd, &table_names, lex,
- &lookup_field_vals,
- with_i_schema, db_name);
+ &lookup_field_vals, db_name);
if (res == 2) /* Not fatal error, continue */
continue;
if (res)
goto err;
- List_iterator_fast<LEX_STRING> it_files(table_names);
- while ((table_name= it_files++))
+ for (size_t i=0; i < table_names.elements(); i++)
{
+ LEX_STRING *table_name= table_names.at(i);
+
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ if (!(thd->col_access & TABLE_ACLS))
+ {
+ table_acl_check.db= db_name->str;
+ table_acl_check.db_length= db_name->length;
+ table_acl_check.table_name= table_name->str;
+ table_acl_check.table_name_length= table_name->length;
+ table_acl_check.grant.privilege= thd->col_access;
+ if (check_grant(thd, TABLE_ACLS, &table_acl_check, TRUE, 1, TRUE))
+ continue;
+ }
+#endif
restore_record(table, s->default_values);
table->field[schema_table->idx_field1]->
store(db_name->str, db_name->length, system_charset_info);
@@ -4738,14 +4741,13 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
/* SHOW TABLE NAMES command */
if (schema_table_idx == SCH_TABLE_NAMES)
{
- if (fill_schema_table_names(thd, tables, db_name,
- table_name, with_i_schema))
+ if (fill_schema_table_names(thd, tables, db_name, table_name))
continue;
}
else
{
if (!(table_open_method & ~OPEN_FRM_ONLY) &&
- !with_i_schema)
+ db_name != &INFORMATION_SCHEMA_NAME)
{
/*
Here we need to filter out warnings, which can happen
@@ -4779,11 +4781,6 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
}
}
}
- /*
- If we have information schema its always the first table and only
- the first table. Reset for other tables.
- */
- with_i_schema= 0;
}
}
@@ -4815,9 +4812,7 @@ int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond)
*/
LOOKUP_FIELD_VALUES lookup_field_vals;
- List<LEX_STRING> db_names;
- LEX_STRING *db_name;
- bool with_i_schema;
+ Dynamic_array<LEX_STRING*> db_names;
HA_CREATE_INFO create;
TABLE *table= tables->table;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -4830,15 +4825,14 @@ int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond)
DBUG_PRINT("INDEX VALUES",("db_name: %s table_name: %s",
lookup_field_vals.db_value.str,
lookup_field_vals.table_value.str));
- if (make_db_list(thd, &db_names, &lookup_field_vals,
- &with_i_schema))
+ if (make_db_list(thd, &db_names, &lookup_field_vals))
DBUG_RETURN(1);
/*
If we have lookup db value we should check that the database exists
*/
if(lookup_field_vals.db_value.str && !lookup_field_vals.wild_db_value &&
- !with_i_schema)
+ db_names.at(0) != &INFORMATION_SCHEMA_NAME)
{
char path[FN_REFLEN+16];
uint path_len;
@@ -4852,15 +4846,14 @@ int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond)
DBUG_RETURN(0);
}
- List_iterator_fast<LEX_STRING> it(db_names);
- while ((db_name=it++))
+ for (size_t i=0; i < db_names.elements(); i++)
{
- if (with_i_schema) // information schema name is always first in list
+ LEX_STRING *db_name= db_names.at(i);
+ if (db_name == &INFORMATION_SCHEMA_NAME)
{
if (store_schema_shemata(thd, table, db_name,
system_charset_info))
DBUG_RETURN(1);
- with_i_schema= 0;
continue;
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -4945,7 +4938,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
if (share->db_type() == partition_hton &&
share->partition_info_str_len)
{
- tmp_db_type= share->default_part_db_type;
+ tmp_db_type= plugin_hton(share->default_part_plugin);
is_partitioned= TRUE;
}
#endif
@@ -5445,7 +5438,7 @@ static my_bool iter_schema_engines(THD *thd, plugin_ref plugin,
void *ptable)
{
TABLE *table= (TABLE *) ptable;
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
CHARSET_INFO *scs= system_charset_info;
handlerton *default_type= ha_default_handlerton(thd);
@@ -5631,7 +5624,7 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
CHARSET_INFO *cs= system_charset_info;
char params_buff[MAX_FIELD_WIDTH], returns_buff[MAX_FIELD_WIDTH],
sp_db_buff[NAME_LEN], sp_name_buff[NAME_LEN], path[FN_REFLEN],
- definer_buff[USERNAME_LENGTH + HOSTNAME_LENGTH + 1];
+ definer_buff[DEFINER_LENGTH + 1];
String params(params_buff, sizeof(params_buff), cs);
String returns(returns_buff, sizeof(returns_buff), cs);
String sp_db(sp_db_buff, sizeof(sp_db_buff), cs);
@@ -5775,7 +5768,7 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
LEX *lex= thd->lex;
CHARSET_INFO *cs= system_charset_info;
char sp_db_buff[SAFE_NAME_LEN + 1], sp_name_buff[NAME_LEN + 1],
- definer_buff[USERNAME_LENGTH + HOSTNAME_LENGTH + 2],
+ definer_buff[DEFINER_LENGTH + 1],
returns_buff[MAX_FIELD_WIDTH];
String sp_db(sp_db_buff, sizeof(sp_db_buff), cs);
@@ -6036,8 +6029,8 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
KEY *key=show_table->key_info+i;
if (key->rec_per_key[j])
{
- ha_rows records=((double) show_table->stat_records() /
- key->actual_rec_per_key(j));
+ ha_rows records= (ha_rows) ((double) show_table->stat_records() /
+ key->actual_rec_per_key(j));
table->field[9]->store((longlong) records, TRUE);
table->field[9]->set_notnull();
}
@@ -7316,7 +7309,7 @@ struct schema_table_ref
ST_FIELD_INFO user_stats_fields_info[]=
{
- {"USER", USERNAME_LENGTH, MYSQL_TYPE_STRING, 0, 0, "User", SKIP_OPEN_TABLE},
+ {"USER", USERNAME_CHAR_LENGTH, MYSQL_TYPE_STRING, 0, 0, "User", SKIP_OPEN_TABLE},
{"TOTAL_CONNECTIONS", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Total_connections",SKIP_OPEN_TABLE},
{"CONCURRENT_CONNECTIONS", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Concurrent_connections",SKIP_OPEN_TABLE},
{"CONNECTED_TIME", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Connected_time",SKIP_OPEN_TABLE},
@@ -7516,20 +7509,20 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
break;
case MYSQL_TYPE_DATE:
if (!(item=new Item_return_date_time(fields_info->field_name,
- MAX_DATE_WIDTH,
+ strlen(fields_info->field_name),
fields_info->field_type)))
DBUG_RETURN(0);
break;
case MYSQL_TYPE_TIME:
if (!(item=new Item_return_date_time(fields_info->field_name,
- MAX_TIME_FULL_WIDTH,
+ strlen(fields_info->field_name),
fields_info->field_type)))
DBUG_RETURN(0);
break;
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
if (!(item=new Item_return_date_time(fields_info->field_name,
- MAX_DATETIME_WIDTH,
+ strlen(fields_info->field_name),
fields_info->field_type)))
DBUG_RETURN(0);
break;
@@ -7902,16 +7895,22 @@ int make_schema_select(THD *thd, SELECT_LEX *sel,
We have to make non const db_name & table_name
because of lower_case_table_names
*/
- thd->make_lex_string(&db, INFORMATION_SCHEMA_NAME.str,
- INFORMATION_SCHEMA_NAME.length, 0);
- thd->make_lex_string(&table, schema_table->table_name,
- strlen(schema_table->table_name), 0);
- if (schema_table->old_format(thd, schema_table) || /* Handle old syntax */
- !sel->add_table_to_list(thd, new Table_ident(thd, db, table, 0),
+ if (!thd->make_lex_string(&db, INFORMATION_SCHEMA_NAME.str,
+ INFORMATION_SCHEMA_NAME.length))
+ DBUG_RETURN(1);
+
+ if (!thd->make_lex_string(&table, schema_table->table_name,
+ strlen(schema_table->table_name)))
+ DBUG_RETURN(1);
+
+ if (schema_table->old_format(thd, schema_table))
+ DBUG_RETURN(1);
+
+ if (!sel->add_table_to_list(thd, new Table_ident(thd, db, table, 0),
0, 0, TL_READ, MDL_SHARED_READ))
- {
DBUG_RETURN(1);
- }
+
DBUG_RETURN(0);
}
@@ -8094,7 +8093,7 @@ static my_bool run_hton_fill_schema_table(THD *thd, plugin_ref plugin,
{
struct run_hton_fill_schema_table_args *args=
(run_hton_fill_schema_table_args *) arg;
- handlerton *hton= plugin_data(plugin, handlerton *);
+ handlerton *hton= plugin_hton(plugin);
if (hton->fill_is_table && hton->state == SHOW_OPTION_YES)
hton->fill_is_table(hton, thd, args->tables, args->cond,
get_schema_table_idx(args->tables->schema_table));
@@ -8339,7 +8338,7 @@ ST_FIELD_INFO events_fields_info[]=
SKIP_OPEN_TABLE},
{"EVENT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
SKIP_OPEN_TABLE},
- {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer", SKIP_OPEN_TABLE},
+ {"DEFINER", DEFINER_CHAR_LENGTH, MYSQL_TYPE_STRING, 0, 0, "Definer", SKIP_OPEN_TABLE},
{"TIME_ZONE", 64, MYSQL_TYPE_STRING, 0, 0, "Time zone", SKIP_OPEN_TABLE},
{"EVENT_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"EVENT_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
@@ -8416,7 +8415,7 @@ ST_FIELD_INFO proc_fields_info[]=
{"SQL_MODE", 32*256, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"ROUTINE_COMMENT", 65535, MYSQL_TYPE_STRING, 0, 0, "Comment",
SKIP_OPEN_TABLE},
- {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer", SKIP_OPEN_TABLE},
+ {"DEFINER", DEFINER_CHAR_LENGTH, MYSQL_TYPE_STRING, 0, 0, "Definer", SKIP_OPEN_TABLE},
{"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
"character_set_client", SKIP_OPEN_TABLE},
{"COLLATION_CONNECTION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
@@ -8461,7 +8460,7 @@ ST_FIELD_INFO view_fields_info[]=
{"VIEW_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"CHECK_OPTION", 8, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"IS_UPDATABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
- {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
+ {"DEFINER", DEFINER_CHAR_LENGTH, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"SECURITY_TYPE", 7, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FRM_ONLY},
@@ -8606,7 +8605,7 @@ ST_FIELD_INFO triggers_fields_info[]=
{"ACTION_REFERENCE_NEW_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"CREATED", 0, MYSQL_TYPE_DATETIME, 0, 1, "Created", OPEN_FRM_ONLY},
{"SQL_MODE", 32*256, MYSQL_TYPE_STRING, 0, 0, "sql_mode", OPEN_FRM_ONLY},
- {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer", OPEN_FRM_ONLY},
+ {"DEFINER", DEFINER_CHAR_LENGTH, MYSQL_TYPE_STRING, 0, 0, "Definer", OPEN_FRM_ONLY},
{"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
"character_set_client", OPEN_FRM_ONLY},
{"COLLATION_CONNECTION", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
@@ -8672,7 +8671,8 @@ ST_FIELD_INFO variables_fields_info[]=
ST_FIELD_INFO processlist_fields_info[]=
{
{"ID", 4, MYSQL_TYPE_LONGLONG, 0, 0, "Id", SKIP_OPEN_TABLE},
- {"USER", 16, MYSQL_TYPE_STRING, 0, 0, "User", SKIP_OPEN_TABLE},
+ {"USER", USERNAME_CHAR_LENGTH, MYSQL_TYPE_STRING, 0, 0, "User",
+ SKIP_OPEN_TABLE},
{"HOST", LIST_PROCESS_HOST_LEN, MYSQL_TYPE_STRING, 0, 0, "Host",
SKIP_OPEN_TABLE},
{"DB", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, "Db", SKIP_OPEN_TABLE},
@@ -8698,7 +8698,7 @@ ST_FIELD_INFO plugin_fields_info[]=
{"PLUGIN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
SKIP_OPEN_TABLE},
{"PLUGIN_VERSION", 20, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
- {"PLUGIN_STATUS", 10, MYSQL_TYPE_STRING, 0, 0, "Status", SKIP_OPEN_TABLE},
+ {"PLUGIN_STATUS", 16, MYSQL_TYPE_STRING, 0, 0, "Status", SKIP_OPEN_TABLE},
{"PLUGIN_TYPE", 80, MYSQL_TYPE_STRING, 0, 0, "Type", SKIP_OPEN_TABLE},
{"PLUGIN_TYPE_VERSION", 20, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"PLUGIN_LIBRARY", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, "Library",
@@ -8959,6 +8959,8 @@ ST_SCHEMA_TABLE schema_tables[]=
OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"PLUGINS", plugin_fields_info, create_schema_table,
fill_plugins, make_old_format, 0, -1, -1, 0, 0},
+ {"ALL_PLUGINS", plugin_fields_info, create_schema_table,
+ fill_all_plugins, make_old_format, 0, 5, -1, 0, 0},
{"PROCESSLIST", processlist_fields_info, create_schema_table,
fill_schema_processlist, make_old_format, 0, -1, -1, 0, 0},
{"PROFILING", query_profile_statistics_info, create_schema_table,
diff --git a/sql/sql_show.h b/sql/sql_show.h
index 03d8af3aabd..ec4d6a2b7c9 100644
--- a/sql/sql_show.h
+++ b/sql/sql_show.h
@@ -27,21 +27,13 @@ class String;
class THD;
class sp_name;
struct TABLE_LIST;
-struct st_ha_create_information;
typedef class st_select_lex SELECT_LEX;
-typedef st_ha_create_information HA_CREATE_INFO;
struct LEX;
typedef struct st_mysql_show_var SHOW_VAR;
typedef struct st_schema_table ST_SCHEMA_TABLE;
struct TABLE;
typedef struct system_status_var STATUS_VAR;
-enum find_files_result {
- FIND_FILES_OK,
- FIND_FILES_OOM,
- FIND_FILES_DIR
-};
-
/* Used by handlers to store things in schema tables */
#define IS_FILES_FILE_ID 0
#define IS_FILES_FILE_NAME 1
@@ -82,9 +74,6 @@ enum find_files_result {
#define IS_FILES_STATUS 36
#define IS_FILES_EXTRA 37
-find_files_result find_files(THD *thd, List<LEX_STRING> *files, const char *db,
- const char *path, const char *wild, bool dir);
-
int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
HA_CREATE_INFO *create_info_arg, bool show_database);
int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff);
diff --git a/sql/sql_state.c b/sql/sql_state.c
index 5acf97f16cc..c733d4b37c0 100644
--- a/sql/sql_state.c
+++ b/sql/sql_state.c
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* Functions to map mysqld errno to sql_state */
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index c872069ed28..94cbf3b946a 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -26,6 +26,7 @@
#include "sql_base.h"
#include "key.h"
#include "sql_statistics.h"
+#include "opt_range.h"
#include "my_atomic.h"
/*
@@ -249,12 +250,13 @@ public:
and column_name with column_name taken out of the only parameter f of the
Field* type passed to this method. After this get_stat_values looks
for a row by the set key value. If the row is found the values of statistical
- data columns min_value, max_value, nulls_ratio, avg_length, avg_frequency
- are read into internal structures. Values of nulls_ratio, avg_length,
- avg_frequency are read into the corresponding fields of the read_stat
- structure from the Field object f, while values from min_value and max_value
- are copied into the min_value and max_value record buffers attached to the
- TABLE structure for table t.
+ data columns min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+ hist_size, hist_type, histogram are read into internal structures. Values
+ of nulls_ratio, avg_length, avg_frequency, hist_size, hist_type, histogram
+ are read into the corresponding fields of the read_stat structure from
+ the Field object f, while values from min_value and max_value are copied
+ into the min_value and max_value record buffers attached to the TABLE
+ structure for table t.
If the value of a statistical column in the found row is null, then the
corresponding flag in the f->read_stat.column_stat_nulls bitmap is set off.
Otherwise the flag is set on. If no row is found for the column then all the flags
@@ -335,7 +337,7 @@ protected:
void store_record_for_lookup()
{
- store_record(stat_table, record[0]);
+ DBUG_ASSERT(record[0] == stat_table->record[0]);
}
bool update_record()
@@ -867,11 +869,12 @@ public:
@details
This implementation of a purely virtual method sets the value of the
- columns 'min_value', 'max_value', 'nulls_ratio', 'avg_length' and
- 'avg_frequency' of the stistical table columns_stat according to the
- contents of the bitmap write_stat.column_stat_nulls and the values
- of the fields min_value, max_value, nulls_ratio, avg_length and
- avg_frequency of the structure write_stat from the Field structure
+ columns 'min_value', 'max_value', 'nulls_ratio', 'avg_length',
+ 'avg_frequency', 'hist_size', 'hist_type' and 'histogram' of the
+ statistical table columns_stat according to the contents of the bitmap
+ write_stat.column_stat_nulls and the values of the fields min_value,
+ max_value, nulls_ratio, avg_length, avg_frequency, hist_size, hist_type
+ and histogram of the structure write_stat from the Field structure
for the field 'table_field'.
The value of the k-th column in the table columns_stat is set to NULL
if the k-th bit in the bitmap 'column_stat_nulls' is set to 1.
@@ -888,7 +891,7 @@ public:
char buff[MAX_FIELD_WIDTH];
String val(buff, sizeof(buff), &my_charset_utf8_bin);
- for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_AVG_FREQUENCY; i++)
+ for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HISTOGRAM; i++)
{
Field *stat_field= stat_table->field[i];
if (table_field->collected_stats->is_null(i))
@@ -923,7 +926,21 @@ public:
break;
case COLUMN_STAT_AVG_FREQUENCY:
stat_field->store(table_field->collected_stats->get_avg_frequency());
- break;
+ break;
+ case COLUMN_STAT_HIST_SIZE:
+ stat_field->store(table_field->collected_stats->histogram.get_size());
+ break;
+ case COLUMN_STAT_HIST_TYPE:
+ stat_field->store(table_field->collected_stats->histogram.get_type() +
+ 1);
+ break;
+ case COLUMN_STAT_HISTOGRAM:
+ const char * col_histogram=
+ (const char *) (table_field->collected_stats->histogram.get_values());
+ stat_field->store(col_histogram,
+ table_field->collected_stats->histogram.get_size(),
+ &my_charset_bin);
+ break;
}
}
}
@@ -936,14 +953,15 @@ public:
@details
This implementation of a purely virtual method first looks for a record
- the statistical table column_stats by its primary key set the record
+ in the statistical table column_stats by its primary key set in the record
buffer with the help of Column_stat::set_key_fields. Then, if the row is
found, the function reads the values of the columns 'min_value',
- 'max_value', 'nulls_ratio', 'avg_length' and 'avg_frequency' of the
- table column_stat and sets accordingly the value of the bitmap
- read_stat.column_stat_nulls' and the values of the fields min_value,
- max_value, nulls_ratio, avg_length and avg_frequency of the structure
- read_stat from the Field structure for the field 'table_field'.
+ 'max_value', 'nulls_ratio', 'avg_length', 'avg_frequency', 'hist_size' and
+ 'hist_type' of the table column_stat and sets accordingly the value of
+ the bitmap read_stat.column_stat_nulls and the values of the fields
+ min_value, max_value, nulls_ratio, avg_length, avg_frequency, hist_size and
+ hist_type of the structure read_stat from the Field structure for the field
+ 'table_field'.
*/
void get_stat_values()
@@ -960,7 +978,7 @@ public:
char buff[MAX_FIELD_WIDTH];
String val(buff, sizeof(buff), &my_charset_utf8_bin);
- for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_AVG_FREQUENCY; i++)
+ for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HIST_TYPE; i++)
{
Field *stat_field= stat_table->field[i];
@@ -992,6 +1010,14 @@ public:
break;
case COLUMN_STAT_AVG_FREQUENCY:
table_field->read_stats->set_avg_frequency(stat_field->val_real());
+ break;
+ case COLUMN_STAT_HIST_SIZE:
+ table_field->read_stats->histogram.set_size(stat_field->val_int());
+ break;
+ case COLUMN_STAT_HIST_TYPE:
+ Histogram_type hist_type= (Histogram_type) (stat_field->val_int() -
+ 1);
+ table_field->read_stats->histogram.set_type(hist_type);
break;
}
}
@@ -999,6 +1025,37 @@ public:
}
}
+
+ /**
+ @brief
+ Read the histogram value from the table column_stats
+
+ @details
+ This method first looks for a record in the statistical table column_stats
+ by its primary key set in the record buffer with the help of
+ Column_stat::set_key_fields. Then, if the row is found, the function reads
+ the value of the column 'histogram' of the table column_stat and sets
+ accordingly the corresponding bit in the bitmap read_stat.column_stat_nulls.
+ The method assumes that the histogram size and the pointer to the
+ histogram location have already been set in the fields size and values
+ of read_stats->histogram.
+ */
+
+ void get_histogram_value()
+ {
+ if (find_stat())
+ {
+ char buff[MAX_FIELD_WIDTH];
+ String val(buff, sizeof(buff), &my_charset_utf8_bin);
+ uint fldno= COLUMN_STAT_HISTOGRAM;
+ Field *stat_field= stat_table->field[fldno];
+ table_field->read_stats->set_not_null(fldno);
+ stat_field->val_str(&val);
+ memcpy(table_field->read_stats->histogram.get_values(),
+ val.ptr(), table_field->read_stats->histogram.get_size());
+ }
+ }
+
};
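The contract described above can be shown with a minimal standalone sketch: the caller has already read hist_size and pointed the histogram at a preallocated buffer, so fetching the blob is a lookup plus a memcpy of exactly that many bytes. The map and the names below are illustrative stand-ins, not the server's types.

  #include <cassert>
  #include <cstring>
  #include <map>
  #include <string>
  #include <vector>

  // Stand-in for the HISTOGRAM blob column of mysql.column_stats,
  // keyed by "db.table.column" flattened into one string.
  static std::map<std::string, std::string> column_stats_histograms;

  struct ColumnHistogram
  {
    size_t size;             // read earlier, from the hist_size column
    unsigned char *values;   // preallocated slice of the share's buffer
  };

  // Copy the stored blob into the buffer the caller prepared; mirrors the
  // "size and values are already set" assumption of get_histogram_value().
  static bool read_histogram_value(const std::string &key, ColumnHistogram *h)
  {
    std::map<std::string, std::string>::const_iterator it=
      column_stats_histograms.find(key);
    if (it == column_stats_histograms.end() || h->size == 0)
      return false;                       // no row or no histogram collected
    memcpy(h->values, it->second.data(), h->size);
    return true;
  }

  int main()
  {
    column_stats_histograms["test.t1.a"]= std::string("\x10\x40\x80\xc0", 4);
    std::vector<unsigned char> buf(4);
    ColumnHistogram h;
    h.size= buf.size();
    h.values= &buf[0];
    assert(read_histogram_value("test.t1.a", &h));
    assert(buf[1] == 0x40);
    return 0;
  }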
@@ -1200,6 +1257,76 @@ public:
};
+/*
+ Histogram_builder is a helper class that is used to build histograms
+ for columns
+*/
+
+class Histogram_builder
+{
+ Field *column; /* table field for which the histogram is built */
+ uint col_length; /* size of this field */
+ ha_rows records; /* number of records the histogram is built for */
+ Field *min_value; /* pointer to the minimal value for the field */
+ Field *max_value; /* pointer to the maximal value for the field */
+ Histogram *histogram; /* the histogram location */
+ uint hist_width; /* the number of points in the histogram */
+ double bucket_capacity; /* number of rows in a bucket of the histogram */
+ uint curr_bucket; /* number of the current bucket to be built */
+ ulonglong count; /* number of values retrieved */
+ ulonglong count_distinct; /* number of distinct values retrieved */
+
+public:
+ Histogram_builder(Field *col, uint col_len, ha_rows rows)
+ : column(col), col_length(col_len), records(rows)
+ {
+ Column_statistics *col_stats= col->collected_stats;
+ min_value= col_stats->min_value;
+ max_value= col_stats->max_value;
+ histogram= &col_stats->histogram;
+ hist_width= histogram->get_width();
+ bucket_capacity= (double) records / (hist_width + 1);
+ curr_bucket= 0;
+ count= 0;
+ count_distinct= 0;
+ }
+
+ ulonglong get_count_distinct() { return count_distinct; }
+
+ int next(void *elem, element_count elem_cnt)
+ {
+ count_distinct++;
+ count+= elem_cnt;
+ if (curr_bucket == hist_width)
+ return 0;
+ if (count > bucket_capacity * (curr_bucket + 1))
+ {
+ column->store_field_value((uchar *) elem, col_length);
+ histogram->set_value(curr_bucket,
+ column->pos_in_interval(min_value, max_value));
+ curr_bucket++;
+ while (curr_bucket != hist_width &&
+ count > bucket_capacity * (curr_bucket + 1))
+ {
+ histogram->set_prev_value(curr_bucket);
+ curr_bucket++;
+ }
+ }
+ return 0;
+ }
+};
+
+
+C_MODE_START
+
+int histogram_build_walk(void *elem, element_count elem_cnt, void *arg)
+{
+ Histogram_builder *hist_builder= (Histogram_builder *) arg;
+ return hist_builder->next(elem, elem_cnt);
+}
+
+C_MODE_END
+
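A simplified, self-contained model of what Histogram_builder::next() does while Unique::walk() feeds it distinct values in ascending order: once the running row count crosses the capacity of the current bucket, the normalized position of the current value becomes that bucket's endpoint, and a very frequent value may fill several buckets at once. The double-valued histogram and helper names below are illustrative, not the server's.

  #include <cstdio>
  #include <map>
  #include <vector>

  // Fill an equal-height histogram from (value, count) pairs walked in order.
  static std::vector<double>
  build_equal_height(const std::map<double, unsigned long> &freq,
                     unsigned long rows, unsigned width,
                     double min_v, double max_v)
  {
    std::vector<double> bounds(width, 1.0);
    double capacity= (double) rows / (width + 1);
    unsigned bucket= 0;
    unsigned long count= 0;
    for (std::map<double, unsigned long>::const_iterator it= freq.begin();
         it != freq.end() && bucket < width; ++it)
    {
      count+= it->second;
      if (count > capacity * (bucket + 1))
      {
        double pos= (it->first - min_v) / (max_v - min_v); // pos_in_interval()
        bounds[bucket++]= pos;
        // A heavy value can spill over into the following buckets.
        while (bucket < width && count > capacity * (bucket + 1))
          bounds[bucket]= bounds[bucket - 1], bucket++;
      }
    }
    return bounds;
  }

  int main()
  {
    std::map<double, unsigned long> freq;
    for (int v= 1; v <= 100; v++)
      freq[v]= (v == 50) ? 60 : 1;          // one heavy value
    std::vector<double> b= build_equal_height(freq, 159, 4, 1, 100);
    for (unsigned i= 0; i < b.size(); i++)
      printf("bucket %u endpoint %.3f\n", i, b[i]);
    return 0;
  }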
/*
The class Count_distinct_field is a helper class used to calculate
@@ -1220,6 +1347,8 @@ protected:
uint tree_key_length; /* The length of the keys for the elements of 'tree' */
public:
+
+ Count_distinct_field() {}
/**
@param
@@ -1238,28 +1367,11 @@ public:
Count_distinct_field(Field *field, uint max_heap_table_size)
{
- qsort_cmp2 compare_key;
- void* cmp_arg;
- enum enum_field_types f_type= field->type();
-
table_field= field;
tree_key_length= field->pack_length();
- if ((f_type == MYSQL_TYPE_VARCHAR) ||
- (!field->binary() && (f_type == MYSQL_TYPE_STRING ||
- f_type == MYSQL_TYPE_VAR_STRING)))
- {
- compare_key= (qsort_cmp2) simple_str_key_cmp;
- cmp_arg= (void*) field;
- }
- else
- {
- cmp_arg= (void*) &tree_key_length;
- compare_key= (qsort_cmp2) simple_raw_key_cmp;
- }
-
- tree= new Unique(compare_key, cmp_arg,
- tree_key_length, max_heap_table_size);
+ tree= new Unique((qsort_cmp2) simple_str_key_cmp, (void*) field,
+ tree_key_length, max_heap_table_size, 1);
}
virtual ~Count_distinct_field()
@@ -1299,9 +1411,48 @@ public:
tree->walk(table_field->table, count_distinct_walk, (void*) &count);
return count;
}
+
+ /*
+ @brief
+ Build the histogram for the elements accumulated in the container of 'tree'
+ */
+ ulonglong get_value_with_histogram(ha_rows rows)
+ {
+ Histogram_builder hist_builder(table_field, tree_key_length, rows);
+ tree->walk(table_field->table, histogram_build_walk, (void *) &hist_builder);
+ return hist_builder.get_count_distinct();
+ }
+
+ /*
+ @brief
+ Get the size of the histogram in bytes built for table_field
+ */
+ uint get_hist_size()
+ {
+ return table_field->collected_stats->histogram.get_size();
+ }
+
+ /*
+ @brief
+ Get the pointer to the histogram built for table_field
+ */
+ uchar *get_histogram()
+ {
+ return table_field->collected_stats->histogram.get_values();
+ }
+
};
+static
+int simple_ulonglong_key_cmp(void* arg, uchar* key1, uchar* key2)
+{
+ ulonglong *val1= (ulonglong *) key1;
+ ulonglong *val2= (ulonglong *) key2;
+ return *val1 > *val2 ? 1 : *val1 == *val2 ? 0 : -1;
+}
+
+
/*
The class Count_distinct_field_bit is derived from the class
Count_distinct_field to be used only for fields of the MYSQL_TYPE_BIT type.
@@ -1311,8 +1462,17 @@ public:
class Count_distinct_field_bit: public Count_distinct_field
{
public:
+
Count_distinct_field_bit(Field *field, uint max_heap_table_size)
- :Count_distinct_field(field, max_heap_table_size) {}
+ {
+ table_field= field;
+ tree_key_length= sizeof(ulonglong);
+
+ tree= new Unique((qsort_cmp2) simple_ulonglong_key_cmp,
+ (void*) &tree_key_length,
+ tree_key_length, max_heap_table_size, 1);
+ }
+
bool add()
{
longlong val= table_field->val_int();
@@ -1671,13 +1831,27 @@ int alloc_statistics_for_table(THD* thd, TABLE *table)
ulong *idx_avg_frequency= (ulong*) alloc_root(&table->mem_root,
sizeof(ulong) * key_parts);
- if (!table_stats || !column_stats || !index_stats || !idx_avg_frequency)
+ uint columns= 0;
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ if (bitmap_is_set(table->read_set, (*field_ptr)->field_index))
+ columns++;
+ }
+ uint hist_size= thd->variables.histogram_size;
+ Histogram_type hist_type= (Histogram_type) (thd->variables.histogram_type);
+ uchar *histogram= NULL;
+ if (hist_size > 0)
+ histogram= (uchar *) alloc_root(&table->mem_root, hist_size * columns);
+
+ if (!table_stats || !column_stats || !index_stats || !idx_avg_frequency ||
+ (hist_size && !histogram))
DBUG_RETURN(1);
table->collected_stats= table_stats;
table_stats->column_stats= column_stats;
table_stats->index_stats= index_stats;
table_stats->idx_avg_frequency= idx_avg_frequency;
+ table_stats->histograms= histogram;
memset(column_stats, 0, sizeof(Column_statistics) * (fields+1));
@@ -1686,6 +1860,13 @@ int alloc_statistics_for_table(THD* thd, TABLE *table)
(*field_ptr)->collected_stats= column_stats;
(*field_ptr)->collected_stats->max_value= NULL;
(*field_ptr)->collected_stats->min_value= NULL;
+ if (bitmap_is_set(table->read_set, (*field_ptr)->field_index))
+ {
+ column_stats->histogram.set_size(hist_size);
+ column_stats->histogram.set_type(hist_type);
+ column_stats->histogram.set_values(histogram);
+ histogram+= hist_size;
+ }
}
memset(idx_avg_frequency, 0, sizeof(ulong) * key_parts);
@@ -1790,7 +1971,6 @@ inline bool statistics_for_command_is_needed(THD *thd)
@note
Currently the function always is called with the parameter is_safe set
to FALSE.
-
*/
int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share,
@@ -1810,12 +1990,12 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share,
DBUG_RETURN(1);
if (!is_safe)
- mysql_mutex_lock(&table_share->LOCK_ha_data);
+ mysql_mutex_lock(&table_share->LOCK_share);
if (stats_cb->stats_can_be_read)
{
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(0);
}
@@ -1827,7 +2007,7 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share,
if (!table_stats)
{
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(1);
}
memset(table_stats, 0, sizeof(Table_statistics));
@@ -1900,14 +2080,85 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share,
stats_cb->stats_can_be_read= TRUE;
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
-
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(0);
}
/**
+ @brief
+ Allocate memory for the histogram used by a table share
+
+ @param
+ thd Thread handler
+ @param
+ table_share Table share for which the memory for histogram data is allocated
+ @param
+ is_safe TRUE <-> at any time only one thread can perform the function
+
+ @note
+ The function allocates memory for the histograms built for a table in the
+ table's share memory with the intention to read the histogram data there
+ from the persistent statistical table mysql.column_stats.
+ The memory is allocated in the table_share's mem_root.
+ If the parameter is_safe is TRUE then it is guaranteed that at any given time
+ only one thread executes the code of the function.
+
+ @retval
+ 0 If the memory for the histograms has been successfully allocated
+ @retval
+ 1 Otherwise
+
+ @note
+ Currently the function is always called with the parameter is_safe set
+ to FALSE.
+*/
+
+static
+int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share,
+ bool is_safe)
+{
+ TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb;
+
+ DBUG_ENTER("alloc_histograms_for_table_share");
+
+ if (!is_safe)
+ mysql_mutex_lock(&table_share->LOCK_share);
+
+ if (stats_cb->histograms_can_be_read)
+ {
+ if (!is_safe)
+ mysql_mutex_unlock(&table_share->LOCK_share);
+ DBUG_RETURN(0);
+ }
+
+ Table_statistics *table_stats= stats_cb->table_stats;
+ ulong total_hist_size= table_stats->total_hist_size;
+
+ if (total_hist_size && !table_stats->histograms)
+ {
+ uchar *histograms= (uchar *) alloc_root(&stats_cb->mem_root,
+ total_hist_size);
+ if (!histograms)
+ {
+ if (!is_safe)
+ mysql_mutex_unlock(&table_share->LOCK_share);
+ DBUG_RETURN(1);
+ }
+ memset(histograms, 0, total_hist_size);
+ table_stats->histograms= histograms;
+ stats_cb->histograms_can_be_read= TRUE;
+ }
+
+ if (!is_safe)
+ mysql_mutex_unlock(&table_share->LOCK_share);
+
+ DBUG_RETURN(0);
+
+}
+
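The memory layout this function prepares can be sketched standalone, assuming the same convention as alloc_statistics_for_table(): one contiguous area of total_hist_size bytes, zero-filled, later carved into per-column slices whose sizes come from each column's hist_size. The plain vector standing in for the mem_root and the column sizes are illustrative.

  #include <cstdio>
  #include <vector>

  int main()
  {
    // hist_size read from column_stats for each column; 0 = no histogram.
    size_t hist_size[]= { 255, 0, 510, 255 };
    size_t n_cols= sizeof(hist_size) / sizeof(hist_size[0]);

    size_t total_hist_size= 0;
    for (size_t i= 0; i < n_cols; i++)
      total_hist_size+= hist_size[i];

    // One zero-initialized allocation (the mem_root allocation in the server).
    std::vector<unsigned char> area(total_hist_size, 0);

    // Carve it into per-column slices, advancing the cursor only for
    // columns that actually have a histogram.
    unsigned char *cursor= &area[0];
    for (size_t i= 0; i < n_cols; i++)
    {
      unsigned char *slice= hist_size[i] ? cursor : 0;
      printf("column %lu: %lu bytes at offset %ld\n",
             (unsigned long) i, (unsigned long) hist_size[i],
             slice ? (long) (slice - &area[0]) : -1L);
      cursor+= hist_size[i];
    }
    return 0;
  }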
+/**
@brief
Initialize the aggregation fields to collect statistics on a column
@@ -2005,14 +2256,29 @@ void Column_statistics_collected::finish(ha_rows rows)
set_not_null(COLUMN_STAT_AVG_LENGTH);
}
if (count_distinct)
- {
- ulonglong distincts= count_distinct->get_value();
+ {
+ ulonglong distincts;
+ uint hist_size= count_distinct->get_hist_size();
+ if (hist_size == 0)
+ distincts= count_distinct->get_value();
+ else
+ distincts= count_distinct->get_value_with_histogram(rows - nulls);
if (distincts)
{
val= (double) (rows - nulls) / distincts;
set_avg_frequency(val);
set_not_null(COLUMN_STAT_AVG_FREQUENCY);
}
+ else
+ hist_size= 0;
+ histogram.set_size(hist_size);
+ set_not_null(COLUMN_STAT_HIST_SIZE);
+ if (hist_size && distincts)
+ {
+ set_not_null(COLUMN_STAT_HIST_TYPE);
+ histogram.set_values(count_distinct->get_histogram());
+ set_not_null(COLUMN_STAT_HISTOGRAM);
+ }
delete count_distinct;
count_distinct= NULL;
}
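To make the arithmetic in finish() concrete under assumed numbers: with 10000 scanned rows, 400 NULLs and 1200 distinct non-NULL values reported by the walk, the average frequency is (10000 - 400) / 1200 = 8, i.e. each distinct value occurs 8 times on average; the histogram size is kept only when a histogram was requested and at least one distinct value was seen.

  #include <cstdio>

  int main()
  {
    double rows= 10000, nulls= 400, distincts= 1200;
    double avg_frequency= (rows - nulls) / distincts;   // 8.0
    printf("avg_frequency = %.1f\n", avg_frequency);
    return 0;
  }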
@@ -2233,16 +2499,19 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
table->collected_stats->cardinality= rows;
}
+ bitmap_clear_all(table->write_set);
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
table_field= *field_ptr;
if (!bitmap_is_set(table->read_set, table_field->field_index))
continue;
+ bitmap_set_bit(table->write_set, table_field->field_index);
if (!rc)
table_field->collected_stats->finish(rows);
else
table_field->collected_stats->cleanup();
}
+  bitmap_clear_all(table->write_set);
if (!rc)
{
@@ -2420,6 +2689,7 @@ int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
Field **field_ptr;
KEY *key_info, *key_info_end;
TABLE_SHARE *table_share= table->s;
+ Table_statistics *read_stats= table_share->stats_cb.table_stats;
DBUG_ENTER("read_statistics_for_table");
@@ -2431,16 +2701,18 @@ int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
/* Read statistics from the statistical table column_stats */
stat_table= stat_tables[COLUMN_STAT].table;
+ ulong total_hist_size= 0;
Column_stat column_stat(stat_table, table);
for (field_ptr= table_share->field; *field_ptr; field_ptr++)
{
table_field= *field_ptr;
column_stat.set_key_fields(table_field);
column_stat.get_stat_values();
+ total_hist_size+= table_field->read_stats->histogram.get_size();
}
+ read_stats->total_hist_size= total_hist_size;
/* Read statistics from the statistical table index_stats */
- Table_statistics *read_stats= table_share->stats_cb.table_stats;
stat_table= stat_tables[INDEX_STAT].table;
Index_stat index_stat(stat_table, table);
for (key_info= table_share->key_info,
@@ -2558,10 +2830,14 @@ bool statistics_for_tables_is_needed(THD *thd, TABLE_LIST *tables)
TABLE_SHARE *table_share= tl->table->s;
if (table_share &&
table_share->stats_cb.stats_can_be_read &&
- !table_share->stats_cb.stats_is_read)
+ (!table_share->stats_cb.stats_is_read ||
+ (!table_share->stats_cb.histograms_are_read &&
+ thd->variables.optimizer_use_condition_selectivity > 3)))
return TRUE;
if (table_share->stats_cb.stats_is_read)
tl->table->stats_is_read= TRUE;
+ if (table_share->stats_cb.histograms_are_read)
+ tl->table->histograms_are_read= TRUE;
}
}
@@ -2571,6 +2847,73 @@ bool statistics_for_tables_is_needed(THD *thd, TABLE_LIST *tables)
/**
@brief
+ Read histogram for a table from the persistent statistical tables
+
+ @param
+ thd The thread handle
+ @param
+ table The table to read histograms for
+ @param
+ stat_tables The array of TABLE_LIST objects for statistical tables
+
+ @details
+ For the statistical table column_stats the function looks for the rows
+ from this table that contain statistical data on 'table'. If such rows
+ are found the histograms from them are read into the memory allocated
+ for histograms of 'table'. Later, during query processing, these histograms
+ can be used by the optimizer.
+ The parameter stat_tables should point to an array of TABLE_LIST
+ objects for all statistical tables linked into a list. All statistical
+ tables are supposed to be opened.
+ The function is called by read_statistics_for_tables_if_needed().
+
+ @retval
+ 0 If data has been successfully read for the table
+ @retval
+ 1 Otherwise
+
+ @note
+ Objects of the helper class Column_stat are currently employed to read
+ histograms from the statistical table column_stats.
+*/
+
+static
+int read_histograms_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
+{
+ TABLE_SHARE *table_share= table->s;
+
+ DBUG_ENTER("read_histograms_for_table");
+
+ if (!table_share->stats_cb.histograms_can_be_read)
+ {
+ (void) alloc_histograms_for_table_share(thd, table_share, FALSE);
+ }
+ if (table_share->stats_cb.histograms_can_be_read &&
+ !table_share->stats_cb.histograms_are_read)
+ {
+ Field **field_ptr;
+ uchar *histogram= table_share->stats_cb.table_stats->histograms;
+ TABLE *stat_table= stat_tables[COLUMN_STAT].table;
+ Column_stat column_stat(stat_table, table);
+ for (field_ptr= table_share->field; *field_ptr; field_ptr++)
+ {
+ Field *table_field= *field_ptr;
+ uint hist_size= table_field->read_stats->histogram.get_size();
+ if (hist_size)
+ {
+ column_stat.set_key_fields(table_field);
+ table_field->read_stats->histogram.set_values(histogram);
+ column_stat.get_histogram_value();
+ histogram+= hist_size;
+ }
+ }
+ }
+
+ DBUG_RETURN(0);
+}
+
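The shape of the surrounding read path is a lazy, idempotent load: allocate the histogram area if the share does not have one yet, read the blobs once, and skip everything when the optimizer does not ask for histograms. A minimal sketch of that control flow, with illustrative flag and function names that only mimic the ones in this file:

  #include <cstdio>

  struct StatsCB { bool histograms_can_be_read; bool histograms_are_read; };

  // Lazy, idempotent load: mirrors the "allocate if needed, read once" shape.
  static void load_histograms_if_needed(StatsCB *cb, unsigned use_cond_sel)
  {
    if (use_cond_sel <= 3)                 // histograms not wanted
      return;
    if (!cb->histograms_can_be_read)
      cb->histograms_can_be_read= true;    // alloc_histograms_for_table_share()
    if (cb->histograms_can_be_read && !cb->histograms_are_read)
    {
      printf("reading histograms from mysql.column_stats\n");
      cb->histograms_are_read= true;       // read_histograms_for_table()
    }
  }

  int main()
  {
    StatsCB cb= { false, false };
    load_histograms_if_needed(&cb, 4);     // reads
    load_histograms_if_needed(&cb, 4);     // no-op on the second call
    return 0;
  }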
+/**
+ @brief
Read statistics for tables from a table list if it is needed
@param
@@ -2596,7 +2939,7 @@ int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables)
TABLE_LIST stat_tables[STATISTICS_TABLES];
Open_tables_backup open_tables_backup;
- DBUG_ENTER("read_statistics_for_table_if_needed");
+ DBUG_ENTER("read_statistics_for_tables_if_needed");
DEBUG_SYNC(thd, "statistics_read_start");
@@ -2623,6 +2966,14 @@ int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables)
}
if (table_share->stats_cb.stats_is_read)
tl->table->stats_is_read= TRUE;
+ if (thd->variables.optimizer_use_condition_selectivity > 3 &&
+ table_share && !table_share->stats_cb.histograms_are_read)
+ {
+ (void) read_histograms_for_table(thd, tl->table, stat_tables);
+ table_share->stats_cb.histograms_are_read= TRUE;
+ }
+ if (table_share->stats_cb.stats_is_read)
+ tl->table->histograms_are_read= TRUE;
}
}
@@ -3057,3 +3408,152 @@ void set_statistics_for_table(THD *thd, TABLE *table)
key_info->read_stats->get_avg_frequency(0) > 0.5);
}
}
+
+
+/**
+ @brief
+ Get the average frequency for a column
+
+ @param
+ field The column whose average frequency is required
+
+ @retval
+ The required average frequency
+*/
+
+double get_column_avg_frequency(Field * field)
+{
+ double res;
+ TABLE *table= field->table;
+
+ /*
+ Statistics is shared by table instances and is accessed through
+ the table share. If table->s->field is not set for 'table', then
+ no column statistics are available for the table.
+ */
+ if (!table->s->field)
+ {
+ res= table->stat_records();
+ return res;
+ }
+
+ Column_statistics *col_stats= table->s->field[field->field_index]->read_stats;
+
+ if (!col_stats)
+ res= table->stat_records();
+ else
+ res= col_stats->get_avg_frequency();
+ return res;
+}
+
+
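The fallback logic above reduces to two branches, sketched here with stand-in types (only the avg_frequency field is modelled; the numbers are illustrative): without per-column statistics the estimate degrades to the table cardinality, otherwise the stored average frequency is returned as-is.

  #include <cstdio>

  struct ColStats { double avg_frequency; };

  static double column_avg_frequency(const ColStats *col_stats,
                                     double table_records)
  {
    // No per-column statistics: fall back to the table cardinality,
    // i.e. assume the worst case of a single value in the column.
    if (!col_stats)
      return table_records;
    return col_stats->avg_frequency;
  }

  int main()
  {
    ColStats s= { 8.0 };
    printf("%.1f\n", column_avg_frequency(&s, 10000));   // 8.0
    printf("%.1f\n", column_avg_frequency(NULL, 10000)); // 10000.0
    return 0;
  }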
+/**
+ @brief
+ Estimate the number of rows in a column range using data from stat tables
+
+ @param
+ field The column whose range cardinality is to be estimated
+ @param
+ min_endp The left end of the range whose cardinality is required
+ @param
+ max_endp The right end of the range whose cardinality is required
+ @param
+ range_flag The range flags
+
+ @details
+ The function gets an estimate of the number of rows in a column range
+ using the statistical data from the table column_stats.
+
+ @retval
+ The required estimate of the rows in the column range
+*/
+
+double get_column_range_cardinality(Field *field,
+ key_range *min_endp,
+ key_range *max_endp,
+ uint range_flag)
+{
+ double res;
+ TABLE *table= field->table;
+ Column_statistics *col_stats= table->field[field->field_index]->read_stats;
+ double tab_records= table->stat_records();
+
+ if (!col_stats)
+ return tab_records;
+
+ double col_nulls= tab_records * col_stats->get_nulls_ratio();
+
+ double col_non_nulls= tab_records - col_nulls;
+
+ bool nulls_incl= field->null_ptr && min_endp && min_endp->key[0] &&
+ !(range_flag & NEAR_MIN);
+
+ if (col_non_nulls < 1)
+ res= 0;
+ else if (min_endp && max_endp && min_endp->length == max_endp->length &&
+ !memcmp(min_endp->key, max_endp->key, min_endp->length))
+ {
+ if (nulls_incl)
+ {
+ /* This is a single-point NULL range */
+ res= col_nulls;
+ }
+ else
+ {
+ double avg_frequency= col_stats->get_avg_frequency();
+ res= avg_frequency;
+ if (avg_frequency > 1.0 + 0.000001 &&
+ col_stats->min_value && col_stats->max_value)
+ {
+ Histogram *hist= &col_stats->histogram;
+ if (hist->is_available())
+ {
+ double pos= field->pos_in_interval(col_stats->min_value,
+ col_stats->max_value);
+ res= col_non_nulls *
+ hist->point_selectivity(pos,
+ avg_frequency / col_non_nulls);
+ }
+ }
+ }
+ }
+ else
+ {
+ if (col_stats->min_value && col_stats->max_value)
+ {
+ double sel, min_mp_pos, max_mp_pos;
+
+ if (min_endp && !(field->null_ptr && min_endp->key[0]))
+ {
+ store_key_image_to_rec(field, (uchar *) min_endp->key,
+ min_endp->length);
+ min_mp_pos= field->pos_in_interval(col_stats->min_value,
+ col_stats->max_value);
+ }
+ else
+ min_mp_pos= 0.0;
+ if (max_endp)
+ {
+ store_key_image_to_rec(field, (uchar *) max_endp->key,
+ max_endp->length);
+ max_mp_pos= field->pos_in_interval(col_stats->min_value,
+ col_stats->max_value);
+ }
+ else
+ max_mp_pos= 1.0;
+
+ Histogram *hist= &col_stats->histogram;
+ if (!hist->is_available())
+ sel= (max_mp_pos - min_mp_pos);
+ else
+ sel= hist->range_selectivity(min_mp_pos, max_mp_pos);
+ res= col_non_nulls * sel;
+ set_if_bigger(res, col_stats->get_avg_frequency());
+ }
+ else
+ res= col_non_nulls;
+ if (nulls_incl)
+ res+= col_nulls;
+ }
+ return res;
+}
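A worked numeric sketch of the estimate above, under assumed statistics: 10000 rows, a nulls_ratio of 0.04 and an avg_frequency of 8. A proper range gets the selectivity of the covered interval (from the histogram when one is available, otherwise the plain distance between the two interval positions), scaled by the non-NULL row count and never allowed below one value's worth of rows; a NULL point range is estimated by the NULL count alone.

  #include <algorithm>
  #include <cstdio>

  int main()
  {
    double tab_records= 10000, nulls_ratio= 0.04, avg_frequency= 8.0;
    double col_nulls= tab_records * nulls_ratio;        // 400
    double col_non_nulls= tab_records - col_nulls;      // 9600

    // Proper range without a histogram: selectivity is the distance
    // between the two pos_in_interval() results.
    double min_pos= 0.20, max_pos= 0.45;
    double sel_no_hist= max_pos - min_pos;              // 0.25
    double rows_est= col_non_nulls * sel_no_hist;       // 2400
    rows_est= std::max(rows_est, avg_frequency);        // floor at avg_frequency
    printf("range estimate: %.0f rows\n", rows_est);

    // Single-point NULL range: only the NULLs qualify.
    printf("IS NULL estimate: %.0f rows\n", col_nulls);
    return 0;
  }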
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
index 2b40d56fea4..c1c80921861 100644
--- a/sql/sql_statistics.h
+++ b/sql/sql_statistics.h
@@ -16,15 +16,6 @@
#ifndef SQL_STATISTICS_H
#define SQL_STATISTICS_H
-/*
- These enumeration types comprise the dictionary of three
- statistical tables table_stat, column_stat and index_stat
- as they defined in ../scripts/mysql_system_tables.sql.
-
- It would be nice if the declarations of these types were
- generated automatically by the table definitions.
-*/
-
typedef
enum enum_use_stat_tables_mode
{
@@ -33,6 +24,13 @@ enum enum_use_stat_tables_mode
PEFERABLY,
} Use_stat_tables_mode;
+typedef
+enum enum_histogram_type
+{
+ SINGLE_PREC_HB,
+ DOUBLE_PREC_HB
+} Histogram_type;
+
enum enum_stat_tables
{
TABLE_STAT,
@@ -40,6 +38,16 @@ enum enum_stat_tables
INDEX_STAT,
};
+
+/*
+ These enumeration types comprise the dictionary of three
+ statistical tables table_stat, column_stat and index_stat
+ as they are defined in ../scripts/mysql_system_tables.sql.
+
+ It would be nice if the declarations of these types were
+ generated automatically by the table definitions.
+*/
+
enum enum_table_stat_col
{
TABLE_STAT_DB_NAME,
@@ -56,7 +64,10 @@ enum enum_column_stat_col
COLUMN_STAT_MAX_VALUE,
COLUMN_STAT_NULLS_RATIO,
COLUMN_STAT_AVG_LENGTH,
- COLUMN_STAT_AVG_FREQUENCY
+ COLUMN_STAT_AVG_FREQUENCY,
+ COLUMN_STAT_HIST_SIZE,
+ COLUMN_STAT_HIST_TYPE,
+ COLUMN_STAT_HISTOGRAM
};
enum enum_index_stat_col
@@ -90,6 +101,160 @@ int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col,
const char *new_name);
void set_statistics_for_table(THD *thd, TABLE *table);
+double get_column_avg_frequency(Field * field);
+
+double get_column_range_cardinality(Field *field,
+ key_range *min_endp,
+ key_range *max_endp,
+ uint range_flag);
+
+class Histogram
+{
+
+private:
+ Histogram_type type;
+ uint8 size;
+ uchar *values;
+
+ uint prec_factor()
+ {
+ switch (type) {
+ case SINGLE_PREC_HB:
+ return ((uint) (1 << 8) - 1);
+ case DOUBLE_PREC_HB:
+ return ((uint) (1 << 16) - 1);
+ }
+ return 1;
+ }
+
+public:
+ uint get_width()
+ {
+ switch (type) {
+ case SINGLE_PREC_HB:
+ return size;
+ case DOUBLE_PREC_HB:
+ return size / 2;
+ }
+ return 0;
+ }
+
+private:
+ uint get_value(uint i)
+ {
+ switch (type) {
+ case SINGLE_PREC_HB:
+ return (uint) (((uint8 *) values)[i]);
+ case DOUBLE_PREC_HB:
+ return (uint) (((uint16 *) values)[i]);
+ }
+ return 0;
+ }
+
+ uint find_bucket(double pos, bool first)
+ {
+ uint val= (uint) (pos * prec_factor());
+ int lp= 0;
+ int rp= get_width() - 1;
+ int d= get_width() / 2;
+ uint i= lp + d;
+ for ( ; d; d= (rp - lp) / 2, i= lp + d)
+ {
+ if (val == get_value(i))
+ break;
+ if (val < get_value(i))
+ rp= i;
+ else if (val > get_value(i + 1))
+ lp= i + 1;
+ else
+ break;
+ }
+ if (val == get_value(i))
+ {
+ if (first)
+ {
+ while(i && val == get_value(i - 1))
+ i--;
+ }
+ else
+ {
+ while(i + 1 < get_width() && val == get_value(i + 1))
+ i++;
+ }
+ }
+ return i;
+ }
+
+public:
+
+ uint get_size() { return (uint) size; }
+
+ Histogram_type get_type() { return type; }
+
+ uchar *get_values() { return (uchar *) values; }
+
+ void set_size (ulonglong sz) { size= (uint8) sz; }
+
+ void set_type (Histogram_type t) { type= t; }
+
+ void set_values (uchar *vals) { values= (uchar *) vals; }
+
+ bool is_available() { return get_size() > 0 && get_values(); }
+
+ void set_value(uint i, double val)
+ {
+ switch (type) {
+ case SINGLE_PREC_HB:
+ ((uint8 *) values)[i]= (uint8) (val * prec_factor());
+ return;
+ case DOUBLE_PREC_HB:
+ ((uint16 *) values)[i]= (uint16) (val * prec_factor());
+ return;
+ }
+ }
+
+ void set_prev_value(uint i)
+ {
+ switch (type) {
+ case SINGLE_PREC_HB:
+ ((uint8 *) values)[i]= ((uint8 *) values)[i-1];
+ return;
+ case DOUBLE_PREC_HB:
+ ((uint16 *) values)[i]= ((uint16 *) values)[i-1];
+ return;
+ }
+ }
+
+ double range_selectivity(double min_pos, double max_pos)
+ {
+ double sel;
+ double bucket_sel= 1.0/(get_width() + 1);
+ uint min= find_bucket(min_pos, TRUE);
+ uint max= find_bucket(max_pos, FALSE);
+ sel= bucket_sel * (max - min + 1);
+ return sel;
+ }
+
+ double point_selectivity(double pos, double avg_sel)
+ {
+ double sel;
+ double bucket_sel= 1.0/(get_width() + 1);
+ uint min= find_bucket(pos, TRUE);
+ uint max= min;
+ while (max + 1 < get_width() && get_value(max + 1) == get_value(max))
+ max++;
+ double inv_prec_factor= (double) 1.0 / prec_factor();
+ double width= (max + 1 == get_width() ?
+ 1.0 : get_value(max) * inv_prec_factor) -
+ (min == 0 ?
+ 0.0 : get_value(min-1) * inv_prec_factor);
+ sel= avg_sel * (bucket_sel * (max + 1 - min)) / width;
+ return sel;
+ }
+
+};
+
+
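The bucket arithmetic in the class above can be modelled standalone for the SINGLE_PREC_HB case: endpoints are positions in [0,1] scaled to one byte (prec_factor 255), every bucket carries 1/(width+1) of the rows, and a range covering k buckets gets selectivity k/(width+1). The simplified class below mirrors the arithmetic only (with a linear scan instead of find_bucket()'s binary search); it is not the server's Histogram.

  #include <cstdio>
  #include <vector>

  // Simplified single-byte-precision histogram: equal-height buckets whose
  // endpoints are positions in [0, 1] scaled by prec_factor = 255.
  struct SimpleHistogram
  {
    std::vector<unsigned char> values;
    unsigned prec_factor() const { return 255; }
    unsigned width() const { return (unsigned) values.size(); }

    void set_value(unsigned i, double pos)
    { values[i]= (unsigned char) (pos * prec_factor()); }

    // First bucket whose endpoint is >= pos.
    unsigned find_bucket(double pos) const
    {
      unsigned val= (unsigned) (pos * prec_factor());
      unsigned i= 0;
      while (i + 1 < width() && values[i] < val)
        i++;
      return i;
    }

    double range_selectivity(double min_pos, double max_pos) const
    {
      double bucket_sel= 1.0 / (width() + 1);
      return bucket_sel * (find_bucket(max_pos) - find_bucket(min_pos) + 1);
    }
  };

  int main()
  {
    SimpleHistogram h;
    h.values.resize(4);
    double endpoints[4]= { 0.25, 0.50, 0.75, 0.90 };
    for (unsigned i= 0; i < 4; i++)
      h.set_value(i, endpoints[i]);
    // A range spanning roughly half of the value domain covers 2 buckets
    // out of 5, so its selectivity comes out as 0.40.
    printf("selectivity = %.2f\n", h.range_selectivity(0.30, 0.70));
    return 0;
  }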
class Columns_statistics;
class Index_statistics;
@@ -116,8 +281,9 @@ public:
uchar *min_max_record_buffers; /* Record buffers for min/max values */
Column_statistics *column_stats; /* Array of statistical data for columns */
Index_statistics *index_stats; /* Array of statistical data for indexes */
- ulong *idx_avg_frequency; /* Array of records per key for index prefixes */
-
+ ulong *idx_avg_frequency; /* Array of records per key for index prefixes */
+ ulong total_hist_size; /* Total size of all histograms */
+ uchar *histograms; /* Sequence of histograms */
};
@@ -172,10 +338,12 @@ private:
public:
+ Histogram histogram;
+
void set_all_nulls()
{
column_stat_nulls=
- ((1 << (COLUMN_STAT_AVG_FREQUENCY-COLUMN_STAT_COLUMN_NAME))-1) <<
+ ((1 << (COLUMN_STAT_HISTOGRAM-COLUMN_STAT_COLUMN_NAME))-1) <<
(COLUMN_STAT_COLUMN_NAME+1);
}
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 8ccc8aff365..ddac315f80f 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -758,7 +758,7 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length)
{
if (from->Alloced_length >= from_length)
return from;
- if (from->alloced || !to || from == to)
+ if ((from->alloced && (from->Alloced_length != 0)) || !to || from == to)
{
(void) from->realloc(from_length);
return from;
@@ -1018,7 +1018,8 @@ outp:
characters as necessary.
Does not add the enclosing quotes, this is left up to caller.
*/
-void String::append_for_single_quote(const char *st, uint len)
+#define APPEND(X) if (append(X)) return 1; else break
+bool String::append_for_single_quote(const char *st, uint len)
{
const char *end= st+len;
for (; st < end; st++)
@@ -1026,28 +1027,16 @@ void String::append_for_single_quote(const char *st, uint len)
uchar c= *st;
switch (c)
{
- case '\\':
- append(STRING_WITH_LEN("\\\\"));
- break;
- case '\0':
- append(STRING_WITH_LEN("\\0"));
- break;
- case '\'':
- append(STRING_WITH_LEN("\\'"));
- break;
- case '\n':
- append(STRING_WITH_LEN("\\n"));
- break;
- case '\r':
- append(STRING_WITH_LEN("\\r"));
- break;
- case '\032': // Ctrl-Z
- append(STRING_WITH_LEN("\\Z"));
- break;
- default:
- append(c);
+ case '\\': APPEND(STRING_WITH_LEN("\\\\"));
+ case '\0': APPEND(STRING_WITH_LEN("\\0"));
+ case '\'': APPEND(STRING_WITH_LEN("\\'"));
+ case '\n': APPEND(STRING_WITH_LEN("\\n"));
+ case '\r': APPEND(STRING_WITH_LEN("\\r"));
+ case '\032': APPEND(STRING_WITH_LEN("\\Z"));
+ default: APPEND(c);
}
}
+ return 0;
}
void String::print(String *str)
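The escaping whose failures append_for_single_quote() now reports maps each special byte to a two-character sequence and leaves everything else untouched, without adding the enclosing quotes. A standalone illustration of the same mapping over std::string (an illustration only, not the server's String class):

  #include <cstdio>
  #include <string>

  // Escape a byte sequence for use inside single quotes, without adding
  // the quotes themselves (same mapping as the switch above).
  static std::string escape_for_single_quote(const std::string &in)
  {
    std::string out;
    for (size_t i= 0; i < in.size(); i++)
    {
      switch (in[i])
      {
      case '\\':   out+= "\\\\"; break;
      case '\0':   out+= "\\0";  break;
      case '\'':   out+= "\\'";  break;
      case '\n':   out+= "\\n";  break;
      case '\r':   out+= "\\r";  break;
      case '\032': out+= "\\Z";  break;
      default:     out+= in[i];
      }
    }
    return out;
  }

  int main()
  {
    std::string s("it's\na\\path");
    printf("%s\n", escape_for_single_quote(s).c_str());  // it\'s\na\\path
    return 0;
  }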
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 40096466a92..352dfbe9fa3 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -2,8 +2,8 @@
#define SQL_STRING_INCLUDED
/*
- Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2008, 2011, Monty Program Ab
+ Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2008, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -327,6 +327,7 @@ public:
DBUG_ASSERT(!s.uses_buffer_owned_by(this));
free();
Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length;
+ str_charset=s.str_charset;
}
return *this;
}
@@ -382,6 +383,16 @@ public:
}
return 0;
}
+ bool append_hex(const char *src, uint32 srclen)
+ {
+ for (const char *end= src + srclen ; src != end ; src++)
+ {
+ if (append(_dig_vec_lower[((uchar) *src) >> 4]) ||
+ append(_dig_vec_lower[((uchar) *src) & 0x0F]))
+ return true;
+ }
+ return false;
+ }
bool fill(uint32 max_length,char fill);
void strip_sp();
friend int sortcmp(const String *a,const String *b, CHARSET_INFO *cs);
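append_hex() emits two lowercase hex digits per input byte, high nibble first, so an n-byte value grows the string by 2n characters. A standalone equivalent using only the standard library (the digit table stands in for _dig_vec_lower):

  #include <cstdio>
  #include <string>

  static std::string to_hex(const unsigned char *src, size_t len)
  {
    static const char dig[]= "0123456789abcdef";   // _dig_vec_lower equivalent
    std::string out;
    for (size_t i= 0; i < len; i++)
    {
      out+= dig[src[i] >> 4];     // high nibble first
      out+= dig[src[i] & 0x0F];
    }
    return out;
  }

  int main()
  {
    unsigned char key[]= { 0xDE, 0xAD, 0x00, 0x42 };
    printf("%s\n", to_hex(key, sizeof(key)).c_str());   // dead0042
    return 0;
  }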
@@ -480,7 +491,7 @@ public:
return FALSE;
}
void print(String *print);
- void append_for_single_quote(const char *st, uint len);
+ bool append_for_single_quote(const char *st, uint len);
/* Swap two string objects. Efficient way to exchange data without memcpy. */
void swap(String &s);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index db64c5afbc2..79c6d4cbaf9 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1,6 +1,6 @@
/*
- Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2010, 2011, Monty Program Ab
+ Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2010, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -22,7 +22,6 @@
#include "unireg.h"
#include "debug_sync.h"
#include "sql_table.h"
-#include "sql_rename.h" // do_rename
#include "sql_parse.h" // test_if_data_home_dir
#include "sql_cache.h" // query_cache_*
#include "sql_base.h" // open_table_uncached, lock_table_names
@@ -55,7 +54,7 @@
#include "sql_parse.h"
#include "sql_show.h"
#include "transaction.h"
-#include "datadict.h" // dd_frm_type()
+#include "sql_audit.h"
#ifdef __WIN__
#include <io.h>
@@ -74,13 +73,8 @@ static int copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
static bool prepare_blob_field(THD *thd, Create_field *sql_field);
static bool check_engine(THD *, const char *, const char *, HA_CREATE_INFO *);
-static int
-mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
- Alter_info *alter_info,
- bool tmp_table,
- uint *db_options,
- handler *file, KEY **key_info_buffer,
- uint *key_count, int create_table_mode);
+static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *,
+ uint *, handler *, KEY **, uint *, int);
/**
@brief Helper function for explain_filename
@@ -206,7 +200,6 @@ uint explain_filename(THD* thd,
uint to_length,
enum_explain_filename_mode explain_mode)
{
- uint res= 0;
char *to_p= to;
char *end_p= to_p + to_length;
const char *db_name= NULL;
@@ -217,7 +210,8 @@ uint explain_filename(THD* thd,
int part_name_len= 0;
const char *subpart_name= NULL;
int subpart_name_len= 0;
- uint name_variant= NORMAL_PART_NAME;
+ uint part_type= NORMAL_PART_NAME;
+
const char *tmp_p;
DBUG_ENTER("explain_filename");
DBUG_PRINT("enter", ("from '%s'", from));
@@ -236,17 +230,18 @@ uint explain_filename(THD* thd,
table_name= tmp_p;
}
tmp_p= table_name;
- while (!res && (tmp_p= strchr(tmp_p, '#')))
+ /* Check whether the table name contains partition tokens. */
+ while ((tmp_p= strchr(tmp_p, '#')))
{
tmp_p++;
switch (tmp_p[0]) {
case 'P':
case 'p':
if (tmp_p[1] == '#')
+ {
part_name= tmp_p + 2;
- else
- res= 1;
- tmp_p+= 2;
+ tmp_p+= 2;
+ }
break;
case 'S':
case 's':
@@ -256,48 +251,32 @@ uint explain_filename(THD* thd,
subpart_name= tmp_p + 3;
tmp_p+= 3;
}
- else if ((tmp_p[1] == 'Q' || tmp_p[1] == 'q') &&
- (tmp_p[2] == 'L' || tmp_p[2] == 'l') &&
- tmp_p[3] == '-')
- {
- tmp_p+= 4; /* sql- prefix found */
- }
- else
- res= 2;
break;
case 'T':
case 't':
if ((tmp_p[1] == 'M' || tmp_p[1] == 'm') &&
(tmp_p[2] == 'P' || tmp_p[2] == 'p') &&
tmp_p[3] == '#' && !tmp_p[4])
- name_variant= TEMP_PART_NAME;
- else
- res= 3;
- tmp_p+= 4;
+ {
+ part_type= TEMP_PART_NAME;
+ tmp_p+= 4;
+ }
break;
case 'R':
case 'r':
if ((tmp_p[1] == 'E' || tmp_p[1] == 'e') &&
(tmp_p[2] == 'N' || tmp_p[2] == 'n') &&
tmp_p[3] == '#' && !tmp_p[4])
- name_variant= RENAMED_PART_NAME;
- else
- res= 4;
- tmp_p+= 4;
+ {
+ part_type= RENAMED_PART_NAME;
+ tmp_p+= 4;
+ }
break;
default:
- res= 5;
+ /* Not partition name part. */
+ ;
}
}
- if (res)
- {
- /* Better to give something back if we fail parsing, than nothing at all */
- DBUG_PRINT("info", ("Error in explain_filename: %u", res));
- sql_print_warning("Invalid (old?) table or database name '%s'", from);
- DBUG_RETURN(my_snprintf(to, to_length,
- "<result %u when explaining filename '%s'>",
- res, from));
- }
if (part_name)
{
table_name_len= part_name - table_name - 3;
@@ -305,7 +284,7 @@ uint explain_filename(THD* thd,
subpart_name_len= strlen(subpart_name);
else
part_name_len= strlen(part_name);
- if (name_variant != NORMAL_PART_NAME)
+ if (part_type != NORMAL_PART_NAME)
{
if (subpart_name)
subpart_name_len-= 5;
@@ -347,9 +326,9 @@ uint explain_filename(THD* thd,
to_p= strnmov(to_p, " ", end_p - to_p);
else
to_p= strnmov(to_p, ", ", end_p - to_p);
- if (name_variant != NORMAL_PART_NAME)
+ if (part_type != NORMAL_PART_NAME)
{
- if (name_variant == TEMP_PART_NAME)
+ if (part_type == TEMP_PART_NAME)
to_p= strnmov(to_p, ER_THD_OR_DEFAULT(thd, ER_TEMPORARY_NAME),
end_p - to_p);
else
@@ -401,31 +380,14 @@ uint filename_to_tablename(const char *from, char *to, uint to_length
DBUG_ENTER("filename_to_tablename");
DBUG_PRINT("enter", ("from '%s'", from));
- if (!strncmp(from, tmp_file_prefix, tmp_file_prefix_length))
- {
- /* Temporary table name. */
- res= (strnmov(to, from, to_length) - to);
- }
- else
+ res= strconvert(&my_charset_filename, from, FN_REFLEN,
+ system_charset_info, to, to_length, &errors);
+ if (errors) // Old 5.0 name
{
- res= strconvert(&my_charset_filename, from, FN_REFLEN,
- system_charset_info, to, to_length, &errors);
- if (errors) // Old 5.0 name
- {
- res= (strxnmov(to, to_length, MYSQL50_TABLE_NAME_PREFIX, from, NullS) -
- to);
-#ifndef DBUG_OFF
- if (!stay_quiet) {
-#endif /* DBUG_OFF */
- sql_print_error("Invalid (old?) table or database name '%s'", from);
-#ifndef DBUG_OFF
- }
-#endif /* DBUG_OFF */
- /*
- TODO: add a stored procedure for fix table and database names,
- and mention its name in error log.
- */
- }
+ res= (strxnmov(to, to_length, MYSQL50_TABLE_NAME_PREFIX, from, NullS) -
+ to);
+ if (IF_DBUG(!stay_quiet,0))
+ sql_print_error("Invalid (old?) table or database name '%s'", from);
}
DBUG_PRINT("exit", ("to '%s'", to));
@@ -1844,11 +1806,10 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
if (flags & WFRM_WRITE_SHADOW)
{
if (mysql_prepare_create_table(lpt->thd, lpt->create_info, lpt->alter_info,
- /*tmp_table*/ 1,
&lpt->db_options, lpt->table->file,
&lpt->key_info_buffer, &lpt->key_count,
C_ALTER_TABLE))
- {
+ {
DBUG_RETURN(TRUE);
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -1871,13 +1832,23 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
#endif
/* Write shadow frm file */
lpt->create_info->table_options= lpt->db_options;
- if ((mysql_create_frm(lpt->thd, shadow_frm_name, lpt->db,
- lpt->table_name, lpt->create_info,
- lpt->alter_info->create_list, lpt->key_count,
- lpt->key_info_buffer, lpt->table->file)) ||
- lpt->table->file->ha_create_handler_files(shadow_path, NULL,
- CHF_CREATE_FLAG,
- lpt->create_info))
+ LEX_CUSTRING frm= build_frm_image(lpt->thd, lpt->table_name,
+ lpt->create_info,
+ lpt->alter_info->create_list,
+ lpt->key_count, lpt->key_info_buffer,
+ lpt->table->file);
+ if (!frm.str)
+ {
+ error= 1;
+ goto end;
+ }
+
+ int error= writefrm(shadow_path, lpt->db, lpt->table_name,
+ lpt->create_info->tmp_table(), frm.str, frm.length);
+ my_free(const_cast<uchar*>(frm.str));
+
+ if (error || lpt->table->file->ha_create_partitioning_metadata(shadow_path,
+ NULL, CHF_CREATE_FLAG))
{
mysql_file_delete(key_file_frm, shadow_frm_name, MYF(0));
error= 1;
@@ -1892,12 +1863,12 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
handlers that have the main version of the frm file stored in the
handler.
*/
- uchar *data;
+ const uchar *data;
size_t length;
if (readfrm(shadow_path, &data, &length) ||
packfrm(data, length, &lpt->pack_frm_data, &lpt->pack_frm_len))
{
- my_free(data);
+ my_free(const_cast<uchar*>(data));
my_free(lpt->pack_frm_data);
mem_alloc_error(length);
error= 1;
@@ -1915,7 +1886,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
*/
build_table_filename(path, sizeof(path) - 1, lpt->db,
lpt->table_name, "", 0);
- strxmov(frm_name, path, reg_ext, NullS);
+ strxnmov(frm_name, sizeof(frm_name), path, reg_ext, NullS);
/*
When we are changing to use new frm file we need to ensure that we
don't collide with another thread in process to open the frm file.
@@ -1928,14 +1899,14 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
*/
if (mysql_file_delete(key_file_frm, frm_name, MYF(MY_WME)) ||
#ifdef WITH_PARTITION_STORAGE_ENGINE
- lpt->table->file->ha_create_handler_files(path, shadow_path,
- CHF_DELETE_FLAG, NULL) ||
+ lpt->table->file->ha_create_partitioning_metadata(path, shadow_path,
+ CHF_DELETE_FLAG) ||
deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos) ||
(sync_ddl_log(), FALSE) ||
mysql_file_rename(key_file_frm,
shadow_frm_name, frm_name, MYF(MY_WME)) ||
- lpt->table->file->ha_create_handler_files(path, shadow_path,
- CHF_RENAME_FLAG, NULL))
+ lpt->table->file->ha_create_partitioning_metadata(path, shadow_path,
+ CHF_RENAME_FLAG))
#else
mysql_file_rename(key_file_frm,
shadow_frm_name, frm_name, MYF(MY_WME)))
@@ -2093,13 +2064,6 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
if (lock_table_names(thd, tables, NULL,
thd->variables.lock_wait_timeout, 0))
DBUG_RETURN(true);
- for (table= tables; table; table= table->next_local)
- {
- if (is_temporary_table(table))
- continue;
- tdc_remove_table(thd, TDC_RT_REMOVE_ALL, table->db, table->table_name,
- false);
- }
}
else
{
@@ -2315,8 +2279,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
bool is_trans;
char *db=table->db;
size_t db_length= table->db_length;
- handlerton *table_type;
- enum legacy_db_type frm_db_type= DB_TYPE_UNKNOWN;
+ handlerton *table_type= 0;
DBUG_PRINT("table", ("table_l: '%s'.'%s' table: 0x%lx s: 0x%lx",
table->db, table->table_name, (long) table->table,
@@ -2396,29 +2359,14 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
{
non_temp_tables_count++;
- if (thd->locked_tables_mode)
- {
- if (wait_while_table_is_used(thd, table->table, HA_EXTRA_NOT_USED))
- {
- error= -1;
- goto err;
- }
- close_all_tables_for_name(thd, table->table->s,
- HA_EXTRA_PREPARE_FOR_DROP, NULL);
- table->table= 0;
- }
-
- /* Check that we have an exclusive lock on the table to be dropped. */
DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, table->db,
table->table_name,
- MDL_EXCLUSIVE));
+ MDL_SHARED));
alias= (lower_case_table_names == 2) ? table->alias : table->table_name;
/* remove .frm file and engine files */
path_length= build_table_filename(path, sizeof(path) - 1, db, alias,
- reg_ext,
- table->internal_tmp_table ?
- FN_IS_TMP : 0);
+ reg_ext, 0);
/*
This handles the case where a "DROP" was executed and a regular
@@ -2451,14 +2399,9 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
}
}
DEBUG_SYNC(thd, "rm_table_no_locks_before_delete_table");
- DBUG_EXECUTE_IF("sleep_before_no_locks_delete_table",
- my_sleep(100000););
error= 0;
- if (drop_temporary ||
- ((access(path, F_OK) &&
- ha_create_table_from_engine(thd, db, alias)) ||
- (!drop_view &&
- dd_frm_type(thd, path, &frm_db_type) != FRMTYPE_TABLE)))
+ if ((drop_temporary || !ha_table_exists(thd, db, alias, &table_type) ||
+ (!drop_view && table_type == view_pseudo_hton)))
{
/*
One of the following cases happened:
@@ -2489,29 +2432,46 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
{
char *end;
/*
- Cannot use the db_type from the table, since that might have changed
- while waiting for the exclusive name lock.
+ It could happen that table's share in the table_def_cache
+ is the only thing that keeps the engine plugin loaded
+ (if it is uninstalled and waits for the ref counter to drop to 0).
+
+ In this case, the tdc_remove_table() below will release and unload
+ the plugin. And ha_delete_table() will get a dangling pointer.
+
+ Let's lock the plugin till the end of the statement.
*/
- if (frm_db_type == DB_TYPE_UNKNOWN)
+ if (table_type && table_type != view_pseudo_hton)
+ ha_lock_engine(thd, table_type);
+
+ if (thd->locked_tables_mode)
{
- dd_frm_type(thd, path, &frm_db_type);
- DBUG_PRINT("info", ("frm_db_type %d from %s", frm_db_type, path));
+ if (wait_while_table_is_used(thd, table->table, HA_EXTRA_NOT_USED,
+ TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE))
+ {
+ error= -1;
+ goto err;
+ }
+ /* the following internally does TDC_RT_REMOVE_ALL */
+ close_all_tables_for_name(thd, table->table->s,
+ HA_EXTRA_PREPARE_FOR_DROP, NULL);
+ table->table= 0;
}
- table_type= ha_resolve_by_legacy_type(thd, frm_db_type);
+ else
+ tdc_remove_table(thd, TDC_RT_REMOVE_ALL, table->db, table->table_name,
+ false);
+
+ /* Check that we have an exclusive lock on the table to be dropped. */
+ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, table->db,
+ table->table_name,
+ MDL_EXCLUSIVE));
+
// Remove extension for delete
*(end= path + path_length - reg_ext_length)= '\0';
- DBUG_PRINT("info", ("deleting table of type %d",
- (table_type ? table_type->db_type : 0)));
+
error= ha_delete_table(thd, table_type, path, db, table->table_name,
!dont_log_query);
- /* No error if non existent table and 'IF EXIST' clause or view */
- if ((error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) &&
- (if_exists || table_type == NULL))
- {
- error= 0;
- thd->clear_error();
- }
if (error == HA_ERR_ROW_IS_REFERENCED)
{
/* the table is referenced by a foreign key constraint */
@@ -2519,18 +2479,29 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
}
if (!error || error == ENOENT || error == HA_ERR_NO_SUCH_TABLE)
{
- int new_error;
+ int frm_delete_error, trigger_drop_error= 0;
/* Delete the table definition file */
strmov(end,reg_ext);
- if (!(new_error= mysql_file_delete(key_file_frm, path, MYF(MY_WME))))
+ frm_delete_error= mysql_file_delete(key_file_frm, path, MYF(MY_WME));
+ if (frm_delete_error)
+ frm_delete_error= my_errno;
+ else
{
non_tmp_table_deleted= TRUE;
- new_error= Table_triggers_list::drop_all_triggers(thd, db,
- table->table_name);
+ trigger_drop_error=
+ Table_triggers_list::drop_all_triggers(thd, db, table->table_name);
+ }
+
+ if (trigger_drop_error ||
+ (frm_delete_error && frm_delete_error != ENOENT))
+ error= 1;
+ else if (!frm_delete_error || !error || if_exists)
+ {
+ error= 0;
+ thd->clear_error();
}
- error|= new_error;
}
- non_tmp_error= error ? TRUE : non_tmp_error;
+ non_tmp_error= error ? TRUE : non_tmp_error;
}
if (error)
{
@@ -2540,6 +2511,13 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
wrong_tables.append('.');
wrong_tables.append(table->table_name);
}
+ else
+ {
+ PSI_CALL_drop_table_share(false, table->db, table->db_length,
+ table->table_name, table->table_name_length);
+ mysql_audit_drop_table(thd, table);
+ }
+
DBUG_PRINT("table", ("table: 0x%lx s: 0x%lx", (long) table->table,
table->table ? (long) table->table->s : (long) -1));
@@ -2677,11 +2655,18 @@ bool quick_rm_table(THD *thd, handlerton *base, const char *db,
handler *file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base);
if (!file)
DBUG_RETURN(true);
- (void) file->ha_create_handler_files(path, NULL, CHF_DELETE_FLAG, NULL);
+ (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
delete file;
}
if (!(flags & (FRM_ONLY|NO_HA_TABLE)))
error|= ha_delete_table(current_thd, base, path, db, table_name, 0);
+
+ if (likely(error == 0))
+ {
+ PSI_CALL_drop_table_share(flags & FN_IS_TMP, db, strlen(db),
+ table_name, strlen(table_name));
+ }
+
DBUG_RETURN(error);
}
@@ -2947,6 +2932,8 @@ int prepare_create_field(Create_field *sql_field,
case MYSQL_TYPE_NEWDATE:
case MYSQL_TYPE_TIME:
case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_TIME2:
+ case MYSQL_TYPE_DATETIME2:
case MYSQL_TYPE_NULL:
sql_field->pack_flag=f_settype((uint) sql_field->sql_type);
break;
@@ -2965,6 +2952,7 @@ int prepare_create_field(Create_field *sql_field,
(sql_field->decimals << FIELDFLAG_DEC_SHIFT));
break;
case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_TIMESTAMP2:
/* fall-through */
default:
sql_field->pack_flag=(FIELDFLAG_NUMBER |
@@ -3035,7 +3023,7 @@ void promote_first_timestamp_column(List<Create_field> *column_definitions)
while ((column_definition= it++) != NULL)
{
- if (column_definition->sql_type == MYSQL_TYPE_TIMESTAMP || // TIMESTAMP
+ if (is_timestamp_type(column_definition->sql_type) || // TIMESTAMP
column_definition->unireg_check == Field::TIMESTAMP_OLD_FIELD) // Legacy
{
if ((column_definition->flags & NOT_NULL_FLAG) != 0 && // NOT NULL,
@@ -3063,12 +3051,12 @@ void promote_first_timestamp_column(List<Create_field> *column_definitions)
thd Thread object.
create_info Create information (like MAX_ROWS).
alter_info List of columns and indexes to create
- tmp_table If a temporary table is to be created.
db_options INOUT Table options (like HA_OPTION_PACK_RECORD).
file The handler for the new table.
key_info_buffer OUT An array of KEY structs for the indexes.
key_count OUT The number of elements in the array.
- select_field_count The number of fields coming from a select table.
+ create_table_mode C_ORDINARY_CREATE, C_ALTER_TABLE,
+ C_CREATE_SELECT, C_ASSISTED_DISCOVERY
DESCRIPTION
Prepares the table and key structures for table creation.
@@ -3083,9 +3071,7 @@ void promote_first_timestamp_column(List<Create_field> *column_definitions)
static int
mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
- Alter_info *alter_info,
- bool tmp_table,
- uint *db_options,
+ Alter_info *alter_info, uint *db_options,
handler *file, KEY **key_info_buffer,
uint *key_count, int create_table_mode)
{
@@ -3101,6 +3087,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
List_iterator<Create_field> it2(alter_info->create_list);
uint total_uneven_bit_length= 0;
int select_field_count= C_CREATE_SELECT(create_table_mode);
+ bool tmp_table= create_table_mode == C_ALTER_TABLE;
DBUG_ENTER("mysql_prepare_create_table");
select_field_pos= alter_info->create_list.elements - select_field_count;
@@ -3387,8 +3374,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
sql_field->offset= record_offset;
if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER)
auto_increment++;
- if (parse_option_list(thd, &sql_field->option_struct,
- sql_field->option_list,
+ if (parse_option_list(thd, create_info->db_type, &sql_field->option_struct,
+ &sql_field->option_list,
create_info->db_type->field_options, FALSE,
thd->mem_root))
DBUG_RETURN(TRUE);
@@ -3418,15 +3405,13 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (auto_increment &&
(file->ha_table_flags() & HA_NO_AUTO_INCREMENT))
{
- my_message(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT,
- ER(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT), MYF(0));
+ my_error(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT, MYF(0), file->table_type());
DBUG_RETURN(TRUE);
}
if (blob_columns && (file->ha_table_flags() & HA_NO_BLOBS))
{
- my_message(ER_TABLE_CANT_HANDLE_BLOB, ER(ER_TABLE_CANT_HANDLE_BLOB),
- MYF(0));
+ my_error(ER_TABLE_CANT_HANDLE_BLOB, MYF(0), file->table_type());
DBUG_RETURN(TRUE);
}
@@ -3592,8 +3577,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_info->usable_key_parts= key_number;
key_info->algorithm= key->key_create_info.algorithm;
key_info->option_list= key->option_list;
- if (parse_option_list(thd, &key_info->option_struct,
- key_info->option_list,
+ if (parse_option_list(thd, create_info->db_type, &key_info->option_struct,
+ &key_info->option_list,
create_info->db_type->index_options, FALSE,
thd->mem_root))
DBUG_RETURN(TRUE);
@@ -3602,8 +3587,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
{
if (!(file->ha_table_flags() & HA_CAN_FULLTEXT))
{
- my_message(ER_TABLE_CANT_HANDLE_FT, ER(ER_TABLE_CANT_HANDLE_FT),
- MYF(0));
+ my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0), file->table_type());
DBUG_RETURN(TRUE);
}
}
@@ -3620,8 +3604,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
{
if (!(file->ha_table_flags() & HA_CAN_RTREEKEYS))
{
- my_message(ER_TABLE_CANT_HANDLE_SPKEYS, ER(ER_TABLE_CANT_HANDLE_SPKEYS),
- MYF(0));
+ my_error(ER_TABLE_CANT_HANDLE_SPKEYS, MYF(0), file->table_type());
DBUG_RETURN(TRUE);
}
if (key_info->user_defined_key_parts != 1)
@@ -3736,7 +3719,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
{
if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
{
- my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name.str);
+ my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name.str,
+ file->table_type());
DBUG_RETURN(TRUE);
}
if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type ==
@@ -3858,7 +3842,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
else if (length == 0 && (sql_field->flags & NOT_NULL_FLAG))
{
- my_error(ER_WRONG_KEY_COLUMN, MYF(0), column->field_name.str);
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(),
+ column->field_name.str);
DBUG_RETURN(TRUE);
}
if (length > file->max_key_part_length() && key->type != Key::FULLTEXT)
@@ -4000,7 +3985,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (thd->variables.sql_mode & MODE_NO_ZERO_DATE &&
!sql_field->def &&
- sql_field->sql_type == MYSQL_TYPE_TIMESTAMP &&
+ is_timestamp_type(sql_field->sql_type) &&
(sql_field->flags & NOT_NULL_FLAG) &&
(type == Field::NONE || type == Field::TIMESTAMP_UN_FIELD))
{
@@ -4023,6 +4008,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
}
+ if (create_info->tmp_table())
+ create_info->options|=HA_CREATE_DELAY_KEY_WRITE;
+
/* Give warnings for not supported table options */
#if defined(WITH_ARIA_STORAGE_ENGINE)
extern handlerton *maria_hton;
@@ -4035,8 +4023,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
file->engine_name()->str,
"TRANSACTIONAL=1");
- if (parse_option_list(thd, &create_info->option_struct,
- create_info->option_list,
+ if (parse_option_list(thd, file->partition_ht(), &create_info->option_struct,
+ &create_info->option_list,
file->partition_ht()->table_options, FALSE,
thd->mem_root))
DBUG_RETURN(TRUE);
@@ -4183,146 +4171,40 @@ void sp_prepare_create_field(THD *thd, Create_field *sql_field)
}
-/**
- Check that there is no frm file for given table
-
- @param old_path path to the old frm file
- @param path path to the frm file in new encoding
- @param db database name
- @param table_name table name
- @param alias table name for error message (for new encoding)
- @param issue_error should we issue error messages
-
- @retval FALSE there is no frm file
- @retval TRUE there is frm file
-*/
-
-bool check_table_file_presence(char *old_path,
- char *path,
- const char *db,
- const char *table_name,
- const char *alias,
- bool issue_error)
-{
- if (!access(path,F_OK))
- {
- if (issue_error)
- my_error(ER_TABLE_EXISTS_ERROR,MYF(0),alias);
- return TRUE;
- }
- {
- /*
- Check if file of the table in 5.0 file name encoding exists.
-
- Except case when it is the same table.
- */
- char tbl50[FN_REFLEN];
-#ifdef _WIN32
- if (check_if_legal_tablename(table_name) != 0)
- {
- /*
- Check for reserved device names for which access() returns 0
- (CON, AUX etc).
- */
- return FALSE;
- }
-#endif
- strxmov(tbl50, mysql_data_home, "/", db, "/", table_name, NullS);
- fn_format(tbl50, tbl50, "", reg_ext, MY_UNPACK_FILENAME);
- if (!access(tbl50, F_OK) &&
- (old_path == NULL ||
- strcmp(old_path, tbl50) != 0))
- {
- if (issue_error)
- {
- strxmov(tbl50, MYSQL50_TABLE_NAME_PREFIX, table_name, NullS);
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), tbl50);
- }
- return TRUE;
- }
- }
- return FALSE;
-}
-
-
-/**
- Create a table
-
- @param thd Thread object
- @param db Database
- @param table_name Table name
- @param path Path to table (i.e. to its .FRM file without
- the extension).
- @param create_info Create information (like MAX_ROWS)
- @param alter_info Description of fields and keys for new table
- @param internal_tmp_table Set to true if this is an internal temporary table
- (From ALTER TABLE)
- @param select_field_count Number of fields coming from SELECT part of
- CREATE TABLE ... SELECT statement. Must be zero
- for standard create of table.
- @param no_ha_table Indicates that only .FRM file (and PAR file if table
- is partitioned) needs to be created and not a table
- in the storage engine.
- @param[out] is_trans Identifies the type of engine where the table
- was created: either trans or non-trans.
- @param[out] key_info Array of KEY objects describing keys in table
- which was created.
- @param[out] key_count Number of keys in table which was created.
-
- If one creates a temporary table, this is automatically opened
-
- Note that this function assumes that caller already have taken
- exclusive metadata lock on table being created or used some other
- way to ensure that concurrent operations won't intervene.
- mysql_create_table() is a wrapper that can be used for this.
-
- @retval false OK
- @retval true error
-*/
-
-static
-bool create_table_impl(THD *thd,
- const char *db, const char *table_name,
- const char *path,
- HA_CREATE_INFO *create_info,
- Alter_info *alter_info,
- bool internal_tmp_table,
- bool no_ha_table,
- bool *is_trans,
- KEY **key_info,
- uint *key_count,
- int create_table_mode)
+handler *mysql_create_frm_image(THD *thd,
+ const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info, int create_table_mode,
+ KEY **key_info,
+ uint *key_count,
+ LEX_CUSTRING *frm)
{
- const char *alias;
uint db_options;
- handler *file;
- bool error= TRUE;
- DBUG_ENTER("create_table_impl");
- DBUG_PRINT("enter", ("db: '%s' table: '%s' tmp: %d",
- db, table_name, internal_tmp_table));
+ handler *file;
+ DBUG_ENTER("mysql_create_frm_image");
-
- /* Check for duplicate fields and check type of table to create */
if (!alter_info->create_list.elements)
{
- my_message(ER_TABLE_MUST_HAVE_COLUMNS, ER(ER_TABLE_MUST_HAVE_COLUMNS),
- MYF(0));
- DBUG_RETURN(TRUE);
+ my_error(ER_TABLE_MUST_HAVE_COLUMNS, MYF(0));
+ DBUG_RETURN(NULL);
}
+
if (check_engine(thd, db, table_name, create_info))
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(NULL);
set_table_default_charset(thd, create_info, (char*) db);
db_options= create_info->table_options;
- if (create_info->row_type == ROW_TYPE_DYNAMIC)
- db_options|=HA_OPTION_PACK_RECORD;
- alias= table_case_name(create_info, table_name);
+ if (create_table_mode != C_ALTER_TABLE_FRM_ONLY &&
+ create_info->row_type != ROW_TYPE_FIXED &&
+ create_info->row_type != ROW_TYPE_DEFAULT)
+ db_options|= HA_OPTION_PACK_RECORD;
+
if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
create_info->db_type)))
{
mem_alloc_error(sizeof(handler));
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(NULL);
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
partition_info *part_info= thd->work_part_info;
@@ -4339,7 +4221,7 @@ bool create_table_impl(THD *thd,
if (!part_info)
{
mem_alloc_error(sizeof(partition_info));
- DBUG_RETURN(TRUE);
+ goto err;
}
file->set_auto_partitions(part_info);
part_info->default_engine_type= create_info->db_type;
@@ -4355,12 +4237,11 @@ bool create_table_impl(THD *thd,
this information in the default_db_type variable, it is either
DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command.
*/
- Key *key;
handlerton *part_engine_type= create_info->db_type;
char *part_syntax_buf;
uint syntax_len;
handlerton *engine_type;
- if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
+ if (create_info->tmp_table())
{
my_error(ER_PARTITION_NO_TEMPORARY, MYF(0));
goto err;
@@ -4423,9 +4304,8 @@ bool create_table_impl(THD *thd,
delete file;
create_info->db_type= partition_hton;
if (!(file= get_ha_partition(part_info)))
- {
- DBUG_RETURN(TRUE);
- }
+ DBUG_RETURN(NULL);
+
/*
If we have default number of partitions or subpartitions we
might require to set-up the part_info object such that it
@@ -4467,203 +4347,212 @@ bool create_table_impl(THD *thd,
engine_type)))
{
mem_alloc_error(sizeof(handler));
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(NULL);
}
}
- /*
- Unless table's storage engine supports partitioning natively
- don't allow foreign keys on partitioned tables (they won't
- work work even with InnoDB beneath of partitioning engine).
- If storage engine handles partitioning natively (like NDB)
- foreign keys support is possible, so we let the engine decide.
- */
- if (create_info->db_type == partition_hton)
+ }
+ /*
+ Unless table's storage engine supports partitioning natively
+ don't allow foreign keys on partitioned tables (they won't
+ work work even with InnoDB beneath of partitioning engine).
+ If storage engine handles partitioning natively (like NDB)
+ foreign keys support is possible, so we let the engine decide.
+ */
+ if (create_info->db_type == partition_hton)
+ {
+ List_iterator_fast<Key> key_iterator(alter_info->key_list);
+ Key *key;
+ while ((key= key_iterator++))
{
- List_iterator_fast<Key> key_iterator(alter_info->key_list);
- while ((key= key_iterator++))
+ if (key->type == Key::FOREIGN_KEY)
{
- if (key->type == Key::FOREIGN_KEY)
- {
- my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
- goto err;
- }
+ my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
+ goto err;
}
}
}
#endif
- if (mysql_prepare_create_table(thd, create_info, alter_info,
- internal_tmp_table,
- &db_options, file,
- key_info, key_count,
+ if (mysql_prepare_create_table(thd, create_info, alter_info, &db_options,
+ file, key_info, key_count,
create_table_mode))
goto err;
+ create_info->table_options=db_options;
+
+ *frm= build_frm_image(thd, table_name, create_info,
+ alter_info->create_list, *key_count,
+ *key_info, file);
+
+ if (frm->str)
+ DBUG_RETURN(file);
+
+err:
+ delete file;
+ DBUG_RETURN(NULL);
+}
+
+
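(Annotation.) mysql_create_frm_image() returns the handler it opened and hands the serialized .frm image back through a LEX_CUSTRING that the caller later releases, as mysql_create_table_no_lock() and mysql_alter_table() do further down with my_free(frm.str). A hedged sketch of that ownership pattern; lex_custring_sk, handler_sk and create_frm_image_sk are stand-ins, not server definitions:

#include <cstddef>
#include <cstdlib>

// Stand-ins for the server types used in the sketch.
struct lex_custring_sk { const unsigned char *str; size_t length; };
struct handler_sk { /* engine-specific state elided */ };

// Assumed contract: returns NULL on error, otherwise *frm holds an
// allocated image that the caller must release.
handler_sk *create_frm_image_sk(lex_custring_sk *frm)
{
  frm->length = 64;                                       // pretend image
  frm->str = static_cast<unsigned char *>(calloc(1, frm->length));
  return frm->str ? new handler_sk() : NULL;
}

int main()
{
  lex_custring_sk frm = { 0, 0 };
  handler_sk *file = create_frm_image_sk(&frm);
  if (!file)
    return 1;
  // ... a rea_create_table()-style step would consume frm here ...
  free(const_cast<unsigned char *>(frm.str));             // caller owns frm.str
  delete file;
  return 0;
}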
+/**
+ Create a table
+
+ @param thd Thread object
+ @param db Database
+ @param table_name Table name
+ @param path Path to table (i.e. to its .FRM file without
+ the extension).
+ @param create_info Create information (like MAX_ROWS)
+ @param alter_info Description of fields and keys for new table
+ @param create_table_mode C_ORDINARY_CREATE, C_ALTER_TABLE, C_ASSISTED_DISCOVERY
+ or any positive number (for C_CREATE_SELECT).
+ @param[out] is_trans Identifies the type of engine where the table
+ was created: either trans or non-trans.
+ @param[out] key_info Array of KEY objects describing keys in table
+ which was created.
+ @param[out] key_count Number of keys in table which was created.
+
+ If one creates a temporary table, it is automatically opened
- if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
- create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE;
+ Note that this function assumes that the caller has already taken
+ an exclusive metadata lock on the table being created, or has used
+ some other way to ensure that concurrent operations won't intervene.
+ mysql_create_table() is a wrapper that can be used for this.
- /* Check if table already exists */
- if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
- find_temporary_table(thd, db, table_name))
+ @retval false OK
+ @retval true error
+*/
+
+static
+bool create_table_impl(THD *thd,
+ const char *db, const char *table_name,
+ const char *path,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info,
+ int create_table_mode,
+ bool *is_trans,
+ KEY **key_info,
+ uint *key_count,
+ LEX_CUSTRING *frm)
+{
+ const char *alias;
+ handler *file= 0;
+ bool error= TRUE;
+ bool frm_only= create_table_mode == C_ALTER_TABLE_FRM_ONLY;
+ bool internal_tmp_table= create_table_mode == C_ALTER_TABLE || frm_only;
+ DBUG_ENTER("mysql_create_table_no_lock");
+ DBUG_PRINT("enter", ("db: '%s' table: '%s' tmp: %d",
+ db, table_name, internal_tmp_table));
+
+ if (!my_use_symdir || (thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE))
{
- if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
- {
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
- alias);
- error= 0;
- goto err;
- }
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
- goto err;
+ if (create_info->data_file_name)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
+ "DATA DIRECTORY");
+ if (create_info->index_file_name)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
+ "INDEX DIRECTORY");
+ create_info->data_file_name= create_info->index_file_name= 0;
}
+ else
+ if (error_if_data_home_dir(create_info->data_file_name, "DATA DIRECTORY") ||
+ error_if_data_home_dir(create_info->index_file_name, "INDEX DIRECTORY")||
+ check_partition_dirs(thd->lex->part_info))
+ goto err;
- if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
- {
- char frm_name[FN_REFLEN+1];
- strxnmov(frm_name, sizeof(frm_name) - 1, path, reg_ext, NullS);
+ alias= table_case_name(create_info, table_name);
- if (!access(frm_name, F_OK))
+ /* Check if table exists */
+ if (create_info->tmp_table())
+ {
+ if (find_temporary_table(thd, db, table_name))
{
if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
goto warn;
- my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
goto err;
}
- /*
- We don't assert here, but check the result, because the table could be
- in the table definition cache and in the same time the .frm could be
- missing from the disk, in case of manual intervention which deletes
- the .frm file. The user has to use FLUSH TABLES; to clear the cache.
- Then she could create the table. This case is pretty obscure and
- therefore we don't introduce a new error message only for it.
- */
- mysql_mutex_lock(&LOCK_open);
- if (get_cached_table_share(db, table_name))
+ }
+ else
+ {
+ if (!internal_tmp_table && ha_table_exists(thd, db, table_name))
{
- mysql_mutex_unlock(&LOCK_open);
+ if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
+ goto warn;
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
goto err;
}
- mysql_mutex_unlock(&LOCK_open);
}
- /*
- Check that table with given name does not already
- exist in any storage engine. In such a case it should
- be discovered and the error ER_TABLE_EXISTS_ERROR be returned
- unless user specified CREATE TABLE IF EXISTS
- An exclusive metadata lock ensures that no
- one else is attempting to discover the table. Since
- it's not on disk as a frm file, no one could be using it!
- */
- if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE))
+ THD_STAGE_INFO(thd, stage_creating_table);
+
+ if (create_table_mode == C_ASSISTED_DISCOVERY)
{
- bool create_if_not_exists =
- create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS;
- int retcode = ha_table_exists_in_engine(thd, db, table_name);
- DBUG_PRINT("info", ("exists_in_engine: %u",retcode));
- switch (retcode)
- {
- case HA_ERR_NO_SUCH_TABLE:
- /* Normal case, no table exists. we can go and create it */
- break;
- case HA_ERR_TABLE_EXIST:
- DBUG_PRINT("info", ("Table existed in handler"));
+ /* check that it's used correctly */
+ DBUG_ASSERT(alter_info->create_list.elements == 0);
+ DBUG_ASSERT(alter_info->key_list.elements == 0);
- if (create_if_not_exists)
- goto warn;
- my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
- goto err;
- break;
- default:
- DBUG_PRINT("info", ("error: %u from storage engine", retcode));
- my_error(retcode, MYF(0),table_name);
- goto err;
+ TABLE_SHARE share;
+ handlerton *hton= create_info->db_type;
+ int ha_err;
+ Field *no_fields= 0;
+
+ if (!hton->discover_table_structure)
+ {
+ my_error(ER_ILLEGAL_HA, MYF(0), hton_name(hton)->str, db, table_name);
+ goto err;
}
- }
- THD_STAGE_INFO(thd, stage_creating_table);
+ init_tmp_table_share(thd, &share, db, 0, table_name, path);
- {
- size_t dirlen;
- char dirpath[FN_REFLEN];
+ /* prepare everything for discovery */
+ share.field= &no_fields;
+ share.db_plugin= ha_lock_engine(thd, hton);
+ share.option_list= create_info->option_list;
+ share.connect_string= create_info->connect_string;
+
+ if (parse_engine_table_options(thd, hton, &share))
+ goto err;
+
+ ha_err= hton->discover_table_structure(hton, thd, &share, create_info);
/*
- data_file_name and index_file_name include the table name without
- extension. Mostly this does not refer to an existing file. When
- comparing data_file_name or index_file_name against the data
- directory, we try to resolve all symbolic links. On some systems,
- we use realpath(3) for the resolution. This returns ENOENT if the
- resolved path does not refer to an existing file. my_realpath()
- does then copy the requested path verbatim, without symlink
- resolution. Thereafter the comparison can fail even if the
- requested path is within the data directory. E.g. if symlinks to
- another file system are used. To make realpath(3) return the
- resolved path, we strip the table name and compare the directory
- path only. If the directory doesn't exist either, table creation
- will fail anyway.
+ If discovery failed, the plugin will be auto-unlocked, as it
+ was locked on the THD (see above).
+ If discovery succeeded, the plugin was replaced by a globally
+ locked plugin that will be unlocked by free_table_share().
*/
- if (create_info->data_file_name)
- {
- dirname_part(dirpath, create_info->data_file_name, &dirlen);
- if (test_if_data_home_dir(dirpath))
- {
- my_error(ER_WRONG_ARGUMENTS, MYF(0), "DATA DIRECTORY");
- goto err;
- }
- }
- if (create_info->index_file_name)
+ if (ha_err)
+ share.db_plugin= 0; // will be auto-freed, locked above on the THD
+
+ free_table_share(&share);
+
+ if (ha_err)
{
- dirname_part(dirpath, create_info->index_file_name, &dirlen);
- if (test_if_data_home_dir(dirpath))
- {
- my_error(ER_WRONG_ARGUMENTS, MYF(0), "INDEX DIRECTORY");
- goto err;
- }
+ my_error(ER_GET_ERRNO, MYF(0), ha_err, hton_name(hton)->str);
+ goto err;
}
}
-
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (check_partition_dirs(thd->lex->part_info))
- {
- goto err;
- }
-#endif /* WITH_PARTITION_STORAGE_ENGINE */
-
-#ifdef HAVE_READLINK
- if (thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE)
-#endif
+ else
{
- if (create_info->data_file_name)
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
- "DATA DIRECTORY");
- if (create_info->index_file_name)
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
- "INDEX DIRECTORY");
- create_info->data_file_name= create_info->index_file_name= 0;
+ file= mysql_create_frm_image(thd, db, table_name, create_info, alter_info,
+ create_table_mode, key_info, key_count, frm);
+ if (!file)
+ goto err;
+ if (rea_create_table(thd, frm, path, db, table_name, create_info,
+ file, frm_only))
+ goto err;
}
- create_info->table_options=db_options;
-
- /*
- Create .FRM (and .PAR file for partitioned table).
- If "no_ha_table" is false also create table in storage engine.
- */
- if (rea_create_table(thd, path, db, table_name,
- create_info, alter_info->create_list,
- *key_count, *key_info, file, no_ha_table))
- goto err;
- if (!no_ha_table && create_info->options & HA_LEX_CREATE_TMP_TABLE)
+ if (!frm_only && create_info->tmp_table())
{
/*
Open a table (skipping table cache) and add it into
THD::temporary_tables list.
*/
- TABLE *table= open_table_uncached(thd, path, db, table_name, true, true);
+ TABLE *table= open_table_uncached(thd, create_info->db_type, path,
+ db, table_name, true, true);
if (!table)
{
@@ -4677,7 +4566,7 @@ bool create_table_impl(THD *thd,
thd->thread_specific_used= TRUE;
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
- else if (part_info && no_ha_table)
+ else if (thd->work_part_info && frm_only)
{
/*
For partitioned tables we can't find some problems with table
@@ -4694,7 +4583,7 @@ bool create_table_impl(THD *thd,
init_tmp_table_share(thd, &share, db, 0, table_name, path);
- bool result= (open_table_def(thd, &share, 0) ||
+ bool result= (open_table_def(thd, &share, GTS_TABLE) ||
open_table_from_share(thd, &share, "", 0, (uint) READ_ALL,
0, &table, true));
if (!result)
@@ -4704,11 +4593,10 @@ bool create_table_impl(THD *thd,
if (result)
{
- char frm_name[FN_REFLEN + 1];
- strxnmov(frm_name, sizeof(frm_name) - 1, path, reg_ext, NullS);
+ char frm_name[FN_REFLEN];
+ strxnmov(frm_name, sizeof(frm_name), path, reg_ext, NullS);
(void) mysql_file_delete(key_file_frm, frm_name, MYF(0));
- (void) file->ha_create_handler_files(path, NULL, CHF_DELETE_FLAG,
- create_info);
+ (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
goto err;
}
}
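(Annotation.) In the C_ASSISTED_DISCOVERY branch above, the table structure comes from the engine via the hton->discover_table_structure hook: the share is prepared with no fields, engine table options are parsed, and the hook either fills the share (success) or returns a handler error reported as ER_GET_ERRNO. A self-contained sketch of that contract; hton_sk, table_share_sk and demo_discover are stand-ins for the real handlerton/TABLE_SHARE types:

#include <cstdio>

// Stand-ins; the real handlerton/TABLE_SHARE come from handler.h / table.h.
struct table_share_sk { const char *db, *name; int fields; };
struct hton_sk
{
  // Assumed hook shape: fill the share from engine metadata, return 0 on
  // success or a handler error number on failure.
  int (*discover_table_structure)(hton_sk *, table_share_sk *);
};

static int demo_discover(hton_sk *, table_share_sk *share)
{
  share->fields = 2;                      // engine says: two columns
  return 0;
}

int main()
{
  hton_sk engine = { demo_discover };
  table_share_sk share = { "test", "t1", 0 };

  if (!engine.discover_table_structure)   // same guard as the ER_ILLEGAL_HA path
    return 1;
  int ha_err = engine.discover_table_structure(&engine, &share);
  if (ha_err)                             // maps to ER_GET_ERRNO in the server
    return 1;
  printf("discovered %d columns for %s.%s\n", share.fields, share.db, share.name);
  return 0;
}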
@@ -4728,7 +4616,6 @@ warn:
goto err;
}
-
/**
Simple wrapper around create_table_impl() to be used
in various version of CREATE TABLE statement.
@@ -4742,8 +4629,9 @@ bool mysql_create_table_no_lock(THD *thd,
KEY *not_used_1;
uint not_used_2;
char path[FN_REFLEN + 1];
+ LEX_CUSTRING frm= {0,0};
- if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
+ if (create_info->tmp_table())
build_tmptable_filename(thd, path, sizeof(path));
else
{
@@ -4759,12 +4647,13 @@ bool mysql_create_table_no_lock(THD *thd,
}
}
- return create_table_impl(thd, db, table_name, path, create_info, alter_info,
- false, false, is_trans,
- &not_used_1, &not_used_2, create_table_mode);
+ bool res= create_table_impl(thd, db, table_name, path, create_info,
+ alter_info, create_table_mode, is_trans,
+ &not_used_1, &not_used_2, &frm);
+ my_free(const_cast<uchar*>(frm.str));
+ return res;
}
-
/**
Implementation of SQLCOM_CREATE_TABLE.
@@ -4779,19 +4668,17 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
HA_CREATE_INFO *create_info,
Alter_info *alter_info)
{
- bool result;
+ const char *db= create_table->db;
+ const char *table_name= create_table->table_name;
bool is_trans= FALSE;
int create_table_mode;
DBUG_ENTER("mysql_create_table");
- /*
- Open or obtain an exclusive metadata lock on table being created.
- */
+ /* Open or obtain an exclusive metadata lock on table being created */
if (open_and_lock_tables(thd, thd->lex->query_tables, FALSE, 0))
{
/* is_error() may be 0 if table existed and we generated a warning */
- result= thd->is_error();
- goto end;
+ DBUG_RETURN(thd->is_error());
}
/* Got lock. */
@@ -4803,20 +4690,16 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
create_table_mode= C_ASSISTED_DISCOVERY;
promote_first_timestamp_column(&alter_info->create_list);
- result= mysql_create_table_no_lock(thd, create_table->db,
- create_table->table_name, create_info,
- alter_info, &is_trans, create_table_mode);
- if (result)
- DBUG_RETURN(result);
+ if (mysql_create_table_no_lock(thd, db, table_name, create_info, alter_info,
+ &is_trans, create_table_mode))
+ DBUG_RETURN(1);
/* In RBR we don't need to log CREATE TEMPORARY TABLE */
if (thd->is_current_stmt_binlog_format_row() && create_info->tmp_table())
DBUG_RETURN(0);
+ bool result;
result= write_bin_log(thd, TRUE, thd->query(), thd->query_length(), is_trans);
- thd->abort_on_warning= false;
-
-end:
DBUG_RETURN(result);
}
@@ -4940,16 +4823,20 @@ mysql_rename_table(handlerton *base, const char *old_db,
{
if (rename_file_ext(from,to,reg_ext))
error= my_errno;
- (void) file->ha_create_handler_files(to, from, CHF_RENAME_FLAG, NULL);
+ (void) file->ha_create_partitioning_metadata(to, from, CHF_RENAME_FLAG);
}
else if (!file || !(error=file->ha_rename_table(from_base, to_base)))
{
if (!(flags & NO_FRM_RENAME) && rename_file_ext(from,to,reg_ext))
{
error=my_errno;
- /* Restore old file name */
if (file)
- file->ha_rename_table(to_base, from_base);
+ {
+ if (error == ENOENT)
+ error= 0; // this is ok if file->ha_rename_table() succeeded
+ else
+ file->ha_rename_table(to_base, from_base); // Restore old file name
+ }
}
}
delete file;
@@ -4957,20 +4844,19 @@ mysql_rename_table(handlerton *base, const char *old_db,
my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE");
else if (error)
my_error(ER_ERROR_ON_RENAME, MYF(0), from, to, error);
+ else if (!(flags & FN_IS_TMP))
+ mysql_audit_rename_table(thd, old_db, old_name, new_db, new_name);
-
-#ifdef HAVE_PSI_TABLE_INTERFACE
/*
Remove the old table share from the pfs table share array. The new table
share will be created when the renamed table is first accessed.
*/
if (likely(error == 0))
{
- my_bool temp_table= (my_bool)is_prefix(old_name, tmp_file_prefix);
- PSI_TABLE_CALL(drop_table_share)
- (temp_table, old_db, strlen(old_db), old_name, strlen(old_name));
+ PSI_CALL_drop_table_share(flags & FN_FROM_IS_TMP,
+ old_db, strlen(old_db),
+ old_name, strlen(old_name));
}
-#endif
DBUG_RETURN(error != 0);
}
@@ -5049,7 +4935,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
local_create_info.options|= create_info->options&HA_LEX_CREATE_IF_NOT_EXISTS;
/* Replace type of source table with one specified in the statement. */
local_create_info.options&= ~HA_LEX_CREATE_TMP_TABLE;
- local_create_info.options|= create_info->options & HA_LEX_CREATE_TMP_TABLE;
+ local_create_info.options|= create_info->tmp_table();
/* Reset auto-increment counter for the new table. */
local_create_info.auto_increment_value= 0;
/*
@@ -5067,7 +4953,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
Ensure that we have an exclusive lock on target table if we are creating
non-temporary table.
*/
- DBUG_ASSERT((create_info->options & HA_LEX_CREATE_TMP_TABLE) ||
+ DBUG_ASSERT((create_info->tmp_table()) ||
thd->mdl_context.is_lock_owner(MDL_key::TABLE, table->db,
table->table_name,
MDL_EXCLUSIVE));
@@ -5094,7 +4980,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
4 temporary temporary Nothing
==== ========= ========= ==============================
*/
- if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE))
+ if (!(create_info->tmp_table()))
{
if (src_table->table->s->tmp_table) // Case 2
{
@@ -5172,6 +5058,8 @@ int mysql_discard_or_import_tablespace(THD *thd,
int error;
DBUG_ENTER("mysql_discard_or_import_tablespace");
+ mysql_audit_alter_table(thd, table_list);
+
/*
Note that DISCARD/IMPORT TABLESPACE always is the only operation in an
ALTER TABLE
@@ -5255,11 +5143,248 @@ static bool is_candidate_key(KEY *key)
if (key_part->key_part_flag & HA_PART_KEY_SEG)
return false;
}
-
return true;
}
+/*
+ Preparation for ALTER TABLE
+
+ SYNOPSIS
+ handle_if_exists_options()
+ thd Thread object.
+ table The altered table.
+ alter_info List of columns and indexes to create
+
+ DESCRIPTION
+ Looks for the IF [NOT] EXISTS options, checks the current table state,
+ and removes items from the lists when the requested change is already
+ in effect.
+
+ RETURN VALUES
+ NONE
+*/
+
+static void
+handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
+{
+ Field **f_ptr;
+ DBUG_ENTER("handle_if_exists_option");
+
+ /* Handle ADD COLUMN IF NOT EXISTS. */
+ {
+ List_iterator<Create_field> it(alter_info->create_list);
+ Create_field *sql_field;
+
+ while ((sql_field=it++))
+ {
+ if (!sql_field->create_if_not_exists || sql_field->change)
+ continue;
+ /*
+ If there is a field with the same name in the table already,
+ remove the sql_field from the list.
+ */
+ for (f_ptr=table->field; *f_ptr; f_ptr++)
+ {
+ if (my_strcasecmp(system_charset_info,
+ sql_field->field_name, (*f_ptr)->field_name) == 0)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_DUP_FIELDNAME, ER(ER_DUP_FIELDNAME),
+ sql_field->field_name);
+ it.remove();
+ if (alter_info->create_list.is_empty())
+ {
+ alter_info->flags&= ~Alter_info::ALTER_ADD_COLUMN;
+ if (alter_info->key_list.is_empty())
+ alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ /* Handle MODIFY COLUMN IF EXISTS. */
+ {
+ List_iterator<Create_field> it(alter_info->create_list);
+ Create_field *sql_field;
+
+ while ((sql_field=it++))
+ {
+ if (!sql_field->create_if_not_exists || !sql_field->change)
+ continue;
+ /*
+ If there is NO field with the same name in the table already,
+ remove the sql_field from the list.
+ */
+ for (f_ptr=table->field; *f_ptr; f_ptr++)
+ {
+ if (my_strcasecmp(system_charset_info,
+ sql_field->field_name, (*f_ptr)->field_name) == 0)
+ {
+ break;
+ }
+ }
+ if (*f_ptr == NULL)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR),
+ sql_field->change, table->s->table_name.str);
+ it.remove();
+ if (alter_info->create_list.is_empty())
+ {
+ alter_info->flags&= ~(Alter_info::ALTER_ADD_COLUMN |
+ Alter_info::ALTER_CHANGE_COLUMN);
+ if (alter_info->key_list.is_empty())
+ alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX;
+ }
+ }
+ }
+ }
+
+ /* Handle DROP COLUMN/KEY IF EXISTS. */
+ {
+ List_iterator<Alter_drop> drop_it(alter_info->drop_list);
+ Alter_drop *drop;
+ bool remove_drop;
+ while ((drop= drop_it++))
+ {
+ if (!drop->drop_if_exists)
+ continue;
+ remove_drop= TRUE;
+ if (drop->type == Alter_drop::COLUMN)
+ {
+ /*
+ If there is NO field with that name in the table,
+ remove the 'drop' from the list.
+ */
+ for (f_ptr=table->field; *f_ptr; f_ptr++)
+ {
+ if (my_strcasecmp(system_charset_info,
+ drop->name, (*f_ptr)->field_name) == 0)
+ {
+ remove_drop= FALSE;
+ break;
+ }
+ }
+ }
+ else /* Alter_drop::KEY */
+ {
+ uint n_key;
+ for (n_key=0; n_key < table->s->keys; n_key++)
+ {
+ if (my_strcasecmp(system_charset_info,
+ drop->name, table->key_info[n_key].name) == 0)
+ {
+ remove_drop= FALSE;
+ break;
+ }
+ }
+ }
+ if (remove_drop)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_CANT_DROP_FIELD_OR_KEY, ER(ER_CANT_DROP_FIELD_OR_KEY),
+ drop->name);
+ drop_it.remove();
+ if (alter_info->drop_list.is_empty())
+ alter_info->flags&= ~(Alter_info::ALTER_DROP_COLUMN |
+ Alter_info::ALTER_DROP_INDEX);
+ }
+ }
+ }
+
+ /* ALTER TABLE ADD KEY IF NOT EXISTS */
+ /* ALTER TABLE ADD FOREIGN KEY IF NOT EXISTS */
+ {
+ Key *key;
+ List_iterator<Key> key_it(alter_info->key_list);
+ uint n_key;
+ while ((key=key_it++))
+ {
+ if (!key->create_if_not_exists)
+ continue;
+ for (n_key=0; n_key < table->s->keys; n_key++)
+ {
+ if (my_strcasecmp(system_charset_info,
+ key->name.str, table->key_info[n_key].name) == 0)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_DUP_KEYNAME, ER(ER_DUP_KEYNAME), key->name.str);
+ key_it.remove();
+ if (key->type == Key::FOREIGN_KEY)
+ {
+ /* ADD FOREIGN KEY appends two items. */
+ key_it.remove();
+ }
+ if (alter_info->key_list.is_empty())
+ alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX;
+ break;
+ }
+ }
+ }
+ }
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *tab_part_info= table->part_info;
+ if (tab_part_info && thd->lex->check_exists)
+ {
+ /* ALTER TABLE ADD PARTITION IF NOT EXISTS */
+ if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION)
+ {
+ partition_info *alt_part_info= thd->lex->part_info;
+ if (alt_part_info)
+ {
+ List_iterator<partition_element> new_part_it(alt_part_info->partitions);
+ partition_element *pe;
+ while ((pe= new_part_it++))
+ {
+ if (!tab_part_info->has_unique_name(pe))
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_SAME_NAME_PARTITION, ER(ER_SAME_NAME_PARTITION),
+ pe->partition_name);
+ alter_info->flags&= ~Alter_info::ALTER_ADD_PARTITION;
+ thd->lex->part_info= NULL;
+ break;
+ }
+ }
+ }
+ }
+ /* ALTER TABLE DROP PARTITION IF EXISTS */
+ if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION)
+ {
+ List_iterator<char> names_it(alter_info->partition_names);
+ char *name;
+
+ while ((name= names_it++))
+ {
+ List_iterator<partition_element> part_it(tab_part_info->partitions);
+ partition_element *part_elem;
+ while ((part_elem= part_it++))
+ {
+ if (my_strcasecmp(system_charset_info,
+ part_elem->partition_name, name) == 0)
+ break;
+ }
+ if (!part_elem)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_DROP_PARTITION_NON_EXISTENT,
+ ER(ER_DROP_PARTITION_NON_EXISTENT), "DROP");
+ names_it.remove();
+ }
+ }
+ if (alter_info->partition_names.elements == 0)
+ alter_info->flags&= ~Alter_info::ALTER_DROP_PARTITION;
+ }
+ }
+#endif /*WITH_PARTITION_STORAGE_ENGINE*/
+
+ DBUG_VOID_RETURN;
+}
+
+
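(Annotation.) Every branch of handle_if_exists_options() follows the same filtering pattern: walk the ALTER list, compare names case-insensitively against what the table already contains, push a note and drop the item when the IF [NOT] EXISTS condition makes it a no-op, and clear the matching Alter_info flag once a list becomes empty. The stand-alone sketch below models that pattern with STL containers in place of the server's List iterators and my_strcasecmp():

#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

// Simplified model: drop every ADD COLUMN IF NOT EXISTS whose name already
// exists in the table, emitting a note for each dropped item.
static bool equal_nocase(const std::string &a, const std::string &b)
{
  if (a.size() != b.size())
    return false;
  for (size_t i = 0; i < a.size(); i++)
    if (tolower((unsigned char) a[i]) != tolower((unsigned char) b[i]))
      return false;
  return true;
}

int main()
{
  std::vector<std::string> existing;          // columns already in the table
  existing.push_back("id");
  existing.push_back("created_at");

  std::vector<std::string> add_if_not_exists; // ADD COLUMN IF NOT EXISTS list
  add_if_not_exists.push_back("created_at");
  add_if_not_exists.push_back("comment");

  for (std::vector<std::string>::iterator it = add_if_not_exists.begin();
       it != add_if_not_exists.end(); )
  {
    bool dup = false;
    for (size_t i = 0; i < existing.size() && !dup; i++)
      dup = equal_nocase(*it, existing[i]);
    if (dup)
    {
      printf("Note: duplicate column name '%s', ignored\n", it->c_str());
      it = add_if_not_exists.erase(it);       // same effect as it.remove()
    }
    else
      ++it;
  }
  // In the server, an empty list also clears Alter_info::ALTER_ADD_COLUMN.
  return add_if_not_exists.empty() ? 1 : 0;
}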
/**
Get Create_field object for newly created table by field index.
@@ -5868,10 +5993,9 @@ bool mysql_compare_tables(TABLE *table,
int create_table_mode= table->s->tmp_table == NO_TMP_TABLE ?
C_ORDINARY_CREATE : C_ALTER_TABLE;
if (mysql_prepare_create_table(thd, create_info, &tmp_alter_info,
- (table->s->tmp_table != NO_TMP_TABLE),
&db_options, table->file, &key_info_buffer,
&key_count, create_table_mode))
- DBUG_RETURN(true);
+ DBUG_RETURN(1);
/* Some very basic checks. */
if (table->s->fields != alter_info->create_list.elements ||
@@ -6012,6 +6136,7 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
switch (keys_onoff) {
case Alter_info::ENABLE:
+ DEBUG_SYNC(table->in_use, "alter_table_enable_indexes");
error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
break;
case Alter_info::LEAVE_AS_IS:
@@ -6028,7 +6153,6 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
table->file->table_type(),
table->s->db.str, table->s->table_name.str);
-
error= 0;
} else if (error)
table->file->print_error(error, MYF(0));
@@ -6684,7 +6808,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
*/
if ((def->sql_type == MYSQL_TYPE_DATE ||
def->sql_type == MYSQL_TYPE_NEWDATE ||
- def->sql_type == MYSQL_TYPE_DATETIME) &&
+ def->sql_type == MYSQL_TYPE_DATETIME ||
+ def->sql_type == MYSQL_TYPE_DATETIME2) &&
!alter_ctx->datetime_field &&
!(~def->flags & (NO_DEFAULT_VALUE_FLAG | NOT_NULL_FLAG)) &&
thd->variables.sql_mode & MODE_NO_ZERO_DATE)
@@ -6902,7 +7027,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
key= new Key(key_type, key_name, strlen(key_name),
&key_create_info,
test(key_info->flags & HA_GENERATED_KEY),
- key_parts, key_info->option_list);
+ key_parts, key_info->option_list, FALSE);
new_key_list.push_back(key);
}
}
@@ -7683,6 +7808,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
DBUG_RETURN(true);
}
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ mysql_audit_alter_table(thd, table_list);
+
THD_STAGE_INFO(thd, stage_setup);
if (!(alter_info->flags & ~(Alter_info::ALTER_RENAME |
Alter_info::ALTER_KEYS_ONOFF)) &&
@@ -7698,9 +7826,23 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
"LOCK=NONE/SHARED", "LOCK=EXCLUSIVE");
DBUG_RETURN(true);
}
- DBUG_RETURN(simple_rename_or_index_change(thd, table_list,
- alter_info->keys_onoff,
- &alter_ctx));
+ bool res= simple_rename_or_index_change(thd, table_list,
+ alter_info->keys_onoff,
+ &alter_ctx);
+ DBUG_RETURN(res);
+ }
+
+ handle_if_exists_options(thd, table, alter_info);
+
+ /* Check whether there is anything to do at all. */
+ /* Normally ALTER can become a no-op only after handling */
+ /* the IF (NOT) EXISTS options. */
+ if (alter_info->flags == 0)
+ {
+ my_snprintf(alter_ctx.tmp_name, sizeof(alter_ctx.tmp_name),
+ ER(ER_INSERT_INFO), 0L, 0L, 0L);
+ my_ok(thd, 0L, 0L, alter_ctx.tmp_name);
+ DBUG_RETURN(false);
}
/* We have to do full alter table. */
@@ -7871,8 +8013,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
DEBUG_SYNC(thd, "alter_table_before_create_table_no_lock");
- DBUG_EXECUTE_IF("sleep_before_create_table_no_lock",
- my_sleep(100000););
/* We can abort alter table for any table type */
thd->abort_on_warning= !ignore && thd->is_strict_mode();
@@ -7892,17 +8032,22 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
mysql_prepare_create_table().
*/
bool varchar= create_info->varchar;
+ LEX_CUSTRING frm= {0,0};
tmp_disable_binlog(thd);
+ create_info->options|=HA_CREATE_TMP_ALTER;
error= create_table_impl(thd, alter_ctx.new_db, alter_ctx.tmp_name,
alter_ctx.get_tmp_path(),
create_info, alter_info,
- true, true, NULL,
- &key_info, &key_count, FALSE);
+ C_ALTER_TABLE_FRM_ONLY, NULL,
+ &key_info, &key_count, &frm);
reenable_binlog(thd);
thd->abort_on_warning= false;
if (error)
+ {
+ my_free(const_cast<uchar*>(frm.str));
DBUG_RETURN(true);
+ }
/* Remember that we have not created table in storage engine yet. */
bool no_ha_table= true;
@@ -7924,10 +8069,32 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (fill_alter_inplace_info(thd, table, varchar, &ha_alter_info))
goto err_new_table_cleanup;
+ if (ha_alter_info.handler_flags == 0)
+ {
+ /*
+ No-op ALTER, no need to call handler API functions.
+
+ If this code path is entered for an ALTER statement that
+ should not be a real no-op, new handler flags should be added
+ and fill_alter_inplace_info() adjusted.
+
+ Note that we can end up here if an ALTER statement has clauses
+ that cancel each other out (e.g. ADD/DROP of an identical index).
+
+ Also note that we ignore the LOCK clause here.
+
+ TODO don't create the frm in the first place
+ */
+ deletefrm(alter_ctx.get_tmp_path());
+ my_free(const_cast<uchar*>(frm.str));
+ goto end_inplace;
+ }
+
// We assume that the table is non-temporary.
DBUG_ASSERT(!table->s->tmp_table);
- if (!(altered_table= open_table_uncached(thd, alter_ctx.get_tmp_path(),
+ if (!(altered_table= open_table_uncached(thd, new_db_type,
+ alter_ctx.get_tmp_path(),
alter_ctx.new_db,
alter_ctx.tmp_name,
true, false)))
@@ -7944,24 +8111,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
altered_table->column_bitmaps_set_no_signal(&altered_table->s->all_set,
&altered_table->s->all_set);
- if (ha_alter_info.handler_flags == 0)
- {
- /*
- No-op ALTER, no need to call handler API functions.
-
- If this code path is entered for an ALTER statement that
- should not be a real no-op, new handler flags should be added
- and fill_alter_inplace_info() adjusted.
-
- Note that we can end up here if an ALTER statement has clauses
- that cancel each other out (e.g. ADD/DROP identically index).
-
- Also note that we ignore the LOCK clause here.
- */
- close_temporary_table(thd, altered_table, true, false);
- goto end_inplace;
- }
-
// Ask storage engine whether to use copy or in-place
enum_alter_inplace_result inplace_supported=
table->file->check_if_supported_inplace_alter(altered_table,
@@ -8032,6 +8181,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (use_inplace)
{
+ my_free(const_cast<uchar*>(frm.str));
if (mysql_inplace_alter_table(thd, table_list, table,
altered_table,
&ha_alter_info,
@@ -8091,15 +8241,16 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
if (ha_create_table(thd, alter_ctx.get_tmp_path(),
alter_ctx.new_db, alter_ctx.tmp_name,
- create_info, false))
+ create_info, &frm))
goto err_new_table_cleanup;
/* Mark that we have created table in storage engine. */
no_ha_table= false;
- if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
+ if (create_info->tmp_table())
{
- if (!open_table_uncached(thd, alter_ctx.get_tmp_path(),
+ if (!open_table_uncached(thd, new_db_type,
+ alter_ctx.get_tmp_path(),
alter_ctx.new_db, alter_ctx.tmp_name,
true, true))
goto err_new_table_cleanup;
@@ -8122,7 +8273,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
/* table is a normal table: Create temporary table in same directory */
/* Open our intermediate table. */
- new_table= open_table_uncached(thd, alter_ctx.get_tmp_path(),
+ new_table= open_table_uncached(thd, new_db_type, alter_ctx.get_tmp_path(),
alter_ctx.new_db, alter_ctx.tmp_name,
true, true);
}
@@ -8202,6 +8353,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (!thd->is_current_stmt_binlog_format_row() &&
write_bin_log(thd, true, thd->query(), thd->query_length()))
DBUG_RETURN(true);
+ my_free(const_cast<uchar*>(frm.str));
goto end_temporary;
}
@@ -8243,6 +8395,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
HA_EXTRA_NOT_USED,
NULL);
table_list->table= table= NULL; /* Safety */
+ my_free(const_cast<uchar*>(frm.str));
/*
Rename the old table to temporary name to have a backup in case
@@ -8316,7 +8469,6 @@ end_inplace:
THD_STAGE_INFO(thd, stage_end);
- DBUG_EXECUTE_IF("sleep_alter_before_main_binlog", my_sleep(6000000););
DEBUG_SYNC(thd, "alter_table_before_main_binlog");
ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE,
@@ -8325,7 +8477,7 @@ end_inplace:
DBUG_ASSERT(!(mysql_bin_log.is_open() &&
thd->is_current_stmt_binlog_format_row() &&
- (create_info->options & HA_LEX_CREATE_TMP_TABLE)));
+ (create_info->tmp_table())));
if (write_bin_log(thd, true, thd->query(), thd->query_length()))
DBUG_RETURN(true);
@@ -8337,7 +8489,7 @@ end_inplace:
shutdown. But we do not need to attach MERGE children.
*/
TABLE *t_table;
- t_table= open_table_uncached(thd, alter_ctx.get_new_path(),
+ t_table= open_table_uncached(thd, new_db_type, alter_ctx.get_new_path(),
alter_ctx.new_db, alter_ctx.new_name,
false, true);
if (t_table)
@@ -8368,6 +8520,7 @@ end_temporary:
DBUG_RETURN(false);
err_new_table_cleanup:
+ my_free(const_cast<uchar*>(frm.str));
if (new_table)
{
/* close_temporary_table() frees the new_table pointer. */
@@ -8397,6 +8550,7 @@ err_new_table_cleanup:
t_type= MYSQL_TIMESTAMP_DATE;
break;
case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_DATETIME2:
f_val= "0000-00-00 00:00:00";
t_type= MYSQL_TIMESTAMP_DATETIME;
break;
@@ -8508,10 +8662,10 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
if (!(copy= new Copy_field[to->s->fields]))
DBUG_RETURN(-1); /* purecov: inspected */
+ /* We need external lock before we can disable/enable keys */
if (to->file->ha_external_lock(thd, F_WRLCK))
DBUG_RETURN(-1);
- /* We need external lock before we can disable/enable keys */
alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff);
/* We can abort alter table for any table type */
@@ -8996,7 +9150,7 @@ static bool check_engine(THD *thd, const char *db_name,
ha_resolve_storage_engine_name(*new_engine),
table_name);
}
- if (create_info->options & HA_LEX_CREATE_TMP_TABLE &&
+ if (create_info->tmp_table() &&
ha_check_storage_engine_flag(*new_engine, HTON_TEMPORARY_NOT_SUPPORTED))
{
if (create_info->used_fields & HA_CREATE_USED_ENGINE)
diff --git a/sql/sql_table.h b/sql/sql_table.h
index 5e836602b0d..6bd111cae6d 100644
--- a/sql/sql_table.h
+++ b/sql/sql_table.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2006, 2010, Oracle and/or its affiliates.
+ Copyright (c) 2011, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -26,8 +27,9 @@ struct TABLE_LIST;
class THD;
struct TABLE;
struct handlerton;
+class handler;
typedef struct st_ha_check_opt HA_CHECK_OPT;
-typedef struct st_ha_create_information HA_CREATE_INFO;
+struct HA_CREATE_INFO;
typedef struct st_key KEY;
typedef struct st_key_cache KEY_CACHE;
typedef struct st_lock_param_type ALTER_PARTITION_PARAM_TYPE;
@@ -146,12 +148,10 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db,
uint build_table_shadow_filename(char *buff, size_t bufflen,
ALTER_PARTITION_PARAM_TYPE *lpt);
uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen);
-bool check_table_file_presence(char *old_path, char *path, const char *db,
- const char *table_name, const char *alias,
- bool issue_error);
bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
HA_CREATE_INFO *create_info,
Alter_info *alter_info);
+
/*
mysql_create_table_no_lock can be called in one of the following
mutually exclusive situations:
@@ -194,9 +194,19 @@ bool mysql_create_table_no_lock(THD *thd, const char *db,
Alter_info *alter_info, bool *is_trans,
int create_table_mode);
+handler *mysql_create_frm_image(THD *thd,
+ const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info,
+ int create_table_mode,
+ KEY **key_info,
+ uint *key_count,
+ LEX_CUSTRING *frm);
+
int mysql_discard_or_import_tablespace(THD *thd,
TABLE_LIST *table_list,
bool discard);
+
bool mysql_prepare_alter_table(THD *thd, TABLE *table,
HA_CREATE_INFO *create_info,
Alter_info *alter_info,
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index c79362838e4..867d49808e1 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -629,7 +629,7 @@ Max used alarms: %u\n\
Next alarm time: %lu\n",
alarm_info.active_alarms,
alarm_info.max_used_alarms,
- alarm_info.next_alarm_time);
+ (ulong)alarm_info.next_alarm_time);
#endif
display_table_locks();
fflush(stdout);
diff --git a/sql/sql_time.cc b/sql/sql_time.cc
index 6f15ada2dcd..f4612ec517e 100644
--- a/sql/sql_time.cc
+++ b/sql/sql_time.cc
@@ -214,6 +214,22 @@ ulong convert_month_to_period(ulong month)
}
+bool
+check_date_with_warn(const MYSQL_TIME *ltime, ulonglong fuzzy_date,
+ timestamp_type ts_type)
+{
+ int unused;
+ if (check_date(ltime, fuzzy_date, &unused))
+ {
+ ErrConvTime str(ltime);
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ &str, ts_type, 0);
+ return true;
+ }
+ return false;
+}
+
+
/*
Convert a string to 8-bit representation,
for use in str_to_time/str_to_date/str_to_date.
@@ -249,9 +265,9 @@ to_ascii(CHARSET_INFO *cs,
/* Character set-aware version of str_to_time() */
-timestamp_type
+bool
str_to_time(CHARSET_INFO *cs, const char *str,uint length,
- MYSQL_TIME *l_time, ulonglong fuzzydate, int *warning)
+ MYSQL_TIME *l_time, ulonglong fuzzydate, MYSQL_TIME_STATUS *status)
{
char cnv[32];
if ((cs->state & MY_CS_NONASCII) != 0)
@@ -259,14 +275,14 @@ str_to_time(CHARSET_INFO *cs, const char *str,uint length,
length= to_ascii(cs, str, length, cnv, sizeof(cnv));
str= cnv;
}
- return str_to_time(str, length, l_time, fuzzydate, warning);
+ return str_to_time(str, length, l_time, fuzzydate, status);
}
/* Character set-aware version of str_to_datetime() */
-timestamp_type str_to_datetime(CHARSET_INFO *cs,
- const char *str, uint length,
- MYSQL_TIME *l_time, ulonglong flags, int *was_cut)
+bool str_to_datetime(CHARSET_INFO *cs, const char *str, uint length,
+ MYSQL_TIME *l_time, ulonglong flags,
+ MYSQL_TIME_STATUS *status)
{
char cnv[32];
if ((cs->state & MY_CS_NONASCII) != 0)
@@ -274,7 +290,7 @@ timestamp_type str_to_datetime(CHARSET_INFO *cs,
length= to_ascii(cs, str, length, cnv, sizeof(cnv));
str= cnv;
}
- return str_to_datetime(str, length, l_time, flags, was_cut);
+ return str_to_datetime(str, length, l_time, flags, status);
}
@@ -286,21 +302,22 @@ timestamp_type str_to_datetime(CHARSET_INFO *cs,
See description of str_to_datetime() for more information.
*/
-timestamp_type
+bool
str_to_datetime_with_warn(CHARSET_INFO *cs,
const char *str, uint length, MYSQL_TIME *l_time,
ulonglong flags)
{
- int was_cut;
+ MYSQL_TIME_STATUS status;
THD *thd= current_thd;
- timestamp_type ts_type;
-
- ts_type= str_to_datetime(cs, str, length, l_time, flags, &was_cut);
- if (was_cut || ts_type <= MYSQL_TIMESTAMP_ERROR)
+ bool ret_val= str_to_datetime(cs, str, length, l_time, flags, &status);
+ if (ret_val || status.warnings)
make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
str, length, flags & TIME_TIME_ONLY ?
- MYSQL_TIMESTAMP_TIME : ts_type, NullS);
- return ts_type;
+ MYSQL_TIMESTAMP_TIME : l_time->time_type, NullS);
+ DBUG_EXECUTE_IF("str_to_datetime_warn",
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_YES, str););
+ return ret_val;
}
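(Annotation.) The string-to-temporal functions now return bool (true on hard failure) and report truncation through a MYSQL_TIME_STATUS out-parameter instead of the old int *was_cut; str_to_datetime_with_warn() warns when either the call fails or status.warnings is set. A sketch of the assumed calling convention; time_sk and time_status_sk are simplified stand-ins for MYSQL_TIME and MYSQL_TIME_STATUS:

#include <cstdio>
#include <cstring>

// Stand-ins; the real MYSQL_TIME / MYSQL_TIME_STATUS live in my_time.h.
struct time_sk { int year, month, day, hour, minute, second; };
struct time_status_sk { int warnings; };     // assumed: non-zero on truncation

// Toy parser with the new-style signature: false = ok, true = hard error.
static bool str_to_datetime_sk(const char *s, time_sk *t, time_status_sk *st)
{
  memset(t, 0, sizeof(*t));
  st->warnings = 0;
  int n = sscanf(s, "%d-%d-%d %d:%d:%d",
                 &t->year, &t->month, &t->day, &t->hour, &t->minute, &t->second);
  if (n < 3)
    return true;                             // not even a date
  if (n < 6)
    st->warnings = 1;                        // date parsed, time part truncated
  return false;
}

int main()
{
  time_sk t;
  time_status_sk status;
  bool failed = str_to_datetime_sk("2013-04-01 10:3", &t, &status);
  if (failed || status.warnings)             // same condition as the server hunk
    printf("warning: truncated datetime value\n");
  return 0;
}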
@@ -311,7 +328,7 @@ str_to_datetime_with_warn(CHARSET_INFO *cs,
@param nr integer part of the number to convert
@param sec_part microsecond part of the number
@param ltime converted value will be written here
- @param fuzzydate conversion flags (TIME_FUZZY_DATE, etc)
+ @param fuzzydate conversion flags (TIME_INVALID_DATE, etc)
@param str original number, as an ErrConv. For the warning
@param field_name field name or NULL if not a field. For the warning
@@ -328,6 +345,7 @@ static bool number_to_time_with_warn(bool neg, ulonglong nr, ulong sec_part,
if (fuzzydate & TIME_TIME_ONLY)
{
+ fuzzydate= TIME_TIME_ONLY; // clear other flags
f_type= MYSQL_TYPE_TIME;
res= number_to_time(neg, nr, sec_part, ltime, &was_cut);
}
@@ -337,7 +355,7 @@ static bool number_to_time_with_warn(bool neg, ulonglong nr, ulong sec_part,
res= neg ? -1 : number_to_datetime(nr, sec_part, ltime, fuzzydate, &was_cut);
}
- if (res < 0 || (was_cut && !(fuzzydate & TIME_FUZZY_DATE)))
+ if (res < 0 || (was_cut && (fuzzydate & TIME_NO_ZERO_IN_DATE)))
{
make_truncated_value_warning(current_thd,
Sql_condition::WARN_LEVEL_WARN, str,
@@ -1012,13 +1030,13 @@ calc_time_diff(MYSQL_TIME *l_time1, MYSQL_TIME *l_time2, int l_sign, longlong *s
(uint) l_time2->day);
}
- microseconds= ((longlong)days*LL(86400) +
+ microseconds= ((longlong)days*86400LL +
(longlong)(l_time1->hour*3600L +
l_time1->minute*60L +
l_time1->second) -
l_sign*(longlong)(l_time2->hour*3600L +
l_time2->minute*60L +
- l_time2->second)) * LL(1000000) +
+ l_time2->second)) * 1000000LL +
(longlong)l_time1->second_part -
l_sign*(longlong)l_time2->second_part;
@@ -1050,7 +1068,7 @@ calc_time_diff(MYSQL_TIME *l_time1, MYSQL_TIME *l_time2, int l_sign, longlong *s
*/
-int my_time_compare(MYSQL_TIME *a, MYSQL_TIME *b)
+int my_time_compare(const MYSQL_TIME *a, const MYSQL_TIME *b)
{
ulonglong a_t= pack_time(a);
ulonglong b_t= pack_time(b);
diff --git a/sql/sql_time.h b/sql/sql_time.h
index 8ce85712256..47b300d51cc 100644
--- a/sql/sql_time.h
+++ b/sql/sql_time.h
@@ -35,11 +35,9 @@ ulong convert_period_to_month(ulong period);
ulong convert_month_to_period(ulong month);
bool get_date_from_daynr(long daynr,uint *year, uint *month, uint *day);
my_time_t TIME_to_timestamp(THD *thd, const MYSQL_TIME *t, uint *error_code);
-bool str_to_time_with_warn(CHARSET_INFO *cs, const char *str, uint length,
- MYSQL_TIME *l_time, ulonglong fuzzydate);
-timestamp_type str_to_datetime_with_warn(CHARSET_INFO *cs, const char *str,
- uint length, MYSQL_TIME *l_time,
- ulonglong flags);
+bool str_to_datetime_with_warn(CHARSET_INFO *cs, const char *str,
+ uint length, MYSQL_TIME *l_time,
+ ulonglong flags);
bool double_to_datetime_with_warn(double value, MYSQL_TIME *ltime,
ulonglong fuzzydate,
const char *name);
@@ -77,7 +75,7 @@ bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type,
INTERVAL interval);
bool calc_time_diff(MYSQL_TIME *l_time1, MYSQL_TIME *l_time2, int l_sign,
longlong *seconds_out, long *microseconds_out);
-int my_time_compare(MYSQL_TIME *a, MYSQL_TIME *b);
+int my_time_compare(const MYSQL_TIME *a, const MYSQL_TIME *b);
void localtime_to_TIME(MYSQL_TIME *to, struct tm *from);
void calc_time_from_sec(MYSQL_TIME *to, long seconds, long microseconds);
uint calc_week(MYSQL_TIME *l_time, uint week_behaviour, uint *year);
@@ -87,12 +85,14 @@ bool parse_date_time_format(timestamp_type format_type,
const char *format, uint format_length,
DATE_TIME_FORMAT *date_time_format);
/* Character set-aware version of str_to_time() */
-timestamp_type str_to_time(CHARSET_INFO *cs, const char *str,uint length,
- MYSQL_TIME *l_time, ulonglong fuzzydate, int *warning);
+bool str_to_time(CHARSET_INFO *cs, const char *str,uint length,
+ MYSQL_TIME *l_time, ulonglong fuzzydate,
+ MYSQL_TIME_STATUS *status);
/* Character set-aware version of str_to_datetime() */
-timestamp_type str_to_datetime(CHARSET_INFO *cs,
- const char *str, uint length,
- MYSQL_TIME *l_time, ulonglong flags, int *was_cut);
+bool str_to_datetime(CHARSET_INFO *cs,
+ const char *str, uint length,
+ MYSQL_TIME *l_time, ulonglong flags,
+ MYSQL_TIME_STATUS *status);
/* convenience wrapper */
inline bool parse_date_time_format(timestamp_type format_type,
@@ -111,4 +111,18 @@ extern DATE_TIME_FORMAT global_time_format;
extern KNOWN_DATE_TIME_FORMAT known_date_time_formats[];
extern LEX_STRING interval_type_to_name[];
+
+static inline bool
+non_zero_date(const MYSQL_TIME *ltime)
+{
+ return ltime->year || ltime->month || ltime->day;
+}
+static inline bool
+check_date(const MYSQL_TIME *ltime, ulonglong flags, int *was_cut)
+{
+ return check_date(ltime, non_zero_date(ltime), flags, was_cut);
+}
+bool check_date_with_warn(const MYSQL_TIME *ltime, ulonglong fuzzy_date,
+ timestamp_type ts_type);
+
#endif /* SQL_TIME_INCLUDED */
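(Annotation.) The new inline check_date() overload added to sql_time.h simply derives the non-zero-date flag from the value itself before calling the lower-level checker, and check_date_with_warn() turns a failed check into a truncation warning. A small compilable sketch of that wrapper pattern under stand-in names (the real check_date() lives in my_time.h and takes MySQL's fuzzy-date flags):

#include <cstdio>

struct time_sk { unsigned year, month, day; };

// Lower-level checker (stand-in): flags and cut-reporting reduced to a bool.
static bool check_date_low_sk(const time_sk *t, bool not_zero,
                              unsigned long long, int *was_cut)
{
  *was_cut = 0;
  if (!not_zero)                 // zero date: accepted in this toy model
    return false;
  if (t->month == 0 || t->month > 12 || t->day == 0 || t->day > 31)
  {
    *was_cut = 1;
    return true;
  }
  return false;
}

static inline bool non_zero_date_sk(const time_sk *t)
{ return t->year || t->month || t->day; }

// Mirrors the new inline overload: derive the non-zero flag from the value.
static inline bool check_date_sk(const time_sk *t, unsigned long long flags,
                                 int *was_cut)
{ return check_date_low_sk(t, non_zero_date_sk(t), flags, was_cut); }

int main()
{
  time_sk bad = { 2013, 13, 1 };
  int unused;
  if (check_date_sk(&bad, 0, &unused))
    printf("invalid date\n");    // check_date_with_warn() would warn here
  return 0;
}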
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index dafb7ed3eb1..bc4986bebee 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#define MYSQL_LEX 1
@@ -443,7 +443,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
if (!create)
{
- bool if_exists= thd->lex->drop_if_exists;
+ bool if_exists= thd->lex->check_exists;
/*
Protect the query table list from the temporary and potentially
@@ -701,10 +701,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
thd->security_ctx->priv_host)))
{
if (check_global_access(thd, SUPER_ACL))
- {
- my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER");
return TRUE;
- }
}
/*
diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc
index 810daefb987..9cd984a6663 100644
--- a/sql/sql_truncate.cc
+++ b/sql/sql_truncate.cc
@@ -255,27 +255,18 @@ static bool recreate_temporary_table(THD *thd, TABLE *table)
{
bool error= TRUE;
TABLE_SHARE *share= table->s;
- HA_CREATE_INFO create_info;
handlerton *table_type= table->s->db_type();
DBUG_ENTER("recreate_temporary_table");
- memset(&create_info, 0, sizeof(create_info));
- create_info.options|= HA_LEX_CREATE_TMP_TABLE;
-
table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK);
/* Don't free share. */
close_temporary_table(thd, table, FALSE, FALSE);
- /*
- We must use share->normalized_path.str since for temporary tables it
- differs from what dd_recreate_table() would generate based
- on table and schema names.
- */
- ha_create_table(thd, share->normalized_path.str, share->db.str,
- share->table_name.str, &create_info, 1);
+ dd_recreate_table(thd, share->db.str, share->table_name.str,
+ share->normalized_path.str);
- if (open_table_uncached(thd, share->path.str, share->db.str,
+ if (open_table_uncached(thd, table_type, share->path.str, share->db.str,
share->table_name.str, true, true))
{
error= FALSE;
@@ -346,9 +337,27 @@ bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref,
thd->variables.lock_wait_timeout, 0))
DBUG_RETURN(TRUE);
- if (dd_check_storage_engine_flag(thd, table_ref->db, table_ref->table_name,
- HTON_CAN_RECREATE, hton_can_recreate))
+ handlerton *hton;
+ if (!ha_table_exists(thd, table_ref->db, table_ref->table_name, &hton) ||
+ hton == view_pseudo_hton)
+ {
+ my_error(ER_NO_SUCH_TABLE, MYF(0), table_ref->db, table_ref->table_name);
DBUG_RETURN(TRUE);
+ }
+
+ if (!hton)
+ {
+ /*
+ The table exists, but its storage engine is unknown, perhaps not
+ loaded at the moment. We need to open and parse the frm to know the
+ storage engine in question, so let's proceed with the truncation and
+ try to open the table. This will produce the correct error message
+ about unknown engine.
+ */
+ *hton_can_recreate= false;
+ }
+ else
+ *hton_can_recreate= hton->flags & HTON_CAN_RECREATE;
}
/*
@@ -360,7 +369,8 @@ bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref,
{
DEBUG_SYNC(thd, "upgrade_lock_for_truncate");
/* To remove the table from the cache we need an exclusive lock. */
- if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_DROP))
+ if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_DROP,
+ TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE))
DBUG_RETURN(TRUE);
m_ticket_downgrade= table->mdl_ticket;
/* Close if table is going to be recreated. */
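(Annotation.) In the lock_table() hunk above, ha_table_exists() replaces the frm-based probe: a table can now exist while its storage engine is unknown or not loaded, in which case hton comes back NULL and TRUNCATE falls back to the non-recreate path so that opening the table later reports the proper unknown-engine error. A sketch of that decision with stand-in names (hton_sk, SK_HTON_CAN_RECREATE and table_exists_sk are illustrative only):

#include <cstdio>

// Stand-ins for handlerton and its flags.
struct hton_sk { unsigned flags; };
static const unsigned SK_HTON_CAN_RECREATE = 1u << 0;

// exists + which engine (NULL = exists but engine not loaded).
static bool table_exists_sk(const char *, const char *, hton_sk **hton)
{
  static hton_sk memory_like = { SK_HTON_CAN_RECREATE };
  *hton = &memory_like;          // pretend the engine is known and loaded
  return true;
}

int main()
{
  hton_sk *hton = NULL;
  bool can_recreate;

  if (!table_exists_sk("test", "t1", &hton))
    return 1;                    // server raises ER_NO_SUCH_TABLE here
  if (!hton)
    can_recreate = false;        // unknown engine: open the table to get the error
  else
    can_recreate = (hton->flags & SK_HTON_CAN_RECREATE) != 0;

  printf("TRUNCATE via recreate: %s\n", can_recreate ? "yes" : "no");
  return 0;
}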
diff --git a/sql/sql_udf.h b/sql/sql_udf.h
index cdb15b9e0f5..4aa055b9858 100644
--- a/sql/sql_udf.h
+++ b/sql/sql_udf.h
@@ -103,14 +103,14 @@ class udf_handler :public Sql_alloc
if (get_arguments())
{
*null_value=1;
- return LL(0);
+ return 0;
}
Udf_func_longlong func= (Udf_func_longlong) u_d->func;
longlong tmp=func(&initid, &f_args, &is_null, &error);
if (is_null || error)
{
*null_value=1;
- return LL(0);
+ return 0;
}
*null_value=0;
return tmp;
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 6a1e4d745e8..a835c182c86 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -83,13 +83,16 @@ int select_union::send_data(List<Item> &values)
*/
return -1;
}
+ bool is_duplicate;
/* create_internal_tmp_table_from_heap will generate error if needed */
if (table->file->is_fatal_error(write_err, HA_CHECK_DUP) &&
create_internal_tmp_table_from_heap(thd, table,
tmp_table_param.start_recinfo,
&tmp_table_param.recinfo,
- write_err, 1))
+ write_err, 1, &is_duplicate))
return 1;
+ if (is_duplicate)
+ return -1;
}
return 0;
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 1dd83fa8865..b91215bcedd 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2011 Monty Program Ab
+/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2011, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -527,7 +527,10 @@ int mysql_update(THD *thd,
/* If quick select is used, initialize it before retrieving rows. */
if (select && select->quick && select->quick->reset())
+ {
+ close_cached_file(&tempfile);
goto err;
+ }
table->file->try_semi_consistent_read(1);
/*
@@ -579,13 +582,18 @@ int mysql_update(THD *thd,
}
else
{
- table->file->unlock_row();
+ /*
+ Don't try unlocking the row if skip_record reported an error since in
+ this case the transaction might have been rolled back already.
+ */
if (error < 0)
{
/* Fatal error from select->skip_record() */
error= 1;
break;
}
+ else
+ table->file->unlock_row();
}
}
if (thd->killed && !error)
@@ -822,8 +830,17 @@ int mysql_update(THD *thd,
}
}
}
- else
+ /*
+ Don't try unlocking the row if skip_record reported an error since in
+ this case the transaction might have been rolled back already.
+ */
+ else if (!thd->is_error())
table->file->unlock_row();
+ else
+ {
+ error= 1;
+ break;
+ }
thd->get_stmt_da()->inc_current_row_for_warning();
if (thd->is_error())
{
@@ -1573,6 +1590,15 @@ int multi_update::prepare(List<Item> &not_used_values,
DBUG_RETURN(thd->is_fatal_error != 0);
}
+void multi_update::update_used_tables()
+{
+ Item *item;
+ List_iterator_fast<Item> it(*values);
+ while ((item= it++))
+ {
+ item->update_used_tables();
+ }
+}
/*
Check if table is safe to update on fly
@@ -1981,7 +2007,7 @@ int multi_update::send_data(List<Item> &not_used_values)
create_internal_tmp_table_from_heap(thd, tmp_table,
tmp_table_param[offset].start_recinfo,
&tmp_table_param[offset].recinfo,
- error, 1))
+ error, 1, NULL))
{
do_update= 0;
DBUG_RETURN(1); // Not a table_is_full error
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index d99867836cf..e0a567420ba 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2004, 2011, Oracle and/or its affiliates.
- Copyright (c) 2011 Monty Program Ab
+ Copyright (c) 2011, 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#define MYSQL_LEX 1
@@ -33,7 +33,7 @@
#include "sp_head.h"
#include "sp.h"
#include "sp_cache.h"
-#include "datadict.h" // dd_frm_type()
+#include "datadict.h" // dd_frm_is_view()
#define MD5_BUFF_LENGTH 33
@@ -211,17 +211,12 @@ static void make_valid_column_names(List<Item> &item_list)
static bool
fill_defined_view_parts (THD *thd, TABLE_LIST *view)
{
- const char *key;
- uint key_length;
LEX *lex= thd->lex;
TABLE_LIST decoy;
memcpy (&decoy, view, sizeof (TABLE_LIST));
-
- key_length= get_table_def_key(view, &key);
-
- if (tdc_open_view(thd, &decoy, decoy.alias, key, key_length,
- thd->mem_root, OPEN_VIEW_NO_PARSE))
+ if (tdc_open_view(thd, &decoy, decoy.alias, thd->mem_root,
+ OPEN_VIEW_NO_PARSE))
return TRUE;
if (!lex->definer)
@@ -873,7 +868,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
view->source= thd->lex->create_view_select;
if (!thd->make_lex_string(&view->select_stmt, view_query.ptr(),
- view_query.length(), false))
+ view_query.length()))
{
my_error(ER_OUT_OF_RESOURCES, MYF(0));
error= -1;
@@ -1006,7 +1001,7 @@ loop_out:
view->view_creation_ctx->get_connection_cl()->name);
if (!thd->make_lex_string(&view->view_body_utf8, is_query.ptr(),
- is_query.length(), false))
+ is_query.length()))
{
my_error(ER_OUT_OF_RESOURCES, MYF(0));
error= -1;
@@ -1158,9 +1153,10 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
TODO: when VIEWs will be stored in cache, table mem_root should
be used here
*/
- if (parser->parse((uchar*)table, thd->mem_root, view_parameters,
- required_view_parameters, &file_parser_dummy_hook))
- goto err;
+ if ((result= parser->parse((uchar*)table, thd->mem_root,
+ view_parameters, required_view_parameters,
+ &file_parser_dummy_hook)))
+ goto end;
/*
check old format view .frm
@@ -1223,6 +1219,11 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
now Lex placed in statement memory
*/
table->view= lex= thd->lex= (LEX*) new(thd->mem_root) st_lex_local;
+ if (!table->view)
+ {
+ result= true;
+ goto end;
+ }
{
char old_db_buf[SAFE_NAME_LEN+1];
@@ -1648,7 +1649,6 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
String non_existant_views;
char *wrong_object_db= NULL, *wrong_object_name= NULL;
bool error= FALSE;
- enum legacy_db_type not_used;
bool some_views_deleted= FALSE;
bool something_wrong= FALSE;
DBUG_ENTER("mysql_drop_view");
@@ -1670,23 +1670,28 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
for (view= views; view; view= view->next_local)
{
- frm_type_enum type= FRMTYPE_ERROR;
+ bool not_exist;
build_table_filename(path, sizeof(path) - 1,
view->db, view->table_name, reg_ext, 0);
- if (access(path, F_OK) ||
- FRMTYPE_VIEW != (type= dd_frm_type(thd, path, &not_used)))
+ if ((not_exist= my_access(path, F_OK)) || !dd_frm_is_view(thd, path))
{
char name[FN_REFLEN];
my_snprintf(name, sizeof(name), "%s.%s", view->db, view->table_name);
- if (thd->lex->drop_if_exists)
+ if (thd->lex->check_exists)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR),
name);
continue;
}
- if (type == FRMTYPE_TABLE)
+ if (not_exist)
+ {
+ if (non_existant_views.length())
+ non_existant_views.append(',');
+ non_existant_views.append(String(view->table_name,system_charset_info));
+ }
+ else
{
if (!wrong_object_name)
{
@@ -1694,12 +1699,6 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
wrong_object_name= view->table_name;
}
}
- else
- {
- if (non_existant_views.length())
- non_existant_views.append(',');
- non_existant_views.append(String(view->table_name,system_charset_info));
- }
continue;
}
if (mysql_file_delete(key_file_frm, path, MYF(MY_WME)))
@@ -1708,9 +1707,8 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
some_views_deleted= TRUE;
/*
- For a view, there is a TABLE_SHARE object, but its
- ref_count never goes above 1. Remove it from the table
- definition cache, in case the view was cached.
+ For a view, there is a TABLE_SHARE object.
+ Remove it from the table definition cache, in case the view was cached.
*/
tdc_remove_table(thd, TDC_RT_REMOVE_ALL, view->db, view->table_name,
FALSE);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 5022972018f..07666822acf 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1,6 +1,6 @@
/*
- Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2010, 2011 Monty Program Ab
+ Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2010, 2011, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -32,6 +32,7 @@
#define YYTHD ((THD *)yythd)
#define YYLIP (& YYTHD->m_parser_state->m_lip)
#define YYPS (& YYTHD->m_parser_state->m_yacc)
+#define YYCSCL YYTHD->variables.character_set_client
#define MYSQL_YACC
#define YYINITDEPTH 100
@@ -775,7 +776,7 @@ static bool add_create_index (LEX *lex, Key::Keytype type,
{
Key *key;
key= new Key(type, name, info ? info : &lex->key_create_info, generated,
- lex->col_list, lex->option_list);
+ lex->col_list, lex->option_list, lex->check_exists);
if (key == NULL)
return TRUE;
@@ -952,6 +953,7 @@ static bool sp_create_assignment_instr(THD *thd, bool no_lookahead)
List<Condition_information_item> *cond_info_list;
DYNCALL_CREATE_DEF *dyncol_def;
List<DYNCALL_CREATE_DEF> *dyncol_def_list;
+ bool is_not_empty;
}
%{
@@ -960,10 +962,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%pure_parser /* We have threads */
/*
- Currently there are 167 shift/reduce conflicts.
+ Currently there are 185 shift/reduce conflicts.
We should not introduce new conflicts any more.
*/
-%expect 167
+%expect 185
/*
Comments for TOKENS.
@@ -1006,6 +1008,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token AUTHORS_SYM
%token AUTOEXTEND_SIZE_SYM
%token AUTO_INC
+%token AUTO_SYM
%token AVG_ROW_LENGTH
%token AVG_SYM /* SQL-2003-N */
%token BACKUP_SYM
@@ -1087,6 +1090,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token CURDATE /* MYSQL-FUNC */
%token CURRENT_SYM /* SQL-2003-R */
%token CURRENT_USER /* SQL-2003-R */
+%token CURRENT_POS_SYM
%token CURSOR_SYM /* SQL-2003-R */
%token CURSOR_NAME_SYM /* SQL-2003-N */
%token CURTIME /* MYSQL-FUNC */
@@ -1196,6 +1200,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token HAVING /* SQL-2003-R */
%token HELP_SYM
%token HEX_NUM
+%token HEX_STRING
%token HIGH_PRIORITY
%token HOST_SYM
%token HOSTS_SYM
@@ -1269,6 +1274,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token LOW_PRIORITY
%token LT /* OPERATOR */
%token MASTER_CONNECT_RETRY_SYM
+%token MASTER_GTID_POS_SYM
%token MASTER_HOST_SYM
%token MASTER_LOG_FILE_SYM
%token MASTER_LOG_POS_SYM
@@ -1286,6 +1292,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token MASTER_SSL_VERIFY_SERVER_CERT_SYM
%token MASTER_SYM
%token MASTER_USER_SYM
+%token MASTER_USE_GTID_SYM
%token MASTER_HEARTBEAT_PERIOD_SYM
%token MATCH /* SQL-2003-R */
%token MAX_CONNECTIONS_PER_HOUR
@@ -1472,6 +1479,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token SIMPLE_SYM /* SQL-2003-N */
%token SLAVE
%token SLAVES
+%token SLAVE_POS_SYM
%token SLOW
%token SMALLINT /* SQL-2003-R */
%token SNAPSHOT_SYM
@@ -1632,19 +1640,20 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%left INTERVAL_SYM
%type <lex_str>
- IDENT IDENT_QUOTED TEXT_STRING DECIMAL_NUM FLOAT_NUM NUM LONG_NUM HEX_NUM
+ IDENT IDENT_QUOTED TEXT_STRING DECIMAL_NUM FLOAT_NUM NUM LONG_NUM
+ HEX_NUM HEX_STRING hex_num_or_string
LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text
IDENT_sys TEXT_STRING_sys TEXT_STRING_literal
NCHAR_STRING opt_component key_cache_name
sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem ident_or_empty
- opt_constraint constraint opt_ident
+ opt_constraint constraint opt_ident opt_if_not_exists_ident
%type <lex_str_ptr>
opt_table_alias
%type <table>
table_ident table_ident_nodb references xid
- table_ident_opt_wild
+ table_ident_opt_wild create_like
%type <simple_string>
remember_name remember_end opt_db text_or_password
@@ -1654,7 +1663,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <num>
type type_with_opt_collate int_type real_type order_dir lock_option
- udf_type if_exists opt_local opt_table_options table_options
+ udf_type opt_if_exists opt_local opt_table_options table_options
table_option opt_if_not_exists opt_no_write_to_binlog
opt_temporary all_or_any opt_distinct
opt_ignore_leaves fulltext_options spatial_type union_option
@@ -1692,7 +1701,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
replace_lock_option opt_low_priority insert_lock_option load_data_lock
%type <item>
- literal text_literal insert_ident order_ident
+ literal text_literal insert_ident order_ident temporal_literal
simple_ident expr opt_expr opt_else sum_expr in_sum_expr
variable variable_aux bool_pri
predicate bit_expr
@@ -1815,7 +1824,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
clear_privileges flush_options flush_option
opt_with_read_lock flush_options_list
equal optional_braces
- opt_mi_check_type opt_to mi_check_types normal_join
+ opt_mi_check_type opt_to mi_check_types
table_to_table_list table_to_table opt_table_list opt_as
handler_rkey_function handler_read_or_scan
single_multi table_wild_list table_wild_one opt_wild
@@ -1823,7 +1832,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
precision subselect_start opt_and charset
subselect_end select_var_list select_var_list_init help
field_length opt_field_length
- opt_extended_describe
+ opt_extended_describe shutdown
prepare prepare_src execute deallocate
statement sp_suid
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
@@ -1862,7 +1871,7 @@ END_OF_INPUT
%type <lex> sp_cursor_stmt
%type <spname> sp_name
%type <index_hint> index_hint_type
-%type <num> index_hint_clause
+%type <num> index_hint_clause normal_join inner_join
%type <filetype> data_or_xml
%type <NONE> signal_stmt resignal_stmt
@@ -1881,8 +1890,12 @@ END_OF_INPUT
'-' '+' '*' '/' '%' '(' ')'
',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM
THEN_SYM WHEN_SYM DIV_SYM MOD_SYM OR2_SYM AND_AND_SYM DELETE_SYM
+
+%type <is_not_empty> opt_union_order_or_limit
+
%%
+
/*
Indentation of grammar rules:
@@ -2007,6 +2020,7 @@ statement:
| set
| signal_stmt
| show
+ | shutdown
| slave
| start
| truncate
@@ -2269,6 +2283,35 @@ master_file_def:
/* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */
Lex->mi.relay_log_pos= MY_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos);
}
+ | MASTER_USE_GTID_SYM EQ CURRENT_POS_SYM
+ {
+ if (Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)
+ {
+ my_error(ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid");
+ MYSQL_YYABORT;
+ }
+ Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_CURRENT_POS;
+ }
+ ;
+ | MASTER_USE_GTID_SYM EQ SLAVE_POS_SYM
+ {
+ if (Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)
+ {
+ my_error(ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid");
+ MYSQL_YYABORT;
+ }
+ Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_SLAVE_POS;
+ }
+ ;
+ | MASTER_USE_GTID_SYM EQ NO_SYM
+ {
+ if (Lex->mi.use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_UNCHANGED)
+ {
+ my_error(ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid");
+ MYSQL_YYABORT;
+ }
+ Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_NO;
+ }
;
optional_connection_name:
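
For reference, the CHANGE MASTER syntax these new master_file_def alternatives accept should look roughly like this (connection names and values are placeholders):

    CHANGE MASTER TO MASTER_USE_GTID = slave_pos;
    CHANGE MASTER TO MASTER_USE_GTID = current_pos;
    CHANGE MASTER TO MASTER_USE_GTID = no;

Repeating the option within one statement trips the ER_DUP_ARGUMENT check above.
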
@@ -2322,7 +2365,7 @@ create:
lex->name.length= 0;
lex->create_last_non_select_table= lex->last_table();
}
- create2
+ create_body
{
LEX *lex= YYTHD->lex;
lex->current_select= &lex->select_lex;
@@ -2338,38 +2381,38 @@ create:
}
create_table_set_open_action_and_adjust_tables(lex);
}
- | CREATE opt_unique INDEX_SYM ident key_alg ON table_ident
+ | CREATE opt_unique INDEX_SYM opt_if_not_exists ident key_alg ON table_ident
{
- if (add_create_index_prepare(Lex, $7))
+ if (add_create_index_prepare(Lex, $8))
MYSQL_YYABORT;
}
'(' key_list ')' normal_key_options
{
- if (add_create_index(Lex, $2, $4))
+ if (add_create_index(Lex, $2, $5))
MYSQL_YYABORT;
}
opt_index_lock_algorithm { }
- | CREATE fulltext INDEX_SYM ident init_key_options ON
+ | CREATE fulltext INDEX_SYM opt_if_not_exists ident init_key_options ON
table_ident
{
- if (add_create_index_prepare(Lex, $7))
+ if (add_create_index_prepare(Lex, $8))
MYSQL_YYABORT;
}
'(' key_list ')' fulltext_key_options
{
- if (add_create_index(Lex, $2, $4))
+ if (add_create_index(Lex, $2, $5))
MYSQL_YYABORT;
}
opt_index_lock_algorithm { }
- | CREATE spatial INDEX_SYM ident init_key_options ON
+ | CREATE spatial INDEX_SYM opt_if_not_exists ident init_key_options ON
table_ident
{
- if (add_create_index_prepare(Lex, $7))
+ if (add_create_index_prepare(Lex, $8))
MYSQL_YYABORT;
}
'(' key_list ')' spatial_key_options
{
- if (add_create_index(Lex, $2, $4))
+ if (add_create_index(Lex, $2, $5))
MYSQL_YYABORT;
}
opt_index_lock_algorithm { }
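
With opt_if_not_exists spliced in after INDEX_SYM (which is why the later $-references shift by one), statements along these lines should now parse; index, table and column names are placeholders:

    CREATE INDEX IF NOT EXISTS idx_a ON t1 (a);
    CREATE UNIQUE INDEX IF NOT EXISTS idx_b ON t1 (b);
    CREATE FULLTEXT INDEX IF NOT EXISTS ft_c ON t1 (c);
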
@@ -4661,36 +4704,23 @@ size_number:
End tablespace part
*/
-create2:
- '(' create2a {}
- | opt_create_table_options
- opt_create_partitioning
- create3 {}
- | LIKE table_ident
- {
- THD *thd= YYTHD;
- TABLE_LIST *src_table;
- LEX *lex= thd->lex;
-
- lex->create_info.options|= HA_LEX_CREATE_TABLE_LIKE;
- src_table= lex->select_lex.add_table_to_list(thd, $2, NULL, 0,
- TL_READ,
- MDL_SHARED_READ);
- if (! src_table)
- MYSQL_YYABORT;
- /* CREATE TABLE ... LIKE is not allowed for views. */
- src_table->required_type= FRMTYPE_TABLE;
- }
- | '(' LIKE table_ident ')'
+create_body:
+ '(' create_field_list ')'
+ { Lex->create_info.option_list= NULL; }
+ opt_create_table_options opt_create_partitioning opt_create_select {}
+ | opt_create_table_options opt_create_partitioning opt_create_select {}
+ /*
+ the following rule is redundant, but there's a shift/reduce
+ conflict that prevents the rule above from parsing a syntax like
+ CREATE TABLE t1 (SELECT 1);
+ */
+ | '(' create_select ')' { Select->set_braces(1);} union_opt {}
+ | create_like
{
- THD *thd= YYTHD;
- TABLE_LIST *src_table;
- LEX *lex= thd->lex;
- lex->create_info.options|= HA_LEX_CREATE_TABLE_LIKE;
- src_table= lex->select_lex.add_table_to_list(thd, $3, NULL, 0,
- TL_READ,
- MDL_SHARED_READ);
+ Lex->create_info.options|= HA_LEX_CREATE_TABLE_LIKE;
+ TABLE_LIST *src_table= Lex->select_lex.add_table_to_list(YYTHD,
+ $1, NULL, 0, TL_READ, MDL_SHARED_READ);
if (! src_table)
MYSQL_YYABORT;
/* CREATE TABLE ... LIKE is not allowed for views. */
@@ -4698,21 +4728,12 @@ create2:
}
;
-create2a:
- create_field_list ')'
- {
- Lex->create_info.option_list= NULL;
- }
- opt_create_table_options
- opt_create_partitioning
- create3 {}
- | opt_create_partitioning
- create_select ')'
- { Select->set_braces(1);}
- union_opt {}
+create_like:
+ LIKE table_ident { $$= $2; }
+ | '(' LIKE table_ident ')' { $$= $3; }
;
-create3:
+opt_create_select:
/* empty */ {}
| opt_duplicate opt_as create_select
{ Select->set_braces(0);}
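
The old create2/create2a/create3 rules collapse into create_body, create_like and opt_create_select; the statement shapes they cover are roughly the following (table names are placeholders):

    CREATE TABLE t2 (a INT) ENGINE=MyISAM;              -- '(' create_field_list ')' plus options
    CREATE TABLE t3 ENGINE=MyISAM AS SELECT a FROM t1;  -- options + opt_create_select
    CREATE TABLE t4 (SELECT 1);                         -- the redundant rule noted in the comment above
    CREATE TABLE t5 LIKE t1;                            -- create_like
    CREATE TABLE t6 (LIKE t1);
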
@@ -4823,7 +4844,7 @@ partition:
;
part_type_def:
- opt_linear KEY_SYM '(' part_field_list ')'
+ opt_linear KEY_SYM opt_key_algo '(' part_field_list ')'
{
partition_info *part_info= Lex->part_info;
part_info->list_of_part_fields= TRUE;
@@ -4849,6 +4870,25 @@ opt_linear:
{ Lex->part_info->linear_hash_ind= TRUE;}
;
+opt_key_algo:
+ /* empty */
+ { Lex->part_info->key_algorithm= partition_info::KEY_ALGORITHM_NONE;}
+ | ALGORITHM_SYM EQ real_ulong_num
+ {
+ switch ($3) {
+ case 1:
+ Lex->part_info->key_algorithm= partition_info::KEY_ALGORITHM_51;
+ break;
+ case 2:
+ Lex->part_info->key_algorithm= partition_info::KEY_ALGORITHM_55;
+ break;
+ default:
+ my_parse_error(ER(ER_SYNTAX_ERROR));
+ MYSQL_YYABORT;
+ }
+ }
+ ;
+
part_field_list:
/* empty */ {}
| part_field_item_list {}
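
opt_key_algo lets a KEY partitioning clause pin the hashing function, with ALGORITHM=1 selecting the 5.1-compatible hashing and ALGORITHM=2 the 5.5 one (per the KEY_ALGORITHM_51/55 cases above); table and column names below are placeholders:

    CREATE TABLE t1 (a INT, b INT)
      PARTITION BY KEY ALGORITHM=2 (a)
      PARTITIONS 4;
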
@@ -4930,7 +4970,7 @@ opt_sub_part:
| SUBPARTITION_SYM BY opt_linear HASH_SYM sub_part_func
{ Lex->part_info->subpart_type= HASH_PARTITION; }
opt_num_subparts {}
- | SUBPARTITION_SYM BY opt_linear KEY_SYM
+ | SUBPARTITION_SYM BY opt_linear KEY_SYM opt_key_algo
'(' sub_part_field_list ')'
{
partition_info *part_info= Lex->part_info;
@@ -5472,9 +5512,17 @@ table_option:
;
opt_if_not_exists:
- /* empty */ { $$= 0; }
- | IF not EXISTS { $$=HA_LEX_CREATE_IF_NOT_EXISTS; }
- ;
+ /* empty */
+ {
+ Lex->check_exists= FALSE;
+ $$= 0;
+ }
+ | IF not EXISTS
+ {
+ Lex->check_exists= TRUE;
+ $$=HA_LEX_CREATE_IF_NOT_EXISTS;
+ }
+ ;
opt_create_table_options:
/* empty */
@@ -5770,7 +5818,7 @@ storage_engines:
plugin_ref plugin= ha_resolve_by_name(YYTHD, &$1);
if (plugin)
- $$= plugin_data(plugin, handlerton*);
+ $$= plugin_hton(plugin);
else
{
if (YYTHD->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION)
@@ -5792,7 +5840,7 @@ known_storage_engines:
{
plugin_ref plugin;
if ((plugin= ha_resolve_by_name(YYTHD, &$1)))
- $$= plugin_data(plugin, handlerton*);
+ $$= plugin_hton(plugin);
else
{
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str);
@@ -5856,14 +5904,14 @@ column_def:
;
key_def:
- normal_key_type opt_ident key_alg '(' key_list ')'
+ normal_key_type opt_if_not_exists_ident key_alg '(' key_list ')'
{ Lex->option_list= NULL; }
normal_key_options
{
if (add_create_index (Lex, $1, $2))
MYSQL_YYABORT;
}
- | fulltext opt_key_or_index opt_ident init_key_options
+ | fulltext opt_key_or_index opt_if_not_exists_ident init_key_options
'(' key_list ')'
{ Lex->option_list= NULL; }
fulltext_key_options
@@ -5871,7 +5919,7 @@ key_def:
if (add_create_index (Lex, $1, $3))
MYSQL_YYABORT;
}
- | spatial opt_key_or_index opt_ident init_key_options
+ | spatial opt_key_or_index opt_if_not_exists_ident init_key_options
'(' key_list ')'
{ Lex->option_list= NULL; }
spatial_key_options
@@ -5887,7 +5935,7 @@ key_def:
if (add_create_index (Lex, $2, $3.str ? $3 : $1))
MYSQL_YYABORT;
}
- | opt_constraint FOREIGN KEY_SYM opt_ident '(' key_list ')' references
+ | opt_constraint FOREIGN KEY_SYM opt_if_not_exists_ident '(' key_list ')' references
{
LEX *lex=Lex;
Key *key= new Foreign_key($4.str ? $4 : $1, lex->col_list,
@@ -5896,7 +5944,8 @@ key_def:
lex->ref_list,
lex->fk_delete_opt,
lex->fk_update_opt,
- lex->fk_match_option);
+ lex->fk_match_option,
+ lex->check_exists);
if (key == NULL)
MYSQL_YYABORT;
lex->alter_info.key_list.push_back(key);
@@ -6447,6 +6496,11 @@ now_or_signed_literal:
{ $$=$1; }
;
+hex_num_or_string:
+ HEX_NUM {}
+ | HEX_STRING {}
+ ;
+
charset:
CHAR_SYM SET {}
| CHARSET {}
@@ -6864,6 +6918,18 @@ opt_ident:
| field_ident { $$= $1; }
;
+opt_if_not_exists_ident:
+ opt_if_not_exists opt_ident
+ {
+ LEX *lex= Lex;
+ if (lex->check_exists && lex->sql_command != SQLCOM_ALTER_TABLE)
+ {
+ my_parse_error(ER(ER_SYNTAX_ERROR));
+ MYSQL_YYABORT;
+ }
+ $$= $2;
+ };
+
opt_component:
/* empty */ { $$= null_lex_str; }
| '.' ident { $$= $2; }
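
Because opt_if_not_exists_ident rejects IF NOT EXISTS outside of ALTER TABLE, the new key_def forms are aimed at statements like these (identifiers are placeholders):

    ALTER TABLE t1 ADD INDEX IF NOT EXISTS idx_a (a);
    ALTER TABLE t1 ADD FULLTEXT INDEX IF NOT EXISTS ft_b (b);
    ALTER TABLE t1 ADD FOREIGN KEY IF NOT EXISTS fk_c (c) REFERENCES t2 (c);
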
@@ -7132,7 +7198,7 @@ alter_commands:
will be longer.
*/
| add_partition_rule
- | DROP PARTITION_SYM alt_part_name_list
+ | DROP PARTITION_SYM opt_if_exists alt_part_name_list
{
Lex->alter_info.flags|= Alter_info::ALTER_DROP_PARTITION;
}
@@ -7258,7 +7324,7 @@ all_or_alt_part_name_list:
;
add_partition_rule:
- ADD PARTITION_SYM opt_no_write_to_binlog
+ ADD PARTITION_SYM opt_if_not_exists opt_no_write_to_binlog
{
LEX *lex= Lex;
lex->part_info= new partition_info();
@@ -7268,7 +7334,7 @@ add_partition_rule:
MYSQL_YYABORT;
}
lex->alter_info.flags|= Alter_info::ALTER_ADD_PARTITION;
- lex->no_write_to_binlog= $3;
+ lex->no_write_to_binlog= $4;
}
add_part_extra
{}
@@ -7344,7 +7410,7 @@ alter_list:
;
add_column:
- ADD opt_column
+ ADD opt_column opt_if_not_exists
{
LEX *lex=Lex;
lex->change=0;
@@ -7367,10 +7433,10 @@ alter_list_item:
Lex->alter_info.flags|= Alter_info::ALTER_ADD_COLUMN |
Alter_info::ALTER_ADD_INDEX;
}
- | CHANGE opt_column field_ident
+ | CHANGE opt_column opt_if_exists field_ident
{
LEX *lex=Lex;
- lex->change= $3.str;
+ lex->change= $4.str;
lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN;
lex->option_list= NULL;
}
@@ -7378,7 +7444,7 @@ alter_list_item:
{
Lex->create_last_non_select_table= Lex->last_table();
}
- | MODIFY_SYM opt_column field_ident
+ | MODIFY_SYM opt_column opt_if_exists field_ident
{
LEX *lex=Lex;
lex->length=lex->dec=0; lex->type=0;
@@ -7392,12 +7458,12 @@ alter_list_item:
field_def
{
LEX *lex=Lex;
- if (add_field_to_list(lex->thd,&$3,
- (enum enum_field_types) $5,
+ if (add_field_to_list(lex->thd,&$4,
+ (enum enum_field_types) $6,
lex->length,lex->dec,lex->type,
lex->default_value, lex->on_update_value,
&lex->comment,
- $3.str, &lex->interval_list, lex->charset,
+ $4.str, &lex->interval_list, lex->charset,
lex->uint_geom_type,
lex->vcol_info, lex->option_list))
MYSQL_YYABORT;
@@ -7406,19 +7472,19 @@ alter_list_item:
{
Lex->create_last_non_select_table= Lex->last_table();
}
- | DROP opt_column field_ident opt_restrict
+ | DROP opt_column opt_if_exists field_ident opt_restrict
{
LEX *lex=Lex;
- Alter_drop *ad= new Alter_drop(Alter_drop::COLUMN, $3.str);
+ Alter_drop *ad= new Alter_drop(Alter_drop::COLUMN, $4.str, $3);
if (ad == NULL)
MYSQL_YYABORT;
lex->alter_info.drop_list.push_back(ad);
lex->alter_info.flags|= Alter_info::ALTER_DROP_COLUMN;
}
- | DROP FOREIGN KEY_SYM field_ident
+ | DROP FOREIGN KEY_SYM opt_if_exists field_ident
{
LEX *lex=Lex;
- Alter_drop *ad= new Alter_drop(Alter_drop::FOREIGN_KEY, $4.str);
+ Alter_drop *ad= new Alter_drop(Alter_drop::FOREIGN_KEY, $5.str, $4);
if (ad == NULL)
MYSQL_YYABORT;
lex->alter_info.drop_list.push_back(ad);
@@ -7427,16 +7493,17 @@ alter_list_item:
| DROP PRIMARY_SYM KEY_SYM
{
LEX *lex=Lex;
- Alter_drop *ad= new Alter_drop(Alter_drop::KEY, primary_key_name);
+ Alter_drop *ad= new Alter_drop(Alter_drop::KEY, primary_key_name,
+ FALSE);
if (ad == NULL)
MYSQL_YYABORT;
lex->alter_info.drop_list.push_back(ad);
lex->alter_info.flags|= Alter_info::ALTER_DROP_INDEX;
}
- | DROP key_or_index field_ident
+ | DROP key_or_index opt_if_exists field_ident
{
LEX *lex=Lex;
- Alter_drop *ad= new Alter_drop(Alter_drop::KEY, $3.str);
+ Alter_drop *ad= new Alter_drop(Alter_drop::KEY, $4.str, $3);
if (ad == NULL)
MYSQL_YYABORT;
lex->alter_info.drop_list.push_back(ad);
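
Taken together, the opt_if_exists/opt_if_not_exists additions to add_column, CHANGE, MODIFY, DROP and the partition rules are aimed at idempotent ALTERs, roughly (all names are placeholders):

    ALTER TABLE t1
      ADD COLUMN IF NOT EXISTS c1 INT,
      MODIFY IF EXISTS c2 BIGINT,
      DROP COLUMN IF EXISTS c3,
      DROP INDEX IF EXISTS idx_old,
      DROP FOREIGN KEY IF EXISTS fk_old;
    ALTER TABLE t2 DROP PARTITION IF EXISTS p0;
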
@@ -7770,6 +7837,10 @@ slave_until:
MYSQL_YYABORT;
}
}
+ | UNTIL_SYM MASTER_GTID_POS_SYM EQ TEXT_STRING_sys
+ {
+ Lex->mi.gtid_pos_str = $4;
+ }
;
slave_until_opts:
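
The new UNTIL alternative simply stores the text into Lex->mi.gtid_pos_str, so a slave can be told to stop at a GTID position, for example (the position string is a placeholder):

    START SLAVE UNTIL master_gtid_pos = '0-1-100';
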
@@ -9081,7 +9152,48 @@ simple_expr:
MYSQL_YYABORT;
}
| '{' ident expr '}'
- { $$= $3; }
+ {
+ Item_string *item;
+ $$= NULL;
+ /*
+ If "expr" is reasonably short pure ASCII string literal,
+ try to parse known ODBC style date, time or timestamp literals,
+ e.g:
+ SELECT {d'2001-01-01'};
+ SELECT {t'10:20:30'};
+ SELECT {ts'2001-01-01 10:20:30'};
+ */
+ if ($3->type() == Item::STRING_ITEM &&
+ (item= (Item_string *) $3) &&
+ item->collation.repertoire == MY_REPERTOIRE_ASCII &&
+ item->str_value.length() < MAX_DATE_STRING_REP_LENGTH * 4)
+ {
+ enum_field_types type= MYSQL_TYPE_STRING;
+ LEX_STRING *ls= &$2;
+ if (ls->length == 1)
+ {
+ if (ls->str[0] == 'd') /* {d'2001-01-01'} */
+ type= MYSQL_TYPE_DATE;
+ else if (ls->str[0] == 't') /* {t'10:20:30'} */
+ type= MYSQL_TYPE_TIME;
+ }
+ else if (ls->length == 2) /* {ts'2001-01-01 10:20:30'} */
+ {
+ if (ls->str[0] == 't' && ls->str[1] == 's')
+ type= MYSQL_TYPE_DATETIME;
+ }
+ if (type != MYSQL_TYPE_STRING)
+ {
+ $$= create_temporal_literal(YYTHD,
+ item->str_value.ptr(),
+ item->str_value.length(),
+ item->str_value.charset(),
+ type, false);
+ }
+ }
+ if ($$ == NULL)
+ $$= $3;
+ }
| MATCH ident_list_arg AGAINST '(' bit_expr fulltext_options ')'
{
$2->push_front($5);
@@ -10075,6 +10187,7 @@ sum_expr:
if ($$ == NULL)
MYSQL_YYABORT;
$5->empty();
+ sel->gorder_list.empty();
}
;
@@ -10144,18 +10257,27 @@ opt_gconcat_separator:
opt_gorder_clause:
/* empty */
+ | ORDER_SYM BY
{
- Select->gorder_list = NULL;
- }
- | order_clause
- {
- SELECT_LEX *select= Select;
- select->gorder_list= new (YYTHD->mem_root)
- SQL_I_List<ORDER>(select->order_list);
- if (select->gorder_list == NULL)
+ LEX *lex= Lex;
+ SELECT_LEX *sel= lex->current_select;
+ if (sel->linkage != GLOBAL_OPTIONS_TYPE &&
+ sel->olap != UNSPECIFIED_OLAP_TYPE &&
+ (sel->linkage != UNION_TYPE || sel->braces))
+ {
+ my_error(ER_WRONG_USAGE, MYF(0),
+ "CUBE/ROLLUP", "ORDER BY");
MYSQL_YYABORT;
- select->order_list.empty();
+ }
}
+ gorder_list;
+ ;
+
+gorder_list:
+ gorder_list ',' order_ident order_dir
+ { if (add_gorder_to_list(YYTHD, $3,(bool) $4)) MYSQL_YYABORT; }
+ | order_ident order_dir
+ { if (add_gorder_to_list(YYTHD, $1,(bool) $2)) MYSQL_YYABORT; }
;
in_sum_expr:
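
opt_gorder_clause now builds its list through add_gorder_to_list() instead of copying the outer order_list; the syntax it parses is the usual GROUP_CONCAT ordering (names are placeholders):

    SELECT grp, GROUP_CONCAT(name ORDER BY id DESC SEPARATOR '|')
    FROM t1
    GROUP BY grp;
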
@@ -10288,7 +10410,10 @@ table_ref:
{
LEX *lex= Lex;
if (!($$= lex->current_select->nest_last_join(lex->thd)))
+ {
+ my_parse_error(ER(ER_SYNTAX_ERROR));
MYSQL_YYABORT;
+ }
}
;
@@ -10333,9 +10458,7 @@ join_table:
left-associative joins.
*/
table_ref normal_join table_ref %prec TABLE_REF_PRIORITY
- { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); }
- | table_ref STRAIGHT_JOIN table_factor
- { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); $3->straight=1; }
+ { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); $3->straight=$2; }
| table_ref normal_join table_ref
ON
{
@@ -10347,22 +10470,7 @@ join_table:
}
expr
{
- add_join_on($3,$6);
- Lex->pop_context();
- Select->parsing_place= NO_MATTER;
- }
- | table_ref STRAIGHT_JOIN table_factor
- ON
- {
- MYSQL_YYABORT_UNLESS($1 && $3);
- /* Change the current name resolution context to a local context. */
- if (push_new_name_resolution_context(YYTHD, $1, $3))
- MYSQL_YYABORT;
- Select->parsing_place= IN_ON;
- }
- expr
- {
- $3->straight=1;
+ $3->straight=$2;
add_join_on($3,$6);
Lex->pop_context();
Select->parsing_place= NO_MATTER;
@@ -10373,10 +10481,15 @@ join_table:
MYSQL_YYABORT_UNLESS($1 && $3);
}
'(' using_list ')'
- { add_join_natural($1,$3,$7,Select); $$=$3; }
- | table_ref NATURAL JOIN_SYM table_factor
+ {
+ $3->straight=$2;
+ add_join_natural($1,$3,$7,Select);
+ $$=$3;
+ }
+ | table_ref NATURAL inner_join table_factor
{
MYSQL_YYABORT_UNLESS($1 && ($$=$4));
+ $4->straight=$3;
add_join_natural($1,$4,NULL,Select);
}
@@ -10456,10 +10569,16 @@ join_table:
}
;
+
+inner_join: /* $$ set if using STRAIGHT_JOIN, false otherwise */
+ JOIN_SYM { $$ = 0; }
+ | INNER_SYM JOIN_SYM { $$ = 0; }
+ | STRAIGHT_JOIN { $$ = 1; }
+ ;
+
normal_join:
- JOIN_SYM {}
- | INNER_SYM JOIN_SYM {}
- | CROSS JOIN_SYM {}
+ inner_join { $$ = $1; }
+ | CROSS JOIN_SYM { $$ = 0; }
;
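
Folding STRAIGHT_JOIN into inner_join, with $$ carrying the straight flag, means the ON, USING and NATURAL forms should all accept it now (table and column names are placeholders):

    SELECT * FROM t1 STRAIGHT_JOIN t2 ON t1.a = t2.a;
    SELECT * FROM t1 STRAIGHT_JOIN t2 USING (a);
    SELECT * FROM t1 NATURAL STRAIGHT_JOIN t2;
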
/*
@@ -10576,12 +10695,16 @@ table_factor:
lex->pop_context();
lex->nest_level--;
}
- else if (($3->select_lex &&
+ /*else if (($3->select_lex &&
$3->select_lex->master_unit()->is_union() &&
($3->select_lex->master_unit()->first_select() ==
- $3->select_lex || !$3->lifted)) || $5)
+ $3->select_lex || !$3->lifted)) || $5)*/
+ else if ($5 != NULL)
{
- /* simple nested joins cannot have aliases or unions */
+ /*
+ Tables with or without joins within parentheses cannot
+ have aliases, and we ruled out derived tables above.
+ */
my_parse_error(ER(ER_SYNTAX_ERROR));
MYSQL_YYABORT;
}
@@ -10594,8 +10717,34 @@ table_factor:
}
;
+/*
+ This rule accepts just about anything. The reason is that we have
+ empty-producing rules in the beginning of rules, in this case
+ subselect_start. This forces bison to take a decision which rules to
+ reduce by long before it has seen any tokens. This approach ties us
+ to a very limited class of parseable languages, and unfortunately
+ SQL is not one of them. The chosen 'solution' was this rule, which
+ produces just about anything, even complete bogus statements, for
+ instance ( table UNION SELECT 1 ).
+ Fortunately, we know that the semantic value returned by
+ select_derived is NULL if it contained a derived table, and a pointer to
+ the base table's TABLE_LIST if it was a base table. So in the rule
+ regarding union's, we throw a parse error manually and pretend it
+ regarding unions, we throw a parse error manually and pretend it
+ was bison that did it.
+
+ Also worth noting is that this rule concerns query expressions in
+ the from clause only. Top level select statements and other types of
+ subqueries have their own union rules.
+*/
select_derived_union:
select_derived opt_union_order_or_limit
+ {
+ if ($1 && $2)
+ {
+ my_parse_error(ER(ER_SYNTAX_ERROR));
+ MYSQL_YYABORT;
+ }
+ }
| select_derived_union
UNION_SYM
union_option
@@ -10612,6 +10761,13 @@ select_derived_union:
Lex->pop_context();
}
opt_union_order_or_limit
+ {
+ if ($1 != NULL)
+ {
+ my_parse_error(ER(ER_SYNTAX_ERROR));
+ MYSQL_YYABORT;
+ }
+ }
;
/* The equivalent of select_init2 for nested queries. */
@@ -11437,41 +11593,41 @@ do:
*/
drop:
- DROP opt_temporary table_or_tables if_exists
+ DROP opt_temporary table_or_tables opt_if_exists
{
LEX *lex=Lex;
lex->sql_command = SQLCOM_DROP_TABLE;
lex->drop_temporary= $2;
- lex->drop_if_exists= $4;
+ lex->check_exists= $4;
YYPS->m_lock_type= TL_UNLOCK;
YYPS->m_mdl_type= MDL_EXCLUSIVE;
}
table_list opt_restrict
{}
- | DROP INDEX_SYM ident ON table_ident {}
+ | DROP INDEX_SYM opt_if_exists ident ON table_ident {}
{
LEX *lex=Lex;
- Alter_drop *ad= new Alter_drop(Alter_drop::KEY, $3.str);
+ Alter_drop *ad= new Alter_drop(Alter_drop::KEY, $4.str, $3);
if (ad == NULL)
MYSQL_YYABORT;
lex->sql_command= SQLCOM_DROP_INDEX;
lex->alter_info.reset();
lex->alter_info.flags= Alter_info::ALTER_DROP_INDEX;
lex->alter_info.drop_list.push_back(ad);
- if (!lex->current_select->add_table_to_list(lex->thd, $5, NULL,
+ if (!lex->current_select->add_table_to_list(lex->thd, $6, NULL,
TL_OPTION_UPDATING,
TL_READ_NO_INSERT,
MDL_SHARED_UPGRADABLE))
MYSQL_YYABORT;
}
- | DROP DATABASE if_exists ident
+ | DROP DATABASE opt_if_exists ident
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_DROP_DB;
- lex->drop_if_exists=$3;
+ lex->check_exists=$3;
lex->name= $4;
}
- | DROP FUNCTION_SYM if_exists ident '.' ident
+ | DROP FUNCTION_SYM opt_if_exists ident '.' ident
{
THD *thd= YYTHD;
LEX *lex= thd->lex;
@@ -11487,14 +11643,14 @@ drop:
MYSQL_YYABORT;
}
lex->sql_command = SQLCOM_DROP_FUNCTION;
- lex->drop_if_exists= $3;
+ lex->check_exists= $3;
spname= new sp_name($4, $6, true);
if (spname == NULL)
MYSQL_YYABORT;
spname->init_qname(thd);
lex->spname= spname;
}
- | DROP FUNCTION_SYM if_exists ident
+ | DROP FUNCTION_SYM opt_if_exists ident
{
THD *thd= YYTHD;
LEX *lex= thd->lex;
@@ -11508,14 +11664,14 @@ drop:
if (thd->db && lex->copy_db_to(&db.str, &db.length))
MYSQL_YYABORT;
lex->sql_command = SQLCOM_DROP_FUNCTION;
- lex->drop_if_exists= $3;
+ lex->check_exists= $3;
spname= new sp_name(db, $4, false);
if (spname == NULL)
MYSQL_YYABORT;
spname->init_qname(thd);
lex->spname= spname;
}
- | DROP PROCEDURE_SYM if_exists sp_name
+ | DROP PROCEDURE_SYM opt_if_exists sp_name
{
LEX *lex=Lex;
if (lex->sphead)
@@ -11524,34 +11680,34 @@ drop:
MYSQL_YYABORT;
}
lex->sql_command = SQLCOM_DROP_PROCEDURE;
- lex->drop_if_exists= $3;
+ lex->check_exists= $3;
lex->spname= $4;
}
| DROP USER clear_privileges user_list
{
Lex->sql_command = SQLCOM_DROP_USER;
}
- | DROP VIEW_SYM if_exists
+ | DROP VIEW_SYM opt_if_exists
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_DROP_VIEW;
- lex->drop_if_exists= $3;
+ lex->check_exists= $3;
YYPS->m_lock_type= TL_UNLOCK;
YYPS->m_mdl_type= MDL_EXCLUSIVE;
}
table_list opt_restrict
{}
- | DROP EVENT_SYM if_exists sp_name
+ | DROP EVENT_SYM opt_if_exists sp_name
{
- Lex->drop_if_exists= $3;
+ Lex->check_exists= $3;
Lex->spname= $4;
Lex->sql_command = SQLCOM_DROP_EVENT;
}
- | DROP TRIGGER_SYM if_exists sp_name
+ | DROP TRIGGER_SYM opt_if_exists sp_name
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_DROP_TRIGGER;
- lex->drop_if_exists= $3;
+ lex->check_exists= $3;
lex->spname= $4;
}
| DROP TABLESPACE tablespace_name opt_ts_engine opt_ts_wait
@@ -11564,10 +11720,10 @@ drop:
LEX *lex= Lex;
lex->alter_tablespace_info->ts_cmd_type= DROP_LOGFILE_GROUP;
}
- | DROP SERVER_SYM if_exists ident_or_text
+ | DROP SERVER_SYM opt_if_exists ident_or_text
{
Lex->sql_command = SQLCOM_DROP_SERVER;
- Lex->drop_if_exists= $3;
+ Lex->check_exists= $3;
Lex->server_options.server_name= $4.str;
Lex->server_options.server_name_length= $4.length;
}
@@ -11618,9 +11774,17 @@ table_alias_ref:
}
;
-if_exists:
- /* empty */ { $$= 0; }
- | IF EXISTS { $$= 1; }
+opt_if_exists:
+ /* empty */
+ {
+ Lex->check_exists= FALSE;
+ $$= 0;
+ }
+ | IF EXISTS
+ {
+ Lex->check_exists= TRUE;
+ $$= 1;
+ }
;
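
With if_exists renamed to opt_if_exists (and now also setting Lex->check_exists), DROP INDEX gains an IF EXISTS form alongside the existing ones, e.g. (object names are placeholders):

    DROP INDEX IF EXISTS idx_a ON t1;
    DROP VIEW IF EXISTS v1;
    DROP PROCEDURE IF EXISTS p1;
    DROP SERVER IF EXISTS s1;
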
opt_temporary:
@@ -12086,12 +12250,15 @@ show:
{
LEX *lex=Lex;
lex->wild=0;
+ lex->ident=null_lex_str;
mysql_init_select(lex);
lex->current_select->parsing_place= SELECT_LIST;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
}
show_param
- {}
+ {
+ Select->parsing_place= NO_MATTER;
+ }
;
show_param:
@@ -12149,6 +12316,19 @@ show_param:
if (prepare_schema_table(YYTHD, lex, 0, SCH_PLUGINS))
MYSQL_YYABORT;
}
+ | PLUGINS_SYM SONAME_SYM TEXT_STRING_sys
+ {
+ Lex->ident= $3;
+ Lex->sql_command= SQLCOM_SHOW_PLUGINS;
+ if (prepare_schema_table(YYTHD, Lex, 0, SCH_ALL_PLUGINS))
+ MYSQL_YYABORT;
+ }
+ | PLUGINS_SYM SONAME_SYM wild_and_where
+ {
+ Lex->sql_command= SQLCOM_SHOW_PLUGINS;
+ if (prepare_schema_table(YYTHD, Lex, 0, SCH_ALL_PLUGINS))
+ MYSQL_YYABORT;
+ }
| ENGINE_SYM known_storage_engines show_engine_param
{ Lex->create_info.db_type= $2; }
| ENGINE_SYM ALL show_engine_param
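
The two new SONAME alternatives of show_param query the SCH_ALL_PLUGINS schema table by library name or pattern, along the lines of (the library name is a placeholder):

    SHOW PLUGINS SONAME 'ha_example.so';
    SHOW PLUGINS SONAME LIKE '%example%';
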
@@ -12491,7 +12671,10 @@ describe:
if (prepare_schema_table(YYTHD, lex, $2, SCH_COLUMNS))
MYSQL_YYABORT;
}
- opt_describe_column {}
+ opt_describe_column
+ {
+ Select->parsing_place= NO_MATTER;
+ }
| describe_command opt_extended_describe
{ Lex->describe|= DESCRIBE_NORMAL; }
select
@@ -12746,6 +12929,11 @@ kill_expr:
}
;
+
+shutdown:
+ SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; }
+ ;
+
/* change database */
use:
@@ -13030,7 +13218,7 @@ text_string:
}
| HEX_NUM
{
- Item *tmp= new (YYTHD->mem_root) Item_hex_string($1.str, $1.length);
+ Item *tmp= new (YYTHD->mem_root) Item_hex_hybrid($1.str, $1.length);
if (tmp == NULL)
MYSQL_YYABORT;
/*
@@ -13040,6 +13228,14 @@ text_string:
tmp->quick_fix_field();
$$= tmp->val_str((String*) 0);
}
+ | HEX_STRING
+ {
+ Item *tmp= new (YYTHD->mem_root) Item_hex_string($1.str, $1.length);
+ if (tmp == NULL)
+ MYSQL_YYABORT;
+ tmp->quick_fix_field();
+ $$= tmp->val_str((String*) 0);
+ }
| BIN_NUM
{
Item *tmp= new (YYTHD->mem_root) Item_bin_string($1.str, $1.length);
@@ -13088,6 +13284,7 @@ signed_literal:
literal:
text_literal { $$ = $1; }
| NUM_literal { $$ = $1; }
+ | temporal_literal { $$= $1; }
| NULL_SYM
{
$$ = new (YYTHD->mem_root) Item_null();
@@ -13109,6 +13306,12 @@ literal:
}
| HEX_NUM
{
+ $$ = new (YYTHD->mem_root) Item_hex_hybrid($1.str, $1.length);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | HEX_STRING
+ {
$$ = new (YYTHD->mem_root) Item_hex_string($1.str, $1.length);
if ($$ == NULL)
MYSQL_YYABORT;
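
HEX_NUM (0x31) now maps to Item_hex_hybrid while HEX_STRING (X'31') becomes a plain Item_hex_string, so the two literal styles should diverge in numeric context; assuming the new semantics:

    SELECT 0x31 + 0;     -- hybrid: keeps its numeric interpretation, 49
    SELECT X'31' + 0;    -- string: the one-byte string '1', which converts to 1
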
@@ -13119,7 +13322,7 @@ literal:
if ($$ == NULL)
MYSQL_YYABORT;
}
- | UNDERSCORE_CHARSET HEX_NUM
+ | UNDERSCORE_CHARSET hex_num_or_string
{
Item *tmp= new (YYTHD->mem_root) Item_hex_string($2.str, $2.length);
if (tmp == NULL)
@@ -13176,9 +13379,6 @@ literal:
$$= item_str;
}
- | DATE_SYM text_literal { $$ = $2; }
- | TIME_SYM text_literal { $$ = $2; }
- | TIMESTAMP text_literal { $$ = $2; }
;
NUM_literal:
@@ -13227,6 +13427,31 @@ NUM_literal:
}
;
+
+temporal_literal:
+ DATE_SYM TEXT_STRING
+ {
+ if (!($$= create_temporal_literal(YYTHD, $2.str, $2.length, YYCSCL,
+ MYSQL_TYPE_DATE, true)))
+ MYSQL_YYABORT;
+ }
+ | TIME_SYM TEXT_STRING
+ {
+ if (!($$= create_temporal_literal(YYTHD, $2.str, $2.length, YYCSCL,
+ MYSQL_TYPE_TIME, true)))
+ MYSQL_YYABORT;
+ }
+ | TIMESTAMP TEXT_STRING
+ {
+ if (!($$= create_temporal_literal(YYTHD, $2.str, $2.length, YYCSCL,
+ MYSQL_TYPE_DATETIME, true)))
+ MYSQL_YYABORT;
+ }
+ ;
+
+
+
+
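
temporal_literal replaces the DATE/TIME/TIMESTAMP text_literal pass-through removed above with typed literals built by create_temporal_literal(); for example (values are placeholders):

    SELECT DATE'2001-01-01', TIME'10:20:30', TIMESTAMP'2001-01-01 10:20:30';
    SELECT DATE'2001-02-30';   -- should now be rejected at parse time rather than kept as a string
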
/**********************************************************************
** Creating different items.
**********************************************************************/
@@ -13675,7 +13900,7 @@ user:
$$->auth= empty_lex_str;
if (check_string_char_length(&$$->user, ER(ER_USERNAME),
- USERNAME_CHAR_LENGTH,
+ username_char_length,
system_charset_info, 0))
MYSQL_YYABORT;
}
@@ -13690,7 +13915,7 @@ user:
$$->auth= empty_lex_str;
if (check_string_char_length(&$$->user, ER(ER_USERNAME),
- USERNAME_CHAR_LENGTH,
+ username_char_length,
system_charset_info, 0) ||
check_host_name(&$$->host))
MYSQL_YYABORT;
@@ -13747,6 +13972,7 @@ keyword:
| LANGUAGE_SYM {}
| NO_SYM {}
| OPEN_SYM {}
+ | OPTION {}
| OPTIONS_SYM {}
| OWNER_SYM {}
| PARSER_SYM {}
@@ -13794,6 +14020,7 @@ keyword_sp:
| AUTHORS_SYM {}
| AUTO_INC {}
| AUTOEXTEND_SIZE_SYM {}
+ | AUTO_SYM {}
| AVG_ROW_LENGTH {}
| AVG_SYM {}
| BINLOG_SYM {}
@@ -13827,6 +14054,7 @@ keyword_sp:
| CONSTRAINT_NAME_SYM {}
| CONTEXT_SYM {}
| CONTRIBUTORS_SYM {}
+ | CURRENT_POS_SYM {}
| CPU_SYM {}
| CUBE_SYM {}
/*
@@ -13911,11 +14139,13 @@ keyword_sp:
| MAX_ROWS {}
| MASTER_SYM {}
| MASTER_HEARTBEAT_PERIOD_SYM {}
+ | MASTER_GTID_POS_SYM {}
| MASTER_HOST_SYM {}
| MASTER_PORT_SYM {}
| MASTER_LOG_FILE_SYM {}
| MASTER_LOG_POS_SYM {}
| MASTER_USER_SYM {}
+ | MASTER_USE_GTID_SYM {}
| MASTER_PASSWORD_SYM {}
| MASTER_SERVER_ID_SYM {}
| MASTER_CONNECT_RETRY_SYM {}
@@ -14023,6 +14253,7 @@ keyword_sp:
| SIMPLE_SYM {}
| SHARE_SYM {}
| SHUTDOWN {}
+ | SLAVE_POS_SYM {}
| SLOW {}
| SNAPSHOT_SYM {}
| SOFT_SYM {}
@@ -15293,8 +15524,8 @@ union_opt:
;
opt_union_order_or_limit:
- /* Empty */
- | union_order_or_limit
+ /* Empty */{ $$= false; }
+ | union_order_or_limit { $$= true; }
;
union_order_or_limit:
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index b8126fab048..a5a64c065ce 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* Some useful string utility functions used by the MySQL server */
@@ -86,7 +86,7 @@ ulonglong find_set(TYPELIB *lib, const char *str, uint length, CHARSET_INFO *cs,
*set_warning= 1;
}
else
- found|= ((longlong) 1 << (find - 1));
+ found|= 1ULL << (find - 1);
if (pos >= end)
break;
start= pos + mblen;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 3888b58e277..457636629a1 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -31,7 +31,7 @@
(for example in storage/myisam/ha_myisam.cc) !
*/
-#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
+#include "sql_plugin.h"
#include "sql_priv.h"
#include "sql_class.h" // set_var.h: THD
#include "sys_vars.h"
@@ -59,6 +59,8 @@
#include "../storage/perfschema/pfs_server.h"
#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
#include "threadpool.h"
+#include "sql_repl.h"
+#include "opt_range.h"
/*
The rule for this file: everything should be 'static'. When a sys_var
@@ -376,7 +378,7 @@ static Sys_var_ulonglong Sys_binlog_cache_size(
"you can increase this to get more performance",
GLOBAL_VAR(binlog_cache_size),
CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(IO_SIZE, ULONGLONG_MAX), DEFAULT(32768), BLOCK_SIZE(IO_SIZE));
+ VALID_RANGE(IO_SIZE, SIZE_T_MAX), DEFAULT(32768), BLOCK_SIZE(IO_SIZE));
static Sys_var_ulonglong Sys_binlog_stmt_cache_size(
"binlog_stmt_cache_size", "The size of the statement cache for "
@@ -385,7 +387,7 @@ static Sys_var_ulonglong Sys_binlog_stmt_cache_size(
"you can increase this to get more performance",
GLOBAL_VAR(binlog_stmt_cache_size),
CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(IO_SIZE, ULONGLONG_MAX), DEFAULT(32768), BLOCK_SIZE(IO_SIZE));
+ VALID_RANGE(IO_SIZE, SIZE_T_MAX), DEFAULT(32768), BLOCK_SIZE(IO_SIZE));
/*
Some variables like @sql_log_bin and @binlog_format change how/if binlogging
@@ -518,7 +520,7 @@ static Sys_var_ulonglong Sys_bulk_insert_buff_size(
"bulk_insert_buffer_size", "Size of tree cache used in bulk "
"insert optimisation. Note that this is a limit per thread!",
SESSION_VAR(bulk_insert_buff_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, ULONGLONG_MAX), DEFAULT(8192*1024), BLOCK_SIZE(1));
+ VALID_RANGE(0, SIZE_T_MAX), DEFAULT(8192*1024), BLOCK_SIZE(1));
static Sys_var_charptr Sys_character_sets_dir(
"character_sets_dir", "Directory where character sets are",
@@ -538,16 +540,19 @@ static bool check_charset(sys_var *self, THD *thd, set_var *var)
if (var->value->result_type() == STRING_RESULT)
{
String str(buff, sizeof(buff), system_charset_info), *res;
- if (!(res=var->value->val_str(&str)))
+ if (!(res= var->value->val_str(&str)))
var->save_result.ptr= NULL;
- else if (!(var->save_result.ptr= get_charset_by_csname(res->c_ptr(),
- MY_CS_PRIMARY,
- MYF(0))) &&
- !(var->save_result.ptr=get_old_charset_by_name(res->c_ptr())))
+ else
{
- ErrConvString err(res);
- my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), err.ptr());
- return true;
+ ErrConvString err(res); /* Get utf8 '\0' terminated string */
+ if (!(var->save_result.ptr= get_charset_by_csname(err.ptr(),
+ MY_CS_PRIMARY,
+ MYF(0))) &&
+ !(var->save_result.ptr= get_old_charset_by_name(err.ptr())))
+ {
+ my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), err.ptr());
+ return true;
+ }
}
}
else // INT_RESULT
@@ -658,11 +663,14 @@ static bool check_collation_not_null(sys_var *self, THD *thd, set_var *var)
String str(buff, sizeof(buff), system_charset_info), *res;
if (!(res= var->value->val_str(&str)))
var->save_result.ptr= NULL;
- else if (!(var->save_result.ptr= get_charset_by_name(res->c_ptr(), MYF(0))))
+ else
{
- ErrConvString err(res);
- my_error(ER_UNKNOWN_COLLATION, MYF(0), err.ptr());
- return true;
+ ErrConvString err(res); /* Get utf8 '\0'-terminated string */
+ if (!(var->save_result.ptr= get_charset_by_name(err.ptr(), MYF(0))))
+ {
+ my_error(ER_UNKNOWN_COLLATION, MYF(0), err.ptr());
+ return true;
+ }
}
}
else // INT_RESULT
@@ -1004,7 +1012,7 @@ static Sys_var_ulonglong Sys_join_buffer_size(
"join_buffer_size",
"The size of the buffer that is used for joins",
SESSION_VAR(join_buff_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(128, ULONGLONG_MAX), DEFAULT(128*1024), BLOCK_SIZE(128));
+ VALID_RANGE(128, SIZE_T_MAX), DEFAULT(128*1024), BLOCK_SIZE(128));
static Sys_var_keycache Sys_key_buffer_size(
"key_buffer_size", "The size of the buffer used for "
@@ -1244,16 +1252,16 @@ static Sys_var_ulonglong Sys_max_binlog_cache_size(
"max_binlog_cache_size",
"Sets the total size of the transactional cache",
GLOBAL_VAR(max_binlog_cache_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(IO_SIZE, ULONGLONG_MAX),
- DEFAULT((UINT_MAX/IO_SIZE)*IO_SIZE),
+ VALID_RANGE(IO_SIZE, SIZE_T_MAX),
+ DEFAULT((SIZE_T_MAX/IO_SIZE)*IO_SIZE),
BLOCK_SIZE(IO_SIZE));
static Sys_var_ulonglong Sys_max_binlog_stmt_cache_size(
"max_binlog_stmt_cache_size",
"Sets the total size of the statement cache",
GLOBAL_VAR(max_binlog_stmt_cache_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(IO_SIZE, ULONGLONG_MAX),
- DEFAULT((UINT_MAX/IO_SIZE)*IO_SIZE),
+ VALID_RANGE(IO_SIZE, SIZE_T_MAX),
+ DEFAULT((SIZE_T_MAX/IO_SIZE)*IO_SIZE),
BLOCK_SIZE(IO_SIZE));
static bool fix_max_binlog_size(sys_var *self, THD *thd, enum_var_type type)
@@ -1361,6 +1369,240 @@ static Sys_var_ulong Sys_pseudo_thread_id(
BLOCK_SIZE(1), NO_MUTEX_GUARD, IN_BINLOG,
ON_CHECK(check_has_super));
+static bool
+check_gtid_domain_id(sys_var *self, THD *thd, set_var *var)
+{
+ if (check_has_super(self, thd, var))
+ return true;
+ if (var->type != OPT_GLOBAL &&
+ error_if_in_trans_or_substatement(thd,
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO,
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO))
+ return true;
+
+ return false;
+}
+
+
+static Sys_var_uint Sys_gtid_domain_id(
+ "gtid_domain_id",
+ "Used with global transaction ID to identify logically independent "
+ "replication streams. When events can propagate through multiple "
+ "parallel paths (for example multiple masters), each independent "
+ "source server must use a distinct domain_id. For simple tree-shaped "
+ "replication topologies, it can be left at its default, 0.",
+ SESSION_VAR(gtid_domain_id),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, UINT_MAX32), DEFAULT(0),
+ BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(check_gtid_domain_id));
+
+
+static bool check_gtid_seq_no(sys_var *self, THD *thd, set_var *var)
+{
+ uint32 domain_id, server_id;
+ uint64 seq_no;
+
+ if (check_has_super(self, thd, var))
+ return true;
+ if (error_if_in_trans_or_substatement(thd,
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO,
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO))
+ return true;
+
+ domain_id= thd->variables.gtid_domain_id;
+ server_id= thd->variables.server_id;
+ seq_no= (uint64)var->value->val_uint();
+ DBUG_EXECUTE_IF("ignore_set_gtid_seq_no_check", return 0;);
+ if (opt_gtid_strict_mode && opt_bin_log &&
+ mysql_bin_log.check_strict_gtid_sequence(domain_id, server_id, seq_no))
+ return true;
+
+ return false;
+}
+
+
+static Sys_var_ulonglong Sys_gtid_seq_no(
+ "gtid_seq_no",
+ "Internal server usage, for replication with global transaction id. "
+ "When set, next event group logged to the binary log will use this "
+ "sequence number, not generate a new one, thus allowing to preserve "
+ "master's GTID in slave's binlog.",
+ SESSION_ONLY(gtid_seq_no),
+ NO_CMD_LINE, VALID_RANGE(0, ULONGLONG_MAX), DEFAULT(0),
+ BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(check_gtid_seq_no));
+
+
+#ifdef HAVE_REPLICATION
+static unsigned char opt_gtid_binlog_pos_dummy;
+static Sys_var_gtid_binlog_pos Sys_gtid_binlog_pos(
+ "gtid_binlog_pos", "Last GTID logged to the binary log, per replication"
+ "domain",
+ READ_ONLY GLOBAL_VAR(opt_gtid_binlog_pos_dummy), NO_CMD_LINE);
+
+
+uchar *
+Sys_var_gtid_binlog_pos::global_value_ptr(THD *thd, LEX_STRING *base)
+{
+ char buf[128];
+ String str(buf, sizeof(buf), system_charset_info);
+ char *p;
+
+ if (!rpl_global_gtid_slave_state.loaded)
+ {
+ my_error(ER_CANNOT_LOAD_SLAVE_GTID_STATE, MYF(0), "mysql",
+ rpl_gtid_slave_state_table_name.str);
+ return NULL;
+ }
+ str.length(0);
+ if ((opt_bin_log && mysql_bin_log.append_state_pos(&str)) ||
+ !(p= thd->strmake(str.ptr(), str.length())))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ return NULL;
+ }
+
+ return (uchar *)p;
+}
+
+
+static unsigned char opt_gtid_current_pos_dummy;
+static Sys_var_gtid_current_pos Sys_gtid_current_pos(
+ "gtid_current_pos", "Current GTID position of the server. Per "
+ "replication domain, this is either the last GTID replicated by a "
+ "slave thread, or the GTID logged to the binary log, whichever is "
+ "most recent.",
+ READ_ONLY GLOBAL_VAR(opt_gtid_current_pos_dummy), NO_CMD_LINE);
+
+
+uchar *
+Sys_var_gtid_current_pos::global_value_ptr(THD *thd, LEX_STRING *base)
+{
+ String str;
+ char *p;
+
+ str.length(0);
+ if (rpl_append_gtid_state(&str, true) ||
+ !(p= thd->strmake(str.ptr(), str.length())))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ return NULL;
+ }
+
+ return (uchar *)p;
+}
+
+
+bool
+Sys_var_gtid_slave_pos::do_check(THD *thd, set_var *var)
+{
+ String str, *res;
+ bool running;
+
+ DBUG_ASSERT(var->type == OPT_GLOBAL);
+
+ if (!rpl_global_gtid_slave_state.loaded)
+ {
+ my_error(ER_CANNOT_LOAD_SLAVE_GTID_STATE, MYF(0), "mysql",
+ rpl_gtid_slave_state_table_name.str);
+ return true;
+ }
+
+ mysql_mutex_lock(&LOCK_active_mi);
+ running= master_info_index->give_error_if_slave_running();
+ mysql_mutex_unlock(&LOCK_active_mi);
+ if (running)
+ return true;
+ if (!(res= var->value->val_str(&str)))
+ return true;
+ if (thd->in_active_multi_stmt_transaction())
+ {
+ my_error(ER_CANT_DO_THIS_DURING_AN_TRANSACTION, MYF(0));
+ return true;
+ }
+ if (rpl_gtid_pos_check(thd, &((*res)[0]), res->length()))
+ return true;
+
+ if (!(var->save_result.string_value.str=
+ thd->strmake(res->ptr(), res->length())))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ return true;
+ }
+ var->save_result.string_value.length= res->length();
+ return false;
+}
+
+
+bool
+Sys_var_gtid_slave_pos::global_update(THD *thd, set_var *var)
+{
+ bool err;
+
+ DBUG_ASSERT(var->type == OPT_GLOBAL);
+
+ if (!var->value)
+ {
+ my_error(ER_NO_DEFAULT, MYF(0), var->var->name.str);
+ return true;
+ }
+
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ mysql_mutex_lock(&LOCK_active_mi);
+ if (master_info_index->give_error_if_slave_running())
+ err= true;
+ else
+ err= rpl_gtid_pos_update(thd, var->save_result.string_value.str,
+ var->save_result.string_value.length);
+ mysql_mutex_unlock(&LOCK_active_mi);
+ mysql_mutex_lock(&LOCK_global_system_variables);
+ return err;
+}
+
+
+uchar *
+Sys_var_gtid_slave_pos::global_value_ptr(THD *thd, LEX_STRING *base)
+{
+ String str;
+ char *p;
+
+ if (!rpl_global_gtid_slave_state.loaded)
+ {
+ my_error(ER_CANNOT_LOAD_SLAVE_GTID_STATE, MYF(0), "mysql",
+ rpl_gtid_slave_state_table_name.str);
+ return NULL;
+ }
+
+ str.length(0);
+ if (rpl_append_gtid_state(&str, false) ||
+ !(p= thd->strmake(str.ptr(), str.length())))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ return NULL;
+ }
+
+ return (uchar *)p;
+}
+
+
+static unsigned char opt_gtid_slave_pos_dummy;
+static Sys_var_gtid_slave_pos Sys_gtid_slave_pos(
+ "gtid_slave_pos",
+ "The list of global transaction IDs that were last replicated on the "
+ "server, one for each replication domain.",
+ GLOBAL_VAR(opt_gtid_slave_pos_dummy), NO_CMD_LINE);
+
+
+static Sys_var_mybool Sys_gtid_strict_mode(
+ "gtid_strict_mode",
+ "Enforce strict seq_no ordering of events in the binary log. Slave "
+ "stops with an error if it encounters an event that would cause it to "
+ "generate an out-of-order binlog if executed.",
+ GLOBAL_VAR(opt_gtid_strict_mode),
+ CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+#endif
+
+
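
Taken together, the new GTID variables should be usable along these lines (GTID values are placeholders; gtid_slave_pos can only be set while all slave threads are stopped, per the do_check above):

    SET SESSION gtid_domain_id = 1;
    SET SESSION gtid_seq_no = 1000;
    SET GLOBAL gtid_slave_pos = '1-10-100';
    SET GLOBAL gtid_strict_mode = ON;
    SELECT @@GLOBAL.gtid_binlog_pos, @@GLOBAL.gtid_current_pos, @@GLOBAL.gtid_slave_pos;
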
static bool fix_max_join_size(sys_var *self, THD *thd, enum_var_type type)
{
SV *sv= type == OPT_GLOBAL ? &global_system_variables : &thd->variables;
@@ -1584,6 +1826,35 @@ static Sys_var_ulong Sys_optimizer_prune_level(
SESSION_VAR(optimizer_prune_level), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 1), DEFAULT(1), BLOCK_SIZE(1));
+static Sys_var_ulong Sys_optimizer_selectivity_sampling_limit(
+ "optimizer_selectivity_sampling_limit",
+ "Controls number of record samples to check condition selectivity",
+ SESSION_VAR(optimizer_selectivity_sampling_limit),
+ CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(SELECTIVITY_SAMPLING_THRESHOLD, UINT_MAX),
+ DEFAULT(SELECTIVITY_SAMPLING_LIMIT), BLOCK_SIZE(1));
+
+static Sys_var_ulong Sys_optimizer_use_condition_selectivity(
+ "optimizer_use_condition_selectivity",
+ "Controls selectivity of which conditions the optimizer takes into "
+ "account to calculate cardinality of a partial join when it searches "
+ "for the best execution plan "
+ "Meaning: "
+ "1 - use selectivity of index backed range conditions to calculate "
+ "the cardinality of a partial join if the last joined table is "
+ "accessed by full table scan or an index scan, "
+ "2 - use selectivity of index backed range conditions to calculate "
+ "the cardinality of a partial join in any case, "
+ "3 - additionally always use selectivity of range conditions that are "
+ "not backed by any index to calculate the cardinality of a partial join, "
+ "4 - use histograms to calculate selectivity of range conditions that "
+ "are not backed by any index to calculate the cardinality of "
+ "a partial join."
+ "5 - additionally use selectivity of certain non-range predicates "
+ "calculated on record samples",
+ SESSION_VAR(optimizer_use_condition_selectivity), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(1, 5), DEFAULT(1), BLOCK_SIZE(1));
+
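
One possible way to exercise the new knobs (the values are arbitrary and must respect the VALID_RANGEs declared above):

    SET SESSION optimizer_use_condition_selectivity = 4;
    SET SESSION optimizer_selectivity_sampling_limit = 1000;
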
/** Warns about deprecated value 63 */
static bool fix_optimizer_search_depth(sys_var *self, THD *thd,
enum_var_type type)
@@ -1633,6 +1904,7 @@ export const char *optimizer_switch_names[]=
"optimize_join_buffer_size",
"table_elimination",
"extended_keys",
+ "exists_to_in",
"default", NullS
};
/** propagates changes to @@engine_condition_pushdown */
@@ -1674,7 +1946,8 @@ static Sys_var_flagset Sys_optimizer_switch(
"semijoin_with_cache, "
"subquery_cache, "
"table_elimination, "
- "extended_keys "
+ "extended_keys, "
+ "exists_to_in "
"} and val is one of {on, off, default}",
SESSION_VAR(optimizer_switch), CMD_LINE(REQUIRED_ARG),
optimizer_switch_names, DEFAULT(OPTIMIZER_SWITCH_DEFAULT),
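
The new exists_to_in flag is toggled like any other optimizer_switch entry:

    SET SESSION optimizer_switch = 'exists_to_in=on';
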
@@ -2028,19 +2301,26 @@ static bool fix_query_cache_size(sys_var *self, THD *thd, enum_var_type type)
query_cache_size= new_cache_size;
return false;
}
+static bool fix_query_cache_limit(sys_var *self, THD *thd, enum_var_type type)
+{
+ query_cache.result_size_limit(query_cache_limit);
+ return false;
+}
static Sys_var_ulonglong Sys_query_cache_size(
"query_cache_size",
"The memory allocated to store results from old queries",
GLOBAL_VAR(query_cache_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, ULONGLONG_MAX), DEFAULT(0), BLOCK_SIZE(1024),
+ VALID_RANGE(0, ULONG_MAX), DEFAULT(0), BLOCK_SIZE(1024),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_query_cache_size),
ON_UPDATE(fix_query_cache_size));
static Sys_var_ulong Sys_query_cache_limit(
"query_cache_limit",
"Don't cache results that are bigger than this",
- GLOBAL_VAR(query_cache.query_cache_limit), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, UINT_MAX), DEFAULT(1024*1024), BLOCK_SIZE(1));
+ GLOBAL_VAR(query_cache_limit), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, UINT_MAX), DEFAULT(1024*1024), BLOCK_SIZE(1),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL),
+ ON_UPDATE(fix_query_cache_limit));
static bool fix_qcache_min_res_unit(sys_var *self, THD *thd, enum_var_type type)
{
@@ -2124,17 +2404,27 @@ static Sys_var_charptr Sys_secure_file_priv(
static bool fix_server_id(sys_var *self, THD *thd, enum_var_type type)
{
- server_id_supplied = 1;
- thd->server_id= server_id;
+ if (type == OPT_GLOBAL)
+ {
+ server_id_supplied = 1;
+ thd->variables.server_id= global_system_variables.server_id;
+ /*
+ Historically, server_id was a global variable that is exported to
+ plugins. Now it is a session variable, and lives in the
+ global_system_variables struct, but we still need to export the
+ value for reading to plugins for backwards compatibility reasons.
+ */
+ ::server_id= global_system_variables.server_id;
+ }
return false;
}
static Sys_var_ulong Sys_server_id(
"server_id",
"Uniquely identifies the server instance in the community of "
"replication partners",
- GLOBAL_VAR(server_id), CMD_LINE(REQUIRED_ARG, OPT_SERVER_ID),
+ SESSION_VAR(server_id), CMD_LINE(REQUIRED_ARG, OPT_SERVER_ID),
VALID_RANGE(0, UINT_MAX32), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD,
- NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(fix_server_id));
+ NOT_IN_BINLOG, ON_CHECK(check_has_super), ON_UPDATE(fix_server_id));
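
With server_id turned into a SESSION_VAR guarded by check_has_super, both forms below should be accepted, the global one also refreshing the exported ::server_id for plugins:

    SET GLOBAL server_id = 2;
    SET SESSION server_id = 3;
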
static Sys_var_mybool Sys_slave_compressed_protocol(
"slave_compressed_protocol",
@@ -2227,7 +2517,7 @@ static Sys_var_ulonglong Sys_sort_buffer(
"sort_buffer_size",
"Each thread that needs to do a sort allocates a buffer of this size",
SESSION_VAR(sortbuff_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(MIN_SORT_MEMORY, ULONGLONG_MAX), DEFAULT(MAX_SORT_MEMORY),
+ VALID_RANGE(MIN_SORT_MEMORY, SIZE_T_MAX), DEFAULT(MAX_SORT_MEMORY),
BLOCK_SIZE(1));
export ulonglong expand_sql_mode(ulonglong sql_mode)
@@ -3093,7 +3383,7 @@ static Sys_var_ulonglong Sys_group_concat_max_len(
"group_concat_max_len",
"The maximum length of the result of function GROUP_CONCAT()",
SESSION_VAR(group_concat_max_len), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(4, ULONGLONG_MAX), DEFAULT(1024), BLOCK_SIZE(1));
+ VALID_RANGE(4, SIZE_T_MAX), DEFAULT(1024), BLOCK_SIZE(1));
static char *glob_hostname_ptr;
static Sys_var_charptr Sys_hostname(
@@ -3448,19 +3738,48 @@ static Sys_var_mybool Sys_relay_log_recovery(
bool Sys_var_rpl_filter::global_update(THD *thd, set_var *var)
{
bool result= true; // Assume error
+ Master_info *mi;
mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_mutex_lock(&LOCK_active_mi);
- if (!master_info_index->give_error_if_slave_running())
- result= set_filter_value(var->save_result.string_value.str);
+
+ if (!var->base.length) // no base name
+ {
+ mi= master_info_index->
+ get_master_info(&thd->variables.default_master_connection,
+ Sql_condition::WARN_LEVEL_ERROR);
+ }
+ else // has base name
+ {
+ mi= master_info_index->
+ get_master_info(&var->base,
+ Sql_condition::WARN_LEVEL_WARN);
+ }
+
+ if (mi)
+ {
+ if (mi->rli.slave_running)
+ {
+ my_error(ER_SLAVE_MUST_STOP, MYF(0),
+ mi->connection_name.length,
+ mi->connection_name.str);
+ result= true;
+ }
+ else
+ {
+ result= set_filter_value(var->save_result.string_value.str, mi);
+ }
+ }
+
mysql_mutex_unlock(&LOCK_active_mi);
mysql_mutex_lock(&LOCK_global_system_variables);
return result;
}
-bool Sys_var_rpl_filter::set_filter_value(const char *value)
+bool Sys_var_rpl_filter::set_filter_value(const char *value, Master_info *mi)
{
bool status= true;
+ Rpl_filter* rpl_filter= mi ? mi->rpl_filter : global_rpl_filter;
switch (opt_id) {
case OPT_REPLICATE_DO_DB:
@@ -3490,7 +3809,32 @@ uchar *Sys_var_rpl_filter::global_value_ptr(THD *thd, LEX_STRING *base)
{
char buf[256];
String tmp(buf, sizeof(buf), &my_charset_bin);
+ uchar *ret;
+ Master_info *mi;
+ Rpl_filter *rpl_filter;
+
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ mysql_mutex_lock(&LOCK_active_mi);
+ if (!base->length) // no base name
+ {
+ mi= master_info_index->
+ get_master_info(&thd->variables.default_master_connection,
+ Sql_condition::WARN_LEVEL_ERROR);
+ }
+ else // has base name
+ {
+ mi= master_info_index->
+ get_master_info(base,
+ Sql_condition::WARN_LEVEL_WARN);
+ }
+ mysql_mutex_lock(&LOCK_global_system_variables);
+ if (!mi)
+ {
+ mysql_mutex_unlock(&LOCK_active_mi);
+ return 0;
+ }
+ rpl_filter= mi->rpl_filter;
tmp.length(0);
switch (opt_id) {
@@ -3514,7 +3858,10 @@ uchar *Sys_var_rpl_filter::global_value_ptr(THD *thd, LEX_STRING *base)
break;
}
- return (uchar *) thd->strmake(tmp.ptr(), tmp.length());
+ ret= (uchar *) thd->strmake(tmp.ptr(), tmp.length());
+ mysql_mutex_unlock(&LOCK_active_mi);
+
+ return ret;
}
static Sys_var_rpl_filter Sys_replicate_do_db(
@@ -3582,6 +3929,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset)
{
Master_info *mi;
uint res= 0; // Default value
+ mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
@@ -3593,6 +3941,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset)
mysql_mutex_unlock(&mi->rli.data_lock);
}
mysql_mutex_unlock(&LOCK_active_mi);
+ mysql_mutex_lock(&LOCK_global_system_variables);
return res;
}
@@ -3604,6 +3953,8 @@ bool update_multi_source_variable(sys_var *self_var, THD *thd,
bool result= true;
Master_info *mi;
+ if (type == OPT_GLOBAL)
+ mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
@@ -3617,6 +3968,8 @@ bool update_multi_source_variable(sys_var *self_var, THD *thd,
mysql_mutex_unlock(&mi->rli.run_lock);
}
mysql_mutex_unlock(&LOCK_active_mi);
+ if (type == OPT_GLOBAL)
+ mysql_mutex_lock(&LOCK_global_system_variables);
return result;
}
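
Note the lock-order dance that global_update(), global_value_ptr(), get_master_info_uint_value() and update_multi_source_variable() all repeat: LOCK_global_system_variables is released before LOCK_active_mi is taken, and re-acquired once the Master_info work is finished, so the two mutexes are always acquired in the same order. A minimal stand-alone sketch of that pattern with standard C++ mutexes; the scoped_lock_swap name and the std::mutex types are illustrative only, not MariaDB code:

    #include <mutex>

    // Swap from holding `held` to holding `wanted` for the lifetime of the
    // object, then restore the original state -- mirroring the
    // unlock(LOCK_global_system_variables) / lock(LOCK_active_mi) pattern.
    class scoped_lock_swap
    {
    public:
      scoped_lock_swap(std::mutex &held, std::mutex &wanted)
        : held_(held), wanted_(wanted)
      {
        held_.unlock();            // release the outer lock first
        wanted_.lock();            // then take the inner one
      }
      ~scoped_lock_swap()
      {
        wanted_.unlock();          // release in reverse order
        held_.lock();              // re-acquire what the caller expects to hold
      }
    private:
      std::mutex &held_;
      std::mutex &wanted_;
    };
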
@@ -3982,6 +4335,23 @@ static Sys_var_enum Sys_optimizer_use_stat_tables(
SESSION_VAR(use_stat_tables), CMD_LINE(REQUIRED_ARG),
use_stat_tables_modes, DEFAULT(0));
+static Sys_var_ulong Sys_histogram_size(
+ "histogram_size",
+ "Number of bytes used for a histogram. "
+ "If set to 0, no histograms are created by ANALYZE.",
+ SESSION_VAR(histogram_size), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 255), DEFAULT(0), BLOCK_SIZE(1));
+
+extern const char *histogram_types[];
+static Sys_var_enum Sys_histogram_type(
+ "histogram_type",
+ "Specifies type of the histograms created by ANALYZE. "
+ "Possible values are: "
+ "SINGLE_PREC_HB - single precision height-balanced, "
+ "DOUBLE_PREC_HB - double precision height-balanced.",
+ SESSION_VAR(histogram_type), CMD_LINE(REQUIRED_ARG),
+ histogram_types, DEFAULT(0));
+
static Sys_var_mybool Sys_no_thread_alarm(
"debug_no_thread_alarm",
"Disable system thread alarm calls. Disabling it may be useful "
@@ -4014,9 +4384,67 @@ static Sys_var_ulong Sys_debug_binlog_fsync_sleep(
CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1));
#endif
+
static Sys_var_harows Sys_expensive_subquery_limit(
"expensive_subquery_limit",
"The maximum number of rows a subquery may examine in order to be "
"executed during optimization and used for constant optimization",
SESSION_VAR(expensive_subquery_limit), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, HA_POS_ERROR), DEFAULT(100), BLOCK_SIZE(1));
+
+static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var)
+{
+ longlong previous_val= thd->variables.pseudo_slave_mode;
+ longlong val= (longlong) var->save_result.ulonglong_value;
+ bool rli_fake= false;
+
+#ifndef EMBEDDED_LIBRARY
+ rli_fake= thd->rli_fake ? true : false;
+#endif
+
+ if (rli_fake)
+ {
+ if (!val)
+ {
+#ifndef EMBEDDED_LIBRARY
+ delete thd->rli_fake;
+ thd->rli_fake= NULL;
+#endif
+ }
+ else if (previous_val && val)
+ goto ineffective;
+ else if (!previous_val && val)
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "'pseudo_slave_mode' is already ON.");
+ }
+ else
+ {
+ if (!previous_val && !val)
+ goto ineffective;
+ else if (previous_val && !val)
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "Slave applier execution mode not active, "
+ "statement ineffective.");
+ }
+ goto end;
+
+ineffective:
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "'pseudo_slave_mode' change was ineffective.");
+
+end:
+ return FALSE;
+}
+static Sys_var_mybool Sys_pseudo_slave_mode(
+ "pseudo_slave_mode",
+ "SET pseudo_slave_mode= 0,1 are commands that mysqlbinlog "
+ "adds to beginning and end of binary log dumps. While zero "
+ "value indeed disables, the actual enabling of the slave "
+ "applier execution mode is done implicitly when a "
+ "Format_description_event is sent through the session.",
+ SESSION_ONLY(pseudo_slave_mode), NO_CMD_LINE, DEFAULT(FALSE),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_pseudo_slave_mode));
+
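
check_pseudo_slave_mode() above is a small decision table over three inputs: whether thd->rli_fake is active, the previous value, and the requested value. A self-contained sketch of the same table, using a hypothetical helper name, that only computes which warning applies and touches no THD state:

    // Hypothetical helper mirroring the warnings issued by
    // check_pseudo_slave_mode(); returns NULL when no warning is needed.
    static const char *pseudo_slave_mode_warning(bool rli_fake_active,
                                                 bool previous_val, bool new_val)
    {
      if (rli_fake_active)
      {
        if (!new_val)
          return 0;                 // turning it off also frees thd->rli_fake
        if (previous_val)
          return "'pseudo_slave_mode' change was ineffective.";
        return "'pseudo_slave_mode' is already ON.";
      }
      if (!previous_val && !new_val)
        return "'pseudo_slave_mode' change was ineffective.";
      if (previous_val && !new_val)
        return "Slave applier execution mode not active, statement ineffective.";
      return 0;                     // plain OFF -> ON, enabled later by the FD event
    }
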
diff --git a/sql/sys_vars.h b/sql/sys_vars.h
index 54916bb929e..179069040ff 100644
--- a/sql/sys_vars.h
+++ b/sql/sys_vars.h
@@ -28,6 +28,7 @@
#include "keycaches.h"
#include "strfunc.h"
#include "tztime.h" // my_tz_find, my_tz_SYSTEM, struct Time_zone
+#include "rpl_mi.h" // For Multi-Source Replication
/*
a set of mostly trivial (as in f(X)=X) defines below to make system variable
@@ -550,11 +551,11 @@ public:
protected:
virtual uchar *session_value_ptr(THD *thd, LEX_STRING *base)
{
- return thd->security_ctx->proxy_user[0] ?
- (uchar *) &(thd->security_ctx->proxy_user[0]) : NULL;
+ return (uchar*)thd->security_ctx->external_user;
}
};
+class Master_info;
class Sys_var_rpl_filter: public sys_var
{
private:
@@ -566,7 +567,7 @@ public:
NO_ARG, SHOW_CHAR, 0, NULL, VARIABLE_NOT_IN_BINLOG,
NULL, NULL, NULL), opt_id(getopt_id)
{
- option.var_type= GET_STR;
+ option.var_type= GET_STR | GET_ASK_ADDR;
}
bool do_check(THD *thd, set_var *var)
@@ -592,7 +593,7 @@ public:
protected:
uchar *global_value_ptr(THD *thd, LEX_STRING *base);
- bool set_filter_value(const char *value);
+ bool set_filter_value(const char *value, Master_info *mi);
};
/**
@@ -2048,3 +2049,145 @@ public:
}
};
+
+/**
+ Class for @@global.gtid_current_pos.
+*/
+class Sys_var_gtid_current_pos: public sys_var
+{
+public:
+ Sys_var_gtid_current_pos(const char *name_arg,
+ const char *comment, int flag_args, ptrdiff_t off, size_t size,
+ CMD_LINE getopt)
+ : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id,
+ getopt.arg_type, SHOW_CHAR, 0, NULL, VARIABLE_NOT_IN_BINLOG,
+ NULL, NULL, NULL)
+ {
+ option.var_type= GET_STR;
+ }
+ bool do_check(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ return true;
+ }
+ bool session_update(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ return true;
+ }
+ bool global_update(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ return true;
+ }
+ bool check_update_type(Item_result type) {
+ DBUG_ASSERT(false);
+ return false;
+ }
+ void session_save_default(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ }
+ void global_save_default(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ }
+ uchar *session_value_ptr(THD *thd, LEX_STRING *base)
+ {
+ DBUG_ASSERT(false);
+ return NULL;
+ }
+ uchar *global_value_ptr(THD *thd, LEX_STRING *base);
+};
+
+
+/**
+ Class for @@global.gtid_binlog_pos.
+*/
+class Sys_var_gtid_binlog_pos: public sys_var
+{
+public:
+ Sys_var_gtid_binlog_pos(const char *name_arg,
+ const char *comment, int flag_args, ptrdiff_t off, size_t size,
+ CMD_LINE getopt)
+ : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id,
+ getopt.arg_type, SHOW_CHAR, 0, NULL, VARIABLE_NOT_IN_BINLOG,
+ NULL, NULL, NULL)
+ {
+ option.var_type= GET_STR;
+ }
+ bool do_check(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ return true;
+ }
+ bool session_update(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ return true;
+ }
+ bool global_update(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ return true;
+ }
+ bool check_update_type(Item_result type) {
+ DBUG_ASSERT(false);
+ return false;
+ }
+ void session_save_default(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ }
+ void global_save_default(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ }
+ uchar *session_value_ptr(THD *thd, LEX_STRING *base)
+ {
+ DBUG_ASSERT(false);
+ return NULL;
+ }
+ uchar *global_value_ptr(THD *thd, LEX_STRING *base);
+};
+
+
+/**
+ Class for @@global.gtid_slave_pos.
+*/
+class Sys_var_gtid_slave_pos: public sys_var
+{
+public:
+ Sys_var_gtid_slave_pos(const char *name_arg,
+ const char *comment, int flag_args, ptrdiff_t off, size_t size,
+ CMD_LINE getopt)
+ : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id,
+ getopt.arg_type, SHOW_CHAR, 0, NULL, VARIABLE_NOT_IN_BINLOG,
+ NULL, NULL, NULL)
+ {
+ option.var_type= GET_STR;
+ }
+ bool do_check(THD *thd, set_var *var);
+ bool session_update(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ return true;
+ }
+ bool global_update(THD *thd, set_var *var);
+ bool check_update_type(Item_result type) { return type != STRING_RESULT; }
+ void session_save_default(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(false);
+ }
+ void global_save_default(THD *thd, set_var *var)
+ {
+ /* Record the attempt to use default so we can error. */
+ var->value= 0;
+ }
+ uchar *session_value_ptr(THD *thd, LEX_STRING *base)
+ {
+ DBUG_ASSERT(false);
+ return NULL;
+ }
+ uchar *global_value_ptr(THD *thd, LEX_STRING *base);
+};
diff --git a/sql/table.cc b/sql/table.cc
index a386510f180..266749d98a2 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2008-2011 Monty Program Ab
+ Copyright (c) 2008, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,7 +13,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
/* Some general useful functions */
@@ -22,7 +22,6 @@
#include "sql_priv.h"
#include "unireg.h" // REQUIRED: for other includes
#include "table.h"
-#include "frm_crypt.h" // get_crypt_for_frm
#include "key.h" // find_ref_key
#include "sql_table.h" // build_table_filename,
// primary_key_name
@@ -40,6 +39,7 @@
#include "sql_select.h"
#include "sql_derived.h"
#include "sql_statistics.h"
+#include "discover.h"
#include "mdl.h" // MDL_wait_for_graph_visitor
/* INFORMATION_SCHEMA name */
@@ -65,18 +65,12 @@ LEX_STRING parse_vcol_keyword= { C_STRING_WITH_LEN("PARSE_VCOL_EXPR ") };
/* Functions defined in this file */
-void open_table_error(TABLE_SHARE *share, int error, int db_errno,
- myf errortype, int errarg);
-static int open_binary_frm(THD *thd, TABLE_SHARE *share,
- uchar *head, File file);
static void fix_type_pointers(const char ***array, TYPELIB *point_to_type,
uint types, char **names);
static uint find_field(Field **fields, uchar *record, uint start, uint length);
inline bool is_system_table_name(const char *name, uint length);
-static ulong get_form_pos(File file, uchar *head);
-
/**************************************************************************
Object_creation_ctx implementation.
**************************************************************************/
@@ -279,7 +273,7 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db, const LEX_STRING *name)
/*
- Allocate a setup TABLE_SHARE structure
+ Allocate and setup a TABLE_SHARE structure
SYNOPSIS
alloc_table_share()
@@ -292,8 +286,8 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db, const LEX_STRING *name)
# Share
*/
-TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, const char *key,
- uint key_length)
+TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
+ const char *key, uint key_length)
{
MEM_ROOT mem_root;
TABLE_SHARE *share;
@@ -301,12 +295,10 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, const char *key,
char path[FN_REFLEN];
uint path_length;
DBUG_ENTER("alloc_table_share");
- DBUG_PRINT("enter", ("table: '%s'.'%s'",
- table_list->db, table_list->table_name));
+ DBUG_PRINT("enter", ("table: '%s'.'%s'", db, table_name));
path_length= build_table_filename(path, sizeof(path) - 1,
- table_list->db,
- table_list->table_name, "", 0);
+ db, table_name, "", 0);
init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
if (multi_alloc_root(&mem_root,
&share, sizeof(*share),
@@ -323,8 +315,9 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, const char *key,
strmov(share->path.str, path);
share->normalized_path.str= share->path.str;
share->normalized_path.length= path_length;
-
- share->version= refresh_version;
+ share->table_category= get_table_category(& share->db, & share->table_name);
+ share->set_refresh_version();
+ share->open_errno= ENOENT;
/*
Since alloc_table_share() can be called without any locking (for
@@ -343,6 +336,8 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, const char *key,
init_sql_alloc(&share->stats_cb.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
memcpy((char*) &share->mem_root, (char*) &mem_root, sizeof(mem_root));
+ mysql_mutex_init(key_TABLE_SHARE_LOCK_share,
+ &share->LOCK_share, MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_TABLE_SHARE_LOCK_ha_data,
&share->LOCK_ha_data, MY_MUTEX_INIT_FAST);
}
@@ -438,10 +433,15 @@ void TABLE_SHARE::destroy()
free_root(&stats_cb.mem_root, MYF(0));
stats_cb.stats_can_be_read= FALSE;
stats_cb.stats_is_read= FALSE;
+ stats_cb.histograms_can_be_read= FALSE;
+ stats_cb.histograms_are_read= FALSE;
- /* The mutex is initialized only for shares that are part of the TDC */
+ /* The mutexes are initialized only for shares that are part of the TDC */
if (tmp_table == NO_TMP_TABLE)
+ {
+ mysql_mutex_destroy(&LOCK_share);
mysql_mutex_destroy(&LOCK_ha_data);
+ }
my_hash_free(&name_hash);
plugin_unlock(NULL, db_plugin);
@@ -457,9 +457,12 @@ void TABLE_SHARE::destroy()
info_it->flags= 0;
}
}
-#ifdef HAVE_PSI_TABLE_INTERFACE
- PSI_TABLE_CALL(release_table_share)(m_psi);
-#endif
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ plugin_unlock(NULL, default_part_plugin);
+#endif /* WITH_PARTITION_STORAGE_ENGINE */
+
+ PSI_CALL_release_table_share(m_psi);
/*
Make a copy since the share is allocated in its own root,
@@ -576,27 +579,6 @@ inline bool is_system_table_name(const char *name, uint length)
}
-/**
- Check if a string contains path elements
-*/
-
-static bool has_disabled_path_chars(const char *str)
-{
- for (; *str; str++)
- {
- switch (*str) {
- case FN_EXTCHAR:
- case '/':
- case '\\':
- case '~':
- case '@':
- return TRUE;
- }
- }
- return FALSE;
-}
-
-
/*
Read table definition from a binary / text based .frm file
@@ -611,175 +593,147 @@ static bool has_disabled_path_chars(const char *str)
table_def_cache
The data is returned in 'share', which is alloced by
alloc_table_share().. The code assumes that share is initialized.
-
- RETURN VALUES
- 0 ok
- 1 Error (see open_table_error)
- 2 Error (see open_table_error)
- 3 Wrong data in .frm file
- 4 Error (see open_table_error)
- 5 Error (see open_table_error: charset unavailable)
- 6 Unknown .frm version
*/
-int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags)
+enum open_frm_error open_table_def(THD *thd, TABLE_SHARE *share, uint flags)
{
- int error, table_type;
- bool error_given;
+ bool error_given= false;
File file;
- uchar head[64];
+ uchar *buf;
+ uchar head[FRM_HEADER_SIZE];
char path[FN_REFLEN];
- MEM_ROOT **root_ptr, *old_root;
+ size_t frmlen, read_length;
DBUG_ENTER("open_table_def");
DBUG_PRINT("enter", ("table: '%s'.'%s' path: '%s'", share->db.str,
share->table_name.str, share->normalized_path.str));
- error= 1;
- error_given= 0;
+ share->error= OPEN_FRM_OPEN_ERROR;
strxmov(path, share->normalized_path.str, reg_ext, NullS);
- if ((file= mysql_file_open(key_file_frm,
- path, O_RDONLY | O_SHARE, MYF(0))) < 0)
+ if (flags & GTS_FORCE_DISCOVERY)
{
- /*
- We don't try to open 5.0 unencoded name, if
- - non-encoded name contains '@' signs,
- because '@' can be misinterpreted.
- It is not clear if '@' is escape character in 5.1,
- or a normal character in 5.0.
-
- - non-encoded db or table name contain "#mysql50#" prefix.
- This kind of tables must have been opened only by the
- mysql_file_open() above.
- */
- if (has_disabled_path_chars(share->table_name.str) ||
- has_disabled_path_chars(share->db.str) ||
- !strncmp(share->db.str, MYSQL50_TABLE_NAME_PREFIX,
- MYSQL50_TABLE_NAME_PREFIX_LENGTH) ||
- !strncmp(share->table_name.str, MYSQL50_TABLE_NAME_PREFIX,
- MYSQL50_TABLE_NAME_PREFIX_LENGTH))
- goto err_not_open;
-
- /* Try unencoded 5.0 name */
- uint length;
- strxnmov(path, sizeof(path)-1,
- mysql_data_home, "/", share->db.str, "/",
- share->table_name.str, reg_ext, NullS);
- length= unpack_filename(path, path) - reg_ext_length;
- /*
- The following is a safety test and should never fail
- as the old file name should never be longer than the new one.
- */
- DBUG_ASSERT(length <= share->normalized_path.length);
- /*
- If the old and the new names have the same length,
- then table name does not have tricky characters,
- so no need to check the old file name.
- */
- if (length == share->normalized_path.length ||
- ((file= mysql_file_open(key_file_frm,
- path, O_RDONLY | O_SHARE, MYF(0))) < 0))
- goto err_not_open;
+ DBUG_ASSERT(flags & GTS_TABLE);
+ DBUG_ASSERT(flags & GTS_USE_DISCOVERY);
+ mysql_file_delete_with_symlink(key_file_frm, path, MYF(0));
+ file= -1;
+ }
+ else
+ file= mysql_file_open(key_file_frm, path, O_RDONLY | O_SHARE, MYF(0));
- /* Unencoded 5.0 table name found */
- path[length]= '\0'; // Remove .frm extension
- strmov(share->normalized_path.str, path);
- share->normalized_path.length= length;
+ if (file < 0)
+ {
+ if ((flags & GTS_TABLE) && (flags & GTS_USE_DISCOVERY))
+ {
+ ha_discover_table(thd, share);
+ error_given= true;
+ }
+ goto err_not_open;
}
- error= 4;
- if (mysql_file_read(file, head, 64, MYF(MY_NABP)))
+ if (mysql_file_read(file, head, sizeof(head), MYF(MY_NABP)))
+ {
+ share->error = my_errno == HA_ERR_FILE_TOO_SHORT
+ ? OPEN_FRM_CORRUPTED : OPEN_FRM_READ_ERROR;
goto err;
+ }
- if (head[0] == (uchar) 254 && head[1] == 1)
+ if (memcmp(head, STRING_WITH_LEN("TYPE=VIEW\n")) == 0)
{
- if (head[2] == FRM_VER || head[2] == FRM_VER+1 ||
- (head[2] >= FRM_VER+3 && head[2] <= FRM_VER+4))
- {
- /* Open view only */
- if (db_flags & OPEN_VIEW_ONLY)
- {
- error_given= 1;
- goto err;
- }
- table_type= 1;
- }
- else
- {
- error= 6; // Unkown .frm version
- goto err;
- }
+ share->is_view= 1;
+ share->error= flags & GTS_VIEW ? OPEN_FRM_OK : OPEN_FRM_NOT_A_TABLE;
+ goto err;
}
- else if (memcmp(head, STRING_WITH_LEN("TYPE=")) == 0)
+ if (!is_binary_frm_header(head))
{
- error= 5;
- if (memcmp(head+5,"VIEW",4) == 0)
- {
- share->is_view= 1;
- if (db_flags & OPEN_VIEW)
- error= 0;
- }
+ /* No handling of text based files yet */
+ share->error = OPEN_FRM_CORRUPTED;
goto err;
}
- else
+ if (!(flags & GTS_TABLE))
+ {
+ share->error = OPEN_FRM_NOT_A_VIEW;
+ goto err;
+ }
+
+ frmlen= uint4korr(head+10);
+ set_if_smaller(frmlen, FRM_MAX_SIZE); // safety
+
+ if (!(buf= (uchar*)my_malloc(frmlen, MYF(MY_THREAD_SPECIFIC|MY_WME))))
goto err;
- /* No handling of text based files yet */
- if (table_type == 1)
+ memcpy(buf, head, sizeof(head));
+
+ read_length= mysql_file_read(file, buf + sizeof(head),
+ frmlen - sizeof(head), MYF(MY_WME));
+ if (read_length == 0 || read_length == (size_t)-1)
{
- root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
- old_root= *root_ptr;
- *root_ptr= &share->mem_root;
- error= open_binary_frm(thd, share, head, file);
- *root_ptr= old_root;
- error_given= 1;
+ share->error = OPEN_FRM_READ_ERROR;
+ my_free(buf);
+ goto err;
}
+ mysql_file_close(file, MYF(MY_WME));
+
+ frmlen= read_length + sizeof(head);
- share->table_category= get_table_category(& share->db, & share->table_name);
+ share->init_from_binary_frm_image(thd, false, buf, frmlen);
+ error_given= true; // init_from_binary_frm_image has already called my_error()
+ my_free(buf);
- if (!error)
- thd->status_var.opened_shares++;
+ goto err_not_open;
err:
mysql_file_close(file, MYF(MY_WME));
err_not_open:
- if (error && !error_given)
+ if (share->error && !error_given)
{
- share->error= error;
- open_table_error(share, error, (share->open_errno= my_errno), 0);
+ share->open_errno= my_errno;
+ open_table_error(share, share->error, share->open_errno);
}
- DBUG_RETURN(error);
+ DBUG_RETURN(share->error);
}
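
The rewritten open_table_def() no longer probes for unencoded 5.0 file names or multiple frm versions: it recognizes a view definition (a file starting with the text "TYPE=VIEW\n") and a binary table frm (accepted by is_binary_frm_header()); anything else is reported as OPEN_FRM_CORRUPTED, and a missing file is handed to ha_discover_table() when GTS_USE_DISCOVERY is set. A rough sketch of that classification, assuming the 0xfe 0x01 magic that the removed head[0]/head[1] test used to check (the real is_binary_frm_header() also validates the FRM version byte):

    #include <cstddef>
    #include <cstring>

    enum frm_kind { FRM_KIND_VIEW, FRM_KIND_BINARY_TABLE, FRM_KIND_UNKNOWN };

    // Illustrative classification of the first bytes of a .frm file, based on
    // the checks visible in this patch.
    static frm_kind classify_frm_header(const unsigned char *head, size_t len)
    {
      static const char view_magic[]= "TYPE=VIEW\n";
      if (len >= sizeof(view_magic) - 1 &&
          memcmp(head, view_magic, sizeof(view_magic) - 1) == 0)
        return FRM_KIND_VIEW;
      if (len >= 3 && head[0] == 0xfe && head[1] == 1)
        return FRM_KIND_BINARY_TABLE;       // head[2] carries the FRM version
      return FRM_KIND_UNKNOWN;              // "No handling of text based files yet"
    }
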
-/*
- Read data from a binary .frm file from MySQL 3.23 - 5.0 into TABLE_SHARE
+/**
+ Read data from a binary .frm file image into a TABLE_SHARE
+
+ @note
+ frm bytes at the following offsets are unused in MariaDB 10.0:
+
+ 8..9 (used to be the number of "form names")
+ 28..29 (used to be key_info_length)
+
+ They're still set, for compatibility reasons, but never read.
+
+    42..46 are unused since 5.0 (they were used for RAID support)
+    Also, there are a few unused bytes in forminfo.
+
*/
-static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
- File file)
+int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
+ const uchar *frm_image,
+ size_t frm_length)
{
- int error, errarg= 0;
+ TABLE_SHARE *share= this;
uint new_frm_ver, field_pack_length, new_field_pack_flag;
uint interval_count, interval_parts, read_length, int_length;
uint db_create_options, keys, key_parts, n_length;
- uint key_info_length, com_length, null_bit_pos;
+ uint com_length, null_bit_pos;
uint extra_rec_buf_length;
uint i,j;
bool use_hash;
char *keynames, *names, *comment_pos;
- uchar forminfo[288];
- uchar *record;
- uchar *disk_buff, *strpos, *null_flags, *null_pos;
+ const uchar *forminfo, *extra2;
+ const uchar *frm_image_end = frm_image + frm_length;
+ uchar *record, *null_flags, *null_pos;
+ const uchar *disk_buff, *strpos;
ulong pos, record_offset;
ulong *rec_per_key= NULL;
ulong rec_buff_length;
handler *handler_file= 0;
KEY *keyinfo;
KEY_PART_INFO *key_part= NULL;
- SQL_CRYPT *crypted=0;
Field **field_ptr, *reg_field;
const char **interval_array;
enum legacy_db_type legacy_db_type;
@@ -787,76 +741,153 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
bool null_bits_are_used;
uint vcol_screen_length, UNINIT_VAR(options_len);
char *vcol_screen_pos;
- uchar *UNINIT_VAR(options);
- uchar *extra_segment_buff= 0;
+ const uchar *options= 0;
KEY first_keyinfo;
uint len;
KEY_PART_INFO *first_key_part= NULL;
uint ext_key_parts= 0;
uint first_key_parts= 0;
+ plugin_ref se_plugin= 0;
keyinfo= &first_keyinfo;
share->ext_key_parts= 0;
- DBUG_ENTER("open_binary_frm");
+ MEM_ROOT **root_ptr, *old_root;
+ DBUG_ENTER("TABLE_SHARE::init_from_binary_frm_image");
+
+ root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
+ old_root= *root_ptr;
+ *root_ptr= &share->mem_root;
- new_field_pack_flag= head[27];
- new_frm_ver= (head[2] - FRM_VER);
+ if (write && write_frm_image(frm_image, frm_length))
+ goto err;
+
+ if (frm_length < FRM_HEADER_SIZE + FRM_FORMINFO_SIZE)
+ goto err;
+
+ new_field_pack_flag= frm_image[27];
+ new_frm_ver= (frm_image[2] - FRM_VER);
field_pack_length= new_frm_ver < 2 ? 11 : 17;
- disk_buff= 0;
- error= 3;
- /* Position of the form in the form file. */
- if (!(pos= get_form_pos(file, head)))
- goto err; /* purecov: inspected */
+ /* Length of the MariaDB extra2 segment in the form file. */
+ len = uint2korr(frm_image+4);
+ extra2= frm_image + 64;
+
+ if (*extra2 != '/') // old frm had '/' there
+ {
+ const uchar *e2end= extra2 + len;
+ while (extra2 + 3 < e2end)
+ {
+ uchar type= *extra2++;
+ size_t length= *extra2++;
+ if (!length)
+ {
+ if (extra2 + 258 >= e2end)
+ goto err;
+ length= uint2korr(extra2);
+ extra2+=2;
+ if (length < 256)
+ goto err;
+ }
+ if (extra2 + length > e2end)
+ goto err;
+ switch (type) {
+ case EXTRA2_TABLEDEF_VERSION:
+ if (tabledef_version.str) // see init_from_sql_statement_string()
+ {
+ if (length != tabledef_version.length ||
+ memcmp(extra2, tabledef_version.str, length))
+ goto err;
+ }
+ else
+ {
+ tabledef_version.length= length;
+ tabledef_version.str= (uchar*)memdup_root(&mem_root, extra2, length);
+ if (!tabledef_version.str)
+ goto err;
+ }
+ break;
+ case EXTRA2_ENGINE_TABLEOPTS:
+ if (options)
+ goto err;
+ /* remember but delay parsing until we have read fields and keys */
+ options= extra2;
+ options_len= length;
+ break;
+ case EXTRA2_DEFAULT_PART_ENGINE:
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ {
+ LEX_STRING name= { (char*)extra2, length };
+ share->default_part_plugin= ha_resolve_by_name(NULL, &name);
+ if (!share->default_part_plugin)
+ goto err;
+ }
+#endif
+ break;
+ default:
+ /* abort frm parsing if it's an unknown but important extra2 value */
+ if (type >= EXTRA2_ENGINE_IMPORTANT)
+ goto err;
+ }
+ extra2+= length;
+ }
+ if (extra2 != e2end)
+ goto err;
+ }
+
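
The extra2 segment parsed just above is a plain type-length-value list: one type byte, one length byte, and a zero length byte escapes to a two-byte little-endian length that must be at least 256. A self-contained sketch of such a walker; the le16 helper mirrors the uint2korr() macro and the Handler callback is illustrative, not part of the server:

    #include <cstddef>

    // Little-endian 2-byte read, equivalent to the uint2korr() macro.
    static inline unsigned int le16(const unsigned char *p)
    { return (unsigned int) p[0] | ((unsigned int) p[1] << 8); }

    // Walk an extra2-style TLV segment the same way the loop above does.
    // handle(type, value, length) returns false to abort (e.g. an unknown
    // but important type). Returns false on a malformed segment.
    template <class Handler>
    static bool walk_extra2(const unsigned char *p, const unsigned char *end,
                            Handler handle)
    {
      while (p + 3 < end)
      {
        unsigned char type= *p++;
        size_t length= *p++;
        if (!length)                        // escaped, two-byte length follows
        {
          if (p + 258 >= end)
            return false;
          length= le16(p);
          p+= 2;
          if (length < 256)
            return false;
        }
        if (p + length > end)
          return false;
        if (!handle(type, p, length))
          return false;
        p+= length;
      }
      return p == end;                      // segment must be consumed exactly
    }
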
+ if (frm_length < FRM_HEADER_SIZE + len ||
+ !(pos= uint4korr(frm_image + FRM_HEADER_SIZE + len)))
+ goto err;
- mysql_file_seek(file,pos,MY_SEEK_SET,MYF(0));
- if (mysql_file_read(file, forminfo,288,MYF(MY_NABP)))
+ forminfo= frm_image + pos;
+ if (forminfo + FRM_FORMINFO_SIZE >= frm_image_end)
goto err;
- share->frm_version= head[2];
+
+ share->frm_version= frm_image[2];
/*
Check if .frm file created by MySQL 5.0. In this case we want to
display CHAR fields as CHAR and not as VARCHAR.
We do it this way as we want to keep the old frm version to enable
MySQL 4.1 to read these files.
*/
- if (share->frm_version == FRM_VER_TRUE_VARCHAR -1 && head[33] == 5)
+ if (share->frm_version == FRM_VER_TRUE_VARCHAR -1 && frm_image[33] == 5)
share->frm_version= FRM_VER_TRUE_VARCHAR;
#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (*(head+61) &&
- !(share->default_part_db_type=
- ha_checktype(thd, (enum legacy_db_type) (uint) *(head+61), 1, 0)))
- goto err;
- DBUG_PRINT("info", ("default_part_db_type = %u", head[61]));
+ if (frm_image[61] && !share->default_part_plugin)
+ {
+ enum legacy_db_type db_type= (enum legacy_db_type) (uint) frm_image[61];
+ share->default_part_plugin=
+ ha_lock_engine(NULL, ha_checktype(thd, db_type, 1, 0));
+ if (!share->default_part_plugin)
+ goto err;
+ }
#endif
- legacy_db_type= (enum legacy_db_type) (uint) *(head+3);
- DBUG_ASSERT(share->db_plugin == NULL);
+ legacy_db_type= (enum legacy_db_type) (uint) frm_image[3];
/*
if the storage engine is dynamic, no point in resolving it by its
dynamically allocated legacy_db_type. We will resolve it later by name.
*/
if (legacy_db_type > DB_TYPE_UNKNOWN &&
legacy_db_type < DB_TYPE_FIRST_DYNAMIC)
- share->db_plugin= ha_lock_engine(NULL,
- ha_checktype(thd, legacy_db_type, 0, 0));
- share->db_create_options= db_create_options= uint2korr(head+30);
+ se_plugin= ha_lock_engine(NULL, ha_checktype(thd, legacy_db_type, 0, 0));
+ share->db_create_options= db_create_options= uint2korr(frm_image+30);
share->db_options_in_use= share->db_create_options;
- share->mysql_version= uint4korr(head+51);
+ share->mysql_version= uint4korr(frm_image+51);
share->null_field_first= 0;
- if (!head[32]) // New frm file in 3.23
- {
- share->avg_row_length= uint4korr(head+34);
- share->transactional= (ha_choice) (head[39] & 3);
- share->page_checksum= (ha_choice) ((head[39] >> 2) & 3);
- share->row_type= (row_type) head[40];
- share->table_charset= get_charset((((uint) head[41]) << 8) +
- (uint) head[38],MYF(0));
+ if (!frm_image[32]) // New frm file in 3.23
+ {
+ share->avg_row_length= uint4korr(frm_image+34);
+ share->transactional= (ha_choice) (frm_image[39] & 3);
+ share->page_checksum= (ha_choice) ((frm_image[39] >> 2) & 3);
+ share->row_type= (enum row_type) frm_image[40];
+ share->table_charset= get_charset((((uint) frm_image[41]) << 8) +
+ (uint) frm_image[38], MYF(0));
share->null_field_first= 1;
- share->stats_sample_pages= uint2korr(head+42);
- share->stats_auto_recalc= static_cast<enum_stats_auto_recalc>(head[44]);
+ share->stats_sample_pages= uint2korr(frm_image+42);
+ share->stats_auto_recalc= (enum_stats_auto_recalc)(frm_image[44]);
}
if (!share->table_charset)
{
- /* unknown charset in head[38] or pre-3.23 frm */
+ /* unknown charset in frm_image[38] or pre-3.23 frm */
if (use_mb(default_charset_info))
{
/* Warn that we may be changing the size of character columns */
@@ -868,15 +899,15 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
share->table_charset= default_charset_info;
}
share->db_record_offset= 1;
- error=4;
- share->max_rows= uint4korr(head+18);
- share->min_rows= uint4korr(head+22);
+ share->max_rows= uint4korr(frm_image+18);
+ share->min_rows= uint4korr(frm_image+22);
/* Read keyinformation */
- key_info_length= (uint) uint2korr(head+28);
- mysql_file_seek(file, (ulong) uint2korr(head+6), MY_SEEK_SET, MYF(0));
- if (read_string(file,(uchar**) &disk_buff,key_info_length))
- goto err; /* purecov: inspected */
+ disk_buff= frm_image + uint2korr(frm_image+6);
+
+ if (disk_buff + 6 >= frm_image_end)
+ goto err;
+
if (disk_buff[0] & 0x80)
{
share->keys= keys= (disk_buff[1] << 7) | (disk_buff[0] & 0x7f);
@@ -926,6 +957,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
{
if (new_frm_ver >= 3)
{
+ if (strpos + 8 >= frm_image_end)
+ goto err;
keyinfo->flags= (uint) uint2korr(strpos) ^ HA_NOSAME;
keyinfo->key_length= (uint) uint2korr(strpos+2);
keyinfo->user_defined_key_parts= (uint) strpos[4];
@@ -935,6 +968,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
}
else
{
+ if (strpos + 4 >= frm_image_end)
+ goto err;
keyinfo->flags= ((uint) strpos[0]) ^ HA_NOSAME;
keyinfo->key_length= (uint) uint2korr(strpos+1);
keyinfo->user_defined_key_parts= (uint) strpos[3];
@@ -972,6 +1007,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
keyinfo->rec_per_key= rec_per_key;
for (j=keyinfo->user_defined_key_parts ; j-- ; key_part++)
{
+ if (strpos + (new_frm_ver >= 1 ? 9 : 7) >= frm_image_end)
+ goto err;
*rec_per_key++=0;
key_part->fieldnr= (uint16) (uint2korr(strpos) & FIELD_NR_MASK);
key_part->offset= (uint) uint2korr(strpos+2)-1;
@@ -1028,57 +1065,58 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
}
}
if (j == first_key_parts)
- keyinfo->ext_key_flags= keyinfo->flags | HA_NOSAME | HA_EXT_NOSAME;
+ keyinfo->ext_key_flags= keyinfo->flags | HA_EXT_NOSAME;
}
share->ext_key_parts+= keyinfo->ext_key_parts;
}
keynames=(char*) key_part;
- strpos+= (strmov(keynames, (char *) strpos) - keynames)+1;
+ strpos+= strnmov(keynames, (char *) strpos, frm_image_end - strpos) - keynames;
+ if (*strpos++) // key names are \0-terminated
+ goto err;
//reading index comments
for (keyinfo= share->key_info, i=0; i < keys; i++, keyinfo++)
{
if (keyinfo->flags & HA_USES_COMMENT)
{
+ if (strpos + 2 >= frm_image_end)
+ goto err;
keyinfo->comment.length= uint2korr(strpos);
- keyinfo->comment.str= strmake_root(&share->mem_root, (char*) strpos+2,
+ strpos+= 2;
+
+ if (strpos + keyinfo->comment.length >= frm_image_end)
+ goto err;
+ keyinfo->comment.str= strmake_root(&share->mem_root, (char*) strpos,
keyinfo->comment.length);
- strpos+= 2 + keyinfo->comment.length;
+ strpos+= keyinfo->comment.length;
}
DBUG_ASSERT(test(keyinfo->flags & HA_USES_COMMENT) ==
(keyinfo->comment.length > 0));
}
- share->reclength = uint2korr((head+16));
+ share->reclength = uint2korr((frm_image+16));
share->stored_rec_length= share->reclength;
- if (*(head+26) == 1)
+ if (frm_image[26] == 1)
share->system= 1; /* one-record-database */
-#ifdef HAVE_CRYPTED_FRM
- else if (*(head+26) == 2)
- {
- crypted= get_crypt_for_frm();
- share->crypted= 1;
- }
-#endif
- record_offset= (ulong) (uint2korr(head+6)+
- ((uint2korr(head+14) == 0xffff ?
- uint4korr(head+47) : uint2korr(head+14))));
+ record_offset= (ulong) (uint2korr(frm_image+6)+
+ ((uint2korr(frm_image+14) == 0xffff ?
+ uint4korr(frm_image+47) : uint2korr(frm_image+14))));
+
+ if (record_offset + share->reclength >= frm_length)
+ goto err;
- if ((n_length= uint4korr(head+55)))
+ if ((n_length= uint4korr(frm_image+55)))
{
/* Read extra data segment */
- uchar *next_chunk, *buff_end;
+ const uchar *next_chunk, *buff_end;
DBUG_PRINT("info", ("extra segment size is %u bytes", n_length));
- if (!(extra_segment_buff= (uchar*) my_malloc(n_length + 1, MYF(MY_WME))))
- goto err;
- next_chunk= extra_segment_buff;
- if (mysql_file_pread(file, extra_segment_buff,
- n_length, record_offset + share->reclength,
- MYF(MY_NABP)))
- {
+ next_chunk= frm_image + record_offset + share->reclength;
+ buff_end= next_chunk + n_length;
+
+ if (buff_end >= frm_image_end)
goto err;
- }
+
share->connect_string.length= uint2korr(next_chunk);
if (!(share->connect_string.str= strmake_root(&share->mem_root,
(char*) next_chunk + 2,
@@ -1088,7 +1126,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
goto err;
}
next_chunk+= share->connect_string.length + 2;
- buff_end= extra_segment_buff + n_length;
if (next_chunk + 2 < buff_end)
{
uint str_db_type_length= uint2korr(next_chunk);
@@ -1097,26 +1134,20 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
name.length= str_db_type_length;
plugin_ref tmp_plugin= ha_resolve_by_name(thd, &name);
- if (tmp_plugin != NULL && !plugin_equals(tmp_plugin, share->db_plugin))
+ if (tmp_plugin != NULL && !plugin_equals(tmp_plugin, se_plugin))
{
- if (legacy_db_type > DB_TYPE_UNKNOWN &&
- legacy_db_type < DB_TYPE_FIRST_DYNAMIC &&
- legacy_db_type != ha_legacy_type(
- plugin_data(tmp_plugin, handlerton *)))
+ if (se_plugin)
{
/* bad file, legacy_db_type did not match the name */
goto err;
}
/*
tmp_plugin is locked with a local lock.
- we unlock the old value of share->db_plugin before
+ we unlock the old value of se_plugin before
replacing it with a globally locked version of tmp_plugin
*/
- plugin_unlock(NULL, share->db_plugin);
- share->db_plugin= my_plugin_lock(NULL, tmp_plugin);
- DBUG_PRINT("info", ("setting dbtype to '%.*s' (%d)",
- str_db_type_length, next_chunk + 2,
- ha_legacy_type(share->db_type())));
+ plugin_unlock(NULL, se_plugin);
+ se_plugin= plugin_lock(NULL, tmp_plugin);
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
else if (str_db_type_length == 9 &&
@@ -1125,28 +1156,23 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
/*
Use partition handler
tmp_plugin is locked with a local lock.
- we unlock the old value of share->db_plugin before
+ we unlock the old value of se_plugin before
replacing it with a globally locked version of tmp_plugin
*/
/* Check if the partitioning engine is ready */
if (!plugin_is_ready(&name, MYSQL_STORAGE_ENGINE_PLUGIN))
{
- error= 8;
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0),
"--skip-partition");
goto err;
}
- plugin_unlock(NULL, share->db_plugin);
- share->db_plugin= ha_lock_engine(NULL, partition_hton);
- DBUG_PRINT("info", ("setting dbtype to '%.*s' (%d)",
- str_db_type_length, next_chunk + 2,
- ha_legacy_type(share->db_type())));
+ plugin_unlock(NULL, se_plugin);
+ se_plugin= ha_lock_engine(NULL, partition_hton);
}
#endif
else if (!tmp_plugin)
{
/* purecov: begin inspected */
- error= 8;
name.str[name.length]=0;
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), name.str);
goto err;
@@ -1230,41 +1256,31 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
DBUG_ASSERT(next_chunk <= buff_end);
- if (share->db_create_options & HA_OPTION_TEXT_CREATE_OPTIONS)
+ if (share->db_create_options & HA_OPTION_TEXT_CREATE_OPTIONS_legacy)
{
- /*
- store options position, but skip till the time we will
- know number of fields
- */
+ if (options)
+ goto err;
options_len= uint4korr(next_chunk);
options= next_chunk + 4;
next_chunk+= options_len + 4;
}
DBUG_ASSERT(next_chunk <= buff_end);
}
- share->key_block_size= uint2korr(head+62);
+ share->key_block_size= uint2korr(frm_image+62);
+
+ if (share->db_plugin && !plugin_equals(share->db_plugin, se_plugin))
+ goto err; // wrong engine (someone changed the frm under our feet?)
- error=4;
- extra_rec_buf_length= uint2korr(head+59);
+ extra_rec_buf_length= uint2korr(frm_image+59);
rec_buff_length= ALIGN_SIZE(share->reclength + 1 + extra_rec_buf_length);
share->rec_buff_length= rec_buff_length;
if (!(record= (uchar *) alloc_root(&share->mem_root,
rec_buff_length)))
goto err; /* purecov: inspected */
share->default_values= record;
- if (mysql_file_pread(file, record, (size_t) share->reclength,
- record_offset, MYF(MY_NABP)))
- goto err; /* purecov: inspected */
+ memcpy(record, frm_image + record_offset, share->reclength);
- mysql_file_seek(file, pos+288, MY_SEEK_SET, MYF(0));
-#ifdef HAVE_CRYPTED_FRM
- if (crypted)
- {
- crypted->decode((char*) forminfo+256,288-256);
- if (sint2korr(forminfo+284) != 0) // Should be 0
- goto err; // Wrong password
- }
-#endif
+ disk_buff= frm_image + pos + FRM_FORMINFO_SIZE;
share->fields= uint2korr(forminfo+258);
pos= uint2korr(forminfo+260); /* Length of all screens */
@@ -1302,16 +1318,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
read_length=(uint) (share->fields * field_pack_length +
pos+ (uint) (n_length+int_length+com_length+
vcol_screen_length));
- if (read_string(file,(uchar**) &disk_buff,read_length))
- goto err; /* purecov: inspected */
-#ifdef HAVE_CRYPTED_FRM
- if (crypted)
- {
- crypted->decode((char*) disk_buff,read_length);
- delete crypted;
- crypted=0;
- }
-#endif
strpos= disk_buff+pos;
share->intervals= (TYPELIB*) (field_ptr+share->fields+1);
@@ -1359,7 +1365,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
/* Allocate handler */
if (!(handler_file= get_new_handler(share, thd->mem_root,
- share->db_type())))
+ plugin_hton(se_plugin))))
goto err;
if (handler_file->set_ha_share_ref(&share->ha_share))
@@ -1369,7 +1375,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
null_bits_are_used= share->null_fields != 0;
if (share->null_field_first)
{
- null_flags= null_pos= (uchar*) record+1;
+ null_flags= null_pos= record+1;
null_bit_pos= (db_create_options & HA_OPTION_PACK_RECORD) ? 0 : 1;
/*
null_bytes below is only correct under the condition that
@@ -1382,8 +1388,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
else
{
share->null_bytes= (share->null_fields+7)/8;
- null_flags= null_pos= (uchar*) (record + 1 +share->reclength -
- share->null_bytes);
+ null_flags= null_pos= record + 1 + share->reclength - share->null_bytes;
null_bit_pos= 0;
}
#endif
@@ -1425,7 +1430,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
geom_type= (Field::geometry_type) strpos[14];
charset= &my_charset_bin;
#else
- error= 4; // unsupported field type
goto err;
#endif
}
@@ -1436,8 +1440,16 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
charset= &my_charset_bin;
else if (!(charset= get_charset(csid, MYF(0))))
{
- error= 5; // Unknown or unavailable charset
- errarg= (int) csid;
+ const char *csname= get_charset_name((uint) csid);
+ char tmp[10];
+ if (!csname || csname[0] =='?')
+ {
+ my_snprintf(tmp, sizeof(tmp), "#%d", csid);
+ csname= tmp;
+ }
+ my_printf_error(ER_UNKNOWN_COLLATION,
+ "Unknown collation '%s' in table '%-.64s' definition",
+ MYF(0), csname, share->table_name.str);
goto err;
}
}
@@ -1481,10 +1493,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (opt_interval_id)
interval_nr= (uint)vcol_screen_pos[3];
else if ((uint)vcol_screen_pos[0] != 1)
- {
- error= 4;
goto err;
- }
+
fld_stored_in_db= (bool) (uint) vcol_screen_pos[2];
vcol_expr_length= vcol_info_length -
(uint)(FRM_VCOL_HEADER_SIZE(opt_interval_id));
@@ -1583,10 +1593,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
(TYPELIB*) 0),
share->fieldnames.type_names[i]);
if (!reg_field) // Not supported field type
- {
- error= 4;
- goto err; /* purecov: inspected */
- }
+ goto err;
+
reg_field->field_index= i;
reg_field->comment=comment;
@@ -1612,20 +1620,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (reg_field->unireg_check == Field::NEXT_NUMBER)
share->found_next_number_field= field_ptr;
- if (use_hash)
- {
- if (my_hash_insert(&share->name_hash,
- (uchar*) field_ptr))
- {
- /*
- Set return code 8 here to indicate that an error has
- occurred but that the error message already has been
- sent (OOM).
- */
- error= 8;
- goto err;
- }
- }
+ if (use_hash && my_hash_insert(&share->name_hash, (uchar*) field_ptr))
+ goto err;
if (!reg_field->stored_in_db)
{
share->stored_fields--;
@@ -1777,10 +1773,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
(uint) key_part->offset,
(uint) key_part->length);
if (!key_part->fieldnr)
- {
- error= 4; // Wrong file
goto err;
- }
+
field= key_part->field= share->field[key_part->fieldnr-1];
key_part->type= field->key_type();
if (field->null_ptr)
@@ -1928,8 +1922,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
}
else
share->primary_key= MAX_KEY;
- my_free(disk_buff);
- disk_buff=0;
if (new_field_pack_flag <= 1)
{
/* Old file format with default as not null */
@@ -1938,7 +1930,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
null_length, 255);
}
- if (share->db_create_options & HA_OPTION_TEXT_CREATE_OPTIONS)
+ if (options)
{
DBUG_ASSERT(options_len);
if (engine_table_options_frm_read(options, options_len, share))
@@ -1955,13 +1947,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
share->default_values, reg_field,
&share->next_number_key_offset,
&share->next_number_keypart)) < 0)
- {
- /* Wrong field definition */
- error= 4;
- goto err;
- }
- else
- reg_field->flags |= AUTO_INCREMENT_FLAG;
+ goto err; // Wrong field definition
+ reg_field->flags |= AUTO_INCREMENT_FLAG;
}
if (share->blob_fields)
@@ -2005,22 +1992,184 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (use_hash)
(void) my_hash_check(&share->name_hash);
#endif
- my_free(extra_segment_buff);
- DBUG_RETURN (0);
+
+ share->db_plugin= se_plugin;
+ share->error= OPEN_FRM_OK;
+ thd->status_var.opened_shares++;
+ *root_ptr= old_root;
+ DBUG_RETURN(0);
err:
- share->error= error;
+ share->error= OPEN_FRM_CORRUPTED;
share->open_errno= my_errno;
- share->errarg= errarg;
- my_free(disk_buff);
- my_free(extra_segment_buff);
- delete crypted;
delete handler_file;
+ plugin_unlock(0, se_plugin);
my_hash_free(&share->name_hash);
- open_table_error(share, error, share->open_errno, errarg);
- DBUG_RETURN(error);
-} /* open_binary_frm */
+ if (!thd->is_error())
+ open_table_error(share, OPEN_FRM_CORRUPTED, share->open_errno);
+
+ *root_ptr= old_root;
+ DBUG_RETURN(HA_ERR_NOT_A_TABLE);
+}
+
+
+static bool sql_unusable_for_discovery(THD *thd, const char *sql)
+{
+ LEX *lex= thd->lex;
+ HA_CREATE_INFO *create_info= &lex->create_info;
+
+ // ... not CREATE TABLE
+ if (lex->sql_command != SQLCOM_CREATE_TABLE)
+ return 1;
+ // ... create like
+ if (create_info->options & HA_LEX_CREATE_TABLE_LIKE)
+ return 1;
+ // ... create select
+ if (lex->select_lex.item_list.elements)
+ return 1;
+ // ... temporary
+ if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
+ return 1;
+ // ... if exists
+ if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
+ return 1;
+
+ // XXX error out or rather ignore the following:
+ // ... partitioning
+ if (lex->part_info)
+ return 1;
+ // ... union
+ if (create_info->used_fields & HA_CREATE_USED_UNION)
+ return 1;
+ // ... index/data directory
+ if (create_info->data_file_name || create_info->index_file_name)
+ return 1;
+ // ... engine
+ if (create_info->used_fields & HA_CREATE_USED_ENGINE)
+ return 1;
+
+ return 0;
+}
+
+int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
+ const char *sql, size_t sql_length)
+{
+ ulonglong saved_mode= thd->variables.sql_mode;
+ CHARSET_INFO *old_cs= thd->variables.character_set_client;
+ Parser_state parser_state;
+ bool error;
+ char *sql_copy;
+ handler *file;
+ LEX *old_lex;
+ Query_arena *arena, backup;
+ LEX tmp_lex;
+ KEY *unused1;
+ uint unused2;
+ LEX_CUSTRING frm= {0,0};
+
+ DBUG_ENTER("TABLE_SHARE::init_from_sql_statement_string");
+
+ /*
+    Ouch. The parser may *change* the string it's working on.
+    Currently (2013-02-26) this is used to permanently disable
+    conditional comments.
+    Anyway, let's copy the caller's string...
+ */
+ if (!(sql_copy= thd->strmake(sql, sql_length)))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
+ if (parser_state.init(thd, sql_copy, sql_length))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
+ thd->variables.sql_mode= MODE_NO_ENGINE_SUBSTITUTION | MODE_NO_DIR_IN_CREATE;
+ thd->variables.character_set_client= system_charset_info;
+ tmp_disable_binlog(thd);
+ old_lex= thd->lex;
+ thd->lex= &tmp_lex;
+
+ arena= thd->stmt_arena;
+ if (arena->is_conventional())
+ arena= 0;
+ else
+ thd->set_n_backup_active_arena(arena, &backup);
+
+ lex_start(thd);
+
+ if ((error= parse_sql(thd, & parser_state, NULL) ||
+ sql_unusable_for_discovery(thd, sql_copy)))
+ goto ret;
+
+ thd->lex->create_info.db_type= plugin_hton(db_plugin);
+
+ if (tabledef_version.str)
+ thd->lex->create_info.tabledef_version= tabledef_version;
+
+ file= mysql_create_frm_image(thd, db.str, table_name.str,
+ &thd->lex->create_info, &thd->lex->alter_info,
+ C_ORDINARY_CREATE, &unused1, &unused2, &frm);
+ error|= file == 0;
+ delete file;
+
+ if (frm.str)
+ {
+ option_list= 0; // cleanup existing options ...
+ option_struct= 0; // ... if it's an assisted discovery
+ error= init_from_binary_frm_image(thd, write, frm.str, frm.length);
+ }
+
+ret:
+ my_free(const_cast<uchar*>(frm.str));
+ lex_end(thd->lex);
+ thd->lex= old_lex;
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ reenable_binlog(thd);
+ thd->variables.sql_mode= saved_mode;
+ thd->variables.character_set_client= old_cs;
+ if (thd->is_error() || error)
+ {
+ thd->clear_error();
+ my_error(ER_SQL_DISCOVER_ERROR, MYF(0),
+ plugin_name(db_plugin)->str, db.str, table_name.str,
+ sql_copy);
+ DBUG_RETURN(HA_ERR_GENERIC);
+ }
+ DBUG_RETURN(0);
+}
+
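
init_from_sql_statement_string() is the server side of "assisted discovery": an engine that can only reproduce a CREATE TABLE statement hands that SQL over, the server parses it with a throwaway LEX, builds an frm image via mysql_create_frm_image() and then falls through to init_from_binary_frm_image(). A hedged sketch of how an engine-side discovery callback might drive this -- the callback name and signature are assumptions, only share->init_from_sql_statement_string() and the meaning of its write argument come from this patch, and the usual server headers are presumed:

    // Hypothetical engine-side discovery callback (name and signature assumed).
    static int example_discover_table(handlerton *hton, THD *thd, TABLE_SHARE *share)
    {
      char sql[512];
      // The engine reconstructs a CREATE TABLE statement for the requested table.
      size_t len= (size_t) snprintf(sql, sizeof(sql),
                                    "CREATE TABLE %s.%s (id INT NOT NULL) "
                                    "ENGINE=EXAMPLE",
                                    share->db.str, share->table_name.str);
      // write=true also persists the generated frm image via write_frm_image().
      return share->init_from_sql_statement_string(thd, true, sql, len);
    }
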
+bool TABLE_SHARE::write_frm_image(const uchar *frm, size_t len)
+{
+ return writefrm(normalized_path.str, db.str, table_name.str, false, frm, len);
+}
+
+
+bool TABLE_SHARE::read_frm_image(const uchar **frm, size_t *len)
+{
+ if (IF_PARTITIONING(partition_info_str, 0)) // cannot discover a partition
+ {
+ DBUG_ASSERT(db_type()->discover_table == 0);
+ return 1;
+ }
+
+ if (frm_image)
+ {
+ *frm= frm_image->str;
+ *len= frm_image->length;
+ frm_image->str= 0; // pass the ownership to the caller
+ frm_image= 0;
+ return 0;
+ }
+ return readfrm(normalized_path.str, frm, len);
+}
+
+
+void TABLE_SHARE::free_frm_image(const uchar *frm)
+{
+ if (frm)
+ my_free(const_cast<uchar*>(frm));
+}
+
/*
@brief
@@ -2329,11 +2478,12 @@ end:
7 Table definition has changed in engine
*/
-int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
- uint db_stat, uint prgflag, uint ha_open_flags,
- TABLE *outparam, bool is_create_table)
+enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
+ const char *alias, uint db_stat, uint prgflag,
+ uint ha_open_flags, TABLE *outparam,
+ bool is_create_table)
{
- int error;
+ enum open_frm_error error;
uint records, i, bitmap_size;
bool error_reported= FALSE;
uchar *record, *bitmaps;
@@ -2345,7 +2495,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
thd->lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_VIEW; // not a view
- error= 1;
+ error= OPEN_FRM_ERROR_ALREADY_ISSUED; // for OOM errors below
bzero((char*) outparam, sizeof(*outparam));
outparam->in_use= thd;
outparam->s= share;
@@ -2377,7 +2527,6 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
DBUG_ASSERT(!db_stat);
}
- error= 4;
outparam->reginfo.lock_type= TL_UNLOCK;
outparam->current_lock= F_UNLCK;
records=0;
@@ -2530,7 +2679,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
&(*field_ptr)->vcol_info->expr_str,
&error_reported))
{
- error= 4; // in case no error is reported
+ error= OPEN_FRM_CORRUPTED;
goto err;
}
*(vfield_ptr++)= *field_ptr;
@@ -2572,7 +2721,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
tmp= mysql_unpack_partition(thd, share->partition_info_str,
share->partition_info_str_len,
outparam, is_create_table,
- share->default_part_db_type,
+ plugin_hton(share->default_part_plugin),
&work_part_info_used);
if (tmp)
{
@@ -2627,7 +2776,7 @@ partititon_err:
/* Allocate bitmaps */
bitmap_size= share->column_bitmap_size;
- if (!(bitmaps= (uchar*) alloc_root(&outparam->mem_root, bitmap_size*5)))
+ if (!(bitmaps= (uchar*) alloc_root(&outparam->mem_root, bitmap_size*6)))
goto err;
bitmap_init(&outparam->def_read_set,
(my_bitmap_map*) bitmaps, share->fields, FALSE);
@@ -2639,56 +2788,53 @@ partititon_err:
(my_bitmap_map*) (bitmaps+bitmap_size*3), share->fields, FALSE);
bitmap_init(&outparam->eq_join_set,
(my_bitmap_map*) (bitmaps+bitmap_size*4), share->fields, FALSE);
+ bitmap_init(&outparam->cond_set,
+ (my_bitmap_map*) (bitmaps+bitmap_size*5), share->fields, FALSE);
outparam->default_column_bitmaps();
+ outparam->cond_selectivity= 1.0;
+
/* The table struct is now initialized; Open the table */
- error= 2;
if (db_stat)
{
- int ha_err;
- if ((ha_err= (outparam->file->
- ha_open(outparam, share->normalized_path.str,
- (db_stat & HA_READ_ONLY ? O_RDONLY : O_RDWR),
- (db_stat & HA_OPEN_TEMPORARY ? HA_OPEN_TMP_TABLE :
- ((db_stat & HA_WAIT_IF_LOCKED) ||
- (specialflag & SPECIAL_WAIT_IF_LOCKED)) ?
- HA_OPEN_WAIT_IF_LOCKED :
- (db_stat & (HA_ABORT_IF_LOCKED | HA_GET_INFO)) ?
- HA_OPEN_ABORT_IF_LOCKED :
- HA_OPEN_IGNORE_IF_LOCKED) | ha_open_flags))))
+ if (db_stat & HA_OPEN_TEMPORARY)
+ ha_open_flags|= HA_OPEN_TMP_TABLE;
+ else if ((db_stat & HA_WAIT_IF_LOCKED) ||
+ (specialflag & SPECIAL_WAIT_IF_LOCKED))
+ ha_open_flags|= HA_OPEN_WAIT_IF_LOCKED;
+ else if (db_stat & (HA_ABORT_IF_LOCKED | HA_GET_INFO))
+ ha_open_flags|= HA_OPEN_ABORT_IF_LOCKED;
+ else
+ ha_open_flags|= HA_OPEN_IGNORE_IF_LOCKED;
+
+ int ha_err= outparam->file->ha_open(outparam, share->normalized_path.str,
+ (db_stat & HA_READ_ONLY ? O_RDONLY : O_RDWR),
+ ha_open_flags);
+ if (ha_err)
{
+ share->open_errno= ha_err;
/* Set a flag if the table is crashed and it can be auto. repaired */
share->crashed= (outparam->file->auto_repair(ha_err) &&
!(ha_open_flags & HA_OPEN_FOR_REPAIR));
+ outparam->file->print_error(ha_err, MYF(0));
+ error_reported= TRUE;
- switch (ha_err)
- {
- case HA_ERR_NO_SUCH_TABLE:
- /*
- The table did not exists in storage engine, use same error message
- as if the .frm file didn't exist
- */
- error= 1;
- my_errno= ENOENT;
- break;
- case EMFILE:
- /*
- Too many files opened, use same error message as if the .frm
- file can't open
- */
- DBUG_PRINT("error", ("open file: %s failed, too many files opened (errno: %d)",
- share->normalized_path.str, ha_err));
- error= 1;
- my_errno= EMFILE;
- break;
- default:
- outparam->file->print_error(ha_err, MYF(0));
- error_reported= TRUE;
- if (ha_err == HA_ERR_TABLE_DEF_CHANGED)
- error= 7;
- break;
- }
- goto err; /* purecov: inspected */
+ if (ha_err == HA_ERR_TABLE_DEF_CHANGED)
+ error= OPEN_FRM_DISCOVER;
+
+ /*
+        We're here because the .frm file was successfully opened.
+
+        But if the table doesn't exist in the engine and the engine
+        supports discovery, we force rediscovery so that the engine
+        can report that the table does not in fact exist, and the
+        stray .frm file gets removed.
+ */
+ if (share->db_type()->discover_table &&
+ (ha_err == ENOENT || ha_err == HA_ERR_NO_SUCH_TABLE))
+ error= OPEN_FRM_DISCOVER;
+
+ goto err;
}
}
@@ -2715,11 +2861,11 @@ partititon_err:
thd->status_var.opened_tables++;
thd->lex->context_analysis_only= save_context_analysis_only;
- DBUG_RETURN (0);
+ DBUG_RETURN (OPEN_FRM_OK);
err:
if (! error_reported)
- open_table_error(share, error, my_errno, 0);
+ open_table_error(share, error, my_errno);
delete outparam->file;
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (outparam->part_info)
@@ -2831,158 +2977,17 @@ void free_field_buffers_larger_than(TABLE *table, uint32 size)
}
}
-/**
- Find where a form starts.
-
- @param head The start of the form file.
-
- @remark If formname is NULL then only formnames is read.
-
- @retval The form position.
-*/
-
-static ulong get_form_pos(File file, uchar *head)
-{
- uchar *pos, *buf;
- uint names, length;
- ulong ret_value=0;
- DBUG_ENTER("get_form_pos");
-
- names= uint2korr(head+8);
-
- if (!(names= uint2korr(head+8)))
- DBUG_RETURN(0);
-
- length= uint2korr(head+4);
-
- mysql_file_seek(file, 64L, MY_SEEK_SET, MYF(0));
-
- if (!(buf= (uchar*) my_malloc(length+names*4, MYF(MY_WME))))
- DBUG_RETURN(0);
-
- if (mysql_file_read(file, buf, length+names*4, MYF(MY_NABP)))
- {
- my_free(buf);
- DBUG_RETURN(0);
- }
-
- pos= buf+length;
- ret_value= uint4korr(pos);
-
- my_free(buf);
-
- DBUG_RETURN(ret_value);
-}
-
-
-/*
- Read string from a file with malloc
-
- NOTES:
- We add an \0 at end of the read string to make reading of C strings easier
-*/
+/* error message when opening a form file */
-int read_string(File file, uchar**to, size_t length)
+void open_table_error(TABLE_SHARE *share, enum open_frm_error error,
+ int db_errno)
{
- DBUG_ENTER("read_string");
-
- my_free(*to);
- if (!(*to= (uchar*) my_malloc(length+1,MYF(MY_WME))) ||
- mysql_file_read(file, *to, length, MYF(MY_NABP)))
- {
- my_free(*to); /* purecov: inspected */
- *to= 0; /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
- }
- *((char*) *to+length)= '\0';
- DBUG_RETURN (0);
-} /* read_string */
-
-
- /* Add a new form to a form file */
-
-ulong make_new_entry(File file, uchar *fileinfo, TYPELIB *formnames,
- const char *newname)
-{
- uint i,bufflength,maxlength,n_length,length,names;
- ulong endpos,newpos;
- uchar buff[IO_SIZE];
- uchar *pos;
- DBUG_ENTER("make_new_entry");
-
- length=(uint) strlen(newname)+1;
- n_length=uint2korr(fileinfo+4);
- maxlength=uint2korr(fileinfo+6);
- names=uint2korr(fileinfo+8);
- newpos=uint4korr(fileinfo+10);
-
- if (64+length+n_length+(names+1)*4 > maxlength)
- { /* Expand file */
- newpos+=IO_SIZE;
- int4store(fileinfo+10,newpos);
- /* Copy from file-end */
- endpos= (ulong) mysql_file_seek(file, 0L, MY_SEEK_END, MYF(0));
- bufflength= (uint) (endpos & (IO_SIZE-1)); /* IO_SIZE is a power of 2 */
-
- while (endpos > maxlength)
- {
- mysql_file_seek(file, (ulong) (endpos-bufflength), MY_SEEK_SET, MYF(0));
- if (mysql_file_read(file, buff, bufflength, MYF(MY_NABP+MY_WME)))
- DBUG_RETURN(0L);
- mysql_file_seek(file, (ulong) (endpos-bufflength+IO_SIZE), MY_SEEK_SET,
- MYF(0));
- if ((mysql_file_write(file, buff, bufflength, MYF(MY_NABP+MY_WME))))
- DBUG_RETURN(0);
- endpos-=bufflength; bufflength=IO_SIZE;
- }
- bzero(buff,IO_SIZE); /* Null new block */
- mysql_file_seek(file, (ulong) maxlength, MY_SEEK_SET, MYF(0));
- if (mysql_file_write(file, buff, bufflength, MYF(MY_NABP+MY_WME)))
- DBUG_RETURN(0L);
- maxlength+=IO_SIZE; /* Fix old ref */
- int2store(fileinfo+6,maxlength);
- for (i=names, pos= (uchar*) *formnames->type_names+n_length-1; i-- ;
- pos+=4)
- {
- endpos=uint4korr(pos)+IO_SIZE;
- int4store(pos,endpos);
- }
- }
-
- if (n_length == 1 )
- { /* First name */
- length++;
- (void) strxmov((char*) buff,"/",newname,"/",NullS);
- }
- else
- (void) strxmov((char*) buff,newname,"/",NullS); /* purecov: inspected */
- mysql_file_seek(file, 63L+(ulong) n_length, MY_SEEK_SET, MYF(0));
- if (mysql_file_write(file, buff, (size_t) length+1, MYF(MY_NABP+MY_WME)) ||
- (names && mysql_file_write(file,
- (uchar*) (*formnames->type_names+n_length-1),
- names*4, MYF(MY_NABP+MY_WME))) ||
- mysql_file_write(file, fileinfo+10, 4, MYF(MY_NABP+MY_WME)))
- DBUG_RETURN(0L); /* purecov: inspected */
-
- int2store(fileinfo+8,names+1);
- int2store(fileinfo+4,n_length+length);
- (void) mysql_file_chsize(file, newpos, 0, MYF(MY_WME));/* Append file with '\0' */
- DBUG_RETURN(newpos);
-} /* make_new_entry */
-
-
- /* error message when opening a form file */
-
-void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg)
-{
- int err_no;
char buff[FN_REFLEN];
- myf errortype= ME_ERROR+ME_WAITTANG; // Write fatals error to log
+ const myf errortype= ME_ERROR+ME_WAITTANG; // Write fatal errors to log
DBUG_ENTER("open_table_error");
switch (error) {
- case 7:
- case 1:
+ case OPEN_FRM_OPEN_ERROR:
/*
Test if the file doesn't exist. We also have to test for EINVAL as this
may happen on Windows when opening a file with an illegal file name
@@ -2996,55 +3001,30 @@ void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg)
errortype, buff, db_errno);
}
break;
- case 2:
- {
- handler *file= 0;
- const char *datext= "";
-
- if (share->db_type() != NULL)
- {
- if ((file= get_new_handler(share, current_thd->mem_root,
- share->db_type())))
- {
- if (!(datext= *file->bas_ext()))
- datext= "";
- }
- }
- err_no= (db_errno == ENOENT) ? ER_FILE_NOT_FOUND : (db_errno == EAGAIN) ?
- ER_FILE_USED : ER_CANT_OPEN_FILE;
- strxmov(buff, share->normalized_path.str, datext, NullS);
- my_error(err_no,errortype, buff, db_errno);
- delete file;
+ case OPEN_FRM_OK:
+ DBUG_ASSERT(0); // open_table_error() is never called for this one
break;
- }
- case 5:
- {
- const char *csname= get_charset_name((uint) errarg);
- char tmp[10];
- if (!csname || csname[0] =='?')
- {
- my_snprintf(tmp, sizeof(tmp), "#%d", errarg);
- csname= tmp;
- }
- my_printf_error(ER_UNKNOWN_COLLATION,
- "Unknown collation '%s' in table '%-.64s' definition",
- MYF(0), csname, share->table_name.str);
+ case OPEN_FRM_ERROR_ALREADY_ISSUED:
break;
- }
- case 6:
- strxmov(buff, share->normalized_path.str, reg_ext, NullS);
- my_printf_error(ER_NOT_FORM_FILE,
- "Table '%-.64s' was created with a different version "
- "of MySQL and cannot be read",
- MYF(0), buff);
+ case OPEN_FRM_NOT_A_VIEW:
+ my_error(ER_WRONG_OBJECT, MYF(0), share->db.str,
+ share->table_name.str, "VIEW");
+ break;
+ case OPEN_FRM_NOT_A_TABLE:
+ my_error(ER_WRONG_OBJECT, MYF(0), share->db.str,
+ share->table_name.str, "TABLE");
break;
- case 8:
+ case OPEN_FRM_DISCOVER:
+ DBUG_ASSERT(0); // open_table_error() is never called for this one
break;
- default: /* Better wrong error than none */
- case 4:
+ case OPEN_FRM_CORRUPTED:
strxmov(buff, share->normalized_path.str, reg_ext, NullS);
my_error(ER_NOT_FORM_FILE, errortype, buff);
break;
+ case OPEN_FRM_READ_ERROR:
+ strxmov(buff, share->normalized_path.str, reg_ext, NullS);
+ my_error(ER_ERROR_ON_READ, errortype, buff, db_errno);
+ break;
}
DBUG_VOID_RETURN;
} /* open_table_error */
@@ -3148,28 +3128,6 @@ static uint find_field(Field **fields, uchar *record, uint start, uint length)
}
- /* Check that the integer is in the internal */
-
-int set_zone(register int nr, int min_zone, int max_zone)
-{
- if (nr<=min_zone)
- return (min_zone);
- if (nr>=max_zone)
- return (max_zone);
- return (nr);
-} /* set_zone */
-
- /* Adjust number to next larger disk buffer */
-
-ulong next_io_size(register ulong pos)
-{
- reg2 ulong offset;
- if ((offset= pos & (IO_SIZE-1)))
- return pos-offset+IO_SIZE;
- return pos;
-} /* next_io_size */
-
-
/*
Store an SQL quoted string.
@@ -3232,22 +3190,12 @@ void append_unescaped(String *res, const char *pos, uint length)
}
- /* Create a .frm file */
-
-File create_frm(THD *thd, const char *name, const char *db,
- const char *table, uint reclength, uchar *fileinfo,
- HA_CREATE_INFO *create_info, uint keys, KEY *key_info)
+void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
+ HA_CREATE_INFO *create_info, uint keys, KEY *key_info)
{
- register File file;
- ulong length;
- uchar fill[IO_SIZE];
- int create_flags= O_RDWR | O_TRUNC;
ulong key_comment_total_bytes= 0;
uint i;
- DBUG_ENTER("create_frm");
-
- if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
- create_flags|= O_EXCL | O_NOFOLLOW;
+ DBUG_ENTER("prepare_frm_header");
/* Fix this when we have new .frm files; Current limit is 4G rows (TODO) */
if (create_info->max_rows > UINT_MAX32)
@@ -3255,100 +3203,76 @@ File create_frm(THD *thd, const char *name, const char *db,
if (create_info->min_rows > UINT_MAX32)
create_info->min_rows= UINT_MAX32;
- if ((file= mysql_file_create(key_file_frm,
- name, CREATE_MODE, create_flags, MYF(0))) >= 0)
- {
- uint key_length, tmp_key_length, tmp, csid;
- bzero((char*) fileinfo,64);
- /* header */
- fileinfo[0]=(uchar) 254;
- fileinfo[1]= 1;
- fileinfo[2]= FRM_VER+3+ test(create_info->varchar);
+ uint key_length, tmp_key_length, tmp, csid;
+ bzero((char*) fileinfo, FRM_HEADER_SIZE);
+ /* header */
+ fileinfo[0]=(uchar) 254;
+ fileinfo[1]= 1;
+ fileinfo[2]= FRM_VER+3+ test(create_info->varchar);
- fileinfo[3]= (uchar) ha_legacy_type(
- ha_checktype(thd,ha_legacy_type(create_info->db_type),0,0));
- fileinfo[4]=1;
- int2store(fileinfo+6,IO_SIZE); /* Next block starts here */
- /*
- Keep in sync with pack_keys() in unireg.cc
- For each key:
- 8 bytes for the key header
- 9 bytes for each key-part (MAX_REF_PARTS)
- NAME_LEN bytes for the name
- 1 byte for the NAMES_SEP_CHAR (before the name)
- For all keys:
- 6 bytes for the header
- 1 byte for the NAMES_SEP_CHAR (after the last name)
- 9 extra bytes (padding for safety? alignment?)
- */
- for (i= 0; i < keys; i++)
- {
- DBUG_ASSERT(test(key_info[i].flags & HA_USES_COMMENT) ==
- (key_info[i].comment.length > 0));
- if (key_info[i].flags & HA_USES_COMMENT)
- key_comment_total_bytes += 2 + key_info[i].comment.length;
- }
+ fileinfo[3]= (uchar) ha_legacy_type(
+ ha_checktype(thd,ha_legacy_type(create_info->db_type),0,0));
- key_length= keys * (8 + MAX_REF_PARTS * 9 + NAME_LEN + 1) + 16
- + key_comment_total_bytes;
-
- length= next_io_size((ulong) (IO_SIZE+key_length+reclength+
- create_info->extra_size));
- int4store(fileinfo+10,length);
- tmp_key_length= (key_length < 0xffff) ? key_length : 0xffff;
- int2store(fileinfo+14,tmp_key_length);
- int2store(fileinfo+16,reclength);
- int4store(fileinfo+18,create_info->max_rows);
- int4store(fileinfo+22,create_info->min_rows);
- /* fileinfo[26] is set in mysql_create_frm() */
- fileinfo[27]=2; // Use long pack-fields
- /* fileinfo[28 & 29] is set to key_info_length in mysql_create_frm() */
- create_info->table_options|=HA_OPTION_LONG_BLOB_PTR; // Use portable blob pointers
- int2store(fileinfo+30,create_info->table_options);
- fileinfo[32]=0; // No filename anymore
- fileinfo[33]=5; // Mark for 5.0 frm file
- int4store(fileinfo+34,create_info->avg_row_length);
- csid= (create_info->default_table_charset ?
- create_info->default_table_charset->number : 0);
- fileinfo[38]= (uchar) csid;
- fileinfo[39]= (uchar) ((uint) create_info->transactional |
- ((uint) create_info->page_checksum << 2));
- fileinfo[40]= (uchar) create_info->row_type;
- /* Bytes 41-46 were for RAID support; now reused for other purposes */
- fileinfo[41]= (uchar) (csid >> 8);
- int2store(fileinfo+42, create_info->stats_sample_pages & 0xffff);
- fileinfo[44]= (uchar) create_info->stats_auto_recalc;
- fileinfo[45]= 0;
- fileinfo[46]= 0;
- int4store(fileinfo+47, key_length);
- tmp= MYSQL_VERSION_ID; // Store to avoid warning from int4store
- int4store(fileinfo+51, tmp);
- int4store(fileinfo+55, create_info->extra_size);
- /*
- 59-60 is reserved for extra_rec_buf_length,
- 61 for default_part_db_type
- */
- int2store(fileinfo+62, create_info->key_block_size);
- bzero(fill,IO_SIZE);
- for (; length > IO_SIZE ; length-= IO_SIZE)
- {
- if (mysql_file_write(file, fill, IO_SIZE, MYF(MY_WME | MY_NABP)))
- {
- (void) mysql_file_close(file, MYF(0));
- (void) mysql_file_delete(key_file_frm, name, MYF(0));
- return(-1);
- }
- }
- }
- else
- {
- if (my_errno == ENOENT)
- my_error(ER_BAD_DB_ERROR,MYF(0),db);
- else
- my_error(ER_CANT_CREATE_TABLE,MYF(0),table,my_errno);
- }
- DBUG_RETURN(file);
-} /* create_frm */
+ /*
+ Keep in sync with pack_keys() in unireg.cc
+ For each key:
+ 8 bytes for the key header
+ 9 bytes for each key-part (MAX_REF_PARTS)
+ NAME_LEN bytes for the name
+ 1 byte for the NAMES_SEP_CHAR (before the name)
+ For all keys:
+ 6 bytes for the header
+ 1 byte for the NAMES_SEP_CHAR (after the last name)
+ 9 extra bytes (padding for safety? alignment?)
+ */
+ for (i= 0; i < keys; i++)
+ {
+ DBUG_ASSERT(test(key_info[i].flags & HA_USES_COMMENT) ==
+ (key_info[i].comment.length > 0));
+ if (key_info[i].flags & HA_USES_COMMENT)
+ key_comment_total_bytes += 2 + key_info[i].comment.length;
+ }
+
+ key_length= keys * (8 + MAX_REF_PARTS * 9 + NAME_LEN + 1) + 16
+ + key_comment_total_bytes;
+
+ int2store(fileinfo+8,1);
+ tmp_key_length= (key_length < 0xffff) ? key_length : 0xffff;
+ int2store(fileinfo+14,tmp_key_length);
+ int2store(fileinfo+16,reclength);
+ int4store(fileinfo+18,create_info->max_rows);
+ int4store(fileinfo+22,create_info->min_rows);
+ /* fileinfo[26] is set in mysql_create_frm() */
+ fileinfo[27]=2; // Use long pack-fields
+ /* fileinfo[28 & 29] is set to key_info_length in mysql_create_frm() */
+ create_info->table_options|=HA_OPTION_LONG_BLOB_PTR; // Use portable blob pointers
+ int2store(fileinfo+30,create_info->table_options);
+ fileinfo[32]=0; // No filename anymore
+ fileinfo[33]=5; // Mark for 5.0 frm file
+ int4store(fileinfo+34,create_info->avg_row_length);
+ csid= (create_info->default_table_charset ?
+ create_info->default_table_charset->number : 0);
+ fileinfo[38]= (uchar) csid;
+ fileinfo[39]= (uchar) ((uint) create_info->transactional |
+ ((uint) create_info->page_checksum << 2));
+ fileinfo[40]= (uchar) create_info->row_type;
+ /* Bytes 41-46 were for RAID support; now reused for other purposes */
+ fileinfo[41]= (uchar) (csid >> 8);
+ int2store(fileinfo+42, create_info->stats_sample_pages & 0xffff);
+ fileinfo[44]= (uchar) create_info->stats_auto_recalc;
+ fileinfo[45]= 0;
+ fileinfo[46]= 0;
+ int4store(fileinfo+47, key_length);
+ tmp= MYSQL_VERSION_ID; // Store to avoid warning from int4store
+ int4store(fileinfo+51, tmp);
+ int4store(fileinfo+55, create_info->extra_size);
+ /*
+ 59-60 is reserved for extra_rec_buf_length,
+ 61 for default_part_db_type
+ */
+ int2store(fileinfo+62, create_info->key_block_size);
+ DBUG_VOID_RETURN;
+} /* prepare_frm_header */
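The header filled in by prepare_frm_header() is a set of fixed-offset little-endian fields. The following standalone sketch decodes a few of them back; the offsets are taken from the int2store/int4store calls above, but uint2/uint4 here are plain byte shifts rather than the server's macros, and the sample values are made up.

#include <stdio.h>

typedef unsigned char uchar;

static unsigned uint2(const uchar *p) { return p[0] | (p[1] << 8); }
static unsigned long uint4(const uchar *p)
{
  return (unsigned long) p[0] | ((unsigned long) p[1] << 8) |
         ((unsigned long) p[2] << 16) | ((unsigned long) p[3] << 24);
}

/* Decode a few fields at the offsets used by prepare_frm_header() above. */
static void dump_frm_header(const uchar *fileinfo)
{
  printf("marker/version bytes : %u %u %u\n",
         fileinfo[0], fileinfo[1], fileinfo[2]);
  printf("record length        : %u\n",  uint2(fileinfo + 16));
  printf("max/min rows         : %lu/%lu\n",
         uint4(fileinfo + 18), uint4(fileinfo + 22));
  printf("charset id           : %u\n", fileinfo[38] | (fileinfo[41] << 8));
  printf("key buffer length    : %lu\n", uint4(fileinfo + 47));
  printf("server version id    : %lu\n", uint4(fileinfo + 51));
  printf("key block size       : %u\n",  uint2(fileinfo + 62));
}

int main()
{
  uchar fileinfo[64] = {0};
  fileinfo[0]= 254; fileinfo[1]= 1;            /* header marker bytes set above */
  fileinfo[16]= 100;                           /* reclength = 100 */
  fileinfo[51]= 0xAB; fileinfo[52]= 0x86; fileinfo[53]= 0x01;  /* 100011, little-endian */
  dump_frm_header(fileinfo);
  return 0;
}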
void update_create_info_from_table(HA_CREATE_INFO *create_info, TABLE *table)
@@ -3377,7 +3301,7 @@ rename_file_ext(const char * from,const char * to,const char * ext)
char from_b[FN_REFLEN],to_b[FN_REFLEN];
(void) strxmov(from_b,from,ext,NullS);
(void) strxmov(to_b,to,ext,NullS);
- return (mysql_file_rename(key_file_frm, from_b, to_b, MYF(MY_WME)));
+ return mysql_file_rename(key_file_frm, from_b, to_b, MYF(0));
}
@@ -3409,7 +3333,7 @@ bool get_field(MEM_ROOT *mem, Field *field, String *res)
}
if (!(to= strmake_root(mem, str.ptr(), length)))
length= 0; // Safety fix
- res->set(to, length, ((Field_str*)field)->charset());
+ res->set(to, length, field->charset());
return 0;
}
@@ -3588,9 +3512,9 @@ bool check_column_name(const char *name)
}
#else
last_char_is_space= *name==' ';
-#endif
- if (*name == NAMES_SEP_CHAR)
+ if (*name == '\377')
return 1;
+#endif
name++;
name_length++;
}
@@ -3748,6 +3672,46 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def)
}
}
+ if (table_def->primary_key_parts)
+ {
+ if (table->s->primary_key == MAX_KEY)
+ {
+ report_error(0, "Incorrect definition of table %s.%s: "
+ "missing primary key.", table->s->db.str,
+ table->alias.c_ptr());
+ error= TRUE;
+ }
+ else
+ {
+ KEY *pk= &table->s->key_info[table->s->primary_key];
+ if (pk->user_defined_key_parts != table_def->primary_key_parts)
+ {
+ report_error(0, "Incorrect definition of table %s.%s: "
+ "Expected primary key to have %u columns, but instead "
+ "found %u columns.", table->s->db.str,
+ table->alias.c_ptr(), table_def->primary_key_parts,
+ pk->user_defined_key_parts);
+ error= TRUE;
+ }
+ else
+ {
+ for (i= 0; i < pk->user_defined_key_parts; ++i)
+ {
+ if (table_def->primary_key_columns[i] + 1 != pk->key_part[i].fieldnr)
+ {
+ report_error(0, "Incorrect definition of table %s.%s: Expected "
+ "primary key part %u to refer to column %u, but "
+ "instead found column %u.", table->s->db.str,
+ table->alias.c_ptr(), i + 1,
+ table_def->primary_key_columns[i] + 1,
+ pk->key_part[i].fieldnr);
+ error= TRUE;
+ }
+ }
+ }
+ }
+ }
+
if (! error)
table->s->table_field_def_cache= table_def;
@@ -3978,6 +3942,8 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
file->ha_start_of_new_statement();
reginfo.impossible_range= 0;
created= TRUE;
+ cond_selectivity= 1.0;
+ cond_selectivity_sampling_explain= NULL;
/* Catch wrong handling of the auto_increment_field_not_null. */
DBUG_ASSERT(!auto_increment_field_not_null);
@@ -3986,6 +3952,11 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
pos_in_table_list= tl;
clear_column_bitmaps();
+ for (Field **f_ptr= field ; *f_ptr ; f_ptr++)
+ {
+ (*f_ptr)->next_equal_field= NULL;
+ (*f_ptr)->cond_selectivity= 1.0;
+ }
DBUG_ASSERT(key_read == 0);
@@ -4236,6 +4207,7 @@ bool TABLE_LIST::prep_where(THD *thd, Item **conds,
bool no_where_clause)
{
DBUG_ENTER("TABLE_LIST::prep_where");
+ bool res= FALSE;
for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local)
{
@@ -4284,10 +4256,11 @@ bool TABLE_LIST::prep_where(THD *thd, Item **conds,
if (tbl == 0)
{
if (*conds && !(*conds)->fixed)
- (*conds)->fix_fields(thd, conds);
- *conds= and_conds(*conds, where->copy_andor_structure(thd));
- if (*conds && !(*conds)->fixed)
- (*conds)->fix_fields(thd, conds);
+ res= (*conds)->fix_fields(thd, conds);
+ if (!res)
+ *conds= and_conds(*conds, where->copy_andor_structure(thd));
+ if (*conds && !(*conds)->fixed && !res)
+ res= (*conds)->fix_fields(thd, conds);
}
if (arena)
thd->restore_active_arena(arena, &backup);
@@ -4295,7 +4268,7 @@ bool TABLE_LIST::prep_where(THD *thd, Item **conds,
}
}
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(res);
}
/**
@@ -4833,7 +4806,7 @@ void TABLE_LIST::register_want_access(ulong want_access)
Load security context information for this view
SYNOPSIS
- TABLE_LIST::prepare_view_securety_context()
+ TABLE_LIST::prepare_view_security_context()
thd [in] thread handler
RETURN
@@ -4842,9 +4815,9 @@ void TABLE_LIST::register_want_access(ulong want_access)
*/
#ifndef NO_EMBEDDED_ACCESS_CHECKS
-bool TABLE_LIST::prepare_view_securety_context(THD *thd)
+bool TABLE_LIST::prepare_view_security_context(THD *thd)
{
- DBUG_ENTER("TABLE_LIST::prepare_view_securety_context");
+ DBUG_ENTER("TABLE_LIST::prepare_view_security_context");
DBUG_PRINT("enter", ("table: %s", alias));
DBUG_ASSERT(!prelocking_placeholder && view);
@@ -4952,7 +4925,7 @@ bool TABLE_LIST::prepare_security(THD *thd)
Security_context *save_security_ctx= thd->security_ctx;
DBUG_ASSERT(!prelocking_placeholder);
- if (prepare_view_securety_context(thd))
+ if (prepare_view_security_context(thd))
DBUG_RETURN(TRUE);
thd->security_ctx= find_view_security_context(thd);
while ((tbl= tb++))
diff --git a/sql/table.h b/sql/table.h
index 1e4774e021e..9079d6fa847 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -493,6 +493,8 @@ typedef struct st_table_field_def
{
uint count;
const TABLE_FIELD_TYPE *field;
+ uint primary_key_parts;
+ const uint *primary_key_columns;
} TABLE_FIELD_DEF;
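The two new members give Table_check_intact::check() (see the additions earlier in this patch) enough information to verify the primary key as well as the column list; primary_key_columns holds 0-based field indexes, while KEY_PART_INFO::fieldnr is 1-based, hence the "+ 1" in the check. The sketch below shows how such a definition might be filled in; the types are reduced stand-ins and the table, fields and two-column primary key are made up.

/* Reduced stand-ins (not the real MariaDB types), only to show the new
   primary_key_parts / primary_key_columns members being populated. */
struct field_type_sketch { const char *name; const char *type; };

struct field_def_sketch {
  unsigned count;
  const field_type_sketch *field;
  unsigned primary_key_parts;           /* number of columns in the PK */
  const unsigned *primary_key_columns;  /* 0-based field indexes of the PK */
};

static const field_type_sketch demo_fields[] = {
  { "db",    "char(64)" },
  { "name",  "char(64)" },
  { "flags", "int(11)"  },
};

/* Hypothetical table with PRIMARY KEY (db, name): field indexes 0 and 1. */
static const unsigned demo_pk_columns[] = { 0, 1 };

static const field_def_sketch demo_def = { 3, demo_fields, 2, demo_pk_columns };

int main() { return demo_def.primary_key_parts == 2 ? 0 : 1; }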
@@ -549,6 +551,17 @@ typedef I_P_List <Wait_for_flush,
Wait_for_flush_list;
+enum open_frm_error {
+ OPEN_FRM_OK = 0,
+ OPEN_FRM_OPEN_ERROR,
+ OPEN_FRM_READ_ERROR,
+ OPEN_FRM_CORRUPTED,
+ OPEN_FRM_DISCOVER,
+ OPEN_FRM_ERROR_ALREADY_ISSUED,
+ OPEN_FRM_NOT_A_VIEW,
+ OPEN_FRM_NOT_A_TABLE
+};
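These codes replace the old numeric error/errarg pair returned by open_table_def(). A minimal standalone sketch of how a caller could branch on them, mirroring the enum above; the descriptions are paraphrases of the open_table_error() handling shown earlier in this patch, and the enum copy is local so the example compiles on its own.

#include <stdio.h>

enum open_frm_error_sketch {
  FRM_OK = 0,
  FRM_OPEN_ERROR,
  FRM_READ_ERROR,
  FRM_CORRUPTED,
  FRM_DISCOVER,                 /* no usable .frm: retry via engine discovery */
  FRM_ERROR_ALREADY_ISSUED,
  FRM_NOT_A_VIEW,
  FRM_NOT_A_TABLE
};

static const char *describe(open_frm_error_sketch err)
{
  switch (err) {
  case FRM_OK:                   return "opened";
  case FRM_DISCOVER:             return "ask the storage engine to discover the table";
  case FRM_ERROR_ALREADY_ISSUED: return "error already reported, just propagate";
  case FRM_NOT_A_VIEW:           return "expected a view, found a base table";
  case FRM_NOT_A_TABLE:          return "expected a base table, found a view";
  default:                       return "report through an open_table_error()-style handler";
  }
}

int main()
{
  printf("%s\n", describe(FRM_DISCOVER));
  return 0;
}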
+
/**
Control block to access table statistics loaded
from persistent statistical tables
@@ -560,7 +573,9 @@ struct TABLE_STATISTICS_CB
Table_statistics *table_stats; /* Structure to access the statistical data */
bool stats_can_be_read; /* Memory for statistical data is allocated */
bool stats_is_read; /* Statistical data for table has been read
- from statistical tables */
+ from statistical tables */
+ bool histograms_can_be_read;
+ bool histograms_are_read;
};
@@ -583,6 +598,7 @@ struct TABLE_SHARE
TYPELIB fieldnames; /* Pointer to fieldnames */
TYPELIB *intervals; /* pointer to interval info */
mysql_mutex_t LOCK_ha_data; /* To protect access to ha_data */
+ mysql_mutex_t LOCK_share; /* To protect TABLE_SHARE */
TABLE_SHARE *next, **prev; /* Link to unused shares */
/*
@@ -593,6 +609,8 @@ struct TABLE_SHARE
TABLE_list used_tables;
TABLE_list free_tables;
+ LEX_CUSTRING tabledef_version;
+
engine_option_value *option_list; /* text options for table */
ha_table_option_struct *option_struct; /* structure with parsed options */
@@ -643,8 +661,8 @@ struct TABLE_SHARE
plugin_ref db_plugin; /* storage engine plugin */
inline handlerton *db_type() const /* table_type for handler */
{
- // DBUG_ASSERT(db_plugin);
- return db_plugin ? plugin_data(db_plugin, handlerton*) : NULL;
+ return is_view ? view_pseudo_hton :
+ db_plugin ? plugin_hton(db_plugin) : NULL;
}
enum row_type row_type; /* How rows are stored */
enum tmp_table_type tmp_table;
@@ -685,7 +703,8 @@ struct TABLE_SHARE
uint next_number_index; /* autoincrement key number */
uint next_number_key_offset; /* autoinc keypart offset in a key */
uint next_number_keypart; /* autoinc keypart number in a key */
- uint error, open_errno, errarg; /* error from open_table_def() */
+ enum open_frm_error error; /* error from open_table_def() */
+ uint open_errno; /* error from open_table_def() */
uint column_bitmap_size;
uchar frm_version;
uint vfields; /* Number of computed (virtual) fields */
@@ -717,7 +736,7 @@ struct TABLE_SHARE
char *partition_info_str;
uint partition_info_str_len;
uint partition_info_buffer_size;
- handlerton *default_part_db_type;
+ plugin_ref default_part_plugin;
#endif
/**
@@ -811,12 +830,42 @@ struct TABLE_SHARE
return table_map_id;
}
-
/** Is this table share being expelled from the table definition cache? */
inline bool has_old_version() const
{
return version != refresh_version;
}
+ inline bool protected_against_usage() const
+ {
+ return version == 0;
+ }
+ inline void protect_against_usage()
+ {
+ version= 0;
+ }
+ /*
+ This is used only for the case of locked tables, as we want to
+ allow one to do SHOW commands on them even after ALTER or REPAIR
+ */
+ inline void allow_access_to_protected_table()
+ {
+ DBUG_ASSERT(version == 0);
+ version= 1;
+ }
+ /*
+ Remove from table definition cache at close.
+ Table can still be opened by SHOW
+ */
+ inline void remove_from_cache_at_close()
+ {
+ if (version != 0) /* Don't remove protection */
+ version= 1;
+ }
+ inline void set_refresh_version()
+ {
+ version= refresh_version;
+ }
+
/**
Convert unrelated members of TABLE_SHARE to one enum
representing its type.
@@ -930,6 +979,40 @@ struct TABLE_SHARE
}
uint actual_n_key_parts(THD *thd);
+
+ LEX_CUSTRING *frm_image; ///< only during CREATE TABLE (@sa ha_create_table)
+
+ /*
+ populates TABLE_SHARE from the table description in the binary frm image.
+ if 'write' is true, this frm image is also written into a corresponding
+ frm file, that serves as a persistent metadata cache to avoid
+ discovering the table over and over again
+ */
+ int init_from_binary_frm_image(THD *thd, bool write,
+ const uchar *frm_image, size_t frm_length);
+
+ /*
+ populates TABLE_SHARE from the table description, specified as the
+ complete CREATE TABLE sql statement.
+ if 'write' is true, this frm image is also written into a corresponding
+ frm file, that serves as a persistent metadata cache to avoid
+ discovering the table over and over again
+ */
+ int init_from_sql_statement_string(THD *thd, bool write,
+ const char *sql, size_t sql_length);
+ /*
+ writes the frm image to an frm file, corresponding to this table
+ */
+ bool write_frm_image(const uchar *frm_image, size_t frm_length);
+
+ /*
+ returns an frm image for this table.
+ the memory is allocated and must be freed later
+ */
+ bool read_frm_image(const uchar **frm_image, size_t *frm_length);
+
+ /* frees the memory allocated in read_frm_image */
+ void free_frm_image(const uchar *frm);
};
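The four frm-image members declared above suggest a read / init / free round trip. The following is a standalone sketch of that flow with a stubbed stand-in for TABLE_SHARE; the dummy bodies, the treatment of a non-zero return from read_frm_image() as failure, and the reload_share_from_frm() helper are all assumptions made only for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for TABLE_SHARE with dummy bodies, just to show the calling order. */
struct share_like {
  bool read_frm_image(const unsigned char **img, size_t *len) {
    *len = 4;
    *img = (const unsigned char *) malloc(*len);   /* caller frees via free_frm_image */
    return *img == nullptr;                        /* non-zero means failure here */
  }
  int init_from_binary_frm_image(void *, bool, const unsigned char *, size_t len) {
    return len == 0;                               /* pretend an empty image is bad */
  }
  void free_frm_image(const unsigned char *img) { free((void *) img); }
};

/* Read the packed image, rebuild the share from it, then release the image. */
static bool reload_share_from_frm(share_like *share, void *thd)
{
  const unsigned char *img;
  size_t len;
  if (share->read_frm_image(&img, &len))
    return false;
  int err = share->init_from_binary_frm_image(thd, /*write=*/false, img, len);
  share->free_frm_image(img);                      /* always release what was read */
  return err == 0;
}

int main()
{
  share_like s;
  printf("reload %s\n", reload_share_from_frm(&s, nullptr) ? "ok" : "failed");
  return 0;
}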
@@ -941,6 +1024,8 @@ enum index_hint_type
INDEX_HINT_FORCE
};
+struct st_cond_statistic;
+
#define CHECK_ROW_FOR_NULLS_TO_REJECT (1 << 0)
#define REJECT_ROW_DUE_TO_NULL_FIELDS (1 << 1)
@@ -1012,6 +1097,7 @@ public:
my_bitmap_map *bitmap_init_value;
MY_BITMAP def_read_set, def_write_set, def_vcol_set, tmp_set;
MY_BITMAP eq_join_set; /* used to mark equi-joined fields */
+ MY_BITMAP cond_set; /* used to mark fields from sargable conditions*/
MY_BITMAP *read_set, *write_set, *vcol_set; /* Active column sets */
/*
The ID of the query that opened and is using this table. Has different
@@ -1064,6 +1150,9 @@ public:
*/
ha_rows quick_condition_rows;
+ double cond_selectivity;
+ List<st_cond_statistic> *cond_selectivity_sampling_explain;
+
table_map map; /* ID bit of table (1,2,4,8,16...) */
uint lock_position; /* Position in MYSQL_LOCK.table */
@@ -1187,6 +1276,7 @@ public:
#endif
uint max_keys; /* Size of allocated key_info array. */
bool stats_is_read; /* Persistent statistics is read for the table */
+ bool histograms_are_read;
MDL_ticket *mdl_ticket;
void init(THD *thd, TABLE_LIST *tl);
@@ -1570,7 +1660,7 @@ struct TABLE_LIST
/**
Prepare TABLE_LIST that consists of one table instance to use in
- simple_open_and_lock_tables
+ open_and_lock_tables
*/
inline void init_one_table(const char *db_name_arg,
size_t db_length_arg,
@@ -1883,7 +1973,6 @@ struct TABLE_LIST
/* For transactional locking. */
int lock_timeout; /* NOWAIT or WAIT [X] */
bool lock_transactional; /* If transactional lock requested. */
- bool internal_tmp_table;
/** TRUE if an alias for this table was specified in the SQL. */
bool is_alias;
/** TRUE if the table is referred to in the statement using a fully
@@ -1999,7 +2088,7 @@ struct TABLE_LIST
bool prepare_security(THD *thd);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context *find_view_security_context(THD *thd);
- bool prepare_view_securety_context(THD *thd);
+ bool prepare_view_security_context(THD *thd);
#endif
/*
Cleanup for re-execution in a prepared statement or a stored
@@ -2142,9 +2231,9 @@ private:
#else
inline void set_check_merged() {}
#endif
- /** See comments for set_metadata_id() */
+ /** See comments for set_table_ref_id() */
enum enum_table_ref_type m_table_ref_type;
- /** See comments for set_metadata_id() */
+ /** See comments for set_table_ref_id() */
ulong m_table_ref_version;
};
@@ -2398,26 +2487,36 @@ static inline void dbug_tmp_restore_column_maps(MY_BITMAP *read_set,
#endif
}
+enum get_table_share_flags {
+ GTS_TABLE = 1,
+ GTS_VIEW = 2,
+ GTS_NOLOCK = 4,
+ GTS_USE_DISCOVERY = 8,
+ GTS_FORCE_DISCOVERY = 16
+};
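The GTS_* values are powers of two, so callers combine them as bit flags, e.g. "accept either a table or a view, without the usual lock". A small standalone sketch of that usage; the local enum mirrors the values above so the example is self-contained.

#include <stdio.h>

enum { TABLE_FLAG = 1, VIEW_FLAG = 2, NOLOCK_FLAG = 4,
       USE_DISCOVERY_FLAG = 8, FORCE_DISCOVERY_FLAG = 16 };

int main()
{
  unsigned flags = TABLE_FLAG | VIEW_FLAG | NOLOCK_FLAG;   /* combine with | */
  printf("accept tables: %d, accept views: %d, discovery forced: %d\n",
         (flags & TABLE_FLAG) != 0,                        /* test with &   */
         (flags & VIEW_FLAG) != 0,
         (flags & FORCE_DISCOVERY_FLAG) != 0);
  return 0;
}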
size_t max_row_length(TABLE *table, const uchar *data);
-
void init_mdl_requests(TABLE_LIST *table_list);
-int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
- uint db_stat, uint prgflag, uint ha_open_flags,
- TABLE *outparam, bool is_create_table);
+enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
+ const char *alias, uint db_stat, uint prgflag,
+ uint ha_open_flags, TABLE *outparam,
+ bool is_create_table);
bool unpack_vcol_info_from_frm(THD *thd, MEM_ROOT *mem_root,
TABLE *table, Field *field,
LEX_STRING *vcol_expr, bool *error_reported);
-TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, const char *key,
- uint key_length);
+TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
+ const char *key, uint key_length);
void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key,
uint key_length,
const char *table_name, const char *path);
void free_table_share(TABLE_SHARE *share);
-int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags);
-void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg);
+enum open_frm_error open_table_def(THD *thd, TABLE_SHARE *share,
+ uint flags = GTS_TABLE);
+
+void open_table_error(TABLE_SHARE *share, enum open_frm_error error,
+ int db_errno);
void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form);
bool check_and_convert_db_name(LEX_STRING *db, bool preserve_lettercase);
bool check_db_name(LEX_STRING *db);
@@ -2428,20 +2527,24 @@ char *get_field(MEM_ROOT *mem, Field *field);
bool get_field(MEM_ROOT *mem, Field *field, class String *res);
int closefrm(TABLE *table, bool free_share);
-int read_string(File file, uchar* *to, size_t length);
void free_blobs(TABLE *table);
void free_field_buffers_larger_than(TABLE *table, uint32 size);
-int set_zone(int nr,int min_zone,int max_zone);
ulong get_form_pos(File file, uchar *head, TYPELIB *save_names);
-ulong make_new_entry(File file,uchar *fileinfo,TYPELIB *formnames,
- const char *newname);
-ulong next_io_size(ulong pos);
void append_unescaped(String *res, const char *pos, uint length);
-File create_frm(THD *thd, const char *name, const char *db,
- const char *table, uint reclength, uchar *fileinfo,
- HA_CREATE_INFO *create_info, uint keys, KEY *key_info);
+void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
+ HA_CREATE_INFO *create_info, uint keys, KEY *key_info);
char *fn_rext(char *name);
+/* Check that the integer is within the interval [min_zone, max_zone] */
+static inline int set_zone(int nr,int min_zone,int max_zone)
+{
+ if (nr <= min_zone)
+ return min_zone;
+ if (nr >= max_zone)
+ return max_zone;
+ return nr;
+}
+
/* performance schema */
extern LEX_STRING PERFORMANCE_SCHEMA_DB_NAME;
diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc
index 147a59df9b7..5be06f0bdc8 100644
--- a/sql/threadpool_common.cc
+++ b/sql/threadpool_common.cc
@@ -72,14 +72,18 @@ struct Worker_thread_context
void save()
{
+#ifdef HAVE_PSI_INTERFACE
psi_thread= PSI_server?PSI_server->get_thread():0;
+#endif
mysys_var= (st_my_thread_var *)pthread_getspecific(THR_KEY_mysys);
}
void restore()
{
+#ifdef HAVE_PSI_INTERFACE
if (PSI_server)
PSI_server->set_thread(psi_thread);
+#endif
pthread_setspecific(THR_KEY_mysys,mysys_var);
pthread_setspecific(THR_THD, 0);
pthread_setspecific(THR_MALLOC, 0);
@@ -95,8 +99,10 @@ static bool thread_attach(THD* thd)
pthread_setspecific(THR_KEY_mysys,thd->mysys_var);
thd->thread_stack=(char*)&thd;
thd->store_globals();
+#ifdef HAVE_PSI_INTERFACE
if (PSI_server)
PSI_server->set_thread(thd->event_scheduler.m_psi);
+#endif
return 0;
}
@@ -123,11 +129,13 @@ int threadpool_add_connection(THD *thd)
}
/* Create new PSI thread for use with the THD. */
+#ifdef HAVE_PSI_INTERFACE
if (PSI_server)
{
thd->event_scheduler.m_psi =
PSI_server->new_thread(key_thread_one_connection, thd, thd->thread_id);
}
+#endif
/* Login. */
diff --git a/sql/threadpool_unix.cc b/sql/threadpool_unix.cc
index 13deb167d9b..0f88d4920b8 100644
--- a/sql/threadpool_unix.cc
+++ b/sql/threadpool_unix.cc
@@ -29,14 +29,14 @@
#ifdef __linux__
#include <sys/epoll.h>
typedef struct epoll_event native_event;
-#endif
-#if defined (__FreeBSD__) || defined (__APPLE__)
+#elif defined(HAVE_KQUEUE)
#include <sys/event.h>
typedef struct kevent native_event;
-#endif
-#if defined (__sun)
+#elif defined (__sun)
#include <port.h>
typedef port_event_t native_event;
+#else
+#error threadpool is not available on this platform
#endif
/** Maximum number of native events a listener can read in one go */
@@ -52,6 +52,7 @@ static bool threadpool_started= false;
*/
+#ifdef HAVE_PSI_INTERFACE
static PSI_mutex_key key_group_mutex;
static PSI_mutex_key key_timer_mutex;
static PSI_mutex_info mutex_list[]=
@@ -79,6 +80,9 @@ static PSI_thread_info thread_list[] =
/* Macro to simplify performance schema registration */
#define PSI_register(X) \
if(PSI_server) PSI_server->register_ ## X("threadpool", X ## _list, array_elements(X ## _list))
+#else
+#define PSI_register(X) /* no-op */
+#endif
struct thread_group_t;
@@ -285,7 +289,21 @@ static void *native_event_get_userdata(native_event *event)
return event->data.ptr;
}
-#elif defined (__FreeBSD__) || defined (__APPLE__)
+#elif defined(HAVE_KQUEUE)
+
+/*
+ NetBSD is incompatible with the other BSDs: the last parameter of the EV_SET macro
+ (udata, user data) needs to be intptr_t, whereas it needs to be void*
+ everywhere else.
+*/
+
+#ifdef __NetBSD__
+#define MY_EV_SET(a, b, c, d, e, f, g) EV_SET(a, b, c, d, e, f, (intptr_t)g)
+#else
+#define MY_EV_SET(a, b, c, d, e, f, g) EV_SET(a, b, c, d, e, f, g)
+#endif
+
+
int io_poll_create()
{
return kqueue();
@@ -294,7 +312,7 @@ int io_poll_create()
int io_poll_start_read(int pollfd, int fd, void *data)
{
struct kevent ke;
- EV_SET(&ke, fd, EVFILT_READ, EV_ADD|EV_ONESHOT,
+ MY_EV_SET(&ke, fd, EVFILT_READ, EV_ADD|EV_ONESHOT,
0, 0, data);
return kevent(pollfd, &ke, 1, 0, 0, 0);
}
@@ -303,7 +321,7 @@ int io_poll_start_read(int pollfd, int fd, void *data)
int io_poll_associate_fd(int pollfd, int fd, void *data)
{
struct kevent ke;
- EV_SET(&ke, fd, EVFILT_READ, EV_ADD|EV_ONESHOT,
+ MY_EV_SET(&ke, fd, EVFILT_READ, EV_ADD|EV_ONESHOT,
0, 0, data);
return io_poll_start_read(pollfd,fd, data);
}
@@ -312,7 +330,7 @@ int io_poll_associate_fd(int pollfd, int fd, void *data)
int io_poll_disassociate_fd(int pollfd, int fd)
{
struct kevent ke;
- EV_SET(&ke,fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
+ MY_EV_SET(&ke,fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
return kevent(pollfd, &ke, 1, 0, 0, 0);
}
@@ -337,7 +355,7 @@ int io_poll_wait(int pollfd, struct kevent *events, int maxevents, int timeout_m
static void* native_event_get_userdata(native_event *event)
{
- return event->udata;
+ return (void *)event->udata;
}
#elif defined (__sun)
@@ -386,8 +404,6 @@ static void* native_event_get_userdata(native_event *event)
{
return event->portev_user;
}
-#else
-#error not ported yet to this OS
#endif
@@ -1247,11 +1263,12 @@ static void connection_abort(connection_t *connection)
DBUG_ENTER("connection_abort");
thread_group_t *group= connection->thread_group;
+ threadpool_remove_connection(connection->thd);
+
mysql_mutex_lock(&group->mutex);
group->connection_count--;
mysql_mutex_unlock(&group->mutex);
-
- threadpool_remove_connection(connection->thd);
+
my_free(connection);
DBUG_VOID_RETURN;
}
diff --git a/sql/transaction.cc b/sql/transaction.cc
index 2265b42be1c..239fdef7064 100644
--- a/sql/transaction.cc
+++ b/sql/transaction.cc
@@ -11,7 +11,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifdef USE_PRAGMA_IMPLEMENTATION
@@ -610,15 +610,19 @@ bool trans_xa_start(THD *thd)
my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
else if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction())
my_error(ER_XAER_OUTSIDE, MYF(0));
- else if (xid_cache_search(thd->lex->xid))
- my_error(ER_XAER_DUPID, MYF(0));
else if (!trans_begin(thd))
{
DBUG_ASSERT(thd->transaction.xid_state.xid.is_null());
thd->transaction.xid_state.xa_state= XA_ACTIVE;
thd->transaction.xid_state.rm_error= 0;
thd->transaction.xid_state.xid.set(thd->lex->xid);
- xid_cache_insert(&thd->transaction.xid_state);
+ if (xid_cache_insert(&thd->transaction.xid_state))
+ {
+ thd->transaction.xid_state.xa_state= XA_NOTR;
+ thd->transaction.xid_state.xid.null();
+ trans_rollback(thd);
+ DBUG_RETURN(true);
+ }
DBUG_RETURN(FALSE);
}
@@ -704,6 +708,16 @@ bool trans_xa_commit(THD *thd)
if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
{
+ /*
+ xid_state.in_thd is always true except during the XA recovery procedure.
+ Note that there is no race condition here between xid_cache_search
+ and xid_cache_delete, since we always delete our own XID
+ (thd->lex->xid == thd->transaction.xid_state.xid).
+ The only case when thd->lex->xid != thd->transaction.xid_state.xid
+ and xid_state->in_thd == 0 is in the function
+ xa_cache_insert(XID, xa_states), which is called before starting
+ client connections, and thus is always single-threaded.
+ */
XID_STATE *xs= xid_cache_search(thd->lex->xid);
res= !xs || xs->in_thd;
if (res)
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 4fd68ca389e..272dfb6381b 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1857,7 +1857,7 @@ static Time_zone*
tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
{
TABLE *table= 0;
- TIME_ZONE_INFO *tz_info;
+ TIME_ZONE_INFO *tz_info= NULL;
Tz_names_entry *tmp_tzname;
Time_zone *return_val= 0;
int res;
@@ -1867,7 +1867,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
uchar keybuff[32];
Field *field;
String abbr(buff, sizeof(buff), &my_charset_latin1);
- char *alloc_buff, *tz_name_buff;
+ char *alloc_buff= NULL;
+ char *tz_name_buff= NULL;
/*
Temporary arrays that are used for loading of data for filling
TIME_ZONE_INFO structure
@@ -1887,22 +1888,6 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
DBUG_ENTER("tz_load_from_open_tables");
- /* Prepare tz_info for loading also let us make copy of time zone name */
- if (!(alloc_buff= (char*) alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) +
- tz_name->length() + 1)))
- {
- sql_print_error("Out of memory while loading time zone description");
- return 0;
- }
- tz_info= (TIME_ZONE_INFO *)alloc_buff;
- bzero(tz_info, sizeof(TIME_ZONE_INFO));
- tz_name_buff= alloc_buff + sizeof(TIME_ZONE_INFO);
- /*
- By writing zero to the end we guarantee that we can call ptr()
- instead of c_ptr() for time zone name.
- */
- strmake(tz_name_buff, tz_name->ptr(), tz_name->length());
-
/*
Let us find out time zone id by its name (there is only one index
and it is specifically for this purpose).
@@ -2521,7 +2506,7 @@ scan_tz_dir(char * name_end)
name_end= strmake(name_end, "/", FN_REFLEN - (name_end - fullname));
- for (i= 0; i < cur_dir->number_off_files; i++)
+ for (i= 0; i < cur_dir->number_of_files; i++)
{
if (cur_dir->dir_entry[i].name[0] != '.')
{
@@ -2575,7 +2560,7 @@ main(int argc, char **argv)
if (argc == 2)
{
- root_name_end= strmake(fullname, argv[1], FN_REFLEN);
+ root_name_end= strmake_buf(fullname, argv[1]);
printf("TRUNCATE TABLE time_zone;\n");
printf("TRUNCATE TABLE time_zone_name;\n");
@@ -2729,7 +2714,7 @@ main(int argc, char **argv)
(int)t, (int)t1);
/* Let us load time zone description */
- str_end= strmake(fullname, TZDIR, FN_REFLEN);
+ str_end= strmake_buf(fullname, TZDIR);
strmake(str_end, "/MET", FN_REFLEN - (str_end - fullname));
if (tz_load(fullname, &tz_info, &tz_storage))
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 9fa06311ece..0c1c34d495b 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -86,6 +86,7 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
full_size= size;
if (min_dupl_count_arg)
full_size+= sizeof(element_count);
+ with_counters= test(min_dupl_count_arg);
my_b_clear(&file);
init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func,
NULL, comp_func_fixed_arg, MYF(MY_THREAD_SPECIFIC));
@@ -428,6 +429,22 @@ static int buffpek_compare(void *arg, uchar *key_ptr1, uchar *key_ptr2)
C_MODE_END
+inline
+element_count get_counter_from_merged_element(void *ptr, uint ofs)
+{
+ element_count cnt;
+ memcpy((uchar *) &cnt, (uchar *) ptr + ofs, sizeof(element_count));
+ return cnt;
+}
+
+
+inline
+void put_counter_into_merged_element(void *ptr, uint ofs, element_count cnt)
+{
+ memcpy((uchar *) ptr + ofs, (uchar *) &cnt, sizeof(element_count));
+}
+
+
/*
DESCRIPTION
@@ -457,6 +474,8 @@ C_MODE_END
file file with all trees dumped. Trees in the file
must contain sorted unique values. Cache must be
initialized in read mode.
+ with_counters take into account counters for equal merged
+ elements
RETURN VALUE
0 ok
<> 0 error
@@ -466,7 +485,7 @@ static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size,
uint key_length, BUFFPEK *begin, BUFFPEK *end,
tree_walk_action walk_action, void *walk_action_arg,
qsort_cmp2 compare, void *compare_arg,
- IO_CACHE *file)
+ IO_CACHE *file, bool with_counters)
{
BUFFPEK_COMPARE_CONTEXT compare_context = { compare, compare_arg };
QUEUE queue;
@@ -485,6 +504,8 @@ static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size,
uint bytes_read; /* to hold return value of read_to_buffer */
BUFFPEK *top;
int res= 1;
+ uint cnt_ofs= key_length - (with_counters ? sizeof(element_count) : 0);
+ element_count cnt;
/*
Invariant: queue must contain top element from each tree, until a tree
is not completely walked through.
@@ -543,9 +564,17 @@ static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size,
/* new top has been obtained; if old top is unique, apply the action */
if (compare(compare_arg, old_key, top->key))
{
- if (walk_action(old_key, 1, walk_action_arg))
+ cnt= with_counters ?
+ get_counter_from_merged_element(old_key, cnt_ofs) : 1;
+ if (walk_action(old_key, cnt, walk_action_arg))
goto end;
}
+ else if (with_counters)
+ {
+ cnt= get_counter_from_merged_element(top->key, cnt_ofs);
+ cnt+= get_counter_from_merged_element(old_key, cnt_ofs);
+ put_counter_into_merged_element(top->key, cnt_ofs, cnt);
+ }
}
/*
Applying walk_action to the tail of the last tree: this is safe because
@@ -556,7 +585,10 @@ static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size,
{
do
{
- if (walk_action(top->key, 1, walk_action_arg))
+
+ cnt= with_counters ?
+ get_counter_from_merged_element(top->key, cnt_ofs) : 1;
+ if (walk_action(top->key, cnt, walk_action_arg))
goto end;
top->key+= key_length;
}
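The counter handling added above folds the counts of equal adjacent elements before invoking walk_action, instead of reporting every element with a count of 1. The same idea on an already-sorted toy array, with walk_action reduced to a printf; this is an illustration only, not the merge_walk() queue machinery.

#include <stdio.h>

typedef unsigned long element_count;

struct elem { int key; element_count cnt; };

/* Walk a sorted run and report each distinct key once, with summed counters,
   the way merge_walk() does for equal elements coming from different trees. */
static void walk_with_counters(const elem *v, int n)
{
  for (int i = 0; i < n; )
  {
    element_count total = 0;
    int j = i;
    while (j < n && v[j].key == v[i].key)
      total += v[j++].cnt;                 /* fold counters of equal elements */
    printf("key=%d count=%lu\n", v[i].key, total);
    i = j;
  }
}

int main()
{
  elem v[] = { {1, 2}, {1, 3}, {4, 1}, {7, 5}, {7, 1} };
  walk_with_counters(v, 5);                /* prints 1:5, 4:1, 7:6 */
  return 0;
}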
@@ -620,7 +652,7 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
(BUFFPEK *) file_ptrs.buffer,
(BUFFPEK *) file_ptrs.buffer + file_ptrs.elements,
action, walk_action_arg,
- tree.compare, tree.custom_arg, &file);
+ tree.compare, tree.custom_arg, &file, with_counters);
}
my_free(merge_buffer);
return res;
diff --git a/sql/unireg.cc b/sql/unireg.cc
index fca4dc200c3..7bb943dc9b0 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -1,5 +1,6 @@
/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,9 +28,9 @@
#include "sql_priv.h"
#include "unireg.h"
#include "sql_partition.h" // struct partition_info
-#include "sql_table.h" // check_duplicate_warning
#include "sql_class.h" // THD, Internal_error_handler
#include "create_options.h"
+#include "discover.h"
#include <m_ctype.h>
#include <assert.h>
@@ -38,137 +39,88 @@
/* threshold for safe_alloca */
#define ALLOCA_THRESHOLD 2048
-static uchar * pack_screens(List<Create_field> &create_fields,
- uint *info_length, uint *screens, bool small_file);
-static uint pack_keys(uchar *keybuff,uint key_count, KEY *key_info,
- ulong data_offset);
-static bool pack_header(uchar *forminfo,enum legacy_db_type table_type,
- List<Create_field> &create_fields,
- uint info_length, uint screens, uint table_options,
- ulong data_offset, handler *file);
+static uint pack_keys(uchar *,uint, KEY *, ulong);
+static bool pack_header(uchar *, List<Create_field> &, uint, ulong, handler *);
static uint get_interval_id(uint *,List<Create_field> &, Create_field *);
-static bool pack_fields(File file, List<Create_field> &create_fields,
- ulong data_offset);
-static bool make_empty_rec(THD *thd, int file, enum legacy_db_type table_type,
- uint table_options,
- List<Create_field> &create_fields,
- uint reclength, ulong data_offset,
- handler *handler);
-
-/**
- An interceptor to hijack ER_TOO_MANY_FIELDS error from
- pack_screens and retry again without UNIREG screens.
+static bool pack_fields(uchar *, List<Create_field> &, ulong);
+static size_t packed_fields_length(List<Create_field> &);
+static bool make_empty_rec(THD *, uchar *, uint, List<Create_field> &, uint, ulong);
- XXX: what is a UNIREG screen?
-*/
+static uchar *extra2_write_len(uchar *pos, size_t len)
+{
+ if (len < 255)
+ *pos++= len;
+ else
+ {
+ /*
+ At the moment we support options_len up to 64K.
+ We can easily extend it in the future, if the need arises.
+ */
+ DBUG_ASSERT(len <= 65535);
+ int2store(pos + 1, len);
+ pos+= 3;
+ }
+ return pos;
+}
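extra2_write_len() stores short lengths in one byte and longer ones in three. The sketch below decodes that encoding; it assumes the three-byte form is signalled by a leading 0 byte (the writer never stores byte 0 itself in the long case and relies on the MY_ZEROFILL'ed buffer), with the 16-bit length following in little-endian order.

#include <stdio.h>
#include <stddef.h>

typedef unsigned char uchar;

/* Reverse of the writer above: 1-byte form if the first byte is non-zero,
   otherwise a 0 marker followed by a little-endian 16-bit length. */
static size_t extra2_read_len_sketch(const uchar **pos)
{
  size_t len = *(*pos)++;
  if (len)
    return len;                            /* one-byte form */
  len = (*pos)[0] | ((size_t) (*pos)[1] << 8);
  *pos += 2;
  return len;
}

int main()
{
  uchar buf[8] = {0};
  /* encode a 300-byte length the way extra2_write_len() would: 0, then int2 */
  buf[1] = 300 & 0xff;
  buf[2] = 300 >> 8;
  const uchar *p = buf;
  printf("decoded length: %zu\n", extra2_read_len_sketch(&p));   /* 300 */
  return 0;
}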
-struct Pack_header_error_handler: public Internal_error_handler
+static uchar *extra2_write(uchar *pos, enum extra2_frm_value_type type,
+ LEX_STRING *str)
{
- virtual bool handle_condition(THD *thd,
- uint sql_errno,
- const char* sqlstate,
- Sql_condition::enum_warning_level level,
- const char* msg,
- Sql_condition ** cond_hdl);
- bool is_handled;
- Pack_header_error_handler() :is_handled(FALSE) {}
-};
-
-
-bool
-Pack_header_error_handler::
-handle_condition(THD *,
- uint sql_errno,
- const char*,
- Sql_condition::enum_warning_level,
- const char*,
- Sql_condition ** cond_hdl)
+ *pos++ = type;
+ pos= extra2_write_len(pos, str->length);
+ memcpy(pos, str->str, str->length);
+ return pos + str->length;
+}
+
+static uchar *extra2_write(uchar *pos, enum extra2_frm_value_type type,
+ LEX_CUSTRING *str)
{
- *cond_hdl= NULL;
- is_handled= (sql_errno == ER_TOO_MANY_FIELDS);
- return is_handled;
+ return extra2_write(pos, type, reinterpret_cast<LEX_STRING *>(str));
}
-/*
+/**
Create a frm (table definition) file
- SYNOPSIS
- mysql_create_frm()
- thd Thread handler
- file_name Path for file (including database and .frm)
- db Name of database
- table Name of table
- create_info create info parameters
- create_fields Fields to create
- keys number of keys to create
- key_info Keys to create
- db_file Handler to use. May be zero, in which case we use
- create_info->db_type
- RETURN
- false ok
- true error
+ @param thd Thread handler
+ @param table Name of table
+ @param create_info create info parameters
+ @param create_fields Fields to create
+ @param keys number of keys to create
+ @param key_info Keys to create
+ @param db_file Handler to use.
+
+ @return the generated frm image as a LEX_CUSTRING,
+ or null LEX_CUSTRING (str==0) in case of an error.
*/
-bool mysql_create_frm(THD *thd, const char *file_name,
- const char *db, const char *table,
- HA_CREATE_INFO *create_info,
- List<Create_field> &create_fields,
- uint keys, KEY *key_info,
- handler *db_file)
+LEX_CUSTRING build_frm_image(THD *thd, const char *table,
+ HA_CREATE_INFO *create_info,
+ List<Create_field> &create_fields,
+ uint keys, KEY *key_info, handler *db_file)
{
LEX_STRING str_db_type;
- uint reclength, info_length, screens, key_info_length, maxlength, tmp_len, i;
+ uint reclength, key_info_length, tmp_len, i;
ulong key_buff_length;
- File file;
ulong filepos, data_offset;
uint options_len;
- uchar fileinfo[64],forminfo[288],*keybuff;
- uchar *screen_buff;
- char buff[128];
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- partition_info *part_info= thd->work_part_info;
-#endif
- Pack_header_error_handler pack_header_error_handler;
+ uchar fileinfo[FRM_HEADER_SIZE],forminfo[FRM_FORMINFO_SIZE];
+ const partition_info *part_info= IF_PARTITIONING(thd->work_part_info, 0);
int error;
- DBUG_ENTER("mysql_create_frm");
-
- DBUG_ASSERT(*fn_rext((char*)file_name)); // Check .frm extension
-
- if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0)))
- DBUG_RETURN(1);
- DBUG_ASSERT(db_file != NULL);
+ uchar *frm_ptr, *pos;
+ LEX_CUSTRING frm= {0,0};
+ DBUG_ENTER("build_frm_image");
/* If fixed row records, we need one bit to check for deleted rows */
if (!(create_info->table_options & HA_OPTION_PACK_RECORD))
create_info->null_bits++;
data_offset= (create_info->null_bits + 7) / 8;
- thd->push_internal_handler(&pack_header_error_handler);
-
- error= pack_header(forminfo, ha_legacy_type(create_info->db_type),
- create_fields,info_length,
- screens, create_info->table_options,
+ error= pack_header(forminfo, create_fields, create_info->table_options,
data_offset, db_file);
- thd->pop_internal_handler();
-
if (error)
- {
- my_free(screen_buff);
- if (! pack_header_error_handler.is_handled)
- DBUG_RETURN(1);
-
- // Try again without UNIREG screens (to get more columns)
- if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,1)))
- DBUG_RETURN(1);
- if (pack_header(forminfo, ha_legacy_type(create_info->db_type),
- create_fields,info_length,
- screens, create_info->table_options, data_offset, db_file))
- {
- my_free(screen_buff);
- DBUG_RETURN(1);
- }
- }
+ DBUG_RETURN(frm);
+
reclength=uint2korr(forminfo+266);
/* Calculate extra data segment length */
@@ -184,12 +136,8 @@ bool mysql_create_frm(THD *thd, const char *file_name,
=> Total 6 byte
*/
create_info->extra_size+= 6;
-#ifdef WITH_PARTITION_STORAGE_ENGINE
if (part_info)
- {
create_info->extra_size+= part_info->part_info_len;
- }
-#endif
for (i= 0; i < keys; i++)
{
@@ -201,13 +149,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
create_fields,
keys, key_info);
DBUG_PRINT("info", ("Options length: %u", options_len));
- if (options_len)
- {
- create_info->table_options|= HA_OPTION_TEXT_CREATE_OPTIONS;
- create_info->extra_size+= (options_len + 4);
- }
- else
- create_info->table_options&= ~HA_OPTION_TEXT_CREATE_OPTIONS;
/*
This gives us the byte-position of the character at
@@ -244,190 +185,164 @@ bool mysql_create_frm(THD *thd, const char *file_name,
if (thd->is_strict_mode())
{
my_error(ER_TOO_LONG_TABLE_COMMENT, MYF(0),
- real_table_name, static_cast<ulong>(TABLE_COMMENT_MAXLEN));
- my_free(screen_buff);
- DBUG_RETURN(1);
+ real_table_name, TABLE_COMMENT_MAXLEN);
+ DBUG_RETURN(frm);
}
char warn_buff[MYSQL_ERRMSG_SIZE];
my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_TABLE_COMMENT),
- real_table_name, static_cast<ulong>(TABLE_COMMENT_MAXLEN));
- /* do not push duplicate warnings */
- if (!thd->get_stmt_da()->has_sql_condition(warn_buff, strlen(warn_buff)))
- push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- ER_TOO_LONG_TABLE_COMMENT, warn_buff);
+ real_table_name, TABLE_COMMENT_MAXLEN);
+ push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_TOO_LONG_TABLE_COMMENT, warn_buff);
create_info->comment.length= tmp_len;
}
/*
If table comment is longer than TABLE_COMMENT_INLINE_MAXLEN bytes,
store the comment in an extra segment (up to TABLE_COMMENT_MAXLEN bytes).
- Pre 6.0, the limit was 60 characters, with no extra segment-handling.
+ Pre 5.5, the limit was 60 characters, with no extra segment-handling.
*/
if (create_info->comment.length > TABLE_COMMENT_INLINE_MAXLEN)
{
forminfo[46]=255;
create_info->extra_size+= 2 + create_info->comment.length;
}
- else{
+ else
+ {
strmake((char*) forminfo+47, create_info->comment.str ?
create_info->comment.str : "", create_info->comment.length);
forminfo[46]=(uchar) create_info->comment.length;
}
- if ((file=create_frm(thd, file_name, db, table, reclength, fileinfo,
- create_info, keys, key_info)) < 0)
+ if (!create_info->tabledef_version.str)
{
- my_free(screen_buff);
- DBUG_RETURN(1);
+ uchar *to= (uchar*) thd->alloc(MY_UUID_SIZE);
+ if (unlikely(!to))
+ DBUG_RETURN(frm);
+ my_uuid(to);
+ create_info->tabledef_version.str= to;
+ create_info->tabledef_version.length= MY_UUID_SIZE;
}
+ DBUG_ASSERT(create_info->tabledef_version.length > 0);
+ DBUG_ASSERT(create_info->tabledef_version.length <= 255);
+
+ prepare_frm_header(thd, reclength, fileinfo, create_info, keys, key_info);
+
+ /* one byte for a type, one or three for a length */
+ uint extra2_size= 1 + 1 + create_info->tabledef_version.length;
+ if (options_len)
+ extra2_size+= 1 + (options_len > 255 ? 3 : 1) + options_len;
+
+ if (part_info)
+ extra2_size+= 1 + 1 + hton_name(part_info->default_engine_type)->length;
key_buff_length= uint4korr(fileinfo+47);
- keybuff=(uchar*) my_malloc(key_buff_length, MYF(MY_THREAD_SPECIFIC));
- key_info_length= pack_keys(keybuff, keys, key_info, data_offset);
- /*
- Ensure that there are no forms in this newly created form file.
- Even if the form file exists, create_frm must truncate it to
- ensure one form per form file.
- */
- DBUG_ASSERT(uint2korr(fileinfo+8) == 0);
+ frm.length= FRM_HEADER_SIZE; // fileinfo;
+ frm.length+= extra2_size + 4; // mariadb extra2 frm segment
+
+ int2store(fileinfo+4, extra2_size);
+ int2store(fileinfo+6, frm.length); // Position to key information
+ frm.length+= key_buff_length;
+ frm.length+= reclength; // row with default values
+ frm.length+= create_info->extra_size;
+
+ filepos= frm.length;
+ frm.length+= FRM_FORMINFO_SIZE; // forminfo
+ frm.length+= packed_fields_length(create_fields);
+
+ frm_ptr= (uchar*) my_malloc(frm.length, MYF(MY_WME | MY_ZEROFILL |
+ MY_THREAD_SPECIFIC));
+ if (!frm_ptr)
+ DBUG_RETURN(frm);
+
+ /* write the extra2 segment */
+ pos = frm_ptr + 64;
+ compile_time_assert(EXTRA2_TABLEDEF_VERSION != '/');
+ pos= extra2_write(pos, EXTRA2_TABLEDEF_VERSION,
+ &create_info->tabledef_version);
- if (!(filepos= make_new_entry(file, fileinfo, NULL, "")))
- goto err;
- maxlength=(uint) next_io_size((ulong) (uint2korr(forminfo)+1000));
- int2store(forminfo+2,maxlength);
- int4store(fileinfo+10,(ulong) (filepos+maxlength));
+ if (part_info)
+ pos= extra2_write(pos, EXTRA2_DEFAULT_PART_ENGINE,
+ hton_name(part_info->default_engine_type));
+
+ if (options_len)
+ {
+ *pos++= EXTRA2_ENGINE_TABLEOPTS;
+ pos= extra2_write_len(pos, options_len);
+ pos= engine_table_options_frm_image(pos, create_info->option_list,
+ create_fields, keys, key_info);
+ }
+
+ int4store(pos, filepos); // end of the extra2 segment
+ pos+= 4;
+
+ DBUG_ASSERT(pos == frm_ptr + uint2korr(fileinfo+6));
+ key_info_length= pack_keys(pos, keys, key_info, data_offset);
+
+ int2store(forminfo+2, frm.length - filepos);
+ int4store(fileinfo+10, frm.length);
fileinfo[26]= (uchar) test((create_info->max_rows == 1) &&
(create_info->min_rows == 1) && (keys == 0));
int2store(fileinfo+28,key_info_length);
-#ifdef WITH_PARTITION_STORAGE_ENGINE
if (part_info)
{
fileinfo[61]= (uchar) ha_legacy_type(part_info->default_engine_type);
DBUG_PRINT("info", ("part_db_type = %d", fileinfo[61]));
}
-#endif
- int2store(fileinfo+59,db_file->extra_rec_buf_length());
- if (mysql_file_pwrite(file, fileinfo, 64, 0L, MYF_RW) ||
- mysql_file_pwrite(file, keybuff, key_info_length,
- (ulong) uint2korr(fileinfo+6), MYF_RW))
- goto err;
- mysql_file_seek(file,
- (ulong) uint2korr(fileinfo+6) + (ulong) key_buff_length,
- MY_SEEK_SET, MYF(0));
- if (make_empty_rec(thd,file,ha_legacy_type(create_info->db_type),
- create_info->table_options,
- create_fields,reclength, data_offset, db_file))
- goto err;
+ int2store(fileinfo+59,db_file->extra_rec_buf_length());
- int2store(buff, create_info->connect_string.length);
- if (mysql_file_write(file, (const uchar*)buff, 2, MYF(MY_NABP)) ||
- mysql_file_write(file, (const uchar*)create_info->connect_string.str,
- create_info->connect_string.length, MYF(MY_NABP)))
- goto err;
+ memcpy(frm_ptr, fileinfo, FRM_HEADER_SIZE);
- int2store(buff, str_db_type.length);
- if (mysql_file_write(file, (const uchar*)buff, 2, MYF(MY_NABP)) ||
- mysql_file_write(file, (const uchar*)str_db_type.str,
- str_db_type.length, MYF(MY_NABP)))
+ pos+= key_buff_length;
+ if (make_empty_rec(thd, pos, create_info->table_options, create_fields,
+ reclength, data_offset))
goto err;
-#ifdef WITH_PARTITION_STORAGE_ENGINE
+ pos+= reclength;
+ int2store(pos, create_info->connect_string.length);
+ pos+= 2;
+ memcpy(pos, create_info->connect_string.str, create_info->connect_string.length);
+ pos+= create_info->connect_string.length;
+ int2store(pos, str_db_type.length);
+ pos+= 2;
+ memcpy(pos, str_db_type.str, str_db_type.length);
+ pos+= str_db_type.length;
+
if (part_info)
{
char auto_partitioned= part_info->is_auto_partitioned ? 1 : 0;
- int4store(buff, part_info->part_info_len);
- if (mysql_file_write(file, (const uchar*)buff, 4, MYF_RW) ||
- mysql_file_write(file, (const uchar*)part_info->part_info_string,
- part_info->part_info_len + 1, MYF_RW) ||
- mysql_file_write(file, (const uchar*)&auto_partitioned, 1, MYF_RW))
- goto err;
+ int4store(pos, part_info->part_info_len);
+ pos+= 4;
+ memcpy(pos, part_info->part_info_string, part_info->part_info_len + 1);
+ pos+= part_info->part_info_len + 1;
+ *pos++= auto_partitioned;
}
else
-#endif
{
- bzero((uchar*) buff, 6);
- if (mysql_file_write(file, (uchar*) buff, 6, MYF_RW))
- goto err;
+ pos+= 6;
}
for (i= 0; i < keys; i++)
{
if (key_info[i].parser_name)
{
- if (mysql_file_write(file, (const uchar*)key_info[i].parser_name->str,
- key_info[i].parser_name->length + 1, MYF(MY_NABP)))
- goto err;
+ memcpy(pos, key_info[i].parser_name->str, key_info[i].parser_name->length + 1);
+ pos+= key_info[i].parser_name->length + 1;
}
}
- if (forminfo[46] == (uchar)255)
+ if (forminfo[46] == (uchar)255) // New style MySQL 5.5 table comment
{
- uchar comment_length_buff[2];
- int2store(comment_length_buff,create_info->comment.length);
- if (mysql_file_write(file, comment_length_buff, 2, MYF(MY_NABP)) ||
- mysql_file_write(file, (uchar*) create_info->comment.str,
- create_info->comment.length, MYF(MY_NABP)))
- goto err;
+ int2store(pos, create_info->comment.length);
+ pos+=2;
+ memcpy(pos, create_info->comment.str, create_info->comment.length);
+ pos+= create_info->comment.length;
}
- if (options_len)
- {
- uchar *optbuff= (uchar *)my_safe_alloca(options_len + 4, ALLOCA_THRESHOLD);
- my_bool error;
- DBUG_PRINT("info", ("Create options length: %u", options_len));
- if (!optbuff)
- goto err;
- int4store(optbuff, options_len);
- engine_table_options_frm_image(optbuff + 4,
- create_info->option_list,
- create_fields,
- keys, key_info);
- error= my_write(file, optbuff, options_len + 4, MYF_RW);
- my_safe_afree(optbuff, options_len + 4, ALLOCA_THRESHOLD);
- if (error)
- goto err;
- }
-
- mysql_file_seek(file, filepos, MY_SEEK_SET, MYF(0));
- if (mysql_file_write(file, forminfo, 288, MYF_RW) ||
- mysql_file_write(file, screen_buff, info_length, MYF_RW) ||
- pack_fields(file, create_fields, data_offset))
+ memcpy(frm_ptr + filepos, forminfo, 288);
+ if (pack_fields(frm_ptr + filepos + 288, create_fields, data_offset))
goto err;
-#ifdef HAVE_CRYPTED_FRM
- if (create_info->password)
- {
- char tmp=2,*disk_buff=0;
- SQL_CRYPT *crypted=new SQL_CRYPT(create_info->password);
- if (!crypted || mysql_file_pwrite(file, &tmp, 1, 26, MYF_RW))// Mark crypted
- goto err;
- uint read_length=uint2korr(forminfo)-256;
- mysql_file_seek(file, filepos+256, MY_SEEK_SET, MYF(0));
- if (read_string(file,(uchar**) &disk_buff,read_length))
- goto err;
- crypted->encode(disk_buff,read_length);
- delete crypted;
- if (mysql_file_pwrite(file, disk_buff, read_length, filepos+256, MYF_RW))
- {
- my_free(disk_buff);
- goto err;
- }
- my_free(disk_buff);
- }
-#endif
-
- my_free(screen_buff);
- my_free(keybuff);
-
- if (opt_sync_frm && !(create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
- (mysql_file_sync(file, MYF(MY_WME)) ||
- my_sync_dir_by_file(file_name, MYF(MY_WME))))
- goto err2;
-
- if (mysql_file_close(file, MYF(MY_WME)))
- goto err3;
-
{
/*
Restore all UCS2 intervals.
@@ -444,155 +359,68 @@ bool mysql_create_frm(THD *thd, const char *file_name,
}
}
}
- DBUG_RETURN(0);
+
+ frm.str= frm_ptr;
+ DBUG_RETURN(frm);
err:
- my_free(screen_buff);
- my_free(keybuff);
-err2:
- (void) mysql_file_close(file, MYF(MY_WME));
-err3:
- mysql_file_delete(key_file_frm, file_name, MYF(0));
- DBUG_RETURN(1);
-} /* mysql_create_frm */
+ my_free(frm_ptr);
+ DBUG_RETURN(frm);
+}
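build_frm_image() lays the image out strictly sequentially and accumulates frm.length segment by segment. The standalone sketch below repeats that bookkeeping; every segment size here is a made-up placeholder, and only the order and the way filepos marks the start of forminfo mirror the code above.

#include <stdio.h>
#include <stddef.h>

int main()
{
  /* hypothetical segment sizes, in the order build_frm_image() emits them */
  const size_t header      = 64;    /* FRM_HEADER_SIZE (fileinfo)             */
  const size_t extra2      = 20;    /* tabledef version, options, part engine */
  const size_t extra2_end  = 4;     /* 4-byte offset closing the extra2 part  */
  const size_t key_buffer  = 128;   /* packed key information                 */
  const size_t record      = 100;   /* row with default values (reclength)    */
  const size_t extra_seg   = 32;    /* connect string, engine name, comment   */
  const size_t forminfo    = 288;   /* FRM_FORMINFO_SIZE                      */
  const size_t fields      = 512;   /* packed field descriptions              */

  size_t filepos = header + extra2 + extra2_end + key_buffer + record + extra_seg;
  size_t total   = filepos + forminfo + fields;

  printf("forminfo starts at %zu, total frm image %zu bytes\n", filepos, total);
  return 0;
}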
/**
Create a frm (table definition) file and the tables
@param thd Thread handler
+ @param frm Binary frm image of the table to create
@param path Name of file (including database, without .frm)
@param db Data base name
@param table_name Table name
@param create_info create info parameters
- @param create_fields Fields to create
- @param keys number of keys to create
- @param key_info Keys to create
- @param file Handler to use
- @param no_ha_table Indicates that only .FRM file (and PAR file if table
- is partitioned) needs to be created and not a table
- in the storage engine.
+ @param file Handler to use or NULL if only frm needs to be created
@retval 0 ok
@retval 1 error
*/
-int rea_create_table(THD *thd, const char *path,
- const char *db, const char *table_name,
- HA_CREATE_INFO *create_info,
- List<Create_field> &create_fields,
- uint keys, KEY *key_info, handler *file,
- bool no_ha_table)
+int rea_create_table(THD *thd, LEX_CUSTRING *frm,
+ const char *path, const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info, handler *file,
+ bool no_ha_create_table)
{
DBUG_ENTER("rea_create_table");
- char frm_name[FN_REFLEN];
- strxmov(frm_name, path, reg_ext, NullS);
- if (mysql_create_frm(thd, frm_name, db, table_name, create_info,
- create_fields, keys, key_info, file))
-
- DBUG_RETURN(1);
+ // TODO don't write frm for temp tables
+ if (no_ha_create_table || create_info->tmp_table())
+ {
+ if (writefrm(path, db, table_name, true, frm->str, frm->length))
+ goto err_frm;
+ }
- // Make sure mysql_create_frm din't remove extension
- DBUG_ASSERT(*fn_rext(frm_name));
if (thd->variables.keep_files_on_create)
create_info->options|= HA_CREATE_KEEP_FILES;
- if (file->ha_create_handler_files(path, NULL, CHF_CREATE_FLAG,
- create_info))
- goto err_handler_frm;
+ if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG))
+ goto err_part;
- if (!no_ha_table &&
- ha_create_table(thd, path, db, table_name, create_info, 0))
- goto err_handler;
- DBUG_RETURN(0);
+ if (!no_ha_create_table)
+ {
+ if (ha_create_table(thd, path, db, table_name, create_info, frm))
+ goto err_part;
+ }
-err_handler:
- (void) file->ha_create_handler_files(path, NULL, CHF_DELETE_FLAG, create_info);
+ DBUG_RETURN(0);
-err_handler_frm:
- mysql_file_delete(key_file_frm, frm_name, MYF(0));
+err_part:
+ file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
+err_frm:
+ deletefrm(path);
DBUG_RETURN(1);
} /* rea_create_table */
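/*
  Illustrative usage sketch (not part of the patch): the new two-step
  interface documented above -- build the frm image in memory, then hand it
  to rea_create_table().  The setup of create_info, create_fields and
  key_info is assumed to come from the normal CREATE TABLE code path, and
  treating a NULL frm.str as "build_frm_image() failed" is an assumption
  based on the error path shown earlier in this patch.
*/
static int create_frm_and_table_sketch(THD *thd, const char *path,
                                       const char *db, const char *table_name,
                                       HA_CREATE_INFO *create_info,
                                       List<Create_field> &create_fields,
                                       uint keys, KEY *key_info, handler *file)
{
  LEX_CUSTRING frm= build_frm_image(thd, table_name, create_info,
                                    create_fields, keys, key_info, file);
  if (!frm.str)                                  /* frm image build failed */
    return 1;
  int error= rea_create_table(thd, &frm, path, db, table_name, create_info,
                              file, false /* create it in the engine too */);
  my_free((void*) frm.str);                      /* image is heap-allocated */
  return error;
}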
- /* Pack screens to a screen for save in a form-file */
-
-static uchar *pack_screens(List<Create_field> &create_fields,
- uint *info_length, uint *screens,
- bool small_file)
-{
- reg1 uint i;
- uint row,start_row,end_row,fields_on_screen;
- uint length,cols;
- uchar *info,*pos,*start_screen;
- uint fields=create_fields.elements;
- List_iterator<Create_field> it(create_fields);
- DBUG_ENTER("pack_screens");
-
- start_row=4; end_row=22; cols=80; fields_on_screen=end_row+1-start_row;
-
- *screens=(fields-1)/fields_on_screen+1;
- length= (*screens) * (SC_INFO_LENGTH+ (cols>> 1)+4);
-
- Create_field *field;
- while ((field=it++))
- length+=(uint) strlen(field->field_name)+1+TE_INFO_LENGTH+cols/2;
-
- if (!(info=(uchar*) my_malloc(length,MYF(MY_WME | MY_THREAD_SPECIFIC))))
- DBUG_RETURN(0);
-
- start_screen=0;
- row=end_row;
- pos=info;
- it.rewind();
- for (i=0 ; i < fields ; i++)
- {
- Create_field *cfield=it++;
- if (row++ == end_row)
- {
- if (i)
- {
- length=(uint) (pos-start_screen);
- int2store(start_screen,length);
- start_screen[2]=(uchar) (fields_on_screen+1);
- start_screen[3]=(uchar) (fields_on_screen);
- }
- row=start_row;
- start_screen=pos;
- pos+=4;
- pos[0]= (uchar) start_row-2; /* Header string */
- pos[1]= (uchar) (cols >> 2);
- pos[2]= (uchar) (cols >> 1) +1;
- strfill((char *) pos+3,(uint) (cols >> 1),' ');
- pos+=(cols >> 1)+4;
- }
- length=(uint) strlen(cfield->field_name);
- if (length > cols-3)
- length=cols-3;
-
- if (!small_file)
- {
- pos[0]=(uchar) row;
- pos[1]=0;
- pos[2]=(uchar) (length+1);
- pos=(uchar*) strmake((char*) pos+3,cfield->field_name,length)+1;
- }
- cfield->row=(uint8) row;
- cfield->col=(uint8) (length+1);
- cfield->sc_length=(uint8) MY_MIN(cfield->length,cols-(length+2));
- }
- length=(uint) (pos-start_screen);
- int2store(start_screen,length);
- start_screen[2]=(uchar) (row-start_row+2);
- start_screen[3]=(uchar) (row-start_row+1);
-
- *info_length=(uint) (pos-info);
- DBUG_RETURN(info);
-} /* pack_screens */
-
-
- /* Pack keyinfo and keynames to keybuff for save in form-file. */
+/* Pack keyinfo and keynames to keybuff for save in form-file. */
static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
ulong data_offset)
@@ -675,12 +503,10 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
} /* pack_keys */
- /* Make formheader */
+/* Make formheader */
-static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
- List<Create_field> &create_fields,
- uint info_length, uint screens, uint table_options,
- ulong data_offset, handler *file)
+static bool pack_header(uchar *forminfo, List<Create_field> &create_fields,
+ uint table_options, ulong data_offset, handler *file)
{
uint length,int_count,int_length,no_empty, int_parts;
uint time_stamp_pos,null_fields;
@@ -699,8 +525,7 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
com_length=vcol_info_length=0;
n_length=2L;
- /* Check fields */
-
+ /* Check fields */
List_iterator<Create_field> it(create_fields);
Create_field *field;
while ((field=it++))
@@ -712,22 +537,14 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
COLUMN_COMMENT_MAXLEN);
if (tmp_len < field->comment.length)
{
- THD *thd= current_thd;
+ myf myf_warning= current_thd->is_strict_mode() ? 0 : ME_JUST_WARNING;
- if (thd->is_strict_mode())
- {
- my_error(ER_TOO_LONG_FIELD_COMMENT, MYF(0), field->field_name,
- static_cast<ulong>(COLUMN_COMMENT_MAXLEN));
+ my_error(ER_TOO_LONG_FIELD_COMMENT, myf_warning, field->field_name,
+ COLUMN_COMMENT_MAXLEN);
+
+ if (!myf_warning)
DBUG_RETURN(1);
- }
- char warn_buff[MYSQL_ERRMSG_SIZE];
- my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_FIELD_COMMENT),
- field->field_name,
- static_cast<ulong>(COLUMN_COMMENT_MAXLEN));
- /* do not push duplicate warnings */
- if (!thd->get_stmt_da()->has_sql_condition(warn_buff, strlen(warn_buff)))
- push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- ER_TOO_LONG_FIELD_COMMENT, warn_buff);
+
field->comment.length= tmp_len;
}
if (field->vcol_info)
@@ -753,7 +570,7 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
expressions saved in the frm file for virtual columns.
*/
vcol_info_length+= field->vcol_info->expr_str.length+
- FRM_VCOL_HEADER_SIZE(field->interval!=NULL);
+ FRM_VCOL_HEADER_SIZE(field->interval);
}
totlength+= field->length;
@@ -830,8 +647,7 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
}
int_length+=int_count*2; // 255 prefix + 0 suffix
- /* Save values in forminfo */
-
+ /* Save values in forminfo */
if (reclength > (ulong) file->max_record_length())
{
my_error(ER_TOO_BIG_ROWSIZE, MYF(0), static_cast<long>(file->max_record_length()));
@@ -839,7 +655,7 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
}
/* Hack to avoid bugs with small static rows in MySQL */
reclength=MY_MAX(file->min_record_length(table_options),reclength);
- if (info_length+(ulong) create_fields.elements*FCOMP+288+
+ if ((ulong) create_fields.elements*FCOMP+FRM_FORMINFO_SIZE+
n_length+int_length+com_length+vcol_info_length > 65535L ||
int_count > 255)
{
@@ -847,13 +663,13 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
DBUG_RETURN(1);
}
- bzero((char*)forminfo,288);
- length=(info_length+create_fields.elements*FCOMP+288+n_length+int_length+
+ bzero((char*)forminfo,FRM_FORMINFO_SIZE);
+ length=(create_fields.elements*FCOMP+FRM_FORMINFO_SIZE+n_length+int_length+
com_length+vcol_info_length);
int2store(forminfo,length);
- forminfo[256] = (uint8) screens;
+ forminfo[256] = 0;
int2store(forminfo+258,create_fields.elements);
- int2store(forminfo+260,info_length);
+ int2store(forminfo+260,0);
int2store(forminfo+262,totlength);
int2store(forminfo+264,no_empty);
int2store(forminfo+266,reclength);
@@ -867,13 +683,11 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
int2store(forminfo+282,null_fields);
int2store(forminfo+284,com_length);
int2store(forminfo+286,vcol_info_length);
- /* forminfo+288 is free to use for additional information */
DBUG_RETURN(0);
} /* pack_header */
- /* get each unique interval each own id */
-
+/* give each unique interval its own id */
static uint get_interval_id(uint *int_count,List<Create_field> &create_fields,
Create_field *last_field)
{
@@ -900,29 +714,57 @@ static uint get_interval_id(uint *int_count,List<Create_field> &create_fields,
}
- /* Save fields, fieldnames and intervals */
+static size_t packed_fields_length(List<Create_field> &create_fields)
+{
+ Create_field *field;
+ size_t length= 0;
+ DBUG_ENTER("packed_fields_length");
-static bool pack_fields(File file, List<Create_field> &create_fields,
+ List_iterator<Create_field> it(create_fields);
+ uint int_count=0;
+ while ((field=it++))
+ {
+ if (field->interval_id > int_count)
+ {
+ int_count= field->interval_id;
+ length++;
+ for (int i=0; field->interval->type_names[i]; i++)
+ {
+ length+= field->interval->type_lengths[i];
+ length++;
+ }
+ length++;
+ }
+ if (field->vcol_info)
+ {
+ length+= field->vcol_info->expr_str.length +
+ FRM_VCOL_HEADER_SIZE(field->interval);
+ }
+ length+= FCOMP;
+ length+= strlen(field->field_name)+1;
+ length+= field->comment.length;
+ }
+ length++; /* leading NAMES_SEP_CHAR before the field names */
+ length++; /* trailing '\0' after the last field name */
+ DBUG_RETURN(length);
+}
+
+/* Save fields, fieldnames and intervals */
+
+static bool pack_fields(uchar *buff, List<Create_field> &create_fields,
ulong data_offset)
{
- reg2 uint i;
uint int_count, comment_length= 0, vcol_info_length=0;
- uchar buff[MAX_FIELD_WIDTH];
Create_field *field;
DBUG_ENTER("pack_fields");
- /* Write field info */
-
+ /* Write field info */
List_iterator<Create_field> it(create_fields);
-
int_count=0;
while ((field=it++))
{
uint recpos;
uint cur_vcol_expr_len= 0;
- buff[0]= (uchar) field->row;
- buff[1]= (uchar) field->col;
- buff[2]= (uchar) field->sc_length;
int2store(buff+3, field->length);
/* The +1 is here because the column offsets in the .frm file start at 1 */
recpos= field->offset+1 + (uint) data_offset;
@@ -956,40 +798,29 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
the additional data saved for the virtual field
*/
buff[12]= cur_vcol_expr_len= field->vcol_info->expr_str.length +
- FRM_VCOL_HEADER_SIZE(field->interval!=NULL);
- vcol_info_length+= cur_vcol_expr_len +
- FRM_VCOL_HEADER_SIZE(field->interval!=NULL);
+ FRM_VCOL_HEADER_SIZE(field->interval);
+ vcol_info_length+= cur_vcol_expr_len;
buff[13]= (uchar) MYSQL_TYPE_VIRTUAL;
}
int2store(buff+15, field->comment.length);
comment_length+= field->comment.length;
set_if_bigger(int_count,field->interval_id);
- if (mysql_file_write(file, buff, FCOMP, MYF_RW))
- DBUG_RETURN(1);
+ buff+= FCOMP;
}
- /* Write fieldnames */
- buff[0]=(uchar) NAMES_SEP_CHAR;
- if (mysql_file_write(file, buff, 1, MYF_RW))
- DBUG_RETURN(1);
- i=0;
+ /* Write fieldnames */
+ *buff++= NAMES_SEP_CHAR;
it.rewind();
while ((field=it++))
{
- char *pos= strmov((char*) buff,field->field_name);
- *pos++=NAMES_SEP_CHAR;
- if (i == create_fields.elements-1)
- *pos++=0;
- if (mysql_file_write(file, buff, (size_t) (pos-(char*) buff), MYF_RW))
- DBUG_RETURN(1);
- i++;
+ buff= (uchar*)strmov((char*) buff, field->field_name);
+ *buff++=NAMES_SEP_CHAR;
}
+ *buff++= 0;
- /* Write intervals */
+ /* Write intervals */
if (int_count)
{
- String tmp((char*) buff,sizeof(buff), &my_charset_bin);
- tmp.length(0);
it.rewind();
int_count=0;
while ((field=it++))
@@ -1031,34 +862,30 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
}
int_count= field->interval_id;
- tmp.append(sep);
- for (const char **pos=field->interval->type_names ; *pos ; pos++)
+ *buff++= sep;
+ for (int i=0; field->interval->type_names[i]; i++)
{
- tmp.append(*pos);
- tmp.append(sep);
+ memcpy(buff, field->interval->type_names[i], field->interval->type_lengths[i]);
+ buff+= field->interval->type_lengths[i];
+ *buff++= sep;
}
- tmp.append('\0'); // End of intervall
+ *buff++= 0;
+
}
}
- if (mysql_file_write(file, (uchar*) tmp.ptr(), tmp.length(), MYF_RW))
- DBUG_RETURN(1);
}
if (comment_length)
{
it.rewind();
- int_count=0;
while ((field=it++))
{
- if (field->comment.length)
- if (mysql_file_write(file, (uchar*) field->comment.str,
- field->comment.length, MYF_RW))
- DBUG_RETURN(1);
+ memcpy(buff, field->comment.str, field->comment.length);
+ buff+= field->comment.length;
}
}
if (vcol_info_length)
{
it.rewind();
- int_count=0;
while ((field=it++))
{
/*
@@ -1071,18 +898,13 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
*/
if (field->vcol_info && field->vcol_info->expr_str.length)
{
- buff[0]= (uchar)(1 + test(field->interval_id));
- buff[1]= (uchar) field->sql_type;
- buff[2]= (uchar) field->stored_in_db;
- if (field->interval_id)
- buff[3]= (uchar) field->interval_id;
- if (my_write(file, buff, 3 + test(field->interval_id), MYF_RW))
- DBUG_RETURN(1);
- if (my_write(file,
- (uchar*) field->vcol_info->expr_str.str,
- field->vcol_info->expr_str.length,
- MYF_RW))
- DBUG_RETURN(1);
+ *buff++= (uchar)(1 + test(field->interval));
+ *buff++= (uchar) field->sql_type;
+ *buff++= (uchar) field->stored_in_db;
+ if (field->interval)
+ *buff++= (uchar) field->interval_id;
+ memcpy(buff, field->vcol_info->expr_str.str, field->vcol_info->expr_str.length);
+ buff+= field->vcol_info->expr_str.length;
}
}
}
@@ -1090,19 +912,16 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
}
- /* save an empty record on start of formfile */
+/* Save an empty record at the start of the form file */
-static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type,
- uint table_options,
+static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
List<Create_field> &create_fields,
- uint reclength,
- ulong data_offset,
- handler *handler)
+ uint reclength, ulong data_offset)
{
int error= 0;
Field::utype type;
uint null_count;
- uchar *buff,*null_pos;
+ uchar *null_pos;
TABLE table;
TABLE_SHARE share;
Create_field *field;
@@ -1114,13 +933,6 @@ static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type,
bzero((char*) &share, sizeof(share));
table.s= &share;
- if (!(buff=(uchar*) my_malloc((size_t) reclength,
- MYF(MY_WME | MY_ZEROFILL |
- MY_THREAD_SPECIFIC))))
- {
- DBUG_RETURN(1);
- }
-
table.in_use= thd;
null_count=0;
@@ -1205,10 +1017,7 @@ static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type,
if (null_count & 7)
*(null_pos + null_count / 8)|= ~(((uchar) 1 << (null_count & 7)) - 1);
- error= mysql_file_write(file, buff, (size_t) reclength, MYF_RW) != 0;
-
err:
- my_free(buff);
thd->count_cuted_fields= old_count_cuted_fields;
DBUG_RETURN(error);
} /* make_empty_rec */
diff --git a/sql/unireg.h b/sql/unireg.h
index a1dffcf3d78..9b40b7b0779 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -15,7 +15,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#include "my_global.h" /* ulonglong */
@@ -23,8 +23,6 @@
/* Extra functions used by unireg library */
-typedef struct st_ha_create_information HA_CREATE_INFO;
-
#ifndef NO_ALARM_LOOP
#define NO_ALARM_LOOP /* lib5 and popen can't use alarm */
#endif
@@ -88,7 +86,7 @@ typedef struct st_ha_create_information HA_CREATE_INFO;
#define READ_ALL 1 /* openfrm: Read all parameters */
#define CHANGE_FRM 2 /* openfrm: open .frm as O_RDWR */
#define READ_KEYINFO 4 /* Read key data from the file */
-#define EXTRA_RECORD 8 /* Reservera plats f|r extra record */
+#define EXTRA_RECORD 8 /* Reserve space for an extra record */
#define DONT_OPEN_TABLES 8 /* Don't open database-files (frd) */
#define DONT_OPEN_MASTER_REG 16 /* Don't open first reg-file (prt) */
#define EXTRA_LONG_RECORD 16 /* Space for a double search record */
@@ -173,16 +171,46 @@ typedef struct st_ha_create_information HA_CREATE_INFO;
#include "sql_list.h" /* List<> */
#include "field.h" /* Create_field */
-bool mysql_create_frm(THD *thd, const char *file_name,
- const char *db, const char *table,
- HA_CREATE_INFO *create_info,
- List<Create_field> &create_field,
- uint key_count,KEY *key_info,handler *db_type);
-int rea_create_table(THD *thd, const char *path,
- const char *db, const char *table_name,
- HA_CREATE_INFO *create_info,
- List<Create_field> &create_field,
- uint key_count,KEY *key_info,
- handler *file,
- bool no_ha_table);
+/*
+ Types of values in the MariaDB extra2 frm segment.
+ Each value is written as
+ type: 1 byte
+ length: 1 byte (1..255), or \0 followed by 2 bytes
+ value: 'length' bytes of binary data
+
+ Older MariaDB servers can ignore values of unknown types if
+ the type code is less than 128 (EXTRA2_ENGINE_IMPORTANT).
+ Otherwise older (but newer than 10.0.1) servers are required
+ to report an error.
+*/
+enum extra2_frm_value_type {
+ EXTRA2_TABLEDEF_VERSION=0,
+ EXTRA2_DEFAULT_PART_ENGINE=1,
+
+#define EXTRA2_ENGINE_IMPORTANT 128
+
+ EXTRA2_ENGINE_TABLEOPTS=128,
+};
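/*
  Illustrative sketch (not part of this patch): writing a single value into
  the extra2 segment following the layout described above.  Using int2store()
  for the two-byte length form is an assumption based on the usual frm
  byte-order conventions; the real writer lives in the frm image builder.
*/
static inline uchar *extra2_store_value_sketch(uchar *pos,
                                               enum extra2_frm_value_type type,
                                               const uchar *value,
                                               size_t length)
{
  *pos++= (uchar) type;                 /* 1 byte: value type                 */
  if (length > 0 && length <= 255)
    *pos++= (uchar) length;             /* short form: 1 byte length (1..255) */
  else
  {
    *pos++= 0;                          /* long form: \0 marker ...           */
    int2store(pos, length);             /* ... followed by a 2-byte length    */
    pos+= 2;
  }
  memcpy(pos, value, length);           /* 'length' bytes of binary value     */
  return pos + length;
}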
+
+int rea_create_table(THD *thd, LEX_CUSTRING *frm,
+ const char *path, const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info, handler *file,
+ bool no_ha_create_table);
+LEX_CUSTRING build_frm_image(THD *thd, const char *table,
+ HA_CREATE_INFO *create_info,
+ List<Create_field> &create_fields,
+ uint keys, KEY *key_info, handler *db_file);
+
+#define FRM_HEADER_SIZE 64
+#define FRM_FORMINFO_SIZE 288
+#define FRM_MAX_SIZE (256*1024)
+
+static inline bool is_binary_frm_header(uchar *head)
+{
+ return head[0] == 254
+ && head[1] == 1
+ && head[2] >= FRM_VER
+ && head[2] <= FRM_VER+4;
+}
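/*
  Illustrative sketch (not part of this patch): a reader can combine
  FRM_HEADER_SIZE and is_binary_frm_header() to decide whether a .frm file
  contains a binary table definition.  The mysql_file_read() call with
  MYF(MY_NABP) mirrors how frm headers are read elsewhere; error handling is
  reduced to the bare minimum here.
*/
static inline bool frm_file_looks_binary_sketch(File file)
{
  uchar head[FRM_HEADER_SIZE];
  if (mysql_file_read(file, head, sizeof(head), MYF(MY_NABP)))
    return false;                       /* short read or I/O error */
  return is_binary_frm_header(head);
}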
+
#endif